Commit cd81a4bac67d44742ab0aa1848f4a78e9d7e1093
Committed by
Steven Whitehouse
1 parent
0507ecf50f
Exists in
master
and in
4 other branches
[GFS2] Addendum patch 2 for gfs2_grow
This addendum patch 2 corrects three things: 1. It fixes a stupid mistake in the previous addendum that broke gfs2. Ref: https://www.redhat.com/archives/cluster-devel/2007-May/msg00162.html 2. It fixes a problem that Dave Teigland pointed out regarding the external declarations in ops_address.h being in the wrong place. 3. It recasts a couple more %llu printks to (unsigned long long) as requested by Steve Whitehouse. I would have loved to put this all in one revised patch, but there was a rush to get some patches for RHEL5. Therefore, the previous patches were applied to the git tree "as is" and therefore, I'm posting another addendum. Sorry. Signed-off-by: Bob Peterson <rpeterso@redhat.com> Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
Showing 5 changed files with 9 additions and 9 deletions Inline Diff
fs/gfs2/glock.c
1 | /* | 1 | /* |
2 | * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. | 2 | * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. |
3 | * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved. | 3 | * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved. |
4 | * | 4 | * |
5 | * This copyrighted material is made available to anyone wishing to use, | 5 | * This copyrighted material is made available to anyone wishing to use, |
6 | * modify, copy, or redistribute it subject to the terms and conditions | 6 | * modify, copy, or redistribute it subject to the terms and conditions |
7 | * of the GNU General Public License version 2. | 7 | * of the GNU General Public License version 2. |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/sched.h> | 10 | #include <linux/sched.h> |
11 | #include <linux/slab.h> | 11 | #include <linux/slab.h> |
12 | #include <linux/spinlock.h> | 12 | #include <linux/spinlock.h> |
13 | #include <linux/completion.h> | 13 | #include <linux/completion.h> |
14 | #include <linux/buffer_head.h> | 14 | #include <linux/buffer_head.h> |
15 | #include <linux/delay.h> | 15 | #include <linux/delay.h> |
16 | #include <linux/sort.h> | 16 | #include <linux/sort.h> |
17 | #include <linux/jhash.h> | 17 | #include <linux/jhash.h> |
18 | #include <linux/kallsyms.h> | 18 | #include <linux/kallsyms.h> |
19 | #include <linux/gfs2_ondisk.h> | 19 | #include <linux/gfs2_ondisk.h> |
20 | #include <linux/list.h> | 20 | #include <linux/list.h> |
21 | #include <linux/lm_interface.h> | 21 | #include <linux/lm_interface.h> |
22 | #include <linux/wait.h> | 22 | #include <linux/wait.h> |
23 | #include <linux/module.h> | 23 | #include <linux/module.h> |
24 | #include <linux/rwsem.h> | 24 | #include <linux/rwsem.h> |
25 | #include <asm/uaccess.h> | 25 | #include <asm/uaccess.h> |
26 | #include <linux/seq_file.h> | 26 | #include <linux/seq_file.h> |
27 | #include <linux/debugfs.h> | 27 | #include <linux/debugfs.h> |
28 | #include <linux/module.h> | 28 | #include <linux/module.h> |
29 | #include <linux/kallsyms.h> | 29 | #include <linux/kallsyms.h> |
30 | 30 | ||
31 | #include "gfs2.h" | 31 | #include "gfs2.h" |
32 | #include "incore.h" | 32 | #include "incore.h" |
33 | #include "glock.h" | 33 | #include "glock.h" |
34 | #include "glops.h" | 34 | #include "glops.h" |
35 | #include "inode.h" | 35 | #include "inode.h" |
36 | #include "lm.h" | 36 | #include "lm.h" |
37 | #include "lops.h" | 37 | #include "lops.h" |
38 | #include "meta_io.h" | 38 | #include "meta_io.h" |
39 | #include "quota.h" | 39 | #include "quota.h" |
40 | #include "super.h" | 40 | #include "super.h" |
41 | #include "util.h" | 41 | #include "util.h" |
42 | 42 | ||
/* One chain head in the glock hash table. Wrapped in a struct so the
 * table below is an array of hlist heads. */
struct gfs2_gl_hash_bucket {
	struct hlist_head hb_list;
};
46 | 46 | ||
/* Iterator state used by the debugfs seq_file interface to walk every
 * glock in the hash table, one bucket at a time. */
struct glock_iter {
	int hash;                     /* hash bucket index */
	struct gfs2_sbd *sdp;         /* incore superblock */
	struct gfs2_glock *gl;        /* current glock struct */
	struct hlist_head *hb_list;   /* current hash bucket ptr */
	struct seq_file *seq;         /* sequence file for debugfs */
	char string[512];             /* scratch space */
};
55 | 55 | ||
56 | typedef void (*glock_examiner) (struct gfs2_glock * gl); | 56 | typedef void (*glock_examiner) (struct gfs2_glock * gl); |
57 | 57 | ||
58 | static int gfs2_dump_lockstate(struct gfs2_sbd *sdp); | 58 | static int gfs2_dump_lockstate(struct gfs2_sbd *sdp); |
59 | static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl); | 59 | static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl); |
60 | static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh); | 60 | static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh); |
61 | static void gfs2_glock_drop_th(struct gfs2_glock *gl); | 61 | static void gfs2_glock_drop_th(struct gfs2_glock *gl); |
62 | static DECLARE_RWSEM(gfs2_umount_flush_sem); | 62 | static DECLARE_RWSEM(gfs2_umount_flush_sem); |
63 | static struct dentry *gfs2_root; | 63 | static struct dentry *gfs2_root; |
64 | 64 | ||
65 | #define GFS2_GL_HASH_SHIFT 15 | 65 | #define GFS2_GL_HASH_SHIFT 15 |
66 | #define GFS2_GL_HASH_SIZE (1 << GFS2_GL_HASH_SHIFT) | 66 | #define GFS2_GL_HASH_SIZE (1 << GFS2_GL_HASH_SHIFT) |
67 | #define GFS2_GL_HASH_MASK (GFS2_GL_HASH_SIZE - 1) | 67 | #define GFS2_GL_HASH_MASK (GFS2_GL_HASH_SIZE - 1) |
68 | 68 | ||
69 | static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE]; | 69 | static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE]; |
70 | static struct dentry *gfs2_root; | 70 | static struct dentry *gfs2_root; |
71 | 71 | ||
/*
 * Despite what you might think, the numbers below are not arbitrary :-)
 * They are taken from the ipv4 routing hash code, which is well tested
 * and thus should be nearly optimal. Later on we might tweak the numbers
 * but for now this should be fine.
 *
 * The reason for putting the locks in a separate array from the list heads
 * is that we can have fewer locks than list heads and save memory. We use
 * the same hash function for both, but with a different hash mask.
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
	defined(CONFIG_PROVE_LOCKING)

/* NOTE(review): with lockdep enabled the table is kept small, presumably
 * to bound lockdep's per-lock tracking cost — confirm intent. */
#ifdef CONFIG_LOCKDEP
# define GL_HASH_LOCK_SZ        256
#else
# if NR_CPUS >= 32
# define GL_HASH_LOCK_SZ        4096
# elif NR_CPUS >= 16
# define GL_HASH_LOCK_SZ        2048
# elif NR_CPUS >= 8
# define GL_HASH_LOCK_SZ        1024
# elif NR_CPUS >= 4
# define GL_HASH_LOCK_SZ        512
# else
# define GL_HASH_LOCK_SZ        256
# endif
#endif

/* We never want more locks than chains */
#if GFS2_GL_HASH_SIZE < GL_HASH_LOCK_SZ
# undef GL_HASH_LOCK_SZ
# define GL_HASH_LOCK_SZ GFS2_GL_HASH_SIZE
#endif

static rwlock_t gl_hash_locks[GL_HASH_LOCK_SZ];

/* Map a hash bucket index to the rwlock guarding that bucket's chain.
 * GL_HASH_LOCK_SZ is a power of two, so the mask is a cheap modulo. */
static inline rwlock_t *gl_lock_addr(unsigned int x)
{
	return &gl_hash_locks[x & (GL_HASH_LOCK_SZ-1)];
}
#else /* not SMP, so no spinlocks required */
static inline rwlock_t *gl_lock_addr(unsigned int x)
{
	return NULL;
}
#endif
119 | 119 | ||
120 | /** | 120 | /** |
121 | * relaxed_state_ok - is a requested lock compatible with the current lock mode? | 121 | * relaxed_state_ok - is a requested lock compatible with the current lock mode? |
122 | * @actual: the current state of the lock | 122 | * @actual: the current state of the lock |
123 | * @requested: the lock state that was requested by the caller | 123 | * @requested: the lock state that was requested by the caller |
124 | * @flags: the modifier flags passed in by the caller | 124 | * @flags: the modifier flags passed in by the caller |
125 | * | 125 | * |
126 | * Returns: 1 if the locks are compatible, 0 otherwise | 126 | * Returns: 1 if the locks are compatible, 0 otherwise |
127 | */ | 127 | */ |
128 | 128 | ||
129 | static inline int relaxed_state_ok(unsigned int actual, unsigned requested, | 129 | static inline int relaxed_state_ok(unsigned int actual, unsigned requested, |
130 | int flags) | 130 | int flags) |
131 | { | 131 | { |
132 | if (actual == requested) | 132 | if (actual == requested) |
133 | return 1; | 133 | return 1; |
134 | 134 | ||
135 | if (flags & GL_EXACT) | 135 | if (flags & GL_EXACT) |
136 | return 0; | 136 | return 0; |
137 | 137 | ||
138 | if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED) | 138 | if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED) |
139 | return 1; | 139 | return 1; |
140 | 140 | ||
141 | if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY)) | 141 | if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY)) |
142 | return 1; | 142 | return 1; |
143 | 143 | ||
144 | return 0; | 144 | return 0; |
145 | } | 145 | } |
146 | 146 | ||
/**
 * gl_hash() - Turn glock number into hash bucket number
 * @sdp: The GFS2 superblock, mixed into the hash so identical lock
 *       numbers on different filesystems land in different buckets
 * @name: The glock number
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(const struct gfs2_sbd *sdp,
			    const struct lm_lockname *name)
{
	unsigned int h;

	h = jhash(&name->ln_number, sizeof(u64), 0);
	h = jhash(&name->ln_type, sizeof(unsigned int), h);
	/* Hashes the pointer value itself (address bytes) as a per-fs salt. */
	h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
	h &= GFS2_GL_HASH_MASK;

	return h;
}
166 | 166 | ||
/**
 * glock_free() - Perform a few checks and then release struct gfs2_glock
 * @gl: The glock to release
 *
 * Also calls lock module to release its internal structure for this glock.
 *
 */

static void glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct inode *aspace = gl->gl_aspace;

	/* Release the lock module's private object for this glock first. */
	gfs2_lm_put_lock(sdp, gl->gl_lock);

	/* Drop the address-space inode, if one was attached (only inode
	 * and rgrp glocks get one — see gfs2_glock_get()). */
	if (aspace)
		gfs2_aspace_put(aspace);

	kmem_cache_free(gfs2_glock_cachep, gl);
}
187 | 187 | ||
/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 * NOTE(review): no locking taken here — the caller must already hold a
 * reference (or otherwise know the glock cannot be freed underneath it).
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
	atomic_inc(&gl->gl_ref);
}
198 | 198 | ||
/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 * Returns: 1 if the reference dropped to zero and the glock was freed,
 *          0 otherwise
 */

int gfs2_glock_put(struct gfs2_glock *gl)
{
	int rv = 0;
	struct gfs2_sbd *sdp = gl->gl_sbd;

	/* Hold the bucket lock so the dec-and-unhash is atomic with
	 * respect to concurrent bucket searches. */
	write_lock(gl_lock_addr(gl->gl_hash));
	if (atomic_dec_and_test(&gl->gl_ref)) {
		hlist_del(&gl->gl_list);
		write_unlock(gl_lock_addr(gl->gl_hash));
		/* Sanity checks: a dying glock must be unlocked, idle, and
		 * have no queued holders or waiters. */
		BUG_ON(spin_is_locked(&gl->gl_spin));
		gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED);
		gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
		gfs2_assert(sdp, list_empty(&gl->gl_holders));
		gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
		gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
		glock_free(gl);
		rv = 1;
		goto out;	/* bucket lock was already dropped above */
	}
	write_unlock(gl_lock_addr(gl->gl_hash));
out:
	return rv;
}
228 | 228 | ||
/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @hash: index of the bucket to search
 * @sdp: the filesystem the glock must belong to
 * @name: The lock name
 *
 * The caller is expected to hold the bucket's gl_lock_addr() lock
 * (all callers in this file take it around this call).
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number,
 *          with its reference count incremented
 */

static struct gfs2_glock *search_bucket(unsigned int hash,
					const struct gfs2_sbd *sdp,
					const struct lm_lockname *name)
{
	struct gfs2_glock *gl;
	struct hlist_node *h;

	hlist_for_each_entry(gl, h, &gl_hash_table[hash].hb_list, gl_list) {
		if (!lm_name_equal(&gl->gl_name, name))
			continue;
		/* The same lock name can exist on several mounted
		 * filesystems; match the superblock too. */
		if (gl->gl_sbd != sdp)
			continue;

		atomic_inc(&gl->gl_ref);

		return gl;
	}

	return NULL;
}
257 | 257 | ||
/**
 * gfs2_glock_find() - Find glock by lock number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Takes the bucket lock itself, unlike search_bucket().
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 *          (reference count already incremented by search_bucket())
 */

static struct gfs2_glock *gfs2_glock_find(const struct gfs2_sbd *sdp,
					  const struct lm_lockname *name)
{
	unsigned int hash = gl_hash(sdp, name);
	struct gfs2_glock *gl;

	read_lock(gl_lock_addr(hash));
	gl = search_bucket(hash, sdp, name);
	read_unlock(gl_lock_addr(hash));

	return gl;
}
278 | 278 | ||
/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
		   const struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{
	struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
	struct gfs2_glock *gl, *tmp;
	unsigned int hash = gl_hash(sdp, &name);
	int error;

	/* Fast path: the glock may already exist in the hash table. */
	read_lock(gl_lock_addr(hash));
	gl = search_bucket(hash, sdp, &name);
	read_unlock(gl_lock_addr(hash));

	if (gl || !create) {
		*glp = gl;
		return 0;
	}

	gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
	if (!gl)
		return -ENOMEM;

	gl->gl_flags = 0;
	gl->gl_name = name;
	atomic_set(&gl->gl_ref, 1);	/* the reference handed back via *glp */
	gl->gl_state = LM_ST_UNLOCKED;
	gl->gl_hash = hash;
	gl->gl_owner_pid = 0;
	gl->gl_ip = 0;
	gl->gl_ops = glops;
	gl->gl_req_gh = NULL;
	gl->gl_req_bh = NULL;
	gl->gl_vn = 0;
	gl->gl_stamp = jiffies;
	gl->gl_object = NULL;
	gl->gl_sbd = sdp;
	gl->gl_aspace = NULL;
	lops_init_le(&gl->gl_le, &gfs2_glock_lops);

	/* If this glock protects actual on-disk data or metadata blocks,
	   create a VFS inode to manage the pages/buffers holding them. */
	if (glops == &gfs2_inode_glops || glops == &gfs2_rgrp_glops) {
		gl->gl_aspace = gfs2_aspace_get(sdp);
		if (!gl->gl_aspace) {
			error = -ENOMEM;
			goto fail;
		}
	}

	error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
	if (error)
		goto fail_aspace;

	/* Re-search under the write lock: another task may have raced us
	 * and inserted the same glock while we were allocating. */
	write_lock(gl_lock_addr(hash));
	tmp = search_bucket(hash, sdp, &name);
	if (tmp) {
		write_unlock(gl_lock_addr(hash));
		glock_free(gl);		/* lost the race; use the winner's */
		gl = tmp;
	} else {
		hlist_add_head(&gl->gl_list, &gl_hash_table[hash].hb_list);
		write_unlock(gl_lock_addr(hash));
	}

	*glp = gl;

	return 0;

fail_aspace:
	if (gl->gl_aspace)
		gfs2_aspace_put(gl->gl_aspace);
fail:
	kmem_cache_free(gfs2_glock_cachep, gl);
	return error;
}
367 | 367 | ||
/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Takes a reference on @gl, dropped again by gfs2_holder_uninit().
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
		      struct gfs2_holder *gh)
{
	INIT_LIST_HEAD(&gh->gh_list);
	gh->gh_gl = gl;
	/* Record the caller's return address for debugging/lock dumps. */
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	gh->gh_owner_pid = current->pid;
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_error = 0;
	gh->gh_iflags = 0;
	gfs2_glock_hold(gl);
}
390 | 390 | ||
/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock. The existing glock reference is kept,
 * unlike gfs2_holder_init(), which takes a new one.
 *
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_iflags = 0;	/* clear HIF_* state left from the prior use */
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
}
408 | 408 | ||
/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 * Drops the glock reference taken by gfs2_holder_init().
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_put(gh->gh_gl);
	gh->gh_gl = NULL;
	gh->gh_ip = 0;
}
421 | 421 | ||
/* Clear HIF_WAIT and wake any task sleeping in wait_on_holder(). The
 * smp_mb() orders the bit clear before the wakeup so a waiter cannot
 * see a stale set bit after being woken. */
static void gfs2_holder_wake(struct gfs2_holder *gh)
{
	clear_bit(HIF_WAIT, &gh->gh_iflags);
	smp_mb();
	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}
428 | 428 | ||
/* wait_on_bit() action function: sleep until woken; never signal error. */
static int holder_wait(void *word)
{
	schedule();
	return 0;
}
434 | 434 | ||
/* Sleep uninterruptibly until HIF_WAIT is cleared by gfs2_holder_wake(). */
static void wait_on_holder(struct gfs2_holder *gh)
{
	might_sleep();
	wait_on_bit(&gh->gh_iflags, HIF_WAIT, holder_wait, TASK_UNINTERRUPTIBLE);
}
440 | 440 | ||
441 | /** | 441 | /** |
442 | * rq_mutex - process a mutex request in the queue | 442 | * rq_mutex - process a mutex request in the queue |
443 | * @gh: the glock holder | 443 | * @gh: the glock holder |
444 | * | 444 | * |
445 | * Returns: 1 if the queue is blocked | 445 | * Returns: 1 if the queue is blocked |
446 | */ | 446 | */ |
447 | 447 | ||
448 | static int rq_mutex(struct gfs2_holder *gh) | 448 | static int rq_mutex(struct gfs2_holder *gh) |
449 | { | 449 | { |
450 | struct gfs2_glock *gl = gh->gh_gl; | 450 | struct gfs2_glock *gl = gh->gh_gl; |
451 | 451 | ||
452 | list_del_init(&gh->gh_list); | 452 | list_del_init(&gh->gh_list); |
453 | /* gh->gh_error never examined. */ | 453 | /* gh->gh_error never examined. */ |
454 | set_bit(GLF_LOCK, &gl->gl_flags); | 454 | set_bit(GLF_LOCK, &gl->gl_flags); |
455 | clear_bit(HIF_WAIT, &gh->gh_iflags); | 455 | clear_bit(HIF_WAIT, &gh->gh_iflags); |
456 | smp_mb(); | 456 | smp_mb(); |
457 | wake_up_bit(&gh->gh_iflags, HIF_WAIT); | 457 | wake_up_bit(&gh->gh_iflags, HIF_WAIT); |
458 | 458 | ||
459 | return 1; | 459 | return 1; |
460 | } | 460 | } |
461 | 461 | ||
/**
 * rq_promote - process a promote request in the queue
 * @gh: the glock holder
 *
 * Acquire a new inter-node lock, or change a lock state to more restrictive.
 *
 * Called with gl->gl_spin held; it may drop and retake that lock around
 * the state-change request.
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_promote(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;

	if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
		/* The glock needs a state change; only start one once no
		 * holders remain. */
		if (list_empty(&gl->gl_holders)) {
			gl->gl_req_gh = gh;
			set_bit(GLF_LOCK, &gl->gl_flags);
			spin_unlock(&gl->gl_spin);

			/* Trim the glock cache if it has grown past the
			 * reclaim limit — skipped for priority requests. */
			if (atomic_read(&sdp->sd_reclaim_count) >
			    gfs2_tune_get(sdp, gt_reclaim_limit) &&
			    !(gh->gh_flags & LM_FLAG_PRIORITY)) {
				gfs2_reclaim_glock(sdp);
				gfs2_reclaim_glock(sdp);
			}

			gfs2_glock_xmote_th(gh->gh_gl, gh);
			spin_lock(&gl->gl_spin);
		}
		return 1;
	}

	if (list_empty(&gl->gl_holders)) {
		/* First holder: take ownership of the glock. */
		set_bit(HIF_FIRST, &gh->gh_iflags);
		set_bit(GLF_LOCK, &gl->gl_flags);
	} else {
		struct gfs2_holder *next_gh;
		/* An exclusive request cannot share with existing holders,
		 * nor can any request join an exclusive holder. */
		if (gh->gh_state == LM_ST_EXCLUSIVE)
			return 1;
		next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
				     gh_list);
		if (next_gh->gh_state == LM_ST_EXCLUSIVE)
			return 1;
	}

	/* Grant the holder and wake whoever is waiting on it. */
	list_move_tail(&gh->gh_list, &gl->gl_holders);
	gh->gh_error = 0;
	set_bit(HIF_HOLDER, &gh->gh_iflags);

	gfs2_holder_wake(gh);

	return 0;
}
516 | 516 | ||
/**
 * rq_demote - process a demote request in the queue
 * @gl: the glock being demoted
 *
 * Called with gl_spin held; temporarily drops it around the lock-module
 * call, which may block.
 *
 * Returns: 1 if the queue is blocked (holders still exist), 0 otherwise
 */

static int rq_demote(struct gfs2_glock *gl)
{
	/* Cannot demote while anyone still holds the lock. */
	if (!list_empty(&gl->gl_holders))
		return 1;

	/* Already at the requested state (or fully unlocked): done. */
	if (gl->gl_state == gl->gl_demote_state ||
	    gl->gl_state == LM_ST_UNLOCKED) {
		clear_bit(GLF_DEMOTE, &gl->gl_flags);
		return 0;
	}
	/* Take the glock mutex, then drop gl_spin for the blocking call. */
	set_bit(GLF_LOCK, &gl->gl_flags);
	spin_unlock(&gl->gl_spin);
	/* Drop the lock entirely unless we can convert EX down in place. */
	if (gl->gl_demote_state == LM_ST_UNLOCKED ||
	    gl->gl_state != LM_ST_EXCLUSIVE)
		gfs2_glock_drop_th(gl);
	else
		gfs2_glock_xmote_th(gl, NULL);
	spin_lock(&gl->gl_spin);

	return 0;
}
545 | 545 | ||
/**
 * run_queue - process holder structures on a glock
 * @gl: the glock
 *
 * Called with gl_spin held.  Dispatches queued requests in priority
 * order: waiters1 (glmutex requests), then a pending demote, then
 * waiters3 (promote requests).  Stops as soon as a request reports the
 * queue blocked, the glock mutex is already taken (GLF_LOCK), or there
 * is nothing left to do.
 */
static void run_queue(struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;
	int blocked = 1;

	for (;;) {
		/* Someone else holds the glock mutex; nothing to do. */
		if (test_bit(GLF_LOCK, &gl->gl_flags))
			break;

		if (!list_empty(&gl->gl_waiters1)) {
			gh = list_entry(gl->gl_waiters1.next,
					struct gfs2_holder, gh_list);

			/* waiters1 should only ever carry mutex requests */
			if (test_bit(HIF_MUTEX, &gh->gh_iflags))
				blocked = rq_mutex(gh);
			else
				gfs2_assert_warn(gl->gl_sbd, 0);

		} else if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
			blocked = rq_demote(gl);
		} else if (!list_empty(&gl->gl_waiters3)) {
			gh = list_entry(gl->gl_waiters3.next,
					struct gfs2_holder, gh_list);

			/* waiters3 should only ever carry promote requests */
			if (test_bit(HIF_PROMOTE, &gh->gh_iflags))
				blocked = rq_promote(gh);
			else
				gfs2_assert_warn(gl->gl_sbd, 0);

		} else
			break;

		if (blocked)
			break;
	}
}
587 | 587 | ||
/**
 * gfs2_glmutex_lock - acquire a local lock on a glock
 * @gl: the glock
 *
 * Gives caller exclusive access to manipulate a glock structure.
 * Uses an on-stack holder marked HIF_MUTEX: if GLF_LOCK is already
 * set, the holder is queued on gl_waiters1 and we sleep on HIF_WAIT
 * until rq_mutex() hands the mutex to us.
 */

static void gfs2_glmutex_lock(struct gfs2_glock *gl)
{
	struct gfs2_holder gh;

	gfs2_holder_init(gl, 0, 0, &gh);
	set_bit(HIF_MUTEX, &gh.gh_iflags);
	/* HIF_WAIT must not already be set on a fresh holder */
	if (test_and_set_bit(HIF_WAIT, &gh.gh_iflags))
		BUG();

	spin_lock(&gl->gl_spin);
	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
		/* Contended: queue ourselves; wait_on_holder() sleeps below */
		list_add_tail(&gh.gh_list, &gl->gl_waiters1);
	} else {
		/* Got it immediately: record owner and clear the wait bit */
		gl->gl_owner_pid = current->pid;
		gl->gl_ip = (unsigned long)__builtin_return_address(0);
		clear_bit(HIF_WAIT, &gh.gh_iflags);
		/* barrier before waking: waiter must observe the clear */
		smp_mb();
		wake_up_bit(&gh.gh_iflags, HIF_WAIT);
	}
	spin_unlock(&gl->gl_spin);

	wait_on_holder(&gh);
	gfs2_holder_uninit(&gh);
}
619 | 619 | ||
620 | /** | 620 | /** |
621 | * gfs2_glmutex_trylock - try to acquire a local lock on a glock | 621 | * gfs2_glmutex_trylock - try to acquire a local lock on a glock |
622 | * @gl: the glock | 622 | * @gl: the glock |
623 | * | 623 | * |
624 | * Returns: 1 if the glock is acquired | 624 | * Returns: 1 if the glock is acquired |
625 | */ | 625 | */ |
626 | 626 | ||
627 | static int gfs2_glmutex_trylock(struct gfs2_glock *gl) | 627 | static int gfs2_glmutex_trylock(struct gfs2_glock *gl) |
628 | { | 628 | { |
629 | int acquired = 1; | 629 | int acquired = 1; |
630 | 630 | ||
631 | spin_lock(&gl->gl_spin); | 631 | spin_lock(&gl->gl_spin); |
632 | if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) { | 632 | if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) { |
633 | acquired = 0; | 633 | acquired = 0; |
634 | } else { | 634 | } else { |
635 | gl->gl_owner_pid = current->pid; | 635 | gl->gl_owner_pid = current->pid; |
636 | gl->gl_ip = (unsigned long)__builtin_return_address(0); | 636 | gl->gl_ip = (unsigned long)__builtin_return_address(0); |
637 | } | 637 | } |
638 | spin_unlock(&gl->gl_spin); | 638 | spin_unlock(&gl->gl_spin); |
639 | 639 | ||
640 | return acquired; | 640 | return acquired; |
641 | } | 641 | } |
642 | 642 | ||
643 | /** | 643 | /** |
644 | * gfs2_glmutex_unlock - release a local lock on a glock | 644 | * gfs2_glmutex_unlock - release a local lock on a glock |
645 | * @gl: the glock | 645 | * @gl: the glock |
646 | * | 646 | * |
647 | */ | 647 | */ |
648 | 648 | ||
649 | static void gfs2_glmutex_unlock(struct gfs2_glock *gl) | 649 | static void gfs2_glmutex_unlock(struct gfs2_glock *gl) |
650 | { | 650 | { |
651 | spin_lock(&gl->gl_spin); | 651 | spin_lock(&gl->gl_spin); |
652 | clear_bit(GLF_LOCK, &gl->gl_flags); | 652 | clear_bit(GLF_LOCK, &gl->gl_flags); |
653 | gl->gl_owner_pid = 0; | 653 | gl->gl_owner_pid = 0; |
654 | gl->gl_ip = 0; | 654 | gl->gl_ip = 0; |
655 | run_queue(gl); | 655 | run_queue(gl); |
656 | BUG_ON(!spin_is_locked(&gl->gl_spin)); | 656 | BUG_ON(!spin_is_locked(&gl->gl_spin)); |
657 | spin_unlock(&gl->gl_spin); | 657 | spin_unlock(&gl->gl_spin); |
658 | } | 658 | } |
659 | 659 | ||
/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 *
 * There are only two requests that we are going to see in actual
 * practise: LM_ST_SHARED and LM_ST_UNLOCKED
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state)
{
	spin_lock(&gl->gl_spin);
	if (test_and_set_bit(GLF_DEMOTE, &gl->gl_flags) == 0) {
		/* First demote request: record target state and timestamp */
		gl->gl_demote_state = state;
		gl->gl_demote_time = jiffies;
	} else if (gl->gl_demote_state != LM_ST_UNLOCKED) {
		/* A demote is already pending; overwrite the target unless
		   the pending request is LM_ST_UNLOCKED — a request to
		   unlock fully is never weakened to a lesser demote. */
		gl->gl_demote_state = state;
	}
	spin_unlock(&gl->gl_spin);
}
680 | 680 | ||
681 | /** | 681 | /** |
682 | * state_change - record that the glock is now in a different state | 682 | * state_change - record that the glock is now in a different state |
683 | * @gl: the glock | 683 | * @gl: the glock |
684 | * @new_state the new state | 684 | * @new_state the new state |
685 | * | 685 | * |
686 | */ | 686 | */ |
687 | 687 | ||
688 | static void state_change(struct gfs2_glock *gl, unsigned int new_state) | 688 | static void state_change(struct gfs2_glock *gl, unsigned int new_state) |
689 | { | 689 | { |
690 | int held1, held2; | 690 | int held1, held2; |
691 | 691 | ||
692 | held1 = (gl->gl_state != LM_ST_UNLOCKED); | 692 | held1 = (gl->gl_state != LM_ST_UNLOCKED); |
693 | held2 = (new_state != LM_ST_UNLOCKED); | 693 | held2 = (new_state != LM_ST_UNLOCKED); |
694 | 694 | ||
695 | if (held1 != held2) { | 695 | if (held1 != held2) { |
696 | if (held2) | 696 | if (held2) |
697 | gfs2_glock_hold(gl); | 697 | gfs2_glock_hold(gl); |
698 | else | 698 | else |
699 | gfs2_glock_put(gl); | 699 | gfs2_glock_put(gl); |
700 | } | 700 | } |
701 | 701 | ||
702 | gl->gl_state = new_state; | 702 | gl->gl_state = new_state; |
703 | } | 703 | } |
704 | 704 | ||
/**
 * xmote_bh - Called after the lock module is done acquiring a lock
 * @gl: The glock in question
 * @ret: the int returned from the lock module
 *
 * Records the new state, invalidates cached data where required, and
 * deals with each possible completion: no requesting holder, shutdown,
 * cancellation, success, try-failure, or an invalid result (withdraw).
 */

static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh = gl->gl_req_gh;
	int prev_state = gl->gl_state;
	int op_done = 1;

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
	gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));

	state_change(gl, ret & LM_OUT_ST_MASK);

	if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
		/* Lost the right to cache: invalidate metadata */
		if (glops->go_inval)
			glops->go_inval(gl, DIO_METADATA);
	} else if (gl->gl_state == LM_ST_DEFERRED) {
		/* We might not want to do this here.
		   Look at moving to the inode glops. */
		if (glops->go_inval)
			glops->go_inval(gl, 0);
	}

	/* Deal with each possible exit condition */

	if (!gh) {
		/* No holder was waiting on this request (e.g. a demote) */
		gl->gl_stamp = jiffies;
		if (ret & LM_OUT_CANCELED)
			op_done = 0;
		else
			clear_bit(GLF_DEMOTE, &gl->gl_flags);
	} else {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		/* gh_error is set pessimistically and refined as each
		   outcome below is ruled out */
		gh->gh_error = -EIO;
		if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
			goto out;
		gh->gh_error = GLR_CANCELED;
		if (ret & LM_OUT_CANCELED)
			goto out;
		if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
			/* Success: the holder becomes the (first) holder;
			   keep GLF_LOCK held for go_lock (op_done = 0) */
			list_add_tail(&gh->gh_list, &gl->gl_holders);
			gh->gh_error = 0;
			set_bit(HIF_HOLDER, &gh->gh_iflags);
			set_bit(HIF_FIRST, &gh->gh_iflags);
			op_done = 0;
			goto out;
		}
		gh->gh_error = GLR_TRYFAILED;
		if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
			goto out;
		/* None of the expected outcomes matched: withdraw */
		gh->gh_error = -EINVAL;
		if (gfs2_assert_withdraw(sdp, 0) == -1)
			fs_err(sdp, "ret = 0x%.8X\n", ret);
out:
		spin_unlock(&gl->gl_spin);
	}

	if (glops->go_xmote_bh)
		glops->go_xmote_bh(gl);

	if (op_done) {
		/* Request fully finished: release the glock mutex */
		spin_lock(&gl->gl_spin);
		gl->gl_req_gh = NULL;
		gl->gl_req_bh = NULL;
		clear_bit(GLF_LOCK, &gl->gl_flags);
		run_queue(gl);
		spin_unlock(&gl->gl_spin);
	}

	/* Drop the reference taken by gfs2_glock_xmote_th() */
	gfs2_glock_put(gl);

	if (gh)
		gfs2_holder_wake(gh);
}
788 | 788 | ||
/**
 * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
 * @gl: The glock in question
 * @gh: the holder driving the request, or NULL for a demote (in which
 *      case the target state is taken from gl_demote_state)
 *
 * Takes a reference on the glock which xmote_bh() drops when the
 * request completes.  Must be called with GLF_LOCK held and no holders.
 */

void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int flags = gh ? gh->gh_flags : 0;
	unsigned state = gh ? gh->gh_state : gl->gl_demote_state;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	/* Only these modifier flags are passed through to the lock module */
	int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
				 LM_FLAG_NOEXP | LM_FLAG_ANY |
				 LM_FLAG_PRIORITY);
	unsigned int lck_ret;

	if (glops->go_xmote_th)
		glops->go_xmote_th(gl);

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
	gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
	gfs2_assert_warn(sdp, state != gl->gl_state);

	/* Reference held across the async request; released in xmote_bh() */
	gfs2_glock_hold(gl);
	gl->gl_req_bh = xmote_bh;

	lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state, lck_flags);

	if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR)))
		return;

	if (lck_ret & LM_OUT_ASYNC)
		/* completion will arrive via gl_req_bh */
		gfs2_assert_warn(sdp, lck_ret == LM_OUT_ASYNC);
	else
		xmote_bh(gl, lck_ret);
}
829 | 829 | ||
/**
 * drop_bh - Called after a lock module unlock completes
 * @gl: the glock
 * @ret: the return status
 *
 * Doesn't wake up the process waiting on the struct gfs2_holder (if any)
 * Doesn't drop the reference on the glock the top half took out
 *
 * NOTE(review): despite the comment above, this function DOES call
 * gfs2_glock_put() and gfs2_holder_wake() at the end — the header
 * appears stale; confirm against callers.
 */

static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh = gl->gl_req_gh;

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
	gfs2_assert_warn(sdp, !ret);

	state_change(gl, LM_ST_UNLOCKED);
	/* A full unlock always satisfies any pending demote */
	clear_bit(GLF_DEMOTE, &gl->gl_flags);

	/* Dropping the lock means we can no longer cache metadata */
	if (glops->go_inval)
		glops->go_inval(gl, DIO_METADATA);

	if (gh) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = 0;
		spin_unlock(&gl->gl_spin);
	}

	/* Request complete: release the glock mutex and run the queue */
	spin_lock(&gl->gl_spin);
	gl->gl_req_gh = NULL;
	gl->gl_req_bh = NULL;
	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);

	/* Drop the reference taken by gfs2_glock_drop_th() */
	gfs2_glock_put(gl);

	if (gh)
		gfs2_holder_wake(gh);
}
875 | 875 | ||
/**
 * gfs2_glock_drop_th - call into the lock module to unlock a lock
 * @gl: the glock
 *
 * Takes a reference on the glock which drop_bh() releases when the
 * unlock completes.  Must be called with GLF_LOCK held, no holders,
 * and the glock not already unlocked.
 */

static void gfs2_glock_drop_th(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned int ret;

	if (glops->go_drop_th)
		glops->go_drop_th(gl);

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
	gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);

	/* Reference held across the async request; released in drop_bh() */
	gfs2_glock_hold(gl);
	gl->gl_req_bh = drop_bh;

	ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);

	if (gfs2_assert_withdraw(sdp, !(ret & LM_OUT_ERROR)))
		return;

	if (!ret)
		/* Synchronous completion */
		drop_bh(gl, ret);
	else
		/* Otherwise the only valid result is an async completion */
		gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC);
}
908 | 908 | ||
/**
 * do_cancels - cancel requests for locks stuck waiting on an expire flag
 * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock
 *
 * Don't cancel GL_NOCANCEL requests.
 *
 * Polls until @gh is granted, becomes the current request, or leaves
 * the queue, issuing a lock-module cancel for any other in-flight
 * request and sleeping 100ms between attempts.
 */

static void do_cancels(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	spin_lock(&gl->gl_spin);

	while (gl->gl_req_gh != gh &&
	       !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
	       !list_empty(&gh->gh_list)) {
		/* Cancel only when a request is in flight and it is not
		   marked GL_NOCANCEL */
		if (gl->gl_req_bh && !(gl->gl_req_gh &&
				(gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
			/* gl_spin is dropped around the blocking cancel call */
			spin_unlock(&gl->gl_spin);
			gfs2_lm_cancel(gl->gl_sbd, gl->gl_lock);
			msleep(100);
			spin_lock(&gl->gl_spin);
		} else {
			/* Nothing cancellable right now; back off and retry */
			spin_unlock(&gl->gl_spin);
			msleep(100);
			spin_lock(&gl->gl_spin);
		}
	}

	spin_unlock(&gl->gl_spin);
}
940 | 940 | ||
/**
 * glock_wait_internal - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success, or a negative errno / GLR_* code in gh_error
 */

static int glock_wait_internal(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (test_bit(HIF_ABORTED, &gh->gh_iflags))
		return -EIO;

	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		/* A try-lock that is still queued (not current request, not
		   granted) fails immediately rather than waiting */
		spin_lock(&gl->gl_spin);
		if (gl->gl_req_gh != gh &&
		    !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
		    !list_empty(&gh->gh_list)) {
			list_del_init(&gh->gh_list);
			gh->gh_error = GLR_TRYFAILED;
			run_queue(gl);
			spin_unlock(&gl->gl_spin);
			return gh->gh_error;
		}
		spin_unlock(&gl->gl_spin);
	}

	/* Priority requests try to cancel whatever is in their way */
	if (gh->gh_flags & LM_FLAG_PRIORITY)
		do_cancels(gh);

	wait_on_holder(gh);
	if (gh->gh_error)
		return gh->gh_error;

	gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
	gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state, gh->gh_state,
						   gh->gh_flags));

	if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
		/* First holder after a state change: GLF_LOCK is still held
		   (see xmote_bh) so go_lock runs with the glock mutex */
		gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));

		if (glops->go_lock) {
			gh->gh_error = glops->go_lock(gh);
			if (gh->gh_error) {
				spin_lock(&gl->gl_spin);
				list_del_init(&gh->gh_list);
				spin_unlock(&gl->gl_spin);
			}
		}

		/* go_lock done (or absent): release the glock mutex */
		spin_lock(&gl->gl_spin);
		gl->gl_req_gh = NULL;
		gl->gl_req_bh = NULL;
		clear_bit(GLF_LOCK, &gl->gl_flags);
		run_queue(gl);
		spin_unlock(&gl->gl_spin);
	}

	return gh->gh_error;
}
1004 | 1004 | ||
1005 | static inline struct gfs2_holder * | 1005 | static inline struct gfs2_holder * |
1006 | find_holder_by_owner(struct list_head *head, pid_t pid) | 1006 | find_holder_by_owner(struct list_head *head, pid_t pid) |
1007 | { | 1007 | { |
1008 | struct gfs2_holder *gh; | 1008 | struct gfs2_holder *gh; |
1009 | 1009 | ||
1010 | list_for_each_entry(gh, head, gh_list) { | 1010 | list_for_each_entry(gh, head, gh_list) { |
1011 | if (gh->gh_owner_pid == pid) | 1011 | if (gh->gh_owner_pid == pid) |
1012 | return gh; | 1012 | return gh; |
1013 | } | 1013 | } |
1014 | 1014 | ||
1015 | return NULL; | 1015 | return NULL; |
1016 | } | 1016 | } |
1017 | 1017 | ||
1018 | static void print_dbg(struct glock_iter *gi, const char *fmt, ...) | 1018 | static void print_dbg(struct glock_iter *gi, const char *fmt, ...) |
1019 | { | 1019 | { |
1020 | va_list args; | 1020 | va_list args; |
1021 | 1021 | ||
1022 | va_start(args, fmt); | 1022 | va_start(args, fmt); |
1023 | if (gi) { | 1023 | if (gi) { |
1024 | vsprintf(gi->string, fmt, args); | 1024 | vsprintf(gi->string, fmt, args); |
1025 | seq_printf(gi->seq, gi->string); | 1025 | seq_printf(gi->seq, gi->string); |
1026 | } | 1026 | } |
1027 | else | 1027 | else |
1028 | vprintk(fmt, args); | 1028 | vprintk(fmt, args); |
1029 | va_end(args); | 1029 | va_end(args); |
1030 | } | 1030 | } |
1031 | 1031 | ||
/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 * Queues @gh on the glock's gl_waiters3 list.  Before queueing, the
 * holder's owner pid is checked against every active holder and every
 * queued waiter: the same process holding (or already waiting for) the
 * same glock again is recursive locking, which is forbidden, so both
 * acquisition sites are reported and the kernel BUG()s.
 * Caller holds gl->gl_spin (see gfs2_glock_nq()).
 */

static void add_to_queue(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_holder *existing;

	BUG_ON(!gh->gh_owner_pid);
	/* A holder may only be queued once: HIF_WAIT must not be set yet */
	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
		BUG();

	/* Recursion check #1: is this pid already an active holder? */
	existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner_pid);
	if (existing) {
		print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
		printk(KERN_INFO "pid : %d\n", existing->gh_owner_pid);
		printk(KERN_INFO "lock type : %d lock state : %d\n",
				existing->gh_gl->gl_name.ln_type, existing->gh_gl->gl_state);
		print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
		printk(KERN_INFO "pid : %d\n", gh->gh_owner_pid);
		printk(KERN_INFO "lock type : %d lock state : %d\n",
				gl->gl_name.ln_type, gl->gl_state);
		BUG();
	}

	/* Recursion check #2: is this pid already waiting for the glock? */
	existing = find_holder_by_owner(&gl->gl_waiters3, gh->gh_owner_pid);
	if (existing) {
		print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
		print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
		BUG();
	}

	/* Priority requests jump the queue; everyone else waits FIFO */
	if (gh->gh_flags & LM_FLAG_PRIORITY)
		list_add(&gh->gh_list, &gl->gl_waiters3);
	else
		list_add_tail(&gh->gh_list, &gl->gl_waiters3);
}
1072 | 1072 | ||
1073 | /** | 1073 | /** |
1074 | * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock) | 1074 | * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock) |
1075 | * @gh: the holder structure | 1075 | * @gh: the holder structure |
1076 | * | 1076 | * |
1077 | * if (gh->gh_flags & GL_ASYNC), this never returns an error | 1077 | * if (gh->gh_flags & GL_ASYNC), this never returns an error |
1078 | * | 1078 | * |
1079 | * Returns: 0, GLR_TRYFAILED, or errno on failure | 1079 | * Returns: 0, GLR_TRYFAILED, or errno on failure |
1080 | */ | 1080 | */ |
1081 | 1081 | ||
1082 | int gfs2_glock_nq(struct gfs2_holder *gh) | 1082 | int gfs2_glock_nq(struct gfs2_holder *gh) |
1083 | { | 1083 | { |
1084 | struct gfs2_glock *gl = gh->gh_gl; | 1084 | struct gfs2_glock *gl = gh->gh_gl; |
1085 | struct gfs2_sbd *sdp = gl->gl_sbd; | 1085 | struct gfs2_sbd *sdp = gl->gl_sbd; |
1086 | int error = 0; | 1086 | int error = 0; |
1087 | 1087 | ||
1088 | restart: | 1088 | restart: |
1089 | if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) { | 1089 | if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) { |
1090 | set_bit(HIF_ABORTED, &gh->gh_iflags); | 1090 | set_bit(HIF_ABORTED, &gh->gh_iflags); |
1091 | return -EIO; | 1091 | return -EIO; |
1092 | } | 1092 | } |
1093 | 1093 | ||
1094 | set_bit(HIF_PROMOTE, &gh->gh_iflags); | 1094 | set_bit(HIF_PROMOTE, &gh->gh_iflags); |
1095 | 1095 | ||
1096 | spin_lock(&gl->gl_spin); | 1096 | spin_lock(&gl->gl_spin); |
1097 | add_to_queue(gh); | 1097 | add_to_queue(gh); |
1098 | run_queue(gl); | 1098 | run_queue(gl); |
1099 | spin_unlock(&gl->gl_spin); | 1099 | spin_unlock(&gl->gl_spin); |
1100 | 1100 | ||
1101 | if (!(gh->gh_flags & GL_ASYNC)) { | 1101 | if (!(gh->gh_flags & GL_ASYNC)) { |
1102 | error = glock_wait_internal(gh); | 1102 | error = glock_wait_internal(gh); |
1103 | if (error == GLR_CANCELED) { | 1103 | if (error == GLR_CANCELED) { |
1104 | msleep(100); | 1104 | msleep(100); |
1105 | goto restart; | 1105 | goto restart; |
1106 | } | 1106 | } |
1107 | } | 1107 | } |
1108 | 1108 | ||
1109 | return error; | 1109 | return error; |
1110 | } | 1110 | } |
1111 | 1111 | ||
/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	int ready = 0;

	spin_lock(&gl->gl_spin);

	if (test_bit(HIF_HOLDER, &gh->gh_iflags))
		/* The lock was granted: request complete */
		ready = 1;
	else if (list_empty(&gh->gh_list)) {
		/* No longer queued: either canceled or finished with error */
		if (gh->gh_error == GLR_CANCELED) {
			/* Canceled: drop the spinlock, back off and requeue.
			   Report "ready" only if the requeue itself failed
			   (so the caller picks up the error via wait). */
			spin_unlock(&gl->gl_spin);
			msleep(100);
			if (gfs2_glock_nq(gh))
				return 1;
			return 0;
		} else
			ready = 1;
	}

	spin_unlock(&gl->gl_spin);

	return ready;
}
1143 | 1143 | ||
1144 | /** | 1144 | /** |
1145 | * gfs2_glock_wait - wait for a lock acquisition that ended in a GLR_ASYNC | 1145 | * gfs2_glock_wait - wait for a lock acquisition that ended in a GLR_ASYNC |
1146 | * @gh: the holder structure | 1146 | * @gh: the holder structure |
1147 | * | 1147 | * |
1148 | * Returns: 0, GLR_TRYFAILED, or errno on failure | 1148 | * Returns: 0, GLR_TRYFAILED, or errno on failure |
1149 | */ | 1149 | */ |
1150 | 1150 | ||
1151 | int gfs2_glock_wait(struct gfs2_holder *gh) | 1151 | int gfs2_glock_wait(struct gfs2_holder *gh) |
1152 | { | 1152 | { |
1153 | int error; | 1153 | int error; |
1154 | 1154 | ||
1155 | error = glock_wait_internal(gh); | 1155 | error = glock_wait_internal(gh); |
1156 | if (error == GLR_CANCELED) { | 1156 | if (error == GLR_CANCELED) { |
1157 | msleep(100); | 1157 | msleep(100); |
1158 | gh->gh_flags &= ~GL_ASYNC; | 1158 | gh->gh_flags &= ~GL_ASYNC; |
1159 | error = gfs2_glock_nq(gh); | 1159 | error = gfs2_glock_nq(gh); |
1160 | } | 1160 | } |
1161 | 1161 | ||
1162 | return error; | 1162 | return error; |
1163 | } | 1163 | } |
1164 | 1164 | ||
/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	/* GL_NOCACHE: request a full demote to unlocked once released */
	if (gh->gh_flags & GL_NOCACHE)
		handle_callback(gl, LM_ST_UNLOCKED);

	gfs2_glmutex_lock(gl);

	spin_lock(&gl->gl_spin);
	list_del_init(&gh->gh_list);

	if (list_empty(&gl->gl_holders)) {
		/* Last holder gone: run the glock type's unlock hook with
		   the spinlock dropped (presumably because go_unlock may
		   sleep — TODO confirm), then record the time of last use
		   for the reclaim heuristics. */
		spin_unlock(&gl->gl_spin);

		if (glops->go_unlock)
			glops->go_unlock(gh);

		spin_lock(&gl->gl_spin);
		gl->gl_stamp = jiffies;
	}

	/* Release the glmutex (GLF_LOCK) and kick any queued waiters */
	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);
}
1198 | 1198 | ||
/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and initialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
	/* Release the lock, then drop the holder's reference on the glock */
	gfs2_glock_dq(gh);
	gfs2_holder_uninit(gh);
}
1210 | 1210 | ||
1211 | /** | 1211 | /** |
1212 | * gfs2_glock_nq_num - acquire a glock based on lock number | 1212 | * gfs2_glock_nq_num - acquire a glock based on lock number |
1213 | * @sdp: the filesystem | 1213 | * @sdp: the filesystem |
1214 | * @number: the lock number | 1214 | * @number: the lock number |
1215 | * @glops: the glock operations for the type of glock | 1215 | * @glops: the glock operations for the type of glock |
1216 | * @state: the state to acquire the glock in | 1216 | * @state: the state to acquire the glock in |
1217 | * @flags: modifier flags for the aquisition | 1217 | * @flags: modifier flags for the aquisition |
1218 | * @gh: the struct gfs2_holder | 1218 | * @gh: the struct gfs2_holder |
1219 | * | 1219 | * |
1220 | * Returns: errno | 1220 | * Returns: errno |
1221 | */ | 1221 | */ |
1222 | 1222 | ||
1223 | int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number, | 1223 | int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number, |
1224 | const struct gfs2_glock_operations *glops, | 1224 | const struct gfs2_glock_operations *glops, |
1225 | unsigned int state, int flags, struct gfs2_holder *gh) | 1225 | unsigned int state, int flags, struct gfs2_holder *gh) |
1226 | { | 1226 | { |
1227 | struct gfs2_glock *gl; | 1227 | struct gfs2_glock *gl; |
1228 | int error; | 1228 | int error; |
1229 | 1229 | ||
1230 | error = gfs2_glock_get(sdp, number, glops, CREATE, &gl); | 1230 | error = gfs2_glock_get(sdp, number, glops, CREATE, &gl); |
1231 | if (!error) { | 1231 | if (!error) { |
1232 | error = gfs2_glock_nq_init(gl, state, flags, gh); | 1232 | error = gfs2_glock_nq_init(gl, state, flags, gh); |
1233 | gfs2_glock_put(gl); | 1233 | gfs2_glock_put(gl); |
1234 | } | 1234 | } |
1235 | 1235 | ||
1236 | return error; | 1236 | return error; |
1237 | } | 1237 | } |
1238 | 1238 | ||
1239 | /** | 1239 | /** |
1240 | * glock_compare - Compare two struct gfs2_glock structures for sorting | 1240 | * glock_compare - Compare two struct gfs2_glock structures for sorting |
1241 | * @arg_a: the first structure | 1241 | * @arg_a: the first structure |
1242 | * @arg_b: the second structure | 1242 | * @arg_b: the second structure |
1243 | * | 1243 | * |
1244 | */ | 1244 | */ |
1245 | 1245 | ||
1246 | static int glock_compare(const void *arg_a, const void *arg_b) | 1246 | static int glock_compare(const void *arg_a, const void *arg_b) |
1247 | { | 1247 | { |
1248 | const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a; | 1248 | const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a; |
1249 | const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b; | 1249 | const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b; |
1250 | const struct lm_lockname *a = &gh_a->gh_gl->gl_name; | 1250 | const struct lm_lockname *a = &gh_a->gh_gl->gl_name; |
1251 | const struct lm_lockname *b = &gh_b->gh_gl->gl_name; | 1251 | const struct lm_lockname *b = &gh_b->gh_gl->gl_name; |
1252 | 1252 | ||
1253 | if (a->ln_number > b->ln_number) | 1253 | if (a->ln_number > b->ln_number) |
1254 | return 1; | 1254 | return 1; |
1255 | if (a->ln_number < b->ln_number) | 1255 | if (a->ln_number < b->ln_number) |
1256 | return -1; | 1256 | return -1; |
1257 | BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type); | 1257 | BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type); |
1258 | return 0; | 1258 | return 0; |
1259 | } | 1259 | } |
1260 | 1260 | ||
1261 | /** | 1261 | /** |
1262 | * nq_m_sync - synchonously acquire more than one glock in deadlock free order | 1262 | * nq_m_sync - synchonously acquire more than one glock in deadlock free order |
1263 | * @num_gh: the number of structures | 1263 | * @num_gh: the number of structures |
1264 | * @ghs: an array of struct gfs2_holder structures | 1264 | * @ghs: an array of struct gfs2_holder structures |
1265 | * | 1265 | * |
1266 | * Returns: 0 on success (all glocks acquired), | 1266 | * Returns: 0 on success (all glocks acquired), |
1267 | * errno on failure (no glocks acquired) | 1267 | * errno on failure (no glocks acquired) |
1268 | */ | 1268 | */ |
1269 | 1269 | ||
1270 | static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs, | 1270 | static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs, |
1271 | struct gfs2_holder **p) | 1271 | struct gfs2_holder **p) |
1272 | { | 1272 | { |
1273 | unsigned int x; | 1273 | unsigned int x; |
1274 | int error = 0; | 1274 | int error = 0; |
1275 | 1275 | ||
1276 | for (x = 0; x < num_gh; x++) | 1276 | for (x = 0; x < num_gh; x++) |
1277 | p[x] = &ghs[x]; | 1277 | p[x] = &ghs[x]; |
1278 | 1278 | ||
1279 | sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL); | 1279 | sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL); |
1280 | 1280 | ||
1281 | for (x = 0; x < num_gh; x++) { | 1281 | for (x = 0; x < num_gh; x++) { |
1282 | p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC); | 1282 | p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC); |
1283 | 1283 | ||
1284 | error = gfs2_glock_nq(p[x]); | 1284 | error = gfs2_glock_nq(p[x]); |
1285 | if (error) { | 1285 | if (error) { |
1286 | while (x--) | 1286 | while (x--) |
1287 | gfs2_glock_dq(p[x]); | 1287 | gfs2_glock_dq(p[x]); |
1288 | break; | 1288 | break; |
1289 | } | 1289 | } |
1290 | } | 1290 | } |
1291 | 1291 | ||
1292 | return error; | 1292 | return error; |
1293 | } | 1293 | } |
1294 | 1294 | ||
/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Figure out how big an impact this function has.  Either:
 * 1) Replace this code with code that calls gfs2_glock_prefetch()
 * 2) Forget async stuff and just call nq_m_sync()
 * 3) Leave it like it is
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	int *e;
	unsigned int x;
	int borked = 0, serious = 0;
	int error = 0;

	if (!num_gh)
		return 0;

	/* Single lock: no ordering concern, just take it synchronously */
	if (num_gh == 1) {
		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
		return gfs2_glock_nq(ghs);
	}

	/* NOTE(review): e serves double duty — first as an int array of
	   per-holder wait results, later recast as a gfs2_holder* array
	   for nq_m_sync().  It is sized with sizeof(struct gfs2_holder *)
	   (>= sizeof(int)) so one allocation covers both uses. */
	e = kcalloc(num_gh, sizeof(struct gfs2_holder *), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	/* Pass 1: fire off every request asynchronously as a "try" lock */
	for (x = 0; x < num_gh; x++) {
		ghs[x].gh_flags |= LM_FLAG_TRY | GL_ASYNC;
		error = gfs2_glock_nq(&ghs[x]);
		if (error) {
			borked = 1;
			serious = error;
			/* Only wait on the requests actually submitted */
			num_gh = x;
			break;
		}
	}

	/* Pass 2: collect results; TRYFAILED/CANCELED are retryable,
	   anything else is a hard ("serious") error */
	for (x = 0; x < num_gh; x++) {
		error = e[x] = glock_wait_internal(&ghs[x]);
		if (error) {
			borked = 1;
			if (error != GLR_TRYFAILED && error != GLR_CANCELED)
				serious = error;
		}
	}

	if (!borked) {
		kfree(e);
		return 0;
	}

	/* Something failed: release every lock that was granted */
	for (x = 0; x < num_gh; x++)
		if (!e[x])
			gfs2_glock_dq(&ghs[x]);

	if (serious)
		error = serious;
	else {
		/* Only retryable failures occurred: reinitialize the
		   holders and fall back to the sorted synchronous path */
		for (x = 0; x < num_gh; x++)
			gfs2_holder_reinit(ghs[x].gh_state, ghs[x].gh_flags,
					   &ghs[x]);
		error = nq_m_sync(num_gh, ghs, (struct gfs2_holder **)e);
	}

	kfree(e);

	return error;
}
1370 | 1370 | ||
1371 | /** | 1371 | /** |
1372 | * gfs2_glock_dq_m - release multiple glocks | 1372 | * gfs2_glock_dq_m - release multiple glocks |
1373 | * @num_gh: the number of structures | 1373 | * @num_gh: the number of structures |
1374 | * @ghs: an array of struct gfs2_holder structures | 1374 | * @ghs: an array of struct gfs2_holder structures |
1375 | * | 1375 | * |
1376 | */ | 1376 | */ |
1377 | 1377 | ||
1378 | void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs) | 1378 | void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs) |
1379 | { | 1379 | { |
1380 | unsigned int x; | 1380 | unsigned int x; |
1381 | 1381 | ||
1382 | for (x = 0; x < num_gh; x++) | 1382 | for (x = 0; x < num_gh; x++) |
1383 | gfs2_glock_dq(&ghs[x]); | 1383 | gfs2_glock_dq(&ghs[x]); |
1384 | } | 1384 | } |
1385 | 1385 | ||
1386 | /** | 1386 | /** |
1387 | * gfs2_glock_dq_uninit_m - release multiple glocks | 1387 | * gfs2_glock_dq_uninit_m - release multiple glocks |
1388 | * @num_gh: the number of structures | 1388 | * @num_gh: the number of structures |
1389 | * @ghs: an array of struct gfs2_holder structures | 1389 | * @ghs: an array of struct gfs2_holder structures |
1390 | * | 1390 | * |
1391 | */ | 1391 | */ |
1392 | 1392 | ||
1393 | void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs) | 1393 | void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs) |
1394 | { | 1394 | { |
1395 | unsigned int x; | 1395 | unsigned int x; |
1396 | 1396 | ||
1397 | for (x = 0; x < num_gh; x++) | 1397 | for (x = 0; x < num_gh; x++) |
1398 | gfs2_glock_dq_uninit(&ghs[x]); | 1398 | gfs2_glock_dq_uninit(&ghs[x]); |
1399 | } | 1399 | } |
1400 | 1400 | ||
1401 | /** | 1401 | /** |
1402 | * gfs2_lvb_hold - attach a LVB from a glock | 1402 | * gfs2_lvb_hold - attach a LVB from a glock |
1403 | * @gl: The glock in question | 1403 | * @gl: The glock in question |
1404 | * | 1404 | * |
1405 | */ | 1405 | */ |
1406 | 1406 | ||
1407 | int gfs2_lvb_hold(struct gfs2_glock *gl) | 1407 | int gfs2_lvb_hold(struct gfs2_glock *gl) |
1408 | { | 1408 | { |
1409 | int error; | 1409 | int error; |
1410 | 1410 | ||
1411 | gfs2_glmutex_lock(gl); | 1411 | gfs2_glmutex_lock(gl); |
1412 | 1412 | ||
1413 | if (!atomic_read(&gl->gl_lvb_count)) { | 1413 | if (!atomic_read(&gl->gl_lvb_count)) { |
1414 | error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb); | 1414 | error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb); |
1415 | if (error) { | 1415 | if (error) { |
1416 | gfs2_glmutex_unlock(gl); | 1416 | gfs2_glmutex_unlock(gl); |
1417 | return error; | 1417 | return error; |
1418 | } | 1418 | } |
1419 | gfs2_glock_hold(gl); | 1419 | gfs2_glock_hold(gl); |
1420 | } | 1420 | } |
1421 | atomic_inc(&gl->gl_lvb_count); | 1421 | atomic_inc(&gl->gl_lvb_count); |
1422 | 1422 | ||
1423 | gfs2_glmutex_unlock(gl); | 1423 | gfs2_glmutex_unlock(gl); |
1424 | 1424 | ||
1425 | return 0; | 1425 | return 0; |
1426 | } | 1426 | } |
1427 | 1427 | ||
/**
 * gfs2_lvb_unhold - detach a LVB from a glock
 * @gl: The glock in question
 *
 */

void gfs2_lvb_unhold(struct gfs2_glock *gl)
{
	/* Temporary reference keeps gl alive across the glmutex section
	   even if the LVB reference pinned by gfs2_lvb_hold() is dropped */
	gfs2_glock_hold(gl);
	gfs2_glmutex_lock(gl);

	gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
	if (atomic_dec_and_test(&gl->gl_lvb_count)) {
		/* Last user: detach the LVB and drop the reference taken
		   when it was first attached */
		gfs2_lm_unhold_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);
		gl->gl_lvb = NULL;
		gfs2_glock_put(gl);
	}

	gfs2_glmutex_unlock(gl);
	/* Drop the temporary reference taken above */
	gfs2_glock_put(gl);
}
1449 | 1449 | ||
1450 | static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name, | 1450 | static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name, |
1451 | unsigned int state) | 1451 | unsigned int state) |
1452 | { | 1452 | { |
1453 | struct gfs2_glock *gl; | 1453 | struct gfs2_glock *gl; |
1454 | 1454 | ||
1455 | gl = gfs2_glock_find(sdp, name); | 1455 | gl = gfs2_glock_find(sdp, name); |
1456 | if (!gl) | 1456 | if (!gl) |
1457 | return; | 1457 | return; |
1458 | 1458 | ||
1459 | handle_callback(gl, state); | 1459 | handle_callback(gl, state); |
1460 | 1460 | ||
1461 | spin_lock(&gl->gl_spin); | 1461 | spin_lock(&gl->gl_spin); |
1462 | run_queue(gl); | 1462 | run_queue(gl); |
1463 | spin_unlock(&gl->gl_spin); | 1463 | spin_unlock(&gl->gl_spin); |
1464 | 1464 | ||
1465 | gfs2_glock_put(gl); | 1465 | gfs2_glock_put(gl); |
1466 | } | 1466 | } |
1467 | 1467 | ||
1468 | /** | 1468 | /** |
1469 | * gfs2_glock_cb - Callback used by locking module | 1469 | * gfs2_glock_cb - Callback used by locking module |
1470 | * @sdp: Pointer to the superblock | 1470 | * @sdp: Pointer to the superblock |
1471 | * @type: Type of callback | 1471 | * @type: Type of callback |
1472 | * @data: Type dependent data pointer | 1472 | * @data: Type dependent data pointer |
1473 | * | 1473 | * |
1474 | * Called by the locking module when it wants to tell us something. | 1474 | * Called by the locking module when it wants to tell us something. |
1475 | * Either we need to drop a lock, one of our ASYNC requests completed, or | 1475 | * Either we need to drop a lock, one of our ASYNC requests completed, or |
1476 | * a journal from another client needs to be recovered. | 1476 | * a journal from another client needs to be recovered. |
1477 | */ | 1477 | */ |
1478 | 1478 | ||
1479 | void gfs2_glock_cb(void *cb_data, unsigned int type, void *data) | 1479 | void gfs2_glock_cb(void *cb_data, unsigned int type, void *data) |
1480 | { | 1480 | { |
1481 | struct gfs2_sbd *sdp = cb_data; | 1481 | struct gfs2_sbd *sdp = cb_data; |
1482 | 1482 | ||
1483 | switch (type) { | 1483 | switch (type) { |
1484 | case LM_CB_NEED_E: | 1484 | case LM_CB_NEED_E: |
1485 | blocking_cb(sdp, data, LM_ST_UNLOCKED); | 1485 | blocking_cb(sdp, data, LM_ST_UNLOCKED); |
1486 | return; | 1486 | return; |
1487 | 1487 | ||
1488 | case LM_CB_NEED_D: | 1488 | case LM_CB_NEED_D: |
1489 | blocking_cb(sdp, data, LM_ST_DEFERRED); | 1489 | blocking_cb(sdp, data, LM_ST_DEFERRED); |
1490 | return; | 1490 | return; |
1491 | 1491 | ||
1492 | case LM_CB_NEED_S: | 1492 | case LM_CB_NEED_S: |
1493 | blocking_cb(sdp, data, LM_ST_SHARED); | 1493 | blocking_cb(sdp, data, LM_ST_SHARED); |
1494 | return; | 1494 | return; |
1495 | 1495 | ||
1496 | case LM_CB_ASYNC: { | 1496 | case LM_CB_ASYNC: { |
1497 | struct lm_async_cb *async = data; | 1497 | struct lm_async_cb *async = data; |
1498 | struct gfs2_glock *gl; | 1498 | struct gfs2_glock *gl; |
1499 | 1499 | ||
1500 | down_read(&gfs2_umount_flush_sem); | 1500 | down_read(&gfs2_umount_flush_sem); |
1501 | gl = gfs2_glock_find(sdp, &async->lc_name); | 1501 | gl = gfs2_glock_find(sdp, &async->lc_name); |
1502 | if (gfs2_assert_warn(sdp, gl)) | 1502 | if (gfs2_assert_warn(sdp, gl)) |
1503 | return; | 1503 | return; |
1504 | if (!gfs2_assert_warn(sdp, gl->gl_req_bh)) | 1504 | if (!gfs2_assert_warn(sdp, gl->gl_req_bh)) |
1505 | gl->gl_req_bh(gl, async->lc_ret); | 1505 | gl->gl_req_bh(gl, async->lc_ret); |
1506 | gfs2_glock_put(gl); | 1506 | gfs2_glock_put(gl); |
1507 | up_read(&gfs2_umount_flush_sem); | 1507 | up_read(&gfs2_umount_flush_sem); |
1508 | return; | 1508 | return; |
1509 | } | 1509 | } |
1510 | 1510 | ||
1511 | case LM_CB_NEED_RECOVERY: | 1511 | case LM_CB_NEED_RECOVERY: |
1512 | gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data); | 1512 | gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data); |
1513 | if (sdp->sd_recoverd_process) | 1513 | if (sdp->sd_recoverd_process) |
1514 | wake_up_process(sdp->sd_recoverd_process); | 1514 | wake_up_process(sdp->sd_recoverd_process); |
1515 | return; | 1515 | return; |
1516 | 1516 | ||
1517 | case LM_CB_DROPLOCKS: | 1517 | case LM_CB_DROPLOCKS: |
1518 | gfs2_gl_hash_clear(sdp, NO_WAIT); | 1518 | gfs2_gl_hash_clear(sdp, NO_WAIT); |
1519 | gfs2_quota_scan(sdp); | 1519 | gfs2_quota_scan(sdp); |
1520 | return; | 1520 | return; |
1521 | 1521 | ||
1522 | default: | 1522 | default: |
1523 | gfs2_assert_warn(sdp, 0); | 1523 | gfs2_assert_warn(sdp, 0); |
1524 | return; | 1524 | return; |
1525 | } | 1525 | } |
1526 | } | 1526 | } |
1527 | 1527 | ||
1528 | /** | 1528 | /** |
1529 | * demote_ok - Check to see if it's ok to unlock a glock | 1529 | * demote_ok - Check to see if it's ok to unlock a glock |
1530 | * @gl: the glock | 1530 | * @gl: the glock |
1531 | * | 1531 | * |
1532 | * Returns: 1 if it's ok | 1532 | * Returns: 1 if it's ok |
1533 | */ | 1533 | */ |
1534 | 1534 | ||
1535 | static int demote_ok(struct gfs2_glock *gl) | 1535 | static int demote_ok(struct gfs2_glock *gl) |
1536 | { | 1536 | { |
1537 | const struct gfs2_glock_operations *glops = gl->gl_ops; | 1537 | const struct gfs2_glock_operations *glops = gl->gl_ops; |
1538 | int demote = 1; | 1538 | int demote = 1; |
1539 | 1539 | ||
1540 | if (test_bit(GLF_STICKY, &gl->gl_flags)) | 1540 | if (test_bit(GLF_STICKY, &gl->gl_flags)) |
1541 | demote = 0; | 1541 | demote = 0; |
1542 | else if (glops->go_demote_ok) | 1542 | else if (glops->go_demote_ok) |
1543 | demote = glops->go_demote_ok(gl); | 1543 | demote = glops->go_demote_ok(gl); |
1544 | 1544 | ||
1545 | return demote; | 1545 | return demote; |
1546 | } | 1546 | } |
1547 | 1547 | ||
/**
 * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 *
 */

void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	spin_lock(&sdp->sd_reclaim_lock);
	/* Queue at most once: a non-empty gl_reclaim entry means the glock
	   is already on the list.  The extra reference taken here is
	   dropped by gfs2_reclaim_glock(). */
	if (list_empty(&gl->gl_reclaim)) {
		gfs2_glock_hold(gl);
		list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
		atomic_inc(&sdp->sd_reclaim_count);
	}
	spin_unlock(&sdp->sd_reclaim_lock);

	/* Wake the reclaim daemon to process the list */
	wake_up(&sdp->sd_reclaim_wq);
}
1568 | 1568 | ||
/**
 * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list
 * @sdp: the filesystem
 *
 * Called from gfs2_glockd() glock reclaim daemon, or when promoting a
 * different glock and we notice that there are a lot of glocks in the
 * reclaim list.
 *
 */

void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
{
	struct gfs2_glock *gl;

	/* Pop the head of the reclaim list under the reclaim lock */
	spin_lock(&sdp->sd_reclaim_lock);
	if (list_empty(&sdp->sd_reclaim_list)) {
		spin_unlock(&sdp->sd_reclaim_lock);
		return;
	}
	gl = list_entry(sdp->sd_reclaim_list.next,
			struct gfs2_glock, gl_reclaim);
	list_del_init(&gl->gl_reclaim);
	spin_unlock(&sdp->sd_reclaim_lock);

	atomic_dec(&sdp->sd_reclaim_count);
	atomic_inc(&sdp->sd_reclaimed);

	/* Only demote if nobody holds the glock and its type agrees; a
	   failed trylock just means someone else is busy with it, in
	   which case we simply drop our reference and move on */
	if (gfs2_glmutex_trylock(gl)) {
		if (list_empty(&gl->gl_holders) &&
		    gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
			handle_callback(gl, LM_ST_UNLOCKED);
		gfs2_glmutex_unlock(gl);
	}

	/* Drop the reference taken when the glock was queued for reclaim */
	gfs2_glock_put(gl);
}
1605 | 1605 | ||
/**
 * examine_bucket - Call a function for each glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 * @hash: index of the bucket in the glock hash table
 *
 * The examiner is invoked with the bucket lock dropped; a reference is
 * held on the current glock so it stays on the chain while unlocked,
 * which lets the walk resume from it after relocking.
 *
 * Returns: 1 if the bucket has entries
 */

static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
			  unsigned int hash)
{
	struct gfs2_glock *gl, *prev = NULL;
	int has_entries = 0;
	struct hlist_head *head = &gl_hash_table[hash].hb_list;

	read_lock(gl_lock_addr(hash));
	/* Can't use hlist_for_each_entry - don't want prefetch here */
	if (hlist_empty(head))
		goto out;
	gl = list_entry(head->first, struct gfs2_glock, gl_list);
	while(1) {
		if (gl->gl_sbd == sdp) {
			gfs2_glock_hold(gl);
			read_unlock(gl_lock_addr(hash));
			/* Release the previous glock's reference only now,
			   outside the bucket lock. */
			if (prev)
				gfs2_glock_put(prev);
			prev = gl;
			examiner(gl);
			has_entries = 1;
			read_lock(gl_lock_addr(hash));
		}
		/* End of the chain (hlist terminator). */
		if (gl->gl_list.next == NULL)
			break;
		gl = list_entry(gl->gl_list.next, struct gfs2_glock, gl_list);
	}
out:
	read_unlock(gl_lock_addr(hash));
	if (prev)
		gfs2_glock_put(prev);
	return has_entries;
}
1648 | 1648 | ||
/**
 * scan_glock - look at a glock and see if we can reclaim it
 * @gl: the glock to look at
 *
 * If the glock is idle (no holders), locked, and its type says demotion
 * is OK, queue it for the reclaim daemon.
 */

static void scan_glock(struct gfs2_glock *gl)
{
	/* Skip inode glocks that still have an inode attached;
	   presumably those are reclaimed through the inode cache
	   instead — TODO confirm against the inode pruning path. */
	if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object)
		return;

	if (gfs2_glmutex_trylock(gl)) {
		if (list_empty(&gl->gl_holders) &&
		    gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
			goto out_schedule;
		gfs2_glmutex_unlock(gl);
	}
	return;

out_schedule:
	gfs2_glmutex_unlock(gl);
	gfs2_glock_schedule_for_reclaim(gl);
}
1672 | 1672 | ||
1673 | /** | 1673 | /** |
1674 | * gfs2_scand_internal - Look for glocks and inodes to toss from memory | 1674 | * gfs2_scand_internal - Look for glocks and inodes to toss from memory |
1675 | * @sdp: the filesystem | 1675 | * @sdp: the filesystem |
1676 | * | 1676 | * |
1677 | */ | 1677 | */ |
1678 | 1678 | ||
1679 | void gfs2_scand_internal(struct gfs2_sbd *sdp) | 1679 | void gfs2_scand_internal(struct gfs2_sbd *sdp) |
1680 | { | 1680 | { |
1681 | unsigned int x; | 1681 | unsigned int x; |
1682 | 1682 | ||
1683 | for (x = 0; x < GFS2_GL_HASH_SIZE; x++) | 1683 | for (x = 0; x < GFS2_GL_HASH_SIZE; x++) |
1684 | examine_bucket(scan_glock, sdp, x); | 1684 | examine_bucket(scan_glock, sdp, x); |
1685 | } | 1685 | } |
1686 | 1686 | ||
/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 *
 * Unlike scan_glock(), this demotes unconditionally (no demote_ok()
 * check) — the caller is emptying the whole glock cache.
 */

static void clear_glock(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int released;

	/* If the glock is queued for reclaim, unhook it and drop the
	   reclaim list's reference.  That put must not be the last
	   reference, since the glock is still used below. */
	spin_lock(&sdp->sd_reclaim_lock);
	if (!list_empty(&gl->gl_reclaim)) {
		list_del_init(&gl->gl_reclaim);
		atomic_dec(&sdp->sd_reclaim_count);
		spin_unlock(&sdp->sd_reclaim_lock);
		released = gfs2_glock_put(gl);
		gfs2_assert(sdp, !released);
	} else {
		spin_unlock(&sdp->sd_reclaim_lock);
	}

	/* Demote to unlocked if nobody currently holds the glock. */
	if (gfs2_glmutex_trylock(gl)) {
		if (list_empty(&gl->gl_holders) &&
		    gl->gl_state != LM_ST_UNLOCKED)
			handle_callback(gl, LM_ST_UNLOCKED);
		gfs2_glmutex_unlock(gl);
	}
}
1716 | 1716 | ||
/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 * @wait: wait until it's all gone
 *
 * Called when unmounting the filesystem, or when inter-node lock manager
 * requests DROPLOCKS because it is running out of capacity.
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
{
	unsigned long t;
	unsigned int x;
	int cont;

	t = jiffies;	/* start of the current stall-warning interval */

	for (;;) {
		/* One full pass over the table; cont records whether any
		   bucket still held glocks belonging to this fs. */
		cont = 0;
		for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
			if (examine_bucket(clear_glock, sdp, x))
				cont = 1;
		}

		if (!wait || !cont)
			break;

		/* Still not empty after gt_stall_secs: dump the lock
		   state for debugging and restart the timer. */
		if (time_after_eq(jiffies,
				  t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
			fs_warn(sdp, "Unmount seems to be stalled. "
				     "Dumping lock state...\n");
			gfs2_dump_lockstate(sdp);
			t = jiffies;
		}

		/* Toss unused inodes so their glocks can be cleared on
		   the next pass; the rwsem fences umount flushing. */
		down_write(&gfs2_umount_flush_sem);
		invalidate_inodes(sdp->sd_vfs);
		up_write(&gfs2_umount_flush_sem);
		msleep(10);
	}
}
1758 | 1758 | ||
1759 | /* | 1759 | /* |
1760 | * Diagnostic routines to help debug distributed deadlock | 1760 | * Diagnostic routines to help debug distributed deadlock |
1761 | */ | 1761 | */ |
1762 | 1762 | ||
1763 | static void gfs2_print_symbol(struct glock_iter *gi, const char *fmt, | 1763 | static void gfs2_print_symbol(struct glock_iter *gi, const char *fmt, |
1764 | unsigned long address) | 1764 | unsigned long address) |
1765 | { | 1765 | { |
1766 | char buffer[KSYM_SYMBOL_LEN]; | 1766 | char buffer[KSYM_SYMBOL_LEN]; |
1767 | 1767 | ||
1768 | sprint_symbol(buffer, address); | 1768 | sprint_symbol(buffer, address); |
1769 | print_dbg(gi, fmt, buffer); | 1769 | print_dbg(gi, fmt, buffer); |
1770 | } | 1770 | } |
1771 | 1771 | ||
/**
 * dump_holder - print information about a glock holder
 * @gi: the glock iterator (routes output; see print_dbg())
 * @str: a string naming the type of holder
 * @gh: the glock holder
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_holder(struct glock_iter *gi, char *str,
		       struct gfs2_holder *gh)
{
	unsigned int x;
	struct task_struct *gh_owner;

	print_dbg(gi, " %s\n", str);
	/* Owner pid plus, when the task still exists, its comm name. */
	if (gh->gh_owner_pid) {
		print_dbg(gi, " owner = %ld ", (long)gh->gh_owner_pid);
		gh_owner = find_task_by_pid(gh->gh_owner_pid);
		if (gh_owner)
			print_dbg(gi, "(%s)\n", gh_owner->comm);
		else
			print_dbg(gi, "(ended)\n");
	} else
		print_dbg(gi, " owner = -1\n");
	print_dbg(gi, " gh_state = %u\n", gh->gh_state);
	/* Dump set flag bits as their bit numbers. */
	print_dbg(gi, " gh_flags =");
	for (x = 0; x < 32; x++)
		if (gh->gh_flags & (1 << x))
			print_dbg(gi, " %u", x);
	print_dbg(gi, " \n");
	print_dbg(gi, " error = %d\n", gh->gh_error);
	print_dbg(gi, " gh_iflags =");
	for (x = 0; x < 32; x++)
		if (test_bit(x, &gh->gh_iflags))
			print_dbg(gi, " %u", x);
	print_dbg(gi, " \n");
	/* gh_ip is the caller's return address recorded at init time;
	   print it symbolically. */
	gfs2_print_symbol(gi, " initialized at: %s\n", gh->gh_ip);

	return 0;
}
1812 | 1812 | ||
1813 | /** | 1813 | /** |
1814 | * dump_inode - print information about an inode | 1814 | * dump_inode - print information about an inode |
1815 | * @ip: the inode | 1815 | * @ip: the inode |
1816 | * | 1816 | * |
1817 | * Returns: 0 on success, -ENOBUFS when we run out of space | 1817 | * Returns: 0 on success, -ENOBUFS when we run out of space |
1818 | */ | 1818 | */ |
1819 | 1819 | ||
1820 | static int dump_inode(struct glock_iter *gi, struct gfs2_inode *ip) | 1820 | static int dump_inode(struct glock_iter *gi, struct gfs2_inode *ip) |
1821 | { | 1821 | { |
1822 | unsigned int x; | 1822 | unsigned int x; |
1823 | 1823 | ||
1824 | print_dbg(gi, " Inode:\n"); | 1824 | print_dbg(gi, " Inode:\n"); |
1825 | print_dbg(gi, " num = %llu/%llu\n", | 1825 | print_dbg(gi, " num = %llu/%llu\n", |
1826 | ip->i_num.no_formal_ino, ip->i_num.no_addr); | 1826 | (unsigned long long)ip->i_num.no_formal_ino, |
1827 | (unsigned long long)ip->i_num.no_addr); | ||
1827 | print_dbg(gi, " type = %u\n", IF2DT(ip->i_inode.i_mode)); | 1828 | print_dbg(gi, " type = %u\n", IF2DT(ip->i_inode.i_mode)); |
1828 | print_dbg(gi, " i_flags ="); | 1829 | print_dbg(gi, " i_flags ="); |
1829 | for (x = 0; x < 32; x++) | 1830 | for (x = 0; x < 32; x++) |
1830 | if (test_bit(x, &ip->i_flags)) | 1831 | if (test_bit(x, &ip->i_flags)) |
1831 | print_dbg(gi, " %u", x); | 1832 | print_dbg(gi, " %u", x); |
1832 | print_dbg(gi, " \n"); | 1833 | print_dbg(gi, " \n"); |
1833 | return 0; | 1834 | return 0; |
1834 | } | 1835 | } |
1835 | 1836 | ||
/**
 * dump_glock - print information about a glock
 * @gi: the glock iterator (NULL routes output to the console —
 *      see print_dbg(); non-NULL routes to the seq_file)
 * @gl: the glock
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;
	unsigned int x;
	int error = -ENOBUFS;
	struct task_struct *gl_owner;

	/* gl_spin keeps the holder/waiter lists and fields stable while
	   we dump them. */
	spin_lock(&gl->gl_spin);

	print_dbg(gi, "Glock 0x%p (%u, %llu)\n", gl, gl->gl_name.ln_type,
		   (unsigned long long)gl->gl_name.ln_number);
	print_dbg(gi, " gl_flags =");
	for (x = 0; x < 32; x++) {
		if (test_bit(x, &gl->gl_flags))
			print_dbg(gi, " %u", x);
	}
	if (!test_bit(GLF_LOCK, &gl->gl_flags))
		print_dbg(gi, " (unlocked)");
	print_dbg(gi, " \n");
	print_dbg(gi, " gl_ref = %d\n", atomic_read(&gl->gl_ref));
	print_dbg(gi, " gl_state = %u\n", gl->gl_state);
	/* Owner pid plus comm name when the task still exists. */
	if (gl->gl_owner_pid) {
		gl_owner = find_task_by_pid(gl->gl_owner_pid);
		if (gl_owner)
			print_dbg(gi, " gl_owner = pid %d (%s)\n",
				  gl->gl_owner_pid, gl_owner->comm);
		else
			print_dbg(gi, " gl_owner = %d (ended)\n",
				  gl->gl_owner_pid);
	} else
		print_dbg(gi, " gl_owner = -1\n");
	print_dbg(gi, " gl_ip = %lu\n", gl->gl_ip);
	print_dbg(gi, " req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
	print_dbg(gi, " req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no");
	print_dbg(gi, " lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));
	print_dbg(gi, " object = %s\n", (gl->gl_object) ? "yes" : "no");
	print_dbg(gi, " le = %s\n",
		   (list_empty(&gl->gl_le.le_list)) ? "no" : "yes");
	print_dbg(gi, " reclaim = %s\n",
		   (list_empty(&gl->gl_reclaim)) ? "no" : "yes");
	if (gl->gl_aspace)
		print_dbg(gi, " aspace = 0x%p nrpages = %lu\n", gl->gl_aspace,
			   gl->gl_aspace->i_mapping->nrpages);
	else
		print_dbg(gi, " aspace = no\n");
	print_dbg(gi, " ail = %d\n", atomic_read(&gl->gl_ail_count));
	/* Dump the pending request holder, then each holder/waiter
	   queue; stop on the first dump_holder() error. */
	if (gl->gl_req_gh) {
		error = dump_holder(gi, "Request", gl->gl_req_gh);
		if (error)
			goto out;
	}
	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		error = dump_holder(gi, "Holder", gh);
		if (error)
			goto out;
	}
	list_for_each_entry(gh, &gl->gl_waiters1, gh_list) {
		error = dump_holder(gi, "Waiter1", gh);
		if (error)
			goto out;
	}
	list_for_each_entry(gh, &gl->gl_waiters3, gh_list) {
		error = dump_holder(gi, "Waiter3", gh);
		if (error)
			goto out;
	}
	/* Pending demote request: age is converted from jiffies to
	   microseconds. */
	if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
		print_dbg(gi, " Demotion req to state %u (%llu uS ago)\n",
			  gl->gl_demote_state, (unsigned long long)
			  (jiffies - gl->gl_demote_time)*(1000000/HZ));
	}
	/* Only dump the attached inode when the glock is quiescent,
	   otherwise report it busy and bail with -ENOBUFS. */
	if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) {
		if (!test_bit(GLF_LOCK, &gl->gl_flags) &&
		    list_empty(&gl->gl_holders)) {
			error = dump_inode(gi, gl->gl_object);
			if (error)
				goto out;
		} else {
			error = -ENOBUFS;
			print_dbg(gi, " Inode: busy\n");
		}
	}

	error = 0;

out:
	spin_unlock(&gl->gl_spin);
	return error;
}
1933 | 1934 | ||
/**
 * gfs2_dump_lockstate - print out the current lockstate
 * @sdp: the filesystem
 *
 * Walks every bucket of the glock hash table and dumps each glock
 * belonging to @sdp via dump_glock(NULL, ...) — a NULL iterator
 * presumably sends output to the console; see print_dbg().
 * (The kerneldoc previously described a @ub buffer parameter that no
 * longer exists in the signature.)
 *
 * Returns: 0 on success, or the first error from dump_glock()
 */

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
{
	struct gfs2_glock *gl;
	struct hlist_node *h;
	unsigned int x;
	int error = 0;

	for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {

		read_lock(gl_lock_addr(x));

		hlist_for_each_entry(gl, h, &gl_hash_table[x].hb_list, gl_list) {
			if (gl->gl_sbd != sdp)
				continue;

			error = dump_glock(NULL, gl);
			if (error)
				break;
		}

		read_unlock(gl_lock_addr(x));

		if (error)
			break;
	}


	return error;
}
1972 | 1973 | ||
1973 | int __init gfs2_glock_init(void) | 1974 | int __init gfs2_glock_init(void) |
1974 | { | 1975 | { |
1975 | unsigned i; | 1976 | unsigned i; |
1976 | for(i = 0; i < GFS2_GL_HASH_SIZE; i++) { | 1977 | for(i = 0; i < GFS2_GL_HASH_SIZE; i++) { |
1977 | INIT_HLIST_HEAD(&gl_hash_table[i].hb_list); | 1978 | INIT_HLIST_HEAD(&gl_hash_table[i].hb_list); |
1978 | } | 1979 | } |
1979 | #ifdef GL_HASH_LOCK_SZ | 1980 | #ifdef GL_HASH_LOCK_SZ |
1980 | for(i = 0; i < GL_HASH_LOCK_SZ; i++) { | 1981 | for(i = 0; i < GL_HASH_LOCK_SZ; i++) { |
1981 | rwlock_init(&gl_hash_locks[i]); | 1982 | rwlock_init(&gl_hash_locks[i]); |
1982 | } | 1983 | } |
1983 | #endif | 1984 | #endif |
1984 | return 0; | 1985 | return 0; |
1985 | } | 1986 | } |
1986 | 1987 | ||
1987 | static int gfs2_glock_iter_next(struct glock_iter *gi) | 1988 | static int gfs2_glock_iter_next(struct glock_iter *gi) |
1988 | { | 1989 | { |
1989 | read_lock(gl_lock_addr(gi->hash)); | 1990 | read_lock(gl_lock_addr(gi->hash)); |
1990 | while (1) { | 1991 | while (1) { |
1991 | if (!gi->hb_list) { /* If we don't have a hash bucket yet */ | 1992 | if (!gi->hb_list) { /* If we don't have a hash bucket yet */ |
1992 | gi->hb_list = &gl_hash_table[gi->hash].hb_list; | 1993 | gi->hb_list = &gl_hash_table[gi->hash].hb_list; |
1993 | if (hlist_empty(gi->hb_list)) { | 1994 | if (hlist_empty(gi->hb_list)) { |
1994 | read_unlock(gl_lock_addr(gi->hash)); | 1995 | read_unlock(gl_lock_addr(gi->hash)); |
1995 | gi->hash++; | 1996 | gi->hash++; |
1996 | read_lock(gl_lock_addr(gi->hash)); | 1997 | read_lock(gl_lock_addr(gi->hash)); |
1997 | gi->hb_list = NULL; | 1998 | gi->hb_list = NULL; |
1998 | if (gi->hash >= GFS2_GL_HASH_SIZE) { | 1999 | if (gi->hash >= GFS2_GL_HASH_SIZE) { |
1999 | read_unlock(gl_lock_addr(gi->hash)); | 2000 | read_unlock(gl_lock_addr(gi->hash)); |
2000 | return 1; | 2001 | return 1; |
2001 | } | 2002 | } |
2002 | else | 2003 | else |
2003 | continue; | 2004 | continue; |
2004 | } | 2005 | } |
2005 | if (!hlist_empty(gi->hb_list)) { | 2006 | if (!hlist_empty(gi->hb_list)) { |
2006 | gi->gl = list_entry(gi->hb_list->first, | 2007 | gi->gl = list_entry(gi->hb_list->first, |
2007 | struct gfs2_glock, | 2008 | struct gfs2_glock, |
2008 | gl_list); | 2009 | gl_list); |
2009 | } | 2010 | } |
2010 | } else { | 2011 | } else { |
2011 | if (gi->gl->gl_list.next == NULL) { | 2012 | if (gi->gl->gl_list.next == NULL) { |
2012 | read_unlock(gl_lock_addr(gi->hash)); | 2013 | read_unlock(gl_lock_addr(gi->hash)); |
2013 | gi->hash++; | 2014 | gi->hash++; |
2014 | read_lock(gl_lock_addr(gi->hash)); | 2015 | read_lock(gl_lock_addr(gi->hash)); |
2015 | gi->hb_list = NULL; | 2016 | gi->hb_list = NULL; |
2016 | continue; | 2017 | continue; |
2017 | } | 2018 | } |
2018 | gi->gl = list_entry(gi->gl->gl_list.next, | 2019 | gi->gl = list_entry(gi->gl->gl_list.next, |
2019 | struct gfs2_glock, gl_list); | 2020 | struct gfs2_glock, gl_list); |
2020 | } | 2021 | } |
2021 | if (gi->gl) | 2022 | if (gi->gl) |
2022 | break; | 2023 | break; |
2023 | } | 2024 | } |
2024 | read_unlock(gl_lock_addr(gi->hash)); | 2025 | read_unlock(gl_lock_addr(gi->hash)); |
2025 | return 0; | 2026 | return 0; |
2026 | } | 2027 | } |
2027 | 2028 | ||
/* Release an iterator allocated by gfs2_glock_iter_init(). */
static void gfs2_glock_iter_free(struct glock_iter *gi)
{
	kfree(gi);
}
2032 | 2033 | ||
2033 | static struct glock_iter *gfs2_glock_iter_init(struct gfs2_sbd *sdp) | 2034 | static struct glock_iter *gfs2_glock_iter_init(struct gfs2_sbd *sdp) |
2034 | { | 2035 | { |
2035 | struct glock_iter *gi; | 2036 | struct glock_iter *gi; |
2036 | 2037 | ||
2037 | gi = kmalloc(sizeof (*gi), GFP_KERNEL); | 2038 | gi = kmalloc(sizeof (*gi), GFP_KERNEL); |
2038 | if (!gi) | 2039 | if (!gi) |
2039 | return NULL; | 2040 | return NULL; |
2040 | 2041 | ||
2041 | gi->sdp = sdp; | 2042 | gi->sdp = sdp; |
2042 | gi->hash = 0; | 2043 | gi->hash = 0; |
2043 | gi->gl = NULL; | 2044 | gi->gl = NULL; |
2044 | gi->hb_list = NULL; | 2045 | gi->hb_list = NULL; |
2045 | gi->seq = NULL; | 2046 | gi->seq = NULL; |
2046 | memset(gi->string, 0, sizeof(gi->string)); | 2047 | memset(gi->string, 0, sizeof(gi->string)); |
2047 | 2048 | ||
2048 | if (gfs2_glock_iter_next(gi)) { | 2049 | if (gfs2_glock_iter_next(gi)) { |
2049 | gfs2_glock_iter_free(gi); | 2050 | gfs2_glock_iter_free(gi); |
2050 | return NULL; | 2051 | return NULL; |
2051 | } | 2052 | } |
2052 | 2053 | ||
2053 | return gi; | 2054 | return gi; |
2054 | } | 2055 | } |
2055 | 2056 | ||
2056 | static void *gfs2_glock_seq_start(struct seq_file *file, loff_t *pos) | 2057 | static void *gfs2_glock_seq_start(struct seq_file *file, loff_t *pos) |
2057 | { | 2058 | { |
2058 | struct glock_iter *gi; | 2059 | struct glock_iter *gi; |
2059 | loff_t n = *pos; | 2060 | loff_t n = *pos; |
2060 | 2061 | ||
2061 | gi = gfs2_glock_iter_init(file->private); | 2062 | gi = gfs2_glock_iter_init(file->private); |
2062 | if (!gi) | 2063 | if (!gi) |
2063 | return NULL; | 2064 | return NULL; |
2064 | 2065 | ||
2065 | while (n--) { | 2066 | while (n--) { |
2066 | if (gfs2_glock_iter_next(gi)) { | 2067 | if (gfs2_glock_iter_next(gi)) { |
2067 | gfs2_glock_iter_free(gi); | 2068 | gfs2_glock_iter_free(gi); |
2068 | return NULL; | 2069 | return NULL; |
2069 | } | 2070 | } |
2070 | } | 2071 | } |
2071 | 2072 | ||
2072 | return gi; | 2073 | return gi; |
2073 | } | 2074 | } |
2074 | 2075 | ||
2075 | static void *gfs2_glock_seq_next(struct seq_file *file, void *iter_ptr, | 2076 | static void *gfs2_glock_seq_next(struct seq_file *file, void *iter_ptr, |
2076 | loff_t *pos) | 2077 | loff_t *pos) |
2077 | { | 2078 | { |
2078 | struct glock_iter *gi = iter_ptr; | 2079 | struct glock_iter *gi = iter_ptr; |
2079 | 2080 | ||
2080 | (*pos)++; | 2081 | (*pos)++; |
2081 | 2082 | ||
2082 | if (gfs2_glock_iter_next(gi)) { | 2083 | if (gfs2_glock_iter_next(gi)) { |
2083 | gfs2_glock_iter_free(gi); | 2084 | gfs2_glock_iter_free(gi); |
2084 | return NULL; | 2085 | return NULL; |
2085 | } | 2086 | } |
2086 | 2087 | ||
2087 | return gi; | 2088 | return gi; |
2088 | } | 2089 | } |
2089 | 2090 | ||
/* seq_file ->stop(): intentionally empty — the iterator is freed by
 * ->next()/->start() when iteration runs off the end.
 * NOTE(review): if the reader stops mid-stream the iterator from
 * ->start() appears to leak — verify against seq_file semantics. */
static void gfs2_glock_seq_stop(struct seq_file *file, void *iter_ptr)
{
	/* nothing for now */
}
2094 | 2095 | ||
2095 | static int gfs2_glock_seq_show(struct seq_file *file, void *iter_ptr) | 2096 | static int gfs2_glock_seq_show(struct seq_file *file, void *iter_ptr) |
2096 | { | 2097 | { |
2097 | struct glock_iter *gi = iter_ptr; | 2098 | struct glock_iter *gi = iter_ptr; |
2098 | 2099 | ||
2099 | gi->seq = file; | 2100 | gi->seq = file; |
2100 | dump_glock(gi, gi->gl); | 2101 | dump_glock(gi, gi->gl); |
2101 | 2102 | ||
2102 | return 0; | 2103 | return 0; |
2103 | } | 2104 | } |
2104 | 2105 | ||
/* seq_file iteration over every glock; backs the debugfs "glocks" file. */
static struct seq_operations gfs2_glock_seq_ops = {
	.start = gfs2_glock_seq_start,
	.next  = gfs2_glock_seq_next,
	.stop  = gfs2_glock_seq_stop,
	.show  = gfs2_glock_seq_show,
};
2111 | 2112 | ||
2112 | static int gfs2_debugfs_open(struct inode *inode, struct file *file) | 2113 | static int gfs2_debugfs_open(struct inode *inode, struct file *file) |
2113 | { | 2114 | { |
2114 | struct seq_file *seq; | 2115 | struct seq_file *seq; |
2115 | int ret; | 2116 | int ret; |
2116 | 2117 | ||
2117 | ret = seq_open(file, &gfs2_glock_seq_ops); | 2118 | ret = seq_open(file, &gfs2_glock_seq_ops); |
2118 | if (ret) | 2119 | if (ret) |
2119 | return ret; | 2120 | return ret; |
2120 | 2121 | ||
2121 | seq = file->private_data; | 2122 | seq = file->private_data; |
2122 | seq->private = inode->i_private; | 2123 | seq->private = inode->i_private; |
2123 | 2124 | ||
2124 | return 0; | 2125 | return 0; |
2125 | } | 2126 | } |
2126 | 2127 | ||
/* File operations for the per-filesystem debugfs "glocks" file; the
 * read side is entirely standard seq_file plumbing. */
static const struct file_operations gfs2_debug_fops = {
	.owner   = THIS_MODULE,
	.open    = gfs2_debugfs_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release
};
2134 | 2135 | ||
2135 | int gfs2_create_debugfs_file(struct gfs2_sbd *sdp) | 2136 | int gfs2_create_debugfs_file(struct gfs2_sbd *sdp) |
2136 | { | 2137 | { |
2137 | sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root); | 2138 | sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root); |
2138 | if (!sdp->debugfs_dir) | 2139 | if (!sdp->debugfs_dir) |
2139 | return -ENOMEM; | 2140 | return -ENOMEM; |
2140 | sdp->debugfs_dentry_glocks = debugfs_create_file("glocks", | 2141 | sdp->debugfs_dentry_glocks = debugfs_create_file("glocks", |
2141 | S_IFREG | S_IRUGO, | 2142 | S_IFREG | S_IRUGO, |
2142 | sdp->debugfs_dir, sdp, | 2143 | sdp->debugfs_dir, sdp, |
2143 | &gfs2_debug_fops); | 2144 | &gfs2_debug_fops); |
2144 | if (!sdp->debugfs_dentry_glocks) | 2145 | if (!sdp->debugfs_dentry_glocks) |
2145 | return -ENOMEM; | 2146 | return -ENOMEM; |
2146 | 2147 | ||
2147 | return 0; | 2148 | return 0; |
2148 | } | 2149 | } |
2149 | 2150 | ||
2150 | void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp) | 2151 | void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp) |
2151 | { | 2152 | { |
2152 | if (sdp && sdp->debugfs_dir) { | 2153 | if (sdp && sdp->debugfs_dir) { |
2153 | if (sdp->debugfs_dentry_glocks) { | 2154 | if (sdp->debugfs_dentry_glocks) { |
2154 | debugfs_remove(sdp->debugfs_dentry_glocks); | 2155 | debugfs_remove(sdp->debugfs_dentry_glocks); |
2155 | sdp->debugfs_dentry_glocks = NULL; | 2156 | sdp->debugfs_dentry_glocks = NULL; |
2156 | } | 2157 | } |
2157 | debugfs_remove(sdp->debugfs_dir); | 2158 | debugfs_remove(sdp->debugfs_dir); |
2158 | sdp->debugfs_dir = NULL; | 2159 | sdp->debugfs_dir = NULL; |
2159 | } | 2160 | } |
2160 | } | 2161 | } |
2161 | 2162 | ||
2162 | int gfs2_register_debugfs(void) | 2163 | int gfs2_register_debugfs(void) |
2163 | { | 2164 | { |
2164 | gfs2_root = debugfs_create_dir("gfs2", NULL); | 2165 | gfs2_root = debugfs_create_dir("gfs2", NULL); |
2165 | return gfs2_root ? 0 : -ENOMEM; | 2166 | return gfs2_root ? 0 : -ENOMEM; |
2166 | } | 2167 | } |
2167 | 2168 | ||
/* Remove the top-level "gfs2" debugfs directory created at module init. */
void gfs2_unregister_debugfs(void)
{
	debugfs_remove(gfs2_root);
	gfs2_root = NULL;	/* guard against reuse after removal */
}
2173 | 2174 |
fs/gfs2/ops_address.c
1 | /* | 1 | /* |
2 | * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. | 2 | * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. |
3 | * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved. | 3 | * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved. |
4 | * | 4 | * |
5 | * This copyrighted material is made available to anyone wishing to use, | 5 | * This copyrighted material is made available to anyone wishing to use, |
6 | * modify, copy, or redistribute it subject to the terms and conditions | 6 | * modify, copy, or redistribute it subject to the terms and conditions |
7 | * of the GNU General Public License version 2. | 7 | * of the GNU General Public License version 2. |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/sched.h> | 10 | #include <linux/sched.h> |
11 | #include <linux/slab.h> | 11 | #include <linux/slab.h> |
12 | #include <linux/spinlock.h> | 12 | #include <linux/spinlock.h> |
13 | #include <linux/completion.h> | 13 | #include <linux/completion.h> |
14 | #include <linux/buffer_head.h> | 14 | #include <linux/buffer_head.h> |
15 | #include <linux/pagemap.h> | 15 | #include <linux/pagemap.h> |
16 | #include <linux/pagevec.h> | 16 | #include <linux/pagevec.h> |
17 | #include <linux/mpage.h> | 17 | #include <linux/mpage.h> |
18 | #include <linux/fs.h> | 18 | #include <linux/fs.h> |
19 | #include <linux/writeback.h> | 19 | #include <linux/writeback.h> |
20 | #include <linux/gfs2_ondisk.h> | 20 | #include <linux/gfs2_ondisk.h> |
21 | #include <linux/lm_interface.h> | 21 | #include <linux/lm_interface.h> |
22 | 22 | ||
23 | #include "gfs2.h" | 23 | #include "gfs2.h" |
24 | #include "incore.h" | 24 | #include "incore.h" |
25 | #include "bmap.h" | 25 | #include "bmap.h" |
26 | #include "glock.h" | 26 | #include "glock.h" |
27 | #include "inode.h" | 27 | #include "inode.h" |
28 | #include "log.h" | 28 | #include "log.h" |
29 | #include "meta_io.h" | 29 | #include "meta_io.h" |
30 | #include "ops_address.h" | 30 | #include "ops_address.h" |
31 | #include "quota.h" | 31 | #include "quota.h" |
32 | #include "trans.h" | 32 | #include "trans.h" |
33 | #include "rgrp.h" | 33 | #include "rgrp.h" |
34 | #include "ops_file.h" | 34 | #include "ops_file.h" |
35 | #include "super.h" | ||
35 | #include "util.h" | 36 | #include "util.h" |
36 | #include "glops.h" | 37 | #include "glops.h" |
37 | 38 | ||
38 | 39 | ||
/**
 * gfs2_page_add_databufs - add a page's buffers in [from, to) to the transaction
 * @ip: the inode the page belongs to
 * @page: the page; must already have buffer heads attached
 * @from: first byte offset within the page (inclusive)
 * @to: end byte offset within the page (exclusive)
 *
 * Walks the circular b_this_page list and calls gfs2_trans_add_bh() for
 * every buffer that overlaps the byte range.
 */
static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
				   unsigned int from, unsigned int to)
{
	struct buffer_head *head = page_buffers(page);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	unsigned int start, end;

	/*
	 * b_this_page is circular: "bh != head || !start" admits the first
	 * iteration (bh == head, start == 0) yet stops once the walk wraps
	 * back to head with start != 0.
	 */
	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from || start >= to)
			continue;	/* buffer lies entirely outside the range */
		gfs2_trans_add_bh(ip->i_gl, bh, 0);
	}
}
55 | 56 | ||
/**
 * gfs2_get_block - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Thin get_block_t adapter over gfs2_block_map() for the generic
 * mpage/block helpers; @create is forwarded unchanged.
 *
 * Returns: errno
 */
int gfs2_get_block(struct inode *inode, sector_t lblock,
		   struct buffer_head *bh_result, int create)
{
	return gfs2_block_map(inode, lblock, create, bh_result);
}
71 | 72 | ||
72 | /** | 73 | /** |
73 | * gfs2_get_block_noalloc - Fills in a buffer head with details about a block | 74 | * gfs2_get_block_noalloc - Fills in a buffer head with details about a block |
74 | * @inode: The inode | 75 | * @inode: The inode |
75 | * @lblock: The block number to look up | 76 | * @lblock: The block number to look up |
76 | * @bh_result: The buffer head to return the result in | 77 | * @bh_result: The buffer head to return the result in |
77 | * @create: Non-zero if we may add block to the file | 78 | * @create: Non-zero if we may add block to the file |
78 | * | 79 | * |
79 | * Returns: errno | 80 | * Returns: errno |
80 | */ | 81 | */ |
81 | 82 | ||
82 | static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock, | 83 | static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock, |
83 | struct buffer_head *bh_result, int create) | 84 | struct buffer_head *bh_result, int create) |
84 | { | 85 | { |
85 | int error; | 86 | int error; |
86 | 87 | ||
87 | error = gfs2_block_map(inode, lblock, 0, bh_result); | 88 | error = gfs2_block_map(inode, lblock, 0, bh_result); |
88 | if (error) | 89 | if (error) |
89 | return error; | 90 | return error; |
90 | if (bh_result->b_blocknr == 0) | 91 | if (bh_result->b_blocknr == 0) |
91 | return -EIO; | 92 | return -EIO; |
92 | return 0; | 93 | return 0; |
93 | } | 94 | } |
94 | 95 | ||
/*
 * get_block_t used for direct I/O: always calls gfs2_block_map() with
 * create == 0, so it maps existing blocks but never allocates, whatever
 * the caller passed in @create.
 */
static int gfs2_get_block_direct(struct inode *inode, sector_t lblock,
				 struct buffer_head *bh_result, int create)
{
	return gfs2_block_map(inode, lblock, 0, bh_result);
}
100 | 101 | ||
/**
 * gfs2_writepage - Write complete page
 * @page: Page to write
 * @wbc: Write-back control
 *
 * Returns: errno
 *
 * Some of this is copied from block_write_full_page() although we still
 * call it to do most of the work.  The caller must already hold the
 * inode glock exclusively (asserted below).
 */
static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	unsigned offset;
	int error;
	int done_trans = 0;	/* set once we open a transaction of our own */

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl))) {
		unlock_page(page);
		return -EIO;
	}
	/* Already inside a transaction: cannot nest one, so just redirty. */
	if (current->journal_info)
		goto out_ignore;

	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_CACHE_SIZE-1);
	if (page->index > end_index || (page->index == end_index && !offset)) {
		page->mapping->a_ops->invalidatepage(page, 0);
		unlock_page(page);
		return 0; /* don't care */
	}

	/* Ordered/journaled data must go through the journal transaction. */
	if (sdp->sd_args.ar_data == GFS2_DATA_ORDERED || gfs2_is_jdata(ip)) {
		error = gfs2_trans_begin(sdp, RES_DINODE + 1, 0);
		if (error)
			goto out_ignore;
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, inode->i_sb->s_blocksize,
					     (1 << BH_Dirty)|(1 << BH_Uptodate));
		}
		/*
		 * NOTE(review): the range end passed here is one filesystem
		 * block (s_blocksize-1), not the full page — looks like it
		 * only covers the first buffer on multi-block pages; confirm
		 * this is intentional.
		 */
		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1);
		done_trans = 1;
	}
	error = block_write_full_page(page, gfs2_get_block_noalloc, wbc);
	if (done_trans)
		gfs2_trans_end(sdp);
	gfs2_meta_cache_flush(ip);
	return error;

out_ignore:
	/* Could not write now: keep the page dirty for a later pass. */
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}
159 | 160 | ||
160 | /** | 161 | /** |
161 | * gfs2_writepages - Write a bunch of dirty pages back to disk | 162 | * gfs2_writepages - Write a bunch of dirty pages back to disk |
162 | * @mapping: The mapping to write | 163 | * @mapping: The mapping to write |
163 | * @wbc: Write-back control | 164 | * @wbc: Write-back control |
164 | * | 165 | * |
165 | * For journaled files and/or ordered writes this just falls back to the | 166 | * For journaled files and/or ordered writes this just falls back to the |
166 | * kernel's default writepages path for now. We will probably want to change | 167 | * kernel's default writepages path for now. We will probably want to change |
167 | * that eventually (i.e. when we look at allocate on flush). | 168 | * that eventually (i.e. when we look at allocate on flush). |
168 | * | 169 | * |
169 | * For the data=writeback case though we can already ignore buffer heads | 170 | * For the data=writeback case though we can already ignore buffer heads |
170 | * and write whole extents at once. This is a big reduction in the | 171 | * and write whole extents at once. This is a big reduction in the |
171 | * number of I/O requests we send and the bmap calls we make in this case. | 172 | * number of I/O requests we send and the bmap calls we make in this case. |
172 | */ | 173 | */ |
173 | static int gfs2_writepages(struct address_space *mapping, | 174 | static int gfs2_writepages(struct address_space *mapping, |
174 | struct writeback_control *wbc) | 175 | struct writeback_control *wbc) |
175 | { | 176 | { |
176 | struct inode *inode = mapping->host; | 177 | struct inode *inode = mapping->host; |
177 | struct gfs2_inode *ip = GFS2_I(inode); | 178 | struct gfs2_inode *ip = GFS2_I(inode); |
178 | struct gfs2_sbd *sdp = GFS2_SB(inode); | 179 | struct gfs2_sbd *sdp = GFS2_SB(inode); |
179 | 180 | ||
180 | if (sdp->sd_args.ar_data == GFS2_DATA_WRITEBACK && !gfs2_is_jdata(ip)) | 181 | if (sdp->sd_args.ar_data == GFS2_DATA_WRITEBACK && !gfs2_is_jdata(ip)) |
181 | return mpage_writepages(mapping, wbc, gfs2_get_block_noalloc); | 182 | return mpage_writepages(mapping, wbc, gfs2_get_block_noalloc); |
182 | 183 | ||
183 | return generic_writepages(mapping, wbc); | 184 | return generic_writepages(mapping, wbc); |
184 | } | 185 | } |
185 | 186 | ||
186 | /** | 187 | /** |
187 | * stuffed_readpage - Fill in a Linux page with stuffed file data | 188 | * stuffed_readpage - Fill in a Linux page with stuffed file data |
188 | * @ip: the inode | 189 | * @ip: the inode |
189 | * @page: the page | 190 | * @page: the page |
190 | * | 191 | * |
191 | * Returns: errno | 192 | * Returns: errno |
192 | */ | 193 | */ |
193 | 194 | ||
194 | static int stuffed_readpage(struct gfs2_inode *ip, struct page *page) | 195 | static int stuffed_readpage(struct gfs2_inode *ip, struct page *page) |
195 | { | 196 | { |
196 | struct buffer_head *dibh; | 197 | struct buffer_head *dibh; |
197 | void *kaddr; | 198 | void *kaddr; |
198 | int error; | 199 | int error; |
199 | 200 | ||
200 | /* | 201 | /* |
201 | * Due to the order of unstuffing files and ->nopage(), we can be | 202 | * Due to the order of unstuffing files and ->nopage(), we can be |
202 | * asked for a zero page in the case of a stuffed file being extended, | 203 | * asked for a zero page in the case of a stuffed file being extended, |
203 | * so we need to supply one here. It doesn't happen often. | 204 | * so we need to supply one here. It doesn't happen often. |
204 | */ | 205 | */ |
205 | if (unlikely(page->index)) { | 206 | if (unlikely(page->index)) { |
206 | kaddr = kmap_atomic(page, KM_USER0); | 207 | kaddr = kmap_atomic(page, KM_USER0); |
207 | memset(kaddr, 0, PAGE_CACHE_SIZE); | 208 | memset(kaddr, 0, PAGE_CACHE_SIZE); |
208 | kunmap_atomic(kaddr, KM_USER0); | 209 | kunmap_atomic(kaddr, KM_USER0); |
209 | flush_dcache_page(page); | 210 | flush_dcache_page(page); |
210 | SetPageUptodate(page); | 211 | SetPageUptodate(page); |
211 | return 0; | 212 | return 0; |
212 | } | 213 | } |
213 | 214 | ||
214 | error = gfs2_meta_inode_buffer(ip, &dibh); | 215 | error = gfs2_meta_inode_buffer(ip, &dibh); |
215 | if (error) | 216 | if (error) |
216 | return error; | 217 | return error; |
217 | 218 | ||
218 | kaddr = kmap_atomic(page, KM_USER0); | 219 | kaddr = kmap_atomic(page, KM_USER0); |
219 | memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), | 220 | memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), |
220 | ip->i_di.di_size); | 221 | ip->i_di.di_size); |
221 | memset(kaddr + ip->i_di.di_size, 0, PAGE_CACHE_SIZE - ip->i_di.di_size); | 222 | memset(kaddr + ip->i_di.di_size, 0, PAGE_CACHE_SIZE - ip->i_di.di_size); |
222 | kunmap_atomic(kaddr, KM_USER0); | 223 | kunmap_atomic(kaddr, KM_USER0); |
223 | flush_dcache_page(page); | 224 | flush_dcache_page(page); |
224 | brelse(dibh); | 225 | brelse(dibh); |
225 | SetPageUptodate(page); | 226 | SetPageUptodate(page); |
226 | 227 | ||
227 | return 0; | 228 | return 0; |
228 | } | 229 | } |
229 | 230 | ||
230 | 231 | ||
231 | /** | 232 | /** |
232 | * gfs2_readpage - readpage with locking | 233 | * gfs2_readpage - readpage with locking |
233 | * @file: The file to read a page for. N.B. This may be NULL if we are | 234 | * @file: The file to read a page for. N.B. This may be NULL if we are |
234 | * reading an internal file. | 235 | * reading an internal file. |
235 | * @page: The page to read | 236 | * @page: The page to read |
236 | * | 237 | * |
237 | * Returns: errno | 238 | * Returns: errno |
238 | */ | 239 | */ |
239 | 240 | ||
240 | static int gfs2_readpage(struct file *file, struct page *page) | 241 | static int gfs2_readpage(struct file *file, struct page *page) |
241 | { | 242 | { |
242 | struct gfs2_inode *ip = GFS2_I(page->mapping->host); | 243 | struct gfs2_inode *ip = GFS2_I(page->mapping->host); |
243 | struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host); | 244 | struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host); |
244 | struct gfs2_file *gf = NULL; | 245 | struct gfs2_file *gf = NULL; |
245 | struct gfs2_holder gh; | 246 | struct gfs2_holder gh; |
246 | int error; | 247 | int error; |
247 | int do_unlock = 0; | 248 | int do_unlock = 0; |
248 | 249 | ||
249 | if (likely(file != &gfs2_internal_file_sentinel)) { | 250 | if (likely(file != &gfs2_internal_file_sentinel)) { |
250 | if (file) { | 251 | if (file) { |
251 | gf = file->private_data; | 252 | gf = file->private_data; |
252 | if (test_bit(GFF_EXLOCK, &gf->f_flags)) | 253 | if (test_bit(GFF_EXLOCK, &gf->f_flags)) |
253 | /* gfs2_sharewrite_nopage has grabbed the ip->i_gl already */ | 254 | /* gfs2_sharewrite_nopage has grabbed the ip->i_gl already */ |
254 | goto skip_lock; | 255 | goto skip_lock; |
255 | } | 256 | } |
256 | gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME|LM_FLAG_TRY_1CB, &gh); | 257 | gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME|LM_FLAG_TRY_1CB, &gh); |
257 | do_unlock = 1; | 258 | do_unlock = 1; |
258 | error = gfs2_glock_nq_atime(&gh); | 259 | error = gfs2_glock_nq_atime(&gh); |
259 | if (unlikely(error)) | 260 | if (unlikely(error)) |
260 | goto out_unlock; | 261 | goto out_unlock; |
261 | } | 262 | } |
262 | 263 | ||
263 | skip_lock: | 264 | skip_lock: |
264 | if (gfs2_is_stuffed(ip)) { | 265 | if (gfs2_is_stuffed(ip)) { |
265 | error = stuffed_readpage(ip, page); | 266 | error = stuffed_readpage(ip, page); |
266 | unlock_page(page); | 267 | unlock_page(page); |
267 | } else | 268 | } else |
268 | error = mpage_readpage(page, gfs2_get_block); | 269 | error = mpage_readpage(page, gfs2_get_block); |
269 | 270 | ||
270 | if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) | 271 | if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) |
271 | error = -EIO; | 272 | error = -EIO; |
272 | 273 | ||
273 | if (do_unlock) { | 274 | if (do_unlock) { |
274 | gfs2_glock_dq_m(1, &gh); | 275 | gfs2_glock_dq_m(1, &gh); |
275 | gfs2_holder_uninit(&gh); | 276 | gfs2_holder_uninit(&gh); |
276 | } | 277 | } |
277 | out: | 278 | out: |
278 | return error; | 279 | return error; |
279 | out_unlock: | 280 | out_unlock: |
280 | unlock_page(page); | 281 | unlock_page(page); |
281 | if (error == GLR_TRYFAILED) { | 282 | if (error == GLR_TRYFAILED) { |
282 | error = AOP_TRUNCATED_PAGE; | 283 | error = AOP_TRUNCATED_PAGE; |
283 | yield(); | 284 | yield(); |
284 | } | 285 | } |
285 | if (do_unlock) | 286 | if (do_unlock) |
286 | gfs2_holder_uninit(&gh); | 287 | gfs2_holder_uninit(&gh); |
287 | goto out; | 288 | goto out; |
288 | } | 289 | } |
289 | 290 | ||
290 | /** | 291 | /** |
291 | * gfs2_readpages - Read a bunch of pages at once | 292 | * gfs2_readpages - Read a bunch of pages at once |
292 | * | 293 | * |
293 | * Some notes: | 294 | * Some notes: |
294 | * 1. This is only for readahead, so we can simply ignore any things | 295 | * 1. This is only for readahead, so we can simply ignore any things |
295 | * which are slightly inconvenient (such as locking conflicts between | 296 | * which are slightly inconvenient (such as locking conflicts between |
296 | * the page lock and the glock) and return having done no I/O. Its | 297 | * the page lock and the glock) and return having done no I/O. Its |
297 | * obviously not something we'd want to do on too regular a basis. | 298 | * obviously not something we'd want to do on too regular a basis. |
298 | * Any I/O we ignore at this time will be done via readpage later. | 299 | * Any I/O we ignore at this time will be done via readpage later. |
299 | * 2. We don't handle stuffed files here we let readpage do the honours. | 300 | * 2. We don't handle stuffed files here we let readpage do the honours. |
300 | * 3. mpage_readpages() does most of the heavy lifting in the common case. | 301 | * 3. mpage_readpages() does most of the heavy lifting in the common case. |
301 | * 4. gfs2_get_block() is relied upon to set BH_Boundary in the right places. | 302 | * 4. gfs2_get_block() is relied upon to set BH_Boundary in the right places. |
302 | * 5. We use LM_FLAG_TRY_1CB here, effectively we then have lock-ahead as | 303 | * 5. We use LM_FLAG_TRY_1CB here, effectively we then have lock-ahead as |
303 | * well as read-ahead. | 304 | * well as read-ahead. |
304 | */ | 305 | */ |
305 | static int gfs2_readpages(struct file *file, struct address_space *mapping, | 306 | static int gfs2_readpages(struct file *file, struct address_space *mapping, |
306 | struct list_head *pages, unsigned nr_pages) | 307 | struct list_head *pages, unsigned nr_pages) |
307 | { | 308 | { |
308 | struct inode *inode = mapping->host; | 309 | struct inode *inode = mapping->host; |
309 | struct gfs2_inode *ip = GFS2_I(inode); | 310 | struct gfs2_inode *ip = GFS2_I(inode); |
310 | struct gfs2_sbd *sdp = GFS2_SB(inode); | 311 | struct gfs2_sbd *sdp = GFS2_SB(inode); |
311 | struct gfs2_holder gh; | 312 | struct gfs2_holder gh; |
312 | int ret = 0; | 313 | int ret = 0; |
313 | int do_unlock = 0; | 314 | int do_unlock = 0; |
314 | 315 | ||
315 | if (likely(file != &gfs2_internal_file_sentinel)) { | 316 | if (likely(file != &gfs2_internal_file_sentinel)) { |
316 | if (file) { | 317 | if (file) { |
317 | struct gfs2_file *gf = file->private_data; | 318 | struct gfs2_file *gf = file->private_data; |
318 | if (test_bit(GFF_EXLOCK, &gf->f_flags)) | 319 | if (test_bit(GFF_EXLOCK, &gf->f_flags)) |
319 | goto skip_lock; | 320 | goto skip_lock; |
320 | } | 321 | } |
321 | gfs2_holder_init(ip->i_gl, LM_ST_SHARED, | 322 | gfs2_holder_init(ip->i_gl, LM_ST_SHARED, |
322 | LM_FLAG_TRY_1CB|GL_ATIME, &gh); | 323 | LM_FLAG_TRY_1CB|GL_ATIME, &gh); |
323 | do_unlock = 1; | 324 | do_unlock = 1; |
324 | ret = gfs2_glock_nq_atime(&gh); | 325 | ret = gfs2_glock_nq_atime(&gh); |
325 | if (ret == GLR_TRYFAILED) | 326 | if (ret == GLR_TRYFAILED) |
326 | goto out_noerror; | 327 | goto out_noerror; |
327 | if (unlikely(ret)) | 328 | if (unlikely(ret)) |
328 | goto out_unlock; | 329 | goto out_unlock; |
329 | } | 330 | } |
330 | skip_lock: | 331 | skip_lock: |
331 | if (!gfs2_is_stuffed(ip)) | 332 | if (!gfs2_is_stuffed(ip)) |
332 | ret = mpage_readpages(mapping, pages, nr_pages, gfs2_get_block); | 333 | ret = mpage_readpages(mapping, pages, nr_pages, gfs2_get_block); |
333 | 334 | ||
334 | if (do_unlock) { | 335 | if (do_unlock) { |
335 | gfs2_glock_dq_m(1, &gh); | 336 | gfs2_glock_dq_m(1, &gh); |
336 | gfs2_holder_uninit(&gh); | 337 | gfs2_holder_uninit(&gh); |
337 | } | 338 | } |
338 | out: | 339 | out: |
339 | if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) | 340 | if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) |
340 | ret = -EIO; | 341 | ret = -EIO; |
341 | return ret; | 342 | return ret; |
342 | out_noerror: | 343 | out_noerror: |
343 | ret = 0; | 344 | ret = 0; |
344 | out_unlock: | 345 | out_unlock: |
345 | if (do_unlock) | 346 | if (do_unlock) |
346 | gfs2_holder_uninit(&gh); | 347 | gfs2_holder_uninit(&gh); |
347 | goto out; | 348 | goto out; |
348 | } | 349 | } |
349 | 350 | ||
350 | /** | 351 | /** |
351 | * gfs2_prepare_write - Prepare to write a page to a file | 352 | * gfs2_prepare_write - Prepare to write a page to a file |
352 | * @file: The file to write to | 353 | * @file: The file to write to |
353 | * @page: The page which is to be prepared for writing | 354 | * @page: The page which is to be prepared for writing |
354 | * @from: From (byte range within page) | 355 | * @from: From (byte range within page) |
355 | * @to: To (byte range within page) | 356 | * @to: To (byte range within page) |
356 | * | 357 | * |
357 | * Returns: errno | 358 | * Returns: errno |
358 | */ | 359 | */ |
359 | 360 | ||
360 | static int gfs2_prepare_write(struct file *file, struct page *page, | 361 | static int gfs2_prepare_write(struct file *file, struct page *page, |
361 | unsigned from, unsigned to) | 362 | unsigned from, unsigned to) |
362 | { | 363 | { |
363 | struct gfs2_inode *ip = GFS2_I(page->mapping->host); | 364 | struct gfs2_inode *ip = GFS2_I(page->mapping->host); |
364 | struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host); | 365 | struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host); |
365 | unsigned int data_blocks, ind_blocks, rblocks; | 366 | unsigned int data_blocks, ind_blocks, rblocks; |
366 | int alloc_required; | 367 | int alloc_required; |
367 | int error = 0; | 368 | int error = 0; |
368 | loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + from; | 369 | loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + from; |
369 | loff_t end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to; | 370 | loff_t end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to; |
370 | struct gfs2_alloc *al; | 371 | struct gfs2_alloc *al; |
371 | unsigned int write_len = to - from; | 372 | unsigned int write_len = to - from; |
372 | 373 | ||
373 | 374 | ||
374 | gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_ATIME|LM_FLAG_TRY_1CB, &ip->i_gh); | 375 | gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_ATIME|LM_FLAG_TRY_1CB, &ip->i_gh); |
375 | error = gfs2_glock_nq_atime(&ip->i_gh); | 376 | error = gfs2_glock_nq_atime(&ip->i_gh); |
376 | if (unlikely(error)) { | 377 | if (unlikely(error)) { |
377 | if (error == GLR_TRYFAILED) { | 378 | if (error == GLR_TRYFAILED) { |
378 | unlock_page(page); | 379 | unlock_page(page); |
379 | error = AOP_TRUNCATED_PAGE; | 380 | error = AOP_TRUNCATED_PAGE; |
380 | yield(); | 381 | yield(); |
381 | } | 382 | } |
382 | goto out_uninit; | 383 | goto out_uninit; |
383 | } | 384 | } |
384 | 385 | ||
385 | gfs2_write_calc_reserv(ip, write_len, &data_blocks, &ind_blocks); | 386 | gfs2_write_calc_reserv(ip, write_len, &data_blocks, &ind_blocks); |
386 | 387 | ||
387 | error = gfs2_write_alloc_required(ip, pos, write_len, &alloc_required); | 388 | error = gfs2_write_alloc_required(ip, pos, write_len, &alloc_required); |
388 | if (error) | 389 | if (error) |
389 | goto out_unlock; | 390 | goto out_unlock; |
390 | 391 | ||
391 | 392 | ||
392 | ip->i_alloc.al_requested = 0; | 393 | ip->i_alloc.al_requested = 0; |
393 | if (alloc_required) { | 394 | if (alloc_required) { |
394 | al = gfs2_alloc_get(ip); | 395 | al = gfs2_alloc_get(ip); |
395 | 396 | ||
396 | error = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE); | 397 | error = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE); |
397 | if (error) | 398 | if (error) |
398 | goto out_alloc_put; | 399 | goto out_alloc_put; |
399 | 400 | ||
400 | error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid); | 401 | error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid); |
401 | if (error) | 402 | if (error) |
402 | goto out_qunlock; | 403 | goto out_qunlock; |
403 | 404 | ||
404 | al->al_requested = data_blocks + ind_blocks; | 405 | al->al_requested = data_blocks + ind_blocks; |
405 | error = gfs2_inplace_reserve(ip); | 406 | error = gfs2_inplace_reserve(ip); |
406 | if (error) | 407 | if (error) |
407 | goto out_qunlock; | 408 | goto out_qunlock; |
408 | } | 409 | } |
409 | 410 | ||
410 | rblocks = RES_DINODE + ind_blocks; | 411 | rblocks = RES_DINODE + ind_blocks; |
411 | if (gfs2_is_jdata(ip)) | 412 | if (gfs2_is_jdata(ip)) |
412 | rblocks += data_blocks ? data_blocks : 1; | 413 | rblocks += data_blocks ? data_blocks : 1; |
413 | if (ind_blocks || data_blocks) | 414 | if (ind_blocks || data_blocks) |
414 | rblocks += RES_STATFS + RES_QUOTA; | 415 | rblocks += RES_STATFS + RES_QUOTA; |
415 | 416 | ||
416 | error = gfs2_trans_begin(sdp, rblocks, 0); | 417 | error = gfs2_trans_begin(sdp, rblocks, 0); |
417 | if (error) | 418 | if (error) |
418 | goto out; | 419 | goto out; |
419 | 420 | ||
420 | if (gfs2_is_stuffed(ip)) { | 421 | if (gfs2_is_stuffed(ip)) { |
421 | if (end > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) { | 422 | if (end > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) { |
422 | error = gfs2_unstuff_dinode(ip, page); | 423 | error = gfs2_unstuff_dinode(ip, page); |
423 | if (error == 0) | 424 | if (error == 0) |
424 | goto prepare_write; | 425 | goto prepare_write; |
425 | } else if (!PageUptodate(page)) | 426 | } else if (!PageUptodate(page)) |
426 | error = stuffed_readpage(ip, page); | 427 | error = stuffed_readpage(ip, page); |
427 | goto out; | 428 | goto out; |
428 | } | 429 | } |
429 | 430 | ||
430 | prepare_write: | 431 | prepare_write: |
431 | error = block_prepare_write(page, from, to, gfs2_get_block); | 432 | error = block_prepare_write(page, from, to, gfs2_get_block); |
432 | 433 | ||
433 | out: | 434 | out: |
434 | if (error) { | 435 | if (error) { |
435 | gfs2_trans_end(sdp); | 436 | gfs2_trans_end(sdp); |
436 | if (alloc_required) { | 437 | if (alloc_required) { |
437 | gfs2_inplace_release(ip); | 438 | gfs2_inplace_release(ip); |
438 | out_qunlock: | 439 | out_qunlock: |
439 | gfs2_quota_unlock(ip); | 440 | gfs2_quota_unlock(ip); |
440 | out_alloc_put: | 441 | out_alloc_put: |
441 | gfs2_alloc_put(ip); | 442 | gfs2_alloc_put(ip); |
442 | } | 443 | } |
443 | out_unlock: | 444 | out_unlock: |
444 | gfs2_glock_dq_m(1, &ip->i_gh); | 445 | gfs2_glock_dq_m(1, &ip->i_gh); |
445 | out_uninit: | 446 | out_uninit: |
446 | gfs2_holder_uninit(&ip->i_gh); | 447 | gfs2_holder_uninit(&ip->i_gh); |
447 | } | 448 | } |
448 | 449 | ||
449 | return error; | 450 | return error; |
450 | } | 451 | } |
451 | 452 | ||
452 | /** | 453 | /** |
453 | * adjust_fs_space - Adjusts the free space available due to gfs2_grow | 454 | * adjust_fs_space - Adjusts the free space available due to gfs2_grow |
454 | * @inode: the rindex inode | 455 | * @inode: the rindex inode |
455 | */ | 456 | */ |
456 | static void adjust_fs_space(struct inode *inode) | 457 | static void adjust_fs_space(struct inode *inode) |
457 | { | 458 | { |
458 | struct gfs2_sbd *sdp = inode->i_sb->s_fs_info; | 459 | struct gfs2_sbd *sdp = inode->i_sb->s_fs_info; |
459 | struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master; | 460 | struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master; |
460 | struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local; | 461 | struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local; |
461 | u64 fs_total, new_free; | 462 | u64 fs_total, new_free; |
462 | 463 | ||
463 | /* Total up the file system space, according to the latest rindex. */ | 464 | /* Total up the file system space, according to the latest rindex. */ |
464 | fs_total = gfs2_ri_total(sdp); | 465 | fs_total = gfs2_ri_total(sdp); |
465 | 466 | ||
466 | spin_lock(&sdp->sd_statfs_spin); | 467 | spin_lock(&sdp->sd_statfs_spin); |
467 | if (fs_total > (m_sc->sc_total + l_sc->sc_total)) | 468 | if (fs_total > (m_sc->sc_total + l_sc->sc_total)) |
468 | new_free = fs_total - (m_sc->sc_total + l_sc->sc_total); | 469 | new_free = fs_total - (m_sc->sc_total + l_sc->sc_total); |
469 | else | 470 | else |
470 | new_free = 0; | 471 | new_free = 0; |
471 | spin_unlock(&sdp->sd_statfs_spin); | 472 | spin_unlock(&sdp->sd_statfs_spin); |
472 | fs_warn(sdp, "File system extended by %llu blocks.\n", | 473 | fs_warn(sdp, "File system extended by %llu blocks.\n", |
473 | (unsigned long long)new_free); | 474 | (unsigned long long)new_free); |
474 | gfs2_statfs_change(sdp, new_free, new_free, 0); | 475 | gfs2_statfs_change(sdp, new_free, new_free, 0); |
475 | } | 476 | } |
476 | 477 | ||
477 | /** | 478 | /** |
478 | * gfs2_commit_write - Commit write to a file | 479 | * gfs2_commit_write - Commit write to a file |
479 | * @file: The file to write to | 480 | * @file: The file to write to |
480 | * @page: The page containing the data | 481 | * @page: The page containing the data |
481 | * @from: From (byte range within page) | 482 | * @from: From (byte range within page) |
482 | * @to: To (byte range within page) | 483 | * @to: To (byte range within page) |
483 | * | 484 | * |
484 | * Returns: errno | 485 | * Returns: errno |
485 | */ | 486 | */ |
486 | 487 | ||
487 | static int gfs2_commit_write(struct file *file, struct page *page, | 488 | static int gfs2_commit_write(struct file *file, struct page *page, |
488 | unsigned from, unsigned to) | 489 | unsigned from, unsigned to) |
489 | { | 490 | { |
490 | struct inode *inode = page->mapping->host; | 491 | struct inode *inode = page->mapping->host; |
491 | struct gfs2_inode *ip = GFS2_I(inode); | 492 | struct gfs2_inode *ip = GFS2_I(inode); |
492 | struct gfs2_sbd *sdp = GFS2_SB(inode); | 493 | struct gfs2_sbd *sdp = GFS2_SB(inode); |
493 | int error = -EOPNOTSUPP; | 494 | int error = -EOPNOTSUPP; |
494 | struct buffer_head *dibh; | 495 | struct buffer_head *dibh; |
495 | struct gfs2_alloc *al = &ip->i_alloc; | 496 | struct gfs2_alloc *al = &ip->i_alloc; |
496 | struct gfs2_dinode *di; | 497 | struct gfs2_dinode *di; |
497 | 498 | ||
498 | if (gfs2_assert_withdraw(sdp, gfs2_glock_is_locked_by_me(ip->i_gl))) | 499 | if (gfs2_assert_withdraw(sdp, gfs2_glock_is_locked_by_me(ip->i_gl))) |
499 | goto fail_nounlock; | 500 | goto fail_nounlock; |
500 | 501 | ||
501 | error = gfs2_meta_inode_buffer(ip, &dibh); | 502 | error = gfs2_meta_inode_buffer(ip, &dibh); |
502 | if (error) | 503 | if (error) |
503 | goto fail_endtrans; | 504 | goto fail_endtrans; |
504 | 505 | ||
505 | gfs2_trans_add_bh(ip->i_gl, dibh, 1); | 506 | gfs2_trans_add_bh(ip->i_gl, dibh, 1); |
506 | di = (struct gfs2_dinode *)dibh->b_data; | 507 | di = (struct gfs2_dinode *)dibh->b_data; |
507 | 508 | ||
508 | if (gfs2_is_stuffed(ip)) { | 509 | if (gfs2_is_stuffed(ip)) { |
509 | u64 file_size; | 510 | u64 file_size; |
510 | void *kaddr; | 511 | void *kaddr; |
511 | 512 | ||
512 | file_size = ((u64)page->index << PAGE_CACHE_SHIFT) + to; | 513 | file_size = ((u64)page->index << PAGE_CACHE_SHIFT) + to; |
513 | 514 | ||
514 | kaddr = kmap_atomic(page, KM_USER0); | 515 | kaddr = kmap_atomic(page, KM_USER0); |
515 | memcpy(dibh->b_data + sizeof(struct gfs2_dinode) + from, | 516 | memcpy(dibh->b_data + sizeof(struct gfs2_dinode) + from, |
516 | kaddr + from, to - from); | 517 | kaddr + from, to - from); |
517 | kunmap_atomic(kaddr, KM_USER0); | 518 | kunmap_atomic(kaddr, KM_USER0); |
518 | 519 | ||
519 | SetPageUptodate(page); | 520 | SetPageUptodate(page); |
520 | 521 | ||
521 | if (inode->i_size < file_size) { | 522 | if (inode->i_size < file_size) { |
522 | i_size_write(inode, file_size); | 523 | i_size_write(inode, file_size); |
523 | mark_inode_dirty(inode); | 524 | mark_inode_dirty(inode); |
524 | } | 525 | } |
525 | } else { | 526 | } else { |
526 | if (sdp->sd_args.ar_data == GFS2_DATA_ORDERED || | 527 | if (sdp->sd_args.ar_data == GFS2_DATA_ORDERED || |
527 | gfs2_is_jdata(ip)) | 528 | gfs2_is_jdata(ip)) |
528 | gfs2_page_add_databufs(ip, page, from, to); | 529 | gfs2_page_add_databufs(ip, page, from, to); |
529 | error = generic_commit_write(file, page, from, to); | 530 | error = generic_commit_write(file, page, from, to); |
530 | if (error) | 531 | if (error) |
531 | goto fail; | 532 | goto fail; |
532 | } | 533 | } |
533 | 534 | ||
534 | if (ip->i_di.di_size < inode->i_size) { | 535 | if (ip->i_di.di_size < inode->i_size) { |
535 | ip->i_di.di_size = inode->i_size; | 536 | ip->i_di.di_size = inode->i_size; |
536 | di->di_size = cpu_to_be64(inode->i_size); | 537 | di->di_size = cpu_to_be64(inode->i_size); |
537 | } | 538 | } |
538 | 539 | ||
539 | if (inode == sdp->sd_rindex) | 540 | if (inode == sdp->sd_rindex) |
540 | adjust_fs_space(inode); | 541 | adjust_fs_space(inode); |
541 | 542 | ||
542 | brelse(dibh); | 543 | brelse(dibh); |
543 | gfs2_trans_end(sdp); | 544 | gfs2_trans_end(sdp); |
544 | if (al->al_requested) { | 545 | if (al->al_requested) { |
545 | gfs2_inplace_release(ip); | 546 | gfs2_inplace_release(ip); |
546 | gfs2_quota_unlock(ip); | 547 | gfs2_quota_unlock(ip); |
547 | gfs2_alloc_put(ip); | 548 | gfs2_alloc_put(ip); |
548 | } | 549 | } |
549 | unlock_page(page); | 550 | unlock_page(page); |
550 | gfs2_glock_dq_m(1, &ip->i_gh); | 551 | gfs2_glock_dq_m(1, &ip->i_gh); |
551 | lock_page(page); | 552 | lock_page(page); |
552 | gfs2_holder_uninit(&ip->i_gh); | 553 | gfs2_holder_uninit(&ip->i_gh); |
553 | return 0; | 554 | return 0; |
554 | 555 | ||
555 | fail: | 556 | fail: |
556 | brelse(dibh); | 557 | brelse(dibh); |
557 | fail_endtrans: | 558 | fail_endtrans: |
558 | gfs2_trans_end(sdp); | 559 | gfs2_trans_end(sdp); |
559 | if (al->al_requested) { | 560 | if (al->al_requested) { |
560 | gfs2_inplace_release(ip); | 561 | gfs2_inplace_release(ip); |
561 | gfs2_quota_unlock(ip); | 562 | gfs2_quota_unlock(ip); |
562 | gfs2_alloc_put(ip); | 563 | gfs2_alloc_put(ip); |
563 | } | 564 | } |
564 | unlock_page(page); | 565 | unlock_page(page); |
565 | gfs2_glock_dq_m(1, &ip->i_gh); | 566 | gfs2_glock_dq_m(1, &ip->i_gh); |
566 | lock_page(page); | 567 | lock_page(page); |
567 | gfs2_holder_uninit(&ip->i_gh); | 568 | gfs2_holder_uninit(&ip->i_gh); |
568 | fail_nounlock: | 569 | fail_nounlock: |
569 | ClearPageUptodate(page); | 570 | ClearPageUptodate(page); |
570 | return error; | 571 | return error; |
571 | } | 572 | } |
572 | 573 | ||
573 | /** | 574 | /** |
574 | * gfs2_bmap - Block map function | 575 | * gfs2_bmap - Block map function |
575 | * @mapping: Address space info | 576 | * @mapping: Address space info |
576 | * @lblock: The block to map | 577 | * @lblock: The block to map |
577 | * | 578 | * |
578 | * Returns: The disk address for the block or 0 on hole or error | 579 | * Returns: The disk address for the block or 0 on hole or error |
579 | */ | 580 | */ |
580 | 581 | ||
581 | static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock) | 582 | static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock) |
582 | { | 583 | { |
583 | struct gfs2_inode *ip = GFS2_I(mapping->host); | 584 | struct gfs2_inode *ip = GFS2_I(mapping->host); |
584 | struct gfs2_holder i_gh; | 585 | struct gfs2_holder i_gh; |
585 | sector_t dblock = 0; | 586 | sector_t dblock = 0; |
586 | int error; | 587 | int error; |
587 | 588 | ||
588 | error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh); | 589 | error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh); |
589 | if (error) | 590 | if (error) |
590 | return 0; | 591 | return 0; |
591 | 592 | ||
592 | if (!gfs2_is_stuffed(ip)) | 593 | if (!gfs2_is_stuffed(ip)) |
593 | dblock = generic_block_bmap(mapping, lblock, gfs2_get_block); | 594 | dblock = generic_block_bmap(mapping, lblock, gfs2_get_block); |
594 | 595 | ||
595 | gfs2_glock_dq_uninit(&i_gh); | 596 | gfs2_glock_dq_uninit(&i_gh); |
596 | 597 | ||
597 | return dblock; | 598 | return dblock; |
598 | } | 599 | } |
599 | 600 | ||
600 | static void discard_buffer(struct gfs2_sbd *sdp, struct buffer_head *bh) | 601 | static void discard_buffer(struct gfs2_sbd *sdp, struct buffer_head *bh) |
601 | { | 602 | { |
602 | struct gfs2_bufdata *bd; | 603 | struct gfs2_bufdata *bd; |
603 | 604 | ||
604 | gfs2_log_lock(sdp); | 605 | gfs2_log_lock(sdp); |
605 | bd = bh->b_private; | 606 | bd = bh->b_private; |
606 | if (bd) { | 607 | if (bd) { |
607 | bd->bd_bh = NULL; | 608 | bd->bd_bh = NULL; |
608 | bh->b_private = NULL; | 609 | bh->b_private = NULL; |
609 | } | 610 | } |
610 | gfs2_log_unlock(sdp); | 611 | gfs2_log_unlock(sdp); |
611 | 612 | ||
612 | lock_buffer(bh); | 613 | lock_buffer(bh); |
613 | clear_buffer_dirty(bh); | 614 | clear_buffer_dirty(bh); |
614 | bh->b_bdev = NULL; | 615 | bh->b_bdev = NULL; |
615 | clear_buffer_mapped(bh); | 616 | clear_buffer_mapped(bh); |
616 | clear_buffer_req(bh); | 617 | clear_buffer_req(bh); |
617 | clear_buffer_new(bh); | 618 | clear_buffer_new(bh); |
618 | clear_buffer_delay(bh); | 619 | clear_buffer_delay(bh); |
619 | unlock_buffer(bh); | 620 | unlock_buffer(bh); |
620 | } | 621 | } |
621 | 622 | ||
622 | static void gfs2_invalidatepage(struct page *page, unsigned long offset) | 623 | static void gfs2_invalidatepage(struct page *page, unsigned long offset) |
623 | { | 624 | { |
624 | struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host); | 625 | struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host); |
625 | struct buffer_head *head, *bh, *next; | 626 | struct buffer_head *head, *bh, *next; |
626 | unsigned int curr_off = 0; | 627 | unsigned int curr_off = 0; |
627 | 628 | ||
628 | BUG_ON(!PageLocked(page)); | 629 | BUG_ON(!PageLocked(page)); |
629 | if (!page_has_buffers(page)) | 630 | if (!page_has_buffers(page)) |
630 | return; | 631 | return; |
631 | 632 | ||
632 | bh = head = page_buffers(page); | 633 | bh = head = page_buffers(page); |
633 | do { | 634 | do { |
634 | unsigned int next_off = curr_off + bh->b_size; | 635 | unsigned int next_off = curr_off + bh->b_size; |
635 | next = bh->b_this_page; | 636 | next = bh->b_this_page; |
636 | 637 | ||
637 | if (offset <= curr_off) | 638 | if (offset <= curr_off) |
638 | discard_buffer(sdp, bh); | 639 | discard_buffer(sdp, bh); |
639 | 640 | ||
640 | curr_off = next_off; | 641 | curr_off = next_off; |
641 | bh = next; | 642 | bh = next; |
642 | } while (bh != head); | 643 | } while (bh != head); |
643 | 644 | ||
644 | if (!offset) | 645 | if (!offset) |
645 | try_to_release_page(page, 0); | 646 | try_to_release_page(page, 0); |
646 | 647 | ||
647 | return; | 648 | return; |
648 | } | 649 | } |
649 | 650 | ||
650 | /** | 651 | /** |
651 | * gfs2_ok_for_dio - check that dio is valid on this file | 652 | * gfs2_ok_for_dio - check that dio is valid on this file |
652 | * @ip: The inode | 653 | * @ip: The inode |
653 | * @rw: READ or WRITE | 654 | * @rw: READ or WRITE |
654 | * @offset: The offset at which we are reading or writing | 655 | * @offset: The offset at which we are reading or writing |
655 | * | 656 | * |
656 | * Returns: 0 (to ignore the i/o request and thus fall back to buffered i/o) | 657 | * Returns: 0 (to ignore the i/o request and thus fall back to buffered i/o) |
657 | * 1 (to accept the i/o request) | 658 | * 1 (to accept the i/o request) |
658 | */ | 659 | */ |
659 | static int gfs2_ok_for_dio(struct gfs2_inode *ip, int rw, loff_t offset) | 660 | static int gfs2_ok_for_dio(struct gfs2_inode *ip, int rw, loff_t offset) |
660 | { | 661 | { |
661 | /* | 662 | /* |
662 | * Should we return an error here? I can't see that O_DIRECT for | 663 | * Should we return an error here? I can't see that O_DIRECT for |
663 | * a journaled file makes any sense. For now we'll silently fall | 664 | * a journaled file makes any sense. For now we'll silently fall |
664 | * back to buffered I/O, likewise we do the same for stuffed | 665 | * back to buffered I/O, likewise we do the same for stuffed |
665 | * files since they are (a) small and (b) unaligned. | 666 | * files since they are (a) small and (b) unaligned. |
666 | */ | 667 | */ |
667 | if (gfs2_is_jdata(ip)) | 668 | if (gfs2_is_jdata(ip)) |
668 | return 0; | 669 | return 0; |
669 | 670 | ||
670 | if (gfs2_is_stuffed(ip)) | 671 | if (gfs2_is_stuffed(ip)) |
671 | return 0; | 672 | return 0; |
672 | 673 | ||
673 | if (offset > i_size_read(&ip->i_inode)) | 674 | if (offset > i_size_read(&ip->i_inode)) |
674 | return 0; | 675 | return 0; |
675 | return 1; | 676 | return 1; |
676 | } | 677 | } |
677 | 678 | ||
678 | 679 | ||
679 | 680 | ||
680 | static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb, | 681 | static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb, |
681 | const struct iovec *iov, loff_t offset, | 682 | const struct iovec *iov, loff_t offset, |
682 | unsigned long nr_segs) | 683 | unsigned long nr_segs) |
683 | { | 684 | { |
684 | struct file *file = iocb->ki_filp; | 685 | struct file *file = iocb->ki_filp; |
685 | struct inode *inode = file->f_mapping->host; | 686 | struct inode *inode = file->f_mapping->host; |
686 | struct gfs2_inode *ip = GFS2_I(inode); | 687 | struct gfs2_inode *ip = GFS2_I(inode); |
687 | struct gfs2_holder gh; | 688 | struct gfs2_holder gh; |
688 | int rv; | 689 | int rv; |
689 | 690 | ||
690 | /* | 691 | /* |
691 | * Deferred lock, even if its a write, since we do no allocation | 692 | * Deferred lock, even if its a write, since we do no allocation |
692 | * on this path. All we need change is atime, and this lock mode | 693 | * on this path. All we need change is atime, and this lock mode |
693 | * ensures that other nodes have flushed their buffered read caches | 694 | * ensures that other nodes have flushed their buffered read caches |
694 | * (i.e. their page cache entries for this inode). We do not, | 695 | * (i.e. their page cache entries for this inode). We do not, |
695 | * unfortunately have the option of only flushing a range like | 696 | * unfortunately have the option of only flushing a range like |
696 | * the VFS does. | 697 | * the VFS does. |
697 | */ | 698 | */ |
698 | gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, GL_ATIME, &gh); | 699 | gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, GL_ATIME, &gh); |
699 | rv = gfs2_glock_nq_atime(&gh); | 700 | rv = gfs2_glock_nq_atime(&gh); |
700 | if (rv) | 701 | if (rv) |
701 | return rv; | 702 | return rv; |
702 | rv = gfs2_ok_for_dio(ip, rw, offset); | 703 | rv = gfs2_ok_for_dio(ip, rw, offset); |
703 | if (rv != 1) | 704 | if (rv != 1) |
704 | goto out; /* dio not valid, fall back to buffered i/o */ | 705 | goto out; /* dio not valid, fall back to buffered i/o */ |
705 | 706 | ||
706 | rv = blockdev_direct_IO_no_locking(rw, iocb, inode, inode->i_sb->s_bdev, | 707 | rv = blockdev_direct_IO_no_locking(rw, iocb, inode, inode->i_sb->s_bdev, |
707 | iov, offset, nr_segs, | 708 | iov, offset, nr_segs, |
708 | gfs2_get_block_direct, NULL); | 709 | gfs2_get_block_direct, NULL); |
709 | out: | 710 | out: |
710 | gfs2_glock_dq_m(1, &gh); | 711 | gfs2_glock_dq_m(1, &gh); |
711 | gfs2_holder_uninit(&gh); | 712 | gfs2_holder_uninit(&gh); |
712 | return rv; | 713 | return rv; |
713 | } | 714 | } |
714 | 715 | ||
715 | /** | 716 | /** |
716 | * stuck_releasepage - We're stuck in gfs2_releasepage(). Print stuff out. | 717 | * stuck_releasepage - We're stuck in gfs2_releasepage(). Print stuff out. |
717 | * @bh: the buffer we're stuck on | 718 | * @bh: the buffer we're stuck on |
718 | * | 719 | * |
719 | */ | 720 | */ |
720 | 721 | ||
721 | static void stuck_releasepage(struct buffer_head *bh) | 722 | static void stuck_releasepage(struct buffer_head *bh) |
722 | { | 723 | { |
723 | struct inode *inode = bh->b_page->mapping->host; | 724 | struct inode *inode = bh->b_page->mapping->host; |
724 | struct gfs2_sbd *sdp = inode->i_sb->s_fs_info; | 725 | struct gfs2_sbd *sdp = inode->i_sb->s_fs_info; |
725 | struct gfs2_bufdata *bd = bh->b_private; | 726 | struct gfs2_bufdata *bd = bh->b_private; |
726 | struct gfs2_glock *gl; | 727 | struct gfs2_glock *gl; |
727 | static unsigned limit = 0; | 728 | static unsigned limit = 0; |
728 | 729 | ||
729 | if (limit > 3) | 730 | if (limit > 3) |
730 | return; | 731 | return; |
731 | limit++; | 732 | limit++; |
732 | 733 | ||
733 | fs_warn(sdp, "stuck in gfs2_releasepage() %p\n", inode); | 734 | fs_warn(sdp, "stuck in gfs2_releasepage() %p\n", inode); |
734 | fs_warn(sdp, "blkno = %llu, bh->b_count = %d\n", | 735 | fs_warn(sdp, "blkno = %llu, bh->b_count = %d\n", |
735 | (unsigned long long)bh->b_blocknr, atomic_read(&bh->b_count)); | 736 | (unsigned long long)bh->b_blocknr, atomic_read(&bh->b_count)); |
736 | fs_warn(sdp, "pinned = %u\n", buffer_pinned(bh)); | 737 | fs_warn(sdp, "pinned = %u\n", buffer_pinned(bh)); |
737 | fs_warn(sdp, "bh->b_private = %s\n", (bd) ? "!NULL" : "NULL"); | 738 | fs_warn(sdp, "bh->b_private = %s\n", (bd) ? "!NULL" : "NULL"); |
738 | 739 | ||
739 | if (!bd) | 740 | if (!bd) |
740 | return; | 741 | return; |
741 | 742 | ||
742 | gl = bd->bd_gl; | 743 | gl = bd->bd_gl; |
743 | 744 | ||
744 | fs_warn(sdp, "gl = (%u, %llu)\n", | 745 | fs_warn(sdp, "gl = (%u, %llu)\n", |
745 | gl->gl_name.ln_type, (unsigned long long)gl->gl_name.ln_number); | 746 | gl->gl_name.ln_type, (unsigned long long)gl->gl_name.ln_number); |
746 | 747 | ||
747 | fs_warn(sdp, "bd_list_tr = %s, bd_le.le_list = %s\n", | 748 | fs_warn(sdp, "bd_list_tr = %s, bd_le.le_list = %s\n", |
748 | (list_empty(&bd->bd_list_tr)) ? "no" : "yes", | 749 | (list_empty(&bd->bd_list_tr)) ? "no" : "yes", |
749 | (list_empty(&bd->bd_le.le_list)) ? "no" : "yes"); | 750 | (list_empty(&bd->bd_le.le_list)) ? "no" : "yes"); |
750 | 751 | ||
751 | if (gl->gl_ops == &gfs2_inode_glops) { | 752 | if (gl->gl_ops == &gfs2_inode_glops) { |
752 | struct gfs2_inode *ip = gl->gl_object; | 753 | struct gfs2_inode *ip = gl->gl_object; |
753 | unsigned int x; | 754 | unsigned int x; |
754 | 755 | ||
755 | if (!ip) | 756 | if (!ip) |
756 | return; | 757 | return; |
757 | 758 | ||
758 | fs_warn(sdp, "ip = %llu %llu\n", | 759 | fs_warn(sdp, "ip = %llu %llu\n", |
759 | (unsigned long long)ip->i_num.no_formal_ino, | 760 | (unsigned long long)ip->i_num.no_formal_ino, |
760 | (unsigned long long)ip->i_num.no_addr); | 761 | (unsigned long long)ip->i_num.no_addr); |
761 | 762 | ||
762 | for (x = 0; x < GFS2_MAX_META_HEIGHT; x++) | 763 | for (x = 0; x < GFS2_MAX_META_HEIGHT; x++) |
763 | fs_warn(sdp, "ip->i_cache[%u] = %s\n", | 764 | fs_warn(sdp, "ip->i_cache[%u] = %s\n", |
764 | x, (ip->i_cache[x]) ? "!NULL" : "NULL"); | 765 | x, (ip->i_cache[x]) ? "!NULL" : "NULL"); |
765 | } | 766 | } |
766 | } | 767 | } |
767 | 768 | ||
768 | /** | 769 | /** |
769 | * gfs2_releasepage - free the metadata associated with a page | 770 | * gfs2_releasepage - free the metadata associated with a page |
770 | * @page: the page that's being released | 771 | * @page: the page that's being released |
771 | * @gfp_mask: passed from Linux VFS, ignored by us | 772 | * @gfp_mask: passed from Linux VFS, ignored by us |
772 | * | 773 | * |
773 | * Call try_to_free_buffers() if the buffers in this page can be | 774 | * Call try_to_free_buffers() if the buffers in this page can be |
774 | * released. | 775 | * released. |
775 | * | 776 | * |
776 | * Returns: 0 | 777 | * Returns: 0 |
777 | */ | 778 | */ |
778 | 779 | ||
779 | int gfs2_releasepage(struct page *page, gfp_t gfp_mask) | 780 | int gfs2_releasepage(struct page *page, gfp_t gfp_mask) |
780 | { | 781 | { |
781 | struct inode *aspace = page->mapping->host; | 782 | struct inode *aspace = page->mapping->host; |
782 | struct gfs2_sbd *sdp = aspace->i_sb->s_fs_info; | 783 | struct gfs2_sbd *sdp = aspace->i_sb->s_fs_info; |
783 | struct buffer_head *bh, *head; | 784 | struct buffer_head *bh, *head; |
784 | struct gfs2_bufdata *bd; | 785 | struct gfs2_bufdata *bd; |
785 | unsigned long t = jiffies + gfs2_tune_get(sdp, gt_stall_secs) * HZ; | 786 | unsigned long t = jiffies + gfs2_tune_get(sdp, gt_stall_secs) * HZ; |
786 | 787 | ||
787 | if (!page_has_buffers(page)) | 788 | if (!page_has_buffers(page)) |
788 | goto out; | 789 | goto out; |
789 | 790 | ||
790 | head = bh = page_buffers(page); | 791 | head = bh = page_buffers(page); |
791 | do { | 792 | do { |
792 | while (atomic_read(&bh->b_count)) { | 793 | while (atomic_read(&bh->b_count)) { |
793 | if (!atomic_read(&aspace->i_writecount)) | 794 | if (!atomic_read(&aspace->i_writecount)) |
794 | return 0; | 795 | return 0; |
795 | 796 | ||
796 | if (!(gfp_mask & __GFP_WAIT)) | 797 | if (!(gfp_mask & __GFP_WAIT)) |
797 | return 0; | 798 | return 0; |
798 | 799 | ||
799 | if (time_after_eq(jiffies, t)) { | 800 | if (time_after_eq(jiffies, t)) { |
800 | stuck_releasepage(bh); | 801 | stuck_releasepage(bh); |
801 | /* should we withdraw here? */ | 802 | /* should we withdraw here? */ |
802 | return 0; | 803 | return 0; |
803 | } | 804 | } |
804 | 805 | ||
805 | yield(); | 806 | yield(); |
806 | } | 807 | } |
807 | 808 | ||
808 | gfs2_assert_warn(sdp, !buffer_pinned(bh)); | 809 | gfs2_assert_warn(sdp, !buffer_pinned(bh)); |
809 | gfs2_assert_warn(sdp, !buffer_dirty(bh)); | 810 | gfs2_assert_warn(sdp, !buffer_dirty(bh)); |
810 | 811 | ||
811 | gfs2_log_lock(sdp); | 812 | gfs2_log_lock(sdp); |
812 | bd = bh->b_private; | 813 | bd = bh->b_private; |
813 | if (bd) { | 814 | if (bd) { |
814 | gfs2_assert_warn(sdp, bd->bd_bh == bh); | 815 | gfs2_assert_warn(sdp, bd->bd_bh == bh); |
815 | gfs2_assert_warn(sdp, list_empty(&bd->bd_list_tr)); | 816 | gfs2_assert_warn(sdp, list_empty(&bd->bd_list_tr)); |
816 | gfs2_assert_warn(sdp, !bd->bd_ail); | 817 | gfs2_assert_warn(sdp, !bd->bd_ail); |
817 | bd->bd_bh = NULL; | 818 | bd->bd_bh = NULL; |
818 | if (!list_empty(&bd->bd_le.le_list)) | 819 | if (!list_empty(&bd->bd_le.le_list)) |
819 | bd = NULL; | 820 | bd = NULL; |
820 | bh->b_private = NULL; | 821 | bh->b_private = NULL; |
821 | } | 822 | } |
822 | gfs2_log_unlock(sdp); | 823 | gfs2_log_unlock(sdp); |
823 | if (bd) | 824 | if (bd) |
824 | kmem_cache_free(gfs2_bufdata_cachep, bd); | 825 | kmem_cache_free(gfs2_bufdata_cachep, bd); |
825 | 826 | ||
826 | bh = bh->b_this_page; | 827 | bh = bh->b_this_page; |
827 | } while (bh != head); | 828 | } while (bh != head); |
828 | 829 | ||
829 | out: | 830 | out: |
830 | return try_to_free_buffers(page); | 831 | return try_to_free_buffers(page); |
831 | } | 832 | } |
832 | 833 | ||
833 | const struct address_space_operations gfs2_file_aops = { | 834 | const struct address_space_operations gfs2_file_aops = { |
834 | .writepage = gfs2_writepage, | 835 | .writepage = gfs2_writepage, |
835 | .writepages = gfs2_writepages, | 836 | .writepages = gfs2_writepages, |
836 | .readpage = gfs2_readpage, | 837 | .readpage = gfs2_readpage, |
837 | .readpages = gfs2_readpages, | 838 | .readpages = gfs2_readpages, |
838 | .sync_page = block_sync_page, | 839 | .sync_page = block_sync_page, |
839 | .prepare_write = gfs2_prepare_write, | 840 | .prepare_write = gfs2_prepare_write, |
840 | .commit_write = gfs2_commit_write, | 841 | .commit_write = gfs2_commit_write, |
841 | .bmap = gfs2_bmap, | 842 | .bmap = gfs2_bmap, |
842 | .invalidatepage = gfs2_invalidatepage, | 843 | .invalidatepage = gfs2_invalidatepage, |
843 | .releasepage = gfs2_releasepage, | 844 | .releasepage = gfs2_releasepage, |
844 | .direct_IO = gfs2_direct_IO, | 845 | .direct_IO = gfs2_direct_IO, |
845 | }; | 846 | }; |
846 | 847 | ||
847 | 848 |
fs/gfs2/ops_address.h
1 | /* | 1 | /* |
2 | * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. | 2 | * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. |
3 | * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved. | 3 | * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved. |
4 | * | 4 | * |
5 | * This copyrighted material is made available to anyone wishing to use, | 5 | * This copyrighted material is made available to anyone wishing to use, |
6 | * modify, copy, or redistribute it subject to the terms and conditions | 6 | * modify, copy, or redistribute it subject to the terms and conditions |
7 | * of the GNU General Public License version 2. | 7 | * of the GNU General Public License version 2. |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #ifndef __OPS_ADDRESS_DOT_H__ | 10 | #ifndef __OPS_ADDRESS_DOT_H__ |
11 | #define __OPS_ADDRESS_DOT_H__ | 11 | #define __OPS_ADDRESS_DOT_H__ |
12 | 12 | ||
13 | #include <linux/fs.h> | 13 | #include <linux/fs.h> |
14 | #include <linux/buffer_head.h> | 14 | #include <linux/buffer_head.h> |
15 | #include <linux/mm.h> | 15 | #include <linux/mm.h> |
16 | 16 | ||
17 | extern const struct address_space_operations gfs2_file_aops; | 17 | extern const struct address_space_operations gfs2_file_aops; |
18 | extern int gfs2_get_block(struct inode *inode, sector_t lblock, | 18 | extern int gfs2_get_block(struct inode *inode, sector_t lblock, |
19 | struct buffer_head *bh_result, int create); | 19 | struct buffer_head *bh_result, int create); |
20 | extern int gfs2_releasepage(struct page *page, gfp_t gfp_mask); | 20 | extern int gfs2_releasepage(struct page *page, gfp_t gfp_mask); |
21 | extern u64 gfs2_ri_total(struct gfs2_sbd *sdp); | ||
22 | extern void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free, | ||
23 | s64 dinodes); | ||
24 | 21 | ||
25 | #endif /* __OPS_ADDRESS_DOT_H__ */ | 22 | #endif /* __OPS_ADDRESS_DOT_H__ */ |
26 | 23 |
fs/gfs2/rgrp.c
1 | /* | 1 | /* |
2 | * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. | 2 | * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. |
3 | * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved. | 3 | * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved. |
4 | * | 4 | * |
5 | * This copyrighted material is made available to anyone wishing to use, | 5 | * This copyrighted material is made available to anyone wishing to use, |
6 | * modify, copy, or redistribute it subject to the terms and conditions | 6 | * modify, copy, or redistribute it subject to the terms and conditions |
7 | * of the GNU General Public License version 2. | 7 | * of the GNU General Public License version 2. |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/slab.h> | 10 | #include <linux/slab.h> |
11 | #include <linux/spinlock.h> | 11 | #include <linux/spinlock.h> |
12 | #include <linux/completion.h> | 12 | #include <linux/completion.h> |
13 | #include <linux/buffer_head.h> | 13 | #include <linux/buffer_head.h> |
14 | #include <linux/fs.h> | 14 | #include <linux/fs.h> |
15 | #include <linux/gfs2_ondisk.h> | 15 | #include <linux/gfs2_ondisk.h> |
16 | #include <linux/lm_interface.h> | 16 | #include <linux/lm_interface.h> |
17 | 17 | ||
18 | #include "gfs2.h" | 18 | #include "gfs2.h" |
19 | #include "incore.h" | 19 | #include "incore.h" |
20 | #include "glock.h" | 20 | #include "glock.h" |
21 | #include "glops.h" | 21 | #include "glops.h" |
22 | #include "lops.h" | 22 | #include "lops.h" |
23 | #include "meta_io.h" | 23 | #include "meta_io.h" |
24 | #include "quota.h" | 24 | #include "quota.h" |
25 | #include "rgrp.h" | 25 | #include "rgrp.h" |
26 | #include "super.h" | 26 | #include "super.h" |
27 | #include "trans.h" | 27 | #include "trans.h" |
28 | #include "ops_file.h" | 28 | #include "ops_file.h" |
29 | #include "util.h" | 29 | #include "util.h" |
30 | #include "log.h" | 30 | #include "log.h" |
31 | 31 | ||
32 | #define BFITNOENT ((u32)~0) | 32 | #define BFITNOENT ((u32)~0) |
33 | 33 | ||
34 | /* | 34 | /* |
35 | * These routines are used by the resource group routines (rgrp.c) | 35 | * These routines are used by the resource group routines (rgrp.c) |
36 | * to keep track of block allocation. Each block is represented by two | 36 | * to keep track of block allocation. Each block is represented by two |
37 | * bits. So, each byte represents GFS2_NBBY (i.e. 4) blocks. | 37 | * bits. So, each byte represents GFS2_NBBY (i.e. 4) blocks. |
38 | * | 38 | * |
39 | * 0 = Free | 39 | * 0 = Free |
40 | * 1 = Used (not metadata) | 40 | * 1 = Used (not metadata) |
41 | * 2 = Unlinked (still in use) inode | 41 | * 2 = Unlinked (still in use) inode |
42 | * 3 = Used (metadata) | 42 | * 3 = Used (metadata) |
43 | */ | 43 | */ |
44 | 44 | ||
45 | static const char valid_change[16] = { | 45 | static const char valid_change[16] = { |
46 | /* current */ | 46 | /* current */ |
47 | /* n */ 0, 1, 1, 1, | 47 | /* n */ 0, 1, 1, 1, |
48 | /* e */ 1, 0, 0, 0, | 48 | /* e */ 1, 0, 0, 0, |
49 | /* w */ 0, 0, 0, 1, | 49 | /* w */ 0, 0, 0, 1, |
50 | 1, 0, 0, 0 | 50 | 1, 0, 0, 0 |
51 | }; | 51 | }; |
52 | 52 | ||
53 | /** | 53 | /** |
54 | * gfs2_setbit - Set a bit in the bitmaps | 54 | * gfs2_setbit - Set a bit in the bitmaps |
55 | * @buffer: the buffer that holds the bitmaps | 55 | * @buffer: the buffer that holds the bitmaps |
56 | * @buflen: the length (in bytes) of the buffer | 56 | * @buflen: the length (in bytes) of the buffer |
57 | * @block: the block to set | 57 | * @block: the block to set |
58 | * @new_state: the new state of the block | 58 | * @new_state: the new state of the block |
59 | * | 59 | * |
60 | */ | 60 | */ |
61 | 61 | ||
62 | static void gfs2_setbit(struct gfs2_rgrpd *rgd, unsigned char *buffer, | 62 | static void gfs2_setbit(struct gfs2_rgrpd *rgd, unsigned char *buffer, |
63 | unsigned int buflen, u32 block, | 63 | unsigned int buflen, u32 block, |
64 | unsigned char new_state) | 64 | unsigned char new_state) |
65 | { | 65 | { |
66 | unsigned char *byte, *end, cur_state; | 66 | unsigned char *byte, *end, cur_state; |
67 | unsigned int bit; | 67 | unsigned int bit; |
68 | 68 | ||
69 | byte = buffer + (block / GFS2_NBBY); | 69 | byte = buffer + (block / GFS2_NBBY); |
70 | bit = (block % GFS2_NBBY) * GFS2_BIT_SIZE; | 70 | bit = (block % GFS2_NBBY) * GFS2_BIT_SIZE; |
71 | end = buffer + buflen; | 71 | end = buffer + buflen; |
72 | 72 | ||
73 | gfs2_assert(rgd->rd_sbd, byte < end); | 73 | gfs2_assert(rgd->rd_sbd, byte < end); |
74 | 74 | ||
75 | cur_state = (*byte >> bit) & GFS2_BIT_MASK; | 75 | cur_state = (*byte >> bit) & GFS2_BIT_MASK; |
76 | 76 | ||
77 | if (valid_change[new_state * 4 + cur_state]) { | 77 | if (valid_change[new_state * 4 + cur_state]) { |
78 | *byte ^= cur_state << bit; | 78 | *byte ^= cur_state << bit; |
79 | *byte |= new_state << bit; | 79 | *byte |= new_state << bit; |
80 | } else | 80 | } else |
81 | gfs2_consist_rgrpd(rgd); | 81 | gfs2_consist_rgrpd(rgd); |
82 | } | 82 | } |
83 | 83 | ||
84 | /** | 84 | /** |
85 | * gfs2_testbit - test a bit in the bitmaps | 85 | * gfs2_testbit - test a bit in the bitmaps |
86 | * @buffer: the buffer that holds the bitmaps | 86 | * @buffer: the buffer that holds the bitmaps |
87 | * @buflen: the length (in bytes) of the buffer | 87 | * @buflen: the length (in bytes) of the buffer |
88 | * @block: the block to read | 88 | * @block: the block to read |
89 | * | 89 | * |
90 | */ | 90 | */ |
91 | 91 | ||
92 | static unsigned char gfs2_testbit(struct gfs2_rgrpd *rgd, unsigned char *buffer, | 92 | static unsigned char gfs2_testbit(struct gfs2_rgrpd *rgd, unsigned char *buffer, |
93 | unsigned int buflen, u32 block) | 93 | unsigned int buflen, u32 block) |
94 | { | 94 | { |
95 | unsigned char *byte, *end, cur_state; | 95 | unsigned char *byte, *end, cur_state; |
96 | unsigned int bit; | 96 | unsigned int bit; |
97 | 97 | ||
98 | byte = buffer + (block / GFS2_NBBY); | 98 | byte = buffer + (block / GFS2_NBBY); |
99 | bit = (block % GFS2_NBBY) * GFS2_BIT_SIZE; | 99 | bit = (block % GFS2_NBBY) * GFS2_BIT_SIZE; |
100 | end = buffer + buflen; | 100 | end = buffer + buflen; |
101 | 101 | ||
102 | gfs2_assert(rgd->rd_sbd, byte < end); | 102 | gfs2_assert(rgd->rd_sbd, byte < end); |
103 | 103 | ||
104 | cur_state = (*byte >> bit) & GFS2_BIT_MASK; | 104 | cur_state = (*byte >> bit) & GFS2_BIT_MASK; |
105 | 105 | ||
106 | return cur_state; | 106 | return cur_state; |
107 | } | 107 | } |
108 | 108 | ||
109 | /** | 109 | /** |
110 | * gfs2_bitfit - Search an rgrp's bitmap buffer to find a bit-pair representing | 110 | * gfs2_bitfit - Search an rgrp's bitmap buffer to find a bit-pair representing |
111 | * a block in a given allocation state. | 111 | * a block in a given allocation state. |
112 | * @buffer: the buffer that holds the bitmaps | 112 | * @buffer: the buffer that holds the bitmaps |
113 | * @buflen: the length (in bytes) of the buffer | 113 | * @buflen: the length (in bytes) of the buffer |
114 | * @goal: start search at this block's bit-pair (within @buffer) | 114 | * @goal: start search at this block's bit-pair (within @buffer) |
115 | * @old_state: GFS2_BLKST_XXX the state of the block we're looking for; | 115 | * @old_state: GFS2_BLKST_XXX the state of the block we're looking for; |
116 | * bit 0 = alloc(1)/free(0), bit 1 = meta(1)/data(0) | 116 | * bit 0 = alloc(1)/free(0), bit 1 = meta(1)/data(0) |
117 | * | 117 | * |
118 | * Scope of @goal and returned block number is only within this bitmap buffer, | 118 | * Scope of @goal and returned block number is only within this bitmap buffer, |
119 | * not entire rgrp or filesystem. @buffer will be offset from the actual | 119 | * not entire rgrp or filesystem. @buffer will be offset from the actual |
120 | * beginning of a bitmap block buffer, skipping any header structures. | 120 | * beginning of a bitmap block buffer, skipping any header structures. |
121 | * | 121 | * |
122 | * Return: the block number (bitmap buffer scope) that was found | 122 | * Return: the block number (bitmap buffer scope) that was found |
123 | */ | 123 | */ |
124 | 124 | ||
static u32 gfs2_bitfit(struct gfs2_rgrpd *rgd, unsigned char *buffer,
		       unsigned int buflen, u32 goal,
		       unsigned char old_state)
{
	unsigned char *byte, *end, alloc;
	u32 blk = goal;
	unsigned int bit;

	/* Start at the byte/bit-pair holding @goal's state. */
	byte = buffer + (goal / GFS2_NBBY);
	bit = (goal % GFS2_NBBY) * GFS2_BIT_SIZE;
	end = buffer + buflen;
	/*
	 * 0x55 masks the low (alloc) bit of all four bit-pairs in a byte.
	 * If old_state's alloc bit is set, a whole byte can be skipped when
	 * every pair's alloc bit is clear (alloc == 0); if we want a free
	 * state, skip when every pair's alloc bit is set (alloc == 0x55).
	 */
	alloc = (old_state & 1) ? 0 : 0x55;

	while (byte < end) {
		/* Fast path: no bit-pair in this byte can match old_state. */
		if ((*byte & 0x55) == alloc) {
			/* Advance blk past the (8 - bit) / 2 remaining pairs. */
			blk += (8 - bit) >> 1;

			bit = 0;
			byte++;

			continue;
		}

		if (((*byte >> bit) & GFS2_BIT_MASK) == old_state)
			return blk;

		/* Step to the next bit-pair, wrapping to the next byte. */
		bit += GFS2_BIT_SIZE;
		if (bit >= 8) {
			bit = 0;
			byte++;
		}

		blk++;
	}

	/* No block in old_state found within this bitmap buffer. */
	return BFITNOENT;
}
162 | 162 | ||
163 | /** | 163 | /** |
164 | * gfs2_bitcount - count the number of bits in a certain state | 164 | * gfs2_bitcount - count the number of bits in a certain state |
165 | * @buffer: the buffer that holds the bitmaps | 165 | * @buffer: the buffer that holds the bitmaps |
166 | * @buflen: the length (in bytes) of the buffer | 166 | * @buflen: the length (in bytes) of the buffer |
167 | * @state: the state of the block we're looking for | 167 | * @state: the state of the block we're looking for |
168 | * | 168 | * |
169 | * Returns: The number of bits | 169 | * Returns: The number of bits |
170 | */ | 170 | */ |
171 | 171 | ||
172 | static u32 gfs2_bitcount(struct gfs2_rgrpd *rgd, unsigned char *buffer, | 172 | static u32 gfs2_bitcount(struct gfs2_rgrpd *rgd, unsigned char *buffer, |
173 | unsigned int buflen, unsigned char state) | 173 | unsigned int buflen, unsigned char state) |
174 | { | 174 | { |
175 | unsigned char *byte = buffer; | 175 | unsigned char *byte = buffer; |
176 | unsigned char *end = buffer + buflen; | 176 | unsigned char *end = buffer + buflen; |
177 | unsigned char state1 = state << 2; | 177 | unsigned char state1 = state << 2; |
178 | unsigned char state2 = state << 4; | 178 | unsigned char state2 = state << 4; |
179 | unsigned char state3 = state << 6; | 179 | unsigned char state3 = state << 6; |
180 | u32 count = 0; | 180 | u32 count = 0; |
181 | 181 | ||
182 | for (; byte < end; byte++) { | 182 | for (; byte < end; byte++) { |
183 | if (((*byte) & 0x03) == state) | 183 | if (((*byte) & 0x03) == state) |
184 | count++; | 184 | count++; |
185 | if (((*byte) & 0x0C) == state1) | 185 | if (((*byte) & 0x0C) == state1) |
186 | count++; | 186 | count++; |
187 | if (((*byte) & 0x30) == state2) | 187 | if (((*byte) & 0x30) == state2) |
188 | count++; | 188 | count++; |
189 | if (((*byte) & 0xC0) == state3) | 189 | if (((*byte) & 0xC0) == state3) |
190 | count++; | 190 | count++; |
191 | } | 191 | } |
192 | 192 | ||
193 | return count; | 193 | return count; |
194 | } | 194 | } |
195 | 195 | ||
196 | /** | 196 | /** |
197 | * gfs2_rgrp_verify - Verify that a resource group is consistent | 197 | * gfs2_rgrp_verify - Verify that a resource group is consistent |
198 | * @sdp: the filesystem | 198 | * @sdp: the filesystem |
199 | * @rgd: the rgrp | 199 | * @rgd: the rgrp |
200 | * | 200 | * |
201 | */ | 201 | */ |
202 | 202 | ||
203 | void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd) | 203 | void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd) |
204 | { | 204 | { |
205 | struct gfs2_sbd *sdp = rgd->rd_sbd; | 205 | struct gfs2_sbd *sdp = rgd->rd_sbd; |
206 | struct gfs2_bitmap *bi = NULL; | 206 | struct gfs2_bitmap *bi = NULL; |
207 | u32 length = rgd->rd_ri.ri_length; | 207 | u32 length = rgd->rd_ri.ri_length; |
208 | u32 count[4], tmp; | 208 | u32 count[4], tmp; |
209 | int buf, x; | 209 | int buf, x; |
210 | 210 | ||
211 | memset(count, 0, 4 * sizeof(u32)); | 211 | memset(count, 0, 4 * sizeof(u32)); |
212 | 212 | ||
213 | /* Count # blocks in each of 4 possible allocation states */ | 213 | /* Count # blocks in each of 4 possible allocation states */ |
214 | for (buf = 0; buf < length; buf++) { | 214 | for (buf = 0; buf < length; buf++) { |
215 | bi = rgd->rd_bits + buf; | 215 | bi = rgd->rd_bits + buf; |
216 | for (x = 0; x < 4; x++) | 216 | for (x = 0; x < 4; x++) |
217 | count[x] += gfs2_bitcount(rgd, | 217 | count[x] += gfs2_bitcount(rgd, |
218 | bi->bi_bh->b_data + | 218 | bi->bi_bh->b_data + |
219 | bi->bi_offset, | 219 | bi->bi_offset, |
220 | bi->bi_len, x); | 220 | bi->bi_len, x); |
221 | } | 221 | } |
222 | 222 | ||
223 | if (count[0] != rgd->rd_rg.rg_free) { | 223 | if (count[0] != rgd->rd_rg.rg_free) { |
224 | if (gfs2_consist_rgrpd(rgd)) | 224 | if (gfs2_consist_rgrpd(rgd)) |
225 | fs_err(sdp, "free data mismatch: %u != %u\n", | 225 | fs_err(sdp, "free data mismatch: %u != %u\n", |
226 | count[0], rgd->rd_rg.rg_free); | 226 | count[0], rgd->rd_rg.rg_free); |
227 | return; | 227 | return; |
228 | } | 228 | } |
229 | 229 | ||
230 | tmp = rgd->rd_ri.ri_data - | 230 | tmp = rgd->rd_ri.ri_data - |
231 | rgd->rd_rg.rg_free - | 231 | rgd->rd_rg.rg_free - |
232 | rgd->rd_rg.rg_dinodes; | 232 | rgd->rd_rg.rg_dinodes; |
233 | if (count[1] + count[2] != tmp) { | 233 | if (count[1] + count[2] != tmp) { |
234 | if (gfs2_consist_rgrpd(rgd)) | 234 | if (gfs2_consist_rgrpd(rgd)) |
235 | fs_err(sdp, "used data mismatch: %u != %u\n", | 235 | fs_err(sdp, "used data mismatch: %u != %u\n", |
236 | count[1], tmp); | 236 | count[1], tmp); |
237 | return; | 237 | return; |
238 | } | 238 | } |
239 | 239 | ||
240 | if (count[3] != rgd->rd_rg.rg_dinodes) { | 240 | if (count[3] != rgd->rd_rg.rg_dinodes) { |
241 | if (gfs2_consist_rgrpd(rgd)) | 241 | if (gfs2_consist_rgrpd(rgd)) |
242 | fs_err(sdp, "used metadata mismatch: %u != %u\n", | 242 | fs_err(sdp, "used metadata mismatch: %u != %u\n", |
243 | count[3], rgd->rd_rg.rg_dinodes); | 243 | count[3], rgd->rd_rg.rg_dinodes); |
244 | return; | 244 | return; |
245 | } | 245 | } |
246 | 246 | ||
247 | if (count[2] > count[3]) { | 247 | if (count[2] > count[3]) { |
248 | if (gfs2_consist_rgrpd(rgd)) | 248 | if (gfs2_consist_rgrpd(rgd)) |
249 | fs_err(sdp, "unlinked inodes > inodes: %u\n", | 249 | fs_err(sdp, "unlinked inodes > inodes: %u\n", |
250 | count[2]); | 250 | count[2]); |
251 | return; | 251 | return; |
252 | } | 252 | } |
253 | 253 | ||
254 | } | 254 | } |
255 | 255 | ||
256 | static inline int rgrp_contains_block(struct gfs2_rindex_host *ri, u64 block) | 256 | static inline int rgrp_contains_block(struct gfs2_rindex_host *ri, u64 block) |
257 | { | 257 | { |
258 | u64 first = ri->ri_data0; | 258 | u64 first = ri->ri_data0; |
259 | u64 last = first + ri->ri_data; | 259 | u64 last = first + ri->ri_data; |
260 | return first <= block && block < last; | 260 | return first <= block && block < last; |
261 | } | 261 | } |
262 | 262 | ||
263 | /** | 263 | /** |
264 | * gfs2_blk2rgrpd - Find resource group for a given data/meta block number | 264 | * gfs2_blk2rgrpd - Find resource group for a given data/meta block number |
265 | * @sdp: The GFS2 superblock | 265 | * @sdp: The GFS2 superblock |
266 | * @n: The data block number | 266 | * @n: The data block number |
267 | * | 267 | * |
268 | * Returns: The resource group, or NULL if not found | 268 | * Returns: The resource group, or NULL if not found |
269 | */ | 269 | */ |
270 | 270 | ||
271 | struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk) | 271 | struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk) |
272 | { | 272 | { |
273 | struct gfs2_rgrpd *rgd; | 273 | struct gfs2_rgrpd *rgd; |
274 | 274 | ||
275 | spin_lock(&sdp->sd_rindex_spin); | 275 | spin_lock(&sdp->sd_rindex_spin); |
276 | 276 | ||
277 | list_for_each_entry(rgd, &sdp->sd_rindex_mru_list, rd_list_mru) { | 277 | list_for_each_entry(rgd, &sdp->sd_rindex_mru_list, rd_list_mru) { |
278 | if (rgrp_contains_block(&rgd->rd_ri, blk)) { | 278 | if (rgrp_contains_block(&rgd->rd_ri, blk)) { |
279 | list_move(&rgd->rd_list_mru, &sdp->sd_rindex_mru_list); | 279 | list_move(&rgd->rd_list_mru, &sdp->sd_rindex_mru_list); |
280 | spin_unlock(&sdp->sd_rindex_spin); | 280 | spin_unlock(&sdp->sd_rindex_spin); |
281 | return rgd; | 281 | return rgd; |
282 | } | 282 | } |
283 | } | 283 | } |
284 | 284 | ||
285 | spin_unlock(&sdp->sd_rindex_spin); | 285 | spin_unlock(&sdp->sd_rindex_spin); |
286 | 286 | ||
287 | return NULL; | 287 | return NULL; |
288 | } | 288 | } |
289 | 289 | ||
290 | /** | 290 | /** |
291 | * gfs2_rgrpd_get_first - get the first Resource Group in the filesystem | 291 | * gfs2_rgrpd_get_first - get the first Resource Group in the filesystem |
292 | * @sdp: The GFS2 superblock | 292 | * @sdp: The GFS2 superblock |
293 | * | 293 | * |
294 | * Returns: The first rgrp in the filesystem | 294 | * Returns: The first rgrp in the filesystem |
295 | */ | 295 | */ |
296 | 296 | ||
297 | struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp) | 297 | struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp) |
298 | { | 298 | { |
299 | gfs2_assert(sdp, !list_empty(&sdp->sd_rindex_list)); | 299 | gfs2_assert(sdp, !list_empty(&sdp->sd_rindex_list)); |
300 | return list_entry(sdp->sd_rindex_list.next, struct gfs2_rgrpd, rd_list); | 300 | return list_entry(sdp->sd_rindex_list.next, struct gfs2_rgrpd, rd_list); |
301 | } | 301 | } |
302 | 302 | ||
303 | /** | 303 | /** |
304 | * gfs2_rgrpd_get_next - get the next RG | 304 | * gfs2_rgrpd_get_next - get the next RG |
305 | * @rgd: A RG | 305 | * @rgd: A RG |
306 | * | 306 | * |
307 | * Returns: The next rgrp | 307 | * Returns: The next rgrp |
308 | */ | 308 | */ |
309 | 309 | ||
310 | struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd) | 310 | struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd) |
311 | { | 311 | { |
312 | if (rgd->rd_list.next == &rgd->rd_sbd->sd_rindex_list) | 312 | if (rgd->rd_list.next == &rgd->rd_sbd->sd_rindex_list) |
313 | return NULL; | 313 | return NULL; |
314 | return list_entry(rgd->rd_list.next, struct gfs2_rgrpd, rd_list); | 314 | return list_entry(rgd->rd_list.next, struct gfs2_rgrpd, rd_list); |
315 | } | 315 | } |
316 | 316 | ||
/*
 * clear_rgrpdi - tear down all in-memory rgrp descriptors
 * @sdp: the filesystem
 *
 * Caller must hold sd_rindex_mutex (see gfs2_clear_rgrpd) or otherwise
 * guarantee exclusive access to the rindex lists.
 */
static void clear_rgrpdi(struct gfs2_sbd *sdp)
{
	struct list_head *head;
	struct gfs2_rgrpd *rgd;
	struct gfs2_glock *gl;

	/* First, empty the "recent" list under the spinlock. */
	spin_lock(&sdp->sd_rindex_spin);
	sdp->sd_rindex_forward = NULL;
	head = &sdp->sd_rindex_recent_list;
	while (!list_empty(head)) {
		rgd = list_entry(head->next, struct gfs2_rgrpd, rd_recent);
		list_del(&rgd->rd_recent);
	}
	spin_unlock(&sdp->sd_rindex_spin);

	/* Then free every descriptor on the main rindex list. */
	head = &sdp->sd_rindex_list;
	while (!list_empty(head)) {
		rgd = list_entry(head->next, struct gfs2_rgrpd, rd_list);
		gl = rgd->rd_gl;

		list_del(&rgd->rd_list);
		list_del(&rgd->rd_list_mru);

		/*
		 * gl may be NULL if read_rindex_entry() failed before
		 * acquiring the glock; otherwise drop our reference.
		 */
		if (gl) {
			gl->gl_object = NULL;
			gfs2_glock_put(gl);
		}

		kfree(rgd->rd_bits);
		kfree(rgd);
	}
}
349 | 349 | ||
void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
{
	/* Serialise against rindex readers/updaters, then tear down. */
	mutex_lock(&sdp->sd_rindex_mutex);
	clear_rgrpdi(sdp);
	mutex_unlock(&sdp->sd_rindex_mutex);
}
356 | 356 | ||
357 | /** | 357 | /** |
358 | * gfs2_compute_bitstructs - Compute the bitmap sizes | 358 | * gfs2_compute_bitstructs - Compute the bitmap sizes |
359 | * @rgd: The resource group descriptor | 359 | * @rgd: The resource group descriptor |
360 | * | 360 | * |
361 | * Calculates bitmap descriptors, one for each block that contains bitmap data | 361 | * Calculates bitmap descriptors, one for each block that contains bitmap data |
362 | * | 362 | * |
363 | * Returns: errno | 363 | * Returns: errno |
364 | */ | 364 | */ |
365 | 365 | ||
static int compute_bitstructs(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_bitmap *bi;
	u32 length = rgd->rd_ri.ri_length; /* # blocks in hdr & bitmap */
	u32 bytes_left, bytes;
	int x;

	/* An rgrp with no header/bitmap blocks is corrupt. */
	if (!length)
		return -EINVAL;

	rgd->rd_bits = kcalloc(length, sizeof(struct gfs2_bitmap), GFP_NOFS);
	if (!rgd->rd_bits)
		return -ENOMEM;

	/* Total bitmap bytes still to be assigned to descriptors. */
	bytes_left = rgd->rd_ri.ri_bitbytes;

	/*
	 * The first block carries a struct gfs2_rgrp header; subsequent
	 * blocks carry only a struct gfs2_meta_header.  bi_offset skips
	 * whichever header the block has, bi_start is this block's byte
	 * offset within the overall bitmap, bi_len its byte count.
	 */
	for (x = 0; x < length; x++) {
		bi = rgd->rd_bits + x;

		/* small rgrp; bitmap stored completely in header block */
		if (length == 1) {
			bytes = bytes_left;
			bi->bi_offset = sizeof(struct gfs2_rgrp);
			bi->bi_start = 0;
			bi->bi_len = bytes;
		/* header block */
		} else if (x == 0) {
			bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_rgrp);
			bi->bi_offset = sizeof(struct gfs2_rgrp);
			bi->bi_start = 0;
			bi->bi_len = bytes;
		/* last block */
		} else if (x + 1 == length) {
			bytes = bytes_left;
			bi->bi_offset = sizeof(struct gfs2_meta_header);
			bi->bi_start = rgd->rd_ri.ri_bitbytes - bytes_left;
			bi->bi_len = bytes;
		/* other blocks */
		} else {
			bytes = sdp->sd_sb.sb_bsize -
				sizeof(struct gfs2_meta_header);
			bi->bi_offset = sizeof(struct gfs2_meta_header);
			bi->bi_start = rgd->rd_ri.ri_bitbytes - bytes_left;
			bi->bi_len = bytes;
		}

		bytes_left -= bytes;
	}

	/* All bitmap bytes must have been consumed exactly. */
	if (bytes_left) {
		gfs2_consist_rgrpd(rgd);
		return -EIO;
	}
	/*
	 * The last descriptor must end exactly at ri_data bit-pairs
	 * (GFS2_NBBY block states per byte), else the rindex entry and
	 * the computed layout disagree.
	 */
	bi = rgd->rd_bits + (length - 1);
	if ((bi->bi_start + bi->bi_len) * GFS2_NBBY != rgd->rd_ri.ri_data) {
		if (gfs2_consist_rgrpd(rgd)) {
			gfs2_rindex_print(&rgd->rd_ri);
			fs_err(sdp, "start=%u len=%u offset=%u\n",
			       bi->bi_start, bi->bi_len, bi->bi_offset);
		}
		return -EIO;
	}

	return 0;
}
432 | 432 | ||
433 | /** | 433 | /** |
434 | * gfs2_ri_total - Total up the file system space, according to the rindex. | 434 | * gfs2_ri_total - Total up the file system space, according to the rindex. |
435 | * | 435 | * |
436 | */ | 436 | */ |
u64 gfs2_ri_total(struct gfs2_sbd *sdp)
{
	u64 total_data = 0;
	struct inode *inode = sdp->sd_rindex;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_rindex_host ri;
	char buf[sizeof(struct gfs2_rindex)];	/* one on-disk rindex entry */
	struct file_ra_state ra_state;
	int error, rgrps;

	mutex_lock(&sdp->sd_rindex_mutex);
	file_ra_state_init(&ra_state, inode->i_mapping);
	/* Walk every complete rindex entry, summing each rgrp's data blocks. */
	for (rgrps = 0;; rgrps++) {
		loff_t pos = rgrps * sizeof(struct gfs2_rindex);

		/* Stop before reading a partial entry at end of file. */
		if (pos + sizeof(struct gfs2_rindex) >= ip->i_di.di_size)
			break;
		error = gfs2_internal_read(ip, &ra_state, buf, &pos,
					   sizeof(struct gfs2_rindex));
		/* A short read (or error) also terminates the walk. */
		if (error != sizeof(struct gfs2_rindex))
			break;
		gfs2_rindex_in(&ri, buf);
		total_data += ri.ri_data;
	}
	mutex_unlock(&sdp->sd_rindex_mutex);
	return total_data;
}
464 | 464 | ||
465 | /** | 465 | /** |
466 | * read_rindex_entry - Pull in a new resource index entry from the disk | 466 | * read_rindex_entry - Pull in a new resource index entry from the disk |
467 | * @gl: The glock covering the rindex inode | 467 | * @gl: The glock covering the rindex inode |
468 | * | 468 | * |
469 | * Returns: 0 on success, error code otherwise | 469 | * Returns: 0 on success, error code otherwise |
470 | */ | 470 | */ |
471 | 471 | ||
static int read_rindex_entry(struct gfs2_inode *ip,
			     struct file_ra_state *ra_state)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	/* Read the next unparsed entry, indexed by the current rgrp count. */
	loff_t pos = sdp->sd_rgrps * sizeof(struct gfs2_rindex);
	char buf[sizeof(struct gfs2_rindex)];
	int error;
	struct gfs2_rgrpd *rgd;

	error = gfs2_internal_read(ip, ra_state, buf, &pos,
				   sizeof(struct gfs2_rindex));
	/* A zero-byte read means end of the rindex file: success, no entry. */
	if (!error)
		return 0;
	/* Any other short read is an error; map positive counts to -EIO. */
	if (error != sizeof(struct gfs2_rindex)) {
		if (error > 0)
			error = -EIO;
		return error;
	}

	rgd = kzalloc(sizeof(struct gfs2_rgrpd), GFP_NOFS);
	error = -ENOMEM;
	if (!rgd)
		return error;

	mutex_init(&rgd->rd_mutex);
	lops_init_le(&rgd->rd_le, &gfs2_rg_lops);
	rgd->rd_sbd = sdp;

	/*
	 * Link the descriptor onto the rindex lists before any further
	 * failure point, so clear_rgrpdi() can find and free it if a
	 * later step (or a later entry) fails.
	 */
	list_add_tail(&rgd->rd_list, &sdp->sd_rindex_list);
	list_add_tail(&rgd->rd_list_mru, &sdp->sd_rindex_mru_list);

	gfs2_rindex_in(&rgd->rd_ri, buf);
	error = compute_bitstructs(rgd);
	if (error)
		return error;

	error = gfs2_glock_get(sdp, rgd->rd_ri.ri_addr,
			       &gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
	if (error)
		return error;

	rgd->rd_gl->gl_object = rgd;
	/* Mark the cached rgrp data stale so it is read in on first use. */
	rgd->rd_rg_vn = rgd->rd_gl->gl_vn - 1;
	return error;
}
517 | 517 | ||
518 | /** | 518 | /** |
519 | * gfs2_ri_update - Pull in a new resource index from the disk | 519 | * gfs2_ri_update - Pull in a new resource index from the disk |
520 | * @ip: pointer to the rindex inode | 520 | * @ip: pointer to the rindex inode |
521 | * | 521 | * |
522 | * Returns: 0 on successful update, error code otherwise | 522 | * Returns: 0 on successful update, error code otherwise |
523 | */ | 523 | */ |
524 | 524 | ||
static int gfs2_ri_update(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct inode *inode = &ip->i_inode;
	struct file_ra_state ra_state;
	u64 rgrp_count = ip->i_di.di_size;
	int error;

	/*
	 * do_div() divides rgrp_count in place by the on-disk entry size
	 * and returns the remainder; the rindex file size must be an
	 * exact multiple of sizeof(struct gfs2_rindex).
	 */
	if (do_div(rgrp_count, sizeof(struct gfs2_rindex))) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	/* Discard any stale in-memory rgrps before re-reading. */
	clear_rgrpdi(sdp);

	file_ra_state_init(&ra_state, inode->i_mapping);
	/* Read exactly rgrp_count entries, one per resource group. */
	for (sdp->sd_rgrps = 0; sdp->sd_rgrps < rgrp_count; sdp->sd_rgrps++) {
		error = read_rindex_entry(ip, &ra_state);
		if (error) {
			clear_rgrpdi(sdp);
			return error;
		}
	}

	/* Record the glock version so we can detect future rindex changes. */
	sdp->sd_rindex_vn = ip->i_gl->gl_vn;
	return 0;
}
552 | 552 | ||
553 | /** | 553 | /** |
554 | * gfs2_ri_update_special - Pull in a new resource index from the disk | 554 | * gfs2_ri_update_special - Pull in a new resource index from the disk |
555 | * | 555 | * |
556 | * This is a special version that's safe to call from gfs2_inplace_reserve_i. | 556 | * This is a special version that's safe to call from gfs2_inplace_reserve_i. |
557 | * In this case we know that we don't have any resource groups in memory yet. | 557 | * In this case we know that we don't have any resource groups in memory yet. |
558 | * | 558 | * |
559 | * @ip: pointer to the rindex inode | 559 | * @ip: pointer to the rindex inode |
560 | * | 560 | * |
561 | * Returns: 0 on successful update, error code otherwise | 561 | * Returns: 0 on successful update, error code otherwise |
562 | */ | 562 | */ |
static int gfs2_ri_update_special(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct inode *inode = &ip->i_inode;
	struct file_ra_state ra_state;
	int error;

	file_ra_state_init(&ra_state, inode->i_mapping);
	/*
	 * Unlike gfs2_ri_update(), no clear_rgrpdi() here: the caller
	 * guarantees no rgrps are in memory yet.  Read entries until
	 * only a partial one would remain.
	 */
	for (sdp->sd_rgrps = 0;; sdp->sd_rgrps++) {
		/* Ignore partials */
		if ((sdp->sd_rgrps + 1) * sizeof(struct gfs2_rindex) >
		    ip->i_di.di_size)
			break;
		error = read_rindex_entry(ip, &ra_state);
		if (error) {
			clear_rgrpdi(sdp);
			return error;
		}
	}

	/* Record the glock version so we can detect future rindex changes. */
	sdp->sd_rindex_vn = ip->i_gl->gl_vn;
	return 0;
}
586 | 586 | ||
587 | /** | 587 | /** |
588 | * gfs2_rindex_hold - Grab a lock on the rindex | 588 | * gfs2_rindex_hold - Grab a lock on the rindex |
589 | * @sdp: The GFS2 superblock | 589 | * @sdp: The GFS2 superblock |
590 | * @ri_gh: the glock holder | 590 | * @ri_gh: the glock holder |
591 | * | 591 | * |
592 | * We grab a lock on the rindex inode to make sure that it doesn't | 592 | * We grab a lock on the rindex inode to make sure that it doesn't |
593 | * change whilst we are performing an operation. We keep this lock | 593 | * change whilst we are performing an operation. We keep this lock |
594 | * for quite long periods of time compared to other locks. This | 594 | * for quite long periods of time compared to other locks. This |
595 | * doesn't matter, since it is shared and it is very, very rarely | 595 | * doesn't matter, since it is shared and it is very, very rarely |
596 | * accessed in the exclusive mode (i.e. only when expanding the filesystem). | 596 | * accessed in the exclusive mode (i.e. only when expanding the filesystem). |
597 | * | 597 | * |
598 | * This makes sure that we're using the latest copy of the resource index | 598 | * This makes sure that we're using the latest copy of the resource index |
599 | * special file, which might have been updated if someone expanded the | 599 | * special file, which might have been updated if someone expanded the |
600 | * filesystem (via gfs2_grow utility), which adds new resource groups. | 600 | * filesystem (via gfs2_grow utility), which adds new resource groups. |
601 | * | 601 | * |
602 | * Returns: 0 on success, error code otherwise | 602 | * Returns: 0 on success, error code otherwise |
603 | */ | 603 | */ |
604 | 604 | ||
int gfs2_rindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ri_gh)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_rindex);
	struct gfs2_glock *gl = ip->i_gl;
	int error;

	/* Take the rindex glock shared; exclusive use is only for growing. */
	error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, ri_gh);
	if (error)
		return error;

	/* Read new copy from disk if we don't have the latest */
	if (sdp->sd_rindex_vn != gl->gl_vn) {
		mutex_lock(&sdp->sd_rindex_mutex);
		/*
		 * Re-check under the mutex: another task may have
		 * refreshed the rindex while we waited for the lock.
		 */
		if (sdp->sd_rindex_vn != gl->gl_vn) {
			error = gfs2_ri_update(ip);
			/* On failure, drop the glock before returning. */
			if (error)
				gfs2_glock_dq_uninit(ri_gh);
		}
		mutex_unlock(&sdp->sd_rindex_mutex);
	}

	return error;
}
628 | 628 | ||
629 | /** | 629 | /** |
630 | * gfs2_rgrp_bh_get - Read in a RG's header and bitmaps | 630 | * gfs2_rgrp_bh_get - Read in a RG's header and bitmaps |
631 | * @rgd: the struct gfs2_rgrpd describing the RG to read in | 631 | * @rgd: the struct gfs2_rgrpd describing the RG to read in |
632 | * | 632 | * |
633 | * Read in all of a Resource Group's header and bitmap blocks. | 633 | * Read in all of a Resource Group's header and bitmap blocks. |
634 | * Caller must eventually call gfs2_rgrp_relse() to free the bitmaps. | 634 | * Caller must eventually call gfs2_rgrp_relse() to free the bitmaps. |
635 | * | 635 | * |
636 | * Returns: errno | 636 | * Returns: errno |
637 | */ | 637 | */ |
638 | 638 | ||
639 | int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd) | 639 | int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd) |
640 | { | 640 | { |
641 | struct gfs2_sbd *sdp = rgd->rd_sbd; | 641 | struct gfs2_sbd *sdp = rgd->rd_sbd; |
642 | struct gfs2_glock *gl = rgd->rd_gl; | 642 | struct gfs2_glock *gl = rgd->rd_gl; |
643 | unsigned int length = rgd->rd_ri.ri_length; | 643 | unsigned int length = rgd->rd_ri.ri_length; |
644 | struct gfs2_bitmap *bi; | 644 | struct gfs2_bitmap *bi; |
645 | unsigned int x, y; | 645 | unsigned int x, y; |
646 | int error; | 646 | int error; |
647 | 647 | ||
648 | mutex_lock(&rgd->rd_mutex); | 648 | mutex_lock(&rgd->rd_mutex); |
649 | 649 | ||
650 | spin_lock(&sdp->sd_rindex_spin); | 650 | spin_lock(&sdp->sd_rindex_spin); |
651 | if (rgd->rd_bh_count) { | 651 | if (rgd->rd_bh_count) { |
652 | rgd->rd_bh_count++; | 652 | rgd->rd_bh_count++; |
653 | spin_unlock(&sdp->sd_rindex_spin); | 653 | spin_unlock(&sdp->sd_rindex_spin); |
654 | mutex_unlock(&rgd->rd_mutex); | 654 | mutex_unlock(&rgd->rd_mutex); |
655 | return 0; | 655 | return 0; |
656 | } | 656 | } |
657 | spin_unlock(&sdp->sd_rindex_spin); | 657 | spin_unlock(&sdp->sd_rindex_spin); |
658 | 658 | ||
659 | for (x = 0; x < length; x++) { | 659 | for (x = 0; x < length; x++) { |
660 | bi = rgd->rd_bits + x; | 660 | bi = rgd->rd_bits + x; |
661 | error = gfs2_meta_read(gl, rgd->rd_ri.ri_addr + x, 0, &bi->bi_bh); | 661 | error = gfs2_meta_read(gl, rgd->rd_ri.ri_addr + x, 0, &bi->bi_bh); |
662 | if (error) | 662 | if (error) |
663 | goto fail; | 663 | goto fail; |
664 | } | 664 | } |
665 | 665 | ||
666 | for (y = length; y--;) { | 666 | for (y = length; y--;) { |
667 | bi = rgd->rd_bits + y; | 667 | bi = rgd->rd_bits + y; |
668 | error = gfs2_meta_wait(sdp, bi->bi_bh); | 668 | error = gfs2_meta_wait(sdp, bi->bi_bh); |
669 | if (error) | 669 | if (error) |
670 | goto fail; | 670 | goto fail; |
671 | if (gfs2_metatype_check(sdp, bi->bi_bh, y ? GFS2_METATYPE_RB : | 671 | if (gfs2_metatype_check(sdp, bi->bi_bh, y ? GFS2_METATYPE_RB : |
672 | GFS2_METATYPE_RG)) { | 672 | GFS2_METATYPE_RG)) { |
673 | error = -EIO; | 673 | error = -EIO; |
674 | goto fail; | 674 | goto fail; |
675 | } | 675 | } |
676 | } | 676 | } |
677 | 677 | ||
678 | if (rgd->rd_rg_vn != gl->gl_vn) { | 678 | if (rgd->rd_rg_vn != gl->gl_vn) { |
679 | gfs2_rgrp_in(&rgd->rd_rg, (rgd->rd_bits[0].bi_bh)->b_data); | 679 | gfs2_rgrp_in(&rgd->rd_rg, (rgd->rd_bits[0].bi_bh)->b_data); |
680 | rgd->rd_rg_vn = gl->gl_vn; | 680 | rgd->rd_rg_vn = gl->gl_vn; |
681 | } | 681 | } |
682 | 682 | ||
683 | spin_lock(&sdp->sd_rindex_spin); | 683 | spin_lock(&sdp->sd_rindex_spin); |
684 | rgd->rd_free_clone = rgd->rd_rg.rg_free; | 684 | rgd->rd_free_clone = rgd->rd_rg.rg_free; |
685 | rgd->rd_bh_count++; | 685 | rgd->rd_bh_count++; |
686 | spin_unlock(&sdp->sd_rindex_spin); | 686 | spin_unlock(&sdp->sd_rindex_spin); |
687 | 687 | ||
688 | mutex_unlock(&rgd->rd_mutex); | 688 | mutex_unlock(&rgd->rd_mutex); |
689 | 689 | ||
690 | return 0; | 690 | return 0; |
691 | 691 | ||
692 | fail: | 692 | fail: |
693 | while (x--) { | 693 | while (x--) { |
694 | bi = rgd->rd_bits + x; | 694 | bi = rgd->rd_bits + x; |
695 | brelse(bi->bi_bh); | 695 | brelse(bi->bi_bh); |
696 | bi->bi_bh = NULL; | 696 | bi->bi_bh = NULL; |
697 | gfs2_assert_warn(sdp, !bi->bi_clone); | 697 | gfs2_assert_warn(sdp, !bi->bi_clone); |
698 | } | 698 | } |
699 | mutex_unlock(&rgd->rd_mutex); | 699 | mutex_unlock(&rgd->rd_mutex); |
700 | 700 | ||
701 | return error; | 701 | return error; |
702 | } | 702 | } |
703 | 703 | ||
704 | void gfs2_rgrp_bh_hold(struct gfs2_rgrpd *rgd) | 704 | void gfs2_rgrp_bh_hold(struct gfs2_rgrpd *rgd) |
705 | { | 705 | { |
706 | struct gfs2_sbd *sdp = rgd->rd_sbd; | 706 | struct gfs2_sbd *sdp = rgd->rd_sbd; |
707 | 707 | ||
708 | spin_lock(&sdp->sd_rindex_spin); | 708 | spin_lock(&sdp->sd_rindex_spin); |
709 | gfs2_assert_warn(rgd->rd_sbd, rgd->rd_bh_count); | 709 | gfs2_assert_warn(rgd->rd_sbd, rgd->rd_bh_count); |
710 | rgd->rd_bh_count++; | 710 | rgd->rd_bh_count++; |
711 | spin_unlock(&sdp->sd_rindex_spin); | 711 | spin_unlock(&sdp->sd_rindex_spin); |
712 | } | 712 | } |
713 | 713 | ||
714 | /** | 714 | /** |
715 | * gfs2_rgrp_bh_put - Release RG bitmaps read in with gfs2_rgrp_bh_get() | 715 | * gfs2_rgrp_bh_put - Release RG bitmaps read in with gfs2_rgrp_bh_get() |
716 | * @rgd: the struct gfs2_rgrpd describing the RG to read in | 716 | * @rgd: the struct gfs2_rgrpd describing the RG to read in |
717 | * | 717 | * |
718 | */ | 718 | */ |
719 | 719 | ||
720 | void gfs2_rgrp_bh_put(struct gfs2_rgrpd *rgd) | 720 | void gfs2_rgrp_bh_put(struct gfs2_rgrpd *rgd) |
721 | { | 721 | { |
722 | struct gfs2_sbd *sdp = rgd->rd_sbd; | 722 | struct gfs2_sbd *sdp = rgd->rd_sbd; |
723 | int x, length = rgd->rd_ri.ri_length; | 723 | int x, length = rgd->rd_ri.ri_length; |
724 | 724 | ||
725 | spin_lock(&sdp->sd_rindex_spin); | 725 | spin_lock(&sdp->sd_rindex_spin); |
726 | gfs2_assert_warn(rgd->rd_sbd, rgd->rd_bh_count); | 726 | gfs2_assert_warn(rgd->rd_sbd, rgd->rd_bh_count); |
727 | if (--rgd->rd_bh_count) { | 727 | if (--rgd->rd_bh_count) { |
728 | spin_unlock(&sdp->sd_rindex_spin); | 728 | spin_unlock(&sdp->sd_rindex_spin); |
729 | return; | 729 | return; |
730 | } | 730 | } |
731 | 731 | ||
732 | for (x = 0; x < length; x++) { | 732 | for (x = 0; x < length; x++) { |
733 | struct gfs2_bitmap *bi = rgd->rd_bits + x; | 733 | struct gfs2_bitmap *bi = rgd->rd_bits + x; |
734 | kfree(bi->bi_clone); | 734 | kfree(bi->bi_clone); |
735 | bi->bi_clone = NULL; | 735 | bi->bi_clone = NULL; |
736 | brelse(bi->bi_bh); | 736 | brelse(bi->bi_bh); |
737 | bi->bi_bh = NULL; | 737 | bi->bi_bh = NULL; |
738 | } | 738 | } |
739 | 739 | ||
740 | spin_unlock(&sdp->sd_rindex_spin); | 740 | spin_unlock(&sdp->sd_rindex_spin); |
741 | } | 741 | } |
742 | 742 | ||
743 | void gfs2_rgrp_repolish_clones(struct gfs2_rgrpd *rgd) | 743 | void gfs2_rgrp_repolish_clones(struct gfs2_rgrpd *rgd) |
744 | { | 744 | { |
745 | struct gfs2_sbd *sdp = rgd->rd_sbd; | 745 | struct gfs2_sbd *sdp = rgd->rd_sbd; |
746 | unsigned int length = rgd->rd_ri.ri_length; | 746 | unsigned int length = rgd->rd_ri.ri_length; |
747 | unsigned int x; | 747 | unsigned int x; |
748 | 748 | ||
749 | for (x = 0; x < length; x++) { | 749 | for (x = 0; x < length; x++) { |
750 | struct gfs2_bitmap *bi = rgd->rd_bits + x; | 750 | struct gfs2_bitmap *bi = rgd->rd_bits + x; |
751 | if (!bi->bi_clone) | 751 | if (!bi->bi_clone) |
752 | continue; | 752 | continue; |
753 | memcpy(bi->bi_clone + bi->bi_offset, | 753 | memcpy(bi->bi_clone + bi->bi_offset, |
754 | bi->bi_bh->b_data + bi->bi_offset, bi->bi_len); | 754 | bi->bi_bh->b_data + bi->bi_offset, bi->bi_len); |
755 | } | 755 | } |
756 | 756 | ||
757 | spin_lock(&sdp->sd_rindex_spin); | 757 | spin_lock(&sdp->sd_rindex_spin); |
758 | rgd->rd_free_clone = rgd->rd_rg.rg_free; | 758 | rgd->rd_free_clone = rgd->rd_rg.rg_free; |
759 | spin_unlock(&sdp->sd_rindex_spin); | 759 | spin_unlock(&sdp->sd_rindex_spin); |
760 | } | 760 | } |
761 | 761 | ||
762 | /** | 762 | /** |
763 | * gfs2_alloc_get - get the struct gfs2_alloc structure for an inode | 763 | * gfs2_alloc_get - get the struct gfs2_alloc structure for an inode |
764 | * @ip: the incore GFS2 inode structure | 764 | * @ip: the incore GFS2 inode structure |
765 | * | 765 | * |
766 | * Returns: the struct gfs2_alloc | 766 | * Returns: the struct gfs2_alloc |
767 | */ | 767 | */ |
768 | 768 | ||
769 | struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip) | 769 | struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip) |
770 | { | 770 | { |
771 | struct gfs2_alloc *al = &ip->i_alloc; | 771 | struct gfs2_alloc *al = &ip->i_alloc; |
772 | 772 | ||
773 | /* FIXME: Should assert that the correct locks are held here... */ | 773 | /* FIXME: Should assert that the correct locks are held here... */ |
774 | memset(al, 0, sizeof(*al)); | 774 | memset(al, 0, sizeof(*al)); |
775 | return al; | 775 | return al; |
776 | } | 776 | } |
777 | 777 | ||
778 | /** | 778 | /** |
779 | * try_rgrp_fit - See if a given reservation will fit in a given RG | 779 | * try_rgrp_fit - See if a given reservation will fit in a given RG |
780 | * @rgd: the RG data | 780 | * @rgd: the RG data |
781 | * @al: the struct gfs2_alloc structure describing the reservation | 781 | * @al: the struct gfs2_alloc structure describing the reservation |
782 | * | 782 | * |
783 | * If there's room for the requested blocks to be allocated from the RG: | 783 | * If there's room for the requested blocks to be allocated from the RG: |
784 | * Sets the $al_rgd field in @al. | 784 | * Sets the $al_rgd field in @al. |
785 | * | 785 | * |
786 | * Returns: 1 on success (it fits), 0 on failure (it doesn't fit) | 786 | * Returns: 1 on success (it fits), 0 on failure (it doesn't fit) |
787 | */ | 787 | */ |
788 | 788 | ||
789 | static int try_rgrp_fit(struct gfs2_rgrpd *rgd, struct gfs2_alloc *al) | 789 | static int try_rgrp_fit(struct gfs2_rgrpd *rgd, struct gfs2_alloc *al) |
790 | { | 790 | { |
791 | struct gfs2_sbd *sdp = rgd->rd_sbd; | 791 | struct gfs2_sbd *sdp = rgd->rd_sbd; |
792 | int ret = 0; | 792 | int ret = 0; |
793 | 793 | ||
794 | if (rgd->rd_rg.rg_flags & GFS2_RGF_NOALLOC) | 794 | if (rgd->rd_rg.rg_flags & GFS2_RGF_NOALLOC) |
795 | return 0; | 795 | return 0; |
796 | 796 | ||
797 | spin_lock(&sdp->sd_rindex_spin); | 797 | spin_lock(&sdp->sd_rindex_spin); |
798 | if (rgd->rd_free_clone >= al->al_requested) { | 798 | if (rgd->rd_free_clone >= al->al_requested) { |
799 | al->al_rgd = rgd; | 799 | al->al_rgd = rgd; |
800 | ret = 1; | 800 | ret = 1; |
801 | } | 801 | } |
802 | spin_unlock(&sdp->sd_rindex_spin); | 802 | spin_unlock(&sdp->sd_rindex_spin); |
803 | 803 | ||
804 | return ret; | 804 | return ret; |
805 | } | 805 | } |
806 | 806 | ||
807 | /** | 807 | /** |
808 | * recent_rgrp_first - get first RG from "recent" list | 808 | * recent_rgrp_first - get first RG from "recent" list |
809 | * @sdp: The GFS2 superblock | 809 | * @sdp: The GFS2 superblock |
810 | * @rglast: address of the rgrp used last | 810 | * @rglast: address of the rgrp used last |
811 | * | 811 | * |
812 | * Returns: The first rgrp in the recent list | 812 | * Returns: The first rgrp in the recent list |
813 | */ | 813 | */ |
814 | 814 | ||
815 | static struct gfs2_rgrpd *recent_rgrp_first(struct gfs2_sbd *sdp, | 815 | static struct gfs2_rgrpd *recent_rgrp_first(struct gfs2_sbd *sdp, |
816 | u64 rglast) | 816 | u64 rglast) |
817 | { | 817 | { |
818 | struct gfs2_rgrpd *rgd = NULL; | 818 | struct gfs2_rgrpd *rgd = NULL; |
819 | 819 | ||
820 | spin_lock(&sdp->sd_rindex_spin); | 820 | spin_lock(&sdp->sd_rindex_spin); |
821 | 821 | ||
822 | if (list_empty(&sdp->sd_rindex_recent_list)) | 822 | if (list_empty(&sdp->sd_rindex_recent_list)) |
823 | goto out; | 823 | goto out; |
824 | 824 | ||
825 | if (!rglast) | 825 | if (!rglast) |
826 | goto first; | 826 | goto first; |
827 | 827 | ||
828 | list_for_each_entry(rgd, &sdp->sd_rindex_recent_list, rd_recent) { | 828 | list_for_each_entry(rgd, &sdp->sd_rindex_recent_list, rd_recent) { |
829 | if (rgd->rd_ri.ri_addr == rglast) | 829 | if (rgd->rd_ri.ri_addr == rglast) |
830 | goto out; | 830 | goto out; |
831 | } | 831 | } |
832 | 832 | ||
833 | first: | 833 | first: |
834 | rgd = list_entry(sdp->sd_rindex_recent_list.next, struct gfs2_rgrpd, | 834 | rgd = list_entry(sdp->sd_rindex_recent_list.next, struct gfs2_rgrpd, |
835 | rd_recent); | 835 | rd_recent); |
836 | out: | 836 | out: |
837 | spin_unlock(&sdp->sd_rindex_spin); | 837 | spin_unlock(&sdp->sd_rindex_spin); |
838 | return rgd; | 838 | return rgd; |
839 | } | 839 | } |
840 | 840 | ||
841 | /** | 841 | /** |
842 | * recent_rgrp_next - get next RG from "recent" list | 842 | * recent_rgrp_next - get next RG from "recent" list |
843 | * @cur_rgd: current rgrp | 843 | * @cur_rgd: current rgrp |
844 | * @remove: | 844 | * @remove: |
845 | * | 845 | * |
846 | * Returns: The next rgrp in the recent list | 846 | * Returns: The next rgrp in the recent list |
847 | */ | 847 | */ |
848 | 848 | ||
849 | static struct gfs2_rgrpd *recent_rgrp_next(struct gfs2_rgrpd *cur_rgd, | 849 | static struct gfs2_rgrpd *recent_rgrp_next(struct gfs2_rgrpd *cur_rgd, |
850 | int remove) | 850 | int remove) |
851 | { | 851 | { |
852 | struct gfs2_sbd *sdp = cur_rgd->rd_sbd; | 852 | struct gfs2_sbd *sdp = cur_rgd->rd_sbd; |
853 | struct list_head *head; | 853 | struct list_head *head; |
854 | struct gfs2_rgrpd *rgd; | 854 | struct gfs2_rgrpd *rgd; |
855 | 855 | ||
856 | spin_lock(&sdp->sd_rindex_spin); | 856 | spin_lock(&sdp->sd_rindex_spin); |
857 | 857 | ||
858 | head = &sdp->sd_rindex_recent_list; | 858 | head = &sdp->sd_rindex_recent_list; |
859 | 859 | ||
860 | list_for_each_entry(rgd, head, rd_recent) { | 860 | list_for_each_entry(rgd, head, rd_recent) { |
861 | if (rgd == cur_rgd) { | 861 | if (rgd == cur_rgd) { |
862 | if (cur_rgd->rd_recent.next != head) | 862 | if (cur_rgd->rd_recent.next != head) |
863 | rgd = list_entry(cur_rgd->rd_recent.next, | 863 | rgd = list_entry(cur_rgd->rd_recent.next, |
864 | struct gfs2_rgrpd, rd_recent); | 864 | struct gfs2_rgrpd, rd_recent); |
865 | else | 865 | else |
866 | rgd = NULL; | 866 | rgd = NULL; |
867 | 867 | ||
868 | if (remove) | 868 | if (remove) |
869 | list_del(&cur_rgd->rd_recent); | 869 | list_del(&cur_rgd->rd_recent); |
870 | 870 | ||
871 | goto out; | 871 | goto out; |
872 | } | 872 | } |
873 | } | 873 | } |
874 | 874 | ||
875 | rgd = NULL; | 875 | rgd = NULL; |
876 | if (!list_empty(head)) | 876 | if (!list_empty(head)) |
877 | rgd = list_entry(head->next, struct gfs2_rgrpd, rd_recent); | 877 | rgd = list_entry(head->next, struct gfs2_rgrpd, rd_recent); |
878 | 878 | ||
879 | out: | 879 | out: |
880 | spin_unlock(&sdp->sd_rindex_spin); | 880 | spin_unlock(&sdp->sd_rindex_spin); |
881 | return rgd; | 881 | return rgd; |
882 | } | 882 | } |
883 | 883 | ||
884 | /** | 884 | /** |
885 | * recent_rgrp_add - add an RG to tail of "recent" list | 885 | * recent_rgrp_add - add an RG to tail of "recent" list |
886 | * @new_rgd: The rgrp to add | 886 | * @new_rgd: The rgrp to add |
887 | * | 887 | * |
888 | */ | 888 | */ |
889 | 889 | ||
890 | static void recent_rgrp_add(struct gfs2_rgrpd *new_rgd) | 890 | static void recent_rgrp_add(struct gfs2_rgrpd *new_rgd) |
891 | { | 891 | { |
892 | struct gfs2_sbd *sdp = new_rgd->rd_sbd; | 892 | struct gfs2_sbd *sdp = new_rgd->rd_sbd; |
893 | struct gfs2_rgrpd *rgd; | 893 | struct gfs2_rgrpd *rgd; |
894 | unsigned int count = 0; | 894 | unsigned int count = 0; |
895 | unsigned int max = sdp->sd_rgrps / gfs2_jindex_size(sdp); | 895 | unsigned int max = sdp->sd_rgrps / gfs2_jindex_size(sdp); |
896 | 896 | ||
897 | spin_lock(&sdp->sd_rindex_spin); | 897 | spin_lock(&sdp->sd_rindex_spin); |
898 | 898 | ||
899 | list_for_each_entry(rgd, &sdp->sd_rindex_recent_list, rd_recent) { | 899 | list_for_each_entry(rgd, &sdp->sd_rindex_recent_list, rd_recent) { |
900 | if (rgd == new_rgd) | 900 | if (rgd == new_rgd) |
901 | goto out; | 901 | goto out; |
902 | 902 | ||
903 | if (++count >= max) | 903 | if (++count >= max) |
904 | goto out; | 904 | goto out; |
905 | } | 905 | } |
906 | list_add_tail(&new_rgd->rd_recent, &sdp->sd_rindex_recent_list); | 906 | list_add_tail(&new_rgd->rd_recent, &sdp->sd_rindex_recent_list); |
907 | 907 | ||
908 | out: | 908 | out: |
909 | spin_unlock(&sdp->sd_rindex_spin); | 909 | spin_unlock(&sdp->sd_rindex_spin); |
910 | } | 910 | } |
911 | 911 | ||
912 | /** | 912 | /** |
913 | * forward_rgrp_get - get an rgrp to try next from full list | 913 | * forward_rgrp_get - get an rgrp to try next from full list |
914 | * @sdp: The GFS2 superblock | 914 | * @sdp: The GFS2 superblock |
915 | * | 915 | * |
916 | * Returns: The rgrp to try next | 916 | * Returns: The rgrp to try next |
917 | */ | 917 | */ |
918 | 918 | ||
919 | static struct gfs2_rgrpd *forward_rgrp_get(struct gfs2_sbd *sdp) | 919 | static struct gfs2_rgrpd *forward_rgrp_get(struct gfs2_sbd *sdp) |
920 | { | 920 | { |
921 | struct gfs2_rgrpd *rgd; | 921 | struct gfs2_rgrpd *rgd; |
922 | unsigned int journals = gfs2_jindex_size(sdp); | 922 | unsigned int journals = gfs2_jindex_size(sdp); |
923 | unsigned int rg = 0, x; | 923 | unsigned int rg = 0, x; |
924 | 924 | ||
925 | spin_lock(&sdp->sd_rindex_spin); | 925 | spin_lock(&sdp->sd_rindex_spin); |
926 | 926 | ||
927 | rgd = sdp->sd_rindex_forward; | 927 | rgd = sdp->sd_rindex_forward; |
928 | if (!rgd) { | 928 | if (!rgd) { |
929 | if (sdp->sd_rgrps >= journals) | 929 | if (sdp->sd_rgrps >= journals) |
930 | rg = sdp->sd_rgrps * sdp->sd_jdesc->jd_jid / journals; | 930 | rg = sdp->sd_rgrps * sdp->sd_jdesc->jd_jid / journals; |
931 | 931 | ||
932 | for (x = 0, rgd = gfs2_rgrpd_get_first(sdp); x < rg; | 932 | for (x = 0, rgd = gfs2_rgrpd_get_first(sdp); x < rg; |
933 | x++, rgd = gfs2_rgrpd_get_next(rgd)) | 933 | x++, rgd = gfs2_rgrpd_get_next(rgd)) |
934 | /* Do Nothing */; | 934 | /* Do Nothing */; |
935 | 935 | ||
936 | sdp->sd_rindex_forward = rgd; | 936 | sdp->sd_rindex_forward = rgd; |
937 | } | 937 | } |
938 | 938 | ||
939 | spin_unlock(&sdp->sd_rindex_spin); | 939 | spin_unlock(&sdp->sd_rindex_spin); |
940 | 940 | ||
941 | return rgd; | 941 | return rgd; |
942 | } | 942 | } |
943 | 943 | ||
944 | /** | 944 | /** |
945 | * forward_rgrp_set - set the forward rgrp pointer | 945 | * forward_rgrp_set - set the forward rgrp pointer |
946 | * @sdp: the filesystem | 946 | * @sdp: the filesystem |
947 | * @rgd: The new forward rgrp | 947 | * @rgd: The new forward rgrp |
948 | * | 948 | * |
949 | */ | 949 | */ |
950 | 950 | ||
951 | static void forward_rgrp_set(struct gfs2_sbd *sdp, struct gfs2_rgrpd *rgd) | 951 | static void forward_rgrp_set(struct gfs2_sbd *sdp, struct gfs2_rgrpd *rgd) |
952 | { | 952 | { |
953 | spin_lock(&sdp->sd_rindex_spin); | 953 | spin_lock(&sdp->sd_rindex_spin); |
954 | sdp->sd_rindex_forward = rgd; | 954 | sdp->sd_rindex_forward = rgd; |
955 | spin_unlock(&sdp->sd_rindex_spin); | 955 | spin_unlock(&sdp->sd_rindex_spin); |
956 | } | 956 | } |
957 | 957 | ||
958 | /** | 958 | /** |
959 | * get_local_rgrp - Choose and lock a rgrp for allocation | 959 | * get_local_rgrp - Choose and lock a rgrp for allocation |
960 | * @ip: the inode to reserve space for | 960 | * @ip: the inode to reserve space for |
961 | * @rgp: the chosen and locked rgrp | 961 | * @rgp: the chosen and locked rgrp |
962 | * | 962 | * |
963 | * Try to acquire rgrp in way which avoids contending with others. | 963 | * Try to acquire rgrp in way which avoids contending with others. |
964 | * | 964 | * |
965 | * Returns: errno | 965 | * Returns: errno |
966 | */ | 966 | */ |
967 | 967 | ||
968 | static int get_local_rgrp(struct gfs2_inode *ip) | 968 | static int get_local_rgrp(struct gfs2_inode *ip) |
969 | { | 969 | { |
970 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); | 970 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); |
971 | struct gfs2_rgrpd *rgd, *begin = NULL; | 971 | struct gfs2_rgrpd *rgd, *begin = NULL; |
972 | struct gfs2_alloc *al = &ip->i_alloc; | 972 | struct gfs2_alloc *al = &ip->i_alloc; |
973 | int flags = LM_FLAG_TRY; | 973 | int flags = LM_FLAG_TRY; |
974 | int skipped = 0; | 974 | int skipped = 0; |
975 | int loops = 0; | 975 | int loops = 0; |
976 | int error; | 976 | int error; |
977 | 977 | ||
978 | /* Try recently successful rgrps */ | 978 | /* Try recently successful rgrps */ |
979 | 979 | ||
980 | rgd = recent_rgrp_first(sdp, ip->i_last_rg_alloc); | 980 | rgd = recent_rgrp_first(sdp, ip->i_last_rg_alloc); |
981 | 981 | ||
982 | while (rgd) { | 982 | while (rgd) { |
983 | error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, | 983 | error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, |
984 | LM_FLAG_TRY, &al->al_rgd_gh); | 984 | LM_FLAG_TRY, &al->al_rgd_gh); |
985 | switch (error) { | 985 | switch (error) { |
986 | case 0: | 986 | case 0: |
987 | if (try_rgrp_fit(rgd, al)) | 987 | if (try_rgrp_fit(rgd, al)) |
988 | goto out; | 988 | goto out; |
989 | gfs2_glock_dq_uninit(&al->al_rgd_gh); | 989 | gfs2_glock_dq_uninit(&al->al_rgd_gh); |
990 | rgd = recent_rgrp_next(rgd, 1); | 990 | rgd = recent_rgrp_next(rgd, 1); |
991 | break; | 991 | break; |
992 | 992 | ||
993 | case GLR_TRYFAILED: | 993 | case GLR_TRYFAILED: |
994 | rgd = recent_rgrp_next(rgd, 0); | 994 | rgd = recent_rgrp_next(rgd, 0); |
995 | break; | 995 | break; |
996 | 996 | ||
997 | default: | 997 | default: |
998 | return error; | 998 | return error; |
999 | } | 999 | } |
1000 | } | 1000 | } |
1001 | 1001 | ||
1002 | /* Go through full list of rgrps */ | 1002 | /* Go through full list of rgrps */ |
1003 | 1003 | ||
1004 | begin = rgd = forward_rgrp_get(sdp); | 1004 | begin = rgd = forward_rgrp_get(sdp); |
1005 | 1005 | ||
1006 | for (;;) { | 1006 | for (;;) { |
1007 | error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, flags, | 1007 | error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, flags, |
1008 | &al->al_rgd_gh); | 1008 | &al->al_rgd_gh); |
1009 | switch (error) { | 1009 | switch (error) { |
1010 | case 0: | 1010 | case 0: |
1011 | if (try_rgrp_fit(rgd, al)) | 1011 | if (try_rgrp_fit(rgd, al)) |
1012 | goto out; | 1012 | goto out; |
1013 | gfs2_glock_dq_uninit(&al->al_rgd_gh); | 1013 | gfs2_glock_dq_uninit(&al->al_rgd_gh); |
1014 | break; | 1014 | break; |
1015 | 1015 | ||
1016 | case GLR_TRYFAILED: | 1016 | case GLR_TRYFAILED: |
1017 | skipped++; | 1017 | skipped++; |
1018 | break; | 1018 | break; |
1019 | 1019 | ||
1020 | default: | 1020 | default: |
1021 | return error; | 1021 | return error; |
1022 | } | 1022 | } |
1023 | 1023 | ||
1024 | rgd = gfs2_rgrpd_get_next(rgd); | 1024 | rgd = gfs2_rgrpd_get_next(rgd); |
1025 | if (!rgd) | 1025 | if (!rgd) |
1026 | rgd = gfs2_rgrpd_get_first(sdp); | 1026 | rgd = gfs2_rgrpd_get_first(sdp); |
1027 | 1027 | ||
1028 | if (rgd == begin) { | 1028 | if (rgd == begin) { |
1029 | if (++loops >= 3) | 1029 | if (++loops >= 3) |
1030 | return -ENOSPC; | 1030 | return -ENOSPC; |
1031 | if (!skipped) | 1031 | if (!skipped) |
1032 | loops++; | 1032 | loops++; |
1033 | flags = 0; | 1033 | flags = 0; |
1034 | if (loops == 2) | 1034 | if (loops == 2) |
1035 | gfs2_log_flush(sdp, NULL); | 1035 | gfs2_log_flush(sdp, NULL); |
1036 | } | 1036 | } |
1037 | } | 1037 | } |
1038 | 1038 | ||
1039 | out: | 1039 | out: |
1040 | ip->i_last_rg_alloc = rgd->rd_ri.ri_addr; | 1040 | ip->i_last_rg_alloc = rgd->rd_ri.ri_addr; |
1041 | 1041 | ||
1042 | if (begin) { | 1042 | if (begin) { |
1043 | recent_rgrp_add(rgd); | 1043 | recent_rgrp_add(rgd); |
1044 | rgd = gfs2_rgrpd_get_next(rgd); | 1044 | rgd = gfs2_rgrpd_get_next(rgd); |
1045 | if (!rgd) | 1045 | if (!rgd) |
1046 | rgd = gfs2_rgrpd_get_first(sdp); | 1046 | rgd = gfs2_rgrpd_get_first(sdp); |
1047 | forward_rgrp_set(sdp, rgd); | 1047 | forward_rgrp_set(sdp, rgd); |
1048 | } | 1048 | } |
1049 | 1049 | ||
1050 | return 0; | 1050 | return 0; |
1051 | } | 1051 | } |
1052 | 1052 | ||
1053 | /** | 1053 | /** |
1054 | * gfs2_inplace_reserve_i - Reserve space in the filesystem | 1054 | * gfs2_inplace_reserve_i - Reserve space in the filesystem |
1055 | * @ip: the inode to reserve space for | 1055 | * @ip: the inode to reserve space for |
1056 | * | 1056 | * |
1057 | * Returns: errno | 1057 | * Returns: errno |
1058 | */ | 1058 | */ |
1059 | 1059 | ||
1060 | int gfs2_inplace_reserve_i(struct gfs2_inode *ip, char *file, unsigned int line) | 1060 | int gfs2_inplace_reserve_i(struct gfs2_inode *ip, char *file, unsigned int line) |
1061 | { | 1061 | { |
1062 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); | 1062 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); |
1063 | struct gfs2_alloc *al = &ip->i_alloc; | 1063 | struct gfs2_alloc *al = &ip->i_alloc; |
1064 | int error = 0; | 1064 | int error = 0; |
1065 | 1065 | ||
1066 | if (gfs2_assert_warn(sdp, al->al_requested)) | 1066 | if (gfs2_assert_warn(sdp, al->al_requested)) |
1067 | return -EINVAL; | 1067 | return -EINVAL; |
1068 | 1068 | ||
1069 | /* We need to hold the rindex unless the inode we're using is | 1069 | /* We need to hold the rindex unless the inode we're using is |
1070 | the rindex itself, in which case it's already held. */ | 1070 | the rindex itself, in which case it's already held. */ |
1071 | if (ip != GFS2_I(sdp->sd_rindex)) | 1071 | if (ip != GFS2_I(sdp->sd_rindex)) |
1072 | error = gfs2_rindex_hold(sdp, &al->al_ri_gh); | 1072 | error = gfs2_rindex_hold(sdp, &al->al_ri_gh); |
1073 | else if (!sdp->sd_rgrps) /* We may not have the rindex read in, so: */ | 1073 | else if (!sdp->sd_rgrps) /* We may not have the rindex read in, so: */ |
1074 | error = gfs2_ri_update_special(ip); | 1074 | error = gfs2_ri_update_special(ip); |
1075 | 1075 | ||
1076 | if (error) | 1076 | if (error) |
1077 | return error; | 1077 | return error; |
1078 | 1078 | ||
1079 | error = get_local_rgrp(ip); | 1079 | error = get_local_rgrp(ip); |
1080 | if (error) { | 1080 | if (error) { |
1081 | if (ip != GFS2_I(sdp->sd_rindex)) | 1081 | if (ip != GFS2_I(sdp->sd_rindex)) |
1082 | gfs2_glock_dq_uninit(&al->al_ri_gh); | 1082 | gfs2_glock_dq_uninit(&al->al_ri_gh); |
1083 | return error; | 1083 | return error; |
1084 | } | 1084 | } |
1085 | 1085 | ||
1086 | al->al_file = file; | 1086 | al->al_file = file; |
1087 | al->al_line = line; | 1087 | al->al_line = line; |
1088 | 1088 | ||
1089 | return 0; | 1089 | return 0; |
1090 | } | 1090 | } |
1091 | 1091 | ||
1092 | /** | 1092 | /** |
1093 | * gfs2_inplace_release - release an inplace reservation | 1093 | * gfs2_inplace_release - release an inplace reservation |
1094 | * @ip: the inode the reservation was taken out on | 1094 | * @ip: the inode the reservation was taken out on |
1095 | * | 1095 | * |
1096 | * Release a reservation made by gfs2_inplace_reserve(). | 1096 | * Release a reservation made by gfs2_inplace_reserve(). |
1097 | */ | 1097 | */ |
1098 | 1098 | ||
1099 | void gfs2_inplace_release(struct gfs2_inode *ip) | 1099 | void gfs2_inplace_release(struct gfs2_inode *ip) |
1100 | { | 1100 | { |
1101 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); | 1101 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); |
1102 | struct gfs2_alloc *al = &ip->i_alloc; | 1102 | struct gfs2_alloc *al = &ip->i_alloc; |
1103 | 1103 | ||
1104 | if (gfs2_assert_warn(sdp, al->al_alloced <= al->al_requested) == -1) | 1104 | if (gfs2_assert_warn(sdp, al->al_alloced <= al->al_requested) == -1) |
1105 | fs_warn(sdp, "al_alloced = %u, al_requested = %u " | 1105 | fs_warn(sdp, "al_alloced = %u, al_requested = %u " |
1106 | "al_file = %s, al_line = %u\n", | 1106 | "al_file = %s, al_line = %u\n", |
1107 | al->al_alloced, al->al_requested, al->al_file, | 1107 | al->al_alloced, al->al_requested, al->al_file, |
1108 | al->al_line); | 1108 | al->al_line); |
1109 | 1109 | ||
1110 | al->al_rgd = NULL; | 1110 | al->al_rgd = NULL; |
1111 | gfs2_glock_dq_uninit(&al->al_rgd_gh); | 1111 | gfs2_glock_dq_uninit(&al->al_rgd_gh); |
1112 | if (ip != GFS2_I(sdp->sd_rindex)) | 1112 | if (ip != GFS2_I(sdp->sd_rindex)) |
1113 | gfs2_glock_dq_uninit(&al->al_ri_gh); | 1113 | gfs2_glock_dq_uninit(&al->al_ri_gh); |
1114 | } | 1114 | } |
1115 | 1115 | ||
1116 | /** | 1116 | /** |
1117 | * gfs2_get_block_type - Check a block in a RG is of given type | 1117 | * gfs2_get_block_type - Check a block in a RG is of given type |
1118 | * @rgd: the resource group holding the block | 1118 | * @rgd: the resource group holding the block |
1119 | * @block: the block number | 1119 | * @block: the block number |
1120 | * | 1120 | * |
1121 | * Returns: The block type (GFS2_BLKST_*) | 1121 | * Returns: The block type (GFS2_BLKST_*) |
1122 | */ | 1122 | */ |
1123 | 1123 | ||
1124 | unsigned char gfs2_get_block_type(struct gfs2_rgrpd *rgd, u64 block) | 1124 | unsigned char gfs2_get_block_type(struct gfs2_rgrpd *rgd, u64 block) |
1125 | { | 1125 | { |
1126 | struct gfs2_bitmap *bi = NULL; | 1126 | struct gfs2_bitmap *bi = NULL; |
1127 | u32 length, rgrp_block, buf_block; | 1127 | u32 length, rgrp_block, buf_block; |
1128 | unsigned int buf; | 1128 | unsigned int buf; |
1129 | unsigned char type; | 1129 | unsigned char type; |
1130 | 1130 | ||
1131 | length = rgd->rd_ri.ri_length; | 1131 | length = rgd->rd_ri.ri_length; |
1132 | rgrp_block = block - rgd->rd_ri.ri_data0; | 1132 | rgrp_block = block - rgd->rd_ri.ri_data0; |
1133 | 1133 | ||
1134 | for (buf = 0; buf < length; buf++) { | 1134 | for (buf = 0; buf < length; buf++) { |
1135 | bi = rgd->rd_bits + buf; | 1135 | bi = rgd->rd_bits + buf; |
1136 | if (rgrp_block < (bi->bi_start + bi->bi_len) * GFS2_NBBY) | 1136 | if (rgrp_block < (bi->bi_start + bi->bi_len) * GFS2_NBBY) |
1137 | break; | 1137 | break; |
1138 | } | 1138 | } |
1139 | 1139 | ||
1140 | gfs2_assert(rgd->rd_sbd, buf < length); | 1140 | gfs2_assert(rgd->rd_sbd, buf < length); |
1141 | buf_block = rgrp_block - bi->bi_start * GFS2_NBBY; | 1141 | buf_block = rgrp_block - bi->bi_start * GFS2_NBBY; |
1142 | 1142 | ||
1143 | type = gfs2_testbit(rgd, bi->bi_bh->b_data + bi->bi_offset, | 1143 | type = gfs2_testbit(rgd, bi->bi_bh->b_data + bi->bi_offset, |
1144 | bi->bi_len, buf_block); | 1144 | bi->bi_len, buf_block); |
1145 | 1145 | ||
1146 | return type; | 1146 | return type; |
1147 | } | 1147 | } |
1148 | 1148 | ||
1149 | /** | 1149 | /** |
1150 | * rgblk_search - find a block in @old_state, change allocation | 1150 | * rgblk_search - find a block in @old_state, change allocation |
1151 | * state to @new_state | 1151 | * state to @new_state |
1152 | * @rgd: the resource group descriptor | 1152 | * @rgd: the resource group descriptor |
1153 | * @goal: the goal block within the RG (start here to search for avail block) | 1153 | * @goal: the goal block within the RG (start here to search for avail block) |
1154 | * @old_state: GFS2_BLKST_XXX the before-allocation state to find | 1154 | * @old_state: GFS2_BLKST_XXX the before-allocation state to find |
1155 | * @new_state: GFS2_BLKST_XXX the after-allocation block state | 1155 | * @new_state: GFS2_BLKST_XXX the after-allocation block state |
1156 | * | 1156 | * |
1157 | * Walk rgrp's bitmap to find bits that represent a block in @old_state. | 1157 | * Walk rgrp's bitmap to find bits that represent a block in @old_state. |
1158 | * Add the found bitmap buffer to the transaction. | 1158 | * Add the found bitmap buffer to the transaction. |
1159 | * Set the found bits to @new_state to change block's allocation state. | 1159 | * Set the found bits to @new_state to change block's allocation state. |
1160 | * | 1160 | * |
1161 | * This function never fails, because we wouldn't call it unless we | 1161 | * This function never fails, because we wouldn't call it unless we |
1162 | * know (from reservation results, etc.) that a block is available. | 1162 | * know (from reservation results, etc.) that a block is available. |
1163 | * | 1163 | * |
1164 | * Scope of @goal and returned block is just within rgrp, not the whole | 1164 | * Scope of @goal and returned block is just within rgrp, not the whole |
1165 | * filesystem. | 1165 | * filesystem. |
1166 | * | 1166 | * |
1167 | * Returns: the block number allocated | 1167 | * Returns: the block number allocated |
1168 | */ | 1168 | */ |
1169 | 1169 | ||
1170 | static u32 rgblk_search(struct gfs2_rgrpd *rgd, u32 goal, | 1170 | static u32 rgblk_search(struct gfs2_rgrpd *rgd, u32 goal, |
1171 | unsigned char old_state, unsigned char new_state) | 1171 | unsigned char old_state, unsigned char new_state) |
1172 | { | 1172 | { |
1173 | struct gfs2_bitmap *bi = NULL; | 1173 | struct gfs2_bitmap *bi = NULL; |
1174 | u32 length = rgd->rd_ri.ri_length; | 1174 | u32 length = rgd->rd_ri.ri_length; |
1175 | u32 blk = 0; | 1175 | u32 blk = 0; |
1176 | unsigned int buf, x; | 1176 | unsigned int buf, x; |
1177 | 1177 | ||
1178 | /* Find bitmap block that contains bits for goal block */ | 1178 | /* Find bitmap block that contains bits for goal block */ |
1179 | for (buf = 0; buf < length; buf++) { | 1179 | for (buf = 0; buf < length; buf++) { |
1180 | bi = rgd->rd_bits + buf; | 1180 | bi = rgd->rd_bits + buf; |
1181 | if (goal < (bi->bi_start + bi->bi_len) * GFS2_NBBY) | 1181 | if (goal < (bi->bi_start + bi->bi_len) * GFS2_NBBY) |
1182 | break; | 1182 | break; |
1183 | } | 1183 | } |
1184 | 1184 | ||
1185 | gfs2_assert(rgd->rd_sbd, buf < length); | 1185 | gfs2_assert(rgd->rd_sbd, buf < length); |
1186 | 1186 | ||
1187 | /* Convert scope of "goal" from rgrp-wide to within found bit block */ | 1187 | /* Convert scope of "goal" from rgrp-wide to within found bit block */ |
1188 | goal -= bi->bi_start * GFS2_NBBY; | 1188 | goal -= bi->bi_start * GFS2_NBBY; |
1189 | 1189 | ||
1190 | /* Search (up to entire) bitmap in this rgrp for allocatable block. | 1190 | /* Search (up to entire) bitmap in this rgrp for allocatable block. |
1191 | "x <= length", instead of "x < length", because we typically start | 1191 | "x <= length", instead of "x < length", because we typically start |
1192 | the search in the middle of a bit block, but if we can't find an | 1192 | the search in the middle of a bit block, but if we can't find an |
1193 | allocatable block anywhere else, we want to be able wrap around and | 1193 | allocatable block anywhere else, we want to be able wrap around and |
1194 | search in the first part of our first-searched bit block. */ | 1194 | search in the first part of our first-searched bit block. */ |
1195 | for (x = 0; x <= length; x++) { | 1195 | for (x = 0; x <= length; x++) { |
1196 | if (bi->bi_clone) | 1196 | if (bi->bi_clone) |
1197 | blk = gfs2_bitfit(rgd, bi->bi_clone + bi->bi_offset, | 1197 | blk = gfs2_bitfit(rgd, bi->bi_clone + bi->bi_offset, |
1198 | bi->bi_len, goal, old_state); | 1198 | bi->bi_len, goal, old_state); |
1199 | else | 1199 | else |
1200 | blk = gfs2_bitfit(rgd, | 1200 | blk = gfs2_bitfit(rgd, |
1201 | bi->bi_bh->b_data + bi->bi_offset, | 1201 | bi->bi_bh->b_data + bi->bi_offset, |
1202 | bi->bi_len, goal, old_state); | 1202 | bi->bi_len, goal, old_state); |
1203 | if (blk != BFITNOENT) | 1203 | if (blk != BFITNOENT) |
1204 | break; | 1204 | break; |
1205 | 1205 | ||
1206 | /* Try next bitmap block (wrap back to rgrp header if at end) */ | 1206 | /* Try next bitmap block (wrap back to rgrp header if at end) */ |
1207 | buf = (buf + 1) % length; | 1207 | buf = (buf + 1) % length; |
1208 | bi = rgd->rd_bits + buf; | 1208 | bi = rgd->rd_bits + buf; |
1209 | goal = 0; | 1209 | goal = 0; |
1210 | } | 1210 | } |
1211 | 1211 | ||
1212 | if (gfs2_assert_withdraw(rgd->rd_sbd, x <= length)) | 1212 | if (gfs2_assert_withdraw(rgd->rd_sbd, x <= length)) |
1213 | blk = 0; | 1213 | blk = 0; |
1214 | 1214 | ||
1215 | gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1); | 1215 | gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1); |
1216 | gfs2_setbit(rgd, bi->bi_bh->b_data + bi->bi_offset, | 1216 | gfs2_setbit(rgd, bi->bi_bh->b_data + bi->bi_offset, |
1217 | bi->bi_len, blk, new_state); | 1217 | bi->bi_len, blk, new_state); |
1218 | if (bi->bi_clone) | 1218 | if (bi->bi_clone) |
1219 | gfs2_setbit(rgd, bi->bi_clone + bi->bi_offset, | 1219 | gfs2_setbit(rgd, bi->bi_clone + bi->bi_offset, |
1220 | bi->bi_len, blk, new_state); | 1220 | bi->bi_len, blk, new_state); |
1221 | 1221 | ||
1222 | return bi->bi_start * GFS2_NBBY + blk; | 1222 | return bi->bi_start * GFS2_NBBY + blk; |
1223 | } | 1223 | } |
1224 | 1224 | ||
1225 | /** | 1225 | /** |
1226 | * rgblk_free - Change alloc state of given block(s) | 1226 | * rgblk_free - Change alloc state of given block(s) |
1227 | * @sdp: the filesystem | 1227 | * @sdp: the filesystem |
1228 | * @bstart: the start of a run of blocks to free | 1228 | * @bstart: the start of a run of blocks to free |
1229 | * @blen: the length of the block run (all must lie within ONE RG!) | 1229 | * @blen: the length of the block run (all must lie within ONE RG!) |
1230 | * @new_state: GFS2_BLKST_XXX the after-allocation block state | 1230 | * @new_state: GFS2_BLKST_XXX the after-allocation block state |
1231 | * | 1231 | * |
1232 | * Returns: Resource group containing the block(s) | 1232 | * Returns: Resource group containing the block(s) |
1233 | */ | 1233 | */ |
1234 | 1234 | ||
1235 | static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart, | 1235 | static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart, |
1236 | u32 blen, unsigned char new_state) | 1236 | u32 blen, unsigned char new_state) |
1237 | { | 1237 | { |
1238 | struct gfs2_rgrpd *rgd; | 1238 | struct gfs2_rgrpd *rgd; |
1239 | struct gfs2_bitmap *bi = NULL; | 1239 | struct gfs2_bitmap *bi = NULL; |
1240 | u32 length, rgrp_blk, buf_blk; | 1240 | u32 length, rgrp_blk, buf_blk; |
1241 | unsigned int buf; | 1241 | unsigned int buf; |
1242 | 1242 | ||
1243 | rgd = gfs2_blk2rgrpd(sdp, bstart); | 1243 | rgd = gfs2_blk2rgrpd(sdp, bstart); |
1244 | if (!rgd) { | 1244 | if (!rgd) { |
1245 | if (gfs2_consist(sdp)) | 1245 | if (gfs2_consist(sdp)) |
1246 | fs_err(sdp, "block = %llu\n", (unsigned long long)bstart); | 1246 | fs_err(sdp, "block = %llu\n", (unsigned long long)bstart); |
1247 | return NULL; | 1247 | return NULL; |
1248 | } | 1248 | } |
1249 | 1249 | ||
1250 | length = rgd->rd_ri.ri_length; | 1250 | length = rgd->rd_ri.ri_length; |
1251 | 1251 | ||
1252 | rgrp_blk = bstart - rgd->rd_ri.ri_data0; | 1252 | rgrp_blk = bstart - rgd->rd_ri.ri_data0; |
1253 | 1253 | ||
1254 | while (blen--) { | 1254 | while (blen--) { |
1255 | for (buf = 0; buf < length; buf++) { | 1255 | for (buf = 0; buf < length; buf++) { |
1256 | bi = rgd->rd_bits + buf; | 1256 | bi = rgd->rd_bits + buf; |
1257 | if (rgrp_blk < (bi->bi_start + bi->bi_len) * GFS2_NBBY) | 1257 | if (rgrp_blk < (bi->bi_start + bi->bi_len) * GFS2_NBBY) |
1258 | break; | 1258 | break; |
1259 | } | 1259 | } |
1260 | 1260 | ||
1261 | gfs2_assert(rgd->rd_sbd, buf < length); | 1261 | gfs2_assert(rgd->rd_sbd, buf < length); |
1262 | 1262 | ||
1263 | buf_blk = rgrp_blk - bi->bi_start * GFS2_NBBY; | 1263 | buf_blk = rgrp_blk - bi->bi_start * GFS2_NBBY; |
1264 | rgrp_blk++; | 1264 | rgrp_blk++; |
1265 | 1265 | ||
1266 | if (!bi->bi_clone) { | 1266 | if (!bi->bi_clone) { |
1267 | bi->bi_clone = kmalloc(bi->bi_bh->b_size, | 1267 | bi->bi_clone = kmalloc(bi->bi_bh->b_size, |
1268 | GFP_NOFS | __GFP_NOFAIL); | 1268 | GFP_NOFS | __GFP_NOFAIL); |
1269 | memcpy(bi->bi_clone + bi->bi_offset, | 1269 | memcpy(bi->bi_clone + bi->bi_offset, |
1270 | bi->bi_bh->b_data + bi->bi_offset, | 1270 | bi->bi_bh->b_data + bi->bi_offset, |
1271 | bi->bi_len); | 1271 | bi->bi_len); |
1272 | } | 1272 | } |
1273 | gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1); | 1273 | gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1); |
1274 | gfs2_setbit(rgd, bi->bi_bh->b_data + bi->bi_offset, | 1274 | gfs2_setbit(rgd, bi->bi_bh->b_data + bi->bi_offset, |
1275 | bi->bi_len, buf_blk, new_state); | 1275 | bi->bi_len, buf_blk, new_state); |
1276 | } | 1276 | } |
1277 | 1277 | ||
1278 | return rgd; | 1278 | return rgd; |
1279 | } | 1279 | } |
1280 | 1280 | ||
1281 | /** | 1281 | /** |
1282 | * gfs2_alloc_data - Allocate a data block | 1282 | * gfs2_alloc_data - Allocate a data block |
1283 | * @ip: the inode to allocate the data block for | 1283 | * @ip: the inode to allocate the data block for |
1284 | * | 1284 | * |
1285 | * Returns: the allocated block | 1285 | * Returns: the allocated block |
1286 | */ | 1286 | */ |
1287 | 1287 | ||
1288 | u64 gfs2_alloc_data(struct gfs2_inode *ip) | 1288 | u64 gfs2_alloc_data(struct gfs2_inode *ip) |
1289 | { | 1289 | { |
1290 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); | 1290 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); |
1291 | struct gfs2_alloc *al = &ip->i_alloc; | 1291 | struct gfs2_alloc *al = &ip->i_alloc; |
1292 | struct gfs2_rgrpd *rgd = al->al_rgd; | 1292 | struct gfs2_rgrpd *rgd = al->al_rgd; |
1293 | u32 goal, blk; | 1293 | u32 goal, blk; |
1294 | u64 block; | 1294 | u64 block; |
1295 | 1295 | ||
1296 | if (rgrp_contains_block(&rgd->rd_ri, ip->i_di.di_goal_data)) | 1296 | if (rgrp_contains_block(&rgd->rd_ri, ip->i_di.di_goal_data)) |
1297 | goal = ip->i_di.di_goal_data - rgd->rd_ri.ri_data0; | 1297 | goal = ip->i_di.di_goal_data - rgd->rd_ri.ri_data0; |
1298 | else | 1298 | else |
1299 | goal = rgd->rd_last_alloc_data; | 1299 | goal = rgd->rd_last_alloc_data; |
1300 | 1300 | ||
1301 | blk = rgblk_search(rgd, goal, GFS2_BLKST_FREE, GFS2_BLKST_USED); | 1301 | blk = rgblk_search(rgd, goal, GFS2_BLKST_FREE, GFS2_BLKST_USED); |
1302 | rgd->rd_last_alloc_data = blk; | 1302 | rgd->rd_last_alloc_data = blk; |
1303 | 1303 | ||
1304 | block = rgd->rd_ri.ri_data0 + blk; | 1304 | block = rgd->rd_ri.ri_data0 + blk; |
1305 | ip->i_di.di_goal_data = block; | 1305 | ip->i_di.di_goal_data = block; |
1306 | 1306 | ||
1307 | gfs2_assert_withdraw(sdp, rgd->rd_rg.rg_free); | 1307 | gfs2_assert_withdraw(sdp, rgd->rd_rg.rg_free); |
1308 | rgd->rd_rg.rg_free--; | 1308 | rgd->rd_rg.rg_free--; |
1309 | 1309 | ||
1310 | gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); | 1310 | gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); |
1311 | gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data); | 1311 | gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data); |
1312 | 1312 | ||
1313 | al->al_alloced++; | 1313 | al->al_alloced++; |
1314 | 1314 | ||
1315 | gfs2_statfs_change(sdp, 0, -1, 0); | 1315 | gfs2_statfs_change(sdp, 0, -1, 0); |
1316 | gfs2_quota_change(ip, +1, ip->i_inode.i_uid, ip->i_inode.i_gid); | 1316 | gfs2_quota_change(ip, +1, ip->i_inode.i_uid, ip->i_inode.i_gid); |
1317 | 1317 | ||
1318 | spin_lock(&sdp->sd_rindex_spin); | 1318 | spin_lock(&sdp->sd_rindex_spin); |
1319 | rgd->rd_free_clone--; | 1319 | rgd->rd_free_clone--; |
1320 | spin_unlock(&sdp->sd_rindex_spin); | 1320 | spin_unlock(&sdp->sd_rindex_spin); |
1321 | 1321 | ||
1322 | return block; | 1322 | return block; |
1323 | } | 1323 | } |
1324 | 1324 | ||
1325 | /** | 1325 | /** |
1326 | * gfs2_alloc_meta - Allocate a metadata block | 1326 | * gfs2_alloc_meta - Allocate a metadata block |
1327 | * @ip: the inode to allocate the metadata block for | 1327 | * @ip: the inode to allocate the metadata block for |
1328 | * | 1328 | * |
1329 | * Returns: the allocated block | 1329 | * Returns: the allocated block |
1330 | */ | 1330 | */ |
1331 | 1331 | ||
1332 | u64 gfs2_alloc_meta(struct gfs2_inode *ip) | 1332 | u64 gfs2_alloc_meta(struct gfs2_inode *ip) |
1333 | { | 1333 | { |
1334 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); | 1334 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); |
1335 | struct gfs2_alloc *al = &ip->i_alloc; | 1335 | struct gfs2_alloc *al = &ip->i_alloc; |
1336 | struct gfs2_rgrpd *rgd = al->al_rgd; | 1336 | struct gfs2_rgrpd *rgd = al->al_rgd; |
1337 | u32 goal, blk; | 1337 | u32 goal, blk; |
1338 | u64 block; | 1338 | u64 block; |
1339 | 1339 | ||
1340 | if (rgrp_contains_block(&rgd->rd_ri, ip->i_di.di_goal_meta)) | 1340 | if (rgrp_contains_block(&rgd->rd_ri, ip->i_di.di_goal_meta)) |
1341 | goal = ip->i_di.di_goal_meta - rgd->rd_ri.ri_data0; | 1341 | goal = ip->i_di.di_goal_meta - rgd->rd_ri.ri_data0; |
1342 | else | 1342 | else |
1343 | goal = rgd->rd_last_alloc_meta; | 1343 | goal = rgd->rd_last_alloc_meta; |
1344 | 1344 | ||
1345 | blk = rgblk_search(rgd, goal, GFS2_BLKST_FREE, GFS2_BLKST_USED); | 1345 | blk = rgblk_search(rgd, goal, GFS2_BLKST_FREE, GFS2_BLKST_USED); |
1346 | rgd->rd_last_alloc_meta = blk; | 1346 | rgd->rd_last_alloc_meta = blk; |
1347 | 1347 | ||
1348 | block = rgd->rd_ri.ri_data0 + blk; | 1348 | block = rgd->rd_ri.ri_data0 + blk; |
1349 | ip->i_di.di_goal_meta = block; | 1349 | ip->i_di.di_goal_meta = block; |
1350 | 1350 | ||
1351 | gfs2_assert_withdraw(sdp, rgd->rd_rg.rg_free); | 1351 | gfs2_assert_withdraw(sdp, rgd->rd_rg.rg_free); |
1352 | rgd->rd_rg.rg_free--; | 1352 | rgd->rd_rg.rg_free--; |
1353 | 1353 | ||
1354 | gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); | 1354 | gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); |
1355 | gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data); | 1355 | gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data); |
1356 | 1356 | ||
1357 | al->al_alloced++; | 1357 | al->al_alloced++; |
1358 | 1358 | ||
1359 | gfs2_statfs_change(sdp, 0, -1, 0); | 1359 | gfs2_statfs_change(sdp, 0, -1, 0); |
1360 | gfs2_quota_change(ip, +1, ip->i_inode.i_uid, ip->i_inode.i_gid); | 1360 | gfs2_quota_change(ip, +1, ip->i_inode.i_uid, ip->i_inode.i_gid); |
1361 | gfs2_trans_add_unrevoke(sdp, block); | 1361 | gfs2_trans_add_unrevoke(sdp, block); |
1362 | 1362 | ||
1363 | spin_lock(&sdp->sd_rindex_spin); | 1363 | spin_lock(&sdp->sd_rindex_spin); |
1364 | rgd->rd_free_clone--; | 1364 | rgd->rd_free_clone--; |
1365 | spin_unlock(&sdp->sd_rindex_spin); | 1365 | spin_unlock(&sdp->sd_rindex_spin); |
1366 | 1366 | ||
1367 | return block; | 1367 | return block; |
1368 | } | 1368 | } |
1369 | 1369 | ||
1370 | /** | 1370 | /** |
1371 | * gfs2_alloc_di - Allocate a dinode | 1371 | * gfs2_alloc_di - Allocate a dinode |
1372 | * @dip: the directory that the inode is going in | 1372 | * @dip: the directory that the inode is going in |
1373 | * | 1373 | * |
1374 | * Returns: the block allocated | 1374 | * Returns: the block allocated |
1375 | */ | 1375 | */ |
1376 | 1376 | ||
1377 | u64 gfs2_alloc_di(struct gfs2_inode *dip, u64 *generation) | 1377 | u64 gfs2_alloc_di(struct gfs2_inode *dip, u64 *generation) |
1378 | { | 1378 | { |
1379 | struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode); | 1379 | struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode); |
1380 | struct gfs2_alloc *al = &dip->i_alloc; | 1380 | struct gfs2_alloc *al = &dip->i_alloc; |
1381 | struct gfs2_rgrpd *rgd = al->al_rgd; | 1381 | struct gfs2_rgrpd *rgd = al->al_rgd; |
1382 | u32 blk; | 1382 | u32 blk; |
1383 | u64 block; | 1383 | u64 block; |
1384 | 1384 | ||
1385 | blk = rgblk_search(rgd, rgd->rd_last_alloc_meta, | 1385 | blk = rgblk_search(rgd, rgd->rd_last_alloc_meta, |
1386 | GFS2_BLKST_FREE, GFS2_BLKST_DINODE); | 1386 | GFS2_BLKST_FREE, GFS2_BLKST_DINODE); |
1387 | 1387 | ||
1388 | rgd->rd_last_alloc_meta = blk; | 1388 | rgd->rd_last_alloc_meta = blk; |
1389 | 1389 | ||
1390 | block = rgd->rd_ri.ri_data0 + blk; | 1390 | block = rgd->rd_ri.ri_data0 + blk; |
1391 | 1391 | ||
1392 | gfs2_assert_withdraw(sdp, rgd->rd_rg.rg_free); | 1392 | gfs2_assert_withdraw(sdp, rgd->rd_rg.rg_free); |
1393 | rgd->rd_rg.rg_free--; | 1393 | rgd->rd_rg.rg_free--; |
1394 | rgd->rd_rg.rg_dinodes++; | 1394 | rgd->rd_rg.rg_dinodes++; |
1395 | *generation = rgd->rd_rg.rg_igeneration++; | 1395 | *generation = rgd->rd_rg.rg_igeneration++; |
1396 | gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); | 1396 | gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); |
1397 | gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data); | 1397 | gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data); |
1398 | 1398 | ||
1399 | al->al_alloced++; | 1399 | al->al_alloced++; |
1400 | 1400 | ||
1401 | gfs2_statfs_change(sdp, 0, -1, +1); | 1401 | gfs2_statfs_change(sdp, 0, -1, +1); |
1402 | gfs2_trans_add_unrevoke(sdp, block); | 1402 | gfs2_trans_add_unrevoke(sdp, block); |
1403 | 1403 | ||
1404 | spin_lock(&sdp->sd_rindex_spin); | 1404 | spin_lock(&sdp->sd_rindex_spin); |
1405 | rgd->rd_free_clone--; | 1405 | rgd->rd_free_clone--; |
1406 | spin_unlock(&sdp->sd_rindex_spin); | 1406 | spin_unlock(&sdp->sd_rindex_spin); |
1407 | 1407 | ||
1408 | return block; | 1408 | return block; |
1409 | } | 1409 | } |
1410 | 1410 | ||
1411 | /** | 1411 | /** |
1412 | * gfs2_free_data - free a contiguous run of data block(s) | 1412 | * gfs2_free_data - free a contiguous run of data block(s) |
1413 | * @ip: the inode these blocks are being freed from | 1413 | * @ip: the inode these blocks are being freed from |
1414 | * @bstart: first block of a run of contiguous blocks | 1414 | * @bstart: first block of a run of contiguous blocks |
1415 | * @blen: the length of the block run | 1415 | * @blen: the length of the block run |
1416 | * | 1416 | * |
1417 | */ | 1417 | */ |
1418 | 1418 | ||
1419 | void gfs2_free_data(struct gfs2_inode *ip, u64 bstart, u32 blen) | 1419 | void gfs2_free_data(struct gfs2_inode *ip, u64 bstart, u32 blen) |
1420 | { | 1420 | { |
1421 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); | 1421 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); |
1422 | struct gfs2_rgrpd *rgd; | 1422 | struct gfs2_rgrpd *rgd; |
1423 | 1423 | ||
1424 | rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE); | 1424 | rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE); |
1425 | if (!rgd) | 1425 | if (!rgd) |
1426 | return; | 1426 | return; |
1427 | 1427 | ||
1428 | rgd->rd_rg.rg_free += blen; | 1428 | rgd->rd_rg.rg_free += blen; |
1429 | 1429 | ||
1430 | gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); | 1430 | gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); |
1431 | gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data); | 1431 | gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data); |
1432 | 1432 | ||
1433 | gfs2_trans_add_rg(rgd); | 1433 | gfs2_trans_add_rg(rgd); |
1434 | 1434 | ||
1435 | gfs2_statfs_change(sdp, 0, +blen, 0); | 1435 | gfs2_statfs_change(sdp, 0, +blen, 0); |
1436 | gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid); | 1436 | gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid); |
1437 | } | 1437 | } |
1438 | 1438 | ||
1439 | /** | 1439 | /** |
1440 | * gfs2_free_meta - free a contiguous run of data block(s) | 1440 | * gfs2_free_meta - free a contiguous run of data block(s) |
1441 | * @ip: the inode these blocks are being freed from | 1441 | * @ip: the inode these blocks are being freed from |
1442 | * @bstart: first block of a run of contiguous blocks | 1442 | * @bstart: first block of a run of contiguous blocks |
1443 | * @blen: the length of the block run | 1443 | * @blen: the length of the block run |
1444 | * | 1444 | * |
1445 | */ | 1445 | */ |
1446 | 1446 | ||
1447 | void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen) | 1447 | void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen) |
1448 | { | 1448 | { |
1449 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); | 1449 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); |
1450 | struct gfs2_rgrpd *rgd; | 1450 | struct gfs2_rgrpd *rgd; |
1451 | 1451 | ||
1452 | rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE); | 1452 | rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE); |
1453 | if (!rgd) | 1453 | if (!rgd) |
1454 | return; | 1454 | return; |
1455 | 1455 | ||
1456 | rgd->rd_rg.rg_free += blen; | 1456 | rgd->rd_rg.rg_free += blen; |
1457 | 1457 | ||
1458 | gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); | 1458 | gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); |
1459 | gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data); | 1459 | gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data); |
1460 | 1460 | ||
1461 | gfs2_trans_add_rg(rgd); | 1461 | gfs2_trans_add_rg(rgd); |
1462 | 1462 | ||
1463 | gfs2_statfs_change(sdp, 0, +blen, 0); | 1463 | gfs2_statfs_change(sdp, 0, +blen, 0); |
1464 | gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid); | 1464 | gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid); |
1465 | gfs2_meta_wipe(ip, bstart, blen); | 1465 | gfs2_meta_wipe(ip, bstart, blen); |
1466 | } | 1466 | } |
1467 | 1467 | ||
1468 | void gfs2_unlink_di(struct inode *inode) | 1468 | void gfs2_unlink_di(struct inode *inode) |
1469 | { | 1469 | { |
1470 | struct gfs2_inode *ip = GFS2_I(inode); | 1470 | struct gfs2_inode *ip = GFS2_I(inode); |
1471 | struct gfs2_sbd *sdp = GFS2_SB(inode); | 1471 | struct gfs2_sbd *sdp = GFS2_SB(inode); |
1472 | struct gfs2_rgrpd *rgd; | 1472 | struct gfs2_rgrpd *rgd; |
1473 | u64 blkno = ip->i_num.no_addr; | 1473 | u64 blkno = ip->i_num.no_addr; |
1474 | 1474 | ||
1475 | rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_UNLINKED); | 1475 | rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_UNLINKED); |
1476 | if (!rgd) | 1476 | if (!rgd) |
1477 | return; | 1477 | return; |
1478 | gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); | 1478 | gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); |
1479 | gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data); | 1479 | gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data); |
1480 | gfs2_trans_add_rg(rgd); | 1480 | gfs2_trans_add_rg(rgd); |
1481 | } | 1481 | } |
1482 | 1482 | ||
1483 | static void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, u64 blkno) | 1483 | static void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, u64 blkno) |
1484 | { | 1484 | { |
1485 | struct gfs2_sbd *sdp = rgd->rd_sbd; | 1485 | struct gfs2_sbd *sdp = rgd->rd_sbd; |
1486 | struct gfs2_rgrpd *tmp_rgd; | 1486 | struct gfs2_rgrpd *tmp_rgd; |
1487 | 1487 | ||
1488 | tmp_rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_FREE); | 1488 | tmp_rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_FREE); |
1489 | if (!tmp_rgd) | 1489 | if (!tmp_rgd) |
1490 | return; | 1490 | return; |
1491 | gfs2_assert_withdraw(sdp, rgd == tmp_rgd); | 1491 | gfs2_assert_withdraw(sdp, rgd == tmp_rgd); |
1492 | 1492 | ||
1493 | if (!rgd->rd_rg.rg_dinodes) | 1493 | if (!rgd->rd_rg.rg_dinodes) |
1494 | gfs2_consist_rgrpd(rgd); | 1494 | gfs2_consist_rgrpd(rgd); |
1495 | rgd->rd_rg.rg_dinodes--; | 1495 | rgd->rd_rg.rg_dinodes--; |
1496 | rgd->rd_rg.rg_free++; | 1496 | rgd->rd_rg.rg_free++; |
1497 | 1497 | ||
1498 | gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); | 1498 | gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); |
1499 | gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data); | 1499 | gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data); |
1500 | 1500 | ||
1501 | gfs2_statfs_change(sdp, 0, +1, -1); | 1501 | gfs2_statfs_change(sdp, 0, +1, -1); |
1502 | gfs2_trans_add_rg(rgd); | 1502 | gfs2_trans_add_rg(rgd); |
1503 | } | 1503 | } |
1504 | 1504 | ||
1505 | 1505 | ||
1506 | void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip) | 1506 | void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip) |
1507 | { | 1507 | { |
1508 | gfs2_free_uninit_di(rgd, ip->i_num.no_addr); | 1508 | gfs2_free_uninit_di(rgd, ip->i_num.no_addr); |
1509 | gfs2_quota_change(ip, -1, ip->i_inode.i_uid, ip->i_inode.i_gid); | 1509 | gfs2_quota_change(ip, -1, ip->i_inode.i_uid, ip->i_inode.i_gid); |
1510 | gfs2_meta_wipe(ip, ip->i_num.no_addr, 1); | 1510 | gfs2_meta_wipe(ip, ip->i_num.no_addr, 1); |
1511 | } | 1511 | } |
1512 | 1512 | ||
1513 | /** | 1513 | /** |
1514 | * gfs2_rlist_add - add a RG to a list of RGs | 1514 | * gfs2_rlist_add - add a RG to a list of RGs |
1515 | * @sdp: the filesystem | 1515 | * @sdp: the filesystem |
1516 | * @rlist: the list of resource groups | 1516 | * @rlist: the list of resource groups |
1517 | * @block: the block | 1517 | * @block: the block |
1518 | * | 1518 | * |
1519 | * Figure out what RG a block belongs to and add that RG to the list | 1519 | * Figure out what RG a block belongs to and add that RG to the list |
1520 | * | 1520 | * |
1521 | * FIXME: Don't use NOFAIL | 1521 | * FIXME: Don't use NOFAIL |
1522 | * | 1522 | * |
1523 | */ | 1523 | */ |
1524 | 1524 | ||
1525 | void gfs2_rlist_add(struct gfs2_sbd *sdp, struct gfs2_rgrp_list *rlist, | 1525 | void gfs2_rlist_add(struct gfs2_sbd *sdp, struct gfs2_rgrp_list *rlist, |
1526 | u64 block) | 1526 | u64 block) |
1527 | { | 1527 | { |
1528 | struct gfs2_rgrpd *rgd; | 1528 | struct gfs2_rgrpd *rgd; |
1529 | struct gfs2_rgrpd **tmp; | 1529 | struct gfs2_rgrpd **tmp; |
1530 | unsigned int new_space; | 1530 | unsigned int new_space; |
1531 | unsigned int x; | 1531 | unsigned int x; |
1532 | 1532 | ||
1533 | if (gfs2_assert_warn(sdp, !rlist->rl_ghs)) | 1533 | if (gfs2_assert_warn(sdp, !rlist->rl_ghs)) |
1534 | return; | 1534 | return; |
1535 | 1535 | ||
1536 | rgd = gfs2_blk2rgrpd(sdp, block); | 1536 | rgd = gfs2_blk2rgrpd(sdp, block); |
1537 | if (!rgd) { | 1537 | if (!rgd) { |
1538 | if (gfs2_consist(sdp)) | 1538 | if (gfs2_consist(sdp)) |
1539 | fs_err(sdp, "block = %llu\n", (unsigned long long)block); | 1539 | fs_err(sdp, "block = %llu\n", (unsigned long long)block); |
1540 | return; | 1540 | return; |
1541 | } | 1541 | } |
1542 | 1542 | ||
1543 | for (x = 0; x < rlist->rl_rgrps; x++) | 1543 | for (x = 0; x < rlist->rl_rgrps; x++) |
1544 | if (rlist->rl_rgd[x] == rgd) | 1544 | if (rlist->rl_rgd[x] == rgd) |
1545 | return; | 1545 | return; |
1546 | 1546 | ||
1547 | if (rlist->rl_rgrps == rlist->rl_space) { | 1547 | if (rlist->rl_rgrps == rlist->rl_space) { |
1548 | new_space = rlist->rl_space + 10; | 1548 | new_space = rlist->rl_space + 10; |
1549 | 1549 | ||
1550 | tmp = kcalloc(new_space, sizeof(struct gfs2_rgrpd *), | 1550 | tmp = kcalloc(new_space, sizeof(struct gfs2_rgrpd *), |
1551 | GFP_NOFS | __GFP_NOFAIL); | 1551 | GFP_NOFS | __GFP_NOFAIL); |
1552 | 1552 | ||
1553 | if (rlist->rl_rgd) { | 1553 | if (rlist->rl_rgd) { |
1554 | memcpy(tmp, rlist->rl_rgd, | 1554 | memcpy(tmp, rlist->rl_rgd, |
1555 | rlist->rl_space * sizeof(struct gfs2_rgrpd *)); | 1555 | rlist->rl_space * sizeof(struct gfs2_rgrpd *)); |
1556 | kfree(rlist->rl_rgd); | 1556 | kfree(rlist->rl_rgd); |
1557 | } | 1557 | } |
1558 | 1558 | ||
1559 | rlist->rl_space = new_space; | 1559 | rlist->rl_space = new_space; |
1560 | rlist->rl_rgd = tmp; | 1560 | rlist->rl_rgd = tmp; |
1561 | } | 1561 | } |
1562 | 1562 | ||
1563 | rlist->rl_rgd[rlist->rl_rgrps++] = rgd; | 1563 | rlist->rl_rgd[rlist->rl_rgrps++] = rgd; |
1564 | } | 1564 | } |
1565 | 1565 | ||
1566 | /** | 1566 | /** |
1567 | * gfs2_rlist_alloc - all RGs have been added to the rlist, now allocate | 1567 | * gfs2_rlist_alloc - all RGs have been added to the rlist, now allocate |
1568 | * and initialize an array of glock holders for them | 1568 | * and initialize an array of glock holders for them |
1569 | * @rlist: the list of resource groups | 1569 | * @rlist: the list of resource groups |
1570 | * @state: the lock state to acquire the RG lock in | 1570 | * @state: the lock state to acquire the RG lock in |
1571 | * @flags: the modifier flags for the holder structures | 1571 | * @flags: the modifier flags for the holder structures |
1572 | * | 1572 | * |
1573 | * FIXME: Don't use NOFAIL | 1573 | * FIXME: Don't use NOFAIL |
1574 | * | 1574 | * |
1575 | */ | 1575 | */ |
1576 | 1576 | ||
1577 | void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state, | 1577 | void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state, |
1578 | int flags) | 1578 | int flags) |
1579 | { | 1579 | { |
1580 | unsigned int x; | 1580 | unsigned int x; |
1581 | 1581 | ||
1582 | rlist->rl_ghs = kcalloc(rlist->rl_rgrps, sizeof(struct gfs2_holder), | 1582 | rlist->rl_ghs = kcalloc(rlist->rl_rgrps, sizeof(struct gfs2_holder), |
1583 | GFP_NOFS | __GFP_NOFAIL); | 1583 | GFP_NOFS | __GFP_NOFAIL); |
1584 | for (x = 0; x < rlist->rl_rgrps; x++) | 1584 | for (x = 0; x < rlist->rl_rgrps; x++) |
1585 | gfs2_holder_init(rlist->rl_rgd[x]->rd_gl, | 1585 | gfs2_holder_init(rlist->rl_rgd[x]->rd_gl, |
1586 | state, flags, | 1586 | state, flags, |
1587 | &rlist->rl_ghs[x]); | 1587 | &rlist->rl_ghs[x]); |
1588 | } | 1588 | } |
1589 | 1589 | ||
1590 | /** | 1590 | /** |
1591 | * gfs2_rlist_free - free a resource group list | 1591 | * gfs2_rlist_free - free a resource group list |
1592 | * @list: the list of resource groups | 1592 | * @list: the list of resource groups |
1593 | * | 1593 | * |
1594 | */ | 1594 | */ |
1595 | 1595 | ||
1596 | void gfs2_rlist_free(struct gfs2_rgrp_list *rlist) | 1596 | void gfs2_rlist_free(struct gfs2_rgrp_list *rlist) |
1597 | { | 1597 | { |
1598 | unsigned int x; | 1598 | unsigned int x; |
1599 | 1599 | ||
1600 | kfree(rlist->rl_rgd); | 1600 | kfree(rlist->rl_rgd); |
1601 | 1601 | ||
1602 | if (rlist->rl_ghs) { | 1602 | if (rlist->rl_ghs) { |
1603 | for (x = 0; x < rlist->rl_rgrps; x++) | 1603 | for (x = 0; x < rlist->rl_rgrps; x++) |
1604 | gfs2_holder_uninit(&rlist->rl_ghs[x]); | 1604 | gfs2_holder_uninit(&rlist->rl_ghs[x]); |
1605 | kfree(rlist->rl_ghs); | 1605 | kfree(rlist->rl_ghs); |
1606 | } | 1606 | } |
1607 | } | 1607 | } |
1608 | 1608 | ||
1609 | 1609 |
fs/gfs2/rgrp.h
1 | /* | 1 | /* |
2 | * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. | 2 | * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. |
3 | * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved. | 3 | * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved. |
4 | * | 4 | * |
5 | * This copyrighted material is made available to anyone wishing to use, | 5 | * This copyrighted material is made available to anyone wishing to use, |
6 | * modify, copy, or redistribute it subject to the terms and conditions | 6 | * modify, copy, or redistribute it subject to the terms and conditions |
7 | * of the GNU General Public License version 2. | 7 | * of the GNU General Public License version 2. |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #ifndef __RGRP_DOT_H__ | 10 | #ifndef __RGRP_DOT_H__ |
11 | #define __RGRP_DOT_H__ | 11 | #define __RGRP_DOT_H__ |
12 | 12 | ||
13 | struct gfs2_rgrpd; | 13 | struct gfs2_rgrpd; |
14 | struct gfs2_sbd; | 14 | struct gfs2_sbd; |
15 | struct gfs2_holder; | 15 | struct gfs2_holder; |
16 | 16 | ||
/* Sanity-check a resource group's counters/bitmaps (see rgrp.c). */
void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd);

/* Look up the resource group containing disk block @blk — presumably
 * returns NULL when the block falls outside all rgrps; confirm in rgrp.c. */
struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk);
/* Iteration over the filesystem's resource groups: first, then next. */
struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp);
struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd);

/* Tear down all in-memory rgrp state for superblock @sdp. */
void gfs2_clear_rgrpd(struct gfs2_sbd *sdp);
/* Acquire the resource index ("rindex") lock into holder @ri_gh;
 * returns 0 on success, negative errno otherwise (kernel convention). */
int gfs2_rindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ri_gh);

/* Reference-counted access to an rgrp's bitmap buffer heads:
 * get (may read from disk, hence the int return), hold, put. */
int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd);
void gfs2_rgrp_bh_hold(struct gfs2_rgrpd *rgd);
void gfs2_rgrp_bh_put(struct gfs2_rgrpd *rgd);

/* Resynchronize the rgrp's clone bitmaps with the real ones — exact
 * semantics live in rgrp.c; NOTE(review): not visible from this header. */
void gfs2_rgrp_repolish_clones(struct gfs2_rgrpd *rgd);
/* Obtain the per-inode allocation context (ip->i_alloc). */
struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip);

/*
 * Release counterpart of gfs2_alloc_get(). Deliberately a no-op:
 * it exists only so call sites visibly bracket their use of
 * ip->i_alloc, making that usage easy to audit.
 */
static inline void gfs2_alloc_put(struct gfs2_inode *ip)
{
	/* nothing to release */
}
37 | 37 | ||
/*
 * Reserve space for an upcoming in-place allocation on @ip.
 * Callers use the gfs2_inplace_reserve() macro below, which stamps the
 * call site's file and line into the request for debugging; only the
 * _i function has an out-of-line implementation (in rgrp.c).
 * Returns 0 on success, negative errno otherwise (kernel convention).
 */
int gfs2_inplace_reserve_i(struct gfs2_inode *ip,
			   char *file, unsigned int line);
#define gfs2_inplace_reserve(ip) \
gfs2_inplace_reserve_i((ip), __FILE__, __LINE__)
42 | 42 | ||
/* Drop a reservation previously taken via gfs2_inplace_reserve(). */
void gfs2_inplace_release(struct gfs2_inode *ip);

/* Report the bitmap state of @block within @rgd — presumably one of the
 * GFS2_BLKST_* two-bit states; confirm against rgrp.c. */
unsigned char gfs2_get_block_type(struct gfs2_rgrpd *rgd, u64 block);

/* Block allocators; each returns a block number — presumably the newly
 * allocated block (data, metadata, or dinode respectively). For dinode
 * allocation, @generation receives the new inode's generation number
 * — NOTE(review): inferred from the parameter name, verify in rgrp.c. */
u64 gfs2_alloc_data(struct gfs2_inode *ip);
u64 gfs2_alloc_meta(struct gfs2_inode *ip);
u64 gfs2_alloc_di(struct gfs2_inode *ip, u64 *generation);

/* Free @blen contiguous blocks starting at @bstart (data or metadata),
 * free a dinode, or mark a dinode unlinked. */
void gfs2_free_data(struct gfs2_inode *ip, u64 bstart, u32 blen);
void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen);
void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip);
void gfs2_unlink_di(struct inode *inode);
55 | 55 | ||
/*
 * A dynamically grown list of resource groups, with one lock holder per
 * entry — used when an operation must lock several rgrps at once
 * (built via gfs2_rlist_add, locked via gfs2_rlist_alloc below).
 */
struct gfs2_rgrp_list {
	unsigned int rl_rgrps;		/* number of rgrps currently in rl_rgd */
	unsigned int rl_space;		/* presumably allocated capacity of the
					 * arrays — TODO confirm in rgrp.c */
	struct gfs2_rgrpd **rl_rgd;	/* array of rgrp pointers */
	struct gfs2_holder *rl_ghs;	/* glock holders, one per rgrp */
};
62 | 62 | ||
/* Add the rgrp containing @block to @rlist (no-op if already present
 * — NOTE(review): dedup behavior inferred from usage pattern, verify). */
void gfs2_rlist_add(struct gfs2_sbd *sdp, struct gfs2_rgrp_list *rlist,
		    u64 block);
/* Allocate holders and request glocks in @state/@flags for every rgrp
 * accumulated in @rlist. */
void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state,
		      int flags);
/* Release holders and free the arrays backing @rlist. */
void gfs2_rlist_free(struct gfs2_rgrp_list *rlist);
/* Total filesystem size in blocks computed from the rindex
 * (added for gfs2_grow support). */
u64 gfs2_ri_total(struct gfs2_sbd *sdp);

#endif /* __RGRP_DOT_H__ */
70 | 71 |