Commit b94a170e96dc416828af9d350ae2e34b70ae7347

Authored by Benjamin Marzinski
Committed by Steven Whitehouse
1 parent 6b94617024

GFS2: remove dcache entries for remote deleted inodes

When a file is deleted from a gfs2 filesystem on one node, a dcache
entry for it may still exist on other nodes in the cluster. If this
happens, gfs2 will be unable to free this file on disk. Because of this,
it's possible to have a gfs2 filesystem with no files on it and no free
space. With this patch, when a node receives a callback notifying it
that the file is being deleted on another node, it queues a work item
on a dedicated workqueue to prune the file's dcache entries.

Signed-off-by: Benjamin Marzinski <bmarzins@redhat.com>
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>

Showing 5 changed files with 65 additions and 5 deletions Side-by-side Diff

... ... @@ -63,6 +63,7 @@
63 63 static DECLARE_RWSEM(gfs2_umount_flush_sem);
64 64 static struct dentry *gfs2_root;
65 65 static struct workqueue_struct *glock_workqueue;
  66 +struct workqueue_struct *gfs2_delete_workqueue;
66 67 static LIST_HEAD(lru_list);
67 68 static atomic_t lru_count = ATOMIC_INIT(0);
68 69 static DEFINE_SPINLOCK(lru_lock);
... ... @@ -167,7 +168,7 @@
167 168 *
168 169 */
169 170  
170   -static void gfs2_glock_hold(struct gfs2_glock *gl)
  171 +void gfs2_glock_hold(struct gfs2_glock *gl)
171 172 {
172 173 GLOCK_BUG_ON(gl, atomic_read(&gl->gl_ref) == 0);
173 174 atomic_inc(&gl->gl_ref);
... ... @@ -222,7 +223,7 @@
222 223 * to the glock, in addition to the one it is dropping.
223 224 */
224 225  
225   -static void gfs2_glock_put_nolock(struct gfs2_glock *gl)
  226 +void gfs2_glock_put_nolock(struct gfs2_glock *gl)
226 227 {
227 228 if (atomic_dec_and_test(&gl->gl_ref))
228 229 GLOCK_BUG_ON(gl, 1);
... ... @@ -679,6 +680,29 @@
679 680 goto out;
680 681 }
681 682  
  683 +static void delete_work_func(struct work_struct *work)
  684 +{
  685 + struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
  686 + struct gfs2_sbd *sdp = gl->gl_sbd;
  687 + struct gfs2_inode *ip = NULL;
  688 + struct inode *inode;
  689 + u64 no_addr = 0;
  690 +
  691 + spin_lock(&gl->gl_spin);
  692 + ip = (struct gfs2_inode *)gl->gl_object;
  693 + if (ip)
  694 + no_addr = ip->i_no_addr;
  695 + spin_unlock(&gl->gl_spin);
  696 + if (ip) {
  697 + inode = gfs2_ilookup(sdp->sd_vfs, no_addr);
  698 + if (inode) {
  699 + d_prune_aliases(inode);
  700 + iput(inode);
  701 + }
  702 + }
  703 + gfs2_glock_put(gl);
  704 +}
  705 +
682 706 static void glock_work_func(struct work_struct *work)
683 707 {
684 708 unsigned long delay = 0;
... ... @@ -757,6 +781,7 @@
757 781 gl->gl_sbd = sdp;
758 782 gl->gl_aspace = NULL;
759 783 INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
  784 + INIT_WORK(&gl->gl_delete, delete_work_func);
760 785  
761 786 /* If this glock protects actual on-disk data or metadata blocks,
762 787 create a VFS inode to manage the pages/buffers holding them. */
... ... @@ -898,6 +923,8 @@
898 923 gl->gl_demote_state != state) {
899 924 gl->gl_demote_state = LM_ST_UNLOCKED;
900 925 }
  926 + if (gl->gl_ops->go_callback)
  927 + gl->gl_ops->go_callback(gl);
901 928 trace_gfs2_demote_rq(gl);
902 929 }
903 930  
904 931  
... ... @@ -1344,14 +1371,14 @@
1344 1371 spin_unlock(&lru_lock);
1345 1372 spin_lock(&gl->gl_spin);
1346 1373 may_demote = demote_ok(gl);
1347   - spin_unlock(&gl->gl_spin);
1348   - clear_bit(GLF_LOCK, &gl->gl_flags);
1349 1374 if (may_demote) {
1350 1375 handle_callback(gl, LM_ST_UNLOCKED, 0);
1351 1376 nr--;
1352 1377 }
1353 1378 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1354   - gfs2_glock_put(gl);
  1379 + gfs2_glock_put_nolock(gl);
  1380 + spin_unlock(&gl->gl_spin);
  1381 + clear_bit(GLF_LOCK, &gl->gl_flags);
1355 1382 spin_lock(&lru_lock);
1356 1383 continue;
1357 1384 }
... ... @@ -1738,6 +1765,11 @@
1738 1765 glock_workqueue = create_workqueue("glock_workqueue");
1739 1766 if (IS_ERR(glock_workqueue))
1740 1767 return PTR_ERR(glock_workqueue);
  1768 + gfs2_delete_workqueue = create_workqueue("delete_workqueue");
  1769 + if (IS_ERR(gfs2_delete_workqueue)) {
  1770 + destroy_workqueue(glock_workqueue);
  1771 + return PTR_ERR(gfs2_delete_workqueue);
  1772 + }
1741 1773  
1742 1774 register_shrinker(&glock_shrinker);
1743 1775  
... ... @@ -1748,6 +1780,7 @@
1748 1780 {
1749 1781 unregister_shrinker(&glock_shrinker);
1750 1782 destroy_workqueue(glock_workqueue);
  1783 + destroy_workqueue(gfs2_delete_workqueue);
1751 1784 }
1752 1785  
1753 1786 static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
... ... @@ -143,6 +143,7 @@
143 143  
144 144 #define GLR_TRYFAILED 13
145 145  
  146 +extern struct workqueue_struct *gfs2_delete_workqueue;
146 147 static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *gl)
147 148 {
148 149 struct gfs2_holder *gh;
... ... @@ -191,6 +192,8 @@
191 192 int gfs2_glock_get(struct gfs2_sbd *sdp,
192 193 u64 number, const struct gfs2_glock_operations *glops,
193 194 int create, struct gfs2_glock **glp);
  195 +void gfs2_glock_hold(struct gfs2_glock *gl);
  196 +void gfs2_glock_put_nolock(struct gfs2_glock *gl);
194 197 int gfs2_glock_put(struct gfs2_glock *gl);
195 198 void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
196 199 struct gfs2_holder *gh);
... ... @@ -323,6 +323,7 @@
323 323  
324 324 if (gl->gl_state != LM_ST_UNLOCKED &&
325 325 test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
  326 + flush_workqueue(gfs2_delete_workqueue);
326 327 gfs2_meta_syncfs(sdp);
327 328 gfs2_log_shutdown(sdp);
328 329 }
... ... @@ -372,6 +373,25 @@
372 373 return 0;
373 374 }
374 375  
  376 +/**
  377 + * iopen_go_callback - schedule the dcache entry for the inode to be deleted
  378 + * @gl: the glock
  379 + *
  380 + * gl_spin lock is held while calling this
  381 + */
  382 +static void iopen_go_callback(struct gfs2_glock *gl)
  383 +{
  384 + struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object;
  385 +
  386 + if (gl->gl_demote_state == LM_ST_UNLOCKED &&
  387 + gl->gl_state == LM_ST_SHARED &&
  388 + ip && test_bit(GIF_USER, &ip->i_flags)) {
  389 + gfs2_glock_hold(gl);
  390 + if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
  391 + gfs2_glock_put_nolock(gl);
  392 + }
  393 +}
  394 +
375 395 const struct gfs2_glock_operations gfs2_meta_glops = {
376 396 .go_type = LM_TYPE_META,
377 397 };
... ... @@ -406,6 +426,7 @@
406 426  
407 427 const struct gfs2_glock_operations gfs2_iopen_glops = {
408 428 .go_type = LM_TYPE_IOPEN,
  429 + .go_callback = iopen_go_callback,
409 430 };
410 431  
411 432 const struct gfs2_glock_operations gfs2_flock_glops = {
... ... @@ -159,6 +159,7 @@
159 159 int (*go_lock) (struct gfs2_holder *gh);
160 160 void (*go_unlock) (struct gfs2_holder *gh);
161 161 int (*go_dump)(struct seq_file *seq, const struct gfs2_glock *gl);
  162 + void (*go_callback) (struct gfs2_glock *gl);
162 163 const int go_type;
163 164 const unsigned long go_min_hold_time;
164 165 };
... ... @@ -228,6 +229,7 @@
228 229 struct list_head gl_ail_list;
229 230 atomic_t gl_ail_count;
230 231 struct delayed_work gl_work;
  232 + struct work_struct gl_delete;
231 233 };
232 234  
233 235 #define GFS2_MIN_LVB_SIZE 32 /* Min size of LVB that gfs2 supports */
... ... @@ -691,6 +691,7 @@
691 691 struct gfs2_holder t_gh;
692 692 int error;
693 693  
  694 + flush_workqueue(gfs2_delete_workqueue);
694 695 gfs2_quota_sync(sdp);
695 696 gfs2_statfs_sync(sdp);
696 697