Commit f661f1e0bf5002bdcc8b5810ad0a184a1841537f

Authored by Dave Chinner
Committed by Ben Myers
1 parent 7f7bebefba

xfs: sync work is now only periodic log work

The only thing the periodic sync work does now is flush the AIL and
idle the log. These are really functions of the log code, so move
the work to xfs_log.c and rename it appropriately.

The only wart that this leaves behind is the xfssyncd_centisecs
sysctl, otherwise the xfssyncd is dead. Clean up any comments that
related to xfssyncd to reflect its passing.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Mark Tinguely <tinguely@sgi.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ben Myers <bpm@sgi.com>

Showing 7 changed files with 62 additions and 61 deletions Side-by-side Diff

... ... @@ -34,6 +34,7 @@
34 34 #include "xfs_dinode.h"
35 35 #include "xfs_inode.h"
36 36 #include "xfs_trace.h"
  37 +#include "xfs_fsops.h"
37 38  
38 39 kmem_zone_t *xfs_log_ticket_zone;
39 40  
40 41  
41 42  
42 43  
43 44  
44 45  
... ... @@ -679,25 +680,29 @@
679 680 }
680 681  
681 682 /*
682   - * Finish the recovery of the file system. This is separate from
683   - * the xfs_log_mount() call, because it depends on the code in
684   - * xfs_mountfs() to read in the root and real-time bitmap inodes
685   - * between calling xfs_log_mount() and here.
  683 + * Finish the recovery of the file system. This is separate from the
  684 + * xfs_log_mount() call, because it depends on the code in xfs_mountfs() to read
  685 + * in the root and real-time bitmap inodes between calling xfs_log_mount() and
  686 + * here.
686 687 *
687   - * mp - ubiquitous xfs mount point structure
  688 + * If we finish recovery successfully, start the background log work. If we are
  689 + * not doing recovery, then we have a RO filesystem and we don't need to start
  690 + * it.
688 691 */
689 692 int
690 693 xfs_log_mount_finish(xfs_mount_t *mp)
691 694 {
692   - int error;
  695 + int error = 0;
693 696  
694   - if (!(mp->m_flags & XFS_MOUNT_NORECOVERY))
  697 + if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) {
695 698 error = xlog_recover_finish(mp->m_log);
696   - else {
697   - error = 0;
  699 + if (!error)
  700 + xfs_log_work_queue(mp);
  701 + } else {
698 702 ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
699 703 }
700 704  
  705 +
701 706 return error;
702 707 }
703 708  
... ... @@ -858,7 +863,7 @@
858 863 void
859 864 xfs_log_unmount(xfs_mount_t *mp)
860 865 {
861   - cancel_delayed_work_sync(&mp->m_sync_work);
  866 + cancel_delayed_work_sync(&mp->m_log->l_work);
862 867 xfs_trans_ail_destroy(mp);
863 868 xlog_dealloc_log(mp->m_log);
864 869 }
865 870  
... ... @@ -1161,7 +1166,41 @@
1161 1166 } /* xlog_get_iclog_buffer_size */
1162 1167  
1163 1168  
  1169 +void
  1170 +xfs_log_work_queue(
  1171 + struct xfs_mount *mp)
  1172 +{
  1173 + queue_delayed_work(xfs_syncd_wq, &mp->m_log->l_work,
  1174 + msecs_to_jiffies(xfs_syncd_centisecs * 10));
  1175 +}
  1176 +
1164 1177 /*
  1178 + * Every sync period we need to unpin all items in the AIL and push them to
  1179 + * disk. If there is nothing dirty, then we might need to cover the log to
  1180 + * indicate that the filesystem is idle.
  1181 + */
  1182 +void
  1183 +xfs_log_worker(
  1184 + struct work_struct *work)
  1185 +{
  1186 + struct xlog *log = container_of(to_delayed_work(work),
  1187 + struct xlog, l_work);
  1188 + struct xfs_mount *mp = log->l_mp;
  1189 +
  1190 + /* dgc: errors ignored - not fatal and nowhere to report them */
  1191 + if (xfs_log_need_covered(mp))
  1192 + xfs_fs_log_dummy(mp);
  1193 + else
  1194 + xfs_log_force(mp, 0);
  1195 +
  1196 + /* start pushing all the metadata that is currently dirty */
  1197 + xfs_ail_push_all(mp->m_ail);
  1198 +
  1199 + /* queue us up again */
  1200 + xfs_log_work_queue(mp);
  1201 +}
  1202 +
  1203 +/*
1165 1204 * This routine initializes some of the log structure for a given mount point.
1166 1205 * Its primary purpose is to fill in enough, so recovery can occur. However,
1167 1206 * some other stuff may be filled in too.
... ... @@ -1195,6 +1234,7 @@
1195 1234 log->l_logBBsize = num_bblks;
1196 1235 log->l_covered_state = XLOG_STATE_COVER_IDLE;
1197 1236 log->l_flags |= XLOG_ACTIVE_RECOVERY;
  1237 + INIT_DELAYED_WORK(&log->l_work, xfs_log_worker);
1198 1238  
1199 1239 log->l_prev_block = -1;
1200 1240 /* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */
... ... @@ -181,6 +181,9 @@
181 181 xfs_lsn_t *commit_lsn, int flags);
182 182 bool xfs_log_item_in_current_chkpt(struct xfs_log_item *lip);
183 183  
  184 +void xfs_log_work_queue(struct xfs_mount *mp);
  185 +void xfs_log_worker(struct work_struct *work);
  186 +
184 187 #endif
185 188 #endif /* __XFS_LOG_H__ */
fs/xfs/xfs_log_priv.h
... ... @@ -495,6 +495,7 @@
495 495 struct xfs_buf *l_xbuf; /* extra buffer for log
496 496 * wrapping */
497 497 struct xfs_buftarg *l_targ; /* buftarg of log */
  498 + struct delayed_work l_work; /* background flush work */
498 499 uint l_flags;
499 500 uint l_quotaoffs_flag; /* XFS_DQ_*, for QUOTAOFFs */
500 501 struct list_head *l_buf_cancel_table;
... ... @@ -197,7 +197,6 @@
197 197 struct mutex m_icsb_mutex; /* balancer sync lock */
198 198 #endif
199 199 struct xfs_mru_cache *m_filestream; /* per-mount filestream data */
200   - struct delayed_work m_sync_work; /* background sync work */
201 200 struct delayed_work m_reclaim_work; /* background inode reclaim */
202 201 struct work_struct m_flush_work; /* background inode flush */
203 202 __int64_t m_update_flags; /* sb flags we need to update
... ... @@ -1005,7 +1005,6 @@
1005 1005 {
1006 1006 struct xfs_mount *mp = XFS_M(sb);
1007 1007  
1008   - cancel_delayed_work_sync(&mp->m_sync_work);
1009 1008 cancel_work_sync(&mp->m_flush_work);
1010 1009  
1011 1010 xfs_filestream_unmount(mp);
1012 1011  
... ... @@ -1040,10 +1039,10 @@
1040 1039 if (laptop_mode) {
1041 1040 /*
1042 1041 * The disk must be active because we're syncing.
1043   - * We schedule xfssyncd now (now that the disk is
  1042 + * We schedule log work now (now that the disk is
1044 1043 * active) instead of later (when it might not be).
1045 1044 */
1046   - flush_delayed_work(&mp->m_sync_work);
  1045 + flush_delayed_work(&mp->m_log->l_work);
1047 1046 }
1048 1047  
1049 1048 return 0;
... ... @@ -1200,7 +1199,7 @@
1200 1199 * value if it is non-zero, otherwise go with the default.
1201 1200 */
1202 1201 xfs_restore_resvblks(mp);
1203   - xfs_syncd_queue_sync(mp);
  1202 + xfs_log_work_queue(mp);
1204 1203 }
1205 1204  
1206 1205 /* rw -> ro */
... ... @@ -1246,7 +1245,7 @@
1246 1245 struct xfs_mount *mp = XFS_M(sb);
1247 1246  
1248 1247 xfs_restore_resvblks(mp);
1249   - xfs_syncd_queue_sync(mp);
  1248 + xfs_log_work_queue(mp);
1250 1249 return 0;
1251 1250 }
1252 1251  
... ... @@ -1326,7 +1325,6 @@
1326 1325 mutex_init(&mp->m_growlock);
1327 1326 atomic_set(&mp->m_active_trans, 0);
1328 1327 INIT_WORK(&mp->m_flush_work, xfs_flush_worker);
1329   - INIT_DELAYED_WORK(&mp->m_sync_work, xfs_sync_worker);
1330 1328 INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
1331 1329  
1332 1330 mp->m_super = sb;
... ... @@ -1409,12 +1407,6 @@
1409 1407 error = ENOMEM;
1410 1408 goto out_unmount;
1411 1409 }
1412   -
1413   - /*
1414   - * The filesystem is successfully mounted, so we can start background
1415   - * sync work now.
1416   - */
1417   - xfs_syncd_queue_sync(mp);
1418 1410  
1419 1411 return 0;
1420 1412  
... ... @@ -19,6 +19,7 @@
19 19 #include "xfs_fs.h"
20 20 #include "xfs_types.h"
21 21 #include "xfs_log.h"
  22 +#include "xfs_log_priv.h"
22 23 #include "xfs_inum.h"
23 24 #include "xfs_trans.h"
24 25 #include "xfs_trans_priv.h"
... ... @@ -344,8 +345,8 @@
344 345 /* flush all pending changes from the AIL */
345 346 xfs_ail_push_all_sync(mp->m_ail);
346 347  
347   - /* stop background sync work */
348   - cancel_delayed_work_sync(&mp->m_sync_work);
  348 + /* stop background log work */
  349 + cancel_delayed_work_sync(&mp->m_log->l_work);
349 350  
350 351 /*
351 352 * Just warn here till VFS can correctly support
... ... @@ -374,40 +375,6 @@
374 375 */
375 376 xfs_buf_lock(mp->m_sb_bp);
376 377 xfs_buf_unlock(mp->m_sb_bp);
377   -}
378   -
379   -void
380   -xfs_syncd_queue_sync(
381   - struct xfs_mount *mp)
382   -{
383   - queue_delayed_work(xfs_syncd_wq, &mp->m_sync_work,
384   - msecs_to_jiffies(xfs_syncd_centisecs * 10));
385   -}
386   -
387   -/*
388   - * Every sync period we need to push dirty metadata and try to cover the log
389   - * to indicate the filesystem is idle and not frozen.
390   - */
391   -void
392   -xfs_sync_worker(
393   - struct work_struct *work)
394   -{
395   - struct xfs_mount *mp = container_of(to_delayed_work(work),
396   - struct xfs_mount, m_sync_work);
397   - int error;
398   -
399   - /* dgc: errors ignored here */
400   - if (mp->m_super->s_writers.frozen == SB_UNFROZEN &&
401   - xfs_log_need_covered(mp))
402   - error = xfs_fs_log_dummy(mp);
403   - else
404   - xfs_log_force(mp, 0);
405   -
406   - /* start pushing all the metadata that is currently dirty */
407   - xfs_ail_push_all(mp->m_ail);
408   -
409   - /* queue us up again */
410   - xfs_syncd_queue_sync(mp);
411 378 }
412 379  
413 380 /*
... ... @@ -26,8 +26,6 @@
26 26  
27 27 extern struct workqueue_struct *xfs_syncd_wq; /* sync workqueue */
28 28  
29   -void xfs_syncd_queue_sync(struct xfs_mount *mp);
30   -void xfs_sync_worker(struct work_struct *work);
31 29 void xfs_flush_worker(struct work_struct *work);
32 30 void xfs_reclaim_worker(struct work_struct *work);
33 31