Commit 87e99511ea54510ffb60b98001d108794d5037f8

Authored by Christoph Hellwig
Committed by Al Viro
1 parent dad5eb6daa

kill BH_Ordered flag

Instead of abusing a buffer_head flag just add a variant of
sync_dirty_buffer which allows passing the exact type of write
flag required.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>

Showing 5 changed files with 63 additions and 73 deletions
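
The caller-side effect of the change, condensed from the jbd hunk below (the -EOPNOTSUPP fallback is omitted; this is only an illustrative sketch, not part of the patch itself):

        /* before: request a barrier by tagging the buffer_head */
        if (journal->j_flags & JFS_BARRIER) {
                set_buffer_ordered(bh);
                barrier_done = 1;
        }
        ret = sync_dirty_buffer(bh);
        if (barrier_done)
                clear_buffer_ordered(bh);

        /* after: say what kind of write is wanted at the call site */
        if (journal->j_flags & JFS_BARRIER)
                ret = __sync_dirty_buffer(bh, WRITE_SYNC | WRITE_BARRIER);
        else
                ret = sync_dirty_buffer(bh);    /* still issues WRITE_SYNC */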

fs/buffer.c
@@ -2912,13 +2912,6 @@
         BUG_ON(buffer_unwritten(bh));
 
         /*
-         * Mask in barrier bit for a write (could be either a WRITE or a
-         * WRITE_SYNC
-         */
-        if (buffer_ordered(bh) && (rw & WRITE))
-                rw |= WRITE_BARRIER;
-
-        /*
          * Only clear out a write error when rewriting
          */
         if (test_set_buffer_req(bh) && (rw & WRITE))
@@ -3021,7 +3014,7 @@
  * and then start new I/O and then wait upon it. The caller must have a ref on
  * the buffer_head.
  */
-int sync_dirty_buffer(struct buffer_head *bh)
+int __sync_dirty_buffer(struct buffer_head *bh, int rw)
 {
         int ret = 0;
 
@@ -3030,7 +3023,7 @@
         if (test_clear_buffer_dirty(bh)) {
                 get_bh(bh);
                 bh->b_end_io = end_buffer_write_sync;
-                ret = submit_bh(WRITE_SYNC, bh);
+                ret = submit_bh(rw, bh);
                 wait_on_buffer(bh);
                 if (buffer_eopnotsupp(bh)) {
                         clear_buffer_eopnotsupp(bh);
@@ -3042,6 +3035,12 @@
                 unlock_buffer(bh);
         }
         return ret;
+}
+EXPORT_SYMBOL(__sync_dirty_buffer);
+
+int sync_dirty_buffer(struct buffer_head *bh)
+{
+        return __sync_dirty_buffer(bh, WRITE_SYNC);
 }
 EXPORT_SYMBOL(sync_dirty_buffer);
 
fs/jbd/commit.c
@@ -119,7 +119,6 @@
         struct buffer_head *bh;
         journal_header_t *header;
         int ret;
-        int barrier_done = 0;
 
         if (is_journal_aborted(journal))
                 return 0;
@@ -137,34 +136,36 @@
 
         JBUFFER_TRACE(descriptor, "write commit block");
         set_buffer_dirty(bh);
+
         if (journal->j_flags & JFS_BARRIER) {
-                set_buffer_ordered(bh);
-                barrier_done = 1;
-        }
-        ret = sync_dirty_buffer(bh);
-        if (barrier_done)
-                clear_buffer_ordered(bh);
-        /* is it possible for another commit to fail at roughly
-         * the same time as this one? If so, we don't want to
-         * trust the barrier flag in the super, but instead want
-         * to remember if we sent a barrier request
-         */
-        if (ret == -EOPNOTSUPP && barrier_done) {
-                char b[BDEVNAME_SIZE];
+                ret = __sync_dirty_buffer(bh, WRITE_SYNC | WRITE_BARRIER);
 
-                printk(KERN_WARNING
-                        "JBD: barrier-based sync failed on %s - "
-                        "disabling barriers\n",
-                        bdevname(journal->j_dev, b));
-                spin_lock(&journal->j_state_lock);
-                journal->j_flags &= ~JFS_BARRIER;
-                spin_unlock(&journal->j_state_lock);
+                /*
+                 * Is it possible for another commit to fail at roughly
+                 * the same time as this one? If so, we don't want to
+                 * trust the barrier flag in the super, but instead want
+                 * to remember if we sent a barrier request
+                 */
+                if (ret == -EOPNOTSUPP) {
+                        char b[BDEVNAME_SIZE];
 
-                /* And try again, without the barrier */
-                set_buffer_uptodate(bh);
-                set_buffer_dirty(bh);
+                        printk(KERN_WARNING
+                                "JBD: barrier-based sync failed on %s - "
+                                "disabling barriers\n",
+                                bdevname(journal->j_dev, b));
+                        spin_lock(&journal->j_state_lock);
+                        journal->j_flags &= ~JFS_BARRIER;
+                        spin_unlock(&journal->j_state_lock);
+
+                        /* And try again, without the barrier */
+                        set_buffer_uptodate(bh);
+                        set_buffer_dirty(bh);
+                        ret = sync_dirty_buffer(bh);
+                }
+        } else {
                 ret = sync_dirty_buffer(bh);
         }
+
         put_bh(bh);             /* One for getblk() */
         journal_put_journal_head(descriptor);
 
fs/jbd2/commit.c
@@ -101,7 +101,6 @@
         struct commit_header *tmp;
         struct buffer_head *bh;
         int ret;
-        int barrier_done = 0;
         struct timespec now = current_kernel_time();
 
         if (is_journal_aborted(journal))
@@ -136,30 +135,22 @@
         if (journal->j_flags & JBD2_BARRIER &&
             !JBD2_HAS_INCOMPAT_FEATURE(journal,
                                        JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
-                set_buffer_ordered(bh);
-                barrier_done = 1;
-        }
-        ret = submit_bh(WRITE_SYNC_PLUG, bh);
-        if (barrier_done)
-                clear_buffer_ordered(bh);
+                ret = submit_bh(WRITE_SYNC_PLUG | WRITE_BARRIER, bh);
+                if (ret == -EOPNOTSUPP) {
+                        printk(KERN_WARNING
+                                "JBD2: Disabling barriers on %s, "
+                                "not supported by device\n", journal->j_devname);
+                        write_lock(&journal->j_state_lock);
+                        journal->j_flags &= ~JBD2_BARRIER;
+                        write_unlock(&journal->j_state_lock);
 
-        /* is it possible for another commit to fail at roughly
-         * the same time as this one? If so, we don't want to
-         * trust the barrier flag in the super, but instead want
-         * to remember if we sent a barrier request
-         */
-        if (ret == -EOPNOTSUPP && barrier_done) {
-                printk(KERN_WARNING
-                        "JBD2: Disabling barriers on %s, "
-                        "not supported by device\n", journal->j_devname);
-                write_lock(&journal->j_state_lock);
-                journal->j_flags &= ~JBD2_BARRIER;
-                write_unlock(&journal->j_state_lock);
-
-                /* And try again, without the barrier */
-                lock_buffer(bh);
-                set_buffer_uptodate(bh);
-                clear_buffer_dirty(bh);
+                        /* And try again, without the barrier */
+                        lock_buffer(bh);
+                        set_buffer_uptodate(bh);
+                        clear_buffer_dirty(bh);
+                        ret = submit_bh(WRITE_SYNC_PLUG, bh);
+                }
+        } else {
                 ret = submit_bh(WRITE_SYNC_PLUG, bh);
         }
         *cbh = bh;
fs/nilfs2/super.c
@@ -175,24 +175,24 @@
 {
         struct the_nilfs *nilfs = sbi->s_nilfs;
         int err;
-        int barrier_done = 0;
 
-        if (nilfs_test_opt(sbi, BARRIER)) {
-                set_buffer_ordered(nilfs->ns_sbh[0]);
-                barrier_done = 1;
-        }
 retry:
         set_buffer_dirty(nilfs->ns_sbh[0]);
-        err = sync_dirty_buffer(nilfs->ns_sbh[0]);
-        if (err == -EOPNOTSUPP && barrier_done) {
-                nilfs_warning(sbi->s_super, __func__,
-                              "barrier-based sync failed. "
-                              "disabling barriers\n");
-                nilfs_clear_opt(sbi, BARRIER);
-                barrier_done = 0;
-                clear_buffer_ordered(nilfs->ns_sbh[0]);
-                goto retry;
+
+        if (nilfs_test_opt(sbi, BARRIER)) {
+                err = __sync_dirty_buffer(nilfs->ns_sbh[0],
+                                          WRITE_SYNC | WRITE_BARRIER);
+                if (err == -EOPNOTSUPP) {
+                        nilfs_warning(sbi->s_super, __func__,
+                                      "barrier-based sync failed. "
+                                      "disabling barriers\n");
+                        nilfs_clear_opt(sbi, BARRIER);
+                        goto retry;
+                }
+        } else {
+                err = sync_dirty_buffer(nilfs->ns_sbh[0]);
         }
+
         if (unlikely(err)) {
                 printk(KERN_ERR
                        "NILFS: unable to write superblock (err=%d)\n", err);
include/linux/buffer_head.h
@@ -32,7 +32,6 @@
         BH_Delay,       /* Buffer is not yet allocated on disk */
         BH_Boundary,    /* Block is followed by a discontiguity */
         BH_Write_EIO,   /* I/O error on write */
-        BH_Ordered,     /* ordered write */
         BH_Eopnotsupp,  /* operation not supported (barrier) */
         BH_Unwritten,   /* Buffer is allocated on disk but not written */
         BH_Quiet,       /* Buffer Error Prinks to be quiet */
@@ -125,7 +124,6 @@
 BUFFER_FNS(Delay, delay)
 BUFFER_FNS(Boundary, boundary)
 BUFFER_FNS(Write_EIO, write_io_error)
-BUFFER_FNS(Ordered, ordered)
 BUFFER_FNS(Eopnotsupp, eopnotsupp)
 BUFFER_FNS(Unwritten, unwritten)
 
@@ -183,6 +181,7 @@
 void __lock_buffer(struct buffer_head *bh);
 void ll_rw_block(int, int, struct buffer_head * bh[]);
 int sync_dirty_buffer(struct buffer_head *bh);
+int __sync_dirty_buffer(struct buffer_head *bh, int rw);
 int submit_bh(int, struct buffer_head *);
 void write_boundary_block(struct block_device *bdev,
                         sector_t bblock, unsigned blocksize);
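
For a new caller, the pattern used by the converted filesystems boils down to the sketch below. The myfs_* names are hypothetical; only __sync_dirty_buffer(), sync_dirty_buffer() and the WRITE_SYNC/WRITE_BARRIER flags come from this patch and the existing headers, and the retry-without-barrier logic mirrors the nilfs2 and jbd hunks above.

#include <linux/fs.h>
#include <linux/buffer_head.h>

/*
 * Hypothetical helper: write one dirty metadata buffer, using a barrier
 * only when the mount asked for one, and falling back when the device
 * reports -EOPNOTSUPP (the same dance as nilfs2/jbd above).
 */
static int myfs_write_super_bh(struct buffer_head *bh, bool use_barrier)
{
        int err;

retry:
        set_buffer_dirty(bh);
        if (use_barrier) {
                err = __sync_dirty_buffer(bh, WRITE_SYNC | WRITE_BARRIER);
                if (err == -EOPNOTSUPP) {
                        /* no barrier support: disable it and try again */
                        use_barrier = false;
                        goto retry;
                }
        } else {
                err = sync_dirty_buffer(bh);
        }
        return err;
}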