Commit f7f4bccb729844a0fa873e224e3a6f7eeed095bb

Authored by Mingming Cao
Committed by Linus Torvalds
1 parent 470decc613

[PATCH] jbd2: rename jbd2 symbols to avoid duplication of jbd symbols

Mingming Cao originally did this work, and Shaggy reproduced it using some
scripts from her.

Signed-off-by: Mingming Cao <cmm@us.ibm.com>
Signed-off-by: Dave Kleikamp <shaggy@austin.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Showing 9 changed files with 672 additions and 672 deletions
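
The rename gives every exported entry point a jbd2_ prefix and turns the JFS_* flag and magic-number macros into JBD2_*, so jbd and jbd2 can be built into the same kernel without symbol clashes. A hypothetical caller written against the renamed layer would look roughly like the sketch below; only the jbd2_* names come from this patch, the surrounding filesystem code is illustrative.

#include <linux/jbd2.h>
#include <linux/err.h>

/* Illustrative caller of the renamed API (hypothetical filesystem code;
 * only the jbd2_* names are taken from this patch). */
static int my_fs_update_block(journal_t *journal, struct buffer_head *bh)
{
	handle_t *handle;
	int err;

	handle = jbd2_journal_start(journal, 1);	/* was journal_start() */
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	err = jbd2_journal_get_write_access(handle, bh);
	if (!err) {
		/* ... modify bh->b_data under the handle ... */
		err = jbd2_journal_dirty_metadata(handle, bh);
	}

	jbd2_journal_stop(handle);			/* was journal_stop() */
	return err;
}
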

fs/jbd2/Makefile
... ... @@ -2,7 +2,7 @@
2 2 # Makefile for the linux journaling routines.
3 3 #
4 4  
5   -obj-$(CONFIG_JBD) += jbd.o
  5 +obj-$(CONFIG_JBD2) += jbd2.o
6 6  
7   -jbd-objs := transaction.o commit.o recovery.o checkpoint.o revoke.o journal.o
  7 +jbd2-objs := transaction.o commit.o recovery.o checkpoint.o revoke.o journal.o
fs/jbd2/checkpoint.c
... ... @@ -19,7 +19,7 @@
19 19  
20 20 #include <linux/time.h>
21 21 #include <linux/fs.h>
22   -#include <linux/jbd.h>
  22 +#include <linux/jbd2.h>
23 23 #include <linux/errno.h>
24 24 #include <linux/slab.h>
25 25  
26 26  
... ... @@ -95,9 +95,9 @@
95 95  
96 96 if (jh->b_jlist == BJ_None && !buffer_locked(bh) && !buffer_dirty(bh)) {
97 97 JBUFFER_TRACE(jh, "remove from checkpoint list");
98   - ret = __journal_remove_checkpoint(jh) + 1;
  98 + ret = __jbd2_journal_remove_checkpoint(jh) + 1;
99 99 jbd_unlock_bh_state(bh);
100   - journal_remove_journal_head(bh);
  100 + jbd2_journal_remove_journal_head(bh);
101 101 BUFFER_TRACE(bh, "release");
102 102 __brelse(bh);
103 103 } else {
104 104  
105 105  
... ... @@ -107,19 +107,19 @@
107 107 }
108 108  
109 109 /*
110   - * __log_wait_for_space: wait until there is space in the journal.
  110 + * __jbd2_log_wait_for_space: wait until there is space in the journal.
111 111 *
112 112 * Called under j-state_lock *only*. It will be unlocked if we have to wait
113 113 * for a checkpoint to free up some space in the log.
114 114 */
115   -void __log_wait_for_space(journal_t *journal)
  115 +void __jbd2_log_wait_for_space(journal_t *journal)
116 116 {
117 117 int nblocks;
118 118 assert_spin_locked(&journal->j_state_lock);
119 119  
120 120 nblocks = jbd_space_needed(journal);
121   - while (__log_space_left(journal) < nblocks) {
122   - if (journal->j_flags & JFS_ABORT)
  121 + while (__jbd2_log_space_left(journal) < nblocks) {
  122 + if (journal->j_flags & JBD2_ABORT)
123 123 return;
124 124 spin_unlock(&journal->j_state_lock);
125 125 mutex_lock(&journal->j_checkpoint_mutex);
126 126  
... ... @@ -130,9 +130,9 @@
130 130 */
131 131 spin_lock(&journal->j_state_lock);
132 132 nblocks = jbd_space_needed(journal);
133   - if (__log_space_left(journal) < nblocks) {
  133 + if (__jbd2_log_space_left(journal) < nblocks) {
134 134 spin_unlock(&journal->j_state_lock);
135   - log_do_checkpoint(journal);
  135 + jbd2_log_do_checkpoint(journal);
136 136 spin_lock(&journal->j_state_lock);
137 137 }
138 138 mutex_unlock(&journal->j_checkpoint_mutex);
139 139  
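
The contract spelled out in the comment above is that __jbd2_log_wait_for_space() is entered with j_state_lock held and returns with it held again, dropping it internally only while a checkpoint runs. A hypothetical caller honouring that contract (the real user is the transaction code, which layers credit accounting on top) might look like:

#include <linux/jbd2.h>

/* Hypothetical caller sketch; not the kernel's start_this_handle(). */
static void reserve_log_space(journal_t *journal)
{
	spin_lock(&journal->j_state_lock);
	if (__jbd2_log_space_left(journal) < jbd_space_needed(journal))
		__jbd2_log_wait_for_space(journal); /* may drop/retake the lock */
	spin_unlock(&journal->j_state_lock);
}
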
... ... @@ -198,9 +198,9 @@
198 198 * Now in whatever state the buffer currently is, we know that
199 199 * it has been written out and so we can drop it from the list
200 200 */
201   - released = __journal_remove_checkpoint(jh);
  201 + released = __jbd2_journal_remove_checkpoint(jh);
202 202 jbd_unlock_bh_state(bh);
203   - journal_remove_journal_head(bh);
  203 + jbd2_journal_remove_journal_head(bh);
204 204 __brelse(bh);
205 205 }
206 206 }
207 207  
208 208  
... ... @@ -252,16 +252,16 @@
252 252  
253 253 spin_unlock(&journal->j_list_lock);
254 254 jbd_unlock_bh_state(bh);
255   - log_start_commit(journal, tid);
256   - log_wait_commit(journal, tid);
  255 + jbd2_log_start_commit(journal, tid);
  256 + jbd2_log_wait_commit(journal, tid);
257 257 ret = 1;
258 258 } else if (!buffer_dirty(bh)) {
259 259 J_ASSERT_JH(jh, !buffer_jbddirty(bh));
260 260 BUFFER_TRACE(bh, "remove from checkpoint");
261   - __journal_remove_checkpoint(jh);
  261 + __jbd2_journal_remove_checkpoint(jh);
262 262 spin_unlock(&journal->j_list_lock);
263 263 jbd_unlock_bh_state(bh);
264   - journal_remove_journal_head(bh);
  264 + jbd2_journal_remove_journal_head(bh);
265 265 __brelse(bh);
266 266 ret = 1;
267 267 } else {
... ... @@ -296,7 +296,7 @@
296 296 *
297 297 * The journal should be locked before calling this function.
298 298 */
299   -int log_do_checkpoint(journal_t *journal)
  299 +int jbd2_log_do_checkpoint(journal_t *journal)
300 300 {
301 301 transaction_t *transaction;
302 302 tid_t this_tid;
... ... @@ -309,7 +309,7 @@
309 309 * don't need checkpointing, just eliminate them from the
310 310 * journal straight away.
311 311 */
312   - result = cleanup_journal_tail(journal);
  312 + result = jbd2_cleanup_journal_tail(journal);
313 313 jbd_debug(1, "cleanup_journal_tail returned %d\n", result);
314 314 if (result <= 0)
315 315 return result;
... ... @@ -374,7 +374,7 @@
374 374 }
375 375 out:
376 376 spin_unlock(&journal->j_list_lock);
377   - result = cleanup_journal_tail(journal);
  377 + result = jbd2_cleanup_journal_tail(journal);
378 378 if (result < 0)
379 379 return result;
380 380 return 0;
... ... @@ -397,7 +397,7 @@
397 397 * we have an abort error outstanding.
398 398 */
399 399  
400   -int cleanup_journal_tail(journal_t *journal)
  400 +int jbd2_cleanup_journal_tail(journal_t *journal)
401 401 {
402 402 transaction_t * transaction;
403 403 tid_t first_tid;
... ... @@ -452,8 +452,8 @@
452 452 journal->j_tail_sequence = first_tid;
453 453 journal->j_tail = blocknr;
454 454 spin_unlock(&journal->j_state_lock);
455   - if (!(journal->j_flags & JFS_ABORT))
456   - journal_update_superblock(journal, 1);
  455 + if (!(journal->j_flags & JBD2_ABORT))
  456 + jbd2_journal_update_superblock(journal, 1);
457 457 return 0;
458 458 }
459 459  
... ... @@ -518,7 +518,7 @@
518 518 * Returns number of buffers reaped (for debug)
519 519 */
520 520  
521   -int __journal_clean_checkpoint_list(journal_t *journal)
  521 +int __jbd2_journal_clean_checkpoint_list(journal_t *journal)
522 522 {
523 523 transaction_t *transaction, *last_transaction, *next_transaction;
524 524 int ret = 0;
... ... @@ -578,7 +578,7 @@
578 578 * This function is called with jbd_lock_bh_state(jh2bh(jh))
579 579 */
580 580  
581   -int __journal_remove_checkpoint(struct journal_head *jh)
  581 +int __jbd2_journal_remove_checkpoint(struct journal_head *jh)
582 582 {
583 583 transaction_t *transaction;
584 584 journal_t *journal;
... ... @@ -607,7 +607,7 @@
607 607 * dropped!
608 608 *
609 609 * The locking here around j_committing_transaction is a bit sleazy.
610   - * See the comment at the end of journal_commit_transaction().
  610 + * See the comment at the end of jbd2_journal_commit_transaction().
611 611 */
612 612 if (transaction == journal->j_committing_transaction) {
613 613 JBUFFER_TRACE(jh, "belongs to committing transaction");
... ... @@ -617,7 +617,7 @@
617 617 /* OK, that was the last buffer for the transaction: we can now
618 618 safely remove this transaction from the log */
619 619  
620   - __journal_drop_transaction(journal, transaction);
  620 + __jbd2_journal_drop_transaction(journal, transaction);
621 621  
622 622 /* Just in case anybody was waiting for more transactions to be
623 623 checkpointed... */
... ... @@ -636,7 +636,7 @@
636 636 * Called with the journal locked.
637 637 * Called with j_list_lock held.
638 638 */
639   -void __journal_insert_checkpoint(struct journal_head *jh,
  639 +void __jbd2_journal_insert_checkpoint(struct journal_head *jh,
640 640 transaction_t *transaction)
641 641 {
642 642 JBUFFER_TRACE(jh, "entry");
... ... @@ -666,7 +666,7 @@
666 666 * Called with j_list_lock held.
667 667 */
668 668  
669   -void __journal_drop_transaction(journal_t *journal, transaction_t *transaction)
  669 +void __jbd2_journal_drop_transaction(journal_t *journal, transaction_t *transaction)
670 670 {
671 671 assert_spin_locked(&journal->j_list_lock);
672 672 if (transaction->t_cpnext) {
fs/jbd2/commit.c
1 1 /*
2   - * linux/fs/jbd/commit.c
  2 + * linux/fs/jbd2/commit.c
3 3 *
4 4 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
5 5 *
... ... @@ -15,7 +15,7 @@
15 15  
16 16 #include <linux/time.h>
17 17 #include <linux/fs.h>
18   -#include <linux/jbd.h>
  18 +#include <linux/jbd2.h>
19 19 #include <linux/errno.h>
20 20 #include <linux/slab.h>
21 21 #include <linux/mm.h>
... ... @@ -111,7 +111,7 @@
111 111 if (is_journal_aborted(journal))
112 112 return 0;
113 113  
114   - descriptor = journal_get_descriptor_buffer(journal);
  114 + descriptor = jbd2_journal_get_descriptor_buffer(journal);
115 115 if (!descriptor)
116 116 return 1;
117 117  
118 118  
... ... @@ -120,14 +120,14 @@
120 120 /* AKPM: buglet - add `i' to tmp! */
121 121 for (i = 0; i < bh->b_size; i += 512) {
122 122 journal_header_t *tmp = (journal_header_t*)bh->b_data;
123   - tmp->h_magic = cpu_to_be32(JFS_MAGIC_NUMBER);
124   - tmp->h_blocktype = cpu_to_be32(JFS_COMMIT_BLOCK);
  123 + tmp->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER);
  124 + tmp->h_blocktype = cpu_to_be32(JBD2_COMMIT_BLOCK);
125 125 tmp->h_sequence = cpu_to_be32(commit_transaction->t_tid);
126 126 }
127 127  
128 128 JBUFFER_TRACE(descriptor, "write commit block");
129 129 set_buffer_dirty(bh);
130   - if (journal->j_flags & JFS_BARRIER) {
  130 + if (journal->j_flags & JBD2_BARRIER) {
131 131 set_buffer_ordered(bh);
132 132 barrier_done = 1;
133 133 }
... ... @@ -145,7 +145,7 @@
145 145 "disabling barriers\n",
146 146 bdevname(journal->j_dev, b));
147 147 spin_lock(&journal->j_state_lock);
148   - journal->j_flags &= ~JFS_BARRIER;
  148 + journal->j_flags &= ~JBD2_BARRIER;
149 149 spin_unlock(&journal->j_state_lock);
150 150  
151 151 /* And try again, without the barrier */
... ... @@ -155,7 +155,7 @@
155 155 ret = sync_dirty_buffer(bh);
156 156 }
157 157 put_bh(bh); /* One for getblk() */
158   - journal_put_journal_head(descriptor);
  158 + jbd2_journal_put_journal_head(descriptor);
159 159  
160 160 return (ret == -EIO);
161 161 }
... ... @@ -239,7 +239,7 @@
239 239 if (locked && test_clear_buffer_dirty(bh)) {
240 240 BUFFER_TRACE(bh, "needs writeout, adding to array");
241 241 wbuf[bufs++] = bh;
242   - __journal_file_buffer(jh, commit_transaction,
  242 + __jbd2_journal_file_buffer(jh, commit_transaction,
243 243 BJ_Locked);
244 244 jbd_unlock_bh_state(bh);
245 245 if (bufs == journal->j_wbufsize) {
246 246  
247 247  
... ... @@ -251,13 +251,13 @@
251 251 }
252 252 else {
253 253 BUFFER_TRACE(bh, "writeout complete: unfile");
254   - __journal_unfile_buffer(jh);
  254 + __jbd2_journal_unfile_buffer(jh);
255 255 jbd_unlock_bh_state(bh);
256 256 if (locked)
257 257 unlock_buffer(bh);
258   - journal_remove_journal_head(bh);
  258 + jbd2_journal_remove_journal_head(bh);
259 259 /* Once for our safety reference, once for
260   - * journal_remove_journal_head() */
  260 + * jbd2_journal_remove_journal_head() */
261 261 put_bh(bh);
262 262 put_bh(bh);
263 263 }
264 264  
... ... @@ -272,12 +272,12 @@
272 272 }
273 273  
274 274 /*
275   - * journal_commit_transaction
  275 + * jbd2_journal_commit_transaction
276 276 *
277 277 * The primary function for committing a transaction to the log. This
278 278 * function is called by the journal thread to begin a complete commit.
279 279 */
280   -void journal_commit_transaction(journal_t *journal)
  280 +void jbd2_journal_commit_transaction(journal_t *journal)
281 281 {
282 282 transaction_t *commit_transaction;
283 283 struct journal_head *jh, *new_jh, *descriptor;
284 284  
... ... @@ -305,10 +305,10 @@
305 305 spin_unlock(&journal->j_list_lock);
306 306 #endif
307 307  
308   - /* Do we need to erase the effects of a prior journal_flush? */
309   - if (journal->j_flags & JFS_FLUSHED) {
  308 + /* Do we need to erase the effects of a prior jbd2_journal_flush? */
  309 + if (journal->j_flags & JBD2_FLUSHED) {
310 310 jbd_debug(3, "super block updated\n");
311   - journal_update_superblock(journal, 1);
  311 + jbd2_journal_update_superblock(journal, 1);
312 312 } else {
313 313 jbd_debug(3, "superblock not updated\n");
314 314 }
... ... @@ -350,7 +350,7 @@
350 350 * BJ_Reserved buffers. Note, it is _not_ permissible to assume
351 351 * that there are no such buffers: if a large filesystem
352 352 * operation like a truncate needs to split itself over multiple
353   - * transactions, then it may try to do a journal_restart() while
  353 + * transactions, then it may try to do a jbd2_journal_restart() while
354 354 * there are still BJ_Reserved buffers outstanding. These must
355 355 * be released cleanly from the current transaction.
356 356 *
357 357  
358 358  
359 359  
... ... @@ -358,25 +358,25 @@
358 358 * again before modifying the buffer in the new transaction, but
359 359 * we do not require it to remember exactly which old buffers it
360 360 * has reserved. This is consistent with the existing behaviour
361   - * that multiple journal_get_write_access() calls to the same
  361 + * that multiple jbd2_journal_get_write_access() calls to the same
362 362 * buffer are perfectly permissable.
363 363 */
364 364 while (commit_transaction->t_reserved_list) {
365 365 jh = commit_transaction->t_reserved_list;
366 366 JBUFFER_TRACE(jh, "reserved, unused: refile");
367 367 /*
368   - * A journal_get_undo_access()+journal_release_buffer() may
  368 + * A jbd2_journal_get_undo_access()+jbd2_journal_release_buffer() may
369 369 * leave undo-committed data.
370 370 */
371 371 if (jh->b_committed_data) {
372 372 struct buffer_head *bh = jh2bh(jh);
373 373  
374 374 jbd_lock_bh_state(bh);
375   - jbd_slab_free(jh->b_committed_data, bh->b_size);
  375 + jbd2_slab_free(jh->b_committed_data, bh->b_size);
376 376 jh->b_committed_data = NULL;
377 377 jbd_unlock_bh_state(bh);
378 378 }
379   - journal_refile_buffer(journal, jh);
  379 + jbd2_journal_refile_buffer(journal, jh);
380 380 }
381 381  
382 382 /*
... ... @@ -385,7 +385,7 @@
385 385 * frees some memory
386 386 */
387 387 spin_lock(&journal->j_list_lock);
388   - __journal_clean_checkpoint_list(journal);
  388 + __jbd2_journal_clean_checkpoint_list(journal);
389 389 spin_unlock(&journal->j_list_lock);
390 390  
391 391 jbd_debug (3, "JBD: commit phase 1\n");
... ... @@ -393,7 +393,7 @@
393 393 /*
394 394 * Switch to a new revoke table.
395 395 */
396   - journal_switch_revoke_table(journal);
  396 + jbd2_journal_switch_revoke_table(journal);
397 397  
398 398 commit_transaction->t_state = T_FLUSH;
399 399 journal->j_committing_transaction = commit_transaction;
400 400  
... ... @@ -450,9 +450,9 @@
450 450 continue;
451 451 }
452 452 if (buffer_jbd(bh) && jh->b_jlist == BJ_Locked) {
453   - __journal_unfile_buffer(jh);
  453 + __jbd2_journal_unfile_buffer(jh);
454 454 jbd_unlock_bh_state(bh);
455   - journal_remove_journal_head(bh);
  455 + jbd2_journal_remove_journal_head(bh);
456 456 put_bh(bh);
457 457 } else {
458 458 jbd_unlock_bh_state(bh);
459 459  
... ... @@ -463,9 +463,9 @@
463 463 spin_unlock(&journal->j_list_lock);
464 464  
465 465 if (err)
466   - __journal_abort_hard(journal);
  466 + __jbd2_journal_abort_hard(journal);
467 467  
468   - journal_write_revoke_records(journal, commit_transaction);
  468 + jbd2_journal_write_revoke_records(journal, commit_transaction);
469 469  
470 470 jbd_debug(3, "JBD: commit phase 2\n");
471 471  
... ... @@ -499,7 +499,7 @@
499 499  
500 500 if (is_journal_aborted(journal)) {
501 501 JBUFFER_TRACE(jh, "journal is aborting: refile");
502   - journal_refile_buffer(journal, jh);
  502 + jbd2_journal_refile_buffer(journal, jh);
503 503 /* If that was the last one, we need to clean up
504 504 * any descriptor buffers which may have been
505 505 * already allocated, even if we are now
506 506  
... ... @@ -519,9 +519,9 @@
519 519  
520 520 jbd_debug(4, "JBD: get descriptor\n");
521 521  
522   - descriptor = journal_get_descriptor_buffer(journal);
  522 + descriptor = jbd2_journal_get_descriptor_buffer(journal);
523 523 if (!descriptor) {
524   - __journal_abort_hard(journal);
  524 + __jbd2_journal_abort_hard(journal);
525 525 continue;
526 526 }
527 527  
... ... @@ -529,8 +529,8 @@
529 529 jbd_debug(4, "JBD: got buffer %llu (%p)\n",
530 530 (unsigned long long)bh->b_blocknr, bh->b_data);
531 531 header = (journal_header_t *)&bh->b_data[0];
532   - header->h_magic = cpu_to_be32(JFS_MAGIC_NUMBER);
533   - header->h_blocktype = cpu_to_be32(JFS_DESCRIPTOR_BLOCK);
  532 + header->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER);
  533 + header->h_blocktype = cpu_to_be32(JBD2_DESCRIPTOR_BLOCK);
534 534 header->h_sequence = cpu_to_be32(commit_transaction->t_tid);
535 535  
536 536 tagp = &bh->b_data[sizeof(journal_header_t)];
537 537  
538 538  
539 539  
... ... @@ -543,25 +543,25 @@
543 543 /* Record it so that we can wait for IO
544 544 completion later */
545 545 BUFFER_TRACE(bh, "ph3: file as descriptor");
546   - journal_file_buffer(descriptor, commit_transaction,
  546 + jbd2_journal_file_buffer(descriptor, commit_transaction,
547 547 BJ_LogCtl);
548 548 }
549 549  
550 550 /* Where is the buffer to be written? */
551 551  
552   - err = journal_next_log_block(journal, &blocknr);
  552 + err = jbd2_journal_next_log_block(journal, &blocknr);
553 553 /* If the block mapping failed, just abandon the buffer
554 554 and repeat this loop: we'll fall into the
555 555 refile-on-abort condition above. */
556 556 if (err) {
557   - __journal_abort_hard(journal);
  557 + __jbd2_journal_abort_hard(journal);
558 558 continue;
559 559 }
560 560  
561 561 /*
562 562 * start_this_handle() uses t_outstanding_credits to determine
563 563 * the free space in the log, but this counter is changed
564   - * by journal_next_log_block() also.
  564 + * by jbd2_journal_next_log_block() also.
565 565 */
566 566 commit_transaction->t_outstanding_credits--;
567 567  
568 568  
... ... @@ -576,13 +576,13 @@
576 576  
577 577 set_bit(BH_JWrite, &jh2bh(jh)->b_state);
578 578 /*
579   - * akpm: journal_write_metadata_buffer() sets
  579 + * akpm: jbd2_journal_write_metadata_buffer() sets
580 580 * new_bh->b_transaction to commit_transaction.
581 581 * We need to clean this up before we release new_bh
582 582 * (which is of type BJ_IO)
583 583 */
584 584 JBUFFER_TRACE(jh, "ph3: write metadata");
585   - flags = journal_write_metadata_buffer(commit_transaction,
  585 + flags = jbd2_journal_write_metadata_buffer(commit_transaction,
586 586 jh, &new_jh, blocknr);
587 587 set_bit(BH_JWrite, &jh2bh(new_jh)->b_state);
588 588 wbuf[bufs++] = jh2bh(new_jh);
589 589  
... ... @@ -592,9 +592,9 @@
592 592  
593 593 tag_flag = 0;
594 594 if (flags & 1)
595   - tag_flag |= JFS_FLAG_ESCAPE;
  595 + tag_flag |= JBD2_FLAG_ESCAPE;
596 596 if (!first_tag)
597   - tag_flag |= JFS_FLAG_SAME_UUID;
  597 + tag_flag |= JBD2_FLAG_SAME_UUID;
598 598  
599 599 tag = (journal_block_tag_t *) tagp;
600 600 tag->t_blocknr = cpu_to_be32(jh2bh(jh)->b_blocknr);
... ... @@ -622,7 +622,7 @@
622 622 submitting the IOs. "tag" still points to
623 623 the last tag we set up. */
624 624  
625   - tag->t_flags |= cpu_to_be32(JFS_FLAG_LAST_TAG);
  625 + tag->t_flags |= cpu_to_be32(JBD2_FLAG_LAST_TAG);
626 626  
627 627 start_journal_io:
628 628 for (i = 0; i < bufs; i++) {
629 629  
630 630  
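
The descriptor block being assembled above consists of a journal_header_t followed by one journal_block_tag_t per journalled buffer; the layout implied by that code is sketched here (the authoritative definitions live in include/linux/jbd2.h).

/* On-disk descriptor-block layout implied by the code above (sketch;
 * see include/linux/jbd2.h for the authoritative definitions). */
typedef struct journal_header_s {
	__be32	h_magic;	/* JBD2_MAGIC_NUMBER */
	__be32	h_blocktype;	/* JBD2_DESCRIPTOR_BLOCK, JBD2_COMMIT_BLOCK, ... */
	__be32	h_sequence;	/* tid of the transaction being committed */
} journal_header_t;

/* One tag per data block follows the header; t_flags carries
 * JBD2_FLAG_ESCAPE, JBD2_FLAG_SAME_UUID and JBD2_FLAG_LAST_TAG
 * exactly as set in the loop above. */
typedef struct journal_block_tag_s {
	__be32	t_blocknr;	/* where the block lives on the filesystem */
	__be32	t_flags;	/* tag flags */
} journal_block_tag_t;
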
... ... @@ -678,14 +678,14 @@
678 678 clear_buffer_jwrite(bh);
679 679  
680 680 JBUFFER_TRACE(jh, "ph4: unfile after journal write");
681   - journal_unfile_buffer(journal, jh);
  681 + jbd2_journal_unfile_buffer(journal, jh);
682 682  
683 683 /*
684 684 * ->t_iobuf_list should contain only dummy buffer_heads
685   - * which were created by journal_write_metadata_buffer().
  685 + * which were created by jbd2_journal_write_metadata_buffer().
686 686 */
687 687 BUFFER_TRACE(bh, "dumping temporary bh");
688   - journal_put_journal_head(jh);
  688 + jbd2_journal_put_journal_head(jh);
689 689 __brelse(bh);
690 690 J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
691 691 free_buffer_head(bh);
... ... @@ -702,7 +702,7 @@
702 702 we finally commit, we can do any checkpointing
703 703 required. */
704 704 JBUFFER_TRACE(jh, "file as BJ_Forget");
705   - journal_file_buffer(jh, commit_transaction, BJ_Forget);
  705 + jbd2_journal_file_buffer(jh, commit_transaction, BJ_Forget);
706 706 /* Wake up any transactions which were waiting for this
707 707 IO to complete */
708 708 wake_up_bit(&bh->b_state, BH_Unshadow);
... ... @@ -733,8 +733,8 @@
733 733  
734 734 BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
735 735 clear_buffer_jwrite(bh);
736   - journal_unfile_buffer(journal, jh);
737   - journal_put_journal_head(jh);
  736 + jbd2_journal_unfile_buffer(journal, jh);
  737 + jbd2_journal_put_journal_head(jh);
738 738 __brelse(bh); /* One for getblk */
739 739 /* AKPM: bforget here */
740 740 }
... ... @@ -745,7 +745,7 @@
745 745 err = -EIO;
746 746  
747 747 if (err)
748   - __journal_abort_hard(journal);
  748 + __jbd2_journal_abort_hard(journal);
749 749  
750 750 /* End of a transaction! Finally, we can do checkpoint
751 751 processing: any buffers committed as a result of this
752 752  
... ... @@ -789,14 +789,14 @@
789 789 * Otherwise, we can just throw away the frozen data now.
790 790 */
791 791 if (jh->b_committed_data) {
792   - jbd_slab_free(jh->b_committed_data, bh->b_size);
  792 + jbd2_slab_free(jh->b_committed_data, bh->b_size);
793 793 jh->b_committed_data = NULL;
794 794 if (jh->b_frozen_data) {
795 795 jh->b_committed_data = jh->b_frozen_data;
796 796 jh->b_frozen_data = NULL;
797 797 }
798 798 } else if (jh->b_frozen_data) {
799   - jbd_slab_free(jh->b_frozen_data, bh->b_size);
  799 + jbd2_slab_free(jh->b_frozen_data, bh->b_size);
800 800 jh->b_frozen_data = NULL;
801 801 }
802 802  
803 803  
... ... @@ -804,12 +804,12 @@
804 804 cp_transaction = jh->b_cp_transaction;
805 805 if (cp_transaction) {
806 806 JBUFFER_TRACE(jh, "remove from old cp transaction");
807   - __journal_remove_checkpoint(jh);
  807 + __jbd2_journal_remove_checkpoint(jh);
808 808 }
809 809  
810 810 /* Only re-checkpoint the buffer_head if it is marked
811 811 * dirty. If the buffer was added to the BJ_Forget list
812   - * by journal_forget, it may no longer be dirty and
  812 + * by jbd2_journal_forget, it may no longer be dirty and
813 813 * there's no point in keeping a checkpoint record for
814 814 * it. */
815 815  
816 816  
... ... @@ -828,9 +828,9 @@
828 828  
829 829 if (buffer_jbddirty(bh)) {
830 830 JBUFFER_TRACE(jh, "add to new checkpointing trans");
831   - __journal_insert_checkpoint(jh, commit_transaction);
  831 + __jbd2_journal_insert_checkpoint(jh, commit_transaction);
832 832 JBUFFER_TRACE(jh, "refile for checkpoint writeback");
833   - __journal_refile_buffer(jh);
  833 + __jbd2_journal_refile_buffer(jh);
834 834 jbd_unlock_bh_state(bh);
835 835 } else {
836 836 J_ASSERT_BH(bh, !buffer_dirty(bh));
837 837  
... ... @@ -842,11 +842,11 @@
842 842 * disk and before we process the buffer on BJ_Forget
843 843 * list. */
844 844 JBUFFER_TRACE(jh, "refile or unfile freed buffer");
845   - __journal_refile_buffer(jh);
  845 + __jbd2_journal_refile_buffer(jh);
846 846 if (!jh->b_transaction) {
847 847 jbd_unlock_bh_state(bh);
848 848 /* needs a brelse */
849   - journal_remove_journal_head(bh);
  849 + jbd2_journal_remove_journal_head(bh);
850 850 release_buffer_page(bh);
851 851 } else
852 852 jbd_unlock_bh_state(bh);
... ... @@ -856,9 +856,9 @@
856 856 spin_unlock(&journal->j_list_lock);
857 857 /*
858 858 * This is a bit sleazy. We borrow j_list_lock to protect
859   - * journal->j_committing_transaction in __journal_remove_checkpoint.
860   - * Really, __journal_remove_checkpoint should be using j_state_lock but
861   - * it's a bit hassle to hold that across __journal_remove_checkpoint
  859 + * journal->j_committing_transaction in __jbd2_journal_remove_checkpoint.
  860 + * Really, __jbd2_journal_remove_checkpoint should be using j_state_lock but
  861 + * it's a bit hassle to hold that across __jbd2_journal_remove_checkpoint
862 862 */
863 863 spin_lock(&journal->j_state_lock);
864 864 spin_lock(&journal->j_list_lock);
... ... @@ -885,7 +885,7 @@
885 885 spin_unlock(&journal->j_state_lock);
886 886  
887 887 if (commit_transaction->t_checkpoint_list == NULL) {
888   - __journal_drop_transaction(journal, commit_transaction);
  888 + __jbd2_journal_drop_transaction(journal, commit_transaction);
889 889 } else {
890 890 if (journal->j_checkpoint_transactions == NULL) {
891 891 journal->j_checkpoint_transactions = commit_transaction;
fs/jbd2/journal.c
1 1 /*
2   - * linux/fs/jbd/journal.c
  2 + * linux/fs/jbd2/journal.c
3 3 *
4 4 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
5 5 *
... ... @@ -25,7 +25,7 @@
25 25 #include <linux/module.h>
26 26 #include <linux/time.h>
27 27 #include <linux/fs.h>
28   -#include <linux/jbd.h>
  28 +#include <linux/jbd2.h>
29 29 #include <linux/errno.h>
30 30 #include <linux/slab.h>
31 31 #include <linux/smp_lock.h>
32 32  
33 33  
34 34  
... ... @@ -40,51 +40,51 @@
40 40 #include <asm/uaccess.h>
41 41 #include <asm/page.h>
42 42  
43   -EXPORT_SYMBOL(journal_start);
44   -EXPORT_SYMBOL(journal_restart);
45   -EXPORT_SYMBOL(journal_extend);
46   -EXPORT_SYMBOL(journal_stop);
47   -EXPORT_SYMBOL(journal_lock_updates);
48   -EXPORT_SYMBOL(journal_unlock_updates);
49   -EXPORT_SYMBOL(journal_get_write_access);
50   -EXPORT_SYMBOL(journal_get_create_access);
51   -EXPORT_SYMBOL(journal_get_undo_access);
52   -EXPORT_SYMBOL(journal_dirty_data);
53   -EXPORT_SYMBOL(journal_dirty_metadata);
54   -EXPORT_SYMBOL(journal_release_buffer);
55   -EXPORT_SYMBOL(journal_forget);
  43 +EXPORT_SYMBOL(jbd2_journal_start);
  44 +EXPORT_SYMBOL(jbd2_journal_restart);
  45 +EXPORT_SYMBOL(jbd2_journal_extend);
  46 +EXPORT_SYMBOL(jbd2_journal_stop);
  47 +EXPORT_SYMBOL(jbd2_journal_lock_updates);
  48 +EXPORT_SYMBOL(jbd2_journal_unlock_updates);
  49 +EXPORT_SYMBOL(jbd2_journal_get_write_access);
  50 +EXPORT_SYMBOL(jbd2_journal_get_create_access);
  51 +EXPORT_SYMBOL(jbd2_journal_get_undo_access);
  52 +EXPORT_SYMBOL(jbd2_journal_dirty_data);
  53 +EXPORT_SYMBOL(jbd2_journal_dirty_metadata);
  54 +EXPORT_SYMBOL(jbd2_journal_release_buffer);
  55 +EXPORT_SYMBOL(jbd2_journal_forget);
56 56 #if 0
57 57 EXPORT_SYMBOL(journal_sync_buffer);
58 58 #endif
59   -EXPORT_SYMBOL(journal_flush);
60   -EXPORT_SYMBOL(journal_revoke);
  59 +EXPORT_SYMBOL(jbd2_journal_flush);
  60 +EXPORT_SYMBOL(jbd2_journal_revoke);
61 61  
62   -EXPORT_SYMBOL(journal_init_dev);
63   -EXPORT_SYMBOL(journal_init_inode);
64   -EXPORT_SYMBOL(journal_update_format);
65   -EXPORT_SYMBOL(journal_check_used_features);
66   -EXPORT_SYMBOL(journal_check_available_features);
67   -EXPORT_SYMBOL(journal_set_features);
68   -EXPORT_SYMBOL(journal_create);
69   -EXPORT_SYMBOL(journal_load);
70   -EXPORT_SYMBOL(journal_destroy);
71   -EXPORT_SYMBOL(journal_update_superblock);
72   -EXPORT_SYMBOL(journal_abort);
73   -EXPORT_SYMBOL(journal_errno);
74   -EXPORT_SYMBOL(journal_ack_err);
75   -EXPORT_SYMBOL(journal_clear_err);
76   -EXPORT_SYMBOL(log_wait_commit);
77   -EXPORT_SYMBOL(journal_start_commit);
78   -EXPORT_SYMBOL(journal_force_commit_nested);
79   -EXPORT_SYMBOL(journal_wipe);
80   -EXPORT_SYMBOL(journal_blocks_per_page);
81   -EXPORT_SYMBOL(journal_invalidatepage);
82   -EXPORT_SYMBOL(journal_try_to_free_buffers);
83   -EXPORT_SYMBOL(journal_force_commit);
  62 +EXPORT_SYMBOL(jbd2_journal_init_dev);
  63 +EXPORT_SYMBOL(jbd2_journal_init_inode);
  64 +EXPORT_SYMBOL(jbd2_journal_update_format);
  65 +EXPORT_SYMBOL(jbd2_journal_check_used_features);
  66 +EXPORT_SYMBOL(jbd2_journal_check_available_features);
  67 +EXPORT_SYMBOL(jbd2_journal_set_features);
  68 +EXPORT_SYMBOL(jbd2_journal_create);
  69 +EXPORT_SYMBOL(jbd2_journal_load);
  70 +EXPORT_SYMBOL(jbd2_journal_destroy);
  71 +EXPORT_SYMBOL(jbd2_journal_update_superblock);
  72 +EXPORT_SYMBOL(jbd2_journal_abort);
  73 +EXPORT_SYMBOL(jbd2_journal_errno);
  74 +EXPORT_SYMBOL(jbd2_journal_ack_err);
  75 +EXPORT_SYMBOL(jbd2_journal_clear_err);
  76 +EXPORT_SYMBOL(jbd2_log_wait_commit);
  77 +EXPORT_SYMBOL(jbd2_journal_start_commit);
  78 +EXPORT_SYMBOL(jbd2_journal_force_commit_nested);
  79 +EXPORT_SYMBOL(jbd2_journal_wipe);
  80 +EXPORT_SYMBOL(jbd2_journal_blocks_per_page);
  81 +EXPORT_SYMBOL(jbd2_journal_invalidatepage);
  82 +EXPORT_SYMBOL(jbd2_journal_try_to_free_buffers);
  83 +EXPORT_SYMBOL(jbd2_journal_force_commit);
84 84  
85 85 static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *);
86 86 static void __journal_abort_soft (journal_t *journal, int errno);
87   -static int journal_create_jbd_slab(size_t slab_size);
  87 +static int jbd2_journal_create_jbd_slab(size_t slab_size);
88 88  
89 89 /*
90 90 * Helper function used to manage commit timeouts
... ... @@ -98,7 +98,7 @@
98 98 }
99 99  
100 100 /*
101   - * kjournald: The main thread function used to manage a logging device
  101 + * kjournald2: The main thread function used to manage a logging device
102 102 * journal.
103 103 *
104 104 * This kernel thread is responsible for two things:
... ... @@ -113,7 +113,7 @@
113 113 * known as checkpointing, and this thread is responsible for that job.
114 114 */
115 115  
116   -static int kjournald(void *arg)
  116 +static int kjournald2(void *arg)
117 117 {
118 118 journal_t *journal = arg;
119 119 transaction_t *transaction;
... ... @@ -129,7 +129,7 @@
129 129 journal->j_task = current;
130 130 wake_up(&journal->j_wait_done_commit);
131 131  
132   - printk(KERN_INFO "kjournald starting. Commit interval %ld seconds\n",
  132 + printk(KERN_INFO "kjournald2 starting. Commit interval %ld seconds\n",
133 133 journal->j_commit_interval / HZ);
134 134  
135 135 /*
... ... @@ -138,7 +138,7 @@
138 138 spin_lock(&journal->j_state_lock);
139 139  
140 140 loop:
141   - if (journal->j_flags & JFS_UNMOUNT)
  141 + if (journal->j_flags & JBD2_UNMOUNT)
142 142 goto end_loop;
143 143  
144 144 jbd_debug(1, "commit_sequence=%d, commit_request=%d\n",
... ... @@ -148,7 +148,7 @@
148 148 jbd_debug(1, "OK, requests differ\n");
149 149 spin_unlock(&journal->j_state_lock);
150 150 del_timer_sync(&journal->j_commit_timer);
151   - journal_commit_transaction(journal);
  151 + jbd2_journal_commit_transaction(journal);
152 152 spin_lock(&journal->j_state_lock);
153 153 goto loop;
154 154 }
... ... @@ -160,7 +160,7 @@
160 160 * good idea, because that depends on threads that may
161 161 * be already stopped.
162 162 */
163   - jbd_debug(1, "Now suspending kjournald\n");
  163 + jbd_debug(1, "Now suspending kjournald2\n");
164 164 spin_unlock(&journal->j_state_lock);
165 165 refrigerator();
166 166 spin_lock(&journal->j_state_lock);
... ... @@ -180,7 +180,7 @@
180 180 if (transaction && time_after_eq(jiffies,
181 181 transaction->t_expires))
182 182 should_sleep = 0;
183   - if (journal->j_flags & JFS_UNMOUNT)
  183 + if (journal->j_flags & JBD2_UNMOUNT)
184 184 should_sleep = 0;
185 185 if (should_sleep) {
186 186 spin_unlock(&journal->j_state_lock);
... ... @@ -190,7 +190,7 @@
190 190 finish_wait(&journal->j_wait_commit, &wait);
191 191 }
192 192  
193   - jbd_debug(1, "kjournald wakes\n");
  193 + jbd_debug(1, "kjournald2 wakes\n");
194 194  
195 195 /*
196 196 * Were we woken up by a commit wakeup event?
197 197  
198 198  
... ... @@ -211,16 +211,16 @@
211 211 return 0;
212 212 }
213 213  
214   -static void journal_start_thread(journal_t *journal)
  214 +static void jbd2_journal_start_thread(journal_t *journal)
215 215 {
216   - kthread_run(kjournald, journal, "kjournald");
  216 + kthread_run(kjournald2, journal, "kjournald2");
217 217 wait_event(journal->j_wait_done_commit, journal->j_task != 0);
218 218 }
219 219  
220 220 static void journal_kill_thread(journal_t *journal)
221 221 {
222 222 spin_lock(&journal->j_state_lock);
223   - journal->j_flags |= JFS_UNMOUNT;
  223 + journal->j_flags |= JBD2_UNMOUNT;
224 224  
225 225 while (journal->j_task) {
226 226 wake_up(&journal->j_wait_commit);
... ... @@ -232,7 +232,7 @@
232 232 }
233 233  
234 234 /*
235   - * journal_write_metadata_buffer: write a metadata buffer to the journal.
  235 + * jbd2_journal_write_metadata_buffer: write a metadata buffer to the journal.
236 236 *
237 237 * Writes a metadata buffer to a given disk block. The actual IO is not
238 238 * performed but a new buffer_head is constructed which labels the data
... ... @@ -240,7 +240,7 @@
240 240 *
241 241 * Any magic-number escaping which needs to be done will cause a
242 242 * copy-out here. If the buffer happens to start with the
243   - * JFS_MAGIC_NUMBER, then we can't write it to the log directly: the
  243 + * JBD2_MAGIC_NUMBER, then we can't write it to the log directly: the
244 244 * magic number is only written to the log for descripter blocks. In
245 245 * this case, we copy the data and replace the first word with 0, and we
246 246 * return a result code which indicates that this buffer needs to be
... ... @@ -268,7 +268,7 @@
268 268 * Bit 1 set == buffer copy-out performed (kfree the data after IO)
269 269 */
270 270  
271   -int journal_write_metadata_buffer(transaction_t *transaction,
  271 +int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
272 272 struct journal_head *jh_in,
273 273 struct journal_head **jh_out,
274 274 unsigned long blocknr)
... ... @@ -316,7 +316,7 @@
316 316 * Check for escaping
317 317 */
318 318 if (*((__be32 *)(mapped_data + new_offset)) ==
319   - cpu_to_be32(JFS_MAGIC_NUMBER)) {
  319 + cpu_to_be32(JBD2_MAGIC_NUMBER)) {
320 320 need_copy_out = 1;
321 321 do_escape = 1;
322 322 }
323 323  
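
The escape rule described in the comment above boils down to a few lines: if the data block starts with the journal magic, the copy that goes to the log has that word zeroed and the block's tag gets JBD2_FLAG_ESCAPE so recovery can restore it. A simplified sketch (not the kernel function, which also handles frozen data, highmem pages and buffer refiling):

#include <linux/jbd2.h>
#include <linux/string.h>

/* Simplified escaping sketch of jbd2_journal_write_metadata_buffer()'s
 * copy-out path. */
static int escape_for_log(char *log_copy, const char *data, size_t size)
{
	memcpy(log_copy, data, size);
	if (*((__be32 *)data) == cpu_to_be32(JBD2_MAGIC_NUMBER)) {
		*((__be32 *)log_copy) = 0;	/* zero the magic word */
		return JBD2_FLAG_ESCAPE;	/* recorded in the tag's t_flags */
	}
	return 0;
}
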
... ... @@ -329,10 +329,10 @@
329 329 char *tmp;
330 330  
331 331 jbd_unlock_bh_state(bh_in);
332   - tmp = jbd_slab_alloc(bh_in->b_size, GFP_NOFS);
  332 + tmp = jbd2_slab_alloc(bh_in->b_size, GFP_NOFS);
333 333 jbd_lock_bh_state(bh_in);
334 334 if (jh_in->b_frozen_data) {
335   - jbd_slab_free(tmp, bh_in->b_size);
  335 + jbd2_slab_free(tmp, bh_in->b_size);
336 336 goto repeat;
337 337 }
338 338  
... ... @@ -362,7 +362,7 @@
362 362 atomic_set(&new_bh->b_count, 1);
363 363 jbd_unlock_bh_state(bh_in);
364 364  
365   - new_jh = journal_add_journal_head(new_bh); /* This sleeps */
  365 + new_jh = jbd2_journal_add_journal_head(new_bh); /* This sleeps */
366 366  
367 367 set_bh_page(new_bh, new_page, new_offset);
368 368 new_jh->b_transaction = NULL;
369 369  
... ... @@ -380,9 +380,9 @@
380 380 * copying is moved to the transaction's shadow queue.
381 381 */
382 382 JBUFFER_TRACE(jh_in, "file as BJ_Shadow");
383   - journal_file_buffer(jh_in, transaction, BJ_Shadow);
  383 + jbd2_journal_file_buffer(jh_in, transaction, BJ_Shadow);
384 384 JBUFFER_TRACE(new_jh, "file as BJ_IO");
385   - journal_file_buffer(new_jh, transaction, BJ_IO);
  385 + jbd2_journal_file_buffer(new_jh, transaction, BJ_IO);
386 386  
387 387 return do_escape | (done_copy_out << 1);
388 388 }
389 389  
... ... @@ -393,14 +393,14 @@
393 393 */
394 394  
395 395 /*
396   - * __log_space_left: Return the number of free blocks left in the journal.
  396 + * __jbd2_log_space_left: Return the number of free blocks left in the journal.
397 397 *
398 398 * Called with the journal already locked.
399 399 *
400 400 * Called under j_state_lock
401 401 */
402 402  
403   -int __log_space_left(journal_t *journal)
  403 +int __jbd2_log_space_left(journal_t *journal)
404 404 {
405 405 int left = journal->j_free;
406 406  
... ... @@ -424,7 +424,7 @@
424 424 /*
425 425 * Called under j_state_lock. Returns true if a transaction was started.
426 426 */
427   -int __log_start_commit(journal_t *journal, tid_t target)
  427 +int __jbd2_log_start_commit(journal_t *journal, tid_t target)
428 428 {
429 429 /*
430 430 * Are we already doing a recent enough commit?
431 431  
... ... @@ -445,12 +445,12 @@
445 445 return 0;
446 446 }
447 447  
448   -int log_start_commit(journal_t *journal, tid_t tid)
  448 +int jbd2_log_start_commit(journal_t *journal, tid_t tid)
449 449 {
450 450 int ret;
451 451  
452 452 spin_lock(&journal->j_state_lock);
453   - ret = __log_start_commit(journal, tid);
  453 + ret = __jbd2_log_start_commit(journal, tid);
454 454 spin_unlock(&journal->j_state_lock);
455 455 return ret;
456 456 }
... ... @@ -465,7 +465,7 @@
465 465 *
466 466 * Returns true if a transaction was started.
467 467 */
468   -int journal_force_commit_nested(journal_t *journal)
  468 +int jbd2_journal_force_commit_nested(journal_t *journal)
469 469 {
470 470 transaction_t *transaction = NULL;
471 471 tid_t tid;
... ... @@ -473,7 +473,7 @@
473 473 spin_lock(&journal->j_state_lock);
474 474 if (journal->j_running_transaction && !current->journal_info) {
475 475 transaction = journal->j_running_transaction;
476   - __log_start_commit(journal, transaction->t_tid);
  476 + __jbd2_log_start_commit(journal, transaction->t_tid);
477 477 } else if (journal->j_committing_transaction)
478 478 transaction = journal->j_committing_transaction;
479 479  
... ... @@ -484,7 +484,7 @@
484 484  
485 485 tid = transaction->t_tid;
486 486 spin_unlock(&journal->j_state_lock);
487   - log_wait_commit(journal, tid);
  487 + jbd2_log_wait_commit(journal, tid);
488 488 return 1;
489 489 }
490 490  
... ... @@ -492,7 +492,7 @@
492 492 * Start a commit of the current running transaction (if any). Returns true
493 493 * if a transaction was started, and fills its tid in at *ptid
494 494 */
495   -int journal_start_commit(journal_t *journal, tid_t *ptid)
  495 +int jbd2_journal_start_commit(journal_t *journal, tid_t *ptid)
496 496 {
497 497 int ret = 0;
498 498  
... ... @@ -500,7 +500,7 @@
500 500 if (journal->j_running_transaction) {
501 501 tid_t tid = journal->j_running_transaction->t_tid;
502 502  
503   - ret = __log_start_commit(journal, tid);
  503 + ret = __jbd2_log_start_commit(journal, tid);
504 504 if (ret && ptid)
505 505 *ptid = tid;
506 506 } else if (journal->j_committing_transaction && ptid) {
... ... @@ -519,7 +519,7 @@
519 519 * Wait for a specified commit to complete.
520 520 * The caller may not hold the journal lock.
521 521 */
522   -int log_wait_commit(journal_t *journal, tid_t tid)
  522 +int jbd2_log_wait_commit(journal_t *journal, tid_t tid)
523 523 {
524 524 int err = 0;
525 525  
... ... @@ -555,7 +555,7 @@
555 555 * Log buffer allocation routines:
556 556 */
557 557  
558   -int journal_next_log_block(journal_t *journal, unsigned long *retp)
  558 +int jbd2_journal_next_log_block(journal_t *journal, unsigned long *retp)
559 559 {
560 560 unsigned long blocknr;
561 561  
... ... @@ -568,7 +568,7 @@
568 568 if (journal->j_head == journal->j_last)
569 569 journal->j_head = journal->j_first;
570 570 spin_unlock(&journal->j_state_lock);
571   - return journal_bmap(journal, blocknr, retp);
  571 + return jbd2_journal_bmap(journal, blocknr, retp);
572 572 }
573 573  
574 574 /*
... ... @@ -578,7 +578,7 @@
578 578 * this is a no-op. If needed, we can use j_blk_offset - everything is
579 579 * ready.
580 580 */
581   -int journal_bmap(journal_t *journal, unsigned long blocknr,
  581 +int jbd2_journal_bmap(journal_t *journal, unsigned long blocknr,
582 582 unsigned long *retp)
583 583 {
584 584 int err = 0;
585 585  
586 586  
... ... @@ -610,18 +610,18 @@
610 610 * the journal without copying their contents, but for journal
611 611 * descriptor blocks we do need to generate bona fide buffers.
612 612 *
613   - * After the caller of journal_get_descriptor_buffer() has finished modifying
  613 + * After the caller of jbd2_journal_get_descriptor_buffer() has finished modifying
614 614 * the buffer's contents they really should run flush_dcache_page(bh->b_page).
615 615 * But we don't bother doing that, so there will be coherency problems with
616 616 * mmaps of blockdevs which hold live JBD-controlled filesystems.
617 617 */
618   -struct journal_head *journal_get_descriptor_buffer(journal_t *journal)
  618 +struct journal_head *jbd2_journal_get_descriptor_buffer(journal_t *journal)
619 619 {
620 620 struct buffer_head *bh;
621 621 unsigned long blocknr;
622 622 int err;
623 623  
624   - err = journal_next_log_block(journal, &blocknr);
  624 + err = jbd2_journal_next_log_block(journal, &blocknr);
625 625  
626 626 if (err)
627 627 return NULL;
... ... @@ -632,7 +632,7 @@
632 632 set_buffer_uptodate(bh);
633 633 unlock_buffer(bh);
634 634 BUFFER_TRACE(bh, "return this buffer");
635   - return journal_add_journal_head(bh);
  635 + return jbd2_journal_add_journal_head(bh);
636 636 }
637 637  
638 638 /*
639 639  
... ... @@ -669,10 +669,10 @@
669 669 journal->j_commit_interval = (HZ * JBD_DEFAULT_MAX_COMMIT_AGE);
670 670  
671 671 /* The journal is marked for error until we succeed with recovery! */
672   - journal->j_flags = JFS_ABORT;
  672 + journal->j_flags = JBD2_ABORT;
673 673  
674 674 /* Set up a default-sized revoke table for the new mount. */
675   - err = journal_init_revoke(journal, JOURNAL_REVOKE_DEFAULT_HASH);
  675 + err = jbd2_journal_init_revoke(journal, JOURNAL_REVOKE_DEFAULT_HASH);
676 676 if (err) {
677 677 kfree(journal);
678 678 goto fail;
... ... @@ -682,7 +682,7 @@
682 682 return NULL;
683 683 }
684 684  
685   -/* journal_init_dev and journal_init_inode:
  685 +/* jbd2_journal_init_dev and jbd2_journal_init_inode:
686 686 *
687 687 * Create a journal structure assigned some fixed set of disk blocks to
688 688 * the journal. We don't actually touch those disk blocks yet, but we
... ... @@ -692,7 +692,7 @@
692 692 */
693 693  
694 694 /**
695   - * journal_t * journal_init_dev() - creates an initialises a journal structure
  695 + * journal_t * jbd2_journal_init_dev() - creates an initialises a journal structure
696 696 * @bdev: Block device on which to create the journal
697 697 * @fs_dev: Device which hold journalled filesystem for this journal.
698 698 * @start: Block nr Start of journal.
699 699  
... ... @@ -700,11 +700,11 @@
700 700 * @blocksize: blocksize of journalling device
701 701 * @returns: a newly created journal_t *
702 702 *
703   - * journal_init_dev creates a journal which maps a fixed contiguous
  703 + * jbd2_journal_init_dev creates a journal which maps a fixed contiguous
704 704 * range of blocks on an arbitrary block device.
705 705 *
706 706 */
707   -journal_t * journal_init_dev(struct block_device *bdev,
  707 +journal_t * jbd2_journal_init_dev(struct block_device *bdev,
708 708 struct block_device *fs_dev,
709 709 int start, int len, int blocksize)
710 710 {
711 711  
712 712  
... ... @@ -740,14 +740,14 @@
740 740 }
741 741  
742 742 /**
743   - * journal_t * journal_init_inode () - creates a journal which maps to a inode.
  743 + * journal_t * jbd2_journal_init_inode () - creates a journal which maps to a inode.
744 744 * @inode: An inode to create the journal in
745 745 *
746   - * journal_init_inode creates a journal which maps an on-disk inode as
  746 + * jbd2_journal_init_inode creates a journal which maps an on-disk inode as
747 747 * the journal. The inode must exist already, must support bmap() and
748 748 * must have all data blocks preallocated.
749 749 */
750   -journal_t * journal_init_inode (struct inode *inode)
  750 +journal_t * jbd2_journal_init_inode (struct inode *inode)
751 751 {
752 752 struct buffer_head *bh;
753 753 journal_t *journal = journal_init_common();
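
For a filesystem that keeps its journal inside an inode, the usual lifecycle with the renamed API is init, load (which replays the log if needed), and destroy at unmount. A hypothetical skeleton, with error handling reduced to the minimum and only the jbd2_* calls taken from this patch:

#include <linux/jbd2.h>

/* Hypothetical mount/unmount skeleton. */
static journal_t *my_fs_load_journal(struct inode *journal_inode)
{
	journal_t *journal;

	journal = jbd2_journal_init_inode(journal_inode);
	if (!journal)
		return NULL;

	/* Replays the log after a crash, then clears JBD2_ABORT and
	 * sets JBD2_LOADED on success. */
	if (jbd2_journal_load(journal)) {
		jbd2_journal_destroy(journal);
		return NULL;
	}
	return journal;
}

static void my_fs_release_journal(journal_t *journal)
{
	/* Commits any running transaction, checkpoints what is left and
	 * frees the journal_t. */
	jbd2_journal_destroy(journal);
}
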
... ... @@ -780,7 +780,7 @@
780 780 return NULL;
781 781 }
782 782  
783   - err = journal_bmap(journal, 0, &blocknr);
  783 + err = jbd2_journal_bmap(journal, 0, &blocknr);
784 784 /* If that failed, give up */
785 785 if (err) {
786 786 printk(KERN_ERR "%s: Cannnot locate journal superblock\n",
787 787  
788 788  
789 789  
... ... @@ -838,27 +838,27 @@
838 838 journal->j_max_transaction_buffers = journal->j_maxlen / 4;
839 839  
840 840 /* Add the dynamic fields and write it to disk. */
841   - journal_update_superblock(journal, 1);
842   - journal_start_thread(journal);
  841 + jbd2_journal_update_superblock(journal, 1);
  842 + jbd2_journal_start_thread(journal);
843 843 return 0;
844 844 }
845 845  
846 846 /**
847   - * int journal_create() - Initialise the new journal file
  847 + * int jbd2_journal_create() - Initialise the new journal file
848 848 * @journal: Journal to create. This structure must have been initialised
849 849 *
850 850 * Given a journal_t structure which tells us which disk blocks we can
851 851 * use, create a new journal superblock and initialise all of the
852 852 * journal fields from scratch.
853 853 **/
854   -int journal_create(journal_t *journal)
  854 +int jbd2_journal_create(journal_t *journal)
855 855 {
856 856 unsigned long blocknr;
857 857 struct buffer_head *bh;
858 858 journal_superblock_t *sb;
859 859 int i, err;
860 860  
861   - if (journal->j_maxlen < JFS_MIN_JOURNAL_BLOCKS) {
  861 + if (journal->j_maxlen < JBD2_MIN_JOURNAL_BLOCKS) {
862 862 printk (KERN_ERR "Journal length (%d blocks) too short.\n",
863 863 journal->j_maxlen);
864 864 journal_fail_superblock(journal);
865 865  
... ... @@ -876,10 +876,10 @@
876 876 }
877 877  
878 878 /* Zero out the entire journal on disk. We cannot afford to
879   - have any blocks on disk beginning with JFS_MAGIC_NUMBER. */
  879 + have any blocks on disk beginning with JBD2_MAGIC_NUMBER. */
880 880 jbd_debug(1, "JBD: Zeroing out journal blocks...\n");
881 881 for (i = 0; i < journal->j_maxlen; i++) {
882   - err = journal_bmap(journal, i, &blocknr);
  882 + err = jbd2_journal_bmap(journal, i, &blocknr);
883 883 if (err)
884 884 return err;
885 885 bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
... ... @@ -899,8 +899,8 @@
899 899 /* OK, fill in the initial static fields in the new superblock */
900 900 sb = journal->j_superblock;
901 901  
902   - sb->s_header.h_magic = cpu_to_be32(JFS_MAGIC_NUMBER);
903   - sb->s_header.h_blocktype = cpu_to_be32(JFS_SUPERBLOCK_V2);
  902 + sb->s_header.h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER);
  903 + sb->s_header.h_blocktype = cpu_to_be32(JBD2_SUPERBLOCK_V2);
904 904  
905 905 sb->s_blocksize = cpu_to_be32(journal->j_blocksize);
906 906 sb->s_maxlen = cpu_to_be32(journal->j_maxlen);
907 907  
908 908  
... ... @@ -908,21 +908,21 @@
908 908  
909 909 journal->j_transaction_sequence = 1;
910 910  
911   - journal->j_flags &= ~JFS_ABORT;
  911 + journal->j_flags &= ~JBD2_ABORT;
912 912 journal->j_format_version = 2;
913 913  
914 914 return journal_reset(journal);
915 915 }
916 916  
917 917 /**
918   - * void journal_update_superblock() - Update journal sb on disk.
  918 + * void jbd2_journal_update_superblock() - Update journal sb on disk.
919 919 * @journal: The journal to update.
920 920 * @wait: Set to '0' if you don't want to wait for IO completion.
921 921 *
922 922 * Update a journal's dynamic superblock fields and write it to disk,
923 923 * optionally waiting for the IO to complete.
924 924 */
925   -void journal_update_superblock(journal_t *journal, int wait)
  925 +void jbd2_journal_update_superblock(journal_t *journal, int wait)
926 926 {
927 927 journal_superblock_t *sb = journal->j_superblock;
928 928 struct buffer_head *bh = journal->j_sb_buffer;
... ... @@ -931,7 +931,7 @@
931 931 * As a special case, if the on-disk copy is already marked as needing
932 932 * no recovery (s_start == 0) and there are no outstanding transactions
933 933 * in the filesystem, then we can safely defer the superblock update
934   - * until the next commit by setting JFS_FLUSHED. This avoids
  934 + * until the next commit by setting JBD2_FLUSHED. This avoids
935 935 * attempting a write to a potential-readonly device.
936 936 */
937 937 if (sb->s_start == 0 && journal->j_tail_sequence ==
938 938  
... ... @@ -966,9 +966,9 @@
966 966  
967 967 spin_lock(&journal->j_state_lock);
968 968 if (sb->s_start)
969   - journal->j_flags &= ~JFS_FLUSHED;
  969 + journal->j_flags &= ~JBD2_FLUSHED;
970 970 else
971   - journal->j_flags |= JFS_FLUSHED;
  971 + journal->j_flags |= JBD2_FLUSHED;
972 972 spin_unlock(&journal->j_state_lock);
973 973 }
974 974  
975 975  
976 976  
... ... @@ -1000,17 +1000,17 @@
1000 1000  
1001 1001 err = -EINVAL;
1002 1002  
1003   - if (sb->s_header.h_magic != cpu_to_be32(JFS_MAGIC_NUMBER) ||
  1003 + if (sb->s_header.h_magic != cpu_to_be32(JBD2_MAGIC_NUMBER) ||
1004 1004 sb->s_blocksize != cpu_to_be32(journal->j_blocksize)) {
1005 1005 printk(KERN_WARNING "JBD: no valid journal superblock found\n");
1006 1006 goto out;
1007 1007 }
1008 1008  
1009 1009 switch(be32_to_cpu(sb->s_header.h_blocktype)) {
1010   - case JFS_SUPERBLOCK_V1:
  1010 + case JBD2_SUPERBLOCK_V1:
1011 1011 journal->j_format_version = 1;
1012 1012 break;
1013   - case JFS_SUPERBLOCK_V2:
  1013 + case JBD2_SUPERBLOCK_V2:
1014 1014 journal->j_format_version = 2;
1015 1015 break;
1016 1016 default:
1017 1017  
... ... @@ -1059,14 +1059,14 @@
1059 1059  
1060 1060  
1061 1061 /**
1062   - * int journal_load() - Read journal from disk.
  1062 + * int jbd2_journal_load() - Read journal from disk.
1063 1063 * @journal: Journal to act on.
1064 1064 *
1065 1065 * Given a journal_t structure which tells us which disk blocks contain
1066 1066 * a journal, read the journal from disk to initialise the in-memory
1067 1067 * structures.
1068 1068 */
1069   -int journal_load(journal_t *journal)
  1069 +int jbd2_journal_load(journal_t *journal)
1070 1070 {
1071 1071 int err;
1072 1072 journal_superblock_t *sb;
1073 1073  
... ... @@ -1081,9 +1081,9 @@
1081 1081  
1082 1082 if (journal->j_format_version >= 2) {
1083 1083 if ((sb->s_feature_ro_compat &
1084   - ~cpu_to_be32(JFS_KNOWN_ROCOMPAT_FEATURES)) ||
  1084 + ~cpu_to_be32(JBD2_KNOWN_ROCOMPAT_FEATURES)) ||
1085 1085 (sb->s_feature_incompat &
1086   - ~cpu_to_be32(JFS_KNOWN_INCOMPAT_FEATURES))) {
  1086 + ~cpu_to_be32(JBD2_KNOWN_INCOMPAT_FEATURES))) {
1087 1087 printk (KERN_WARNING
1088 1088 "JBD: Unrecognised features on journal\n");
1089 1089 return -EINVAL;
1090 1090  
... ... @@ -1093,13 +1093,13 @@
1093 1093 /*
1094 1094 * Create a slab for this blocksize
1095 1095 */
1096   - err = journal_create_jbd_slab(be32_to_cpu(sb->s_blocksize));
  1096 + err = jbd2_journal_create_jbd_slab(be32_to_cpu(sb->s_blocksize));
1097 1097 if (err)
1098 1098 return err;
1099 1099  
1100 1100 /* Let the recovery code check whether it needs to recover any
1101 1101 * data from the journal. */
1102   - if (journal_recover(journal))
  1102 + if (jbd2_journal_recover(journal))
1103 1103 goto recovery_error;
1104 1104  
1105 1105 /* OK, we've finished with the dynamic journal bits:
... ... @@ -1108,8 +1108,8 @@
1108 1108 if (journal_reset(journal))
1109 1109 goto recovery_error;
1110 1110  
1111   - journal->j_flags &= ~JFS_ABORT;
1112   - journal->j_flags |= JFS_LOADED;
  1111 + journal->j_flags &= ~JBD2_ABORT;
  1112 + journal->j_flags |= JBD2_LOADED;
1113 1113 return 0;
1114 1114  
1115 1115 recovery_error:
1116 1116  
1117 1117  
... ... @@ -1118,20 +1118,20 @@
1118 1118 }
1119 1119  
1120 1120 /**
1121   - * void journal_destroy() - Release a journal_t structure.
  1121 + * void jbd2_journal_destroy() - Release a journal_t structure.
1122 1122 * @journal: Journal to act on.
1123 1123 *
1124 1124 * Release a journal_t structure once it is no longer in use by the
1125 1125 * journaled object.
1126 1126 */
1127   -void journal_destroy(journal_t *journal)
  1127 +void jbd2_journal_destroy(journal_t *journal)
1128 1128 {
1129 1129 /* Wait for the commit thread to wake up and die. */
1130 1130 journal_kill_thread(journal);
1131 1131  
1132 1132 /* Force a final log commit */
1133 1133 if (journal->j_running_transaction)
1134   - journal_commit_transaction(journal);
  1134 + jbd2_journal_commit_transaction(journal);
1135 1135  
1136 1136 /* Force any old transactions to disk */
1137 1137  
... ... @@ -1139,7 +1139,7 @@
1139 1139 spin_lock(&journal->j_list_lock);
1140 1140 while (journal->j_checkpoint_transactions != NULL) {
1141 1141 spin_unlock(&journal->j_list_lock);
1142   - log_do_checkpoint(journal);
  1142 + jbd2_log_do_checkpoint(journal);
1143 1143 spin_lock(&journal->j_list_lock);
1144 1144 }
1145 1145  
1146 1146  
1147 1147  
... ... @@ -1152,21 +1152,21 @@
1152 1152 journal->j_tail = 0;
1153 1153 journal->j_tail_sequence = ++journal->j_transaction_sequence;
1154 1154 if (journal->j_sb_buffer) {
1155   - journal_update_superblock(journal, 1);
  1155 + jbd2_journal_update_superblock(journal, 1);
1156 1156 brelse(journal->j_sb_buffer);
1157 1157 }
1158 1158  
1159 1159 if (journal->j_inode)
1160 1160 iput(journal->j_inode);
1161 1161 if (journal->j_revoke)
1162   - journal_destroy_revoke(journal);
  1162 + jbd2_journal_destroy_revoke(journal);
1163 1163 kfree(journal->j_wbuf);
1164 1164 kfree(journal);
1165 1165 }
1166 1166  
1167 1167  
1168 1168 /**
1169   - *int journal_check_used_features () - Check if features specified are used.
  1169 + *int jbd2_journal_check_used_features () - Check if features specified are used.
1170 1170 * @journal: Journal to check.
1171 1171 * @compat: bitmask of compatible features
1172 1172 * @ro: bitmask of features that force read-only mount
... ... @@ -1176,7 +1176,7 @@
1176 1176 * features. Return true (non-zero) if it does.
1177 1177 **/
1178 1178  
1179   -int journal_check_used_features (journal_t *journal, unsigned long compat,
  1179 +int jbd2_journal_check_used_features (journal_t *journal, unsigned long compat,
1180 1180 unsigned long ro, unsigned long incompat)
1181 1181 {
1182 1182 journal_superblock_t *sb;
... ... @@ -1197,7 +1197,7 @@
1197 1197 }
1198 1198  
1199 1199 /**
1200   - * int journal_check_available_features() - Check feature set in journalling layer
  1200 + * int jbd2_journal_check_available_features() - Check feature set in journalling layer
1201 1201 * @journal: Journal to check.
1202 1202 * @compat: bitmask of compatible features
1203 1203 * @ro: bitmask of features that force read-only mount
... ... @@ -1207,7 +1207,7 @@
1207 1207 * all of a given set of features on this journal. Return true
1208 1208 * (non-zero) if it can. */
1209 1209  
1210   -int journal_check_available_features (journal_t *journal, unsigned long compat,
  1210 +int jbd2_journal_check_available_features (journal_t *journal, unsigned long compat,
1211 1211 unsigned long ro, unsigned long incompat)
1212 1212 {
1213 1213 journal_superblock_t *sb;
1214 1214  
... ... @@ -1224,16 +1224,16 @@
1224 1224 if (journal->j_format_version != 2)
1225 1225 return 0;
1226 1226  
1227   - if ((compat & JFS_KNOWN_COMPAT_FEATURES) == compat &&
1228   - (ro & JFS_KNOWN_ROCOMPAT_FEATURES) == ro &&
1229   - (incompat & JFS_KNOWN_INCOMPAT_FEATURES) == incompat)
  1227 + if ((compat & JBD2_KNOWN_COMPAT_FEATURES) == compat &&
  1228 + (ro & JBD2_KNOWN_ROCOMPAT_FEATURES) == ro &&
  1229 + (incompat & JBD2_KNOWN_INCOMPAT_FEATURES) == incompat)
1230 1230 return 1;
1231 1231  
1232 1232 return 0;
1233 1233 }
1234 1234  
1235 1235 /**
1236   - * int journal_set_features () - Mark a given journal feature in the superblock
  1236 + * int jbd2_journal_set_features () - Mark a given journal feature in the superblock
1237 1237 * @journal: Journal to act on.
1238 1238 * @compat: bitmask of compatible features
1239 1239 * @ro: bitmask of features that force read-only mount
1240 1240  
1241 1241  
... ... @@ -1244,15 +1244,15 @@
1244 1244 *
1245 1245 */
1246 1246  
1247   -int journal_set_features (journal_t *journal, unsigned long compat,
  1247 +int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
1248 1248 unsigned long ro, unsigned long incompat)
1249 1249 {
1250 1250 journal_superblock_t *sb;
1251 1251  
1252   - if (journal_check_used_features(journal, compat, ro, incompat))
  1252 + if (jbd2_journal_check_used_features(journal, compat, ro, incompat))
1253 1253 return 1;
1254 1254  
1255   - if (!journal_check_available_features(journal, compat, ro, incompat))
  1255 + if (!jbd2_journal_check_available_features(journal, compat, ro, incompat))
1256 1256 return 0;
1257 1257  
1258 1258 jbd_debug(1, "Setting new features 0x%lx/0x%lx/0x%lx\n",
1259 1259  
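
The three feature helpers above are normally used together at mount time: check what the on-disk journal already uses, check what the running code supports, then set the bit. A hedged sketch, assuming an incompat feature macro such as JBD2_FEATURE_INCOMPAT_REVOKE is available in jbd2.h:

#include <linux/jbd2.h>
#include <linux/errno.h>

/* Sketch of the usual check-then-set pattern (the feature macro is an
 * assumption; substitute whichever JBD2_FEATURE_* bit is needed). */
static int my_fs_enable_feature(journal_t *journal)
{
	unsigned long incompat = JBD2_FEATURE_INCOMPAT_REVOKE;

	if (jbd2_journal_check_used_features(journal, 0, 0, incompat))
		return 0;			/* already recorded on disk */

	if (!jbd2_journal_check_available_features(journal, 0, 0, incompat))
		return -EOPNOTSUPP;		/* this jbd2 cannot provide it */

	jbd2_journal_set_features(journal, 0, 0, incompat);
	return 0;
}
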
... ... @@ -1269,13 +1269,13 @@
1269 1269  
1270 1270  
1271 1271 /**
1272   - * int journal_update_format () - Update on-disk journal structure.
  1272 + * int jbd2_journal_update_format () - Update on-disk journal structure.
1273 1273 * @journal: Journal to act on.
1274 1274 *
1275 1275 * Given an initialised but unloaded journal struct, poke about in the
1276 1276 * on-disk structure to update it to the most recent supported version.
1277 1277 */
1278   -int journal_update_format (journal_t *journal)
  1278 +int jbd2_journal_update_format (journal_t *journal)
1279 1279 {
1280 1280 journal_superblock_t *sb;
1281 1281 int err;
1282 1282  
... ... @@ -1287,9 +1287,9 @@
1287 1287 sb = journal->j_superblock;
1288 1288  
1289 1289 switch (be32_to_cpu(sb->s_header.h_blocktype)) {
1290   - case JFS_SUPERBLOCK_V2:
  1290 + case JBD2_SUPERBLOCK_V2:
1291 1291 return 0;
1292   - case JFS_SUPERBLOCK_V1:
  1292 + case JBD2_SUPERBLOCK_V1:
1293 1293 return journal_convert_superblock_v1(journal, sb);
1294 1294 default:
1295 1295 break;
... ... @@ -1312,7 +1312,7 @@
1312 1312 memset(&sb->s_feature_compat, 0, blocksize-offset);
1313 1313  
1314 1314 sb->s_nr_users = cpu_to_be32(1);
1315   - sb->s_header.h_blocktype = cpu_to_be32(JFS_SUPERBLOCK_V2);
  1315 + sb->s_header.h_blocktype = cpu_to_be32(JBD2_SUPERBLOCK_V2);
1316 1316 journal->j_format_version = 2;
1317 1317  
1318 1318 bh = journal->j_sb_buffer;
... ... @@ -1324,7 +1324,7 @@
1324 1324  
1325 1325  
1326 1326 /**
1327   - * int journal_flush () - Flush journal
  1327 + * int jbd2_journal_flush () - Flush journal
1328 1328 * @journal: Journal to act on.
1329 1329 *
1330 1330 * Flush all data for a given journal to disk and empty the journal.
... ... @@ -1332,7 +1332,7 @@
1332 1332 * recovery does not need to happen on remount.
1333 1333 */
1334 1334  
1335   -int journal_flush(journal_t *journal)
  1335 +int jbd2_journal_flush(journal_t *journal)
1336 1336 {
1337 1337 int err = 0;
1338 1338 transaction_t *transaction = NULL;
... ... @@ -1343,7 +1343,7 @@
1343 1343 /* Force everything buffered to the log... */
1344 1344 if (journal->j_running_transaction) {
1345 1345 transaction = journal->j_running_transaction;
1346   - __log_start_commit(journal, transaction->t_tid);
  1346 + __jbd2_log_start_commit(journal, transaction->t_tid);
1347 1347 } else if (journal->j_committing_transaction)
1348 1348 transaction = journal->j_committing_transaction;
1349 1349  
... ... @@ -1352,7 +1352,7 @@
1352 1352 tid_t tid = transaction->t_tid;
1353 1353  
1354 1354 spin_unlock(&journal->j_state_lock);
1355   - log_wait_commit(journal, tid);
  1355 + jbd2_log_wait_commit(journal, tid);
1356 1356 } else {
1357 1357 spin_unlock(&journal->j_state_lock);
1358 1358 }
1359 1359  
... ... @@ -1361,11 +1361,11 @@
1361 1361 spin_lock(&journal->j_list_lock);
1362 1362 while (!err && journal->j_checkpoint_transactions != NULL) {
1363 1363 spin_unlock(&journal->j_list_lock);
1364   - err = log_do_checkpoint(journal);
  1364 + err = jbd2_log_do_checkpoint(journal);
1365 1365 spin_lock(&journal->j_list_lock);
1366 1366 }
1367 1367 spin_unlock(&journal->j_list_lock);
1368   - cleanup_journal_tail(journal);
  1368 + jbd2_cleanup_journal_tail(journal);
1369 1369  
1370 1370 /* Finally, mark the journal as really needing no recovery.
1371 1371 * This sets s_start==0 in the underlying superblock, which is
... ... @@ -1376,7 +1376,7 @@
1376 1376 old_tail = journal->j_tail;
1377 1377 journal->j_tail = 0;
1378 1378 spin_unlock(&journal->j_state_lock);
1379   - journal_update_superblock(journal, 1);
  1379 + jbd2_journal_update_superblock(journal, 1);
1380 1380 spin_lock(&journal->j_state_lock);
1381 1381 journal->j_tail = old_tail;
1382 1382  
1383 1383  
1384 1384  
1385 1385  
... ... @@ -1390,24 +1390,24 @@
1390 1390 }
1391 1391  
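jbd2_journal_flush() empties the log so no recovery is needed on the next mount; callers normally block new handles around it. A hedged sketch of such a caller (the wrapper name is invented, and jbd2_journal_lock_updates()/jbd2_journal_unlock_updates() are renamed later in this same patch):

#include <linux/jbd2.h>

/* Hypothetical caller: quiesce the journal, push everything back into
 * the main filesystem, and leave the log empty. */
static int example_quiesce_journal(journal_t *journal)
{
        int err;

        jbd2_journal_lock_updates(journal);     /* barrier against new handles */
        err = jbd2_journal_flush(journal);      /* commit, checkpoint, reset tail */
        jbd2_journal_unlock_updates(journal);
        return err;
}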
1392 1392 /**
1393   - * int journal_wipe() - Wipe journal contents
  1393 + * int jbd2_journal_wipe() - Wipe journal contents
1394 1394 * @journal: Journal to act on.
1395 1395 * @write: flag (see below)
1396 1396 *
1397 1397 * Wipe out all of the contents of a journal, safely. This will produce
1398 1398 * a warning if the journal contains any valid recovery information.
1399   - * Must be called between journal_init_*() and journal_load().
  1399 + * Must be called between journal_init_*() and jbd2_journal_load().
1400 1400 *
1401 1401 * If 'write' is non-zero, then we wipe out the journal on disk; otherwise
1402 1402 * we merely suppress recovery.
1403 1403 */
1404 1404  
1405   -int journal_wipe(journal_t *journal, int write)
  1405 +int jbd2_journal_wipe(journal_t *journal, int write)
1406 1406 {
1407 1407 journal_superblock_t *sb;
1408 1408 int err = 0;
1409 1409  
1410   - J_ASSERT (!(journal->j_flags & JFS_LOADED));
  1410 + J_ASSERT (!(journal->j_flags & JBD2_LOADED));
1411 1411  
1412 1412 err = load_superblock(journal);
1413 1413 if (err)
1414 1414  
... ... @@ -1421,9 +1421,9 @@
1421 1421 printk (KERN_WARNING "JBD: %s recovery information on journal\n",
1422 1422 write ? "Clearing" : "Ignoring");
1423 1423  
1424   - err = journal_skip_recovery(journal);
  1424 + err = jbd2_journal_skip_recovery(journal);
1425 1425 if (write)
1426   - journal_update_superblock(journal, 1);
  1426 + jbd2_journal_update_superblock(journal, 1);
1427 1427  
1428 1428 no_recovery:
1429 1429 return err;
1430 1430  
1431 1431  
1432 1432  
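A small sketch of the ordering constraint stated above, assuming the journal has already been set up with one of the journal_init_*() helpers; the wrapper itself is illustrative only:

#include <linux/jbd2.h>

/* Sketch only: discard any stale log instead of replaying it, then load
 * the (now empty) journal.  write == 1 clears the log on disk as well. */
static int example_discard_and_load(journal_t *journal)
{
        int err;

        err = jbd2_journal_wipe(journal, 1);    /* between init and load */
        if (err)
                return err;
        return jbd2_journal_load(journal);
}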
... ... @@ -1459,22 +1459,22 @@
1459 1459 * Aborts hard --- we mark the abort as occurred, but do _nothing_ else,
1460 1460 * and don't attempt to make any other journal updates.
1461 1461 */
1462   -void __journal_abort_hard(journal_t *journal)
  1462 +void __jbd2_journal_abort_hard(journal_t *journal)
1463 1463 {
1464 1464 transaction_t *transaction;
1465 1465 char b[BDEVNAME_SIZE];
1466 1466  
1467   - if (journal->j_flags & JFS_ABORT)
  1467 + if (journal->j_flags & JBD2_ABORT)
1468 1468 return;
1469 1469  
1470 1470 printk(KERN_ERR "Aborting journal on device %s.\n",
1471 1471 journal_dev_name(journal, b));
1472 1472  
1473 1473 spin_lock(&journal->j_state_lock);
1474   - journal->j_flags |= JFS_ABORT;
  1474 + journal->j_flags |= JBD2_ABORT;
1475 1475 transaction = journal->j_running_transaction;
1476 1476 if (transaction)
1477   - __log_start_commit(journal, transaction->t_tid);
  1477 + __jbd2_log_start_commit(journal, transaction->t_tid);
1478 1478 spin_unlock(&journal->j_state_lock);
1479 1479 }
1480 1480  
1481 1481  
1482 1482  
1483 1483  
... ... @@ -1482,20 +1482,20 @@
1482 1482 * but don't do any other IO. */
1483 1483 static void __journal_abort_soft (journal_t *journal, int errno)
1484 1484 {
1485   - if (journal->j_flags & JFS_ABORT)
  1485 + if (journal->j_flags & JBD2_ABORT)
1486 1486 return;
1487 1487  
1488 1488 if (!journal->j_errno)
1489 1489 journal->j_errno = errno;
1490 1490  
1491   - __journal_abort_hard(journal);
  1491 + __jbd2_journal_abort_hard(journal);
1492 1492  
1493 1493 if (errno)
1494   - journal_update_superblock(journal, 1);
  1494 + jbd2_journal_update_superblock(journal, 1);
1495 1495 }
1496 1496  
1497 1497 /**
1498   - * void journal_abort () - Shutdown the journal immediately.
  1498 + * void jbd2_journal_abort () - Shutdown the journal immediately.
1499 1499 * @journal: the journal to shutdown.
1500 1500 * @errno: an error number to record in the journal indicating
1501 1501 * the reason for the shutdown.
... ... @@ -1504,7 +1504,7 @@
1504 1504 * journal (not of a single transaction). This operation cannot be
1505 1505 * undone without closing and reopening the journal.
1506 1506 *
1507   - * The journal_abort function is intended to support higher level error
  1507 + * The jbd2_journal_abort function is intended to support higher level error
1508 1508 * recovery mechanisms such as the ext2/ext3 remount-readonly error
1509 1509 * mode.
1510 1510 *
1511 1511  
1512 1512  
... ... @@ -1520,13 +1520,13 @@
1520 1520 *
1521 1521 * Any attempt to get a new transaction handle on a journal which is in
1522 1522 * ABORT state will just result in an -EROFS error return. A
1523   - * journal_stop on an existing handle will return -EIO if we have
  1523 + * jbd2_journal_stop on an existing handle will return -EIO if we have
1524 1524 * entered abort state during the update.
1525 1525 *
1526 1526 * Recursive transactions are not disturbed by journal abort until the
1527   - * final journal_stop, which will receive the -EIO error.
  1527 + * final jbd2_journal_stop, which will receive the -EIO error.
1528 1528 *
1529   - * Finally, the journal_abort call allows the caller to supply an errno
  1529 + * Finally, the jbd2_journal_abort call allows the caller to supply an errno
1530 1530 * which will be recorded (if possible) in the journal superblock. This
1531 1531 * allows a client to record failure conditions in the middle of a
1532 1532 * transaction without having to complete the transaction to record the
1533 1533  
1534 1534  
1535 1535  
1536 1536  
... ... @@ -1540,28 +1540,28 @@
1540 1540 *
1541 1541 */
1542 1542  
1543   -void journal_abort(journal_t *journal, int errno)
  1543 +void jbd2_journal_abort(journal_t *journal, int errno)
1544 1544 {
1545 1545 __journal_abort_soft(journal, errno);
1546 1546 }
1547 1547  
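As a hedged illustration of the error-recovery pattern this comment describes (the filesystem-specific remount-read-only step is only indicated by a comment, and the helper name is invented):

#include <linux/jbd2.h>

/* Illustrative error path: record the error in the journal superblock and
 * stop further updates; the filesystem would then go read-only itself. */
static void example_handle_fatal_error(journal_t *journal, int errno)
{
        if (is_journal_aborted(journal))
                return;                         /* already shut down */

        jbd2_journal_abort(journal, errno);     /* records errno if possible */
        /* ... switch the filesystem to read-only mode here ... */
}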
1548 1548 /**
1549   - * int journal_errno () - returns the journal's error state.
  1549 + * int jbd2_journal_errno () - returns the journal's error state.
1550 1550 * @journal: journal to examine.
1551 1551 *
1552   - * This is the errno number set with journal_abort(), the last
  1552 + * This is the errno number set with jbd2_journal_abort(), the last
1553 1553 * time the journal was mounted - if the journal was stopped
1554 1554 * without calling abort this will be 0.
1555 1555 *
1556 1556 * If the journal has been aborted on this mount time -EROFS will
1557 1557 * be returned.
1558 1558 */
1559   -int journal_errno(journal_t *journal)
  1559 +int jbd2_journal_errno(journal_t *journal)
1560 1560 {
1561 1561 int err;
1562 1562  
1563 1563 spin_lock(&journal->j_state_lock);
1564   - if (journal->j_flags & JFS_ABORT)
  1564 + if (journal->j_flags & JBD2_ABORT)
1565 1565 err = -EROFS;
1566 1566 else
1567 1567 err = journal->j_errno;
1568 1568  
1569 1569  
... ... @@ -1570,18 +1570,18 @@
1570 1570 }
1571 1571  
1572 1572 /**
1573   - * int journal_clear_err () - clears the journal's error state
  1573 + * int jbd2_journal_clear_err () - clears the journal's error state
1574 1574 * @journal: journal to act on.
1575 1575 *
1576 1576 * An error must be cleared or Acked to take a FS out of readonly
1577 1577 * mode.
1578 1578 */
1579   -int journal_clear_err(journal_t *journal)
  1579 +int jbd2_journal_clear_err(journal_t *journal)
1580 1580 {
1581 1581 int err = 0;
1582 1582  
1583 1583 spin_lock(&journal->j_state_lock);
1584   - if (journal->j_flags & JFS_ABORT)
  1584 + if (journal->j_flags & JBD2_ABORT)
1585 1585 err = -EROFS;
1586 1586 else
1587 1587 journal->j_errno = 0;
1588 1588  
1589 1589  
1590 1590  
... ... @@ -1590,21 +1590,21 @@
1590 1590 }
1591 1591  
1592 1592 /**
1593   - * void journal_ack_err() - Ack journal err.
  1593 + * void jbd2_journal_ack_err() - Ack journal err.
1594 1594 * @journal: journal to act on.
1595 1595 *
1596 1596 * An error must be cleared or Acked to take a FS out of readonly
1597 1597 * mode.
1598 1598 */
1599   -void journal_ack_err(journal_t *journal)
  1599 +void jbd2_journal_ack_err(journal_t *journal)
1600 1600 {
1601 1601 spin_lock(&journal->j_state_lock);
1602 1602 if (journal->j_errno)
1603   - journal->j_flags |= JFS_ACK_ERR;
  1603 + journal->j_flags |= JBD2_ACK_ERR;
1604 1604 spin_unlock(&journal->j_state_lock);
1605 1605 }
1606 1606  
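The three helpers above combine into the usual read-only-recovery sequence. A sketch, with the policy decisions left to the caller; note that jbd2_journal_ack_err() only has an effect while j_errno is still set, so it is called before the clear:

#include <linux/jbd2.h>

/* Sketch: inspect and clear a recorded journal error, e.g. before letting
 * an administrator remount read-write after repairing the device. */
static int example_clear_journal_error(journal_t *journal)
{
        int err = jbd2_journal_errno(journal);

        if (err == -EROFS)              /* aborted on this mount: give up */
                return err;
        if (err) {
                jbd2_journal_ack_err(journal);   /* ack while j_errno is set */
                jbd2_journal_clear_err(journal); /* then forget the errno */
        }
        return 0;
}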
1607   -int journal_blocks_per_page(struct inode *inode)
  1607 +int jbd2_journal_blocks_per_page(struct inode *inode)
1608 1608 {
1609 1609 return 1 << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
1610 1610 }
... ... @@ -1613,7 +1613,7 @@
1613 1613 * Simple support for retrying memory allocations. Introduced to help to
1614 1614 * debug different VM deadlock avoidance strategies.
1615 1615 */
1616   -void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry)
  1616 +void * __jbd2_kmalloc (const char *where, size_t size, gfp_t flags, int retry)
1617 1617 {
1618 1618 return kmalloc(size, flags | (retry ? __GFP_NOFAIL : 0));
1619 1619 }
... ... @@ -1634,7 +1634,7 @@
1634 1634 "jbd_1k", "jbd_2k", "jbd_4k", NULL, "jbd_8k"
1635 1635 };
1636 1636  
1637   -static void journal_destroy_jbd_slabs(void)
  1637 +static void jbd2_journal_destroy_jbd_slabs(void)
1638 1638 {
1639 1639 int i;
1640 1640  
... ... @@ -1645,7 +1645,7 @@
1645 1645 }
1646 1646 }
1647 1647  
1648   -static int journal_create_jbd_slab(size_t slab_size)
  1648 +static int jbd2_journal_create_jbd_slab(size_t slab_size)
1649 1649 {
1650 1650 int i = JBD_SLAB_INDEX(slab_size);
1651 1651  
... ... @@ -1671,7 +1671,7 @@
1671 1671 return 0;
1672 1672 }
1673 1673  
1674   -void * jbd_slab_alloc(size_t size, gfp_t flags)
  1674 +void * jbd2_slab_alloc(size_t size, gfp_t flags)
1675 1675 {
1676 1676 int idx;
1677 1677  
... ... @@ -1680,7 +1680,7 @@
1680 1680 return kmem_cache_alloc(jbd_slab[idx], flags | __GFP_NOFAIL);
1681 1681 }
1682 1682  
1683   -void jbd_slab_free(void *ptr, size_t size)
  1683 +void jbd2_slab_free(void *ptr, size_t size)
1684 1684 {
1685 1685 int idx;
1686 1686  
1687 1687  
1688 1688  
1689 1689  
1690 1690  
1691 1691  
... ... @@ -1692,35 +1692,35 @@
1692 1692 /*
1693 1693 * Journal_head storage management
1694 1694 */
1695   -static kmem_cache_t *journal_head_cache;
  1695 +static kmem_cache_t *jbd2_journal_head_cache;
1696 1696 #ifdef CONFIG_JBD_DEBUG
1697 1697 static atomic_t nr_journal_heads = ATOMIC_INIT(0);
1698 1698 #endif
1699 1699  
1700   -static int journal_init_journal_head_cache(void)
  1700 +static int journal_init_jbd2_journal_head_cache(void)
1701 1701 {
1702 1702 int retval;
1703 1703  
1704   - J_ASSERT(journal_head_cache == 0);
1705   - journal_head_cache = kmem_cache_create("journal_head",
  1704 + J_ASSERT(jbd2_journal_head_cache == 0);
  1705 + jbd2_journal_head_cache = kmem_cache_create("journal_head",
1706 1706 sizeof(struct journal_head),
1707 1707 0, /* offset */
1708 1708 0, /* flags */
1709 1709 NULL, /* ctor */
1710 1710 NULL); /* dtor */
1711 1711 retval = 0;
1712   - if (journal_head_cache == 0) {
  1712 + if (jbd2_journal_head_cache == 0) {
1713 1713 retval = -ENOMEM;
1714 1714 printk(KERN_EMERG "JBD: no memory for journal_head cache\n");
1715 1715 }
1716 1716 return retval;
1717 1717 }
1718 1718  
1719   -static void journal_destroy_journal_head_cache(void)
  1719 +static void jbd2_journal_destroy_jbd2_journal_head_cache(void)
1720 1720 {
1721   - J_ASSERT(journal_head_cache != NULL);
1722   - kmem_cache_destroy(journal_head_cache);
1723   - journal_head_cache = NULL;
  1721 + J_ASSERT(jbd2_journal_head_cache != NULL);
  1722 + kmem_cache_destroy(jbd2_journal_head_cache);
  1723 + jbd2_journal_head_cache = NULL;
1724 1724 }
1725 1725  
1726 1726 /*
... ... @@ -1734,7 +1734,7 @@
1734 1734 #ifdef CONFIG_JBD_DEBUG
1735 1735 atomic_inc(&nr_journal_heads);
1736 1736 #endif
1737   - ret = kmem_cache_alloc(journal_head_cache, GFP_NOFS);
  1737 + ret = kmem_cache_alloc(jbd2_journal_head_cache, GFP_NOFS);
1738 1738 if (ret == 0) {
1739 1739 jbd_debug(1, "out of memory for journal_head\n");
1740 1740 if (time_after(jiffies, last_warning + 5*HZ)) {
... ... @@ -1744,7 +1744,7 @@
1744 1744 }
1745 1745 while (ret == 0) {
1746 1746 yield();
1747   - ret = kmem_cache_alloc(journal_head_cache, GFP_NOFS);
  1747 + ret = kmem_cache_alloc(jbd2_journal_head_cache, GFP_NOFS);
1748 1748 }
1749 1749 }
1750 1750 return ret;
... ... @@ -1756,7 +1756,7 @@
1756 1756 atomic_dec(&nr_journal_heads);
1757 1757 memset(jh, JBD_POISON_FREE, sizeof(*jh));
1758 1758 #endif
1759   - kmem_cache_free(journal_head_cache, jh);
  1759 + kmem_cache_free(jbd2_journal_head_cache, jh);
1760 1760 }
1761 1761  
1762 1762 /*
1763 1763  
1764 1764  
1765 1765  
1766 1766  
... ... @@ -1775,22 +1775,22 @@
1775 1775 *
1776 1776 * A journal_head may be detached from its buffer_head when the journal_head's
1777 1777 * b_transaction, b_cp_transaction and b_next_transaction pointers are NULL.
1778   - * Various places in JBD call journal_remove_journal_head() to indicate that the
  1778 + * Various places in JBD call jbd2_journal_remove_journal_head() to indicate that the
1779 1779 * journal_head can be dropped if needed.
1780 1780 *
1781 1781 * Various places in the kernel want to attach a journal_head to a buffer_head
1782 1782 * _before_ attaching the journal_head to a transaction. To protect the
1783   - * journal_head in this situation, journal_add_journal_head elevates the
  1783 + * journal_head in this situation, jbd2_journal_add_journal_head elevates the
1784 1784 * journal_head's b_jcount refcount by one. The caller must call
1785   - * journal_put_journal_head() to undo this.
  1785 + * jbd2_journal_put_journal_head() to undo this.
1786 1786 *
1787 1787 * So the typical usage would be:
1788 1788 *
1789 1789 * (Attach a journal_head if needed. Increments b_jcount)
1790   - * struct journal_head *jh = journal_add_journal_head(bh);
  1790 + * struct journal_head *jh = jbd2_journal_add_journal_head(bh);
1791 1791 * ...
1792 1792 * jh->b_transaction = xxx;
1793   - * journal_put_journal_head(jh);
  1793 + * jbd2_journal_put_journal_head(jh);
1794 1794 *
1795 1795 * Now, the journal_head's b_jcount is zero, but it is safe from being released
1796 1796 * because it has a non-zero b_transaction.
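For completeness, the read side of this refcounting scheme can be sketched as follows; the helper and the racy b_transaction peek are illustrative, not part of this patch:

#include <linux/jbd2.h>

/* Illustrative: take a temporary b_jcount reference on bh's journal_head,
 * if it has one, peek at it, and drop the reference again. */
static int example_buffer_is_journaled(struct buffer_head *bh)
{
        struct journal_head *jh;
        int ret = 0;

        jh = jbd2_journal_grab_journal_head(bh);        /* NULL if none attached */
        if (jh) {
                ret = (jh->b_transaction != NULL);      /* racy peek, sketch only */
                jbd2_journal_put_journal_head(jh);      /* drop the reference */
        }
        return ret;
}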
... ... @@ -1802,7 +1802,7 @@
1802 1802 * Doesn't need the journal lock.
1803 1803 * May sleep.
1804 1804 */
1805   -struct journal_head *journal_add_journal_head(struct buffer_head *bh)
  1805 +struct journal_head *jbd2_journal_add_journal_head(struct buffer_head *bh)
1806 1806 {
1807 1807 struct journal_head *jh;
1808 1808 struct journal_head *new_jh = NULL;
... ... @@ -1845,7 +1845,7 @@
1845 1845 * Grab a ref against this buffer_head's journal_head. If it ended up not
1846 1846 * having a journal_head, return NULL
1847 1847 */
1848   -struct journal_head *journal_grab_journal_head(struct buffer_head *bh)
  1848 +struct journal_head *jbd2_journal_grab_journal_head(struct buffer_head *bh)
1849 1849 {
1850 1850 struct journal_head *jh = NULL;
1851 1851  
1852 1852  
... ... @@ -1877,13 +1877,13 @@
1877 1877 printk(KERN_WARNING "%s: freeing "
1878 1878 "b_frozen_data\n",
1879 1879 __FUNCTION__);
1880   - jbd_slab_free(jh->b_frozen_data, bh->b_size);
  1880 + jbd2_slab_free(jh->b_frozen_data, bh->b_size);
1881 1881 }
1882 1882 if (jh->b_committed_data) {
1883 1883 printk(KERN_WARNING "%s: freeing "
1884 1884 "b_committed_data\n",
1885 1885 __FUNCTION__);
1886   - jbd_slab_free(jh->b_committed_data, bh->b_size);
  1886 + jbd2_slab_free(jh->b_committed_data, bh->b_size);
1887 1887 }
1888 1888 bh->b_private = NULL;
1889 1889 jh->b_bh = NULL; /* debug, really */
... ... @@ -1897,7 +1897,7 @@
1897 1897 }
1898 1898  
1899 1899 /*
1900   - * journal_remove_journal_head(): if the buffer isn't attached to a transaction
  1900 + * jbd2_journal_remove_journal_head(): if the buffer isn't attached to a transaction
1901 1901 * and has a zero b_jcount then remove and release its journal_head. If we did
1902 1902 * see that the buffer is not used by any transaction we also "logically"
1903 1903 * decrement ->b_count.
1904 1904  
... ... @@ -1905,11 +1905,11 @@
1905 1905 * We in fact take an additional increment on ->b_count as a convenience,
1906 1906 * because the caller usually wants to do additional things with the bh
1907 1907 * after calling here.
1908   - * The caller of journal_remove_journal_head() *must* run __brelse(bh) at some
  1908 + * The caller of jbd2_journal_remove_journal_head() *must* run __brelse(bh) at some
1909 1909 * time. Once the caller has run __brelse(), the buffer is eligible for
1910 1910 * reaping by try_to_free_buffers().
1911 1911 */
1912   -void journal_remove_journal_head(struct buffer_head *bh)
  1912 +void jbd2_journal_remove_journal_head(struct buffer_head *bh)
1913 1913 {
1914 1914 jbd_lock_bh_journal_head(bh);
1915 1915 __journal_remove_journal_head(bh);
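The __brelse() obligation above, made concrete in a tiny illustrative fragment (it assumes the caller already knows the journal_head is detached from all transactions and carries no b_jcount references):

#include <linux/jbd2.h>

/* Illustrative: detach the journal_head, then honour the contract by
 * dropping the b_count reference that the remove logically handed back. */
static void example_drop_journal_head(struct buffer_head *bh)
{
        jbd2_journal_remove_journal_head(bh);
        __brelse(bh);   /* required: lets try_to_free_buffers() reap bh */
}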
... ... @@ -1920,7 +1920,7 @@
1920 1920 * Drop a reference on the passed journal_head. If it fell to zero then try to
1921 1921 * release the journal_head from the buffer_head.
1922 1922 */
1923   -void journal_put_journal_head(struct journal_head *jh)
  1923 +void jbd2_journal_put_journal_head(struct journal_head *jh)
1924 1924 {
1925 1925 struct buffer_head *bh = jh2bh(jh);
1926 1926  
... ... @@ -1938,8 +1938,8 @@
1938 1938 * /proc tunables
1939 1939 */
1940 1940 #if defined(CONFIG_JBD_DEBUG)
1941   -int journal_enable_debug;
1942   -EXPORT_SYMBOL(journal_enable_debug);
  1941 +int jbd2_journal_enable_debug;
  1942 +EXPORT_SYMBOL(jbd2_journal_enable_debug);
1943 1943 #endif
1944 1944  
1945 1945 #if defined(CONFIG_JBD_DEBUG) && defined(CONFIG_PROC_FS)
... ... @@ -1951,7 +1951,7 @@
1951 1951 {
1952 1952 int ret;
1953 1953  
1954   - ret = sprintf(page + off, "%d\n", journal_enable_debug);
  1954 + ret = sprintf(page + off, "%d\n", jbd2_journal_enable_debug);
1955 1955 *eof = 1;
1956 1956 return ret;
1957 1957 }
1958 1958  
... ... @@ -1966,11 +1966,11 @@
1966 1966 if (copy_from_user(buf, buffer, count))
1967 1967 return -EFAULT;
1968 1968 buf[ARRAY_SIZE(buf) - 1] = '\0';
1969   - journal_enable_debug = simple_strtoul(buf, NULL, 10);
  1969 + jbd2_journal_enable_debug = simple_strtoul(buf, NULL, 10);
1970 1970 return count;
1971 1971 }
1972 1972  
1973   -#define JBD_PROC_NAME "sys/fs/jbd-debug"
  1973 +#define JBD_PROC_NAME "sys/fs/jbd2-debug"
1974 1974  
1975 1975 static void __init create_jbd_proc_entry(void)
1976 1976 {
... ... @@ -1982,7 +1982,7 @@
1982 1982 }
1983 1983 }
1984 1984  
1985   -static void __exit remove_jbd_proc_entry(void)
  1985 +static void __exit jbd2_remove_jbd_proc_entry(void)
1986 1986 {
1987 1987 if (proc_jbd_debug)
1988 1988 remove_proc_entry(JBD_PROC_NAME, NULL);
1989 1989  
1990 1990  
1991 1991  
1992 1992  
1993 1993  
... ... @@ -1991,31 +1991,31 @@
1991 1991 #else
1992 1992  
1993 1993 #define create_jbd_proc_entry() do {} while (0)
1994   -#define remove_jbd_proc_entry() do {} while (0)
  1994 +#define jbd2_remove_jbd_proc_entry() do {} while (0)
1995 1995  
1996 1996 #endif
1997 1997  
1998   -kmem_cache_t *jbd_handle_cache;
  1998 +kmem_cache_t *jbd2_handle_cache;
1999 1999  
2000 2000 static int __init journal_init_handle_cache(void)
2001 2001 {
2002   - jbd_handle_cache = kmem_cache_create("journal_handle",
  2002 + jbd2_handle_cache = kmem_cache_create("journal_handle",
2003 2003 sizeof(handle_t),
2004 2004 0, /* offset */
2005 2005 0, /* flags */
2006 2006 NULL, /* ctor */
2007 2007 NULL); /* dtor */
2008   - if (jbd_handle_cache == NULL) {
  2008 + if (jbd2_handle_cache == NULL) {
2009 2009 printk(KERN_EMERG "JBD: failed to create handle cache\n");
2010 2010 return -ENOMEM;
2011 2011 }
2012 2012 return 0;
2013 2013 }
2014 2014  
2015   -static void journal_destroy_handle_cache(void)
  2015 +static void jbd2_journal_destroy_handle_cache(void)
2016 2016 {
2017   - if (jbd_handle_cache)
2018   - kmem_cache_destroy(jbd_handle_cache);
  2017 + if (jbd2_handle_cache)
  2018 + kmem_cache_destroy(jbd2_handle_cache);
2019 2019 }
2020 2020  
2021 2021 /*
2022 2022  
2023 2023  
2024 2024  
... ... @@ -2026,20 +2026,20 @@
2026 2026 {
2027 2027 int ret;
2028 2028  
2029   - ret = journal_init_revoke_caches();
  2029 + ret = jbd2_journal_init_revoke_caches();
2030 2030 if (ret == 0)
2031   - ret = journal_init_journal_head_cache();
  2031 + ret = journal_init_jbd2_journal_head_cache();
2032 2032 if (ret == 0)
2033 2033 ret = journal_init_handle_cache();
2034 2034 return ret;
2035 2035 }
2036 2036  
2037   -static void journal_destroy_caches(void)
  2037 +static void jbd2_journal_destroy_caches(void)
2038 2038 {
2039   - journal_destroy_revoke_caches();
2040   - journal_destroy_journal_head_cache();
2041   - journal_destroy_handle_cache();
2042   - journal_destroy_jbd_slabs();
  2039 + jbd2_journal_destroy_revoke_caches();
  2040 + jbd2_journal_destroy_jbd2_journal_head_cache();
  2041 + jbd2_journal_destroy_handle_cache();
  2042 + jbd2_journal_destroy_jbd_slabs();
2043 2043 }
2044 2044  
2045 2045 static int __init journal_init(void)
... ... @@ -2050,7 +2050,7 @@
2050 2050  
2051 2051 ret = journal_init_caches();
2052 2052 if (ret != 0)
2053   - journal_destroy_caches();
  2053 + jbd2_journal_destroy_caches();
2054 2054 create_jbd_proc_entry();
2055 2055 return ret;
2056 2056 }
... ... @@ -2062,8 +2062,8 @@
2062 2062 if (n)
2063 2063 printk(KERN_EMERG "JBD: leaked %d journal_heads!\n", n);
2064 2064 #endif
2065   - remove_jbd_proc_entry();
2066   - journal_destroy_caches();
  2065 + jbd2_remove_jbd_proc_entry();
  2066 + jbd2_journal_destroy_caches();
2067 2067 }
2068 2068  
2069 2069 MODULE_LICENSE("GPL");
fs/jbd2/recovery.c
... ... @@ -18,7 +18,7 @@
18 18 #else
19 19 #include <linux/time.h>
20 20 #include <linux/fs.h>
21   -#include <linux/jbd.h>
  21 +#include <linux/jbd2.h>
22 22 #include <linux/errno.h>
23 23 #include <linux/slab.h>
24 24 #endif
... ... @@ -86,7 +86,7 @@
86 86 nbufs = 0;
87 87  
88 88 for (next = start; next < max; next++) {
89   - err = journal_bmap(journal, next, &blocknr);
  89 + err = jbd2_journal_bmap(journal, next, &blocknr);
90 90  
91 91 if (err) {
92 92 printk (KERN_ERR "JBD: bad block at offset %u\n",
... ... @@ -142,7 +142,7 @@
142 142 return -EIO;
143 143 }
144 144  
145   - err = journal_bmap(journal, offset, &blocknr);
  145 + err = jbd2_journal_bmap(journal, offset, &blocknr);
146 146  
147 147 if (err) {
148 148 printk (KERN_ERR "JBD: bad block at offset %u\n",
149 149  
... ... @@ -191,10 +191,10 @@
191 191  
192 192 nr++;
193 193 tagp += sizeof(journal_block_tag_t);
194   - if (!(tag->t_flags & cpu_to_be32(JFS_FLAG_SAME_UUID)))
  194 + if (!(tag->t_flags & cpu_to_be32(JBD2_FLAG_SAME_UUID)))
195 195 tagp += 16;
196 196  
197   - if (tag->t_flags & cpu_to_be32(JFS_FLAG_LAST_TAG))
  197 + if (tag->t_flags & cpu_to_be32(JBD2_FLAG_LAST_TAG))
198 198 break;
199 199 }
200 200  
... ... @@ -210,7 +210,7 @@
210 210 } while (0)
211 211  
212 212 /**
213   - * journal_recover - recovers an on-disk journal
  213 + * jbd2_journal_recover - recovers an on-disk journal
214 214 * @journal: the journal to recover
215 215 *
216 216 * The primary function for recovering the log contents when mounting a
... ... @@ -221,7 +221,7 @@
221 221 * blocks. In the third and final pass, we replay any un-revoked blocks
222 222 * in the log.
223 223 */
224   -int journal_recover(journal_t *journal)
  224 +int jbd2_journal_recover(journal_t *journal)
225 225 {
226 226 int err;
227 227 journal_superblock_t * sb;
228 228  
... ... @@ -260,13 +260,13 @@
260 260 * any existing commit records in the log. */
261 261 journal->j_transaction_sequence = ++info.end_transaction;
262 262  
263   - journal_clear_revoke(journal);
  263 + jbd2_journal_clear_revoke(journal);
264 264 sync_blockdev(journal->j_fs_dev);
265 265 return err;
266 266 }
267 267  
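In-tree callers normally reach these two entry points indirectly, through jbd2_journal_load() and jbd2_journal_wipe(); the sketch below calls them directly only to show the distinction, and the noload flag is a stand-in for whatever mount policy a client uses:

#include <linux/jbd2.h>

/* Sketch: replay the log on a normal mount, or skip recovery and merely
 * bump the sequence numbers, e.g. for a read-only "noload" style mount. */
static int example_start_journal(journal_t *journal, int noload)
{
        if (noload)
                return jbd2_journal_skip_recovery(journal);
        return jbd2_journal_recover(journal);
}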
268 268 /**
269   - * journal_skip_recovery - Start journal and wipe existing records
  269 + * jbd2_journal_skip_recovery - Start journal and wipe existing records
270 270 * @journal: journal to startup
271 271 *
272 272 * Locate any valid recovery information from the journal and set up the
... ... @@ -278,7 +278,7 @@
278 278 * much recovery information is being erased, and to let us initialise
279 279 * the journal transaction sequence numbers to the next unused ID.
280 280 */
281   -int journal_skip_recovery(journal_t *journal)
  281 +int jbd2_journal_skip_recovery(journal_t *journal)
282 282 {
283 283 int err;
284 284 journal_superblock_t * sb;
... ... @@ -387,7 +387,7 @@
387 387  
388 388 tmp = (journal_header_t *)bh->b_data;
389 389  
390   - if (tmp->h_magic != cpu_to_be32(JFS_MAGIC_NUMBER)) {
  390 + if (tmp->h_magic != cpu_to_be32(JBD2_MAGIC_NUMBER)) {
391 391 brelse(bh);
392 392 break;
393 393 }
... ... @@ -407,7 +407,7 @@
407 407 * to do with it? That depends on the pass... */
408 408  
409 409 switch(blocktype) {
410   - case JFS_DESCRIPTOR_BLOCK:
  410 + case JBD2_DESCRIPTOR_BLOCK:
411 411 /* If it is a valid descriptor block, replay it
412 412 * in pass REPLAY; otherwise, just skip over the
413 413 * blocks it describes. */
... ... @@ -451,7 +451,7 @@
451 451 /* If the block has been
452 452 * revoked, then we're all done
453 453 * here. */
454   - if (journal_test_revoke
  454 + if (jbd2_journal_test_revoke
455 455 (journal, blocknr,
456 456 next_commit_ID)) {
457 457 brelse(obh);
458 458  
... ... @@ -477,9 +477,9 @@
477 477 lock_buffer(nbh);
478 478 memcpy(nbh->b_data, obh->b_data,
479 479 journal->j_blocksize);
480   - if (flags & JFS_FLAG_ESCAPE) {
  480 + if (flags & JBD2_FLAG_ESCAPE) {
481 481 *((__be32 *)bh->b_data) =
482   - cpu_to_be32(JFS_MAGIC_NUMBER);
  482 + cpu_to_be32(JBD2_MAGIC_NUMBER);
483 483 }
484 484  
485 485 BUFFER_TRACE(nbh, "marking dirty");
486 486  
487 487  
... ... @@ -495,17 +495,17 @@
495 495  
496 496 skip_write:
497 497 tagp += sizeof(journal_block_tag_t);
498   - if (!(flags & JFS_FLAG_SAME_UUID))
  498 + if (!(flags & JBD2_FLAG_SAME_UUID))
499 499 tagp += 16;
500 500  
501   - if (flags & JFS_FLAG_LAST_TAG)
  501 + if (flags & JBD2_FLAG_LAST_TAG)
502 502 break;
503 503 }
504 504  
505 505 brelse(bh);
506 506 continue;
507 507  
508   - case JFS_COMMIT_BLOCK:
  508 + case JBD2_COMMIT_BLOCK:
509 509 /* Found an expected commit block: not much to
510 510 * do other than move on to the next sequence
511 511 * number. */
... ... @@ -513,7 +513,7 @@
513 513 next_commit_ID++;
514 514 continue;
515 515  
516   - case JFS_REVOKE_BLOCK:
  516 + case JBD2_REVOKE_BLOCK:
517 517 /* If we aren't in the REVOKE pass, then we can
518 518 * just skip over this block. */
519 519 if (pass != PASS_REVOKE) {
520 520  
... ... @@ -570,11 +570,11 @@
570 570 static int scan_revoke_records(journal_t *journal, struct buffer_head *bh,
571 571 tid_t sequence, struct recovery_info *info)
572 572 {
573   - journal_revoke_header_t *header;
  573 + jbd2_journal_revoke_header_t *header;
574 574 int offset, max;
575 575  
576   - header = (journal_revoke_header_t *) bh->b_data;
577   - offset = sizeof(journal_revoke_header_t);
  576 + header = (jbd2_journal_revoke_header_t *) bh->b_data;
  577 + offset = sizeof(jbd2_journal_revoke_header_t);
578 578 max = be32_to_cpu(header->r_count);
579 579  
580 580 while (offset < max) {
... ... @@ -583,7 +583,7 @@
583 583  
584 584 blocknr = be32_to_cpu(* ((__be32 *) (bh->b_data+offset)));
585 585 offset += 4;
586   - err = journal_set_revoke(journal, blocknr, sequence);
  586 + err = jbd2_journal_set_revoke(journal, blocknr, sequence);
587 587 if (err)
588 588 return err;
589 589 ++info->nr_revokes;
fs/jbd2/revoke.c
... ... @@ -62,7 +62,7 @@
62 62 #else
63 63 #include <linux/time.h>
64 64 #include <linux/fs.h>
65   -#include <linux/jbd.h>
  65 +#include <linux/jbd2.h>
66 66 #include <linux/errno.h>
67 67 #include <linux/slab.h>
68 68 #include <linux/list.h>
69 69  
... ... @@ -70,14 +70,14 @@
70 70 #include <linux/init.h>
71 71 #endif
72 72  
73   -static kmem_cache_t *revoke_record_cache;
74   -static kmem_cache_t *revoke_table_cache;
  73 +static kmem_cache_t *jbd2_revoke_record_cache;
  74 +static kmem_cache_t *jbd2_revoke_table_cache;
75 75  
76 76 /* Each revoke record represents one single revoked block. During
77 77 journal replay, this involves recording the transaction ID of the
78 78 last transaction to revoke this block. */
79 79  
80   -struct jbd_revoke_record_s
  80 +struct jbd2_revoke_record_s
81 81 {
82 82 struct list_head hash;
83 83 tid_t sequence; /* Used for recovery only */
... ... @@ -86,7 +86,7 @@
86 86  
87 87  
88 88 /* The revoke table is just a simple hash table of revoke records. */
89   -struct jbd_revoke_table_s
  89 +struct jbd2_revoke_table_s
90 90 {
91 91 /* It is conceivable that we might want a larger hash table
92 92 * for recovery. Must be a power of two. */
... ... @@ -99,7 +99,7 @@
99 99 #ifdef __KERNEL__
100 100 static void write_one_revoke_record(journal_t *, transaction_t *,
101 101 struct journal_head **, int *,
102   - struct jbd_revoke_record_s *);
  102 + struct jbd2_revoke_record_s *);
103 103 static void flush_descriptor(journal_t *, struct journal_head *, int);
104 104 #endif
105 105  
... ... @@ -108,7 +108,7 @@
108 108 /* Borrowed from buffer.c: this is a tried and tested block hash function */
109 109 static inline int hash(journal_t *journal, unsigned long block)
110 110 {
111   - struct jbd_revoke_table_s *table = journal->j_revoke;
  111 + struct jbd2_revoke_table_s *table = journal->j_revoke;
112 112 int hash_shift = table->hash_shift;
113 113  
114 114 return ((block << (hash_shift - 6)) ^
115 115  
... ... @@ -120,10 +120,10 @@
120 120 tid_t seq)
121 121 {
122 122 struct list_head *hash_list;
123   - struct jbd_revoke_record_s *record;
  123 + struct jbd2_revoke_record_s *record;
124 124  
125 125 repeat:
126   - record = kmem_cache_alloc(revoke_record_cache, GFP_NOFS);
  126 + record = kmem_cache_alloc(jbd2_revoke_record_cache, GFP_NOFS);
127 127 if (!record)
128 128 goto oom;
129 129  
130 130  
131 131  
132 132  
133 133  
134 134  
135 135  
136 136  
137 137  
138 138  
139 139  
140 140  
... ... @@ -145,57 +145,57 @@
145 145  
146 146 /* Find a revoke record in the journal's hash table. */
147 147  
148   -static struct jbd_revoke_record_s *find_revoke_record(journal_t *journal,
  148 +static struct jbd2_revoke_record_s *find_revoke_record(journal_t *journal,
149 149 unsigned long blocknr)
150 150 {
151 151 struct list_head *hash_list;
152   - struct jbd_revoke_record_s *record;
  152 + struct jbd2_revoke_record_s *record;
153 153  
154 154 hash_list = &journal->j_revoke->hash_table[hash(journal, blocknr)];
155 155  
156 156 spin_lock(&journal->j_revoke_lock);
157   - record = (struct jbd_revoke_record_s *) hash_list->next;
  157 + record = (struct jbd2_revoke_record_s *) hash_list->next;
158 158 while (&(record->hash) != hash_list) {
159 159 if (record->blocknr == blocknr) {
160 160 spin_unlock(&journal->j_revoke_lock);
161 161 return record;
162 162 }
163   - record = (struct jbd_revoke_record_s *) record->hash.next;
  163 + record = (struct jbd2_revoke_record_s *) record->hash.next;
164 164 }
165 165 spin_unlock(&journal->j_revoke_lock);
166 166 return NULL;
167 167 }
168 168  
169   -int __init journal_init_revoke_caches(void)
  169 +int __init jbd2_journal_init_revoke_caches(void)
170 170 {
171   - revoke_record_cache = kmem_cache_create("revoke_record",
172   - sizeof(struct jbd_revoke_record_s),
  171 + jbd2_revoke_record_cache = kmem_cache_create("revoke_record",
  172 + sizeof(struct jbd2_revoke_record_s),
173 173 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
174   - if (revoke_record_cache == 0)
  174 + if (jbd2_revoke_record_cache == 0)
175 175 return -ENOMEM;
176 176  
177   - revoke_table_cache = kmem_cache_create("revoke_table",
178   - sizeof(struct jbd_revoke_table_s),
  177 + jbd2_revoke_table_cache = kmem_cache_create("revoke_table",
  178 + sizeof(struct jbd2_revoke_table_s),
179 179 0, 0, NULL, NULL);
180   - if (revoke_table_cache == 0) {
181   - kmem_cache_destroy(revoke_record_cache);
182   - revoke_record_cache = NULL;
  180 + if (jbd2_revoke_table_cache == 0) {
  181 + kmem_cache_destroy(jbd2_revoke_record_cache);
  182 + jbd2_revoke_record_cache = NULL;
183 183 return -ENOMEM;
184 184 }
185 185 return 0;
186 186 }
187 187  
188   -void journal_destroy_revoke_caches(void)
  188 +void jbd2_journal_destroy_revoke_caches(void)
189 189 {
190   - kmem_cache_destroy(revoke_record_cache);
191   - revoke_record_cache = NULL;
192   - kmem_cache_destroy(revoke_table_cache);
193   - revoke_table_cache = NULL;
  190 + kmem_cache_destroy(jbd2_revoke_record_cache);
  191 + jbd2_revoke_record_cache = NULL;
  192 + kmem_cache_destroy(jbd2_revoke_table_cache);
  193 + jbd2_revoke_table_cache = NULL;
194 194 }
195 195  
196 196 /* Initialise the revoke table for a given journal to a given size. */
197 197  
198   -int journal_init_revoke(journal_t *journal, int hash_size)
  198 +int jbd2_journal_init_revoke(journal_t *journal, int hash_size)
199 199 {
200 200 int shift, tmp;
201 201  
... ... @@ -206,7 +206,7 @@
206 206 while((tmp >>= 1UL) != 0UL)
207 207 shift++;
208 208  
209   - journal->j_revoke_table[0] = kmem_cache_alloc(revoke_table_cache, GFP_KERNEL);
  209 + journal->j_revoke_table[0] = kmem_cache_alloc(jbd2_revoke_table_cache, GFP_KERNEL);
210 210 if (!journal->j_revoke_table[0])
211 211 return -ENOMEM;
212 212 journal->j_revoke = journal->j_revoke_table[0];
... ... @@ -221,7 +221,7 @@
221 221 journal->j_revoke->hash_table =
222 222 kmalloc(hash_size * sizeof(struct list_head), GFP_KERNEL);
223 223 if (!journal->j_revoke->hash_table) {
224   - kmem_cache_free(revoke_table_cache, journal->j_revoke_table[0]);
  224 + kmem_cache_free(jbd2_revoke_table_cache, journal->j_revoke_table[0]);
225 225 journal->j_revoke = NULL;
226 226 return -ENOMEM;
227 227 }
228 228  
... ... @@ -229,10 +229,10 @@
229 229 for (tmp = 0; tmp < hash_size; tmp++)
230 230 INIT_LIST_HEAD(&journal->j_revoke->hash_table[tmp]);
231 231  
232   - journal->j_revoke_table[1] = kmem_cache_alloc(revoke_table_cache, GFP_KERNEL);
  232 + journal->j_revoke_table[1] = kmem_cache_alloc(jbd2_revoke_table_cache, GFP_KERNEL);
233 233 if (!journal->j_revoke_table[1]) {
234 234 kfree(journal->j_revoke_table[0]->hash_table);
235   - kmem_cache_free(revoke_table_cache, journal->j_revoke_table[0]);
  235 + kmem_cache_free(jbd2_revoke_table_cache, journal->j_revoke_table[0]);
236 236 return -ENOMEM;
237 237 }
238 238  
... ... @@ -249,8 +249,8 @@
249 249 kmalloc(hash_size * sizeof(struct list_head), GFP_KERNEL);
250 250 if (!journal->j_revoke->hash_table) {
251 251 kfree(journal->j_revoke_table[0]->hash_table);
252   - kmem_cache_free(revoke_table_cache, journal->j_revoke_table[0]);
253   - kmem_cache_free(revoke_table_cache, journal->j_revoke_table[1]);
  252 + kmem_cache_free(jbd2_revoke_table_cache, journal->j_revoke_table[0]);
  253 + kmem_cache_free(jbd2_revoke_table_cache, journal->j_revoke_table[1]);
254 254 journal->j_revoke = NULL;
255 255 return -ENOMEM;
256 256 }
257 257  
... ... @@ -265,9 +265,9 @@
265 265  
266 266 /* Destroy a journal's revoke table. The table must already be empty! */
267 267  
268   -void journal_destroy_revoke(journal_t *journal)
  268 +void jbd2_journal_destroy_revoke(journal_t *journal)
269 269 {
270   - struct jbd_revoke_table_s *table;
  270 + struct jbd2_revoke_table_s *table;
271 271 struct list_head *hash_list;
272 272 int i;
273 273  
... ... @@ -281,7 +281,7 @@
281 281 }
282 282  
283 283 kfree(table->hash_table);
284   - kmem_cache_free(revoke_table_cache, table);
  284 + kmem_cache_free(jbd2_revoke_table_cache, table);
285 285 journal->j_revoke = NULL;
286 286  
287 287 table = journal->j_revoke_table[1];
... ... @@ -294,7 +294,7 @@
294 294 }
295 295  
296 296 kfree(table->hash_table);
297   - kmem_cache_free(revoke_table_cache, table);
  297 + kmem_cache_free(jbd2_revoke_table_cache, table);
298 298 journal->j_revoke = NULL;
299 299 }
300 300  
... ... @@ -302,7 +302,7 @@
302 302 #ifdef __KERNEL__
303 303  
304 304 /*
305   - * journal_revoke: revoke a given buffer_head from the journal. This
  305 + * jbd2_journal_revoke: revoke a given buffer_head from the journal. This
306 306 * prevents the block from being replayed during recovery if we take a
307 307 * crash after this current transaction commits. Any subsequent
308 308 * metadata writes of the buffer in this transaction cancel the
309 309  
310 310  
... ... @@ -314,18 +314,18 @@
314 314 * revoke before clearing the block bitmap when we are deleting
315 315 * metadata.
316 316 *
317   - * Revoke performs a journal_forget on any buffer_head passed in as a
  317 + * Revoke performs a jbd2_journal_forget on any buffer_head passed in as a
318 318 * parameter, but does _not_ forget the buffer_head if the bh was only
319 319 * found implicitly.
320 320 *
321 321 * bh_in may not be a journalled buffer - it may have come off
322 322 * the hash tables without an attached journal_head.
323 323 *
324   - * If bh_in is non-zero, journal_revoke() will decrement its b_count
  324 + * If bh_in is non-zero, jbd2_journal_revoke() will decrement its b_count
325 325 * by one.
326 326 */
327 327  
328   -int journal_revoke(handle_t *handle, unsigned long blocknr,
  328 +int jbd2_journal_revoke(handle_t *handle, unsigned long blocknr,
329 329 struct buffer_head *bh_in)
330 330 {
331 331 struct buffer_head *bh = NULL;
... ... @@ -338,7 +338,7 @@
338 338 BUFFER_TRACE(bh_in, "enter");
339 339  
340 340 journal = handle->h_transaction->t_journal;
341   - if (!journal_set_features(journal, 0, 0, JFS_FEATURE_INCOMPAT_REVOKE)){
  341 + if (!jbd2_journal_set_features(journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)){
342 342 J_ASSERT (!"Cannot set revoke feature!");
343 343 return -EINVAL;
344 344 }
... ... @@ -386,8 +386,8 @@
386 386 set_buffer_revoked(bh);
387 387 set_buffer_revokevalid(bh);
388 388 if (bh_in) {
389   - BUFFER_TRACE(bh_in, "call journal_forget");
390   - journal_forget(handle, bh_in);
  389 + BUFFER_TRACE(bh_in, "call jbd2_journal_forget");
  390 + jbd2_journal_forget(handle, bh_in);
391 391 } else {
392 392 BUFFER_TRACE(bh, "call brelse");
393 393 __brelse(bh);
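Seen from the client side, the rule above (revoke the block before clearing its bitmap bit) might look like the following hedged sketch; the delete-path details and the helper name are assumptions:

#include <linux/jbd2.h>

/* Illustrative delete path: the block is going away, so make sure a stale
 * copy of it in the log can never be replayed over new contents. */
static int example_free_metadata_block(handle_t *handle,
                                       unsigned long blocknr,
                                       struct buffer_head *bh)
{
        int err;

        err = jbd2_journal_revoke(handle, blocknr, bh); /* also forgets bh */
        if (err)
                return err;
        /* ... clear the block-bitmap bit under the same handle ... */
        return 0;
}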
... ... @@ -403,7 +403,7 @@
403 403  
404 404 /*
405 405 * Cancel an outstanding revoke. For use only internally by the
406   - * journaling code (called from journal_get_write_access).
  406 + * journaling code (called from jbd2_journal_get_write_access).
407 407 *
408 408 * We trust buffer_revoked() on the buffer if the buffer is already
409 409 * being journaled: if there is no revoke pending on the buffer, then we
410 410  
... ... @@ -418,9 +418,9 @@
418 418 *
419 419 * The caller must have the journal locked.
420 420 */
421   -int journal_cancel_revoke(handle_t *handle, struct journal_head *jh)
  421 +int jbd2_journal_cancel_revoke(handle_t *handle, struct journal_head *jh)
422 422 {
423   - struct jbd_revoke_record_s *record;
  423 + struct jbd2_revoke_record_s *record;
424 424 journal_t *journal = handle->h_transaction->t_journal;
425 425 int need_cancel;
426 426 int did_revoke = 0; /* akpm: debug */
... ... @@ -447,7 +447,7 @@
447 447 spin_lock(&journal->j_revoke_lock);
448 448 list_del(&record->hash);
449 449 spin_unlock(&journal->j_revoke_lock);
450   - kmem_cache_free(revoke_record_cache, record);
  450 + kmem_cache_free(jbd2_revoke_record_cache, record);
451 451 did_revoke = 1;
452 452 }
453 453 }
... ... @@ -478,7 +478,7 @@
478 478 * we do not want to suspend any processing until all revokes are
479 479 * written -bzzz
480 480 */
481   -void journal_switch_revoke_table(journal_t *journal)
  481 +void jbd2_journal_switch_revoke_table(journal_t *journal)
482 482 {
483 483 int i;
484 484  
485 485  
... ... @@ -498,12 +498,12 @@
498 498 * Called with the journal lock held.
499 499 */
500 500  
501   -void journal_write_revoke_records(journal_t *journal,
  501 +void jbd2_journal_write_revoke_records(journal_t *journal,
502 502 transaction_t *transaction)
503 503 {
504 504 struct journal_head *descriptor;
505   - struct jbd_revoke_record_s *record;
506   - struct jbd_revoke_table_s *revoke;
  505 + struct jbd2_revoke_record_s *record;
  506 + struct jbd2_revoke_table_s *revoke;
507 507 struct list_head *hash_list;
508 508 int i, offset, count;
509 509  
510 510  
... ... @@ -519,14 +519,14 @@
519 519 hash_list = &revoke->hash_table[i];
520 520  
521 521 while (!list_empty(hash_list)) {
522   - record = (struct jbd_revoke_record_s *)
  522 + record = (struct jbd2_revoke_record_s *)
523 523 hash_list->next;
524 524 write_one_revoke_record(journal, transaction,
525 525 &descriptor, &offset,
526 526 record);
527 527 count++;
528 528 list_del(&record->hash);
529   - kmem_cache_free(revoke_record_cache, record);
  529 + kmem_cache_free(jbd2_revoke_record_cache, record);
530 530 }
531 531 }
532 532 if (descriptor)
... ... @@ -543,7 +543,7 @@
543 543 transaction_t *transaction,
544 544 struct journal_head **descriptorp,
545 545 int *offsetp,
546   - struct jbd_revoke_record_s *record)
  546 + struct jbd2_revoke_record_s *record)
547 547 {
548 548 struct journal_head *descriptor;
549 549 int offset;
... ... @@ -551,7 +551,7 @@
551 551  
552 552 /* If we are already aborting, this all becomes a noop. We
553 553 still need to go round the loop in
554   - journal_write_revoke_records in order to free all of the
  554 + jbd2_journal_write_revoke_records in order to free all of the
555 555 revoke records: only the IO to the journal is omitted. */
556 556 if (is_journal_aborted(journal))
557 557 return;
558 558  
559 559  
560 560  
... ... @@ -568,19 +568,19 @@
568 568 }
569 569  
570 570 if (!descriptor) {
571   - descriptor = journal_get_descriptor_buffer(journal);
  571 + descriptor = jbd2_journal_get_descriptor_buffer(journal);
572 572 if (!descriptor)
573 573 return;
574 574 header = (journal_header_t *) &jh2bh(descriptor)->b_data[0];
575   - header->h_magic = cpu_to_be32(JFS_MAGIC_NUMBER);
576   - header->h_blocktype = cpu_to_be32(JFS_REVOKE_BLOCK);
  575 + header->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER);
  576 + header->h_blocktype = cpu_to_be32(JBD2_REVOKE_BLOCK);
577 577 header->h_sequence = cpu_to_be32(transaction->t_tid);
578 578  
579 579 /* Record it so that we can wait for IO completion later */
580 580 JBUFFER_TRACE(descriptor, "file as BJ_LogCtl");
581   - journal_file_buffer(descriptor, transaction, BJ_LogCtl);
  581 + jbd2_journal_file_buffer(descriptor, transaction, BJ_LogCtl);
582 582  
583   - offset = sizeof(journal_revoke_header_t);
  583 + offset = sizeof(jbd2_journal_revoke_header_t);
584 584 *descriptorp = descriptor;
585 585 }
586 586  
... ... @@ -601,7 +601,7 @@
601 601 struct journal_head *descriptor,
602 602 int offset)
603 603 {
604   - journal_revoke_header_t *header;
  604 + jbd2_journal_revoke_header_t *header;
605 605 struct buffer_head *bh = jh2bh(descriptor);
606 606  
607 607 if (is_journal_aborted(journal)) {
... ... @@ -609,7 +609,7 @@
609 609 return;
610 610 }
611 611  
612   - header = (journal_revoke_header_t *) jh2bh(descriptor)->b_data;
  612 + header = (jbd2_journal_revoke_header_t *) jh2bh(descriptor)->b_data;
613 613 header->r_count = cpu_to_be32(offset);
614 614 set_buffer_jwrite(bh);
615 615 BUFFER_TRACE(bh, "write");
616 616  
... ... @@ -640,11 +640,11 @@
640 640 * single block.
641 641 */
642 642  
643   -int journal_set_revoke(journal_t *journal,
  643 +int jbd2_journal_set_revoke(journal_t *journal,
644 644 unsigned long blocknr,
645 645 tid_t sequence)
646 646 {
647   - struct jbd_revoke_record_s *record;
  647 + struct jbd2_revoke_record_s *record;
648 648  
649 649 record = find_revoke_record(journal, blocknr);
650 650 if (record) {
651 651  
... ... @@ -664,11 +664,11 @@
664 664 * ones, but later transactions still need to be replayed.
665 665 */
666 666  
667   -int journal_test_revoke(journal_t *journal,
  667 +int jbd2_journal_test_revoke(journal_t *journal,
668 668 unsigned long blocknr,
669 669 tid_t sequence)
670 670 {
671   - struct jbd_revoke_record_s *record;
  671 + struct jbd2_revoke_record_s *record;
672 672  
673 673 record = find_revoke_record(journal, blocknr);
674 674 if (!record)
675 675  
676 676  
677 677  
... ... @@ -683,21 +683,21 @@
683 683 * that it can be reused by the running filesystem.
684 684 */
685 685  
686   -void journal_clear_revoke(journal_t *journal)
  686 +void jbd2_journal_clear_revoke(journal_t *journal)
687 687 {
688 688 int i;
689 689 struct list_head *hash_list;
690   - struct jbd_revoke_record_s *record;
691   - struct jbd_revoke_table_s *revoke;
  690 + struct jbd2_revoke_record_s *record;
  691 + struct jbd2_revoke_table_s *revoke;
692 692  
693 693 revoke = journal->j_revoke;
694 694  
695 695 for (i = 0; i < revoke->hash_size; i++) {
696 696 hash_list = &revoke->hash_table[i];
697 697 while (!list_empty(hash_list)) {
698   - record = (struct jbd_revoke_record_s*) hash_list->next;
  698 + record = (struct jbd2_revoke_record_s*) hash_list->next;
699 699 list_del(&record->hash);
700   - kmem_cache_free(revoke_record_cache, record);
  700 + kmem_cache_free(jbd2_revoke_record_cache, record);
701 701 }
702 702 }
703 703 }
fs/jbd2/transaction.c
... ... @@ -19,7 +19,7 @@
19 19  
20 20 #include <linux/time.h>
21 21 #include <linux/fs.h>
22   -#include <linux/jbd.h>
  22 +#include <linux/jbd2.h>
23 23 #include <linux/errno.h>
24 24 #include <linux/slab.h>
25 25 #include <linux/timer.h>
... ... @@ -28,7 +28,7 @@
28 28 #include <linux/highmem.h>
29 29  
30 30 /*
31   - * get_transaction: obtain a new transaction_t object.
  31 + * jbd2_get_transaction: obtain a new transaction_t object.
32 32 *
33 33 * Simply allocate and initialise a new transaction. Create it in
34 34 * RUNNING state and add it to the current journal (which should not
... ... @@ -44,7 +44,7 @@
44 44 */
45 45  
46 46 static transaction_t *
47   -get_transaction(journal_t *journal, transaction_t *transaction)
  47 +jbd2_get_transaction(journal_t *journal, transaction_t *transaction)
48 48 {
49 49 transaction->t_journal = journal;
50 50 transaction->t_state = T_RUNNING;
... ... @@ -115,7 +115,7 @@
115 115 spin_lock(&journal->j_state_lock);
116 116 repeat_locked:
117 117 if (is_journal_aborted(journal) ||
118   - (journal->j_errno != 0 && !(journal->j_flags & JFS_ACK_ERR))) {
  118 + (journal->j_errno != 0 && !(journal->j_flags & JBD2_ACK_ERR))) {
119 119 spin_unlock(&journal->j_state_lock);
120 120 ret = -EROFS;
121 121 goto out;
... ... @@ -134,7 +134,7 @@
134 134 spin_unlock(&journal->j_state_lock);
135 135 goto alloc_transaction;
136 136 }
137   - get_transaction(journal, new_transaction);
  137 + jbd2_get_transaction(journal, new_transaction);
138 138 new_transaction = NULL;
139 139 }
140 140  
... ... @@ -175,7 +175,7 @@
175 175 spin_unlock(&transaction->t_handle_lock);
176 176 prepare_to_wait(&journal->j_wait_transaction_locked, &wait,
177 177 TASK_UNINTERRUPTIBLE);
178   - __log_start_commit(journal, transaction->t_tid);
  178 + __jbd2_log_start_commit(journal, transaction->t_tid);
179 179 spin_unlock(&journal->j_state_lock);
180 180 schedule();
181 181 finish_wait(&journal->j_wait_transaction_locked, &wait);
182 182  
183 183  
... ... @@ -205,12 +205,12 @@
205 205 * committing_transaction->t_outstanding_credits plus "enough" for
206 206 * the log control blocks.
207 207 * Also, this test is inconsistent with the matching one in
208   - * journal_extend().
  208 + * jbd2_journal_extend().
209 209 */
210   - if (__log_space_left(journal) < jbd_space_needed(journal)) {
  210 + if (__jbd2_log_space_left(journal) < jbd_space_needed(journal)) {
211 211 jbd_debug(2, "Handle %p waiting for checkpoint...\n", handle);
212 212 spin_unlock(&transaction->t_handle_lock);
213   - __log_wait_for_space(journal);
  213 + __jbd2_log_wait_for_space(journal);
214 214 goto repeat_locked;
215 215 }
216 216  
... ... @@ -223,7 +223,7 @@
223 223 transaction->t_handle_count++;
224 224 jbd_debug(4, "Handle %p given %d credits (total %d, free %d)\n",
225 225 handle, nblocks, transaction->t_outstanding_credits,
226   - __log_space_left(journal));
  226 + __jbd2_log_space_left(journal));
227 227 spin_unlock(&transaction->t_handle_lock);
228 228 spin_unlock(&journal->j_state_lock);
229 229 out:
... ... @@ -246,7 +246,7 @@
246 246 }
247 247  
248 248 /**
249   - * handle_t *journal_start() - Obtain a new handle.
  249 + * handle_t *jbd2_journal_start() - Obtain a new handle.
250 250 * @journal: Journal to start transaction on.
251 251 * @nblocks: number of block buffer we might modify
252 252 *
... ... @@ -259,7 +259,7 @@
259 259 *
260 260 * Return a pointer to a newly allocated handle, or NULL on failure
261 261 */
262   -handle_t *journal_start(journal_t *journal, int nblocks)
  262 +handle_t *jbd2_journal_start(journal_t *journal, int nblocks)
263 263 {
264 264 handle_t *handle = journal_current_handle();
265 265 int err;
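The handle lifecycle implied by this description, in a hedged, self-contained sketch. jbd2_journal_stop() and jbd2_journal_dirty_metadata() are part of the same rename even though their hunks are not shown in this excerpt, and the ERR_PTR error convention is assumed:

#include <linux/err.h>
#include <linux/jbd2.h>

/* Sketch of one metadata update performed inside a single atomic handle. */
static int example_update_one_buffer(journal_t *journal,
                                     struct buffer_head *bh)
{
        handle_t *handle;
        int err;

        handle = jbd2_journal_start(journal, 1);        /* one buffer credit */
        if (IS_ERR(handle))
                return PTR_ERR(handle);

        err = jbd2_journal_get_write_access(handle, bh);
        if (!err) {
                /* ... modify bh->b_data here ... */
                err = jbd2_journal_dirty_metadata(handle, bh);
        }
        jbd2_journal_stop(handle);      /* may return -EIO once aborted */
        return err;
}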
... ... @@ -289,7 +289,7 @@
289 289 }
290 290  
291 291 /**
292   - * int journal_extend() - extend buffer credits.
  292 + * int jbd2_journal_extend() - extend buffer credits.
293 293 * @handle: handle to 'extend'
294 294 * @nblocks: nr blocks to try to extend by.
295 295 *
... ... @@ -298,7 +298,7 @@
298 298 * a credit for a number of buffer modifications in advance, but can
299 299 * extend its credit if it needs more.
300 300 *
301   - * journal_extend tries to give the running handle more buffer credits.
  301 + * jbd2_journal_extend tries to give the running handle more buffer credits.
302 302 * It does not guarantee that allocation - this is a best-effort only.
303 303 * The calling process MUST be able to deal cleanly with a failure to
304 304 * extend here.
... ... @@ -308,7 +308,7 @@
308 308 * return code < 0 implies an error
309 309 * return code > 0 implies normal transaction-full status.
310 310 */
311   -int journal_extend(handle_t *handle, int nblocks)
  311 +int jbd2_journal_extend(handle_t *handle, int nblocks)
312 312 {
313 313 transaction_t *transaction = handle->h_transaction;
314 314 journal_t *journal = transaction->t_journal;
... ... @@ -339,7 +339,7 @@
339 339 goto unlock;
340 340 }
341 341  
342   - if (wanted > __log_space_left(journal)) {
  342 + if (wanted > __jbd2_log_space_left(journal)) {
343 343 jbd_debug(3, "denied handle %p %d blocks: "
344 344 "insufficient log space\n", handle, nblocks);
345 345 goto unlock;
346 346  
347 347  
... ... @@ -360,21 +360,21 @@
360 360  
361 361  
362 362 /**
363   - * int journal_restart() - restart a handle .
  363 + * int jbd2_journal_restart() - restart a handle .
364 364 * @handle: handle to restart
365 365 * @nblocks: nr credits requested
366 366 *
367 367 * Restart a handle for a multi-transaction filesystem
368 368 * operation.
369 369 *
370   - * If the journal_extend() call above fails to grant new buffer credits
371   - * to a running handle, a call to journal_restart will commit the
  370 + * If the jbd2_journal_extend() call above fails to grant new buffer credits
  371 + * to a running handle, a call to jbd2_journal_restart will commit the
372 372 * handle's transaction so far and reattach the handle to a new
373 373 * transaction capable of guaranteeing the requested number of
374 374 * credits.
375 375 */
376 376  
377   -int journal_restart(handle_t *handle, int nblocks)
  377 +int jbd2_journal_restart(handle_t *handle, int nblocks)
378 378 {
379 379 transaction_t *transaction = handle->h_transaction;
380 380 journal_t *journal = transaction->t_journal;
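The intended interplay of the two calls, as a sketch: try to extend in place, and fall back to a restart when the transaction is full. The credit count and wrapper are illustrative:

#include <linux/jbd2.h>

/* Sketch: make sure the handle still has at least 'needed' credits,
 * extending it in place when possible and restarting it otherwise. */
static int example_ensure_credits(handle_t *handle, int needed)
{
        int err;

        err = jbd2_journal_extend(handle, needed);
        if (err == 0)
                return 0;       /* credits granted */
        if (err < 0)
                return err;     /* real error */
        /* err > 0: current transaction is full, commit it and reattach */
        return jbd2_journal_restart(handle, needed);
}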
... ... @@ -402,7 +402,7 @@
402 402 spin_unlock(&transaction->t_handle_lock);
403 403  
404 404 jbd_debug(2, "restarting handle %p\n", handle);
405   - __log_start_commit(journal, transaction->t_tid);
  405 + __jbd2_log_start_commit(journal, transaction->t_tid);
406 406 spin_unlock(&journal->j_state_lock);
407 407  
408 408 handle->h_buffer_credits = nblocks;
... ... @@ -412,7 +412,7 @@
412 412  
413 413  
414 414 /**
415   - * void journal_lock_updates () - establish a transaction barrier.
  415 + * void jbd2_journal_lock_updates () - establish a transaction barrier.
416 416 * @journal: Journal to establish a barrier on.
417 417 *
418 418 * This locks out any further updates from being started, and blocks
... ... @@ -421,7 +421,7 @@
421 421 *
422 422 * The journal lock should not be held on entry.
423 423 */
424   -void journal_lock_updates(journal_t *journal)
  424 +void jbd2_journal_lock_updates(journal_t *journal)
425 425 {
426 426 DEFINE_WAIT(wait);
427 427  
... ... @@ -452,7 +452,7 @@
452 452  
453 453 /*
454 454 * We have now established a barrier against other normal updates, but
455   - * we also need to barrier against other journal_lock_updates() calls
  455 + * we also need to barrier against other jbd2_journal_lock_updates() calls
456 456 * to make sure that we serialise special journal-locked operations
457 457 * too.
458 458 */
459 459  
460 460  
... ... @@ -460,14 +460,14 @@
460 460 }
461 461  
462 462 /**
463   - * void journal_unlock_updates (journal_t* journal) - release barrier
  463 + * void jbd2_journal_unlock_updates (journal_t* journal) - release barrier
464 464 * @journal: Journal to release the barrier on.
465 465 *
466   - * Release a transaction barrier obtained with journal_lock_updates().
  466 + * Release a transaction barrier obtained with jbd2_journal_lock_updates().
467 467 *
468 468 * Should be called without the journal lock held.
469 469 */
470   -void journal_unlock_updates (journal_t *journal)
  470 +void jbd2_journal_unlock_updates (journal_t *journal)
471 471 {
472 472 J_ASSERT(journal->j_barrier_count != 0);
473 473  
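A minimal sketch of the barrier pair in use; what happens inside the barrier is a placeholder:

#include <linux/jbd2.h>

/* Illustrative: run a journal-wide operation with all updates drained. */
static void example_with_updates_locked(journal_t *journal)
{
        jbd2_journal_lock_updates(journal);     /* waits out existing handles */
        /* ... perform the special, journal-locked operation here ... */
        jbd2_journal_unlock_updates(journal);   /* let new handles start again */
}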
... ... @@ -667,7 +667,7 @@
667 667 JBUFFER_TRACE(jh, "allocate memory for buffer");
668 668 jbd_unlock_bh_state(bh);
669 669 frozen_buffer =
670   - jbd_slab_alloc(jh2bh(jh)->b_size,
  670 + jbd2_slab_alloc(jh2bh(jh)->b_size,
671 671 GFP_NOFS);
672 672 if (!frozen_buffer) {
673 673 printk(KERN_EMERG
... ... @@ -699,7 +699,7 @@
699 699 jh->b_transaction = transaction;
700 700 JBUFFER_TRACE(jh, "file as BJ_Reserved");
701 701 spin_lock(&journal->j_list_lock);
702   - __journal_file_buffer(jh, transaction, BJ_Reserved);
  702 + __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
703 703 spin_unlock(&journal->j_list_lock);
704 704 }
705 705  
706 706  
707 707  
... ... @@ -723,18 +723,18 @@
723 723 * If we are about to journal a buffer, then any revoke pending on it is
724 724 * no longer valid
725 725 */
726   - journal_cancel_revoke(handle, jh);
  726 + jbd2_journal_cancel_revoke(handle, jh);
727 727  
728 728 out:
729 729 if (unlikely(frozen_buffer)) /* It's usually NULL */
730   - jbd_slab_free(frozen_buffer, bh->b_size);
  730 + jbd2_slab_free(frozen_buffer, bh->b_size);
731 731  
732 732 JBUFFER_TRACE(jh, "exit");
733 733 return error;
734 734 }
735 735  
736 736 /**
737   - * int journal_get_write_access() - notify intent to modify a buffer for metadata (not data) update.
  737 + * int jbd2_journal_get_write_access() - notify intent to modify a buffer for metadata (not data) update.
738 738 * @handle: transaction to add buffer modifications to
739 739 * @bh: bh to be used for metadata writes
740 740 * @credits: variable that will receive credits for the buffer
741 741  
742 742  
... ... @@ -745,16 +745,16 @@
745 745 * because we're write()ing a buffer which is also part of a shared mapping.
746 746 */
747 747  
748   -int journal_get_write_access(handle_t *handle, struct buffer_head *bh)
  748 +int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh)
749 749 {
750   - struct journal_head *jh = journal_add_journal_head(bh);
  750 + struct journal_head *jh = jbd2_journal_add_journal_head(bh);
751 751 int rc;
752 752  
753 753 /* We do not want to get caught playing with fields which the
754 754 * log thread also manipulates. Make sure that the buffer
755 755 * completes any outstanding IO before proceeding. */
756 756 rc = do_get_write_access(handle, jh, 0);
757   - journal_put_journal_head(jh);
  757 + jbd2_journal_put_journal_head(jh);
758 758 return rc;
759 759 }
760 760  
761 761  
762 762  
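
As the comment above notes, get_write_access() only declares intent; the buffer must still be dirtied through the journal before the handle is stopped. A sketch of the canonical sequence under the renamed API, where update_counter() and the assumed on-disk layout are purely illustrative:

#include <linux/jbd2.h>
#include <linux/buffer_head.h>

/* Illustrative only: bump a big-endian counter stored at the start of a
 * metadata block.  The layout and the helper name are assumptions. */
static int update_counter(handle_t *handle, struct buffer_head *bh)
{
        __be32 *counter = (__be32 *)bh->b_data;
        int err;

        err = jbd2_journal_get_write_access(handle, bh);
        if (err)
                return err;

        *counter = cpu_to_be32(be32_to_cpu(*counter) + 1);

        return jbd2_journal_dirty_metadata(handle, bh);
}
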
... ... @@ -772,17 +772,17 @@
772 772 * unlocked buffer beforehand. */
773 773  
774 774 /**
775   - * int journal_get_create_access () - notify intent to use newly created bh
  775 + * int jbd2_journal_get_create_access () - notify intent to use newly created bh
776 776 * @handle: transaction the new buffer is being added to
777 777 * @bh: new buffer.
778 778 *
779 779 * Call this if you create a new bh.
780 780 */
781   -int journal_get_create_access(handle_t *handle, struct buffer_head *bh)
  781 +int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
782 782 {
783 783 transaction_t *transaction = handle->h_transaction;
784 784 journal_t *journal = transaction->t_journal;
785   - struct journal_head *jh = journal_add_journal_head(bh);
  785 + struct journal_head *jh = jbd2_journal_add_journal_head(bh);
786 786 int err;
787 787  
788 788 jbd_debug(5, "journal_head %p\n", jh);
... ... @@ -812,7 +812,7 @@
812 812 if (jh->b_transaction == NULL) {
813 813 jh->b_transaction = transaction;
814 814 JBUFFER_TRACE(jh, "file as BJ_Reserved");
815   - __journal_file_buffer(jh, transaction, BJ_Reserved);
  815 + __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
816 816 } else if (jh->b_transaction == journal->j_committing_transaction) {
817 817 JBUFFER_TRACE(jh, "set next transaction");
818 818 jh->b_next_transaction = transaction;
819 819  
... ... @@ -828,14 +828,14 @@
828 828 * which hits an assertion error.
829 829 */
830 830 JBUFFER_TRACE(jh, "cancelling revoke");
831   - journal_cancel_revoke(handle, jh);
832   - journal_put_journal_head(jh);
  831 + jbd2_journal_cancel_revoke(handle, jh);
  832 + jbd2_journal_put_journal_head(jh);
833 833 out:
834 834 return err;
835 835 }
836 836  
837 837 /**
838   - * int journal_get_undo_access() - Notify intent to modify metadata with
  838 + * int jbd2_journal_get_undo_access() - Notify intent to modify metadata with
839 839 * non-rewindable consequences
840 840 * @handle: transaction
841 841 * @bh: buffer to undo
... ... @@ -848,7 +848,7 @@
848 848 * since if we overwrote that space we would make the delete
849 849 * un-rewindable in case of a crash.
850 850 *
851   - * To deal with that, journal_get_undo_access requests write access to a
  851 + * To deal with that, jbd2_journal_get_undo_access requests write access to a
852 852 * buffer for parts of non-rewindable operations such as delete
853 853 * operations on the bitmaps. The journaling code must keep a copy of
854 854 * the buffer's contents prior to the undo_access call until such time
855 855  
... ... @@ -861,10 +861,10 @@
861 861 *
862 862 * Returns error number or 0 on success.
863 863 */
864   -int journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
  864 +int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
865 865 {
866 866 int err;
867   - struct journal_head *jh = journal_add_journal_head(bh);
  867 + struct journal_head *jh = jbd2_journal_add_journal_head(bh);
868 868 char *committed_data = NULL;
869 869  
870 870 JBUFFER_TRACE(jh, "entry");
... ... @@ -880,7 +880,7 @@
880 880  
881 881 repeat:
882 882 if (!jh->b_committed_data) {
883   - committed_data = jbd_slab_alloc(jh2bh(jh)->b_size, GFP_NOFS);
  883 + committed_data = jbd2_slab_alloc(jh2bh(jh)->b_size, GFP_NOFS);
884 884 if (!committed_data) {
885 885 printk(KERN_EMERG "%s: No memory for committed data\n",
886 886 __FUNCTION__);
887 887  
888 888  
... ... @@ -905,14 +905,14 @@
905 905 }
906 906 jbd_unlock_bh_state(bh);
907 907 out:
908   - journal_put_journal_head(jh);
  908 + jbd2_journal_put_journal_head(jh);
909 909 if (unlikely(committed_data))
910   - jbd_slab_free(committed_data, bh->b_size);
  910 + jbd2_slab_free(committed_data, bh->b_size);
911 911 return err;
912 912 }
913 913  
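
The undo-access semantics carry over unchanged: the journal keeps the committed copy of the buffer so a delete recorded in a bitmap can be replayed safely after a crash. A hedged sketch of a block-freeing path using the renamed call; the helper name and little-endian bit layout are assumptions:

#include <linux/jbd2.h>
#include <linux/buffer_head.h>

/* Sketch only: clear one bit in an on-disk allocation bitmap while the
 * journal preserves the committed copy of the block. */
static int clear_bitmap_bit(handle_t *handle, struct buffer_head *bh, int bit)
{
        int err;

        err = jbd2_journal_get_undo_access(handle, bh);
        if (err)
                return err;

        /* The "delete" itself (little-endian bit order assumed). */
        ((unsigned char *)bh->b_data)[bit >> 3] &= ~(1U << (bit & 7));

        return jbd2_journal_dirty_metadata(handle, bh);
}
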
914 914 /**
915   - * int journal_dirty_data() - mark a buffer as containing dirty data which
  915 + * int jbd2_journal_dirty_data() - mark a buffer as containing dirty data which
916 916 * needs to be flushed before we can commit the
917 917 * current transaction.
918 918 * @handle: transaction
919 919  
... ... @@ -923,10 +923,10 @@
923 923 *
924 924 * Returns error number or 0 on success.
925 925 *
926   - * journal_dirty_data() can be called via page_launder->ext3_writepage
  926 + * jbd2_journal_dirty_data() can be called via page_launder->ext3_writepage
927 927 * by kswapd.
928 928 */
929   -int journal_dirty_data(handle_t *handle, struct buffer_head *bh)
  929 +int jbd2_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
930 930 {
931 931 journal_t *journal = handle->h_transaction->t_journal;
932 932 int need_brelse = 0;
... ... @@ -935,7 +935,7 @@
935 935 if (is_handle_aborted(handle))
936 936 return 0;
937 937  
938   - jh = journal_add_journal_head(bh);
  938 + jh = jbd2_journal_add_journal_head(bh);
939 939 JBUFFER_TRACE(jh, "entry");
940 940  
941 941 /*
... ... @@ -984,7 +984,7 @@
984 984 * And while we're in that state, someone does a
985 985 * writepage() in an attempt to pageout the same area
986 986 * of the file via a shared mapping. At present that
987   - * calls journal_dirty_data(), and we get right here.
  987 + * calls jbd2_journal_dirty_data(), and we get right here.
988 988 * It may be too late to journal the data. Simply
989 989 * falling through to the next test will suffice: the
990 990 * data will be dirty and will be checkpointed. The
... ... @@ -1035,7 +1035,7 @@
1035 1035 /* journal_clean_data_list() may have got there first */
1036 1036 if (jh->b_transaction != NULL) {
1037 1037 JBUFFER_TRACE(jh, "unfile from commit");
1038   - __journal_temp_unlink_buffer(jh);
  1038 + __jbd2_journal_temp_unlink_buffer(jh);
1039 1039 /* It still points to the committing
1040 1040 * transaction; move it to this one so
1041 1041 * that the refile assert checks are
1042 1042  
1043 1043  
... ... @@ -1054,15 +1054,15 @@
1054 1054 if (jh->b_jlist != BJ_SyncData && jh->b_jlist != BJ_Locked) {
1055 1055 JBUFFER_TRACE(jh, "not on correct data list: unfile");
1056 1056 J_ASSERT_JH(jh, jh->b_jlist != BJ_Shadow);
1057   - __journal_temp_unlink_buffer(jh);
  1057 + __jbd2_journal_temp_unlink_buffer(jh);
1058 1058 jh->b_transaction = handle->h_transaction;
1059 1059 JBUFFER_TRACE(jh, "file as data");
1060   - __journal_file_buffer(jh, handle->h_transaction,
  1060 + __jbd2_journal_file_buffer(jh, handle->h_transaction,
1061 1061 BJ_SyncData);
1062 1062 }
1063 1063 } else {
1064 1064 JBUFFER_TRACE(jh, "not on a transaction");
1065   - __journal_file_buffer(jh, handle->h_transaction, BJ_SyncData);
  1065 + __jbd2_journal_file_buffer(jh, handle->h_transaction, BJ_SyncData);
1066 1066 }
1067 1067 no_journal:
1068 1068 spin_unlock(&journal->j_list_lock);
1069 1069  
... ... @@ -1072,12 +1072,12 @@
1072 1072 __brelse(bh);
1073 1073 }
1074 1074 JBUFFER_TRACE(jh, "exit");
1075   - journal_put_journal_head(jh);
  1075 + jbd2_journal_put_journal_head(jh);
1076 1076 return 0;
1077 1077 }
1078 1078  
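
Ordered-mode data still goes through dirty_data() rather than dirty_metadata(), and still ends up on the BJ_SyncData list; only the symbol name changes. A sketch of a writepage-style caller, where the page walk and the function name are assumptions rather than code from this patch:

#include <linux/jbd2.h>
#include <linux/buffer_head.h>

/* Sketch: file every buffer of a data page on the running transaction's
 * sync-data list so it is flushed before the commit. */
static int journal_dirty_page_data(handle_t *handle, struct page *page)
{
        struct buffer_head *head = page_buffers(page);
        struct buffer_head *bh = head;
        int err = 0;

        do {
                err = jbd2_journal_dirty_data(handle, bh);
                if (err)
                        break;
                bh = bh->b_this_page;
        } while (bh != head);

        return err;
}
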
1079 1079 /**
1080   - * int journal_dirty_metadata() - mark a buffer as containing dirty metadata
  1080 + * int jbd2_journal_dirty_metadata() - mark a buffer as containing dirty metadata
1081 1081 * @handle: transaction to add buffer to.
1082 1082 * @bh: buffer to mark
1083 1083 *
... ... @@ -1095,7 +1095,7 @@
1095 1095 * buffer: that only gets done when the old transaction finally
1096 1096 * completes its commit.
1097 1097 */
1098   -int journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
  1098 +int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
1099 1099 {
1100 1100 transaction_t *transaction = handle->h_transaction;
1101 1101 journal_t *journal = transaction->t_journal;
... ... @@ -1156,7 +1156,7 @@
1156 1156  
1157 1157 JBUFFER_TRACE(jh, "file as BJ_Metadata");
1158 1158 spin_lock(&journal->j_list_lock);
1159   - __journal_file_buffer(jh, handle->h_transaction, BJ_Metadata);
  1159 + __jbd2_journal_file_buffer(jh, handle->h_transaction, BJ_Metadata);
1160 1160 spin_unlock(&journal->j_list_lock);
1161 1161 out_unlock_bh:
1162 1162 jbd_unlock_bh_state(bh);
1163 1163  
1164 1164  
... ... @@ -1166,18 +1166,18 @@
1166 1166 }
1167 1167  
1168 1168 /*
1169   - * journal_release_buffer: undo a get_write_access without any buffer
  1169 + * jbd2_journal_release_buffer: undo a get_write_access without any buffer
1170 1170 * updates, if the update decided in the end that it didn't need access.
1171 1171 *
1172 1172 */
1173 1173 void
1174   -journal_release_buffer(handle_t *handle, struct buffer_head *bh)
  1174 +jbd2_journal_release_buffer(handle_t *handle, struct buffer_head *bh)
1175 1175 {
1176 1176 BUFFER_TRACE(bh, "entry");
1177 1177 }
1178 1178  
1179 1179 /**
1180   - * void journal_forget() - bforget() for potentially-journaled buffers.
  1180 + * void jbd2_journal_forget() - bforget() for potentially-journaled buffers.
1181 1181 * @handle: transaction handle
1182 1182 * @bh: bh to 'forget'
1183 1183 *
... ... @@ -1193,7 +1193,7 @@
1193 1193 * Allow this call even if the handle has aborted --- it may be part of
1194 1194 * the caller's cleanup after an abort.
1195 1195 */
1196   -int journal_forget (handle_t *handle, struct buffer_head *bh)
  1196 +int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
1197 1197 {
1198 1198 transaction_t *transaction = handle->h_transaction;
1199 1199 journal_t *journal = transaction->t_journal;
1200 1200  
... ... @@ -1250,11 +1250,11 @@
1250 1250 */
1251 1251  
1252 1252 if (jh->b_cp_transaction) {
1253   - __journal_temp_unlink_buffer(jh);
1254   - __journal_file_buffer(jh, transaction, BJ_Forget);
  1253 + __jbd2_journal_temp_unlink_buffer(jh);
  1254 + __jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
1255 1255 } else {
1256   - __journal_unfile_buffer(jh);
1257   - journal_remove_journal_head(bh);
  1256 + __jbd2_journal_unfile_buffer(jh);
  1257 + jbd2_journal_remove_journal_head(bh);
1258 1258 __brelse(bh);
1259 1259 if (!buffer_jbd(bh)) {
1260 1260 spin_unlock(&journal->j_list_lock);
... ... @@ -1292,7 +1292,7 @@
1292 1292 }
1293 1293  
1294 1294 /**
1295   - * int journal_stop() - complete a transaction
  1295 + * int jbd2_journal_stop() - complete a transaction
1296 1296 * @handle: tranaction to complete.
1297 1297 *
1298 1298 * All done for a particular handle.
1299 1299  
1300 1300  
... ... @@ -1302,12 +1302,12 @@
1302 1302 * complication is that we need to start a commit operation if the
1303 1303 * filesystem is marked for synchronous update.
1304 1304 *
1305   - * journal_stop itself will not usually return an error, but it may
  1305 + * jbd2_journal_stop itself will not usually return an error, but it may
1306 1306 * do so in unusual circumstances. In particular, expect it to
1307   - * return -EIO if a journal_abort has been executed since the
  1307 + * return -EIO if a jbd2_journal_abort has been executed since the
1308 1308 * transaction began.
1309 1309 */
1310   -int journal_stop(handle_t *handle)
  1310 +int jbd2_journal_stop(handle_t *handle)
1311 1311 {
1312 1312 transaction_t *transaction = handle->h_transaction;
1313 1313 journal_t *journal = transaction->t_journal;
1314 1314  
1315 1315  
... ... @@ -1383,15 +1383,15 @@
1383 1383 jbd_debug(2, "transaction too old, requesting commit for "
1384 1384 "handle %p\n", handle);
1385 1385 /* This is non-blocking */
1386   - __log_start_commit(journal, transaction->t_tid);
  1386 + __jbd2_log_start_commit(journal, transaction->t_tid);
1387 1387 spin_unlock(&journal->j_state_lock);
1388 1388  
1389 1389 /*
1390   - * Special case: JFS_SYNC synchronous updates require us
  1390 + * Special case: JBD2_SYNC synchronous updates require us
1391 1391 * to wait for the commit to complete.
1392 1392 */
1393 1393 if (handle->h_sync && !(current->flags & PF_MEMALLOC))
1394   - err = log_wait_commit(journal, tid);
  1394 + err = jbd2_log_wait_commit(journal, tid);
1395 1395 } else {
1396 1396 spin_unlock(&transaction->t_handle_lock);
1397 1397 spin_unlock(&journal->j_state_lock);
1398 1398  
1399 1399  
1400 1400  
... ... @@ -1401,24 +1401,24 @@
1401 1401 return err;
1402 1402 }
1403 1403  
1404   -/**int journal_force_commit() - force any uncommitted transactions
  1404 +/**int jbd2_journal_force_commit() - force any uncommitted transactions
1405 1405 * @journal: journal to force
1406 1406 *
1407 1407 * For synchronous operations: force any uncommitted transactions
1408 1408 * to disk. May seem kludgy, but it reuses all the handle batching
1409 1409 * code in a very simple manner.
1410 1410 */
1411   -int journal_force_commit(journal_t *journal)
  1411 +int jbd2_journal_force_commit(journal_t *journal)
1412 1412 {
1413 1413 handle_t *handle;
1414 1414 int ret;
1415 1415  
1416   - handle = journal_start(journal, 1);
  1416 + handle = jbd2_journal_start(journal, 1);
1417 1417 if (IS_ERR(handle)) {
1418 1418 ret = PTR_ERR(handle);
1419 1419 } else {
1420 1420 handle->h_sync = 1;
1421   - ret = journal_stop(handle);
  1421 + ret = jbd2_journal_stop(handle);
1422 1422 }
1423 1423 return ret;
1424 1424 }
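
jbd2_journal_force_commit() above is itself the smallest possible handle lifecycle: start, mark synchronous, stop. For ordinary callers the same lifecycle wraps real updates; the sketch below shows the shape, with do_update() standing in for any sequence of get_write_access/dirty_metadata calls (both the helper and its -EROFS convention are assumptions):

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/jbd2.h>

/* Stand-in for whatever metadata updates the caller makes. */
static int do_update(handle_t *handle)
{
        return is_handle_aborted(handle) ? -EROFS : 0;
}

static int run_one_transaction(journal_t *journal, int credits, int sync)
{
        handle_t *handle;
        int err, err2;

        handle = jbd2_journal_start(journal, credits);
        if (IS_ERR(handle))
                return PTR_ERR(handle);

        err = do_update(handle);
        if (sync)
                handle->h_sync = 1;     /* JBD2_SYNC-style: wait for the commit */

        err2 = jbd2_journal_stop(handle);       /* may return -EIO after an abort */
        return err ? err : err2;
}
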
... ... @@ -1486,7 +1486,7 @@
1486 1486 *
1487 1487 * Called under j_list_lock. The journal may not be locked.
1488 1488 */
1489   -void __journal_temp_unlink_buffer(struct journal_head *jh)
  1489 +void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh)
1490 1490 {
1491 1491 struct journal_head **list = NULL;
1492 1492 transaction_t *transaction;
1493 1493  
1494 1494  
1495 1495  
1496 1496  
... ... @@ -1538,23 +1538,23 @@
1538 1538 mark_buffer_dirty(bh); /* Expose it to the VM */
1539 1539 }
1540 1540  
1541   -void __journal_unfile_buffer(struct journal_head *jh)
  1541 +void __jbd2_journal_unfile_buffer(struct journal_head *jh)
1542 1542 {
1543   - __journal_temp_unlink_buffer(jh);
  1543 + __jbd2_journal_temp_unlink_buffer(jh);
1544 1544 jh->b_transaction = NULL;
1545 1545 }
1546 1546  
1547   -void journal_unfile_buffer(journal_t *journal, struct journal_head *jh)
  1547 +void jbd2_journal_unfile_buffer(journal_t *journal, struct journal_head *jh)
1548 1548 {
1549 1549 jbd_lock_bh_state(jh2bh(jh));
1550 1550 spin_lock(&journal->j_list_lock);
1551   - __journal_unfile_buffer(jh);
  1551 + __jbd2_journal_unfile_buffer(jh);
1552 1552 spin_unlock(&journal->j_list_lock);
1553 1553 jbd_unlock_bh_state(jh2bh(jh));
1554 1554 }
1555 1555  
1556 1556 /*
1557   - * Called from journal_try_to_free_buffers().
  1557 + * Called from jbd2_journal_try_to_free_buffers().
1558 1558 *
1559 1559 * Called under jbd_lock_bh_state(bh)
1560 1560 */
1561 1561  
... ... @@ -1576,16 +1576,16 @@
1576 1576 if (jh->b_jlist == BJ_SyncData || jh->b_jlist == BJ_Locked) {
1577 1577 /* A written-back ordered data buffer */
1578 1578 JBUFFER_TRACE(jh, "release data");
1579   - __journal_unfile_buffer(jh);
1580   - journal_remove_journal_head(bh);
  1579 + __jbd2_journal_unfile_buffer(jh);
  1580 + jbd2_journal_remove_journal_head(bh);
1581 1581 __brelse(bh);
1582 1582 }
1583 1583 } else if (jh->b_cp_transaction != 0 && jh->b_transaction == 0) {
1584 1584 /* written-back checkpointed metadata buffer */
1585 1585 if (jh->b_jlist == BJ_None) {
1586 1586 JBUFFER_TRACE(jh, "remove from checkpoint list");
1587   - __journal_remove_checkpoint(jh);
1588   - journal_remove_journal_head(bh);
  1587 + __jbd2_journal_remove_checkpoint(jh);
  1588 + jbd2_journal_remove_journal_head(bh);
1589 1589 __brelse(bh);
1590 1590 }
1591 1591 }
... ... @@ -1596,7 +1596,7 @@
1596 1596  
1597 1597  
1598 1598 /**
1599   - * int journal_try_to_free_buffers() - try to free page buffers.
  1599 + * int jbd2_journal_try_to_free_buffers() - try to free page buffers.
1600 1600 * @journal: journal for operation
1601 1601 * @page: to try and free
1602 1602 * @unused_gfp_mask: unused
1603 1603  
... ... @@ -1613,13 +1613,13 @@
1613 1613 *
1614 1614 * This complicates JBD locking somewhat. We aren't protected by the
1615 1615 * BKL here. We wish to remove the buffer from its committing or
1616   - * running transaction's ->t_datalist via __journal_unfile_buffer.
  1616 + * running transaction's ->t_datalist via __jbd2_journal_unfile_buffer.
1617 1617 *
1618 1618 * This may *change* the value of transaction_t->t_datalist, so anyone
1619 1619 * who looks at t_datalist needs to lock against this function.
1620 1620 *
1621   - * Even worse, someone may be doing a journal_dirty_data on this
1622   - * buffer. So we need to lock against that. journal_dirty_data()
  1621 + * Even worse, someone may be doing a jbd2_journal_dirty_data on this
  1622 + * buffer. So we need to lock against that. jbd2_journal_dirty_data()
1623 1623 * will come out of the lock with the buffer dirty, which makes it
1624 1624 * ineligible for release here.
1625 1625 *
... ... @@ -1629,7 +1629,7 @@
1629 1629 * cannot happen because we never reallocate freed data as metadata
1630 1630 * while the data is part of a transaction. Yes?
1631 1631 */
1632   -int journal_try_to_free_buffers(journal_t *journal,
  1632 +int jbd2_journal_try_to_free_buffers(journal_t *journal,
1633 1633 struct page *page, gfp_t unused_gfp_mask)
1634 1634 {
1635 1635 struct buffer_head *head;
1636 1636  
1637 1637  
... ... @@ -1646,15 +1646,15 @@
1646 1646 /*
1647 1647 * We take our own ref against the journal_head here to avoid
1648 1648 * having to add tons of locking around each instance of
1649   - * journal_remove_journal_head() and journal_put_journal_head().
  1649 + * jbd2_journal_remove_journal_head() and jbd2_journal_put_journal_head().
1650 1650 */
1651   - jh = journal_grab_journal_head(bh);
  1651 + jh = jbd2_journal_grab_journal_head(bh);
1652 1652 if (!jh)
1653 1653 continue;
1654 1654  
1655 1655 jbd_lock_bh_state(bh);
1656 1656 __journal_try_to_free_buffer(journal, bh);
1657   - journal_put_journal_head(jh);
  1657 + jbd2_journal_put_journal_head(jh);
1658 1658 jbd_unlock_bh_state(bh);
1659 1659 if (buffer_jbd(bh))
1660 1660 goto busy;
1661 1661  
1662 1662  
1663 1663  
... ... @@ -1681,23 +1681,23 @@
1681 1681 int may_free = 1;
1682 1682 struct buffer_head *bh = jh2bh(jh);
1683 1683  
1684   - __journal_unfile_buffer(jh);
  1684 + __jbd2_journal_unfile_buffer(jh);
1685 1685  
1686 1686 if (jh->b_cp_transaction) {
1687 1687 JBUFFER_TRACE(jh, "on running+cp transaction");
1688   - __journal_file_buffer(jh, transaction, BJ_Forget);
  1688 + __jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
1689 1689 clear_buffer_jbddirty(bh);
1690 1690 may_free = 0;
1691 1691 } else {
1692 1692 JBUFFER_TRACE(jh, "on running transaction");
1693   - journal_remove_journal_head(bh);
  1693 + jbd2_journal_remove_journal_head(bh);
1694 1694 __brelse(bh);
1695 1695 }
1696 1696 return may_free;
1697 1697 }
1698 1698  
1699 1699 /*
1700   - * journal_invalidatepage
  1700 + * jbd2_journal_invalidatepage
1701 1701 *
1702 1702 * This code is tricky. It has a number of cases to deal with.
1703 1703 *
... ... @@ -1765,7 +1765,7 @@
1765 1765 jbd_lock_bh_state(bh);
1766 1766 spin_lock(&journal->j_list_lock);
1767 1767  
1768   - jh = journal_grab_journal_head(bh);
  1768 + jh = jbd2_journal_grab_journal_head(bh);
1769 1769 if (!jh)
1770 1770 goto zap_buffer_no_jh;
1771 1771  
... ... @@ -1796,7 +1796,7 @@
1796 1796 JBUFFER_TRACE(jh, "checkpointed: add to BJ_Forget");
1797 1797 ret = __dispose_buffer(jh,
1798 1798 journal->j_running_transaction);
1799   - journal_put_journal_head(jh);
  1799 + jbd2_journal_put_journal_head(jh);
1800 1800 spin_unlock(&journal->j_list_lock);
1801 1801 jbd_unlock_bh_state(bh);
1802 1802 spin_unlock(&journal->j_state_lock);
... ... @@ -1810,7 +1810,7 @@
1810 1810 JBUFFER_TRACE(jh, "give to committing trans");
1811 1811 ret = __dispose_buffer(jh,
1812 1812 journal->j_committing_transaction);
1813   - journal_put_journal_head(jh);
  1813 + jbd2_journal_put_journal_head(jh);
1814 1814 spin_unlock(&journal->j_list_lock);
1815 1815 jbd_unlock_bh_state(bh);
1816 1816 spin_unlock(&journal->j_state_lock);
... ... @@ -1844,7 +1844,7 @@
1844 1844 journal->j_running_transaction);
1845 1845 jh->b_next_transaction = NULL;
1846 1846 }
1847   - journal_put_journal_head(jh);
  1847 + jbd2_journal_put_journal_head(jh);
1848 1848 spin_unlock(&journal->j_list_lock);
1849 1849 jbd_unlock_bh_state(bh);
1850 1850 spin_unlock(&journal->j_state_lock);
... ... @@ -1861,7 +1861,7 @@
1861 1861 }
1862 1862  
1863 1863 zap_buffer:
1864   - journal_put_journal_head(jh);
  1864 + jbd2_journal_put_journal_head(jh);
1865 1865 zap_buffer_no_jh:
1866 1866 spin_unlock(&journal->j_list_lock);
1867 1867 jbd_unlock_bh_state(bh);
... ... @@ -1877,7 +1877,7 @@
1877 1877 }
1878 1878  
1879 1879 /**
1880   - * void journal_invalidatepage()
  1880 + * void jbd2_journal_invalidatepage()
1881 1881 * @journal: journal to use for flush...
1882 1882 * @page: page to flush
1883 1883 * @offset: length of page to invalidate.
... ... @@ -1885,7 +1885,7 @@
1885 1885 * Reap page buffers containing data after offset in page.
1886 1886 *
1887 1887 */
1888   -void journal_invalidatepage(journal_t *journal,
  1888 +void jbd2_journal_invalidatepage(journal_t *journal,
1889 1889 struct page *page,
1890 1890 unsigned long offset)
1891 1891 {
... ... @@ -1927,7 +1927,7 @@
1927 1927 /*
1928 1928 * File a buffer on the given transaction list.
1929 1929 */
1930   -void __journal_file_buffer(struct journal_head *jh,
  1930 +void __jbd2_journal_file_buffer(struct journal_head *jh,
1931 1931 transaction_t *transaction, int jlist)
1932 1932 {
1933 1933 struct journal_head **list = NULL;
... ... @@ -1956,7 +1956,7 @@
1956 1956 }
1957 1957  
1958 1958 if (jh->b_transaction)
1959   - __journal_temp_unlink_buffer(jh);
  1959 + __jbd2_journal_temp_unlink_buffer(jh);
1960 1960 jh->b_transaction = transaction;
1961 1961  
1962 1962 switch (jlist) {
1963 1963  
... ... @@ -1998,12 +1998,12 @@
1998 1998 set_buffer_jbddirty(bh);
1999 1999 }
2000 2000  
2001   -void journal_file_buffer(struct journal_head *jh,
  2001 +void jbd2_journal_file_buffer(struct journal_head *jh,
2002 2002 transaction_t *transaction, int jlist)
2003 2003 {
2004 2004 jbd_lock_bh_state(jh2bh(jh));
2005 2005 spin_lock(&transaction->t_journal->j_list_lock);
2006   - __journal_file_buffer(jh, transaction, jlist);
  2006 + __jbd2_journal_file_buffer(jh, transaction, jlist);
2007 2007 spin_unlock(&transaction->t_journal->j_list_lock);
2008 2008 jbd_unlock_bh_state(jh2bh(jh));
2009 2009 }
... ... @@ -2018,7 +2018,7 @@
2018 2018 *
2019 2019 * Called under jbd_lock_bh_state(jh2bh(jh))
2020 2020 */
2021   -void __journal_refile_buffer(struct journal_head *jh)
  2021 +void __jbd2_journal_refile_buffer(struct journal_head *jh)
2022 2022 {
2023 2023 int was_dirty;
2024 2024 struct buffer_head *bh = jh2bh(jh);
... ... @@ -2029,7 +2029,7 @@
2029 2029  
2030 2030 /* If the buffer is now unused, just drop it. */
2031 2031 if (jh->b_next_transaction == NULL) {
2032   - __journal_unfile_buffer(jh);
  2032 + __jbd2_journal_unfile_buffer(jh);
2033 2033 return;
2034 2034 }
2035 2035  
2036 2036  
... ... @@ -2039,10 +2039,10 @@
2039 2039 */
2040 2040  
2041 2041 was_dirty = test_clear_buffer_jbddirty(bh);
2042   - __journal_temp_unlink_buffer(jh);
  2042 + __jbd2_journal_temp_unlink_buffer(jh);
2043 2043 jh->b_transaction = jh->b_next_transaction;
2044 2044 jh->b_next_transaction = NULL;
2045   - __journal_file_buffer(jh, jh->b_transaction,
  2045 + __jbd2_journal_file_buffer(jh, jh->b_transaction,
2046 2046 was_dirty ? BJ_Metadata : BJ_Reserved);
2047 2047 J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING);
2048 2048  
2049 2049  
2050 2050  
2051 2051  
2052 2052  
... ... @@ -2054,26 +2054,26 @@
2054 2054 * For the unlocked version of this call, also make sure that any
2055 2055 * hanging journal_head is cleaned up if necessary.
2056 2056 *
2057   - * __journal_refile_buffer is usually called as part of a single locked
  2057 + * __jbd2_journal_refile_buffer is usually called as part of a single locked
2058 2058 * operation on a buffer_head, in which the caller is probably going to
2059 2059 * be hooking the journal_head onto other lists. In that case it is up
2060 2060 * to the caller to remove the journal_head if necessary. For the
2061   - * unlocked journal_refile_buffer call, the caller isn't going to be
  2061 + * unlocked jbd2_journal_refile_buffer call, the caller isn't going to be
2062 2062 * doing anything else to the buffer so we need to do the cleanup
2063 2063 * ourselves to avoid a jh leak.
2064 2064 *
2065 2065 * *** The journal_head may be freed by this call! ***
2066 2066 */
2067   -void journal_refile_buffer(journal_t *journal, struct journal_head *jh)
  2067 +void jbd2_journal_refile_buffer(journal_t *journal, struct journal_head *jh)
2068 2068 {
2069 2069 struct buffer_head *bh = jh2bh(jh);
2070 2070  
2071 2071 jbd_lock_bh_state(bh);
2072 2072 spin_lock(&journal->j_list_lock);
2073 2073  
2074   - __journal_refile_buffer(jh);
  2074 + __jbd2_journal_refile_buffer(jh);
2075 2075 jbd_unlock_bh_state(bh);
2076   - journal_remove_journal_head(bh);
  2076 + jbd2_journal_remove_journal_head(bh);
2077 2077  
2078 2078 spin_unlock(&journal->j_list_lock);
2079 2079 __brelse(bh);
include/linux/ext4_jbd2.h
1 1 /*
2   - * linux/include/linux/ext4_jbd.h
  2 + * linux/include/linux/ext4_jbd2.h
3 3 *
4 4 * Written by Stephen C. Tweedie <sct@redhat.com>, 1999
5 5 *
... ... @@ -16,7 +16,7 @@
16 16 #define _LINUX_EXT4_JBD_H
17 17  
18 18 #include <linux/fs.h>
19   -#include <linux/jbd.h>
  19 +#include <linux/jbd2.h>
20 20 #include <linux/ext4_fs.h>
21 21  
22 22 #define EXT4_JOURNAL(inode) (EXT4_SB((inode)->i_sb)->s_journal)
... ... @@ -116,7 +116,7 @@
116 116 __ext4_journal_get_undo_access(const char *where, handle_t *handle,
117 117 struct buffer_head *bh)
118 118 {
119   - int err = journal_get_undo_access(handle, bh);
  119 + int err = jbd2_journal_get_undo_access(handle, bh);
120 120 if (err)
121 121 ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
122 122 return err;
... ... @@ -126,7 +126,7 @@
126 126 __ext4_journal_get_write_access(const char *where, handle_t *handle,
127 127 struct buffer_head *bh)
128 128 {
129   - int err = journal_get_write_access(handle, bh);
  129 + int err = jbd2_journal_get_write_access(handle, bh);
130 130 if (err)
131 131 ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
132 132 return err;
133 133  
... ... @@ -135,13 +135,13 @@
135 135 static inline void
136 136 ext4_journal_release_buffer(handle_t *handle, struct buffer_head *bh)
137 137 {
138   - journal_release_buffer(handle, bh);
  138 + jbd2_journal_release_buffer(handle, bh);
139 139 }
140 140  
141 141 static inline int
142 142 __ext4_journal_forget(const char *where, handle_t *handle, struct buffer_head *bh)
143 143 {
144   - int err = journal_forget(handle, bh);
  144 + int err = jbd2_journal_forget(handle, bh);
145 145 if (err)
146 146 ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
147 147 return err;
... ... @@ -151,7 +151,7 @@
151 151 __ext4_journal_revoke(const char *where, handle_t *handle,
152 152 unsigned long blocknr, struct buffer_head *bh)
153 153 {
154   - int err = journal_revoke(handle, blocknr, bh);
  154 + int err = jbd2_journal_revoke(handle, blocknr, bh);
155 155 if (err)
156 156 ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
157 157 return err;
... ... @@ -161,7 +161,7 @@
161 161 __ext4_journal_get_create_access(const char *where,
162 162 handle_t *handle, struct buffer_head *bh)
163 163 {
164   - int err = journal_get_create_access(handle, bh);
  164 + int err = jbd2_journal_get_create_access(handle, bh);
165 165 if (err)
166 166 ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
167 167 return err;
... ... @@ -171,7 +171,7 @@
171 171 __ext4_journal_dirty_metadata(const char *where,
172 172 handle_t *handle, struct buffer_head *bh)
173 173 {
174   - int err = journal_dirty_metadata(handle, bh);
  174 + int err = jbd2_journal_dirty_metadata(handle, bh);
175 175 if (err)
176 176 ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
177 177 return err;
178 178  
179 179  
180 180  
... ... @@ -211,22 +211,22 @@
211 211  
212 212 static inline int ext4_journal_extend(handle_t *handle, int nblocks)
213 213 {
214   - return journal_extend(handle, nblocks);
  214 + return jbd2_journal_extend(handle, nblocks);
215 215 }
216 216  
217 217 static inline int ext4_journal_restart(handle_t *handle, int nblocks)
218 218 {
219   - return journal_restart(handle, nblocks);
  219 + return jbd2_journal_restart(handle, nblocks);
220 220 }
221 221  
222 222 static inline int ext4_journal_blocks_per_page(struct inode *inode)
223 223 {
224   - return journal_blocks_per_page(inode);
  224 + return jbd2_journal_blocks_per_page(inode);
225 225 }
226 226  
227 227 static inline int ext4_journal_force_commit(journal_t *journal)
228 228 {
229   - return journal_force_commit(journal);
  229 + return jbd2_journal_force_commit(journal);
230 230 }
231 231  
232 232 /* super.c */
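
On the ext4 side only the jbd2_* callees change; the wrappers still report failures through ext4_journal_abort_handle() tagged with the caller's location. A sketch of how a caller might use the wrappers shown above (the function name and the buffer's origin are assumptions; real ext4 code usually reaches these through convenience macros not visible in this hunk):

#include <linux/ext4_jbd2.h>

/* Illustrative ext4-style caller; the helper name is an assumption. */
static int ext4_touch_block(handle_t *handle, struct buffer_head *bh)
{
        int err;

        err = __ext4_journal_get_write_access(__FUNCTION__, handle, bh);
        if (err)
                return err;     /* wrapper already reported via ext4_journal_abort_handle() */

        /* ... modify bh->b_data here ... */

        return __ext4_journal_dirty_metadata(__FUNCTION__, handle, bh);
}
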
include/linux/jbd2.h
1 1 /*
2   - * linux/include/linux/jbd.h
  2 + * linux/include/linux/jbd2.h
3 3 *
4 4 * Written by Stephen C. Tweedie <sct@redhat.com>
5 5 *
... ... @@ -19,7 +19,7 @@
19 19 /* Allow this file to be included directly into e2fsprogs */
20 20 #ifndef __KERNEL__
21 21 #include "jfs_compat.h"
22   -#define JFS_DEBUG
  22 +#define JBD2_DEBUG
23 23 #define jfs_debug jbd_debug
24 24 #else
25 25  
26 26  
... ... @@ -57,11 +57,11 @@
57 57 * CONFIG_JBD_DEBUG is on.
58 58 */
59 59 #define JBD_EXPENSIVE_CHECKING
60   -extern int journal_enable_debug;
  60 +extern int jbd2_journal_enable_debug;
61 61  
62 62 #define jbd_debug(n, f, a...) \
63 63 do { \
64   - if ((n) <= journal_enable_debug) { \
  64 + if ((n) <= jbd2_journal_enable_debug) { \
65 65 printk (KERN_DEBUG "(%s, %d): %s: ", \
66 66 __FILE__, __LINE__, __FUNCTION__); \
67 67 printk (f, ## a); \
68 68  
69 69  
70 70  
... ... @@ -71,16 +71,16 @@
71 71 #define jbd_debug(f, a...) /**/
72 72 #endif
73 73  
74   -extern void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry);
75   -extern void * jbd_slab_alloc(size_t size, gfp_t flags);
76   -extern void jbd_slab_free(void *ptr, size_t size);
  74 +extern void * __jbd2_kmalloc (const char *where, size_t size, gfp_t flags, int retry);
  75 +extern void * jbd2_slab_alloc(size_t size, gfp_t flags);
  76 +extern void jbd2_slab_free(void *ptr, size_t size);
77 77  
78 78 #define jbd_kmalloc(size, flags) \
79   - __jbd_kmalloc(__FUNCTION__, (size), (flags), journal_oom_retry)
  79 + __jbd2_kmalloc(__FUNCTION__, (size), (flags), journal_oom_retry)
80 80 #define jbd_rep_kmalloc(size, flags) \
81   - __jbd_kmalloc(__FUNCTION__, (size), (flags), 1)
  81 + __jbd2_kmalloc(__FUNCTION__, (size), (flags), 1)
82 82  
83   -#define JFS_MIN_JOURNAL_BLOCKS 1024
  83 +#define JBD2_MIN_JOURNAL_BLOCKS 1024
84 84  
85 85 #ifdef __KERNEL__
86 86  
... ... @@ -122,7 +122,7 @@
122 122 * Internal structures used by the logging mechanism:
123 123 */
124 124  
125   -#define JFS_MAGIC_NUMBER 0xc03b3998U /* The first 4 bytes of /dev/random! */
  125 +#define JBD2_MAGIC_NUMBER 0xc03b3998U /* The first 4 bytes of /dev/random! */
126 126  
127 127 /*
128 128 * On-disk structures
... ... @@ -132,11 +132,11 @@
132 132 * Descriptor block types:
133 133 */
134 134  
135   -#define JFS_DESCRIPTOR_BLOCK 1
136   -#define JFS_COMMIT_BLOCK 2
137   -#define JFS_SUPERBLOCK_V1 3
138   -#define JFS_SUPERBLOCK_V2 4
139   -#define JFS_REVOKE_BLOCK 5
  135 +#define JBD2_DESCRIPTOR_BLOCK 1
  136 +#define JBD2_COMMIT_BLOCK 2
  137 +#define JBD2_SUPERBLOCK_V1 3
  138 +#define JBD2_SUPERBLOCK_V2 4
  139 +#define JBD2_REVOKE_BLOCK 5
140 140  
141 141 /*
142 142 * Standard header for all descriptor blocks:
143 143  
144 144  
... ... @@ -162,18 +162,18 @@
162 162 * The revoke descriptor: used on disk to describe a series of blocks to
163 163 * be revoked from the log
164 164 */
165   -typedef struct journal_revoke_header_s
  165 +typedef struct jbd2_journal_revoke_header_s
166 166 {
167 167 journal_header_t r_header;
168 168 __be32 r_count; /* Count of bytes used in the block */
169   -} journal_revoke_header_t;
  169 +} jbd2_journal_revoke_header_t;
170 170  
171 171  
172 172 /* Definitions for the journal tag flags word: */
173   -#define JFS_FLAG_ESCAPE 1 /* on-disk block is escaped */
174   -#define JFS_FLAG_SAME_UUID 2 /* block has same uuid as previous */
175   -#define JFS_FLAG_DELETED 4 /* block deleted by this transaction */
176   -#define JFS_FLAG_LAST_TAG 8 /* last tag in this descriptor block */
  173 +#define JBD2_FLAG_ESCAPE 1 /* on-disk block is escaped */
  174 +#define JBD2_FLAG_SAME_UUID 2 /* block has same uuid as previous */
  175 +#define JBD2_FLAG_DELETED 4 /* block deleted by this transaction */
  176 +#define JBD2_FLAG_LAST_TAG 8 /* last tag in this descriptor block */
177 177  
178 178  
179 179 /*
... ... @@ -196,7 +196,7 @@
196 196 __be32 s_start; /* blocknr of start of log */
197 197  
198 198 /* 0x0020 */
199   - /* Error value, as set by journal_abort(). */
  199 + /* Error value, as set by jbd2_journal_abort(). */
200 200 __be32 s_errno;
201 201  
202 202 /* 0x0024 */
203 203  
204 204  
205 205  
206 206  
... ... @@ -224,22 +224,22 @@
224 224 /* 0x0400 */
225 225 } journal_superblock_t;
226 226  
227   -#define JFS_HAS_COMPAT_FEATURE(j,mask) \
  227 +#define JBD2_HAS_COMPAT_FEATURE(j,mask) \
228 228 ((j)->j_format_version >= 2 && \
229 229 ((j)->j_superblock->s_feature_compat & cpu_to_be32((mask))))
230   -#define JFS_HAS_RO_COMPAT_FEATURE(j,mask) \
  230 +#define JBD2_HAS_RO_COMPAT_FEATURE(j,mask) \
231 231 ((j)->j_format_version >= 2 && \
232 232 ((j)->j_superblock->s_feature_ro_compat & cpu_to_be32((mask))))
233   -#define JFS_HAS_INCOMPAT_FEATURE(j,mask) \
  233 +#define JBD2_HAS_INCOMPAT_FEATURE(j,mask) \
234 234 ((j)->j_format_version >= 2 && \
235 235 ((j)->j_superblock->s_feature_incompat & cpu_to_be32((mask))))
236 236  
237   -#define JFS_FEATURE_INCOMPAT_REVOKE 0x00000001
  237 +#define JBD2_FEATURE_INCOMPAT_REVOKE 0x00000001
238 238  
239 239 /* Features known to this kernel version: */
240   -#define JFS_KNOWN_COMPAT_FEATURES 0
241   -#define JFS_KNOWN_ROCOMPAT_FEATURES 0
242   -#define JFS_KNOWN_INCOMPAT_FEATURES JFS_FEATURE_INCOMPAT_REVOKE
  240 +#define JBD2_KNOWN_COMPAT_FEATURES 0
  241 +#define JBD2_KNOWN_ROCOMPAT_FEATURES 0
  242 +#define JBD2_KNOWN_INCOMPAT_FEATURES JBD2_FEATURE_INCOMPAT_REVOKE
243 243  
244 244 #ifdef __KERNEL__
245 245  
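
The feature macros keep the same three-way split (compat / ro-compat / incompat) under the JBD2_ prefix. A small sketch of how a mount path might test for revoke support with the renamed macros; the helper name is an assumption:

#include <linux/jbd2.h>

/* True only for v2 journal superblocks with the REVOKE incompat bit set. */
static int journal_uses_revoke(journal_t *journal)
{
        return JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_REVOKE);
}
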
... ... @@ -359,7 +359,7 @@
359 359 bit_spin_unlock(BH_JournalHead, &bh->b_state);
360 360 }
361 361  
362   -struct jbd_revoke_table_s;
  362 +struct jbd2_revoke_table_s;
363 363  
364 364 /**
365 365 * struct handle_s - The handle_s type is the concrete type associated with
... ... @@ -445,7 +445,7 @@
445 445  
446 446 /*
447 447 * Transaction's current state
448   - * [no locking - only kjournald alters this]
  448 + * [no locking - only kjournald2 alters this]
449 449 * FIXME: needs barriers
450 450 * KLUDGE: [use j_state_lock]
451 451 */
... ... @@ -621,7 +621,7 @@
621 621 * @j_revoke: The revoke table - maintains the list of revoked blocks in the
622 622 * current transaction.
623 623 * @j_revoke_table: alternate revoke tables for j_revoke
624   - * @j_wbuf: array of buffer_heads for journal_commit_transaction
  624 + * @j_wbuf: array of buffer_heads for jbd2_journal_commit_transaction
625 625 * @j_wbufsize: maximum number of buffer_heads allowed in j_wbuf, the
626 626 * number that will fit in j_blocksize
627 627 * @j_last_sync_writer: most recent pid which did a synchronous write
628 628  
... ... @@ -805,11 +805,11 @@
805 805 * current transaction. [j_revoke_lock]
806 806 */
807 807 spinlock_t j_revoke_lock;
808   - struct jbd_revoke_table_s *j_revoke;
809   - struct jbd_revoke_table_s *j_revoke_table[2];
  808 + struct jbd2_revoke_table_s *j_revoke;
  809 + struct jbd2_revoke_table_s *j_revoke_table[2];
810 810  
811 811 /*
812   - * array of bhs for journal_commit_transaction
  812 + * array of bhs for jbd2_journal_commit_transaction
813 813 */
814 814 struct buffer_head **j_wbuf;
815 815 int j_wbufsize;
... ... @@ -826,12 +826,12 @@
826 826 /*
827 827 * Journal flag definitions
828 828 */
829   -#define JFS_UNMOUNT 0x001 /* Journal thread is being destroyed */
830   -#define JFS_ABORT 0x002 /* Journaling has been aborted for errors. */
831   -#define JFS_ACK_ERR 0x004 /* The errno in the sb has been acked */
832   -#define JFS_FLUSHED 0x008 /* The journal superblock has been flushed */
833   -#define JFS_LOADED 0x010 /* The journal superblock has been loaded */
834   -#define JFS_BARRIER 0x020 /* Use IDE barriers */
  829 +#define JBD2_UNMOUNT 0x001 /* Journal thread is being destroyed */
  830 +#define JBD2_ABORT 0x002 /* Journaling has been aborted for errors. */
  831 +#define JBD2_ACK_ERR 0x004 /* The errno in the sb has been acked */
  832 +#define JBD2_FLUSHED 0x008 /* The journal superblock has been flushed */
  833 +#define JBD2_LOADED 0x010 /* The journal superblock has been loaded */
  834 +#define JBD2_BARRIER 0x020 /* Use IDE barriers */
835 835  
836 836 /*
837 837 * Function declarations for the journaling transaction and buffer
838 838  
839 839  
840 840  
841 841  
842 842  
... ... @@ -839,31 +839,31 @@
839 839 */
840 840  
841 841 /* Filing buffers */
842   -extern void __journal_temp_unlink_buffer(struct journal_head *jh);
843   -extern void journal_unfile_buffer(journal_t *, struct journal_head *);
844   -extern void __journal_unfile_buffer(struct journal_head *);
845   -extern void __journal_refile_buffer(struct journal_head *);
846   -extern void journal_refile_buffer(journal_t *, struct journal_head *);
847   -extern void __journal_file_buffer(struct journal_head *, transaction_t *, int);
  842 +extern void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh);
  843 +extern void jbd2_journal_unfile_buffer(journal_t *, struct journal_head *);
  844 +extern void __jbd2_journal_unfile_buffer(struct journal_head *);
  845 +extern void __jbd2_journal_refile_buffer(struct journal_head *);
  846 +extern void jbd2_journal_refile_buffer(journal_t *, struct journal_head *);
  847 +extern void __jbd2_journal_file_buffer(struct journal_head *, transaction_t *, int);
848 848 extern void __journal_free_buffer(struct journal_head *bh);
849   -extern void journal_file_buffer(struct journal_head *, transaction_t *, int);
  849 +extern void jbd2_journal_file_buffer(struct journal_head *, transaction_t *, int);
850 850 extern void __journal_clean_data_list(transaction_t *transaction);
851 851  
852 852 /* Log buffer allocation */
853   -extern struct journal_head * journal_get_descriptor_buffer(journal_t *);
854   -int journal_next_log_block(journal_t *, unsigned long *);
  853 +extern struct journal_head * jbd2_journal_get_descriptor_buffer(journal_t *);
  854 +int jbd2_journal_next_log_block(journal_t *, unsigned long *);
855 855  
856 856 /* Commit management */
857   -extern void journal_commit_transaction(journal_t *);
  857 +extern void jbd2_journal_commit_transaction(journal_t *);
858 858  
859 859 /* Checkpoint list management */
860   -int __journal_clean_checkpoint_list(journal_t *journal);
861   -int __journal_remove_checkpoint(struct journal_head *);
862   -void __journal_insert_checkpoint(struct journal_head *, transaction_t *);
  860 +int __jbd2_journal_clean_checkpoint_list(journal_t *journal);
  861 +int __jbd2_journal_remove_checkpoint(struct journal_head *);
  862 +void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *);
863 863  
864 864 /* Buffer IO */
865 865 extern int
866   -journal_write_metadata_buffer(transaction_t *transaction,
  866 +jbd2_journal_write_metadata_buffer(transaction_t *transaction,
867 867 struct journal_head *jh_in,
868 868 struct journal_head **jh_out,
869 869 unsigned long blocknr);
870 870  
871 871  
872 872  
873 873  
874 874  
875 875  
876 876  
877 877  
878 878  
879 879  
880 880  
881 881  
882 882  
883 883  
884 884  
... ... @@ -893,91 +893,91 @@
893 893 * Register buffer modifications against the current transaction.
894 894 */
895 895  
896   -extern handle_t *journal_start(journal_t *, int nblocks);
897   -extern int journal_restart (handle_t *, int nblocks);
898   -extern int journal_extend (handle_t *, int nblocks);
899   -extern int journal_get_write_access(handle_t *, struct buffer_head *);
900   -extern int journal_get_create_access (handle_t *, struct buffer_head *);
901   -extern int journal_get_undo_access(handle_t *, struct buffer_head *);
902   -extern int journal_dirty_data (handle_t *, struct buffer_head *);
903   -extern int journal_dirty_metadata (handle_t *, struct buffer_head *);
904   -extern void journal_release_buffer (handle_t *, struct buffer_head *);
905   -extern int journal_forget (handle_t *, struct buffer_head *);
  896 +extern handle_t *jbd2_journal_start(journal_t *, int nblocks);
  897 +extern int jbd2_journal_restart (handle_t *, int nblocks);
  898 +extern int jbd2_journal_extend (handle_t *, int nblocks);
  899 +extern int jbd2_journal_get_write_access(handle_t *, struct buffer_head *);
  900 +extern int jbd2_journal_get_create_access (handle_t *, struct buffer_head *);
  901 +extern int jbd2_journal_get_undo_access(handle_t *, struct buffer_head *);
  902 +extern int jbd2_journal_dirty_data (handle_t *, struct buffer_head *);
  903 +extern int jbd2_journal_dirty_metadata (handle_t *, struct buffer_head *);
  904 +extern void jbd2_journal_release_buffer (handle_t *, struct buffer_head *);
  905 +extern int jbd2_journal_forget (handle_t *, struct buffer_head *);
906 906 extern void journal_sync_buffer (struct buffer_head *);
907   -extern void journal_invalidatepage(journal_t *,
  907 +extern void jbd2_journal_invalidatepage(journal_t *,
908 908 struct page *, unsigned long);
909   -extern int journal_try_to_free_buffers(journal_t *, struct page *, gfp_t);
910   -extern int journal_stop(handle_t *);
911   -extern int journal_flush (journal_t *);
912   -extern void journal_lock_updates (journal_t *);
913   -extern void journal_unlock_updates (journal_t *);
  909 +extern int jbd2_journal_try_to_free_buffers(journal_t *, struct page *, gfp_t);
  910 +extern int jbd2_journal_stop(handle_t *);
  911 +extern int jbd2_journal_flush (journal_t *);
  912 +extern void jbd2_journal_lock_updates (journal_t *);
  913 +extern void jbd2_journal_unlock_updates (journal_t *);
914 914  
915   -extern journal_t * journal_init_dev(struct block_device *bdev,
  915 +extern journal_t * jbd2_journal_init_dev(struct block_device *bdev,
916 916 struct block_device *fs_dev,
917 917 int start, int len, int bsize);
918   -extern journal_t * journal_init_inode (struct inode *);
919   -extern int journal_update_format (journal_t *);
920   -extern int journal_check_used_features
  918 +extern journal_t * jbd2_journal_init_inode (struct inode *);
  919 +extern int jbd2_journal_update_format (journal_t *);
  920 +extern int jbd2_journal_check_used_features
921 921 (journal_t *, unsigned long, unsigned long, unsigned long);
922   -extern int journal_check_available_features
  922 +extern int jbd2_journal_check_available_features
923 923 (journal_t *, unsigned long, unsigned long, unsigned long);
924   -extern int journal_set_features
  924 +extern int jbd2_journal_set_features
925 925 (journal_t *, unsigned long, unsigned long, unsigned long);
926   -extern int journal_create (journal_t *);
927   -extern int journal_load (journal_t *journal);
928   -extern void journal_destroy (journal_t *);
929   -extern int journal_recover (journal_t *journal);
930   -extern int journal_wipe (journal_t *, int);
931   -extern int journal_skip_recovery (journal_t *);
932   -extern void journal_update_superblock (journal_t *, int);
933   -extern void __journal_abort_hard (journal_t *);
934   -extern void journal_abort (journal_t *, int);
935   -extern int journal_errno (journal_t *);
936   -extern void journal_ack_err (journal_t *);
937   -extern int journal_clear_err (journal_t *);
938   -extern int journal_bmap(journal_t *, unsigned long, unsigned long *);
939   -extern int journal_force_commit(journal_t *);
  926 +extern int jbd2_journal_create (journal_t *);
  927 +extern int jbd2_journal_load (journal_t *journal);
  928 +extern void jbd2_journal_destroy (journal_t *);
  929 +extern int jbd2_journal_recover (journal_t *journal);
  930 +extern int jbd2_journal_wipe (journal_t *, int);
  931 +extern int jbd2_journal_skip_recovery (journal_t *);
  932 +extern void jbd2_journal_update_superblock (journal_t *, int);
  933 +extern void __jbd2_journal_abort_hard (journal_t *);
  934 +extern void jbd2_journal_abort (journal_t *, int);
  935 +extern int jbd2_journal_errno (journal_t *);
  936 +extern void jbd2_journal_ack_err (journal_t *);
  937 +extern int jbd2_journal_clear_err (journal_t *);
  938 +extern int jbd2_journal_bmap(journal_t *, unsigned long, unsigned long *);
  939 +extern int jbd2_journal_force_commit(journal_t *);
940 940  
941 941 /*
942 942 * journal_head management
943 943 */
944   -struct journal_head *journal_add_journal_head(struct buffer_head *bh);
945   -struct journal_head *journal_grab_journal_head(struct buffer_head *bh);
946   -void journal_remove_journal_head(struct buffer_head *bh);
947   -void journal_put_journal_head(struct journal_head *jh);
  944 +struct journal_head *jbd2_journal_add_journal_head(struct buffer_head *bh);
  945 +struct journal_head *jbd2_journal_grab_journal_head(struct buffer_head *bh);
  946 +void jbd2_journal_remove_journal_head(struct buffer_head *bh);
  947 +void jbd2_journal_put_journal_head(struct journal_head *jh);
948 948  
949 949 /*
950 950 * handle management
951 951 */
952   -extern kmem_cache_t *jbd_handle_cache;
  952 +extern kmem_cache_t *jbd2_handle_cache;
953 953  
954 954 static inline handle_t *jbd_alloc_handle(gfp_t gfp_flags)
955 955 {
956   - return kmem_cache_alloc(jbd_handle_cache, gfp_flags);
  956 + return kmem_cache_alloc(jbd2_handle_cache, gfp_flags);
957 957 }
958 958  
959 959 static inline void jbd_free_handle(handle_t *handle)
960 960 {
961   - kmem_cache_free(jbd_handle_cache, handle);
  961 + kmem_cache_free(jbd2_handle_cache, handle);
962 962 }
963 963  
964 964 /* Primary revoke support */
965 965 #define JOURNAL_REVOKE_DEFAULT_HASH 256
966   -extern int journal_init_revoke(journal_t *, int);
967   -extern void journal_destroy_revoke_caches(void);
968   -extern int journal_init_revoke_caches(void);
  966 +extern int jbd2_journal_init_revoke(journal_t *, int);
  967 +extern void jbd2_journal_destroy_revoke_caches(void);
  968 +extern int jbd2_journal_init_revoke_caches(void);
969 969  
970   -extern void journal_destroy_revoke(journal_t *);
971   -extern int journal_revoke (handle_t *,
  970 +extern void jbd2_journal_destroy_revoke(journal_t *);
  971 +extern int jbd2_journal_revoke (handle_t *,
972 972 unsigned long, struct buffer_head *);
973   -extern int journal_cancel_revoke(handle_t *, struct journal_head *);
974   -extern void journal_write_revoke_records(journal_t *, transaction_t *);
  973 +extern int jbd2_journal_cancel_revoke(handle_t *, struct journal_head *);
  974 +extern void jbd2_journal_write_revoke_records(journal_t *, transaction_t *);
975 975  
976 976 /* Recovery revoke support */
977   -extern int journal_set_revoke(journal_t *, unsigned long, tid_t);
978   -extern int journal_test_revoke(journal_t *, unsigned long, tid_t);
979   -extern void journal_clear_revoke(journal_t *);
980   -extern void journal_switch_revoke_table(journal_t *journal);
  977 +extern int jbd2_journal_set_revoke(journal_t *, unsigned long, tid_t);
  978 +extern int jbd2_journal_test_revoke(journal_t *, unsigned long, tid_t);
  979 +extern void jbd2_journal_clear_revoke(journal_t *);
  980 +extern void jbd2_journal_switch_revoke_table(journal_t *journal);
981 981  
982 982 /*
983 983 * The log thread user interface:
984 984  
... ... @@ -986,17 +986,17 @@
986 986 * transitions on demand.
987 987 */
988 988  
989   -int __log_space_left(journal_t *); /* Called with journal locked */
990   -int log_start_commit(journal_t *journal, tid_t tid);
991   -int __log_start_commit(journal_t *journal, tid_t tid);
992   -int journal_start_commit(journal_t *journal, tid_t *tid);
993   -int journal_force_commit_nested(journal_t *journal);
994   -int log_wait_commit(journal_t *journal, tid_t tid);
995   -int log_do_checkpoint(journal_t *journal);
  989 +int __jbd2_log_space_left(journal_t *); /* Called with journal locked */
  990 +int jbd2_log_start_commit(journal_t *journal, tid_t tid);
  991 +int __jbd2_log_start_commit(journal_t *journal, tid_t tid);
  992 +int jbd2_journal_start_commit(journal_t *journal, tid_t *tid);
  993 +int jbd2_journal_force_commit_nested(journal_t *journal);
  994 +int jbd2_log_wait_commit(journal_t *journal, tid_t tid);
  995 +int jbd2_log_do_checkpoint(journal_t *journal);
996 996  
997   -void __log_wait_for_space(journal_t *journal);
998   -extern void __journal_drop_transaction(journal_t *, transaction_t *);
999   -extern int cleanup_journal_tail(journal_t *);
  997 +void __jbd2_log_wait_for_space(journal_t *journal);
  998 +extern void __jbd2_journal_drop_transaction(journal_t *, transaction_t *);
  999 +extern int jbd2_cleanup_journal_tail(journal_t *);
1000 1000  
1001 1001 /* Debugging code only: */
1002 1002  
... ... @@ -1010,7 +1010,7 @@
1010 1010 /*
1011 1011 * is_journal_abort
1012 1012 *
1013   - * Simple test wrapper function to test the JFS_ABORT state flag. This
  1013 + * Simple test wrapper function to test the JBD2_ABORT state flag. This
1014 1014 * bit, when set, indicates that we have had a fatal error somewhere,
1015 1015 * either inside the journaling layer or indicated to us by the client
1016 1016 * (e.g. ext3), and that we should not commit any further
... ... @@ -1019,7 +1019,7 @@
1019 1019  
1020 1020 static inline int is_journal_aborted(journal_t *journal)
1021 1021 {
1022   - return journal->j_flags & JFS_ABORT;
  1022 + return journal->j_flags & JBD2_ABORT;
1023 1023 }
1024 1024  
1025 1025 static inline int is_handle_aborted(handle_t *handle)
... ... @@ -1029,7 +1029,7 @@
1029 1029 return is_journal_aborted(handle->h_transaction->t_journal);
1030 1030 }
1031 1031  
1032   -static inline void journal_abort_handle(handle_t *handle)
  1032 +static inline void jbd2_journal_abort_handle(handle_t *handle)
1033 1033 {
1034 1034 handle->h_aborted = 1;
1035 1035 }
... ... @@ -1051,7 +1051,7 @@
1051 1051 return (difference >= 0);
1052 1052 }
1053 1053  
1054   -extern int journal_blocks_per_page(struct inode *inode);
  1054 +extern int jbd2_journal_blocks_per_page(struct inode *inode);
1055 1055  
1056 1056 /*
1057 1057 * Return the minimum number of blocks which must be free in the journal