Commit b53e675dc868c4844ecbcce9149cf68e4299231d

Authored by Christoph Hellwig
Committed by Lachlan McIlroy
1 parent 67fcb7bfb6

[XFS] xlog_rec_header/xlog_rec_ext_header endianess annotations

Mostly trivial conversion with one exception: h_num_logops was kept in
native endian previously and only converted to big endian in xlog_sync,
but we always keep it big endian now. With today's CPUs' fast byteswap
instructions that's not an issue but the new variant keeps the code clean
and maintainable.

SGI-PV: 971186
SGI-Modid: xfs-linux-melb:xfs-kern:29821a

Signed-off-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>
Signed-off-by: Tim Shimmin <tes@sgi.com>

Showing 4 changed files with 120 additions and 133 deletions Side-by-side Diff

... ... @@ -1227,12 +1227,12 @@
1227 1227  
1228 1228 head = &iclog->ic_header;
1229 1229 memset(head, 0, sizeof(xlog_rec_header_t));
1230   - INT_SET(head->h_magicno, ARCH_CONVERT, XLOG_HEADER_MAGIC_NUM);
1231   - INT_SET(head->h_version, ARCH_CONVERT,
  1230 + head->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
  1231 + head->h_version = cpu_to_be32(
1232 1232 XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) ? 2 : 1);
1233   - INT_SET(head->h_size, ARCH_CONVERT, log->l_iclog_size);
  1233 + head->h_size = cpu_to_be32(log->l_iclog_size);
1234 1234 /* new fields */
1235   - INT_SET(head->h_fmt, ARCH_CONVERT, XLOG_FMT);
  1235 + head->h_fmt = cpu_to_be32(XLOG_FMT);
1236 1236 memcpy(&head->h_fs_uuid, &mp->m_sb.sb_uuid, sizeof(uuid_t));
1237 1237  
1238 1238  
... ... @@ -1378,7 +1378,7 @@
1378 1378 {
1379 1379 xfs_caddr_t dptr; /* pointer to byte sized element */
1380 1380 xfs_buf_t *bp;
1381   - int i, ops;
  1381 + int i;
1382 1382 uint count; /* byte count of bwrite */
1383 1383 uint count_init; /* initial count before roundup */
1384 1384 int roundoff; /* roundoff to BB or stripe */
1385 1385  
1386 1386  
1387 1387  
... ... @@ -1417,21 +1417,17 @@
1417 1417  
1418 1418 /* real byte length */
1419 1419 if (v2) {
1420   - INT_SET(iclog->ic_header.h_len,
1421   - ARCH_CONVERT,
1422   - iclog->ic_offset + roundoff);
  1420 + iclog->ic_header.h_len =
  1421 + cpu_to_be32(iclog->ic_offset + roundoff);
1423 1422 } else {
1424   - INT_SET(iclog->ic_header.h_len, ARCH_CONVERT, iclog->ic_offset);
  1423 + iclog->ic_header.h_len =
  1424 + cpu_to_be32(iclog->ic_offset);
1425 1425 }
1426 1426  
1427   - /* put ops count in correct order */
1428   - ops = iclog->ic_header.h_num_logops;
1429   - INT_SET(iclog->ic_header.h_num_logops, ARCH_CONVERT, ops);
1430   -
1431 1427 bp = iclog->ic_bp;
1432 1428 ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == (unsigned long)1);
1433 1429 XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)2);
1434   - XFS_BUF_SET_ADDR(bp, BLOCK_LSN(INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT)));
  1430 + XFS_BUF_SET_ADDR(bp, BLOCK_LSN(be64_to_cpu(iclog->ic_header.h_lsn)));
1435 1431  
1436 1432 XFS_STATS_ADD(xs_log_blocks, BTOBB(count));
1437 1433  
... ... @@ -1494,10 +1490,10 @@
1494 1490 * a new cycle. Watch out for the header magic number
1495 1491 * case, though.
1496 1492 */
1497   - for (i=0; i<split; i += BBSIZE) {
1498   - INT_MOD(*(uint *)dptr, ARCH_CONVERT, +1);
1499   - if (INT_GET(*(uint *)dptr, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM)
1500   - INT_MOD(*(uint *)dptr, ARCH_CONVERT, +1);
  1493 + for (i = 0; i < split; i += BBSIZE) {
  1494 + be32_add((__be32 *)dptr, 1);
  1495 + if (be32_to_cpu(*(__be32 *)dptr) == XLOG_HEADER_MAGIC_NUM)
  1496 + be32_add((__be32 *)dptr, 1);
1501 1497 dptr += BBSIZE;
1502 1498 }
1503 1499  
... ... @@ -1586,7 +1582,7 @@
1586 1582 {
1587 1583 spin_lock(&log->l_icloglock);
1588 1584  
1589   - iclog->ic_header.h_num_logops += record_cnt;
  1585 + be32_add(&iclog->ic_header.h_num_logops, record_cnt);
1590 1586 iclog->ic_offset += copy_bytes;
1591 1587  
1592 1588 spin_unlock(&log->l_icloglock);
... ... @@ -1813,7 +1809,7 @@
1813 1809  
1814 1810 /* start_lsn is the first lsn written to. That's all we need. */
1815 1811 if (! *start_lsn)
1816   - *start_lsn = INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT);
  1812 + *start_lsn = be64_to_cpu(iclog->ic_header.h_lsn);
1817 1813  
1818 1814 /* This loop writes out as many regions as can fit in the amount
1819 1815 * of space which was allocated by xlog_state_get_iclog_space().
... ... @@ -1983,7 +1979,8 @@
1983 1979 * We don't need to cover the dummy.
1984 1980 */
1985 1981 if (!changed &&
1986   - (INT_GET(iclog->ic_header.h_num_logops, ARCH_CONVERT) == XLOG_COVER_OPS)) {
  1982 + (be32_to_cpu(iclog->ic_header.h_num_logops) ==
  1983 + XLOG_COVER_OPS)) {
1987 1984 changed = 1;
1988 1985 } else {
1989 1986 /*
... ... @@ -2051,7 +2048,7 @@
2051 2048 lowest_lsn = 0;
2052 2049 do {
2053 2050 if (!(lsn_log->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_DIRTY))) {
2054   - lsn = INT_GET(lsn_log->ic_header.h_lsn, ARCH_CONVERT);
  2051 + lsn = be64_to_cpu(lsn_log->ic_header.h_lsn);
2055 2052 if ((lsn && !lowest_lsn) ||
2056 2053 (XFS_LSN_CMP(lsn, lowest_lsn) < 0)) {
2057 2054 lowest_lsn = lsn;
... ... @@ -2152,11 +2149,9 @@
2152 2149 */
2153 2150  
2154 2151 lowest_lsn = xlog_get_lowest_lsn(log);
2155   - if (lowest_lsn && (
2156   - XFS_LSN_CMP(
2157   - lowest_lsn,
2158   - INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT)
2159   - )<0)) {
  2152 + if (lowest_lsn &&
  2153 + XFS_LSN_CMP(lowest_lsn,
  2154 + be64_to_cpu(iclog->ic_header.h_lsn)) < 0) {
2160 2155 iclog = iclog->ic_next;
2161 2156 continue; /* Leave this iclog for
2162 2157 * another thread */
... ... @@ -2171,11 +2166,10 @@
2171 2166 * No one else can be here except us.
2172 2167 */
2173 2168 spin_lock(&log->l_grant_lock);
2174   - ASSERT(XFS_LSN_CMP(
2175   - log->l_last_sync_lsn,
2176   - INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT)
2177   - )<=0);
2178   - log->l_last_sync_lsn = INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT);
  2169 + ASSERT(XFS_LSN_CMP(log->l_last_sync_lsn,
  2170 + be64_to_cpu(iclog->ic_header.h_lsn)) <= 0);
  2171 + log->l_last_sync_lsn =
  2172 + be64_to_cpu(iclog->ic_header.h_lsn);
2179 2173 spin_unlock(&log->l_grant_lock);
2180 2174  
2181 2175 /*
... ... @@ -2392,8 +2386,8 @@
2392 2386 xlog_tic_add_region(ticket,
2393 2387 log->l_iclog_hsize,
2394 2388 XLOG_REG_TYPE_LRHEADER);
2395   - INT_SET(head->h_cycle, ARCH_CONVERT, log->l_curr_cycle);
2396   - INT_SET(head->h_lsn, ARCH_CONVERT,
  2389 + head->h_cycle = cpu_to_be32(log->l_curr_cycle);
  2390 + head->h_lsn = cpu_to_be64(
2397 2391 xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block));
2398 2392 ASSERT(log->l_curr_block >= 0);
2399 2393 }
... ... @@ -2823,7 +2817,7 @@
2823 2817 iclog->ic_state == XLOG_STATE_WANT_SYNC) {
2824 2818 sync++;
2825 2819 iclog->ic_state = XLOG_STATE_SYNCING;
2826   - INT_SET(iclog->ic_header.h_tail_lsn, ARCH_CONVERT, log->l_tail_lsn);
  2820 + iclog->ic_header.h_tail_lsn = cpu_to_be64(log->l_tail_lsn);
2827 2821 xlog_verify_tail_lsn(log, iclog, log->l_tail_lsn);
2828 2822 /* cycle incremented when incrementing curr_block */
2829 2823 }
... ... @@ -2861,7 +2855,7 @@
2861 2855 if (!eventual_size)
2862 2856 eventual_size = iclog->ic_offset;
2863 2857 iclog->ic_state = XLOG_STATE_WANT_SYNC;
2864   - INT_SET(iclog->ic_header.h_prev_block, ARCH_CONVERT, log->l_prev_block);
  2858 + iclog->ic_header.h_prev_block = cpu_to_be32(log->l_prev_block);
2865 2859 log->l_prev_block = log->l_curr_block;
2866 2860 log->l_prev_cycle = log->l_curr_cycle;
2867 2861  
... ... @@ -2957,7 +2951,7 @@
2957 2951 * the previous sync.
2958 2952 */
2959 2953 iclog->ic_refcnt++;
2960   - lsn = INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT);
  2954 + lsn = be64_to_cpu(iclog->ic_header.h_lsn);
2961 2955 xlog_state_switch_iclogs(log, iclog, 0);
2962 2956 spin_unlock(&log->l_icloglock);
2963 2957  
... ... @@ -2965,7 +2959,7 @@
2965 2959 return XFS_ERROR(EIO);
2966 2960 *log_flushed = 1;
2967 2961 spin_lock(&log->l_icloglock);
2968   - if (INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT) == lsn &&
  2962 + if (be64_to_cpu(iclog->ic_header.h_lsn) == lsn &&
2969 2963 iclog->ic_state != XLOG_STATE_DIRTY)
2970 2964 goto maybe_sleep;
2971 2965 else
... ... @@ -3049,9 +3043,9 @@
3049 3043 }
3050 3044  
3051 3045 do {
3052   - if (INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT) != lsn) {
3053   - iclog = iclog->ic_next;
3054   - continue;
  3046 + if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) {
  3047 + iclog = iclog->ic_next;
  3048 + continue;
3055 3049 }
3056 3050  
3057 3051 if (iclog->ic_state == XLOG_STATE_DIRTY) {
3058 3052  
3059 3053  
3060 3054  
... ... @@ -3460,18 +3454,18 @@
3460 3454 spin_unlock(&log->l_icloglock);
3461 3455  
3462 3456 /* check log magic numbers */
3463   - ptr = (xfs_caddr_t) &(iclog->ic_header);
3464   - if (INT_GET(*(uint *)ptr, ARCH_CONVERT) != XLOG_HEADER_MAGIC_NUM)
  3457 + if (be32_to_cpu(iclog->ic_header.h_magicno) != XLOG_HEADER_MAGIC_NUM)
3465 3458 xlog_panic("xlog_verify_iclog: invalid magic num");
3466 3459  
3467   - for (ptr += BBSIZE; ptr < ((xfs_caddr_t)&(iclog->ic_header))+count;
  3460 + ptr = (xfs_caddr_t) &iclog->ic_header;
  3461 + for (ptr += BBSIZE; ptr < ((xfs_caddr_t)&iclog->ic_header) + count;
3468 3462 ptr += BBSIZE) {
3469   - if (INT_GET(*(uint *)ptr, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM)
  3463 + if (be32_to_cpu(*(__be32 *)ptr) == XLOG_HEADER_MAGIC_NUM)
3470 3464 xlog_panic("xlog_verify_iclog: unexpected magic num");
3471 3465 }
3472 3466  
3473 3467 /* check fields */
3474   - len = INT_GET(iclog->ic_header.h_num_logops, ARCH_CONVERT);
  3468 + len = be32_to_cpu(iclog->ic_header.h_num_logops);
3475 3469 ptr = iclog->ic_datap;
3476 3470 base_ptr = ptr;
3477 3471 ophead = (xlog_op_header_t *)ptr;
3478 3472  
... ... @@ -3512,9 +3506,9 @@
3512 3506 if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
3513 3507 j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3514 3508 k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3515   - op_len = INT_GET(xhdr[j].hic_xheader.xh_cycle_data[k], ARCH_CONVERT);
  3509 + op_len = be32_to_cpu(xhdr[j].hic_xheader.xh_cycle_data[k]);
3516 3510 } else {
3517   - op_len = INT_GET(iclog->ic_header.h_cycle_data[idx], ARCH_CONVERT);
  3511 + op_len = be32_to_cpu(iclog->ic_header.h_cycle_data[idx]);
3518 3512 }
3519 3513 }
3520 3514 ptr += sizeof(xlog_op_header_t) + op_len;
... ... @@ -22,8 +22,9 @@
22 22  
23 23 #define CYCLE_LSN(lsn) ((uint)((lsn)>>32))
24 24 #define BLOCK_LSN(lsn) ((uint)(lsn))
  25 +
25 26 /* this is used in a spot where we might otherwise double-endian-flip */
26   -#define CYCLE_LSN_DISK(lsn) (((uint *)&(lsn))[0])
  27 +#define CYCLE_LSN_DISK(lsn) (((__be32 *)&(lsn))[0])
27 28  
28 29 #ifdef __KERNEL__
29 30 /*
fs/xfs/xfs_log_priv.h
... ... @@ -63,10 +63,10 @@
63 63  
64 64 static inline uint xlog_get_cycle(char *ptr)
65 65 {
66   - if (INT_GET(*(uint *)ptr, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM)
67   - return INT_GET(*((uint *)ptr + 1), ARCH_CONVERT);
  66 + if (be32_to_cpu(*(__be32 *)ptr) == XLOG_HEADER_MAGIC_NUM)
  67 + return be32_to_cpu(*((__be32 *)ptr + 1));
68 68 else
69   - return INT_GET(*(uint *)ptr, ARCH_CONVERT);
  69 + return be32_to_cpu(*(__be32 *)ptr);
70 70 }
71 71  
72 72 #define BLK_AVG(blk1, blk2) ((blk1+blk2) >> 1)
73 73  
... ... @@ -85,9 +85,9 @@
85 85 *
86 86 * this has endian issues, of course.
87 87 */
88   -static inline uint xlog_get_client_id(uint i)
  88 +static inline uint xlog_get_client_id(__be32 i)
89 89 {
90   - return INT_GET(i, ARCH_CONVERT) >> 24;
  90 + return be32_to_cpu(i) >> 24;
91 91 }
92 92  
93 93 #define xlog_panic(args...) cmn_err(CE_PANIC, ## args)
94 94  
95 95  
... ... @@ -287,25 +287,25 @@
287 287 #endif
288 288  
289 289 typedef struct xlog_rec_header {
290   - uint h_magicno; /* log record (LR) identifier : 4 */
291   - uint h_cycle; /* write cycle of log : 4 */
292   - int h_version; /* LR version : 4 */
293   - int h_len; /* len in bytes; should be 64-bit aligned: 4 */
294   - xfs_lsn_t h_lsn; /* lsn of this LR : 8 */
295   - xfs_lsn_t h_tail_lsn; /* lsn of 1st LR w/ buffers not committed: 8 */
296   - uint h_chksum; /* may not be used; non-zero if used : 4 */
297   - int h_prev_block; /* block number to previous LR : 4 */
298   - int h_num_logops; /* number of log operations in this LR : 4 */
299   - uint h_cycle_data[XLOG_HEADER_CYCLE_SIZE / BBSIZE];
  290 + __be32 h_magicno; /* log record (LR) identifier : 4 */
  291 + __be32 h_cycle; /* write cycle of log : 4 */
  292 + __be32 h_version; /* LR version : 4 */
  293 + __be32 h_len; /* len in bytes; should be 64-bit aligned: 4 */
  294 + __be64 h_lsn; /* lsn of this LR : 8 */
  295 + __be64 h_tail_lsn; /* lsn of 1st LR w/ buffers not committed: 8 */
  296 + __be32 h_chksum; /* may not be used; non-zero if used : 4 */
  297 + __be32 h_prev_block; /* block number to previous LR : 4 */
  298 + __be32 h_num_logops; /* number of log operations in this LR : 4 */
  299 + __be32 h_cycle_data[XLOG_HEADER_CYCLE_SIZE / BBSIZE];
300 300 /* new fields */
301   - int h_fmt; /* format of log record : 4 */
302   - uuid_t h_fs_uuid; /* uuid of FS : 16 */
303   - int h_size; /* iclog size : 4 */
  301 + __be32 h_fmt; /* format of log record : 4 */
  302 + uuid_t h_fs_uuid; /* uuid of FS : 16 */
  303 + __be32 h_size; /* iclog size : 4 */
304 304 } xlog_rec_header_t;
305 305  
306 306 typedef struct xlog_rec_ext_header {
307   - uint xh_cycle; /* write cycle of log : 4 */
308   - uint xh_cycle_data[XLOG_HEADER_CYCLE_SIZE / BBSIZE]; /* : 256 */
  307 + __be32 xh_cycle; /* write cycle of log : 4 */
  308 + __be32 xh_cycle_data[XLOG_HEADER_CYCLE_SIZE / BBSIZE]; /* : 256 */
309 309 } xlog_rec_ext_header_t;
310 310  
311 311 #ifdef __KERNEL__
fs/xfs/xfs_log_recover.c
... ... @@ -198,7 +198,7 @@
198 198 cmn_err(CE_DEBUG, " log : uuid = ");
199 199 for (b = 0; b < 16; b++)
200 200 cmn_err(CE_DEBUG, "%02x",((uchar_t *)&head->h_fs_uuid)[b]);
201   - cmn_err(CE_DEBUG, ", fmt = %d\n", INT_GET(head->h_fmt, ARCH_CONVERT));
  201 + cmn_err(CE_DEBUG, ", fmt = %d\n", be32_to_cpu(head->h_fmt));
202 202 }
203 203 #else
204 204 #define xlog_header_check_dump(mp, head)
205 205  
... ... @@ -212,14 +212,14 @@
212 212 xfs_mount_t *mp,
213 213 xlog_rec_header_t *head)
214 214 {
215   - ASSERT(INT_GET(head->h_magicno, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM);
  215 + ASSERT(be32_to_cpu(head->h_magicno) == XLOG_HEADER_MAGIC_NUM);
216 216  
217 217 /*
218 218 * IRIX doesn't write the h_fmt field and leaves it zeroed
219 219 * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
220 220 * a dirty log created in IRIX.
221 221 */
222   - if (unlikely(INT_GET(head->h_fmt, ARCH_CONVERT) != XLOG_FMT)) {
  222 + if (unlikely(be32_to_cpu(head->h_fmt) != XLOG_FMT)) {
223 223 xlog_warn(
224 224 "XFS: dirty log written in incompatible format - can't recover");
225 225 xlog_header_check_dump(mp, head);
... ... @@ -245,7 +245,7 @@
245 245 xfs_mount_t *mp,
246 246 xlog_rec_header_t *head)
247 247 {
248   - ASSERT(INT_GET(head->h_magicno, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM);
  248 + ASSERT(be32_to_cpu(head->h_magicno) == XLOG_HEADER_MAGIC_NUM);
249 249  
250 250 if (uuid_is_nil(&head->h_fs_uuid)) {
251 251 /*
... ... @@ -447,8 +447,7 @@
447 447  
448 448 head = (xlog_rec_header_t *)offset;
449 449  
450   - if (XLOG_HEADER_MAGIC_NUM ==
451   - INT_GET(head->h_magicno, ARCH_CONVERT))
  450 + if (XLOG_HEADER_MAGIC_NUM == be32_to_cpu(head->h_magicno))
452 451 break;
453 452  
454 453 if (!smallmem)
... ... @@ -480,7 +479,7 @@
480 479 * record do we update last_blk.
481 480 */
482 481 if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
483   - uint h_size = INT_GET(head->h_size, ARCH_CONVERT);
  482 + uint h_size = be32_to_cpu(head->h_size);
484 483  
485 484 xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
486 485 if (h_size % XLOG_HEADER_CYCLE_SIZE)
... ... @@ -489,8 +488,8 @@
489 488 xhdrs = 1;
490 489 }
491 490  
492   - if (*last_blk - i + extra_bblks
493   - != BTOBB(INT_GET(head->h_len, ARCH_CONVERT)) + xhdrs)
  491 + if (*last_blk - i + extra_bblks !=
  492 + BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
494 493 *last_blk = i;
495 494  
496 495 out:
... ... @@ -823,8 +822,7 @@
823 822 if ((error = xlog_bread(log, i, 1, bp)))
824 823 goto bread_err;
825 824 offset = xlog_align(log, i, 1, bp);
826   - if (XLOG_HEADER_MAGIC_NUM ==
827   - INT_GET(*(uint *)offset, ARCH_CONVERT)) {
  825 + if (XLOG_HEADER_MAGIC_NUM == be32_to_cpu(*(__be32 *)offset)) {
828 826 found = 1;
829 827 break;
830 828 }
... ... @@ -841,7 +839,7 @@
841 839 goto bread_err;
842 840 offset = xlog_align(log, i, 1, bp);
843 841 if (XLOG_HEADER_MAGIC_NUM ==
844   - INT_GET(*(uint*)offset, ARCH_CONVERT)) {
  842 + be32_to_cpu(*(__be32 *)offset)) {
845 843 found = 2;
846 844 break;
847 845 }
... ... @@ -855,7 +853,7 @@
855 853  
856 854 /* find blk_no of tail of log */
857 855 rhead = (xlog_rec_header_t *)offset;
858   - *tail_blk = BLOCK_LSN(INT_GET(rhead->h_tail_lsn, ARCH_CONVERT));
  856 + *tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));
859 857  
860 858 /*
861 859 * Reset log values according to the state of the log when we
862 860  
... ... @@ -869,11 +867,11 @@
869 867 */
870 868 log->l_prev_block = i;
871 869 log->l_curr_block = (int)*head_blk;
872   - log->l_curr_cycle = INT_GET(rhead->h_cycle, ARCH_CONVERT);
  870 + log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
873 871 if (found == 2)
874 872 log->l_curr_cycle++;
875   - log->l_tail_lsn = INT_GET(rhead->h_tail_lsn, ARCH_CONVERT);
876   - log->l_last_sync_lsn = INT_GET(rhead->h_lsn, ARCH_CONVERT);
  873 + log->l_tail_lsn = be64_to_cpu(rhead->h_tail_lsn);
  874 + log->l_last_sync_lsn = be64_to_cpu(rhead->h_lsn);
877 875 log->l_grant_reserve_cycle = log->l_curr_cycle;
878 876 log->l_grant_reserve_bytes = BBTOB(log->l_curr_block);
879 877 log->l_grant_write_cycle = log->l_curr_cycle;
... ... @@ -891,8 +889,8 @@
891 889 * unmount record rather than the block after it.
892 890 */
893 891 if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
894   - int h_size = INT_GET(rhead->h_size, ARCH_CONVERT);
895   - int h_version = INT_GET(rhead->h_version, ARCH_CONVERT);
  892 + int h_size = be32_to_cpu(rhead->h_size);
  893 + int h_version = be32_to_cpu(rhead->h_version);
896 894  
897 895 if ((h_version & XLOG_VERSION_2) &&
898 896 (h_size > XLOG_HEADER_CYCLE_SIZE)) {
899 897  
... ... @@ -906,10 +904,10 @@
906 904 hblks = 1;
907 905 }
908 906 after_umount_blk = (i + hblks + (int)
909   - BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT))) % log->l_logBBsize;
  907 + BTOBB(be32_to_cpu(rhead->h_len))) % log->l_logBBsize;
910 908 tail_lsn = log->l_tail_lsn;
911 909 if (*head_blk == after_umount_blk &&
912   - INT_GET(rhead->h_num_logops, ARCH_CONVERT) == 1) {
  910 + be32_to_cpu(rhead->h_num_logops) == 1) {
913 911 umount_data_blk = (i + hblks) % log->l_logBBsize;
914 912 if ((error = xlog_bread(log, umount_data_blk, 1, bp))) {
915 913 goto bread_err;
916 914  
... ... @@ -1100,14 +1098,13 @@
1100 1098 xlog_rec_header_t *recp = (xlog_rec_header_t *)buf;
1101 1099  
1102 1100 memset(buf, 0, BBSIZE);
1103   - INT_SET(recp->h_magicno, ARCH_CONVERT, XLOG_HEADER_MAGIC_NUM);
1104   - INT_SET(recp->h_cycle, ARCH_CONVERT, cycle);
1105   - INT_SET(recp->h_version, ARCH_CONVERT,
  1101 + recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
  1102 + recp->h_cycle = cpu_to_be32(cycle);
  1103 + recp->h_version = cpu_to_be32(
1106 1104 XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) ? 2 : 1);
1107   - INT_SET(recp->h_lsn, ARCH_CONVERT, xlog_assign_lsn(cycle, block));
1108   - INT_SET(recp->h_tail_lsn, ARCH_CONVERT,
1109   - xlog_assign_lsn(tail_cycle, tail_block));
1110   - INT_SET(recp->h_fmt, ARCH_CONVERT, XLOG_FMT);
  1105 + recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
  1106 + recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
  1107 + recp->h_fmt = cpu_to_be32(XLOG_FMT);
1111 1108 memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
1112 1109 }
1113 1110  
... ... @@ -2214,7 +2211,7 @@
2214 2211 * overlap with future reads of those inodes.
2215 2212 */
2216 2213 if (XFS_DINODE_MAGIC ==
2217   - INT_GET(*((__uint16_t *)(xfs_buf_offset(bp, 0))), ARCH_CONVERT) &&
  2214 + be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
2218 2215 (XFS_BUF_COUNT(bp) != MAX(log->l_mp->m_sb.sb_blocksize,
2219 2216 (__uint32_t)XFS_INODE_CLUSTER_SIZE(log->l_mp)))) {
2220 2217 XFS_BUF_STALE(bp);
... ... @@ -2584,8 +2581,7 @@
2584 2581 /*
2585 2582 * This type of quotas was turned off, so ignore this record.
2586 2583 */
2587   - type = INT_GET(recddq->d_flags, ARCH_CONVERT) &
2588   - (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
  2584 + type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
2589 2585 ASSERT(type);
2590 2586 if (log->l_quotaoffs_flag & type)
2591 2587 return (0);
... ... @@ -2898,8 +2894,8 @@
2898 2894 unsigned long hash;
2899 2895 uint flags;
2900 2896  
2901   - lp = dp + INT_GET(rhead->h_len, ARCH_CONVERT);
2902   - num_logops = INT_GET(rhead->h_num_logops, ARCH_CONVERT);
  2897 + lp = dp + be32_to_cpu(rhead->h_len);
  2898 + num_logops = be32_to_cpu(rhead->h_num_logops);
2903 2899  
2904 2900 /* check the log format matches our own - else we can't recover */
2905 2901 if (xlog_header_check_recover(log->l_mp, rhead))
... ... @@ -2922,7 +2918,7 @@
2922 2918 if (trans == NULL) { /* not found; add new tid */
2923 2919 if (ohead->oh_flags & XLOG_START_TRANS)
2924 2920 xlog_recover_new_tid(&rhash[hash], tid,
2925   - INT_GET(rhead->h_lsn, ARCH_CONVERT));
  2921 + be64_to_cpu(rhead->h_lsn));
2926 2922 } else {
2927 2923 ASSERT(dp + be32_to_cpu(ohead->oh_len) <= lp);
2928 2924 flags = ohead->oh_flags & ~XLOG_END_TRANS;
2929 2925  
2930 2926  
2931 2927  
... ... @@ -3313,16 +3309,16 @@
3313 3309 int size)
3314 3310 {
3315 3311 int i;
3316   - uint *up;
  3312 + __be32 *up;
3317 3313 uint chksum = 0;
3318 3314  
3319   - up = (uint *)iclog->ic_datap;
  3315 + up = (__be32 *)iclog->ic_datap;
3320 3316 /* divide length by 4 to get # words */
3321 3317 for (i = 0; i < (size >> 2); i++) {
3322   - chksum ^= INT_GET(*up, ARCH_CONVERT);
  3318 + chksum ^= be32_to_cpu(*up);
3323 3319 up++;
3324 3320 }
3325   - INT_SET(iclog->ic_header.h_chksum, ARCH_CONVERT, chksum);
  3321 + iclog->ic_header.h_chksum = cpu_to_be32(chksum);
3326 3322 }
3327 3323 #else
3328 3324 #define xlog_pack_data_checksum(log, iclog, size)
... ... @@ -3339,7 +3335,7 @@
3339 3335 {
3340 3336 int i, j, k;
3341 3337 int size = iclog->ic_offset + roundoff;
3342   - uint cycle_lsn;
  3338 + __be32 cycle_lsn;
3343 3339 xfs_caddr_t dp;
3344 3340 xlog_in_core_2_t *xhdr;
3345 3341  
... ... @@ -3350,8 +3346,8 @@
3350 3346 dp = iclog->ic_datap;
3351 3347 for (i = 0; i < BTOBB(size) &&
3352 3348 i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
3353   - iclog->ic_header.h_cycle_data[i] = *(uint *)dp;
3354   - *(uint *)dp = cycle_lsn;
  3349 + iclog->ic_header.h_cycle_data[i] = *(__be32 *)dp;
  3350 + *(__be32 *)dp = cycle_lsn;
3355 3351 dp += BBSIZE;
3356 3352 }
3357 3353  
... ... @@ -3360,8 +3356,8 @@
3360 3356 for ( ; i < BTOBB(size); i++) {
3361 3357 j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3362 3358 k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3363   - xhdr[j].hic_xheader.xh_cycle_data[k] = *(uint *)dp;
3364   - *(uint *)dp = cycle_lsn;
  3359 + xhdr[j].hic_xheader.xh_cycle_data[k] = *(__be32 *)dp;
  3360 + *(__be32 *)dp = cycle_lsn;
3365 3361 dp += BBSIZE;
3366 3362 }
3367 3363  
3368 3364  
3369 3365  
3370 3366  
... ... @@ -3378,21 +3374,21 @@
3378 3374 xfs_caddr_t dp,
3379 3375 xlog_t *log)
3380 3376 {
3381   - uint *up = (uint *)dp;
  3377 + __be32 *up = (__be32 *)dp;
3382 3378 uint chksum = 0;
3383 3379 int i;
3384 3380  
3385 3381 /* divide length by 4 to get # words */
3386   - for (i=0; i < INT_GET(rhead->h_len, ARCH_CONVERT) >> 2; i++) {
3387   - chksum ^= INT_GET(*up, ARCH_CONVERT);
  3382 + for (i=0; i < be32_to_cpu(rhead->h_len) >> 2; i++) {
  3383 + chksum ^= be32_to_cpu(*up);
3388 3384 up++;
3389 3385 }
3390   - if (chksum != INT_GET(rhead->h_chksum, ARCH_CONVERT)) {
  3386 + if (chksum != be32_to_cpu(rhead->h_chksum)) {
3391 3387 if (rhead->h_chksum ||
3392 3388 ((log->l_flags & XLOG_CHKSUM_MISMATCH) == 0)) {
3393 3389 cmn_err(CE_DEBUG,
3394 3390 "XFS: LogR chksum mismatch: was (0x%x) is (0x%x)\n",
3395   - INT_GET(rhead->h_chksum, ARCH_CONVERT), chksum);
  3391 + be32_to_cpu(rhead->h_chksum), chksum);
3396 3392 cmn_err(CE_DEBUG,
3397 3393 "XFS: Disregard message if filesystem was created with non-DEBUG kernel");
3398 3394 if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
3399 3395  
3400 3396  
3401 3397  
... ... @@ -3416,18 +3412,18 @@
3416 3412 int i, j, k;
3417 3413 xlog_in_core_2_t *xhdr;
3418 3414  
3419   - for (i = 0; i < BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT)) &&
  3415 + for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
3420 3416 i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
3421   - *(uint *)dp = *(uint *)&rhead->h_cycle_data[i];
  3417 + *(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
3422 3418 dp += BBSIZE;
3423 3419 }
3424 3420  
3425 3421 if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
3426 3422 xhdr = (xlog_in_core_2_t *)rhead;
3427   - for ( ; i < BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT)); i++) {
  3423 + for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
3428 3424 j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3429 3425 k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3430   - *(uint *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
  3426 + *(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
3431 3427 dp += BBSIZE;
3432 3428 }
3433 3429 }
3434 3430  
3435 3431  
3436 3432  
... ... @@ -3443,24 +3439,21 @@
3443 3439 {
3444 3440 int hlen;
3445 3441  
3446   - if (unlikely(
3447   - (INT_GET(rhead->h_magicno, ARCH_CONVERT) !=
3448   - XLOG_HEADER_MAGIC_NUM))) {
  3442 + if (unlikely(be32_to_cpu(rhead->h_magicno) != XLOG_HEADER_MAGIC_NUM)) {
3449 3443 XFS_ERROR_REPORT("xlog_valid_rec_header(1)",
3450 3444 XFS_ERRLEVEL_LOW, log->l_mp);
3451 3445 return XFS_ERROR(EFSCORRUPTED);
3452 3446 }
3453 3447 if (unlikely(
3454 3448 (!rhead->h_version ||
3455   - (INT_GET(rhead->h_version, ARCH_CONVERT) &
3456   - (~XLOG_VERSION_OKBITS)) != 0))) {
  3449 + (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) {
3457 3450 xlog_warn("XFS: %s: unrecognised log version (%d).",
3458   - __FUNCTION__, INT_GET(rhead->h_version, ARCH_CONVERT));
  3451 + __FUNCTION__, be32_to_cpu(rhead->h_version));
3459 3452 return XFS_ERROR(EIO);
3460 3453 }
3461 3454  
3462 3455 /* LR body must have data or it wouldn't have been written */
3463   - hlen = INT_GET(rhead->h_len, ARCH_CONVERT);
  3456 + hlen = be32_to_cpu(rhead->h_len);
3464 3457 if (unlikely( hlen <= 0 || hlen > INT_MAX )) {
3465 3458 XFS_ERROR_REPORT("xlog_valid_rec_header(2)",
3466 3459 XFS_ERRLEVEL_LOW, log->l_mp);
... ... @@ -3520,9 +3513,8 @@
3520 3513 error = xlog_valid_rec_header(log, rhead, tail_blk);
3521 3514 if (error)
3522 3515 goto bread_err1;
3523   - h_size = INT_GET(rhead->h_size, ARCH_CONVERT);
3524   - if ((INT_GET(rhead->h_version, ARCH_CONVERT)
3525   - & XLOG_VERSION_2) &&
  3516 + h_size = be32_to_cpu(rhead->h_size);
  3517 + if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) &&
3526 3518 (h_size > XLOG_HEADER_CYCLE_SIZE)) {
3527 3519 hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
3528 3520 if (h_size % XLOG_HEADER_CYCLE_SIZE)
... ... @@ -3559,7 +3551,7 @@
3559 3551 goto bread_err2;
3560 3552  
3561 3553 /* blocks in data section */
3562   - bblks = (int)BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT));
  3554 + bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3563 3555 error = xlog_bread(log, blk_no + hblks, bblks, dbp);
3564 3556 if (error)
3565 3557 goto bread_err2;
... ... @@ -3634,7 +3626,7 @@
3634 3626 if (error)
3635 3627 goto bread_err2;
3636 3628  
3637   - bblks = (int)BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT));
  3629 + bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3638 3630 blk_no += hblks;
3639 3631  
3640 3632 /* Read in data for log record */
... ... @@ -3705,7 +3697,7 @@
3705 3697 error = xlog_valid_rec_header(log, rhead, blk_no);
3706 3698 if (error)
3707 3699 goto bread_err2;
3708   - bblks = (int)BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT));
  3700 + bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3709 3701 if ((error = xlog_bread(log, blk_no+hblks, bblks, dbp)))
3710 3702 goto bread_err2;
3711 3703 offset = xlog_align(log, blk_no+hblks, bblks, dbp);