Commit a8272ce0c1d49aa3bec57682678f0bdfe28ed4ca

Authored by David Chinner
Committed by Lachlan McIlroy
1 parent a69b176df2

[XFS] Fix up sparse warnings.

These are mostly locking annotations, marking things static, casts where
needed and declaring stuff in header files.

SGI-PV: 971186
SGI-Modid: xfs-linux-melb:xfs-kern:30002a

Signed-off-by: David Chinner <dgc@sgi.com>
Signed-off-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>
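
Many of the warnings silenced below are sparse's "symbol 'foo' was not declared. Should it be static?" complaint, which has two standard fixes: mark the symbol static (STATIC in XFS) when it is file-local, or declare it in a header when other files use it. A minimal sketch with hypothetical names, not code from this patch (kmem_zone_t and STATIC are the usual XFS definitions):

/* foo.h - declare the shared zone so other files, and sparse, can see it */
extern kmem_zone_t *foo_zone;

/* foo.c */
#include "foo.h"

kmem_zone_t *foo_zone;	/* shared with other files: declared in foo.h above */

STATIC int		/* used only in foo.c: sparse wants it static */
foo_count_blocks(
	int	nblocks)
{
	return nblocks;
}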

Showing 19 changed files with 50 additions and 37 deletions

fs/xfs/linux-2.6/xfs_globals.c
... ... @@ -47,5 +47,6 @@
47 47 /*
48 48 * Global system credential structure.
49 49 */
50   -cred_t sys_cred_val, *sys_cred = &sys_cred_val;
  50 +static cred_t sys_cred_val;
  51 +cred_t *sys_cred = &sys_cred_val;
fs/xfs/linux-2.6/xfs_ioctl.c
... ... @@ -512,7 +512,7 @@
512 512 if (!kbuf)
513 513 return ENOMEM;
514 514  
515   - error = xfs_attr_get(XFS_I(inode), name, kbuf, len, flags, NULL);
  515 + error = xfs_attr_get(XFS_I(inode), name, kbuf, (int *)len, flags, NULL);
516 516 if (error)
517 517 goto out_kfree;
518 518  
fs/xfs/linux-2.6/xfs_ioctl32.c
... ... @@ -44,6 +44,7 @@
44 44 #include "xfs_error.h"
45 45 #include "xfs_dfrag.h"
46 46 #include "xfs_vnodeops.h"
  47 +#include "xfs_ioctl32.h"
47 48  
48 49 #define _NATIVE_IOC(cmd, type) \
49 50 _IOC(_IOC_DIR(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd), sizeof(type))
fs/xfs/xfs_attr.c
... ... @@ -929,7 +929,7 @@
929 929 * This leaf block cannot have a "remote" value, we only call this routine
930 930 * if bmap_one_block() says there is only one block (ie: no remote blks).
931 931 */
932   -int
  932 +STATIC int
933 933 xfs_attr_leaf_addname(xfs_da_args_t *args)
934 934 {
935 935 xfs_inode_t *dp;
fs/xfs/xfs_bmap.c
... ... @@ -6393,7 +6393,7 @@
6393 6393 * Recursively walks each level of a btree
6394 6394 * to count total fsblocks in use.
6395 6395 */
6396   -int /* error */
  6396 +STATIC int /* error */
6397 6397 xfs_bmap_count_tree(
6398 6398 xfs_mount_t *mp, /* file system mount point */
6399 6399 xfs_trans_t *tp, /* transaction pointer */
... ... @@ -6469,7 +6469,7 @@
6469 6469 /*
6470 6470 * Count leaf blocks given a range of extent records.
6471 6471 */
6472   -int
  6472 +STATIC int
6473 6473 xfs_bmap_count_leaves(
6474 6474 xfs_ifork_t *ifp,
6475 6475 xfs_extnum_t idx,
... ... @@ -6489,7 +6489,7 @@
6489 6489 * Count leaf blocks given a range of extent records originally
6490 6490 * in btree format.
6491 6491 */
6492   -int
  6492 +STATIC int
6493 6493 xfs_bmap_disk_count_leaves(
6494 6494 xfs_extnum_t idx,
6495 6495 xfs_bmbt_block_t *block,
fs/xfs/xfs_bmap.h
... ... @@ -25,6 +25,8 @@
25 25 struct xfs_mount;
26 26 struct xfs_trans;
27 27  
  28 +extern kmem_zone_t *xfs_bmap_free_item_zone;
  29 +
28 30 /*
29 31 * DELTA: describe a change to the in-core extent list.
30 32 *
fs/xfs/xfs_btree.h
... ... @@ -24,6 +24,8 @@
24 24 struct xfs_mount;
25 25 struct xfs_trans;
26 26  
  27 +extern kmem_zone_t *xfs_btree_cur_zone;
  28 +
27 29 /*
28 30 * This nonsense is to make -wlint happy.
29 31 */
fs/xfs/xfs_buf_item.h
... ... @@ -18,6 +18,8 @@
18 18 #ifndef __XFS_BUF_ITEM_H__
19 19 #define __XFS_BUF_ITEM_H__
20 20  
  21 +extern kmem_zone_t *xfs_buf_item_zone;
  22 +
21 23 /*
22 24 * This is the structure used to lay out a buf log item in the
23 25 * log. The data map describes which 128 byte chunks of the buffer
fs/xfs/xfs_da_btree.h
... ... @@ -260,6 +260,7 @@
260 260 xfs_daddr_t xfs_da_blkno(xfs_dabuf_t *dabuf);
261 261  
262 262 extern struct kmem_zone *xfs_da_state_zone;
  263 +extern struct kmem_zone *xfs_dabuf_zone;
263 264 #endif /* __KERNEL__ */
264 265  
265 266 #endif /* __XFS_DA_BTREE_H__ */
fs/xfs/xfs_dir2.c
... ... @@ -42,6 +42,7 @@
42 42 #include "xfs_dir2_node.h"
43 43 #include "xfs_dir2_trace.h"
44 44 #include "xfs_error.h"
  45 +#include "xfs_vnodeops.h"
45 46  
46 47  
47 48 void
fs/xfs/xfs_filestream.c
... ... @@ -348,7 +348,7 @@
348 348 }
349 349  
350 350 /* xfs_fstrm_free_func(): callback for freeing cached stream items. */
351   -void
  351 +STATIC void
352 352 xfs_fstrm_free_func(
353 353 unsigned long ino,
354 354 void *data)
fs/xfs/xfs_log.c
... ... @@ -907,7 +907,7 @@
907 907 * the tail. The details of this case are described below, but the end
908 908 * result is that we return the size of the log as the amount of space left.
909 909 */
910   -int
  910 +STATIC int
911 911 xlog_space_left(xlog_t *log, int cycle, int bytes)
912 912 {
913 913 int free_bytes;
... ... @@ -1289,7 +1289,7 @@
1289 1289 * pushes on an lsn which is further along in the log once we reach the high
1290 1290 * water mark. In this manner, we would be creating a low water mark.
1291 1291 */
1292   -void
  1292 +STATIC void
1293 1293 xlog_grant_push_ail(xfs_mount_t *mp,
1294 1294 int need_bytes)
1295 1295 {
... ... @@ -1372,7 +1372,7 @@
1372 1372 * is added immediately before calling bwrite().
1373 1373 */
1374 1374  
1375   -int
  1375 +STATIC int
1376 1376 xlog_sync(xlog_t *log,
1377 1377 xlog_in_core_t *iclog)
1378 1378 {
... ... @@ -1516,7 +1516,7 @@
1516 1516 /*
1517 1517 * Deallocate a log structure
1518 1518 */
1519   -void
  1519 +STATIC void
1520 1520 xlog_dealloc_log(xlog_t *log)
1521 1521 {
1522 1522 xlog_in_core_t *iclog, *next_iclog;
... ... @@ -1738,7 +1738,7 @@
1738 1738 * we don't update ic_offset until the end when we know exactly how many
1739 1739 * bytes have been written out.
1740 1740 */
1741   -int
  1741 +STATIC int
1742 1742 xlog_write(xfs_mount_t * mp,
1743 1743 xfs_log_iovec_t reg[],
1744 1744 int nentries,
... ... @@ -2280,7 +2280,7 @@
2280 2280 * global state machine log lock. Assume that the calls to cvsema won't
2281 2281 * take a long time. At least we know it won't sleep.
2282 2282 */
2283   -void
  2283 +STATIC void
2284 2284 xlog_state_done_syncing(
2285 2285 xlog_in_core_t *iclog,
2286 2286 int aborted)
... ... @@ -2340,7 +2340,7 @@
2340 2340 * needs to be incremented, depending on the amount of data which
2341 2341 * is copied.
2342 2342 */
2343   -int
  2343 +STATIC int
2344 2344 xlog_state_get_iclog_space(xlog_t *log,
2345 2345 int len,
2346 2346 xlog_in_core_t **iclogp,
... ... @@ -2776,7 +2776,7 @@
2776 2776 /*
2777 2777 * Atomically put back used ticket.
2778 2778 */
2779   -void
  2779 +STATIC void
2780 2780 xlog_state_put_ticket(xlog_t *log,
2781 2781 xlog_ticket_t *tic)
2782 2782 {
... ... @@ -2794,7 +2794,7 @@
2794 2794 *
2795 2795 *
2796 2796 */
2797   -int
  2797 +STATIC int
2798 2798 xlog_state_release_iclog(xlog_t *log,
2799 2799 xlog_in_core_t *iclog)
2800 2800 {
... ... @@ -3024,7 +3024,7 @@
3024 3024 * If filesystem activity goes to zero, the iclog will get flushed only by
3025 3025 * bdflush().
3026 3026 */
3027   -int
  3027 +STATIC int
3028 3028 xlog_state_sync(xlog_t *log,
3029 3029 xfs_lsn_t lsn,
3030 3030 uint flags,
... ... @@ -3129,7 +3129,7 @@
3129 3129 * Called when we want to mark the current iclog as being ready to sync to
3130 3130 * disk.
3131 3131 */
3132   -void
  3132 +STATIC void
3133 3133 xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog)
3134 3134 {
3135 3135 spin_lock(&log->l_icloglock);
... ... @@ -3241,7 +3241,7 @@
3241 3241 /*
3242 3242 * Grab ticket off freelist or allocate some more
3243 3243 */
3244   -xlog_ticket_t *
  3244 +STATIC xlog_ticket_t *
3245 3245 xlog_ticket_get(xlog_t *log,
3246 3246 int unit_bytes,
3247 3247 int cnt,
fs/xfs/xfs_log_recover.c
... ... @@ -293,7 +293,7 @@
293 293 * Note that the algorithm can not be perfect because the disk will not
294 294 * necessarily be perfect.
295 295 */
296   -int
  296 +STATIC int
297 297 xlog_find_cycle_start(
298 298 xlog_t *log,
299 299 xfs_buf_t *bp,
... ... @@ -986,7 +986,7 @@
986 986 * -1 => use *blk_no as the first block of the log
987 987 * >0 => error has occurred
988 988 */
989   -int
  989 +STATIC int
990 990 xlog_find_zeroed(
991 991 xlog_t *log,
992 992 xfs_daddr_t *blk_no)
fs/xfs/xfs_mount.c
... ... @@ -2343,7 +2343,7 @@
2343 2343 spin_unlock(&mp->m_sb_lock);
2344 2344 }
2345 2345  
2346   -int
  2346 +STATIC int
2347 2347 xfs_icsb_modify_counters(
2348 2348 xfs_mount_t *mp,
2349 2349 xfs_sb_field_t field,
fs/xfs/xfs_mru_cache.c
... ... @@ -225,10 +225,14 @@
225 225 * list need to be deleted. For each element this involves removing it from the
226 226 * data store, removing it from the reap list, calling the client's free
227 227 * function and deleting the element from the element zone.
  228 + *
  229 + * We get called holding the mru->lock, which we drop and then reacquire.
  230 + * Sparse needs special help with this to tell it we know what we are doing.
228 231 */
229 232 STATIC void
230 233 _xfs_mru_cache_clear_reap_list(
231   - xfs_mru_cache_t *mru)
  234 + xfs_mru_cache_t *mru) __releases(mru->lock) __acquires(mru->lock)
  235 +
232 236 {
233 237 xfs_mru_cache_elem_t *elem, *next;
234 238 struct list_head tmp;
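
The comment added above covers one of the "locking annotations" mentioned in the commit message: a function that is entered with a lock held, drops it to do its work, and retakes it before returning gets both __releases() and __acquires() so that sparse's context tracking stays balanced across the call. A minimal, self-contained sketch of the pattern with hypothetical names (reap_items and struct cache are illustrations, not XFS code):

#include <linux/spinlock.h>

struct cache {
	spinlock_t	lock;
	void		*item;
};

/*
 * Called with c->lock held.  The lock is dropped while the slow work runs
 * and retaken before returning, exactly as the annotations promise sparse.
 */
static void
reap_items(
	struct cache	*c) __releases(c->lock) __acquires(c->lock)
{
	spin_unlock(&c->lock);
	/* ... work that must not run under the lock ... */
	spin_lock(&c->lock);
}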
... ... @@ -528,6 +532,10 @@
528 532 *
529 533 * If the element isn't found, this function returns NULL and the spinlock is
530 534 * released. xfs_mru_cache_done() should NOT be called when this occurs.
  535 + *
  536 + * Because sparse isn't smart enough to know about conditional lock return
  537 + * status, we need to help it get it right by annotating the path that does
  538 + * not release the lock.
531 539 */
532 540 void *
533 541 xfs_mru_cache_lookup(
... ... @@ -545,8 +553,8 @@
545 553 if (elem) {
546 554 list_del(&elem->list_node);
547 555 _xfs_mru_cache_list_insert(mru, elem);
548   - }
549   - else
  556 + __release(mru_lock); /* help sparse not be stupid */
  557 + } else
550 558 spin_unlock(&mru->lock);
551 559  
552 560 return elem ? elem->value : NULL;
... ... @@ -575,6 +583,8 @@
575 583 elem = radix_tree_lookup(&mru->store, key);
576 584 if (!elem)
577 585 spin_unlock(&mru->lock);
  586 + else
  587 + __release(mru_lock); /* help sparse not be stupid */
578 588  
579 589 return elem ? elem->value : NULL;
580 590 }
... ... @@ -586,7 +596,7 @@
586 596 */
587 597 void
588 598 xfs_mru_cache_done(
589   - xfs_mru_cache_t *mru)
  599 + xfs_mru_cache_t *mru) __releases(mru->lock)
590 600 {
591 601 spin_unlock(&mru->lock);
592 602 }
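
The remaining xfs_mru_cache.c hunks handle the harder case the second comment describes: a lookup that returns with the lock still held on success but unlocked on failure. Sparse cannot follow the conditional, so the path that keeps the lock gets a bare __release() to balance the context count, while the matching "done" function is annotated __releases() because it really does drop the lock. A compressed sketch of that pattern, reusing the hypothetical struct cache from the previous example (cache_lookup and cache_done are illustrations, not XFS functions):

/*
 * Returns with c->lock held when an item is found (the caller must then
 * call cache_done()), and unlocked otherwise.  The __release() on the
 * found path only balances sparse's counting; the lock stays held.
 */
static void *
cache_lookup(
	struct cache	*c)
{
	spin_lock(&c->lock);
	if (c->item) {
		__release(c_lock);	/* annotation only, no real unlock */
		return c->item;
	}
	spin_unlock(&c->lock);
	return NULL;
}

/* Drops the lock taken by a successful cache_lookup(). */
static void
cache_done(
	struct cache	*c) __releases(c->lock)
{
	spin_unlock(&c->lock);
}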
... ... @@ -39,6 +39,7 @@
39 39 #include "xfs_refcache.h"
40 40 #include "xfs_utils.h"
41 41 #include "xfs_trans_space.h"
  42 +#include "xfs_vnodeops.h"
42 43  
43 44  
44 45 /*
fs/xfs/xfs_trans.h
... ... @@ -1001,6 +1001,8 @@
1001 1001 xfs_agnumber_t ag,
1002 1002 xfs_extlen_t idx);
1003 1003  
  1004 +extern kmem_zone_t *xfs_trans_zone;
  1005 +
1004 1006 #endif /* __KERNEL__ */
1005 1007  
1006 1008 #endif /* __XFS_TRANS_H__ */
fs/xfs/xfs_trans_item.c
... ... @@ -21,6 +21,7 @@
21 21 #include "xfs_log.h"
22 22 #include "xfs_inum.h"
23 23 #include "xfs_trans.h"
  24 +#include "xfs_trans_priv.h"
24 25  
25 26 STATIC int xfs_trans_unlock_chunk(xfs_log_item_chunk_t *,
26 27 int, int, xfs_lsn_t);
fs/xfs/xfs_vfsops.c
... ... @@ -61,11 +61,6 @@
61 61 int
62 62 xfs_init(void)
63 63 {
64   - extern kmem_zone_t *xfs_bmap_free_item_zone;
65   - extern kmem_zone_t *xfs_btree_cur_zone;
66   - extern kmem_zone_t *xfs_trans_zone;
67   - extern kmem_zone_t *xfs_buf_item_zone;
68   - extern kmem_zone_t *xfs_dabuf_zone;
69 64 #ifdef XFS_DABUF_DEBUG
70 65 extern spinlock_t xfs_dabuf_global_lock;
71 66 spin_lock_init(&xfs_dabuf_global_lock);
72 67  
73 68  
... ... @@ -155,15 +150,9 @@
155 150 void
156 151 xfs_cleanup(void)
157 152 {
158   - extern kmem_zone_t *xfs_bmap_free_item_zone;
159   - extern kmem_zone_t *xfs_btree_cur_zone;
160 153 extern kmem_zone_t *xfs_inode_zone;
161   - extern kmem_zone_t *xfs_trans_zone;
162   - extern kmem_zone_t *xfs_da_state_zone;
163   - extern kmem_zone_t *xfs_dabuf_zone;
164 154 extern kmem_zone_t *xfs_efd_zone;
165 155 extern kmem_zone_t *xfs_efi_zone;
166   - extern kmem_zone_t *xfs_buf_item_zone;
167 156 extern kmem_zone_t *xfs_icluster_zone;
168 157  
169 158 xfs_cleanup_procfs();