Commit a8272ce0c1d49aa3bec57682678f0bdfe28ed4ca
Committed by
Lachlan McIlroy
1 parent
a69b176df2
Exists in
master
and in
39 other branches
[XFS] Fix up sparse warnings.
These are mostly locking annotations, marking things static, casts where needed and declaring stuff in header files. SGI-PV: 971186 SGI-Modid: xfs-linux-melb:xfs-kern:30002a Signed-off-by: David Chinner <dgc@sgi.com> Signed-off-by: Christoph Hellwig <hch@infradead.org> Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>
Showing 19 changed files with 50 additions and 37 deletions Side-by-side Diff
- fs/xfs/linux-2.6/xfs_globals.c
- fs/xfs/linux-2.6/xfs_ioctl.c
- fs/xfs/linux-2.6/xfs_ioctl32.c
- fs/xfs/xfs_attr.c
- fs/xfs/xfs_bmap.c
- fs/xfs/xfs_bmap.h
- fs/xfs/xfs_btree.h
- fs/xfs/xfs_buf_item.h
- fs/xfs/xfs_da_btree.h
- fs/xfs/xfs_dir2.c
- fs/xfs/xfs_filestream.c
- fs/xfs/xfs_log.c
- fs/xfs/xfs_log_recover.c
- fs/xfs/xfs_mount.c
- fs/xfs/xfs_mru_cache.c
- fs/xfs/xfs_rename.c
- fs/xfs/xfs_trans.h
- fs/xfs/xfs_trans_item.c
- fs/xfs/xfs_vfsops.c
fs/xfs/linux-2.6/xfs_globals.c
fs/xfs/linux-2.6/xfs_ioctl.c
fs/xfs/linux-2.6/xfs_ioctl32.c
fs/xfs/xfs_attr.c
... | ... | @@ -929,7 +929,7 @@ |
929 | 929 | * This leaf block cannot have a "remote" value, we only call this routine |
930 | 930 | * if bmap_one_block() says there is only one block (ie: no remote blks). |
931 | 931 | */ |
932 | -int | |
932 | +STATIC int | |
933 | 933 | xfs_attr_leaf_addname(xfs_da_args_t *args) |
934 | 934 | { |
935 | 935 | xfs_inode_t *dp; |
fs/xfs/xfs_bmap.c
... | ... | @@ -6393,7 +6393,7 @@ |
6393 | 6393 | * Recursively walks each level of a btree |
6394 | 6394 | * to count total fsblocks in use. |
6395 | 6395 | */ |
6396 | -int /* error */ | |
6396 | +STATIC int /* error */ | |
6397 | 6397 | xfs_bmap_count_tree( |
6398 | 6398 | xfs_mount_t *mp, /* file system mount point */ |
6399 | 6399 | xfs_trans_t *tp, /* transaction pointer */ |
... | ... | @@ -6469,7 +6469,7 @@ |
6469 | 6469 | /* |
6470 | 6470 | * Count leaf blocks given a range of extent records. |
6471 | 6471 | */ |
6472 | -int | |
6472 | +STATIC int | |
6473 | 6473 | xfs_bmap_count_leaves( |
6474 | 6474 | xfs_ifork_t *ifp, |
6475 | 6475 | xfs_extnum_t idx, |
... | ... | @@ -6489,7 +6489,7 @@ |
6489 | 6489 | * Count leaf blocks given a range of extent records originally |
6490 | 6490 | * in btree format. |
6491 | 6491 | */ |
6492 | -int | |
6492 | +STATIC int | |
6493 | 6493 | xfs_bmap_disk_count_leaves( |
6494 | 6494 | xfs_extnum_t idx, |
6495 | 6495 | xfs_bmbt_block_t *block, |
fs/xfs/xfs_bmap.h
fs/xfs/xfs_btree.h
fs/xfs/xfs_buf_item.h
fs/xfs/xfs_da_btree.h
fs/xfs/xfs_dir2.c
fs/xfs/xfs_filestream.c
fs/xfs/xfs_log.c
... | ... | @@ -907,7 +907,7 @@ |
907 | 907 | * the tail. The details of this case are described below, but the end |
908 | 908 | * result is that we return the size of the log as the amount of space left. |
909 | 909 | */ |
910 | -int | |
910 | +STATIC int | |
911 | 911 | xlog_space_left(xlog_t *log, int cycle, int bytes) |
912 | 912 | { |
913 | 913 | int free_bytes; |
... | ... | @@ -1289,7 +1289,7 @@ |
1289 | 1289 | * pushes on an lsn which is further along in the log once we reach the high |
1290 | 1290 | * water mark. In this manner, we would be creating a low water mark. |
1291 | 1291 | */ |
1292 | -void | |
1292 | +STATIC void | |
1293 | 1293 | xlog_grant_push_ail(xfs_mount_t *mp, |
1294 | 1294 | int need_bytes) |
1295 | 1295 | { |
... | ... | @@ -1372,7 +1372,7 @@ |
1372 | 1372 | * is added immediately before calling bwrite(). |
1373 | 1373 | */ |
1374 | 1374 | |
1375 | -int | |
1375 | +STATIC int | |
1376 | 1376 | xlog_sync(xlog_t *log, |
1377 | 1377 | xlog_in_core_t *iclog) |
1378 | 1378 | { |
... | ... | @@ -1516,7 +1516,7 @@ |
1516 | 1516 | /* |
1517 | 1517 | * Deallocate a log structure |
1518 | 1518 | */ |
1519 | -void | |
1519 | +STATIC void | |
1520 | 1520 | xlog_dealloc_log(xlog_t *log) |
1521 | 1521 | { |
1522 | 1522 | xlog_in_core_t *iclog, *next_iclog; |
... | ... | @@ -1738,7 +1738,7 @@ |
1738 | 1738 | * we don't update ic_offset until the end when we know exactly how many |
1739 | 1739 | * bytes have been written out. |
1740 | 1740 | */ |
1741 | -int | |
1741 | +STATIC int | |
1742 | 1742 | xlog_write(xfs_mount_t * mp, |
1743 | 1743 | xfs_log_iovec_t reg[], |
1744 | 1744 | int nentries, |
... | ... | @@ -2280,7 +2280,7 @@ |
2280 | 2280 | * global state machine log lock. Assume that the calls to cvsema won't |
2281 | 2281 | * take a long time. At least we know it won't sleep. |
2282 | 2282 | */ |
2283 | -void | |
2283 | +STATIC void | |
2284 | 2284 | xlog_state_done_syncing( |
2285 | 2285 | xlog_in_core_t *iclog, |
2286 | 2286 | int aborted) |
... | ... | @@ -2340,7 +2340,7 @@ |
2340 | 2340 | * needs to be incremented, depending on the amount of data which |
2341 | 2341 | * is copied. |
2342 | 2342 | */ |
2343 | -int | |
2343 | +STATIC int | |
2344 | 2344 | xlog_state_get_iclog_space(xlog_t *log, |
2345 | 2345 | int len, |
2346 | 2346 | xlog_in_core_t **iclogp, |
... | ... | @@ -2776,7 +2776,7 @@ |
2776 | 2776 | /* |
2777 | 2777 | * Atomically put back used ticket. |
2778 | 2778 | */ |
2779 | -void | |
2779 | +STATIC void | |
2780 | 2780 | xlog_state_put_ticket(xlog_t *log, |
2781 | 2781 | xlog_ticket_t *tic) |
2782 | 2782 | { |
... | ... | @@ -2794,7 +2794,7 @@ |
2794 | 2794 | * |
2795 | 2795 | * |
2796 | 2796 | */ |
2797 | -int | |
2797 | +STATIC int | |
2798 | 2798 | xlog_state_release_iclog(xlog_t *log, |
2799 | 2799 | xlog_in_core_t *iclog) |
2800 | 2800 | { |
... | ... | @@ -3024,7 +3024,7 @@ |
3024 | 3024 | * If filesystem activity goes to zero, the iclog will get flushed only by |
3025 | 3025 | * bdflush(). |
3026 | 3026 | */ |
3027 | -int | |
3027 | +STATIC int | |
3028 | 3028 | xlog_state_sync(xlog_t *log, |
3029 | 3029 | xfs_lsn_t lsn, |
3030 | 3030 | uint flags, |
... | ... | @@ -3129,7 +3129,7 @@ |
3129 | 3129 | * Called when we want to mark the current iclog as being ready to sync to |
3130 | 3130 | * disk. |
3131 | 3131 | */ |
3132 | -void | |
3132 | +STATIC void | |
3133 | 3133 | xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog) |
3134 | 3134 | { |
3135 | 3135 | spin_lock(&log->l_icloglock); |
... | ... | @@ -3241,7 +3241,7 @@ |
3241 | 3241 | /* |
3242 | 3242 | * Grab ticket off freelist or allocate some more |
3243 | 3243 | */ |
3244 | -xlog_ticket_t * | |
3244 | +STATIC xlog_ticket_t * | |
3245 | 3245 | xlog_ticket_get(xlog_t *log, |
3246 | 3246 | int unit_bytes, |
3247 | 3247 | int cnt, |
fs/xfs/xfs_log_recover.c
... | ... | @@ -293,7 +293,7 @@ |
293 | 293 | * Note that the algorithm can not be perfect because the disk will not |
294 | 294 | * necessarily be perfect. |
295 | 295 | */ |
296 | -int | |
296 | +STATIC int | |
297 | 297 | xlog_find_cycle_start( |
298 | 298 | xlog_t *log, |
299 | 299 | xfs_buf_t *bp, |
... | ... | @@ -986,7 +986,7 @@ |
986 | 986 | * -1 => use *blk_no as the first block of the log |
987 | 987 | * >0 => error has occurred |
988 | 988 | */ |
989 | -int | |
989 | +STATIC int | |
990 | 990 | xlog_find_zeroed( |
991 | 991 | xlog_t *log, |
992 | 992 | xfs_daddr_t *blk_no) |
fs/xfs/xfs_mount.c
fs/xfs/xfs_mru_cache.c
... | ... | @@ -225,10 +225,14 @@ |
225 | 225 | * list need to be deleted. For each element this involves removing it from the |
226 | 226 | * data store, removing it from the reap list, calling the client's free |
227 | 227 | * function and deleting the element from the element zone. |
228 | + * | |
229 | + * We get called holding the mru->lock, which we drop and then reacquire. | |
230 | + * Sparse needs special help with this to tell it we know what we are doing. | 
228 | 231 | */ |
229 | 232 | STATIC void |
230 | 233 | _xfs_mru_cache_clear_reap_list( |
231 | - xfs_mru_cache_t *mru) | |
234 | + xfs_mru_cache_t *mru) __releases(mru->lock) __acquires(mru->lock) | |
235 | + | |
232 | 236 | { |
233 | 237 | xfs_mru_cache_elem_t *elem, *next; |
234 | 238 | struct list_head tmp; |
... | ... | @@ -528,6 +532,10 @@ |
528 | 532 | * |
529 | 533 | * If the element isn't found, this function returns NULL and the spinlock is |
530 | 534 | * released. xfs_mru_cache_done() should NOT be called when this occurs. |
535 | + * | |
536 | + * Because sparse isn't smart enough to know about conditional lock return | |
537 | + * status, we need to help it get it right by annotating the path that does | |
538 | + * not release the lock. | |
531 | 539 | */ |
532 | 540 | void * |
533 | 541 | xfs_mru_cache_lookup( |
... | ... | @@ -545,8 +553,8 @@ |
545 | 553 | if (elem) { |
546 | 554 | list_del(&elem->list_node); |
547 | 555 | _xfs_mru_cache_list_insert(mru, elem); |
548 | - } | |
549 | - else | |
556 | + __release(mru_lock); /* help sparse not be stupid */ | |
557 | + } else | |
550 | 558 | spin_unlock(&mru->lock); |
551 | 559 | |
552 | 560 | return elem ? elem->value : NULL; |
... | ... | @@ -575,6 +583,8 @@ |
575 | 583 | elem = radix_tree_lookup(&mru->store, key); |
576 | 584 | if (!elem) |
577 | 585 | spin_unlock(&mru->lock); |
586 | + else | |
587 | + __release(mru_lock); /* help sparse not be stupid */ | |
578 | 588 | |
579 | 589 | return elem ? elem->value : NULL; |
580 | 590 | } |
... | ... | @@ -586,7 +596,7 @@ |
586 | 596 | */ |
587 | 597 | void |
588 | 598 | xfs_mru_cache_done( |
589 | - xfs_mru_cache_t *mru) | |
599 | + xfs_mru_cache_t *mru) __releases(mru->lock) | |
590 | 600 | { |
591 | 601 | spin_unlock(&mru->lock); |
592 | 602 | } |
fs/xfs/xfs_rename.c
fs/xfs/xfs_trans.h
fs/xfs/xfs_trans_item.c
fs/xfs/xfs_vfsops.c
... | ... | @@ -61,11 +61,6 @@ |
61 | 61 | int |
62 | 62 | xfs_init(void) |
63 | 63 | { |
64 | - extern kmem_zone_t *xfs_bmap_free_item_zone; | |
65 | - extern kmem_zone_t *xfs_btree_cur_zone; | |
66 | - extern kmem_zone_t *xfs_trans_zone; | |
67 | - extern kmem_zone_t *xfs_buf_item_zone; | |
68 | - extern kmem_zone_t *xfs_dabuf_zone; | |
69 | 64 | #ifdef XFS_DABUF_DEBUG |
70 | 65 | extern spinlock_t xfs_dabuf_global_lock; |
71 | 66 | spin_lock_init(&xfs_dabuf_global_lock); |
72 | 67 | |
73 | 68 | |
... | ... | @@ -155,15 +150,9 @@ |
155 | 150 | void |
156 | 151 | xfs_cleanup(void) |
157 | 152 | { |
158 | - extern kmem_zone_t *xfs_bmap_free_item_zone; | |
159 | - extern kmem_zone_t *xfs_btree_cur_zone; | |
160 | 153 | extern kmem_zone_t *xfs_inode_zone; |
161 | - extern kmem_zone_t *xfs_trans_zone; | |
162 | - extern kmem_zone_t *xfs_da_state_zone; | |
163 | - extern kmem_zone_t *xfs_dabuf_zone; | |
164 | 154 | extern kmem_zone_t *xfs_efd_zone; |
165 | 155 | extern kmem_zone_t *xfs_efi_zone; |
166 | - extern kmem_zone_t *xfs_buf_item_zone; | |
167 | 156 | extern kmem_zone_t *xfs_icluster_zone; |
168 | 157 | |
169 | 158 | xfs_cleanup_procfs(); |