Commit 800deef3f6f87fee3a2e89cf7237a1f20c1a78d7
Committed by: Mark Fasheh
Parent: e6df3a663a
Exists in master and 7 other branches
[PATCH] ocfs2: use list_for_each_entry where beneficial
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Mark Fasheh <mark.fasheh@oracle.com>
Showing 6 changed files with 47 additions and 105 deletions
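The conversion is mechanical throughout the patch: each open-coded list_for_each()/list_entry() pair collapses into a single typed list_for_each_entry(), with the _safe variant kept wherever the loop body unlinks the current entry. A minimal sketch of the pattern for reference -- not part of the commit; struct foo, foo_list, walk_old() and walk_new() are hypothetical names, and only the standard <linux/list.h> helpers are assumed:

#include <linux/list.h>

struct foo {
        int val;
        struct list_head list;  /* linkage into a foo list */
};

/* Before: raw list_head cursor plus an explicit list_entry() each pass. */
static void walk_old(struct list_head *foo_list)
{
        struct list_head *iter;
        struct foo *f;

        list_for_each(iter, foo_list) {
                f = list_entry(iter, struct foo, list);
                /* ... use f->val ... */
        }
}

/* After: list_for_each_entry() hides the cursor and the container_of()
 * arithmetic; the _safe variant keeps a look-ahead cursor so the body
 * may delete the current entry without breaking the walk. */
static void walk_new(struct list_head *foo_list)
{
        struct foo *f, *next;

        list_for_each_entry_safe(f, next, foo_list, list) {
                if (f->val < 0)
                        list_del(&f->list);     /* safe only with _safe */
        }
}

The plain (non-_safe) form is enough for read-only walks, which is why several loops below also drop their _safe variant.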
fs/ocfs2/cluster/tcp.c
@@ -261,14 +261,12 @@

 static void o2net_complete_nodes_nsw(struct o2net_node *nn)
 {
-        struct list_head *iter, *tmp;
+        struct o2net_status_wait *nsw, *tmp;
         unsigned int num_kills = 0;
-        struct o2net_status_wait *nsw;

         assert_spin_locked(&nn->nn_lock);

-        list_for_each_safe(iter, tmp, &nn->nn_status_list) {
-                nsw = list_entry(iter, struct o2net_status_wait, ns_node_item);
+        list_for_each_entry_safe(nsw, tmp, &nn->nn_status_list, ns_node_item) {
                 o2net_complete_nsw_locked(nn, nsw, O2NET_ERR_DIED, 0);
                 num_kills++;
         }

@@ -764,13 +762,10 @@

 void o2net_unregister_handler_list(struct list_head *list)
 {
-        struct list_head *pos, *n;
-        struct o2net_msg_handler *nmh;
+        struct o2net_msg_handler *nmh, *n;

         write_lock(&o2net_handler_lock);
-        list_for_each_safe(pos, n, list) {
-                nmh = list_entry(pos, struct o2net_msg_handler,
-                                 nh_unregister_item);
+        list_for_each_entry_safe(nmh, n, list, nh_unregister_item) {
                 mlog(ML_TCP, "unregistering handler func %p type %u key %08x\n",
                      nmh->nh_func, nmh->nh_msg_type, nmh->nh_key);
                 rb_erase(&nmh->nh_node, &o2net_handler_tree);
fs/ocfs2/dlm/dlmmaster.c
@@ -192,25 +192,20 @@
 static void dlm_dump_mles(struct dlm_ctxt *dlm)
 {
         struct dlm_master_list_entry *mle;
-        struct list_head *iter;

         mlog(ML_NOTICE, "dumping all mles for domain %s:\n", dlm->name);
         spin_lock(&dlm->master_lock);
-        list_for_each(iter, &dlm->master_list) {
-                mle = list_entry(iter, struct dlm_master_list_entry, list);
+        list_for_each_entry(mle, &dlm->master_list, list)
                 dlm_print_one_mle(mle);
-        }
         spin_unlock(&dlm->master_lock);
 }

 int dlm_dump_all_mles(const char __user *data, unsigned int len)
 {
-        struct list_head *iter;
         struct dlm_ctxt *dlm;

         spin_lock(&dlm_domain_lock);
-        list_for_each(iter, &dlm_domains) {
-                dlm = list_entry (iter, struct dlm_ctxt, list);
+        list_for_each_entry(dlm, &dlm_domains, list) {
                 mlog(ML_NOTICE, "found dlm: %p, name=%s\n", dlm, dlm->name);
                 dlm_dump_mles(dlm);
         }

@@ -454,12 +449,10 @@
                         char *name, unsigned int namelen)
 {
         struct dlm_master_list_entry *tmpmle;
-        struct list_head *iter;

         assert_spin_locked(&dlm->master_lock);

-        list_for_each(iter, &dlm->master_list) {
-                tmpmle = list_entry(iter, struct dlm_master_list_entry, list);
+        list_for_each_entry(tmpmle, &dlm->master_list, list) {
                 if (!dlm_mle_equal(dlm, tmpmle, name, namelen))
                         continue;
                 dlm_get_mle(tmpmle);

@@ -472,13 +465,10 @@
 void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up)
 {
         struct dlm_master_list_entry *mle;
-        struct list_head *iter;

         assert_spin_locked(&dlm->spinlock);

-        list_for_each(iter, &dlm->mle_hb_events) {
-                mle = list_entry(iter, struct dlm_master_list_entry,
-                                 hb_events);
+        list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) {
                 if (node_up)
                         dlm_mle_node_up(dlm, mle, NULL, idx);
                 else
@@ -2434,7 +2424,7 @@
         int ret;
         int i;
         int count = 0;
-        struct list_head *queue, *iter;
+        struct list_head *queue;
         struct dlm_lock *lock;

         assert_spin_locked(&res->spinlock);
@@ -2453,8 +2443,7 @@
         ret = 0;
         queue = &res->granted;
         for (i = 0; i < 3; i++) {
-                list_for_each(iter, queue) {
-                        lock = list_entry(iter, struct dlm_lock, list);
+                list_for_each_entry(lock, queue, list) {
                         ++count;
                         if (lock->ml.node == dlm->node_num) {
                                 mlog(0, "found a lock owned by this node still "
@@ -2923,18 +2912,16 @@
 static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
                                       struct dlm_lock_resource *res)
 {
-        struct list_head *iter, *iter2;
         struct list_head *queue = &res->granted;
         int i, bit;
-        struct dlm_lock *lock;
+        struct dlm_lock *lock, *next;

         assert_spin_locked(&res->spinlock);

         BUG_ON(res->owner == dlm->node_num);

         for (i=0; i<3; i++) {
-                list_for_each_safe(iter, iter2, queue) {
-                        lock = list_entry (iter, struct dlm_lock, list);
+                list_for_each_entry_safe(lock, next, queue, list) {
                         if (lock->ml.node != dlm->node_num) {
                                 mlog(0, "putting lock for node %u\n",
                                      lock->ml.node);
@@ -2976,7 +2963,6 @@
 {
         int i;
         struct list_head *queue = &res->granted;
-        struct list_head *iter;
         struct dlm_lock *lock;
         int nodenum;

@@ -2984,10 +2970,9 @@

         spin_lock(&res->spinlock);
         for (i=0; i<3; i++) {
-                list_for_each(iter, queue) {
+                list_for_each_entry(lock, queue, list) {
                         /* up to the caller to make sure this node
                          * is alive */
-                        lock = list_entry (iter, struct dlm_lock, list);
                         if (lock->ml.node != dlm->node_num) {
                                 spin_unlock(&res->spinlock);
                                 return lock->ml.node;
@@ -3234,8 +3219,7 @@

 void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node)
 {
-        struct list_head *iter, *iter2;
-        struct dlm_master_list_entry *mle;
+        struct dlm_master_list_entry *mle, *next;
         struct dlm_lock_resource *res;
         unsigned int hash;

@@ -3245,9 +3229,7 @@

         /* clean the master list */
         spin_lock(&dlm->master_lock);
-        list_for_each_safe(iter, iter2, &dlm->master_list) {
-                mle = list_entry(iter, struct dlm_master_list_entry, list);
-
+        list_for_each_entry_safe(mle, next, &dlm->master_list, list) {
                 BUG_ON(mle->type != DLM_MLE_BLOCK &&
                        mle->type != DLM_MLE_MASTER &&
                        mle->type != DLM_MLE_MIGRATION);
fs/ocfs2/dlm/dlmrecovery.c
@@ -158,8 +158,7 @@
         struct dlm_ctxt *dlm =
                 container_of(work, struct dlm_ctxt, dispatched_work);
         LIST_HEAD(tmp_list);
-        struct list_head *iter, *iter2;
-        struct dlm_work_item *item;
+        struct dlm_work_item *item, *next;
         dlm_workfunc_t *workfunc;
         int tot=0;

@@ -167,13 +166,12 @@
         list_splice_init(&dlm->work_list, &tmp_list);
         spin_unlock(&dlm->work_lock);

-        list_for_each_safe(iter, iter2, &tmp_list) {
+        list_for_each_entry(item, &tmp_list, list) {
                 tot++;
         }
         mlog(0, "%s: work thread has %d work items\n", dlm->name, tot);

-        list_for_each_safe(iter, iter2, &tmp_list) {
-                item = list_entry(iter, struct dlm_work_item, list);
+        list_for_each_entry_safe(item, next, &tmp_list, list) {
                 workfunc = item->func;
                 list_del_init(&item->list);

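Note that in the hunk above the first loop only counts the spliced-off work items and deletes nothing, so the plain list_for_each_entry() suffices; the second loop unlinks each item with list_del_init() before dispatching it, so it keeps the _safe form.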
@@ -549,7 +547,6 @@
 {
         int status = 0;
         struct dlm_reco_node_data *ndata;
-        struct list_head *iter;
         int all_nodes_done;
         int destroy = 0;
         int pass = 0;
@@ -567,8 +564,7 @@

         /* safe to access the node data list without a lock, since this
          * process is the only one to change the list */
-        list_for_each(iter, &dlm->reco.node_data) {
-                ndata = list_entry (iter, struct dlm_reco_node_data, list);
+        list_for_each_entry(ndata, &dlm->reco.node_data, list) {
                 BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT);
                 ndata->state = DLM_RECO_NODE_DATA_REQUESTING;

@@ -655,9 +651,7 @@
          * done, or if anyone died */
         all_nodes_done = 1;
         spin_lock(&dlm_reco_state_lock);
-        list_for_each(iter, &dlm->reco.node_data) {
-                ndata = list_entry (iter, struct dlm_reco_node_data, list);
-
+        list_for_each_entry(ndata, &dlm->reco.node_data, list) {
                 mlog(0, "checking recovery state of node %u\n",
                      ndata->node_num);
                 switch (ndata->state) {
@@ -774,16 +768,14 @@

 static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
 {
-        struct list_head *iter, *iter2;
-        struct dlm_reco_node_data *ndata;
+        struct dlm_reco_node_data *ndata, *next;
         LIST_HEAD(tmplist);

         spin_lock(&dlm_reco_state_lock);
         list_splice_init(&dlm->reco.node_data, &tmplist);
         spin_unlock(&dlm_reco_state_lock);

-        list_for_each_safe(iter, iter2, &tmplist) {
-                ndata = list_entry (iter, struct dlm_reco_node_data, list);
+        list_for_each_entry_safe(ndata, next, &tmplist, list) {
                 list_del_init(&ndata->list);
                 kfree(ndata);
         }
@@ -876,7 +868,6 @@
         struct dlm_lock_resource *res;
         struct dlm_ctxt *dlm;
         LIST_HEAD(resources);
-        struct list_head *iter;
         int ret;
         u8 dead_node, reco_master;
         int skip_all_done = 0;
@@ -920,8 +911,7 @@

         /* any errors returned will be due to the new_master dying,
          * the dlm_reco_thread should detect this */
-        list_for_each(iter, &resources) {
-                res = list_entry (iter, struct dlm_lock_resource, recovering);
+        list_for_each_entry(res, &resources, recovering) {
                 ret = dlm_send_one_lockres(dlm, res, mres, reco_master,
                                            DLM_MRES_RECOVERY);
                 if (ret < 0) {
@@ -983,7 +973,6 @@
 {
         struct dlm_ctxt *dlm = data;
         struct dlm_reco_data_done *done = (struct dlm_reco_data_done *)msg->buf;
-        struct list_head *iter;
         struct dlm_reco_node_data *ndata = NULL;
         int ret = -EINVAL;

@@ -1000,8 +989,7 @@
                 dlm->reco.dead_node, done->node_idx, dlm->node_num);

         spin_lock(&dlm_reco_state_lock);
-        list_for_each(iter, &dlm->reco.node_data) {
-                ndata = list_entry (iter, struct dlm_reco_node_data, list);
+        list_for_each_entry(ndata, &dlm->reco.node_data, list) {
                 if (ndata->node_num != done->node_idx)
                         continue;

@@ -1049,13 +1037,11 @@
                                         struct list_head *list,
                                         u8 dead_node)
 {
-        struct dlm_lock_resource *res;
-        struct list_head *iter, *iter2;
+        struct dlm_lock_resource *res, *next;
         struct dlm_lock *lock;

         spin_lock(&dlm->spinlock);
-        list_for_each_safe(iter, iter2, &dlm->reco.resources) {
-                res = list_entry (iter, struct dlm_lock_resource, recovering);
+        list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
                 /* always prune any $RECOVERY entries for dead nodes,
                  * otherwise hangs can occur during later recovery */
                 if (dlm_is_recovery_lock(res->lockname.name,
@@ -1252,7 +1238,7 @@
                          struct dlm_migratable_lockres *mres,
                          u8 send_to, u8 flags)
 {
-        struct list_head *queue, *iter;
+        struct list_head *queue;
         int total_locks, i;
         u64 mig_cookie = 0;
         struct dlm_lock *lock;
@@ -1278,9 +1264,7 @@
         total_locks = 0;
         for (i=DLM_GRANTED_LIST; i<=DLM_BLOCKED_LIST; i++) {
                 queue = dlm_list_idx_to_ptr(res, i);
-                list_for_each(iter, queue) {
-                        lock = list_entry (iter, struct dlm_lock, list);
-
+                list_for_each_entry(lock, queue, list) {
                         /* add another lock. */
                         total_locks++;
                         if (!dlm_add_lock_to_array(lock, mres, i))
@@ -1717,7 +1701,6 @@
         struct dlm_lockstatus *lksb = NULL;
         int ret = 0;
         int i, j, bad;
-        struct list_head *iter;
         struct dlm_lock *lock = NULL;
         u8 from = O2NM_MAX_NODES;
         unsigned int added = 0;
@@ -1755,8 +1738,7 @@
         spin_lock(&res->spinlock);
         for (j = DLM_GRANTED_LIST; j <= DLM_BLOCKED_LIST; j++) {
                 tmpq = dlm_list_idx_to_ptr(res, j);
-                list_for_each(iter, tmpq) {
-                        lock = list_entry (iter, struct dlm_lock, list);
+                list_for_each_entry(lock, tmpq, list) {
                         if (lock->ml.cookie != ml->cookie)
                                 lock = NULL;
                         else
@@ -1930,8 +1912,8 @@
                                 struct dlm_lock_resource *res)
 {
         int i;
-        struct list_head *queue, *iter, *iter2;
-        struct dlm_lock *lock;
+        struct list_head *queue;
+        struct dlm_lock *lock, *next;

         res->state |= DLM_LOCK_RES_RECOVERING;
         if (!list_empty(&res->recovering)) {
@@ -1947,8 +1929,7 @@
         /* find any pending locks and put them back on proper list */
         for (i=DLM_BLOCKED_LIST; i>=DLM_GRANTED_LIST; i--) {
                 queue = dlm_list_idx_to_ptr(res, i);
-                list_for_each_safe(iter, iter2, queue) {
-                        lock = list_entry (iter, struct dlm_lock, list);
+                list_for_each_entry_safe(lock, next, queue, list) {
                         dlm_lock_get(lock);
                         if (lock->convert_pending) {
                                 /* move converting lock back to granted */
@@ -2013,18 +1994,15 @@
                                               u8 dead_node, u8 new_master)
 {
         int i;
-        struct list_head *iter, *iter2;
         struct hlist_node *hash_iter;
         struct hlist_head *bucket;
+        struct dlm_lock_resource *res, *next;

-        struct dlm_lock_resource *res;
-
         mlog_entry_void();

         assert_spin_locked(&dlm->spinlock);

-        list_for_each_safe(iter, iter2, &dlm->reco.resources) {
-                res = list_entry (iter, struct dlm_lock_resource, recovering);
+        list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
                 if (res->owner == dead_node) {
                         list_del_init(&res->recovering);
                         spin_lock(&res->spinlock);
@@ -2099,7 +2077,7 @@
 static void dlm_revalidate_lvb(struct dlm_ctxt *dlm,
                                struct dlm_lock_resource *res, u8 dead_node)
 {
-        struct list_head *iter, *queue;
+        struct list_head *queue;
         struct dlm_lock *lock;
         int blank_lvb = 0, local = 0;
         int i;
@@ -2121,8 +2099,7 @@

         for (i=DLM_GRANTED_LIST; i<=DLM_CONVERTING_LIST; i++) {
                 queue = dlm_list_idx_to_ptr(res, i);
-                list_for_each(iter, queue) {
-                        lock = list_entry (iter, struct dlm_lock, list);
+                list_for_each_entry(lock, queue, list) {
                         if (lock->ml.node == search_node) {
                                 if (dlm_lvb_needs_invalidation(lock, local)) {
                                         /* zero the lksb lvb and lockres lvb */
@@ -2143,8 +2120,7 @@
 static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
                                 struct dlm_lock_resource *res, u8 dead_node)
 {
-        struct list_head *iter, *tmpiter;
-        struct dlm_lock *lock;
+        struct dlm_lock *lock, *next;
         unsigned int freed = 0;

         /* this node is the lockres master:
@@ -2155,24 +2131,21 @@
         assert_spin_locked(&res->spinlock);

         /* TODO: check pending_asts, pending_basts here */
-        list_for_each_safe(iter, tmpiter, &res->granted) {
-                lock = list_entry (iter, struct dlm_lock, list);
+        list_for_each_entry_safe(lock, next, &res->granted, list) {
                 if (lock->ml.node == dead_node) {
                         list_del_init(&lock->list);
                         dlm_lock_put(lock);
                         freed++;
                 }
         }
-        list_for_each_safe(iter, tmpiter, &res->converting) {
-                lock = list_entry (iter, struct dlm_lock, list);
+        list_for_each_entry_safe(lock, next, &res->converting, list) {
                 if (lock->ml.node == dead_node) {
                         list_del_init(&lock->list);
                         dlm_lock_put(lock);
                         freed++;
                 }
         }
-        list_for_each_safe(iter, tmpiter, &res->blocked) {
-                lock = list_entry (iter, struct dlm_lock, list);
+        list_for_each_entry_safe(lock, next, &res->blocked, list) {
                 if (lock->ml.node == dead_node) {
                         list_del_init(&lock->list);
                         dlm_lock_put(lock);
fs/ocfs2/dlmglue.c
@@ -600,15 +600,13 @@
 static void lockres_set_flags(struct ocfs2_lock_res *lockres,
                               unsigned long newflags)
 {
-        struct list_head *pos, *tmp;
-        struct ocfs2_mask_waiter *mw;
+        struct ocfs2_mask_waiter *mw, *tmp;

         assert_spin_locked(&lockres->l_lock);

         lockres->l_flags = newflags;

-        list_for_each_safe(pos, tmp, &lockres->l_mask_waiters) {
-                mw = list_entry(pos, struct ocfs2_mask_waiter, mw_item);
+        list_for_each_entry_safe(mw, tmp, &lockres->l_mask_waiters, mw_item) {
                 if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
                         continue;

fs/ocfs2/extent_map.c
@@ -109,17 +109,14 @@
  */
 void ocfs2_extent_map_trunc(struct inode *inode, unsigned int cpos)
 {
-        struct list_head *p, *n;
-        struct ocfs2_extent_map_item *emi;
+        struct ocfs2_extent_map_item *emi, *n;
         struct ocfs2_inode_info *oi = OCFS2_I(inode);
         struct ocfs2_extent_map *em = &oi->ip_extent_map;
         LIST_HEAD(tmp_list);
         unsigned int range;

         spin_lock(&oi->ip_lock);
-        list_for_each_safe(p, n, &em->em_list) {
-                emi = list_entry(p, struct ocfs2_extent_map_item, ei_list);
-
+        list_for_each_entry_safe(emi, n, &em->em_list, ei_list) {
                 if (emi->ei_cpos >= cpos) {
                         /* Full truncate of this record. */
                         list_move(&emi->ei_list, &tmp_list);
@@ -136,8 +133,7 @@
         }
         spin_unlock(&oi->ip_lock);

-        list_for_each_safe(p, n, &tmp_list) {
-                emi = list_entry(p, struct ocfs2_extent_map_item, ei_list);
+        list_for_each_entry_safe(emi, n, &tmp_list, ei_list) {
                 list_del(&emi->ei_list);
                 kfree(emi);
         }
fs/ocfs2/journal.c
@@ -722,8 +722,7 @@
                 container_of(work, struct ocfs2_journal, j_recovery_work);
         struct ocfs2_super *osb = journal->j_osb;
         struct ocfs2_dinode *la_dinode, *tl_dinode;
-        struct ocfs2_la_recovery_item *item;
-        struct list_head *p, *n;
+        struct ocfs2_la_recovery_item *item, *n;
         LIST_HEAD(tmp_la_list);

         mlog_entry_void();
@@ -734,8 +733,7 @@
         list_splice_init(&journal->j_la_cleanups, &tmp_la_list);
         spin_unlock(&journal->j_lock);

-        list_for_each_safe(p, n, &tmp_la_list) {
-                item = list_entry(p, struct ocfs2_la_recovery_item, lri_list);
+        list_for_each_entry_safe(item, n, &tmp_la_list, lri_list) {
                 list_del_init(&item->lri_list);

                 mlog(0, "Complete recovery for slot %d\n", item->lri_slot);