Commit f116629d03655adaf7832b93b03c99391d09d4a7

Authored by Akinobu Mita
Committed by Linus Torvalds
1 parent 179e09172a

[PATCH] fs: use list_move()

This patch converts the combination of list_del(A) and list_add(A, B) to
list_move(A, B) under fs/.

Cc: Ian Kent <raven@themaw.net>
Acked-by: Joel Becker <joel.becker@oracle.com>
Cc: Neil Brown <neilb@cse.unsw.edu.au>
Cc: Hans Reiser <reiserfs-dev@namesys.com>
Cc: Urban Widmark <urban@teststation.com>
Acked-by: David Howells <dhowells@redhat.com>
Acked-by: Mark Fasheh <mark.fasheh@oracle.com>
Signed-off-by: Akinobu Mita <mita@miraclelinux.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Showing 22 changed files with 38 additions and 76 deletions (side-by-side diff view)

... ... @@ -413,8 +413,7 @@
413 413  
414 414 /* we found it in the graveyard - resurrect it */
415 415 found_dead_server:
416   - list_del(&server->link);
417   - list_add_tail(&server->link, &cell->sv_list);
  416 + list_move_tail(&server->link, &cell->sv_list);
418 417 afs_get_server(server);
419 418 afs_kafstimod_del_timer(&server->timeout);
420 419 spin_unlock(&cell->sv_gylock);
... ... @@ -136,8 +136,7 @@
136 136 if (!list_empty(&kafsasyncd_async_attnq)) {
137 137 op = list_entry(kafsasyncd_async_attnq.next,
138 138 struct afs_async_op, link);
139   - list_del(&op->link);
140   - list_add_tail(&op->link,
  139 + list_move_tail(&op->link,
141 140 &kafsasyncd_async_busyq);
142 141 }
143 142  
... ... @@ -204,8 +203,7 @@
204 203 init_waitqueue_entry(&op->waiter, kafsasyncd_task);
205 204 add_wait_queue(&op->call->waitq, &op->waiter);
206 205  
207   - list_del(&op->link);
208   - list_add_tail(&op->link, &kafsasyncd_async_busyq);
  206 + list_move_tail(&op->link, &kafsasyncd_async_busyq);
209 207  
210 208 spin_unlock(&kafsasyncd_async_lock);
211 209  
... ... @@ -223,8 +221,7 @@
223 221  
224 222 spin_lock(&kafsasyncd_async_lock);
225 223  
226   - list_del(&op->link);
227   - list_add_tail(&op->link, &kafsasyncd_async_attnq);
  224 + list_move_tail(&op->link, &kafsasyncd_async_attnq);
228 225  
229 226 spin_unlock(&kafsasyncd_async_lock);
230 227  
... ... @@ -123,8 +123,7 @@
123 123 resurrect_server:
124 124 _debug("resurrecting server");
125 125  
126   - list_del(&zombie->link);
127   - list_add_tail(&zombie->link, &cell->sv_list);
  126 + list_move_tail(&zombie->link, &cell->sv_list);
128 127 afs_get_server(zombie);
129 128 afs_kafstimod_del_timer(&zombie->timeout);
130 129 spin_unlock(&cell->sv_gylock);
... ... @@ -168,8 +167,7 @@
168 167 }
169 168  
170 169 spin_lock(&cell->sv_gylock);
171   - list_del(&server->link);
172   - list_add_tail(&server->link, &cell->sv_graveyard);
  170 + list_move_tail(&server->link, &cell->sv_graveyard);
173 171  
174 172 /* time out in 10 secs */
175 173 afs_kafstimod_add_timer(&server->timeout, 10 * HZ);
... ... @@ -326,8 +326,7 @@
326 326 /* found in the graveyard - resurrect */
327 327 _debug("found in graveyard");
328 328 atomic_inc(&vlocation->usage);
329   - list_del(&vlocation->link);
330   - list_add_tail(&vlocation->link, &cell->vl_list);
  329 + list_move_tail(&vlocation->link, &cell->vl_list);
331 330 spin_unlock(&cell->vl_gylock);
332 331  
333 332 afs_kafstimod_del_timer(&vlocation->timeout);
... ... @@ -478,8 +477,7 @@
478 477 }
479 478  
480 479 /* move to graveyard queue */
481   - list_del(&vlocation->link);
482   - list_add_tail(&vlocation->link,&cell->vl_graveyard);
  480 + list_move_tail(&vlocation->link,&cell->vl_graveyard);
483 481  
484 482 /* remove from pending timeout queue (refcounted if actually being
485 483 * updated) */
... ... @@ -104,8 +104,7 @@
104 104 vnode->cb_expiry * HZ);
105 105  
106 106 spin_lock(&afs_cb_hash_lock);
107   - list_del(&vnode->cb_hash_link);
108   - list_add_tail(&vnode->cb_hash_link,
  107 + list_move_tail(&vnode->cb_hash_link,
109 108 &afs_cb_hash(server, &vnode->fid));
110 109 spin_unlock(&afs_cb_hash_lock);
111 110  
... ... @@ -376,8 +376,7 @@
376 376 DPRINTK("returning %p %.*s",
377 377 expired, (int)expired->d_name.len, expired->d_name.name);
378 378 spin_lock(&dcache_lock);
379   - list_del(&expired->d_parent->d_subdirs);
380   - list_add(&expired->d_parent->d_subdirs, &expired->d_u.d_child);
  379 + list_move(&expired->d_parent->d_subdirs, &expired->d_u.d_child);
381 380 spin_unlock(&dcache_lock);
382 381 return expired;
383 382 }
... ... @@ -1009,8 +1009,7 @@
1009 1009 /* fallthrough */
1010 1010 default:
1011 1011 if (filp->f_pos == 2) {
1012   - list_del(q);
1013   - list_add(q, &parent_sd->s_children);
  1012 + list_move(q, &parent_sd->s_children);
1014 1013 }
1015 1014 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
1016 1015 struct configfs_dirent *next;
... ... @@ -1033,8 +1032,7 @@
1033 1032 dt_type(next)) < 0)
1034 1033 return 0;
1035 1034  
1036   - list_del(q);
1037   - list_add(q, p);
  1035 + list_move(q, p);
1038 1036 p = q;
1039 1037 filp->f_pos++;
1040 1038 }
... ... @@ -53,8 +53,7 @@
53 53 if (!instr) {
54 54 printk(KERN_WARNING "kmalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n");
55 55 spin_lock(&c->erase_completion_lock);
56   - list_del(&jeb->list);
57   - list_add(&jeb->list, &c->erase_pending_list);
  56 + list_move(&jeb->list, &c->erase_pending_list);
58 57 c->erasing_size -= c->sector_size;
59 58 c->dirty_size += c->sector_size;
60 59 jeb->dirty_size = c->sector_size;
... ... @@ -86,8 +85,7 @@
86 85 /* Erase failed immediately. Refile it on the list */
87 86 D1(printk(KERN_DEBUG "Erase at 0x%08x failed: %d. Refiling on erase_pending_list\n", jeb->offset, ret));
88 87 spin_lock(&c->erase_completion_lock);
89   - list_del(&jeb->list);
90   - list_add(&jeb->list, &c->erase_pending_list);
  88 + list_move(&jeb->list, &c->erase_pending_list);
91 89 c->erasing_size -= c->sector_size;
92 90 c->dirty_size += c->sector_size;
93 91 jeb->dirty_size = c->sector_size;
... ... @@ -161,8 +159,7 @@
161 159 {
162 160 D1(printk(KERN_DEBUG "Erase completed successfully at 0x%08x\n", jeb->offset));
163 161 spin_lock(&c->erase_completion_lock);
164   - list_del(&jeb->list);
165   - list_add_tail(&jeb->list, &c->erase_complete_list);
  162 + list_move_tail(&jeb->list, &c->erase_complete_list);
166 163 spin_unlock(&c->erase_completion_lock);
167 164 /* Ensure that kupdated calls us again to mark them clean */
168 165 jffs2_erase_pending_trigger(c);
... ... @@ -178,8 +175,7 @@
178 175 if (!jffs2_write_nand_badblock(c, jeb, bad_offset)) {
179 176 /* We'd like to give this block another try. */
180 177 spin_lock(&c->erase_completion_lock);
181   - list_del(&jeb->list);
182   - list_add(&jeb->list, &c->erase_pending_list);
  178 + list_move(&jeb->list, &c->erase_pending_list);
183 179 c->erasing_size -= c->sector_size;
184 180 c->dirty_size += c->sector_size;
185 181 jeb->dirty_size = c->sector_size;
... ... @@ -191,8 +187,7 @@
191 187 spin_lock(&c->erase_completion_lock);
192 188 c->erasing_size -= c->sector_size;
193 189 c->bad_size += c->sector_size;
194   - list_del(&jeb->list);
195   - list_add(&jeb->list, &c->bad_list);
  190 + list_move(&jeb->list, &c->bad_list);
196 191 c->nr_erasing_blocks--;
197 192 spin_unlock(&c->erase_completion_lock);
198 193 wake_up(&c->erase_wait);
... ... @@ -211,8 +211,7 @@
211 211 struct jffs2_eraseblock *ejeb;
212 212  
213 213 ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
214   - list_del(&ejeb->list);
215   - list_add_tail(&ejeb->list, &c->erase_pending_list);
  214 + list_move_tail(&ejeb->list, &c->erase_pending_list);
216 215 c->nr_erasing_blocks++;
217 216 jffs2_erase_pending_trigger(c);
218 217 D1(printk(KERN_DEBUG "jffs2_find_nextblock: Triggering erase of erasable block at 0x%08x\n",
... ... @@ -495,8 +495,7 @@
495 495 /* Fix up the original jeb now it's on the bad_list */
496 496 if (first_raw == jeb->first_node) {
497 497 D1(printk(KERN_DEBUG "Failing block at %08x is now empty. Moving to erase_pending_list\n", jeb->offset));
498   - list_del(&jeb->list);
499   - list_add(&jeb->list, &c->erase_pending_list);
  498 + list_move(&jeb->list, &c->erase_pending_list);
500 499 c->nr_erasing_blocks++;
501 500 jffs2_erase_pending_trigger(c);
502 501 }
... ... @@ -529,8 +529,7 @@
529 529  
530 530 dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp);
531 531 list_del_init(&clp->cl_strhash);
532   - list_del_init(&clp->cl_idhash);
533   - list_add(&clp->cl_idhash, &conf_id_hashtbl[idhashval]);
  532 + list_move(&clp->cl_idhash, &conf_id_hashtbl[idhashval]);
534 533 strhashval = clientstr_hashval(clp->cl_recdir);
535 534 list_add(&clp->cl_strhash, &conf_str_hashtbl[strhashval]);
536 535 renew_client(clp);
... ... @@ -103,8 +103,7 @@
103 103 static void
104 104 lru_put_end(struct svc_cacherep *rp)
105 105 {
106   - list_del(&rp->c_lru);
107   - list_add_tail(&rp->c_lru, &lru_head);
  106 + list_move_tail(&rp->c_lru, &lru_head);
108 107 }
109 108  
110 109 /*
fs/ocfs2/dlm/dlmast.c
... ... @@ -381,8 +381,7 @@
381 381 ret = DLM_NORMAL;
382 382 if (past->type == DLM_AST) {
383 383 /* do not alter lock refcount. switching lists. */
384   - list_del_init(&lock->list);
385   - list_add_tail(&lock->list, &res->granted);
  384 + list_move_tail(&lock->list, &res->granted);
386 385 mlog(0, "ast: adding to granted list... type=%d, "
387 386 "convert_type=%d\n", lock->ml.type, lock->ml.convert_type);
388 387 if (lock->ml.convert_type != LKM_IVMODE) {
fs/ocfs2/dlm/dlmconvert.c
... ... @@ -231,8 +231,7 @@
231 231  
232 232 lock->ml.convert_type = type;
233 233 /* do not alter lock refcount. switching lists. */
234   - list_del_init(&lock->list);
235   - list_add_tail(&lock->list, &res->converting);
  234 + list_move_tail(&lock->list, &res->converting);
236 235  
237 236 unlock_exit:
238 237 spin_unlock(&lock->spinlock);
... ... @@ -248,8 +247,7 @@
248 247 struct dlm_lock *lock)
249 248 {
250 249 /* do not alter lock refcount. switching lists. */
251   - list_del_init(&lock->list);
252   - list_add_tail(&lock->list, &res->granted);
  250 + list_move_tail(&lock->list, &res->granted);
253 251 lock->ml.convert_type = LKM_IVMODE;
254 252 lock->lksb->flags &= ~(DLM_LKSB_GET_LVB|DLM_LKSB_PUT_LVB);
255 253 }
... ... @@ -294,8 +292,7 @@
294 292 res->state |= DLM_LOCK_RES_IN_PROGRESS;
295 293 /* move lock to local convert queue */
296 294 /* do not alter lock refcount. switching lists. */
297   - list_del_init(&lock->list);
298   - list_add_tail(&lock->list, &res->converting);
  295 + list_move_tail(&lock->list, &res->converting);
299 296 lock->convert_pending = 1;
300 297 lock->ml.convert_type = type;
301 298  
fs/ocfs2/dlm/dlmlock.c
... ... @@ -239,8 +239,7 @@
239 239 mlog(0, "%s: $RECOVERY lock for this node (%u) is "
240 240 "mastered by %u; got lock, manually granting (no ast)\n",
241 241 dlm->name, dlm->node_num, res->owner);
242   - list_del_init(&lock->list);
243   - list_add_tail(&lock->list, &res->granted);
  242 + list_move_tail(&lock->list, &res->granted);
244 243 }
245 244 spin_unlock(&res->spinlock);
246 245  
fs/ocfs2/dlm/dlmrecovery.c
... ... @@ -905,13 +905,11 @@
905 905 mlog(0, "found lockres owned by dead node while "
906 906 "doing recovery for node %u. sending it.\n",
907 907 dead_node);
908   - list_del_init(&res->recovering);
909   - list_add_tail(&res->recovering, list);
  908 + list_move_tail(&res->recovering, list);
910 909 } else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
911 910 mlog(0, "found UNKNOWN owner while doing recovery "
912 911 "for node %u. sending it.\n", dead_node);
913   - list_del_init(&res->recovering);
914   - list_add_tail(&res->recovering, list);
  912 + list_move_tail(&res->recovering, list);
915 913 }
916 914 }
917 915 spin_unlock(&dlm->spinlock);
... ... @@ -1529,8 +1527,7 @@
1529 1527  
1530 1528 /* move the lock to its proper place */
1531 1529 /* do not alter lock refcount. switching lists. */
1532   - list_del_init(&lock->list);
1533   - list_add_tail(&lock->list, queue);
  1530 + list_move_tail(&lock->list, queue);
1534 1531 spin_unlock(&res->spinlock);
1535 1532  
1536 1533 mlog(0, "just reordered a local lock!\n");
fs/ocfs2/dlm/dlmthread.c
... ... @@ -318,8 +318,7 @@
318 318  
319 319 target->ml.type = target->ml.convert_type;
320 320 target->ml.convert_type = LKM_IVMODE;
321   - list_del_init(&target->list);
322   - list_add_tail(&target->list, &res->granted);
  321 + list_move_tail(&target->list, &res->granted);
323 322  
324 323 BUG_ON(!target->lksb);
325 324 target->lksb->status = DLM_NORMAL;
... ... @@ -380,8 +379,7 @@
380 379 target->ml.type, target->ml.node);
381 380  
382 381 // target->ml.type is already correct
383   - list_del_init(&target->list);
384   - list_add_tail(&target->list, &res->granted);
  382 + list_move_tail(&target->list, &res->granted);
385 383  
386 384 BUG_ON(!target->lksb);
387 385 target->lksb->status = DLM_NORMAL;
fs/ocfs2/dlm/dlmunlock.c
... ... @@ -271,8 +271,7 @@
271 271 void dlm_commit_pending_cancel(struct dlm_lock_resource *res,
272 272 struct dlm_lock *lock)
273 273 {
274   - list_del_init(&lock->list);
275   - list_add_tail(&lock->list, &res->granted);
  274 + list_move_tail(&lock->list, &res->granted);
276 275 lock->ml.convert_type = LKM_IVMODE;
277 276 }
278 277  
... ... @@ -222,8 +222,7 @@
222 222 BUG_ON(!list_empty(&OCFS2_I(inode)->ip_handle_list));
223 223  
224 224 OCFS2_I(inode)->ip_handle = handle;
225   - list_del(&(OCFS2_I(inode)->ip_handle_list));
226   - list_add_tail(&(OCFS2_I(inode)->ip_handle_list), &(handle->inode_list));
  225 + list_move_tail(&(OCFS2_I(inode)->ip_handle_list), &(handle->inode_list));
227 226 }
228 227  
229 228 static void ocfs2_handle_unlock_inodes(struct ocfs2_journal_handle *handle)
fs/reiserfs/journal.c
... ... @@ -834,8 +834,7 @@
834 834 get_bh(bh);
835 835 if (test_set_buffer_locked(bh)) {
836 836 if (!buffer_dirty(bh)) {
837   - list_del_init(&jh->list);
838   - list_add(&jh->list, &tmp);
  837 + list_move(&jh->list, &tmp);
839 838 goto loop_next;
840 839 }
841 840 spin_unlock(lock);
... ... @@ -855,8 +854,7 @@
855 854 ret = -EIO;
856 855 }
857 856 if (buffer_dirty(bh)) {
858   - list_del_init(&jh->list);
859   - list_add(&jh->list, &tmp);
  857 + list_move(&jh->list, &tmp);
860 858 add_to_chunk(&chunk, bh, lock, write_ordered_chunk);
861 859 } else {
862 860 reiserfs_free_jh(bh);
... ... @@ -400,8 +400,7 @@
400 400 if (!(req->rq_flags & SMB_REQ_TRANSMITTED))
401 401 goto out;
402 402  
403   - list_del_init(&req->rq_queue);
404   - list_add_tail(&req->rq_queue, &server->recvq);
  403 + list_move_tail(&req->rq_queue, &server->recvq);
405 404 result = 1;
406 405 out:
407 406 return result;
... ... @@ -435,8 +434,7 @@
435 434 result = smb_request_send_req(req);
436 435 if (result < 0) {
437 436 server->conn_error = result;
438   - list_del_init(&req->rq_queue);
439   - list_add(&req->rq_queue, &server->xmitq);
  437 + list_move(&req->rq_queue, &server->xmitq);
440 438 result = -EIO;
441 439 goto out;
442 440 }
... ... @@ -193,8 +193,7 @@
193 193 if (req->rq_flags & SMB_REQ_RETRY) {
194 194 /* must move the request to the xmitq */
195 195 VERBOSE("retrying request %p on recvq\n", req);
196   - list_del(&req->rq_queue);
197   - list_add(&req->rq_queue, &server->xmitq);
  196 + list_move(&req->rq_queue, &server->xmitq);
198 197 continue;
199 198 }
200 199 #endif