Commit c7be761a8163d2f1ac0b606c21e4316b7abc5af7

Authored by David Teigland
1 parent 892c4467e3

dlm: change rsbtbl rwlock to spinlock

The rwlock is almost always used in write mode, so there's no reason
not to use a spinlock instead.

Signed-off-by: David Teigland <teigland@redhat.com>

Showing 5 changed files with 32 additions and 32 deletions Side-by-side Diff

... ... @@ -416,7 +416,7 @@
416 416 if (seq->op == &format3_seq_ops)
417 417 ri->format = 3;
418 418  
419   - read_lock(&ls->ls_rsbtbl[bucket].lock);
  419 + spin_lock(&ls->ls_rsbtbl[bucket].lock);
420 420 if (!list_empty(&ls->ls_rsbtbl[bucket].list)) {
421 421 list_for_each_entry(r, &ls->ls_rsbtbl[bucket].list,
422 422 res_hashchain) {
423 423  
... ... @@ -424,12 +424,12 @@
424 424 dlm_hold_rsb(r);
425 425 ri->rsb = r;
426 426 ri->bucket = bucket;
427   - read_unlock(&ls->ls_rsbtbl[bucket].lock);
  427 + spin_unlock(&ls->ls_rsbtbl[bucket].lock);
428 428 return ri;
429 429 }
430 430 }
431 431 }
432   - read_unlock(&ls->ls_rsbtbl[bucket].lock);
  432 + spin_unlock(&ls->ls_rsbtbl[bucket].lock);
433 433  
434 434 /*
435 435 * move to the first rsb in the next non-empty bucket
436 436  
437 437  
... ... @@ -447,18 +447,18 @@
447 447 return NULL;
448 448 }
449 449  
450   - read_lock(&ls->ls_rsbtbl[bucket].lock);
  450 + spin_lock(&ls->ls_rsbtbl[bucket].lock);
451 451 if (!list_empty(&ls->ls_rsbtbl[bucket].list)) {
452 452 r = list_first_entry(&ls->ls_rsbtbl[bucket].list,
453 453 struct dlm_rsb, res_hashchain);
454 454 dlm_hold_rsb(r);
455 455 ri->rsb = r;
456 456 ri->bucket = bucket;
457   - read_unlock(&ls->ls_rsbtbl[bucket].lock);
  457 + spin_unlock(&ls->ls_rsbtbl[bucket].lock);
458 458 *pos = n;
459 459 return ri;
460 460 }
461   - read_unlock(&ls->ls_rsbtbl[bucket].lock);
  461 + spin_unlock(&ls->ls_rsbtbl[bucket].lock);
462 462 }
463 463 }
464 464  
... ... @@ -477,7 +477,7 @@
477 477 * move to the next rsb in the same bucket
478 478 */
479 479  
480   - read_lock(&ls->ls_rsbtbl[bucket].lock);
  480 + spin_lock(&ls->ls_rsbtbl[bucket].lock);
481 481 rp = ri->rsb;
482 482 next = rp->res_hashchain.next;
483 483  
484 484  
... ... @@ -485,12 +485,12 @@
485 485 r = list_entry(next, struct dlm_rsb, res_hashchain);
486 486 dlm_hold_rsb(r);
487 487 ri->rsb = r;
488   - read_unlock(&ls->ls_rsbtbl[bucket].lock);
  488 + spin_unlock(&ls->ls_rsbtbl[bucket].lock);
489 489 dlm_put_rsb(rp);
490 490 ++*pos;
491 491 return ri;
492 492 }
493   - read_unlock(&ls->ls_rsbtbl[bucket].lock);
  493 + spin_unlock(&ls->ls_rsbtbl[bucket].lock);
494 494 dlm_put_rsb(rp);
495 495  
496 496 /*
497 497  
498 498  
... ... @@ -509,18 +509,18 @@
509 509 return NULL;
510 510 }
511 511  
512   - read_lock(&ls->ls_rsbtbl[bucket].lock);
  512 + spin_lock(&ls->ls_rsbtbl[bucket].lock);
513 513 if (!list_empty(&ls->ls_rsbtbl[bucket].list)) {
514 514 r = list_first_entry(&ls->ls_rsbtbl[bucket].list,
515 515 struct dlm_rsb, res_hashchain);
516 516 dlm_hold_rsb(r);
517 517 ri->rsb = r;
518 518 ri->bucket = bucket;
519   - read_unlock(&ls->ls_rsbtbl[bucket].lock);
  519 + spin_unlock(&ls->ls_rsbtbl[bucket].lock);
520 520 *pos = n;
521 521 return ri;
522 522 }
523   - read_unlock(&ls->ls_rsbtbl[bucket].lock);
  523 + spin_unlock(&ls->ls_rsbtbl[bucket].lock);
524 524 }
525 525 }
526 526  
fs/dlm/dlm_internal.h
... ... @@ -105,7 +105,7 @@
105 105 struct dlm_rsbtable {
106 106 struct list_head list;
107 107 struct list_head toss;
108   - rwlock_t lock;
  108 + spinlock_t lock;
109 109 };
110 110  
111 111 struct dlm_lkbtable {
... ... @@ -412,9 +412,9 @@
412 412 unsigned int flags, struct dlm_rsb **r_ret)
413 413 {
414 414 int error;
415   - write_lock(&ls->ls_rsbtbl[b].lock);
  415 + spin_lock(&ls->ls_rsbtbl[b].lock);
416 416 error = _search_rsb(ls, name, len, b, flags, r_ret);
417   - write_unlock(&ls->ls_rsbtbl[b].lock);
  417 + spin_unlock(&ls->ls_rsbtbl[b].lock);
418 418 return error;
419 419 }
420 420  
421 421  
422 422  
... ... @@ -478,16 +478,16 @@
478 478 r->res_nodeid = nodeid;
479 479 }
480 480  
481   - write_lock(&ls->ls_rsbtbl[bucket].lock);
  481 + spin_lock(&ls->ls_rsbtbl[bucket].lock);
482 482 error = _search_rsb(ls, name, namelen, bucket, 0, &tmp);
483 483 if (!error) {
484   - write_unlock(&ls->ls_rsbtbl[bucket].lock);
  484 + spin_unlock(&ls->ls_rsbtbl[bucket].lock);
485 485 dlm_free_rsb(r);
486 486 r = tmp;
487 487 goto out;
488 488 }
489 489 list_add(&r->res_hashchain, &ls->ls_rsbtbl[bucket].list);
490   - write_unlock(&ls->ls_rsbtbl[bucket].lock);
  490 + spin_unlock(&ls->ls_rsbtbl[bucket].lock);
491 491 error = 0;
492 492 out:
493 493 *r_ret = r;
494 494  
... ... @@ -530,9 +530,9 @@
530 530 struct dlm_ls *ls = r->res_ls;
531 531 uint32_t bucket = r->res_bucket;
532 532  
533   - write_lock(&ls->ls_rsbtbl[bucket].lock);
  533 + spin_lock(&ls->ls_rsbtbl[bucket].lock);
534 534 kref_put(&r->res_ref, toss_rsb);
535   - write_unlock(&ls->ls_rsbtbl[bucket].lock);
  535 + spin_unlock(&ls->ls_rsbtbl[bucket].lock);
536 536 }
537 537  
538 538 void dlm_put_rsb(struct dlm_rsb *r)
... ... @@ -967,7 +967,7 @@
967 967  
968 968 for (;;) {
969 969 found = 0;
970   - write_lock(&ls->ls_rsbtbl[b].lock);
  970 + spin_lock(&ls->ls_rsbtbl[b].lock);
971 971 list_for_each_entry_reverse(r, &ls->ls_rsbtbl[b].toss,
972 972 res_hashchain) {
973 973 if (!time_after_eq(jiffies, r->res_toss_time +
974 974  
975 975  
... ... @@ -978,20 +978,20 @@
978 978 }
979 979  
980 980 if (!found) {
981   - write_unlock(&ls->ls_rsbtbl[b].lock);
  981 + spin_unlock(&ls->ls_rsbtbl[b].lock);
982 982 break;
983 983 }
984 984  
985 985 if (kref_put(&r->res_ref, kill_rsb)) {
986 986 list_del(&r->res_hashchain);
987   - write_unlock(&ls->ls_rsbtbl[b].lock);
  987 + spin_unlock(&ls->ls_rsbtbl[b].lock);
988 988  
989 989 if (is_master(r))
990 990 dir_remove(r);
991 991 dlm_free_rsb(r);
992 992 count++;
993 993 } else {
994   - write_unlock(&ls->ls_rsbtbl[b].lock);
  994 + spin_unlock(&ls->ls_rsbtbl[b].lock);
995 995 log_error(ls, "tossed rsb in use %s", r->res_name);
996 996 }
997 997 }
... ... @@ -4224,7 +4224,7 @@
4224 4224 {
4225 4225 struct dlm_rsb *r, *r_ret = NULL;
4226 4226  
4227   - read_lock(&ls->ls_rsbtbl[bucket].lock);
  4227 + spin_lock(&ls->ls_rsbtbl[bucket].lock);
4228 4228 list_for_each_entry(r, &ls->ls_rsbtbl[bucket].list, res_hashchain) {
4229 4229 if (!rsb_flag(r, RSB_LOCKS_PURGED))
4230 4230 continue;
... ... @@ -4233,7 +4233,7 @@
4233 4233 r_ret = r;
4234 4234 break;
4235 4235 }
4236   - read_unlock(&ls->ls_rsbtbl[bucket].lock);
  4236 + spin_unlock(&ls->ls_rsbtbl[bucket].lock);
4237 4237 return r_ret;
4238 4238 }
4239 4239  
... ... @@ -464,7 +464,7 @@
464 464 for (i = 0; i < size; i++) {
465 465 INIT_LIST_HEAD(&ls->ls_rsbtbl[i].list);
466 466 INIT_LIST_HEAD(&ls->ls_rsbtbl[i].toss);
467   - rwlock_init(&ls->ls_rsbtbl[i].lock);
  467 + spin_lock_init(&ls->ls_rsbtbl[i].lock);
468 468 }
469 469  
470 470 size = dlm_config.ci_lkbtbl_size;
... ... @@ -726,7 +726,7 @@
726 726 }
727 727  
728 728 for (i = 0; i < ls->ls_rsbtbl_size; i++) {
729   - read_lock(&ls->ls_rsbtbl[i].lock);
  729 + spin_lock(&ls->ls_rsbtbl[i].lock);
730 730 list_for_each_entry(r, &ls->ls_rsbtbl[i].list, res_hashchain) {
731 731 list_add(&r->res_root_list, &ls->ls_root_list);
732 732 dlm_hold_rsb(r);
... ... @@ -737,7 +737,7 @@
737 737 but no other recovery steps should do anything with them. */
738 738  
739 739 if (dlm_no_directory(ls)) {
740   - read_unlock(&ls->ls_rsbtbl[i].lock);
  740 + spin_unlock(&ls->ls_rsbtbl[i].lock);
741 741 continue;
742 742 }
743 743  
... ... @@ -745,7 +745,7 @@
745 745 list_add(&r->res_root_list, &ls->ls_root_list);
746 746 dlm_hold_rsb(r);
747 747 }
748   - read_unlock(&ls->ls_rsbtbl[i].lock);
  748 + spin_unlock(&ls->ls_rsbtbl[i].lock);
749 749 }
750 750 out:
751 751 up_write(&ls->ls_root_sem);
... ... @@ -775,7 +775,7 @@
775 775 int i;
776 776  
777 777 for (i = 0; i < ls->ls_rsbtbl_size; i++) {
778   - write_lock(&ls->ls_rsbtbl[i].lock);
  778 + spin_lock(&ls->ls_rsbtbl[i].lock);
779 779 list_for_each_entry_safe(r, safe, &ls->ls_rsbtbl[i].toss,
780 780 res_hashchain) {
781 781 if (dlm_no_directory(ls) || !is_master(r)) {
... ... @@ -783,7 +783,7 @@
783 783 dlm_free_rsb(r);
784 784 }
785 785 }
786   - write_unlock(&ls->ls_rsbtbl[i].lock);
  786 + spin_unlock(&ls->ls_rsbtbl[i].lock);
787 787 }
788 788 }