Commit: d59dd4620fb8d6422555a9e2b82a707718e68327
Author: akpm@osdl.org
Committer: Linus Torvalds
Parent: 0d8d4d42f2

[PATCH] use smp_mb/wmb/rmb where possible

Replace a number of memory barriers with smp_ variants.  This means we won't
take the unnecessary hit on UP machines.

Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
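
Background on the change: the smp_* barrier macros compile down to a plain compiler barrier on uniprocessor (!CONFIG_SMP) builds, whereas mb()/wmb()/rmb() always emit the CPU fence instruction, so the smp_ variants avoid needless fences on UP kernels. Below is a minimal, purely illustrative sketch of the store/publish pattern these barriers pair up in; the struct and function names are made up for illustration and are not taken from the patch.

/* Illustrative sketch only -- not part of this patch. */
struct message {
	int payload;
	int ready;
};

static struct message m;

static void producer(void)
{
	m.payload = 42;
	/*
	 * Order the payload store before the flag store.  On SMP this
	 * expands to wmb(); on UP it is only a compiler barrier.
	 */
	smp_wmb();
	m.ready = 1;
}

static void consumer(void)
{
	while (!m.ready)
		cpu_relax();
	/*
	 * Pairs with the smp_wmb() above: don't read the payload
	 * until after the flag has been observed as set.
	 */
	smp_rmb();
	BUG_ON(m.payload != 42);
}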

Showing 11 changed files with 31 additions and 31 deletions

... ... @@ -218,7 +218,7 @@
218 218 sb = get_super(bdev);
219 219 if (sb && !(sb->s_flags & MS_RDONLY)) {
220 220 sb->s_frozen = SB_FREEZE_WRITE;
221   - wmb();
  221 + smp_wmb();
222 222  
223 223 sync_inodes_sb(sb, 0);
224 224 DQUOT_SYNC(sb);
... ... @@ -235,7 +235,7 @@
235 235 sync_inodes_sb(sb, 1);
236 236  
237 237 sb->s_frozen = SB_FREEZE_TRANS;
238   - wmb();
  238 + smp_wmb();
239 239  
240 240 sync_blockdev(sb->s_bdev);
241 241  
... ... @@ -263,7 +263,7 @@
263 263 if (sb->s_op->unlockfs)
264 264 sb->s_op->unlockfs(sb);
265 265 sb->s_frozen = SB_UNFROZEN;
266   - wmb();
  266 + smp_wmb();
267 267 wake_up(&sb->s_wait_unfrozen);
268 268 drop_super(sb);
269 269 }
... ... @@ -767,7 +767,7 @@
767 767 list_del(&receiver->list);
768 768 receiver->state = STATE_PENDING;
769 769 wake_up_process(receiver->task);
770   - wmb();
  770 + smp_wmb();
771 771 receiver->state = STATE_READY;
772 772 }
773 773  
... ... @@ -786,7 +786,7 @@
786 786 list_del(&sender->list);
787 787 sender->state = STATE_PENDING;
788 788 wake_up_process(sender->task);
789   - wmb();
  789 + smp_wmb();
790 790 sender->state = STATE_READY;
791 791 }
792 792  
... ... @@ -174,7 +174,7 @@
174 174  
175 175 /* Must init completion *before* thread sees kthread_stop_info.k */
176 176 init_completion(&kthread_stop_info.done);
177   - wmb();
  177 + smp_wmb();
178 178  
179 179 /* Now set kthread_should_stop() to true, and wake it up. */
180 180 kthread_stop_info.k = k;
... ... @@ -522,7 +522,7 @@
522 522 return 0;
523 523 out_cleanup:
524 524 prof_on = 0;
525   - mb();
  525 + smp_mb();
526 526 on_each_cpu(profile_nop, NULL, 0, 1);
527 527 for_each_online_cpu(cpu) {
528 528 struct page *page;
... ... @@ -135,7 +135,7 @@
135 135 (current->gid != task->sgid) ||
136 136 (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE))
137 137 goto bad;
138   - rmb();
  138 + smp_rmb();
139 139 if (!task->mm->dumpable && !capable(CAP_SYS_PTRACE))
140 140 goto bad;
141 141 /* the same process cannot be attached many times */
kernel/stop_machine.c
... ... @@ -33,7 +33,7 @@
33 33 set_cpus_allowed(current, cpumask_of_cpu((int)(long)cpu));
34 34  
35 35 /* Ack: we are alive */
36   - mb(); /* Theoretically the ack = 0 might not be on this CPU yet. */
  36 + smp_mb(); /* Theoretically the ack = 0 might not be on this CPU yet. */
37 37 atomic_inc(&stopmachine_thread_ack);
38 38  
39 39 /* Simple state machine */
... ... @@ -43,14 +43,14 @@
43 43 local_irq_disable();
44 44 irqs_disabled = 1;
45 45 /* Ack: irqs disabled. */
46   - mb(); /* Must read state first. */
  46 + smp_mb(); /* Must read state first. */
47 47 atomic_inc(&stopmachine_thread_ack);
48 48 } else if (stopmachine_state == STOPMACHINE_PREPARE
49 49 && !prepared) {
50 50 /* Everyone is in place, hold CPU. */
51 51 preempt_disable();
52 52 prepared = 1;
53   - mb(); /* Must read state first. */
  53 + smp_mb(); /* Must read state first. */
54 54 atomic_inc(&stopmachine_thread_ack);
55 55 }
56 56 /* Yield in first stage: migration threads need to
... ... @@ -62,7 +62,7 @@
62 62 }
63 63  
64 64 /* Ack: we are exiting. */
65   - mb(); /* Must read state first. */
  65 + smp_mb(); /* Must read state first. */
66 66 atomic_inc(&stopmachine_thread_ack);
67 67  
68 68 if (irqs_disabled)
... ... @@ -77,7 +77,7 @@
77 77 static void stopmachine_set_state(enum stopmachine_state state)
78 78 {
79 79 atomic_set(&stopmachine_thread_ack, 0);
80   - wmb();
  80 + smp_wmb();
81 81 stopmachine_state = state;
82 82 while (atomic_read(&stopmachine_thread_ack) != stopmachine_num_threads)
83 83 cpu_relax();
... ... @@ -525,7 +525,7 @@
525 525 if (new_egid != old_egid)
526 526 {
527 527 current->mm->dumpable = 0;
528   - wmb();
  528 + smp_wmb();
529 529 }
530 530 if (rgid != (gid_t) -1 ||
531 531 (egid != (gid_t) -1 && egid != old_rgid))
... ... @@ -556,7 +556,7 @@
556 556 if(old_egid != gid)
557 557 {
558 558 current->mm->dumpable=0;
559   - wmb();
  559 + smp_wmb();
560 560 }
561 561 current->gid = current->egid = current->sgid = current->fsgid = gid;
562 562 }
... ... @@ -565,7 +565,7 @@
565 565 if(old_egid != gid)
566 566 {
567 567 current->mm->dumpable=0;
568   - wmb();
  568 + smp_wmb();
569 569 }
570 570 current->egid = current->fsgid = gid;
571 571 }
... ... @@ -596,7 +596,7 @@
596 596 if(dumpclear)
597 597 {
598 598 current->mm->dumpable = 0;
599   - wmb();
  599 + smp_wmb();
600 600 }
601 601 current->uid = new_ruid;
602 602 return 0;
... ... @@ -653,7 +653,7 @@
653 653 if (new_euid != old_euid)
654 654 {
655 655 current->mm->dumpable=0;
656   - wmb();
  656 + smp_wmb();
657 657 }
658 658 current->fsuid = current->euid = new_euid;
659 659 if (ruid != (uid_t) -1 ||
... ... @@ -703,7 +703,7 @@
703 703 if (old_euid != uid)
704 704 {
705 705 current->mm->dumpable = 0;
706   - wmb();
  706 + smp_wmb();
707 707 }
708 708 current->fsuid = current->euid = uid;
709 709 current->suid = new_suid;
... ... @@ -748,7 +748,7 @@
748 748 if (euid != current->euid)
749 749 {
750 750 current->mm->dumpable = 0;
751   - wmb();
  751 + smp_wmb();
752 752 }
753 753 current->euid = euid;
754 754 }
... ... @@ -798,7 +798,7 @@
798 798 if (egid != current->egid)
799 799 {
800 800 current->mm->dumpable = 0;
801   - wmb();
  801 + smp_wmb();
802 802 }
803 803 current->egid = egid;
804 804 }
... ... @@ -845,7 +845,7 @@
845 845 if (uid != old_fsuid)
846 846 {
847 847 current->mm->dumpable = 0;
848   - wmb();
  848 + smp_wmb();
849 849 }
850 850 current->fsuid = uid;
851 851 }
... ... @@ -875,7 +875,7 @@
875 875 if (gid != old_fsgid)
876 876 {
877 877 current->mm->dumpable = 0;
878   - wmb();
  878 + smp_wmb();
879 879 }
880 880 current->fsgid = gid;
881 881 key_fsgid_changed(current);
... ... @@ -1007,7 +1007,7 @@
1007 1007 * Make sure we read the pid before re-reading the
1008 1008 * parent pointer:
1009 1009 */
1010   - rmb();
  1010 + smp_rmb();
1011 1011 parent = me->group_leader->real_parent;
1012 1012 if (old != parent)
1013 1013 continue;
lib/rwsem-spinlock.c
... ... @@ -76,7 +76,7 @@
76 76 list_del(&waiter->list);
77 77 tsk = waiter->task;
78 78 /* Don't touch waiter after ->task has been NULLed */
79   - mb();
  79 + smp_mb();
80 80 waiter->task = NULL;
81 81 wake_up_process(tsk);
82 82 put_task_struct(tsk);
... ... @@ -91,7 +91,7 @@
91 91  
92 92 list_del(&waiter->list);
93 93 tsk = waiter->task;
94   - mb();
  94 + smp_mb();
95 95 waiter->task = NULL;
96 96 wake_up_process(tsk);
97 97 put_task_struct(tsk);
... ... @@ -123,7 +123,7 @@
123 123 list_del(&waiter->list);
124 124  
125 125 tsk = waiter->task;
126   - mb();
  126 + smp_mb();
127 127 waiter->task = NULL;
128 128 wake_up_process(tsk);
129 129 put_task_struct(tsk);
... ... @@ -74,7 +74,7 @@
74 74 */
75 75 list_del(&waiter->list);
76 76 tsk = waiter->task;
77   - mb();
  77 + smp_mb();
78 78 waiter->task = NULL;
79 79 wake_up_process(tsk);
80 80 put_task_struct(tsk);
... ... @@ -117,7 +117,7 @@
117 117 waiter = list_entry(next, struct rwsem_waiter, list);
118 118 next = waiter->list.next;
119 119 tsk = waiter->task;
120   - mb();
  120 + smp_mb();
121 121 waiter->task = NULL;
122 122 wake_up_process(tsk);
123 123 put_task_struct(tsk);
... ... @@ -229,7 +229,7 @@
229 229 /* Now start performing page reclaim */
230 230 gfp_temp = gfp_mask;
231 231 prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
232   - mb();
  232 + smp_mb();
233 233 if (!pool->curr_nr)
234 234 io_schedule();
235 235 finish_wait(&pool->wait, &wait);
... ... @@ -250,7 +250,7 @@
250 250 {
251 251 unsigned long flags;
252 252  
253   - mb();
  253 + smp_mb();
254 254 if (pool->curr_nr < pool->min_nr) {
255 255 spin_lock_irqsave(&pool->lock, flags);
256 256 if (pool->curr_nr < pool->min_nr) {