Commit 7f6b9dbd5afbd966a82dcbafc5ed62305eb9d479

Author:    Stephen Hemminger
Committer: David S. Miller
Parent:    808f5114a9

af_key: locking change

Get rid of the custom locking that used a wait queue, an rwlock, and an
atomic counter to essentially build a queued mutex.  Use a plain mutex
to serialize updates and RCU for the read side.

Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
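
The pattern being adopted is the classic RCU split: updates are rare and
serialized by a sleeping mutex, lookups are frequent and run locklessly
under rcu_read_lock(), and memory is reclaimed only once
synchronize_rcu() guarantees that no reader can still hold a reference.
That is also why pfkey_release() gains a synchronize_rcu() before the
final sock_put() in the diff below.  A minimal sketch of the idiom, with
illustrative names only (demo_mutex, demo_list; none of this code is
from the patch):

/* Writers take demo_mutex; readers only take rcu_read_lock(). */
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/types.h>

static DEFINE_MUTEX(demo_mutex);        /* serializes updaters only */
static LIST_HEAD(demo_list);            /* read often, written rarely */

struct demo_entry {
        struct list_head node;
        int value;
};

static void demo_add(struct demo_entry *e)
{
        mutex_lock(&demo_mutex);        /* updater side: may sleep */
        list_add_rcu(&e->node, &demo_list);
        mutex_unlock(&demo_mutex);
}

static void demo_del(struct demo_entry *e)
{
        mutex_lock(&demo_mutex);
        list_del_rcu(&e->node);         /* unpublish the entry */
        mutex_unlock(&demo_mutex);

        synchronize_rcu();              /* wait out in-flight readers... */
        kfree(e);                       /* ...then freeing is safe */
}

static bool demo_find(int value)
{
        struct demo_entry *e;
        bool found = false;

        rcu_read_lock();                /* reader side: cheap, never blocks */
        list_for_each_entry_rcu(e, &demo_list, node) {
                if (e->value == value) {
                        found = true;
                        break;
                }
        }
        rcu_read_unlock();
        return found;
}

In af_key the table holds sockets, so the patch uses the socket hlist
helpers (sk_add_node_rcu(), sk_del_node_init_rcu(), sk_for_each_rcu())
rather than the raw list primitives, but the locking shape is identical.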

Showing 1 changed file with 16 additions and 60 deletions

diff --git a/net/key/af_key.c b/net/key/af_key.c
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -41,9 +41,7 @@
 	struct hlist_head table;
 	atomic_t socks_nr;
 };
-static DECLARE_WAIT_QUEUE_HEAD(pfkey_table_wait);
-static DEFINE_RWLOCK(pfkey_table_lock);
-static atomic_t pfkey_table_users = ATOMIC_INIT(0);
+static DEFINE_MUTEX(pfkey_mutex);
 
 struct pfkey_sock {
 	/* struct sock must be the first member of struct pfkey_sock */
@@ -108,50 +106,6 @@
 	atomic_dec(&net_pfkey->socks_nr);
 }
 
-static void pfkey_table_grab(void)
-{
-	write_lock_bh(&pfkey_table_lock);
-
-	if (atomic_read(&pfkey_table_users)) {
-		DECLARE_WAITQUEUE(wait, current);
-
-		add_wait_queue_exclusive(&pfkey_table_wait, &wait);
-		for(;;) {
-			set_current_state(TASK_UNINTERRUPTIBLE);
-			if (atomic_read(&pfkey_table_users) == 0)
-				break;
-			write_unlock_bh(&pfkey_table_lock);
-			schedule();
-			write_lock_bh(&pfkey_table_lock);
-		}
-
-		__set_current_state(TASK_RUNNING);
-		remove_wait_queue(&pfkey_table_wait, &wait);
-	}
-}
-
-static __inline__ void pfkey_table_ungrab(void)
-{
-	write_unlock_bh(&pfkey_table_lock);
-	wake_up(&pfkey_table_wait);
-}
-
-static __inline__ void pfkey_lock_table(void)
-{
-	/* read_lock() synchronizes us to pfkey_table_grab */
-
-	read_lock(&pfkey_table_lock);
-	atomic_inc(&pfkey_table_users);
-	read_unlock(&pfkey_table_lock);
-}
-
-static __inline__ void pfkey_unlock_table(void)
-{
-	if (atomic_dec_and_test(&pfkey_table_users))
-		wake_up(&pfkey_table_wait);
-}
-
-
 static const struct proto_ops pfkey_ops;
 
 static void pfkey_insert(struct sock *sk)
 {
@@ -159,16 +113,16 @@
 	struct net *net = sock_net(sk);
 	struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
 
-	pfkey_table_grab();
-	sk_add_node(sk, &net_pfkey->table);
-	pfkey_table_ungrab();
+	mutex_lock(&pfkey_mutex);
+	sk_add_node_rcu(sk, &net_pfkey->table);
+	mutex_unlock(&pfkey_mutex);
 }
 
 static void pfkey_remove(struct sock *sk)
 {
-	pfkey_table_grab();
-	sk_del_node_init(sk);
-	pfkey_table_ungrab();
+	mutex_lock(&pfkey_mutex);
+	sk_del_node_init_rcu(sk);
+	mutex_unlock(&pfkey_mutex);
 }
 
 static struct proto key_proto = {
@@ -223,6 +177,8 @@
 	sock_orphan(sk);
 	sock->sk = NULL;
 	skb_queue_purge(&sk->sk_write_queue);
+
+	synchronize_rcu();
 	sock_put(sk);
 
 	return 0;
@@ -277,8 +233,8 @@
 	if (!skb)
 		return -ENOMEM;
 
-	pfkey_lock_table();
-	sk_for_each(sk, node, &net_pfkey->table) {
+	rcu_read_lock();
+	sk_for_each_rcu(sk, node, &net_pfkey->table) {
 		struct pfkey_sock *pfk = pfkey_sk(sk);
 		int err2;
 
@@ -309,7 +265,7 @@
 		if ((broadcast_flags & BROADCAST_REGISTERED) && err)
 			err = err2;
 	}
-	pfkey_unlock_table();
+	rcu_read_unlock();
 
 	if (one_sk != NULL)
 		err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk);
@@ -3702,8 +3658,8 @@
 	struct net *net = seq_file_net(f);
 	struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
 
-	read_lock(&pfkey_table_lock);
-	return seq_hlist_start_head(&net_pfkey->table, *ppos);
+	rcu_read_lock();
+	return seq_hlist_start_head_rcu(&net_pfkey->table, *ppos);
 }
 
 static void *pfkey_seq_next(struct seq_file *f, void *v, loff_t *ppos)
 {
@@ -3711,12 +3667,12 @@
 	struct net *net = seq_file_net(f);
 	struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
 
-	return seq_hlist_next(v, &net_pfkey->table, ppos);
+	return seq_hlist_next_rcu(v, &net_pfkey->table, ppos);
 }
 
 static void pfkey_seq_stop(struct seq_file *f, void *v)
 {
-	read_unlock(&pfkey_table_lock);
+	rcu_read_unlock();
 }
 
 static const struct seq_operations pfkey_seq_ops = {
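
The /proc iterator in the last two hunks follows the same recipe: the
seq_file RCU helpers keep the read-side lock held from ->start to
->stop.  A sketch of that shape, again with hypothetical names
(demo_head, demo_seq_ops):

#include <linux/rculist.h>
#include <linux/seq_file.h>

static HLIST_HEAD(demo_head);           /* hypothetical RCU-protected list */

static void *demo_seq_start(struct seq_file *f, loff_t *ppos)
{
        rcu_read_lock();                /* held for the whole traversal */
        return seq_hlist_start_head_rcu(&demo_head, *ppos);
}

static void *demo_seq_next(struct seq_file *f, void *v, loff_t *ppos)
{
        return seq_hlist_next_rcu(v, &demo_head, ppos);
}

static void demo_seq_stop(struct seq_file *f, void *v)
{
        rcu_read_unlock();              /* matches demo_seq_start() */
}

static int demo_seq_show(struct seq_file *f, void *v)
{
        if (v == SEQ_START_TOKEN)       /* header row for *ppos == 0 */
                seq_puts(f, "entries:\n");
        else
                seq_printf(f, "%p\n", v);       /* v is an hlist_node */
        return 0;
}

static const struct seq_operations demo_seq_ops = {
        .start = demo_seq_start,
        .next  = demo_seq_next,
        .stop  = demo_seq_stop,
        .show  = demo_seq_show,
};

This is safe because seq_file copies its buffer to userspace only after
calling ->stop, so nothing sleeps while rcu_read_lock() is held,
provided ->show itself does not sleep.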