Commit bd30ce4bc0b7dc859c1d1cba7ad87e08642418b0

Authored by sjur.brandeland@stericsson.com
Committed by David S. Miller
1 parent 0b1e9738de

caif: Use RCU instead of spin-lock in caif_dev.c

RCU read_lock and a refcount are used to protect in-flight packets.

Use RCU and reference counters to manage freeing of the lower part of the
CAIF stack if the CAIF link layer is removed. The old solution, based on
delaying removal of the device, is removed.

When the CAIF link layer goes down, use of the CAIF link layer is disabled
(by calling cfcnfg_set_phy_state()), but removal and freeing of the
lower part of the CAIF stack is done when the link layer is unregistered.

Signed-off-by: Sjur Brændeland <sjur.brandeland@stericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Showing 2 changed files with 169 additions and 118 deletions Side-by-side Diff

include/net/caif/cfcnfg.h
... ... @@ -145,5 +145,15 @@
145 145 * @ifi: ifindex obtained from socket.c bindtodevice.
146 146 */
147 147 int cfcnfg_get_id_from_ifi(struct cfcnfg *cnfg, int ifi);
  148 +
  149 +/**
  150 + * cfcnfg_set_phy_state() - Set the state of the physical interface device.
  151 + * @cnfg: Configuration object
  152 + * @phy_layer: Physical Layer representation
  153 + * @up: State of device
  154 + */
  155 +int cfcnfg_set_phy_state(struct cfcnfg *cnfg, struct cflayer *phy_layer,
  156 + bool up);
  157 +
148 158 #endif /* CFCNFG_H_ */
... ... @@ -12,14 +12,11 @@
12 12 #define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
13 13  
14 14 #include <linux/version.h>
15   -#include <linux/module.h>
16 15 #include <linux/kernel.h>
17 16 #include <linux/if_arp.h>
18 17 #include <linux/net.h>
19 18 #include <linux/netdevice.h>
20   -#include <linux/skbuff.h>
21   -#include <linux/sched.h>
22   -#include <linux/wait.h>
  19 +#include <linux/mutex.h>
23 20 #include <net/netns/generic.h>
24 21 #include <net/net_namespace.h>
25 22 #include <net/pkt_sched.h>
26 23  
27 24  
28 25  
... ... @@ -30,23 +27,19 @@
30 27 #include <net/caif/cfcnfg.h>
31 28  
32 29 MODULE_LICENSE("GPL");
33   -#define TIMEOUT (HZ*5)
34 30  
35 31 /* Used for local tracking of the CAIF net devices */
36 32 struct caif_device_entry {
37 33 struct cflayer layer;
38 34 struct list_head list;
39   - atomic_t in_use;
40   - atomic_t state;
41   - u16 phyid;
42 35 struct net_device *netdev;
43   - wait_queue_head_t event;
  36 + int __percpu *pcpu_refcnt;
44 37 };
45 38  
46 39 struct caif_device_entry_list {
47 40 struct list_head list;
48 41 /* Protects simultaneous deletes in list */
49   - spinlock_t lock;
  42 + struct mutex lock;
50 43 };
51 44  
52 45 struct caif_net {
53 46  
54 47  
55 48  
56 49  
... ... @@ -65,19 +58,39 @@
65 58 return &caifn->caifdevs;
66 59 }
67 60  
  61 +static void caifd_put(struct caif_device_entry *e)
  62 +{
  63 + irqsafe_cpu_dec(*e->pcpu_refcnt);
  64 +}
  65 +
  66 +static void caifd_hold(struct caif_device_entry *e)
  67 +{
  68 + irqsafe_cpu_inc(*e->pcpu_refcnt);
  69 +}
  70 +
  71 +static int caifd_refcnt_read(struct caif_device_entry *e)
  72 +{
  73 + int i, refcnt = 0;
  74 + for_each_possible_cpu(i)
  75 + refcnt += *per_cpu_ptr(e->pcpu_refcnt, i);
  76 + return refcnt;
  77 +}
  78 +
68 79 /* Allocate new CAIF device. */
69 80 static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
70 81 {
71 82 struct caif_device_entry_list *caifdevs;
72 83 struct caif_device_entry *caifd;
  84 +
73 85 caifdevs = caif_device_list(dev_net(dev));
74 86 BUG_ON(!caifdevs);
  87 +
75 88 caifd = kzalloc(sizeof(*caifd), GFP_ATOMIC);
76 89 if (!caifd)
77 90 return NULL;
  91 + caifd->pcpu_refcnt = alloc_percpu(int);
78 92 caifd->netdev = dev;
79   - list_add(&caifd->list, &caifdevs->list);
80   - init_waitqueue_head(&caifd->event);
  93 + dev_hold(dev);
81 94 return caifd;
82 95 }
83 96  
84 97  
... ... @@ -87,35 +100,13 @@
87 100 caif_device_list(dev_net(dev));
88 101 struct caif_device_entry *caifd;
89 102 BUG_ON(!caifdevs);
90   - list_for_each_entry(caifd, &caifdevs->list, list) {
  103 + list_for_each_entry_rcu(caifd, &caifdevs->list, list) {
91 104 if (caifd->netdev == dev)
92 105 return caifd;
93 106 }
94 107 return NULL;
95 108 }
96 109  
97   -static void caif_device_destroy(struct net_device *dev)
98   -{
99   - struct caif_device_entry_list *caifdevs =
100   - caif_device_list(dev_net(dev));
101   - struct caif_device_entry *caifd;
102   - ASSERT_RTNL();
103   - if (dev->type != ARPHRD_CAIF)
104   - return;
105   -
106   - spin_lock_bh(&caifdevs->lock);
107   - caifd = caif_get(dev);
108   - if (caifd == NULL) {
109   - spin_unlock_bh(&caifdevs->lock);
110   - return;
111   - }
112   -
113   - list_del(&caifd->list);
114   - spin_unlock_bh(&caifdevs->lock);
115   -
116   - kfree(caifd);
117   -}
118   -
119 110 static int transmit(struct cflayer *layer, struct cfpkt *pkt)
120 111 {
121 112 struct caif_device_entry *caifd =
122 113  
... ... @@ -130,23 +121,8 @@
130 121 return 0;
131 122 }
132 123  
133   -static int modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
134   -{
135   - struct caif_device_entry *caifd;
136   - caifd = container_of(layr, struct caif_device_entry, layer);
137   - if (ctrl == _CAIF_MODEMCMD_PHYIF_USEFULL) {
138   - atomic_set(&caifd->in_use, 1);
139   - wake_up_interruptible(&caifd->event);
140   -
141   - } else if (ctrl == _CAIF_MODEMCMD_PHYIF_USELESS) {
142   - atomic_set(&caifd->in_use, 0);
143   - wake_up_interruptible(&caifd->event);
144   - }
145   - return 0;
146   -}
147   -
148 124 /*
149   - * Stuff received packets to associated sockets.
  125 + * Stuff received packets into the CAIF stack.
150 126 * On error, returns non-zero and releases the skb.
151 127 */
152 128 static int receive(struct sk_buff *skb, struct net_device *dev,
153 129  
154 130  
155 131  
156 132  
157 133  
... ... @@ -154,14 +130,27 @@
154 130 {
155 131 struct cfpkt *pkt;
156 132 struct caif_device_entry *caifd;
  133 +
157 134 pkt = cfpkt_fromnative(CAIF_DIR_IN, skb);
  135 +
  136 + rcu_read_lock();
158 137 caifd = caif_get(dev);
159   - if (!caifd || !caifd->layer.up || !caifd->layer.up->receive)
160   - return NET_RX_DROP;
161 138  
162   - if (caifd->layer.up->receive(caifd->layer.up, pkt))
  139 + if (!caifd || !caifd->layer.up || !caifd->layer.up->receive ||
  140 + !netif_oper_up(caifd->netdev)) {
  141 + rcu_read_unlock();
  142 + kfree_skb(skb);
163 143 return NET_RX_DROP;
  144 + }
164 145  
  146 + /* Hold reference to netdevice while using CAIF stack */
  147 + caifd_hold(caifd);
  148 + rcu_read_unlock();
  149 +
  150 + caifd->layer.up->receive(caifd->layer.up, pkt);
  151 +
  152 + /* Release reference to stack upwards */
  153 + caifd_put(caifd);
165 154 return 0;
166 155 }
167 156  
168 157  
169 158  
170 159  
... ... @@ -172,15 +161,25 @@
172 161  
173 162 static void dev_flowctrl(struct net_device *dev, int on)
174 163 {
175   - struct caif_device_entry *caifd = caif_get(dev);
176   - if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd)
  164 + struct caif_device_entry *caifd;
  165 +
  166 + rcu_read_lock();
  167 +
  168 + caifd = caif_get(dev);
  169 + if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
  170 + rcu_read_unlock();
177 171 return;
  172 + }
178 173  
  174 + caifd_hold(caifd);
  175 + rcu_read_unlock();
  176 +
179 177 caifd->layer.up->ctrlcmd(caifd->layer.up,
180 178 on ?
181 179 _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND :
182 180 _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
183 181 caifd->layer.id);
  182 + caifd_put(caifd);
184 183 }
185 184  
186 185 /* notify Caif of device events */
187 186  
188 187  
189 188  
190 189  
191 190  
... ... @@ -192,34 +191,22 @@
192 191 struct caif_dev_common *caifdev;
193 192 enum cfcnfg_phy_preference pref;
194 193 enum cfcnfg_phy_type phy_type;
  194 + struct caif_device_entry_list *caifdevs =
  195 + caif_device_list(dev_net(dev));
195 196  
196 197 if (dev->type != ARPHRD_CAIF)
197 198 return 0;
198 199  
199 200 switch (what) {
200 201 case NETDEV_REGISTER:
201   - netdev_info(dev, "register\n");
202 202 caifd = caif_device_alloc(dev);
203   - if (caifd == NULL)
204   - break;
  203 + if (!caifd)
  204 + return 0;
  205 +
205 206 caifdev = netdev_priv(dev);
206 207 caifdev->flowctrl = dev_flowctrl;
207   - atomic_set(&caifd->state, what);
208   - break;
209 208  
210   - case NETDEV_UP:
211   - netdev_info(dev, "up\n");
212   - caifd = caif_get(dev);
213   - if (caifd == NULL)
214   - break;
215   - caifdev = netdev_priv(dev);
216   - if (atomic_read(&caifd->state) == NETDEV_UP) {
217   - netdev_info(dev, "already up\n");
218   - break;
219   - }
220   - atomic_set(&caifd->state, what);
221 209 caifd->layer.transmit = transmit;
222   - caifd->layer.modemcmd = modemcmd;
223 210  
224 211 if (caifdev->use_frag)
225 212 phy_type = CFPHYTYPE_FRAG;
226 213  
227 214  
228 215  
229 216  
230 217  
231 218  
232 219  
233 220  
234 221  
235 222  
236 223  
237 224  
238 225  
239 226  
... ... @@ -237,62 +224,95 @@
237 224 pref = CFPHYPREF_HIGH_BW;
238 225 break;
239 226 }
240   - dev_hold(dev);
  227 + strncpy(caifd->layer.name, dev->name,
  228 + sizeof(caifd->layer.name) - 1);
  229 + caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0;
  230 +
  231 + mutex_lock(&caifdevs->lock);
  232 + list_add_rcu(&caifd->list, &caifdevs->list);
  233 +
241 234 cfcnfg_add_phy_layer(cfg,
242 235 phy_type,
243 236 dev,
244 237 &caifd->layer,
245   - &caifd->phyid,
  238 + 0,
246 239 pref,
247 240 caifdev->use_fcs,
248 241 caifdev->use_stx);
249   - strncpy(caifd->layer.name, dev->name,
250   - sizeof(caifd->layer.name) - 1);
251   - caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0;
  242 + mutex_unlock(&caifdevs->lock);
252 243 break;
253 244  
254   - case NETDEV_GOING_DOWN:
  245 + case NETDEV_UP:
  246 + rcu_read_lock();
  247 +
255 248 caifd = caif_get(dev);
256   - if (caifd == NULL)
  249 + if (caifd == NULL) {
  250 + rcu_read_unlock();
257 251 break;
258   - netdev_info(dev, "going down\n");
  252 + }
259 253  
260   - if (atomic_read(&caifd->state) == NETDEV_GOING_DOWN ||
261   - atomic_read(&caifd->state) == NETDEV_DOWN)
262   - break;
  254 + cfcnfg_set_phy_state(cfg, &caifd->layer, true);
  255 + rcu_read_unlock();
263 256  
264   - atomic_set(&caifd->state, what);
265   - if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd)
  257 + break;
  258 +
  259 + case NETDEV_DOWN:
  260 + rcu_read_lock();
  261 +
  262 + caifd = caif_get(dev);
  263 + if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
  264 + rcu_read_unlock();
266 265 return -EINVAL;
  266 + }
  267 +
  268 + cfcnfg_set_phy_state(cfg, &caifd->layer, false);
  269 + caifd_hold(caifd);
  270 + rcu_read_unlock();
  271 +
267 272 caifd->layer.up->ctrlcmd(caifd->layer.up,
268 273 _CAIF_CTRLCMD_PHYIF_DOWN_IND,
269 274 caifd->layer.id);
270   - might_sleep();
271   - wait_event_interruptible_timeout(caifd->event,
272   - atomic_read(&caifd->in_use) == 0,
273   - TIMEOUT);
  275 + caifd_put(caifd);
274 276 break;
275 277  
276   - case NETDEV_DOWN:
  278 + case NETDEV_UNREGISTER:
  279 + mutex_lock(&caifdevs->lock);
  280 +
277 281 caifd = caif_get(dev);
278   - if (caifd == NULL)
  282 + if (caifd == NULL) {
  283 + mutex_unlock(&caifdevs->lock);
279 284 break;
280   - netdev_info(dev, "down\n");
281   - if (atomic_read(&caifd->in_use))
282   - netdev_warn(dev,
283   - "Unregistering an active CAIF device\n");
284   - cfcnfg_del_phy_layer(cfg, &caifd->layer);
285   - dev_put(dev);
286   - atomic_set(&caifd->state, what);
287   - break;
  285 + }
  286 + list_del_rcu(&caifd->list);
288 287  
289   - case NETDEV_UNREGISTER:
290   - caifd = caif_get(dev);
291   - if (caifd == NULL)
  288 + /*
  289 + * NETDEV_UNREGISTER is called repeatedly until all reference
  290 + * counts for the net-device are released. If references to
  291 + * caifd is taken, simply ignore NETDEV_UNREGISTER and wait for
  292 + * the next call to NETDEV_UNREGISTER.
  293 + *
  294 + * If any packets are in flight down the CAIF Stack,
  295 + * cfcnfg_del_phy_layer will return nonzero.
  296 + * If no packets are in flight, the CAIF Stack associated
  297 + * with the net-device un-registering is freed.
  298 + */
  299 +
  300 + if (caifd_refcnt_read(caifd) != 0 ||
  301 + cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0) {
  302 +
  303 + pr_info("Wait for device inuse\n");
  304 + /* Enroll device if CAIF Stack is still in use */
  305 + list_add_rcu(&caifd->list, &caifdevs->list);
  306 + mutex_unlock(&caifdevs->lock);
292 307 break;
293   - netdev_info(dev, "unregister\n");
294   - atomic_set(&caifd->state, what);
295   - caif_device_destroy(dev);
  308 + }
  309 +
  310 + synchronize_rcu();
  311 + dev_put(caifd->netdev);
  312 + free_percpu(caifd->pcpu_refcnt);
  313 + kfree(caifd);
  314 +
  315 + mutex_unlock(&caifdevs->lock);
296 316 break;
297 317 }
298 318 return 0;
... ... @@ -304,8 +324,8 @@
304 324 };
305 325  
306 326 int caif_connect_client(struct caif_connect_request *conn_req,
307   - struct cflayer *client_layer, int *ifindex,
308   - int *headroom, int *tailroom)
  327 + struct cflayer *client_layer, int *ifindex,
  328 + int *headroom, int *tailroom)
309 329 {
310 330 struct cfctrl_link_param param;
311 331 int ret;
... ... @@ -315,8 +335,8 @@
315 335 return ret;
316 336 /* Hook up the adaptation layer. */
317 337 return cfcnfg_add_adaptation_layer(cfg, &param,
318   - client_layer, ifindex,
319   - headroom, tailroom);
  338 + client_layer, ifindex,
  339 + headroom, tailroom);
320 340 }
321 341 EXPORT_SYMBOL(caif_connect_client);
322 342  
323 343  
324 344  
325 345  
... ... @@ -331,20 +351,40 @@
331 351 {
332 352 struct caif_net *caifn = net_generic(net, caif_net_id);
333 353 INIT_LIST_HEAD(&caifn->caifdevs.list);
334   - spin_lock_init(&caifn->caifdevs.lock);
  354 + mutex_init(&caifn->caifdevs.lock);
335 355 return 0;
336 356 }
337 357  
338 358 static void caif_exit_net(struct net *net)
339 359 {
340   - struct net_device *dev;
  360 + struct caif_device_entry *caifd, *tmp;
  361 + struct caif_device_entry_list *caifdevs =
  362 + caif_device_list(net);
  363 +
341 364 rtnl_lock();
342   - for_each_netdev(net, dev) {
343   - if (dev->type != ARPHRD_CAIF)
344   - continue;
345   - dev_close(dev);
346   - caif_device_destroy(dev);
  365 + mutex_lock(&caifdevs->lock);
  366 +
  367 + list_for_each_entry_safe(caifd, tmp, &caifdevs->list, list) {
  368 + int i = 0;
  369 + list_del_rcu(&caifd->list);
  370 + cfcnfg_set_phy_state(cfg, &caifd->layer, false);
  371 +
  372 + while (i < 10 &&
  373 + (caifd_refcnt_read(caifd) != 0 ||
  374 + cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0)) {
  375 +
  376 + pr_info("Wait for device inuse\n");
  377 + msleep(250);
  378 + i++;
  379 + }
  380 + synchronize_rcu();
  381 + dev_put(caifd->netdev);
  382 + free_percpu(caifd->pcpu_refcnt);
  383 + kfree(caifd);
347 384 }
  385 +
  386 +
  387 + mutex_unlock(&caifdevs->lock);
348 388 rtnl_unlock();
349 389 }
350 390  
... ... @@ -359,6 +399,7 @@
359 399 static int __init caif_device_init(void)
360 400 {
361 401 int result;
  402 +
362 403 cfg = cfcnfg_create();
363 404 if (!cfg) {
364 405 pr_warn("can't create cfcnfg\n");