Blame view

net/switchdev/switchdev.c 16.7 KB
2874c5fd2   Thomas Gleixner   treewide: Replace...
1
  // SPDX-License-Identifier: GPL-2.0-or-later
007f790c8   Jiri Pirko   net: introduce ge...
2
3
  /*
   * net/switchdev/switchdev.c - Switch device API
7ea6eb3f5   Jiri Pirko   switchdev: introd...
4
   * Copyright (c) 2014-2015 Jiri Pirko <jiri@resnulli.us>
f8f214715   Scott Feldman   switchdev: add ne...
5
   * Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com>
007f790c8   Jiri Pirko   net: introduce ge...
6
7
8
9
10
   */
  
  #include <linux/kernel.h>
  #include <linux/types.h>
  #include <linux/init.h>
03bf0c281   Jiri Pirko   switchdev: introd...
11
12
  #include <linux/mutex.h>
  #include <linux/notifier.h>
007f790c8   Jiri Pirko   net: introduce ge...
13
  #include <linux/netdevice.h>
850d0cbc9   Jiri Pirko   switchdev: remove...
14
  #include <linux/etherdevice.h>
47f8328bb   Scott Feldman   switchdev: add ne...
15
  #include <linux/if_bridge.h>
7ea6eb3f5   Jiri Pirko   switchdev: introd...
16
  #include <linux/list.h>
793f40147   Jiri Pirko   switchdev: introd...
17
  #include <linux/workqueue.h>
87aaf2cae   Nikolay Aleksandrov   switchdev: check ...
18
  #include <linux/if_vlan.h>
4f2c6ae5c   Ido Schimmel   switchdev: Requir...
19
  #include <linux/rtnetlink.h>
007f790c8   Jiri Pirko   net: introduce ge...
20
  #include <net/switchdev.h>
793f40147   Jiri Pirko   switchdev: introd...
21
22
23
24
25
26
27
28
29
30
/* Deferred switchdev ops are queued on this list and later flushed from
 * process context under rtnl_lock by switchdev_deferred_process().
 */
static LIST_HEAD(deferred);
static DEFINE_SPINLOCK(deferred_lock);	/* protects the deferred list */

/* Callback invoked for each queued item when the queue is flushed. */
typedef void switchdev_deferred_func_t(struct net_device *dev,
				       const void *data);

struct switchdev_deferred_item {
	struct list_head list;			/* linkage on the deferred list */
	struct net_device *dev;			/* held via dev_hold() while queued */
	switchdev_deferred_func_t *func;	/* op to run at flush time */
	unsigned long data[];			/* copied op payload (attr or obj) */
};
  
  static struct switchdev_deferred_item *switchdev_deferred_dequeue(void)
  {
  	struct switchdev_deferred_item *dfitem;
  
  	spin_lock_bh(&deferred_lock);
  	if (list_empty(&deferred)) {
  		dfitem = NULL;
  		goto unlock;
  	}
  	dfitem = list_first_entry(&deferred,
  				  struct switchdev_deferred_item, list);
  	list_del(&dfitem->list);
  unlock:
  	spin_unlock_bh(&deferred_lock);
  	return dfitem;
  }
  
  /**
   *	switchdev_deferred_process - Process ops in deferred queue
   *
   *	Called to flush the ops currently queued in deferred ops queue.
   *	rtnl_lock must be held.
   */
  void switchdev_deferred_process(void)
  {
  	struct switchdev_deferred_item *dfitem;
  
  	ASSERT_RTNL();
  
  	while ((dfitem = switchdev_deferred_dequeue())) {
  		dfitem->func(dfitem->dev, dfitem->data);
  		dev_put(dfitem->dev);
  		kfree(dfitem);
  	}
  }
  EXPORT_SYMBOL_GPL(switchdev_deferred_process);
  
  static void switchdev_deferred_process_work(struct work_struct *work)
  {
  	rtnl_lock();
  	switchdev_deferred_process();
  	rtnl_unlock();
  }
  
  static DECLARE_WORK(deferred_process_work, switchdev_deferred_process_work);
  
  static int switchdev_deferred_enqueue(struct net_device *dev,
  				      const void *data, size_t data_len,
  				      switchdev_deferred_func_t *func)
  {
  	struct switchdev_deferred_item *dfitem;
  
  	dfitem = kmalloc(sizeof(*dfitem) + data_len, GFP_ATOMIC);
  	if (!dfitem)
  		return -ENOMEM;
  	dfitem->dev = dev;
  	dfitem->func = func;
  	memcpy(dfitem->data, data, data_len);
  	dev_hold(dev);
  	spin_lock_bh(&deferred_lock);
  	list_add_tail(&dfitem->list, &deferred);
  	spin_unlock_bh(&deferred_lock);
  	schedule_work(&deferred_process_work);
  	return 0;
  }
d45224d60   Florian Fainelli   net: switchdev: R...
99
100
101
102
  static int switchdev_port_attr_notify(enum switchdev_notifier_type nt,
  				      struct net_device *dev,
  				      const struct switchdev_attr *attr,
  				      struct switchdev_trans *trans)
3094333d9   Scott Feldman   switchdev: introd...
103
  {
d45224d60   Florian Fainelli   net: switchdev: R...
104
105
  	int err;
  	int rc;
3094333d9   Scott Feldman   switchdev: introd...
106

d45224d60   Florian Fainelli   net: switchdev: R...
107
108
109
110
111
  	struct switchdev_notifier_port_attr_info attr_info = {
  		.attr = attr,
  		.trans = trans,
  		.handled = false,
  	};
3094333d9   Scott Feldman   switchdev: introd...
112

d45224d60   Florian Fainelli   net: switchdev: R...
113
114
115
116
117
118
  	rc = call_switchdev_blocking_notifiers(nt, dev,
  					       &attr_info.info, NULL);
  	err = notifier_to_errno(rc);
  	if (err) {
  		WARN_ON(!attr_info.handled);
  		return err;
3094333d9   Scott Feldman   switchdev: introd...
119
  	}
d45224d60   Florian Fainelli   net: switchdev: R...
120
121
  	if (!attr_info.handled)
  		return -EOPNOTSUPP;
464314ea6   Scott Feldman   switchdev: skip o...
122

d45224d60   Florian Fainelli   net: switchdev: R...
123
  	return 0;
3094333d9   Scott Feldman   switchdev: introd...
124
  }
0bc05d585   Jiri Pirko   switchdev: allow ...
125
126
  static int switchdev_port_attr_set_now(struct net_device *dev,
  				       const struct switchdev_attr *attr)
3094333d9   Scott Feldman   switchdev: introd...
127
  {
7ea6eb3f5   Jiri Pirko   switchdev: introd...
128
  	struct switchdev_trans trans;
3094333d9   Scott Feldman   switchdev: introd...
129
  	int err;
3094333d9   Scott Feldman   switchdev: introd...
130
131
132
133
134
135
  	/* Phase I: prepare for attr set. Driver/device should fail
  	 * here if there are going to be issues in the commit phase,
  	 * such as lack of resources or support.  The driver/device
  	 * should reserve resources needed for the commit phase here,
  	 * but should not commit the attr.
  	 */
f623ab7f5   Jiri Pirko   switchdev: reduce...
136
  	trans.ph_prepare = true;
d45224d60   Florian Fainelli   net: switchdev: R...
137
138
  	err = switchdev_port_attr_notify(SWITCHDEV_PORT_ATTR_SET, dev, attr,
  					 &trans);
91cf8ecef   Florian Fainelli   switchdev: Remove...
139
  	if (err)
3094333d9   Scott Feldman   switchdev: introd...
140
  		return err;
3094333d9   Scott Feldman   switchdev: introd...
141
142
143
144
145
  
  	/* Phase II: commit attr set.  This cannot fail as a fault
  	 * of driver/device.  If it does, it's a bug in the driver/device
  	 * because the driver said everythings was OK in phase I.
  	 */
f623ab7f5   Jiri Pirko   switchdev: reduce...
146
  	trans.ph_prepare = false;
d45224d60   Florian Fainelli   net: switchdev: R...
147
148
  	err = switchdev_port_attr_notify(SWITCHDEV_PORT_ATTR_SET, dev, attr,
  					 &trans);
e9fdaec0e   Scott Feldman   switchdev: change...
149
150
151
  	WARN(err, "%s: Commit of attribute (id=%d) failed.
  ",
  	     dev->name, attr->id);
3094333d9   Scott Feldman   switchdev: introd...
152
153
154
  
  	return err;
  }
0bc05d585   Jiri Pirko   switchdev: allow ...
155
156
157
158
159
160
161
162
163
164
165
166
  
  static void switchdev_port_attr_set_deferred(struct net_device *dev,
  					     const void *data)
  {
  	const struct switchdev_attr *attr = data;
  	int err;
  
  	err = switchdev_port_attr_set_now(dev, attr);
  	if (err && err != -EOPNOTSUPP)
  		netdev_err(dev, "failed (err=%d) to set attribute (id=%d)
  ",
  			   err, attr->id);
7ceb2afbd   Elad Raz   switchdev: Adding...
167
168
  	if (attr->complete)
  		attr->complete(dev, err, attr->complete_priv);
0bc05d585   Jiri Pirko   switchdev: allow ...
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
  }
  
/* Queue the attribute set for deferred processing.  A copy of *attr is
 * made by switchdev_deferred_enqueue(), so the caller's attr may live on
 * the stack and go away after return.
 */
static int switchdev_port_attr_set_defer(struct net_device *dev,
					 const struct switchdev_attr *attr)
{
	return switchdev_deferred_enqueue(dev, attr, sizeof(*attr),
					  switchdev_port_attr_set_deferred);
}
  
  /**
   *	switchdev_port_attr_set - Set port attribute
   *
   *	@dev: port device
   *	@attr: attribute to set
   *
   *	Use a 2-phase prepare-commit transaction model to ensure
   *	system is not left in a partially updated state due to
   *	failure from driver/device.
   *
   *	rtnl_lock must be held and must not be in atomic section,
   *	in case SWITCHDEV_F_DEFER flag is not set.
   */
  int switchdev_port_attr_set(struct net_device *dev,
  			    const struct switchdev_attr *attr)
  {
  	if (attr->flags & SWITCHDEV_F_DEFER)
  		return switchdev_port_attr_set_defer(dev, attr);
  	ASSERT_RTNL();
  	return switchdev_port_attr_set_now(dev, attr);
  }
3094333d9   Scott Feldman   switchdev: introd...
199
  EXPORT_SYMBOL_GPL(switchdev_port_attr_set);
e258d919b   Scott Feldman   switchdev: fix: p...
200
201
202
203
204
  static size_t switchdev_obj_size(const struct switchdev_obj *obj)
  {
  	switch (obj->id) {
  	case SWITCHDEV_OBJ_ID_PORT_VLAN:
  		return sizeof(struct switchdev_obj_port_vlan);
4d41e1259   Elad Raz   switchdev: Adding...
205
206
  	case SWITCHDEV_OBJ_ID_PORT_MDB:
  		return sizeof(struct switchdev_obj_port_mdb);
47d5b6db2   Andrew Lunn   net: bridge: Add/...
207
208
  	case SWITCHDEV_OBJ_ID_HOST_MDB:
  		return sizeof(struct switchdev_obj_port_mdb);
e258d919b   Scott Feldman   switchdev: fix: p...
209
210
211
212
213
  	default:
  		BUG();
  	}
  	return 0;
  }
d17d9f5e5   Petr Machata   switchdev: Replac...
214
215
216
  static int switchdev_port_obj_notify(enum switchdev_notifier_type nt,
  				     struct net_device *dev,
  				     const struct switchdev_obj *obj,
69b7320e1   Petr Machata   net: switchdev: A...
217
218
  				     struct switchdev_trans *trans,
  				     struct netlink_ext_ack *extack)
491d0f153   Scott Feldman   switchdev: introd...
219
  {
d17d9f5e5   Petr Machata   switchdev: Replac...
220
221
  	int rc;
  	int err;
491d0f153   Scott Feldman   switchdev: introd...
222

d17d9f5e5   Petr Machata   switchdev: Replac...
223
224
225
226
227
  	struct switchdev_notifier_port_obj_info obj_info = {
  		.obj = obj,
  		.trans = trans,
  		.handled = false,
  	};
491d0f153   Scott Feldman   switchdev: introd...
228

479c86dc5   Petr Machata   net: switchdev: A...
229
  	rc = call_switchdev_blocking_notifiers(nt, dev, &obj_info.info, extack);
d17d9f5e5   Petr Machata   switchdev: Replac...
230
231
232
233
  	err = notifier_to_errno(rc);
  	if (err) {
  		WARN_ON(!obj_info.handled);
  		return err;
491d0f153   Scott Feldman   switchdev: introd...
234
  	}
d17d9f5e5   Petr Machata   switchdev: Replac...
235
236
237
  	if (!obj_info.handled)
  		return -EOPNOTSUPP;
  	return 0;
491d0f153   Scott Feldman   switchdev: introd...
238
  }
4d429c5dd   Jiri Pirko   switchdev: introd...
239
  static int switchdev_port_obj_add_now(struct net_device *dev,
69b7320e1   Petr Machata   net: switchdev: A...
240
241
  				      const struct switchdev_obj *obj,
  				      struct netlink_ext_ack *extack)
491d0f153   Scott Feldman   switchdev: introd...
242
  {
7ea6eb3f5   Jiri Pirko   switchdev: introd...
243
  	struct switchdev_trans trans;
491d0f153   Scott Feldman   switchdev: introd...
244
245
246
247
248
249
250
251
252
253
  	int err;
  
  	ASSERT_RTNL();
  
  	/* Phase I: prepare for obj add. Driver/device should fail
  	 * here if there are going to be issues in the commit phase,
  	 * such as lack of resources or support.  The driver/device
  	 * should reserve resources needed for the commit phase here,
  	 * but should not commit the obj.
  	 */
f623ab7f5   Jiri Pirko   switchdev: reduce...
254
  	trans.ph_prepare = true;
d17d9f5e5   Petr Machata   switchdev: Replac...
255
  	err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
69b7320e1   Petr Machata   net: switchdev: A...
256
  					dev, obj, &trans, extack);
91cf8ecef   Florian Fainelli   switchdev: Remove...
257
  	if (err)
491d0f153   Scott Feldman   switchdev: introd...
258
  		return err;
491d0f153   Scott Feldman   switchdev: introd...
259
260
261
262
263
  
  	/* Phase II: commit obj add.  This cannot fail as a fault
  	 * of driver/device.  If it does, it's a bug in the driver/device
  	 * because the driver said everythings was OK in phase I.
  	 */
f623ab7f5   Jiri Pirko   switchdev: reduce...
264
  	trans.ph_prepare = false;
d17d9f5e5   Petr Machata   switchdev: Replac...
265
  	err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
69b7320e1   Petr Machata   net: switchdev: A...
266
  					dev, obj, &trans, extack);
9e8f4a548   Jiri Pirko   switchdev: push o...
267
268
  	WARN(err, "%s: Commit of object (id=%d) failed.
  ", dev->name, obj->id);
491d0f153   Scott Feldman   switchdev: introd...
269
270
271
  
  	return err;
  }
4d429c5dd   Jiri Pirko   switchdev: introd...
272
273
274
275
276
277
  
  static void switchdev_port_obj_add_deferred(struct net_device *dev,
  					    const void *data)
  {
  	const struct switchdev_obj *obj = data;
  	int err;
69b7320e1   Petr Machata   net: switchdev: A...
278
  	err = switchdev_port_obj_add_now(dev, obj, NULL);
4d429c5dd   Jiri Pirko   switchdev: introd...
279
280
281
282
  	if (err && err != -EOPNOTSUPP)
  		netdev_err(dev, "failed (err=%d) to add object (id=%d)
  ",
  			   err, obj->id);
7ceb2afbd   Elad Raz   switchdev: Adding...
283
284
  	if (obj->complete)
  		obj->complete(dev, err, obj->complete_priv);
4d429c5dd   Jiri Pirko   switchdev: introd...
285
286
287
288
289
  }
  
/* Queue the object add for deferred processing.  A copy of the concrete
 * object (switchdev_obj_size() bytes) is made by
 * switchdev_deferred_enqueue(), so the caller's obj may go away after
 * return.
 */
static int switchdev_port_obj_add_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_add_deferred);
}
491d0f153   Scott Feldman   switchdev: introd...
293
294
  
  /**
4d429c5dd   Jiri Pirko   switchdev: introd...
295
   *	switchdev_port_obj_add - Add port object
491d0f153   Scott Feldman   switchdev: introd...
296
297
   *
   *	@dev: port device
4d429c5dd   Jiri Pirko   switchdev: introd...
298
   *	@obj: object to add
c8af73f0b   Andrew Lunn   net: switchdev: k...
299
   *	@extack: netlink extended ack
4d429c5dd   Jiri Pirko   switchdev: introd...
300
301
302
303
304
305
306
   *
   *	Use a 2-phase prepare-commit transaction model to ensure
   *	system is not left in a partially updated state due to
   *	failure from driver/device.
   *
   *	rtnl_lock must be held and must not be in atomic section,
   *	in case SWITCHDEV_F_DEFER flag is not set.
491d0f153   Scott Feldman   switchdev: introd...
307
   */
4d429c5dd   Jiri Pirko   switchdev: introd...
308
  int switchdev_port_obj_add(struct net_device *dev,
69b7320e1   Petr Machata   net: switchdev: A...
309
310
  			   const struct switchdev_obj *obj,
  			   struct netlink_ext_ack *extack)
491d0f153   Scott Feldman   switchdev: introd...
311
  {
4d429c5dd   Jiri Pirko   switchdev: introd...
312
313
314
  	if (obj->flags & SWITCHDEV_F_DEFER)
  		return switchdev_port_obj_add_defer(dev, obj);
  	ASSERT_RTNL();
69b7320e1   Petr Machata   net: switchdev: A...
315
  	return switchdev_port_obj_add_now(dev, obj, extack);
4d429c5dd   Jiri Pirko   switchdev: introd...
316
317
318
319
320
321
  }
  EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
  
/* Synchronously delete a port object.  Deletion is issued as a single
 * notification (no prepare/commit transaction), hence the NULL trans and
 * extack.
 */
static int switchdev_port_obj_del_now(struct net_device *dev,
				      const struct switchdev_obj *obj)
{
	return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_DEL,
					 dev, obj, NULL, NULL);
}
4d429c5dd   Jiri Pirko   switchdev: introd...
325
326
327
328
329
330
331
332
333
334
335
336
  
  static void switchdev_port_obj_del_deferred(struct net_device *dev,
  					    const void *data)
  {
  	const struct switchdev_obj *obj = data;
  	int err;
  
  	err = switchdev_port_obj_del_now(dev, obj);
  	if (err && err != -EOPNOTSUPP)
  		netdev_err(dev, "failed (err=%d) to del object (id=%d)
  ",
  			   err, obj->id);
7ceb2afbd   Elad Raz   switchdev: Adding...
337
338
  	if (obj->complete)
  		obj->complete(dev, err, obj->complete_priv);
4d429c5dd   Jiri Pirko   switchdev: introd...
339
340
341
342
343
  }
  
/* Queue the object delete for deferred processing.  A copy of the
 * concrete object (switchdev_obj_size() bytes) is made by
 * switchdev_deferred_enqueue(), so the caller's obj may go away after
 * return.
 */
static int switchdev_port_obj_del_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_del_deferred);
}
  
  /**
   *	switchdev_port_obj_del - Delete port object
   *
   *	@dev: port device
4d429c5dd   Jiri Pirko   switchdev: introd...
352
353
354
355
356
357
358
359
360
361
362
363
364
   *	@obj: object to delete
   *
   *	rtnl_lock must be held and must not be in atomic section,
   *	in case SWITCHDEV_F_DEFER flag is not set.
   */
  int switchdev_port_obj_del(struct net_device *dev,
  			   const struct switchdev_obj *obj)
  {
  	if (obj->flags & SWITCHDEV_F_DEFER)
  		return switchdev_port_obj_del_defer(dev, obj);
  	ASSERT_RTNL();
  	return switchdev_port_obj_del_now(dev, obj);
  }
491d0f153   Scott Feldman   switchdev: introd...
365
  EXPORT_SYMBOL_GPL(switchdev_port_obj_del);
ff5cf1001   Arkadi Sharshevsky   net: switchdev: C...
366
/* Atomic chain: callable from any context, notifiers must not sleep. */
static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain);
/* Blocking chain: used for port attr/obj notifications that may sleep. */
static BLOCKING_NOTIFIER_HEAD(switchdev_blocking_notif_chain);
03bf0c281   Jiri Pirko   switchdev: introd...
368
369
  
/**
 *	register_switchdev_notifier - Register notifier
 *	@nb: notifier_block
 *
 *	Register switch device notifier.  The notifier is attached to the
 *	atomic chain, so its callback must not sleep.
 */
int register_switchdev_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(register_switchdev_notifier);
03bf0c281   Jiri Pirko   switchdev: introd...
380
381
  
/**
 *	unregister_switchdev_notifier - Unregister notifier
 *	@nb: notifier_block
 *
 *	Unregister switch device notifier previously registered with
 *	register_switchdev_notifier().
 */
int unregister_switchdev_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);
03bf0c281   Jiri Pirko   switchdev: introd...
392
393
  
/**
 *	call_switchdev_notifiers - Call notifiers
 *	@val: value passed unmodified to notifier function
 *	@dev: port device
 *	@info: notifier information data
 *	@extack: netlink extended ack
 *
 *	Call all network notifier blocks on the atomic switchdev chain.
 *	@info is filled in with @dev and @extack before dispatch.
 */
int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
			     struct switchdev_notifier_info *info,
			     struct netlink_ext_ack *extack)
{
	info->dev = dev;
	info->extack = extack;
	return atomic_notifier_call_chain(&switchdev_notif_chain, val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
8a44dbb20   Roopa Prabhu   swdevice: add new...
410

a93e3b172   Petr Machata   switchdev: Add a ...
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
/* Register a notifier on the blocking (may-sleep) switchdev chain. */
int register_switchdev_blocking_notifier(struct notifier_block *nb)
{
	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;

	return blocking_notifier_chain_register(chain, nb);
}
EXPORT_SYMBOL_GPL(register_switchdev_blocking_notifier);

/* Remove a notifier previously registered with
 * register_switchdev_blocking_notifier().
 */
int unregister_switchdev_blocking_notifier(struct notifier_block *nb)
{
	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;

	return blocking_notifier_chain_unregister(chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_switchdev_blocking_notifier);

/* Like call_switchdev_notifiers() but dispatches on the blocking chain;
 * fills in info->dev and info->extack before the call.
 */
int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev,
				      struct switchdev_notifier_info *info,
				      struct netlink_ext_ack *extack)
{
	info->dev = dev;
	info->extack = extack;
	return blocking_notifier_call_chain(&switchdev_blocking_notif_chain,
					    val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_blocking_notifiers);
f30f0601e   Petr Machata   switchdev: Add he...
437
438
439
440
441
  static int __switchdev_handle_port_obj_add(struct net_device *dev,
  			struct switchdev_notifier_port_obj_info *port_obj_info,
  			bool (*check_cb)(const struct net_device *dev),
  			int (*add_cb)(struct net_device *dev,
  				      const struct switchdev_obj *obj,
692135135   Petr Machata   net: switchdev: A...
442
443
  				      struct switchdev_trans *trans,
  				      struct netlink_ext_ack *extack))
f30f0601e   Petr Machata   switchdev: Add he...
444
  {
692135135   Petr Machata   net: switchdev: A...
445
  	struct netlink_ext_ack *extack;
f30f0601e   Petr Machata   switchdev: Add he...
446
447
448
  	struct net_device *lower_dev;
  	struct list_head *iter;
  	int err = -EOPNOTSUPP;
692135135   Petr Machata   net: switchdev: A...
449
  	extack = switchdev_notifier_info_to_extack(&port_obj_info->info);
f30f0601e   Petr Machata   switchdev: Add he...
450
451
452
  	if (check_cb(dev)) {
  		/* This flag is only checked if the return value is success. */
  		port_obj_info->handled = true;
692135135   Petr Machata   net: switchdev: A...
453
454
  		return add_cb(dev, port_obj_info->obj, port_obj_info->trans,
  			      extack);
f30f0601e   Petr Machata   switchdev: Add he...
455
456
457
458
459
460
461
462
463
464
  	}
  
  	/* Switch ports might be stacked under e.g. a LAG. Ignore the
  	 * unsupported devices, another driver might be able to handle them. But
  	 * propagate to the callers any hard errors.
  	 *
  	 * If the driver does its own bookkeeping of stacked ports, it's not
  	 * necessary to go through this helper.
  	 */
  	netdev_for_each_lower_dev(dev, lower_dev, iter) {
07c6f9805   Russell King   net: switchdev: d...
465
466
  		if (netif_is_bridge_master(lower_dev))
  			continue;
f30f0601e   Petr Machata   switchdev: Add he...
467
468
469
470
471
472
473
474
475
476
477
478
479
480
  		err = __switchdev_handle_port_obj_add(lower_dev, port_obj_info,
  						      check_cb, add_cb);
  		if (err && err != -EOPNOTSUPP)
  			return err;
  	}
  
  	return err;
  }
  
  int switchdev_handle_port_obj_add(struct net_device *dev,
  			struct switchdev_notifier_port_obj_info *port_obj_info,
  			bool (*check_cb)(const struct net_device *dev),
  			int (*add_cb)(struct net_device *dev,
  				      const struct switchdev_obj *obj,
692135135   Petr Machata   net: switchdev: A...
481
482
  				      struct switchdev_trans *trans,
  				      struct netlink_ext_ack *extack))
f30f0601e   Petr Machata   switchdev: Add he...
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
  {
  	int err;
  
  	err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
  					      add_cb);
  	if (err == -EOPNOTSUPP)
  		err = 0;
  	return err;
  }
  EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add);
  
  static int __switchdev_handle_port_obj_del(struct net_device *dev,
  			struct switchdev_notifier_port_obj_info *port_obj_info,
  			bool (*check_cb)(const struct net_device *dev),
  			int (*del_cb)(struct net_device *dev,
  				      const struct switchdev_obj *obj))
  {
  	struct net_device *lower_dev;
  	struct list_head *iter;
  	int err = -EOPNOTSUPP;
  
  	if (check_cb(dev)) {
  		/* This flag is only checked if the return value is success. */
  		port_obj_info->handled = true;
  		return del_cb(dev, port_obj_info->obj);
  	}
  
  	/* Switch ports might be stacked under e.g. a LAG. Ignore the
  	 * unsupported devices, another driver might be able to handle them. But
  	 * propagate to the callers any hard errors.
  	 *
  	 * If the driver does its own bookkeeping of stacked ports, it's not
  	 * necessary to go through this helper.
  	 */
  	netdev_for_each_lower_dev(dev, lower_dev, iter) {
07c6f9805   Russell King   net: switchdev: d...
518
519
  		if (netif_is_bridge_master(lower_dev))
  			continue;
f30f0601e   Petr Machata   switchdev: Add he...
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
  		err = __switchdev_handle_port_obj_del(lower_dev, port_obj_info,
  						      check_cb, del_cb);
  		if (err && err != -EOPNOTSUPP)
  			return err;
  	}
  
  	return err;
  }
  
  int switchdev_handle_port_obj_del(struct net_device *dev,
  			struct switchdev_notifier_port_obj_info *port_obj_info,
  			bool (*check_cb)(const struct net_device *dev),
  			int (*del_cb)(struct net_device *dev,
  				      const struct switchdev_obj *obj))
  {
  	int err;
  
  	err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
  					      del_cb);
  	if (err == -EOPNOTSUPP)
  		err = 0;
  	return err;
  }
  EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del);
1cb33af1f   Florian Fainelli   switchdev: Add SW...
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
  
  static int __switchdev_handle_port_attr_set(struct net_device *dev,
  			struct switchdev_notifier_port_attr_info *port_attr_info,
  			bool (*check_cb)(const struct net_device *dev),
  			int (*set_cb)(struct net_device *dev,
  				      const struct switchdev_attr *attr,
  				      struct switchdev_trans *trans))
  {
  	struct net_device *lower_dev;
  	struct list_head *iter;
  	int err = -EOPNOTSUPP;
  
  	if (check_cb(dev)) {
  		port_attr_info->handled = true;
  		return set_cb(dev, port_attr_info->attr,
  			      port_attr_info->trans);
  	}
  
  	/* Switch ports might be stacked under e.g. a LAG. Ignore the
  	 * unsupported devices, another driver might be able to handle them. But
  	 * propagate to the callers any hard errors.
  	 *
  	 * If the driver does its own bookkeeping of stacked ports, it's not
  	 * necessary to go through this helper.
  	 */
  	netdev_for_each_lower_dev(dev, lower_dev, iter) {
07c6f9805   Russell King   net: switchdev: d...
570
571
  		if (netif_is_bridge_master(lower_dev))
  			continue;
1cb33af1f   Florian Fainelli   switchdev: Add SW...
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
  		err = __switchdev_handle_port_attr_set(lower_dev, port_attr_info,
  						       check_cb, set_cb);
  		if (err && err != -EOPNOTSUPP)
  			return err;
  	}
  
  	return err;
  }
  
  int switchdev_handle_port_attr_set(struct net_device *dev,
  			struct switchdev_notifier_port_attr_info *port_attr_info,
  			bool (*check_cb)(const struct net_device *dev),
  			int (*set_cb)(struct net_device *dev,
  				      const struct switchdev_attr *attr,
  				      struct switchdev_trans *trans))
  {
  	int err;
  
  	err = __switchdev_handle_port_attr_set(dev, port_attr_info, check_cb,
  					       set_cb);
  	if (err == -EOPNOTSUPP)
  		err = 0;
  	return err;
  }
  EXPORT_SYMBOL_GPL(switchdev_handle_port_attr_set);