Commit b3908e22ad8bb6074934496ef171fd83605d7d3e

Authored by Thomas Graf
Committed by David S. Miller
1 parent 39912f9cf9

dcbnl: Use BUG_ON() instead of BUG()

Signed-off-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>

Showing 1 changed file with 1 addition and 4 deletions
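The change collapses an open-coded NULL check that called BUG() into the equivalent BUG_ON() assertion. A minimal sketch of the idiom in kernel C follows; the build_dcb_header() wrapper is a hypothetical illustration for this commit page, not part of the patched file:

#include <linux/bug.h>
#include <net/netlink.h>

/* Hypothetical wrapper showing the idiom this commit applies:
 * "if (cond) BUG();" becomes "BUG_ON(cond)", which triggers a
 * kernel BUG/oops when the condition is true.
 */
static struct nlmsghdr *build_dcb_header(struct sk_buff *skb, u32 port,
					 u32 seq, int type, int payload,
					 u32 flags)
{
	struct nlmsghdr *nlh = nlmsg_put(skb, port, seq, type, payload, flags);

	/* The header should always fit in a freshly allocated message;
	 * if it does not, the allocation size is buggy, so fail loudly.
	 */
	BUG_ON(!nlh);

	return nlh;
}

BUG_ON() keeps the failure semantics identical while dropping the explicit branch and its braces, which is what shrinks the diff to one added and four deleted lines.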

1 /* 1 /*
2 * Copyright (c) 2008-2011, Intel Corporation. 2 * Copyright (c) 2008-2011, Intel Corporation.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify it 4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License, 5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation. 6 * version 2, as published by the Free Software Foundation.
7 * 7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details. 11 * more details.
12 * 12 *
13 * You should have received a copy of the GNU General Public License along with 13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple 14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA. 15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 * 16 *
17 * Author: Lucy Liu <lucy.liu@intel.com> 17 * Author: Lucy Liu <lucy.liu@intel.com>
18 */ 18 */
19 19
20 #include <linux/netdevice.h> 20 #include <linux/netdevice.h>
21 #include <linux/netlink.h> 21 #include <linux/netlink.h>
22 #include <linux/slab.h> 22 #include <linux/slab.h>
23 #include <net/netlink.h> 23 #include <net/netlink.h>
24 #include <net/rtnetlink.h> 24 #include <net/rtnetlink.h>
25 #include <linux/dcbnl.h> 25 #include <linux/dcbnl.h>
26 #include <net/dcbevent.h> 26 #include <net/dcbevent.h>
27 #include <linux/rtnetlink.h> 27 #include <linux/rtnetlink.h>
28 #include <linux/module.h> 28 #include <linux/module.h>
29 #include <net/sock.h> 29 #include <net/sock.h>
30 30
31 /** 31 /**
32 * Data Center Bridging (DCB) is a collection of Ethernet enhancements 32 * Data Center Bridging (DCB) is a collection of Ethernet enhancements
33 * intended to allow network traffic with differing requirements 33 * intended to allow network traffic with differing requirements
34 * (highly reliable, no drops vs. best effort vs. low latency) to operate 34 * (highly reliable, no drops vs. best effort vs. low latency) to operate
35 * and co-exist on Ethernet. Current DCB features are: 35 * and co-exist on Ethernet. Current DCB features are:
36 * 36 *
37 * Enhanced Transmission Selection (aka Priority Grouping [PG]) - provides a 37 * Enhanced Transmission Selection (aka Priority Grouping [PG]) - provides a
38 * framework for assigning bandwidth guarantees to traffic classes. 38 * framework for assigning bandwidth guarantees to traffic classes.
39 * 39 *
40 * Priority-based Flow Control (PFC) - provides a flow control mechanism which 40 * Priority-based Flow Control (PFC) - provides a flow control mechanism which
41 * can work independently for each 802.1p priority. 41 * can work independently for each 802.1p priority.
42 * 42 *
43 * Congestion Notification - provides a mechanism for end-to-end congestion 43 * Congestion Notification - provides a mechanism for end-to-end congestion
44 * control for protocols which do not have built-in congestion management. 44 * control for protocols which do not have built-in congestion management.
45 * 45 *
46 * More information about the emerging standards for these Ethernet features 46 * More information about the emerging standards for these Ethernet features
47 * can be found at: http://www.ieee802.org/1/pages/dcbridges.html 47 * can be found at: http://www.ieee802.org/1/pages/dcbridges.html
48 * 48 *
49 * This file implements an rtnetlink interface to allow configuration of DCB 49 * This file implements an rtnetlink interface to allow configuration of DCB
50 * features for capable devices. 50 * features for capable devices.
51 */ 51 */
52 52
53 MODULE_AUTHOR("Lucy Liu, <lucy.liu@intel.com>"); 53 MODULE_AUTHOR("Lucy Liu, <lucy.liu@intel.com>");
54 MODULE_DESCRIPTION("Data Center Bridging netlink interface"); 54 MODULE_DESCRIPTION("Data Center Bridging netlink interface");
55 MODULE_LICENSE("GPL"); 55 MODULE_LICENSE("GPL");
56 56
57 /**************** DCB attribute policies *************************************/ 57 /**************** DCB attribute policies *************************************/
58 58
59 /* DCB netlink attributes policy */ 59 /* DCB netlink attributes policy */
60 static const struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = { 60 static const struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = {
61 [DCB_ATTR_IFNAME] = {.type = NLA_NUL_STRING, .len = IFNAMSIZ - 1}, 61 [DCB_ATTR_IFNAME] = {.type = NLA_NUL_STRING, .len = IFNAMSIZ - 1},
62 [DCB_ATTR_STATE] = {.type = NLA_U8}, 62 [DCB_ATTR_STATE] = {.type = NLA_U8},
63 [DCB_ATTR_PFC_CFG] = {.type = NLA_NESTED}, 63 [DCB_ATTR_PFC_CFG] = {.type = NLA_NESTED},
64 [DCB_ATTR_PG_CFG] = {.type = NLA_NESTED}, 64 [DCB_ATTR_PG_CFG] = {.type = NLA_NESTED},
65 [DCB_ATTR_SET_ALL] = {.type = NLA_U8}, 65 [DCB_ATTR_SET_ALL] = {.type = NLA_U8},
66 [DCB_ATTR_PERM_HWADDR] = {.type = NLA_FLAG}, 66 [DCB_ATTR_PERM_HWADDR] = {.type = NLA_FLAG},
67 [DCB_ATTR_CAP] = {.type = NLA_NESTED}, 67 [DCB_ATTR_CAP] = {.type = NLA_NESTED},
68 [DCB_ATTR_PFC_STATE] = {.type = NLA_U8}, 68 [DCB_ATTR_PFC_STATE] = {.type = NLA_U8},
69 [DCB_ATTR_BCN] = {.type = NLA_NESTED}, 69 [DCB_ATTR_BCN] = {.type = NLA_NESTED},
70 [DCB_ATTR_APP] = {.type = NLA_NESTED}, 70 [DCB_ATTR_APP] = {.type = NLA_NESTED},
71 [DCB_ATTR_IEEE] = {.type = NLA_NESTED}, 71 [DCB_ATTR_IEEE] = {.type = NLA_NESTED},
72 [DCB_ATTR_DCBX] = {.type = NLA_U8}, 72 [DCB_ATTR_DCBX] = {.type = NLA_U8},
73 [DCB_ATTR_FEATCFG] = {.type = NLA_NESTED}, 73 [DCB_ATTR_FEATCFG] = {.type = NLA_NESTED},
74 }; 74 };
75 75
76 /* DCB priority flow control to User Priority nested attributes */ 76 /* DCB priority flow control to User Priority nested attributes */
77 static const struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = { 77 static const struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = {
78 [DCB_PFC_UP_ATTR_0] = {.type = NLA_U8}, 78 [DCB_PFC_UP_ATTR_0] = {.type = NLA_U8},
79 [DCB_PFC_UP_ATTR_1] = {.type = NLA_U8}, 79 [DCB_PFC_UP_ATTR_1] = {.type = NLA_U8},
80 [DCB_PFC_UP_ATTR_2] = {.type = NLA_U8}, 80 [DCB_PFC_UP_ATTR_2] = {.type = NLA_U8},
81 [DCB_PFC_UP_ATTR_3] = {.type = NLA_U8}, 81 [DCB_PFC_UP_ATTR_3] = {.type = NLA_U8},
82 [DCB_PFC_UP_ATTR_4] = {.type = NLA_U8}, 82 [DCB_PFC_UP_ATTR_4] = {.type = NLA_U8},
83 [DCB_PFC_UP_ATTR_5] = {.type = NLA_U8}, 83 [DCB_PFC_UP_ATTR_5] = {.type = NLA_U8},
84 [DCB_PFC_UP_ATTR_6] = {.type = NLA_U8}, 84 [DCB_PFC_UP_ATTR_6] = {.type = NLA_U8},
85 [DCB_PFC_UP_ATTR_7] = {.type = NLA_U8}, 85 [DCB_PFC_UP_ATTR_7] = {.type = NLA_U8},
86 [DCB_PFC_UP_ATTR_ALL] = {.type = NLA_FLAG}, 86 [DCB_PFC_UP_ATTR_ALL] = {.type = NLA_FLAG},
87 }; 87 };
88 88
89 /* DCB priority grouping nested attributes */ 89 /* DCB priority grouping nested attributes */
90 static const struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = { 90 static const struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = {
91 [DCB_PG_ATTR_TC_0] = {.type = NLA_NESTED}, 91 [DCB_PG_ATTR_TC_0] = {.type = NLA_NESTED},
92 [DCB_PG_ATTR_TC_1] = {.type = NLA_NESTED}, 92 [DCB_PG_ATTR_TC_1] = {.type = NLA_NESTED},
93 [DCB_PG_ATTR_TC_2] = {.type = NLA_NESTED}, 93 [DCB_PG_ATTR_TC_2] = {.type = NLA_NESTED},
94 [DCB_PG_ATTR_TC_3] = {.type = NLA_NESTED}, 94 [DCB_PG_ATTR_TC_3] = {.type = NLA_NESTED},
95 [DCB_PG_ATTR_TC_4] = {.type = NLA_NESTED}, 95 [DCB_PG_ATTR_TC_4] = {.type = NLA_NESTED},
96 [DCB_PG_ATTR_TC_5] = {.type = NLA_NESTED}, 96 [DCB_PG_ATTR_TC_5] = {.type = NLA_NESTED},
97 [DCB_PG_ATTR_TC_6] = {.type = NLA_NESTED}, 97 [DCB_PG_ATTR_TC_6] = {.type = NLA_NESTED},
98 [DCB_PG_ATTR_TC_7] = {.type = NLA_NESTED}, 98 [DCB_PG_ATTR_TC_7] = {.type = NLA_NESTED},
99 [DCB_PG_ATTR_TC_ALL] = {.type = NLA_NESTED}, 99 [DCB_PG_ATTR_TC_ALL] = {.type = NLA_NESTED},
100 [DCB_PG_ATTR_BW_ID_0] = {.type = NLA_U8}, 100 [DCB_PG_ATTR_BW_ID_0] = {.type = NLA_U8},
101 [DCB_PG_ATTR_BW_ID_1] = {.type = NLA_U8}, 101 [DCB_PG_ATTR_BW_ID_1] = {.type = NLA_U8},
102 [DCB_PG_ATTR_BW_ID_2] = {.type = NLA_U8}, 102 [DCB_PG_ATTR_BW_ID_2] = {.type = NLA_U8},
103 [DCB_PG_ATTR_BW_ID_3] = {.type = NLA_U8}, 103 [DCB_PG_ATTR_BW_ID_3] = {.type = NLA_U8},
104 [DCB_PG_ATTR_BW_ID_4] = {.type = NLA_U8}, 104 [DCB_PG_ATTR_BW_ID_4] = {.type = NLA_U8},
105 [DCB_PG_ATTR_BW_ID_5] = {.type = NLA_U8}, 105 [DCB_PG_ATTR_BW_ID_5] = {.type = NLA_U8},
106 [DCB_PG_ATTR_BW_ID_6] = {.type = NLA_U8}, 106 [DCB_PG_ATTR_BW_ID_6] = {.type = NLA_U8},
107 [DCB_PG_ATTR_BW_ID_7] = {.type = NLA_U8}, 107 [DCB_PG_ATTR_BW_ID_7] = {.type = NLA_U8},
108 [DCB_PG_ATTR_BW_ID_ALL] = {.type = NLA_FLAG}, 108 [DCB_PG_ATTR_BW_ID_ALL] = {.type = NLA_FLAG},
109 }; 109 };
110 110
111 /* DCB traffic class nested attributes. */ 111 /* DCB traffic class nested attributes. */
112 static const struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = { 112 static const struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = {
113 [DCB_TC_ATTR_PARAM_PGID] = {.type = NLA_U8}, 113 [DCB_TC_ATTR_PARAM_PGID] = {.type = NLA_U8},
114 [DCB_TC_ATTR_PARAM_UP_MAPPING] = {.type = NLA_U8}, 114 [DCB_TC_ATTR_PARAM_UP_MAPPING] = {.type = NLA_U8},
115 [DCB_TC_ATTR_PARAM_STRICT_PRIO] = {.type = NLA_U8}, 115 [DCB_TC_ATTR_PARAM_STRICT_PRIO] = {.type = NLA_U8},
116 [DCB_TC_ATTR_PARAM_BW_PCT] = {.type = NLA_U8}, 116 [DCB_TC_ATTR_PARAM_BW_PCT] = {.type = NLA_U8},
117 [DCB_TC_ATTR_PARAM_ALL] = {.type = NLA_FLAG}, 117 [DCB_TC_ATTR_PARAM_ALL] = {.type = NLA_FLAG},
118 }; 118 };
119 119
120 /* DCB capabilities nested attributes. */ 120 /* DCB capabilities nested attributes. */
121 static const struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = { 121 static const struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = {
122 [DCB_CAP_ATTR_ALL] = {.type = NLA_FLAG}, 122 [DCB_CAP_ATTR_ALL] = {.type = NLA_FLAG},
123 [DCB_CAP_ATTR_PG] = {.type = NLA_U8}, 123 [DCB_CAP_ATTR_PG] = {.type = NLA_U8},
124 [DCB_CAP_ATTR_PFC] = {.type = NLA_U8}, 124 [DCB_CAP_ATTR_PFC] = {.type = NLA_U8},
125 [DCB_CAP_ATTR_UP2TC] = {.type = NLA_U8}, 125 [DCB_CAP_ATTR_UP2TC] = {.type = NLA_U8},
126 [DCB_CAP_ATTR_PG_TCS] = {.type = NLA_U8}, 126 [DCB_CAP_ATTR_PG_TCS] = {.type = NLA_U8},
127 [DCB_CAP_ATTR_PFC_TCS] = {.type = NLA_U8}, 127 [DCB_CAP_ATTR_PFC_TCS] = {.type = NLA_U8},
128 [DCB_CAP_ATTR_GSP] = {.type = NLA_U8}, 128 [DCB_CAP_ATTR_GSP] = {.type = NLA_U8},
129 [DCB_CAP_ATTR_BCN] = {.type = NLA_U8}, 129 [DCB_CAP_ATTR_BCN] = {.type = NLA_U8},
130 [DCB_CAP_ATTR_DCBX] = {.type = NLA_U8}, 130 [DCB_CAP_ATTR_DCBX] = {.type = NLA_U8},
131 }; 131 };
132 132
133 /* DCB capabilities nested attributes. */ 133 /* DCB capabilities nested attributes. */
134 static const struct nla_policy dcbnl_numtcs_nest[DCB_NUMTCS_ATTR_MAX + 1] = { 134 static const struct nla_policy dcbnl_numtcs_nest[DCB_NUMTCS_ATTR_MAX + 1] = {
135 [DCB_NUMTCS_ATTR_ALL] = {.type = NLA_FLAG}, 135 [DCB_NUMTCS_ATTR_ALL] = {.type = NLA_FLAG},
136 [DCB_NUMTCS_ATTR_PG] = {.type = NLA_U8}, 136 [DCB_NUMTCS_ATTR_PG] = {.type = NLA_U8},
137 [DCB_NUMTCS_ATTR_PFC] = {.type = NLA_U8}, 137 [DCB_NUMTCS_ATTR_PFC] = {.type = NLA_U8},
138 }; 138 };
139 139
140 /* DCB BCN nested attributes. */ 140 /* DCB BCN nested attributes. */
141 static const struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = { 141 static const struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = {
142 [DCB_BCN_ATTR_RP_0] = {.type = NLA_U8}, 142 [DCB_BCN_ATTR_RP_0] = {.type = NLA_U8},
143 [DCB_BCN_ATTR_RP_1] = {.type = NLA_U8}, 143 [DCB_BCN_ATTR_RP_1] = {.type = NLA_U8},
144 [DCB_BCN_ATTR_RP_2] = {.type = NLA_U8}, 144 [DCB_BCN_ATTR_RP_2] = {.type = NLA_U8},
145 [DCB_BCN_ATTR_RP_3] = {.type = NLA_U8}, 145 [DCB_BCN_ATTR_RP_3] = {.type = NLA_U8},
146 [DCB_BCN_ATTR_RP_4] = {.type = NLA_U8}, 146 [DCB_BCN_ATTR_RP_4] = {.type = NLA_U8},
147 [DCB_BCN_ATTR_RP_5] = {.type = NLA_U8}, 147 [DCB_BCN_ATTR_RP_5] = {.type = NLA_U8},
148 [DCB_BCN_ATTR_RP_6] = {.type = NLA_U8}, 148 [DCB_BCN_ATTR_RP_6] = {.type = NLA_U8},
149 [DCB_BCN_ATTR_RP_7] = {.type = NLA_U8}, 149 [DCB_BCN_ATTR_RP_7] = {.type = NLA_U8},
150 [DCB_BCN_ATTR_RP_ALL] = {.type = NLA_FLAG}, 150 [DCB_BCN_ATTR_RP_ALL] = {.type = NLA_FLAG},
151 [DCB_BCN_ATTR_BCNA_0] = {.type = NLA_U32}, 151 [DCB_BCN_ATTR_BCNA_0] = {.type = NLA_U32},
152 [DCB_BCN_ATTR_BCNA_1] = {.type = NLA_U32}, 152 [DCB_BCN_ATTR_BCNA_1] = {.type = NLA_U32},
153 [DCB_BCN_ATTR_ALPHA] = {.type = NLA_U32}, 153 [DCB_BCN_ATTR_ALPHA] = {.type = NLA_U32},
154 [DCB_BCN_ATTR_BETA] = {.type = NLA_U32}, 154 [DCB_BCN_ATTR_BETA] = {.type = NLA_U32},
155 [DCB_BCN_ATTR_GD] = {.type = NLA_U32}, 155 [DCB_BCN_ATTR_GD] = {.type = NLA_U32},
156 [DCB_BCN_ATTR_GI] = {.type = NLA_U32}, 156 [DCB_BCN_ATTR_GI] = {.type = NLA_U32},
157 [DCB_BCN_ATTR_TMAX] = {.type = NLA_U32}, 157 [DCB_BCN_ATTR_TMAX] = {.type = NLA_U32},
158 [DCB_BCN_ATTR_TD] = {.type = NLA_U32}, 158 [DCB_BCN_ATTR_TD] = {.type = NLA_U32},
159 [DCB_BCN_ATTR_RMIN] = {.type = NLA_U32}, 159 [DCB_BCN_ATTR_RMIN] = {.type = NLA_U32},
160 [DCB_BCN_ATTR_W] = {.type = NLA_U32}, 160 [DCB_BCN_ATTR_W] = {.type = NLA_U32},
161 [DCB_BCN_ATTR_RD] = {.type = NLA_U32}, 161 [DCB_BCN_ATTR_RD] = {.type = NLA_U32},
162 [DCB_BCN_ATTR_RU] = {.type = NLA_U32}, 162 [DCB_BCN_ATTR_RU] = {.type = NLA_U32},
163 [DCB_BCN_ATTR_WRTT] = {.type = NLA_U32}, 163 [DCB_BCN_ATTR_WRTT] = {.type = NLA_U32},
164 [DCB_BCN_ATTR_RI] = {.type = NLA_U32}, 164 [DCB_BCN_ATTR_RI] = {.type = NLA_U32},
165 [DCB_BCN_ATTR_C] = {.type = NLA_U32}, 165 [DCB_BCN_ATTR_C] = {.type = NLA_U32},
166 [DCB_BCN_ATTR_ALL] = {.type = NLA_FLAG}, 166 [DCB_BCN_ATTR_ALL] = {.type = NLA_FLAG},
167 }; 167 };
168 168
169 /* DCB APP nested attributes. */ 169 /* DCB APP nested attributes. */
170 static const struct nla_policy dcbnl_app_nest[DCB_APP_ATTR_MAX + 1] = { 170 static const struct nla_policy dcbnl_app_nest[DCB_APP_ATTR_MAX + 1] = {
171 [DCB_APP_ATTR_IDTYPE] = {.type = NLA_U8}, 171 [DCB_APP_ATTR_IDTYPE] = {.type = NLA_U8},
172 [DCB_APP_ATTR_ID] = {.type = NLA_U16}, 172 [DCB_APP_ATTR_ID] = {.type = NLA_U16},
173 [DCB_APP_ATTR_PRIORITY] = {.type = NLA_U8}, 173 [DCB_APP_ATTR_PRIORITY] = {.type = NLA_U8},
174 }; 174 };
175 175
176 /* IEEE 802.1Qaz nested attributes. */ 176 /* IEEE 802.1Qaz nested attributes. */
177 static const struct nla_policy dcbnl_ieee_policy[DCB_ATTR_IEEE_MAX + 1] = { 177 static const struct nla_policy dcbnl_ieee_policy[DCB_ATTR_IEEE_MAX + 1] = {
178 [DCB_ATTR_IEEE_ETS] = {.len = sizeof(struct ieee_ets)}, 178 [DCB_ATTR_IEEE_ETS] = {.len = sizeof(struct ieee_ets)},
179 [DCB_ATTR_IEEE_PFC] = {.len = sizeof(struct ieee_pfc)}, 179 [DCB_ATTR_IEEE_PFC] = {.len = sizeof(struct ieee_pfc)},
180 [DCB_ATTR_IEEE_APP_TABLE] = {.type = NLA_NESTED}, 180 [DCB_ATTR_IEEE_APP_TABLE] = {.type = NLA_NESTED},
181 [DCB_ATTR_IEEE_MAXRATE] = {.len = sizeof(struct ieee_maxrate)}, 181 [DCB_ATTR_IEEE_MAXRATE] = {.len = sizeof(struct ieee_maxrate)},
182 }; 182 };
183 183
184 static const struct nla_policy dcbnl_ieee_app[DCB_ATTR_IEEE_APP_MAX + 1] = { 184 static const struct nla_policy dcbnl_ieee_app[DCB_ATTR_IEEE_APP_MAX + 1] = {
185 [DCB_ATTR_IEEE_APP] = {.len = sizeof(struct dcb_app)}, 185 [DCB_ATTR_IEEE_APP] = {.len = sizeof(struct dcb_app)},
186 }; 186 };
187 187
188 /* DCB number of traffic classes nested attributes. */ 188 /* DCB number of traffic classes nested attributes. */
189 static const struct nla_policy dcbnl_featcfg_nest[DCB_FEATCFG_ATTR_MAX + 1] = { 189 static const struct nla_policy dcbnl_featcfg_nest[DCB_FEATCFG_ATTR_MAX + 1] = {
190 [DCB_FEATCFG_ATTR_ALL] = {.type = NLA_FLAG}, 190 [DCB_FEATCFG_ATTR_ALL] = {.type = NLA_FLAG},
191 [DCB_FEATCFG_ATTR_PG] = {.type = NLA_U8}, 191 [DCB_FEATCFG_ATTR_PG] = {.type = NLA_U8},
192 [DCB_FEATCFG_ATTR_PFC] = {.type = NLA_U8}, 192 [DCB_FEATCFG_ATTR_PFC] = {.type = NLA_U8},
193 [DCB_FEATCFG_ATTR_APP] = {.type = NLA_U8}, 193 [DCB_FEATCFG_ATTR_APP] = {.type = NLA_U8},
194 }; 194 };
195 195
196 static LIST_HEAD(dcb_app_list); 196 static LIST_HEAD(dcb_app_list);
197 static DEFINE_SPINLOCK(dcb_lock); 197 static DEFINE_SPINLOCK(dcb_lock);
198 198
@@ -199,27 +199,24 @@
 static struct sk_buff *dcbnl_newmsg(int type, u8 cmd, u32 port, u32 seq,
 				    u32 flags, struct nlmsghdr **nlhp)
 {
 	struct sk_buff *skb;
 	struct dcbmsg *dcb;
 	struct nlmsghdr *nlh;
 
 	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 	if (!skb)
 		return NULL;
 
 	nlh = nlmsg_put(skb, port, seq, type, sizeof(*dcb), flags);
-	if (!nlh) {
-		/* header should always fit, allocation must be buggy */
-		BUG();
-	}
+	BUG_ON(!nlh);
 
 	dcb = nlmsg_data(nlh);
 	dcb->dcb_family = AF_UNSPEC;
 	dcb->cmd = cmd;
 	dcb->dcb_pad = 0;
 
 	if (nlhp)
 		*nlhp = nlh;
 
 	return skb;
 }
226 223
227 static int dcbnl_getstate(struct net_device *netdev, struct nlmsghdr *nlh, 224 static int dcbnl_getstate(struct net_device *netdev, struct nlmsghdr *nlh,
228 u32 seq, struct nlattr **tb, struct sk_buff *skb) 225 u32 seq, struct nlattr **tb, struct sk_buff *skb)
229 { 226 {
230 /* if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->getstate) */ 227 /* if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->getstate) */
231 if (!netdev->dcbnl_ops->getstate) 228 if (!netdev->dcbnl_ops->getstate)
232 return -EOPNOTSUPP; 229 return -EOPNOTSUPP;
233 230
234 return nla_put_u8(skb, DCB_ATTR_STATE, 231 return nla_put_u8(skb, DCB_ATTR_STATE,
235 netdev->dcbnl_ops->getstate(netdev)); 232 netdev->dcbnl_ops->getstate(netdev));
236 } 233 }
237 234
238 static int dcbnl_getpfccfg(struct net_device *netdev, struct nlmsghdr *nlh, 235 static int dcbnl_getpfccfg(struct net_device *netdev, struct nlmsghdr *nlh,
239 u32 seq, struct nlattr **tb, struct sk_buff *skb) 236 u32 seq, struct nlattr **tb, struct sk_buff *skb)
240 { 237 {
241 struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1], *nest; 238 struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1], *nest;
242 u8 value; 239 u8 value;
243 int ret; 240 int ret;
244 int i; 241 int i;
245 int getall = 0; 242 int getall = 0;
246 243
247 if (!tb[DCB_ATTR_PFC_CFG]) 244 if (!tb[DCB_ATTR_PFC_CFG])
248 return -EINVAL; 245 return -EINVAL;
249 246
250 if (!netdev->dcbnl_ops->getpfccfg) 247 if (!netdev->dcbnl_ops->getpfccfg)
251 return -EOPNOTSUPP; 248 return -EOPNOTSUPP;
252 249
253 ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX, 250 ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
254 tb[DCB_ATTR_PFC_CFG], 251 tb[DCB_ATTR_PFC_CFG],
255 dcbnl_pfc_up_nest); 252 dcbnl_pfc_up_nest);
256 if (ret) 253 if (ret)
257 return ret; 254 return ret;
258 255
259 nest = nla_nest_start(skb, DCB_ATTR_PFC_CFG); 256 nest = nla_nest_start(skb, DCB_ATTR_PFC_CFG);
260 if (!nest) 257 if (!nest)
261 return -EMSGSIZE; 258 return -EMSGSIZE;
262 259
263 if (data[DCB_PFC_UP_ATTR_ALL]) 260 if (data[DCB_PFC_UP_ATTR_ALL])
264 getall = 1; 261 getall = 1;
265 262
266 for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) { 263 for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
267 if (!getall && !data[i]) 264 if (!getall && !data[i])
268 continue; 265 continue;
269 266
270 netdev->dcbnl_ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0, 267 netdev->dcbnl_ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0,
271 &value); 268 &value);
272 ret = nla_put_u8(skb, i, value); 269 ret = nla_put_u8(skb, i, value);
273 if (ret) { 270 if (ret) {
274 nla_nest_cancel(skb, nest); 271 nla_nest_cancel(skb, nest);
275 return ret; 272 return ret;
276 } 273 }
277 } 274 }
278 nla_nest_end(skb, nest); 275 nla_nest_end(skb, nest);
279 276
280 return 0; 277 return 0;
281 } 278 }
282 279
283 static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlmsghdr *nlh, 280 static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlmsghdr *nlh,
284 u32 seq, struct nlattr **tb, struct sk_buff *skb) 281 u32 seq, struct nlattr **tb, struct sk_buff *skb)
285 { 282 {
286 u8 perm_addr[MAX_ADDR_LEN]; 283 u8 perm_addr[MAX_ADDR_LEN];
287 284
288 if (!netdev->dcbnl_ops->getpermhwaddr) 285 if (!netdev->dcbnl_ops->getpermhwaddr)
289 return -EOPNOTSUPP; 286 return -EOPNOTSUPP;
290 287
291 netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr); 288 netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr);
292 289
293 return nla_put(skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr), perm_addr); 290 return nla_put(skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr), perm_addr);
294 } 291 }
295 292
296 static int dcbnl_getcap(struct net_device *netdev, struct nlmsghdr *nlh, 293 static int dcbnl_getcap(struct net_device *netdev, struct nlmsghdr *nlh,
297 u32 seq, struct nlattr **tb, struct sk_buff *skb) 294 u32 seq, struct nlattr **tb, struct sk_buff *skb)
298 { 295 {
299 struct nlattr *data[DCB_CAP_ATTR_MAX + 1], *nest; 296 struct nlattr *data[DCB_CAP_ATTR_MAX + 1], *nest;
300 u8 value; 297 u8 value;
301 int ret; 298 int ret;
302 int i; 299 int i;
303 int getall = 0; 300 int getall = 0;
304 301
305 if (!tb[DCB_ATTR_CAP]) 302 if (!tb[DCB_ATTR_CAP])
306 return -EINVAL; 303 return -EINVAL;
307 304
308 if (!netdev->dcbnl_ops->getcap) 305 if (!netdev->dcbnl_ops->getcap)
309 return -EOPNOTSUPP; 306 return -EOPNOTSUPP;
310 307
311 ret = nla_parse_nested(data, DCB_CAP_ATTR_MAX, tb[DCB_ATTR_CAP], 308 ret = nla_parse_nested(data, DCB_CAP_ATTR_MAX, tb[DCB_ATTR_CAP],
312 dcbnl_cap_nest); 309 dcbnl_cap_nest);
313 if (ret) 310 if (ret)
314 return ret; 311 return ret;
315 312
316 nest = nla_nest_start(skb, DCB_ATTR_CAP); 313 nest = nla_nest_start(skb, DCB_ATTR_CAP);
317 if (!nest) 314 if (!nest)
318 return -EMSGSIZE; 315 return -EMSGSIZE;
319 316
320 if (data[DCB_CAP_ATTR_ALL]) 317 if (data[DCB_CAP_ATTR_ALL])
321 getall = 1; 318 getall = 1;
322 319
323 for (i = DCB_CAP_ATTR_ALL+1; i <= DCB_CAP_ATTR_MAX; i++) { 320 for (i = DCB_CAP_ATTR_ALL+1; i <= DCB_CAP_ATTR_MAX; i++) {
324 if (!getall && !data[i]) 321 if (!getall && !data[i])
325 continue; 322 continue;
326 323
327 if (!netdev->dcbnl_ops->getcap(netdev, i, &value)) { 324 if (!netdev->dcbnl_ops->getcap(netdev, i, &value)) {
328 ret = nla_put_u8(skb, i, value); 325 ret = nla_put_u8(skb, i, value);
329 if (ret) { 326 if (ret) {
330 nla_nest_cancel(skb, nest); 327 nla_nest_cancel(skb, nest);
331 return ret; 328 return ret;
332 } 329 }
333 } 330 }
334 } 331 }
335 nla_nest_end(skb, nest); 332 nla_nest_end(skb, nest);
336 333
337 return 0; 334 return 0;
338 } 335 }
339 336
340 static int dcbnl_getnumtcs(struct net_device *netdev, struct nlmsghdr *nlh, 337 static int dcbnl_getnumtcs(struct net_device *netdev, struct nlmsghdr *nlh,
341 u32 seq, struct nlattr **tb, struct sk_buff *skb) 338 u32 seq, struct nlattr **tb, struct sk_buff *skb)
342 { 339 {
343 struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1], *nest; 340 struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1], *nest;
344 u8 value; 341 u8 value;
345 int ret; 342 int ret;
346 int i; 343 int i;
347 int getall = 0; 344 int getall = 0;
348 345
349 if (!tb[DCB_ATTR_NUMTCS]) 346 if (!tb[DCB_ATTR_NUMTCS])
350 return -EINVAL; 347 return -EINVAL;
351 348
352 if (!netdev->dcbnl_ops->getnumtcs) 349 if (!netdev->dcbnl_ops->getnumtcs)
353 return -EOPNOTSUPP; 350 return -EOPNOTSUPP;
354 351
355 ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS], 352 ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS],
356 dcbnl_numtcs_nest); 353 dcbnl_numtcs_nest);
357 if (ret) 354 if (ret)
358 return ret; 355 return ret;
359 356
360 nest = nla_nest_start(skb, DCB_ATTR_NUMTCS); 357 nest = nla_nest_start(skb, DCB_ATTR_NUMTCS);
361 if (!nest) 358 if (!nest)
362 return -EMSGSIZE; 359 return -EMSGSIZE;
363 360
364 if (data[DCB_NUMTCS_ATTR_ALL]) 361 if (data[DCB_NUMTCS_ATTR_ALL])
365 getall = 1; 362 getall = 1;
366 363
367 for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) { 364 for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
368 if (!getall && !data[i]) 365 if (!getall && !data[i])
369 continue; 366 continue;
370 367
371 ret = netdev->dcbnl_ops->getnumtcs(netdev, i, &value); 368 ret = netdev->dcbnl_ops->getnumtcs(netdev, i, &value);
372 if (!ret) { 369 if (!ret) {
373 ret = nla_put_u8(skb, i, value); 370 ret = nla_put_u8(skb, i, value);
374 if (ret) { 371 if (ret) {
375 nla_nest_cancel(skb, nest); 372 nla_nest_cancel(skb, nest);
376 return ret; 373 return ret;
377 } 374 }
378 } else 375 } else
379 return -EINVAL; 376 return -EINVAL;
380 } 377 }
381 nla_nest_end(skb, nest); 378 nla_nest_end(skb, nest);
382 379
383 return 0; 380 return 0;
384 } 381 }
385 382
386 static int dcbnl_setnumtcs(struct net_device *netdev, struct nlmsghdr *nlh, 383 static int dcbnl_setnumtcs(struct net_device *netdev, struct nlmsghdr *nlh,
387 u32 seq, struct nlattr **tb, struct sk_buff *skb) 384 u32 seq, struct nlattr **tb, struct sk_buff *skb)
388 { 385 {
389 struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1]; 386 struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1];
390 int ret; 387 int ret;
391 u8 value; 388 u8 value;
392 int i; 389 int i;
393 390
394 if (!tb[DCB_ATTR_NUMTCS]) 391 if (!tb[DCB_ATTR_NUMTCS])
395 return -EINVAL; 392 return -EINVAL;
396 393
397 if (!netdev->dcbnl_ops->setnumtcs) 394 if (!netdev->dcbnl_ops->setnumtcs)
398 return -EOPNOTSUPP; 395 return -EOPNOTSUPP;
399 396
400 ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS], 397 ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS],
401 dcbnl_numtcs_nest); 398 dcbnl_numtcs_nest);
402 if (ret) 399 if (ret)
403 return ret; 400 return ret;
404 401
405 for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) { 402 for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
406 if (data[i] == NULL) 403 if (data[i] == NULL)
407 continue; 404 continue;
408 405
409 value = nla_get_u8(data[i]); 406 value = nla_get_u8(data[i]);
410 407
411 ret = netdev->dcbnl_ops->setnumtcs(netdev, i, value); 408 ret = netdev->dcbnl_ops->setnumtcs(netdev, i, value);
412 if (ret) 409 if (ret)
413 break; 410 break;
414 } 411 }
415 412
416 return nla_put_u8(skb, DCB_ATTR_NUMTCS, !!ret); 413 return nla_put_u8(skb, DCB_ATTR_NUMTCS, !!ret);
417 } 414 }
418 415
419 static int dcbnl_getpfcstate(struct net_device *netdev, struct nlmsghdr *nlh, 416 static int dcbnl_getpfcstate(struct net_device *netdev, struct nlmsghdr *nlh,
420 u32 seq, struct nlattr **tb, struct sk_buff *skb) 417 u32 seq, struct nlattr **tb, struct sk_buff *skb)
421 { 418 {
422 if (!netdev->dcbnl_ops->getpfcstate) 419 if (!netdev->dcbnl_ops->getpfcstate)
423 return -EOPNOTSUPP; 420 return -EOPNOTSUPP;
424 421
425 return nla_put_u8(skb, DCB_ATTR_PFC_STATE, 422 return nla_put_u8(skb, DCB_ATTR_PFC_STATE,
426 netdev->dcbnl_ops->getpfcstate(netdev)); 423 netdev->dcbnl_ops->getpfcstate(netdev));
427 } 424 }
428 425
429 static int dcbnl_setpfcstate(struct net_device *netdev, struct nlmsghdr *nlh, 426 static int dcbnl_setpfcstate(struct net_device *netdev, struct nlmsghdr *nlh,
430 u32 seq, struct nlattr **tb, struct sk_buff *skb) 427 u32 seq, struct nlattr **tb, struct sk_buff *skb)
431 { 428 {
432 u8 value; 429 u8 value;
433 430
434 if (!tb[DCB_ATTR_PFC_STATE]) 431 if (!tb[DCB_ATTR_PFC_STATE])
435 return -EINVAL; 432 return -EINVAL;
436 433
437 if (!netdev->dcbnl_ops->setpfcstate) 434 if (!netdev->dcbnl_ops->setpfcstate)
438 return -EOPNOTSUPP; 435 return -EOPNOTSUPP;
439 436
440 value = nla_get_u8(tb[DCB_ATTR_PFC_STATE]); 437 value = nla_get_u8(tb[DCB_ATTR_PFC_STATE]);
441 438
442 netdev->dcbnl_ops->setpfcstate(netdev, value); 439 netdev->dcbnl_ops->setpfcstate(netdev, value);
443 440
444 return nla_put_u8(skb, DCB_ATTR_PFC_STATE, 0); 441 return nla_put_u8(skb, DCB_ATTR_PFC_STATE, 0);
445 } 442 }
446 443
447 static int dcbnl_getapp(struct net_device *netdev, struct nlmsghdr *nlh, 444 static int dcbnl_getapp(struct net_device *netdev, struct nlmsghdr *nlh,
448 u32 seq, struct nlattr **tb, struct sk_buff *skb) 445 u32 seq, struct nlattr **tb, struct sk_buff *skb)
449 { 446 {
450 struct nlattr *app_nest; 447 struct nlattr *app_nest;
451 struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1]; 448 struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];
452 u16 id; 449 u16 id;
453 u8 up, idtype; 450 u8 up, idtype;
454 int ret; 451 int ret;
455 452
456 if (!tb[DCB_ATTR_APP]) 453 if (!tb[DCB_ATTR_APP])
457 return -EINVAL; 454 return -EINVAL;
458 455
459 ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP], 456 ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
460 dcbnl_app_nest); 457 dcbnl_app_nest);
461 if (ret) 458 if (ret)
462 return ret; 459 return ret;
463 460
464 /* all must be non-null */ 461 /* all must be non-null */
465 if ((!app_tb[DCB_APP_ATTR_IDTYPE]) || 462 if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
466 (!app_tb[DCB_APP_ATTR_ID])) 463 (!app_tb[DCB_APP_ATTR_ID]))
467 return -EINVAL; 464 return -EINVAL;
468 465
469 /* either by eth type or by socket number */ 466 /* either by eth type or by socket number */
470 idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]); 467 idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
471 if ((idtype != DCB_APP_IDTYPE_ETHTYPE) && 468 if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
472 (idtype != DCB_APP_IDTYPE_PORTNUM)) 469 (idtype != DCB_APP_IDTYPE_PORTNUM))
473 return -EINVAL; 470 return -EINVAL;
474 471
475 id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]); 472 id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
476 473
477 if (netdev->dcbnl_ops->getapp) { 474 if (netdev->dcbnl_ops->getapp) {
478 up = netdev->dcbnl_ops->getapp(netdev, idtype, id); 475 up = netdev->dcbnl_ops->getapp(netdev, idtype, id);
479 } else { 476 } else {
480 struct dcb_app app = { 477 struct dcb_app app = {
481 .selector = idtype, 478 .selector = idtype,
482 .protocol = id, 479 .protocol = id,
483 }; 480 };
484 up = dcb_getapp(netdev, &app); 481 up = dcb_getapp(netdev, &app);
485 } 482 }
486 483
487 app_nest = nla_nest_start(skb, DCB_ATTR_APP); 484 app_nest = nla_nest_start(skb, DCB_ATTR_APP);
488 if (!app_nest) 485 if (!app_nest)
489 return -EMSGSIZE; 486 return -EMSGSIZE;
490 487
491 ret = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE, idtype); 488 ret = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE, idtype);
492 if (ret) 489 if (ret)
493 goto out_cancel; 490 goto out_cancel;
494 491
495 ret = nla_put_u16(skb, DCB_APP_ATTR_ID, id); 492 ret = nla_put_u16(skb, DCB_APP_ATTR_ID, id);
496 if (ret) 493 if (ret)
497 goto out_cancel; 494 goto out_cancel;
498 495
499 ret = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY, up); 496 ret = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY, up);
500 if (ret) 497 if (ret)
501 goto out_cancel; 498 goto out_cancel;
502 499
503 nla_nest_end(skb, app_nest); 500 nla_nest_end(skb, app_nest);
504 501
505 return 0; 502 return 0;
506 503
507 out_cancel: 504 out_cancel:
508 nla_nest_cancel(skb, app_nest); 505 nla_nest_cancel(skb, app_nest);
509 return ret; 506 return ret;
510 } 507 }
511 508
512 static int dcbnl_setapp(struct net_device *netdev, struct nlmsghdr *nlh, 509 static int dcbnl_setapp(struct net_device *netdev, struct nlmsghdr *nlh,
513 u32 seq, struct nlattr **tb, struct sk_buff *skb) 510 u32 seq, struct nlattr **tb, struct sk_buff *skb)
514 { 511 {
515 int ret; 512 int ret;
516 u16 id; 513 u16 id;
517 u8 up, idtype; 514 u8 up, idtype;
518 struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1]; 515 struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];
519 516
520 if (!tb[DCB_ATTR_APP]) 517 if (!tb[DCB_ATTR_APP])
521 return -EINVAL; 518 return -EINVAL;
522 519
523 ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP], 520 ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
524 dcbnl_app_nest); 521 dcbnl_app_nest);
525 if (ret) 522 if (ret)
526 return ret; 523 return ret;
527 524
528 /* all must be non-null */ 525 /* all must be non-null */
529 if ((!app_tb[DCB_APP_ATTR_IDTYPE]) || 526 if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
530 (!app_tb[DCB_APP_ATTR_ID]) || 527 (!app_tb[DCB_APP_ATTR_ID]) ||
531 (!app_tb[DCB_APP_ATTR_PRIORITY])) 528 (!app_tb[DCB_APP_ATTR_PRIORITY]))
532 return -EINVAL; 529 return -EINVAL;
533 530
534 /* either by eth type or by socket number */ 531 /* either by eth type or by socket number */
535 idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]); 532 idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
536 if ((idtype != DCB_APP_IDTYPE_ETHTYPE) && 533 if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
537 (idtype != DCB_APP_IDTYPE_PORTNUM)) 534 (idtype != DCB_APP_IDTYPE_PORTNUM))
538 return -EINVAL; 535 return -EINVAL;
539 536
540 id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]); 537 id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
541 up = nla_get_u8(app_tb[DCB_APP_ATTR_PRIORITY]); 538 up = nla_get_u8(app_tb[DCB_APP_ATTR_PRIORITY]);
542 539
543 if (netdev->dcbnl_ops->setapp) { 540 if (netdev->dcbnl_ops->setapp) {
544 ret = netdev->dcbnl_ops->setapp(netdev, idtype, id, up); 541 ret = netdev->dcbnl_ops->setapp(netdev, idtype, id, up);
545 } else { 542 } else {
546 struct dcb_app app; 543 struct dcb_app app;
547 app.selector = idtype; 544 app.selector = idtype;
548 app.protocol = id; 545 app.protocol = id;
549 app.priority = up; 546 app.priority = up;
550 ret = dcb_setapp(netdev, &app); 547 ret = dcb_setapp(netdev, &app);
551 } 548 }
552 549
553 ret = nla_put_u8(skb, DCB_ATTR_APP, ret); 550 ret = nla_put_u8(skb, DCB_ATTR_APP, ret);
554 dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SAPP, seq, 0); 551 dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SAPP, seq, 0);
555 552
556 return ret; 553 return ret;
557 } 554 }
558 555
559 static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlmsghdr *nlh, 556 static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
560 struct nlattr **tb, struct sk_buff *skb, int dir) 557 struct nlattr **tb, struct sk_buff *skb, int dir)
561 { 558 {
562 struct nlattr *pg_nest, *param_nest, *data; 559 struct nlattr *pg_nest, *param_nest, *data;
563 struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1]; 560 struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
564 struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1]; 561 struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
565 u8 prio, pgid, tc_pct, up_map; 562 u8 prio, pgid, tc_pct, up_map;
566 int ret; 563 int ret;
567 int getall = 0; 564 int getall = 0;
568 int i; 565 int i;
569 566
570 if (!tb[DCB_ATTR_PG_CFG]) 567 if (!tb[DCB_ATTR_PG_CFG])
571 return -EINVAL; 568 return -EINVAL;
572 569
573 if (!netdev->dcbnl_ops->getpgtccfgtx || 570 if (!netdev->dcbnl_ops->getpgtccfgtx ||
574 !netdev->dcbnl_ops->getpgtccfgrx || 571 !netdev->dcbnl_ops->getpgtccfgrx ||
575 !netdev->dcbnl_ops->getpgbwgcfgtx || 572 !netdev->dcbnl_ops->getpgbwgcfgtx ||
576 !netdev->dcbnl_ops->getpgbwgcfgrx) 573 !netdev->dcbnl_ops->getpgbwgcfgrx)
577 return -EOPNOTSUPP; 574 return -EOPNOTSUPP;
578 575
579 ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX, 576 ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX,
580 tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest); 577 tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest);
581 if (ret) 578 if (ret)
582 return ret; 579 return ret;
583 580
584 pg_nest = nla_nest_start(skb, DCB_ATTR_PG_CFG); 581 pg_nest = nla_nest_start(skb, DCB_ATTR_PG_CFG);
585 if (!pg_nest) 582 if (!pg_nest)
586 return -EMSGSIZE; 583 return -EMSGSIZE;
587 584
588 if (pg_tb[DCB_PG_ATTR_TC_ALL]) 585 if (pg_tb[DCB_PG_ATTR_TC_ALL])
589 getall = 1; 586 getall = 1;
590 587
591 for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) { 588 for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
592 if (!getall && !pg_tb[i]) 589 if (!getall && !pg_tb[i])
593 continue; 590 continue;
594 591
595 if (pg_tb[DCB_PG_ATTR_TC_ALL]) 592 if (pg_tb[DCB_PG_ATTR_TC_ALL])
596 data = pg_tb[DCB_PG_ATTR_TC_ALL]; 593 data = pg_tb[DCB_PG_ATTR_TC_ALL];
597 else 594 else
598 data = pg_tb[i]; 595 data = pg_tb[i];
599 ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX, 596 ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX,
600 data, dcbnl_tc_param_nest); 597 data, dcbnl_tc_param_nest);
601 if (ret) 598 if (ret)
602 goto err_pg; 599 goto err_pg;
603 600
604 param_nest = nla_nest_start(skb, i); 601 param_nest = nla_nest_start(skb, i);
605 if (!param_nest) 602 if (!param_nest)
606 goto err_pg; 603 goto err_pg;
607 604
608 pgid = DCB_ATTR_VALUE_UNDEFINED; 605 pgid = DCB_ATTR_VALUE_UNDEFINED;
609 prio = DCB_ATTR_VALUE_UNDEFINED; 606 prio = DCB_ATTR_VALUE_UNDEFINED;
610 tc_pct = DCB_ATTR_VALUE_UNDEFINED; 607 tc_pct = DCB_ATTR_VALUE_UNDEFINED;
611 up_map = DCB_ATTR_VALUE_UNDEFINED; 608 up_map = DCB_ATTR_VALUE_UNDEFINED;
612 609
613 if (dir) { 610 if (dir) {
614 /* Rx */ 611 /* Rx */
615 netdev->dcbnl_ops->getpgtccfgrx(netdev, 612 netdev->dcbnl_ops->getpgtccfgrx(netdev,
616 i - DCB_PG_ATTR_TC_0, &prio, 613 i - DCB_PG_ATTR_TC_0, &prio,
617 &pgid, &tc_pct, &up_map); 614 &pgid, &tc_pct, &up_map);
618 } else { 615 } else {
619 /* Tx */ 616 /* Tx */
620 netdev->dcbnl_ops->getpgtccfgtx(netdev, 617 netdev->dcbnl_ops->getpgtccfgtx(netdev,
621 i - DCB_PG_ATTR_TC_0, &prio, 618 i - DCB_PG_ATTR_TC_0, &prio,
622 &pgid, &tc_pct, &up_map); 619 &pgid, &tc_pct, &up_map);
623 } 620 }
624 621
625 if (param_tb[DCB_TC_ATTR_PARAM_PGID] || 622 if (param_tb[DCB_TC_ATTR_PARAM_PGID] ||
626 param_tb[DCB_TC_ATTR_PARAM_ALL]) { 623 param_tb[DCB_TC_ATTR_PARAM_ALL]) {
627 ret = nla_put_u8(skb, 624 ret = nla_put_u8(skb,
628 DCB_TC_ATTR_PARAM_PGID, pgid); 625 DCB_TC_ATTR_PARAM_PGID, pgid);
629 if (ret) 626 if (ret)
630 goto err_param; 627 goto err_param;
631 } 628 }
632 if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING] || 629 if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING] ||
633 param_tb[DCB_TC_ATTR_PARAM_ALL]) { 630 param_tb[DCB_TC_ATTR_PARAM_ALL]) {
634 ret = nla_put_u8(skb, 631 ret = nla_put_u8(skb,
635 DCB_TC_ATTR_PARAM_UP_MAPPING, up_map); 632 DCB_TC_ATTR_PARAM_UP_MAPPING, up_map);
636 if (ret) 633 if (ret)
637 goto err_param; 634 goto err_param;
638 } 635 }
639 if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO] || 636 if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO] ||
640 param_tb[DCB_TC_ATTR_PARAM_ALL]) { 637 param_tb[DCB_TC_ATTR_PARAM_ALL]) {
641 ret = nla_put_u8(skb, 638 ret = nla_put_u8(skb,
642 DCB_TC_ATTR_PARAM_STRICT_PRIO, prio); 639 DCB_TC_ATTR_PARAM_STRICT_PRIO, prio);
643 if (ret) 640 if (ret)
644 goto err_param; 641 goto err_param;
645 } 642 }
646 if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT] || 643 if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT] ||
647 param_tb[DCB_TC_ATTR_PARAM_ALL]) { 644 param_tb[DCB_TC_ATTR_PARAM_ALL]) {
648 ret = nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT, 645 ret = nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT,
649 tc_pct); 646 tc_pct);
650 if (ret) 647 if (ret)
651 goto err_param; 648 goto err_param;
652 } 649 }
653 nla_nest_end(skb, param_nest); 650 nla_nest_end(skb, param_nest);
654 } 651 }
655 652
656 if (pg_tb[DCB_PG_ATTR_BW_ID_ALL]) 653 if (pg_tb[DCB_PG_ATTR_BW_ID_ALL])
657 getall = 1; 654 getall = 1;
658 else 655 else
659 getall = 0; 656 getall = 0;
660 657
661 for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) { 658 for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
662 if (!getall && !pg_tb[i]) 659 if (!getall && !pg_tb[i])
663 continue; 660 continue;
664 661
665 tc_pct = DCB_ATTR_VALUE_UNDEFINED; 662 tc_pct = DCB_ATTR_VALUE_UNDEFINED;
666 663
667 if (dir) { 664 if (dir) {
668 /* Rx */ 665 /* Rx */
669 netdev->dcbnl_ops->getpgbwgcfgrx(netdev, 666 netdev->dcbnl_ops->getpgbwgcfgrx(netdev,
670 i - DCB_PG_ATTR_BW_ID_0, &tc_pct); 667 i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
671 } else { 668 } else {
672 /* Tx */ 669 /* Tx */
673 netdev->dcbnl_ops->getpgbwgcfgtx(netdev, 670 netdev->dcbnl_ops->getpgbwgcfgtx(netdev,
674 i - DCB_PG_ATTR_BW_ID_0, &tc_pct); 671 i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
675 } 672 }
676 ret = nla_put_u8(skb, i, tc_pct); 673 ret = nla_put_u8(skb, i, tc_pct);
677 if (ret) 674 if (ret)
678 goto err_pg; 675 goto err_pg;
679 } 676 }
680 677
681 nla_nest_end(skb, pg_nest); 678 nla_nest_end(skb, pg_nest);
682 679
683 return 0; 680 return 0;
684 681
685 err_param: 682 err_param:
686 nla_nest_cancel(skb, param_nest); 683 nla_nest_cancel(skb, param_nest);
687 err_pg: 684 err_pg:
688 nla_nest_cancel(skb, pg_nest); 685 nla_nest_cancel(skb, pg_nest);
689 686
690 return -EMSGSIZE; 687 return -EMSGSIZE;
691 } 688 }
692 689
693 static int dcbnl_pgtx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh, 690 static int dcbnl_pgtx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
694 u32 seq, struct nlattr **tb, struct sk_buff *skb) 691 u32 seq, struct nlattr **tb, struct sk_buff *skb)
695 { 692 {
696 return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 0); 693 return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 0);
697 } 694 }
698 695
699 static int dcbnl_pgrx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh, 696 static int dcbnl_pgrx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
700 u32 seq, struct nlattr **tb, struct sk_buff *skb) 697 u32 seq, struct nlattr **tb, struct sk_buff *skb)
701 { 698 {
702 return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 1); 699 return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 1);
703 } 700 }
704 701
705 static int dcbnl_setstate(struct net_device *netdev, struct nlmsghdr *nlh, 702 static int dcbnl_setstate(struct net_device *netdev, struct nlmsghdr *nlh,
706 u32 seq, struct nlattr **tb, struct sk_buff *skb) 703 u32 seq, struct nlattr **tb, struct sk_buff *skb)
707 { 704 {
708 u8 value; 705 u8 value;
709 706
710 if (!tb[DCB_ATTR_STATE]) 707 if (!tb[DCB_ATTR_STATE])
711 return -EINVAL; 708 return -EINVAL;
712 709
713 if (!netdev->dcbnl_ops->setstate) 710 if (!netdev->dcbnl_ops->setstate)
714 return -EOPNOTSUPP; 711 return -EOPNOTSUPP;
715 712
716 value = nla_get_u8(tb[DCB_ATTR_STATE]); 713 value = nla_get_u8(tb[DCB_ATTR_STATE]);
717 714
718 return nla_put_u8(skb, DCB_ATTR_STATE, 715 return nla_put_u8(skb, DCB_ATTR_STATE,
719 netdev->dcbnl_ops->setstate(netdev, value)); 716 netdev->dcbnl_ops->setstate(netdev, value));
720 } 717 }
721 718
722 static int dcbnl_setpfccfg(struct net_device *netdev, struct nlmsghdr *nlh, 719 static int dcbnl_setpfccfg(struct net_device *netdev, struct nlmsghdr *nlh,
723 u32 seq, struct nlattr **tb, struct sk_buff *skb) 720 u32 seq, struct nlattr **tb, struct sk_buff *skb)
724 { 721 {
725 struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1]; 722 struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1];
726 int i; 723 int i;
727 int ret; 724 int ret;
728 u8 value; 725 u8 value;
729 726
730 if (!tb[DCB_ATTR_PFC_CFG]) 727 if (!tb[DCB_ATTR_PFC_CFG])
731 return -EINVAL; 728 return -EINVAL;
732 729
733 if (!netdev->dcbnl_ops->setpfccfg) 730 if (!netdev->dcbnl_ops->setpfccfg)
734 return -EOPNOTSUPP; 731 return -EOPNOTSUPP;
735 732
736 ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX, 733 ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
737 tb[DCB_ATTR_PFC_CFG], 734 tb[DCB_ATTR_PFC_CFG],
738 dcbnl_pfc_up_nest); 735 dcbnl_pfc_up_nest);
739 if (ret) 736 if (ret)
740 return ret; 737 return ret;
741 738
742 for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) { 739 for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
743 if (data[i] == NULL) 740 if (data[i] == NULL)
744 continue; 741 continue;
745 value = nla_get_u8(data[i]); 742 value = nla_get_u8(data[i]);
746 netdev->dcbnl_ops->setpfccfg(netdev, 743 netdev->dcbnl_ops->setpfccfg(netdev,
747 data[i]->nla_type - DCB_PFC_UP_ATTR_0, value); 744 data[i]->nla_type - DCB_PFC_UP_ATTR_0, value);
748 } 745 }
749 746
750 return nla_put_u8(skb, DCB_ATTR_PFC_CFG, 0); 747 return nla_put_u8(skb, DCB_ATTR_PFC_CFG, 0);
751 } 748 }
752 749
753 static int dcbnl_setall(struct net_device *netdev, struct nlmsghdr *nlh, 750 static int dcbnl_setall(struct net_device *netdev, struct nlmsghdr *nlh,
754 u32 seq, struct nlattr **tb, struct sk_buff *skb) 751 u32 seq, struct nlattr **tb, struct sk_buff *skb)
755 { 752 {
756 int ret; 753 int ret;
757 754
758 if (!tb[DCB_ATTR_SET_ALL]) 755 if (!tb[DCB_ATTR_SET_ALL])
759 return -EINVAL; 756 return -EINVAL;
760 757
761 if (!netdev->dcbnl_ops->setall) 758 if (!netdev->dcbnl_ops->setall)
762 return -EOPNOTSUPP; 759 return -EOPNOTSUPP;
763 760
764 ret = nla_put_u8(skb, DCB_ATTR_SET_ALL, 761 ret = nla_put_u8(skb, DCB_ATTR_SET_ALL,
765 netdev->dcbnl_ops->setall(netdev)); 762 netdev->dcbnl_ops->setall(netdev));
766 dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SET_ALL, seq, 0); 763 dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SET_ALL, seq, 0);
767 764
768 return ret; 765 return ret;
769 } 766 }
770 767
771 static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlmsghdr *nlh, 768 static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
772 u32 seq, struct nlattr **tb, struct sk_buff *skb, 769 u32 seq, struct nlattr **tb, struct sk_buff *skb,
773 int dir) 770 int dir)
774 { 771 {
775 struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1]; 772 struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
776 struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1]; 773 struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
777 int ret; 774 int ret;
778 int i; 775 int i;
779 u8 pgid; 776 u8 pgid;
780 u8 up_map; 777 u8 up_map;
781 u8 prio; 778 u8 prio;
782 u8 tc_pct; 779 u8 tc_pct;
783 780
784 if (!tb[DCB_ATTR_PG_CFG]) 781 if (!tb[DCB_ATTR_PG_CFG])
785 return -EINVAL; 782 return -EINVAL;
786 783
787 if (!netdev->dcbnl_ops->setpgtccfgtx || 784 if (!netdev->dcbnl_ops->setpgtccfgtx ||
788 !netdev->dcbnl_ops->setpgtccfgrx || 785 !netdev->dcbnl_ops->setpgtccfgrx ||
789 !netdev->dcbnl_ops->setpgbwgcfgtx || 786 !netdev->dcbnl_ops->setpgbwgcfgtx ||
790 !netdev->dcbnl_ops->setpgbwgcfgrx) 787 !netdev->dcbnl_ops->setpgbwgcfgrx)
791 return -EOPNOTSUPP; 788 return -EOPNOTSUPP;
792 789
793 ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX, 790 ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX,
794 tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest); 791 tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest);
795 if (ret) 792 if (ret)
796 return ret; 793 return ret;
797 794
798 for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) { 795 for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
799 if (!pg_tb[i]) 796 if (!pg_tb[i])
800 continue; 797 continue;
801 798
802 ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX, 799 ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX,
803 pg_tb[i], dcbnl_tc_param_nest); 800 pg_tb[i], dcbnl_tc_param_nest);
804 if (ret) 801 if (ret)
805 return ret; 802 return ret;
806 803
807 pgid = DCB_ATTR_VALUE_UNDEFINED; 804 pgid = DCB_ATTR_VALUE_UNDEFINED;
808 prio = DCB_ATTR_VALUE_UNDEFINED; 805 prio = DCB_ATTR_VALUE_UNDEFINED;
809 tc_pct = DCB_ATTR_VALUE_UNDEFINED; 806 tc_pct = DCB_ATTR_VALUE_UNDEFINED;
810 up_map = DCB_ATTR_VALUE_UNDEFINED; 807 up_map = DCB_ATTR_VALUE_UNDEFINED;
811 808
812 if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO]) 809 if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO])
813 prio = 810 prio =
814 nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO]); 811 nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO]);
815 812
816 if (param_tb[DCB_TC_ATTR_PARAM_PGID]) 813 if (param_tb[DCB_TC_ATTR_PARAM_PGID])
817 pgid = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_PGID]); 814 pgid = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_PGID]);
818 815
819 if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT]) 816 if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT])
820 tc_pct = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_BW_PCT]); 817 tc_pct = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_BW_PCT]);
821 818
822 if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING]) 819 if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING])
823 up_map = 820 up_map =
824 nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING]); 821 nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING]);
825 822
826 /* dir: Tx = 0, Rx = 1 */ 823 /* dir: Tx = 0, Rx = 1 */
827 if (dir) { 824 if (dir) {
828 /* Rx */ 825 /* Rx */
829 netdev->dcbnl_ops->setpgtccfgrx(netdev, 826 netdev->dcbnl_ops->setpgtccfgrx(netdev,
830 i - DCB_PG_ATTR_TC_0, 827 i - DCB_PG_ATTR_TC_0,
831 prio, pgid, tc_pct, up_map); 828 prio, pgid, tc_pct, up_map);
832 } else { 829 } else {
833 /* Tx */ 830 /* Tx */
834 netdev->dcbnl_ops->setpgtccfgtx(netdev, 831 netdev->dcbnl_ops->setpgtccfgtx(netdev,
835 i - DCB_PG_ATTR_TC_0, 832 i - DCB_PG_ATTR_TC_0,
836 prio, pgid, tc_pct, up_map); 833 prio, pgid, tc_pct, up_map);
837 } 834 }
838 } 835 }
839 836
840 for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) { 837 for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
841 if (!pg_tb[i]) 838 if (!pg_tb[i])
842 continue; 839 continue;
843 840
844 tc_pct = nla_get_u8(pg_tb[i]); 841 tc_pct = nla_get_u8(pg_tb[i]);
845 842
846 /* dir: Tx = 0, Rx = 1 */ 843 /* dir: Tx = 0, Rx = 1 */
847 if (dir) { 844 if (dir) {
848 /* Rx */ 845 /* Rx */
849 netdev->dcbnl_ops->setpgbwgcfgrx(netdev, 846 netdev->dcbnl_ops->setpgbwgcfgrx(netdev,
850 i - DCB_PG_ATTR_BW_ID_0, tc_pct); 847 i - DCB_PG_ATTR_BW_ID_0, tc_pct);
851 } else { 848 } else {
852 /* Tx */ 849 /* Tx */
853 netdev->dcbnl_ops->setpgbwgcfgtx(netdev, 850 netdev->dcbnl_ops->setpgbwgcfgtx(netdev,
854 i - DCB_PG_ATTR_BW_ID_0, tc_pct); 851 i - DCB_PG_ATTR_BW_ID_0, tc_pct);
855 } 852 }
856 } 853 }
857 854
858 return nla_put_u8(skb, 855 return nla_put_u8(skb,
859 (dir ? DCB_CMD_PGRX_SCFG : DCB_CMD_PGTX_SCFG), 0); 856 (dir ? DCB_CMD_PGRX_SCFG : DCB_CMD_PGTX_SCFG), 0);
860 } 857 }
861 858
862 static int dcbnl_pgtx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh, 859 static int dcbnl_pgtx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
863 u32 seq, struct nlattr **tb, struct sk_buff *skb) 860 u32 seq, struct nlattr **tb, struct sk_buff *skb)
864 { 861 {
865 return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 0); 862 return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 0);
866 } 863 }
867 864
868 static int dcbnl_pgrx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh, 865 static int dcbnl_pgrx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
869 u32 seq, struct nlattr **tb, struct sk_buff *skb) 866 u32 seq, struct nlattr **tb, struct sk_buff *skb)
870 { 867 {
871 return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 1); 868 return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 1);
872 } 869 }
873 870
874 static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlmsghdr *nlh, 871 static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
875 u32 seq, struct nlattr **tb, struct sk_buff *skb) 872 u32 seq, struct nlattr **tb, struct sk_buff *skb)
876 { 873 {
877 struct nlattr *bcn_nest; 874 struct nlattr *bcn_nest;
878 struct nlattr *bcn_tb[DCB_BCN_ATTR_MAX + 1]; 875 struct nlattr *bcn_tb[DCB_BCN_ATTR_MAX + 1];
879 u8 value_byte; 876 u8 value_byte;
880 u32 value_integer; 877 u32 value_integer;
881 int ret; 878 int ret;
882 bool getall = false; 879 bool getall = false;
883 int i; 880 int i;
884 881
885 if (!tb[DCB_ATTR_BCN]) 882 if (!tb[DCB_ATTR_BCN])
886 return -EINVAL; 883 return -EINVAL;
887 884
888 if (!netdev->dcbnl_ops->getbcnrp || 885 if (!netdev->dcbnl_ops->getbcnrp ||
889 !netdev->dcbnl_ops->getbcncfg) 886 !netdev->dcbnl_ops->getbcncfg)
890 return -EOPNOTSUPP; 887 return -EOPNOTSUPP;
891 888
892 ret = nla_parse_nested(bcn_tb, DCB_BCN_ATTR_MAX, 889 ret = nla_parse_nested(bcn_tb, DCB_BCN_ATTR_MAX,
893 tb[DCB_ATTR_BCN], dcbnl_bcn_nest); 890 tb[DCB_ATTR_BCN], dcbnl_bcn_nest);
894 if (ret) 891 if (ret)
895 return ret; 892 return ret;
896 893
897 bcn_nest = nla_nest_start(skb, DCB_ATTR_BCN); 894 bcn_nest = nla_nest_start(skb, DCB_ATTR_BCN);
898 if (!bcn_nest) 895 if (!bcn_nest)
899 return -EMSGSIZE; 896 return -EMSGSIZE;
900 897
901 if (bcn_tb[DCB_BCN_ATTR_ALL]) 898 if (bcn_tb[DCB_BCN_ATTR_ALL])
902 getall = true; 899 getall = true;
903 900
904 for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) { 901 for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
905 if (!getall && !bcn_tb[i]) 902 if (!getall && !bcn_tb[i])
906 continue; 903 continue;
907 904
908 netdev->dcbnl_ops->getbcnrp(netdev, i - DCB_BCN_ATTR_RP_0, 905 netdev->dcbnl_ops->getbcnrp(netdev, i - DCB_BCN_ATTR_RP_0,
909 &value_byte); 906 &value_byte);
910 ret = nla_put_u8(skb, i, value_byte); 907 ret = nla_put_u8(skb, i, value_byte);
911 if (ret) 908 if (ret)
912 goto err_bcn; 909 goto err_bcn;
913 } 910 }
914 911
915 for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) { 912 for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
916 if (!getall && !bcn_tb[i]) 913 if (!getall && !bcn_tb[i])
917 continue; 914 continue;
918 915
919 netdev->dcbnl_ops->getbcncfg(netdev, i, 916 netdev->dcbnl_ops->getbcncfg(netdev, i,
920 &value_integer); 917 &value_integer);
921 ret = nla_put_u32(skb, i, value_integer); 918 ret = nla_put_u32(skb, i, value_integer);
922 if (ret) 919 if (ret)
923 goto err_bcn; 920 goto err_bcn;
924 } 921 }
925 922
926 nla_nest_end(skb, bcn_nest); 923 nla_nest_end(skb, bcn_nest);
927 924
928 return 0; 925 return 0;
929 926
930 err_bcn: 927 err_bcn:
931 nla_nest_cancel(skb, bcn_nest); 928 nla_nest_cancel(skb, bcn_nest);
932 return ret; 929 return ret;
933 } 930 }
934 931
935 static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlmsghdr *nlh, 932 static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
936 u32 seq, struct nlattr **tb, struct sk_buff *skb) 933 u32 seq, struct nlattr **tb, struct sk_buff *skb)
937 { 934 {
938 struct nlattr *data[DCB_BCN_ATTR_MAX + 1]; 935 struct nlattr *data[DCB_BCN_ATTR_MAX + 1];
939 int i; 936 int i;
940 int ret; 937 int ret;
941 u8 value_byte; 938 u8 value_byte;
942 u32 value_int; 939 u32 value_int;
943 940
944 if (!tb[DCB_ATTR_BCN]) 941 if (!tb[DCB_ATTR_BCN])
945 return -EINVAL; 942 return -EINVAL;
946 943
947 if (!netdev->dcbnl_ops->setbcncfg || 944 if (!netdev->dcbnl_ops->setbcncfg ||
948 !netdev->dcbnl_ops->setbcnrp) 945 !netdev->dcbnl_ops->setbcnrp)
949 return -EOPNOTSUPP; 946 return -EOPNOTSUPP;
950 947
951 ret = nla_parse_nested(data, DCB_BCN_ATTR_MAX, 948 ret = nla_parse_nested(data, DCB_BCN_ATTR_MAX,
952 tb[DCB_ATTR_BCN], 949 tb[DCB_ATTR_BCN],
953 dcbnl_pfc_up_nest); 950 dcbnl_pfc_up_nest);
954 if (ret) 951 if (ret)
955 return ret; 952 return ret;
956 953
957 for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) { 954 for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
958 if (data[i] == NULL) 955 if (data[i] == NULL)
959 continue; 956 continue;
960 value_byte = nla_get_u8(data[i]); 957 value_byte = nla_get_u8(data[i]);
961 netdev->dcbnl_ops->setbcnrp(netdev, 958 netdev->dcbnl_ops->setbcnrp(netdev,
962 data[i]->nla_type - DCB_BCN_ATTR_RP_0, value_byte); 959 data[i]->nla_type - DCB_BCN_ATTR_RP_0, value_byte);
963 } 960 }
964 961
965 for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) { 962 for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
966 if (data[i] == NULL) 963 if (data[i] == NULL)
967 continue; 964 continue;
968 value_int = nla_get_u32(data[i]); 965 value_int = nla_get_u32(data[i]);
969 netdev->dcbnl_ops->setbcncfg(netdev, 966 netdev->dcbnl_ops->setbcncfg(netdev,
970 i, value_int); 967 i, value_int);
971 } 968 }
972 969
973 return nla_put_u8(skb, DCB_ATTR_BCN, 0); 970 return nla_put_u8(skb, DCB_ATTR_BCN, 0);
974 } 971 }
975 972
976 static int dcbnl_build_peer_app(struct net_device *netdev, struct sk_buff* skb, 973 static int dcbnl_build_peer_app(struct net_device *netdev, struct sk_buff* skb,
977 int app_nested_type, int app_info_type, 974 int app_nested_type, int app_info_type,
978 int app_entry_type) 975 int app_entry_type)
979 { 976 {
980 struct dcb_peer_app_info info; 977 struct dcb_peer_app_info info;
981 struct dcb_app *table = NULL; 978 struct dcb_app *table = NULL;
982 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops; 979 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
983 u16 app_count; 980 u16 app_count;
984 int err; 981 int err;
985 982
986 983
987 /** 984 /**
988 * retrieve the peer app configuration form the driver. If the driver 985 * retrieve the peer app configuration form the driver. If the driver
989 * handlers fail exit without doing anything 986 * handlers fail exit without doing anything
990 */ 987 */
991 err = ops->peer_getappinfo(netdev, &info, &app_count); 988 err = ops->peer_getappinfo(netdev, &info, &app_count);
992 if (!err && app_count) { 989 if (!err && app_count) {
993 table = kmalloc(sizeof(struct dcb_app) * app_count, GFP_KERNEL); 990 table = kmalloc(sizeof(struct dcb_app) * app_count, GFP_KERNEL);
994 if (!table) 991 if (!table)
995 return -ENOMEM; 992 return -ENOMEM;
996 993
997 err = ops->peer_getapptable(netdev, table); 994 err = ops->peer_getapptable(netdev, table);
998 } 995 }
999 996
1000 if (!err) { 997 if (!err) {
1001 u16 i; 998 u16 i;
1002 struct nlattr *app; 999 struct nlattr *app;
1003 1000
1004 /** 1001 /**
1005 * build the message; from here on the only possible failure 1002 * build the message; from here on the only possible failure
1006 * is due to the skb size 1003 * is due to the skb size
1007 */ 1004 */
1008 err = -EMSGSIZE; 1005 err = -EMSGSIZE;
1009 1006
1010 app = nla_nest_start(skb, app_nested_type); 1007 app = nla_nest_start(skb, app_nested_type);
1011 if (!app) 1008 if (!app)
1012 goto nla_put_failure; 1009 goto nla_put_failure;
1013 1010
1014 if (app_info_type && 1011 if (app_info_type &&
1015 nla_put(skb, app_info_type, sizeof(info), &info)) 1012 nla_put(skb, app_info_type, sizeof(info), &info))
1016 goto nla_put_failure; 1013 goto nla_put_failure;
1017 1014
1018 for (i = 0; i < app_count; i++) { 1015 for (i = 0; i < app_count; i++) {
1019 if (nla_put(skb, app_entry_type, sizeof(struct dcb_app), 1016 if (nla_put(skb, app_entry_type, sizeof(struct dcb_app),
1020 &table[i])) 1017 &table[i]))
1021 goto nla_put_failure; 1018 goto nla_put_failure;
1022 } 1019 }
1023 nla_nest_end(skb, app); 1020 nla_nest_end(skb, app);
1024 } 1021 }
1025 err = 0; 1022 err = 0;
1026 1023
1027 nla_put_failure: 1024 nla_put_failure:
1028 kfree(table); 1025 kfree(table);
1029 return err; 1026 return err;
1030 } 1027 }
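dcbnl_build_peer_app() above leaves the contract between the two peer-app callbacks implicit: peer_getappinfo() reports how many entries exist, the helper allocates that many struct dcb_app slots, and peer_getapptable() fills them. The fragment below is only an illustrative sketch of a driver-side pair; the foo_* names and the foo_priv fields are hypothetical and not part of this file.

        static int foo_peer_getappinfo(struct net_device *dev,
                                       struct dcb_peer_app_info *info,
                                       u16 *app_count)
        {
                struct foo_priv *priv = netdev_priv(dev);

                *info = priv->peer_app_info;    /* cached peer APP info */
                *app_count = priv->peer_app_count; /* entries the table call will copy */
                return 0;
        }

        static int foo_peer_getapptable(struct net_device *dev,
                                        struct dcb_app *table)
        {
                struct foo_priv *priv = netdev_priv(dev);
                u16 i;

                /* copy exactly the number of entries promised above */
                for (i = 0; i < priv->peer_app_count; i++)
                        table[i] = priv->peer_app[i];
                return 0;
        }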
1031 1028
1032 /* Handle IEEE 802.1Qaz GET commands. */ 1029 /* Handle IEEE 802.1Qaz GET commands. */
1033 static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev) 1030 static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
1034 { 1031 {
1035 struct nlattr *ieee, *app; 1032 struct nlattr *ieee, *app;
1036 struct dcb_app_type *itr; 1033 struct dcb_app_type *itr;
1037 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops; 1034 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1038 int dcbx; 1035 int dcbx;
1039 int err; 1036 int err;
1040 1037
1041 if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name)) 1038 if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
1042 return -EMSGSIZE; 1039 return -EMSGSIZE;
1043 1040
1044 ieee = nla_nest_start(skb, DCB_ATTR_IEEE); 1041 ieee = nla_nest_start(skb, DCB_ATTR_IEEE);
1045 if (!ieee) 1042 if (!ieee)
1046 return -EMSGSIZE; 1043 return -EMSGSIZE;
1047 1044
1048 if (ops->ieee_getets) { 1045 if (ops->ieee_getets) {
1049 struct ieee_ets ets; 1046 struct ieee_ets ets;
1050 err = ops->ieee_getets(netdev, &ets); 1047 err = ops->ieee_getets(netdev, &ets);
1051 if (!err && 1048 if (!err &&
1052 nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets)) 1049 nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets))
1053 return -EMSGSIZE; 1050 return -EMSGSIZE;
1054 } 1051 }
1055 1052
1056 if (ops->ieee_getmaxrate) { 1053 if (ops->ieee_getmaxrate) {
1057 struct ieee_maxrate maxrate; 1054 struct ieee_maxrate maxrate;
1058 err = ops->ieee_getmaxrate(netdev, &maxrate); 1055 err = ops->ieee_getmaxrate(netdev, &maxrate);
1059 if (!err) { 1056 if (!err) {
1060 err = nla_put(skb, DCB_ATTR_IEEE_MAXRATE, 1057 err = nla_put(skb, DCB_ATTR_IEEE_MAXRATE,
1061 sizeof(maxrate), &maxrate); 1058 sizeof(maxrate), &maxrate);
1062 if (err) 1059 if (err)
1063 return -EMSGSIZE; 1060 return -EMSGSIZE;
1064 } 1061 }
1065 } 1062 }
1066 1063
1067 if (ops->ieee_getpfc) { 1064 if (ops->ieee_getpfc) {
1068 struct ieee_pfc pfc; 1065 struct ieee_pfc pfc;
1069 err = ops->ieee_getpfc(netdev, &pfc); 1066 err = ops->ieee_getpfc(netdev, &pfc);
1070 if (!err && 1067 if (!err &&
1071 nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc)) 1068 nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc))
1072 return -EMSGSIZE; 1069 return -EMSGSIZE;
1073 } 1070 }
1074 1071
1075 app = nla_nest_start(skb, DCB_ATTR_IEEE_APP_TABLE); 1072 app = nla_nest_start(skb, DCB_ATTR_IEEE_APP_TABLE);
1076 if (!app) 1073 if (!app)
1077 return -EMSGSIZE; 1074 return -EMSGSIZE;
1078 1075
1079 spin_lock(&dcb_lock); 1076 spin_lock(&dcb_lock);
1080 list_for_each_entry(itr, &dcb_app_list, list) { 1077 list_for_each_entry(itr, &dcb_app_list, list) {
1081 if (itr->ifindex == netdev->ifindex) { 1078 if (itr->ifindex == netdev->ifindex) {
1082 err = nla_put(skb, DCB_ATTR_IEEE_APP, sizeof(itr->app), 1079 err = nla_put(skb, DCB_ATTR_IEEE_APP, sizeof(itr->app),
1083 &itr->app); 1080 &itr->app);
1084 if (err) { 1081 if (err) {
1085 spin_unlock(&dcb_lock); 1082 spin_unlock(&dcb_lock);
1086 return -EMSGSIZE; 1083 return -EMSGSIZE;
1087 } 1084 }
1088 } 1085 }
1089 } 1086 }
1090 1087
1091 if (netdev->dcbnl_ops->getdcbx) 1088 if (netdev->dcbnl_ops->getdcbx)
1092 dcbx = netdev->dcbnl_ops->getdcbx(netdev); 1089 dcbx = netdev->dcbnl_ops->getdcbx(netdev);
1093 else 1090 else
1094 dcbx = -EOPNOTSUPP; 1091 dcbx = -EOPNOTSUPP;
1095 1092
1096 spin_unlock(&dcb_lock); 1093 spin_unlock(&dcb_lock);
1097 nla_nest_end(skb, app); 1094 nla_nest_end(skb, app);
1098 1095
1099 /* get peer info if available */ 1096 /* get peer info if available */
1100 if (ops->ieee_peer_getets) { 1097 if (ops->ieee_peer_getets) {
1101 struct ieee_ets ets; 1098 struct ieee_ets ets;
1102 err = ops->ieee_peer_getets(netdev, &ets); 1099 err = ops->ieee_peer_getets(netdev, &ets);
1103 if (!err && 1100 if (!err &&
1104 nla_put(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets)) 1101 nla_put(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets))
1105 return -EMSGSIZE; 1102 return -EMSGSIZE;
1106 } 1103 }
1107 1104
1108 if (ops->ieee_peer_getpfc) { 1105 if (ops->ieee_peer_getpfc) {
1109 struct ieee_pfc pfc; 1106 struct ieee_pfc pfc;
1110 err = ops->ieee_peer_getpfc(netdev, &pfc); 1107 err = ops->ieee_peer_getpfc(netdev, &pfc);
1111 if (!err && 1108 if (!err &&
1112 nla_put(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc)) 1109 nla_put(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc))
1113 return -EMSGSIZE; 1110 return -EMSGSIZE;
1114 } 1111 }
1115 1112
1116 if (ops->peer_getappinfo && ops->peer_getapptable) { 1113 if (ops->peer_getappinfo && ops->peer_getapptable) {
1117 err = dcbnl_build_peer_app(netdev, skb, 1114 err = dcbnl_build_peer_app(netdev, skb,
1118 DCB_ATTR_IEEE_PEER_APP, 1115 DCB_ATTR_IEEE_PEER_APP,
1119 DCB_ATTR_IEEE_APP_UNSPEC, 1116 DCB_ATTR_IEEE_APP_UNSPEC,
1120 DCB_ATTR_IEEE_APP); 1117 DCB_ATTR_IEEE_APP);
1121 if (err) 1118 if (err)
1122 return -EMSGSIZE; 1119 return -EMSGSIZE;
1123 } 1120 }
1124 1121
1125 nla_nest_end(skb, ieee); 1122 nla_nest_end(skb, ieee);
1126 if (dcbx >= 0) { 1123 if (dcbx >= 0) {
1127 err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx); 1124 err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
1128 if (err) 1125 if (err)
1129 return -EMSGSIZE; 1126 return -EMSGSIZE;
1130 } 1127 }
1131 1128
1132 return 0; 1129 return 0;
1133 } 1130 }
1134 1131
1135 static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev, 1132 static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev,
1136 int dir) 1133 int dir)
1137 { 1134 {
1138 u8 pgid, up_map, prio, tc_pct; 1135 u8 pgid, up_map, prio, tc_pct;
1139 const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops; 1136 const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
1140 int i = dir ? DCB_ATTR_CEE_TX_PG : DCB_ATTR_CEE_RX_PG; 1137 int i = dir ? DCB_ATTR_CEE_TX_PG : DCB_ATTR_CEE_RX_PG;
1141 struct nlattr *pg = nla_nest_start(skb, i); 1138 struct nlattr *pg = nla_nest_start(skb, i);
1142 1139
1143 if (!pg) 1140 if (!pg)
1144 return -EMSGSIZE; 1141 return -EMSGSIZE;
1145 1142
1146 for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) { 1143 for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
1147 struct nlattr *tc_nest = nla_nest_start(skb, i); 1144 struct nlattr *tc_nest = nla_nest_start(skb, i);
1148 1145
1149 if (!tc_nest) 1146 if (!tc_nest)
1150 return -EMSGSIZE; 1147 return -EMSGSIZE;
1151 1148
1152 pgid = DCB_ATTR_VALUE_UNDEFINED; 1149 pgid = DCB_ATTR_VALUE_UNDEFINED;
1153 prio = DCB_ATTR_VALUE_UNDEFINED; 1150 prio = DCB_ATTR_VALUE_UNDEFINED;
1154 tc_pct = DCB_ATTR_VALUE_UNDEFINED; 1151 tc_pct = DCB_ATTR_VALUE_UNDEFINED;
1155 up_map = DCB_ATTR_VALUE_UNDEFINED; 1152 up_map = DCB_ATTR_VALUE_UNDEFINED;
1156 1153
1157 if (!dir) 1154 if (!dir)
1158 ops->getpgtccfgrx(dev, i - DCB_PG_ATTR_TC_0, 1155 ops->getpgtccfgrx(dev, i - DCB_PG_ATTR_TC_0,
1159 &prio, &pgid, &tc_pct, &up_map); 1156 &prio, &pgid, &tc_pct, &up_map);
1160 else 1157 else
1161 ops->getpgtccfgtx(dev, i - DCB_PG_ATTR_TC_0, 1158 ops->getpgtccfgtx(dev, i - DCB_PG_ATTR_TC_0,
1162 &prio, &pgid, &tc_pct, &up_map); 1159 &prio, &pgid, &tc_pct, &up_map);
1163 1160
1164 if (nla_put_u8(skb, DCB_TC_ATTR_PARAM_PGID, pgid) || 1161 if (nla_put_u8(skb, DCB_TC_ATTR_PARAM_PGID, pgid) ||
1165 nla_put_u8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map) || 1162 nla_put_u8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map) ||
1166 nla_put_u8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio) || 1163 nla_put_u8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio) ||
1167 nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct)) 1164 nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct))
1168 return -EMSGSIZE; 1165 return -EMSGSIZE;
1169 nla_nest_end(skb, tc_nest); 1166 nla_nest_end(skb, tc_nest);
1170 } 1167 }
1171 1168
1172 for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) { 1169 for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
1173 tc_pct = DCB_ATTR_VALUE_UNDEFINED; 1170 tc_pct = DCB_ATTR_VALUE_UNDEFINED;
1174 1171
1175 if (!dir) 1172 if (!dir)
1176 ops->getpgbwgcfgrx(dev, i - DCB_PG_ATTR_BW_ID_0, 1173 ops->getpgbwgcfgrx(dev, i - DCB_PG_ATTR_BW_ID_0,
1177 &tc_pct); 1174 &tc_pct);
1178 else 1175 else
1179 ops->getpgbwgcfgtx(dev, i - DCB_PG_ATTR_BW_ID_0, 1176 ops->getpgbwgcfgtx(dev, i - DCB_PG_ATTR_BW_ID_0,
1180 &tc_pct); 1177 &tc_pct);
1181 if (nla_put_u8(skb, i, tc_pct)) 1178 if (nla_put_u8(skb, i, tc_pct))
1182 return -EMSGSIZE; 1179 return -EMSGSIZE;
1183 } 1180 }
1184 nla_nest_end(skb, pg); 1181 nla_nest_end(skb, pg);
1185 return 0; 1182 return 0;
1186 } 1183 }
1187 1184
1188 static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev) 1185 static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
1189 { 1186 {
1190 struct nlattr *cee, *app; 1187 struct nlattr *cee, *app;
1191 struct dcb_app_type *itr; 1188 struct dcb_app_type *itr;
1192 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops; 1189 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1193 int dcbx, i, err = -EMSGSIZE; 1190 int dcbx, i, err = -EMSGSIZE;
1194 u8 value; 1191 u8 value;
1195 1192
1196 if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name)) 1193 if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
1197 goto nla_put_failure; 1194 goto nla_put_failure;
1198 cee = nla_nest_start(skb, DCB_ATTR_CEE); 1195 cee = nla_nest_start(skb, DCB_ATTR_CEE);
1199 if (!cee) 1196 if (!cee)
1200 goto nla_put_failure; 1197 goto nla_put_failure;
1201 1198
1202 /* local pg */ 1199 /* local pg */
1203 if (ops->getpgtccfgtx && ops->getpgbwgcfgtx) { 1200 if (ops->getpgtccfgtx && ops->getpgbwgcfgtx) {
1204 err = dcbnl_cee_pg_fill(skb, netdev, 1); 1201 err = dcbnl_cee_pg_fill(skb, netdev, 1);
1205 if (err) 1202 if (err)
1206 goto nla_put_failure; 1203 goto nla_put_failure;
1207 } 1204 }
1208 1205
1209 if (ops->getpgtccfgrx && ops->getpgbwgcfgrx) { 1206 if (ops->getpgtccfgrx && ops->getpgbwgcfgrx) {
1210 err = dcbnl_cee_pg_fill(skb, netdev, 0); 1207 err = dcbnl_cee_pg_fill(skb, netdev, 0);
1211 if (err) 1208 if (err)
1212 goto nla_put_failure; 1209 goto nla_put_failure;
1213 } 1210 }
1214 1211
1215 /* local pfc */ 1212 /* local pfc */
1216 if (ops->getpfccfg) { 1213 if (ops->getpfccfg) {
1217 struct nlattr *pfc_nest = nla_nest_start(skb, DCB_ATTR_CEE_PFC); 1214 struct nlattr *pfc_nest = nla_nest_start(skb, DCB_ATTR_CEE_PFC);
1218 1215
1219 if (!pfc_nest) 1216 if (!pfc_nest)
1220 goto nla_put_failure; 1217 goto nla_put_failure;
1221 1218
1222 for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) { 1219 for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
1223 ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0, &value); 1220 ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0, &value);
1224 if (nla_put_u8(skb, i, value)) 1221 if (nla_put_u8(skb, i, value))
1225 goto nla_put_failure; 1222 goto nla_put_failure;
1226 } 1223 }
1227 nla_nest_end(skb, pfc_nest); 1224 nla_nest_end(skb, pfc_nest);
1228 } 1225 }
1229 1226
1230 /* local app */ 1227 /* local app */
1231 spin_lock(&dcb_lock); 1228 spin_lock(&dcb_lock);
1232 app = nla_nest_start(skb, DCB_ATTR_CEE_APP_TABLE); 1229 app = nla_nest_start(skb, DCB_ATTR_CEE_APP_TABLE);
1233 if (!app) 1230 if (!app)
1234 goto dcb_unlock; 1231 goto dcb_unlock;
1235 1232
1236 list_for_each_entry(itr, &dcb_app_list, list) { 1233 list_for_each_entry(itr, &dcb_app_list, list) {
1237 if (itr->ifindex == netdev->ifindex) { 1234 if (itr->ifindex == netdev->ifindex) {
1238 struct nlattr *app_nest = nla_nest_start(skb, 1235 struct nlattr *app_nest = nla_nest_start(skb,
1239 DCB_ATTR_APP); 1236 DCB_ATTR_APP);
1240 if (!app_nest) 1237 if (!app_nest)
1241 goto dcb_unlock; 1238 goto dcb_unlock;
1242 1239
1243 err = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE, 1240 err = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE,
1244 itr->app.selector); 1241 itr->app.selector);
1245 if (err) 1242 if (err)
1246 goto dcb_unlock; 1243 goto dcb_unlock;
1247 1244
1248 err = nla_put_u16(skb, DCB_APP_ATTR_ID, 1245 err = nla_put_u16(skb, DCB_APP_ATTR_ID,
1249 itr->app.protocol); 1246 itr->app.protocol);
1250 if (err) 1247 if (err)
1251 goto dcb_unlock; 1248 goto dcb_unlock;
1252 1249
1253 err = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY, 1250 err = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY,
1254 itr->app.priority); 1251 itr->app.priority);
1255 if (err) 1252 if (err)
1256 goto dcb_unlock; 1253 goto dcb_unlock;
1257 1254
1258 nla_nest_end(skb, app_nest); 1255 nla_nest_end(skb, app_nest);
1259 } 1256 }
1260 } 1257 }
1261 nla_nest_end(skb, app); 1258 nla_nest_end(skb, app);
1262 1259
1263 if (netdev->dcbnl_ops->getdcbx) 1260 if (netdev->dcbnl_ops->getdcbx)
1264 dcbx = netdev->dcbnl_ops->getdcbx(netdev); 1261 dcbx = netdev->dcbnl_ops->getdcbx(netdev);
1265 else 1262 else
1266 dcbx = -EOPNOTSUPP; 1263 dcbx = -EOPNOTSUPP;
1267 1264
1268 spin_unlock(&dcb_lock); 1265 spin_unlock(&dcb_lock);
1269 1266
1270 /* features flags */ 1267 /* features flags */
1271 if (ops->getfeatcfg) { 1268 if (ops->getfeatcfg) {
1272 struct nlattr *feat = nla_nest_start(skb, DCB_ATTR_CEE_FEAT); 1269 struct nlattr *feat = nla_nest_start(skb, DCB_ATTR_CEE_FEAT);
1273 if (!feat) 1270 if (!feat)
1274 goto nla_put_failure; 1271 goto nla_put_failure;
1275 1272
1276 for (i = DCB_FEATCFG_ATTR_ALL + 1; i <= DCB_FEATCFG_ATTR_MAX; 1273 for (i = DCB_FEATCFG_ATTR_ALL + 1; i <= DCB_FEATCFG_ATTR_MAX;
1277 i++) 1274 i++)
1278 if (!ops->getfeatcfg(netdev, i, &value) && 1275 if (!ops->getfeatcfg(netdev, i, &value) &&
1279 nla_put_u8(skb, i, value)) 1276 nla_put_u8(skb, i, value))
1280 goto nla_put_failure; 1277 goto nla_put_failure;
1281 1278
1282 nla_nest_end(skb, feat); 1279 nla_nest_end(skb, feat);
1283 } 1280 }
1284 1281
1285 /* peer info if available */ 1282 /* peer info if available */
1286 if (ops->cee_peer_getpg) { 1283 if (ops->cee_peer_getpg) {
1287 struct cee_pg pg; 1284 struct cee_pg pg;
1288 err = ops->cee_peer_getpg(netdev, &pg); 1285 err = ops->cee_peer_getpg(netdev, &pg);
1289 if (!err && 1286 if (!err &&
1290 nla_put(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg)) 1287 nla_put(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg))
1291 goto nla_put_failure; 1288 goto nla_put_failure;
1292 } 1289 }
1293 1290
1294 if (ops->cee_peer_getpfc) { 1291 if (ops->cee_peer_getpfc) {
1295 struct cee_pfc pfc; 1292 struct cee_pfc pfc;
1296 err = ops->cee_peer_getpfc(netdev, &pfc); 1293 err = ops->cee_peer_getpfc(netdev, &pfc);
1297 if (!err && 1294 if (!err &&
1298 nla_put(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc)) 1295 nla_put(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc))
1299 goto nla_put_failure; 1296 goto nla_put_failure;
1300 } 1297 }
1301 1298
1302 if (ops->peer_getappinfo && ops->peer_getapptable) { 1299 if (ops->peer_getappinfo && ops->peer_getapptable) {
1303 err = dcbnl_build_peer_app(netdev, skb, 1300 err = dcbnl_build_peer_app(netdev, skb,
1304 DCB_ATTR_CEE_PEER_APP_TABLE, 1301 DCB_ATTR_CEE_PEER_APP_TABLE,
1305 DCB_ATTR_CEE_PEER_APP_INFO, 1302 DCB_ATTR_CEE_PEER_APP_INFO,
1306 DCB_ATTR_CEE_PEER_APP); 1303 DCB_ATTR_CEE_PEER_APP);
1307 if (err) 1304 if (err)
1308 goto nla_put_failure; 1305 goto nla_put_failure;
1309 } 1306 }
1310 nla_nest_end(skb, cee); 1307 nla_nest_end(skb, cee);
1311 1308
1312 /* DCBX state */ 1309 /* DCBX state */
1313 if (dcbx >= 0) { 1310 if (dcbx >= 0) {
1314 err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx); 1311 err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
1315 if (err) 1312 if (err)
1316 goto nla_put_failure; 1313 goto nla_put_failure;
1317 } 1314 }
1318 return 0; 1315 return 0;
1319 1316
1320 dcb_unlock: 1317 dcb_unlock:
1321 spin_unlock(&dcb_lock); 1318 spin_unlock(&dcb_lock);
1322 nla_put_failure: 1319 nla_put_failure:
1323 return err; 1320 return err;
1324 } 1321 }
1325 1322
1326 static int dcbnl_notify(struct net_device *dev, int event, int cmd, 1323 static int dcbnl_notify(struct net_device *dev, int event, int cmd,
1327 u32 seq, u32 pid, int dcbx_ver) 1324 u32 seq, u32 pid, int dcbx_ver)
1328 { 1325 {
1329 struct net *net = dev_net(dev); 1326 struct net *net = dev_net(dev);
1330 struct sk_buff *skb; 1327 struct sk_buff *skb;
1331 struct nlmsghdr *nlh; 1328 struct nlmsghdr *nlh;
1332 const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops; 1329 const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
1333 int err; 1330 int err;
1334 1331
1335 if (!ops) 1332 if (!ops)
1336 return -EOPNOTSUPP; 1333 return -EOPNOTSUPP;
1337 1334
1338 skb = dcbnl_newmsg(event, cmd, pid, seq, 0, &nlh); 1335 skb = dcbnl_newmsg(event, cmd, pid, seq, 0, &nlh);
1339 if (!skb) 1336 if (!skb)
1340 return -ENOBUFS; 1337 return -ENOBUFS;
1341 1338
1342 if (dcbx_ver == DCB_CAP_DCBX_VER_IEEE) 1339 if (dcbx_ver == DCB_CAP_DCBX_VER_IEEE)
1343 err = dcbnl_ieee_fill(skb, dev); 1340 err = dcbnl_ieee_fill(skb, dev);
1344 else 1341 else
1345 err = dcbnl_cee_fill(skb, dev); 1342 err = dcbnl_cee_fill(skb, dev);
1346 1343
1347 if (err < 0) { 1344 if (err < 0) {
1348 /* Report error to broadcast listeners */ 1345 /* Report error to broadcast listeners */
1349 nlmsg_free(skb); 1346 nlmsg_free(skb);
1350 rtnl_set_sk_err(net, RTNLGRP_DCB, err); 1347 rtnl_set_sk_err(net, RTNLGRP_DCB, err);
1351 } else { 1348 } else {
1352 /* End nlmsg and notify broadcast listeners */ 1349 /* End nlmsg and notify broadcast listeners */
1353 nlmsg_end(skb, nlh); 1350 nlmsg_end(skb, nlh);
1354 rtnl_notify(skb, net, 0, RTNLGRP_DCB, NULL, GFP_KERNEL); 1351 rtnl_notify(skb, net, 0, RTNLGRP_DCB, NULL, GFP_KERNEL);
1355 } 1352 }
1356 1353
1357 return err; 1354 return err;
1358 } 1355 }
1359 1356
1360 int dcbnl_ieee_notify(struct net_device *dev, int event, int cmd, 1357 int dcbnl_ieee_notify(struct net_device *dev, int event, int cmd,
1361 u32 seq, u32 pid) 1358 u32 seq, u32 pid)
1362 { 1359 {
1363 return dcbnl_notify(dev, event, cmd, seq, pid, DCB_CAP_DCBX_VER_IEEE); 1360 return dcbnl_notify(dev, event, cmd, seq, pid, DCB_CAP_DCBX_VER_IEEE);
1364 } 1361 }
1365 EXPORT_SYMBOL(dcbnl_ieee_notify); 1362 EXPORT_SYMBOL(dcbnl_ieee_notify);
1366 1363
1367 int dcbnl_cee_notify(struct net_device *dev, int event, int cmd, 1364 int dcbnl_cee_notify(struct net_device *dev, int event, int cmd,
1368 u32 seq, u32 pid) 1365 u32 seq, u32 pid)
1369 { 1366 {
1370 return dcbnl_notify(dev, event, cmd, seq, pid, DCB_CAP_DCBX_VER_CEE); 1367 return dcbnl_notify(dev, event, cmd, seq, pid, DCB_CAP_DCBX_VER_CEE);
1371 } 1368 }
1372 EXPORT_SYMBOL(dcbnl_cee_notify); 1369 EXPORT_SYMBOL(dcbnl_cee_notify);
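dcbnl_ieee_notify() and dcbnl_cee_notify() are exported for drivers whose DCB state can change outside the rtnetlink path, for example when an embedded firmware DCBX agent renegotiates with the link peer; the helpers rebuild the full IEEE or CEE dump and multicast it to RTNLGRP_DCB listeners such as lldpad. A minimal sketch of such a use, with purely hypothetical foo_* names:

        static void foo_fw_dcbx_changed(struct net_device *dev)
        {
                /* refresh the driver's cached DCB state from firmware,
                 * then tell user space that the configuration changed
                 */
                foo_refresh_dcb_cache(dev);     /* hypothetical */
                dcbnl_ieee_notify(dev, RTM_SETDCB, DCB_CMD_IEEE_SET, 0, 0);
        }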
1373 1370
1374 /* Handle IEEE 802.1Qaz SET commands. If any requested operation cannot 1371 /* Handle IEEE 802.1Qaz SET commands. If any requested operation cannot
1375 * be completed, the entire msg is aborted and an error value is returned. 1372 * be completed, the entire msg is aborted and an error value is returned.
1376 * No attempt is made to reconcile the case where only part of the 1373 * No attempt is made to reconcile the case where only part of the
1377 * cmd can be completed. 1374 * cmd can be completed.
1378 */ 1375 */
1379 static int dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh, 1376 static int dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh,
1380 u32 seq, struct nlattr **tb, struct sk_buff *skb) 1377 u32 seq, struct nlattr **tb, struct sk_buff *skb)
1381 { 1378 {
1382 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops; 1379 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1383 struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1]; 1380 struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
1384 int err; 1381 int err;
1385 1382
1386 if (!ops) 1383 if (!ops)
1387 return -EOPNOTSUPP; 1384 return -EOPNOTSUPP;
1388 1385
1389 if (!tb[DCB_ATTR_IEEE]) 1386 if (!tb[DCB_ATTR_IEEE])
1390 return -EINVAL; 1387 return -EINVAL;
1391 1388
1392 err = nla_parse_nested(ieee, DCB_ATTR_IEEE_MAX, 1389 err = nla_parse_nested(ieee, DCB_ATTR_IEEE_MAX,
1393 tb[DCB_ATTR_IEEE], dcbnl_ieee_policy); 1390 tb[DCB_ATTR_IEEE], dcbnl_ieee_policy);
1394 if (err) 1391 if (err)
1395 return err; 1392 return err;
1396 1393
1397 if (ieee[DCB_ATTR_IEEE_ETS] && ops->ieee_setets) { 1394 if (ieee[DCB_ATTR_IEEE_ETS] && ops->ieee_setets) {
1398 struct ieee_ets *ets = nla_data(ieee[DCB_ATTR_IEEE_ETS]); 1395 struct ieee_ets *ets = nla_data(ieee[DCB_ATTR_IEEE_ETS]);
1399 err = ops->ieee_setets(netdev, ets); 1396 err = ops->ieee_setets(netdev, ets);
1400 if (err) 1397 if (err)
1401 goto err; 1398 goto err;
1402 } 1399 }
1403 1400
1404 if (ieee[DCB_ATTR_IEEE_MAXRATE] && ops->ieee_setmaxrate) { 1401 if (ieee[DCB_ATTR_IEEE_MAXRATE] && ops->ieee_setmaxrate) {
1405 struct ieee_maxrate *maxrate = 1402 struct ieee_maxrate *maxrate =
1406 nla_data(ieee[DCB_ATTR_IEEE_MAXRATE]); 1403 nla_data(ieee[DCB_ATTR_IEEE_MAXRATE]);
1407 err = ops->ieee_setmaxrate(netdev, maxrate); 1404 err = ops->ieee_setmaxrate(netdev, maxrate);
1408 if (err) 1405 if (err)
1409 goto err; 1406 goto err;
1410 } 1407 }
1411 1408
1412 if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) { 1409 if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) {
1413 struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]); 1410 struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]);
1414 err = ops->ieee_setpfc(netdev, pfc); 1411 err = ops->ieee_setpfc(netdev, pfc);
1415 if (err) 1412 if (err)
1416 goto err; 1413 goto err;
1417 } 1414 }
1418 1415
1419 if (ieee[DCB_ATTR_IEEE_APP_TABLE]) { 1416 if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
1420 struct nlattr *attr; 1417 struct nlattr *attr;
1421 int rem; 1418 int rem;
1422 1419
1423 nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) { 1420 nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
1424 struct dcb_app *app_data; 1421 struct dcb_app *app_data;
1425 if (nla_type(attr) != DCB_ATTR_IEEE_APP) 1422 if (nla_type(attr) != DCB_ATTR_IEEE_APP)
1426 continue; 1423 continue;
1427 app_data = nla_data(attr); 1424 app_data = nla_data(attr);
1428 if (ops->ieee_setapp) 1425 if (ops->ieee_setapp)
1429 err = ops->ieee_setapp(netdev, app_data); 1426 err = ops->ieee_setapp(netdev, app_data);
1430 else 1427 else
1431 err = dcb_ieee_setapp(netdev, app_data); 1428 err = dcb_ieee_setapp(netdev, app_data);
1432 if (err) 1429 if (err)
1433 goto err; 1430 goto err;
1434 } 1431 }
1435 } 1432 }
1436 1433
1437 err: 1434 err:
1438 err = nla_put_u8(skb, DCB_ATTR_IEEE, err); 1435 err = nla_put_u8(skb, DCB_ATTR_IEEE, err);
1439 dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_SET, seq, 0); 1436 dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_SET, seq, 0);
1440 return err; 1437 return err;
1441 } 1438 }
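Which of the branches in dcbnl_ieee_set() are reachable depends entirely on the hooks a driver publishes in its struct dcbnl_rtnl_ops. The sketch below shows only the wiring for a minimal IEEE-mode driver; the foo_* callbacks are hypothetical and real drivers usually fill in many more hooks. Note that without an .ieee_setapp hook, APP entries fall back to dcb_ieee_setapp() in the shared list.

        static const struct dcbnl_rtnl_ops foo_dcbnl_ops = {
                .ieee_getets    = foo_ieee_getets,
                .ieee_setets    = foo_ieee_setets,
                .ieee_getpfc    = foo_ieee_getpfc,
                .ieee_setpfc    = foo_ieee_setpfc,
                .getdcbx        = foo_getdcbx,
                .setdcbx        = foo_setdcbx,
        };

        /* during netdev setup, before register_netdev() */
        netdev->dcbnl_ops = &foo_dcbnl_ops;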
1442 1439
1443 static int dcbnl_ieee_get(struct net_device *netdev, struct nlmsghdr *nlh, 1440 static int dcbnl_ieee_get(struct net_device *netdev, struct nlmsghdr *nlh,
1444 u32 seq, struct nlattr **tb, struct sk_buff *skb) 1441 u32 seq, struct nlattr **tb, struct sk_buff *skb)
1445 { 1442 {
1446 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops; 1443 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1447 1444
1448 if (!ops) 1445 if (!ops)
1449 return -EOPNOTSUPP; 1446 return -EOPNOTSUPP;
1450 1447
1451 return dcbnl_ieee_fill(skb, netdev); 1448 return dcbnl_ieee_fill(skb, netdev);
1452 } 1449 }
1453 1450
1454 static int dcbnl_ieee_del(struct net_device *netdev, struct nlmsghdr *nlh, 1451 static int dcbnl_ieee_del(struct net_device *netdev, struct nlmsghdr *nlh,
1455 u32 seq, struct nlattr **tb, struct sk_buff *skb) 1452 u32 seq, struct nlattr **tb, struct sk_buff *skb)
1456 { 1453 {
1457 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops; 1454 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1458 struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1]; 1455 struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
1459 int err; 1456 int err;
1460 1457
1461 if (!ops) 1458 if (!ops)
1462 return -EOPNOTSUPP; 1459 return -EOPNOTSUPP;
1463 1460
1464 if (!tb[DCB_ATTR_IEEE]) 1461 if (!tb[DCB_ATTR_IEEE])
1465 return -EINVAL; 1462 return -EINVAL;
1466 1463
1467 err = nla_parse_nested(ieee, DCB_ATTR_IEEE_MAX, 1464 err = nla_parse_nested(ieee, DCB_ATTR_IEEE_MAX,
1468 tb[DCB_ATTR_IEEE], dcbnl_ieee_policy); 1465 tb[DCB_ATTR_IEEE], dcbnl_ieee_policy);
1469 if (err) 1466 if (err)
1470 return err; 1467 return err;
1471 1468
1472 if (ieee[DCB_ATTR_IEEE_APP_TABLE]) { 1469 if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
1473 struct nlattr *attr; 1470 struct nlattr *attr;
1474 int rem; 1471 int rem;
1475 1472
1476 nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) { 1473 nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
1477 struct dcb_app *app_data; 1474 struct dcb_app *app_data;
1478 1475
1479 if (nla_type(attr) != DCB_ATTR_IEEE_APP) 1476 if (nla_type(attr) != DCB_ATTR_IEEE_APP)
1480 continue; 1477 continue;
1481 app_data = nla_data(attr); 1478 app_data = nla_data(attr);
1482 if (ops->ieee_delapp) 1479 if (ops->ieee_delapp)
1483 err = ops->ieee_delapp(netdev, app_data); 1480 err = ops->ieee_delapp(netdev, app_data);
1484 else 1481 else
1485 err = dcb_ieee_delapp(netdev, app_data); 1482 err = dcb_ieee_delapp(netdev, app_data);
1486 if (err) 1483 if (err)
1487 goto err; 1484 goto err;
1488 } 1485 }
1489 } 1486 }
1490 1487
1491 err: 1488 err:
1492 err = nla_put_u8(skb, DCB_ATTR_IEEE, err); 1489 err = nla_put_u8(skb, DCB_ATTR_IEEE, err);
1493 dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_DEL, seq, 0); 1490 dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_DEL, seq, 0);
1494 return err; 1491 return err;
1495 } 1492 }
1496 1493
1497 1494
1498 /* DCBX configuration */ 1495 /* DCBX configuration */
1499 static int dcbnl_getdcbx(struct net_device *netdev, struct nlmsghdr *nlh, 1496 static int dcbnl_getdcbx(struct net_device *netdev, struct nlmsghdr *nlh,
1500 u32 seq, struct nlattr **tb, struct sk_buff *skb) 1497 u32 seq, struct nlattr **tb, struct sk_buff *skb)
1501 { 1498 {
1502 if (!netdev->dcbnl_ops->getdcbx) 1499 if (!netdev->dcbnl_ops->getdcbx)
1503 return -EOPNOTSUPP; 1500 return -EOPNOTSUPP;
1504 1501
1505 return nla_put_u8(skb, DCB_ATTR_DCBX, 1502 return nla_put_u8(skb, DCB_ATTR_DCBX,
1506 netdev->dcbnl_ops->getdcbx(netdev)); 1503 netdev->dcbnl_ops->getdcbx(netdev));
1507 } 1504 }
1508 1505
1509 static int dcbnl_setdcbx(struct net_device *netdev, struct nlmsghdr *nlh, 1506 static int dcbnl_setdcbx(struct net_device *netdev, struct nlmsghdr *nlh,
1510 u32 seq, struct nlattr **tb, struct sk_buff *skb) 1507 u32 seq, struct nlattr **tb, struct sk_buff *skb)
1511 { 1508 {
1512 u8 value; 1509 u8 value;
1513 1510
1514 if (!netdev->dcbnl_ops->setdcbx) 1511 if (!netdev->dcbnl_ops->setdcbx)
1515 return -EOPNOTSUPP; 1512 return -EOPNOTSUPP;
1516 1513
1517 if (!tb[DCB_ATTR_DCBX]) 1514 if (!tb[DCB_ATTR_DCBX])
1518 return -EINVAL; 1515 return -EINVAL;
1519 1516
1520 value = nla_get_u8(tb[DCB_ATTR_DCBX]); 1517 value = nla_get_u8(tb[DCB_ATTR_DCBX]);
1521 1518
1522 return nla_put_u8(skb, DCB_ATTR_DCBX, 1519 return nla_put_u8(skb, DCB_ATTR_DCBX,
1523 netdev->dcbnl_ops->setdcbx(netdev, value)); 1520 netdev->dcbnl_ops->setdcbx(netdev, value));
1524 } 1521 }
1525 1522
1526 static int dcbnl_getfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh, 1523 static int dcbnl_getfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh,
1527 u32 seq, struct nlattr **tb, struct sk_buff *skb) 1524 u32 seq, struct nlattr **tb, struct sk_buff *skb)
1528 { 1525 {
1529 struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1], *nest; 1526 struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1], *nest;
1530 u8 value; 1527 u8 value;
1531 int ret, i; 1528 int ret, i;
1532 int getall = 0; 1529 int getall = 0;
1533 1530
1534 if (!netdev->dcbnl_ops->getfeatcfg) 1531 if (!netdev->dcbnl_ops->getfeatcfg)
1535 return -EOPNOTSUPP; 1532 return -EOPNOTSUPP;
1536 1533
1537 if (!tb[DCB_ATTR_FEATCFG]) 1534 if (!tb[DCB_ATTR_FEATCFG])
1538 return -EINVAL; 1535 return -EINVAL;
1539 1536
1540 ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX, tb[DCB_ATTR_FEATCFG], 1537 ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX, tb[DCB_ATTR_FEATCFG],
1541 dcbnl_featcfg_nest); 1538 dcbnl_featcfg_nest);
1542 if (ret) 1539 if (ret)
1543 return ret; 1540 return ret;
1544 1541
1545 nest = nla_nest_start(skb, DCB_ATTR_FEATCFG); 1542 nest = nla_nest_start(skb, DCB_ATTR_FEATCFG);
1546 if (!nest) 1543 if (!nest)
1547 return -EMSGSIZE; 1544 return -EMSGSIZE;
1548 1545
1549 if (data[DCB_FEATCFG_ATTR_ALL]) 1546 if (data[DCB_FEATCFG_ATTR_ALL])
1550 getall = 1; 1547 getall = 1;
1551 1548
1552 for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) { 1549 for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
1553 if (!getall && !data[i]) 1550 if (!getall && !data[i])
1554 continue; 1551 continue;
1555 1552
1556 ret = netdev->dcbnl_ops->getfeatcfg(netdev, i, &value); 1553 ret = netdev->dcbnl_ops->getfeatcfg(netdev, i, &value);
1557 if (!ret) 1554 if (!ret)
1558 ret = nla_put_u8(skb, i, value); 1555 ret = nla_put_u8(skb, i, value);
1559 1556
1560 if (ret) { 1557 if (ret) {
1561 nla_nest_cancel(skb, nest); 1558 nla_nest_cancel(skb, nest);
1562 goto nla_put_failure; 1559 goto nla_put_failure;
1563 } 1560 }
1564 } 1561 }
1565 nla_nest_end(skb, nest); 1562 nla_nest_end(skb, nest);
1566 1563
1567 nla_put_failure: 1564 nla_put_failure:
1568 return ret; 1565 return ret;
1569 } 1566 }
1570 1567
1571 static int dcbnl_setfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh, 1568 static int dcbnl_setfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh,
1572 u32 seq, struct nlattr **tb, struct sk_buff *skb) 1569 u32 seq, struct nlattr **tb, struct sk_buff *skb)
1573 { 1570 {
1574 struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1]; 1571 struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1];
1575 int ret, i; 1572 int ret, i;
1576 u8 value; 1573 u8 value;
1577 1574
1578 if (!netdev->dcbnl_ops->setfeatcfg) 1575 if (!netdev->dcbnl_ops->setfeatcfg)
1579 return -ENOTSUPP; 1576 return -ENOTSUPP;
1580 1577
1581 if (!tb[DCB_ATTR_FEATCFG]) 1578 if (!tb[DCB_ATTR_FEATCFG])
1582 return -EINVAL; 1579 return -EINVAL;
1583 1580
1584 ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX, tb[DCB_ATTR_FEATCFG], 1581 ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX, tb[DCB_ATTR_FEATCFG],
1585 dcbnl_featcfg_nest); 1582 dcbnl_featcfg_nest);
1586 1583
1587 if (ret) 1584 if (ret)
1588 goto err; 1585 goto err;
1589 1586
1590 for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) { 1587 for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
1591 if (data[i] == NULL) 1588 if (data[i] == NULL)
1592 continue; 1589 continue;
1593 1590
1594 value = nla_get_u8(data[i]); 1591 value = nla_get_u8(data[i]);
1595 1592
1596 ret = netdev->dcbnl_ops->setfeatcfg(netdev, i, value); 1593 ret = netdev->dcbnl_ops->setfeatcfg(netdev, i, value);
1597 1594
1598 if (ret) 1595 if (ret)
1599 goto err; 1596 goto err;
1600 } 1597 }
1601 err: 1598 err:
1602 ret = nla_put_u8(skb, DCB_ATTR_FEATCFG, ret); 1599 ret = nla_put_u8(skb, DCB_ATTR_FEATCFG, ret);
1603 1600
1604 return ret; 1601 return ret;
1605 } 1602 }
1606 1603
1607 /* Handle CEE DCBX GET commands. */ 1604 /* Handle CEE DCBX GET commands. */
1608 static int dcbnl_cee_get(struct net_device *netdev, struct nlmsghdr *nlh, 1605 static int dcbnl_cee_get(struct net_device *netdev, struct nlmsghdr *nlh,
1609 u32 seq, struct nlattr **tb, struct sk_buff *skb) 1606 u32 seq, struct nlattr **tb, struct sk_buff *skb)
1610 { 1607 {
1611 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops; 1608 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1612 1609
1613 if (!ops) 1610 if (!ops)
1614 return -EOPNOTSUPP; 1611 return -EOPNOTSUPP;
1615 1612
1616 return dcbnl_cee_fill(skb, netdev); 1613 return dcbnl_cee_fill(skb, netdev);
1617 } 1614 }
1618 1615
1619 struct reply_func { 1616 struct reply_func {
1620 /* reply netlink message type */ 1617 /* reply netlink message type */
1621 int type; 1618 int type;
1622 1619
1623 /* function to fill message contents */ 1620 /* function to fill message contents */
1624 int (*cb)(struct net_device *, struct nlmsghdr *, u32, 1621 int (*cb)(struct net_device *, struct nlmsghdr *, u32,
1625 struct nlattr **, struct sk_buff *); 1622 struct nlattr **, struct sk_buff *);
1626 }; 1623 };
1627 1624
1628 static const struct reply_func reply_funcs[DCB_CMD_MAX+1] = { 1625 static const struct reply_func reply_funcs[DCB_CMD_MAX+1] = {
1629 [DCB_CMD_GSTATE] = { RTM_GETDCB, dcbnl_getstate }, 1626 [DCB_CMD_GSTATE] = { RTM_GETDCB, dcbnl_getstate },
1630 [DCB_CMD_SSTATE] = { RTM_SETDCB, dcbnl_setstate }, 1627 [DCB_CMD_SSTATE] = { RTM_SETDCB, dcbnl_setstate },
1631 [DCB_CMD_PFC_GCFG] = { RTM_GETDCB, dcbnl_getpfccfg }, 1628 [DCB_CMD_PFC_GCFG] = { RTM_GETDCB, dcbnl_getpfccfg },
1632 [DCB_CMD_PFC_SCFG] = { RTM_SETDCB, dcbnl_setpfccfg }, 1629 [DCB_CMD_PFC_SCFG] = { RTM_SETDCB, dcbnl_setpfccfg },
1633 [DCB_CMD_GPERM_HWADDR] = { RTM_GETDCB, dcbnl_getperm_hwaddr }, 1630 [DCB_CMD_GPERM_HWADDR] = { RTM_GETDCB, dcbnl_getperm_hwaddr },
1634 [DCB_CMD_GCAP] = { RTM_GETDCB, dcbnl_getcap }, 1631 [DCB_CMD_GCAP] = { RTM_GETDCB, dcbnl_getcap },
1635 [DCB_CMD_GNUMTCS] = { RTM_GETDCB, dcbnl_getnumtcs }, 1632 [DCB_CMD_GNUMTCS] = { RTM_GETDCB, dcbnl_getnumtcs },
1636 [DCB_CMD_SNUMTCS] = { RTM_SETDCB, dcbnl_setnumtcs }, 1633 [DCB_CMD_SNUMTCS] = { RTM_SETDCB, dcbnl_setnumtcs },
1637 [DCB_CMD_PFC_GSTATE] = { RTM_GETDCB, dcbnl_getpfcstate }, 1634 [DCB_CMD_PFC_GSTATE] = { RTM_GETDCB, dcbnl_getpfcstate },
1638 [DCB_CMD_PFC_SSTATE] = { RTM_SETDCB, dcbnl_setpfcstate }, 1635 [DCB_CMD_PFC_SSTATE] = { RTM_SETDCB, dcbnl_setpfcstate },
1639 [DCB_CMD_GAPP] = { RTM_GETDCB, dcbnl_getapp }, 1636 [DCB_CMD_GAPP] = { RTM_GETDCB, dcbnl_getapp },
1640 [DCB_CMD_SAPP] = { RTM_SETDCB, dcbnl_setapp }, 1637 [DCB_CMD_SAPP] = { RTM_SETDCB, dcbnl_setapp },
1641 [DCB_CMD_PGTX_GCFG] = { RTM_GETDCB, dcbnl_pgtx_getcfg }, 1638 [DCB_CMD_PGTX_GCFG] = { RTM_GETDCB, dcbnl_pgtx_getcfg },
1642 [DCB_CMD_PGTX_SCFG] = { RTM_SETDCB, dcbnl_pgtx_setcfg }, 1639 [DCB_CMD_PGTX_SCFG] = { RTM_SETDCB, dcbnl_pgtx_setcfg },
1643 [DCB_CMD_PGRX_GCFG] = { RTM_GETDCB, dcbnl_pgrx_getcfg }, 1640 [DCB_CMD_PGRX_GCFG] = { RTM_GETDCB, dcbnl_pgrx_getcfg },
1644 [DCB_CMD_PGRX_SCFG] = { RTM_SETDCB, dcbnl_pgrx_setcfg }, 1641 [DCB_CMD_PGRX_SCFG] = { RTM_SETDCB, dcbnl_pgrx_setcfg },
1645 [DCB_CMD_SET_ALL] = { RTM_SETDCB, dcbnl_setall }, 1642 [DCB_CMD_SET_ALL] = { RTM_SETDCB, dcbnl_setall },
1646 [DCB_CMD_BCN_GCFG] = { RTM_GETDCB, dcbnl_bcn_getcfg }, 1643 [DCB_CMD_BCN_GCFG] = { RTM_GETDCB, dcbnl_bcn_getcfg },
1647 [DCB_CMD_BCN_SCFG] = { RTM_SETDCB, dcbnl_bcn_setcfg }, 1644 [DCB_CMD_BCN_SCFG] = { RTM_SETDCB, dcbnl_bcn_setcfg },
1648 [DCB_CMD_IEEE_GET] = { RTM_GETDCB, dcbnl_ieee_get }, 1645 [DCB_CMD_IEEE_GET] = { RTM_GETDCB, dcbnl_ieee_get },
1649 [DCB_CMD_IEEE_SET] = { RTM_SETDCB, dcbnl_ieee_set }, 1646 [DCB_CMD_IEEE_SET] = { RTM_SETDCB, dcbnl_ieee_set },
1650 [DCB_CMD_IEEE_DEL] = { RTM_SETDCB, dcbnl_ieee_del }, 1647 [DCB_CMD_IEEE_DEL] = { RTM_SETDCB, dcbnl_ieee_del },
1651 [DCB_CMD_GDCBX] = { RTM_GETDCB, dcbnl_getdcbx }, 1648 [DCB_CMD_GDCBX] = { RTM_GETDCB, dcbnl_getdcbx },
1652 [DCB_CMD_SDCBX] = { RTM_SETDCB, dcbnl_setdcbx }, 1649 [DCB_CMD_SDCBX] = { RTM_SETDCB, dcbnl_setdcbx },
1653 [DCB_CMD_GFEATCFG] = { RTM_GETDCB, dcbnl_getfeatcfg }, 1650 [DCB_CMD_GFEATCFG] = { RTM_GETDCB, dcbnl_getfeatcfg },
1654 [DCB_CMD_SFEATCFG] = { RTM_SETDCB, dcbnl_setfeatcfg }, 1651 [DCB_CMD_SFEATCFG] = { RTM_SETDCB, dcbnl_setfeatcfg },
1655 [DCB_CMD_CEE_GET] = { RTM_GETDCB, dcbnl_cee_get }, 1652 [DCB_CMD_CEE_GET] = { RTM_GETDCB, dcbnl_cee_get },
1656 }; 1653 };
1657 1654
1658 static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 1655 static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1659 { 1656 {
1660 struct net *net = sock_net(skb->sk); 1657 struct net *net = sock_net(skb->sk);
1661 struct net_device *netdev; 1658 struct net_device *netdev;
1662 struct dcbmsg *dcb = nlmsg_data(nlh); 1659 struct dcbmsg *dcb = nlmsg_data(nlh);
1663 struct nlattr *tb[DCB_ATTR_MAX + 1]; 1660 struct nlattr *tb[DCB_ATTR_MAX + 1];
1664 u32 pid = skb ? NETLINK_CB(skb).pid : 0; 1661 u32 pid = skb ? NETLINK_CB(skb).pid : 0;
1665 int ret = -EINVAL; 1662 int ret = -EINVAL;
1666 struct sk_buff *reply_skb; 1663 struct sk_buff *reply_skb;
1667 struct nlmsghdr *reply_nlh = NULL; 1664 struct nlmsghdr *reply_nlh = NULL;
1668 const struct reply_func *fn; 1665 const struct reply_func *fn;
1669 1666
1670 if (!net_eq(net, &init_net)) 1667 if (!net_eq(net, &init_net))
1671 return -EINVAL; 1668 return -EINVAL;
1672 1669
1673 ret = nlmsg_parse(nlh, sizeof(*dcb), tb, DCB_ATTR_MAX, 1670 ret = nlmsg_parse(nlh, sizeof(*dcb), tb, DCB_ATTR_MAX,
1674 dcbnl_rtnl_policy); 1671 dcbnl_rtnl_policy);
1675 if (ret < 0) 1672 if (ret < 0)
1676 return ret; 1673 return ret;
1677 1674
1678 if (dcb->cmd > DCB_CMD_MAX) 1675 if (dcb->cmd > DCB_CMD_MAX)
1679 return -EINVAL; 1676 return -EINVAL;
1680 1677
1681 /* check if a reply function has been defined for the command */ 1678 /* check if a reply function has been defined for the command */
1682 fn = &reply_funcs[dcb->cmd]; 1679 fn = &reply_funcs[dcb->cmd];
1683 if (!fn->cb) 1680 if (!fn->cb)
1684 return -EOPNOTSUPP; 1681 return -EOPNOTSUPP;
1685 1682
1686 if (!tb[DCB_ATTR_IFNAME]) 1683 if (!tb[DCB_ATTR_IFNAME])
1687 return -EINVAL; 1684 return -EINVAL;
1688 1685
1689 netdev = dev_get_by_name(&init_net, nla_data(tb[DCB_ATTR_IFNAME])); 1686 netdev = dev_get_by_name(&init_net, nla_data(tb[DCB_ATTR_IFNAME]));
1690 if (!netdev) 1687 if (!netdev)
1691 return -ENODEV; 1688 return -ENODEV;
1692 1689
1693 if (!netdev->dcbnl_ops) { 1690 if (!netdev->dcbnl_ops) {
1694 ret = -EOPNOTSUPP; 1691 ret = -EOPNOTSUPP;
1695 goto out; 1692 goto out;
1696 } 1693 }
1697 1694
1698 reply_skb = dcbnl_newmsg(fn->type, dcb->cmd, pid, nlh->nlmsg_seq, 1695 reply_skb = dcbnl_newmsg(fn->type, dcb->cmd, pid, nlh->nlmsg_seq,
1699 nlh->nlmsg_flags, &reply_nlh); 1696 nlh->nlmsg_flags, &reply_nlh);
1700 if (!reply_skb) { 1697 if (!reply_skb) {
1701 ret = -ENOBUFS; 1698 ret = -ENOBUFS;
1702 goto out; 1699 goto out;
1703 } 1700 }
1704 1701
1705 ret = fn->cb(netdev, nlh, nlh->nlmsg_seq, tb, reply_skb); 1702 ret = fn->cb(netdev, nlh, nlh->nlmsg_seq, tb, reply_skb);
1706 if (ret < 0) { 1703 if (ret < 0) {
1707 nlmsg_free(reply_skb); 1704 nlmsg_free(reply_skb);
1708 goto out; 1705 goto out;
1709 } 1706 }
1710 1707
1711 nlmsg_end(reply_skb, reply_nlh); 1708 nlmsg_end(reply_skb, reply_nlh);
1712 1709
1713 ret = rtnl_unicast(reply_skb, &init_net, pid); 1710 ret = rtnl_unicast(reply_skb, &init_net, pid);
1714 out: 1711 out:
1715 dev_put(netdev); 1712 dev_put(netdev);
1716 return ret; 1713 return ret;
1717 } 1714 }
1718 1715
1719 static struct dcb_app_type *dcb_app_lookup(const struct dcb_app *app, 1716 static struct dcb_app_type *dcb_app_lookup(const struct dcb_app *app,
1720 int ifindex, int prio) 1717 int ifindex, int prio)
1721 { 1718 {
1722 struct dcb_app_type *itr; 1719 struct dcb_app_type *itr;
1723 1720
1724 list_for_each_entry(itr, &dcb_app_list, list) { 1721 list_for_each_entry(itr, &dcb_app_list, list) {
1725 if (itr->app.selector == app->selector && 1722 if (itr->app.selector == app->selector &&
1726 itr->app.protocol == app->protocol && 1723 itr->app.protocol == app->protocol &&
1727 itr->ifindex == ifindex && 1724 itr->ifindex == ifindex &&
1728 (!prio || itr->app.priority == prio)) 1725 (!prio || itr->app.priority == prio))
1729 return itr; 1726 return itr;
1730 } 1727 }
1731 1728
1732 return NULL; 1729 return NULL;
1733 } 1730 }
1734 1731
1735 static int dcb_app_add(const struct dcb_app *app, int ifindex) 1732 static int dcb_app_add(const struct dcb_app *app, int ifindex)
1736 { 1733 {
1737 struct dcb_app_type *entry; 1734 struct dcb_app_type *entry;
1738 1735
1739 entry = kmalloc(sizeof(*entry), GFP_ATOMIC); 1736 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
1740 if (!entry) 1737 if (!entry)
1741 return -ENOMEM; 1738 return -ENOMEM;
1742 1739
1743 memcpy(&entry->app, app, sizeof(*app)); 1740 memcpy(&entry->app, app, sizeof(*app));
1744 entry->ifindex = ifindex; 1741 entry->ifindex = ifindex;
1745 list_add(&entry->list, &dcb_app_list); 1742 list_add(&entry->list, &dcb_app_list);
1746 1743
1747 return 0; 1744 return 0;
1748 } 1745 }
1749 1746
1750 /** 1747 /**
1751 * dcb_getapp - retrieve the DCBX application user priority 1748 * dcb_getapp - retrieve the DCBX application user priority
1752 * 1749 *
1753 * On success returns a non-zero 802.1p user priority bitmap; 1750 * On success returns a non-zero 802.1p user priority bitmap;
1754 * otherwise returns 0 as the invalid user priority bitmap to 1751 * otherwise returns 0 as the invalid user priority bitmap to
1755 * indicate an error. 1752 * indicate an error.
1756 */ 1753 */
1757 u8 dcb_getapp(struct net_device *dev, struct dcb_app *app) 1754 u8 dcb_getapp(struct net_device *dev, struct dcb_app *app)
1758 { 1755 {
1759 struct dcb_app_type *itr; 1756 struct dcb_app_type *itr;
1760 u8 prio = 0; 1757 u8 prio = 0;
1761 1758
1762 spin_lock(&dcb_lock); 1759 spin_lock(&dcb_lock);
1763 if ((itr = dcb_app_lookup(app, dev->ifindex, 0))) 1760 if ((itr = dcb_app_lookup(app, dev->ifindex, 0)))
1764 prio = itr->app.priority; 1761 prio = itr->app.priority;
1765 spin_unlock(&dcb_lock); 1762 spin_unlock(&dcb_lock);
1766 1763
1767 return prio; 1764 return prio;
1768 } 1765 }
1769 EXPORT_SYMBOL(dcb_getapp); 1766 EXPORT_SYMBOL(dcb_getapp);
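A typical consumer is the FCoE stack, which asks which 802.1p priorities the CEE APP table maps to the FCoE ethertype; by CEE convention the stored priority field is treated as a user-priority bitmap. A minimal sketch (a fragment, assuming a valid netdev and the usual <linux/dcbnl.h> and <linux/if_ether.h> definitions):

        struct dcb_app app = {
                .selector = DCB_APP_IDTYPE_ETHTYPE,
                .protocol = ETH_P_FCOE,
        };
        u8 up_map = dcb_getapp(netdev, &app);           /* 0 means no entry configured */
        int prio = up_map ? ffs(up_map) - 1 : -1;       /* lowest mapped priority */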
1770 1767
1771 /** 1768 /**
1772 * dcb_setapp - add CEE dcb application data to app list 1769 * dcb_setapp - add CEE dcb application data to app list
1773 * 1770 *
1774 * Priority 0 is an invalid priority in CEE spec. This routine 1771 * Priority 0 is an invalid priority in CEE spec. This routine
1775 * removes applications from the app list if the priority is 1772 * removes applications from the app list if the priority is
1776 * set to zero. 1773 * set to zero.
1777 */ 1774 */
1778 int dcb_setapp(struct net_device *dev, struct dcb_app *new) 1775 int dcb_setapp(struct net_device *dev, struct dcb_app *new)
1779 { 1776 {
1780 struct dcb_app_type *itr; 1777 struct dcb_app_type *itr;
1781 struct dcb_app_type event; 1778 struct dcb_app_type event;
1782 int err = 0; 1779 int err = 0;
1783 1780
1784 event.ifindex = dev->ifindex; 1781 event.ifindex = dev->ifindex;
1785 memcpy(&event.app, new, sizeof(event.app)); 1782 memcpy(&event.app, new, sizeof(event.app));
1786 if (dev->dcbnl_ops->getdcbx) 1783 if (dev->dcbnl_ops->getdcbx)
1787 event.dcbx = dev->dcbnl_ops->getdcbx(dev); 1784 event.dcbx = dev->dcbnl_ops->getdcbx(dev);
1788 1785
1789 spin_lock(&dcb_lock); 1786 spin_lock(&dcb_lock);
1790 /* Search for existing match and replace */ 1787 /* Search for existing match and replace */
1791 if ((itr = dcb_app_lookup(new, dev->ifindex, 0))) { 1788 if ((itr = dcb_app_lookup(new, dev->ifindex, 0))) {
1792 if (new->priority) 1789 if (new->priority)
1793 itr->app.priority = new->priority; 1790 itr->app.priority = new->priority;
1794 else { 1791 else {
1795 list_del(&itr->list); 1792 list_del(&itr->list);
1796 kfree(itr); 1793 kfree(itr);
1797 } 1794 }
1798 goto out; 1795 goto out;
1799 } 1796 }
1800 /* App type does not exist, add new application type */ 1797 /* App type does not exist, add new application type */
1801 if (new->priority) 1798 if (new->priority)
1802 err = dcb_app_add(new, dev->ifindex); 1799 err = dcb_app_add(new, dev->ifindex);
1803 out: 1800 out:
1804 spin_unlock(&dcb_lock); 1801 spin_unlock(&dcb_lock);
1805 if (!err) 1802 if (!err)
1806 call_dcbevent_notifiers(DCB_APP_EVENT, &event); 1803 call_dcbevent_notifiers(DCB_APP_EVENT, &event);
1807 return err; 1804 return err;
1808 } 1805 }
1809 EXPORT_SYMBOL(dcb_setapp); 1806 EXPORT_SYMBOL(dcb_setapp);
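Because of the priority-zero rule described above, deleting a CEE entry is just another set. A short fragment, reusing the same hypothetical app key as the dcb_getapp() sketch earlier and assuming err is declared:

        app.priority = 1 << 3;          /* CEE convention: UP bitmap selecting priority 3 */
        err = dcb_setapp(netdev, &app);

        app.priority = 0;               /* a zero priority removes the entry again */
        err = dcb_setapp(netdev, &app);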
1810 1807
1811 /** 1808 /**
1812 * dcb_ieee_getapp_mask - retrieve the IEEE DCB application priority 1809 * dcb_ieee_getapp_mask - retrieve the IEEE DCB application priority
1813 * 1810 *
1814 * Helper routine which on success returns a non-zero 802.1Qaz user 1811 * Helper routine which on success returns a non-zero 802.1Qaz user
1815 * priority bitmap; otherwise returns 0 to indicate the dcb_app was 1812 * priority bitmap; otherwise returns 0 to indicate the dcb_app was
1816 * not found in the APP list. 1813 * not found in the APP list.
1817 */ 1814 */
1818 u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app) 1815 u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app)
1819 { 1816 {
1820 struct dcb_app_type *itr; 1817 struct dcb_app_type *itr;
1821 u8 prio = 0; 1818 u8 prio = 0;
1822 1819
1823 spin_lock(&dcb_lock); 1820 spin_lock(&dcb_lock);
1824 if ((itr = dcb_app_lookup(app, dev->ifindex, 0))) 1821 if ((itr = dcb_app_lookup(app, dev->ifindex, 0)))
1825 prio |= 1 << itr->app.priority; 1822 prio |= 1 << itr->app.priority;
1826 spin_unlock(&dcb_lock); 1823 spin_unlock(&dcb_lock);
1827 1824
1828 return prio; 1825 return prio;
1829 } 1826 }
1830 EXPORT_SYMBOL(dcb_ieee_getapp_mask); 1827 EXPORT_SYMBOL(dcb_ieee_getapp_mask);
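Consumers that must cope with either DCBX flavour usually pick the lookup helper based on what getdcbx() reports. The fragment below is illustrative only and assumes netdev->dcbnl_ops has already been checked for NULL by the caller where appropriate:

        struct dcb_app app = { .protocol = ETH_P_FCOE };
        u8 up = 0;

        if (netdev->dcbnl_ops->getdcbx &&
            (netdev->dcbnl_ops->getdcbx(netdev) & DCB_CAP_DCBX_VER_IEEE)) {
                app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE;
                up = dcb_ieee_getapp_mask(netdev, &app);
        } else {
                app.selector = DCB_APP_IDTYPE_ETHTYPE;
                up = dcb_getapp(netdev, &app);
        }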
1831 1828
1832 /** 1829 /**
1833 * dcb_ieee_setapp - add IEEE dcb application data to app list 1830 * dcb_ieee_setapp - add IEEE dcb application data to app list
1834 * 1831 *
1835 * This adds application data to the list. Multiple application 1832 * This adds application data to the list. Multiple application
1836 * entries may exist for the same selector and protocol as long 1833 * entries may exist for the same selector and protocol as long
1837 * as the priorities are different. 1834 * as the priorities are different.
1838 */ 1835 */
1839 int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new) 1836 int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new)
1840 { 1837 {
1841 struct dcb_app_type event; 1838 struct dcb_app_type event;
1842 int err = 0; 1839 int err = 0;
1843 1840
1844 event.ifindex = dev->ifindex; 1841 event.ifindex = dev->ifindex;
1845 memcpy(&event.app, new, sizeof(event.app)); 1842 memcpy(&event.app, new, sizeof(event.app));
1846 if (dev->dcbnl_ops->getdcbx) 1843 if (dev->dcbnl_ops->getdcbx)
1847 event.dcbx = dev->dcbnl_ops->getdcbx(dev); 1844 event.dcbx = dev->dcbnl_ops->getdcbx(dev);
1848 1845
1849 spin_lock(&dcb_lock); 1846 spin_lock(&dcb_lock);
1850 /* Search for existing match and abort if found */ 1847 /* Search for existing match and abort if found */
1851 if (dcb_app_lookup(new, dev->ifindex, new->priority)) { 1848 if (dcb_app_lookup(new, dev->ifindex, new->priority)) {
1852 err = -EEXIST; 1849 err = -EEXIST;
1853 goto out; 1850 goto out;
1854 } 1851 }
1855 1852
1856 err = dcb_app_add(new, dev->ifindex); 1853 err = dcb_app_add(new, dev->ifindex);
1857 out: 1854 out:
1858 spin_unlock(&dcb_lock); 1855 spin_unlock(&dcb_lock);
1859 if (!err) 1856 if (!err)
1860 call_dcbevent_notifiers(DCB_APP_EVENT, &event); 1857 call_dcbevent_notifiers(DCB_APP_EVENT, &event);
1861 return err; 1858 return err;
1862 } 1859 }
1863 EXPORT_SYMBOL(dcb_ieee_setapp); 1860 EXPORT_SYMBOL(dcb_ieee_setapp);
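A driver that programs APP entries into hardware typically still records them here so dcbnl_ieee_fill() can report them back on GET. A rough sketch of such an .ieee_setapp handler; the foo_* names are hypothetical:

        static int foo_ieee_setapp(struct net_device *dev, struct dcb_app *app)
        {
                int err;

                err = foo_hw_add_app(dev, app); /* hypothetical device programming */
                if (err)
                        return err;

                /* keep the shared dcb_app_list in sync for later GET/fill */
                return dcb_ieee_setapp(dev, app);
        }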
1864 1861
1865 /** 1862 /**
1866 * dcb_ieee_delapp - delete IEEE dcb application data from list 1863 * dcb_ieee_delapp - delete IEEE dcb application data from list
1867 * 1864 *
1868 * This removes a matching APP entry from the APP list 1865 * This removes a matching APP entry from the APP list
1869 */ 1866 */
1870 int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del) 1867 int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del)
1871 { 1868 {
1872 struct dcb_app_type *itr; 1869 struct dcb_app_type *itr;
1873 struct dcb_app_type event; 1870 struct dcb_app_type event;
1874 int err = -ENOENT; 1871 int err = -ENOENT;
1875 1872
1876 event.ifindex = dev->ifindex; 1873 event.ifindex = dev->ifindex;
1877 memcpy(&event.app, del, sizeof(event.app)); 1874 memcpy(&event.app, del, sizeof(event.app));
1878 if (dev->dcbnl_ops->getdcbx) 1875 if (dev->dcbnl_ops->getdcbx)
1879 event.dcbx = dev->dcbnl_ops->getdcbx(dev); 1876 event.dcbx = dev->dcbnl_ops->getdcbx(dev);
1880 1877
1881 spin_lock(&dcb_lock); 1878 spin_lock(&dcb_lock);
1882 /* Search for existing match and remove it. */ 1879 /* Search for existing match and remove it. */
1883 if ((itr = dcb_app_lookup(del, dev->ifindex, del->priority))) { 1880 if ((itr = dcb_app_lookup(del, dev->ifindex, del->priority))) {
1884 list_del(&itr->list); 1881 list_del(&itr->list);
1885 kfree(itr); 1882 kfree(itr);
1886 err = 0; 1883 err = 0;
1887 } 1884 }
1888 1885
1889 spin_unlock(&dcb_lock); 1886 spin_unlock(&dcb_lock);
1890 if (!err) 1887 if (!err)
1891 call_dcbevent_notifiers(DCB_APP_EVENT, &event); 1888 call_dcbevent_notifiers(DCB_APP_EVENT, &event);
1892 return err; 1889 return err;
1893 } 1890 }
1894 EXPORT_SYMBOL(dcb_ieee_delapp); 1891 EXPORT_SYMBOL(dcb_ieee_delapp);
1895 1892
1896 static void dcb_flushapp(void) 1893 static void dcb_flushapp(void)
1897 { 1894 {
1898 struct dcb_app_type *app; 1895 struct dcb_app_type *app;
1899 struct dcb_app_type *tmp; 1896 struct dcb_app_type *tmp;
1900 1897
1901 spin_lock(&dcb_lock); 1898 spin_lock(&dcb_lock);
1902 list_for_each_entry_safe(app, tmp, &dcb_app_list, list) { 1899 list_for_each_entry_safe(app, tmp, &dcb_app_list, list) {
1903 list_del(&app->list); 1900 list_del(&app->list);
1904 kfree(app); 1901 kfree(app);
1905 } 1902 }
1906 spin_unlock(&dcb_lock); 1903 spin_unlock(&dcb_lock);
1907 } 1904 }
1908 1905
1909 static int __init dcbnl_init(void) 1906 static int __init dcbnl_init(void)
1910 { 1907 {
1911 INIT_LIST_HEAD(&dcb_app_list); 1908 INIT_LIST_HEAD(&dcb_app_list);
1912 1909
1913 rtnl_register(PF_UNSPEC, RTM_GETDCB, dcb_doit, NULL, NULL); 1910 rtnl_register(PF_UNSPEC, RTM_GETDCB, dcb_doit, NULL, NULL);
1914 rtnl_register(PF_UNSPEC, RTM_SETDCB, dcb_doit, NULL, NULL); 1911 rtnl_register(PF_UNSPEC, RTM_SETDCB, dcb_doit, NULL, NULL);
1915 1912
1916 return 0; 1913 return 0;
1917 } 1914 }
1918 module_init(dcbnl_init); 1915 module_init(dcbnl_init);
1919 1916
1920 static void __exit dcbnl_exit(void) 1917 static void __exit dcbnl_exit(void)
1921 { 1918 {
1922 rtnl_unregister(PF_UNSPEC, RTM_GETDCB); 1919 rtnl_unregister(PF_UNSPEC, RTM_GETDCB);
1923 rtnl_unregister(PF_UNSPEC, RTM_SETDCB); 1920 rtnl_unregister(PF_UNSPEC, RTM_SETDCB);
1924 dcb_flushapp(); 1921 dcb_flushapp();
1925 } 1922 }
1926 module_exit(dcbnl_exit); 1923 module_exit(dcbnl_exit);
1927 1924