/*
 * Copyright (c) 2008-2011, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Author: Lucy Liu <lucy.liu@intel.com>
 */
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/slab.h>
#include <net/netlink.h>
#include <net/rtnetlink.h>
#include <linux/dcbnl.h>
#include <net/dcbevent.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
/**
 * Data Center Bridging (DCB) is a collection of Ethernet enhancements
 * intended to allow network traffic with differing requirements
 * (highly reliable, no drops vs. best effort vs. low latency) to operate
 * and co-exist on Ethernet.  Current DCB features are:
 *
 * Enhanced Transmission Selection (aka Priority Grouping [PG]) - provides a
 *   framework for assigning bandwidth guarantees to traffic classes.
 *
 * Priority-based Flow Control (PFC) - provides a flow control mechanism which
 *   can work independently for each 802.1p priority.
 *
 * Congestion Notification - provides a mechanism for end-to-end congestion
 *   control for protocols which do not have built-in congestion management.
 *
 * More information about the emerging standards for these Ethernet features
 * can be found at: http://www.ieee802.org/1/pages/dcbridges.html
 *
 * This file implements an rtnetlink interface to allow configuration of DCB
 * features for capable devices.
 */
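
/*
 * Illustrative sketch (not part of the original file; assumes the libnl-3
 * helper API): a user space query of the DCB state of "eth0" is an
 * RTM_GETDCB message whose payload is a struct dcbmsg followed by netlink
 * attributes, roughly:
 *
 *	struct dcbmsg dcb = {
 *		.dcb_family = AF_UNSPEC,
 *		.cmd        = DCB_CMD_GSTATE,
 *	};
 *	struct nl_msg *msg = nlmsg_alloc_simple(RTM_GETDCB, NLM_F_REQUEST);
 *
 *	nlmsg_append(msg, &dcb, sizeof(dcb), NLMSG_ALIGNTO);
 *	nla_put_string(msg, DCB_ATTR_IFNAME, "eth0");
 *	nl_send_auto(sk, msg);	// sk: an nl_sock connected to NETLINK_ROUTE
 *
 * The kernel answers with a DCB_ATTR_STATE u8 attribute (see dcbnl_getstate()
 * below).
 */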
MODULE_AUTHOR("Lucy Liu, <lucy.liu@intel.com>");
MODULE_DESCRIPTION("Data Center Bridging netlink interface");
MODULE_LICENSE("GPL");
/**************** DCB attribute policies *************************************/

/* DCB netlink attributes policy */
static const struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = {
	[DCB_ATTR_IFNAME]      = {.type = NLA_NUL_STRING, .len = IFNAMSIZ - 1},
	[DCB_ATTR_STATE]       = {.type = NLA_U8},
	[DCB_ATTR_PFC_CFG]     = {.type = NLA_NESTED},
	[DCB_ATTR_PG_CFG]      = {.type = NLA_NESTED},
	[DCB_ATTR_SET_ALL]     = {.type = NLA_U8},
	[DCB_ATTR_PERM_HWADDR] = {.type = NLA_FLAG},
	[DCB_ATTR_CAP]         = {.type = NLA_NESTED},
	[DCB_ATTR_PFC_STATE]   = {.type = NLA_U8},
	[DCB_ATTR_BCN]         = {.type = NLA_NESTED},
	[DCB_ATTR_APP]         = {.type = NLA_NESTED},
	[DCB_ATTR_IEEE]        = {.type = NLA_NESTED},
	[DCB_ATTR_DCBX]        = {.type = NLA_U8},
	[DCB_ATTR_FEATCFG]     = {.type = NLA_NESTED},
};
/* DCB priority flow control to User Priority nested attributes */
static const struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = {
	[DCB_PFC_UP_ATTR_0]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_1]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_2]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_3]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_4]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_5]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_6]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_7]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_ALL] = {.type = NLA_FLAG},
};
/* DCB priority grouping nested attributes */
static const struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = {
	[DCB_PG_ATTR_TC_0]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_1]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_2]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_3]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_4]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_5]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_6]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_7]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_ALL]    = {.type = NLA_NESTED},
	[DCB_PG_ATTR_BW_ID_0]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_1]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_2]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_3]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_4]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_5]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_6]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_7]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_ALL] = {.type = NLA_FLAG},
};
/* DCB traffic class nested attributes. */
static const struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = {
	[DCB_TC_ATTR_PARAM_PGID]        = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_UP_MAPPING]  = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_STRICT_PRIO] = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_BW_PCT]      = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_ALL]         = {.type = NLA_FLAG},
};
/* DCB capabilities nested attributes. */
static const struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = {
	[DCB_CAP_ATTR_ALL]     = {.type = NLA_FLAG},
	[DCB_CAP_ATTR_PG]      = {.type = NLA_U8},
	[DCB_CAP_ATTR_PFC]     = {.type = NLA_U8},
	[DCB_CAP_ATTR_UP2TC]   = {.type = NLA_U8},
	[DCB_CAP_ATTR_PG_TCS]  = {.type = NLA_U8},
	[DCB_CAP_ATTR_PFC_TCS] = {.type = NLA_U8},
	[DCB_CAP_ATTR_GSP]     = {.type = NLA_U8},
	[DCB_CAP_ATTR_BCN]     = {.type = NLA_U8},
	[DCB_CAP_ATTR_DCBX]    = {.type = NLA_U8},
};
/* DCB number of traffic classes nested attributes. */
static const struct nla_policy dcbnl_numtcs_nest[DCB_NUMTCS_ATTR_MAX + 1] = {
	[DCB_NUMTCS_ATTR_ALL] = {.type = NLA_FLAG},
	[DCB_NUMTCS_ATTR_PG]  = {.type = NLA_U8},
	[DCB_NUMTCS_ATTR_PFC] = {.type = NLA_U8},
};
/* DCB BCN nested attributes. */
static const struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = {
	[DCB_BCN_ATTR_RP_0]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_1]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_2]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_3]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_4]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_5]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_6]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_7]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_ALL] = {.type = NLA_FLAG},
	[DCB_BCN_ATTR_BCNA_0] = {.type = NLA_U32},
	[DCB_BCN_ATTR_BCNA_1] = {.type = NLA_U32},
	[DCB_BCN_ATTR_ALPHA]  = {.type = NLA_U32},
	[DCB_BCN_ATTR_BETA]   = {.type = NLA_U32},
	[DCB_BCN_ATTR_GD]     = {.type = NLA_U32},
	[DCB_BCN_ATTR_GI]     = {.type = NLA_U32},
	[DCB_BCN_ATTR_TMAX]   = {.type = NLA_U32},
	[DCB_BCN_ATTR_TD]     = {.type = NLA_U32},
	[DCB_BCN_ATTR_RMIN]   = {.type = NLA_U32},
	[DCB_BCN_ATTR_W]      = {.type = NLA_U32},
	[DCB_BCN_ATTR_RD]     = {.type = NLA_U32},
	[DCB_BCN_ATTR_RU]     = {.type = NLA_U32},
	[DCB_BCN_ATTR_WRTT]   = {.type = NLA_U32},
	[DCB_BCN_ATTR_RI]     = {.type = NLA_U32},
	[DCB_BCN_ATTR_C]      = {.type = NLA_U32},
	[DCB_BCN_ATTR_ALL]    = {.type = NLA_FLAG},
};
/* DCB APP nested attributes. */
static const struct nla_policy dcbnl_app_nest[DCB_APP_ATTR_MAX + 1] = {
	[DCB_APP_ATTR_IDTYPE]   = {.type = NLA_U8},
	[DCB_APP_ATTR_ID]       = {.type = NLA_U16},
	[DCB_APP_ATTR_PRIORITY] = {.type = NLA_U8},
};
/* IEEE 802.1Qaz nested attributes. */
static const struct nla_policy dcbnl_ieee_policy[DCB_ATTR_IEEE_MAX + 1] = {
	[DCB_ATTR_IEEE_ETS]       = {.len = sizeof(struct ieee_ets)},
	[DCB_ATTR_IEEE_PFC]       = {.len = sizeof(struct ieee_pfc)},
	[DCB_ATTR_IEEE_APP_TABLE] = {.type = NLA_NESTED},
	[DCB_ATTR_IEEE_MAXRATE]   = {.len = sizeof(struct ieee_maxrate)},
};
static const struct nla_policy dcbnl_ieee_app[DCB_ATTR_IEEE_APP_MAX + 1] = {
	[DCB_ATTR_IEEE_APP] = {.len = sizeof(struct dcb_app)},
};
/* DCB feature configuration nested attributes. */
static const struct nla_policy dcbnl_featcfg_nest[DCB_FEATCFG_ATTR_MAX + 1] = {
	[DCB_FEATCFG_ATTR_ALL] = {.type = NLA_FLAG},
	[DCB_FEATCFG_ATTR_PG]  = {.type = NLA_U8},
	[DCB_FEATCFG_ATTR_PFC] = {.type = NLA_U8},
	[DCB_FEATCFG_ATTR_APP] = {.type = NLA_U8},
};
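
/*
 * Illustrative note (not part of the original file): each nested policy
 * above is paired with nla_parse_nested() in the handlers below.  A rough
 * sketch of how a handler validates a nested DCB_ATTR_PFC_CFG blob:
 *
 *	struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1];
 *	int err = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
 *				   tb[DCB_ATTR_PFC_CFG], dcbnl_pfc_up_nest);
 *
 * After a successful parse, data[DCB_PFC_UP_ATTR_0..7] point to the
 * per-priority u8 attributes that passed the policy check, or are NULL
 * where the attribute was absent.
 */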
static LIST_HEAD(dcb_app_list);
static DEFINE_SPINLOCK(dcb_lock);
/* standard netlink reply call */
static int dcbnl_reply(u8 value, u8 event, u8 cmd, u8 attr, u32 pid,
		       u32 seq, u16 flags)
{
	struct sk_buff *dcbnl_skb;
	struct dcbmsg *dcb;
	struct nlmsghdr *nlh;
	int ret = -EINVAL;

	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!dcbnl_skb)
		return ret;

	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, event, sizeof(*dcb), flags);

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = cmd;

	ret = nla_put_u8(dcbnl_skb, attr, value);
	if (ret)
		goto err;

	/* end the message, assign the nlmsg_len. */
	nlmsg_end(dcbnl_skb, nlh);
	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
	return ret;

nlmsg_failure:
err:
	kfree_skb(dcbnl_skb);
	return -EINVAL;
}
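
/*
 * Illustrative note (not part of the original file): the reply built above
 * is laid out on the wire as
 *
 *	struct nlmsghdr | struct dcbmsg | one u8 netlink attribute
 *
 * so a status reply to DCB_CMD_GSTATE, for example, carries a dcbmsg with
 * dcb_family == AF_UNSPEC, cmd == DCB_CMD_GSTATE and a DCB_ATTR_STATE
 * attribute holding the value returned by the driver callback.
 */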
static int dcbnl_getstate(struct net_device *netdev, struct nlattr **tb,
			  u32 pid, u32 seq, u16 flags)
{
	int ret = -EINVAL;

	/* if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->getstate) */
	if (!netdev->dcbnl_ops->getstate)
		return ret;

	ret = dcbnl_reply(netdev->dcbnl_ops->getstate(netdev), RTM_GETDCB,
			  DCB_CMD_GSTATE, DCB_ATTR_STATE, pid, seq, flags);

	return ret;
}
static int dcbnl_getpfccfg(struct net_device *netdev, struct nlattr **tb,
			   u32 pid, u32 seq, u16 flags)
{
	struct sk_buff *dcbnl_skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1], *nest;
	u8 value;
	int ret = -EINVAL;
	int i;
	int getall = 0;

	if (!tb[DCB_ATTR_PFC_CFG] || !netdev->dcbnl_ops->getpfccfg)
		return ret;

	ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
			       tb[DCB_ATTR_PFC_CFG],
			       dcbnl_pfc_up_nest);
	if (ret)
		goto err_out;

	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!dcbnl_skb)
		goto err_out;

	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = DCB_CMD_PFC_GCFG;

	nest = nla_nest_start(dcbnl_skb, DCB_ATTR_PFC_CFG);
	if (!nest)
		goto err;

	if (data[DCB_PFC_UP_ATTR_ALL])
		getall = 1;

	for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
		if (!getall && !data[i])
			continue;

		netdev->dcbnl_ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0,
					     &value);
		ret = nla_put_u8(dcbnl_skb, i, value);
		if (ret) {
			nla_nest_cancel(dcbnl_skb, nest);
			goto err;
		}
	}
	nla_nest_end(dcbnl_skb, nest);

	nlmsg_end(dcbnl_skb, nlh);

	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
	if (ret)
		goto err_out;

	return 0;
nlmsg_failure:
err:
	kfree_skb(dcbnl_skb);
err_out:
	return -EINVAL;
}
static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlattr **tb,
				u32 pid, u32 seq, u16 flags)
{
	struct sk_buff *dcbnl_skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	u8 perm_addr[MAX_ADDR_LEN];
	int ret = -EINVAL;

	if (!netdev->dcbnl_ops->getpermhwaddr)
		return ret;

	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!dcbnl_skb)
		goto err_out;

	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = DCB_CMD_GPERM_HWADDR;

	netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr);

	ret = nla_put(dcbnl_skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr),
		      perm_addr);
	if (ret)
		goto err;

	nlmsg_end(dcbnl_skb, nlh);

	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
	if (ret)
		goto err_out;

	return 0;

nlmsg_failure:
err:
	kfree_skb(dcbnl_skb);
err_out:
	return -EINVAL;
}
static int dcbnl_getcap(struct net_device *netdev, struct nlattr **tb,
			u32 pid, u32 seq, u16 flags)
{
	struct sk_buff *dcbnl_skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	struct nlattr *data[DCB_CAP_ATTR_MAX + 1], *nest;
	u8 value;
	int ret = -EINVAL;
	int i;
	int getall = 0;

	if (!tb[DCB_ATTR_CAP] || !netdev->dcbnl_ops->getcap)
		return ret;

	ret = nla_parse_nested(data, DCB_CAP_ATTR_MAX, tb[DCB_ATTR_CAP],
			       dcbnl_cap_nest);
	if (ret)
		goto err_out;

	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!dcbnl_skb)
		goto err_out;

	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = DCB_CMD_GCAP;

	nest = nla_nest_start(dcbnl_skb, DCB_ATTR_CAP);
	if (!nest)
		goto err;

	if (data[DCB_CAP_ATTR_ALL])
		getall = 1;

	for (i = DCB_CAP_ATTR_ALL+1; i <= DCB_CAP_ATTR_MAX; i++) {
		if (!getall && !data[i])
			continue;

		if (!netdev->dcbnl_ops->getcap(netdev, i, &value)) {
			ret = nla_put_u8(dcbnl_skb, i, value);
			if (ret) {
				nla_nest_cancel(dcbnl_skb, nest);
				goto err;
			}
		}
	}
	nla_nest_end(dcbnl_skb, nest);

	nlmsg_end(dcbnl_skb, nlh);

	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
	if (ret)
		goto err_out;

	return 0;
nlmsg_failure:
err:
	kfree_skb(dcbnl_skb);
err_out:
	return -EINVAL;
}
static int dcbnl_getnumtcs(struct net_device *netdev, struct nlattr **tb,
			   u32 pid, u32 seq, u16 flags)
{
	struct sk_buff *dcbnl_skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1], *nest;
	u8 value;
	int ret = -EINVAL;
	int i;
	int getall = 0;

	if (!tb[DCB_ATTR_NUMTCS] || !netdev->dcbnl_ops->getnumtcs)
		return ret;

	ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS],
			       dcbnl_numtcs_nest);
	if (ret)
		goto err_out;

	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!dcbnl_skb)
		goto err_out;

	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = DCB_CMD_GNUMTCS;

	nest = nla_nest_start(dcbnl_skb, DCB_ATTR_NUMTCS);
	if (!nest)
		goto err;

	if (data[DCB_NUMTCS_ATTR_ALL])
		getall = 1;

	for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
		if (!getall && !data[i])
			continue;

		ret = netdev->dcbnl_ops->getnumtcs(netdev, i, &value);
		if (!ret) {
			ret = nla_put_u8(dcbnl_skb, i, value);
			if (ret) {
				nla_nest_cancel(dcbnl_skb, nest);
				goto err;
			}
		} else {
			goto err;
		}
	}
	nla_nest_end(dcbnl_skb, nest);

	nlmsg_end(dcbnl_skb, nlh);

	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
	if (ret)
		goto err_out;

	return 0;
nlmsg_failure:
err:
	kfree_skb(dcbnl_skb);
err_out:
	return -EINVAL;
}
static int dcbnl_setnumtcs(struct net_device *netdev, struct nlattr **tb,
			   u32 pid, u32 seq, u16 flags)
{
	struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1];
	int ret = -EINVAL;
	u8 value;
	int i;

	if (!tb[DCB_ATTR_NUMTCS] || !netdev->dcbnl_ops->setnumtcs)
		return ret;

	ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS],
			       dcbnl_numtcs_nest);
	if (ret)
		goto err;

	for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
		if (data[i] == NULL)
			continue;

		value = nla_get_u8(data[i]);

		ret = netdev->dcbnl_ops->setnumtcs(netdev, i, value);
		if (ret)
			break;
	}

	ret = dcbnl_reply(!!ret, RTM_SETDCB, DCB_CMD_SNUMTCS,
			  DCB_ATTR_NUMTCS, pid, seq, flags);
err:
	return ret;
}
static int dcbnl_getpfcstate(struct net_device *netdev, struct nlattr **tb,
			     u32 pid, u32 seq, u16 flags)
{
	int ret = -EINVAL;

	if (!netdev->dcbnl_ops->getpfcstate)
		return ret;

	ret = dcbnl_reply(netdev->dcbnl_ops->getpfcstate(netdev), RTM_GETDCB,
			  DCB_CMD_PFC_GSTATE, DCB_ATTR_PFC_STATE,
			  pid, seq, flags);

	return ret;
}
static int dcbnl_setpfcstate(struct net_device *netdev, struct nlattr **tb,
			     u32 pid, u32 seq, u16 flags)
{
	int ret = -EINVAL;
	u8 value;

	if (!tb[DCB_ATTR_PFC_STATE] || !netdev->dcbnl_ops->setpfcstate)
		return ret;

	value = nla_get_u8(tb[DCB_ATTR_PFC_STATE]);

	netdev->dcbnl_ops->setpfcstate(netdev, value);

	ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_PFC_SSTATE, DCB_ATTR_PFC_STATE,
			  pid, seq, flags);

	return ret;
}
static int dcbnl_getapp(struct net_device *netdev, struct nlattr **tb,
			u32 pid, u32 seq, u16 flags)
{
	struct sk_buff *dcbnl_skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	struct nlattr *app_nest;
	struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];
	u16 id;
	u8 up, idtype;
	int ret = -EINVAL;

	if (!tb[DCB_ATTR_APP])
		goto out;

	ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
			       dcbnl_app_nest);
	if (ret)
		goto out;

	ret = -EINVAL;
	/* all must be non-null */
	if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
	    (!app_tb[DCB_APP_ATTR_ID]))
		goto out;

	/* either by eth type or by socket number */
	idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
	if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
	    (idtype != DCB_APP_IDTYPE_PORTNUM))
		goto out;

	id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);

	if (netdev->dcbnl_ops->getapp) {
		up = netdev->dcbnl_ops->getapp(netdev, idtype, id);
	} else {
		struct dcb_app app = {
					.selector = idtype,
					.protocol = id,
				     };
		up = dcb_getapp(netdev, &app);
	}

	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!dcbnl_skb)
		goto out;

	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = DCB_CMD_GAPP;

	app_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_APP);
	if (!app_nest)
		goto out_cancel;

	ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_IDTYPE, idtype);
	if (ret)
		goto out_cancel;

	ret = nla_put_u16(dcbnl_skb, DCB_APP_ATTR_ID, id);
	if (ret)
		goto out_cancel;

	ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_PRIORITY, up);
	if (ret)
		goto out_cancel;

	nla_nest_end(dcbnl_skb, app_nest);
	nlmsg_end(dcbnl_skb, nlh);

	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
	if (ret)
		goto nlmsg_failure;

	goto out;

out_cancel:
	nla_nest_cancel(dcbnl_skb, app_nest);
nlmsg_failure:
	kfree_skb(dcbnl_skb);
out:
	return ret;
}
static int dcbnl_setapp(struct net_device *netdev, struct nlattr **tb,
			u32 pid, u32 seq, u16 flags)
{
	int err, ret = -EINVAL;
	u16 id;
	u8 up, idtype;
	struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];

	if (!tb[DCB_ATTR_APP])
		goto out;

	ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
			       dcbnl_app_nest);
	if (ret)
		goto out;

	ret = -EINVAL;
	/* all must be non-null */
	if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
	    (!app_tb[DCB_APP_ATTR_ID]) ||
	    (!app_tb[DCB_APP_ATTR_PRIORITY]))
		goto out;

	/* either by eth type or by socket number */
	idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
	if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
	    (idtype != DCB_APP_IDTYPE_PORTNUM))
		goto out;

	id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
	up = nla_get_u8(app_tb[DCB_APP_ATTR_PRIORITY]);

	if (netdev->dcbnl_ops->setapp) {
		err = netdev->dcbnl_ops->setapp(netdev, idtype, id, up);
	} else {
		struct dcb_app app;
		app.selector = idtype;
		app.protocol = id;
		app.priority = up;
		err = dcb_setapp(netdev, &app);
	}

	ret = dcbnl_reply(err, RTM_SETDCB, DCB_CMD_SAPP, DCB_ATTR_APP,
			  pid, seq, flags);
out:
	return ret;
}
static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlattr **tb,
			     u32 pid, u32 seq, u16 flags, int dir)
{
	struct sk_buff *dcbnl_skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	struct nlattr *pg_nest, *param_nest, *data;
	struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
	struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
	u8 prio, pgid, tc_pct, up_map;
	int ret = -EINVAL;
	int getall = 0;
	int i;

	if (!tb[DCB_ATTR_PG_CFG] ||
	    !netdev->dcbnl_ops->getpgtccfgtx ||
	    !netdev->dcbnl_ops->getpgtccfgrx ||
	    !netdev->dcbnl_ops->getpgbwgcfgtx ||
	    !netdev->dcbnl_ops->getpgbwgcfgrx)
		return ret;

	ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX,
			       tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest);
	if (ret)
		goto err_out;

	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!dcbnl_skb)
		goto err_out;

	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = (dir) ? DCB_CMD_PGRX_GCFG : DCB_CMD_PGTX_GCFG;

	pg_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_PG_CFG);
	if (!pg_nest)
		goto err;

	if (pg_tb[DCB_PG_ATTR_TC_ALL])
		getall = 1;

	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
		if (!getall && !pg_tb[i])
			continue;

		if (pg_tb[DCB_PG_ATTR_TC_ALL])
			data = pg_tb[DCB_PG_ATTR_TC_ALL];
		else
			data = pg_tb[i];
		ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX,
				       data, dcbnl_tc_param_nest);
		if (ret)
			goto err_pg;

		param_nest = nla_nest_start(dcbnl_skb, i);
		if (!param_nest)
			goto err_pg;

		pgid = DCB_ATTR_VALUE_UNDEFINED;
		prio = DCB_ATTR_VALUE_UNDEFINED;
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
		up_map = DCB_ATTR_VALUE_UNDEFINED;

		if (dir)
			netdev->dcbnl_ops->getpgtccfgrx(netdev,
					i - DCB_PG_ATTR_TC_0, &prio,
					&pgid, &tc_pct, &up_map);
		else
			netdev->dcbnl_ops->getpgtccfgtx(netdev,
					i - DCB_PG_ATTR_TC_0, &prio,
					&pgid, &tc_pct, &up_map);

		if (param_tb[DCB_TC_ATTR_PARAM_PGID] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(dcbnl_skb,
					 DCB_TC_ATTR_PARAM_PGID, pgid);
			if (ret)
				goto err_param;
		}
		if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(dcbnl_skb,
					 DCB_TC_ATTR_PARAM_UP_MAPPING, up_map);
			if (ret)
				goto err_param;
		}
		if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(dcbnl_skb,
					 DCB_TC_ATTR_PARAM_STRICT_PRIO, prio);
			if (ret)
				goto err_param;
		}
		if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(dcbnl_skb, DCB_TC_ATTR_PARAM_BW_PCT,
					 tc_pct);
			if (ret)
				goto err_param;
		}
		nla_nest_end(dcbnl_skb, param_nest);
	}

	if (pg_tb[DCB_PG_ATTR_BW_ID_ALL])
		getall = 1;
	else
		getall = 0;

	for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
		if (!getall && !pg_tb[i])
			continue;

		tc_pct = DCB_ATTR_VALUE_UNDEFINED;

		if (dir)
			netdev->dcbnl_ops->getpgbwgcfgrx(netdev,
					i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
		else
			netdev->dcbnl_ops->getpgbwgcfgtx(netdev,
					i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
		ret = nla_put_u8(dcbnl_skb, i, tc_pct);
		if (ret)
			goto err_pg;
	}

	nla_nest_end(dcbnl_skb, pg_nest);

	nlmsg_end(dcbnl_skb, nlh);

	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
	if (ret)
		goto err_out;

	return 0;

err_param:
	nla_nest_cancel(dcbnl_skb, param_nest);
err_pg:
	nla_nest_cancel(dcbnl_skb, pg_nest);
nlmsg_failure:
err:
	kfree_skb(dcbnl_skb);
err_out:
	return -EINVAL;
}
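
/*
 * Illustrative note (not part of the original file): the priority group
 * configuration is a two-level nest.  A reply to DCB_CMD_PGTX_GCFG looks
 * roughly like
 *
 *	DCB_ATTR_PG_CFG
 *	    DCB_PG_ATTR_TC_0 .. DCB_PG_ATTR_TC_7   (one nest per traffic class)
 *	        DCB_TC_ATTR_PARAM_PGID        (u8)
 *	        DCB_TC_ATTR_PARAM_UP_MAPPING  (u8)
 *	        DCB_TC_ATTR_PARAM_STRICT_PRIO (u8)
 *	        DCB_TC_ATTR_PARAM_BW_PCT      (u8)
 *	    DCB_PG_ATTR_BW_ID_0 .. DCB_PG_ATTR_BW_ID_7  (u8 bandwidth percent)
 *
 * dir selects the Rx (1) or Tx (0) driver ops, mirroring __dcbnl_pg_setcfg()
 * below.
 */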
static int dcbnl_pgtx_getcfg(struct net_device *netdev, struct nlattr **tb,
			     u32 pid, u32 seq, u16 flags)
{
	return __dcbnl_pg_getcfg(netdev, tb, pid, seq, flags, 0);
}
static int dcbnl_pgrx_getcfg(struct net_device *netdev, struct nlattr **tb,
			     u32 pid, u32 seq, u16 flags)
{
	return __dcbnl_pg_getcfg(netdev, tb, pid, seq, flags, 1);
}
static int dcbnl_setstate(struct net_device *netdev, struct nlattr **tb,
			  u32 pid, u32 seq, u16 flags)
{
	int ret = -EINVAL;
	u8 value;

	if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->setstate)
		return ret;

	value = nla_get_u8(tb[DCB_ATTR_STATE]);

	ret = dcbnl_reply(netdev->dcbnl_ops->setstate(netdev, value),
			  RTM_SETDCB, DCB_CMD_SSTATE, DCB_ATTR_STATE,
			  pid, seq, flags);

	return ret;
}
static int dcbnl_setpfccfg(struct net_device *netdev, struct nlattr **tb,
			   u32 pid, u32 seq, u16 flags)
{
	struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1];
	int i;
	int ret = -EINVAL;
	u8 value;

	if (!tb[DCB_ATTR_PFC_CFG] || !netdev->dcbnl_ops->setpfccfg)
		return ret;

	ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
			       tb[DCB_ATTR_PFC_CFG],
			       dcbnl_pfc_up_nest);
	if (ret)
		return ret;

	for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
		if (data[i] == NULL)
			continue;
		value = nla_get_u8(data[i]);
		netdev->dcbnl_ops->setpfccfg(netdev,
			data[i]->nla_type - DCB_PFC_UP_ATTR_0, value);
	}

	ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_PFC_SCFG, DCB_ATTR_PFC_CFG,
			  pid, seq, flags);

	return ret;
}
static int dcbnl_setall(struct net_device *netdev, struct nlattr **tb,
			u32 pid, u32 seq, u16 flags)
{
	int ret = -EINVAL;

	if (!tb[DCB_ATTR_SET_ALL] || !netdev->dcbnl_ops->setall)
		return ret;

	ret = dcbnl_reply(netdev->dcbnl_ops->setall(netdev), RTM_SETDCB,
			  DCB_CMD_SET_ALL, DCB_ATTR_SET_ALL, pid, seq, flags);

	return ret;
}
static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlattr **tb,
			     u32 pid, u32 seq, u16 flags, int dir)
{
	struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
	struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
	int ret = -EINVAL;
	int i;
	u8 pgid;
	u8 up_map;
	u8 prio;
	u8 tc_pct;

	if (!tb[DCB_ATTR_PG_CFG] ||
	    !netdev->dcbnl_ops->setpgtccfgtx ||
	    !netdev->dcbnl_ops->setpgtccfgrx ||
	    !netdev->dcbnl_ops->setpgbwgcfgtx ||
	    !netdev->dcbnl_ops->setpgbwgcfgrx)
		return ret;

	ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX,
			       tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest);
	if (ret)
		return ret;

	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
		if (!pg_tb[i])
			continue;

		ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX,
				       pg_tb[i], dcbnl_tc_param_nest);
		if (ret)
			return ret;

		pgid = DCB_ATTR_VALUE_UNDEFINED;
		prio = DCB_ATTR_VALUE_UNDEFINED;
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
		up_map = DCB_ATTR_VALUE_UNDEFINED;

		if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO])
			prio =
			    nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO]);

		if (param_tb[DCB_TC_ATTR_PARAM_PGID])
			pgid = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_PGID]);

		if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT])
			tc_pct = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_BW_PCT]);

		if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING])
			up_map =
			     nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING]);

		/* dir: Tx = 0, Rx = 1 */
		if (dir)
			netdev->dcbnl_ops->setpgtccfgrx(netdev,
				i - DCB_PG_ATTR_TC_0,
				prio, pgid, tc_pct, up_map);
		else
			netdev->dcbnl_ops->setpgtccfgtx(netdev,
				i - DCB_PG_ATTR_TC_0,
				prio, pgid, tc_pct, up_map);
	}

	for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
		if (!pg_tb[i])
			continue;

		tc_pct = nla_get_u8(pg_tb[i]);

		/* dir: Tx = 0, Rx = 1 */
		if (dir)
			netdev->dcbnl_ops->setpgbwgcfgrx(netdev,
				i - DCB_PG_ATTR_BW_ID_0, tc_pct);
		else
			netdev->dcbnl_ops->setpgbwgcfgtx(netdev,
				i - DCB_PG_ATTR_BW_ID_0, tc_pct);
	}

	ret = dcbnl_reply(0, RTM_SETDCB,
			  (dir ? DCB_CMD_PGRX_SCFG : DCB_CMD_PGTX_SCFG),
			  DCB_ATTR_PG_CFG, pid, seq, flags);

	return ret;
}
static int dcbnl_pgtx_setcfg(struct net_device *netdev, struct nlattr **tb,
			     u32 pid, u32 seq, u16 flags)
{
	return __dcbnl_pg_setcfg(netdev, tb, pid, seq, flags, 0);
}
static int dcbnl_pgrx_setcfg(struct net_device *netdev, struct nlattr **tb,
			     u32 pid, u32 seq, u16 flags)
{
	return __dcbnl_pg_setcfg(netdev, tb, pid, seq, flags, 1);
}
static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlattr **tb,
			    u32 pid, u32 seq, u16 flags)
{
	struct sk_buff *dcbnl_skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	struct nlattr *bcn_nest;
	struct nlattr *bcn_tb[DCB_BCN_ATTR_MAX + 1];
	u8 value_byte;
	u32 value_integer;
	int ret = -EINVAL;
	bool getall = false;
	int i;

	if (!tb[DCB_ATTR_BCN] || !netdev->dcbnl_ops->getbcnrp ||
	    !netdev->dcbnl_ops->getbcncfg)
		return ret;

	ret = nla_parse_nested(bcn_tb, DCB_BCN_ATTR_MAX,
			       tb[DCB_ATTR_BCN], dcbnl_bcn_nest);
	if (ret)
		goto err_out;

	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!dcbnl_skb)
		goto err_out;

	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = DCB_CMD_BCN_GCFG;

	bcn_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_BCN);
	if (!bcn_nest)
		goto err;

	if (bcn_tb[DCB_BCN_ATTR_ALL])
		getall = true;

	for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
		if (!getall && !bcn_tb[i])
			continue;

		netdev->dcbnl_ops->getbcnrp(netdev, i - DCB_BCN_ATTR_RP_0,
					    &value_byte);
		ret = nla_put_u8(dcbnl_skb, i, value_byte);
		if (ret)
			goto err_bcn;
	}

	for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
		if (!getall && !bcn_tb[i])
			continue;

		netdev->dcbnl_ops->getbcncfg(netdev, i,
					     &value_integer);
		ret = nla_put_u32(dcbnl_skb, i, value_integer);
		if (ret)
			goto err_bcn;
	}

	nla_nest_end(dcbnl_skb, bcn_nest);

	nlmsg_end(dcbnl_skb, nlh);

	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
	if (ret)
		goto err_out;

	return 0;

err_bcn:
	nla_nest_cancel(dcbnl_skb, bcn_nest);
nlmsg_failure:
err:
	kfree_skb(dcbnl_skb);
err_out:
	return -EINVAL;
}
static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlattr **tb,
			    u32 pid, u32 seq, u16 flags)
{
	struct nlattr *data[DCB_BCN_ATTR_MAX + 1];
	int i;
	int ret = -EINVAL;
	u8 value_byte;
	u32 value_int;

	if (!tb[DCB_ATTR_BCN] || !netdev->dcbnl_ops->setbcncfg ||
	    !netdev->dcbnl_ops->setbcnrp)
		return ret;

	ret = nla_parse_nested(data, DCB_BCN_ATTR_MAX,
			       tb[DCB_ATTR_BCN], dcbnl_bcn_nest);
	if (ret)
		return ret;

	for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
		if (data[i] == NULL)
			continue;
		value_byte = nla_get_u8(data[i]);
		netdev->dcbnl_ops->setbcnrp(netdev,
			data[i]->nla_type - DCB_BCN_ATTR_RP_0, value_byte);
	}

	for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
		if (data[i] == NULL)
			continue;
		value_int = nla_get_u32(data[i]);
		netdev->dcbnl_ops->setbcncfg(netdev,
					     i, value_int);
	}

	ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_BCN_SCFG, DCB_ATTR_BCN,
			  pid, seq, flags);

	return ret;
}
static int dcbnl_build_peer_app(struct net_device *netdev, struct sk_buff *skb,
				int app_nested_type, int app_info_type,
				int app_entry_type)
{
	struct dcb_peer_app_info info;
	struct dcb_app *table = NULL;
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	struct nlattr *app;
	u16 app_count;
	int err;
	int i;

	/*
	 * retrieve the peer app configuration from the driver. If the driver
	 * handlers fail exit without doing anything
	 */
	err = ops->peer_getappinfo(netdev, &info, &app_count);
	if (!err && app_count) {
		table = kmalloc(sizeof(struct dcb_app) * app_count, GFP_KERNEL);
		if (!table)
			return -ENOMEM;

		err = ops->peer_getapptable(netdev, table);
	}
	if (err)
		goto out;

	/*
	 * build the message, from here on the only possible failure
	 * is due to the skb size
	 */
	err = -EMSGSIZE;

	app = nla_nest_start(skb, app_nested_type);
	if (!app)
		goto nla_put_failure;

	if (app_info_type &&
	    nla_put(skb, app_info_type, sizeof(info), &info))
		goto nla_put_failure;

	for (i = 0; i < app_count; i++) {
		if (nla_put(skb, app_entry_type, sizeof(struct dcb_app),
			    &table[i]))
			goto nla_put_failure;
	}
	nla_nest_end(skb, app);
	err = 0;

nla_put_failure:
out:
	kfree(table);
	return err;
}
/* Handle IEEE 802.1Qaz GET commands. */
static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
{
	struct nlattr *ieee, *app;
	struct dcb_app_type *itr;
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	int dcbx;
	int err = -EMSGSIZE;

	if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
		goto nla_put_failure;
	ieee = nla_nest_start(skb, DCB_ATTR_IEEE);
	if (!ieee)
		goto nla_put_failure;

	if (ops->ieee_getets) {
		struct ieee_ets ets;
		err = ops->ieee_getets(netdev, &ets);
		if (!err &&
		    nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets))
			goto nla_put_failure;
	}

	if (ops->ieee_getmaxrate) {
		struct ieee_maxrate maxrate;
		err = ops->ieee_getmaxrate(netdev, &maxrate);
		if (!err) {
			err = nla_put(skb, DCB_ATTR_IEEE_MAXRATE,
				      sizeof(maxrate), &maxrate);
			if (err)
				goto nla_put_failure;
		}
	}

	if (ops->ieee_getpfc) {
		struct ieee_pfc pfc;
		err = ops->ieee_getpfc(netdev, &pfc);
		if (!err &&
		    nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc))
			goto nla_put_failure;
	}

	app = nla_nest_start(skb, DCB_ATTR_IEEE_APP_TABLE);
	if (!app)
		goto nla_put_failure;

	spin_lock(&dcb_lock);
	list_for_each_entry(itr, &dcb_app_list, list) {
		if (itr->ifindex == netdev->ifindex) {
			err = nla_put(skb, DCB_ATTR_IEEE_APP, sizeof(itr->app),
				      &itr->app);
			if (err) {
				spin_unlock(&dcb_lock);
				goto nla_put_failure;
			}
		}
	}

	if (netdev->dcbnl_ops->getdcbx)
		dcbx = netdev->dcbnl_ops->getdcbx(netdev);
	else
		dcbx = -EOPNOTSUPP;

	spin_unlock(&dcb_lock);
	nla_nest_end(skb, app);

	/* get peer info if available */
	if (ops->ieee_peer_getets) {
		struct ieee_ets ets;
		err = ops->ieee_peer_getets(netdev, &ets);
		if (!err &&
		    nla_put(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets))
			goto nla_put_failure;
	}

	if (ops->ieee_peer_getpfc) {
		struct ieee_pfc pfc;
		err = ops->ieee_peer_getpfc(netdev, &pfc);
		if (!err &&
		    nla_put(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc))
			goto nla_put_failure;
	}

	if (ops->peer_getappinfo && ops->peer_getapptable) {
		err = dcbnl_build_peer_app(netdev, skb,
					   DCB_ATTR_IEEE_PEER_APP,
					   DCB_ATTR_IEEE_APP_UNSPEC,
					   DCB_ATTR_IEEE_APP);
		if (err)
			goto nla_put_failure;
	}

	nla_nest_end(skb, ieee);

	err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
	if (err)
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return err;
}
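
/*
 * Illustrative note (not part of the original file): a DCB_CMD_IEEE_GET
 * reply produced by dcbnl_ieee_fill() is laid out roughly as
 *
 *	DCB_ATTR_IFNAME "ethX"
 *	DCB_ATTR_IEEE
 *	    DCB_ATTR_IEEE_ETS       (struct ieee_ets)
 *	    DCB_ATTR_IEEE_MAXRATE   (struct ieee_maxrate)
 *	    DCB_ATTR_IEEE_PFC       (struct ieee_pfc)
 *	    DCB_ATTR_IEEE_APP_TABLE
 *	        DCB_ATTR_IEEE_APP   (struct dcb_app, repeated)
 *	    DCB_ATTR_IEEE_PEER_*    (optional peer info)
 *	DCB_ATTR_DCBX               (u8)
 */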
static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev,
			     int dir)
{
	u8 pgid, up_map, prio, tc_pct;
	const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
	int i = dir ? DCB_ATTR_CEE_TX_PG : DCB_ATTR_CEE_RX_PG;
	struct nlattr *pg = nla_nest_start(skb, i);

	if (!pg)
		goto nla_put_failure;

	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
		struct nlattr *tc_nest = nla_nest_start(skb, i);

		if (!tc_nest)
			goto nla_put_failure;

		pgid = DCB_ATTR_VALUE_UNDEFINED;
		prio = DCB_ATTR_VALUE_UNDEFINED;
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
		up_map = DCB_ATTR_VALUE_UNDEFINED;

		if (!dir)
			ops->getpgtccfgrx(dev, i - DCB_PG_ATTR_TC_0,
					  &prio, &pgid, &tc_pct, &up_map);
		else
			ops->getpgtccfgtx(dev, i - DCB_PG_ATTR_TC_0,
					  &prio, &pgid, &tc_pct, &up_map);

		if (nla_put_u8(skb, DCB_TC_ATTR_PARAM_PGID, pgid) ||
		    nla_put_u8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map) ||
		    nla_put_u8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio) ||
		    nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct))
			goto nla_put_failure;
		nla_nest_end(skb, tc_nest);
	}

	for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;

		if (!dir)
			ops->getpgbwgcfgrx(dev, i - DCB_PG_ATTR_BW_ID_0,
					   &tc_pct);
		else
			ops->getpgbwgcfgtx(dev, i - DCB_PG_ATTR_BW_ID_0,
					   &tc_pct);
		if (nla_put_u8(skb, i, tc_pct))
			goto nla_put_failure;
	}
	nla_nest_end(skb, pg);
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
{
	struct nlattr *cee, *app;
	struct dcb_app_type *itr;
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	int dcbx, i, err = -EMSGSIZE;
	u8 value;

	if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
		goto nla_put_failure;
	cee = nla_nest_start(skb, DCB_ATTR_CEE);
	if (!cee)
		goto nla_put_failure;

	if (ops->getpgtccfgtx && ops->getpgbwgcfgtx) {
		err = dcbnl_cee_pg_fill(skb, netdev, 1);
		if (err)
			goto nla_put_failure;
	}

	if (ops->getpgtccfgrx && ops->getpgbwgcfgrx) {
		err = dcbnl_cee_pg_fill(skb, netdev, 0);
		if (err)
			goto nla_put_failure;
	}

	if (ops->getpfccfg) {
		struct nlattr *pfc_nest = nla_nest_start(skb, DCB_ATTR_CEE_PFC);

		if (!pfc_nest)
			goto nla_put_failure;

		for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
			ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0, &value);
			if (nla_put_u8(skb, i, value))
				goto nla_put_failure;
		}
		nla_nest_end(skb, pfc_nest);
	}

	spin_lock(&dcb_lock);
	app = nla_nest_start(skb, DCB_ATTR_CEE_APP_TABLE);
	if (!app)
		goto dcb_unlock;

	list_for_each_entry(itr, &dcb_app_list, list) {
		if (itr->ifindex == netdev->ifindex) {
			struct nlattr *app_nest = nla_nest_start(skb,
								 DCB_ATTR_APP);
			if (!app_nest)
				goto dcb_unlock;

			err = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE,
					 itr->app.selector);
			if (err)
				goto dcb_unlock;

			err = nla_put_u16(skb, DCB_APP_ATTR_ID,
					  itr->app.protocol);
			if (err)
				goto dcb_unlock;

			err = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY,
					 itr->app.priority);
			if (err)
				goto dcb_unlock;

			nla_nest_end(skb, app_nest);
		}
	}
	nla_nest_end(skb, app);

	if (netdev->dcbnl_ops->getdcbx)
		dcbx = netdev->dcbnl_ops->getdcbx(netdev);
	else
		dcbx = -EOPNOTSUPP;

	spin_unlock(&dcb_lock);

	/* features flags */
	if (ops->getfeatcfg) {
		struct nlattr *feat = nla_nest_start(skb, DCB_ATTR_CEE_FEAT);
		if (!feat)
			goto nla_put_failure;

		for (i = DCB_FEATCFG_ATTR_ALL + 1; i <= DCB_FEATCFG_ATTR_MAX;
		     i++)
			if (!ops->getfeatcfg(netdev, i, &value) &&
			    nla_put_u8(skb, i, value))
				goto nla_put_failure;

		nla_nest_end(skb, feat);
	}

	/* peer info if available */
	if (ops->cee_peer_getpg) {
		struct cee_pg pg;
		err = ops->cee_peer_getpg(netdev, &pg);
		if (!err &&
		    nla_put(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg))
			goto nla_put_failure;
	}

	if (ops->cee_peer_getpfc) {
		struct cee_pfc pfc;
		err = ops->cee_peer_getpfc(netdev, &pfc);
		if (!err &&
		    nla_put(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc))
			goto nla_put_failure;
	}

	if (ops->peer_getappinfo && ops->peer_getapptable) {
		err = dcbnl_build_peer_app(netdev, skb,
					   DCB_ATTR_CEE_PEER_APP_TABLE,
					   DCB_ATTR_CEE_PEER_APP_INFO,
					   DCB_ATTR_CEE_PEER_APP);
		if (err)
			goto nla_put_failure;
	}
	nla_nest_end(skb, cee);

	err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
	if (err)
		goto nla_put_failure;

	return 0;

dcb_unlock:
	spin_unlock(&dcb_lock);
nla_put_failure:
	return err;
}
static int dcbnl_notify(struct net_device *dev, int event, int cmd,
			u32 seq, u32 pid, int dcbx_ver)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
	int err;

	if (!ops)
		return -EOPNOTSUPP;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	nlh = nlmsg_put(skb, pid, 0, event, sizeof(*dcb), 0);
	if (nlh == NULL) {
		nlmsg_free(skb);
		return -EMSGSIZE;
	}

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = cmd;

	if (dcbx_ver == DCB_CAP_DCBX_VER_IEEE)
		err = dcbnl_ieee_fill(skb, dev);
	else
		err = dcbnl_cee_fill(skb, dev);

	if (err < 0) {
		/* Report error to broadcast listeners */
		nlmsg_cancel(skb, nlh);
		kfree_skb(skb);
		rtnl_set_sk_err(net, RTNLGRP_DCB, err);
	} else {
		/* End nlmsg and notify broadcast listeners */
		nlmsg_end(skb, nlh);
		rtnl_notify(skb, net, 0, RTNLGRP_DCB, NULL, GFP_KERNEL);
	}

	return err;
}
int dcbnl_ieee_notify(struct net_device *dev, int event, int cmd,
		      u32 seq, u32 pid)
{
	return dcbnl_notify(dev, event, cmd, seq, pid, DCB_CAP_DCBX_VER_IEEE);
}
EXPORT_SYMBOL(dcbnl_ieee_notify);

int dcbnl_cee_notify(struct net_device *dev, int event, int cmd,
		     u32 seq, u32 pid)
{
	return dcbnl_notify(dev, event, cmd, seq, pid, DCB_CAP_DCBX_VER_CEE);
}
EXPORT_SYMBOL(dcbnl_cee_notify);
/* Handle IEEE 802.1Qaz SET commands. If any requested operation cannot
 * be completed, the entire message is aborted and an error value is
 * returned. No attempt is made to reconcile the case where only part of
 * the cmd can be completed.
 */
static int dcbnl_ieee_set(struct net_device *netdev, struct nlattr **tb,
			  u32 pid, u32 seq, u16 flags)
{
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
	int err = -EOPNOTSUPP;

	if (!ops)
		goto err;

	if (!tb[DCB_ATTR_IEEE])
		goto err;

	err = nla_parse_nested(ieee, DCB_ATTR_IEEE_MAX,
			       tb[DCB_ATTR_IEEE], dcbnl_ieee_policy);
	if (err)
		goto err;

	if (ieee[DCB_ATTR_IEEE_ETS] && ops->ieee_setets) {
		struct ieee_ets *ets = nla_data(ieee[DCB_ATTR_IEEE_ETS]);
		err = ops->ieee_setets(netdev, ets);
		if (err)
			goto err;
	}

	if (ieee[DCB_ATTR_IEEE_MAXRATE] && ops->ieee_setmaxrate) {
		struct ieee_maxrate *maxrate =
			nla_data(ieee[DCB_ATTR_IEEE_MAXRATE]);
		err = ops->ieee_setmaxrate(netdev, maxrate);
		if (err)
			goto err;
	}

	if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) {
		struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]);
		err = ops->ieee_setpfc(netdev, pfc);
		if (err)
			goto err;
	}

	if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
		struct nlattr *attr;
		int rem;

		nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
			struct dcb_app *app_data;
			if (nla_type(attr) != DCB_ATTR_IEEE_APP)
				continue;
			app_data = nla_data(attr);
			if (ops->ieee_setapp)
				err = ops->ieee_setapp(netdev, app_data);
			else
				err = dcb_ieee_setapp(netdev, app_data);
			if (err)
				goto err;
		}
	}

err:
	dcbnl_reply(err, RTM_SETDCB, DCB_CMD_IEEE_SET, DCB_ATTR_IEEE,
		    pid, seq, flags);
	dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_SET, seq, 0);
	return err;
}
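
/*
 * Illustrative sketch (not part of the original file; foo_ieee_setpfc() and
 * foo_hw_apply_pfc() are made-up names): a driver enabling PFC through the
 * ieee_setpfc hook receives the struct filled from DCB_ATTR_IEEE_PFC:
 *
 *	static int foo_ieee_setpfc(struct net_device *dev,
 *				   struct ieee_pfc *pfc)
 *	{
 *		// pfc->pfc_en is a bitmap of user priorities with PFC
 *		// enabled; bit 3 set means priority 3 is lossless.
 *		return foo_hw_apply_pfc(dev, pfc->pfc_en);
 *	}
 *
 * struct ieee_pfc and its pfc_en field come from include/linux/dcbnl.h.
 */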
static int dcbnl_ieee_get(struct net_device *netdev, struct nlattr **tb,
			  u32 pid, u32 seq, u16 flags)
{
	struct net *net = dev_net(netdev);
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	int err;

	if (!ops)
		return -EOPNOTSUPP;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	nlh = nlmsg_put(skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
	if (nlh == NULL) {
		nlmsg_free(skb);
		return -EMSGSIZE;
	}

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = DCB_CMD_IEEE_GET;

	err = dcbnl_ieee_fill(skb, netdev);
	if (err < 0) {
		nlmsg_cancel(skb, nlh);
		kfree_skb(skb);
	} else {
		nlmsg_end(skb, nlh);
		err = rtnl_unicast(skb, net, pid);
	}

	return err;
}
static int dcbnl_ieee_del(struct net_device *netdev, struct nlattr **tb,
			  u32 pid, u32 seq, u16 flags)
{
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
	int err = -EOPNOTSUPP;

	if (!ops)
		goto err;

	if (!tb[DCB_ATTR_IEEE])
		goto err;

	err = nla_parse_nested(ieee, DCB_ATTR_IEEE_MAX,
			       tb[DCB_ATTR_IEEE], dcbnl_ieee_policy);
	if (err)
		goto err;

	if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
		struct nlattr *attr;
		int rem;

		nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
			struct dcb_app *app_data;

			if (nla_type(attr) != DCB_ATTR_IEEE_APP)
				continue;
			app_data = nla_data(attr);
			if (ops->ieee_delapp)
				err = ops->ieee_delapp(netdev, app_data);
			else
				err = dcb_ieee_delapp(netdev, app_data);
			if (err)
				goto err;
		}
	}

err:
	dcbnl_reply(err, RTM_SETDCB, DCB_CMD_IEEE_DEL, DCB_ATTR_IEEE,
		    pid, seq, flags);
	dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_DEL, seq, 0);
	return err;
}
/* DCBX configuration */
static int dcbnl_getdcbx(struct net_device *netdev, struct nlattr **tb,
			 u32 pid, u32 seq, u16 flags)
{
	int ret;

	if (!netdev->dcbnl_ops->getdcbx)
		return -EOPNOTSUPP;

	ret = dcbnl_reply(netdev->dcbnl_ops->getdcbx(netdev), RTM_GETDCB,
			  DCB_CMD_GDCBX, DCB_ATTR_DCBX, pid, seq, flags);

	return ret;
}
static int dcbnl_setdcbx(struct net_device *netdev, struct nlattr **tb,
			 u32 pid, u32 seq, u16 flags)
{
	int ret;
	u8 value;

	if (!netdev->dcbnl_ops->setdcbx)
		return -EOPNOTSUPP;

	if (!tb[DCB_ATTR_DCBX])
		return -EINVAL;

	value = nla_get_u8(tb[DCB_ATTR_DCBX]);

	ret = dcbnl_reply(netdev->dcbnl_ops->setdcbx(netdev, value),
			  RTM_SETDCB, DCB_CMD_SDCBX, DCB_ATTR_DCBX,
			  pid, seq, flags);

	return ret;
}
static int dcbnl_getfeatcfg(struct net_device *netdev, struct nlattr **tb,
			    u32 pid, u32 seq, u16 flags)
{
	struct sk_buff *dcbnl_skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1], *nest;
	u8 value;
	int ret, i;
	int getall = 0;

	if (!netdev->dcbnl_ops->getfeatcfg)
		return -EOPNOTSUPP;

	if (!tb[DCB_ATTR_FEATCFG])
		return -EINVAL;

	ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX, tb[DCB_ATTR_FEATCFG],
			       dcbnl_featcfg_nest);
	if (ret)
		return ret;

	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!dcbnl_skb)
		return -ENOBUFS;

	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = DCB_CMD_GFEATCFG;

	nest = nla_nest_start(dcbnl_skb, DCB_ATTR_FEATCFG);
	if (!nest)
		goto nla_put_failure;

	if (data[DCB_FEATCFG_ATTR_ALL])
		getall = 1;

	for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
		if (!getall && !data[i])
			continue;

		ret = netdev->dcbnl_ops->getfeatcfg(netdev, i, &value);
		if (!ret)
			ret = nla_put_u8(dcbnl_skb, i, value);

		if (ret) {
			nla_nest_cancel(dcbnl_skb, nest);
			goto nla_put_failure;
		}
	}
	nla_nest_end(dcbnl_skb, nest);

	nlmsg_end(dcbnl_skb, nlh);

	return rtnl_unicast(dcbnl_skb, &init_net, pid);
nla_put_failure:
	nlmsg_cancel(dcbnl_skb, nlh);
nlmsg_failure:
	kfree_skb(dcbnl_skb);
	return ret;
}
static int dcbnl_setfeatcfg(struct net_device *netdev, struct nlattr **tb,
			    u32 pid, u32 seq, u16 flags)
{
	struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1];
	int ret = 0;
	u8 value;
	int i;

	if (!netdev->dcbnl_ops->setfeatcfg)
		return -EOPNOTSUPP;

	if (!tb[DCB_ATTR_FEATCFG])
		return -EINVAL;

	ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX, tb[DCB_ATTR_FEATCFG],
			       dcbnl_featcfg_nest);
	if (ret)
		goto err;

	for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
		if (data[i] == NULL)
			continue;

		value = nla_get_u8(data[i]);

		ret = netdev->dcbnl_ops->setfeatcfg(netdev, i, value);
		if (ret)
			goto err;
	}
err:
	dcbnl_reply(ret, RTM_SETDCB, DCB_CMD_SFEATCFG, DCB_ATTR_FEATCFG,
		    pid, seq, flags);

	return ret;
}
/* Handle CEE DCBX GET commands. */
static int dcbnl_cee_get(struct net_device *netdev, struct nlattr **tb,
			 u32 pid, u32 seq, u16 flags)
{
	struct net *net = dev_net(netdev);
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	int err;

	if (!ops)
		return -EOPNOTSUPP;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	nlh = nlmsg_put(skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
	if (nlh == NULL) {
		nlmsg_free(skb);
		return -EMSGSIZE;
	}

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = DCB_CMD_CEE_GET;

	err = dcbnl_cee_fill(skb, netdev);

	if (err < 0) {
		nlmsg_cancel(skb, nlh);
		kfree_skb(skb);
	} else {
		nlmsg_end(skb, nlh);
		err = rtnl_unicast(skb, net, pid);
	}
	return err;
}
static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	struct dcbmsg *dcb = (struct dcbmsg *)NLMSG_DATA(nlh);
	struct nlattr *tb[DCB_ATTR_MAX + 1];
	u32 pid = skb ? NETLINK_CB(skb).pid : 0;
	int ret = -EINVAL;

	if (!net_eq(net, &init_net))
		return -EINVAL;

	ret = nlmsg_parse(nlh, sizeof(*dcb), tb, DCB_ATTR_MAX,
			  dcbnl_rtnl_policy);
	if (ret < 0)
		return ret;

	if (!tb[DCB_ATTR_IFNAME])
		return -EINVAL;

	netdev = dev_get_by_name(&init_net, nla_data(tb[DCB_ATTR_IFNAME]));
	if (!netdev)
		return -EINVAL;

	if (!netdev->dcbnl_ops)
		goto errout;

	switch (dcb->cmd) {
	case DCB_CMD_GSTATE:
		ret = dcbnl_getstate(netdev, tb, pid, nlh->nlmsg_seq, nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_PFC_GCFG:
		ret = dcbnl_getpfccfg(netdev, tb, pid, nlh->nlmsg_seq, nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_GPERM_HWADDR:
		ret = dcbnl_getperm_hwaddr(netdev, tb, pid, nlh->nlmsg_seq, nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_PGTX_GCFG:
		ret = dcbnl_pgtx_getcfg(netdev, tb, pid, nlh->nlmsg_seq, nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_PGRX_GCFG:
		ret = dcbnl_pgrx_getcfg(netdev, tb, pid, nlh->nlmsg_seq, nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_BCN_GCFG:
		ret = dcbnl_bcn_getcfg(netdev, tb, pid, nlh->nlmsg_seq, nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_SSTATE:
		ret = dcbnl_setstate(netdev, tb, pid, nlh->nlmsg_seq, nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_PFC_SCFG:
		ret = dcbnl_setpfccfg(netdev, tb, pid, nlh->nlmsg_seq, nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_SET_ALL:
		ret = dcbnl_setall(netdev, tb, pid, nlh->nlmsg_seq, nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_PGTX_SCFG:
		ret = dcbnl_pgtx_setcfg(netdev, tb, pid, nlh->nlmsg_seq, nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_PGRX_SCFG:
		ret = dcbnl_pgrx_setcfg(netdev, tb, pid, nlh->nlmsg_seq, nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_GCAP:
		ret = dcbnl_getcap(netdev, tb, pid, nlh->nlmsg_seq, nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_GNUMTCS:
		ret = dcbnl_getnumtcs(netdev, tb, pid, nlh->nlmsg_seq, nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_SNUMTCS:
		ret = dcbnl_setnumtcs(netdev, tb, pid, nlh->nlmsg_seq, nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_PFC_GSTATE:
		ret = dcbnl_getpfcstate(netdev, tb, pid, nlh->nlmsg_seq, nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_PFC_SSTATE:
		ret = dcbnl_setpfcstate(netdev, tb, pid, nlh->nlmsg_seq, nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_BCN_SCFG:
		ret = dcbnl_bcn_setcfg(netdev, tb, pid, nlh->nlmsg_seq, nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_GAPP:
		ret = dcbnl_getapp(netdev, tb, pid, nlh->nlmsg_seq, nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_SAPP:
		ret = dcbnl_setapp(netdev, tb, pid, nlh->nlmsg_seq, nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_IEEE_SET:
		ret = dcbnl_ieee_set(netdev, tb, pid, nlh->nlmsg_seq, nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_IEEE_GET:
		ret = dcbnl_ieee_get(netdev, tb, pid, nlh->nlmsg_seq, nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_IEEE_DEL:
		ret = dcbnl_ieee_del(netdev, tb, pid, nlh->nlmsg_seq, nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_GDCBX:
		ret = dcbnl_getdcbx(netdev, tb, pid, nlh->nlmsg_seq, nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_SDCBX:
		ret = dcbnl_setdcbx(netdev, tb, pid, nlh->nlmsg_seq, nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_GFEATCFG:
		ret = dcbnl_getfeatcfg(netdev, tb, pid, nlh->nlmsg_seq, nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_SFEATCFG:
		ret = dcbnl_setfeatcfg(netdev, tb, pid, nlh->nlmsg_seq, nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_CEE_GET:
		ret = dcbnl_cee_get(netdev, tb, pid, nlh->nlmsg_seq, nlh->nlmsg_flags);
		goto out;
	default:
		goto errout;
	}
errout:
	ret = -EINVAL;
out:
	dev_put(netdev);
	return ret;
}
/**
 * dcb_getapp - retrieve the DCBX application user priority
 *
 * On success returns a non-zero 802.1p user priority bitmap
 * otherwise returns 0 as the invalid user priority bitmap to
 * indicate an error.
 */
u8 dcb_getapp(struct net_device *dev, struct dcb_app *app)
{
	struct dcb_app_type *itr;
	u8 prio = 0;

	spin_lock(&dcb_lock);
	list_for_each_entry(itr, &dcb_app_list, list) {
		if (itr->app.selector == app->selector &&
		    itr->app.protocol == app->protocol &&
		    itr->ifindex == dev->ifindex) {
			prio = itr->app.priority;
			break;
		}
	}
	spin_unlock(&dcb_lock);

	return prio;
}
EXPORT_SYMBOL(dcb_getapp);
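
/*
 * Illustrative sketch (not part of the original file): a typical lookup is
 * the FCoE priority for a device, keyed by ethertype:
 *
 *	struct dcb_app app = {
 *		.selector = DCB_APP_IDTYPE_ETHTYPE,
 *		.protocol = ETH_P_FCOE,		// 0x8906
 *	};
 *	u8 prio = dcb_getapp(netdev, &app);
 *
 * A zero return means no APP entry is installed for that protocol.
 */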
/**
 * dcb_setapp - add CEE dcb application data to app list
 *
 * Priority 0 is an invalid priority in CEE spec. This routine
 * removes applications from the app list if the priority is
 * set to zero.
 */
int dcb_setapp(struct net_device *dev, struct dcb_app *new)
{
	struct dcb_app_type *itr;
	struct dcb_app_type event;

	event.ifindex = dev->ifindex;
	memcpy(&event.app, new, sizeof(event.app));
	if (dev->dcbnl_ops->getdcbx)
		event.dcbx = dev->dcbnl_ops->getdcbx(dev);

	spin_lock(&dcb_lock);
	/* Search for existing match and replace */
	list_for_each_entry(itr, &dcb_app_list, list) {
		if (itr->app.selector == new->selector &&
		    itr->app.protocol == new->protocol &&
		    itr->ifindex == dev->ifindex) {
			if (new->priority) {
				itr->app.priority = new->priority;
			} else {
				list_del(&itr->list);
				kfree(itr);
			}
			goto out;
		}
	}
	/* App type does not exist add new application type */
	if (new->priority) {
		struct dcb_app_type *entry;
		entry = kmalloc(sizeof(struct dcb_app_type), GFP_ATOMIC);
		if (!entry) {
			spin_unlock(&dcb_lock);
			return -ENOMEM;
		}

		memcpy(&entry->app, new, sizeof(*new));
		entry->ifindex = dev->ifindex;
		list_add(&entry->list, &dcb_app_list);
	}
out:
	spin_unlock(&dcb_lock);
	call_dcbevent_notifiers(DCB_APP_EVENT, &event);
	return 0;
}
EXPORT_SYMBOL(dcb_setapp);
/**
 * dcb_ieee_getapp_mask - retrieve the IEEE DCB application priority
 *
 * Helper routine which on success returns a non-zero 802.1Qaz user
 * priority bitmap otherwise returns 0 to indicate the dcb_app was
 * not found in APP list.
 */
u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app)
{
	struct dcb_app_type *itr;
	u8 prio = 0;

	spin_lock(&dcb_lock);
	list_for_each_entry(itr, &dcb_app_list, list) {
		if (itr->app.selector == app->selector &&
		    itr->app.protocol == app->protocol &&
		    itr->ifindex == dev->ifindex) {
			prio |= 1 << itr->app.priority;
		}
	}
	spin_unlock(&dcb_lock);

	return prio;
}
EXPORT_SYMBOL(dcb_ieee_getapp_mask);
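
/*
 * Illustrative sketch (not part of the original file): IEEE mode returns a
 * bitmap because several priorities may be mapped to one protocol:
 *
 *	struct dcb_app app = {
 *		.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
 *		.protocol = ETH_P_FCOE,
 *	};
 *	u8 mask = dcb_ieee_getapp_mask(netdev, &app);
 *	int prio = mask ? fls(mask) - 1 : -1;	// highest configured priority
 */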
/**
 * dcb_ieee_setapp - add IEEE dcb application data to app list
 *
 * This adds Application data to the list. Multiple application
 * entries may exist for the same selector and protocol as long
 * as the priorities are different.
 */
int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new)
{
	struct dcb_app_type *itr, *entry;
	struct dcb_app_type event;
	int err = 0;

	event.ifindex = dev->ifindex;
	memcpy(&event.app, new, sizeof(event.app));
	if (dev->dcbnl_ops->getdcbx)
		event.dcbx = dev->dcbnl_ops->getdcbx(dev);

	spin_lock(&dcb_lock);
	/* Search for existing match and abort if found */
	list_for_each_entry(itr, &dcb_app_list, list) {
		if (itr->app.selector == new->selector &&
		    itr->app.protocol == new->protocol &&
		    itr->app.priority == new->priority &&
		    itr->ifindex == dev->ifindex) {
			err = -EEXIST;
			goto out;
		}
	}

	/* App entry does not exist add new entry */
	entry = kmalloc(sizeof(struct dcb_app_type), GFP_ATOMIC);
	if (!entry) {
		err = -ENOMEM;
		goto out;
	}

	memcpy(&entry->app, new, sizeof(*new));
	entry->ifindex = dev->ifindex;
	list_add(&entry->list, &dcb_app_list);
out:
	spin_unlock(&dcb_lock);
	if (!err)
		call_dcbevent_notifiers(DCB_APP_EVENT, &event);
	return err;
}
EXPORT_SYMBOL(dcb_ieee_setapp);
/**
 * dcb_ieee_delapp - delete IEEE dcb application data from list
 *
 * This removes a matching APP data from the APP list
 */
int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del)
{
	struct dcb_app_type *itr;
	struct dcb_app_type event;
	int err = -ENOENT;

	event.ifindex = dev->ifindex;
	memcpy(&event.app, del, sizeof(event.app));
	if (dev->dcbnl_ops->getdcbx)
		event.dcbx = dev->dcbnl_ops->getdcbx(dev);

	spin_lock(&dcb_lock);
	/* Search for existing match and remove it. */
	list_for_each_entry(itr, &dcb_app_list, list) {
		if (itr->app.selector == del->selector &&
		    itr->app.protocol == del->protocol &&
		    itr->app.priority == del->priority &&
		    itr->ifindex == dev->ifindex) {
			list_del(&itr->list);
			kfree(itr);
			err = 0;
			break;
		}
	}
	spin_unlock(&dcb_lock);

	if (!err)
		call_dcbevent_notifiers(DCB_APP_EVENT, &event);
	return err;
}
EXPORT_SYMBOL(dcb_ieee_delapp);
static void dcb_flushapp(void)
{
	struct dcb_app_type *app;
	struct dcb_app_type *tmp;

	spin_lock(&dcb_lock);
	list_for_each_entry_safe(app, tmp, &dcb_app_list, list) {
		list_del(&app->list);
		kfree(app);
	}
	spin_unlock(&dcb_lock);
}
static int __init dcbnl_init(void)
{
	INIT_LIST_HEAD(&dcb_app_list);

	rtnl_register(PF_UNSPEC, RTM_GETDCB, dcb_doit, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_SETDCB, dcb_doit, NULL, NULL);

	return 0;
}
module_init(dcbnl_init);
static void __exit dcbnl_exit(void)
{
	rtnl_unregister(PF_UNSPEC, RTM_GETDCB);
	rtnl_unregister(PF_UNSPEC, RTM_SETDCB);
	dcb_flushapp();
}
module_exit(dcbnl_exit);