/*
 * Copyright (c) 2008, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Author: Lucy Liu <lucy.liu@intel.com>
 */

#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <net/netlink.h>
#include <net/rtnetlink.h>
#include <linux/dcbnl.h>
#include <linux/rtnetlink.h>
#include <net/sock.h>

/**
 * Data Center Bridging (DCB) is a collection of Ethernet enhancements
 * intended to allow network traffic with differing requirements
 * (highly reliable, no drops vs. best effort vs. low latency) to operate
 * and co-exist on Ethernet.  Current DCB features are:
 *
 * Enhanced Transmission Selection (aka Priority Grouping [PG]) - provides a
 *   framework for assigning bandwidth guarantees to traffic classes.
 *
 * Priority-based Flow Control (PFC) - provides a flow control mechanism which
 *   can work independently for each 802.1p priority.
 *
 * Congestion Notification - provides a mechanism for end-to-end congestion
 *   control for protocols which do not have built-in congestion management.
 *
 * More information about the emerging standards for these Ethernet features
 * can be found at: http://www.ieee802.org/1/pages/dcbridges.html
 *
 * This file implements an rtnetlink interface to allow configuration of DCB
 * features for capable devices.
 */

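/*
 * How a driver hooks in (illustrative sketch only, not taken from this file):
 * a DCB-capable driver fills out a struct dcbnl_rtnl_ops and points its
 * net_device at it; the handlers below simply dispatch to those callbacks.
 * The driver name "foo" and its callback functions are hypothetical.
 *
 *	static struct dcbnl_rtnl_ops foo_dcbnl_ops = {
 *		.getstate	= foo_dcb_getstate,
 *		.setstate	= foo_dcb_setstate,
 *		.getpermhwaddr	= foo_dcb_getpermhwaddr,
 *		.getpfccfg	= foo_dcb_getpfccfg,
 *		.setpfccfg	= foo_dcb_setpfccfg,
 *	};
 *
 *	netdev->dcbnl_ops = &foo_dcbnl_ops;
 *
 * Any callback left NULL causes the corresponding DCB_CMD_* request to be
 * rejected by the handlers in this file.
 */
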
MODULE_AUTHOR("Lucy Liu, <lucy.liu@intel.com>");
MODULE_DESCRIPTION("Data Center Bridging netlink interface");
MODULE_LICENSE("GPL");

/**************** DCB attribute policies *************************************/

/* DCB netlink attributes policy */
static struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = {
        [DCB_ATTR_IFNAME]      = {.type = NLA_NUL_STRING, .len = IFNAMSIZ - 1},
        [DCB_ATTR_STATE]       = {.type = NLA_U8},
        [DCB_ATTR_PFC_CFG]     = {.type = NLA_NESTED},
        [DCB_ATTR_PG_CFG]      = {.type = NLA_NESTED},
        [DCB_ATTR_SET_ALL]     = {.type = NLA_U8},
        [DCB_ATTR_PERM_HWADDR] = {.type = NLA_FLAG},
        [DCB_ATTR_CAP]         = {.type = NLA_NESTED},
        [DCB_ATTR_PFC_STATE]   = {.type = NLA_U8},
        [DCB_ATTR_BCN]         = {.type = NLA_NESTED},
};

/* DCB priority flow control to User Priority nested attributes */
static struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = {
        [DCB_PFC_UP_ATTR_0]   = {.type = NLA_U8},
        [DCB_PFC_UP_ATTR_1]   = {.type = NLA_U8},
        [DCB_PFC_UP_ATTR_2]   = {.type = NLA_U8},
        [DCB_PFC_UP_ATTR_3]   = {.type = NLA_U8},
        [DCB_PFC_UP_ATTR_4]   = {.type = NLA_U8},
        [DCB_PFC_UP_ATTR_5]   = {.type = NLA_U8},
        [DCB_PFC_UP_ATTR_6]   = {.type = NLA_U8},
        [DCB_PFC_UP_ATTR_7]   = {.type = NLA_U8},
        [DCB_PFC_UP_ATTR_ALL] = {.type = NLA_FLAG},
};

/* DCB priority grouping nested attributes */
static struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = {
        [DCB_PG_ATTR_TC_0]      = {.type = NLA_NESTED},
        [DCB_PG_ATTR_TC_1]      = {.type = NLA_NESTED},
        [DCB_PG_ATTR_TC_2]      = {.type = NLA_NESTED},
        [DCB_PG_ATTR_TC_3]      = {.type = NLA_NESTED},
        [DCB_PG_ATTR_TC_4]      = {.type = NLA_NESTED},
        [DCB_PG_ATTR_TC_5]      = {.type = NLA_NESTED},
        [DCB_PG_ATTR_TC_6]      = {.type = NLA_NESTED},
        [DCB_PG_ATTR_TC_7]      = {.type = NLA_NESTED},
        [DCB_PG_ATTR_TC_ALL]    = {.type = NLA_NESTED},
        [DCB_PG_ATTR_BW_ID_0]   = {.type = NLA_U8},
        [DCB_PG_ATTR_BW_ID_1]   = {.type = NLA_U8},
        [DCB_PG_ATTR_BW_ID_2]   = {.type = NLA_U8},
        [DCB_PG_ATTR_BW_ID_3]   = {.type = NLA_U8},
        [DCB_PG_ATTR_BW_ID_4]   = {.type = NLA_U8},
        [DCB_PG_ATTR_BW_ID_5]   = {.type = NLA_U8},
        [DCB_PG_ATTR_BW_ID_6]   = {.type = NLA_U8},
        [DCB_PG_ATTR_BW_ID_7]   = {.type = NLA_U8},
        [DCB_PG_ATTR_BW_ID_ALL] = {.type = NLA_FLAG},
};

/* DCB traffic class nested attributes. */
static struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = {
        [DCB_TC_ATTR_PARAM_PGID]        = {.type = NLA_U8},
        [DCB_TC_ATTR_PARAM_UP_MAPPING]  = {.type = NLA_U8},
        [DCB_TC_ATTR_PARAM_STRICT_PRIO] = {.type = NLA_U8},
        [DCB_TC_ATTR_PARAM_BW_PCT]      = {.type = NLA_U8},
        [DCB_TC_ATTR_PARAM_ALL]         = {.type = NLA_FLAG},
};

/* DCB capabilities nested attributes. */
static struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = {
        [DCB_CAP_ATTR_ALL]     = {.type = NLA_FLAG},
        [DCB_CAP_ATTR_PG]      = {.type = NLA_U8},
        [DCB_CAP_ATTR_PFC]     = {.type = NLA_U8},
        [DCB_CAP_ATTR_UP2TC]   = {.type = NLA_U8},
        [DCB_CAP_ATTR_PG_TCS]  = {.type = NLA_U8},
        [DCB_CAP_ATTR_PFC_TCS] = {.type = NLA_U8},
        [DCB_CAP_ATTR_GSP]     = {.type = NLA_U8},
        [DCB_CAP_ATTR_BCN]     = {.type = NLA_U8},
};

/* DCB number of traffic classes nested attributes. */
static struct nla_policy dcbnl_numtcs_nest[DCB_NUMTCS_ATTR_MAX + 1] = {
        [DCB_NUMTCS_ATTR_ALL] = {.type = NLA_FLAG},
        [DCB_NUMTCS_ATTR_PG]  = {.type = NLA_U8},
        [DCB_NUMTCS_ATTR_PFC] = {.type = NLA_U8},
};

/* DCB BCN nested attributes. */
static struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = {
        [DCB_BCN_ATTR_RP_0]   = {.type = NLA_U8},
        [DCB_BCN_ATTR_RP_1]   = {.type = NLA_U8},
        [DCB_BCN_ATTR_RP_2]   = {.type = NLA_U8},
        [DCB_BCN_ATTR_RP_3]   = {.type = NLA_U8},
        [DCB_BCN_ATTR_RP_4]   = {.type = NLA_U8},
        [DCB_BCN_ATTR_RP_5]   = {.type = NLA_U8},
        [DCB_BCN_ATTR_RP_6]   = {.type = NLA_U8},
        [DCB_BCN_ATTR_RP_7]   = {.type = NLA_U8},
        [DCB_BCN_ATTR_RP_ALL] = {.type = NLA_FLAG},
        [DCB_BCN_ATTR_BCNA_0] = {.type = NLA_U32},
        [DCB_BCN_ATTR_BCNA_1] = {.type = NLA_U32},
        [DCB_BCN_ATTR_ALPHA]  = {.type = NLA_U32},
        [DCB_BCN_ATTR_BETA]   = {.type = NLA_U32},
        [DCB_BCN_ATTR_GD]     = {.type = NLA_U32},
        [DCB_BCN_ATTR_GI]     = {.type = NLA_U32},
        [DCB_BCN_ATTR_TMAX]   = {.type = NLA_U32},
        [DCB_BCN_ATTR_TD]     = {.type = NLA_U32},
        [DCB_BCN_ATTR_RMIN]   = {.type = NLA_U32},
        [DCB_BCN_ATTR_W]      = {.type = NLA_U32},
        [DCB_BCN_ATTR_RD]     = {.type = NLA_U32},
        [DCB_BCN_ATTR_RU]     = {.type = NLA_U32},
        [DCB_BCN_ATTR_WRTT]   = {.type = NLA_U32},
        [DCB_BCN_ATTR_RI]     = {.type = NLA_U32},
        [DCB_BCN_ATTR_C]      = {.type = NLA_U32},
        [DCB_BCN_ATTR_ALL]    = {.type = NLA_FLAG},
};

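/*
 * Every DCB request carries a struct dcbmsg followed by the attributes
 * validated by dcbnl_rtnl_policy above; per-feature parameters sit one
 * level deeper.  A PFC "get" for user priorities 0 and 3, for example,
 * nests as follows (schematic only, attribute names as defined above):
 *
 *	DCB_ATTR_IFNAME			"eth0"
 *	DCB_ATTR_PFC_CFG
 *		DCB_PFC_UP_ATTR_0	(u8)
 *		DCB_PFC_UP_ATTR_3	(u8)
 *
 * Passing the matching *_ATTR_ALL flag instead of individual entries asks
 * the handlers below to report every index.
 */
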
/* standard netlink reply call */
static int dcbnl_reply(u8 value, u8 event, u8 cmd, u8 attr, u32 pid,
                       u32 seq, u16 flags)
{
        struct sk_buff *dcbnl_skb;
        struct dcbmsg *dcb;
        struct nlmsghdr *nlh;
        int ret = -EINVAL;

        dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!dcbnl_skb)
                return ret;

        nlh = NLMSG_NEW(dcbnl_skb, pid, seq, event, sizeof(*dcb), flags);

        dcb = NLMSG_DATA(nlh);
        dcb->dcb_family = AF_UNSPEC;
        dcb->cmd = cmd;

        ret = nla_put_u8(dcbnl_skb, attr, value);
        if (ret)
                goto err;

        /* end the message, assign the nlmsg_len. */
        nlmsg_end(dcbnl_skb, nlh);
        ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
        if (ret)
                return ret;

        return 0;

nlmsg_failure:
err:
        kfree_skb(dcbnl_skb);
        return ret;
}

/* report the device's DCB enable state as a single DCB_ATTR_STATE value */
static int dcbnl_getstate(struct net_device *netdev, struct nlattr **tb,
                          u32 pid, u32 seq, u16 flags)
{
        int ret = -EINVAL;

        /* if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->getstate) */
        if (!netdev->dcbnl_ops->getstate)
                return ret;

        ret = dcbnl_reply(netdev->dcbnl_ops->getstate(netdev), RTM_GETDCB,
                          DCB_CMD_GSTATE, DCB_ATTR_STATE, pid, seq, flags);

        return ret;
}

/* build and unicast a nested DCB_ATTR_PFC_CFG reply for the requested
 * user priorities */
static int dcbnl_getpfccfg(struct net_device *netdev, struct nlattr **tb,
                           u32 pid, u32 seq, u16 flags)
{
        struct sk_buff *dcbnl_skb;
        struct nlmsghdr *nlh;
        struct dcbmsg *dcb;
        struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1], *nest;
        u8 value;
        int ret = -EINVAL;
        int i;
        int getall = 0;

        if (!tb[DCB_ATTR_PFC_CFG] || !netdev->dcbnl_ops->getpfccfg)
                return ret;

        ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
                               tb[DCB_ATTR_PFC_CFG],
                               dcbnl_pfc_up_nest);
        if (ret)
                goto err_out;

        dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!dcbnl_skb)
                goto err_out;

        nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);

        dcb = NLMSG_DATA(nlh);
        dcb->dcb_family = AF_UNSPEC;
        dcb->cmd = DCB_CMD_PFC_GCFG;

        nest = nla_nest_start(dcbnl_skb, DCB_ATTR_PFC_CFG);
        if (!nest)
                goto err;

        if (data[DCB_PFC_UP_ATTR_ALL])
                getall = 1;

        for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
                if (!getall && !data[i])
                        continue;

                netdev->dcbnl_ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0,
                                             &value);
                ret = nla_put_u8(dcbnl_skb, i, value);

                if (ret) {
                        nla_nest_cancel(dcbnl_skb, nest);
                        goto err;
                }
        }
        nla_nest_end(dcbnl_skb, nest);

        nlmsg_end(dcbnl_skb, nlh);

        ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
        if (ret)
                goto err_out;

        return 0;

nlmsg_failure:
err:
        kfree_skb(dcbnl_skb);
err_out:
        return -EINVAL;
}

/* return the permanent hardware address reported by the driver */
static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlattr **tb,
                                u32 pid, u32 seq, u16 flags)
{
        struct sk_buff *dcbnl_skb;
        struct nlmsghdr *nlh;
        struct dcbmsg *dcb;
        u8 perm_addr[MAX_ADDR_LEN];
        int ret = -EINVAL;

        if (!netdev->dcbnl_ops->getpermhwaddr)
                return ret;

        dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!dcbnl_skb)
                goto err_out;

        nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);

        dcb = NLMSG_DATA(nlh);
        dcb->dcb_family = AF_UNSPEC;
        dcb->cmd = DCB_CMD_GPERM_HWADDR;

        netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr);

        ret = nla_put(dcbnl_skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr),
                      perm_addr);
        if (ret)
                goto err;

        nlmsg_end(dcbnl_skb, nlh);

        ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
        if (ret)
                goto err_out;

        return 0;

nlmsg_failure:
err:
        kfree_skb(dcbnl_skb);
err_out:
        return -EINVAL;
}

/* report the requested DCB capability attributes in a DCB_ATTR_CAP nest */
static int dcbnl_getcap(struct net_device *netdev, struct nlattr **tb,
                        u32 pid, u32 seq, u16 flags)
{
        struct sk_buff *dcbnl_skb;
        struct nlmsghdr *nlh;
        struct dcbmsg *dcb;
        struct nlattr *data[DCB_CAP_ATTR_MAX + 1], *nest;
        u8 value;
        int ret = -EINVAL;
        int i;
        int getall = 0;

        if (!tb[DCB_ATTR_CAP] || !netdev->dcbnl_ops->getcap)
                return ret;

        ret = nla_parse_nested(data, DCB_CAP_ATTR_MAX, tb[DCB_ATTR_CAP],
                               dcbnl_cap_nest);
        if (ret)
                goto err_out;

        dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!dcbnl_skb)
                goto err_out;

        nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);

        dcb = NLMSG_DATA(nlh);
        dcb->dcb_family = AF_UNSPEC;
        dcb->cmd = DCB_CMD_GCAP;

        nest = nla_nest_start(dcbnl_skb, DCB_ATTR_CAP);
        if (!nest)
                goto err;

        if (data[DCB_CAP_ATTR_ALL])
                getall = 1;

        for (i = DCB_CAP_ATTR_ALL + 1; i <= DCB_CAP_ATTR_MAX; i++) {
                if (!getall && !data[i])
                        continue;

                if (!netdev->dcbnl_ops->getcap(netdev, i, &value)) {
                        ret = nla_put_u8(dcbnl_skb, i, value);

                        if (ret) {
                                nla_nest_cancel(dcbnl_skb, nest);
                                goto err;
                        }
                }
        }
        nla_nest_end(dcbnl_skb, nest);

        nlmsg_end(dcbnl_skb, nlh);

        ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
        if (ret)
                goto err_out;

        return 0;

nlmsg_failure:
err:
        kfree_skb(dcbnl_skb);
err_out:
        return -EINVAL;
}

/* report the number of traffic classes supported for PG and/or PFC */
static int dcbnl_getnumtcs(struct net_device *netdev, struct nlattr **tb,
                           u32 pid, u32 seq, u16 flags)
{
        struct sk_buff *dcbnl_skb;
        struct nlmsghdr *nlh;
        struct dcbmsg *dcb;
        struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1], *nest;
        u8 value;
        int ret = -EINVAL;
        int i;
        int getall = 0;

        if (!tb[DCB_ATTR_NUMTCS] || !netdev->dcbnl_ops->getnumtcs)
                return ret;

        ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS],
                               dcbnl_numtcs_nest);
        if (ret)
                goto err_out;

        dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!dcbnl_skb)
                goto err_out;

        nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);

        dcb = NLMSG_DATA(nlh);
        dcb->dcb_family = AF_UNSPEC;
        dcb->cmd = DCB_CMD_GNUMTCS;

        nest = nla_nest_start(dcbnl_skb, DCB_ATTR_NUMTCS);
        if (!nest)
                goto err;

        if (data[DCB_NUMTCS_ATTR_ALL])
                getall = 1;

        for (i = DCB_NUMTCS_ATTR_ALL + 1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
                if (!getall && !data[i])
                        continue;

                ret = netdev->dcbnl_ops->getnumtcs(netdev, i, &value);
                if (ret)
                        goto err;

                ret = nla_put_u8(dcbnl_skb, i, value);
                if (ret) {
                        nla_nest_cancel(dcbnl_skb, nest);
                        goto err;
                }
        }
        nla_nest_end(dcbnl_skb, nest);

        nlmsg_end(dcbnl_skb, nlh);

        ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
        if (ret)
                goto err_out;

        return 0;

nlmsg_failure:
err:
        kfree_skb(dcbnl_skb);
err_out:
        return -EINVAL;
}

/* set the number of traffic classes for each requested feature */
static int dcbnl_setnumtcs(struct net_device *netdev, struct nlattr **tb,
                           u32 pid, u32 seq, u16 flags)
{
        struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1];
        int ret = -EINVAL;
        u8 value;
        int i;

        if (!tb[DCB_ATTR_NUMTCS] || !netdev->dcbnl_ops->setnumtcs)
                return ret;

        ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS],
                               dcbnl_numtcs_nest);
        if (ret)
                return ret;

        for (i = DCB_NUMTCS_ATTR_ALL + 1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
                if (data[i] == NULL)
                        continue;

                value = nla_get_u8(data[i]);

                ret = netdev->dcbnl_ops->setnumtcs(netdev, i, value);
                if (ret)
                        break;
        }

        /* a single u8 reply reports whether any of the writes failed */
        ret = dcbnl_reply(!!ret, RTM_SETDCB, DCB_CMD_SNUMTCS,
                          DCB_ATTR_NUMTCS, pid, seq, flags);

        return ret;
}

static int dcbnl_getpfcstate(struct net_device *netdev, struct nlattr **tb,
                             u32 pid, u32 seq, u16 flags)
{
        int ret = -EINVAL;

        if (!netdev->dcbnl_ops->getpfcstate)
                return ret;

        ret = dcbnl_reply(netdev->dcbnl_ops->getpfcstate(netdev), RTM_GETDCB,
                          DCB_CMD_PFC_GSTATE, DCB_ATTR_PFC_STATE,
                          pid, seq, flags);

        return ret;
}

static int dcbnl_setpfcstate(struct net_device *netdev, struct nlattr **tb,
                             u32 pid, u32 seq, u16 flags)
{
        int ret = -EINVAL;
        u8 value;

        if (!tb[DCB_ATTR_PFC_STATE] || !netdev->dcbnl_ops->setpfcstate)
                return ret;

        value = nla_get_u8(tb[DCB_ATTR_PFC_STATE]);

        netdev->dcbnl_ops->setpfcstate(netdev, value);

        ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_PFC_SSTATE, DCB_ATTR_PFC_STATE,
                          pid, seq, flags);

        return ret;
}

/* report the priority group configuration; dir selects Tx (0) or Rx (1) */
static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlattr **tb,
                             u32 pid, u32 seq, u16 flags, int dir)
{
        struct sk_buff *dcbnl_skb;
        struct nlmsghdr *nlh;
        struct dcbmsg *dcb;
        struct nlattr *pg_nest, *param_nest, *data;
        struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
        struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
        u8 prio, pgid, tc_pct, up_map;
        int ret = -EINVAL;
        int getall = 0;
        int i;

        if (!tb[DCB_ATTR_PG_CFG] ||
            !netdev->dcbnl_ops->getpgtccfgtx ||
            !netdev->dcbnl_ops->getpgtccfgrx ||
            !netdev->dcbnl_ops->getpgbwgcfgtx ||
            !netdev->dcbnl_ops->getpgbwgcfgrx)
                return ret;

        ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX,
                               tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest);
        if (ret)
                goto err_out;

        dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!dcbnl_skb)
                goto err_out;

        nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);

        dcb = NLMSG_DATA(nlh);
        dcb->dcb_family = AF_UNSPEC;
        dcb->cmd = (dir) ? DCB_CMD_PGRX_GCFG : DCB_CMD_PGTX_GCFG;

        pg_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_PG_CFG);
        if (!pg_nest)
                goto err;

        if (pg_tb[DCB_PG_ATTR_TC_ALL])
                getall = 1;

        for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
                if (!getall && !pg_tb[i])
                        continue;

                if (pg_tb[DCB_PG_ATTR_TC_ALL])
                        data = pg_tb[DCB_PG_ATTR_TC_ALL];
                else
                        data = pg_tb[i];
                ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX,
                                       data, dcbnl_tc_param_nest);
                if (ret)
                        goto err_pg;

                param_nest = nla_nest_start(dcbnl_skb, i);
                if (!param_nest)
                        goto err_pg;

                pgid = DCB_ATTR_VALUE_UNDEFINED;
                prio = DCB_ATTR_VALUE_UNDEFINED;
                tc_pct = DCB_ATTR_VALUE_UNDEFINED;
                up_map = DCB_ATTR_VALUE_UNDEFINED;

                if (dir) {
                        /* Rx */
                        netdev->dcbnl_ops->getpgtccfgrx(netdev,
                                        i - DCB_PG_ATTR_TC_0, &prio,
                                        &pgid, &tc_pct, &up_map);
                } else {
                        /* Tx */
                        netdev->dcbnl_ops->getpgtccfgtx(netdev,
                                        i - DCB_PG_ATTR_TC_0, &prio,
                                        &pgid, &tc_pct, &up_map);
                }

                if (param_tb[DCB_TC_ATTR_PARAM_PGID] ||
                    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
                        ret = nla_put_u8(dcbnl_skb,
                                         DCB_TC_ATTR_PARAM_PGID, pgid);
                        if (ret)
                                goto err_param;
                }
                if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING] ||
                    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
                        ret = nla_put_u8(dcbnl_skb,
                                         DCB_TC_ATTR_PARAM_UP_MAPPING, up_map);
                        if (ret)
                                goto err_param;
                }
                if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO] ||
                    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
                        ret = nla_put_u8(dcbnl_skb,
                                         DCB_TC_ATTR_PARAM_STRICT_PRIO, prio);
                        if (ret)
                                goto err_param;
                }
                if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT] ||
                    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
                        ret = nla_put_u8(dcbnl_skb, DCB_TC_ATTR_PARAM_BW_PCT,
                                         tc_pct);
                        if (ret)
                                goto err_param;
                }
                nla_nest_end(dcbnl_skb, param_nest);
        }

        if (pg_tb[DCB_PG_ATTR_BW_ID_ALL])
                getall = 1;
        else
                getall = 0;

        for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
                if (!getall && !pg_tb[i])
                        continue;

                tc_pct = DCB_ATTR_VALUE_UNDEFINED;

                if (dir) {
                        /* Rx */
                        netdev->dcbnl_ops->getpgbwgcfgrx(netdev,
                                        i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
                } else {
                        /* Tx */
                        netdev->dcbnl_ops->getpgbwgcfgtx(netdev,
                                        i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
                }
                ret = nla_put_u8(dcbnl_skb, i, tc_pct);

                if (ret)
                        goto err_pg;
        }

        nla_nest_end(dcbnl_skb, pg_nest);

        nlmsg_end(dcbnl_skb, nlh);

        ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
        if (ret)
                goto err_out;

        return 0;

err_param:
        nla_nest_cancel(dcbnl_skb, param_nest);
err_pg:
        nla_nest_cancel(dcbnl_skb, pg_nest);
nlmsg_failure:
err:
        kfree_skb(dcbnl_skb);
err_out:
        return -EINVAL;
}

static int dcbnl_pgtx_getcfg(struct net_device *netdev, struct nlattr **tb,
                             u32 pid, u32 seq, u16 flags)
{
        return __dcbnl_pg_getcfg(netdev, tb, pid, seq, flags, 0);
}

static int dcbnl_pgrx_getcfg(struct net_device *netdev, struct nlattr **tb,
                             u32 pid, u32 seq, u16 flags)
{
        return __dcbnl_pg_getcfg(netdev, tb, pid, seq, flags, 1);
}

static int dcbnl_setstate(struct net_device *netdev, struct nlattr **tb,
                          u32 pid, u32 seq, u16 flags)
{
        int ret = -EINVAL;
        u8 value;

        if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->setstate)
                return ret;

        value = nla_get_u8(tb[DCB_ATTR_STATE]);

        ret = dcbnl_reply(netdev->dcbnl_ops->setstate(netdev, value),
                          RTM_SETDCB, DCB_CMD_SSTATE, DCB_ATTR_STATE,
                          pid, seq, flags);

        return ret;
}

/* apply the PFC setting carried for each user priority attribute present */
static int dcbnl_setpfccfg(struct net_device *netdev, struct nlattr **tb,
                           u32 pid, u32 seq, u16 flags)
{
        struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1];
        int i;
        int ret = -EINVAL;
        u8 value;

        if (!tb[DCB_ATTR_PFC_CFG] || !netdev->dcbnl_ops->setpfccfg)
                return ret;

        ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
                               tb[DCB_ATTR_PFC_CFG],
                               dcbnl_pfc_up_nest);
        if (ret)
                return ret;

        for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
                if (data[i] == NULL)
                        continue;
                value = nla_get_u8(data[i]);
                netdev->dcbnl_ops->setpfccfg(netdev,
                        data[i]->nla_type - DCB_PFC_UP_ATTR_0, value);
        }

        ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_PFC_SCFG, DCB_ATTR_PFC_CFG,
                          pid, seq, flags);

        return ret;
}

static int dcbnl_setall(struct net_device *netdev, struct nlattr **tb,
                        u32 pid, u32 seq, u16 flags)
{
        int ret = -EINVAL;

        if (!tb[DCB_ATTR_SET_ALL] || !netdev->dcbnl_ops->setall)
                return ret;

        ret = dcbnl_reply(netdev->dcbnl_ops->setall(netdev), RTM_SETDCB,
                          DCB_CMD_SET_ALL, DCB_ATTR_SET_ALL, pid, seq, flags);

        return ret;
}

/* apply priority group configuration; dir selects Tx (0) or Rx (1) */
static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlattr **tb,
                             u32 pid, u32 seq, u16 flags, int dir)
{
        struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
        struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
        int ret = -EINVAL;
        int i;
        u8 pgid;
        u8 up_map;
        u8 prio;
        u8 tc_pct;

        if (!tb[DCB_ATTR_PG_CFG] ||
            !netdev->dcbnl_ops->setpgtccfgtx ||
            !netdev->dcbnl_ops->setpgtccfgrx ||
            !netdev->dcbnl_ops->setpgbwgcfgtx ||
            !netdev->dcbnl_ops->setpgbwgcfgrx)
                return ret;

        ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX,
                               tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest);
        if (ret)
                return ret;

        for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
                if (!pg_tb[i])
                        continue;

                ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX,
                                       pg_tb[i], dcbnl_tc_param_nest);
                if (ret)
                        return ret;

                pgid = DCB_ATTR_VALUE_UNDEFINED;
                prio = DCB_ATTR_VALUE_UNDEFINED;
                tc_pct = DCB_ATTR_VALUE_UNDEFINED;
                up_map = DCB_ATTR_VALUE_UNDEFINED;

                if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO])
                        prio =
                            nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO]);

                if (param_tb[DCB_TC_ATTR_PARAM_PGID])
                        pgid = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_PGID]);

                if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT])
                        tc_pct = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_BW_PCT]);

                if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING])
                        up_map =
                            nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING]);

                /* dir: Tx = 0, Rx = 1 */
                if (dir) {
                        /* Rx */
                        netdev->dcbnl_ops->setpgtccfgrx(netdev,
                                        i - DCB_PG_ATTR_TC_0,
                                        prio, pgid, tc_pct, up_map);
                } else {
                        /* Tx */
                        netdev->dcbnl_ops->setpgtccfgtx(netdev,
                                        i - DCB_PG_ATTR_TC_0,
                                        prio, pgid, tc_pct, up_map);
                }
        }

        for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
                if (!pg_tb[i])
                        continue;

                tc_pct = nla_get_u8(pg_tb[i]);

                /* dir: Tx = 0, Rx = 1 */
                if (dir) {
                        /* Rx */
                        netdev->dcbnl_ops->setpgbwgcfgrx(netdev,
                                        i - DCB_PG_ATTR_BW_ID_0, tc_pct);
                } else {
                        /* Tx */
                        netdev->dcbnl_ops->setpgbwgcfgtx(netdev,
                                        i - DCB_PG_ATTR_BW_ID_0, tc_pct);
                }
        }

        ret = dcbnl_reply(0, RTM_SETDCB,
                          (dir ? DCB_CMD_PGRX_SCFG : DCB_CMD_PGTX_SCFG),
                          DCB_ATTR_PG_CFG, pid, seq, flags);

        return ret;
}

static int dcbnl_pgtx_setcfg(struct net_device *netdev, struct nlattr **tb,
                             u32 pid, u32 seq, u16 flags)
{
        return __dcbnl_pg_setcfg(netdev, tb, pid, seq, flags, 0);
}

static int dcbnl_pgrx_setcfg(struct net_device *netdev, struct nlattr **tb,
                             u32 pid, u32 seq, u16 flags)
{
        return __dcbnl_pg_setcfg(netdev, tb, pid, seq, flags, 1);
}

/* report the BCN rate-limiter state (per priority) and configuration */
static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlattr **tb,
                            u32 pid, u32 seq, u16 flags)
{
        struct sk_buff *dcbnl_skb;
        struct nlmsghdr *nlh;
        struct dcbmsg *dcb;
        struct nlattr *bcn_nest;
        struct nlattr *bcn_tb[DCB_BCN_ATTR_MAX + 1];
        u8 value_byte;
        u32 value_integer;
        int ret = -EINVAL;
        int getall = 0;
        int i;

        if (!tb[DCB_ATTR_BCN] || !netdev->dcbnl_ops->getbcnrp ||
            !netdev->dcbnl_ops->getbcncfg)
                return ret;

        ret = nla_parse_nested(bcn_tb, DCB_BCN_ATTR_MAX,
                               tb[DCB_ATTR_BCN], dcbnl_bcn_nest);
        if (ret)
                goto err_out;

        dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!dcbnl_skb)
                goto err_out;

        nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);

        dcb = NLMSG_DATA(nlh);
        dcb->dcb_family = AF_UNSPEC;
        dcb->cmd = DCB_CMD_BCN_GCFG;

        bcn_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_BCN);
        if (!bcn_nest)
                goto err;

        if (bcn_tb[DCB_BCN_ATTR_ALL])
                getall = 1;

        for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
                if (!getall && !bcn_tb[i])
                        continue;

                netdev->dcbnl_ops->getbcnrp(netdev, i - DCB_BCN_ATTR_RP_0,
                                            &value_byte);
                ret = nla_put_u8(dcbnl_skb, i, value_byte);
                if (ret)
                        goto err_bcn;
        }

        for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
                if (!getall && !bcn_tb[i])
                        continue;

                netdev->dcbnl_ops->getbcncfg(netdev, i,
                                             &value_integer);
                ret = nla_put_u32(dcbnl_skb, i, value_integer);
                if (ret)
                        goto err_bcn;
        }

        nla_nest_end(dcbnl_skb, bcn_nest);

        nlmsg_end(dcbnl_skb, nlh);

        ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
        if (ret)
                goto err_out;

        return 0;

err_bcn:
        nla_nest_cancel(dcbnl_skb, bcn_nest);
nlmsg_failure:
err:
        kfree_skb(dcbnl_skb);
err_out:
        return -EINVAL;
}

/* apply BCN rate-limiter state and configuration attributes */
static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlattr **tb,
                            u32 pid, u32 seq, u16 flags)
{
        struct nlattr *data[DCB_BCN_ATTR_MAX + 1];
        int i;
        int ret = -EINVAL;
        u8 value_byte;
        u32 value_int;

        if (!tb[DCB_ATTR_BCN] || !netdev->dcbnl_ops->setbcncfg ||
            !netdev->dcbnl_ops->setbcnrp)
                return ret;

        ret = nla_parse_nested(data, DCB_BCN_ATTR_MAX,
                               tb[DCB_ATTR_BCN], dcbnl_bcn_nest);
        if (ret)
                return ret;

        for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
                if (data[i] == NULL)
                        continue;
                value_byte = nla_get_u8(data[i]);
                netdev->dcbnl_ops->setbcnrp(netdev,
                        data[i]->nla_type - DCB_BCN_ATTR_RP_0, value_byte);
        }

        for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
                if (data[i] == NULL)
                        continue;
                value_int = nla_get_u32(data[i]);
                netdev->dcbnl_ops->setbcncfg(netdev, i, value_int);
        }

        ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_BCN_SCFG, DCB_ATTR_BCN,
                          pid, seq, flags);

        return ret;
}

/* top-level RTM_GETDCB/RTM_SETDCB handler: validate the request and
 * dispatch on dcb->cmd */
static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
        struct net *net = sock_net(skb->sk);
        struct net_device *netdev;
        struct dcbmsg *dcb = (struct dcbmsg *)NLMSG_DATA(nlh);
        struct nlattr *tb[DCB_ATTR_MAX + 1];
        u32 pid = skb ? NETLINK_CB(skb).pid : 0;
        int ret = -EINVAL;

        if (net != &init_net)
                return -EINVAL;

        ret = nlmsg_parse(nlh, sizeof(*dcb), tb, DCB_ATTR_MAX,
                          dcbnl_rtnl_policy);
        if (ret < 0)
                return ret;

        if (!tb[DCB_ATTR_IFNAME])
                return -EINVAL;

        netdev = dev_get_by_name(&init_net, nla_data(tb[DCB_ATTR_IFNAME]));
        if (!netdev)
                return -EINVAL;

        if (!netdev->dcbnl_ops)
                goto errout;

        switch (dcb->cmd) {
        case DCB_CMD_GSTATE:
                ret = dcbnl_getstate(netdev, tb, pid, nlh->nlmsg_seq,
                                     nlh->nlmsg_flags);
                goto out;
        case DCB_CMD_PFC_GCFG:
                ret = dcbnl_getpfccfg(netdev, tb, pid, nlh->nlmsg_seq,
                                      nlh->nlmsg_flags);
                goto out;
        case DCB_CMD_GPERM_HWADDR:
                ret = dcbnl_getperm_hwaddr(netdev, tb, pid, nlh->nlmsg_seq,
                                           nlh->nlmsg_flags);
                goto out;
        case DCB_CMD_PGTX_GCFG:
                ret = dcbnl_pgtx_getcfg(netdev, tb, pid, nlh->nlmsg_seq,
                                        nlh->nlmsg_flags);
                goto out;
        case DCB_CMD_PGRX_GCFG:
                ret = dcbnl_pgrx_getcfg(netdev, tb, pid, nlh->nlmsg_seq,
                                        nlh->nlmsg_flags);
                goto out;
        case DCB_CMD_BCN_GCFG:
                ret = dcbnl_bcn_getcfg(netdev, tb, pid, nlh->nlmsg_seq,
                                       nlh->nlmsg_flags);
                goto out;
        case DCB_CMD_SSTATE:
                ret = dcbnl_setstate(netdev, tb, pid, nlh->nlmsg_seq,
                                     nlh->nlmsg_flags);
                goto out;
        case DCB_CMD_PFC_SCFG:
                ret = dcbnl_setpfccfg(netdev, tb, pid, nlh->nlmsg_seq,
                                      nlh->nlmsg_flags);
                goto out;
        case DCB_CMD_SET_ALL:
                ret = dcbnl_setall(netdev, tb, pid, nlh->nlmsg_seq,
                                   nlh->nlmsg_flags);
                goto out;
        case DCB_CMD_PGTX_SCFG:
                ret = dcbnl_pgtx_setcfg(netdev, tb, pid, nlh->nlmsg_seq,
                                        nlh->nlmsg_flags);
                goto out;
        case DCB_CMD_PGRX_SCFG:
                ret = dcbnl_pgrx_setcfg(netdev, tb, pid, nlh->nlmsg_seq,
                                        nlh->nlmsg_flags);
                goto out;
        case DCB_CMD_GCAP:
                ret = dcbnl_getcap(netdev, tb, pid, nlh->nlmsg_seq,
                                   nlh->nlmsg_flags);
                goto out;
        case DCB_CMD_GNUMTCS:
                ret = dcbnl_getnumtcs(netdev, tb, pid, nlh->nlmsg_seq,
                                      nlh->nlmsg_flags);
                goto out;
        case DCB_CMD_SNUMTCS:
                ret = dcbnl_setnumtcs(netdev, tb, pid, nlh->nlmsg_seq,
                                      nlh->nlmsg_flags);
                goto out;
        case DCB_CMD_PFC_GSTATE:
                ret = dcbnl_getpfcstate(netdev, tb, pid, nlh->nlmsg_seq,
                                        nlh->nlmsg_flags);
                goto out;
        case DCB_CMD_PFC_SSTATE:
                ret = dcbnl_setpfcstate(netdev, tb, pid, nlh->nlmsg_seq,
                                        nlh->nlmsg_flags);
                goto out;
        case DCB_CMD_BCN_SCFG:
                ret = dcbnl_bcn_setcfg(netdev, tb, pid, nlh->nlmsg_seq,
                                       nlh->nlmsg_flags);
                goto out;
        default:
                goto errout;
        }
errout:
        ret = -EINVAL;
out:
        dev_put(netdev);
        return ret;
}

static int __init dcbnl_init(void)
{
        rtnl_register(PF_UNSPEC, RTM_GETDCB, dcb_doit, NULL);
        rtnl_register(PF_UNSPEC, RTM_SETDCB, dcb_doit, NULL);

        return 0;
}
module_init(dcbnl_init);

static void __exit dcbnl_exit(void)
{
        rtnl_unregister(PF_UNSPEC, RTM_GETDCB);
        rtnl_unregister(PF_UNSPEC, RTM_SETDCB);
}
module_exit(dcbnl_exit);
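
/*
 * Userspace usage note (illustrative, not part of this module): a DCB query
 * is an ordinary rtnetlink request.  To read the DCB state of a device,
 * send an RTM_GETDCB message whose payload is a struct dcbmsg with
 * cmd = DCB_CMD_GSTATE plus a DCB_ATTR_IFNAME attribute naming the device;
 * dcb_doit() dispatches it to dcbnl_getstate(), and the answer comes back
 * as a DCB_ATTR_STATE u8 attribute built by dcbnl_reply().
 */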