/*
 * Copyright (c) 2008, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Author: Lucy Liu <lucy.liu@intel.com>
 */

#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/slab.h>
#include <net/netlink.h>
#include <net/rtnetlink.h>
#include <linux/dcbnl.h>
#include <linux/rtnetlink.h>
#include <net/sock.h>

/**
 * Data Center Bridging (DCB) is a collection of Ethernet enhancements
 * intended to allow network traffic with differing requirements
 * (highly reliable, no drops vs. best effort vs. low latency) to operate
 * and co-exist on Ethernet.  Current DCB features are:
 *
 * Enhanced Transmission Selection (aka Priority Grouping [PG]) - provides a
 *   framework for assigning bandwidth guarantees to traffic classes.
 *
 * Priority-based Flow Control (PFC) - provides a flow control mechanism which
 *   can work independently for each 802.1p priority.
 *
 * Congestion Notification - provides a mechanism for end-to-end congestion
 *   control for protocols which do not have built-in congestion management.
 *
 * More information about the emerging standards for these Ethernet features
 * can be found at: http://www.ieee802.org/1/pages/dcbridges.html
 *
 * This file implements an rtnetlink interface to allow configuration of DCB
 * features for capable devices.
 */

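/*
 * Illustrative sketch (not part of this file): a capable driver exposes DCB
 * support by pointing netdev->dcbnl_ops at a struct dcbnl_rtnl_ops.  The
 * callback signatures below are inferred from how this file invokes them;
 * the "foo_*" names and the private-data accessor are hypothetical.
 *
 *	static u8 foo_getstate(struct net_device *netdev)
 *	{
 *		return foo_priv(netdev)->dcb_enabled;
 *	}
 *
 *	static u8 foo_setstate(struct net_device *netdev, u8 state)
 *	{
 *		foo_priv(netdev)->dcb_enabled = !!state;
 *		return 0;	/* reported back to userspace via dcbnl_reply() */
 *	}
 *
 *	static const struct dcbnl_rtnl_ops foo_dcbnl_ops = {
 *		.getstate	= foo_getstate,
 *		.setstate	= foo_setstate,
 *	};
 *
 *	At probe time the driver then sets:
 *		netdev->dcbnl_ops = &foo_dcbnl_ops;
 */
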
MODULE_AUTHOR("Lucy Liu, <lucy.liu@intel.com>");
MODULE_DESCRIPTION("Data Center Bridging netlink interface");
MODULE_LICENSE("GPL");

/**************** DCB attribute policies *************************************/

/* DCB netlink attributes policy */
static const struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = {
	[DCB_ATTR_IFNAME]      = {.type = NLA_NUL_STRING, .len = IFNAMSIZ - 1},
	[DCB_ATTR_STATE]       = {.type = NLA_U8},
	[DCB_ATTR_PFC_CFG]     = {.type = NLA_NESTED},
	[DCB_ATTR_PG_CFG]      = {.type = NLA_NESTED},
	[DCB_ATTR_SET_ALL]     = {.type = NLA_U8},
	[DCB_ATTR_PERM_HWADDR] = {.type = NLA_FLAG},
	[DCB_ATTR_CAP]         = {.type = NLA_NESTED},
	[DCB_ATTR_PFC_STATE]   = {.type = NLA_U8},
	[DCB_ATTR_BCN]         = {.type = NLA_NESTED},
	[DCB_ATTR_APP]         = {.type = NLA_NESTED},
};

/* DCB priority flow control to User Priority nested attributes */
static const struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = {
	[DCB_PFC_UP_ATTR_0]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_1]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_2]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_3]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_4]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_5]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_6]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_7]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_ALL] = {.type = NLA_FLAG},
};

/* DCB priority grouping nested attributes */
static const struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = {
	[DCB_PG_ATTR_TC_0]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_1]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_2]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_3]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_4]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_5]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_6]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_7]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_ALL]    = {.type = NLA_NESTED},
	[DCB_PG_ATTR_BW_ID_0]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_1]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_2]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_3]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_4]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_5]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_6]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_7]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_ALL] = {.type = NLA_FLAG},
};

/* DCB traffic class nested attributes. */
static const struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = {
	[DCB_TC_ATTR_PARAM_PGID]        = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_UP_MAPPING]  = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_STRICT_PRIO] = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_BW_PCT]      = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_ALL]         = {.type = NLA_FLAG},
};

/* DCB capabilities nested attributes. */
static const struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = {
	[DCB_CAP_ATTR_ALL]     = {.type = NLA_FLAG},
	[DCB_CAP_ATTR_PG]      = {.type = NLA_U8},
	[DCB_CAP_ATTR_PFC]     = {.type = NLA_U8},
	[DCB_CAP_ATTR_UP2TC]   = {.type = NLA_U8},
	[DCB_CAP_ATTR_PG_TCS]  = {.type = NLA_U8},
	[DCB_CAP_ATTR_PFC_TCS] = {.type = NLA_U8},
	[DCB_CAP_ATTR_GSP]     = {.type = NLA_U8},
	[DCB_CAP_ATTR_BCN]     = {.type = NLA_U8},
};

/* DCB number of traffic classes nested attributes. */
static const struct nla_policy dcbnl_numtcs_nest[DCB_NUMTCS_ATTR_MAX + 1] = {
	[DCB_NUMTCS_ATTR_ALL] = {.type = NLA_FLAG},
	[DCB_NUMTCS_ATTR_PG]  = {.type = NLA_U8},
	[DCB_NUMTCS_ATTR_PFC] = {.type = NLA_U8},
};

/* DCB BCN nested attributes. */
static const struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = {
	[DCB_BCN_ATTR_RP_0]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_1]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_2]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_3]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_4]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_5]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_6]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_7]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_ALL] = {.type = NLA_FLAG},
	[DCB_BCN_ATTR_BCNA_0] = {.type = NLA_U32},
	[DCB_BCN_ATTR_BCNA_1] = {.type = NLA_U32},
	[DCB_BCN_ATTR_ALPHA]  = {.type = NLA_U32},
	[DCB_BCN_ATTR_BETA]   = {.type = NLA_U32},
	[DCB_BCN_ATTR_GD]     = {.type = NLA_U32},
	[DCB_BCN_ATTR_GI]     = {.type = NLA_U32},
	[DCB_BCN_ATTR_TMAX]   = {.type = NLA_U32},
	[DCB_BCN_ATTR_TD]     = {.type = NLA_U32},
	[DCB_BCN_ATTR_RMIN]   = {.type = NLA_U32},
	[DCB_BCN_ATTR_W]      = {.type = NLA_U32},
	[DCB_BCN_ATTR_RD]     = {.type = NLA_U32},
	[DCB_BCN_ATTR_RU]     = {.type = NLA_U32},
	[DCB_BCN_ATTR_WRTT]   = {.type = NLA_U32},
	[DCB_BCN_ATTR_RI]     = {.type = NLA_U32},
	[DCB_BCN_ATTR_C]      = {.type = NLA_U32},
	[DCB_BCN_ATTR_ALL]    = {.type = NLA_FLAG},
};

/* DCB APP nested attributes. */
static const struct nla_policy dcbnl_app_nest[DCB_APP_ATTR_MAX + 1] = {
	[DCB_APP_ATTR_IDTYPE]   = {.type = NLA_U8},
	[DCB_APP_ATTR_ID]       = {.type = NLA_U16},
	[DCB_APP_ATTR_PRIORITY] = {.type = NLA_U8},
};

/* standard netlink reply call */
static int dcbnl_reply(u8 value, u8 event, u8 cmd, u8 attr, u32 pid,
                       u32 seq, u16 flags)
{
	struct sk_buff *dcbnl_skb;
	struct dcbmsg *dcb;
	struct nlmsghdr *nlh;
	int ret = -EINVAL;

	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!dcbnl_skb)
		return ret;

	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, event, sizeof(*dcb), flags);

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = cmd;

	ret = nla_put_u8(dcbnl_skb, attr, value);
	if (ret)
		goto err;

	/* end the message, assign the nlmsg_len. */
	nlmsg_end(dcbnl_skb, nlh);
	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
	if (ret)
		goto err_out;

	return 0;
nlmsg_failure:
err:
	kfree_skb(dcbnl_skb);
err_out:
	return ret;
}

static int dcbnl_getstate(struct net_device *netdev, struct nlattr **tb,
                          u32 pid, u32 seq, u16 flags)
{
	int ret = -EINVAL;

	/* if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->getstate) */
	if (!netdev->dcbnl_ops->getstate)
		return ret;

	ret = dcbnl_reply(netdev->dcbnl_ops->getstate(netdev), RTM_GETDCB,
	                  DCB_CMD_GSTATE, DCB_ATTR_STATE, pid, seq, flags);

	return ret;
}

static int dcbnl_getpfccfg(struct net_device *netdev, struct nlattr **tb,
                           u32 pid, u32 seq, u16 flags)
{
	struct sk_buff *dcbnl_skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1], *nest;
	u8 value;
	int ret = -EINVAL;
	int i;
	int getall = 0;

	if (!tb[DCB_ATTR_PFC_CFG] || !netdev->dcbnl_ops->getpfccfg)
		return ret;

	ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
	                       tb[DCB_ATTR_PFC_CFG],
	                       dcbnl_pfc_up_nest);
	if (ret)
		goto err_out;

	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!dcbnl_skb)
		goto err_out;

	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = DCB_CMD_PFC_GCFG;

	nest = nla_nest_start(dcbnl_skb, DCB_ATTR_PFC_CFG);
	if (!nest)
		goto err;

	if (data[DCB_PFC_UP_ATTR_ALL])
		getall = 1;

	for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
		if (!getall && !data[i])
			continue;

		netdev->dcbnl_ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0,
		                             &value);
		ret = nla_put_u8(dcbnl_skb, i, value);

		if (ret) {
			nla_nest_cancel(dcbnl_skb, nest);
			goto err;
		}
	}
	nla_nest_end(dcbnl_skb, nest);

	nlmsg_end(dcbnl_skb, nlh);

	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
	if (ret)
		goto err_out;

	return 0;
nlmsg_failure:
err:
	kfree_skb(dcbnl_skb);
err_out:
	return -EINVAL;
}

static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlattr **tb,
                                u32 pid, u32 seq, u16 flags)
{
	struct sk_buff *dcbnl_skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	u8 perm_addr[MAX_ADDR_LEN];
	int ret = -EINVAL;

	if (!netdev->dcbnl_ops->getpermhwaddr)
		return ret;

	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!dcbnl_skb)
		goto err_out;

	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = DCB_CMD_GPERM_HWADDR;

	netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr);

	ret = nla_put(dcbnl_skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr),
	              perm_addr);
	if (ret)
		goto err;

	nlmsg_end(dcbnl_skb, nlh);

	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
	if (ret)
		goto err_out;

	return 0;

nlmsg_failure:
err:
	kfree_skb(dcbnl_skb);
err_out:
	return -EINVAL;
}

static int dcbnl_getcap(struct net_device *netdev, struct nlattr **tb,
                        u32 pid, u32 seq, u16 flags)
{
	struct sk_buff *dcbnl_skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	struct nlattr *data[DCB_CAP_ATTR_MAX + 1], *nest;
	u8 value;
	int ret = -EINVAL;
	int i;
	int getall = 0;

	if (!tb[DCB_ATTR_CAP] || !netdev->dcbnl_ops->getcap)
		return ret;

	ret = nla_parse_nested(data, DCB_CAP_ATTR_MAX, tb[DCB_ATTR_CAP],
	                       dcbnl_cap_nest);
	if (ret)
		goto err_out;

	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!dcbnl_skb)
		goto err_out;

	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = DCB_CMD_GCAP;

	nest = nla_nest_start(dcbnl_skb, DCB_ATTR_CAP);
	if (!nest)
		goto err;

	if (data[DCB_CAP_ATTR_ALL])
		getall = 1;

	for (i = DCB_CAP_ATTR_ALL+1; i <= DCB_CAP_ATTR_MAX; i++) {
		if (!getall && !data[i])
			continue;

		if (!netdev->dcbnl_ops->getcap(netdev, i, &value)) {
			ret = nla_put_u8(dcbnl_skb, i, value);

			if (ret) {
				nla_nest_cancel(dcbnl_skb, nest);
				goto err;
			}
		}
	}
	nla_nest_end(dcbnl_skb, nest);

	nlmsg_end(dcbnl_skb, nlh);

	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
	if (ret)
		goto err_out;

	return 0;
nlmsg_failure:
err:
	kfree_skb(dcbnl_skb);
err_out:
	return -EINVAL;
}

static int dcbnl_getnumtcs(struct net_device *netdev, struct nlattr **tb,
                           u32 pid, u32 seq, u16 flags)
{
	struct sk_buff *dcbnl_skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1], *nest;
	u8 value;
	int ret = -EINVAL;
	int i;
	int getall = 0;

	if (!tb[DCB_ATTR_NUMTCS] || !netdev->dcbnl_ops->getnumtcs)
		return ret;

	ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS],
	                       dcbnl_numtcs_nest);
	if (ret)
		goto err_out;

	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!dcbnl_skb)
		goto err_out;

	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = DCB_CMD_GNUMTCS;

	nest = nla_nest_start(dcbnl_skb, DCB_ATTR_NUMTCS);
	if (!nest)
		goto err;

	if (data[DCB_NUMTCS_ATTR_ALL])
		getall = 1;

	for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
		if (!getall && !data[i])
			continue;

		ret = netdev->dcbnl_ops->getnumtcs(netdev, i, &value);
		if (ret)
			goto err;

		ret = nla_put_u8(dcbnl_skb, i, value);
		if (ret) {
			nla_nest_cancel(dcbnl_skb, nest);
			goto err;
		}
	}
	nla_nest_end(dcbnl_skb, nest);

	nlmsg_end(dcbnl_skb, nlh);

	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
	if (ret)
		goto err_out;

	return 0;
nlmsg_failure:
err:
	kfree_skb(dcbnl_skb);
err_out:
	return -EINVAL;
}

static int dcbnl_setnumtcs(struct net_device *netdev, struct nlattr **tb,
                           u32 pid, u32 seq, u16 flags)
{
	struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1];
	int ret = -EINVAL;
	u8 value;
	int i;

	if (!tb[DCB_ATTR_NUMTCS] || !netdev->dcbnl_ops->setnumtcs)
		return ret;

	ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS],
	                       dcbnl_numtcs_nest);
	if (ret)
		goto err;

	for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
		if (data[i] == NULL)
			continue;

		value = nla_get_u8(data[i]);

		ret = netdev->dcbnl_ops->setnumtcs(netdev, i, value);
		if (ret)
			break;
	}

	ret = dcbnl_reply(!!ret, RTM_SETDCB, DCB_CMD_SNUMTCS,
	                  DCB_ATTR_NUMTCS, pid, seq, flags);

err:
	return ret;
}

static int dcbnl_getpfcstate(struct net_device *netdev, struct nlattr **tb,
                             u32 pid, u32 seq, u16 flags)
{
	int ret = -EINVAL;

	if (!netdev->dcbnl_ops->getpfcstate)
		return ret;

	ret = dcbnl_reply(netdev->dcbnl_ops->getpfcstate(netdev), RTM_GETDCB,
	                  DCB_CMD_PFC_GSTATE, DCB_ATTR_PFC_STATE,
	                  pid, seq, flags);

	return ret;
}

static int dcbnl_setpfcstate(struct net_device *netdev, struct nlattr **tb,
                             u32 pid, u32 seq, u16 flags)
{
	int ret = -EINVAL;
	u8 value;

	if (!tb[DCB_ATTR_PFC_STATE] || !netdev->dcbnl_ops->setpfcstate)
		return ret;

	value = nla_get_u8(tb[DCB_ATTR_PFC_STATE]);

	netdev->dcbnl_ops->setpfcstate(netdev, value);

	ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_PFC_SSTATE, DCB_ATTR_PFC_STATE,
	                  pid, seq, flags);

	return ret;
}

static int dcbnl_getapp(struct net_device *netdev, struct nlattr **tb,
                        u32 pid, u32 seq, u16 flags)
{
	struct sk_buff *dcbnl_skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	struct nlattr *app_nest;
	struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];
	u16 id;
	u8 up, idtype;
	int ret = -EINVAL;

	if (!tb[DCB_ATTR_APP] || !netdev->dcbnl_ops->getapp)
		goto out;

	ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
	                       dcbnl_app_nest);
	if (ret)
		goto out;

	ret = -EINVAL;
	/* all must be non-null */
	if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
	    (!app_tb[DCB_APP_ATTR_ID]))
		goto out;

	/* either by eth type or by socket number */
	idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
	if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
	    (idtype != DCB_APP_IDTYPE_PORTNUM))
		goto out;

	id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
	up = netdev->dcbnl_ops->getapp(netdev, idtype, id);

	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!dcbnl_skb)
		goto out;

	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = DCB_CMD_GAPP;

	app_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_APP);
	ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_IDTYPE, idtype);
	if (ret)
		goto out_cancel;

	ret = nla_put_u16(dcbnl_skb, DCB_APP_ATTR_ID, id);
	if (ret)
		goto out_cancel;

	ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_PRIORITY, up);
	if (ret)
		goto out_cancel;

	nla_nest_end(dcbnl_skb, app_nest);
	nlmsg_end(dcbnl_skb, nlh);

	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
	goto out;

out_cancel:
	nla_nest_cancel(dcbnl_skb, app_nest);
nlmsg_failure:
	kfree_skb(dcbnl_skb);
out:
	return ret;
}

static int dcbnl_setapp(struct net_device *netdev, struct nlattr **tb,
                        u32 pid, u32 seq, u16 flags)
{
	int ret = -EINVAL;
	u16 id;
	u8 up, idtype;
	struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];

	if (!tb[DCB_ATTR_APP] || !netdev->dcbnl_ops->setapp)
		goto out;

	ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
	                       dcbnl_app_nest);
	if (ret)
		goto out;

	ret = -EINVAL;
	/* all must be non-null */
	if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
	    (!app_tb[DCB_APP_ATTR_ID]) ||
	    (!app_tb[DCB_APP_ATTR_PRIORITY]))
		goto out;

	/* either by eth type or by socket number */
	idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
	if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
	    (idtype != DCB_APP_IDTYPE_PORTNUM))
		goto out;

	id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
	up = nla_get_u8(app_tb[DCB_APP_ATTR_PRIORITY]);

	ret = dcbnl_reply(netdev->dcbnl_ops->setapp(netdev, idtype, id, up),
	                  RTM_SETDCB, DCB_CMD_SAPP, DCB_ATTR_APP,
	                  pid, seq, flags);
out:
	return ret;
}

static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlattr **tb,
                             u32 pid, u32 seq, u16 flags, int dir)
{
	struct sk_buff *dcbnl_skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	struct nlattr *pg_nest, *param_nest, *data;
	struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
	struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
	u8 prio, pgid, tc_pct, up_map;
	int ret = -EINVAL;
	int getall = 0;
	int i;

	if (!tb[DCB_ATTR_PG_CFG] ||
	    !netdev->dcbnl_ops->getpgtccfgtx ||
	    !netdev->dcbnl_ops->getpgtccfgrx ||
	    !netdev->dcbnl_ops->getpgbwgcfgtx ||
	    !netdev->dcbnl_ops->getpgbwgcfgrx)
		return ret;

	ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX,
	                       tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest);
	if (ret)
		goto err_out;

	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!dcbnl_skb)
		goto err_out;

	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = (dir) ? DCB_CMD_PGRX_GCFG : DCB_CMD_PGTX_GCFG;

	pg_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_PG_CFG);
	if (!pg_nest)
		goto err;

	if (pg_tb[DCB_PG_ATTR_TC_ALL])
		getall = 1;

	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
		if (!getall && !pg_tb[i])
			continue;

		if (pg_tb[DCB_PG_ATTR_TC_ALL])
			data = pg_tb[DCB_PG_ATTR_TC_ALL];
		else
			data = pg_tb[i];
		ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX,
		                       data, dcbnl_tc_param_nest);
		if (ret)
			goto err_pg;

		param_nest = nla_nest_start(dcbnl_skb, i);
		if (!param_nest)
			goto err_pg;

		pgid = DCB_ATTR_VALUE_UNDEFINED;
		prio = DCB_ATTR_VALUE_UNDEFINED;
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
		up_map = DCB_ATTR_VALUE_UNDEFINED;

		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->getpgtccfgrx(netdev,
					i - DCB_PG_ATTR_TC_0, &prio,
					&pgid, &tc_pct, &up_map);
		} else {
			/* Tx */
			netdev->dcbnl_ops->getpgtccfgtx(netdev,
					i - DCB_PG_ATTR_TC_0, &prio,
					&pgid, &tc_pct, &up_map);
		}

		if (param_tb[DCB_TC_ATTR_PARAM_PGID] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(dcbnl_skb,
			                 DCB_TC_ATTR_PARAM_PGID, pgid);
			if (ret)
				goto err_param;
		}
		if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(dcbnl_skb,
			                 DCB_TC_ATTR_PARAM_UP_MAPPING, up_map);
			if (ret)
				goto err_param;
		}
		if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(dcbnl_skb,
			                 DCB_TC_ATTR_PARAM_STRICT_PRIO, prio);
			if (ret)
				goto err_param;
		}
		if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(dcbnl_skb, DCB_TC_ATTR_PARAM_BW_PCT,
			                 tc_pct);
			if (ret)
				goto err_param;
		}
		nla_nest_end(dcbnl_skb, param_nest);
	}

	if (pg_tb[DCB_PG_ATTR_BW_ID_ALL])
		getall = 1;

	for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
		if (!getall && !pg_tb[i])
			continue;

		tc_pct = DCB_ATTR_VALUE_UNDEFINED;

		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->getpgbwgcfgrx(netdev,
					i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
		} else {
			/* Tx */
			netdev->dcbnl_ops->getpgbwgcfgtx(netdev,
					i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
		}
		ret = nla_put_u8(dcbnl_skb, i, tc_pct);

		if (ret)
			goto err_pg;
	}

	nla_nest_end(dcbnl_skb, pg_nest);

	nlmsg_end(dcbnl_skb, nlh);

	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
	if (ret)
		goto err_out;

	return 0;

err_param:
	nla_nest_cancel(dcbnl_skb, param_nest);
err_pg:
	nla_nest_cancel(dcbnl_skb, pg_nest);
nlmsg_failure:
err:
	kfree_skb(dcbnl_skb);
err_out:
	return -EINVAL;
}

static int dcbnl_pgtx_getcfg(struct net_device *netdev, struct nlattr **tb,
                             u32 pid, u32 seq, u16 flags)
{
	return __dcbnl_pg_getcfg(netdev, tb, pid, seq, flags, 0);
}

static int dcbnl_pgrx_getcfg(struct net_device *netdev, struct nlattr **tb,
                             u32 pid, u32 seq, u16 flags)
{
	return __dcbnl_pg_getcfg(netdev, tb, pid, seq, flags, 1);
}

static int dcbnl_setstate(struct net_device *netdev, struct nlattr **tb,
                          u32 pid, u32 seq, u16 flags)
{
	int ret = -EINVAL;
	u8 value;

	if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->setstate)
		return ret;

	value = nla_get_u8(tb[DCB_ATTR_STATE]);

	ret = dcbnl_reply(netdev->dcbnl_ops->setstate(netdev, value),
	                  RTM_SETDCB, DCB_CMD_SSTATE, DCB_ATTR_STATE,
	                  pid, seq, flags);

	return ret;
}

static int dcbnl_setpfccfg(struct net_device *netdev, struct nlattr **tb,
                           u32 pid, u32 seq, u16 flags)
{
	struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1];
	int i;
	int ret = -EINVAL;
	u8 value;

	if (!tb[DCB_ATTR_PFC_CFG] || !netdev->dcbnl_ops->setpfccfg)
		return ret;

	ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
	                       tb[DCB_ATTR_PFC_CFG],
	                       dcbnl_pfc_up_nest);
	if (ret)
		goto err;

	for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
		if (data[i] == NULL)
			continue;
		value = nla_get_u8(data[i]);
		netdev->dcbnl_ops->setpfccfg(netdev,
			data[i]->nla_type - DCB_PFC_UP_ATTR_0, value);
	}

	ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_PFC_SCFG, DCB_ATTR_PFC_CFG,
	                  pid, seq, flags);
err:
	return ret;
}

static int dcbnl_setall(struct net_device *netdev, struct nlattr **tb,
                        u32 pid, u32 seq, u16 flags)
{
	int ret = -EINVAL;

	if (!tb[DCB_ATTR_SET_ALL] || !netdev->dcbnl_ops->setall)
		return ret;

	ret = dcbnl_reply(netdev->dcbnl_ops->setall(netdev), RTM_SETDCB,
	                  DCB_CMD_SET_ALL, DCB_ATTR_SET_ALL, pid, seq, flags);

	return ret;
}

static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlattr **tb,
                             u32 pid, u32 seq, u16 flags, int dir)
{
	struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
	struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
	int ret = -EINVAL;
	int i;
	u8 pgid;
	u8 up_map;
	u8 prio;
	u8 tc_pct;

	if (!tb[DCB_ATTR_PG_CFG] ||
	    !netdev->dcbnl_ops->setpgtccfgtx ||
	    !netdev->dcbnl_ops->setpgtccfgrx ||
	    !netdev->dcbnl_ops->setpgbwgcfgtx ||
	    !netdev->dcbnl_ops->setpgbwgcfgrx)
		return ret;

	ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX,
	                       tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest);
	if (ret)
		goto err;

	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
		if (!pg_tb[i])
			continue;

		ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX,
		                       pg_tb[i], dcbnl_tc_param_nest);
		if (ret)
			goto err;

		pgid = DCB_ATTR_VALUE_UNDEFINED;
		prio = DCB_ATTR_VALUE_UNDEFINED;
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
		up_map = DCB_ATTR_VALUE_UNDEFINED;

		if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO])
			prio =
			    nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO]);

		if (param_tb[DCB_TC_ATTR_PARAM_PGID])
			pgid = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_PGID]);

		if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT])
			tc_pct = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_BW_PCT]);

		if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING])
			up_map =
			    nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING]);

		/* dir: Tx = 0, Rx = 1 */
		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->setpgtccfgrx(netdev,
				i - DCB_PG_ATTR_TC_0,
				prio, pgid, tc_pct, up_map);
		} else {
			/* Tx */
			netdev->dcbnl_ops->setpgtccfgtx(netdev,
				i - DCB_PG_ATTR_TC_0,
				prio, pgid, tc_pct, up_map);
		}
	}

	for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
		if (!pg_tb[i])
			continue;

		tc_pct = nla_get_u8(pg_tb[i]);

		/* dir: Tx = 0, Rx = 1 */
		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->setpgbwgcfgrx(netdev,
				i - DCB_PG_ATTR_BW_ID_0, tc_pct);
		} else {
			/* Tx */
			netdev->dcbnl_ops->setpgbwgcfgtx(netdev,
				i - DCB_PG_ATTR_BW_ID_0, tc_pct);
		}
	}

	ret = dcbnl_reply(0, RTM_SETDCB,
	                  (dir ? DCB_CMD_PGRX_SCFG : DCB_CMD_PGTX_SCFG),
	                  DCB_ATTR_PG_CFG, pid, seq, flags);

err:
	return ret;
}

static int dcbnl_pgtx_setcfg(struct net_device *netdev, struct nlattr **tb,
                             u32 pid, u32 seq, u16 flags)
{
	return __dcbnl_pg_setcfg(netdev, tb, pid, seq, flags, 0);
}

static int dcbnl_pgrx_setcfg(struct net_device *netdev, struct nlattr **tb,
                             u32 pid, u32 seq, u16 flags)
{
	return __dcbnl_pg_setcfg(netdev, tb, pid, seq, flags, 1);
}

static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlattr **tb,
                            u32 pid, u32 seq, u16 flags)
{
	struct sk_buff *dcbnl_skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	struct nlattr *bcn_nest;
	struct nlattr *bcn_tb[DCB_BCN_ATTR_MAX + 1];
	u8 value_byte;
	u32 value_integer;
	int ret = -EINVAL;
	bool getall = false;
	int i;

	if (!tb[DCB_ATTR_BCN] || !netdev->dcbnl_ops->getbcnrp ||
	    !netdev->dcbnl_ops->getbcncfg)
		return ret;

	ret = nla_parse_nested(bcn_tb, DCB_BCN_ATTR_MAX,
	                       tb[DCB_ATTR_BCN], dcbnl_bcn_nest);
	if (ret)
		goto err_out;

	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!dcbnl_skb)
		goto err_out;

	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = DCB_CMD_BCN_GCFG;

	bcn_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_BCN);
	if (!bcn_nest)
		goto err;

	if (bcn_tb[DCB_BCN_ATTR_ALL])
		getall = true;

	for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
		if (!getall && !bcn_tb[i])
			continue;

		netdev->dcbnl_ops->getbcnrp(netdev, i - DCB_BCN_ATTR_RP_0,
		                            &value_byte);
		ret = nla_put_u8(dcbnl_skb, i, value_byte);
		if (ret)
			goto err_bcn;
	}

	for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
		if (!getall && !bcn_tb[i])
			continue;

		netdev->dcbnl_ops->getbcncfg(netdev, i,
		                             &value_integer);
		ret = nla_put_u32(dcbnl_skb, i, value_integer);
		if (ret)
			goto err_bcn;
	}

	nla_nest_end(dcbnl_skb, bcn_nest);

	nlmsg_end(dcbnl_skb, nlh);

	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
	if (ret)
		goto err_out;

	return 0;

err_bcn:
	nla_nest_cancel(dcbnl_skb, bcn_nest);
nlmsg_failure:
err:
	kfree_skb(dcbnl_skb);
err_out:
	return -EINVAL;
}

static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlattr **tb,
                            u32 pid, u32 seq, u16 flags)
{
	struct nlattr *data[DCB_BCN_ATTR_MAX + 1];
	int i;
	int ret = -EINVAL;
	u8 value_byte;
	u32 value_int;

	if (!tb[DCB_ATTR_BCN] || !netdev->dcbnl_ops->setbcncfg ||
	    !netdev->dcbnl_ops->setbcnrp)
		return ret;

	ret = nla_parse_nested(data, DCB_BCN_ATTR_MAX,
	                       tb[DCB_ATTR_BCN], dcbnl_bcn_nest);
	if (ret)
		goto err;

	for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
		if (data[i] == NULL)
			continue;
		value_byte = nla_get_u8(data[i]);
		netdev->dcbnl_ops->setbcnrp(netdev,
			data[i]->nla_type - DCB_BCN_ATTR_RP_0, value_byte);
	}

	for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
		if (data[i] == NULL)
			continue;
		value_int = nla_get_u32(data[i]);
		netdev->dcbnl_ops->setbcncfg(netdev,
		                             i, value_int);
	}

	ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_BCN_SCFG, DCB_ATTR_BCN,
	                  pid, seq, flags);
err:
	return ret;
}

static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	struct dcbmsg *dcb = (struct dcbmsg *)NLMSG_DATA(nlh);
	struct nlattr *tb[DCB_ATTR_MAX + 1];
	u32 pid = skb ? NETLINK_CB(skb).pid : 0;
	int ret = -EINVAL;

	if (!net_eq(net, &init_net))
		return -EINVAL;

	ret = nlmsg_parse(nlh, sizeof(*dcb), tb, DCB_ATTR_MAX,
	                  dcbnl_rtnl_policy);
	if (ret < 0)
		return ret;

	if (!tb[DCB_ATTR_IFNAME])
		return -EINVAL;

	netdev = dev_get_by_name(&init_net, nla_data(tb[DCB_ATTR_IFNAME]));
	if (!netdev)
		return -EINVAL;

	if (!netdev->dcbnl_ops)
		goto errout;

	switch (dcb->cmd) {
	case DCB_CMD_GSTATE:
		ret = dcbnl_getstate(netdev, tb, pid, nlh->nlmsg_seq,
		                     nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_PFC_GCFG:
		ret = dcbnl_getpfccfg(netdev, tb, pid, nlh->nlmsg_seq,
		                      nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_GPERM_HWADDR:
		ret = dcbnl_getperm_hwaddr(netdev, tb, pid, nlh->nlmsg_seq,
		                           nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_PGTX_GCFG:
		ret = dcbnl_pgtx_getcfg(netdev, tb, pid, nlh->nlmsg_seq,
		                        nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_PGRX_GCFG:
		ret = dcbnl_pgrx_getcfg(netdev, tb, pid, nlh->nlmsg_seq,
		                        nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_BCN_GCFG:
		ret = dcbnl_bcn_getcfg(netdev, tb, pid, nlh->nlmsg_seq,
		                       nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_SSTATE:
		ret = dcbnl_setstate(netdev, tb, pid, nlh->nlmsg_seq,
		                     nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_PFC_SCFG:
		ret = dcbnl_setpfccfg(netdev, tb, pid, nlh->nlmsg_seq,
		                      nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_SET_ALL:
		ret = dcbnl_setall(netdev, tb, pid, nlh->nlmsg_seq,
		                   nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_PGTX_SCFG:
		ret = dcbnl_pgtx_setcfg(netdev, tb, pid, nlh->nlmsg_seq,
		                        nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_PGRX_SCFG:
		ret = dcbnl_pgrx_setcfg(netdev, tb, pid, nlh->nlmsg_seq,
		                        nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_GCAP:
		ret = dcbnl_getcap(netdev, tb, pid, nlh->nlmsg_seq,
		                   nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_GNUMTCS:
		ret = dcbnl_getnumtcs(netdev, tb, pid, nlh->nlmsg_seq,
		                      nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_SNUMTCS:
		ret = dcbnl_setnumtcs(netdev, tb, pid, nlh->nlmsg_seq,
		                      nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_PFC_GSTATE:
		ret = dcbnl_getpfcstate(netdev, tb, pid, nlh->nlmsg_seq,
		                        nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_PFC_SSTATE:
		ret = dcbnl_setpfcstate(netdev, tb, pid, nlh->nlmsg_seq,
		                        nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_BCN_SCFG:
		ret = dcbnl_bcn_setcfg(netdev, tb, pid, nlh->nlmsg_seq,
		                       nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_GAPP:
		ret = dcbnl_getapp(netdev, tb, pid, nlh->nlmsg_seq,
		                   nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_SAPP:
		ret = dcbnl_setapp(netdev, tb, pid, nlh->nlmsg_seq,
		                   nlh->nlmsg_flags);
		goto out;
	default:
		goto errout;
	}
errout:
	ret = -EINVAL;
out:
	dev_put(netdev);
	return ret;
}

static int __init dcbnl_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_GETDCB, dcb_doit, NULL);
	rtnl_register(PF_UNSPEC, RTM_SETDCB, dcb_doit, NULL);

	return 0;
}
module_init(dcbnl_init);

static void __exit dcbnl_exit(void)
{
	rtnl_unregister(PF_UNSPEC, RTM_GETDCB);
	rtnl_unregister(PF_UNSPEC, RTM_SETDCB);
}
module_exit(dcbnl_exit);
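
/*
 * Userspace usage sketch (hypothetical, error handling omitted): the handlers
 * registered above are reached by sending RTM_GETDCB/RTM_SETDCB messages on a
 * NETLINK_ROUTE socket.  Querying the DCB state of "eth0" with DCB_CMD_GSTATE
 * looks roughly like this:
 *
 *	struct {
 *		struct nlmsghdr nlh;
 *		struct dcbmsg   dcb;
 *		char            buf[64];
 *	} req = {
 *		.nlh = {
 *			.nlmsg_len   = NLMSG_LENGTH(sizeof(struct dcbmsg)),
 *			.nlmsg_type  = RTM_GETDCB,
 *			.nlmsg_flags = NLM_F_REQUEST,
 *		},
 *		.dcb = { .dcb_family = AF_UNSPEC, .cmd = DCB_CMD_GSTATE },
 *	};
 *	struct rtattr *rta = (struct rtattr *)
 *		((char *)&req + NLMSG_ALIGN(req.nlh.nlmsg_len));
 *
 *	rta->rta_type = DCB_ATTR_IFNAME;
 *	rta->rta_len  = RTA_LENGTH(strlen("eth0") + 1);
 *	strcpy(RTA_DATA(rta), "eth0");
 *	req.nlh.nlmsg_len = NLMSG_ALIGN(req.nlh.nlmsg_len) + rta->rta_len;
 *
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *	send(fd, &req, req.nlh.nlmsg_len, 0);
 *
 * The DCB_ATTR_STATE reply is then read back with recv() and parsed with the
 * usual NLMSG_*() / RTA_*() macros.
 */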