/*
 * net/switchdev/switchdev.c - Switch device API
 * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
 * Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <net/ip_fib.h>
#include <net/switchdev.h>
/**
 *	switchdev_port_attr_get - Get port attribute
 *
 *	@dev: port device
 *	@attr: attribute to get
 */
int switchdev_port_attr_get(struct net_device *dev, struct switchdev_attr *attr)
{
	const struct switchdev_ops *ops = dev->switchdev_ops;
	struct net_device *lower_dev;
	struct list_head *iter;
	struct switchdev_attr first = {
		.id = SWITCHDEV_ATTR_UNDEFINED
	};
	int err = -EOPNOTSUPP;

	if (ops && ops->switchdev_port_attr_get)
		return ops->switchdev_port_attr_get(dev, attr);

	if (attr->flags & SWITCHDEV_F_NO_RECURSE)
		return err;

	/* Switch device port(s) may be stacked under
	 * bond/team/vlan dev, so recurse down to get attr on
	 * each port.  Return -ENODATA if attr values don't
	 * compare across ports.
	 */

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		err = switchdev_port_attr_get(lower_dev, attr);
		if (err)
			break;
		if (first.id == SWITCHDEV_ATTR_UNDEFINED)
			first = *attr;
		else if (memcmp(&first, attr, sizeof(*attr)))
			return -ENODATA;
	}

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_port_attr_get);
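
/* Example: checking whether two ports belong to the same switch via the
 * SWITCHDEV_ATTR_PORT_PARENT_ID attribute (illustrative sketch only, not
 * part of this file).  Ports of the same switch device report the same
 * physical item ID:
 *
 *	static bool ports_on_same_switch(struct net_device *a,
 *					 struct net_device *b)
 *	{
 *		struct switchdev_attr attr_a = {
 *			.id = SWITCHDEV_ATTR_PORT_PARENT_ID,
 *		};
 *		struct switchdev_attr attr_b = {
 *			.id = SWITCHDEV_ATTR_PORT_PARENT_ID,
 *		};
 *
 *		if (switchdev_port_attr_get(a, &attr_a) ||
 *		    switchdev_port_attr_get(b, &attr_b))
 *			return false;
 *		return attr_a.ppid.id_len == attr_b.ppid.id_len &&
 *		       !memcmp(attr_a.ppid.id, attr_b.ppid.id,
 *			       attr_a.ppid.id_len);
 *	}
 */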
static int __switchdev_port_attr_set(struct net_device *dev,
				     struct switchdev_attr *attr)
{
	const struct switchdev_ops *ops = dev->switchdev_ops;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (ops && ops->switchdev_port_attr_set)
		return ops->switchdev_port_attr_set(dev, attr);

	if (attr->flags & SWITCHDEV_F_NO_RECURSE)
		return err;

	/* Switch device port(s) may be stacked under
	 * bond/team/vlan dev, so recurse down to set attr on
	 * each port.
	 */

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		err = __switchdev_port_attr_set(lower_dev, attr);
		if (err)
			break;
	}

	return err;
}
struct switchdev_attr_set_work {
	struct work_struct work;
	struct net_device *dev;
	struct switchdev_attr attr;
};
static void switchdev_port_attr_set_work(struct work_struct *work)
{
	struct switchdev_attr_set_work *asw =
		container_of(work, struct switchdev_attr_set_work, work);
	int err;

	rtnl_lock();
	err = switchdev_port_attr_set(asw->dev, &asw->attr);
	BUG_ON(err);
	rtnl_unlock();

	dev_put(asw->dev);
	kfree(work);
}
static int switchdev_port_attr_set_defer(struct net_device *dev,
					 struct switchdev_attr *attr)
{
	struct switchdev_attr_set_work *asw;

	asw = kmalloc(sizeof(*asw), GFP_ATOMIC);
	if (!asw)
		return -ENOMEM;

	INIT_WORK(&asw->work, switchdev_port_attr_set_work);

	dev_hold(dev);
	asw->dev = dev;
	memcpy(&asw->attr, attr, sizeof(asw->attr));

	schedule_work(&asw->work);

	return 0;
}
/**
 *	switchdev_port_attr_set - Set port attribute
 *
 *	@dev: port device
 *	@attr: attribute to set
 *
 *	Use a 2-phase prepare-commit transaction model to ensure
 *	system is not left in a partially updated state due to
 *	failure from driver/device.
 */
int switchdev_port_attr_set(struct net_device *dev, struct switchdev_attr *attr)
{
	int err;

	if (!rtnl_is_locked()) {
		/* Running prepare-commit transaction across stacked
		 * devices requires nothing moves, so if rtnl_lock is
		 * not held, schedule a worker thread to hold rtnl_lock
		 * while setting attr.
		 */

		return switchdev_port_attr_set_defer(dev, attr);
	}

	/* Phase I: prepare for attr set. Driver/device should fail
	 * here if there are going to be issues in the commit phase,
	 * such as lack of resources or support.  The driver/device
	 * should reserve resources needed for the commit phase here,
	 * but should not commit the attr.
	 */

	attr->trans = SWITCHDEV_TRANS_PREPARE;
	err = __switchdev_port_attr_set(dev, attr);
	if (err) {
		/* Prepare phase failed: abort the transaction.  Any
		 * resources reserved in the prepare phase are
		 * released.
		 */

		attr->trans = SWITCHDEV_TRANS_ABORT;
		__switchdev_port_attr_set(dev, attr);

		return err;
	}

	/* Phase II: commit attr set.  This cannot fail as a fault
	 * of driver/device.  If it does, it's a bug in the driver/device
	 * because the driver said everything was OK in phase I.
	 */

	attr->trans = SWITCHDEV_TRANS_COMMIT;
	err = __switchdev_port_attr_set(dev, attr);
	WARN(err, "%s: Commit of attribute (id=%d) failed.\n",
	     dev->name, attr->id);

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_port_attr_set);
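
/* Example: how a port driver's switchdev_port_attr_set op would handle
 * the prepare-commit transaction driven above (illustrative sketch, not
 * part of this file; the foo_* helpers are hypothetical):
 *
 *	static int foo_port_attr_set(struct net_device *dev,
 *				     struct switchdev_attr *attr)
 *	{
 *		struct foo_port *port = netdev_priv(dev);
 *
 *		switch (attr->trans) {
 *		case SWITCHDEV_TRANS_PREPARE:
 *			// Reserve resources only; don't touch hardware.
 *			return foo_port_attr_reserve(port, attr);
 *		case SWITCHDEV_TRANS_ABORT:
 *			// Release anything reserved in PREPARE.
 *			foo_port_attr_unreserve(port, attr);
 *			return 0;
 *		case SWITCHDEV_TRANS_COMMIT:
 *			// Must not fail: resources were reserved above.
 *			return foo_port_attr_apply(port, attr);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */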
int __switchdev_port_obj_add(struct net_device *dev, struct switchdev_obj *obj)
{
	const struct switchdev_ops *ops = dev->switchdev_ops;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (ops && ops->switchdev_port_obj_add)
		return ops->switchdev_port_obj_add(dev, obj);

	/* Switch device port(s) may be stacked under
	 * bond/team/vlan dev, so recurse down to add object on
	 * each port.
	 */

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		err = __switchdev_port_obj_add(lower_dev, obj);
		if (err)
			break;
	}

	return err;
}
/**
 *	switchdev_port_obj_add - Add port object
 *
 *	@dev: port device
 *	@obj: object to add
 *
 *	Use a 2-phase prepare-commit transaction model to ensure
 *	system is not left in a partially updated state due to
 *	failure from driver/device.
 *
 *	rtnl_lock must be held.
 */
int switchdev_port_obj_add(struct net_device *dev, struct switchdev_obj *obj)
{
	int err;

	ASSERT_RTNL();

	/* Phase I: prepare for obj add. Driver/device should fail
	 * here if there are going to be issues in the commit phase,
	 * such as lack of resources or support.  The driver/device
	 * should reserve resources needed for the commit phase here,
	 * but should not commit the obj.
	 */

	obj->trans = SWITCHDEV_TRANS_PREPARE;
	err = __switchdev_port_obj_add(dev, obj);
	if (err) {
		/* Prepare phase failed: abort the transaction.  Any
		 * resources reserved in the prepare phase are
		 * released.
		 */

		obj->trans = SWITCHDEV_TRANS_ABORT;
		__switchdev_port_obj_add(dev, obj);

		return err;
	}

	/* Phase II: commit obj add.  This cannot fail as a fault
	 * of driver/device.  If it does, it's a bug in the driver/device
	 * because the driver said everything was OK in phase I.
	 */

	obj->trans = SWITCHDEV_TRANS_COMMIT;
	err = __switchdev_port_obj_add(dev, obj);
	WARN(err, "%s: Commit of object (id=%d) failed.\n", dev->name, obj->id);

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
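
/* Example: adding a VLAN to a switch port via the object API
 * (illustrative sketch, not part of this file).  Assumes the
 * SWITCHDEV_OBJ_PORT_VLAN object from net/switchdev.h; the caller
 * must hold rtnl_lock:
 *
 *	struct switchdev_obj obj = {
 *		.id = SWITCHDEV_OBJ_PORT_VLAN,
 *		.vlan = {
 *			.flags	   = BRIDGE_VLAN_INFO_UNTAGGED,
 *			.vid_start = 100,
 *			.vid_end   = 100,
 *		},
 *	};
 *	int err = switchdev_port_obj_add(dev, &obj);
 */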
/**
 *	switchdev_port_obj_del - Delete port object
 *
 *	@dev: port device
 *	@obj: object to delete
 */
int switchdev_port_obj_del(struct net_device *dev, struct switchdev_obj *obj)
{
	const struct switchdev_ops *ops = dev->switchdev_ops;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (ops && ops->switchdev_port_obj_del)
		return ops->switchdev_port_obj_del(dev, obj);

	/* Switch device port(s) may be stacked under
	 * bond/team/vlan dev, so recurse down to delete object on
	 * each port.
	 */

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		err = switchdev_port_obj_del(lower_dev, obj);
		if (err)
			break;
	}

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_del);
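
/* Example: deleting the VLAN added in the sketch above (illustrative,
 * not part of this file).  Note that delete is single-phase; there is
 * no prepare-commit transaction on the removal path:
 *
 *	err = switchdev_port_obj_del(dev, &obj);
 */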
static DEFINE_MUTEX(switchdev_mutex);
static RAW_NOTIFIER_HEAD(switchdev_notif_chain);
/**
 *	register_switchdev_notifier - Register notifier
 *	@nb: notifier_block
 *
 *	Register switch device notifier. This should be used by code
 *	which needs to monitor events happening in particular device.
 *	Return values are same as for atomic_notifier_chain_register().
 */
int register_switchdev_notifier(struct notifier_block *nb)
{
	int err;

	mutex_lock(&switchdev_mutex);
	err = raw_notifier_chain_register(&switchdev_notif_chain, nb);
	mutex_unlock(&switchdev_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(register_switchdev_notifier);
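
/* Example: monitoring switchdev events (illustrative sketch, not part
 * of this file; foo_switchdev_event and foo_switchdev_nb are
 * hypothetical):
 *
 *	static int foo_switchdev_event(struct notifier_block *unused,
 *				       unsigned long event, void *ptr)
 *	{
 *		struct switchdev_notifier_info *info = ptr;
 *
 *		// React here to events raised by port drivers;
 *		// info->dev is the originating port device.
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block foo_switchdev_nb = {
 *		.notifier_call = foo_switchdev_event,
 *	};
 *
 *	err = register_switchdev_notifier(&foo_switchdev_nb);
 */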
/**
 *	unregister_switchdev_notifier - Unregister notifier
 *	@nb: notifier_block
 *
 *	Unregister switch device notifier.
 *	Return values are same as for atomic_notifier_chain_unregister().
 */
int unregister_switchdev_notifier(struct notifier_block *nb)
{
	int err;

	mutex_lock(&switchdev_mutex);
	err = raw_notifier_chain_unregister(&switchdev_notif_chain, nb);
	mutex_unlock(&switchdev_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);
/**
 *	call_switchdev_notifiers - Call notifiers
 *	@val: value passed unmodified to notifier function
 *	@dev: port device
 *	@info: notifier information data
 *
 *	Call all network notifier blocks. This should be called by driver
 *	when it needs to propagate hardware event.
 *	Return values are same as for atomic_notifier_call_chain().
 */
int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
			     struct switchdev_notifier_info *info)
{
	int err;

	info->dev = dev;
	mutex_lock(&switchdev_mutex);
	err = raw_notifier_call_chain(&switchdev_notif_chain, val, info);
	mutex_unlock(&switchdev_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
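
/* Example: a port driver propagating a hardware-learned FDB entry up to
 * listeners (illustrative sketch, not part of this file; assumes the
 * SWITCHDEV_FDB_ADD event and struct switchdev_notifier_fdb_info layout
 * from this kernel's net/switchdev.h):
 *
 *	struct switchdev_notifier_fdb_info fdb_info = {
 *		.addr = mac,
 *		.vid = vid,
 *	};
 *
 *	call_switchdev_notifiers(SWITCHDEV_FDB_ADD, port_dev,
 *				 &fdb_info.info);
 */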
/**
 *	switchdev_port_bridge_getlink - Get bridge port attributes
 *
 *	@dev: port device
 *
 *	Called for SELF on rtnl_bridge_getlink to get bridge port
 *	attributes.
 */
int switchdev_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				  struct net_device *dev, u32 filter_mask,
				  int nlflags)
{
	struct switchdev_attr attr = {
		.id = SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS,
	};
	u16 mode = BRIDGE_MODE_UNDEF;
	u32 mask = BR_LEARNING | BR_LEARNING_SYNC;
	int err;

	err = switchdev_port_attr_get(dev, &attr);
	if (err)
		return err;

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode,
				       attr.brport_flags, mask, nlflags);
}
EXPORT_SYMBOL_GPL(switchdev_port_bridge_getlink);
static int switchdev_port_br_setflag(struct net_device *dev,
				     struct nlattr *nlattr,
				     unsigned long brport_flag)
{
	struct switchdev_attr attr = {
		.id = SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS,
	};
	u8 flag = nla_get_u8(nlattr);
	int err;

	err = switchdev_port_attr_get(dev, &attr);
	if (err)
		return err;

	if (flag)
		attr.brport_flags |= brport_flag;
	else
		attr.brport_flags &= ~brport_flag;

	return switchdev_port_attr_set(dev, &attr);
}
static const struct nla_policy
switchdev_port_bridge_policy[IFLA_BRPORT_MAX + 1] = {
	[IFLA_BRPORT_STATE]		= { .type = NLA_U8 },
	[IFLA_BRPORT_COST]		= { .type = NLA_U32 },
	[IFLA_BRPORT_PRIORITY]		= { .type = NLA_U16 },
	[IFLA_BRPORT_MODE]		= { .type = NLA_U8 },
	[IFLA_BRPORT_GUARD]		= { .type = NLA_U8 },
	[IFLA_BRPORT_PROTECT]		= { .type = NLA_U8 },
	[IFLA_BRPORT_FAST_LEAVE]	= { .type = NLA_U8 },
	[IFLA_BRPORT_LEARNING]		= { .type = NLA_U8 },
	[IFLA_BRPORT_LEARNING_SYNC]	= { .type = NLA_U8 },
	[IFLA_BRPORT_UNICAST_FLOOD]	= { .type = NLA_U8 },
};
static int switchdev_port_br_setlink_protinfo(struct net_device *dev,
					      struct nlattr *protinfo)
{
	struct nlattr *attr;
	int rem;
	int err;

	err = nla_validate_nested(protinfo, IFLA_BRPORT_MAX,
				  switchdev_port_bridge_policy);
	if (err)
		return err;

	nla_for_each_nested(attr, protinfo, rem) {
		switch (nla_type(attr)) {
		case IFLA_BRPORT_LEARNING:
			err = switchdev_port_br_setflag(dev, attr,
							BR_LEARNING);
			break;
		case IFLA_BRPORT_LEARNING_SYNC:
			err = switchdev_port_br_setflag(dev, attr,
							BR_LEARNING_SYNC);
			break;
		default:
			err = -EOPNOTSUPP;
			break;
		}
		if (err)
			return err;
	}

	return 0;
}
static int switchdev_port_br_afspec(struct net_device *dev,
				    struct nlattr *afspec,
				    int (*f)(struct net_device *dev,
					     struct switchdev_obj *obj))
{
	struct nlattr *attr;
	struct bridge_vlan_info *vinfo;
	struct switchdev_obj obj = {
		.id = SWITCHDEV_OBJ_PORT_VLAN,
	};
	int rem;
	int err;

	nla_for_each_nested(attr, afspec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_VLAN_INFO)
			continue;
		if (nla_len(attr) != sizeof(struct bridge_vlan_info))
			return -EINVAL;
		vinfo = nla_data(attr);
		obj.vlan.flags = vinfo->flags;
		if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
			if (obj.vlan.vid_start)
				return -EINVAL;
			obj.vlan.vid_start = vinfo->vid;
		} else if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_END) {
			if (!obj.vlan.vid_start)
				return -EINVAL;
			obj.vlan.vid_end = vinfo->vid;
			if (obj.vlan.vid_end <= obj.vlan.vid_start)
				return -EINVAL;
			err = f(dev, &obj);
			if (err)
				return err;
			memset(&obj.vlan, 0, sizeof(obj.vlan));
		} else {
			if (obj.vlan.vid_start)
				return -EINVAL;
			obj.vlan.vid_start = vinfo->vid;
			obj.vlan.vid_end = vinfo->vid;
			err = f(dev, &obj);
			if (err)
				return err;
			memset(&obj.vlan, 0, sizeof(obj.vlan));
		}
	}

	return 0;
}
/**
 *	switchdev_port_bridge_setlink - Set bridge port attributes
 *
 *	@dev: port device
 *	@nlh: netlink header
 *	@flags: netlink flags
 *
 *	Called for SELF on rtnl_bridge_setlink to set bridge port
 *	attributes.
 */
int switchdev_port_bridge_setlink(struct net_device *dev,
				  struct nlmsghdr *nlh, u16 flags)
{
	struct nlattr *protinfo;
	struct nlattr *afspec;
	int err = 0;

	protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
				   IFLA_PROTINFO);
	if (protinfo) {
		err = switchdev_port_br_setlink_protinfo(dev, protinfo);
		if (err)
			return err;
	}

	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
				 IFLA_AF_SPEC);
	if (afspec)
		err = switchdev_port_br_afspec(dev, afspec,
					       switchdev_port_obj_add);

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_port_bridge_setlink);
/**
 *	switchdev_port_bridge_dellink - Delete bridge port attributes
 *
 *	@dev: port device
 *	@nlh: netlink header
 *	@flags: netlink flags
 *
 *	Called for SELF on rtnl_bridge_dellink to delete bridge port
 *	attributes.
 */
int switchdev_port_bridge_dellink(struct net_device *dev,
				  struct nlmsghdr *nlh, u16 flags)
{
	struct nlattr *afspec;

	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
				 IFLA_AF_SPEC);
	if (afspec)
		return switchdev_port_br_afspec(dev, afspec,
						switchdev_port_obj_del);

	return 0;
}
EXPORT_SYMBOL_GPL(switchdev_port_bridge_dellink);
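
/* Example: a switch port driver wiring the three bridge helpers above
 * into its netdev ops, so the bridge SELF path reaches the device
 * (illustrative sketch, not part of this file; foo_port_netdev_ops is
 * hypothetical):
 *
 *	static const struct net_device_ops foo_port_netdev_ops = {
 *		.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
 *		.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
 *		.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
 *	};
 */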
static struct net_device *switchdev_get_lowest_dev(struct net_device *dev)
{
	const struct switchdev_ops *ops = dev->switchdev_ops;
	struct net_device *lower_dev;
	struct net_device *port_dev;
	struct list_head *iter;

	/* Recursively search down until we find a sw port dev.
	 * (A sw port dev supports switchdev_port_attr_get).
	 */

	if (ops && ops->switchdev_port_attr_get)
		return dev;

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		port_dev = switchdev_get_lowest_dev(lower_dev);
		if (port_dev)
			return port_dev;
	}

	return NULL;
}
static struct net_device *switchdev_get_dev_by_nhs(struct fib_info *fi)
{
	struct switchdev_attr attr = {
		.id = SWITCHDEV_ATTR_PORT_PARENT_ID,
	};
	struct switchdev_attr prev_attr;
	struct net_device *dev = NULL;
	int nhsel;

	/* For this route, all nexthop devs must be on the same switch. */

	for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) {
		const struct fib_nh *nh = &fi->fib_nh[nhsel];

		if (!nh->nh_dev)
			return NULL;

		dev = switchdev_get_lowest_dev(nh->nh_dev);
		if (!dev)
			return NULL;

		if (switchdev_port_attr_get(dev, &attr))
			return NULL;

		if (nhsel > 0) {
			if (prev_attr.ppid.id_len != attr.ppid.id_len)
				return NULL;
			if (memcmp(prev_attr.ppid.id, attr.ppid.id,
				   attr.ppid.id_len))
				return NULL;
		}

		prev_attr = attr;
	}

	return dev;
}
/**
 *	switchdev_fib_ipv4_add - Add IPv4 route entry to switch
 *
 *	@dst: route's IPv4 destination address
 *	@dst_len: destination address length (prefix length)
 *	@fi: route FIB info structure
 *	@tos: route TOS
 *	@type: route type
 *	@nlflags: netlink flags passed in (NLM_F_*)
 *	@tb_id: route table ID
 *
 *	Add IPv4 route entry to switch device.
 */
int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
			   u8 tos, u8 type, u32 nlflags, u32 tb_id)
{
	struct net_device *dev;
	const struct switchdev_ops *ops;
	int err = 0;

	/* Don't offload route if using custom ip rules or if
	 * IPv4 FIB offloading has been disabled completely.
	 */

#ifdef CONFIG_IP_MULTIPLE_TABLES
	if (fi->fib_net->ipv4.fib_has_custom_rules)
		return 0;
#endif

	if (fi->fib_net->ipv4.fib_offload_disabled)
		return 0;

	dev = switchdev_get_dev_by_nhs(fi);
	if (!dev)
		return 0;
	ops = dev->switchdev_ops;

	if (ops->switchdev_fib_ipv4_add) {
		err = ops->switchdev_fib_ipv4_add(dev, htonl(dst), dst_len,
						  fi, tos, type, nlflags,
						  tb_id);
		if (!err)
			fi->fib_flags |= RTNH_F_EXTERNAL;
	}

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_add);
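
/* Example: the shape of a driver's switchdev_fib_ipv4_add op invoked
 * above (illustrative sketch, not part of this file; foo_fib4_install
 * is hypothetical).  Returning an error tells the caller not to mark
 * the route RTNH_F_EXTERNAL, leaving forwarding to the kernel:
 *
 *	static int foo_fib_ipv4_add(struct net_device *dev, __be32 dst,
 *				    int dst_len, struct fib_info *fi,
 *				    u8 tos, u8 type, u32 nlflags, u32 tb_id)
 *	{
 *		struct foo_port *port = netdev_priv(dev);
 *
 *		return foo_fib4_install(port, dst, dst_len, fi);
 *	}
 */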
/**
 *	switchdev_fib_ipv4_del - Delete IPv4 route entry from switch
 *
 *	@dst: route's IPv4 destination address
 *	@dst_len: destination address length (prefix length)
 *	@fi: route FIB info structure
 *	@tos: route TOS
 *	@type: route type
 *	@tb_id: route table ID
 *
 *	Delete IPv4 route entry from switch device.
 */
int switchdev_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
			   u8 tos, u8 type, u32 tb_id)
{
	struct net_device *dev;
	const struct switchdev_ops *ops;
	int err = 0;

	if (!(fi->fib_flags & RTNH_F_EXTERNAL))
		return 0;

	dev = switchdev_get_dev_by_nhs(fi);
	if (!dev)
		return 0;
	ops = dev->switchdev_ops;

	if (ops->switchdev_fib_ipv4_del) {
		err = ops->switchdev_fib_ipv4_del(dev, htonl(dst), dst_len,
						  fi, tos, type, tb_id);
		if (!err)
			fi->fib_flags &= ~RTNH_F_EXTERNAL;
	}

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_del);
/**
 *	switchdev_fib_ipv4_abort - Abort an IPv4 FIB operation
 *
 *	@fi: route FIB info structure
 */
void switchdev_fib_ipv4_abort(struct fib_info *fi)
{
	/* There was a problem installing this route to the offload
	 * device.  For now, until we come up with more refined
	 * policy handling, abruptly end IPv4 fib offloading for
	 * the entire net by flushing offload device(s) of all
	 * IPv4 routes, and mark IPv4 fib offloading broken from
	 * this point forward.
	 */

	fib_flush_external(fi->fib_net);
	fi->fib_net->ipv4.fib_offload_disabled = true;
}
EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_abort);
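
/* Example: a driver aborting IPv4 FIB offload when a hardware route
 * operation fails after the fact (illustrative sketch, not part of
 * this file; foo_fib4_install is hypothetical).  After the abort,
 * fib_offload_disabled stops switchdev_fib_ipv4_add() from offloading
 * any further routes for this net:
 *
 *	err = foo_fib4_install(port, dst, dst_len, fi);
 *	if (err)
 *		switchdev_fib_ipv4_abort(fi);
 */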