/*
 * Copyright (c) 2007-2012 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/etherdevice.h>
#include <linux/genetlink.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/ethtool.h>
#include <linux/wait.h>
#include <asm/div64.h>
#include <linux/highmem.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/openvswitch.h>
#include <linux/rculist.h>
#include <linux/dmi.h>
#include <linux/workqueue.h>
#include <net/genetlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#include "datapath.h"
#include "flow.h"
#include "vport-internal_dev.h"

/**
 * struct ovs_net - Per net-namespace data for ovs.
 * @dps: List of datapaths to enable dumping them all out.
 * Protected by genl_mutex.
 */
struct ovs_net {
	struct list_head dps;
};

static int ovs_net_id __read_mostly;

#define REHASH_FLOW_INTERVAL (10 * 60 * HZ)
static void rehash_flow_table(struct work_struct *work);
static DECLARE_DELAYED_WORK(rehash_flow_wq, rehash_flow_table);
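
/* rehash_flow_wq periodically rehashes every datapath's flow table; it
 * reschedules itself every REHASH_FLOW_INTERVAL (ten minutes).  See
 * rehash_flow_table() near the end of this file.
 */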

/*
 * Writes to device state (add/remove datapath, port, set operations on vports,
 * etc.) are protected by RTNL.
 *
 * Writes to other state (flow table modifications, set miscellaneous datapath
 * parameters, etc.) are protected by genl_mutex.  The RTNL lock nests inside
 * genl_mutex.
 *
 * Reads are protected by RCU.
 *
 * There are a few special cases (mostly stats) that have their own
 * synchronization but they nest under all of above and don't interact with
 * each other.
 */

static struct vport *new_vport(const struct vport_parms *);
static int queue_gso_packets(struct net *, int dp_ifindex, struct sk_buff *,
			     const struct dp_upcall_info *);
static int queue_userspace_packet(struct net *, int dp_ifindex,
				  struct sk_buff *,
				  const struct dp_upcall_info *);

/* Must be called with rcu_read_lock, genl_mutex, or RTNL lock. */
static struct datapath *get_dp(struct net *net, int dp_ifindex)
{
	struct datapath *dp = NULL;
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, dp_ifindex);
	if (dev) {
		struct vport *vport = ovs_internal_dev_get_vport(dev);
		if (vport)
			dp = vport->dp;
	}
	rcu_read_unlock();

	return dp;
}

/* Must be called with rcu_read_lock or RTNL lock. */
const char *ovs_dp_name(const struct datapath *dp)
{
	struct vport *vport = ovs_vport_rtnl_rcu(dp, OVSP_LOCAL);

	return vport->ops->get_name(vport);
}

static int get_dpifindex(struct datapath *dp)
{
	struct vport *local;
	int ifindex;

	rcu_read_lock();

	local = ovs_vport_rcu(dp, OVSP_LOCAL);
	if (local)
		ifindex = local->ops->get_ifindex(local);
	else
		ifindex = 0;

	rcu_read_unlock();

	return ifindex;
}

static void destroy_dp_rcu(struct rcu_head *rcu)
{
	struct datapath *dp = container_of(rcu, struct datapath, rcu);

	ovs_flow_tbl_destroy((__force struct flow_table *)dp->table);
	free_percpu(dp->stats_percpu);
	release_net(ovs_dp_get_net(dp));
	kfree(dp->ports);
	kfree(dp);
}
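
/* Per-datapath vport hash table helper.  This relies on DP_VPORT_HASH_BUCKETS
 * being a power of two so that the bitwise AND below acts as a cheap modulo.
 */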
static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
					    u16 port_no)
{
	return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
}

struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
{
	struct vport *vport;
	struct hlist_head *head;

	head = vport_hash_bucket(dp, port_no);
	hlist_for_each_entry_rcu(vport, head, dp_hash_node) {
		if (vport->port_no == port_no)
			return vport;
	}
	return NULL;
}

/* Called with RTNL lock and genl_lock. */
static struct vport *new_vport(const struct vport_parms *parms)
{
	struct vport *vport;

	vport = ovs_vport_add(parms);
	if (!IS_ERR(vport)) {
		struct datapath *dp = parms->dp;
		struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);

		hlist_add_head_rcu(&vport->dp_hash_node, head);
	}

	return vport;
}

/* Called with RTNL lock. */
void ovs_dp_detach_port(struct vport *p)
{
	ASSERT_RTNL();

	/* First drop references to device. */
	hlist_del_rcu(&p->dp_hash_node);

	/* Then destroy it. */
	ovs_vport_del(p);
}

/* Must be called with rcu_read_lock. */
void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
{
	struct datapath *dp = p->dp;
	struct sw_flow *flow;
	struct dp_stats_percpu *stats;
	struct sw_flow_key key;
	u64 *stats_counter;
	int error;
	int key_len;

	stats = this_cpu_ptr(dp->stats_percpu);

	/* Extract flow from 'skb' into 'key'. */
	error = ovs_flow_extract(skb, p->port_no, &key, &key_len);
	if (unlikely(error)) {
		kfree_skb(skb);
		return;
	}

	/* Look up flow. */
	flow = ovs_flow_tbl_lookup(rcu_dereference(dp->table), &key, key_len);
	if (unlikely(!flow)) {
		struct dp_upcall_info upcall;

		upcall.cmd = OVS_PACKET_CMD_MISS;
		upcall.key = &key;
		upcall.userdata = NULL;
		upcall.portid = p->upcall_portid;
		ovs_dp_upcall(dp, skb, &upcall);
		consume_skb(skb);
		stats_counter = &stats->n_missed;
		goto out;
	}

	OVS_CB(skb)->flow = flow;

	stats_counter = &stats->n_hit;
	ovs_flow_used(OVS_CB(skb)->flow, skb);
	ovs_execute_actions(dp, skb);

out:
	/* Update datapath statistics. */
	u64_stats_update_begin(&stats->sync);
	(*stats_counter)++;
	u64_stats_update_end(&stats->sync);
}
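
/* Generic Netlink family used for OVS_PACKET_* requests: packets punted to
 * userspace via upcalls and packets injected back via OVS_PACKET_CMD_EXECUTE.
 */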
static struct genl_family dp_packet_genl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_PACKET_FAMILY,
	.version = OVS_PACKET_VERSION,
	.maxattr = OVS_PACKET_ATTR_MAX,
};

int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
		  const struct dp_upcall_info *upcall_info)
{
	struct dp_stats_percpu *stats;
	int dp_ifindex;
	int err;

	if (upcall_info->portid == 0) {
		err = -ENOTCONN;
		goto err;
	}

	dp_ifindex = get_dpifindex(dp);
	if (!dp_ifindex) {
		err = -ENODEV;
		goto err;
	}

	if (!skb_is_gso(skb))
		err = queue_userspace_packet(ovs_dp_get_net(dp), dp_ifindex, skb, upcall_info);
	else
		err = queue_gso_packets(ovs_dp_get_net(dp), dp_ifindex, skb, upcall_info);
	if (err)
		goto err;

	return 0;

err:
	stats = this_cpu_ptr(dp->stats_percpu);

	u64_stats_update_begin(&stats->sync);
	stats->n_lost++;
	u64_stats_update_end(&stats->sync);

	return err;
}
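
/* GSO packets are segmented in software before being queued, presumably so
 * that each upcall delivers one ordinary-sized packet to userspace rather
 * than a single oversized skb.
 */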
static int queue_gso_packets(struct net *net, int dp_ifindex,
			     struct sk_buff *skb,
			     const struct dp_upcall_info *upcall_info)
{
	unsigned short gso_type = skb_shinfo(skb)->gso_type;
	struct dp_upcall_info later_info;
	struct sw_flow_key later_key;
	struct sk_buff *segs, *nskb;
	int err;

	segs = __skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM, false);
	if (IS_ERR(segs))
		return PTR_ERR(segs);

	/* Queue all of the segments. */
	skb = segs;
	do {
		err = queue_userspace_packet(net, dp_ifindex, skb, upcall_info);
		if (err)
			break;

		if (skb == segs && gso_type & SKB_GSO_UDP) {
			/* The initial flow key extracted by ovs_flow_extract()
			 * in this case is for a first fragment, so we need to
			 * properly mark later fragments.
			 */
			later_key = *upcall_info->key;
			later_key.ip.frag = OVS_FRAG_TYPE_LATER;

			later_info = *upcall_info;
			later_info.key = &later_key;
			upcall_info = &later_info;
		}
	} while ((skb = skb->next));

	/* Free all of the segments. */
	skb = segs;
	do {
		nskb = skb->next;
		if (err)
			kfree_skb(skb);
		else
			consume_skb(skb);
	} while ((skb = nskb));
	return err;
}
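
/* Build an OVS_PACKET_CMD_* Netlink message around 'skb' (flow key plus the
 * packet data) and unicast it to the Netlink port chosen in upcall_info.
 */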
static int queue_userspace_packet(struct net *net, int dp_ifindex,
				  struct sk_buff *skb,
				  const struct dp_upcall_info *upcall_info)
{
	struct ovs_header *upcall;
	struct sk_buff *nskb = NULL;
	struct sk_buff *user_skb; /* to be queued to userspace */
	struct nlattr *nla;
	unsigned int len;
	int err;

	if (vlan_tx_tag_present(skb)) {
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			return -ENOMEM;

		nskb = __vlan_put_tag(nskb, vlan_tx_tag_get(nskb));
		if (!nskb)
			return -ENOMEM;

		nskb->vlan_tci = 0;
		skb = nskb;
	}

	if (nla_attr_size(skb->len) > USHRT_MAX) {
		err = -EFBIG;
		goto out;
	}

	len = sizeof(struct ovs_header);
	len += nla_total_size(skb->len);
	len += nla_total_size(FLOW_BUFSIZE);
	if (upcall_info->cmd == OVS_PACKET_CMD_ACTION)
		len += nla_total_size(8);

	user_skb = genlmsg_new(len, GFP_ATOMIC);
	if (!user_skb) {
		err = -ENOMEM;
		goto out;
	}

	upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
			     0, upcall_info->cmd);
	upcall->dp_ifindex = dp_ifindex;

	nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
	ovs_flow_to_nlattrs(upcall_info->key, user_skb);
	nla_nest_end(user_skb, nla);

	if (upcall_info->userdata)
		nla_put_u64(user_skb, OVS_PACKET_ATTR_USERDATA,
			    nla_get_u64(upcall_info->userdata));

	nla = __nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, skb->len);

	skb_copy_and_csum_dev(skb, nla_data(nla));

	genlmsg_end(user_skb, upcall);
	err = genlmsg_unicast(net, user_skb, upcall_info->portid);

out:
	kfree_skb(nskb);
	return err;
}

/* Called with genl_mutex. */
static int flush_flows(struct datapath *dp)
{
	struct flow_table *old_table;
	struct flow_table *new_table;

	old_table = genl_dereference(dp->table);
	new_table = ovs_flow_tbl_alloc(TBL_MIN_BUCKETS);
	if (!new_table)
		return -ENOMEM;

	rcu_assign_pointer(dp->table, new_table);

	ovs_flow_tbl_deferred_destroy(old_table);
	return 0;
}

static int validate_actions(const struct nlattr *attr,
			    const struct sw_flow_key *key, int depth);

static int validate_sample(const struct nlattr *attr,
			   const struct sw_flow_key *key, int depth)
{
	const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1];
	const struct nlattr *probability, *actions;
	const struct nlattr *a;
	int rem;

	memset(attrs, 0, sizeof(attrs));
	nla_for_each_nested(a, attr, rem) {
		int type = nla_type(a);
		if (!type || type > OVS_SAMPLE_ATTR_MAX || attrs[type])
			return -EINVAL;
		attrs[type] = a;
	}
	if (rem)
		return -EINVAL;

	probability = attrs[OVS_SAMPLE_ATTR_PROBABILITY];
	if (!probability || nla_len(probability) != sizeof(u32))
		return -EINVAL;

	actions = attrs[OVS_SAMPLE_ATTR_ACTIONS];
	if (!actions || (nla_len(actions) && nla_len(actions) < NLA_HDRLEN))
		return -EINVAL;

	return validate_actions(actions, key, depth + 1);
}

static int validate_tp_port(const struct sw_flow_key *flow_key)
{
	if (flow_key->eth.type == htons(ETH_P_IP)) {
		if (flow_key->ipv4.tp.src || flow_key->ipv4.tp.dst)
			return 0;
	} else if (flow_key->eth.type == htons(ETH_P_IPV6)) {
		if (flow_key->ipv6.tp.src || flow_key->ipv6.tp.dst)
			return 0;
	}

	return -EINVAL;
}

static int validate_set(const struct nlattr *a,
			const struct sw_flow_key *flow_key)
{
	const struct nlattr *ovs_key = nla_data(a);
	int key_type = nla_type(ovs_key);

	/* There can be only one key in an action. */
	if (nla_total_size(nla_len(ovs_key)) != nla_len(a))
		return -EINVAL;

	if (key_type > OVS_KEY_ATTR_MAX ||
	    nla_len(ovs_key) != ovs_key_lens[key_type])
		return -EINVAL;

	switch (key_type) {
	const struct ovs_key_ipv4 *ipv4_key;
	const struct ovs_key_ipv6 *ipv6_key;

	case OVS_KEY_ATTR_PRIORITY:
	case OVS_KEY_ATTR_SKB_MARK:
	case OVS_KEY_ATTR_ETHERNET:
		break;

	case OVS_KEY_ATTR_IPV4:
		if (flow_key->eth.type != htons(ETH_P_IP))
			return -EINVAL;

		if (!flow_key->ip.proto)
			return -EINVAL;

		ipv4_key = nla_data(ovs_key);
		if (ipv4_key->ipv4_proto != flow_key->ip.proto)
			return -EINVAL;

		if (ipv4_key->ipv4_frag != flow_key->ip.frag)
			return -EINVAL;

		break;

	case OVS_KEY_ATTR_IPV6:
		if (flow_key->eth.type != htons(ETH_P_IPV6))
			return -EINVAL;

		if (!flow_key->ip.proto)
			return -EINVAL;

		ipv6_key = nla_data(ovs_key);
		if (ipv6_key->ipv6_proto != flow_key->ip.proto)
			return -EINVAL;

		if (ipv6_key->ipv6_frag != flow_key->ip.frag)
			return -EINVAL;

		if (ntohl(ipv6_key->ipv6_label) & 0xFFF00000)
			return -EINVAL;

		break;

	case OVS_KEY_ATTR_TCP:
		if (flow_key->ip.proto != IPPROTO_TCP)
			return -EINVAL;

		return validate_tp_port(flow_key);

	case OVS_KEY_ATTR_UDP:
		if (flow_key->ip.proto != IPPROTO_UDP)
			return -EINVAL;

		return validate_tp_port(flow_key);

	default:
		return -EINVAL;
	}

	return 0;
}

static int validate_userspace(const struct nlattr *attr)
{
	static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] = {
		[OVS_USERSPACE_ATTR_PID] = {.type = NLA_U32 },
		[OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_U64 },
	};
	struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1];
	int error;

	error = nla_parse_nested(a, OVS_USERSPACE_ATTR_MAX,
				 attr, userspace_policy);
	if (error)
		return error;

	if (!a[OVS_USERSPACE_ATTR_PID] ||
	    !nla_get_u32(a[OVS_USERSPACE_ATTR_PID]))
		return -EINVAL;

	return 0;
}

static int validate_actions(const struct nlattr *attr,
			    const struct sw_flow_key *key, int depth)
{
	const struct nlattr *a;
	int rem, err;

	if (depth >= SAMPLE_ACTION_DEPTH)
		return -EOVERFLOW;

	nla_for_each_nested(a, attr, rem) {
		/* Expected argument lengths, (u32)-1 for variable length. */
		static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = {
			[OVS_ACTION_ATTR_OUTPUT] = sizeof(u32),
			[OVS_ACTION_ATTR_USERSPACE] = (u32)-1,
			[OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan),
			[OVS_ACTION_ATTR_POP_VLAN] = 0,
			[OVS_ACTION_ATTR_SET] = (u32)-1,
			[OVS_ACTION_ATTR_SAMPLE] = (u32)-1
		};
		const struct ovs_action_push_vlan *vlan;
		int type = nla_type(a);

		if (type > OVS_ACTION_ATTR_MAX ||
		    (action_lens[type] != nla_len(a) &&
		     action_lens[type] != (u32)-1))
			return -EINVAL;

		switch (type) {
		case OVS_ACTION_ATTR_UNSPEC:
			return -EINVAL;

		case OVS_ACTION_ATTR_USERSPACE:
			err = validate_userspace(a);
			if (err)
				return err;
			break;

		case OVS_ACTION_ATTR_OUTPUT:
			if (nla_get_u32(a) >= DP_MAX_PORTS)
				return -EINVAL;
			break;

		case OVS_ACTION_ATTR_POP_VLAN:
			break;

		case OVS_ACTION_ATTR_PUSH_VLAN:
			vlan = nla_data(a);
			if (vlan->vlan_tpid != htons(ETH_P_8021Q))
				return -EINVAL;
			if (!(vlan->vlan_tci & htons(VLAN_TAG_PRESENT)))
				return -EINVAL;
			break;

		case OVS_ACTION_ATTR_SET:
			err = validate_set(a, key);
			if (err)
				return err;
			break;

		case OVS_ACTION_ATTR_SAMPLE:
			err = validate_sample(a, key, depth);
			if (err)
				return err;
			break;

		default:
			return -EINVAL;
		}
	}

	if (rem > 0)
		return -EINVAL;

	return 0;
}

static void clear_stats(struct sw_flow *flow)
{
	flow->used = 0;
	flow->tcp_flags = 0;
	flow->packet_count = 0;
	flow->byte_count = 0;
}

static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
{
	struct ovs_header *ovs_header = info->userhdr;
	struct nlattr **a = info->attrs;
	struct sw_flow_actions *acts;
	struct sk_buff *packet;
	struct sw_flow *flow;
	struct datapath *dp;
	struct ethhdr *eth;
	int len;
	int err;
	int key_len;

	err = -EINVAL;
	if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
	    !a[OVS_PACKET_ATTR_ACTIONS] ||
	    nla_len(a[OVS_PACKET_ATTR_PACKET]) < ETH_HLEN)
		goto err;

	len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
	packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
	err = -ENOMEM;
	if (!packet)
		goto err;
	skb_reserve(packet, NET_IP_ALIGN);

	memcpy(__skb_put(packet, len), nla_data(a[OVS_PACKET_ATTR_PACKET]), len);

	skb_reset_mac_header(packet);
	eth = eth_hdr(packet);

	/* Normally, setting the skb 'protocol' field would be handled by a
	 * call to eth_type_trans(), but it assumes there's a sending
	 * device, which we may not have. */
	if (ntohs(eth->h_proto) >= 1536)
		packet->protocol = eth->h_proto;
	else
		packet->protocol = htons(ETH_P_802_2);

	/* Build an sw_flow for sending this packet. */
	flow = ovs_flow_alloc();
	err = PTR_ERR(flow);
	if (IS_ERR(flow))
		goto err_kfree_skb;

	err = ovs_flow_extract(packet, -1, &flow->key, &key_len);
	if (err)
		goto err_flow_free;

	err = ovs_flow_metadata_from_nlattrs(&flow->key.phy.priority,
					     &flow->key.phy.skb_mark,
					     &flow->key.phy.in_port,
					     a[OVS_PACKET_ATTR_KEY]);
	if (err)
		goto err_flow_free;

	err = validate_actions(a[OVS_PACKET_ATTR_ACTIONS], &flow->key, 0);
	if (err)
		goto err_flow_free;

	flow->hash = ovs_flow_hash(&flow->key, key_len);

	acts = ovs_flow_actions_alloc(a[OVS_PACKET_ATTR_ACTIONS]);
	err = PTR_ERR(acts);
	if (IS_ERR(acts))
		goto err_flow_free;
	rcu_assign_pointer(flow->sf_acts, acts);

	OVS_CB(packet)->flow = flow;
	packet->priority = flow->key.phy.priority;
	packet->mark = flow->key.phy.skb_mark;

	rcu_read_lock();
	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	err = -ENODEV;
	if (!dp)
		goto err_unlock;

	local_bh_disable();
	err = ovs_execute_actions(dp, packet);
	local_bh_enable();
	rcu_read_unlock();

	ovs_flow_free(flow);
	return err;

err_unlock:
	rcu_read_unlock();
err_flow_free:
	ovs_flow_free(flow);
err_kfree_skb:
	kfree_skb(packet);
err:
	return err;
}

static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
	[OVS_PACKET_ATTR_PACKET] = { .type = NLA_UNSPEC },
	[OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
	[OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
};

static struct genl_ops dp_packet_genl_ops[] = {
	{ .cmd = OVS_PACKET_CMD_EXECUTE,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = packet_policy,
	  .doit = ovs_packet_cmd_execute
	}
};

static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats)
{
	struct flow_table *table = genl_dereference(dp->table);
	int i;

	stats->n_flows = ovs_flow_tbl_count(table);

	stats->n_hit = stats->n_missed = stats->n_lost = 0;
	for_each_possible_cpu(i) {
		const struct dp_stats_percpu *percpu_stats;
		struct dp_stats_percpu local_stats;
		unsigned int start;

		percpu_stats = per_cpu_ptr(dp->stats_percpu, i);

		do {
			start = u64_stats_fetch_begin_bh(&percpu_stats->sync);
			local_stats = *percpu_stats;
		} while (u64_stats_fetch_retry_bh(&percpu_stats->sync, start));

		stats->n_hit += local_stats.n_hit;
		stats->n_missed += local_stats.n_missed;
		stats->n_lost += local_stats.n_lost;
	}
}

static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
	[OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
	[OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
	[OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
};

static struct genl_family dp_flow_genl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_FLOW_FAMILY,
	.version = OVS_FLOW_VERSION,
	.maxattr = OVS_FLOW_ATTR_MAX,
};

static struct genl_multicast_group ovs_dp_flow_multicast_group = {
	.name = OVS_FLOW_MCGROUP
};

/* Called with genl_lock. */
static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
				  struct sk_buff *skb, u32 portid,
				  u32 seq, u32 flags, u8 cmd)
{
	const int skb_orig_len = skb->len;
	const struct sw_flow_actions *sf_acts;
	struct ovs_flow_stats stats;
	struct ovs_header *ovs_header;
	struct nlattr *nla;
	unsigned long used;
	u8 tcp_flags;
	int err;

	sf_acts = rcu_dereference_protected(flow->sf_acts,
					    lockdep_genl_is_held());

	ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family, flags, cmd);
	if (!ovs_header)
		return -EMSGSIZE;

	ovs_header->dp_ifindex = get_dpifindex(dp);

	nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
	if (!nla)
		goto nla_put_failure;
	err = ovs_flow_to_nlattrs(&flow->key, skb);
	if (err)
		goto error;
	nla_nest_end(skb, nla);

	spin_lock_bh(&flow->lock);
	used = flow->used;
	stats.n_packets = flow->packet_count;
	stats.n_bytes = flow->byte_count;
	tcp_flags = flow->tcp_flags;
	spin_unlock_bh(&flow->lock);

	if (used &&
	    nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)))
		goto nla_put_failure;

	if (stats.n_packets &&
	    nla_put(skb, OVS_FLOW_ATTR_STATS,
		    sizeof(struct ovs_flow_stats), &stats))
		goto nla_put_failure;

	if (tcp_flags &&
	    nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags))
		goto nla_put_failure;

	/* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
	 * this is the first flow to be dumped into 'skb'.  This is unusual for
	 * Netlink but individual action lists can be longer than
	 * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
	 * The userspace caller can always fetch the actions separately if it
	 * really wants them.  (Most userspace callers in fact don't care.)
	 *
	 * This can only fail for dump operations because the skb is always
	 * properly sized for single flows.
	 */
	err = nla_put(skb, OVS_FLOW_ATTR_ACTIONS, sf_acts->actions_len,
		      sf_acts->actions);
	if (err < 0 && skb_orig_len)
		goto error;

	return genlmsg_end(skb, ovs_header);

nla_put_failure:
	err = -EMSGSIZE;
error:
	genlmsg_cancel(skb, ovs_header);
	return err;
}

static struct sk_buff *ovs_flow_cmd_alloc_info(struct sw_flow *flow)
{
	const struct sw_flow_actions *sf_acts;
	int len;

	sf_acts = rcu_dereference_protected(flow->sf_acts,
					    lockdep_genl_is_held());

	/* OVS_FLOW_ATTR_KEY */
	len = nla_total_size(FLOW_BUFSIZE);
	/* OVS_FLOW_ATTR_ACTIONS */
	len += nla_total_size(sf_acts->actions_len);
	/* OVS_FLOW_ATTR_STATS */
	len += nla_total_size(sizeof(struct ovs_flow_stats));
	/* OVS_FLOW_ATTR_TCP_FLAGS */
	len += nla_total_size(1);
	/* OVS_FLOW_ATTR_USED */
	len += nla_total_size(8);

	len += NLMSG_ALIGN(sizeof(struct ovs_header));

	return genlmsg_new(len, GFP_KERNEL);
}

static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow,
					       struct datapath *dp,
					       u32 portid, u32 seq, u8 cmd)
{
	struct sk_buff *skb;
	int retval;

	skb = ovs_flow_cmd_alloc_info(flow);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	retval = ovs_flow_cmd_fill_info(flow, dp, skb, portid, seq, 0, cmd);
	BUG_ON(retval < 0);
	return skb;
}

static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct sw_flow_key key;
	struct sw_flow *flow;
	struct sk_buff *reply;
	struct datapath *dp;
	struct flow_table *table;
	int error;
	int key_len;

	/* Extract key. */
	error = -EINVAL;
	if (!a[OVS_FLOW_ATTR_KEY])
		goto error;
	error = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
	if (error)
		goto error;

	/* Validate actions. */
	if (a[OVS_FLOW_ATTR_ACTIONS]) {
		error = validate_actions(a[OVS_FLOW_ATTR_ACTIONS], &key, 0);
		if (error)
			goto error;
	} else if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW) {
		error = -EINVAL;
		goto error;
	}

	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	error = -ENODEV;
	if (!dp)
		goto error;

	table = genl_dereference(dp->table);
	flow = ovs_flow_tbl_lookup(table, &key, key_len);
	if (!flow) {
		struct sw_flow_actions *acts;

		/* Bail out if we're not allowed to create a new flow. */
		error = -ENOENT;
		if (info->genlhdr->cmd == OVS_FLOW_CMD_SET)
			goto error;

		/* Expand table, if necessary, to make room. */
		if (ovs_flow_tbl_need_to_expand(table)) {
			struct flow_table *new_table;

			new_table = ovs_flow_tbl_expand(table);
			if (!IS_ERR(new_table)) {
				rcu_assign_pointer(dp->table, new_table);
				ovs_flow_tbl_deferred_destroy(table);
				table = genl_dereference(dp->table);
			}
		}

		/* Allocate flow. */
		flow = ovs_flow_alloc();
		if (IS_ERR(flow)) {
			error = PTR_ERR(flow);
			goto error;
		}
		flow->key = key;
		clear_stats(flow);

		/* Obtain actions. */
		acts = ovs_flow_actions_alloc(a[OVS_FLOW_ATTR_ACTIONS]);
		error = PTR_ERR(acts);
		if (IS_ERR(acts))
			goto error_free_flow;
		rcu_assign_pointer(flow->sf_acts, acts);

		/* Put flow in bucket. */
		flow->hash = ovs_flow_hash(&key, key_len);
		ovs_flow_tbl_insert(table, flow);

		reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
						info->snd_seq,
						OVS_FLOW_CMD_NEW);
	} else {
		/* We found a matching flow. */
		struct sw_flow_actions *old_acts;
		struct nlattr *acts_attrs;

		/* Bail out if we're not allowed to modify an existing flow.
		 * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
		 * because Generic Netlink treats the latter as a dump
		 * request.  We also accept NLM_F_EXCL in case that bug ever
		 * gets fixed.
		 */
		error = -EEXIST;
		if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW &&
		    info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL))
			goto error;

		/* Update actions. */
		old_acts = rcu_dereference_protected(flow->sf_acts,
						     lockdep_genl_is_held());
		acts_attrs = a[OVS_FLOW_ATTR_ACTIONS];
		if (acts_attrs &&
		    (old_acts->actions_len != nla_len(acts_attrs) ||
		     memcmp(old_acts->actions, nla_data(acts_attrs),
			    old_acts->actions_len))) {
			struct sw_flow_actions *new_acts;

			new_acts = ovs_flow_actions_alloc(acts_attrs);
			error = PTR_ERR(new_acts);
			if (IS_ERR(new_acts))
				goto error;

			rcu_assign_pointer(flow->sf_acts, new_acts);
			ovs_flow_deferred_free_acts(old_acts);
		}

		reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
						info->snd_seq, OVS_FLOW_CMD_NEW);

		/* Clear stats. */
		if (a[OVS_FLOW_ATTR_CLEAR]) {
			spin_lock_bh(&flow->lock);
			clear_stats(flow);
			spin_unlock_bh(&flow->lock);
		}
	}

	if (!IS_ERR(reply))
		genl_notify(reply, genl_info_net(info), info->snd_portid,
			    ovs_dp_flow_multicast_group.id, info->nlhdr,
			    GFP_KERNEL);
	else
		netlink_set_err(sock_net(skb->sk)->genl_sock, 0,
				ovs_dp_flow_multicast_group.id, PTR_ERR(reply));
	return 0;

error_free_flow:
	ovs_flow_free(flow);
error:
	return error;
}

static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct sw_flow_key key;
	struct sk_buff *reply;
	struct sw_flow *flow;
	struct datapath *dp;
	struct flow_table *table;
	int err;
	int key_len;

	if (!a[OVS_FLOW_ATTR_KEY])
		return -EINVAL;
	err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
	if (err)
		return err;

	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (!dp)
		return -ENODEV;

	table = genl_dereference(dp->table);
	flow = ovs_flow_tbl_lookup(table, &key, key_len);
	if (!flow)
		return -ENOENT;

	reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
					info->snd_seq, OVS_FLOW_CMD_NEW);
	if (IS_ERR(reply))
		return PTR_ERR(reply);

	return genlmsg_reply(reply, info);
}

static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct sw_flow_key key;
	struct sk_buff *reply;
	struct sw_flow *flow;
	struct datapath *dp;
	struct flow_table *table;
	int err;
	int key_len;

	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (!dp)
		return -ENODEV;

	if (!a[OVS_FLOW_ATTR_KEY])
		return flush_flows(dp);

	err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
	if (err)
		return err;

	table = genl_dereference(dp->table);
	flow = ovs_flow_tbl_lookup(table, &key, key_len);
	if (!flow)
		return -ENOENT;

	reply = ovs_flow_cmd_alloc_info(flow);
	if (!reply)
		return -ENOMEM;

	ovs_flow_tbl_remove(table, flow);

	err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_portid,
				     info->snd_seq, 0, OVS_FLOW_CMD_DEL);
	BUG_ON(err < 0);

	ovs_flow_deferred_free(flow);

	genl_notify(reply, genl_info_net(info), info->snd_portid,
		    ovs_dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL);
	return 0;
}

static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
	struct datapath *dp;
	struct flow_table *table;

	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (!dp)
		return -ENODEV;

	table = genl_dereference(dp->table);

	for (;;) {
		struct sw_flow *flow;
		u32 bucket, obj;

		bucket = cb->args[0];
		obj = cb->args[1];
		flow = ovs_flow_tbl_next(table, &bucket, &obj);
		if (!flow)
			break;

		if (ovs_flow_cmd_fill_info(flow, dp, skb,
					   NETLINK_CB(cb->skb).portid,
					   cb->nlh->nlmsg_seq, NLM_F_MULTI,
					   OVS_FLOW_CMD_NEW) < 0)
			break;

		cb->args[0] = bucket;
		cb->args[1] = obj;
	}
	return skb->len;
}

static struct genl_ops dp_flow_genl_ops[] = {
	{ .cmd = OVS_FLOW_CMD_NEW,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = flow_policy,
	  .doit = ovs_flow_cmd_new_or_set
	},
	{ .cmd = OVS_FLOW_CMD_DEL,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = flow_policy,
	  .doit = ovs_flow_cmd_del
	},
	{ .cmd = OVS_FLOW_CMD_GET,
	  .flags = 0,		    /* OK for unprivileged users. */
	  .policy = flow_policy,
	  .doit = ovs_flow_cmd_get,
	  .dumpit = ovs_flow_cmd_dump
	},
	{ .cmd = OVS_FLOW_CMD_SET,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = flow_policy,
	  .doit = ovs_flow_cmd_new_or_set,
	},
};

static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
	[OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
	[OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
};

static struct genl_family dp_datapath_genl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_DATAPATH_FAMILY,
	.version = OVS_DATAPATH_VERSION,
	.maxattr = OVS_DP_ATTR_MAX,
};

static struct genl_multicast_group ovs_dp_datapath_multicast_group = {
	.name = OVS_DATAPATH_MCGROUP
};

static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
				u32 portid, u32 seq, u32 flags, u8 cmd)
{
	struct ovs_header *ovs_header;
	struct ovs_dp_stats dp_stats;
	int err;

	ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family,
				 flags, cmd);
	if (!ovs_header)
		goto error;

	ovs_header->dp_ifindex = get_dpifindex(dp);

	rcu_read_lock();
	err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
	rcu_read_unlock();
	if (err)
		goto nla_put_failure;

	get_dp_stats(dp, &dp_stats);
	if (nla_put(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats), &dp_stats))
		goto nla_put_failure;

	return genlmsg_end(skb, ovs_header);

nla_put_failure:
	genlmsg_cancel(skb, ovs_header);
error:
	return -EMSGSIZE;
}

static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp, u32 portid,
					     u32 seq, u8 cmd)
{
	struct sk_buff *skb;
	int retval;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	retval = ovs_dp_cmd_fill_info(dp, skb, portid, seq, 0, cmd);
	if (retval < 0) {
		kfree_skb(skb);
		return ERR_PTR(retval);
	}
	return skb;
}

/* Called with genl_mutex and optionally with RTNL lock also. */
static struct datapath *lookup_datapath(struct net *net,
					struct ovs_header *ovs_header,
					struct nlattr *a[OVS_DP_ATTR_MAX + 1])
{
	struct datapath *dp;

	if (!a[OVS_DP_ATTR_NAME])
		dp = get_dp(net, ovs_header->dp_ifindex);
	else {
		struct vport *vport;

		rcu_read_lock();
		vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME]));
		dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
		rcu_read_unlock();
	}
	return dp ? dp : ERR_PTR(-ENODEV);
}

static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct vport_parms parms;
	struct sk_buff *reply;
	struct datapath *dp;
	struct vport *vport;
	struct ovs_net *ovs_net;
	int err, i;

	err = -EINVAL;
	if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
		goto err;

	rtnl_lock();

	err = -ENOMEM;
	dp = kzalloc(sizeof(*dp), GFP_KERNEL);
	if (dp == NULL)
		goto err_unlock_rtnl;

	ovs_dp_set_net(dp, hold_net(sock_net(skb->sk)));

	/* Allocate table. */
	rcu_assign_pointer(dp->table, ovs_flow_tbl_alloc(TBL_MIN_BUCKETS));
	if (!dp->table)
		goto err_free_dp;

	dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
	if (!dp->stats_percpu) {
		err = -ENOMEM;
		goto err_destroy_table;
	}

	dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
			    GFP_KERNEL);
	if (!dp->ports) {
		err = -ENOMEM;
		goto err_destroy_percpu;
	}

	for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
		INIT_HLIST_HEAD(&dp->ports[i]);

	/* Set up our datapath device. */
	parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
	parms.type = OVS_VPORT_TYPE_INTERNAL;
	parms.options = NULL;
	parms.dp = dp;
	parms.port_no = OVSP_LOCAL;
	parms.upcall_portid = nla_get_u32(a[OVS_DP_ATTR_UPCALL_PID]);

	vport = new_vport(&parms);
	if (IS_ERR(vport)) {
		err = PTR_ERR(vport);
		if (err == -EBUSY)
			err = -EEXIST;

		goto err_destroy_ports_array;
	}

	reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
				      info->snd_seq, OVS_DP_CMD_NEW);
	err = PTR_ERR(reply);
	if (IS_ERR(reply))
		goto err_destroy_local_port;

	ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
	list_add_tail(&dp->list_node, &ovs_net->dps);
	rtnl_unlock();

	genl_notify(reply, genl_info_net(info), info->snd_portid,
		    ovs_dp_datapath_multicast_group.id, info->nlhdr,
		    GFP_KERNEL);
	return 0;

err_destroy_local_port:
	ovs_dp_detach_port(ovs_vport_rtnl(dp, OVSP_LOCAL));
err_destroy_ports_array:
	kfree(dp->ports);
err_destroy_percpu:
	free_percpu(dp->stats_percpu);
err_destroy_table:
	ovs_flow_tbl_destroy(genl_dereference(dp->table));
err_free_dp:
	release_net(ovs_dp_get_net(dp));
	kfree(dp);
err_unlock_rtnl:
	rtnl_unlock();
err:
	return err;
}

/* Called with genl_mutex. */
static void __dp_destroy(struct datapath *dp)
{
	int i;

	rtnl_lock();

	for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
		struct vport *vport;
		struct hlist_node *n;

		hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node)
			if (vport->port_no != OVSP_LOCAL)
				ovs_dp_detach_port(vport);
	}

	list_del(&dp->list_node);
	ovs_dp_detach_port(ovs_vport_rtnl(dp, OVSP_LOCAL));

	/* rtnl_unlock() will wait until all the references to devices that
	 * are pending unregistration have been dropped.  We do it here to
	 * ensure that any internal devices (which contain DP pointers) are
	 * fully destroyed before freeing the datapath.
	 */
	rtnl_unlock();

	call_rcu(&dp->rcu, destroy_dp_rcu);
}

static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *reply;
	struct datapath *dp;
	int err;

	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
	err = PTR_ERR(dp);
	if (IS_ERR(dp))
		return err;

	reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
				      info->snd_seq, OVS_DP_CMD_DEL);
	err = PTR_ERR(reply);
	if (IS_ERR(reply))
		return err;

	__dp_destroy(dp);

	genl_notify(reply, genl_info_net(info), info->snd_portid,
		    ovs_dp_datapath_multicast_group.id, info->nlhdr,
		    GFP_KERNEL);

	return 0;
}

static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *reply;
	struct datapath *dp;
	int err;

	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
	if (IS_ERR(dp))
		return PTR_ERR(dp);

	reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
				      info->snd_seq, OVS_DP_CMD_NEW);
	if (IS_ERR(reply)) {
		err = PTR_ERR(reply);
		netlink_set_err(sock_net(skb->sk)->genl_sock, 0,
				ovs_dp_datapath_multicast_group.id, err);
		return 0;
	}

	genl_notify(reply, genl_info_net(info), info->snd_portid,
		    ovs_dp_datapath_multicast_group.id, info->nlhdr,
		    GFP_KERNEL);

	return 0;
}

static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *reply;
	struct datapath *dp;

	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
	if (IS_ERR(dp))
		return PTR_ERR(dp);

	reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
				      info->snd_seq, OVS_DP_CMD_NEW);
	if (IS_ERR(reply))
		return PTR_ERR(reply);

	return genlmsg_reply(reply, info);
}

static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
	struct datapath *dp;
	int skip = cb->args[0];
	int i = 0;

	list_for_each_entry(dp, &ovs_net->dps, list_node) {
		if (i >= skip &&
		    ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
					 OVS_DP_CMD_NEW) < 0)
			break;
		i++;
	}

	cb->args[0] = i;

	return skb->len;
}

static struct genl_ops dp_datapath_genl_ops[] = {
	{ .cmd = OVS_DP_CMD_NEW,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = datapath_policy,
	  .doit = ovs_dp_cmd_new
	},
	{ .cmd = OVS_DP_CMD_DEL,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = datapath_policy,
	  .doit = ovs_dp_cmd_del
	},
	{ .cmd = OVS_DP_CMD_GET,
	  .flags = 0,		    /* OK for unprivileged users. */
	  .policy = datapath_policy,
	  .doit = ovs_dp_cmd_get,
	  .dumpit = ovs_dp_cmd_dump
	},
	{ .cmd = OVS_DP_CMD_SET,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = datapath_policy,
	  .doit = ovs_dp_cmd_set,
	},
};

static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
	[OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
	[OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
	[OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
	[OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
	[OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
	[OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
};

static struct genl_family dp_vport_genl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_VPORT_FAMILY,
	.version = OVS_VPORT_VERSION,
	.maxattr = OVS_VPORT_ATTR_MAX,
};

struct genl_multicast_group ovs_dp_vport_multicast_group = {
	.name = OVS_VPORT_MCGROUP
};

/* Called with RTNL lock or RCU read lock. */
static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
				   u32 portid, u32 seq, u32 flags, u8 cmd)
{
	struct ovs_header *ovs_header;
	struct ovs_vport_stats vport_stats;
	int err;

	ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
				 flags, cmd);
	if (!ovs_header)
		return -EMSGSIZE;

	ovs_header->dp_ifindex = get_dpifindex(vport->dp);

	if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
	    nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
	    nla_put_string(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport)) ||
	    nla_put_u32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_portid))
		goto nla_put_failure;

	ovs_vport_get_stats(vport, &vport_stats);
	if (nla_put(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats),
		    &vport_stats))
		goto nla_put_failure;

	err = ovs_vport_get_options(vport, skb);
	if (err == -EMSGSIZE)
		goto error;

	return genlmsg_end(skb, ovs_header);

nla_put_failure:
	err = -EMSGSIZE;
error:
	genlmsg_cancel(skb, ovs_header);
	return err;
}

/* Called with RTNL lock or RCU read lock. */
struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid,
					 u32 seq, u8 cmd)
{
	struct sk_buff *skb;
	int retval;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	retval = ovs_vport_cmd_fill_info(vport, skb, portid, seq, 0, cmd);
	if (retval < 0) {
		kfree_skb(skb);
		return ERR_PTR(retval);
	}
	return skb;
}

/* Called with RTNL lock or RCU read lock. */
static struct vport *lookup_vport(struct net *net,
				  struct ovs_header *ovs_header,
				  struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
{
	struct datapath *dp;
	struct vport *vport;

	if (a[OVS_VPORT_ATTR_NAME]) {
		vport = ovs_vport_locate(net, nla_data(a[OVS_VPORT_ATTR_NAME]));
		if (!vport)
			return ERR_PTR(-ENODEV);
		if (ovs_header->dp_ifindex &&
		    ovs_header->dp_ifindex != get_dpifindex(vport->dp))
			return ERR_PTR(-ENODEV);
		return vport;
	} else if (a[OVS_VPORT_ATTR_PORT_NO]) {
		u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);

		if (port_no >= DP_MAX_PORTS)
			return ERR_PTR(-EFBIG);

		dp = get_dp(net, ovs_header->dp_ifindex);
		if (!dp)
			return ERR_PTR(-ENODEV);

		vport = ovs_vport_rtnl_rcu(dp, port_no);
		if (!vport)
			return ERR_PTR(-ENOENT);
		return vport;
	} else
		return ERR_PTR(-EINVAL);
}

static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct vport_parms parms;
	struct sk_buff *reply;
	struct vport *vport;
	struct datapath *dp;
	u32 port_no;
	int err;

	err = -EINVAL;
	if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
	    !a[OVS_VPORT_ATTR_UPCALL_PID])
		goto exit;

	rtnl_lock();
	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	err = -ENODEV;
	if (!dp)
		goto exit_unlock;

	if (a[OVS_VPORT_ATTR_PORT_NO]) {
		port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);

		err = -EFBIG;
		if (port_no >= DP_MAX_PORTS)
			goto exit_unlock;

		vport = ovs_vport_rtnl_rcu(dp, port_no);
		err = -EBUSY;
		if (vport)
			goto exit_unlock;
	} else {
		for (port_no = 1; ; port_no++) {
			if (port_no >= DP_MAX_PORTS) {
				err = -EFBIG;
				goto exit_unlock;
			}
			vport = ovs_vport_rtnl(dp, port_no);
			if (!vport)
				break;
		}
	}

	parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
	parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
	parms.options = a[OVS_VPORT_ATTR_OPTIONS];
	parms.dp = dp;
	parms.port_no = port_no;
	parms.upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);

	vport = new_vport(&parms);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock;

	err = 0;
	reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq,
					 OVS_VPORT_CMD_NEW);
	if (IS_ERR(reply)) {
		err = PTR_ERR(reply);
		ovs_dp_detach_port(vport);
		goto exit_unlock;
	}

	genl_notify(reply, genl_info_net(info), info->snd_portid,
		    ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);

exit_unlock:
	rtnl_unlock();
exit:
	return err;
}

static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct sk_buff *reply;
	struct vport *vport;
	int err;

	rtnl_lock();
	vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock;

	err = 0;
	if (a[OVS_VPORT_ATTR_TYPE] &&
	    nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type)
		err = -EINVAL;

	reply = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!reply) {
		err = -ENOMEM;
		goto exit_unlock;
	}

	if (!err && a[OVS_VPORT_ATTR_OPTIONS])
		err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
	if (err)
		goto exit_free;

	if (a[OVS_VPORT_ATTR_UPCALL_PID])
		vport->upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);

	err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
				      info->snd_seq, 0, OVS_VPORT_CMD_NEW);
	BUG_ON(err < 0);

	genl_notify(reply, genl_info_net(info), info->snd_portid,
		    ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);

	rtnl_unlock();
	return 0;

exit_free:
	kfree_skb(reply);
exit_unlock:
	rtnl_unlock();
	return err;
}

static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct sk_buff *reply;
	struct vport *vport;
	int err;

	rtnl_lock();
	vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock;

	if (vport->port_no == OVSP_LOCAL) {
		err = -EINVAL;
		goto exit_unlock;
	}

	reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq,
					 OVS_VPORT_CMD_DEL);
	err = PTR_ERR(reply);
	if (IS_ERR(reply))
		goto exit_unlock;

	err = 0;
	ovs_dp_detach_port(vport);

	genl_notify(reply, genl_info_net(info), info->snd_portid,
		    ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);

exit_unlock:
	rtnl_unlock();
	return err;
}

static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct sk_buff *reply;
	struct vport *vport;
	int err;

	rcu_read_lock();
	vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock;

	reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq,
					 OVS_VPORT_CMD_NEW);
	err = PTR_ERR(reply);
	if (IS_ERR(reply))
		goto exit_unlock;

	rcu_read_unlock();

	return genlmsg_reply(reply, info);

exit_unlock:
	rcu_read_unlock();
	return err;
}

static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
	struct datapath *dp;
	int bucket = cb->args[0], skip = cb->args[1];
	int i, j = 0;

	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (!dp)
		return -ENODEV;

	rcu_read_lock();
	for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
		struct vport *vport;

		j = 0;
		hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
			if (j >= skip &&
			    ovs_vport_cmd_fill_info(vport, skb,
						    NETLINK_CB(cb->skb).portid,
						    cb->nlh->nlmsg_seq,
						    NLM_F_MULTI,
						    OVS_VPORT_CMD_NEW) < 0)
				goto out;

			j++;
		}
		skip = 0;
	}
out:
	rcu_read_unlock();

	cb->args[0] = i;
	cb->args[1] = j;

	return skb->len;
}

static struct genl_ops dp_vport_genl_ops[] = {
	{ .cmd = OVS_VPORT_CMD_NEW,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = vport_policy,
	  .doit = ovs_vport_cmd_new
	},
	{ .cmd = OVS_VPORT_CMD_DEL,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = vport_policy,
	  .doit = ovs_vport_cmd_del
	},
	{ .cmd = OVS_VPORT_CMD_GET,
	  .flags = 0,		    /* OK for unprivileged users. */
	  .policy = vport_policy,
	  .doit = ovs_vport_cmd_get,
	  .dumpit = ovs_vport_cmd_dump
	},
	{ .cmd = OVS_VPORT_CMD_SET,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = vport_policy,
	  .doit = ovs_vport_cmd_set,
	},
};
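
/* Bundle each Generic Netlink family with its ops table and multicast group
 * so that registration and unregistration can be done in a single loop below.
 */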
struct genl_family_and_ops {
	struct genl_family *family;
	struct genl_ops *ops;
	int n_ops;
	struct genl_multicast_group *group;
};

static const struct genl_family_and_ops dp_genl_families[] = {
	{ &dp_datapath_genl_family,
	  dp_datapath_genl_ops, ARRAY_SIZE(dp_datapath_genl_ops),
	  &ovs_dp_datapath_multicast_group },
	{ &dp_vport_genl_family,
	  dp_vport_genl_ops, ARRAY_SIZE(dp_vport_genl_ops),
	  &ovs_dp_vport_multicast_group },
	{ &dp_flow_genl_family,
	  dp_flow_genl_ops, ARRAY_SIZE(dp_flow_genl_ops),
	  &ovs_dp_flow_multicast_group },
	{ &dp_packet_genl_family,
	  dp_packet_genl_ops, ARRAY_SIZE(dp_packet_genl_ops),
	  NULL },
};

static void dp_unregister_genl(int n_families)
{
	int i;

	for (i = 0; i < n_families; i++)
		genl_unregister_family(dp_genl_families[i].family);
}
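
/* Register every family in dp_genl_families together with its multicast
 * group; on failure, unregister the families registered so far.
 */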
static int dp_register_genl(void)
{
	int n_registered;
	int err;
	int i;

	n_registered = 0;
	for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
		const struct genl_family_and_ops *f = &dp_genl_families[i];

		err = genl_register_family_with_ops(f->family, f->ops,
						    f->n_ops);
		if (err)
			goto error;
		n_registered++;

		if (f->group) {
			err = genl_register_mc_group(f->family, f->group);
			if (err)
				goto error;
		}
	}

	return 0;

error:
	dp_unregister_genl(n_registered);
	return err;
}

static void rehash_flow_table(struct work_struct *work)
{
	struct datapath *dp;
	struct net *net;

	genl_lock();
	for_each_net(net) {
		struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

		list_for_each_entry(dp, &ovs_net->dps, list_node) {
			struct flow_table *old_table = genl_dereference(dp->table);
			struct flow_table *new_table;

			new_table = ovs_flow_tbl_rehash(old_table);
			if (!IS_ERR(new_table)) {
				rcu_assign_pointer(dp->table, new_table);
				ovs_flow_tbl_deferred_destroy(old_table);
			}
		}
	}
	genl_unlock();

	schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
}

static int __net_init ovs_init_net(struct net *net)
{
	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

	INIT_LIST_HEAD(&ovs_net->dps);
	return 0;
}

static void __net_exit ovs_exit_net(struct net *net)
{
	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
	struct datapath *dp, *dp_next;

	genl_lock();
	list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
		__dp_destroy(dp);
	genl_unlock();
}

static struct pernet_operations ovs_net_ops = {
	.init = ovs_init_net,
	.exit = ovs_exit_net,
	.id   = &ovs_net_id,
	.size = sizeof(struct ovs_net),
};

static int __init dp_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));

	pr_info("Open vSwitch switching datapath\n");

	err = ovs_flow_init();
	if (err)
		goto error;

	err = ovs_vport_init();
	if (err)
		goto error_flow_exit;

	err = register_pernet_device(&ovs_net_ops);
	if (err)
		goto error_vport_exit;

	err = register_netdevice_notifier(&ovs_dp_device_notifier);
	if (err)
		goto error_netns_exit;

	err = dp_register_genl();
	if (err < 0)
		goto error_unreg_notifier;

	schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);

	return 0;

error_unreg_notifier:
	unregister_netdevice_notifier(&ovs_dp_device_notifier);
error_netns_exit:
	unregister_pernet_device(&ovs_net_ops);
error_vport_exit:
	ovs_vport_exit();
error_flow_exit:
	ovs_flow_exit();
error:
	return err;
}

static void dp_cleanup(void)
{
	cancel_delayed_work_sync(&rehash_flow_wq);
	dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
	unregister_netdevice_notifier(&ovs_dp_device_notifier);
	unregister_pernet_device(&ovs_net_ops);
	ovs_vport_exit();
	ovs_flow_exit();
}

module_init(dp_init);
module_exit(dp_cleanup);

MODULE_DESCRIPTION("Open vSwitch switching datapath");
MODULE_LICENSE("GPL");