/*
 * Copyright (c) 2007-2012 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/etherdevice.h>
#include <linux/genetlink.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/ethtool.h>
#include <linux/wait.h>
#include <asm/div64.h>
#include <linux/highmem.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/openvswitch.h>
#include <linux/rculist.h>
#include <linux/dmi.h>
#include <linux/workqueue.h>
#include <net/genetlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#include "vport-internal_dev.h"
/**
 * struct ovs_net - Per net-namespace data for ovs.
 * @dps: List of datapaths to enable dumping them all out.
 * Protected by genl_mutex.
 */

static int ovs_net_id __read_mostly;

#define REHASH_FLOW_INTERVAL (10 * 60 * HZ)
static void rehash_flow_table(struct work_struct *work);
static DECLARE_DELAYED_WORK(rehash_flow_wq, rehash_flow_table);
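
/*
 * Note: with HZ ticks per second, REHASH_FLOW_INTERVAL above works out to
 * ten minutes between flow-table rehashes (see rehash_flow_table() below).
 */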
/*
 * Locking:
 *
 * Writes to device state (add/remove datapath, port, set operations on vports,
 * etc.) are protected by RTNL.
 *
 * Writes to other state (flow table modifications, set miscellaneous datapath
 * parameters, etc.) are protected by genl_mutex.  The RTNL lock nests inside
 * genl_mutex.
 *
 * Reads are protected by RCU.
 *
 * There are a few special cases (mostly stats) that have their own
 * synchronization but they nest under all of the above and don't interact
 * with each other.
 */
static struct vport *new_vport(const struct vport_parms *);
static int queue_gso_packets(struct net *, int dp_ifindex, struct sk_buff *,
                             const struct dp_upcall_info *);
static int queue_userspace_packet(struct net *, int dp_ifindex,
                                  struct sk_buff *,
                                  const struct dp_upcall_info *);
/* Must be called with rcu_read_lock, genl_mutex, or RTNL lock. */
static struct datapath *get_dp(struct net *net, int dp_ifindex)
{
        struct datapath *dp = NULL;
        struct net_device *dev;

        rcu_read_lock();
        dev = dev_get_by_index_rcu(net, dp_ifindex);
        if (dev) {
                struct vport *vport = ovs_internal_dev_get_vport(dev);
                if (vport)
                        dp = vport->dp;
        }
        rcu_read_unlock();

        return dp;
}
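
/*
 * A datapath is identified by the ifindex of its local internal port, so
 * get_dp() resolves dp_ifindex to a net_device and then back to the owning
 * datapath through the internal-dev vport.
 */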
/* Must be called with rcu_read_lock or RTNL lock. */
const char *ovs_dp_name(const struct datapath *dp)
{
        struct vport *vport = ovs_vport_rtnl_rcu(dp, OVSP_LOCAL);
        return vport->ops->get_name(vport);
}
static int get_dpifindex(struct datapath *dp)
{
        struct vport *local;
        int ifindex;

        rcu_read_lock();

        local = ovs_vport_rcu(dp, OVSP_LOCAL);
        if (local)
                ifindex = local->ops->get_ifindex(local);
        else
                ifindex = 0;

        rcu_read_unlock();

        return ifindex;
}
static void destroy_dp_rcu(struct rcu_head *rcu)
{
        struct datapath *dp = container_of(rcu, struct datapath, rcu);

        ovs_flow_tbl_destroy((__force struct flow_table *)dp->table);
        free_percpu(dp->stats_percpu);
        release_net(ovs_dp_get_net(dp));
        kfree(dp->ports);
        kfree(dp);
}
static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
                                            u16 port_no)
{
        return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
}
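
/*
 * The bucket mask above relies on DP_VPORT_HASH_BUCKETS being a power of
 * two: port_no & (n - 1) is equivalent to port_no % n only in that case.
 */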
struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
{
        struct vport *vport;
        struct hlist_node *n;
        struct hlist_head *head;

        head = vport_hash_bucket(dp, port_no);
        hlist_for_each_entry_rcu(vport, n, head, dp_hash_node) {
                if (vport->port_no == port_no)
                        return vport;
        }
        return NULL;
}
/* Called with RTNL lock and genl_lock. */
static struct vport *new_vport(const struct vport_parms *parms)
{
        struct vport *vport;

        vport = ovs_vport_add(parms);
        if (!IS_ERR(vport)) {
                struct datapath *dp = parms->dp;
                struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);

                hlist_add_head_rcu(&vport->dp_hash_node, head);
        }

        return vport;
}
/* Called with RTNL lock. */
void ovs_dp_detach_port(struct vport *p)
{
        /* First drop references to device. */
        hlist_del_rcu(&p->dp_hash_node);

        /* Then destroy it. */
        ovs_vport_del(p);
}
/* Must be called with rcu_read_lock. */
void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
{
        struct datapath *dp = p->dp;
        struct sw_flow *flow;
        struct dp_stats_percpu *stats;
        struct sw_flow_key key;
        u64 *stats_counter;
        int error;
        int key_len;

        stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());

        /* Extract flow from 'skb' into 'key'. */
        error = ovs_flow_extract(skb, p->port_no, &key, &key_len);
        if (unlikely(error)) {
                kfree_skb(skb);
                return;
        }

        /* Look up flow. */
        flow = ovs_flow_tbl_lookup(rcu_dereference(dp->table), &key, key_len);
        if (unlikely(!flow)) {
                struct dp_upcall_info upcall;

                upcall.cmd = OVS_PACKET_CMD_MISS;
                upcall.key = &key;
                upcall.userdata = NULL;
                upcall.portid = p->upcall_portid;
                ovs_dp_upcall(dp, skb, &upcall);
                stats_counter = &stats->n_missed;
                goto out;
        }

        OVS_CB(skb)->flow = flow;

        stats_counter = &stats->n_hit;
        ovs_flow_used(OVS_CB(skb)->flow, skb);
        ovs_execute_actions(dp, skb);

out:
        /* Update datapath statistics. */
        u64_stats_update_begin(&stats->sync);
        (*stats_counter)++;
        u64_stats_update_end(&stats->sync);
}
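
/*
 * Fast path summary: packets matching an installed flow are executed
 * directly (n_hit), misses are queued to userspace with OVS_PACKET_CMD_MISS
 * (n_missed), and failed upcalls are accounted via the n_lost counter in
 * ovs_dp_upcall() below.
 */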
static struct genl_family dp_packet_genl_family = {
        .id = GENL_ID_GENERATE,
        .hdrsize = sizeof(struct ovs_header),
        .name = OVS_PACKET_FAMILY,
        .version = OVS_PACKET_VERSION,
        .maxattr = OVS_PACKET_ATTR_MAX,
};
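
/*
 * GENL_ID_GENERATE asks the Generic Netlink core to pick a free family id
 * at registration time; userspace then resolves OVS_PACKET_FAMILY by name.
 */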
int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
                  const struct dp_upcall_info *upcall_info)
{
        struct dp_stats_percpu *stats;
        int dp_ifindex;
        int err;

        if (upcall_info->portid == 0) {
                err = -ENOTCONN;
                goto err;
        }

        dp_ifindex = get_dpifindex(dp);
        if (!dp_ifindex) {
                err = -ENODEV;
                goto err;
        }

        if (!skb_is_gso(skb))
                err = queue_userspace_packet(ovs_dp_get_net(dp), dp_ifindex, skb, upcall_info);
        else
                err = queue_gso_packets(ovs_dp_get_net(dp), dp_ifindex, skb, upcall_info);
        if (err)
                goto err;

        return 0;

err:
        stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());

        u64_stats_update_begin(&stats->sync);
        stats->n_lost++;
        u64_stats_update_end(&stats->sync);

        return err;
}
static int queue_gso_packets(struct net *net, int dp_ifindex,
                             struct sk_buff *skb,
                             const struct dp_upcall_info *upcall_info)
{
        unsigned short gso_type = skb_shinfo(skb)->gso_type;
        struct dp_upcall_info later_info;
        struct sw_flow_key later_key;
        struct sk_buff *segs, *nskb;
        int err;

        segs = skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM);
        if (IS_ERR(segs))
                return PTR_ERR(segs);

        /* Queue all of the segments. */
        do {
                err = queue_userspace_packet(net, dp_ifindex, skb, upcall_info);
                if (err)
                        break;

                if (skb == segs && gso_type & SKB_GSO_UDP) {
                        /* The initial flow key extracted by ovs_flow_extract()
                         * in this case is for a first fragment, so we need to
                         * properly mark later fragments.
                         */
                        later_key = *upcall_info->key;
                        later_key.ip.frag = OVS_FRAG_TYPE_LATER;

                        later_info = *upcall_info;
                        later_info.key = &later_key;
                        upcall_info = &later_info;
                }
        } while ((skb = skb->next));

        /* Free all of the segments. */
        skb = segs;
        do {
                nskb = skb->next;
                if (err)
                        kfree_skb(skb);
                else
                        consume_skb(skb);
        } while ((skb = nskb));
        return err;
}
static int queue_userspace_packet(struct net *net, int dp_ifindex,
                                  struct sk_buff *skb,
                                  const struct dp_upcall_info *upcall_info)
{
        struct ovs_header *upcall;
        struct sk_buff *nskb = NULL;
        struct sk_buff *user_skb; /* to be queued to userspace */
        struct nlattr *nla;
        unsigned int len;
        int err;

        if (vlan_tx_tag_present(skb)) {
                nskb = skb_clone(skb, GFP_ATOMIC);
                if (!nskb)
                        return -ENOMEM;

                nskb = __vlan_put_tag(nskb, vlan_tx_tag_get(nskb));
                if (!nskb)
                        return -ENOMEM;

                nskb->vlan_tci = 0;
                skb = nskb;
        }

        if (nla_attr_size(skb->len) > USHRT_MAX) {
                err = -EFBIG;
                goto out;
        }

        len = sizeof(struct ovs_header);
        len += nla_total_size(skb->len);
        len += nla_total_size(FLOW_BUFSIZE);
        if (upcall_info->cmd == OVS_PACKET_CMD_ACTION)
                len += nla_total_size(8);

        user_skb = genlmsg_new(len, GFP_ATOMIC);
        if (!user_skb) {
                err = -ENOMEM;
                goto out;
        }

        upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
                             0, upcall_info->cmd);
        upcall->dp_ifindex = dp_ifindex;

        nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
        ovs_flow_to_nlattrs(upcall_info->key, user_skb);
        nla_nest_end(user_skb, nla);

        if (upcall_info->userdata)
                nla_put_u64(user_skb, OVS_PACKET_ATTR_USERDATA,
                            nla_get_u64(upcall_info->userdata));

        nla = __nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, skb->len);

        skb_copy_and_csum_dev(skb, nla_data(nla));

        err = genlmsg_unicast(net, user_skb, upcall_info->portid);

out:
        kfree_skb(nskb);
        return err;
}
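
/*
 * The upcall message built above carries an ovs_header, the flow key in a
 * nested OVS_PACKET_ATTR_KEY attribute, the packet bytes in
 * OVS_PACKET_ATTR_PACKET and, for OVS_PACKET_CMD_ACTION, an optional
 * OVS_PACKET_ATTR_USERDATA value, all sized up front via nla_total_size().
 */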
/* Called with genl_mutex. */
static int flush_flows(struct datapath *dp)
{
        struct flow_table *old_table;
        struct flow_table *new_table;

        old_table = genl_dereference(dp->table);
        new_table = ovs_flow_tbl_alloc(TBL_MIN_BUCKETS);
        if (!new_table)
                return -ENOMEM;

        rcu_assign_pointer(dp->table, new_table);

        ovs_flow_tbl_deferred_destroy(old_table);

        return 0;
}
static int validate_actions(const struct nlattr *attr,
                            const struct sw_flow_key *key, int depth);
static int validate_sample(const struct nlattr *attr,
                           const struct sw_flow_key *key, int depth)
{
        const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1];
        const struct nlattr *probability, *actions;
        const struct nlattr *a;
        int rem;

        memset(attrs, 0, sizeof(attrs));
        nla_for_each_nested(a, attr, rem) {
                int type = nla_type(a);
                if (!type || type > OVS_SAMPLE_ATTR_MAX || attrs[type])
                        return -EINVAL;
                attrs[type] = a;
        }

        probability = attrs[OVS_SAMPLE_ATTR_PROBABILITY];
        if (!probability || nla_len(probability) != sizeof(u32))
                return -EINVAL;

        actions = attrs[OVS_SAMPLE_ATTR_ACTIONS];
        if (!actions || (nla_len(actions) && nla_len(actions) < NLA_HDRLEN))
                return -EINVAL;

        return validate_actions(actions, key, depth + 1);
}
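
/*
 * Sample actions may nest further action lists, so validate_sample()
 * recurses back into validate_actions() with depth + 1; the
 * SAMPLE_ACTION_DEPTH check in validate_actions() bounds that recursion.
 */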
static int validate_tp_port(const struct sw_flow_key *flow_key)
{
        if (flow_key->eth.type == htons(ETH_P_IP)) {
                if (flow_key->ipv4.tp.src || flow_key->ipv4.tp.dst)
                        return 0;
        } else if (flow_key->eth.type == htons(ETH_P_IPV6)) {
                if (flow_key->ipv6.tp.src || flow_key->ipv6.tp.dst)
                        return 0;
        }

        return -EINVAL;
}
static int validate_set(const struct nlattr *a,
                        const struct sw_flow_key *flow_key)
{
        const struct nlattr *ovs_key = nla_data(a);
        int key_type = nla_type(ovs_key);

        /* There can be only one key in an action */
        if (nla_total_size(nla_len(ovs_key)) != nla_len(a))
                return -EINVAL;

        if (key_type > OVS_KEY_ATTR_MAX ||
            nla_len(ovs_key) != ovs_key_lens[key_type])
                return -EINVAL;

        switch (key_type) {
        const struct ovs_key_ipv4 *ipv4_key;

        case OVS_KEY_ATTR_PRIORITY:
        case OVS_KEY_ATTR_ETHERNET:
                break;

        case OVS_KEY_ATTR_IPV4:
                if (flow_key->eth.type != htons(ETH_P_IP))
                        return -EINVAL;

                if (!flow_key->ip.proto)
                        return -EINVAL;

                ipv4_key = nla_data(ovs_key);
                if (ipv4_key->ipv4_proto != flow_key->ip.proto)
                        return -EINVAL;

                if (ipv4_key->ipv4_frag != flow_key->ip.frag)
                        return -EINVAL;

                break;

        case OVS_KEY_ATTR_TCP:
                if (flow_key->ip.proto != IPPROTO_TCP)
                        return -EINVAL;

                return validate_tp_port(flow_key);

        case OVS_KEY_ATTR_UDP:
                if (flow_key->ip.proto != IPPROTO_UDP)
                        return -EINVAL;

                return validate_tp_port(flow_key);

        default:
                return -EINVAL;
        }

        return 0;
}
static int validate_userspace(const struct nlattr *attr)
{
        static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] = {
                [OVS_USERSPACE_ATTR_PID] = {.type = NLA_U32 },
                [OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_U64 },
        };
        struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1];
        int error;

        error = nla_parse_nested(a, OVS_USERSPACE_ATTR_MAX,
                                 attr, userspace_policy);
        if (error)
                return error;

        if (!a[OVS_USERSPACE_ATTR_PID] ||
            !nla_get_u32(a[OVS_USERSPACE_ATTR_PID]))
                return -EINVAL;

        return 0;
}
static int validate_actions(const struct nlattr *attr,
                            const struct sw_flow_key *key, int depth)
{
        const struct nlattr *a;
        int rem, err;

        if (depth >= SAMPLE_ACTION_DEPTH)
                return -EOVERFLOW;

        nla_for_each_nested(a, attr, rem) {
                /* Expected argument lengths, (u32)-1 for variable length. */
                static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = {
                        [OVS_ACTION_ATTR_OUTPUT] = sizeof(u32),
                        [OVS_ACTION_ATTR_USERSPACE] = (u32)-1,
                        [OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan),
                        [OVS_ACTION_ATTR_POP_VLAN] = 0,
                        [OVS_ACTION_ATTR_SET] = (u32)-1,
                        [OVS_ACTION_ATTR_SAMPLE] = (u32)-1
                };
                const struct ovs_action_push_vlan *vlan;
                int type = nla_type(a);

                if (type > OVS_ACTION_ATTR_MAX ||
                    (action_lens[type] != nla_len(a) &&
                     action_lens[type] != (u32)-1))
                        return -EINVAL;

                switch (type) {
                case OVS_ACTION_ATTR_UNSPEC:
                        return -EINVAL;

                case OVS_ACTION_ATTR_USERSPACE:
                        err = validate_userspace(a);
                        if (err)
                                return err;
                        break;

                case OVS_ACTION_ATTR_OUTPUT:
                        if (nla_get_u32(a) >= DP_MAX_PORTS)
                                return -EINVAL;
                        break;

                case OVS_ACTION_ATTR_POP_VLAN:
                        break;

                case OVS_ACTION_ATTR_PUSH_VLAN:
                        vlan = nla_data(a);
                        if (vlan->vlan_tpid != htons(ETH_P_8021Q))
                                return -EINVAL;
                        if (!(vlan->vlan_tci & htons(VLAN_TAG_PRESENT)))
                                return -EINVAL;
                        break;

                case OVS_ACTION_ATTR_SET:
                        err = validate_set(a, key);
                        if (err)
                                return err;
                        break;

                case OVS_ACTION_ATTR_SAMPLE:
                        err = validate_sample(a, key, depth);
                        if (err)
                                return err;
                        break;

                default:
                        return -EINVAL;
                }
        }

        if (rem > 0)
                return -EINVAL;

        return 0;
}
static void clear_stats(struct sw_flow *flow)
{
        flow->packet_count = 0;
        flow->byte_count = 0;
}
static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
{
        struct ovs_header *ovs_header = info->userhdr;
        struct nlattr **a = info->attrs;
        struct sw_flow_actions *acts;
        struct sk_buff *packet;
        struct sw_flow *flow;
        struct datapath *dp;
        struct ethhdr *eth;
        int len;
        int err;
        int key_len;

        err = -EINVAL;
        if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
            !a[OVS_PACKET_ATTR_ACTIONS] ||
            nla_len(a[OVS_PACKET_ATTR_PACKET]) < ETH_HLEN)
                goto err;

        len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
        packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
        err = -ENOMEM;
        if (!packet)
                goto err;
        skb_reserve(packet, NET_IP_ALIGN);

        memcpy(__skb_put(packet, len), nla_data(a[OVS_PACKET_ATTR_PACKET]), len);

        skb_reset_mac_header(packet);
        eth = eth_hdr(packet);

        /* Normally, setting the skb 'protocol' field would be handled by a
         * call to eth_type_trans(), but it assumes there's a sending
         * device, which we may not have. */
        if (ntohs(eth->h_proto) >= 1536)
                packet->protocol = eth->h_proto;
        else
                packet->protocol = htons(ETH_P_802_2);
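
        /* Values of h_proto greater than or equal to 1536 (0x600) are
         * EtherTypes; smaller values are 802.3 length fields, which is why
         * such frames fall back to ETH_P_802_2 above. */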
        /* Build an sw_flow for sending this packet. */
        flow = ovs_flow_alloc();
        err = PTR_ERR(flow);
        if (IS_ERR(flow))
                goto err_kfree_skb;

        err = ovs_flow_extract(packet, -1, &flow->key, &key_len);
        if (err)
                goto err_flow_free;

        err = ovs_flow_metadata_from_nlattrs(&flow->key.phy.priority,
                                             &flow->key.phy.in_port,
                                             a[OVS_PACKET_ATTR_KEY]);
        if (err)
                goto err_flow_free;

        err = validate_actions(a[OVS_PACKET_ATTR_ACTIONS], &flow->key, 0);
        if (err)
                goto err_flow_free;

        flow->hash = ovs_flow_hash(&flow->key, key_len);

        acts = ovs_flow_actions_alloc(a[OVS_PACKET_ATTR_ACTIONS]);
        err = PTR_ERR(acts);
        if (IS_ERR(acts))
                goto err_flow_free;
        rcu_assign_pointer(flow->sf_acts, acts);

        OVS_CB(packet)->flow = flow;
        packet->priority = flow->key.phy.priority;

        rcu_read_lock();
        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        err = -ENODEV;
        if (!dp)
                goto err_unlock;

        err = ovs_execute_actions(dp, packet);
        rcu_read_unlock();

        ovs_flow_free(flow);
        return err;

err_unlock:
        rcu_read_unlock();
err_flow_free:
        ovs_flow_free(flow);
err_kfree_skb:
        kfree_skb(packet);
err:
        return err;
}
static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
        [OVS_PACKET_ATTR_PACKET] = { .type = NLA_UNSPEC },
        [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
        [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
};

static struct genl_ops dp_packet_genl_ops[] = {
        { .cmd = OVS_PACKET_CMD_EXECUTE,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = packet_policy,
          .doit = ovs_packet_cmd_execute
        }
};
static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats)
{
        int i;
        struct flow_table *table = genl_dereference(dp->table);

        stats->n_flows = ovs_flow_tbl_count(table);

        stats->n_hit = stats->n_missed = stats->n_lost = 0;
        for_each_possible_cpu(i) {
                const struct dp_stats_percpu *percpu_stats;
                struct dp_stats_percpu local_stats;
                unsigned int start;

                percpu_stats = per_cpu_ptr(dp->stats_percpu, i);

                do {
                        start = u64_stats_fetch_begin_bh(&percpu_stats->sync);
                        local_stats = *percpu_stats;
                } while (u64_stats_fetch_retry_bh(&percpu_stats->sync, start));

                stats->n_hit += local_stats.n_hit;
                stats->n_missed += local_stats.n_missed;
                stats->n_lost += local_stats.n_lost;
        }
}
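
/*
 * The u64_stats_fetch_begin_bh()/retry_bh() loop above is the seqcount
 * pattern that lets 32-bit systems read 64-bit per-CPU counters without a
 * lock: the copy is retried if a writer updated the counters mid-read.
 */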
static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
        [OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
        [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
        [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
};

static struct genl_family dp_flow_genl_family = {
        .id = GENL_ID_GENERATE,
        .hdrsize = sizeof(struct ovs_header),
        .name = OVS_FLOW_FAMILY,
        .version = OVS_FLOW_VERSION,
        .maxattr = OVS_FLOW_ATTR_MAX,
};

static struct genl_multicast_group ovs_dp_flow_multicast_group = {
        .name = OVS_FLOW_MCGROUP
};
/* Called with genl_lock. */
static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
                                  struct sk_buff *skb, u32 portid,
                                  u32 seq, u32 flags, u8 cmd)
{
        const int skb_orig_len = skb->len;
        const struct sw_flow_actions *sf_acts;
        struct ovs_flow_stats stats;
        struct ovs_header *ovs_header;
        struct nlattr *nla;
        unsigned long used;
        u8 tcp_flags;
        int err;

        sf_acts = rcu_dereference_protected(flow->sf_acts,
                                            lockdep_genl_is_held());

        ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family, flags, cmd);
        if (!ovs_header)
                return -EMSGSIZE;

        ovs_header->dp_ifindex = get_dpifindex(dp);

        nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
        if (!nla)
                goto nla_put_failure;
        err = ovs_flow_to_nlattrs(&flow->key, skb);
        if (err)
                goto error;
        nla_nest_end(skb, nla);

        spin_lock_bh(&flow->lock);
        used = flow->used;
        stats.n_packets = flow->packet_count;
        stats.n_bytes = flow->byte_count;
        tcp_flags = flow->tcp_flags;
        spin_unlock_bh(&flow->lock);

        if (used &&
            nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)))
                goto nla_put_failure;

        if (stats.n_packets &&
            nla_put(skb, OVS_FLOW_ATTR_STATS,
                    sizeof(struct ovs_flow_stats), &stats))
                goto nla_put_failure;

        if (tcp_flags &&
            nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags))
                goto nla_put_failure;

        /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
         * this is the first flow to be dumped into 'skb'.  This is unusual for
         * Netlink but individual action lists can be longer than
         * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
         * The userspace caller can always fetch the actions separately if it
         * really wants them.  (Most userspace callers in fact don't care.)
         *
         * This can only fail for dump operations because the skb is always
         * properly sized for single flows.
         */
        err = nla_put(skb, OVS_FLOW_ATTR_ACTIONS, sf_acts->actions_len,
                      sf_acts->actions);
        if (err < 0 && skb_orig_len)
                goto error;

        return genlmsg_end(skb, ovs_header);

nla_put_failure:
        err = -EMSGSIZE;
error:
        genlmsg_cancel(skb, ovs_header);
        return err;
}
static struct sk_buff *ovs_flow_cmd_alloc_info(struct sw_flow *flow)
{
        const struct sw_flow_actions *sf_acts;
        int len;

        sf_acts = rcu_dereference_protected(flow->sf_acts,
                                            lockdep_genl_is_held());

        /* OVS_FLOW_ATTR_KEY */
        len = nla_total_size(FLOW_BUFSIZE);
        /* OVS_FLOW_ATTR_ACTIONS */
        len += nla_total_size(sf_acts->actions_len);
        /* OVS_FLOW_ATTR_STATS */
        len += nla_total_size(sizeof(struct ovs_flow_stats));
        /* OVS_FLOW_ATTR_TCP_FLAGS */
        len += nla_total_size(1);
        /* OVS_FLOW_ATTR_USED */
        len += nla_total_size(8);

        len += NLMSG_ALIGN(sizeof(struct ovs_header));

        return genlmsg_new(len, GFP_KERNEL);
}
static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow,
                                               struct datapath *dp,
                                               u32 portid, u32 seq, u8 cmd)
{
        struct sk_buff *skb;
        int retval;

        skb = ovs_flow_cmd_alloc_info(flow);
        if (!skb)
                return ERR_PTR(-ENOMEM);

        retval = ovs_flow_cmd_fill_info(flow, dp, skb, portid, seq, 0, cmd);
        return skb;
}
static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct ovs_header *ovs_header = info->userhdr;
        struct sw_flow_key key;
        struct sw_flow *flow;
        struct sk_buff *reply;
        struct datapath *dp;
        struct flow_table *table;
        int error;
        int key_len;

        /* Extract key. */
        error = -EINVAL;
        if (!a[OVS_FLOW_ATTR_KEY])
                goto error;
        error = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
        if (error)
                goto error;

        /* Validate actions. */
        if (a[OVS_FLOW_ATTR_ACTIONS]) {
                error = validate_actions(a[OVS_FLOW_ATTR_ACTIONS], &key, 0);
                if (error)
                        goto error;
        } else if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW) {
                error = -EINVAL;
                goto error;
        }

        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        error = -ENODEV;
        if (!dp)
                goto error;

        table = genl_dereference(dp->table);
        flow = ovs_flow_tbl_lookup(table, &key, key_len);
        if (!flow) {
                struct sw_flow_actions *acts;

                /* Bail out if we're not allowed to create a new flow. */
                error = -ENOENT;
                if (info->genlhdr->cmd == OVS_FLOW_CMD_SET)
                        goto error;

                /* Expand table, if necessary, to make room. */
                if (ovs_flow_tbl_need_to_expand(table)) {
                        struct flow_table *new_table;

                        new_table = ovs_flow_tbl_expand(table);
                        if (!IS_ERR(new_table)) {
                                rcu_assign_pointer(dp->table, new_table);
                                ovs_flow_tbl_deferred_destroy(table);
                                table = genl_dereference(dp->table);
                        }
                }

                /* Allocate flow. */
                flow = ovs_flow_alloc();
                if (IS_ERR(flow)) {
                        error = PTR_ERR(flow);
                        goto error;
                }
                flow->key = key;
                clear_stats(flow);

                /* Obtain actions. */
                acts = ovs_flow_actions_alloc(a[OVS_FLOW_ATTR_ACTIONS]);
                error = PTR_ERR(acts);
                if (IS_ERR(acts))
                        goto error_free_flow;
                rcu_assign_pointer(flow->sf_acts, acts);

                /* Put flow in bucket. */
                flow->hash = ovs_flow_hash(&key, key_len);
                ovs_flow_tbl_insert(table, flow);

                reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
                                                info->snd_seq, OVS_FLOW_CMD_NEW);
        } else {
                /* We found a matching flow. */
                struct sw_flow_actions *old_acts;
                struct nlattr *acts_attrs;

                /* Bail out if we're not allowed to modify an existing flow.
                 * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
                 * because Generic Netlink treats the latter as a dump
                 * request.  We also accept NLM_F_EXCL in case that bug ever
                 * gets fixed.
                 */
                error = -EEXIST;
                if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW &&
                    info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL))
                        goto error;

                /* Update actions. */
                old_acts = rcu_dereference_protected(flow->sf_acts,
                                                     lockdep_genl_is_held());
                acts_attrs = a[OVS_FLOW_ATTR_ACTIONS];
                if (acts_attrs &&
                    (old_acts->actions_len != nla_len(acts_attrs) ||
                     memcmp(old_acts->actions, nla_data(acts_attrs),
                            old_acts->actions_len))) {
                        struct sw_flow_actions *new_acts;

                        new_acts = ovs_flow_actions_alloc(acts_attrs);
                        error = PTR_ERR(new_acts);
                        if (IS_ERR(new_acts))
                                goto error;

                        rcu_assign_pointer(flow->sf_acts, new_acts);
                        ovs_flow_deferred_free_acts(old_acts);
                }

                reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
                                                info->snd_seq, OVS_FLOW_CMD_NEW);

                /* Clear stats. */
                if (a[OVS_FLOW_ATTR_CLEAR]) {
                        spin_lock_bh(&flow->lock);
                        clear_stats(flow);
                        spin_unlock_bh(&flow->lock);
                }
        }

        if (!IS_ERR(reply))
                genl_notify(reply, genl_info_net(info), info->snd_portid,
                            ovs_dp_flow_multicast_group.id, info->nlhdr,
                            GFP_KERNEL);
        else
                netlink_set_err(sock_net(skb->sk)->genl_sock, 0,
                                ovs_dp_flow_multicast_group.id, PTR_ERR(reply));
        return 0;

error_free_flow:
        ovs_flow_free(flow);
error:
        return error;
}
static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct ovs_header *ovs_header = info->userhdr;
        struct sw_flow_key key;
        struct sk_buff *reply;
        struct sw_flow *flow;
        struct datapath *dp;
        struct flow_table *table;
        int err;
        int key_len;

        if (!a[OVS_FLOW_ATTR_KEY])
                return -EINVAL;
        err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
        if (err)
                return err;

        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        if (!dp)
                return -ENODEV;

        table = genl_dereference(dp->table);
        flow = ovs_flow_tbl_lookup(table, &key, key_len);
        if (!flow)
                return -ENOENT;

        reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
                                        info->snd_seq, OVS_FLOW_CMD_NEW);
        if (IS_ERR(reply))
                return PTR_ERR(reply);

        return genlmsg_reply(reply, info);
}
static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct ovs_header *ovs_header = info->userhdr;
        struct sw_flow_key key;
        struct sk_buff *reply;
        struct sw_flow *flow;
        struct datapath *dp;
        struct flow_table *table;
        int err;
        int key_len;

        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        if (!dp)
                return -ENODEV;

        if (!a[OVS_FLOW_ATTR_KEY])
                return flush_flows(dp);

        err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
        if (err)
                return err;

        table = genl_dereference(dp->table);
        flow = ovs_flow_tbl_lookup(table, &key, key_len);
        if (!flow)
                return -ENOENT;

        reply = ovs_flow_cmd_alloc_info(flow);
        if (!reply)
                return -ENOMEM;

        ovs_flow_tbl_remove(table, flow);

        err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_portid,
                                     info->snd_seq, 0, OVS_FLOW_CMD_DEL);

        ovs_flow_deferred_free(flow);

        genl_notify(reply, genl_info_net(info), info->snd_portid,
                    ovs_dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL);
        return 0;
}
static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
        struct datapath *dp;
        struct flow_table *table;

        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        if (!dp)
                return -ENODEV;

        table = genl_dereference(dp->table);

        for (;;) {
                struct sw_flow *flow;
                u32 bucket, obj;

                bucket = cb->args[0];
                obj = cb->args[1];
                flow = ovs_flow_tbl_next(table, &bucket, &obj);
                if (!flow)
                        break;

                if (ovs_flow_cmd_fill_info(flow, dp, skb,
                                           NETLINK_CB(cb->skb).portid,
                                           cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                           OVS_FLOW_CMD_NEW) < 0)
                        break;

                cb->args[0] = bucket;
                cb->args[1] = obj;
        }

        return skb->len;
}
static struct genl_ops dp_flow_genl_ops[] = {
        { .cmd = OVS_FLOW_CMD_NEW,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = flow_policy,
          .doit = ovs_flow_cmd_new_or_set
        },
        { .cmd = OVS_FLOW_CMD_DEL,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = flow_policy,
          .doit = ovs_flow_cmd_del
        },
        { .cmd = OVS_FLOW_CMD_GET,
          .flags = 0, /* OK for unprivileged users. */
          .policy = flow_policy,
          .doit = ovs_flow_cmd_get,
          .dumpit = ovs_flow_cmd_dump
        },
        { .cmd = OVS_FLOW_CMD_SET,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = flow_policy,
          .doit = ovs_flow_cmd_new_or_set,
        },
};
static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
        [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
        [OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
};

static struct genl_family dp_datapath_genl_family = {
        .id = GENL_ID_GENERATE,
        .hdrsize = sizeof(struct ovs_header),
        .name = OVS_DATAPATH_FAMILY,
        .version = OVS_DATAPATH_VERSION,
        .maxattr = OVS_DP_ATTR_MAX,
};

static struct genl_multicast_group ovs_dp_datapath_multicast_group = {
        .name = OVS_DATAPATH_MCGROUP
};
static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
                                u32 portid, u32 seq, u32 flags, u8 cmd)
{
        struct ovs_header *ovs_header;
        struct ovs_dp_stats dp_stats;
        int err;

        ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family,
                                 flags, cmd);
        if (!ovs_header)
                goto error;

        ovs_header->dp_ifindex = get_dpifindex(dp);

        rcu_read_lock();
        err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
        rcu_read_unlock();
        if (err)
                goto nla_put_failure;

        get_dp_stats(dp, &dp_stats);
        if (nla_put(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats), &dp_stats))
                goto nla_put_failure;

        return genlmsg_end(skb, ovs_header);

nla_put_failure:
        genlmsg_cancel(skb, ovs_header);
error:
        return -EMSGSIZE;
}
static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp, u32 portid,
                                             u32 seq, u8 cmd)
{
        struct sk_buff *skb;
        int retval;

        skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!skb)
                return ERR_PTR(-ENOMEM);

        retval = ovs_dp_cmd_fill_info(dp, skb, portid, seq, 0, cmd);
        if (retval < 0) {
                kfree_skb(skb);
                return ERR_PTR(retval);
        }
        return skb;
}
/* Called with genl_mutex and optionally with RTNL lock also. */
static struct datapath *lookup_datapath(struct net *net,
                                        struct ovs_header *ovs_header,
                                        struct nlattr *a[OVS_DP_ATTR_MAX + 1])
{
        struct datapath *dp;

        if (!a[OVS_DP_ATTR_NAME])
                dp = get_dp(net, ovs_header->dp_ifindex);
        else {
                struct vport *vport;

                rcu_read_lock();
                vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME]));
                dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
                rcu_read_unlock();
        }
        return dp ? dp : ERR_PTR(-ENODEV);
}
static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct vport_parms parms;
        struct sk_buff *reply;
        struct datapath *dp;
        struct vport *vport;
        struct ovs_net *ovs_net;
        int err, i;

        err = -EINVAL;
        if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
                goto err;

        rtnl_lock();

        err = -ENOMEM;
        dp = kzalloc(sizeof(*dp), GFP_KERNEL);
        if (dp == NULL)
                goto err_unlock_rtnl;

        ovs_dp_set_net(dp, hold_net(sock_net(skb->sk)));

        /* Allocate table. */
        rcu_assign_pointer(dp->table, ovs_flow_tbl_alloc(TBL_MIN_BUCKETS));

        dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
        if (!dp->stats_percpu) {
                err = -ENOMEM;
                goto err_destroy_table;
        }

        dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
                            GFP_KERNEL);
        if (!dp->ports) {
                err = -ENOMEM;
                goto err_destroy_percpu;
        }

        for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
                INIT_HLIST_HEAD(&dp->ports[i]);

        /* Set up our datapath device. */
        parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
        parms.type = OVS_VPORT_TYPE_INTERNAL;
        parms.options = NULL;
        parms.dp = dp;
        parms.port_no = OVSP_LOCAL;
        parms.upcall_portid = nla_get_u32(a[OVS_DP_ATTR_UPCALL_PID]);

        vport = new_vport(&parms);
        if (IS_ERR(vport)) {
                err = PTR_ERR(vport);
                goto err_destroy_ports_array;
        }

        reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
                                      info->snd_seq, OVS_DP_CMD_NEW);
        err = PTR_ERR(reply);
        if (IS_ERR(reply))
                goto err_destroy_local_port;

        ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
        list_add_tail(&dp->list_node, &ovs_net->dps);
        rtnl_unlock();

        genl_notify(reply, genl_info_net(info), info->snd_portid,
                    ovs_dp_datapath_multicast_group.id, info->nlhdr,
                    GFP_KERNEL);
        return 0;

err_destroy_local_port:
        ovs_dp_detach_port(ovs_vport_rtnl(dp, OVSP_LOCAL));
err_destroy_ports_array:
        kfree(dp->ports);
err_destroy_percpu:
        free_percpu(dp->stats_percpu);
err_destroy_table:
        ovs_flow_tbl_destroy(genl_dereference(dp->table));
        release_net(ovs_dp_get_net(dp));
        kfree(dp);
err_unlock_rtnl:
        rtnl_unlock();
err:
        return err;
}
/* Called with genl_mutex. */
static void __dp_destroy(struct datapath *dp)
{
        int i;

        rtnl_lock();

        for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
                struct vport *vport;
                struct hlist_node *node, *n;

                hlist_for_each_entry_safe(vport, node, n, &dp->ports[i], dp_hash_node)
                        if (vport->port_no != OVSP_LOCAL)
                                ovs_dp_detach_port(vport);
        }

        list_del(&dp->list_node);
        ovs_dp_detach_port(ovs_vport_rtnl(dp, OVSP_LOCAL));

        /* rtnl_unlock() will wait until all the references to devices that
         * are pending unregistration have been dropped.  We do it here to
         * ensure that any internal devices (which contain DP pointers) are
         * fully destroyed before freeing the datapath.
         */
        rtnl_unlock();

        call_rcu(&dp->rcu, destroy_dp_rcu);
}
static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
        struct sk_buff *reply;
        struct datapath *dp;
        int err;

        dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
        err = PTR_ERR(dp);
        if (IS_ERR(dp))
                return err;

        reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
                                      info->snd_seq, OVS_DP_CMD_DEL);
        err = PTR_ERR(reply);
        if (IS_ERR(reply))
                return err;

        __dp_destroy(dp);

        genl_notify(reply, genl_info_net(info), info->snd_portid,
                    ovs_dp_datapath_multicast_group.id, info->nlhdr,
                    GFP_KERNEL);

        return 0;
}
static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
        struct sk_buff *reply;
        struct datapath *dp;
        int err;

        dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
        if (IS_ERR(dp))
                return PTR_ERR(dp);

        reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
                                      info->snd_seq, OVS_DP_CMD_NEW);
        if (IS_ERR(reply)) {
                err = PTR_ERR(reply);
                netlink_set_err(sock_net(skb->sk)->genl_sock, 0,
                                ovs_dp_datapath_multicast_group.id, err);
                return 0;
        }

        genl_notify(reply, genl_info_net(info), info->snd_portid,
                    ovs_dp_datapath_multicast_group.id, info->nlhdr,
                    GFP_KERNEL);

        return 0;
}
static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
        struct sk_buff *reply;
        struct datapath *dp;

        dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
        if (IS_ERR(dp))
                return PTR_ERR(dp);

        reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
                                      info->snd_seq, OVS_DP_CMD_NEW);
        if (IS_ERR(reply))
                return PTR_ERR(reply);

        return genlmsg_reply(reply, info);
}
static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
        struct datapath *dp;
        int skip = cb->args[0];
        int i = 0;

        list_for_each_entry(dp, &ovs_net->dps, list_node) {
                if (i >= skip &&
                    ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
                                         cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                         OVS_DP_CMD_NEW) < 0)
                        break;
                i++;
        }

        cb->args[0] = i;

        return skb->len;
}
static struct genl_ops dp_datapath_genl_ops[] = {
        { .cmd = OVS_DP_CMD_NEW,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = datapath_policy,
          .doit = ovs_dp_cmd_new
        },
        { .cmd = OVS_DP_CMD_DEL,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = datapath_policy,
          .doit = ovs_dp_cmd_del
        },
        { .cmd = OVS_DP_CMD_GET,
          .flags = 0, /* OK for unprivileged users. */
          .policy = datapath_policy,
          .doit = ovs_dp_cmd_get,
          .dumpit = ovs_dp_cmd_dump
        },
        { .cmd = OVS_DP_CMD_SET,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = datapath_policy,
          .doit = ovs_dp_cmd_set,
        },
};
static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
        [OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
        [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
        [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
        [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
        [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
        [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
};

static struct genl_family dp_vport_genl_family = {
        .id = GENL_ID_GENERATE,
        .hdrsize = sizeof(struct ovs_header),
        .name = OVS_VPORT_FAMILY,
        .version = OVS_VPORT_VERSION,
        .maxattr = OVS_VPORT_ATTR_MAX,
};

struct genl_multicast_group ovs_dp_vport_multicast_group = {
        .name = OVS_VPORT_MCGROUP
};
/* Called with RTNL lock or RCU read lock. */
static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
                                   u32 portid, u32 seq, u32 flags, u8 cmd)
{
        struct ovs_header *ovs_header;
        struct ovs_vport_stats vport_stats;
        int err;

        ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
                                 flags, cmd);
        if (!ovs_header)
                return -EMSGSIZE;

        ovs_header->dp_ifindex = get_dpifindex(vport->dp);

        if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
            nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
            nla_put_string(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport)) ||
            nla_put_u32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_portid))
                goto nla_put_failure;

        ovs_vport_get_stats(vport, &vport_stats);
        if (nla_put(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats),
                    &vport_stats))
                goto nla_put_failure;

        err = ovs_vport_get_options(vport, skb);
        if (err == -EMSGSIZE)
                goto error;

        return genlmsg_end(skb, ovs_header);

nla_put_failure:
        err = -EMSGSIZE;
error:
        genlmsg_cancel(skb, ovs_header);
        return err;
}
/* Called with RTNL lock or RCU read lock. */
struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid,
                                         u32 seq, u8 cmd)
{
        struct sk_buff *skb;
        int retval;

        skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
        if (!skb)
                return ERR_PTR(-ENOMEM);

        retval = ovs_vport_cmd_fill_info(vport, skb, portid, seq, 0, cmd);
        if (retval < 0) {
                kfree_skb(skb);
                return ERR_PTR(retval);
        }
        return skb;
}
/* Called with RTNL lock or RCU read lock. */
static struct vport *lookup_vport(struct net *net,
                                  struct ovs_header *ovs_header,
                                  struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
{
        struct datapath *dp;
        struct vport *vport;

        if (a[OVS_VPORT_ATTR_NAME]) {
                vport = ovs_vport_locate(net, nla_data(a[OVS_VPORT_ATTR_NAME]));
                if (!vport)
                        return ERR_PTR(-ENODEV);
                if (ovs_header->dp_ifindex &&
                    ovs_header->dp_ifindex != get_dpifindex(vport->dp))
                        return ERR_PTR(-ENODEV);
                return vport;
        } else if (a[OVS_VPORT_ATTR_PORT_NO]) {
                u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);

                if (port_no >= DP_MAX_PORTS)
                        return ERR_PTR(-EFBIG);

                dp = get_dp(net, ovs_header->dp_ifindex);
                if (!dp)
                        return ERR_PTR(-ENODEV);

                vport = ovs_vport_rtnl_rcu(dp, port_no);
                if (!vport)
                        return ERR_PTR(-ENOENT);
                return vport;
        } else
                return ERR_PTR(-EINVAL);
}
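
/*
 * lookup_vport() accepts either OVS_VPORT_ATTR_NAME or
 * OVS_VPORT_ATTR_PORT_NO, mirroring how userspace may address a vport; the
 * distinct error codes (ENODEV, ENOENT, EFBIG, EINVAL) let callers tell
 * "no such device" apart from a malformed request.
 */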
static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct ovs_header *ovs_header = info->userhdr;
        struct vport_parms parms;
        struct sk_buff *reply;
        struct vport *vport;
        struct datapath *dp;
        u32 port_no;
        int err;

        err = -EINVAL;
        if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
            !a[OVS_VPORT_ATTR_UPCALL_PID])
                goto exit;

        rtnl_lock();
        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        err = -ENODEV;
        if (!dp)
                goto exit_unlock;

        if (a[OVS_VPORT_ATTR_PORT_NO]) {
                port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);

                err = -EFBIG;
                if (port_no >= DP_MAX_PORTS)
                        goto exit_unlock;

                vport = ovs_vport_rtnl_rcu(dp, port_no);
                err = -EBUSY;
                if (vport)
                        goto exit_unlock;
        } else {
                for (port_no = 1; ; port_no++) {
                        if (port_no >= DP_MAX_PORTS) {
                                err = -EFBIG;
                                goto exit_unlock;
                        }
                        vport = ovs_vport_rtnl(dp, port_no);
                        if (!vport)
                                break;
                }
        }

        parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
        parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
        parms.options = a[OVS_VPORT_ATTR_OPTIONS];
        parms.dp = dp;
        parms.port_no = port_no;
        parms.upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);

        vport = new_vport(&parms);
        err = PTR_ERR(vport);
        if (IS_ERR(vport))
                goto exit_unlock;

        err = 0;
        reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq,
                                         OVS_VPORT_CMD_NEW);
        if (IS_ERR(reply)) {
                err = PTR_ERR(reply);
                ovs_dp_detach_port(vport);
                goto exit_unlock;
        }
        genl_notify(reply, genl_info_net(info), info->snd_portid,
                    ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);

exit_unlock:
        rtnl_unlock();
exit:
        return err;
}
static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct sk_buff *reply;
        struct vport *vport;
        int err;

        rtnl_lock();
        vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
        err = PTR_ERR(vport);
        if (IS_ERR(vport))
                goto exit_unlock;

        err = 0;
        if (a[OVS_VPORT_ATTR_TYPE] &&
            nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type)
                err = -EINVAL;

        if (!err && a[OVS_VPORT_ATTR_OPTIONS])
                err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
        if (err)
                goto exit_unlock;
        if (a[OVS_VPORT_ATTR_UPCALL_PID])
                vport->upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);

        reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq,
                                         OVS_VPORT_CMD_NEW);
        if (IS_ERR(reply)) {
                netlink_set_err(sock_net(skb->sk)->genl_sock, 0,
                                ovs_dp_vport_multicast_group.id, PTR_ERR(reply));
                goto exit_unlock;
        }

        genl_notify(reply, genl_info_net(info), info->snd_portid,
                    ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);

exit_unlock:
        rtnl_unlock();
        return err;
}
static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct sk_buff *reply;
        struct vport *vport;
        int err;

        rtnl_lock();
        vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
        err = PTR_ERR(vport);
        if (IS_ERR(vport))
                goto exit_unlock;

        if (vport->port_no == OVSP_LOCAL) {
                err = -EINVAL;
                goto exit_unlock;
        }

        reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq,
                                         OVS_VPORT_CMD_DEL);
        err = PTR_ERR(reply);
        if (IS_ERR(reply))
                goto exit_unlock;

        ovs_dp_detach_port(vport);

        genl_notify(reply, genl_info_net(info), info->snd_portid,
                    ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);

exit_unlock:
        rtnl_unlock();
        return err;
}
static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct ovs_header *ovs_header = info->userhdr;
        struct sk_buff *reply;
        struct vport *vport;
        int err;

        rcu_read_lock();
        vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
        err = PTR_ERR(vport);
        if (IS_ERR(vport))
                goto exit_unlock;

        reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq,
                                         OVS_VPORT_CMD_NEW);
        err = PTR_ERR(reply);
        if (IS_ERR(reply))
                goto exit_unlock;

        rcu_read_unlock();

        return genlmsg_reply(reply, info);

exit_unlock:
        rcu_read_unlock();
        return err;
}
static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
        struct datapath *dp;
        int bucket = cb->args[0], skip = cb->args[1];
        int i, j = 0;

        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        if (!dp)
                return -ENODEV;

        rcu_read_lock();
        for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
                struct vport *vport;
                struct hlist_node *n;

                j = 0;
                hlist_for_each_entry_rcu(vport, n, &dp->ports[i], dp_hash_node) {
                        if (j >= skip &&
                            ovs_vport_cmd_fill_info(vport, skb,
                                                    NETLINK_CB(cb->skb).portid,
                                                    cb->nlh->nlmsg_seq,
                                                    NLM_F_MULTI,
                                                    OVS_VPORT_CMD_NEW) < 0)
                                goto out;

                        j++;
                }
                skip = 0;
        }
out:
        rcu_read_unlock();

        cb->args[0] = i;
        cb->args[1] = j;

        return skb->len;
}
static struct genl_ops dp_vport_genl_ops[] = {
        { .cmd = OVS_VPORT_CMD_NEW,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = vport_policy,
          .doit = ovs_vport_cmd_new
        },
        { .cmd = OVS_VPORT_CMD_DEL,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = vport_policy,
          .doit = ovs_vport_cmd_del
        },
        { .cmd = OVS_VPORT_CMD_GET,
          .flags = 0, /* OK for unprivileged users. */
          .policy = vport_policy,
          .doit = ovs_vport_cmd_get,
          .dumpit = ovs_vport_cmd_dump
        },
        { .cmd = OVS_VPORT_CMD_SET,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = vport_policy,
          .doit = ovs_vport_cmd_set,
        },
};
struct genl_family_and_ops {
        struct genl_family *family;
        struct genl_ops *ops;
        int n_ops;
        struct genl_multicast_group *group;
};

static const struct genl_family_and_ops dp_genl_families[] = {
        { &dp_datapath_genl_family,
          dp_datapath_genl_ops, ARRAY_SIZE(dp_datapath_genl_ops),
          &ovs_dp_datapath_multicast_group },
        { &dp_vport_genl_family,
          dp_vport_genl_ops, ARRAY_SIZE(dp_vport_genl_ops),
          &ovs_dp_vport_multicast_group },
        { &dp_flow_genl_family,
          dp_flow_genl_ops, ARRAY_SIZE(dp_flow_genl_ops),
          &ovs_dp_flow_multicast_group },
        { &dp_packet_genl_family,
          dp_packet_genl_ops, ARRAY_SIZE(dp_packet_genl_ops),
          NULL },
};
static void dp_unregister_genl(int n_families)
{
        int i;

        for (i = 0; i < n_families; i++)
                genl_unregister_family(dp_genl_families[i].family);
}

static int dp_register_genl(void)
{
        int n_registered;
        int err;
        int i;

        n_registered = 0;
        for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
                const struct genl_family_and_ops *f = &dp_genl_families[i];

                err = genl_register_family_with_ops(f->family, f->ops,
                                                    f->n_ops);
                if (err)
                        goto error;
                n_registered++;

                if (f->group) {
                        err = genl_register_mc_group(f->family, f->group);
                        if (err)
                                goto error;
                }
        }

        return 0;

error:
        dp_unregister_genl(n_registered);
        return err;
}
static void rehash_flow_table(struct work_struct *work)
{
        struct datapath *dp;
        struct net *net;

        for_each_net(net) {
                struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

                list_for_each_entry(dp, &ovs_net->dps, list_node) {
                        struct flow_table *old_table = genl_dereference(dp->table);
                        struct flow_table *new_table;

                        new_table = ovs_flow_tbl_rehash(old_table);
                        if (!IS_ERR(new_table)) {
                                rcu_assign_pointer(dp->table, new_table);
                                ovs_flow_tbl_deferred_destroy(old_table);
                        }
                }
        }

        schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
}
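
/*
 * rehash_flow_table() runs from a delayed work item every
 * REHASH_FLOW_INTERVAL and swaps each datapath's flow table for a freshly
 * rehashed copy, with the old table freed only after an RCU grace period
 * via ovs_flow_tbl_deferred_destroy().
 */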
static int __net_init ovs_init_net(struct net *net)
{
        struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

        INIT_LIST_HEAD(&ovs_net->dps);
        return 0;
}
static void __net_exit ovs_exit_net(struct net *net)
{
        struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
        struct datapath *dp, *dp_next;

        list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
                __dp_destroy(dp);
}
static struct pernet_operations ovs_net_ops = {
        .init = ovs_init_net,
        .exit = ovs_exit_net,
        .id   = &ovs_net_id,
        .size = sizeof(struct ovs_net),
};
static int __init dp_init(void)
{
        struct sk_buff *dummy_skb;
        int err;

        BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > sizeof(dummy_skb->cb));

        pr_info("Open vSwitch switching datapath\n");

        err = ovs_flow_init();
        if (err)
                goto error;

        err = ovs_vport_init();
        if (err)
                goto error_flow_exit;

        err = register_pernet_device(&ovs_net_ops);
        if (err)
                goto error_vport_exit;

        err = register_netdevice_notifier(&ovs_dp_device_notifier);
        if (err)
                goto error_netns_exit;

        err = dp_register_genl();
        if (err)
                goto error_unreg_notifier;

        schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);

        return 0;

error_unreg_notifier:
        unregister_netdevice_notifier(&ovs_dp_device_notifier);
error_netns_exit:
        unregister_pernet_device(&ovs_net_ops);
error_vport_exit:
        ovs_vport_exit();
error_flow_exit:
        ovs_flow_exit();
error:
        return err;
}
static void dp_cleanup(void)
{
        cancel_delayed_work_sync(&rehash_flow_wq);
        dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
        unregister_netdevice_notifier(&ovs_dp_device_notifier);
        unregister_pernet_device(&ovs_net_ops);
}

module_init(dp_init);
module_exit(dp_cleanup);
MODULE_DESCRIPTION("Open vSwitch switching datapath");
MODULE_LICENSE("GPL");