/*
 * Copyright (c) 2007-2012 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/etherdevice.h>
#include <linux/genetlink.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/ethtool.h>
#include <linux/wait.h>
#include <asm/div64.h>
#include <linux/highmem.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/openvswitch.h>
#include <linux/rculist.h>
#include <linux/dmi.h>
#include <linux/workqueue.h>
#include <net/genetlink.h>

#include "datapath.h"
#include "flow.h"
#include "vport-internal_dev.h"
/**
 * DOC: Locking:
 *
 * Writes to device state (add/remove datapath, port, set operations on vports,
 * etc.) are protected by RTNL.
 *
 * Writes to other state (flow table modifications, set miscellaneous datapath
 * parameters, etc.) are protected by genl_mutex.  The RTNL lock nests inside
 * genl_mutex.
 *
 * Reads are protected by RCU.
 *
 * There are a few special cases (mostly stats) that have their own
 * synchronization but they nest under all of the above and don't interact with
 * each other.
 */
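/*
 * Illustrative sketch (not part of the upstream code): under the rules
 * above, a write-path Generic Netlink handler runs with genl_mutex already
 * held by the genl core and takes RTNL inside it, roughly:
 *
 *	static int some_cmd_doit(struct sk_buff *skb, struct genl_info *info)
 *	{
 *		rtnl_lock();                (RTNL nests inside genl_mutex)
 *		...modify datapath or vport state...
 *		rtnl_unlock();
 *		return 0;
 *	}
 *
 * "some_cmd_doit" is a hypothetical name used only for illustration; see
 * ovs_dp_cmd_new() and ovs_vport_cmd_new() below for real examples.
 */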
/* Global list of datapaths to enable dumping them all out.
 * Protected by genl_mutex.
 */
static LIST_HEAD(dps);
#define REHASH_FLOW_INTERVAL (10 * 60 * HZ)
static void rehash_flow_table(struct work_struct *work);
static DECLARE_DELAYED_WORK(rehash_flow_wq, rehash_flow_table);

static struct vport *new_vport(const struct vport_parms *);
static int queue_gso_packets(int dp_ifindex, struct sk_buff *,
			     const struct dp_upcall_info *);
static int queue_userspace_packet(int dp_ifindex, struct sk_buff *,
				  const struct dp_upcall_info *);
/* Must be called with rcu_read_lock, genl_mutex, or RTNL lock. */
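/* (Any one of the three is sufficient: the ifindex -> datapath mapping is
 * only changed with genl_mutex and RTNL held and is published via RCU, so
 * holders of either lock, or RCU readers, see a stable view.) */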
static struct datapath *get_dp(int dp_ifindex)
{
	struct datapath *dp = NULL;
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(&init_net, dp_ifindex);
	if (dev) {
		struct vport *vport = ovs_internal_dev_get_vport(dev);
		if (vport)
			dp = vport->dp;
	}
	rcu_read_unlock();

	return dp;
}
/* Must be called with rcu_read_lock or RTNL lock. */
const char *ovs_dp_name(const struct datapath *dp)
{
	struct vport *vport = rcu_dereference_rtnl(dp->ports[OVSP_LOCAL]);
	return vport->ops->get_name(vport);
}
static int get_dpifindex(struct datapath *dp)
{
	struct vport *local;
	int ifindex;

	rcu_read_lock();

	local = rcu_dereference(dp->ports[OVSP_LOCAL]);
	if (local)
		ifindex = local->ops->get_ifindex(local);
	else
		ifindex = 0;

	rcu_read_unlock();

	return ifindex;
}
static void destroy_dp_rcu(struct rcu_head *rcu)
{
	struct datapath *dp = container_of(rcu, struct datapath, rcu);

	ovs_flow_tbl_destroy((__force struct flow_table *)dp->table);
	free_percpu(dp->stats_percpu);
	kfree(dp);
}
/* Called with RTNL lock and genl_lock. */
static struct vport *new_vport(const struct vport_parms *parms)
{
	struct vport *vport;

	vport = ovs_vport_add(parms);
	if (!IS_ERR(vport)) {
		struct datapath *dp = parms->dp;

		rcu_assign_pointer(dp->ports[parms->port_no], vport);
		list_add(&vport->node, &dp->port_list);
	}

	return vport;
}
/* Called with RTNL lock. */
void ovs_dp_detach_port(struct vport *p)
{
	ASSERT_RTNL();

	/* First drop references to device. */
	list_del(&p->node);
	rcu_assign_pointer(p->dp->ports[p->port_no], NULL);

	/* Then destroy it. */
	ovs_vport_del(p);
}
/* Must be called with rcu_read_lock. */
void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
{
	struct datapath *dp = p->dp;
	struct sw_flow *flow;
	struct dp_stats_percpu *stats;
	struct sw_flow_key key;
	u64 *stats_counter;
	int error;
	int key_len;

	stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());

	/* Extract flow from 'skb' into 'key'. */
	error = ovs_flow_extract(skb, p->port_no, &key, &key_len);
	if (unlikely(error)) {
		kfree_skb(skb);
		return;
	}

	/* Look up flow. */
	flow = ovs_flow_tbl_lookup(rcu_dereference(dp->table), &key, key_len);
	if (unlikely(!flow)) {
		struct dp_upcall_info upcall;

		upcall.cmd = OVS_PACKET_CMD_MISS;
		upcall.key = &key;
		upcall.userdata = NULL;
		upcall.pid = p->upcall_pid;
		ovs_dp_upcall(dp, skb, &upcall);
		consume_skb(skb);
		stats_counter = &stats->n_missed;
		goto out;
	}

	OVS_CB(skb)->flow = flow;

	stats_counter = &stats->n_hit;
	ovs_flow_used(OVS_CB(skb)->flow, skb);
	ovs_execute_actions(dp, skb);

out:
	/* Update datapath statistics. */
	u64_stats_update_begin(&stats->sync);
	(*stats_counter)++;
	u64_stats_update_end(&stats->sync);
}
static struct genl_family dp_packet_genl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_PACKET_FAMILY,
	.version = OVS_PACKET_VERSION,
	.maxattr = OVS_PACKET_ATTR_MAX
};
int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
		  const struct dp_upcall_info *upcall_info)
{
	struct dp_stats_percpu *stats;
	int dp_ifindex;
	int err;

	if (upcall_info->pid == 0) {
		err = -ENOTCONN;
		goto err;
	}

	dp_ifindex = get_dpifindex(dp);
	if (!dp_ifindex) {
		err = -ENODEV;
		goto err;
	}

	if (!skb_is_gso(skb))
		err = queue_userspace_packet(dp_ifindex, skb, upcall_info);
	else
		err = queue_gso_packets(dp_ifindex, skb, upcall_info);
	if (err)
		goto err;

	return 0;

err:
	stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());

	u64_stats_update_begin(&stats->sync);
	stats->n_lost++;
	u64_stats_update_end(&stats->sync);

	return err;
}
static int queue_gso_packets(int dp_ifindex, struct sk_buff *skb,
			     const struct dp_upcall_info *upcall_info)
{
	unsigned short gso_type = skb_shinfo(skb)->gso_type;
	struct dp_upcall_info later_info;
	struct sw_flow_key later_key;
	struct sk_buff *segs, *nskb;
	int err;

	segs = skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM);
	if (IS_ERR(segs))
		return PTR_ERR(segs);

	/* Queue all of the segments. */
	skb = segs;
	do {
		err = queue_userspace_packet(dp_ifindex, skb, upcall_info);
		if (err)
			break;

		if (skb == segs && gso_type & SKB_GSO_UDP) {
			/* The initial flow key extracted by ovs_flow_extract()
			 * in this case is for a first fragment, so we need to
			 * properly mark later fragments.
			 */
			later_key = *upcall_info->key;
			later_key.ip.frag = OVS_FRAG_TYPE_LATER;

			later_info = *upcall_info;
			later_info.key = &later_key;
			upcall_info = &later_info;
		}
	} while ((skb = skb->next));

	/* Free all of the segments. */
	skb = segs;
	do {
		nskb = skb->next;
		if (err)
			kfree_skb(skb);
		else
			consume_skb(skb);
	} while ((skb = nskb));
	return err;
}
static int queue_userspace_packet(int dp_ifindex, struct sk_buff *skb,
				  const struct dp_upcall_info *upcall_info)
{
	struct ovs_header *upcall;
	struct sk_buff *nskb = NULL;
	struct sk_buff *user_skb; /* to be queued to userspace */
	struct nlattr *nla;
	unsigned int len;
	int err;

	if (vlan_tx_tag_present(skb)) {
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			return -ENOMEM;

		nskb = __vlan_put_tag(nskb, vlan_tx_tag_get(nskb));
		if (!nskb)
			return -ENOMEM;

		nskb->vlan_tci = 0;
		skb = nskb;
	}

	if (nla_attr_size(skb->len) > USHRT_MAX) {
		err = -EFBIG;
		goto out;
	}

	len = sizeof(struct ovs_header);
	len += nla_total_size(skb->len);
	len += nla_total_size(FLOW_BUFSIZE);
	if (upcall_info->cmd == OVS_PACKET_CMD_ACTION)
		len += nla_total_size(8);

	user_skb = genlmsg_new(len, GFP_ATOMIC);
	if (!user_skb) {
		err = -ENOMEM;
		goto out;
	}

	upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
			     0, upcall_info->cmd);
	upcall->dp_ifindex = dp_ifindex;

	nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
	ovs_flow_to_nlattrs(upcall_info->key, user_skb);
	nla_nest_end(user_skb, nla);

	if (upcall_info->userdata)
		nla_put_u64(user_skb, OVS_PACKET_ATTR_USERDATA,
			    nla_get_u64(upcall_info->userdata));

	nla = __nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, skb->len);

	skb_copy_and_csum_dev(skb, nla_data(nla));

	err = genlmsg_unicast(&init_net, user_skb, upcall_info->pid);

out:
	kfree_skb(nskb);
	return err;
}
/* Called with genl_mutex. */
static int flush_flows(int dp_ifindex)
{
	struct flow_table *old_table;
	struct flow_table *new_table;
	struct datapath *dp;

	dp = get_dp(dp_ifindex);
	if (!dp)
		return -ENODEV;

	old_table = genl_dereference(dp->table);
	new_table = ovs_flow_tbl_alloc(TBL_MIN_BUCKETS);
	if (!new_table)
		return -ENOMEM;

	rcu_assign_pointer(dp->table, new_table);

	ovs_flow_tbl_deferred_destroy(old_table);
	return 0;
}
static int validate_actions(const struct nlattr *attr,
			    const struct sw_flow_key *key, int depth);
static int validate_sample(const struct nlattr *attr,
			   const struct sw_flow_key *key, int depth)
{
	const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1];
	const struct nlattr *probability, *actions;
	const struct nlattr *a;
	int rem;

	memset(attrs, 0, sizeof(attrs));
	nla_for_each_nested(a, attr, rem) {
		int type = nla_type(a);
		if (!type || type > OVS_SAMPLE_ATTR_MAX || attrs[type])
			return -EINVAL;
		attrs[type] = a;
	}
	if (rem)
		return -EINVAL;

	probability = attrs[OVS_SAMPLE_ATTR_PROBABILITY];
	if (!probability || nla_len(probability) != sizeof(u32))
		return -EINVAL;

	actions = attrs[OVS_SAMPLE_ATTR_ACTIONS];
	if (!actions || (nla_len(actions) && nla_len(actions) < NLA_HDRLEN))
		return -EINVAL;
	return validate_actions(actions, key, depth + 1);
}
static int validate_tp_port(const struct sw_flow_key *flow_key)
{
	if (flow_key->eth.type == htons(ETH_P_IP)) {
		if (flow_key->ipv4.tp.src && flow_key->ipv4.tp.dst)
			return 0;
	} else if (flow_key->eth.type == htons(ETH_P_IPV6)) {
		if (flow_key->ipv6.tp.src && flow_key->ipv6.tp.dst)
			return 0;
	}

	return -EINVAL;
}
static int validate_set(const struct nlattr *a,
			const struct sw_flow_key *flow_key)
{
	const struct nlattr *ovs_key = nla_data(a);
	int key_type = nla_type(ovs_key);

	/* There can be only one key in a set action. */
	if (nla_total_size(nla_len(ovs_key)) != nla_len(a))
		return -EINVAL;

	if (key_type > OVS_KEY_ATTR_MAX ||
	    nla_len(ovs_key) != ovs_key_lens[key_type])
		return -EINVAL;

	switch (key_type) {
	const struct ovs_key_ipv4 *ipv4_key;

	case OVS_KEY_ATTR_PRIORITY:
	case OVS_KEY_ATTR_ETHERNET:
		break;

	case OVS_KEY_ATTR_IPV4:
		if (flow_key->eth.type != htons(ETH_P_IP))
			return -EINVAL;

		if (!flow_key->ipv4.addr.src || !flow_key->ipv4.addr.dst)
			return -EINVAL;

		ipv4_key = nla_data(ovs_key);
		if (ipv4_key->ipv4_proto != flow_key->ip.proto)
			return -EINVAL;

		if (ipv4_key->ipv4_frag != flow_key->ip.frag)
			return -EINVAL;

		break;

	case OVS_KEY_ATTR_TCP:
		if (flow_key->ip.proto != IPPROTO_TCP)
			return -EINVAL;

		return validate_tp_port(flow_key);

	case OVS_KEY_ATTR_UDP:
		if (flow_key->ip.proto != IPPROTO_UDP)
			return -EINVAL;

		return validate_tp_port(flow_key);

	default:
		return -EINVAL;
	}

	return 0;
}
static int validate_userspace(const struct nlattr *attr)
{
	static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] = {
		[OVS_USERSPACE_ATTR_PID] = { .type = NLA_U32 },
		[OVS_USERSPACE_ATTR_USERDATA] = { .type = NLA_U64 },
	};
	struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1];
	int error;

	error = nla_parse_nested(a, OVS_USERSPACE_ATTR_MAX,
				 attr, userspace_policy);
	if (error)
		return error;

	if (!a[OVS_USERSPACE_ATTR_PID] ||
	    !nla_get_u32(a[OVS_USERSPACE_ATTR_PID]))
		return -EINVAL;

	return 0;
}
static int validate_actions(const struct nlattr *attr,
			    const struct sw_flow_key *key, int depth)
{
	const struct nlattr *a;
	int rem, err;

	if (depth >= SAMPLE_ACTION_DEPTH)
		return -EOVERFLOW;

	nla_for_each_nested(a, attr, rem) {
		/* Expected argument lengths, (u32)-1 for variable length. */
		static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = {
			[OVS_ACTION_ATTR_OUTPUT] = sizeof(u32),
			[OVS_ACTION_ATTR_USERSPACE] = (u32)-1,
			[OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan),
			[OVS_ACTION_ATTR_POP_VLAN] = 0,
			[OVS_ACTION_ATTR_SET] = (u32)-1,
			[OVS_ACTION_ATTR_SAMPLE] = (u32)-1
		};
		const struct ovs_action_push_vlan *vlan;
		int type = nla_type(a);

		if (type > OVS_ACTION_ATTR_MAX ||
		    (action_lens[type] != nla_len(a) &&
		     action_lens[type] != (u32)-1))
			return -EINVAL;

		switch (type) {
		case OVS_ACTION_ATTR_UNSPEC:
			return -EINVAL;

		case OVS_ACTION_ATTR_USERSPACE:
			err = validate_userspace(a);
			if (err)
				return err;
			break;

		case OVS_ACTION_ATTR_OUTPUT:
			if (nla_get_u32(a) >= DP_MAX_PORTS)
				return -EINVAL;
			break;

		case OVS_ACTION_ATTR_POP_VLAN:
			break;

		case OVS_ACTION_ATTR_PUSH_VLAN:
			vlan = nla_data(a);
			if (vlan->vlan_tpid != htons(ETH_P_8021Q))
				return -EINVAL;
			if (!(vlan->vlan_tci & htons(VLAN_TAG_PRESENT)))
				return -EINVAL;
			break;

		case OVS_ACTION_ATTR_SET:
			err = validate_set(a, key);
			if (err)
				return err;
			break;

		case OVS_ACTION_ATTR_SAMPLE:
			err = validate_sample(a, key, depth);
			if (err)
				return err;
			break;

		default:
			return -EINVAL;
		}
	}

	if (rem > 0)
		return -EINVAL;

	return 0;
}
static void clear_stats(struct sw_flow *flow)
{
	flow->used = 0;
	flow->tcp_flags = 0;
	flow->packet_count = 0;
	flow->byte_count = 0;
}
static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
{
	struct ovs_header *ovs_header = info->userhdr;
	struct nlattr **a = info->attrs;
	struct sw_flow_actions *acts;
	struct sk_buff *packet;
	struct sw_flow *flow;
	struct datapath *dp;
	struct ethhdr *eth;
	int len;
	int err;
	int key_len;

	err = -EINVAL;
	if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
	    !a[OVS_PACKET_ATTR_ACTIONS] ||
	    nla_len(a[OVS_PACKET_ATTR_PACKET]) < ETH_HLEN)
		goto err;

	len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
	packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
	err = -ENOMEM;
	if (!packet)
		goto err;
	skb_reserve(packet, NET_IP_ALIGN);

	memcpy(__skb_put(packet, len), nla_data(a[OVS_PACKET_ATTR_PACKET]), len);

	skb_reset_mac_header(packet);
	eth = eth_hdr(packet);

	/* Normally, setting the skb 'protocol' field would be handled by a
	 * call to eth_type_trans(), but it assumes there's a sending
	 * device, which we may not have. */
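	/* Values of h_proto of 1536 (0x0600) or greater are EtherTypes;
	 * smaller values are 802.3 frame lengths, hence the fallback to
	 * ETH_P_802_2 below. */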
	if (ntohs(eth->h_proto) >= 1536)
		packet->protocol = eth->h_proto;
	else
		packet->protocol = htons(ETH_P_802_2);

	/* Build an sw_flow for sending this packet. */
	flow = ovs_flow_alloc();
	err = PTR_ERR(flow);
	if (IS_ERR(flow))
		goto err_kfree_skb;

	err = ovs_flow_extract(packet, -1, &flow->key, &key_len);
	if (err)
		goto err_flow_free;

	err = ovs_flow_metadata_from_nlattrs(&flow->key.phy.priority,
					     &flow->key.phy.in_port,
					     a[OVS_PACKET_ATTR_KEY]);
	if (err)
		goto err_flow_free;

	err = validate_actions(a[OVS_PACKET_ATTR_ACTIONS], &flow->key, 0);
	if (err)
		goto err_flow_free;

	flow->hash = ovs_flow_hash(&flow->key, key_len);

	acts = ovs_flow_actions_alloc(a[OVS_PACKET_ATTR_ACTIONS]);
	err = PTR_ERR(acts);
	if (IS_ERR(acts))
		goto err_flow_free;
	rcu_assign_pointer(flow->sf_acts, acts);

	OVS_CB(packet)->flow = flow;
	packet->priority = flow->key.phy.priority;

	rcu_read_lock();
	dp = get_dp(ovs_header->dp_ifindex);
	err = -ENODEV;
	if (!dp)
		goto err_unlock;

	local_bh_disable();
	err = ovs_execute_actions(dp, packet);
	local_bh_enable();
	rcu_read_unlock();

	ovs_flow_free(flow);
	return err;

err_unlock:
	rcu_read_unlock();
err_flow_free:
	ovs_flow_free(flow);
err_kfree_skb:
	kfree_skb(packet);
err:
	return err;
}
static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
	[OVS_PACKET_ATTR_PACKET] = { .type = NLA_UNSPEC },
	[OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
	[OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
};
static struct genl_ops dp_packet_genl_ops[] = {
	{ .cmd = OVS_PACKET_CMD_EXECUTE,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = packet_policy,
	  .doit = ovs_packet_cmd_execute
	}
};
static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats)
{
	int i;
	struct flow_table *table = genl_dereference(dp->table);

	stats->n_flows = ovs_flow_tbl_count(table);

	stats->n_hit = stats->n_missed = stats->n_lost = 0;
	for_each_possible_cpu(i) {
		const struct dp_stats_percpu *percpu_stats;
		struct dp_stats_percpu local_stats;
		unsigned int start;

		percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
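		/* u64_stats_fetch_begin_bh()/..._retry_bh() form a seqcount-
		 * style retry loop, so the 64-bit counters are read
		 * consistently even on 32-bit SMP without locking the
		 * per-packet fast path. */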
		do {
			start = u64_stats_fetch_begin_bh(&percpu_stats->sync);
			local_stats = *percpu_stats;
		} while (u64_stats_fetch_retry_bh(&percpu_stats->sync, start));

		stats->n_hit += local_stats.n_hit;
		stats->n_missed += local_stats.n_missed;
		stats->n_lost += local_stats.n_lost;
	}
}
static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
	[OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
	[OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
	[OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
};
static struct genl_family dp_flow_genl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_FLOW_FAMILY,
	.version = OVS_FLOW_VERSION,
	.maxattr = OVS_FLOW_ATTR_MAX
};

static struct genl_multicast_group ovs_dp_flow_multicast_group = {
	.name = OVS_FLOW_MCGROUP
};
/* Called with genl_lock. */
static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
				  struct sk_buff *skb, u32 pid,
				  u32 seq, u32 flags, u8 cmd)
{
	const int skb_orig_len = skb->len;
	const struct sw_flow_actions *sf_acts;
	struct ovs_flow_stats stats;
	struct ovs_header *ovs_header;
	struct nlattr *nla;
	unsigned long used;
	u8 tcp_flags;
	int err;

	sf_acts = rcu_dereference_protected(flow->sf_acts,
					    lockdep_genl_is_held());

	ovs_header = genlmsg_put(skb, pid, seq, &dp_flow_genl_family, flags, cmd);
	if (!ovs_header)
		return -EMSGSIZE;

	ovs_header->dp_ifindex = get_dpifindex(dp);

	nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
	if (!nla)
		goto nla_put_failure;
	err = ovs_flow_to_nlattrs(&flow->key, skb);
	if (err)
		goto error;
	nla_nest_end(skb, nla);

	spin_lock_bh(&flow->lock);
	used = flow->used;
	stats.n_packets = flow->packet_count;
	stats.n_bytes = flow->byte_count;
	tcp_flags = flow->tcp_flags;
	spin_unlock_bh(&flow->lock);

	if (used &&
	    nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)))
		goto nla_put_failure;

	if (stats.n_packets &&
	    nla_put(skb, OVS_FLOW_ATTR_STATS,
		    sizeof(struct ovs_flow_stats), &stats))
		goto nla_put_failure;

	if (tcp_flags &&
	    nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags))
		goto nla_put_failure;

	/* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
	 * this is the first flow to be dumped into 'skb'.  This is unusual for
	 * Netlink but individual action lists can be longer than
	 * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
	 * The userspace caller can always fetch the actions separately if it
	 * really wants them.  (Most userspace callers in fact don't care.)
	 *
	 * This can only fail for dump operations because the skb is always
	 * properly sized for single flows.
	 */
	err = nla_put(skb, OVS_FLOW_ATTR_ACTIONS, sf_acts->actions_len,
		      sf_acts->actions);
	if (err < 0 && skb_orig_len)
		goto error;

	return genlmsg_end(skb, ovs_header);

nla_put_failure:
	err = -EMSGSIZE;
error:
	genlmsg_cancel(skb, ovs_header);
	return err;
}
static struct sk_buff *ovs_flow_cmd_alloc_info(struct sw_flow *flow)
{
	const struct sw_flow_actions *sf_acts;
	int len;

	sf_acts = rcu_dereference_protected(flow->sf_acts,
					    lockdep_genl_is_held());

	/* OVS_FLOW_ATTR_KEY */
	len = nla_total_size(FLOW_BUFSIZE);
	/* OVS_FLOW_ATTR_ACTIONS */
	len += nla_total_size(sf_acts->actions_len);
	/* OVS_FLOW_ATTR_STATS */
	len += nla_total_size(sizeof(struct ovs_flow_stats));
	/* OVS_FLOW_ATTR_TCP_FLAGS */
	len += nla_total_size(1);
	/* OVS_FLOW_ATTR_USED */
	len += nla_total_size(8);

	len += NLMSG_ALIGN(sizeof(struct ovs_header));

	return genlmsg_new(len, GFP_KERNEL);
}
static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow,
					       struct datapath *dp,
					       u32 pid, u32 seq, u8 cmd)
{
	struct sk_buff *skb;
	int retval;

	skb = ovs_flow_cmd_alloc_info(flow);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	retval = ovs_flow_cmd_fill_info(flow, dp, skb, pid, seq, 0, cmd);
	BUG_ON(retval < 0);
	return skb;
}
static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct sw_flow_key key;
	struct sw_flow *flow;
	struct sk_buff *reply;
	struct datapath *dp;
	struct flow_table *table;
	int error;
	int key_len;

	/* Extract key. */
	error = -EINVAL;
	if (!a[OVS_FLOW_ATTR_KEY])
		goto error;
	error = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
	if (error)
		goto error;

	/* Validate actions. */
	if (a[OVS_FLOW_ATTR_ACTIONS]) {
		error = validate_actions(a[OVS_FLOW_ATTR_ACTIONS], &key, 0);
		if (error)
			goto error;
	} else if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW) {
		error = -EINVAL;
		goto error;
	}

	dp = get_dp(ovs_header->dp_ifindex);
	error = -ENODEV;
	if (!dp)
		goto error;

	table = genl_dereference(dp->table);
	flow = ovs_flow_tbl_lookup(table, &key, key_len);
	if (!flow) {
		struct sw_flow_actions *acts;

		/* Bail out if we're not allowed to create a new flow. */
		error = -ENOENT;
		if (info->genlhdr->cmd == OVS_FLOW_CMD_SET)
			goto error;

		/* Expand table, if necessary, to make room. */
		if (ovs_flow_tbl_need_to_expand(table)) {
			struct flow_table *new_table;

			new_table = ovs_flow_tbl_expand(table);
			if (!IS_ERR(new_table)) {
				rcu_assign_pointer(dp->table, new_table);
				ovs_flow_tbl_deferred_destroy(table);
				table = genl_dereference(dp->table);
			}
		}

		/* Allocate flow. */
		flow = ovs_flow_alloc();
		if (IS_ERR(flow)) {
			error = PTR_ERR(flow);
			goto error;
		}
		flow->key = key;
		clear_stats(flow);

		/* Obtain actions. */
		acts = ovs_flow_actions_alloc(a[OVS_FLOW_ATTR_ACTIONS]);
		error = PTR_ERR(acts);
		if (IS_ERR(acts))
			goto error_free_flow;
		rcu_assign_pointer(flow->sf_acts, acts);

		/* Put flow in bucket. */
		flow->hash = ovs_flow_hash(&key, key_len);
		ovs_flow_tbl_insert(table, flow);

		reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
						info->snd_seq,
						OVS_FLOW_CMD_NEW);
	} else {
		/* We found a matching flow. */
		struct sw_flow_actions *old_acts;
		struct nlattr *acts_attrs;

		/* Bail out if we're not allowed to modify an existing flow.
		 * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
		 * because Generic Netlink treats the latter as a dump
		 * request.  We also accept NLM_F_EXCL in case that bug ever
		 * gets fixed.
		 */
		error = -EEXIST;
		if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW &&
		    info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL))
			goto error;

		/* Update actions. */
		old_acts = rcu_dereference_protected(flow->sf_acts,
						     lockdep_genl_is_held());
		acts_attrs = a[OVS_FLOW_ATTR_ACTIONS];
		if (acts_attrs &&
		    (old_acts->actions_len != nla_len(acts_attrs) ||
		     memcmp(old_acts->actions, nla_data(acts_attrs),
			    old_acts->actions_len))) {
			struct sw_flow_actions *new_acts;

			new_acts = ovs_flow_actions_alloc(acts_attrs);
			error = PTR_ERR(new_acts);
			if (IS_ERR(new_acts))
				goto error;

			rcu_assign_pointer(flow->sf_acts, new_acts);
			ovs_flow_deferred_free_acts(old_acts);
		}

		reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
						info->snd_seq, OVS_FLOW_CMD_NEW);

		/* Clear stats. */
		if (a[OVS_FLOW_ATTR_CLEAR]) {
			spin_lock_bh(&flow->lock);
			clear_stats(flow);
			spin_unlock_bh(&flow->lock);
		}
	}

	if (!IS_ERR(reply))
		genl_notify(reply, genl_info_net(info), info->snd_pid,
			    ovs_dp_flow_multicast_group.id, info->nlhdr,
			    GFP_KERNEL);
	else
		netlink_set_err(init_net.genl_sock, 0,
				ovs_dp_flow_multicast_group.id, PTR_ERR(reply));
	return 0;

error_free_flow:
	ovs_flow_free(flow);
error:
	return error;
}
static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct sw_flow_key key;
	struct sk_buff *reply;
	struct sw_flow *flow;
	struct datapath *dp;
	struct flow_table *table;
	int err;
	int key_len;

	if (!a[OVS_FLOW_ATTR_KEY])
		return -EINVAL;
	err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
	if (err)
		return err;

	dp = get_dp(ovs_header->dp_ifindex);
	if (!dp)
		return -ENODEV;

	table = genl_dereference(dp->table);
	flow = ovs_flow_tbl_lookup(table, &key, key_len);
	if (!flow)
		return -ENOENT;

	reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
					info->snd_seq, OVS_FLOW_CMD_NEW);
	if (IS_ERR(reply))
		return PTR_ERR(reply);

	return genlmsg_reply(reply, info);
}
static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct sw_flow_key key;
	struct sk_buff *reply;
	struct sw_flow *flow;
	struct datapath *dp;
	struct flow_table *table;
	int err;
	int key_len;

	if (!a[OVS_FLOW_ATTR_KEY])
		return flush_flows(ovs_header->dp_ifindex);
	err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
	if (err)
		return err;

	dp = get_dp(ovs_header->dp_ifindex);
	if (!dp)
		return -ENODEV;

	table = genl_dereference(dp->table);
	flow = ovs_flow_tbl_lookup(table, &key, key_len);
	if (!flow)
		return -ENOENT;

	reply = ovs_flow_cmd_alloc_info(flow);
	if (!reply)
		return -ENOMEM;

	ovs_flow_tbl_remove(table, flow);

	err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_pid,
				     info->snd_seq, 0, OVS_FLOW_CMD_DEL);
	BUG_ON(err < 0);

	ovs_flow_deferred_free(flow);

	genl_notify(reply, genl_info_net(info), info->snd_pid,
		    ovs_dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL);
	return 0;
}
static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
	struct datapath *dp;
	struct flow_table *table;

	dp = get_dp(ovs_header->dp_ifindex);
	if (!dp)
		return -ENODEV;

	table = genl_dereference(dp->table);

	for (;;) {
		struct sw_flow *flow;
		u32 bucket, obj;

		bucket = cb->args[0];
		obj = cb->args[1];
		flow = ovs_flow_tbl_next(table, &bucket, &obj);
		if (!flow)
			break;

		if (ovs_flow_cmd_fill_info(flow, dp, skb,
					   NETLINK_CB(cb->skb).pid,
					   cb->nlh->nlmsg_seq, NLM_F_MULTI,
					   OVS_FLOW_CMD_NEW) < 0)
			break;

		cb->args[0] = bucket;
		cb->args[1] = obj;
	}
	return skb->len;
}
static struct genl_ops dp_flow_genl_ops[] = {
	{ .cmd = OVS_FLOW_CMD_NEW,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = flow_policy,
	  .doit = ovs_flow_cmd_new_or_set
	},
	{ .cmd = OVS_FLOW_CMD_DEL,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = flow_policy,
	  .doit = ovs_flow_cmd_del
	},
	{ .cmd = OVS_FLOW_CMD_GET,
	  .flags = 0,		    /* OK for unprivileged users. */
	  .policy = flow_policy,
	  .doit = ovs_flow_cmd_get,
	  .dumpit = ovs_flow_cmd_dump
	},
	{ .cmd = OVS_FLOW_CMD_SET,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = flow_policy,
	  .doit = ovs_flow_cmd_new_or_set,
	},
};
static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
	[OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
	[OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
};
static struct genl_family dp_datapath_genl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_DATAPATH_FAMILY,
	.version = OVS_DATAPATH_VERSION,
	.maxattr = OVS_DP_ATTR_MAX
};

static struct genl_multicast_group ovs_dp_datapath_multicast_group = {
	.name = OVS_DATAPATH_MCGROUP
};
static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
				u32 pid, u32 seq, u32 flags, u8 cmd)
{
	struct ovs_header *ovs_header;
	struct ovs_dp_stats dp_stats;
	int err;

	ovs_header = genlmsg_put(skb, pid, seq, &dp_datapath_genl_family,
				 flags, cmd);
	if (!ovs_header)
		goto error;

	ovs_header->dp_ifindex = get_dpifindex(dp);

	rcu_read_lock();
	err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
	rcu_read_unlock();
	if (err)
		goto nla_put_failure;

	get_dp_stats(dp, &dp_stats);
	if (nla_put(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats), &dp_stats))
		goto nla_put_failure;

	return genlmsg_end(skb, ovs_header);

nla_put_failure:
	genlmsg_cancel(skb, ovs_header);
error:
	return -EMSGSIZE;
}
static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp, u32 pid,
					     u32 seq, u8 cmd)
{
	struct sk_buff *skb;
	int retval;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	retval = ovs_dp_cmd_fill_info(dp, skb, pid, seq, 0, cmd);
	if (retval < 0) {
		kfree_skb(skb);
		return ERR_PTR(retval);
	}
	return skb;
}
/* Called with genl_mutex and optionally with RTNL lock also. */
static struct datapath *lookup_datapath(struct ovs_header *ovs_header,
					struct nlattr *a[OVS_DP_ATTR_MAX + 1])
{
	struct datapath *dp;

	if (!a[OVS_DP_ATTR_NAME])
		dp = get_dp(ovs_header->dp_ifindex);
	else {
		struct vport *vport;

		rcu_read_lock();
		vport = ovs_vport_locate(nla_data(a[OVS_DP_ATTR_NAME]));
		dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
		rcu_read_unlock();
	}
	return dp ? dp : ERR_PTR(-ENODEV);
}
static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct vport_parms parms;
	struct sk_buff *reply;
	struct datapath *dp;
	struct vport *vport;
	int err;

	err = -EINVAL;
	if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
		goto err;

	rtnl_lock();
	err = -ENODEV;
	if (!try_module_get(THIS_MODULE))
		goto err_unlock_rtnl;

	err = -ENOMEM;
	dp = kzalloc(sizeof(*dp), GFP_KERNEL);
	if (dp == NULL)
		goto err_put_module;
	INIT_LIST_HEAD(&dp->port_list);

	/* Allocate table. */
	err = -ENOMEM;
	rcu_assign_pointer(dp->table, ovs_flow_tbl_alloc(TBL_MIN_BUCKETS));
	if (!dp->table)
		goto err_free_dp;

	dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
	if (!dp->stats_percpu) {
		err = -ENOMEM;
		goto err_destroy_table;
	}

	/* Set up our datapath device. */
	parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
	parms.type = OVS_VPORT_TYPE_INTERNAL;
	parms.options = NULL;
	parms.dp = dp;
	parms.port_no = OVSP_LOCAL;
	parms.upcall_pid = nla_get_u32(a[OVS_DP_ATTR_UPCALL_PID]);

	vport = new_vport(&parms);
	if (IS_ERR(vport)) {
		err = PTR_ERR(vport);
		if (err == -EBUSY)
			err = -EEXIST;

		goto err_destroy_percpu;
	}

	reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
				      info->snd_seq, OVS_DP_CMD_NEW);
	err = PTR_ERR(reply);
	if (IS_ERR(reply))
		goto err_destroy_local_port;

	list_add_tail(&dp->list_node, &dps);
	rtnl_unlock();

	genl_notify(reply, genl_info_net(info), info->snd_pid,
		    ovs_dp_datapath_multicast_group.id, info->nlhdr,
		    GFP_KERNEL);
	return 0;

err_destroy_local_port:
	ovs_dp_detach_port(rtnl_dereference(dp->ports[OVSP_LOCAL]));
err_destroy_percpu:
	free_percpu(dp->stats_percpu);
err_destroy_table:
	ovs_flow_tbl_destroy(genl_dereference(dp->table));
err_free_dp:
	kfree(dp);
err_put_module:
	module_put(THIS_MODULE);
err_unlock_rtnl:
	rtnl_unlock();
err:
	return err;
}
static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct vport *vport, *next_vport;
	struct sk_buff *reply;
	struct datapath *dp;
	int err;

	rtnl_lock();
	dp = lookup_datapath(info->userhdr, info->attrs);
	err = PTR_ERR(dp);
	if (IS_ERR(dp))
		goto exit_unlock;

	reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
				      info->snd_seq, OVS_DP_CMD_DEL);
	err = PTR_ERR(reply);
	if (IS_ERR(reply))
		goto exit_unlock;

	list_for_each_entry_safe(vport, next_vport, &dp->port_list, node)
		if (vport->port_no != OVSP_LOCAL)
			ovs_dp_detach_port(vport);

	list_del(&dp->list_node);
	ovs_dp_detach_port(rtnl_dereference(dp->ports[OVSP_LOCAL]));

	/* rtnl_unlock() will wait until all the references to devices that
	 * are pending unregistration have been dropped.  We do it here to
	 * ensure that any internal devices (which contain DP pointers) are
	 * fully destroyed before freeing the datapath.
	 */
	rtnl_unlock();

	call_rcu(&dp->rcu, destroy_dp_rcu);
	module_put(THIS_MODULE);

	genl_notify(reply, genl_info_net(info), info->snd_pid,
		    ovs_dp_datapath_multicast_group.id, info->nlhdr,
		    GFP_KERNEL);

	return 0;

exit_unlock:
	rtnl_unlock();
	return err;
}
static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *reply;
	struct datapath *dp;
	int err;

	dp = lookup_datapath(info->userhdr, info->attrs);
	if (IS_ERR(dp))
		return PTR_ERR(dp);

	reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
				      info->snd_seq, OVS_DP_CMD_NEW);
	if (IS_ERR(reply)) {
		err = PTR_ERR(reply);
		netlink_set_err(init_net.genl_sock, 0,
				ovs_dp_datapath_multicast_group.id, err);
		return 0;
	}

	genl_notify(reply, genl_info_net(info), info->snd_pid,
		    ovs_dp_datapath_multicast_group.id, info->nlhdr,
		    GFP_KERNEL);

	return 0;
}
*skb
, struct genl_info
*info
)
1389 struct sk_buff
*reply
;
1390 struct datapath
*dp
;
1392 dp
= lookup_datapath(info
->userhdr
, info
->attrs
);
1396 reply
= ovs_dp_cmd_build_info(dp
, info
->snd_pid
,
1397 info
->snd_seq
, OVS_DP_CMD_NEW
);
1399 return PTR_ERR(reply
);
1401 return genlmsg_reply(reply
, info
);
static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct datapath *dp;
	int skip = cb->args[0];
	int i = 0;

	list_for_each_entry(dp, &dps, list_node) {
		if (i >= skip &&
		    ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).pid,
					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
					 OVS_DP_CMD_NEW) < 0)
			break;
		i++;
	}

	cb->args[0] = i;

	return skb->len;
}
static struct genl_ops dp_datapath_genl_ops[] = {
	{ .cmd = OVS_DP_CMD_NEW,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = datapath_policy,
	  .doit = ovs_dp_cmd_new
	},
	{ .cmd = OVS_DP_CMD_DEL,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = datapath_policy,
	  .doit = ovs_dp_cmd_del
	},
	{ .cmd = OVS_DP_CMD_GET,
	  .flags = 0,		    /* OK for unprivileged users. */
	  .policy = datapath_policy,
	  .doit = ovs_dp_cmd_get,
	  .dumpit = ovs_dp_cmd_dump
	},
	{ .cmd = OVS_DP_CMD_SET,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = datapath_policy,
	  .doit = ovs_dp_cmd_set,
	},
};
static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
	[OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
	[OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
	[OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
	[OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
	[OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
	[OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
};
static struct genl_family dp_vport_genl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_VPORT_FAMILY,
	.version = OVS_VPORT_VERSION,
	.maxattr = OVS_VPORT_ATTR_MAX
};

struct genl_multicast_group ovs_dp_vport_multicast_group = {
	.name = OVS_VPORT_MCGROUP
};
/* Called with RTNL lock or RCU read lock. */
static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
				   u32 pid, u32 seq, u32 flags, u8 cmd)
{
	struct ovs_header *ovs_header;
	struct ovs_vport_stats vport_stats;
	int err;

	ovs_header = genlmsg_put(skb, pid, seq, &dp_vport_genl_family,
				 flags, cmd);
	if (!ovs_header)
		return -EMSGSIZE;

	ovs_header->dp_ifindex = get_dpifindex(vport->dp);

	if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
	    nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
	    nla_put_string(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport)) ||
	    nla_put_u32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_pid))
		goto nla_put_failure;

	ovs_vport_get_stats(vport, &vport_stats);
	if (nla_put(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats),
		    &vport_stats))
		goto nla_put_failure;

	err = ovs_vport_get_options(vport, skb);
	if (err == -EMSGSIZE)
		goto error;

	return genlmsg_end(skb, ovs_header);

nla_put_failure:
	err = -EMSGSIZE;
error:
	genlmsg_cancel(skb, ovs_header);
	return err;
}
/* Called with RTNL lock or RCU read lock. */
struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 pid,
					 u32 seq, u8 cmd)
{
	struct sk_buff *skb;
	int retval;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	retval = ovs_vport_cmd_fill_info(vport, skb, pid, seq, 0, cmd);
	if (retval < 0) {
		kfree_skb(skb);
		return ERR_PTR(retval);
	}
	return skb;
}
/* Called with RTNL lock or RCU read lock. */
static struct vport *lookup_vport(struct ovs_header *ovs_header,
				  struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
{
	struct datapath *dp;
	struct vport *vport;

	if (a[OVS_VPORT_ATTR_NAME]) {
		vport = ovs_vport_locate(nla_data(a[OVS_VPORT_ATTR_NAME]));
		if (!vport)
			return ERR_PTR(-ENODEV);
		if (ovs_header->dp_ifindex &&
		    ovs_header->dp_ifindex != get_dpifindex(vport->dp))
			return ERR_PTR(-ENODEV);
		return vport;
	} else if (a[OVS_VPORT_ATTR_PORT_NO]) {
		u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);

		if (port_no >= DP_MAX_PORTS)
			return ERR_PTR(-EFBIG);

		dp = get_dp(ovs_header->dp_ifindex);
		if (!dp)
			return ERR_PTR(-ENODEV);

		vport = rcu_dereference_rtnl(dp->ports[port_no]);
		if (!vport)
			return ERR_PTR(-ENOENT);
		return vport;
	} else
		return ERR_PTR(-EINVAL);
}
static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct vport_parms parms;
	struct sk_buff *reply;
	struct vport *vport;
	struct datapath *dp;
	u32 port_no;
	int err;

	err = -EINVAL;
	if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
	    !a[OVS_VPORT_ATTR_UPCALL_PID])
		goto exit;

	rtnl_lock();
	dp = get_dp(ovs_header->dp_ifindex);
	err = -ENODEV;
	if (!dp)
		goto exit_unlock;

	if (a[OVS_VPORT_ATTR_PORT_NO]) {
		port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);

		err = -EFBIG;
		if (port_no >= DP_MAX_PORTS)
			goto exit_unlock;

		vport = rtnl_dereference(dp->ports[port_no]);
		err = -EBUSY;
		if (vport)
			goto exit_unlock;
	} else {
		for (port_no = 1; ; port_no++) {
			if (port_no >= DP_MAX_PORTS) {
				err = -EFBIG;
				goto exit_unlock;
			}
			vport = rtnl_dereference(dp->ports[port_no]);
			if (!vport)
				break;
		}
	}

	parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
	parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
	parms.options = a[OVS_VPORT_ATTR_OPTIONS];
	parms.dp = dp;
	parms.port_no = port_no;
	parms.upcall_pid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);

	vport = new_vport(&parms);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock;

	reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
					 OVS_VPORT_CMD_NEW);
	if (IS_ERR(reply)) {
		err = PTR_ERR(reply);
		ovs_dp_detach_port(vport);
		goto exit_unlock;
	}
	genl_notify(reply, genl_info_net(info), info->snd_pid,
		    ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);

exit_unlock:
	rtnl_unlock();
exit:
	return err;
}
static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct sk_buff *reply;
	struct vport *vport;
	int err;

	rtnl_lock();
	vport = lookup_vport(info->userhdr, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock;

	err = 0;
	if (a[OVS_VPORT_ATTR_TYPE] &&
	    nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type)
		err = -EINVAL;

	if (!err && a[OVS_VPORT_ATTR_OPTIONS])
		err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
	if (err)
		goto exit_unlock;
	if (a[OVS_VPORT_ATTR_UPCALL_PID])
		vport->upcall_pid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);

	reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
					 OVS_VPORT_CMD_NEW);
	if (IS_ERR(reply)) {
		netlink_set_err(init_net.genl_sock, 0,
				ovs_dp_vport_multicast_group.id, PTR_ERR(reply));
		goto exit_unlock;
	}

	genl_notify(reply, genl_info_net(info), info->snd_pid,
		    ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);

exit_unlock:
	rtnl_unlock();
	return err;
}
static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct sk_buff *reply;
	struct vport *vport;
	int err;

	rtnl_lock();
	vport = lookup_vport(info->userhdr, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock;

	if (vport->port_no == OVSP_LOCAL) {
		err = -EINVAL;
		goto exit_unlock;
	}

	reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
					 OVS_VPORT_CMD_DEL);
	err = PTR_ERR(reply);
	if (IS_ERR(reply))
		goto exit_unlock;

	ovs_dp_detach_port(vport);

	genl_notify(reply, genl_info_net(info), info->snd_pid,
		    ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);

exit_unlock:
	rtnl_unlock();
	return err;
}
static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct sk_buff *reply;
	struct vport *vport;
	int err;

	rcu_read_lock();
	vport = lookup_vport(ovs_header, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock;

	reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
					 OVS_VPORT_CMD_NEW);
	err = PTR_ERR(reply);
	if (IS_ERR(reply))
		goto exit_unlock;

	rcu_read_unlock();

	return genlmsg_reply(reply, info);

exit_unlock:
	rcu_read_unlock();
	return err;
}
static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
	struct datapath *dp;
	u32 port_no;
	int retval;

	dp = get_dp(ovs_header->dp_ifindex);
	if (!dp)
		return -ENODEV;

	rcu_read_lock();
	for (port_no = cb->args[0]; port_no < DP_MAX_PORTS; port_no++) {
		struct vport *vport;

		vport = rcu_dereference(dp->ports[port_no]);
		if (!vport)
			continue;

		if (ovs_vport_cmd_fill_info(vport, skb, NETLINK_CB(cb->skb).pid,
					    cb->nlh->nlmsg_seq, NLM_F_MULTI,
					    OVS_VPORT_CMD_NEW) < 0)
			break;
	}
	rcu_read_unlock();

	cb->args[0] = port_no;
	retval = skb->len;

	return retval;
}
static void rehash_flow_table(struct work_struct *work)
{
	struct datapath *dp;

	genl_lock();

	list_for_each_entry(dp, &dps, list_node) {
		struct flow_table *old_table = genl_dereference(dp->table);
		struct flow_table *new_table;

		new_table = ovs_flow_tbl_rehash(old_table);
		if (!IS_ERR(new_table)) {
			rcu_assign_pointer(dp->table, new_table);
			ovs_flow_tbl_deferred_destroy(old_table);
		}
	}

	genl_unlock();

	schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
}
static struct genl_ops dp_vport_genl_ops[] = {
	{ .cmd = OVS_VPORT_CMD_NEW,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = vport_policy,
	  .doit = ovs_vport_cmd_new
	},
	{ .cmd = OVS_VPORT_CMD_DEL,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = vport_policy,
	  .doit = ovs_vport_cmd_del
	},
	{ .cmd = OVS_VPORT_CMD_GET,
	  .flags = 0,		    /* OK for unprivileged users. */
	  .policy = vport_policy,
	  .doit = ovs_vport_cmd_get,
	  .dumpit = ovs_vport_cmd_dump
	},
	{ .cmd = OVS_VPORT_CMD_SET,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = vport_policy,
	  .doit = ovs_vport_cmd_set,
	},
};
struct genl_family_and_ops {
	struct genl_family *family;
	struct genl_ops *ops;
	int n_ops;
	struct genl_multicast_group *group;
};
1822 static const struct genl_family_and_ops dp_genl_families
[] = {
1823 { &dp_datapath_genl_family
,
1824 dp_datapath_genl_ops
, ARRAY_SIZE(dp_datapath_genl_ops
),
1825 &ovs_dp_datapath_multicast_group
},
1826 { &dp_vport_genl_family
,
1827 dp_vport_genl_ops
, ARRAY_SIZE(dp_vport_genl_ops
),
1828 &ovs_dp_vport_multicast_group
},
1829 { &dp_flow_genl_family
,
1830 dp_flow_genl_ops
, ARRAY_SIZE(dp_flow_genl_ops
),
1831 &ovs_dp_flow_multicast_group
},
1832 { &dp_packet_genl_family
,
1833 dp_packet_genl_ops
, ARRAY_SIZE(dp_packet_genl_ops
),
static void dp_unregister_genl(int n_families)
{
	int i;

	for (i = 0; i < n_families; i++)
		genl_unregister_family(dp_genl_families[i].family);
}
static int dp_register_genl(void)
{
	int n_registered;
	int err;
	int i;

	n_registered = 0;
	for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
		const struct genl_family_and_ops *f = &dp_genl_families[i];

		err = genl_register_family_with_ops(f->family, f->ops,
						    f->n_ops);
		if (err)
			goto error;
		n_registered++;

		if (f->group) {
			err = genl_register_mc_group(f->family, f->group);
			if (err)
				goto error;
		}
	}

	return 0;

error:
	dp_unregister_genl(n_registered);
	return err;
}
static int __init dp_init(void)
{
	struct sk_buff *dummy_skb;
	int err;

	BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > sizeof(dummy_skb->cb));

	pr_info("Open vSwitch switching datapath\n");

	err = ovs_flow_init();
	if (err)
		goto error;

	err = ovs_vport_init();
	if (err)
		goto error_flow_exit;

	err = register_netdevice_notifier(&ovs_dp_device_notifier);
	if (err)
		goto error_vport_exit;

	err = dp_register_genl();
	if (err < 0)
		goto error_unreg_notifier;

	schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);

	return 0;

error_unreg_notifier:
	unregister_netdevice_notifier(&ovs_dp_device_notifier);
error_vport_exit:
	ovs_vport_exit();
error_flow_exit:
	ovs_flow_exit();
error:
	return err;
}
static void dp_cleanup(void)
{
	cancel_delayed_work_sync(&rehash_flow_wq);
	dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
	unregister_netdevice_notifier(&ovs_dp_device_notifier);
	ovs_vport_exit();
	ovs_flow_exit();
}
module_init(dp_init);
module_exit(dp_cleanup);

MODULE_DESCRIPTION("Open vSwitch switching datapath");
MODULE_LICENSE("GPL");