/*
 * Copyright (c) 2007-2011 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "flow.h"
#include "datapath.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>

static struct kmem_cache *flow_cache;
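
/* Verify that the packet has at least 'len' bytes of data and that those
 * bytes are available in the linear (pulled) part of the skb, so header
 * fields can be read directly. */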
static int check_header(struct sk_buff *skb, int len)
{
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;
	return 0;
}

static bool arphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_network_offset(skb) +
				  sizeof(struct arp_eth_header));
}

static int check_iphdr(struct sk_buff *skb)
{
	unsigned int nh_ofs = skb_network_offset(skb);
	unsigned int ip_len;
	int err;

	err = check_header(skb, nh_ofs + sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	ip_len = ip_hdrlen(skb);
	if (unlikely(ip_len < sizeof(struct iphdr) ||
		     skb->len < nh_ofs + ip_len))
		return -EINVAL;

	skb_set_transport_header(skb, nh_ofs + ip_len);
	return 0;
}

static bool tcphdr_ok(struct sk_buff *skb)
{
	int th_ofs = skb_transport_offset(skb);
	int tcp_len;

	if (unlikely(!pskb_may_pull(skb, th_ofs + sizeof(struct tcphdr))))
		return false;

	tcp_len = tcp_hdrlen(skb);
	if (unlikely(tcp_len < sizeof(struct tcphdr) ||
		     skb->len < th_ofs + tcp_len))
		return false;

	return true;
}

static bool udphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
}

static bool icmphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct icmphdr));
}
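
/* Returns the wall-clock time, in milliseconds, at which a flow with the
 * given 'used' jiffies value was last used: the current time in ms minus
 * the time the flow has been idle. */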
u64 ovs_flow_used_time(unsigned long flow_jiffies)
{
	struct timespec cur_ts;
	u64 cur_ms, idle_ms;

	ktime_get_ts(&cur_ts);
	idle_ms = jiffies_to_msecs(jiffies - flow_jiffies);
	cur_ms = (u64)cur_ts.tv_sec * MSEC_PER_SEC +
		 cur_ts.tv_nsec / NSEC_PER_MSEC;

	return cur_ms - idle_ms;
}
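
/* Number of bytes of a sw_flow_key that are meaningful once everything up to
 * and including 'field' has been filled in: the offset of 'field' plus its
 * size.  For example, SW_FLOW_KEY_OFFSET(ipv4.tp) covers the key through the
 * IPv4 transport ports.  Hashing and comparison only look at this prefix. */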
#define SW_FLOW_KEY_OFFSET(field)		\
	(offsetof(struct sw_flow_key, field) +	\
	 FIELD_SIZEOF(struct sw_flow_key, field))

static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key,
			 int *key_lenp)
{
	unsigned int nh_ofs = skb_network_offset(skb);
	unsigned int nh_len;
	int payload_ofs;
	struct ipv6hdr *nh;
	uint8_t nexthdr;
	__be16 frag_off;
	int err;

	*key_lenp = SW_FLOW_KEY_OFFSET(ipv6.label);

	err = check_header(skb, nh_ofs + sizeof(*nh));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);
	nexthdr = nh->nexthdr;
	payload_ofs = (u8 *)(nh + 1) - skb->data;

	key->ip.proto = NEXTHDR_NONE;
	key->ip.tos = ipv6_get_dsfield(nh);
	key->ip.ttl = nh->hop_limit;
	key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	key->ipv6.addr.src = nh->saddr;
	key->ipv6.addr.dst = nh->daddr;

	payload_ofs = ipv6_skip_exthdr(skb, payload_ofs, &nexthdr, &frag_off);
	if (unlikely(payload_ofs < 0))
		return -EINVAL;

	if (frag_off) {
		if (frag_off & htons(~0x7))
			key->ip.frag = OVS_FRAG_TYPE_LATER;
		else
			key->ip.frag = OVS_FRAG_TYPE_FIRST;
	}

	nh_len = payload_ofs - nh_ofs;
	skb_set_transport_header(skb, nh_ofs + nh_len);
	key->ip.proto = nexthdr;
	return nh_len;
}

static bool icmp6hdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct icmp6hdr));
}
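
/* Byte 13 of the TCP header holds the flag bits; the low six bits
 * (FIN, SYN, RST, PSH, ACK, URG) are ORed into the per-flow tcp_flags
 * statistic below. */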
#define TCP_FLAGS_OFFSET 13
#define TCP_FLAG_MASK 0x3f

void ovs_flow_used(struct sw_flow *flow, struct sk_buff *skb)
{
	u8 tcp_flags = 0;

	if ((flow->key.eth.type == htons(ETH_P_IP) ||
	     flow->key.eth.type == htons(ETH_P_IPV6)) &&
	    flow->key.ip.proto == IPPROTO_TCP &&
	    likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) {
		u8 *tcp = (u8 *)tcp_hdr(skb);
		tcp_flags = *(tcp + TCP_FLAGS_OFFSET) & TCP_FLAG_MASK;
	}

	spin_lock(&flow->lock);
	flow->used = jiffies;
	flow->packet_count++;
	flow->byte_count += skb->len;
	flow->tcp_flags |= tcp_flags;
	spin_unlock(&flow->lock);
}

struct sw_flow_actions *ovs_flow_actions_alloc(const struct nlattr *actions)
{
	int actions_len = nla_len(actions);
	struct sw_flow_actions *sfa;

	if (actions_len > MAX_ACTIONS_BUFSIZE)
		return ERR_PTR(-EINVAL);

	sfa = kmalloc(sizeof(*sfa) + actions_len, GFP_KERNEL);
	if (!sfa)
		return ERR_PTR(-ENOMEM);

	sfa->actions_len = actions_len;
	memcpy(sfa->actions, nla_data(actions), actions_len);
	return sfa;
}

struct sw_flow *ovs_flow_alloc(void)
{
	struct sw_flow *flow;

	flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&flow->lock);
	flow->sf_acts = NULL;

	return flow;
}
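
/* Map a flow hash to its bucket.  The hash is mixed with a per-table random
 * seed before masking, so the same flow hash lands in different buckets in
 * different tables. */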
static struct hlist_head *find_bucket(struct flow_table *table, u32 hash)
{
	hash = jhash_1word(hash, table->hash_seed);
	return flex_array_get(table->buckets,
				(hash & (table->n_buckets - 1)));
}

static struct flex_array *alloc_buckets(unsigned int n_buckets)
{
	struct flex_array *buckets;
	int i, err;

	buckets = flex_array_alloc(sizeof(struct hlist_head *),
				   n_buckets, GFP_KERNEL);
	if (!buckets)
		return NULL;

	err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
	if (err) {
		flex_array_free(buckets);
		return NULL;
	}

	for (i = 0; i < n_buckets; i++)
		INIT_HLIST_HEAD((struct hlist_head *)
					flex_array_get(buckets, i));

	return buckets;
}

static void free_buckets(struct flex_array *buckets)
{
	flex_array_free(buckets);
}

struct flow_table *ovs_flow_tbl_alloc(int new_size)
{
	struct flow_table *table = kmalloc(sizeof(*table), GFP_KERNEL);

	if (!table)
		return NULL;

	table->buckets = alloc_buckets(new_size);

	if (!table->buckets) {
		kfree(table);
		return NULL;
	}
	table->n_buckets = new_size;
	table->count = 0;
	table->node_ver = 0;
	table->keep_flows = false;
	get_random_bytes(&table->hash_seed, sizeof(u32));

	return table;
}

void ovs_flow_tbl_destroy(struct flow_table *table)
{
	int i;

	if (!table)
		return;

	if (table->keep_flows)
		goto skip_flows;

	for (i = 0; i < table->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = flex_array_get(table->buckets, i);
		struct hlist_node *node, *n;
		int ver = table->node_ver;

		hlist_for_each_entry_safe(flow, node, n, head, hash_node[ver]) {
			hlist_del_rcu(&flow->hash_node[ver]);
			ovs_flow_free(flow);
		}
	}

skip_flows:
	free_buckets(table->buckets);
	kfree(table);
}

static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
	struct flow_table *table = container_of(rcu, struct flow_table, rcu);

	ovs_flow_tbl_destroy(table);
}

void ovs_flow_tbl_deferred_destroy(struct flow_table *table)
{
	if (!table)
		return;

	call_rcu(&table->rcu, flow_tbl_destroy_rcu_cb);
}

struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *last)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	struct hlist_node *n;
	int ver;
	int i;

	ver = table->node_ver;
	while (*bucket < table->n_buckets) {
		i = 0;
		head = flex_array_get(table->buckets, *bucket);
		hlist_for_each_entry_rcu(flow, n, head, hash_node[ver]) {
			if (i < *last) {
				i++;
				continue;
			}
			*last = i + 1;
			return flow;
		}
		(*bucket)++;
		*last = 0;
	}

	return NULL;
}
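
/* Each sw_flow has two hash_node links; 'node_ver' selects which one a table
 * uses.  When copying flows into a new table for rehash/expand, the new table
 * flips node_ver so the flows can be linked into both tables at once, and
 * keep_flows tells ovs_flow_tbl_destroy() not to free the flows of the old
 * table. */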
static void flow_table_copy_flows(struct flow_table *old, struct flow_table *new)
{
	int old_ver;
	int i;

	old_ver = old->node_ver;
	new->node_ver = !old_ver;

	/* Insert in new table. */
	for (i = 0; i < old->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head;
		struct hlist_node *n;

		head = flex_array_get(old->buckets, i);

		hlist_for_each_entry(flow, n, head, hash_node[old_ver])
			ovs_flow_tbl_insert(new, flow);
	}
	old->keep_flows = true;
}

static struct flow_table *__flow_tbl_rehash(struct flow_table *table, int n_buckets)
{
	struct flow_table *new_table;

	new_table = ovs_flow_tbl_alloc(n_buckets);
	if (!new_table)
		return ERR_PTR(-ENOMEM);

	flow_table_copy_flows(table, new_table);

	return new_table;
}

struct flow_table *ovs_flow_tbl_rehash(struct flow_table *table)
{
	return __flow_tbl_rehash(table, table->n_buckets);
}

struct flow_table *ovs_flow_tbl_expand(struct flow_table *table)
{
	return __flow_tbl_rehash(table, table->n_buckets * 2);
}

void ovs_flow_free(struct sw_flow *flow)
{
	if (unlikely(!flow))
		return;

	kfree((struct sf_flow_acts __force *)flow->sf_acts);
	kmem_cache_free(flow_cache, flow);
}

/* RCU callback used by ovs_flow_deferred_free. */
static void rcu_free_flow_callback(struct rcu_head *rcu)
{
	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

	ovs_flow_free(flow);
}

/* Schedules 'flow' to be freed after the next RCU grace period.
 * The caller must hold rcu_read_lock for this to be sensible. */
void ovs_flow_deferred_free(struct sw_flow *flow)
{
	call_rcu(&flow->rcu, rcu_free_flow_callback);
}

/* Schedules 'sf_acts' to be freed after the next RCU grace period.
 * The caller must hold rcu_read_lock for this to be sensible. */
void ovs_flow_deferred_free_acts(struct sw_flow_actions *sf_acts)
{
	kfree_rcu(sf_acts, rcu);
}
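
/* Parse an 802.1Q tag present in the packet data (as opposed to the skb's
 * vlan_tci field): read the TCI that follows the 0x8100 ethertype, mark it
 * with VLAN_TAG_PRESENT, and pull the 4-byte tag so the inner ethertype is
 * next. */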
static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	struct qtag_prefix {
		__be16 eth_type; /* ETH_P_8021Q */
		__be16 tci;
	};
	struct qtag_prefix *qp;

	if (unlikely(skb->len < sizeof(struct qtag_prefix) + sizeof(__be16)))
		return 0;

	if (unlikely(!pskb_may_pull(skb, sizeof(struct qtag_prefix) +
					 sizeof(__be16))))
		return -ENOMEM;

	qp = (struct qtag_prefix *) skb->data;
	key->eth.tci = qp->tci | htons(VLAN_TAG_PRESENT);
	__skb_pull(skb, sizeof(struct qtag_prefix));

	return 0;
}
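
/* Determine the effective Ethernet type: a length/type field of 1536 (0x600)
 * or more is an Ethernet II ethertype; smaller values are an 802.3 length, in
 * which case a well-formed LLC/SNAP header supplies the ethertype and anything
 * else is classified as ETH_P_802_2. */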
static __be16 parse_ethertype(struct sk_buff *skb)
{
	struct llc_snap_hdr {
		u8  dsap;  /* Always 0xAA */
		u8  ssap;  /* Always 0xAA */
		u8  ctrl;
		u8  oui[3];
		__be16 ethertype;
	};
	struct llc_snap_hdr *llc;
	__be16 proto;

	proto = *(__be16 *) skb->data;
	__skb_pull(skb, sizeof(__be16));

	if (ntohs(proto) >= 1536)
		return proto;

	if (skb->len < sizeof(struct llc_snap_hdr))
		return htons(ETH_P_802_2);

	if (unlikely(!pskb_may_pull(skb, sizeof(struct llc_snap_hdr))))
		return htons(0);

	llc = (struct llc_snap_hdr *) skb->data;
	if (llc->dsap != LLC_SAP_SNAP ||
	    llc->ssap != LLC_SAP_SNAP ||
	    (llc->oui[0] | llc->oui[1] | llc->oui[2]) != 0)
		return htons(ETH_P_802_2);

	__skb_pull(skb, sizeof(struct llc_snap_hdr));
	return llc->ethertype;
}

static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
			int *key_lenp, int nh_len)
{
	struct icmp6hdr *icmp = icmp6_hdr(skb);
	int error = 0;
	int key_len;

	/* The ICMPv6 type and code fields use the 16-bit transport port
	 * fields, so we need to store them in 16-bit network byte order.
	 */
	key->ipv6.tp.src = htons(icmp->icmp6_type);
	key->ipv6.tp.dst = htons(icmp->icmp6_code);
	key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);

	if (icmp->icmp6_code == 0 &&
	    (icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
	     icmp->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT)) {
		int icmp_len = skb->len - skb_transport_offset(skb);
		struct nd_msg *nd;
		int offset;

		key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);

		/* In order to process neighbor discovery options, we need the
		 * entire packet.
		 */
		if (unlikely(icmp_len < sizeof(*nd)))
			goto out;
		if (unlikely(skb_linearize(skb))) {
			error = -ENOMEM;
			goto out;
		}

		nd = (struct nd_msg *)skb_transport_header(skb);
		key->ipv6.nd.target = nd->target;
		key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);

		icmp_len -= sizeof(*nd);
		offset = 0;
		while (icmp_len >= 8) {
			struct nd_opt_hdr *nd_opt =
				 (struct nd_opt_hdr *)(nd->opt + offset);
			int opt_len = nd_opt->nd_opt_len * 8;

			if (unlikely(!opt_len || opt_len > icmp_len))
				goto invalid;

			/* Store the link layer address if the appropriate
			 * option is provided.  It is considered an error if
			 * the same link layer option is specified twice.
			 */
			if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LL_ADDR
			    && opt_len == 8) {
				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.sll)))
					goto invalid;
				memcpy(key->ipv6.nd.sll,
				    &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN);
			} else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LL_ADDR
				   && opt_len == 8) {
				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.tll)))
					goto invalid;
				memcpy(key->ipv6.nd.tll,
				    &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN);
			}

			icmp_len -= opt_len;
			offset += opt_len;
		}
	}

	goto out;

invalid:
	memset(&key->ipv6.nd.target, 0, sizeof(key->ipv6.nd.target));
	memset(key->ipv6.nd.sll, 0, sizeof(key->ipv6.nd.sll));
	memset(key->ipv6.nd.tll, 0, sizeof(key->ipv6.nd.tll));

out:
	*key_lenp = key_len;
	return error;
}

/**
 * ovs_flow_extract - extracts a flow key from an Ethernet frame.
 * @skb: sk_buff that contains the frame, with skb->data pointing to the
 * Ethernet header
 * @in_port: port number on which @skb was received.
 * @key: output flow key
 * @key_lenp: length of output flow key
 *
 * The caller must ensure that skb->len >= ETH_HLEN.
 *
 * Returns 0 if successful, otherwise a negative errno value.
 *
 * Initializes @skb header pointers as follows:
 *
 *    - skb->mac_header: the Ethernet header.
 *
 *    - skb->network_header: just past the Ethernet header, or just past the
 *      VLAN header, to the first byte of the Ethernet payload.
 *
 *    - skb->transport_header: If key->dl_type is ETH_P_IP or ETH_P_IPV6
 *      on output, then just past the IP header, if one is present and
 *      of a correct length, otherwise the same as skb->network_header.
 *      For other key->dl_type values it is left untouched.
 */
int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key,
		 int *key_lenp)
{
	int error = 0;
	int key_len = SW_FLOW_KEY_OFFSET(eth);
	struct ethhdr *eth;

	memset(key, 0, sizeof(*key));

	key->phy.priority = skb->priority;
	key->phy.in_port = in_port;
	key->phy.skb_mark = skb->mark;

	skb_reset_mac_header(skb);

	/* Link layer.  We are guaranteed to have at least the 14 byte Ethernet
	 * header in the linear data area.
	 */
	eth = eth_hdr(skb);
	memcpy(key->eth.src, eth->h_source, ETH_ALEN);
	memcpy(key->eth.dst, eth->h_dest, ETH_ALEN);

	__skb_pull(skb, 2 * ETH_ALEN);

	if (vlan_tx_tag_present(skb))
		key->eth.tci = htons(skb->vlan_tci);
	else if (eth->h_proto == htons(ETH_P_8021Q))
		if (unlikely(parse_vlan(skb, key)))
			return -ENOMEM;

	key->eth.type = parse_ethertype(skb);
	if (unlikely(key->eth.type == htons(0)))
		return -ENOMEM;

	skb_reset_network_header(skb);
	__skb_push(skb, skb->data - skb_mac_header(skb));

	/* Network layer. */
	if (key->eth.type == htons(ETH_P_IP)) {
		struct iphdr *nh;
		__be16 offset;

		key_len = SW_FLOW_KEY_OFFSET(ipv4.addr);

		error = check_iphdr(skb);
		if (unlikely(error)) {
			if (error == -EINVAL) {
				skb->transport_header = skb->network_header;
				error = 0;
			}
			goto out;
		}

		nh = ip_hdr(skb);
		key->ipv4.addr.src = nh->saddr;
		key->ipv4.addr.dst = nh->daddr;

		key->ip.proto = nh->protocol;
		key->ip.tos = nh->tos;
		key->ip.ttl = nh->ttl;

		offset = nh->frag_off & htons(IP_OFFSET);
		if (offset) {
			key->ip.frag = OVS_FRAG_TYPE_LATER;
			goto out;
		}
		if (nh->frag_off & htons(IP_MF) ||
			 skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			key->ip.frag = OVS_FRAG_TYPE_FIRST;

		/* Transport layer. */
		if (key->ip.proto == IPPROTO_TCP) {
			key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
			if (tcphdr_ok(skb)) {
				struct tcphdr *tcp = tcp_hdr(skb);
				key->ipv4.tp.src = tcp->source;
				key->ipv4.tp.dst = tcp->dest;
			}
		} else if (key->ip.proto == IPPROTO_UDP) {
			key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
			if (udphdr_ok(skb)) {
				struct udphdr *udp = udp_hdr(skb);
				key->ipv4.tp.src = udp->source;
				key->ipv4.tp.dst = udp->dest;
			}
		} else if (key->ip.proto == IPPROTO_ICMP) {
			key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
			if (icmphdr_ok(skb)) {
				struct icmphdr *icmp = icmp_hdr(skb);
				/* The ICMP type and code fields use the 16-bit
				 * transport port fields, so we need to store
				 * them in 16-bit network byte order. */
				key->ipv4.tp.src = htons(icmp->type);
				key->ipv4.tp.dst = htons(icmp->code);
			}
		}

	} else if ((key->eth.type == htons(ETH_P_ARP) ||
		   key->eth.type == htons(ETH_P_RARP)) && arphdr_ok(skb)) {
		struct arp_eth_header *arp;

		arp = (struct arp_eth_header *)skb_network_header(skb);

		if (arp->ar_hrd == htons(ARPHRD_ETHER)
				&& arp->ar_pro == htons(ETH_P_IP)
				&& arp->ar_hln == ETH_ALEN
				&& arp->ar_pln == 4) {

			/* We only match on the lower 8 bits of the opcode. */
			if (ntohs(arp->ar_op) <= 0xff)
				key->ip.proto = ntohs(arp->ar_op);
			memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src));
			memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst));
			memcpy(key->ipv4.arp.sha, arp->ar_sha, ETH_ALEN);
			memcpy(key->ipv4.arp.tha, arp->ar_tha, ETH_ALEN);
			key_len = SW_FLOW_KEY_OFFSET(ipv4.arp);
		}
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		int nh_len;             /* IPv6 Header + Extensions */

		nh_len = parse_ipv6hdr(skb, key, &key_len);
		if (unlikely(nh_len < 0)) {
			if (nh_len == -EINVAL)
				skb->transport_header = skb->network_header;
			else
				error = nh_len;
			goto out;
		}

		if (key->ip.frag == OVS_FRAG_TYPE_LATER)
			goto out;
		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			key->ip.frag = OVS_FRAG_TYPE_FIRST;

		/* Transport layer. */
		if (key->ip.proto == NEXTHDR_TCP) {
			key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
			if (tcphdr_ok(skb)) {
				struct tcphdr *tcp = tcp_hdr(skb);
				key->ipv6.tp.src = tcp->source;
				key->ipv6.tp.dst = tcp->dest;
			}
		} else if (key->ip.proto == NEXTHDR_UDP) {
			key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
			if (udphdr_ok(skb)) {
				struct udphdr *udp = udp_hdr(skb);
				key->ipv6.tp.src = udp->source;
				key->ipv6.tp.dst = udp->dest;
			}
		} else if (key->ip.proto == NEXTHDR_ICMP) {
			key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
			if (icmp6hdr_ok(skb)) {
				error = parse_icmpv6(skb, key, &key_len, nh_len);
				if (error < 0)
					goto out;
			}
		}
	}

out:
	*key_lenp = key_len;
	return error;
}

u32 ovs_flow_hash(const struct sw_flow_key *key, int key_len)
{
	return jhash2((u32 *)key, DIV_ROUND_UP(key_len, sizeof(u32)), 0);
}
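
/* A lookup hashes only the first 'key_len' bytes of the key (see
 * SW_FLOW_KEY_OFFSET) and then confirms each candidate with a memcmp over the
 * same length, so both the hash and the key prefix must match. */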
struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *table,
				struct sw_flow_key *key, int key_len)
{
	struct sw_flow *flow;
	struct hlist_node *n;
	struct hlist_head *head;
	u32 hash;

	hash = ovs_flow_hash(key, key_len);

	head = find_bucket(table, hash);
	hlist_for_each_entry_rcu(flow, n, head, hash_node[table->node_ver]) {

		if (flow->hash == hash &&
		    !memcmp(&flow->key, key, key_len)) {
			return flow;
		}
	}
	return NULL;
}

void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(table, flow->hash);
	hlist_add_head_rcu(&flow->hash_node[table->node_ver], head);
	table->count++;
}

void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
	hlist_del_rcu(&flow->hash_node[table->node_ver]);
	table->count--;
	BUG_ON(table->count < 0);
}

/* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */
const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
	[OVS_KEY_ATTR_ENCAP] = -1,
	[OVS_KEY_ATTR_PRIORITY] = sizeof(u32),
	[OVS_KEY_ATTR_IN_PORT] = sizeof(u32),
	[OVS_KEY_ATTR_SKB_MARK] = sizeof(u32),
	[OVS_KEY_ATTR_ETHERNET] = sizeof(struct ovs_key_ethernet),
	[OVS_KEY_ATTR_VLAN] = sizeof(__be16),
	[OVS_KEY_ATTR_ETHERTYPE] = sizeof(__be16),
	[OVS_KEY_ATTR_IPV4] = sizeof(struct ovs_key_ipv4),
	[OVS_KEY_ATTR_IPV6] = sizeof(struct ovs_key_ipv6),
	[OVS_KEY_ATTR_TCP] = sizeof(struct ovs_key_tcp),
	[OVS_KEY_ATTR_UDP] = sizeof(struct ovs_key_udp),
	[OVS_KEY_ATTR_ICMP] = sizeof(struct ovs_key_icmp),
	[OVS_KEY_ATTR_ICMPV6] = sizeof(struct ovs_key_icmpv6),
	[OVS_KEY_ATTR_ARP] = sizeof(struct ovs_key_arp),
	[OVS_KEY_ATTR_ND] = sizeof(struct ovs_key_nd),
};
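
/* An entry of -1 in ovs_key_lens (currently only OVS_KEY_ATTR_ENCAP) marks a
 * variable-length attribute; parse_flow_nlattrs() skips the exact-length check
 * for those. */
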
static int ipv4_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_len,
				  const struct nlattr *a[], u32 *attrs)
{
	const struct ovs_key_icmp *icmp_key;
	const struct ovs_key_tcp *tcp_key;
	const struct ovs_key_udp *udp_key;

	switch (swkey->ip.proto) {
	case IPPROTO_TCP:
		if (!(*attrs & (1 << OVS_KEY_ATTR_TCP)))
			return -EINVAL;
		*attrs &= ~(1 << OVS_KEY_ATTR_TCP);

		*key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
		tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
		swkey->ipv4.tp.src = tcp_key->tcp_src;
		swkey->ipv4.tp.dst = tcp_key->tcp_dst;
		break;

	case IPPROTO_UDP:
		if (!(*attrs & (1 << OVS_KEY_ATTR_UDP)))
			return -EINVAL;
		*attrs &= ~(1 << OVS_KEY_ATTR_UDP);

		*key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
		udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
		swkey->ipv4.tp.src = udp_key->udp_src;
		swkey->ipv4.tp.dst = udp_key->udp_dst;
		break;

	case IPPROTO_ICMP:
		if (!(*attrs & (1 << OVS_KEY_ATTR_ICMP)))
			return -EINVAL;
		*attrs &= ~(1 << OVS_KEY_ATTR_ICMP);

		*key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
		icmp_key = nla_data(a[OVS_KEY_ATTR_ICMP]);
		swkey->ipv4.tp.src = htons(icmp_key->icmp_type);
		swkey->ipv4.tp.dst = htons(icmp_key->icmp_code);
		break;
	}

	return 0;
}

static int ipv6_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_len,
				  const struct nlattr *a[], u32 *attrs)
{
	const struct ovs_key_icmpv6 *icmpv6_key;
	const struct ovs_key_tcp *tcp_key;
	const struct ovs_key_udp *udp_key;

	switch (swkey->ip.proto) {
	case IPPROTO_TCP:
		if (!(*attrs & (1 << OVS_KEY_ATTR_TCP)))
			return -EINVAL;
		*attrs &= ~(1 << OVS_KEY_ATTR_TCP);

		*key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
		tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
		swkey->ipv6.tp.src = tcp_key->tcp_src;
		swkey->ipv6.tp.dst = tcp_key->tcp_dst;
		break;

	case IPPROTO_UDP:
		if (!(*attrs & (1 << OVS_KEY_ATTR_UDP)))
			return -EINVAL;
		*attrs &= ~(1 << OVS_KEY_ATTR_UDP);

		*key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
		udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
		swkey->ipv6.tp.src = udp_key->udp_src;
		swkey->ipv6.tp.dst = udp_key->udp_dst;
		break;

	case IPPROTO_ICMPV6:
		if (!(*attrs & (1 << OVS_KEY_ATTR_ICMPV6)))
			return -EINVAL;
		*attrs &= ~(1 << OVS_KEY_ATTR_ICMPV6);

		*key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
		icmpv6_key = nla_data(a[OVS_KEY_ATTR_ICMPV6]);
		swkey->ipv6.tp.src = htons(icmpv6_key->icmpv6_type);
		swkey->ipv6.tp.dst = htons(icmpv6_key->icmpv6_code);

		if (swkey->ipv6.tp.src == htons(NDISC_NEIGHBOUR_SOLICITATION) ||
		    swkey->ipv6.tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) {
			const struct ovs_key_nd *nd_key;

			if (!(*attrs & (1 << OVS_KEY_ATTR_ND)))
				return -EINVAL;
			*attrs &= ~(1 << OVS_KEY_ATTR_ND);

			*key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);
			nd_key = nla_data(a[OVS_KEY_ATTR_ND]);
			memcpy(&swkey->ipv6.nd.target, nd_key->nd_target,
			       sizeof(swkey->ipv6.nd.target));
			memcpy(swkey->ipv6.nd.sll, nd_key->nd_sll, ETH_ALEN);
			memcpy(swkey->ipv6.nd.tll, nd_key->nd_tll, ETH_ALEN);
		}
		break;
	}

	return 0;
}
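
/* Walk the nested %OVS_KEY_ATTR_* attributes, recording each one in a[] and
 * setting the corresponding bit in *attrsp.  Duplicate attributes, unknown
 * types, and wrong lengths for fixed-size attributes are rejected; callers
 * clear bits as they consume attributes and treat any leftover bits as an
 * error. */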
static int parse_flow_nlattrs(const struct nlattr *attr,
			      const struct nlattr *a[], u32 *attrsp)
{
	const struct nlattr *nla;
	u32 attrs;
	int rem;

	attrs = 0;
	nla_for_each_nested(nla, attr, rem) {
		u16 type = nla_type(nla);
		int expected_len;

		if (type > OVS_KEY_ATTR_MAX || attrs & (1 << type))
			return -EINVAL;

		expected_len = ovs_key_lens[type];
		if (nla_len(nla) != expected_len && expected_len != -1)
			return -EINVAL;

		attrs |= 1 << type;
		a[type] = nla;
	}
	if (rem)
		return -EINVAL;

	*attrsp = attrs;
	return 0;
}

/**
 * ovs_flow_from_nlattrs - parses Netlink attributes into a flow key.
 * @swkey: receives the extracted flow key.
 * @key_lenp: number of bytes used in @swkey.
 * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
 * sequence.
 */
int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
			  const struct nlattr *attr)
{
	const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
	const struct ovs_key_ethernet *eth_key;
	int key_len;
	u32 attrs;
	int err;

	memset(swkey, 0, sizeof(struct sw_flow_key));
	key_len = SW_FLOW_KEY_OFFSET(eth);

	err = parse_flow_nlattrs(attr, a, &attrs);
	if (err)
		return err;

	/* Metadata attributes. */
	if (attrs & (1 << OVS_KEY_ATTR_PRIORITY)) {
		swkey->phy.priority = nla_get_u32(a[OVS_KEY_ATTR_PRIORITY]);
		attrs &= ~(1 << OVS_KEY_ATTR_PRIORITY);
	}
	if (attrs & (1 << OVS_KEY_ATTR_IN_PORT)) {
		u32 in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]);
		if (in_port >= DP_MAX_PORTS)
			return -EINVAL;
		swkey->phy.in_port = in_port;
		attrs &= ~(1 << OVS_KEY_ATTR_IN_PORT);
	} else {
		swkey->phy.in_port = DP_MAX_PORTS;
	}
	if (attrs & (1 << OVS_KEY_ATTR_SKB_MARK)) {
		swkey->phy.skb_mark = nla_get_u32(a[OVS_KEY_ATTR_SKB_MARK]);
		attrs &= ~(1 << OVS_KEY_ATTR_SKB_MARK);
	}

	/* Data attributes. */
	if (!(attrs & (1 << OVS_KEY_ATTR_ETHERNET)))
		return -EINVAL;
	attrs &= ~(1 << OVS_KEY_ATTR_ETHERNET);

	eth_key = nla_data(a[OVS_KEY_ATTR_ETHERNET]);
	memcpy(swkey->eth.src, eth_key->eth_src, ETH_ALEN);
	memcpy(swkey->eth.dst, eth_key->eth_dst, ETH_ALEN);

	if (attrs & (1u << OVS_KEY_ATTR_ETHERTYPE) &&
	    nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]) == htons(ETH_P_8021Q)) {
		const struct nlattr *encap;
		__be16 tci;

		if (attrs != ((1 << OVS_KEY_ATTR_VLAN) |
			      (1 << OVS_KEY_ATTR_ETHERTYPE) |
			      (1 << OVS_KEY_ATTR_ENCAP)))
			return -EINVAL;

		encap = a[OVS_KEY_ATTR_ENCAP];
		tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
		if (tci & htons(VLAN_TAG_PRESENT)) {
			swkey->eth.tci = tci;

			err = parse_flow_nlattrs(encap, a, &attrs);
			if (err)
				return err;
		} else if (!tci) {
			/* Corner case for truncated 802.1Q header. */
			if (nla_len(encap))
				return -EINVAL;

			swkey->eth.type = htons(ETH_P_8021Q);
			*key_lenp = key_len;
			return 0;
		} else {
			return -EINVAL;
		}
	}

	if (attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) {
		swkey->eth.type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
		if (ntohs(swkey->eth.type) < 1536)
			return -EINVAL;
		attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
	} else {
		swkey->eth.type = htons(ETH_P_802_2);
	}

	if (swkey->eth.type == htons(ETH_P_IP)) {
		const struct ovs_key_ipv4 *ipv4_key;

		if (!(attrs & (1 << OVS_KEY_ATTR_IPV4)))
			return -EINVAL;
		attrs &= ~(1 << OVS_KEY_ATTR_IPV4);

		key_len = SW_FLOW_KEY_OFFSET(ipv4.addr);
		ipv4_key = nla_data(a[OVS_KEY_ATTR_IPV4]);
		if (ipv4_key->ipv4_frag > OVS_FRAG_TYPE_MAX)
			return -EINVAL;
		swkey->ip.proto = ipv4_key->ipv4_proto;
		swkey->ip.tos = ipv4_key->ipv4_tos;
		swkey->ip.ttl = ipv4_key->ipv4_ttl;
		swkey->ip.frag = ipv4_key->ipv4_frag;
		swkey->ipv4.addr.src = ipv4_key->ipv4_src;
		swkey->ipv4.addr.dst = ipv4_key->ipv4_dst;

		if (swkey->ip.frag != OVS_FRAG_TYPE_LATER) {
			err = ipv4_flow_from_nlattrs(swkey, &key_len, a, &attrs);
			if (err)
				return err;
		}
	} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
		const struct ovs_key_ipv6 *ipv6_key;

		if (!(attrs & (1 << OVS_KEY_ATTR_IPV6)))
			return -EINVAL;
		attrs &= ~(1 << OVS_KEY_ATTR_IPV6);

		key_len = SW_FLOW_KEY_OFFSET(ipv6.label);
		ipv6_key = nla_data(a[OVS_KEY_ATTR_IPV6]);
		if (ipv6_key->ipv6_frag > OVS_FRAG_TYPE_MAX)
			return -EINVAL;
		swkey->ipv6.label = ipv6_key->ipv6_label;
		swkey->ip.proto = ipv6_key->ipv6_proto;
		swkey->ip.tos = ipv6_key->ipv6_tclass;
		swkey->ip.ttl = ipv6_key->ipv6_hlimit;
		swkey->ip.frag = ipv6_key->ipv6_frag;
		memcpy(&swkey->ipv6.addr.src, ipv6_key->ipv6_src,
		       sizeof(swkey->ipv6.addr.src));
		memcpy(&swkey->ipv6.addr.dst, ipv6_key->ipv6_dst,
		       sizeof(swkey->ipv6.addr.dst));

		if (swkey->ip.frag != OVS_FRAG_TYPE_LATER) {
			err = ipv6_flow_from_nlattrs(swkey, &key_len, a, &attrs);
			if (err)
				return err;
		}
	} else if (swkey->eth.type == htons(ETH_P_ARP) ||
		   swkey->eth.type == htons(ETH_P_RARP)) {
		const struct ovs_key_arp *arp_key;

		if (!(attrs & (1 << OVS_KEY_ATTR_ARP)))
			return -EINVAL;
		attrs &= ~(1 << OVS_KEY_ATTR_ARP);

		key_len = SW_FLOW_KEY_OFFSET(ipv4.arp);
		arp_key = nla_data(a[OVS_KEY_ATTR_ARP]);
		swkey->ipv4.addr.src = arp_key->arp_sip;
		swkey->ipv4.addr.dst = arp_key->arp_tip;
		if (arp_key->arp_op & htons(0xff00))
			return -EINVAL;
		swkey->ip.proto = ntohs(arp_key->arp_op);
		memcpy(swkey->ipv4.arp.sha, arp_key->arp_sha, ETH_ALEN);
		memcpy(swkey->ipv4.arp.tha, arp_key->arp_tha, ETH_ALEN);
	}

	if (attrs)
		return -EINVAL;
	*key_lenp = key_len;

	return 0;
}

/**
 * ovs_flow_metadata_from_nlattrs - parses Netlink attributes into a flow key.
 * @priority: receives the skb priority
 * @mark: receives the skb mark
 * @in_port: receives the extracted input port.
 * @key: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
 * sequence.
 *
 * This parses a series of Netlink attributes that form a flow key, which must
 * take the same form accepted by flow_from_nlattrs(), but only enough of it to
 * get the metadata, that is, the parts of the flow key that cannot be
 * extracted from the packet itself.
 */
int ovs_flow_metadata_from_nlattrs(u32 *priority, u32 *mark, u16 *in_port,
				   const struct nlattr *attr)
{
	const struct nlattr *nla;
	int rem;

	*in_port = DP_MAX_PORTS;
	*priority = 0;
	*mark = 0;

	nla_for_each_nested(nla, attr, rem) {
		int type = nla_type(nla);

		if (type <= OVS_KEY_ATTR_MAX && ovs_key_lens[type] > 0) {
			if (nla_len(nla) != ovs_key_lens[type])
				return -EINVAL;

			switch (type) {
			case OVS_KEY_ATTR_PRIORITY:
				*priority = nla_get_u32(nla);
				break;

			case OVS_KEY_ATTR_IN_PORT:
				if (nla_get_u32(nla) >= DP_MAX_PORTS)
					return -EINVAL;
				*in_port = nla_get_u32(nla);
				break;

			case OVS_KEY_ATTR_SKB_MARK:
				*mark = nla_get_u32(nla);
				break;
			}
		}
	}
	if (rem)
		return -EINVAL;
	return 0;
}

int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
{
	struct ovs_key_ethernet *eth_key;
	struct nlattr *nla, *encap;

	if (swkey->phy.priority &&
	    nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, swkey->phy.priority))
		goto nla_put_failure;

	if (swkey->phy.in_port != DP_MAX_PORTS &&
	    nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port))
		goto nla_put_failure;

	if (swkey->phy.skb_mark &&
	    nla_put_u32(skb, OVS_KEY_ATTR_SKB_MARK, swkey->phy.skb_mark))
		goto nla_put_failure;

	nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key));
	if (!nla)
		goto nla_put_failure;
	eth_key = nla_data(nla);
	memcpy(eth_key->eth_src, swkey->eth.src, ETH_ALEN);
	memcpy(eth_key->eth_dst, swkey->eth.dst, ETH_ALEN);

	if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) {
		if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, htons(ETH_P_8021Q)) ||
		    nla_put_be16(skb, OVS_KEY_ATTR_VLAN, swkey->eth.tci))
			goto nla_put_failure;
		encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP);
		if (!swkey->eth.tci)
			goto unencap;
	} else {
		encap = NULL;
	}

	if (swkey->eth.type == htons(ETH_P_802_2))
		goto unencap;

	if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, swkey->eth.type))
		goto nla_put_failure;

	if (swkey->eth.type == htons(ETH_P_IP)) {
		struct ovs_key_ipv4 *ipv4_key;

		nla = nla_reserve(skb, OVS_KEY_ATTR_IPV4, sizeof(*ipv4_key));
		if (!nla)
			goto nla_put_failure;
		ipv4_key = nla_data(nla);
		ipv4_key->ipv4_src = swkey->ipv4.addr.src;
		ipv4_key->ipv4_dst = swkey->ipv4.addr.dst;
		ipv4_key->ipv4_proto = swkey->ip.proto;
		ipv4_key->ipv4_tos = swkey->ip.tos;
		ipv4_key->ipv4_ttl = swkey->ip.ttl;
		ipv4_key->ipv4_frag = swkey->ip.frag;
	} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
		struct ovs_key_ipv6 *ipv6_key;

		nla = nla_reserve(skb, OVS_KEY_ATTR_IPV6, sizeof(*ipv6_key));
		if (!nla)
			goto nla_put_failure;
		ipv6_key = nla_data(nla);
		memcpy(ipv6_key->ipv6_src, &swkey->ipv6.addr.src,
				sizeof(ipv6_key->ipv6_src));
		memcpy(ipv6_key->ipv6_dst, &swkey->ipv6.addr.dst,
				sizeof(ipv6_key->ipv6_dst));
		ipv6_key->ipv6_label = swkey->ipv6.label;
		ipv6_key->ipv6_proto = swkey->ip.proto;
		ipv6_key->ipv6_tclass = swkey->ip.tos;
		ipv6_key->ipv6_hlimit = swkey->ip.ttl;
		ipv6_key->ipv6_frag = swkey->ip.frag;
	} else if (swkey->eth.type == htons(ETH_P_ARP) ||
		   swkey->eth.type == htons(ETH_P_RARP)) {
		struct ovs_key_arp *arp_key;

		nla = nla_reserve(skb, OVS_KEY_ATTR_ARP, sizeof(*arp_key));
		if (!nla)
			goto nla_put_failure;
		arp_key = nla_data(nla);
		memset(arp_key, 0, sizeof(struct ovs_key_arp));
		arp_key->arp_sip = swkey->ipv4.addr.src;
		arp_key->arp_tip = swkey->ipv4.addr.dst;
		arp_key->arp_op = htons(swkey->ip.proto);
		memcpy(arp_key->arp_sha, swkey->ipv4.arp.sha, ETH_ALEN);
		memcpy(arp_key->arp_tha, swkey->ipv4.arp.tha, ETH_ALEN);
	}

	if ((swkey->eth.type == htons(ETH_P_IP) ||
	     swkey->eth.type == htons(ETH_P_IPV6)) &&
	     swkey->ip.frag != OVS_FRAG_TYPE_LATER) {

		if (swkey->ip.proto == IPPROTO_TCP) {
			struct ovs_key_tcp *tcp_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_TCP, sizeof(*tcp_key));
			if (!nla)
				goto nla_put_failure;
			tcp_key = nla_data(nla);
			if (swkey->eth.type == htons(ETH_P_IP)) {
				tcp_key->tcp_src = swkey->ipv4.tp.src;
				tcp_key->tcp_dst = swkey->ipv4.tp.dst;
			} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
				tcp_key->tcp_src = swkey->ipv6.tp.src;
				tcp_key->tcp_dst = swkey->ipv6.tp.dst;
			}
		} else if (swkey->ip.proto == IPPROTO_UDP) {
			struct ovs_key_udp *udp_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_UDP, sizeof(*udp_key));
			if (!nla)
				goto nla_put_failure;
			udp_key = nla_data(nla);
			if (swkey->eth.type == htons(ETH_P_IP)) {
				udp_key->udp_src = swkey->ipv4.tp.src;
				udp_key->udp_dst = swkey->ipv4.tp.dst;
			} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
				udp_key->udp_src = swkey->ipv6.tp.src;
				udp_key->udp_dst = swkey->ipv6.tp.dst;
			}
		} else if (swkey->eth.type == htons(ETH_P_IP) &&
			   swkey->ip.proto == IPPROTO_ICMP) {
			struct ovs_key_icmp *icmp_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_ICMP, sizeof(*icmp_key));
			if (!nla)
				goto nla_put_failure;
			icmp_key = nla_data(nla);
			icmp_key->icmp_type = ntohs(swkey->ipv4.tp.src);
			icmp_key->icmp_code = ntohs(swkey->ipv4.tp.dst);
		} else if (swkey->eth.type == htons(ETH_P_IPV6) &&
			   swkey->ip.proto == IPPROTO_ICMPV6) {
			struct ovs_key_icmpv6 *icmpv6_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_ICMPV6,
						sizeof(*icmpv6_key));
			if (!nla)
				goto nla_put_failure;
			icmpv6_key = nla_data(nla);
			icmpv6_key->icmpv6_type = ntohs(swkey->ipv6.tp.src);
			icmpv6_key->icmpv6_code = ntohs(swkey->ipv6.tp.dst);

			if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION ||
			    icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) {
				struct ovs_key_nd *nd_key;

				nla = nla_reserve(skb, OVS_KEY_ATTR_ND, sizeof(*nd_key));
				if (!nla)
					goto nla_put_failure;
				nd_key = nla_data(nla);
				memcpy(nd_key->nd_target, &swkey->ipv6.nd.target,
							sizeof(nd_key->nd_target));
				memcpy(nd_key->nd_sll, swkey->ipv6.nd.sll, ETH_ALEN);
				memcpy(nd_key->nd_tll, swkey->ipv6.nd.tll, ETH_ALEN);
			}
		}
	}

unencap:
	if (encap)
		nla_nest_end(skb, encap);

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

/* Initializes the flow module.
 * Returns zero if successful or a negative error code. */
int ovs_flow_init(void)
{
	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0,
					0, NULL);
	if (flow_cache == NULL)
		return -ENOMEM;

	return 0;
}

/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
	kmem_cache_destroy(flow_cache);
}