net/rocker: Remove the dead error handling
hw/net/rocker/rocker_of_dpa.c
1 /*
2 * QEMU rocker switch emulation - OF-DPA flow processing support
4 * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
17 #include "qemu/osdep.h"
18 #include "net/eth.h"
19 #include "qemu/iov.h"
20 #include "qemu/timer.h"
21 #include "qmp-commands.h"
23 #include "rocker.h"
24 #include "rocker_hw.h"
25 #include "rocker_fp.h"
26 #include "rocker_tlv.h"
27 #include "rocker_world.h"
28 #include "rocker_desc.h"
29 #include "rocker_of_dpa.h"
31 static const MACAddr zero_mac = { .a = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } };
32 static const MACAddr ff_mac = { .a = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff } };
34 typedef struct of_dpa {
35 World *world;
36 GHashTable *flow_tbl;
37 GHashTable *group_tbl;
38 unsigned int flow_tbl_max_size;
39 unsigned int group_tbl_max_size;
40 } OfDpa;
42 /* flow_key stolen mostly from OVS
44 * Note: fields that compare with network packet header fields
45 * are stored in network order (BE) to avoid per-packet field
46 * byte-swaps.
49 typedef struct of_dpa_flow_key {
50 uint32_t in_pport; /* ingress port */
51 uint32_t tunnel_id; /* overlay tunnel id */
52 uint32_t tbl_id; /* table id */
53 struct {
54 __be16 vlan_id; /* 0 if no VLAN */
55 MACAddr src; /* ethernet source address */
56 MACAddr dst; /* ethernet destination address */
57 __be16 type; /* ethernet frame type */
58 } eth;
59 struct {
60 uint8_t proto; /* IP protocol or ARP opcode */
61 uint8_t tos; /* IP ToS */
62 uint8_t ttl; /* IP TTL/hop limit */
63 uint8_t frag; /* one of FRAG_TYPE_* */
64 } ip;
65 union {
66 struct {
67 struct {
68 __be32 src; /* IP source address */
69 __be32 dst; /* IP destination address */
70 } addr;
71 union {
72 struct {
73 __be16 src; /* TCP/UDP/SCTP source port */
74 __be16 dst; /* TCP/UDP/SCTP destination port */
75 __be16 flags; /* TCP flags */
76 } tp;
77 struct {
78 MACAddr sha; /* ARP source hardware address */
79 MACAddr tha; /* ARP target hardware address */
80 } arp;
82 } ipv4;
83 struct {
84 struct {
85 Ipv6Addr src; /* IPv6 source address */
86 Ipv6Addr dst; /* IPv6 destination address */
87 } addr;
88 __be32 label; /* IPv6 flow label */
89 struct {
90 __be16 src; /* TCP/UDP/SCTP source port */
91 __be16 dst; /* TCP/UDP/SCTP destination port */
92 __be16 flags; /* TCP flags */
93 } tp;
94 struct {
95 Ipv6Addr target; /* ND target address */
96 MACAddr sll; /* ND source link layer address */
97 MACAddr tll; /* ND target link layer address */
98 } nd;
99 } ipv6;
101 int width; /* how many uint64_t's in key? */
102 } OfDpaFlowKey;
104 /* Width of key which includes field 'f' in u64s, rounded up */
105 #define FLOW_KEY_WIDTH(f) \
106 DIV_ROUND_UP(offsetof(OfDpaFlowKey, f) + sizeof(((OfDpaFlowKey *)0)->f), \
107 sizeof(uint64_t))
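/* A minimal sketch of what FLOW_KEY_WIDTH() expands to (QEMU's
 * DIV_ROUND_UP(n, d) is ((n) + (d) - 1) / (d)), using eth.dst as an
 * example:
 *
 *     size_t width = (offsetof(OfDpaFlowKey, eth.dst)
 *                     + sizeof(((OfDpaFlowKey *)0)->eth.dst)
 *                     + sizeof(uint64_t) - 1) / sizeof(uint64_t);
 *
 * i.e. FLOW_KEY_WIDTH(eth.dst) covers everything up to and including the
 * destination MAC, and the matcher compares exactly that many u64 words. */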
109 typedef struct of_dpa_flow_action {
110 uint32_t goto_tbl;
111 struct {
112 uint32_t group_id;
113 uint32_t tun_log_lport;
114 __be16 vlan_id;
115 } write;
116 struct {
117 __be16 new_vlan_id;
118 uint32_t out_pport;
119 uint8_t copy_to_cpu;
120 __be16 vlan_id;
121 } apply;
122 } OfDpaFlowAction;
124 typedef struct of_dpa_flow {
125 uint32_t lpm;
126 uint32_t priority;
127 uint32_t hardtime;
128 uint32_t idletime;
129 uint64_t cookie;
130 OfDpaFlowKey key;
131 OfDpaFlowKey mask;
132 OfDpaFlowAction action;
133 struct {
134 uint64_t hits;
135 int64_t install_time;
136 int64_t refresh_time;
137 uint64_t rx_pkts;
138 uint64_t tx_pkts;
139 } stats;
140 } OfDpaFlow;
142 typedef struct of_dpa_flow_pkt_fields {
143 uint32_t tunnel_id;
144 struct eth_header *ethhdr;
145 __be16 *h_proto;
146 struct vlan_header *vlanhdr;
147 struct ip_header *ipv4hdr;
148 struct ip6_header *ipv6hdr;
149 Ipv6Addr *ipv6_src_addr;
150 Ipv6Addr *ipv6_dst_addr;
151 } OfDpaFlowPktFields;
153 typedef struct of_dpa_flow_context {
154 uint32_t in_pport;
155 uint32_t tunnel_id;
156 struct iovec *iov;
157 int iovcnt;
158 struct eth_header ethhdr_rewrite;
159 struct vlan_header vlanhdr_rewrite;
160 struct vlan_header vlanhdr;
161 OfDpa *of_dpa;
162 OfDpaFlowPktFields fields;
163 OfDpaFlowAction action_set;
164 } OfDpaFlowContext;
166 typedef struct of_dpa_flow_match {
167 OfDpaFlowKey value;
168 OfDpaFlow *best;
169 } OfDpaFlowMatch;
171 typedef struct of_dpa_group {
172 uint32_t id;
173 union {
174 struct {
175 uint32_t out_pport;
176 uint8_t pop_vlan;
177 } l2_interface;
178 struct {
179 uint32_t group_id;
180 MACAddr src_mac;
181 MACAddr dst_mac;
182 __be16 vlan_id;
183 } l2_rewrite;
184 struct {
185 uint16_t group_count;
186 uint32_t *group_ids;
187 } l2_flood;
188 struct {
189 uint32_t group_id;
190 MACAddr src_mac;
191 MACAddr dst_mac;
192 __be16 vlan_id;
193 uint8_t ttl_check;
194 } l3_unicast;
196 } OfDpaGroup;
198 static int of_dpa_mask2prefix(__be32 mask)
200 int i;
201 int count = 32;
203 for (i = 0; i < 32; i++) {
204 if (!(ntohl(mask) & ((2 << i) - 1))) {
205 count--;
209 return count;
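/* Usage sketch: the result is 32 minus the number of trailing zero bits,
 * which equals the prefix length for contiguous netmasks:
 *
 *     of_dpa_mask2prefix(htonl(0xffffff00)) == 24
 *     of_dpa_mask2prefix(htonl(0xffffffff)) == 32
 *     of_dpa_mask2prefix(htonl(0x00000000)) == 0
 *
 * A non-contiguous mask is likewise reduced to its trailing-zero count,
 * so callers are expected to pass proper CIDR-style masks. */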
212 #if defined(DEBUG_ROCKER)
213 static void of_dpa_flow_key_dump(OfDpaFlowKey *key, OfDpaFlowKey *mask)
215 char buf[512], *b = buf, *mac;
217 b += sprintf(b, " tbl %2d", key->tbl_id);
219 if (key->in_pport || (mask && mask->in_pport)) {
220 b += sprintf(b, " in_pport %2d", key->in_pport);
221 if (mask && mask->in_pport != 0xffffffff) {
222 b += sprintf(b, "/0x%08x", mask->in_pport);
226 if (key->tunnel_id || (mask && mask->tunnel_id)) {
227 b += sprintf(b, " tun %8d", key->tunnel_id);
228 if (mask && mask->tunnel_id != 0xffffffff) {
229 b += sprintf(b, "/0x%08x", mask->tunnel_id);
233 if (key->eth.vlan_id || (mask && mask->eth.vlan_id)) {
234 b += sprintf(b, " vlan %4d", ntohs(key->eth.vlan_id));
235 if (mask && mask->eth.vlan_id != 0xffff) {
236 b += sprintf(b, "/0x%04x", ntohs(mask->eth.vlan_id));
240 if (memcmp(key->eth.src.a, zero_mac.a, ETH_ALEN) ||
241 (mask && memcmp(mask->eth.src.a, zero_mac.a, ETH_ALEN))) {
242 mac = qemu_mac_strdup_printf(key->eth.src.a);
243 b += sprintf(b, " src %s", mac);
244 g_free(mac);
245 if (mask && memcmp(mask->eth.src.a, ff_mac.a, ETH_ALEN)) {
246 mac = qemu_mac_strdup_printf(mask->eth.src.a);
247 b += sprintf(b, "/%s", mac);
248 g_free(mac);
252 if (memcmp(key->eth.dst.a, zero_mac.a, ETH_ALEN) ||
253 (mask && memcmp(mask->eth.dst.a, zero_mac.a, ETH_ALEN))) {
254 mac = qemu_mac_strdup_printf(key->eth.dst.a);
255 b += sprintf(b, " dst %s", mac);
256 g_free(mac);
257 if (mask && memcmp(mask->eth.dst.a, ff_mac.a, ETH_ALEN)) {
258 mac = qemu_mac_strdup_printf(mask->eth.dst.a);
259 b += sprintf(b, "/%s", mac);
260 g_free(mac);
264 if (key->eth.type || (mask && mask->eth.type)) {
265 b += sprintf(b, " type 0x%04x", ntohs(key->eth.type));
266 if (mask && mask->eth.type != 0xffff) {
267 b += sprintf(b, "/0x%04x", ntohs(mask->eth.type));
269 switch (ntohs(key->eth.type)) {
270 case 0x0800:
271 case 0x86dd:
272 if (key->ip.proto || (mask && mask->ip.proto)) {
273 b += sprintf(b, " ip proto %2d", key->ip.proto);
274 if (mask && mask->ip.proto != 0xff) {
275 b += sprintf(b, "/0x%02x", mask->ip.proto);
278 if (key->ip.tos || (mask && mask->ip.tos)) {
279 b += sprintf(b, " ip tos %2d", key->ip.tos);
280 if (mask && mask->ip.tos != 0xff) {
281 b += sprintf(b, "/0x%02x", mask->ip.tos);
284 break;
286 switch (ntohs(key->eth.type)) {
287 case 0x0800:
288 if (key->ipv4.addr.dst || (mask && mask->ipv4.addr.dst)) {
289 b += sprintf(b, " dst %s",
290 inet_ntoa(*(struct in_addr *)&key->ipv4.addr.dst));
291 if (mask) {
292 b += sprintf(b, "/%d",
293 of_dpa_mask2prefix(mask->ipv4.addr.dst));
296 break;
300 DPRINTF("%s\n", buf);
302 #else
303 #define of_dpa_flow_key_dump(k, m)
304 #endif
306 static void _of_dpa_flow_match(void *key, void *value, void *user_data)
308 OfDpaFlow *flow = value;
309 OfDpaFlowMatch *match = user_data;
310 uint64_t *k = (uint64_t *)&flow->key;
311 uint64_t *m = (uint64_t *)&flow->mask;
312 uint64_t *v = (uint64_t *)&match->value;
313 int i;
315 if (flow->key.tbl_id == match->value.tbl_id) {
316 of_dpa_flow_key_dump(&flow->key, &flow->mask);
319 if (flow->key.width > match->value.width) {
320 return;
323 for (i = 0; i < flow->key.width; i++, k++, m++, v++) {
324 if ((~*k & *m & *v) | (*k & *m & ~*v)) {
325 return;
329 DPRINTF("match\n");
331 if (!match->best ||
332 flow->priority > match->best->priority ||
333 flow->lpm > match->best->lpm) {
334 match->best = flow;
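/* The per-word test in the loop above is the usual masked compare; for
 * each word it is equivalent to
 *
 *     ((*k ^ *v) & *m) != 0
 *
 * i.e. the flow is rejected as soon as the key and the packet-derived
 * value differ in any bit covered by the mask.  Among surviving flows,
 * ties are then broken by priority and longest-prefix (lpm). */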
338 static OfDpaFlow *of_dpa_flow_match(OfDpa *of_dpa, OfDpaFlowMatch *match)
340 DPRINTF("\nnew search\n");
341 of_dpa_flow_key_dump(&match->value, NULL);
343 g_hash_table_foreach(of_dpa->flow_tbl, _of_dpa_flow_match, match);
345 return match->best;
348 static OfDpaFlow *of_dpa_flow_find(OfDpa *of_dpa, uint64_t cookie)
350 return g_hash_table_lookup(of_dpa->flow_tbl, &cookie);
353 static int of_dpa_flow_add(OfDpa *of_dpa, OfDpaFlow *flow)
355 g_hash_table_insert(of_dpa->flow_tbl, &flow->cookie, flow);
357 return ROCKER_OK;
360 static void of_dpa_flow_del(OfDpa *of_dpa, OfDpaFlow *flow)
362 g_hash_table_remove(of_dpa->flow_tbl, &flow->cookie);
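/* The table key is &flow->cookie, i.e. it points into the value struct
 * itself.  A plausible construction of flow_tbl (the init code is not
 * part of this section) would be:
 *
 *     of_dpa->flow_tbl = g_hash_table_new_full(g_int64_hash, g_int64_equal,
 *                                              NULL, g_free);
 *
 * with no separate key destructor, since freeing the OfDpaFlow releases
 * the embedded cookie along with it. */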
365 static OfDpaFlow *of_dpa_flow_alloc(uint64_t cookie)
367 OfDpaFlow *flow;
368 int64_t now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) / 1000;
370 flow = g_new0(OfDpaFlow, 1);
372 flow->cookie = cookie;
373 flow->mask.tbl_id = 0xffffffff;
375 flow->stats.install_time = flow->stats.refresh_time = now;
377 return flow;
380 static void of_dpa_flow_pkt_hdr_reset(OfDpaFlowContext *fc)
382 OfDpaFlowPktFields *fields = &fc->fields;
384 fc->iov[0].iov_base = fields->ethhdr;
385 fc->iov[0].iov_len = sizeof(struct eth_header);
386 fc->iov[1].iov_base = fields->vlanhdr;
387 fc->iov[1].iov_len = fields->vlanhdr ? sizeof(struct vlan_header) : 0;
390 static void of_dpa_flow_pkt_parse(OfDpaFlowContext *fc,
391 const struct iovec *iov, int iovcnt)
393 OfDpaFlowPktFields *fields = &fc->fields;
394 size_t sofar = 0;
395 int i;
397 sofar += sizeof(struct eth_header);
398 if (iov->iov_len < sofar) {
399 DPRINTF("flow_pkt_parse underrun on eth_header\n");
400 return;
403 fields->ethhdr = iov->iov_base;
404 fields->h_proto = &fields->ethhdr->h_proto;
406 if (ntohs(*fields->h_proto) == ETH_P_VLAN) {
407 sofar += sizeof(struct vlan_header);
408 if (iov->iov_len < sofar) {
409 DPRINTF("flow_pkt_parse underrun on vlan_header\n");
410 return;
412 fields->vlanhdr = (struct vlan_header *)(fields->ethhdr + 1);
413 fields->h_proto = &fields->vlanhdr->h_proto;
416 switch (ntohs(*fields->h_proto)) {
417 case ETH_P_IP:
418 sofar += sizeof(struct ip_header);
419 if (iov->iov_len < sofar) {
420 DPRINTF("flow_pkt_parse underrun on ip_header\n");
421 return;
423 fields->ipv4hdr = (struct ip_header *)(fields->h_proto + 1);
424 break;
425 case ETH_P_IPV6:
426 sofar += sizeof(struct ip6_header);
427 if (iov->iov_len < sofar) {
428 DPRINTF("flow_pkt_parse underrun on ip6_header\n");
429 return;
431 fields->ipv6hdr = (struct ip6_header *)(fields->h_proto + 1);
432 break;
435 /* To facilitate (potential) VLAN tag insertion, make a
436 * copy of the iov and insert two new vectors at the
437 * beginning for eth hdr and vlan hdr. No data is copied,
438 * just the vectors.
441 of_dpa_flow_pkt_hdr_reset(fc);
443 fc->iov[2].iov_base = fields->h_proto + 1;
444 fc->iov[2].iov_len = iov->iov_len - fc->iov[0].iov_len - fc->iov[1].iov_len;
446 for (i = 1; i < iovcnt; i++) {
447 fc->iov[i+2] = iov[i];
450 fc->iovcnt = iovcnt + 2;
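/* Resulting iovec layout, which lets a tag be inserted or removed later
 * without copying any payload bytes:
 *
 *     fc->iov[0]  -> eth header                (always present)
 *     fc->iov[1]  -> vlan header               (iov_len 0 if untagged)
 *     fc->iov[2]  -> rest of caller's iov[0]   (after the parsed headers)
 *     fc->iov[3+] -> caller's iov[1..] copied verbatim
 */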
453 static void of_dpa_flow_pkt_insert_vlan(OfDpaFlowContext *fc, __be16 vlan_id)
455 OfDpaFlowPktFields *fields = &fc->fields;
456 uint16_t h_proto = fields->ethhdr->h_proto;
458 if (fields->vlanhdr) {
459 DPRINTF("flow_pkt_insert_vlan packet already has vlan\n");
460 return;
463 fields->ethhdr->h_proto = htons(ETH_P_VLAN);
464 fields->vlanhdr = &fc->vlanhdr;
465 fields->vlanhdr->h_tci = vlan_id;
466 fields->vlanhdr->h_proto = h_proto;
467 fields->h_proto = &fields->vlanhdr->h_proto;
469 fc->iov[1].iov_base = fields->vlanhdr;
470 fc->iov[1].iov_len = sizeof(struct vlan_header);
473 static void of_dpa_flow_pkt_strip_vlan(OfDpaFlowContext *fc)
475 OfDpaFlowPktFields *fields = &fc->fields;
477 if (!fields->vlanhdr) {
478 return;
481 fc->iov[0].iov_len -= sizeof(fields->ethhdr->h_proto);
482 fc->iov[1].iov_base = fields->h_proto;
483 fc->iov[1].iov_len = sizeof(fields->ethhdr->h_proto);
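/* Stripping avoids any memmove: iov[0] is shortened so it ends just
 * before the outer ethertype (ETH_P_VLAN), and iov[1] is repointed at the
 * inner ethertype that followed the tag.  The 4-byte 802.1Q header simply
 * drops out of the gather list. */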
486 static void of_dpa_flow_pkt_hdr_rewrite(OfDpaFlowContext *fc,
487 uint8_t *src_mac, uint8_t *dst_mac,
488 __be16 vlan_id)
490 OfDpaFlowPktFields *fields = &fc->fields;
492 if (src_mac || dst_mac) {
493 memcpy(&fc->ethhdr_rewrite, fields->ethhdr, sizeof(struct eth_header));
494 if (src_mac && memcmp(src_mac, zero_mac.a, ETH_ALEN)) {
495 memcpy(fc->ethhdr_rewrite.h_source, src_mac, ETH_ALEN);
497 if (dst_mac && memcmp(dst_mac, zero_mac.a, ETH_ALEN)) {
498 memcpy(fc->ethhdr_rewrite.h_dest, dst_mac, ETH_ALEN);
500 fc->iov[0].iov_base = &fc->ethhdr_rewrite;
503 if (vlan_id && fields->vlanhdr) {
504 fc->vlanhdr_rewrite = fc->vlanhdr;
505 fc->vlanhdr_rewrite.h_tci = vlan_id;
506 fc->iov[1].iov_base = &fc->vlanhdr_rewrite;
510 static void of_dpa_flow_ig_tbl(OfDpaFlowContext *fc, uint32_t tbl_id);
512 static void of_dpa_ig_port_build_match(OfDpaFlowContext *fc,
513 OfDpaFlowMatch *match)
515 match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
516 match->value.in_pport = fc->in_pport;
517 match->value.width = FLOW_KEY_WIDTH(tbl_id);
520 static void of_dpa_ig_port_miss(OfDpaFlowContext *fc)
522 uint32_t port;
524 /* The default on miss is for packets from physical ports
525 * to go to the VLAN Flow Table. There is no default rule
526 * for packets from logical ports, which are dropped on miss.
529 if (fp_port_from_pport(fc->in_pport, &port)) {
530 of_dpa_flow_ig_tbl(fc, ROCKER_OF_DPA_TABLE_ID_VLAN);
534 static void of_dpa_vlan_build_match(OfDpaFlowContext *fc,
535 OfDpaFlowMatch *match)
537 match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
538 match->value.in_pport = fc->in_pport;
539 if (fc->fields.vlanhdr) {
540 match->value.eth.vlan_id = fc->fields.vlanhdr->h_tci;
542 match->value.width = FLOW_KEY_WIDTH(eth.vlan_id);
545 static void of_dpa_vlan_insert(OfDpaFlowContext *fc,
546 OfDpaFlow *flow)
548 if (flow->action.apply.new_vlan_id) {
549 of_dpa_flow_pkt_insert_vlan(fc, flow->action.apply.new_vlan_id);
553 static void of_dpa_term_mac_build_match(OfDpaFlowContext *fc,
554 OfDpaFlowMatch *match)
556 match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
557 match->value.in_pport = fc->in_pport;
558 match->value.eth.type = *fc->fields.h_proto;
559 match->value.eth.vlan_id = fc->fields.vlanhdr->h_tci;
560 memcpy(match->value.eth.dst.a, fc->fields.ethhdr->h_dest,
561 sizeof(match->value.eth.dst.a));
562 match->value.width = FLOW_KEY_WIDTH(eth.type);
565 static void of_dpa_term_mac_miss(OfDpaFlowContext *fc)
567 of_dpa_flow_ig_tbl(fc, ROCKER_OF_DPA_TABLE_ID_BRIDGING);
570 static void of_dpa_apply_actions(OfDpaFlowContext *fc,
571 OfDpaFlow *flow)
573 fc->action_set.apply.copy_to_cpu = flow->action.apply.copy_to_cpu;
574 fc->action_set.apply.vlan_id = flow->key.eth.vlan_id;
577 static void of_dpa_bridging_build_match(OfDpaFlowContext *fc,
578 OfDpaFlowMatch *match)
580 match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
581 if (fc->fields.vlanhdr) {
582 match->value.eth.vlan_id = fc->fields.vlanhdr->h_tci;
583 } else if (fc->tunnel_id) {
584 match->value.tunnel_id = fc->tunnel_id;
586 memcpy(match->value.eth.dst.a, fc->fields.ethhdr->h_dest,
587 sizeof(match->value.eth.dst.a));
588 match->value.width = FLOW_KEY_WIDTH(eth.dst);
591 static void of_dpa_bridging_learn(OfDpaFlowContext *fc,
592 OfDpaFlow *dst_flow)
594 OfDpaFlowMatch match = { { 0, }, };
595 OfDpaFlow *flow;
596 uint8_t *addr;
597 uint16_t vlan_id;
598 int64_t now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) / 1000;
599 int64_t refresh_delay = 1;
601 /* Do a lookup in bridge table by src_mac/vlan */
603 addr = fc->fields.ethhdr->h_source;
604 vlan_id = fc->fields.vlanhdr->h_tci;
606 match.value.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
607 match.value.eth.vlan_id = vlan_id;
608 memcpy(match.value.eth.dst.a, addr, sizeof(match.value.eth.dst.a));
609 match.value.width = FLOW_KEY_WIDTH(eth.dst);
611 flow = of_dpa_flow_match(fc->of_dpa, &match);
612 if (flow) {
613 if (!memcmp(flow->mask.eth.dst.a, ff_mac.a,
614 sizeof(flow->mask.eth.dst.a))) {
615 /* src_mac/vlan already learned; if in_port and out_port
616 * don't match, the end station has moved and the port
617 * needs updating */
618 /* XXX implement the in_port/out_port check */
619 if (now - flow->stats.refresh_time < refresh_delay) {
620 return;
622 flow->stats.refresh_time = now;
626 /* Let driver know about mac/vlan. This may be a new mac/vlan
627 * or a refresh of existing mac/vlan that's been hit after the
628 * refresh_delay.
631 rocker_event_mac_vlan_seen(world_rocker(fc->of_dpa->world),
632 fc->in_pport, addr, vlan_id);
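/* Note the lookup above deliberately places the packet's *source* MAC in
 * the key's eth.dst field: learned stations live in the bridging table as
 * destination entries, so "have we already learned this sender?" is
 * answered by probing for it as a destination. */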
635 static void of_dpa_bridging_miss(OfDpaFlowContext *fc)
637 of_dpa_bridging_learn(fc, NULL);
638 of_dpa_flow_ig_tbl(fc, ROCKER_OF_DPA_TABLE_ID_ACL_POLICY);
641 static void of_dpa_bridging_action_write(OfDpaFlowContext *fc,
642 OfDpaFlow *flow)
644 if (flow->action.write.group_id != ROCKER_GROUP_NONE) {
645 fc->action_set.write.group_id = flow->action.write.group_id;
647 fc->action_set.write.tun_log_lport = flow->action.write.tun_log_lport;
650 static void of_dpa_unicast_routing_build_match(OfDpaFlowContext *fc,
651 OfDpaFlowMatch *match)
653 match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
654 match->value.eth.type = *fc->fields.h_proto;
655 if (fc->fields.ipv4hdr) {
656 match->value.ipv4.addr.dst = fc->fields.ipv4hdr->ip_dst;
658 if (fc->fields.ipv6_dst_addr) {
659 memcpy(&match->value.ipv6.addr.dst, fc->fields.ipv6_dst_addr,
660 sizeof(match->value.ipv6.addr.dst));
662 match->value.width = FLOW_KEY_WIDTH(ipv6.addr.dst);
665 static void of_dpa_unicast_routing_miss(OfDpaFlowContext *fc)
667 of_dpa_flow_ig_tbl(fc, ROCKER_OF_DPA_TABLE_ID_ACL_POLICY);
670 static void of_dpa_unicast_routing_action_write(OfDpaFlowContext *fc,
671 OfDpaFlow *flow)
673 if (flow->action.write.group_id != ROCKER_GROUP_NONE) {
674 fc->action_set.write.group_id = flow->action.write.group_id;
678 static void
679 of_dpa_multicast_routing_build_match(OfDpaFlowContext *fc,
680 OfDpaFlowMatch *match)
682 match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
683 match->value.eth.type = *fc->fields.h_proto;
684 match->value.eth.vlan_id = fc->fields.vlanhdr->h_tci;
685 if (fc->fields.ipv4hdr) {
686 match->value.ipv4.addr.src = fc->fields.ipv4hdr->ip_src;
687 match->value.ipv4.addr.dst = fc->fields.ipv4hdr->ip_dst;
689 if (fc->fields.ipv6_src_addr) {
690 memcpy(&match->value.ipv6.addr.src, fc->fields.ipv6_src_addr,
691 sizeof(match->value.ipv6.addr.src));
693 if (fc->fields.ipv6_dst_addr) {
694 memcpy(&match->value.ipv6.addr.dst, fc->fields.ipv6_dst_addr,
695 sizeof(match->value.ipv6.addr.dst));
697 match->value.width = FLOW_KEY_WIDTH(ipv6.addr.dst);
700 static void of_dpa_multicast_routing_miss(OfDpaFlowContext *fc)
702 of_dpa_flow_ig_tbl(fc, ROCKER_OF_DPA_TABLE_ID_ACL_POLICY);
705 static void
706 of_dpa_multicast_routing_action_write(OfDpaFlowContext *fc,
707 OfDpaFlow *flow)
709 if (flow->action.write.group_id != ROCKER_GROUP_NONE) {
710 fc->action_set.write.group_id = flow->action.write.group_id;
712 fc->action_set.write.vlan_id = flow->action.write.vlan_id;
715 static void of_dpa_acl_build_match(OfDpaFlowContext *fc,
716 OfDpaFlowMatch *match)
718 match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
719 match->value.in_pport = fc->in_pport;
720 memcpy(match->value.eth.src.a, fc->fields.ethhdr->h_source,
721 sizeof(match->value.eth.src.a));
722 memcpy(match->value.eth.dst.a, fc->fields.ethhdr->h_dest,
723 sizeof(match->value.eth.dst.a));
724 match->value.eth.type = *fc->fields.h_proto;
725 match->value.eth.vlan_id = fc->fields.vlanhdr->h_tci;
726 match->value.width = FLOW_KEY_WIDTH(eth.type);
727 if (fc->fields.ipv4hdr) {
728 match->value.ip.proto = fc->fields.ipv4hdr->ip_p;
729 match->value.ip.tos = fc->fields.ipv4hdr->ip_tos;
730 match->value.width = FLOW_KEY_WIDTH(ip.tos);
731 } else if (fc->fields.ipv6hdr) {
732 match->value.ip.proto =
733 fc->fields.ipv6hdr->ip6_ctlun.ip6_un1.ip6_un1_nxt;
734 match->value.ip.tos = 0; /* XXX what goes here? */
735 match->value.width = FLOW_KEY_WIDTH(ip.tos);
739 static void of_dpa_eg(OfDpaFlowContext *fc);
740 static void of_dpa_acl_hit(OfDpaFlowContext *fc,
741 OfDpaFlow *dst_flow)
743 of_dpa_eg(fc);
746 static void of_dpa_acl_action_write(OfDpaFlowContext *fc,
747 OfDpaFlow *flow)
749 if (flow->action.write.group_id != ROCKER_GROUP_NONE) {
750 fc->action_set.write.group_id = flow->action.write.group_id;
754 static void of_dpa_drop(OfDpaFlowContext *fc)
756 /* drop packet */
759 static OfDpaGroup *of_dpa_group_find(OfDpa *of_dpa,
760 uint32_t group_id)
762 return g_hash_table_lookup(of_dpa->group_tbl, &group_id);
765 static int of_dpa_group_add(OfDpa *of_dpa, OfDpaGroup *group)
767 g_hash_table_insert(of_dpa->group_tbl, &group->id, group);
769 return 0;
772 #if 0
773 static int of_dpa_group_mod(OfDpa *of_dpa, OfDpaGroup *group)
775 OfDpaGroup *old_group = of_dpa_group_find(of_dpa, group->id);
777 if (!old_group) {
778 return -ENOENT;
781 /* XXX */
783 return 0;
785 #endif
787 static int of_dpa_group_del(OfDpa *of_dpa, OfDpaGroup *group)
789 g_hash_table_remove(of_dpa->group_tbl, &group->id);
791 return 0;
794 #if 0
795 static int of_dpa_group_get_stats(OfDpa *of_dpa, uint32_t id)
797 OfDpaGroup *group = of_dpa_group_find(of_dpa, id);
799 if (!group) {
800 return -ENOENT;
803 /* XXX get/return stats */
805 return 0;
807 #endif
809 static OfDpaGroup *of_dpa_group_alloc(uint32_t id)
811 OfDpaGroup *group = g_new0(OfDpaGroup, 1);
813 group->id = id;
815 return group;
818 static void of_dpa_output_l2_interface(OfDpaFlowContext *fc,
819 OfDpaGroup *group)
821 uint8_t copy_to_cpu = fc->action_set.apply.copy_to_cpu;
823 if (group->l2_interface.pop_vlan) {
824 of_dpa_flow_pkt_strip_vlan(fc);
827 /* Note: By default, and as per the OpenFlow 1.3.1
828 * specification, a packet cannot be forwarded back
829 * to the IN_PORT from which it came in. An action
830 * bucket that specifies the particular packet's
831 * egress port is not evaluated.
834 if (group->l2_interface.out_pport == 0) {
835 rx_produce(fc->of_dpa->world, fc->in_pport, fc->iov, fc->iovcnt,
836 copy_to_cpu);
837 } else if (group->l2_interface.out_pport != fc->in_pport) {
838 rocker_port_eg(world_rocker(fc->of_dpa->world),
839 group->l2_interface.out_pport,
840 fc->iov, fc->iovcnt);
844 static void of_dpa_output_l2_rewrite(OfDpaFlowContext *fc,
845 OfDpaGroup *group)
847 OfDpaGroup *l2_group =
848 of_dpa_group_find(fc->of_dpa, group->l2_rewrite.group_id);
850 if (!l2_group) {
851 return;
854 of_dpa_flow_pkt_hdr_rewrite(fc, group->l2_rewrite.src_mac.a,
855 group->l2_rewrite.dst_mac.a,
856 group->l2_rewrite.vlan_id);
857 of_dpa_output_l2_interface(fc, l2_group);
860 static void of_dpa_output_l2_flood(OfDpaFlowContext *fc,
861 OfDpaGroup *group)
863 OfDpaGroup *l2_group;
864 int i;
866 for (i = 0; i < group->l2_flood.group_count; i++) {
867 of_dpa_flow_pkt_hdr_reset(fc);
868 l2_group = of_dpa_group_find(fc->of_dpa, group->l2_flood.group_ids[i]);
869 if (!l2_group) {
870 continue;
872 switch (ROCKER_GROUP_TYPE_GET(l2_group->id)) {
873 case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
874 of_dpa_output_l2_interface(fc, l2_group);
875 break;
876 case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
877 of_dpa_output_l2_rewrite(fc, l2_group);
878 break;
883 static void of_dpa_output_l3_unicast(OfDpaFlowContext *fc, OfDpaGroup *group)
885 OfDpaGroup *l2_group =
886 of_dpa_group_find(fc->of_dpa, group->l3_unicast.group_id);
888 if (!l2_group) {
889 return;
892 of_dpa_flow_pkt_hdr_rewrite(fc, group->l3_unicast.src_mac.a,
893 group->l3_unicast.dst_mac.a,
894 group->l3_unicast.vlan_id);
895 /* XXX need ttl_check */
896 of_dpa_output_l2_interface(fc, l2_group);
899 static void of_dpa_eg(OfDpaFlowContext *fc)
901 OfDpaFlowAction *set = &fc->action_set;
902 OfDpaGroup *group;
903 uint32_t group_id;
905 /* send a copy of pkt to CPU (controller)? */
907 if (set->apply.copy_to_cpu) {
908 group_id = ROCKER_GROUP_L2_INTERFACE(set->apply.vlan_id, 0);
909 group = of_dpa_group_find(fc->of_dpa, group_id);
910 if (group) {
911 of_dpa_output_l2_interface(fc, group);
912 of_dpa_flow_pkt_hdr_reset(fc);
916 /* process group write actions */
918 if (!set->write.group_id) {
919 return;
922 group = of_dpa_group_find(fc->of_dpa, set->write.group_id);
923 if (!group) {
924 return;
927 switch (ROCKER_GROUP_TYPE_GET(group->id)) {
928 case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
929 of_dpa_output_l2_interface(fc, group);
930 break;
931 case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
932 of_dpa_output_l2_rewrite(fc, group);
933 break;
934 case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
935 case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
936 of_dpa_output_l2_flood(fc, group);
937 break;
938 case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
939 of_dpa_output_l3_unicast(fc, group);
940 break;
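/* copy_to_cpu relies on the convention in of_dpa_output_l2_interface()
 * that out_pport 0 means "deliver to the host CPU via rx_produce()": the
 * L2 interface group ROCKER_GROUP_L2_INTERFACE(vlan_id, 0) is looked up,
 * the frame is punted, and the header iovecs are reset so the normal
 * egress path below still sees the original headers. */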
944 typedef struct of_dpa_flow_tbl_ops {
945 void (*build_match)(OfDpaFlowContext *fc, OfDpaFlowMatch *match);
946 void (*hit)(OfDpaFlowContext *fc, OfDpaFlow *flow);
947 void (*miss)(OfDpaFlowContext *fc);
948 void (*hit_no_goto)(OfDpaFlowContext *fc);
949 void (*action_apply)(OfDpaFlowContext *fc, OfDpaFlow *flow);
950 void (*action_write)(OfDpaFlowContext *fc, OfDpaFlow *flow);
951 } OfDpaFlowTblOps;
953 static OfDpaFlowTblOps of_dpa_tbl_ops[] = {
954 [ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT] = {
955 .build_match = of_dpa_ig_port_build_match,
956 .miss = of_dpa_ig_port_miss,
957 .hit_no_goto = of_dpa_drop,
959 [ROCKER_OF_DPA_TABLE_ID_VLAN] = {
960 .build_match = of_dpa_vlan_build_match,
961 .hit_no_goto = of_dpa_drop,
962 .action_apply = of_dpa_vlan_insert,
964 [ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC] = {
965 .build_match = of_dpa_term_mac_build_match,
966 .miss = of_dpa_term_mac_miss,
967 .hit_no_goto = of_dpa_drop,
968 .action_apply = of_dpa_apply_actions,
970 [ROCKER_OF_DPA_TABLE_ID_BRIDGING] = {
971 .build_match = of_dpa_bridging_build_match,
972 .hit = of_dpa_bridging_learn,
973 .miss = of_dpa_bridging_miss,
974 .hit_no_goto = of_dpa_drop,
975 .action_apply = of_dpa_apply_actions,
976 .action_write = of_dpa_bridging_action_write,
978 [ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING] = {
979 .build_match = of_dpa_unicast_routing_build_match,
980 .miss = of_dpa_unicast_routing_miss,
981 .hit_no_goto = of_dpa_drop,
982 .action_write = of_dpa_unicast_routing_action_write,
984 [ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING] = {
985 .build_match = of_dpa_multicast_routing_build_match,
986 .miss = of_dpa_multicast_routing_miss,
987 .hit_no_goto = of_dpa_drop,
988 .action_write = of_dpa_multicast_routing_action_write,
990 [ROCKER_OF_DPA_TABLE_ID_ACL_POLICY] = {
991 .build_match = of_dpa_acl_build_match,
992 .hit = of_dpa_acl_hit,
993 .miss = of_dpa_eg,
994 .action_apply = of_dpa_apply_actions,
995 .action_write = of_dpa_acl_action_write,
999 static void of_dpa_flow_ig_tbl(OfDpaFlowContext *fc, uint32_t tbl_id)
1001 OfDpaFlowTblOps *ops = &of_dpa_tbl_ops[tbl_id];
1002 OfDpaFlowMatch match = { { 0, }, };
1003 OfDpaFlow *flow;
1005 if (ops->build_match) {
1006 ops->build_match(fc, &match);
1007 } else {
1008 return;
1011 flow = of_dpa_flow_match(fc->of_dpa, &match);
1012 if (!flow) {
1013 if (ops->miss) {
1014 ops->miss(fc);
1016 return;
1019 flow->stats.hits++;
1021 if (ops->action_apply) {
1022 ops->action_apply(fc, flow);
1025 if (ops->action_write) {
1026 ops->action_write(fc, flow);
1029 if (ops->hit) {
1030 ops->hit(fc, flow);
1033 if (flow->action.goto_tbl) {
1034 of_dpa_flow_ig_tbl(fc, flow->action.goto_tbl);
1035 } else if (ops->hit_no_goto) {
1036 ops->hit_no_goto(fc);
1039 /* drop packet */
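/* Taken together, the ops table above gives the OF-DPA pipeline; a
 * typical untagged unicast frame from a physical port walks
 *
 *     INGRESS_PORT -> VLAN -> TERMINATION_MAC -> BRIDGING -> ACL_POLICY
 *
 * with of_dpa_eg() reached from the ACL table (on hit via of_dpa_acl_hit,
 * on miss directly) to execute the accumulated write-actions. */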
1042 static ssize_t of_dpa_ig(World *world, uint32_t pport,
1043 const struct iovec *iov, int iovcnt)
1045 struct iovec iov_copy[iovcnt + 2];
1046 OfDpaFlowContext fc = {
1047 .of_dpa = world_private(world),
1048 .in_pport = pport,
1049 .iov = iov_copy,
1050 .iovcnt = iovcnt + 2,
1053 of_dpa_flow_pkt_parse(&fc, iov, iovcnt);
1054 of_dpa_flow_ig_tbl(&fc, ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT);
1056 return iov_size(iov, iovcnt);
1059 #define ROCKER_TUNNEL_LPORT 0x00010000
1061 static int of_dpa_cmd_add_ig_port(OfDpaFlow *flow, RockerTlv **flow_tlvs)
1063 OfDpaFlowKey *key = &flow->key;
1064 OfDpaFlowKey *mask = &flow->mask;
1065 OfDpaFlowAction *action = &flow->action;
1066 bool overlay_tunnel;
1068 if (!flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT] ||
1069 !flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
1070 return -ROCKER_EINVAL;
1073 key->tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
1074 key->width = FLOW_KEY_WIDTH(tbl_id);
1076 key->in_pport = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT]);
1077 if (flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK]) {
1078 mask->in_pport =
1079 rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK]);
1082 overlay_tunnel = !!(key->in_pport & ROCKER_TUNNEL_LPORT);
1084 action->goto_tbl =
1085 rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);
1087 if (!overlay_tunnel && action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_VLAN) {
1088 return -ROCKER_EINVAL;
1091 if (overlay_tunnel && action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_BRIDGING) {
1092 return -ROCKER_EINVAL;
1095 return ROCKER_OK;
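/* ROCKER_TUNNEL_LPORT marks bit 16, separating logical tunnel ports from
 * physical pports (hypothetical pport values, for illustration only):
 *
 *     !!(0x00010003 & ROCKER_TUNNEL_LPORT)  -> true,  goto must be BRIDGING
 *     !!(0x00000003 & ROCKER_TUNNEL_LPORT)  -> false, goto must be VLAN
 */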
1098 static int of_dpa_cmd_add_vlan(OfDpaFlow *flow, RockerTlv **flow_tlvs)
1100 OfDpaFlowKey *key = &flow->key;
1101 OfDpaFlowKey *mask = &flow->mask;
1102 OfDpaFlowAction *action = &flow->action;
1103 uint32_t port;
1104 bool untagged;
1106 if (!flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT] ||
1107 !flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
1108 DPRINTF("Must give in_pport and vlan_id to install VLAN tbl entry\n");
1109 return -ROCKER_EINVAL;
1112 key->tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
1113 key->width = FLOW_KEY_WIDTH(eth.vlan_id);
1115 key->in_pport = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT]);
1116 if (!fp_port_from_pport(key->in_pport, &port)) {
1117 DPRINTF("in_pport (%d) not a front-panel port\n", key->in_pport);
1118 return -ROCKER_EINVAL;
1120 mask->in_pport = 0xffffffff;
1122 key->eth.vlan_id = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
1124 if (flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]) {
1125 mask->eth.vlan_id =
1126 rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]);
1129 if (key->eth.vlan_id) {
1130 untagged = false; /* filtering */
1131 } else {
1132 untagged = true;
1135 if (flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
1136 action->goto_tbl =
1137 rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);
1138 if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC) {
1139 DPRINTF("Goto tbl (%d) must be TERM_MAC\n", action->goto_tbl);
1140 return -ROCKER_EINVAL;
1144 if (untagged) {
1145 if (!flow_tlvs[ROCKER_TLV_OF_DPA_NEW_VLAN_ID]) {
1146 DPRINTF("Must specify new vlan_id if untagged\n");
1147 return -ROCKER_EINVAL;
1149 action->apply.new_vlan_id =
1150 rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_NEW_VLAN_ID]);
1151 if (1 > ntohs(action->apply.new_vlan_id) ||
1152 ntohs(action->apply.new_vlan_id) > 4095) {
1153 DPRINTF("New vlan_id (%d) must be between 1 and 4095\n",
1154 ntohs(action->apply.new_vlan_id));
1155 return -ROCKER_EINVAL;
1159 return ROCKER_OK;
1162 static int of_dpa_cmd_add_term_mac(OfDpaFlow *flow, RockerTlv **flow_tlvs)
1164 OfDpaFlowKey *key = &flow->key;
1165 OfDpaFlowKey *mask = &flow->mask;
1166 OfDpaFlowAction *action = &flow->action;
1167 const MACAddr ipv4_mcast = { .a = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 } };
1168 const MACAddr ipv4_mask = { .a = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 } };
1169 const MACAddr ipv6_mcast = { .a = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 } };
1170 const MACAddr ipv6_mask = { .a = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } };
1171 uint32_t port;
1172 bool unicast = false;
1173 bool multicast = false;
1175 if (!flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT] ||
1176 !flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK] ||
1177 !flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE] ||
1178 !flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC] ||
1179 !flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK] ||
1180 !flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID] ||
1181 !flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]) {
1182 return -ROCKER_EINVAL;
1185 key->tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
1186 key->width = FLOW_KEY_WIDTH(eth.type);
1188 key->in_pport = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT]);
1189 if (!fp_port_from_pport(key->in_pport, &port)) {
1190 return -ROCKER_EINVAL;
1192 mask->in_pport =
1193 rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK]);
1195 key->eth.type = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]);
1196 if (key->eth.type != htons(0x0800) && key->eth.type != htons(0x86dd)) {
1197 return -ROCKER_EINVAL;
1199 mask->eth.type = htons(0xffff);
1201 memcpy(key->eth.dst.a,
1202 rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]),
1203 sizeof(key->eth.dst.a));
1204 memcpy(mask->eth.dst.a,
1205 rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK]),
1206 sizeof(mask->eth.dst.a));
1208 if ((key->eth.dst.a[0] & 0x01) == 0x00) {
1209 unicast = true;
1212 /* only two wildcard rules are acceptable for IPv4 and IPv6 multicast */
1213 if (memcmp(key->eth.dst.a, ipv4_mcast.a, sizeof(key->eth.dst.a)) == 0 &&
1214 memcmp(mask->eth.dst.a, ipv4_mask.a, sizeof(mask->eth.dst.a)) == 0) {
1215 multicast = true;
1217 if (memcmp(key->eth.dst.a, ipv6_mcast.a, sizeof(key->eth.dst.a)) == 0 &&
1218 memcmp(mask->eth.dst.a, ipv6_mask.a, sizeof(mask->eth.dst.a)) == 0) {
1219 multicast = true;
1222 if (!unicast && !multicast) {
1223 return -ROCKER_EINVAL;
1226 key->eth.vlan_id = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
1227 mask->eth.vlan_id =
1228 rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]);
1230 if (flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
1231 action->goto_tbl =
1232 rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);
1234 if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING &&
1235 action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING) {
1236 return -ROCKER_EINVAL;
1239 if (unicast &&
1240 action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING) {
1241 return -ROCKER_EINVAL;
1244 if (multicast &&
1245 action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING) {
1246 return -ROCKER_EINVAL;
1250 if (flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]) {
1251 action->apply.copy_to_cpu =
1252 rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]);
1255 return ROCKER_OK;
1258 static int of_dpa_cmd_add_bridging(OfDpaFlow *flow, RockerTlv **flow_tlvs)
1260 OfDpaFlowKey *key = &flow->key;
1261 OfDpaFlowKey *mask = &flow->mask;
1262 OfDpaFlowAction *action = &flow->action;
1263 bool unicast = false;
1264 bool dst_mac = false;
1265 bool dst_mac_mask = false;
1266 enum {
1267 BRIDGING_MODE_UNKNOWN,
1268 BRIDGING_MODE_VLAN_UCAST,
1269 BRIDGING_MODE_VLAN_MCAST,
1270 BRIDGING_MODE_VLAN_DFLT,
1271 BRIDGING_MODE_TUNNEL_UCAST,
1272 BRIDGING_MODE_TUNNEL_MCAST,
1273 BRIDGING_MODE_TUNNEL_DFLT,
1274 } mode = BRIDGING_MODE_UNKNOWN;
1276 key->tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
1278 if (flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
1279 key->eth.vlan_id =
1280 rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
1281 mask->eth.vlan_id = 0xffff;
1282 key->width = FLOW_KEY_WIDTH(eth.vlan_id);
1285 if (flow_tlvs[ROCKER_TLV_OF_DPA_TUNNEL_ID]) {
1286 key->tunnel_id =
1287 rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_TUNNEL_ID]);
1288 mask->tunnel_id = 0xffffffff;
1289 key->width = FLOW_KEY_WIDTH(tunnel_id);
1292 /* can't do VLAN bridging and tunnel bridging at same time */
1293 if (key->eth.vlan_id && key->tunnel_id) {
1294 DPRINTF("can't do VLAN bridging and tunnel bridging at same time\n");
1295 return -ROCKER_EINVAL;
1298 if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]) {
1299 memcpy(key->eth.dst.a,
1300 rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]),
1301 sizeof(key->eth.dst.a));
1302 key->width = FLOW_KEY_WIDTH(eth.dst);
1303 dst_mac = true;
1304 unicast = (key->eth.dst.a[0] & 0x01) == 0x00;
1307 if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK]) {
1308 memcpy(mask->eth.dst.a,
1309 rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK]),
1310 sizeof(mask->eth.dst.a));
1311 key->width = FLOW_KEY_WIDTH(eth.dst);
1312 dst_mac_mask = true;
1313 } else if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]) {
1314 memcpy(mask->eth.dst.a, ff_mac.a, sizeof(mask->eth.dst.a));
1317 if (key->eth.vlan_id) {
1318 if (dst_mac && !dst_mac_mask) {
1319 mode = unicast ? BRIDGING_MODE_VLAN_UCAST :
1320 BRIDGING_MODE_VLAN_MCAST;
1321 } else if ((dst_mac && dst_mac_mask) || !dst_mac) {
1322 mode = BRIDGING_MODE_VLAN_DFLT;
1324 } else if (key->tunnel_id) {
1325 if (dst_mac && !dst_mac_mask) {
1326 mode = unicast ? BRIDGING_MODE_TUNNEL_UCAST :
1327 BRIDGING_MODE_TUNNEL_MCAST;
1328 } else if ((dst_mac && dst_mac_mask) || !dst_mac) {
1329 mode = BRIDGING_MODE_TUNNEL_DFLT;
1333 if (mode == BRIDGING_MODE_UNKNOWN) {
1334 DPRINTF("Unknown bridging mode\n");
1335 return -ROCKER_EINVAL;
1338 if (flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
1339 action->goto_tbl =
1340 rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);
1341 if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_ACL_POLICY) {
1342 DPRINTF("Briding goto tbl must be ACL policy\n");
1343 return -ROCKER_EINVAL;
1347 if (flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]) {
1348 action->write.group_id =
1349 rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]);
1350 switch (mode) {
1351 case BRIDGING_MODE_VLAN_UCAST:
1352 if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
1353 ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE) {
1354 DPRINTF("Bridging mode vlan ucast needs L2 "
1355 "interface group (0x%08x)\n",
1356 action->write.group_id);
1357 return -ROCKER_EINVAL;
1359 break;
1360 case BRIDGING_MODE_VLAN_MCAST:
1361 if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
1362 ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST) {
1363 DPRINTF("Bridging mode vlan mcast needs L2 "
1364 "mcast group (0x%08x)\n",
1365 action->write.group_id);
1366 return -ROCKER_EINVAL;
1368 break;
1369 case BRIDGING_MODE_VLAN_DFLT:
1370 if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
1371 ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD) {
1372 DPRINTF("Bridging mode vlan dflt needs L2 "
1373 "flood group (0x%08x)\n",
1374 action->write.group_id);
1375 return -ROCKER_EINVAL;
1377 break;
1378 case BRIDGING_MODE_TUNNEL_MCAST:
1379 if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
1380 ROCKER_OF_DPA_GROUP_TYPE_L2_OVERLAY) {
1381 DPRINTF("Bridging mode tunnel mcast needs L2 "
1382 "overlay group (0x%08x)\n",
1383 action->write.group_id);
1384 return -ROCKER_EINVAL;
1386 break;
1387 case BRIDGING_MODE_TUNNEL_DFLT:
1388 if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
1389 ROCKER_OF_DPA_GROUP_TYPE_L2_OVERLAY) {
1390 DPRINTF("Bridging mode tunnel dflt needs L2 "
1391 "overlay group (0x%08x)\n",
1392 action->write.group_id);
1393 return -ROCKER_EINVAL;
1395 break;
1396 default:
1397 return -ROCKER_EINVAL;
1401 if (flow_tlvs[ROCKER_TLV_OF_DPA_TUNNEL_LPORT]) {
1402 action->write.tun_log_lport =
1403 rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_TUNNEL_LPORT]);
1404 if (mode != BRIDGING_MODE_TUNNEL_UCAST) {
1405 DPRINTF("Have tunnel logical port but not "
1406 "in bridging tunnel mode\n");
1407 return -ROCKER_EINVAL;
1411 if (flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]) {
1412 action->apply.copy_to_cpu =
1413 rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]);
1416 return ROCKER_OK;
1419 static int of_dpa_cmd_add_unicast_routing(OfDpaFlow *flow,
1420 RockerTlv **flow_tlvs)
1422 OfDpaFlowKey *key = &flow->key;
1423 OfDpaFlowKey *mask = &flow->mask;
1424 OfDpaFlowAction *action = &flow->action;
1425 enum {
1426 UNICAST_ROUTING_MODE_UNKNOWN,
1427 UNICAST_ROUTING_MODE_IPV4,
1428 UNICAST_ROUTING_MODE_IPV6,
1429 } mode = UNICAST_ROUTING_MODE_UNKNOWN;
1430 uint8_t type;
1432 if (!flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]) {
1433 return -ROCKER_EINVAL;
1436 key->tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
1437 key->width = FLOW_KEY_WIDTH(ipv6.addr.dst);
1439 key->eth.type = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]);
1440 switch (ntohs(key->eth.type)) {
1441 case 0x0800:
1442 mode = UNICAST_ROUTING_MODE_IPV4;
1443 break;
1444 case 0x86dd:
1445 mode = UNICAST_ROUTING_MODE_IPV6;
1446 break;
1447 default:
1448 return -ROCKER_EINVAL;
1450 mask->eth.type = htons(0xffff);
1452 switch (mode) {
1453 case UNICAST_ROUTING_MODE_IPV4:
1454 if (!flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP]) {
1455 return -ROCKER_EINVAL;
1457 key->ipv4.addr.dst =
1458 rocker_tlv_get_u32(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP]);
1459 if (ipv4_addr_is_multicast(key->ipv4.addr.dst)) {
1460 return -ROCKER_EINVAL;
1462 flow->lpm = of_dpa_mask2prefix(htonl(0xffffffff));
1463 if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP_MASK]) {
1464 mask->ipv4.addr.dst =
1465 rocker_tlv_get_u32(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP_MASK]);
1466 flow->lpm = of_dpa_mask2prefix(mask->ipv4.addr.dst);
1468 break;
1469 case UNICAST_ROUTING_MODE_IPV6:
1470 if (!flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6]) {
1471 return -ROCKER_EINVAL;
1473 memcpy(&key->ipv6.addr.dst,
1474 rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6]),
1475 sizeof(key->ipv6.addr.dst));
1476 if (ipv6_addr_is_multicast(&key->ipv6.addr.dst)) {
1477 return -ROCKER_EINVAL;
1479 if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6_MASK]) {
1480 memcpy(&mask->ipv6.addr.dst,
1481 rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6_MASK]),
1482 sizeof(mask->ipv6.addr.dst));
1484 break;
1485 default:
1486 return -ROCKER_EINVAL;
1489 if (flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
1490 action->goto_tbl =
1491 rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);
1492 if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_ACL_POLICY) {
1493 return -ROCKER_EINVAL;
1497 if (flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]) {
1498 action->write.group_id =
1499 rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]);
1500 type = ROCKER_GROUP_TYPE_GET(action->write.group_id);
1501 if (type != ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE &&
1502 type != ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST &&
1503 type != ROCKER_OF_DPA_GROUP_TYPE_L3_ECMP) {
1504 return -ROCKER_EINVAL;
1508 return ROCKER_OK;
1511 static int of_dpa_cmd_add_multicast_routing(OfDpaFlow *flow,
1512 RockerTlv **flow_tlvs)
1514 OfDpaFlowKey *key = &flow->key;
1515 OfDpaFlowKey *mask = &flow->mask;
1516 OfDpaFlowAction *action = &flow->action;
1517 enum {
1518 MULTICAST_ROUTING_MODE_UNKNOWN,
1519 MULTICAST_ROUTING_MODE_IPV4,
1520 MULTICAST_ROUTING_MODE_IPV6,
1521 } mode = MULTICAST_ROUTING_MODE_UNKNOWN;
1523 if (!flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE] ||
1524 !flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
1525 return -ROCKER_EINVAL;
1528 key->tbl_id = ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
1529 key->width = FLOW_KEY_WIDTH(ipv6.addr.dst);
1531 key->eth.type = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]);
1532 switch (ntohs(key->eth.type)) {
1533 case 0x0800:
1534 mode = MULTICAST_ROUTING_MODE_IPV4;
1535 break;
1536 case 0x86dd:
1537 mode = MULTICAST_ROUTING_MODE_IPV6;
1538 break;
1539 default:
1540 return -ROCKER_EINVAL;
1543 key->eth.vlan_id = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
1545 switch (mode) {
1546 case MULTICAST_ROUTING_MODE_IPV4:
1548 if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IP]) {
1549 key->ipv4.addr.src =
1550 rocker_tlv_get_u32(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IP]);
1553 if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IP_MASK]) {
1554 mask->ipv4.addr.src =
1555 rocker_tlv_get_u32(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IP_MASK]);
1558 if (!flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IP]) {
1559 if (mask->ipv4.addr.src != 0) {
1560 return -ROCKER_EINVAL;
1564 if (!flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP]) {
1565 return -ROCKER_EINVAL;
1568 key->ipv4.addr.dst =
1569 rocker_tlv_get_u32(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP]);
1570 if (!ipv4_addr_is_multicast(key->ipv4.addr.dst)) {
1571 return -ROCKER_EINVAL;
1574 break;
1576 case MULTICAST_ROUTING_MODE_IPV6:
1578 if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IPV6]) {
1579 memcpy(&key->ipv6.addr.src,
1580 rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IPV6]),
1581 sizeof(key->ipv6.addr.src));
1584 if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IPV6_MASK]) {
1585 memcpy(&mask->ipv6.addr.src,
1586 rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IPV6_MASK]),
1587 sizeof(mask->ipv6.addr.src));
1590 if (!flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IPV6]) {
1591 if (mask->ipv6.addr.src.addr32[0] != 0 ||
1592 mask->ipv6.addr.src.addr32[1] != 0 ||
1593 mask->ipv6.addr.src.addr32[2] != 0 ||
1594 mask->ipv6.addr.src.addr32[3] != 0) {
1595 return -ROCKER_EINVAL;
1599 if (!flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6]) {
1600 return -ROCKER_EINVAL;
1603 memcpy(&key->ipv6.addr.dst,
1604 rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6]),
1605 sizeof(key->ipv6.addr.dst));
1606 if (!ipv6_addr_is_multicast(&key->ipv6.addr.dst)) {
1607 return -ROCKER_EINVAL;
1610 break;
1612 default:
1613 return -ROCKER_EINVAL;
1616 if (flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
1617 action->goto_tbl =
1618 rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);
1619 if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_ACL_POLICY) {
1620 return -ROCKER_EINVAL;
1624 if (flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]) {
1625 action->write.group_id =
1626 rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]);
1627 if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
1628 ROCKER_OF_DPA_GROUP_TYPE_L3_MCAST) {
1629 return -ROCKER_EINVAL;
1631 action->write.vlan_id = key->eth.vlan_id;
1634 return ROCKER_OK;
1637 static int of_dpa_cmd_add_acl_ip(OfDpaFlowKey *key, OfDpaFlowKey *mask,
1638 RockerTlv **flow_tlvs)
1640 key->width = FLOW_KEY_WIDTH(ip.tos);
1642 key->ip.proto = 0;
1643 key->ip.tos = 0;
1644 mask->ip.proto = 0;
1645 mask->ip.tos = 0;
1647 if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_PROTO]) {
1648 key->ip.proto =
1649 rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_PROTO]);
1651 if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_PROTO_MASK]) {
1652 mask->ip.proto =
1653 rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_PROTO_MASK]);
1655 if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_DSCP]) {
1656 key->ip.tos =
1657 rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_DSCP]);
1659 if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_DSCP_MASK]) {
1660 mask->ip.tos =
1661 rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_DSCP_MASK]);
1663 if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_ECN]) {
1664 key->ip.tos |=
1665 rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_ECN]) << 6;
1667 if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_ECN_MASK]) {
1668 mask->ip.tos |=
1669 rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_ECN_MASK]) << 6;
1672 return ROCKER_OK;
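/* Note the packing this helper produces: DSCP lands in bits 0-5 of
 * ip.tos and ECN in bits 6-7, and the masks are assembled the same way.
 * This differs from of_dpa_acl_build_match(), which copies the raw
 * ip_tos byte from the packet (where DSCP occupies the top six bits on
 * the wire), so key and mask must be chosen with that packing in mind. */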
1675 static int of_dpa_cmd_add_acl(OfDpaFlow *flow, RockerTlv **flow_tlvs)
1677 OfDpaFlowKey *key = &flow->key;
1678 OfDpaFlowKey *mask = &flow->mask;
1679 OfDpaFlowAction *action = &flow->action;
1680 enum {
1681 ACL_MODE_UNKNOWN,
1682 ACL_MODE_IPV4_VLAN,
1683 ACL_MODE_IPV6_VLAN,
1684 ACL_MODE_IPV4_TENANT,
1685 ACL_MODE_IPV6_TENANT,
1686 ACL_MODE_NON_IP_VLAN,
1687 ACL_MODE_NON_IP_TENANT,
1688 ACL_MODE_ANY_VLAN,
1689 ACL_MODE_ANY_TENANT,
1690 } mode = ACL_MODE_UNKNOWN;
1691 int err = ROCKER_OK;
1693 if (!flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT] ||
1694 !flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]) {
1695 return -ROCKER_EINVAL;
1698 if (flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID] &&
1699 flow_tlvs[ROCKER_TLV_OF_DPA_TUNNEL_ID]) {
1700 return -ROCKER_EINVAL;
1703 key->tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
1704 key->width = FLOW_KEY_WIDTH(eth.type);
1706 key->in_pport = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT]);
1707 if (flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK]) {
1708 mask->in_pport =
1709 rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK]);
1712 if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]) {
1713 memcpy(key->eth.src.a,
1714 rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]),
1715 sizeof(key->eth.src.a));
1718 if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC_MASK]) {
1719 memcpy(mask->eth.src.a,
1720 rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC_MASK]),
1721 sizeof(mask->eth.src.a));
1724 if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]) {
1725 memcpy(key->eth.dst.a,
1726 rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]),
1727 sizeof(key->eth.dst.a));
1730 if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK]) {
1731 memcpy(mask->eth.dst.a,
1732 rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK]),
1733 sizeof(mask->eth.dst.a));
1736 key->eth.type = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]);
1737 if (key->eth.type) {
1738 mask->eth.type = 0xffff;
1741 if (flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
1742 key->eth.vlan_id =
1743 rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
1746 if (flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]) {
1747 mask->eth.vlan_id =
1748 rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]);
1751 switch (ntohs(key->eth.type)) {
1752 case 0x0000:
1753 mode = (key->eth.vlan_id) ? ACL_MODE_ANY_VLAN : ACL_MODE_ANY_TENANT;
1754 break;
1755 case 0x0800:
1756 mode = (key->eth.vlan_id) ? ACL_MODE_IPV4_VLAN : ACL_MODE_IPV4_TENANT;
1757 break;
1758 case 0x86dd:
1759 mode = (key->eth.vlan_id) ? ACL_MODE_IPV6_VLAN : ACL_MODE_IPV6_TENANT;
1760 break;
1761 default:
1762 mode = (key->eth.vlan_id) ? ACL_MODE_NON_IP_VLAN :
1763 ACL_MODE_NON_IP_TENANT;
1764 break;
1767 /* XXX only supporting VLAN modes for now */
1768 if (mode != ACL_MODE_IPV4_VLAN &&
1769 mode != ACL_MODE_IPV6_VLAN &&
1770 mode != ACL_MODE_NON_IP_VLAN &&
1771 mode != ACL_MODE_ANY_VLAN) {
1772 return -ROCKER_EINVAL;
1775 switch (ntohs(key->eth.type)) {
1776 case 0x0800:
1777 case 0x86dd:
1778 err = of_dpa_cmd_add_acl_ip(key, mask, flow_tlvs);
1779 break;
1782 if (err) {
1783 return err;
1786 if (flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]) {
1787 action->write.group_id =
1788 rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]);
1791 if (flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]) {
1792 action->apply.copy_to_cpu =
1793 rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]);
1796 return ROCKER_OK;
1799 static int of_dpa_cmd_flow_add_mod(OfDpa *of_dpa, OfDpaFlow *flow,
1800 RockerTlv **flow_tlvs)
1802 enum rocker_of_dpa_table_id tbl;
1803 int err = ROCKER_OK;
1805 if (!flow_tlvs[ROCKER_TLV_OF_DPA_TABLE_ID] ||
1806 !flow_tlvs[ROCKER_TLV_OF_DPA_PRIORITY] ||
1807 !flow_tlvs[ROCKER_TLV_OF_DPA_HARDTIME]) {
1808 return -ROCKER_EINVAL;
1811 tbl = rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_TABLE_ID]);
1812 flow->priority = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_PRIORITY]);
1813 flow->hardtime = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_HARDTIME]);
1815 if (flow_tlvs[ROCKER_TLV_OF_DPA_IDLETIME]) {
1816 if (tbl == ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT ||
1817 tbl == ROCKER_OF_DPA_TABLE_ID_VLAN ||
1818 tbl == ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC) {
1819 return -ROCKER_EINVAL;
1821 flow->idletime =
1822 rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IDLETIME]);
1825 switch (tbl) {
1826 case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
1827 err = of_dpa_cmd_add_ig_port(flow, flow_tlvs);
1828 break;
1829 case ROCKER_OF_DPA_TABLE_ID_VLAN:
1830 err = of_dpa_cmd_add_vlan(flow, flow_tlvs);
1831 break;
1832 case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
1833 err = of_dpa_cmd_add_term_mac(flow, flow_tlvs);
1834 break;
1835 case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
1836 err = of_dpa_cmd_add_bridging(flow, flow_tlvs);
1837 break;
1838 case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
1839 err = of_dpa_cmd_add_unicast_routing(flow, flow_tlvs);
1840 break;
1841 case ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING:
1842 err = of_dpa_cmd_add_multicast_routing(flow, flow_tlvs);
1843 break;
1844 case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
1845 err = of_dpa_cmd_add_acl(flow, flow_tlvs);
1846 break;
1849 return err;
1852 static int of_dpa_cmd_flow_add(OfDpa *of_dpa, uint64_t cookie,
1853 RockerTlv **flow_tlvs)
1855 OfDpaFlow *flow = of_dpa_flow_find(of_dpa, cookie);
1856 int err = ROCKER_OK;
1858 if (flow) {
1859 return -ROCKER_EEXIST;
1862 flow = of_dpa_flow_alloc(cookie);
1864 err = of_dpa_cmd_flow_add_mod(of_dpa, flow, flow_tlvs);
1865 if (err) {
1866 g_free(flow);
1867 return err;
1870 return of_dpa_flow_add(of_dpa, flow);
1873 static int of_dpa_cmd_flow_mod(OfDpa *of_dpa, uint64_t cookie,
1874 RockerTlv **flow_tlvs)
1876 OfDpaFlow *flow = of_dpa_flow_find(of_dpa, cookie);
1878 if (!flow) {
1879 return -ROCKER_ENOENT;
1882 return of_dpa_cmd_flow_add_mod(of_dpa, flow, flow_tlvs);
1885 static int of_dpa_cmd_flow_del(OfDpa *of_dpa, uint64_t cookie)
1887 OfDpaFlow *flow = of_dpa_flow_find(of_dpa, cookie);
1889 if (!flow) {
1890 return -ROCKER_ENOENT;
1893 of_dpa_flow_del(of_dpa, flow);
1895 return ROCKER_OK;
1898 static int of_dpa_cmd_flow_get_stats(OfDpa *of_dpa, uint64_t cookie,
1899 struct desc_info *info, char *buf)
1901 OfDpaFlow *flow = of_dpa_flow_find(of_dpa, cookie);
1902 size_t tlv_size;
1903 int64_t now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) / 1000;
1904 int pos;
1906 if (!flow) {
1907 return -ROCKER_ENOENT;
1910 tlv_size = rocker_tlv_total_size(sizeof(uint32_t)) + /* duration */
1911 rocker_tlv_total_size(sizeof(uint64_t)) + /* rx_pkts */
1912 rocker_tlv_total_size(sizeof(uint64_t)); /* tx_pkts */
1914 if (tlv_size > desc_buf_size(info)) {
1915 return -ROCKER_EMSGSIZE;
1918 pos = 0;
1919 rocker_tlv_put_le32(buf, &pos, ROCKER_TLV_OF_DPA_FLOW_STAT_DURATION,
1920 (int32_t)(now - flow->stats.install_time));
1921 rocker_tlv_put_le64(buf, &pos, ROCKER_TLV_OF_DPA_FLOW_STAT_RX_PKTS,
1922 flow->stats.rx_pkts);
1923 rocker_tlv_put_le64(buf, &pos, ROCKER_TLV_OF_DPA_FLOW_STAT_TX_PKTS,
1924 flow->stats.tx_pkts);
1926 return desc_set_buf(info, tlv_size);
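/* Reply layout sketch (three attributes, each rounded up by
 * rocker_tlv_total_size() to header plus padded payload):
 *
 *     OF_DPA_FLOW_STAT_DURATION  le32  seconds since install_time
 *     OF_DPA_FLOW_STAT_RX_PKTS   le64
 *     OF_DPA_FLOW_STAT_TX_PKTS   le64
 *
 * desc_set_buf(info, tlv_size) then hands exactly those bytes back
 * through the descriptor. */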
1929 static int of_dpa_flow_cmd(OfDpa *of_dpa, struct desc_info *info,
1930 char *buf, uint16_t cmd,
1931 RockerTlv **flow_tlvs)
1933 uint64_t cookie;
1935 if (!flow_tlvs[ROCKER_TLV_OF_DPA_COOKIE]) {
1936 return -ROCKER_EINVAL;
1939 cookie = rocker_tlv_get_le64(flow_tlvs[ROCKER_TLV_OF_DPA_COOKIE]);
1941 switch (cmd) {
1942 case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD:
1943 return of_dpa_cmd_flow_add(of_dpa, cookie, flow_tlvs);
1944 case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD:
1945 return of_dpa_cmd_flow_mod(of_dpa, cookie, flow_tlvs);
1946 case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL:
1947 return of_dpa_cmd_flow_del(of_dpa, cookie);
1948 case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_GET_STATS:
1949 return of_dpa_cmd_flow_get_stats(of_dpa, cookie, info, buf);
1952 return -ROCKER_ENOTSUP;
1955 static int of_dpa_cmd_add_l2_interface(OfDpaGroup *group,
1956 RockerTlv **group_tlvs)
1958 if (!group_tlvs[ROCKER_TLV_OF_DPA_OUT_PPORT] ||
1959 !group_tlvs[ROCKER_TLV_OF_DPA_POP_VLAN]) {
1960 return -ROCKER_EINVAL;
1963 group->l2_interface.out_pport =
1964 rocker_tlv_get_le32(group_tlvs[ROCKER_TLV_OF_DPA_OUT_PPORT]);
1965 group->l2_interface.pop_vlan =
1966 rocker_tlv_get_u8(group_tlvs[ROCKER_TLV_OF_DPA_POP_VLAN]);
1968 return ROCKER_OK;
1971 static int of_dpa_cmd_add_l2_rewrite(OfDpa *of_dpa, OfDpaGroup *group,
1972 RockerTlv **group_tlvs)
1974 OfDpaGroup *l2_interface_group;
1976 if (!group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID_LOWER]) {
1977 return -ROCKER_EINVAL;
1980 group->l2_rewrite.group_id =
1981 rocker_tlv_get_le32(group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID_LOWER]);
1983 l2_interface_group = of_dpa_group_find(of_dpa, group->l2_rewrite.group_id);
1984 if (!l2_interface_group ||
1985 ROCKER_GROUP_TYPE_GET(l2_interface_group->id) !=
1986 ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE) {
1987 DPRINTF("l2 rewrite group needs a valid l2 interface group\n");
1988 return -ROCKER_EINVAL;
1991 if (group_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]) {
1992 memcpy(group->l2_rewrite.src_mac.a,
1993 rocker_tlv_data(group_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]),
1994 sizeof(group->l2_rewrite.src_mac.a));
1997 if (group_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]) {
1998 memcpy(group->l2_rewrite.dst_mac.a,
1999 rocker_tlv_data(group_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]),
2000 sizeof(group->l2_rewrite.dst_mac.a));
2003 if (group_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
2004 group->l2_rewrite.vlan_id =
2005 rocker_tlv_get_u16(group_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
2006 if (ROCKER_GROUP_VLAN_GET(l2_interface_group->id) !=
2007 (ntohs(group->l2_rewrite.vlan_id) & VLAN_VID_MASK)) {
2008 DPRINTF("Set VLAN ID must be same as L2 interface group\n");
2009 return -ROCKER_EINVAL;
2013 return ROCKER_OK;
static int of_dpa_cmd_add_l2_flood(OfDpa *of_dpa, OfDpaGroup *group,
                                   RockerTlv **group_tlvs)
{
    OfDpaGroup *l2_group;
    RockerTlv **tlvs;
    int err;
    int i;

    if (!group_tlvs[ROCKER_TLV_OF_DPA_GROUP_COUNT] ||
        !group_tlvs[ROCKER_TLV_OF_DPA_GROUP_IDS]) {
        return -ROCKER_EINVAL;
    }

    group->l2_flood.group_count =
        rocker_tlv_get_le16(group_tlvs[ROCKER_TLV_OF_DPA_GROUP_COUNT]);

    tlvs = g_new0(RockerTlv *, group->l2_flood.group_count + 1);

    g_free(group->l2_flood.group_ids);
    group->l2_flood.group_ids =
        g_new0(uint32_t, group->l2_flood.group_count);

    rocker_tlv_parse_nested(tlvs, group->l2_flood.group_count,
                            group_tlvs[ROCKER_TLV_OF_DPA_GROUP_IDS]);

    for (i = 0; i < group->l2_flood.group_count; i++) {
        group->l2_flood.group_ids[i] = rocker_tlv_get_le32(tlvs[i + 1]);
    }

    /* All of the L2 interface groups referenced by the L2 flood
     * group must have the same VLAN.
     */

    for (i = 0; i < group->l2_flood.group_count; i++) {
        l2_group = of_dpa_group_find(of_dpa, group->l2_flood.group_ids[i]);
        if (!l2_group) {
            continue;
        }
        if ((ROCKER_GROUP_TYPE_GET(l2_group->id) ==
             ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE) &&
            (ROCKER_GROUP_VLAN_GET(l2_group->id) !=
             ROCKER_GROUP_VLAN_GET(group->id))) {
            DPRINTF("l2 interface group 0x%08x VLAN doesn't match l2 "
                    "flood group 0x%08x\n",
                    group->l2_flood.group_ids[i], group->id);
            err = -ROCKER_EINVAL;
            goto err_out;
        }
    }

    g_free(tlvs);
    return ROCKER_OK;

err_out:
    group->l2_flood.group_count = 0;
    g_free(group->l2_flood.group_ids);
    group->l2_flood.group_ids = NULL; /* avoid dangling pointer on retry */
    g_free(tlvs);

    return err;
}
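
/* An L3 unicast group chains to an L2 group via GROUP_ID_LOWER and may
 * optionally rewrite source/destination MAC and VLAN and enable TTL
 * checking on forwarded packets.
 */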
static int of_dpa_cmd_add_l3_unicast(OfDpaGroup *group, RockerTlv **group_tlvs)
{
    if (!group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID_LOWER]) {
        return -ROCKER_EINVAL;
    }

    group->l3_unicast.group_id =
        rocker_tlv_get_le32(group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID_LOWER]);

    if (group_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]) {
        memcpy(group->l3_unicast.src_mac.a,
               rocker_tlv_data(group_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]),
               sizeof(group->l3_unicast.src_mac.a));
    }

    if (group_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]) {
        memcpy(group->l3_unicast.dst_mac.a,
               rocker_tlv_data(group_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]),
               sizeof(group->l3_unicast.dst_mac.a));
    }

    if (group_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
        group->l3_unicast.vlan_id =
            rocker_tlv_get_u16(group_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
    }

    if (group_tlvs[ROCKER_TLV_OF_DPA_TTL_CHECK]) {
        group->l3_unicast.ttl_check =
            rocker_tlv_get_u8(group_tlvs[ROCKER_TLV_OF_DPA_TTL_CHECK]);
    }

    return ROCKER_OK;
}
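
/* The OF-DPA group type is encoded in the upper bits of the 32-bit
 * group id (extracted with ROCKER_GROUP_TYPE_GET), so a group's
 * behavior is determined entirely by its id.
 */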
static int of_dpa_cmd_group_do(OfDpa *of_dpa, uint32_t group_id,
                               OfDpaGroup *group, RockerTlv **group_tlvs)
{
    uint8_t type = ROCKER_GROUP_TYPE_GET(group_id);

    switch (type) {
    case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
        return of_dpa_cmd_add_l2_interface(group, group_tlvs);
    case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
        return of_dpa_cmd_add_l2_rewrite(of_dpa, group, group_tlvs);
    case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
    /* Treat an L2 multicast group the same as an L2 flood group */
    case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
        return of_dpa_cmd_add_l2_flood(of_dpa, group, group_tlvs);
    case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
        return of_dpa_cmd_add_l3_unicast(group, group_tlvs);
    }

    return -ROCKER_ENOTSUP;
}

static int of_dpa_cmd_group_add(OfDpa *of_dpa, uint32_t group_id,
                                RockerTlv **group_tlvs)
{
    OfDpaGroup *group = of_dpa_group_find(of_dpa, group_id);
    int err;

    if (group) {
        return -ROCKER_EEXIST;
    }

    group = of_dpa_group_alloc(group_id);

    err = of_dpa_cmd_group_do(of_dpa, group_id, group, group_tlvs);
    if (err) {
        goto err_cmd_add;
    }

    err = of_dpa_group_add(of_dpa, group);
    if (err) {
        goto err_cmd_add;
    }

    return ROCKER_OK;

err_cmd_add:
    g_free(group);
    return err;
}

static int of_dpa_cmd_group_mod(OfDpa *of_dpa, uint32_t group_id,
                                RockerTlv **group_tlvs)
{
    OfDpaGroup *group = of_dpa_group_find(of_dpa, group_id);

    if (!group) {
        return -ROCKER_ENOENT;
    }

    return of_dpa_cmd_group_do(of_dpa, group_id, group, group_tlvs);
}

static int of_dpa_cmd_group_del(OfDpa *of_dpa, uint32_t group_id)
{
    OfDpaGroup *group = of_dpa_group_find(of_dpa, group_id);

    if (!group) {
        return -ROCKER_ENOENT;
    }

    return of_dpa_group_del(of_dpa, group);
}

static int of_dpa_cmd_group_get_stats(OfDpa *of_dpa, uint32_t group_id,
                                      struct desc_info *info, char *buf)
{
    return -ROCKER_ENOTSUP; /* group stats not implemented */
}

static int of_dpa_group_cmd(OfDpa *of_dpa, struct desc_info *info,
                            char *buf, uint16_t cmd, RockerTlv **group_tlvs)
{
    uint32_t group_id;

    if (!group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]) {
        return -ROCKER_EINVAL;
    }

    group_id = rocker_tlv_get_le32(group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]);

    switch (cmd) {
    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD:
        return of_dpa_cmd_group_add(of_dpa, group_id, group_tlvs);
    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD:
        return of_dpa_cmd_group_mod(of_dpa, group_id, group_tlvs);
    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL:
        return of_dpa_cmd_group_del(of_dpa, group_id);
    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_GET_STATS:
        return of_dpa_cmd_group_get_stats(of_dpa, group_id, info, buf);
    }

    return -ROCKER_ENOTSUP;
}

static int of_dpa_cmd(World *world, struct desc_info *info,
                      char *buf, uint16_t cmd, RockerTlv *cmd_info_tlv)
{
    OfDpa *of_dpa = world_private(world);
    RockerTlv *tlvs[ROCKER_TLV_OF_DPA_MAX + 1];

    rocker_tlv_parse_nested(tlvs, ROCKER_TLV_OF_DPA_MAX, cmd_info_tlv);

    switch (cmd) {
    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD:
    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD:
    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL:
    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_GET_STATS:
        return of_dpa_flow_cmd(of_dpa, info, buf, cmd, tlvs);
    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD:
    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD:
    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL:
    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_GET_STATS:
        return of_dpa_group_cmd(of_dpa, info, buf, cmd, tlvs);
    }

    return -ROCKER_ENOTSUP;
}
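
/* Flow cookies are 64-bit, so the flow table needs custom hash/equal
 * callbacks; truncating the cookie to guint is good enough for
 * bucketing.
 */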
static gboolean rocker_int64_equal(gconstpointer v1, gconstpointer v2)
{
    return *((const uint64_t *)v1) == *((const uint64_t *)v2);
}

static guint rocker_int64_hash(gconstpointer v)
{
    return (guint)*(const uint64_t *)v;
}

static int of_dpa_init(World *world)
{
    OfDpa *of_dpa = world_private(world);

    of_dpa->world = world;

    /* g_hash_table_new_full() aborts on allocation failure, so there
     * is no error path to handle here.
     */
    of_dpa->flow_tbl = g_hash_table_new_full(rocker_int64_hash,
                                             rocker_int64_equal,
                                             NULL, g_free);

    of_dpa->group_tbl = g_hash_table_new_full(g_int_hash, g_int_equal,
                                              NULL, g_free);

    /* XXX hardcode some artificial table max values */
    of_dpa->flow_tbl_max_size = 100;
    of_dpa->group_tbl_max_size = 100;

    return 0;
}

static void of_dpa_uninit(World *world)
{
    OfDpa *of_dpa = world_private(world);

    g_hash_table_destroy(of_dpa->group_tbl);
    g_hash_table_destroy(of_dpa->flow_tbl);
}
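
/* QMP support: walk the flow and group hash tables and convert each
 * entry into the QAPI-generated list types returned to the monitor.
 */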
struct of_dpa_flow_fill_context {
    RockerOfDpaFlowList *list;
    uint32_t tbl_id;
};
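
/* A tbl_id of (uint32_t)-1 is used by callers as an "all tables"
 * wildcard; see the filter check below.
 */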
static void of_dpa_flow_fill(void *cookie, void *value, void *user_data)
{
    struct of_dpa_flow *flow = value;
    struct of_dpa_flow_key *key = &flow->key;
    struct of_dpa_flow_key *mask = &flow->mask;
    struct of_dpa_flow_fill_context *flow_context = user_data;
    RockerOfDpaFlowList *new;
    RockerOfDpaFlow *nflow;
    RockerOfDpaFlowKey *nkey;
    RockerOfDpaFlowMask *nmask;
    RockerOfDpaFlowAction *naction;

    if (flow_context->tbl_id != -1 &&
        flow_context->tbl_id != key->tbl_id) {
        return;
    }

    new = g_malloc0(sizeof(*new));
    nflow = new->value = g_malloc0(sizeof(*nflow));
    nkey = nflow->key = g_malloc0(sizeof(*nkey));
    nmask = nflow->mask = g_malloc0(sizeof(*nmask));
    naction = nflow->action = g_malloc0(sizeof(*naction));

    nflow->cookie = flow->cookie;
    nflow->hits = flow->stats.hits;
    nkey->priority = flow->priority;
    nkey->tbl_id = key->tbl_id;

    if (key->in_pport || mask->in_pport) {
        nkey->has_in_pport = true;
        nkey->in_pport = key->in_pport;
    }

    if (nkey->has_in_pport && mask->in_pport != 0xffffffff) {
        nmask->has_in_pport = true;
        nmask->in_pport = mask->in_pport;
    }

    if (key->eth.vlan_id || mask->eth.vlan_id) {
        nkey->has_vlan_id = true;
        nkey->vlan_id = ntohs(key->eth.vlan_id);
    }

    if (nkey->has_vlan_id && mask->eth.vlan_id != 0xffff) {
        nmask->has_vlan_id = true;
        nmask->vlan_id = ntohs(mask->eth.vlan_id);
    }

    if (key->tunnel_id || mask->tunnel_id) {
        nkey->has_tunnel_id = true;
        nkey->tunnel_id = key->tunnel_id;
    }

    if (nkey->has_tunnel_id && mask->tunnel_id != 0xffffffff) {
        nmask->has_tunnel_id = true;
        nmask->tunnel_id = mask->tunnel_id;
    }

    if (memcmp(key->eth.src.a, zero_mac.a, ETH_ALEN) ||
        memcmp(mask->eth.src.a, zero_mac.a, ETH_ALEN)) {
        nkey->has_eth_src = true;
        nkey->eth_src = qemu_mac_strdup_printf(key->eth.src.a);
    }

    if (nkey->has_eth_src && memcmp(mask->eth.src.a, ff_mac.a, ETH_ALEN)) {
        nmask->has_eth_src = true;
        nmask->eth_src = qemu_mac_strdup_printf(mask->eth.src.a);
    }

    if (memcmp(key->eth.dst.a, zero_mac.a, ETH_ALEN) ||
        memcmp(mask->eth.dst.a, zero_mac.a, ETH_ALEN)) {
        nkey->has_eth_dst = true;
        nkey->eth_dst = qemu_mac_strdup_printf(key->eth.dst.a);
    }

    if (nkey->has_eth_dst && memcmp(mask->eth.dst.a, ff_mac.a, ETH_ALEN)) {
        nmask->has_eth_dst = true;
        nmask->eth_dst = qemu_mac_strdup_printf(mask->eth.dst.a);
    }

    if (key->eth.type) {
        nkey->has_eth_type = true;
        nkey->eth_type = ntohs(key->eth.type);
        switch (ntohs(key->eth.type)) {
        case 0x0800: /* IPv4 */
        case 0x86dd: /* IPv6 */
            if (key->ip.proto || mask->ip.proto) {
                nkey->has_ip_proto = true;
                nkey->ip_proto = key->ip.proto;
            }
            if (nkey->has_ip_proto && mask->ip.proto != 0xff) {
                nmask->has_ip_proto = true;
                nmask->ip_proto = mask->ip.proto;
            }
            if (key->ip.tos || mask->ip.tos) {
                nkey->has_ip_tos = true;
                nkey->ip_tos = key->ip.tos;
            }
            if (nkey->has_ip_tos && mask->ip.tos != 0xff) {
                nmask->has_ip_tos = true;
                nmask->ip_tos = mask->ip.tos;
            }
            break;
        }

        switch (ntohs(key->eth.type)) {
        case 0x0800: /* IPv4 */
            if (key->ipv4.addr.dst || mask->ipv4.addr.dst) {
                char *dst = inet_ntoa(*(struct in_addr *)&key->ipv4.addr.dst);
                int dst_len = of_dpa_mask2prefix(mask->ipv4.addr.dst);
                nkey->has_ip_dst = true;
                nkey->ip_dst = g_strdup_printf("%s/%d", dst, dst_len);
            }
            break;
        }
    }

    if (flow->action.goto_tbl) {
        naction->has_goto_tbl = true;
        naction->goto_tbl = flow->action.goto_tbl;
    }

    if (flow->action.write.group_id) {
        naction->has_group_id = true;
        naction->group_id = flow->action.write.group_id;
    }

    if (flow->action.apply.new_vlan_id) {
        naction->has_new_vlan_id = true;
        naction->new_vlan_id = flow->action.apply.new_vlan_id;
    }

    new->next = flow_context->list;
    flow_context->list = new;
}
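
/* Handler for the QMP command "query-rocker-of-dpa-flows". Example
 * invocation (the switch name "sw1" is illustrative):
 *
 *   { "execute": "query-rocker-of-dpa-flows",
 *     "arguments": { "name": "sw1", "tbl-id": 0 } }
 */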
RockerOfDpaFlowList *qmp_query_rocker_of_dpa_flows(const char *name,
                                                   bool has_tbl_id,
                                                   uint32_t tbl_id,
                                                   Error **errp)
{
    struct rocker *r;
    struct world *w;
    struct of_dpa *of_dpa;
    struct of_dpa_flow_fill_context fill_context = {
        .list = NULL,
        .tbl_id = tbl_id,
    };

    r = rocker_find(name);
    if (!r) {
        error_setg(errp, "rocker %s not found", name);
        return NULL;
    }

    w = rocker_get_world(r, ROCKER_WORLD_TYPE_OF_DPA);
    if (!w) {
        error_setg(errp, "rocker %s doesn't have OF-DPA world", name);
        return NULL;
    }

    of_dpa = world_private(w);

    g_hash_table_foreach(of_dpa->flow_tbl, of_dpa_flow_fill, &fill_context);

    return fill_context.list;
}

struct of_dpa_group_fill_context {
    RockerOfDpaGroupList *list;
    uint8_t type;
};
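
/* A type of 9 is used by callers as an "all group types" wildcard; it
 * lies outside the range of defined ROCKER_OF_DPA_GROUP_TYPE_* values.
 */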
static void of_dpa_group_fill(void *key, void *value, void *user_data)
{
    struct of_dpa_group *group = value;
    struct of_dpa_group_fill_context *group_context = user_data;
    RockerOfDpaGroupList *new;
    RockerOfDpaGroup *ngroup;
    struct uint32List *id;
    int i;

    if (group_context->type != 9 &&
        group_context->type != ROCKER_GROUP_TYPE_GET(group->id)) {
        return;
    }

    new = g_malloc0(sizeof(*new));
    ngroup = new->value = g_malloc0(sizeof(*ngroup));

    ngroup->id = group->id;

    ngroup->type = ROCKER_GROUP_TYPE_GET(group->id);

    switch (ngroup->type) {
    case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
        ngroup->has_vlan_id = true;
        ngroup->vlan_id = ROCKER_GROUP_VLAN_GET(group->id);
        ngroup->has_pport = true;
        ngroup->pport = ROCKER_GROUP_PORT_GET(group->id);
        ngroup->has_out_pport = true;
        ngroup->out_pport = group->l2_interface.out_pport;
        ngroup->has_pop_vlan = true;
        ngroup->pop_vlan = group->l2_interface.pop_vlan;
        break;
    case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
        ngroup->has_index = true;
        ngroup->index = ROCKER_GROUP_INDEX_LONG_GET(group->id);
        ngroup->has_group_id = true;
        ngroup->group_id = group->l2_rewrite.group_id;
        if (group->l2_rewrite.vlan_id) {
            ngroup->has_set_vlan_id = true;
            ngroup->set_vlan_id = ntohs(group->l2_rewrite.vlan_id);
        }
        if (memcmp(group->l2_rewrite.src_mac.a, zero_mac.a, ETH_ALEN)) {
            ngroup->has_set_eth_src = true;
            ngroup->set_eth_src =
                qemu_mac_strdup_printf(group->l2_rewrite.src_mac.a);
        }
        if (memcmp(group->l2_rewrite.dst_mac.a, zero_mac.a, ETH_ALEN)) {
            ngroup->has_set_eth_dst = true;
            ngroup->set_eth_dst =
                qemu_mac_strdup_printf(group->l2_rewrite.dst_mac.a);
        }
        break;
    case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
    case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
        ngroup->has_vlan_id = true;
        ngroup->vlan_id = ROCKER_GROUP_VLAN_GET(group->id);
        ngroup->has_index = true;
        ngroup->index = ROCKER_GROUP_INDEX_GET(group->id);
        for (i = 0; i < group->l2_flood.group_count; i++) {
            ngroup->has_group_ids = true;
            id = g_malloc0(sizeof(*id));
            id->value = group->l2_flood.group_ids[i];
            id->next = ngroup->group_ids;
            ngroup->group_ids = id;
        }
        break;
    case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
        ngroup->has_index = true;
        ngroup->index = ROCKER_GROUP_INDEX_LONG_GET(group->id);
        ngroup->has_group_id = true;
        ngroup->group_id = group->l3_unicast.group_id;
        if (group->l3_unicast.vlan_id) {
            ngroup->has_set_vlan_id = true;
            ngroup->set_vlan_id = ntohs(group->l3_unicast.vlan_id);
        }
        if (memcmp(group->l3_unicast.src_mac.a, zero_mac.a, ETH_ALEN)) {
            ngroup->has_set_eth_src = true;
            ngroup->set_eth_src =
                qemu_mac_strdup_printf(group->l3_unicast.src_mac.a);
        }
        if (memcmp(group->l3_unicast.dst_mac.a, zero_mac.a, ETH_ALEN)) {
            ngroup->has_set_eth_dst = true;
            ngroup->set_eth_dst =
                qemu_mac_strdup_printf(group->l3_unicast.dst_mac.a);
        }
        if (group->l3_unicast.ttl_check) {
            ngroup->has_ttl_check = true;
            ngroup->ttl_check = group->l3_unicast.ttl_check;
        }
        break;
    }

    new->next = group_context->list;
    group_context->list = new;
}
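
/* Handler for the QMP command "query-rocker-of-dpa-groups". Example
 * invocation (the switch name "sw1" is illustrative):
 *
 *   { "execute": "query-rocker-of-dpa-groups",
 *     "arguments": { "name": "sw1", "type": 0 } }
 */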
RockerOfDpaGroupList *qmp_query_rocker_of_dpa_groups(const char *name,
                                                     bool has_type,
                                                     uint8_t type,
                                                     Error **errp)
{
    struct rocker *r;
    struct world *w;
    struct of_dpa *of_dpa;
    struct of_dpa_group_fill_context fill_context = {
        .list = NULL,
        .type = type,
    };

    r = rocker_find(name);
    if (!r) {
        error_setg(errp, "rocker %s not found", name);
        return NULL;
    }

    w = rocker_get_world(r, ROCKER_WORLD_TYPE_OF_DPA);
    if (!w) {
        error_setg(errp, "rocker %s doesn't have OF-DPA world", name);
        return NULL;
    }

    of_dpa = world_private(w);

    g_hash_table_foreach(of_dpa->group_tbl, of_dpa_group_fill, &fill_context);

    return fill_context.list;
}
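
/* World registration: rocker instantiates the OF-DPA world through
 * of_dpa_world_alloc() and drives it via these WorldOps callbacks.
 */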
static WorldOps of_dpa_ops = {
    .name = "ofdpa",
    .init = of_dpa_init,
    .uninit = of_dpa_uninit,
    .ig = of_dpa_ig,
    .cmd = of_dpa_cmd,
};

World *of_dpa_world_alloc(Rocker *r)
{
    return world_alloc(r, sizeof(OfDpa), ROCKER_WORLD_TYPE_OF_DPA, &of_dpa_ops);
}