/*
 * QEMU rocker switch emulation - OF-DPA flow processing support
 *
 * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include "net/eth.h"
#include "qemu/iov.h"
#include "qemu/timer.h"
#include "qmp-commands.h"

#include "rocker.h"
#include "rocker_hw.h"
#include "rocker_fp.h"
#include "rocker_tlv.h"
#include "rocker_world.h"
#include "rocker_desc.h"
#include "rocker_of_dpa.h"

static const MACAddr zero_mac = { .a = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } };
static const MACAddr ff_mac = { .a = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff } };

/* Per-world state: flow_tbl is keyed by the 64-bit flow cookie and
 * group_tbl by the 32-bit group id; both keys live inside the entries
 * themselves (see of_dpa_flow_add() and of_dpa_group_add()).
 */
typedef struct of_dpa {
    World *world;
    GHashTable *flow_tbl;
    GHashTable *group_tbl;
    unsigned int flow_tbl_max_size;
    unsigned int group_tbl_max_size;
} OfDpa;

/* flow_key stolen mostly from OVS
 *
 * Note: fields that compare with network packet header fields
 * are stored in network order (BE) to avoid per-packet field
 * byte-swaps.
 */

typedef struct of_dpa_flow_key {
    uint32_t in_pport;           /* ingress port */
    uint32_t tunnel_id;          /* overlay tunnel id */
    uint32_t tbl_id;             /* table id */
    struct {
        __be16 vlan_id;          /* 0 if no VLAN */
        MACAddr src;             /* ethernet source address */
        MACAddr dst;             /* ethernet destination address */
        __be16 type;             /* ethernet frame type */
    } eth;
    struct {
        uint8_t proto;           /* IP protocol or ARP opcode */
        uint8_t tos;             /* IP ToS */
        uint8_t ttl;             /* IP TTL/hop limit */
        uint8_t frag;            /* one of FRAG_TYPE_* */
    } ip;
    union {
        struct {
            struct {
                __be32 src;      /* IP source address */
                __be32 dst;      /* IP destination address */
            } addr;
            union {
                struct {
                    __be16 src;  /* TCP/UDP/SCTP source port */
                    __be16 dst;  /* TCP/UDP/SCTP destination port */
                    __be16 flags; /* TCP flags */
                } tp;
                struct {
                    MACAddr sha; /* ARP source hardware address */
                    MACAddr tha; /* ARP target hardware address */
                } arp;
            };
        } ipv4;
        struct {
            struct {
                Ipv6Addr src;    /* IPv6 source address */
                Ipv6Addr dst;    /* IPv6 destination address */
            } addr;
            __be32 label;        /* IPv6 flow label */
            struct {
                __be16 src;      /* TCP/UDP/SCTP source port */
                __be16 dst;      /* TCP/UDP/SCTP destination port */
                __be16 flags;    /* TCP flags */
            } tp;
            struct {
                Ipv6Addr target; /* ND target address */
                MACAddr sll;     /* ND source link layer address */
                MACAddr tll;     /* ND target link layer address */
            } nd;
        } ipv6;
    };
    int width;                   /* how many uint64_t's in key? */
} OfDpaFlowKey;

/* Width of key which includes field 'f' in u64s, rounded up */
#define FLOW_KEY_WIDTH(f) \
    ((offsetof(OfDpaFlowKey, f) + \
      sizeof(((OfDpaFlowKey *)0)->f) + \
      sizeof(uint64_t) - 1) / sizeof(uint64_t))
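
/* For example, FLOW_KEY_WIDTH(tbl_id) covers in_pport, tunnel_id and
 * tbl_id (12 bytes), which rounds up to 2 u64s, so a match built by
 * of_dpa_ig_port_build_match() compares only the first two u64s of
 * the key.
 */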

typedef struct of_dpa_flow_action {
    uint32_t goto_tbl;
    struct {
        uint32_t group_id;
        uint32_t tun_log_lport;
        __be16 vlan_id;
    } write;
    struct {
        __be16 new_vlan_id;
        uint32_t out_pport;
        uint8_t copy_to_cpu;
        __be16 vlan_id;
    } apply;
} OfDpaFlowAction;

typedef struct of_dpa_flow {
    uint32_t lpm;
    uint32_t priority;
    uint32_t hardtime;
    uint32_t idletime;
    uint64_t cookie;
    OfDpaFlowKey key;
    OfDpaFlowKey mask;
    OfDpaFlowAction action;
    struct {
        uint64_t hits;
        int64_t install_time;
        int64_t refresh_time;
        uint64_t rx_pkts;
        uint64_t tx_pkts;
    } stats;
} OfDpaFlow;

typedef struct of_dpa_flow_pkt_fields {
    uint32_t tunnel_id;
    struct eth_header *ethhdr;
    __be16 *h_proto;
    struct vlan_header *vlanhdr;
    struct ip_header *ipv4hdr;
    struct ip6_header *ipv6hdr;
    Ipv6Addr *ipv6_src_addr;
    Ipv6Addr *ipv6_dst_addr;
} OfDpaFlowPktFields;

typedef struct of_dpa_flow_context {
    uint32_t in_pport;
    uint32_t tunnel_id;
    struct iovec *iov;
    int iovcnt;
    struct eth_header ethhdr_rewrite;
    struct vlan_header vlanhdr_rewrite;
    struct vlan_header vlanhdr;
    OfDpa *of_dpa;
    OfDpaFlowPktFields fields;
    OfDpaFlowAction action_set;
} OfDpaFlowContext;

typedef struct of_dpa_flow_match {
    OfDpaFlowKey value;
    OfDpaFlow *best;
} OfDpaFlowMatch;

typedef struct of_dpa_group {
    uint32_t id;
    union {
        struct {
            uint32_t out_pport;
            uint8_t pop_vlan;
        } l2_interface;
        struct {
            uint32_t group_id;
            MACAddr src_mac;
            MACAddr dst_mac;
            __be16 vlan_id;
        } l2_rewrite;
        struct {
            uint16_t group_count;
            uint32_t *group_ids;
        } l2_flood;
        struct {
            uint32_t group_id;
            MACAddr src_mac;
            MACAddr dst_mac;
            __be16 vlan_id;
            uint8_t ttl_check;
        } l3_unicast;
    };
} OfDpaGroup;
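
/* Convert a big-endian IPv4 netmask into a prefix length: count starts
 * at 32 and drops by one for each i where the mask has no bits among
 * the lowest i + 1. E.g. htonl(0xffffff00) decrements for i = 0..7,
 * giving 32 - 8 = 24.
 */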
static int of_dpa_mask2prefix(__be32 mask)
{
    int i;
    int count = 32;

    for (i = 0; i < 32; i++) {
        if (!(ntohl(mask) & ((2 << i) - 1))) {
            count--;
        }
    }

    return count;
}

#if defined(DEBUG_ROCKER)
static void of_dpa_flow_key_dump(OfDpaFlowKey *key, OfDpaFlowKey *mask)
{
    char buf[512], *b = buf, *mac;

    b += sprintf(b, " tbl %2d", key->tbl_id);

    if (key->in_pport || (mask && mask->in_pport)) {
        b += sprintf(b, " in_pport %2d", key->in_pport);
        if (mask && mask->in_pport != 0xffffffff) {
            b += sprintf(b, "/0x%08x", mask->in_pport);
        }
    }

    if (key->tunnel_id || (mask && mask->tunnel_id)) {
        b += sprintf(b, " tun %8d", key->tunnel_id);
        if (mask && mask->tunnel_id != 0xffffffff) {
            b += sprintf(b, "/0x%08x", mask->tunnel_id);
        }
    }

    if (key->eth.vlan_id || (mask && mask->eth.vlan_id)) {
        b += sprintf(b, " vlan %4d", ntohs(key->eth.vlan_id));
        if (mask && mask->eth.vlan_id != 0xffff) {
            b += sprintf(b, "/0x%04x", ntohs(mask->eth.vlan_id));
        }
    }

    if (memcmp(key->eth.src.a, zero_mac.a, ETH_ALEN) ||
        (mask && memcmp(mask->eth.src.a, zero_mac.a, ETH_ALEN))) {
        mac = qemu_mac_strdup_printf(key->eth.src.a);
        b += sprintf(b, " src %s", mac);
        g_free(mac);
        if (mask && memcmp(mask->eth.src.a, ff_mac.a, ETH_ALEN)) {
            mac = qemu_mac_strdup_printf(mask->eth.src.a);
            b += sprintf(b, "/%s", mac);
            g_free(mac);
        }
    }

    if (memcmp(key->eth.dst.a, zero_mac.a, ETH_ALEN) ||
        (mask && memcmp(mask->eth.dst.a, zero_mac.a, ETH_ALEN))) {
        mac = qemu_mac_strdup_printf(key->eth.dst.a);
        b += sprintf(b, " dst %s", mac);
        g_free(mac);
        if (mask && memcmp(mask->eth.dst.a, ff_mac.a, ETH_ALEN)) {
            mac = qemu_mac_strdup_printf(mask->eth.dst.a);
            b += sprintf(b, "/%s", mac);
            g_free(mac);
        }
    }

    if (key->eth.type || (mask && mask->eth.type)) {
        b += sprintf(b, " type 0x%04x", ntohs(key->eth.type));
        if (mask && mask->eth.type != 0xffff) {
            b += sprintf(b, "/0x%04x", ntohs(mask->eth.type));
        }
        switch (ntohs(key->eth.type)) {
        case 0x0800:
        case 0x86dd:
            if (key->ip.proto || (mask && mask->ip.proto)) {
                b += sprintf(b, " ip proto %2d", key->ip.proto);
                if (mask && mask->ip.proto != 0xff) {
                    b += sprintf(b, "/0x%02x", mask->ip.proto);
                }
            }
            if (key->ip.tos || (mask && mask->ip.tos)) {
                b += sprintf(b, " ip tos %2d", key->ip.tos);
                if (mask && mask->ip.tos != 0xff) {
                    b += sprintf(b, "/0x%02x", mask->ip.tos);
                }
            }
            break;
        }
        switch (ntohs(key->eth.type)) {
        case 0x0800:
            if (key->ipv4.addr.dst || (mask && mask->ipv4.addr.dst)) {
                b += sprintf(b, " dst %s",
                             inet_ntoa(*(struct in_addr *)&key->ipv4.addr.dst));
                if (mask) {
                    b += sprintf(b, "/%d",
                                 of_dpa_mask2prefix(mask->ipv4.addr.dst));
                }
            }
            break;
        }
    }

    DPRINTF("%s\n", buf);
}
#else
#define of_dpa_flow_key_dump(k, m)
#endif
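
/* A flow matches when every bit selected by the flow's mask agrees
 * between the flow's key and the value being searched; the per-u64
 * test below, (~k & m & v) | (k & m & ~v), is non-zero exactly when
 * (k ^ v) & m is, i.e. when some masked bit differs.
 */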
static void _of_dpa_flow_match(void *key, void *value, void *user_data)
{
    OfDpaFlow *flow = value;
    OfDpaFlowMatch *match = user_data;
    uint64_t *k = (uint64_t *)&flow->key;
    uint64_t *m = (uint64_t *)&flow->mask;
    uint64_t *v = (uint64_t *)&match->value;
    int i;

    if (flow->key.tbl_id == match->value.tbl_id) {
        of_dpa_flow_key_dump(&flow->key, &flow->mask);
    }

    if (flow->key.width > match->value.width) {
        return;
    }

    for (i = 0; i < flow->key.width; i++, k++, m++, v++) {
        if ((~*k & *m & *v) | (*k & *m & ~*v)) {
            return;
        }
    }

    DPRINTF("match\n");

    if (!match->best ||
        flow->priority > match->best->priority ||
        flow->lpm > match->best->lpm) {
        match->best = flow;
    }
}

static OfDpaFlow *of_dpa_flow_match(OfDpa *of_dpa, OfDpaFlowMatch *match)
{
    DPRINTF("\nnew search\n");
    of_dpa_flow_key_dump(&match->value, NULL);

    g_hash_table_foreach(of_dpa->flow_tbl, _of_dpa_flow_match, match);

    return match->best;
}

static OfDpaFlow *of_dpa_flow_find(OfDpa *of_dpa, uint64_t cookie)
{
    return g_hash_table_lookup(of_dpa->flow_tbl, &cookie);
}

static int of_dpa_flow_add(OfDpa *of_dpa, OfDpaFlow *flow)
{
    g_hash_table_insert(of_dpa->flow_tbl, &flow->cookie, flow);

    return ROCKER_OK;
}

static void of_dpa_flow_del(OfDpa *of_dpa, OfDpaFlow *flow)
{
    g_hash_table_remove(of_dpa->flow_tbl, &flow->cookie);
}

static OfDpaFlow *of_dpa_flow_alloc(uint64_t cookie)
{
    OfDpaFlow *flow;
    int64_t now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) / 1000;

    flow = g_new0(OfDpaFlow, 1);
    if (!flow) {
        return NULL;
    }

    flow->cookie = cookie;
    flow->mask.tbl_id = 0xffffffff;

    flow->stats.install_time = flow->stats.refresh_time = now;

    return flow;
}

static void of_dpa_flow_pkt_hdr_reset(OfDpaFlowContext *fc)
{
    OfDpaFlowPktFields *fields = &fc->fields;

    fc->iov[0].iov_base = fields->ethhdr;
    fc->iov[0].iov_len = sizeof(struct eth_header);
    fc->iov[1].iov_base = fields->vlanhdr;
    fc->iov[1].iov_len = fields->vlanhdr ? sizeof(struct vlan_header) : 0;
}

static void of_dpa_flow_pkt_parse(OfDpaFlowContext *fc,
                                  const struct iovec *iov, int iovcnt)
{
    OfDpaFlowPktFields *fields = &fc->fields;
    size_t sofar = 0;
    int i;

    sofar += sizeof(struct eth_header);
    if (iov->iov_len < sofar) {
        DPRINTF("flow_pkt_parse underrun on eth_header\n");
        return;
    }

    fields->ethhdr = iov->iov_base;
    fields->h_proto = &fields->ethhdr->h_proto;

    if (ntohs(*fields->h_proto) == ETH_P_VLAN) {
        sofar += sizeof(struct vlan_header);
        if (iov->iov_len < sofar) {
            DPRINTF("flow_pkt_parse underrun on vlan_header\n");
            return;
        }
        fields->vlanhdr = (struct vlan_header *)(fields->ethhdr + 1);
        fields->h_proto = &fields->vlanhdr->h_proto;
    }

    switch (ntohs(*fields->h_proto)) {
    case ETH_P_IP:
        sofar += sizeof(struct ip_header);
        if (iov->iov_len < sofar) {
            DPRINTF("flow_pkt_parse underrun on ip_header\n");
            return;
        }
        fields->ipv4hdr = (struct ip_header *)(fields->h_proto + 1);
        break;
    case ETH_P_IPV6:
        sofar += sizeof(struct ip6_header);
        if (iov->iov_len < sofar) {
            DPRINTF("flow_pkt_parse underrun on ip6_header\n");
            return;
        }
        fields->ipv6hdr = (struct ip6_header *)(fields->h_proto + 1);
        break;
    }

    /* To facilitate (potential) VLAN tag insertion, make a
     * copy of the iov and insert two new vectors at the
     * beginning for eth hdr and vlan hdr. No data is copied,
     * just the vectors.
     */

    of_dpa_flow_pkt_hdr_reset(fc);

    fc->iov[2].iov_base = fields->h_proto + 1;
    fc->iov[2].iov_len = iov->iov_len - fc->iov[0].iov_len - fc->iov[1].iov_len;

    for (i = 1; i < iovcnt; i++) {
        fc->iov[i + 2] = iov[i];
    }

    fc->iovcnt = iovcnt + 2;
}
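
/* After of_dpa_flow_pkt_parse(), the packet is described by:
 *
 *   iov[0]    ethernet header (14 bytes)
 *   iov[1]    VLAN header if tagged, else a zero-length vector
 *   iov[2]    remainder of the caller's first vector
 *   iov[3..]  the caller's remaining vectors, unchanged
 *
 * so the helpers below can insert, strip or rewrite headers by
 * repointing vectors instead of copying packet data.
 */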

static void of_dpa_flow_pkt_insert_vlan(OfDpaFlowContext *fc, __be16 vlan_id)
{
    OfDpaFlowPktFields *fields = &fc->fields;
    uint16_t h_proto = fields->ethhdr->h_proto;

    if (fields->vlanhdr) {
        DPRINTF("flow_pkt_insert_vlan packet already has vlan\n");
        return;
    }

    fields->ethhdr->h_proto = htons(ETH_P_VLAN);
    fields->vlanhdr = &fc->vlanhdr;
    fields->vlanhdr->h_tci = vlan_id;
    fields->vlanhdr->h_proto = h_proto;
    fields->h_proto = &fields->vlanhdr->h_proto;

    fc->iov[1].iov_base = fields->vlanhdr;
    fc->iov[1].iov_len = sizeof(struct vlan_header);
}
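
/* Strip the tag without copying: shrink iov[0] by two bytes so it ends
 * after the source MAC (dropping the outer ETH_P_VLAN ethertype) and
 * point iov[1] at the inner ethertype inside the VLAN header, leaving
 * dst MAC, src MAC, inner proto, payload.
 */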
static void of_dpa_flow_pkt_strip_vlan(OfDpaFlowContext *fc)
{
    OfDpaFlowPktFields *fields = &fc->fields;

    if (!fields->vlanhdr) {
        return;
    }

    fc->iov[0].iov_len -= sizeof(fields->ethhdr->h_proto);
    fc->iov[1].iov_base = fields->h_proto;
    fc->iov[1].iov_len = sizeof(fields->ethhdr->h_proto);
}

static void of_dpa_flow_pkt_hdr_rewrite(OfDpaFlowContext *fc,
                                        uint8_t *src_mac, uint8_t *dst_mac,
                                        __be16 vlan_id)
{
    OfDpaFlowPktFields *fields = &fc->fields;

    if (src_mac || dst_mac) {
        memcpy(&fc->ethhdr_rewrite, fields->ethhdr, sizeof(struct eth_header));
        if (src_mac && memcmp(src_mac, zero_mac.a, ETH_ALEN)) {
            memcpy(fc->ethhdr_rewrite.h_source, src_mac, ETH_ALEN);
        }
        if (dst_mac && memcmp(dst_mac, zero_mac.a, ETH_ALEN)) {
            memcpy(fc->ethhdr_rewrite.h_dest, dst_mac, ETH_ALEN);
        }
        fc->iov[0].iov_base = &fc->ethhdr_rewrite;
    }

    if (vlan_id && fields->vlanhdr) {
        fc->vlanhdr_rewrite = fc->vlanhdr;
        fc->vlanhdr_rewrite.h_tci = vlan_id;
        fc->iov[1].iov_base = &fc->vlanhdr_rewrite;
    }
}

static void of_dpa_flow_ig_tbl(OfDpaFlowContext *fc, uint32_t tbl_id);

static void of_dpa_ig_port_build_match(OfDpaFlowContext *fc,
                                       OfDpaFlowMatch *match)
{
    match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
    match->value.in_pport = fc->in_pport;
    match->value.width = FLOW_KEY_WIDTH(tbl_id);
}

static void of_dpa_ig_port_miss(OfDpaFlowContext *fc)
{
    uint32_t port;

    /* The default on miss is for packets from physical ports
     * to go to the VLAN Flow Table. There is no default rule
     * for packets from logical ports, which are dropped on miss.
     */

    if (fp_port_from_pport(fc->in_pport, &port)) {
        of_dpa_flow_ig_tbl(fc, ROCKER_OF_DPA_TABLE_ID_VLAN);
    }
}

static void of_dpa_vlan_build_match(OfDpaFlowContext *fc,
                                    OfDpaFlowMatch *match)
{
    match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
    match->value.in_pport = fc->in_pport;
    if (fc->fields.vlanhdr) {
        match->value.eth.vlan_id = fc->fields.vlanhdr->h_tci;
    }
    match->value.width = FLOW_KEY_WIDTH(eth.vlan_id);
}

static void of_dpa_vlan_insert(OfDpaFlowContext *fc,
                               OfDpaFlow *flow)
{
    if (flow->action.apply.new_vlan_id) {
        of_dpa_flow_pkt_insert_vlan(fc, flow->action.apply.new_vlan_id);
    }
}

static void of_dpa_term_mac_build_match(OfDpaFlowContext *fc,
                                        OfDpaFlowMatch *match)
{
    match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
    match->value.in_pport = fc->in_pport;
    match->value.eth.type = *fc->fields.h_proto;
    match->value.eth.vlan_id = fc->fields.vlanhdr->h_tci;
    memcpy(match->value.eth.dst.a, fc->fields.ethhdr->h_dest,
           sizeof(match->value.eth.dst.a));
    match->value.width = FLOW_KEY_WIDTH(eth.type);
}

static void of_dpa_term_mac_miss(OfDpaFlowContext *fc)
{
    of_dpa_flow_ig_tbl(fc, ROCKER_OF_DPA_TABLE_ID_BRIDGING);
}

static void of_dpa_apply_actions(OfDpaFlowContext *fc,
                                 OfDpaFlow *flow)
{
    fc->action_set.apply.copy_to_cpu = flow->action.apply.copy_to_cpu;
    fc->action_set.apply.vlan_id = flow->key.eth.vlan_id;
}

static void of_dpa_bridging_build_match(OfDpaFlowContext *fc,
                                        OfDpaFlowMatch *match)
{
    match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
    if (fc->fields.vlanhdr) {
        match->value.eth.vlan_id = fc->fields.vlanhdr->h_tci;
    } else if (fc->tunnel_id) {
        match->value.tunnel_id = fc->tunnel_id;
    }
    memcpy(match->value.eth.dst.a, fc->fields.ethhdr->h_dest,
           sizeof(match->value.eth.dst.a));
    match->value.width = FLOW_KEY_WIDTH(eth.dst);
}

static void of_dpa_bridging_learn(OfDpaFlowContext *fc,
                                  OfDpaFlow *dst_flow)
{
    OfDpaFlowMatch match = { { 0, }, };
    OfDpaFlow *flow;
    uint8_t *addr;
    uint16_t vlan_id;
    int64_t now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) / 1000;
    int64_t refresh_delay = 1;

    /* Do a lookup in bridge table by src_mac/vlan */

    addr = fc->fields.ethhdr->h_source;
    vlan_id = fc->fields.vlanhdr->h_tci;

    match.value.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
    match.value.eth.vlan_id = vlan_id;
    memcpy(match.value.eth.dst.a, addr, sizeof(match.value.eth.dst.a));
    match.value.width = FLOW_KEY_WIDTH(eth.dst);

    flow = of_dpa_flow_match(fc->of_dpa, &match);
    if (flow) {
        if (!memcmp(flow->mask.eth.dst.a, ff_mac.a,
                    sizeof(flow->mask.eth.dst.a))) {
            /* src_mac/vlan already learned; if in_port and out_port
             * don't match, the end station has moved and the port
             * needs updating */
            /* XXX implement the in_port/out_port check */
            if (now - flow->stats.refresh_time < refresh_delay) {
                return;
            }
            flow->stats.refresh_time = now;
        }
    }

    /* Let driver know about mac/vlan. This may be a new mac/vlan
     * or a refresh of existing mac/vlan that's been hit after the
     * refresh_delay.
     */

    rocker_event_mac_vlan_seen(world_rocker(fc->of_dpa->world),
                               fc->in_pport, addr, vlan_id);
}

static void of_dpa_bridging_miss(OfDpaFlowContext *fc)
{
    of_dpa_bridging_learn(fc, NULL);
    of_dpa_flow_ig_tbl(fc, ROCKER_OF_DPA_TABLE_ID_ACL_POLICY);
}

static void of_dpa_bridging_action_write(OfDpaFlowContext *fc,
                                         OfDpaFlow *flow)
{
    if (flow->action.write.group_id != ROCKER_GROUP_NONE) {
        fc->action_set.write.group_id = flow->action.write.group_id;
    }
    fc->action_set.write.tun_log_lport = flow->action.write.tun_log_lport;
}

static void of_dpa_unicast_routing_build_match(OfDpaFlowContext *fc,
                                               OfDpaFlowMatch *match)
{
    match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
    match->value.eth.type = *fc->fields.h_proto;
    if (fc->fields.ipv4hdr) {
        match->value.ipv4.addr.dst = fc->fields.ipv4hdr->ip_dst;
    }
    if (fc->fields.ipv6_dst_addr) {
        memcpy(&match->value.ipv6.addr.dst, fc->fields.ipv6_dst_addr,
               sizeof(match->value.ipv6.addr.dst));
    }
    match->value.width = FLOW_KEY_WIDTH(ipv6.addr.dst);
}

static void of_dpa_unicast_routing_miss(OfDpaFlowContext *fc)
{
    of_dpa_flow_ig_tbl(fc, ROCKER_OF_DPA_TABLE_ID_ACL_POLICY);
}

static void of_dpa_unicast_routing_action_write(OfDpaFlowContext *fc,
                                                OfDpaFlow *flow)
{
    if (flow->action.write.group_id != ROCKER_GROUP_NONE) {
        fc->action_set.write.group_id = flow->action.write.group_id;
    }
}

static void
of_dpa_multicast_routing_build_match(OfDpaFlowContext *fc,
                                     OfDpaFlowMatch *match)
{
    match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
    match->value.eth.type = *fc->fields.h_proto;
    match->value.eth.vlan_id = fc->fields.vlanhdr->h_tci;
    if (fc->fields.ipv4hdr) {
        match->value.ipv4.addr.src = fc->fields.ipv4hdr->ip_src;
        match->value.ipv4.addr.dst = fc->fields.ipv4hdr->ip_dst;
    }
    if (fc->fields.ipv6_src_addr) {
        memcpy(&match->value.ipv6.addr.src, fc->fields.ipv6_src_addr,
               sizeof(match->value.ipv6.addr.src));
    }
    if (fc->fields.ipv6_dst_addr) {
        memcpy(&match->value.ipv6.addr.dst, fc->fields.ipv6_dst_addr,
               sizeof(match->value.ipv6.addr.dst));
    }
    match->value.width = FLOW_KEY_WIDTH(ipv6.addr.dst);
}

static void of_dpa_multicast_routing_miss(OfDpaFlowContext *fc)
{
    of_dpa_flow_ig_tbl(fc, ROCKER_OF_DPA_TABLE_ID_ACL_POLICY);
}

static void
of_dpa_multicast_routing_action_write(OfDpaFlowContext *fc,
                                      OfDpaFlow *flow)
{
    if (flow->action.write.group_id != ROCKER_GROUP_NONE) {
        fc->action_set.write.group_id = flow->action.write.group_id;
    }
    fc->action_set.write.vlan_id = flow->action.write.vlan_id;
}

static void of_dpa_acl_build_match(OfDpaFlowContext *fc,
                                   OfDpaFlowMatch *match)
{
    match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
    match->value.in_pport = fc->in_pport;
    memcpy(match->value.eth.src.a, fc->fields.ethhdr->h_source,
           sizeof(match->value.eth.src.a));
    memcpy(match->value.eth.dst.a, fc->fields.ethhdr->h_dest,
           sizeof(match->value.eth.dst.a));
    match->value.eth.type = *fc->fields.h_proto;
    match->value.eth.vlan_id = fc->fields.vlanhdr->h_tci;
    match->value.width = FLOW_KEY_WIDTH(eth.type);
    if (fc->fields.ipv4hdr) {
        match->value.ip.proto = fc->fields.ipv4hdr->ip_p;
        match->value.ip.tos = fc->fields.ipv4hdr->ip_tos;
        match->value.width = FLOW_KEY_WIDTH(ip.tos);
    } else if (fc->fields.ipv6hdr) {
        match->value.ip.proto =
            fc->fields.ipv6hdr->ip6_ctlun.ip6_un1.ip6_un1_nxt;
        match->value.ip.tos = 0; /* XXX what goes here? */
        match->value.width = FLOW_KEY_WIDTH(ip.tos);
    }
}

static void of_dpa_eg(OfDpaFlowContext *fc);
static void of_dpa_acl_hit(OfDpaFlowContext *fc,
                           OfDpaFlow *dst_flow)
{
    of_dpa_eg(fc);
}

static void of_dpa_acl_action_write(OfDpaFlowContext *fc,
                                    OfDpaFlow *flow)
{
    if (flow->action.write.group_id != ROCKER_GROUP_NONE) {
        fc->action_set.write.group_id = flow->action.write.group_id;
    }
}

static void of_dpa_drop(OfDpaFlowContext *fc)
{
    /* drop packet */
}

static OfDpaGroup *of_dpa_group_find(OfDpa *of_dpa,
                                     uint32_t group_id)
{
    return g_hash_table_lookup(of_dpa->group_tbl, &group_id);
}

static int of_dpa_group_add(OfDpa *of_dpa, OfDpaGroup *group)
{
    g_hash_table_insert(of_dpa->group_tbl, &group->id, group);

    return 0;
}

#if 0
static int of_dpa_group_mod(OfDpa *of_dpa, OfDpaGroup *group)
{
    OfDpaGroup *old_group = of_dpa_group_find(of_dpa, group->id);

    if (!old_group) {
        return -ENOENT;
    }

    /* XXX */

    return 0;
}
#endif

static int of_dpa_group_del(OfDpa *of_dpa, OfDpaGroup *group)
{
    g_hash_table_remove(of_dpa->group_tbl, &group->id);

    return 0;
}

#if 0
static int of_dpa_group_get_stats(OfDpa *of_dpa, uint32_t id)
{
    OfDpaGroup *group = of_dpa_group_find(of_dpa, id);

    if (!group) {
        return -ENOENT;
    }

    /* XXX get/return stats */

    return 0;
}
#endif

static OfDpaGroup *of_dpa_group_alloc(uint32_t id)
{
    OfDpaGroup *group = g_new0(OfDpaGroup, 1);

    if (!group) {
        return NULL;
    }

    group->id = id;

    return group;
}

static void of_dpa_output_l2_interface(OfDpaFlowContext *fc,
                                       OfDpaGroup *group)
{
    uint8_t copy_to_cpu = fc->action_set.apply.copy_to_cpu;

    if (group->l2_interface.pop_vlan) {
        of_dpa_flow_pkt_strip_vlan(fc);
    }

    /* Note: By default, and as per the OpenFlow 1.3.1
     * specification, a packet cannot be forwarded back
     * to the IN_PORT from which it came in. An action
     * bucket that specifies the particular packet's
     * egress port is not evaluated.
     */

    if (group->l2_interface.out_pport == 0) {
        rx_produce(fc->of_dpa->world, fc->in_pport, fc->iov, fc->iovcnt,
                   copy_to_cpu);
    } else if (group->l2_interface.out_pport != fc->in_pport) {
        rocker_port_eg(world_rocker(fc->of_dpa->world),
                       group->l2_interface.out_pport,
                       fc->iov, fc->iovcnt);
    }
}

static void of_dpa_output_l2_rewrite(OfDpaFlowContext *fc,
                                     OfDpaGroup *group)
{
    OfDpaGroup *l2_group =
        of_dpa_group_find(fc->of_dpa, group->l2_rewrite.group_id);

    if (!l2_group) {
        return;
    }

    of_dpa_flow_pkt_hdr_rewrite(fc, group->l2_rewrite.src_mac.a,
                                group->l2_rewrite.dst_mac.a,
                                group->l2_rewrite.vlan_id);
    of_dpa_output_l2_interface(fc, l2_group);
}

static void of_dpa_output_l2_flood(OfDpaFlowContext *fc,
                                   OfDpaGroup *group)
{
    OfDpaGroup *l2_group;
    int i;

    for (i = 0; i < group->l2_flood.group_count; i++) {
        of_dpa_flow_pkt_hdr_reset(fc);
        l2_group = of_dpa_group_find(fc->of_dpa, group->l2_flood.group_ids[i]);
        if (!l2_group) {
            continue;
        }
        switch (ROCKER_GROUP_TYPE_GET(l2_group->id)) {
        case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
            of_dpa_output_l2_interface(fc, l2_group);
            break;
        case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
            of_dpa_output_l2_rewrite(fc, l2_group);
            break;
        }
    }
}

static void of_dpa_output_l3_unicast(OfDpaFlowContext *fc, OfDpaGroup *group)
{
    OfDpaGroup *l2_group =
        of_dpa_group_find(fc->of_dpa, group->l3_unicast.group_id);

    if (!l2_group) {
        return;
    }

    of_dpa_flow_pkt_hdr_rewrite(fc, group->l3_unicast.src_mac.a,
                                group->l3_unicast.dst_mac.a,
                                group->l3_unicast.vlan_id);
    /* XXX need ttl_check */
    of_dpa_output_l2_interface(fc, l2_group);
}

static void of_dpa_eg(OfDpaFlowContext *fc)
{
    OfDpaFlowAction *set = &fc->action_set;
    OfDpaGroup *group;
    uint32_t group_id;

    /* send a copy of pkt to CPU (controller)? */

    if (set->apply.copy_to_cpu) {
        group_id = ROCKER_GROUP_L2_INTERFACE(set->apply.vlan_id, 0);
        group = of_dpa_group_find(fc->of_dpa, group_id);
        if (group) {
            of_dpa_output_l2_interface(fc, group);
            of_dpa_flow_pkt_hdr_reset(fc);
        }
    }

    /* process group write actions */

    if (!set->write.group_id) {
        return;
    }

    group = of_dpa_group_find(fc->of_dpa, set->write.group_id);
    if (!group) {
        return;
    }

    switch (ROCKER_GROUP_TYPE_GET(group->id)) {
    case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
        of_dpa_output_l2_interface(fc, group);
        break;
    case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
        of_dpa_output_l2_rewrite(fc, group);
        break;
    case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
    case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
        of_dpa_output_l2_flood(fc, group);
        break;
    case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
        of_dpa_output_l3_unicast(fc, group);
        break;
    }
}

typedef struct of_dpa_flow_tbl_ops {
    void (*build_match)(OfDpaFlowContext *fc, OfDpaFlowMatch *match);
    void (*hit)(OfDpaFlowContext *fc, OfDpaFlow *flow);
    void (*miss)(OfDpaFlowContext *fc);
    void (*hit_no_goto)(OfDpaFlowContext *fc);
    void (*action_apply)(OfDpaFlowContext *fc, OfDpaFlow *flow);
    void (*action_write)(OfDpaFlowContext *fc, OfDpaFlow *flow);
} OfDpaFlowTblOps;
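
/* Per-table callbacks implementing the OF-DPA pipeline. A packet enters
 * at the Ingress Port table and walks Ingress Port -> VLAN ->
 * Termination MAC -> Bridging or Unicast/Multicast Routing -> ACL
 * Policy, driven by each matching flow's goto_tbl (or the table's miss
 * handler), before finally egressing through of_dpa_eg().
 */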
static OfDpaFlowTblOps of_dpa_tbl_ops[] = {
    [ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT] = {
        .build_match = of_dpa_ig_port_build_match,
        .miss = of_dpa_ig_port_miss,
        .hit_no_goto = of_dpa_drop,
    },
    [ROCKER_OF_DPA_TABLE_ID_VLAN] = {
        .build_match = of_dpa_vlan_build_match,
        .hit_no_goto = of_dpa_drop,
        .action_apply = of_dpa_vlan_insert,
    },
    [ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC] = {
        .build_match = of_dpa_term_mac_build_match,
        .miss = of_dpa_term_mac_miss,
        .hit_no_goto = of_dpa_drop,
        .action_apply = of_dpa_apply_actions,
    },
    [ROCKER_OF_DPA_TABLE_ID_BRIDGING] = {
        .build_match = of_dpa_bridging_build_match,
        .hit = of_dpa_bridging_learn,
        .miss = of_dpa_bridging_miss,
        .hit_no_goto = of_dpa_drop,
        .action_apply = of_dpa_apply_actions,
        .action_write = of_dpa_bridging_action_write,
    },
    [ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING] = {
        .build_match = of_dpa_unicast_routing_build_match,
        .miss = of_dpa_unicast_routing_miss,
        .hit_no_goto = of_dpa_drop,
        .action_write = of_dpa_unicast_routing_action_write,
    },
    [ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING] = {
        .build_match = of_dpa_multicast_routing_build_match,
        .miss = of_dpa_multicast_routing_miss,
        .hit_no_goto = of_dpa_drop,
        .action_write = of_dpa_multicast_routing_action_write,
    },
    [ROCKER_OF_DPA_TABLE_ID_ACL_POLICY] = {
        .build_match = of_dpa_acl_build_match,
        .hit = of_dpa_acl_hit,
        .miss = of_dpa_eg,
        .action_apply = of_dpa_apply_actions,
        .action_write = of_dpa_acl_action_write,
    },
};

static void of_dpa_flow_ig_tbl(OfDpaFlowContext *fc, uint32_t tbl_id)
{
    OfDpaFlowTblOps *ops = &of_dpa_tbl_ops[tbl_id];
    OfDpaFlowMatch match = { { 0, }, };
    OfDpaFlow *flow;

    if (ops->build_match) {
        ops->build_match(fc, &match);
    } else {
        return;
    }

    flow = of_dpa_flow_match(fc->of_dpa, &match);
    if (!flow) {
        if (ops->miss) {
            ops->miss(fc);
        }
        return;
    }

    flow->stats.hits++;

    if (ops->action_apply) {
        ops->action_apply(fc, flow);
    }

    if (ops->action_write) {
        ops->action_write(fc, flow);
    }

    if (ops->hit) {
        ops->hit(fc, flow);
    }

    if (flow->action.goto_tbl) {
        of_dpa_flow_ig_tbl(fc, flow->action.goto_tbl);
    } else if (ops->hit_no_goto) {
        ops->hit_no_goto(fc);
    }

    /* drop packet */
}

static ssize_t of_dpa_ig(World *world, uint32_t pport,
                         const struct iovec *iov, int iovcnt)
{
    struct iovec iov_copy[iovcnt + 2];
    OfDpaFlowContext fc = {
        .of_dpa = world_private(world),
        .in_pport = pport,
        .iov = iov_copy,
        .iovcnt = iovcnt + 2,
    };

    of_dpa_flow_pkt_parse(&fc, iov, iovcnt);
    of_dpa_flow_ig_tbl(&fc, ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT);

    return iov_size(iov, iovcnt);
}

#define ROCKER_TUNNEL_LPORT 0x00010000
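
/* pport numbers with this bit set name overlay-tunnel logical ports
 * rather than front-panel physical ports; of_dpa_cmd_add_ig_port()
 * uses it to require that tunnel traffic goes to the bridging table
 * while physical-port traffic goes to the VLAN table.
 */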

static int of_dpa_cmd_add_ig_port(OfDpaFlow *flow, RockerTlv **flow_tlvs)
{
    OfDpaFlowKey *key = &flow->key;
    OfDpaFlowKey *mask = &flow->mask;
    OfDpaFlowAction *action = &flow->action;
    bool overlay_tunnel;

    if (!flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
        return -ROCKER_EINVAL;
    }

    key->tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
    key->width = FLOW_KEY_WIDTH(tbl_id);

    key->in_pport = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT]);
    if (flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK]) {
        mask->in_pport =
            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK]);
    }

    overlay_tunnel = !!(key->in_pport & ROCKER_TUNNEL_LPORT);

    action->goto_tbl =
        rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);

    if (!overlay_tunnel && action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_VLAN) {
        return -ROCKER_EINVAL;
    }

    if (overlay_tunnel && action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_BRIDGING) {
        return -ROCKER_EINVAL;
    }

    return ROCKER_OK;
}

static int of_dpa_cmd_add_vlan(OfDpaFlow *flow, RockerTlv **flow_tlvs)
{
    OfDpaFlowKey *key = &flow->key;
    OfDpaFlowKey *mask = &flow->mask;
    OfDpaFlowAction *action = &flow->action;
    uint32_t port;
    bool untagged;

    if (!flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
        DPRINTF("Must give in_pport and vlan_id to install VLAN tbl entry\n");
        return -ROCKER_EINVAL;
    }

    key->tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
    key->width = FLOW_KEY_WIDTH(eth.vlan_id);

    key->in_pport = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT]);
    if (!fp_port_from_pport(key->in_pport, &port)) {
        DPRINTF("in_pport (%d) not a front-panel port\n", key->in_pport);
        return -ROCKER_EINVAL;
    }
    mask->in_pport = 0xffffffff;

    key->eth.vlan_id = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);

    if (flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]) {
        mask->eth.vlan_id =
            rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]);
    }

    if (key->eth.vlan_id) {
        untagged = false; /* filtering */
    } else {
        untagged = true;
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
        action->goto_tbl =
            rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);
        if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC) {
            DPRINTF("Goto tbl (%d) must be TERM_MAC\n", action->goto_tbl);
            return -ROCKER_EINVAL;
        }
    }

    if (untagged) {
        if (!flow_tlvs[ROCKER_TLV_OF_DPA_NEW_VLAN_ID]) {
            DPRINTF("Must specify new vlan_id if untagged\n");
            return -ROCKER_EINVAL;
        }
        action->apply.new_vlan_id =
            rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_NEW_VLAN_ID]);
        if (1 > ntohs(action->apply.new_vlan_id) ||
            ntohs(action->apply.new_vlan_id) > 4095) {
            DPRINTF("New vlan_id (%d) must be between 1 and 4095\n",
                    ntohs(action->apply.new_vlan_id));
            return -ROCKER_EINVAL;
        }
    }

    return ROCKER_OK;
}

static int of_dpa_cmd_add_term_mac(OfDpaFlow *flow, RockerTlv **flow_tlvs)
{
    OfDpaFlowKey *key = &flow->key;
    OfDpaFlowKey *mask = &flow->mask;
    OfDpaFlowAction *action = &flow->action;
    const MACAddr ipv4_mcast = { .a = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 } };
    const MACAddr ipv4_mask = { .a = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 } };
    const MACAddr ipv6_mcast = { .a = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 } };
    const MACAddr ipv6_mask = { .a = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } };
    uint32_t port;
    bool unicast = false;
    bool multicast = false;

    if (!flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]) {
        return -ROCKER_EINVAL;
    }

    key->tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
    key->width = FLOW_KEY_WIDTH(eth.type);

    key->in_pport = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT]);
    if (!fp_port_from_pport(key->in_pport, &port)) {
        return -ROCKER_EINVAL;
    }
    mask->in_pport =
        rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK]);

    key->eth.type = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]);
    if (key->eth.type != htons(0x0800) && key->eth.type != htons(0x86dd)) {
        return -ROCKER_EINVAL;
    }
    mask->eth.type = htons(0xffff);

    memcpy(key->eth.dst.a,
           rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]),
           sizeof(key->eth.dst.a));
    memcpy(mask->eth.dst.a,
           rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK]),
           sizeof(mask->eth.dst.a));

    if ((key->eth.dst.a[0] & 0x01) == 0x00) {
        unicast = true;
    }

    /* only two wildcard rules are acceptable for IPv4 and IPv6 multicast */
    if (memcmp(key->eth.dst.a, ipv4_mcast.a, sizeof(key->eth.dst.a)) == 0 &&
        memcmp(mask->eth.dst.a, ipv4_mask.a, sizeof(mask->eth.dst.a)) == 0) {
        multicast = true;
    }
    if (memcmp(key->eth.dst.a, ipv6_mcast.a, sizeof(key->eth.dst.a)) == 0 &&
        memcmp(mask->eth.dst.a, ipv6_mask.a, sizeof(mask->eth.dst.a)) == 0) {
        multicast = true;
    }

    if (!unicast && !multicast) {
        return -ROCKER_EINVAL;
    }

    key->eth.vlan_id = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
    mask->eth.vlan_id =
        rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]);

    if (flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
        action->goto_tbl =
            rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);

        if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING &&
            action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING) {
            return -ROCKER_EINVAL;
        }

        if (unicast &&
            action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING) {
            return -ROCKER_EINVAL;
        }

        if (multicast &&
            action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING) {
            return -ROCKER_EINVAL;
        }
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]) {
        action->apply.copy_to_cpu =
            rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]);
    }

    return ROCKER_OK;
}

static int of_dpa_cmd_add_bridging(OfDpaFlow *flow, RockerTlv **flow_tlvs)
{
    OfDpaFlowKey *key = &flow->key;
    OfDpaFlowKey *mask = &flow->mask;
    OfDpaFlowAction *action = &flow->action;
    bool unicast = false;
    bool dst_mac = false;
    bool dst_mac_mask = false;
    enum {
        BRIDGING_MODE_UNKNOWN,
        BRIDGING_MODE_VLAN_UCAST,
        BRIDGING_MODE_VLAN_MCAST,
        BRIDGING_MODE_VLAN_DFLT,
        BRIDGING_MODE_TUNNEL_UCAST,
        BRIDGING_MODE_TUNNEL_MCAST,
        BRIDGING_MODE_TUNNEL_DFLT,
    } mode = BRIDGING_MODE_UNKNOWN;

    key->tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;

    if (flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
        key->eth.vlan_id =
            rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
        mask->eth.vlan_id = 0xffff;
        key->width = FLOW_KEY_WIDTH(eth.vlan_id);
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_TUNNEL_ID]) {
        key->tunnel_id =
            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_TUNNEL_ID]);
        mask->tunnel_id = 0xffffffff;
        key->width = FLOW_KEY_WIDTH(tunnel_id);
    }

    /* can't do VLAN bridging and tunnel bridging at same time */
    if (key->eth.vlan_id && key->tunnel_id) {
        DPRINTF("can't do VLAN bridging and tunnel bridging at same time\n");
        return -ROCKER_EINVAL;
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]) {
        memcpy(key->eth.dst.a,
               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]),
               sizeof(key->eth.dst.a));
        key->width = FLOW_KEY_WIDTH(eth.dst);
        dst_mac = true;
        unicast = (key->eth.dst.a[0] & 0x01) == 0x00;
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK]) {
        memcpy(mask->eth.dst.a,
               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK]),
               sizeof(mask->eth.dst.a));
        key->width = FLOW_KEY_WIDTH(eth.dst);
        dst_mac_mask = true;
    } else if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]) {
        memcpy(mask->eth.dst.a, ff_mac.a, sizeof(mask->eth.dst.a));
    }

    if (key->eth.vlan_id) {
        if (dst_mac && !dst_mac_mask) {
            mode = unicast ? BRIDGING_MODE_VLAN_UCAST :
                             BRIDGING_MODE_VLAN_MCAST;
        } else if ((dst_mac && dst_mac_mask) || !dst_mac) {
            mode = BRIDGING_MODE_VLAN_DFLT;
        }
    } else if (key->tunnel_id) {
        if (dst_mac && !dst_mac_mask) {
            mode = unicast ? BRIDGING_MODE_TUNNEL_UCAST :
                             BRIDGING_MODE_TUNNEL_MCAST;
        } else if ((dst_mac && dst_mac_mask) || !dst_mac) {
            mode = BRIDGING_MODE_TUNNEL_DFLT;
        }
    }

    if (mode == BRIDGING_MODE_UNKNOWN) {
        DPRINTF("Unknown bridging mode\n");
        return -ROCKER_EINVAL;
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
        action->goto_tbl =
            rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);
        if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_ACL_POLICY) {
            DPRINTF("Bridging goto tbl must be ACL policy\n");
            return -ROCKER_EINVAL;
        }
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]) {
        action->write.group_id =
            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]);
        switch (mode) {
        case BRIDGING_MODE_VLAN_UCAST:
            if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
                ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE) {
                DPRINTF("Bridging mode vlan ucast needs L2 "
                        "interface group (0x%08x)\n",
                        action->write.group_id);
                return -ROCKER_EINVAL;
            }
            break;
        case BRIDGING_MODE_VLAN_MCAST:
            if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
                ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST) {
                DPRINTF("Bridging mode vlan mcast needs L2 "
                        "mcast group (0x%08x)\n",
                        action->write.group_id);
                return -ROCKER_EINVAL;
            }
            break;
        case BRIDGING_MODE_VLAN_DFLT:
            if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
                ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD) {
                DPRINTF("Bridging mode vlan dflt needs L2 "
                        "flood group (0x%08x)\n",
                        action->write.group_id);
                return -ROCKER_EINVAL;
            }
            break;
        case BRIDGING_MODE_TUNNEL_MCAST:
            if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
                ROCKER_OF_DPA_GROUP_TYPE_L2_OVERLAY) {
                DPRINTF("Bridging mode tunnel mcast needs L2 "
                        "overlay group (0x%08x)\n",
                        action->write.group_id);
                return -ROCKER_EINVAL;
            }
            break;
        case BRIDGING_MODE_TUNNEL_DFLT:
            if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
                ROCKER_OF_DPA_GROUP_TYPE_L2_OVERLAY) {
                DPRINTF("Bridging mode tunnel dflt needs L2 "
                        "overlay group (0x%08x)\n",
                        action->write.group_id);
                return -ROCKER_EINVAL;
            }
            break;
        default:
            return -ROCKER_EINVAL;
        }
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_TUNNEL_LPORT]) {
        action->write.tun_log_lport =
            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_TUNNEL_LPORT]);
        if (mode != BRIDGING_MODE_TUNNEL_UCAST) {
            DPRINTF("Have tunnel logical port but not "
                    "in bridging tunnel mode\n");
            return -ROCKER_EINVAL;
        }
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]) {
        action->apply.copy_to_cpu =
            rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]);
    }

    return ROCKER_OK;
}

static int of_dpa_cmd_add_unicast_routing(OfDpaFlow *flow,
                                          RockerTlv **flow_tlvs)
{
    OfDpaFlowKey *key = &flow->key;
    OfDpaFlowKey *mask = &flow->mask;
    OfDpaFlowAction *action = &flow->action;
    enum {
        UNICAST_ROUTING_MODE_UNKNOWN,
        UNICAST_ROUTING_MODE_IPV4,
        UNICAST_ROUTING_MODE_IPV6,
    } mode = UNICAST_ROUTING_MODE_UNKNOWN;
    uint8_t type;

    if (!flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]) {
        return -ROCKER_EINVAL;
    }

    key->tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
    key->width = FLOW_KEY_WIDTH(ipv6.addr.dst);

    key->eth.type = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]);
    switch (ntohs(key->eth.type)) {
    case 0x0800:
        mode = UNICAST_ROUTING_MODE_IPV4;
        break;
    case 0x86dd:
        mode = UNICAST_ROUTING_MODE_IPV6;
        break;
    default:
        return -ROCKER_EINVAL;
    }
    mask->eth.type = htons(0xffff);

    switch (mode) {
    case UNICAST_ROUTING_MODE_IPV4:
        if (!flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP]) {
            return -ROCKER_EINVAL;
        }
        key->ipv4.addr.dst =
            rocker_tlv_get_u32(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP]);
        if (ipv4_addr_is_multicast(key->ipv4.addr.dst)) {
            return -ROCKER_EINVAL;
        }
        flow->lpm = of_dpa_mask2prefix(htonl(0xffffffff));
        if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP_MASK]) {
            mask->ipv4.addr.dst =
                rocker_tlv_get_u32(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP_MASK]);
            flow->lpm = of_dpa_mask2prefix(mask->ipv4.addr.dst);
        }
        break;
    case UNICAST_ROUTING_MODE_IPV6:
        if (!flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6]) {
            return -ROCKER_EINVAL;
        }
        memcpy(&key->ipv6.addr.dst,
               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6]),
               sizeof(key->ipv6.addr.dst));
        if (ipv6_addr_is_multicast(&key->ipv6.addr.dst)) {
            return -ROCKER_EINVAL;
        }
        if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6_MASK]) {
            memcpy(&mask->ipv6.addr.dst,
                   rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6_MASK]),
                   sizeof(mask->ipv6.addr.dst));
        }
        break;
    default:
        return -ROCKER_EINVAL;
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
        action->goto_tbl =
            rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);
        if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_ACL_POLICY) {
            return -ROCKER_EINVAL;
        }
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]) {
        action->write.group_id =
            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]);
        type = ROCKER_GROUP_TYPE_GET(action->write.group_id);
        if (type != ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE &&
            type != ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST &&
            type != ROCKER_OF_DPA_GROUP_TYPE_L3_ECMP) {
            return -ROCKER_EINVAL;
        }
    }

    return ROCKER_OK;
}

static int of_dpa_cmd_add_multicast_routing(OfDpaFlow *flow,
                                            RockerTlv **flow_tlvs)
{
    OfDpaFlowKey *key = &flow->key;
    OfDpaFlowKey *mask = &flow->mask;
    OfDpaFlowAction *action = &flow->action;
    enum {
        MULTICAST_ROUTING_MODE_UNKNOWN,
        MULTICAST_ROUTING_MODE_IPV4,
        MULTICAST_ROUTING_MODE_IPV6,
    } mode = MULTICAST_ROUTING_MODE_UNKNOWN;

    if (!flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
        return -ROCKER_EINVAL;
    }

    key->tbl_id = ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
    key->width = FLOW_KEY_WIDTH(ipv6.addr.dst);

    key->eth.type = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]);
    switch (ntohs(key->eth.type)) {
    case 0x0800:
        mode = MULTICAST_ROUTING_MODE_IPV4;
        break;
    case 0x86dd:
        mode = MULTICAST_ROUTING_MODE_IPV6;
        break;
    default:
        return -ROCKER_EINVAL;
    }

    key->eth.vlan_id = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);

    switch (mode) {
    case MULTICAST_ROUTING_MODE_IPV4:

        if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IP]) {
            key->ipv4.addr.src =
                rocker_tlv_get_u32(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IP]);
        }

        if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IP_MASK]) {
            mask->ipv4.addr.src =
                rocker_tlv_get_u32(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IP_MASK]);
        }

        if (!flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IP]) {
            if (mask->ipv4.addr.src != 0) {
                return -ROCKER_EINVAL;
            }
        }

        if (!flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP]) {
            return -ROCKER_EINVAL;
        }

        key->ipv4.addr.dst =
            rocker_tlv_get_u32(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP]);
        if (!ipv4_addr_is_multicast(key->ipv4.addr.dst)) {
            return -ROCKER_EINVAL;
        }

        break;

    case MULTICAST_ROUTING_MODE_IPV6:

        if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IPV6]) {
            memcpy(&key->ipv6.addr.src,
                   rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IPV6]),
                   sizeof(key->ipv6.addr.src));
        }

        if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IPV6_MASK]) {
            memcpy(&mask->ipv6.addr.src,
                   rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IPV6_MASK]),
                   sizeof(mask->ipv6.addr.src));
        }

        if (!flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IPV6]) {
            if (mask->ipv6.addr.src.addr32[0] != 0 ||
                mask->ipv6.addr.src.addr32[1] != 0 ||
                mask->ipv6.addr.src.addr32[2] != 0 ||
                mask->ipv6.addr.src.addr32[3] != 0) {
                return -ROCKER_EINVAL;
            }
        }

        if (!flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6]) {
            return -ROCKER_EINVAL;
        }

        memcpy(&key->ipv6.addr.dst,
               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6]),
               sizeof(key->ipv6.addr.dst));
        if (!ipv6_addr_is_multicast(&key->ipv6.addr.dst)) {
            return -ROCKER_EINVAL;
        }

        break;

    default:
        return -ROCKER_EINVAL;
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
        action->goto_tbl =
            rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);
        if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_ACL_POLICY) {
            return -ROCKER_EINVAL;
        }
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]) {
        action->write.group_id =
            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]);
        if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
            ROCKER_OF_DPA_GROUP_TYPE_L3_MCAST) {
            return -ROCKER_EINVAL;
        }
        action->write.vlan_id = key->eth.vlan_id;
    }

    return ROCKER_OK;
}

static int of_dpa_cmd_add_acl_ip(OfDpaFlowKey *key, OfDpaFlowKey *mask,
                                 RockerTlv **flow_tlvs)
{
    key->width = FLOW_KEY_WIDTH(ip.tos);

    key->ip.proto = 0;
    key->ip.tos = 0;
    mask->ip.proto = 0;
    mask->ip.tos = 0;

    if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_PROTO]) {
        key->ip.proto =
            rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_PROTO]);
    }
    if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_PROTO_MASK]) {
        mask->ip.proto =
            rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_PROTO_MASK]);
    }
    if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_DSCP]) {
        key->ip.tos =
            rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_DSCP]);
    }
    if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_DSCP_MASK]) {
        mask->ip.tos =
            rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_DSCP_MASK]);
    }
    if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_ECN]) {
        key->ip.tos |=
            rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_ECN]) << 6;
    }
    if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_ECN_MASK]) {
        mask->ip.tos |=
            rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_ECN_MASK]) << 6;
    }

    return ROCKER_OK;
}

static int of_dpa_cmd_add_acl(OfDpaFlow *flow, RockerTlv **flow_tlvs)
{
    OfDpaFlowKey *key = &flow->key;
    OfDpaFlowKey *mask = &flow->mask;
    OfDpaFlowAction *action = &flow->action;
    enum {
        ACL_MODE_UNKNOWN,
        ACL_MODE_IPV4_VLAN,
        ACL_MODE_IPV6_VLAN,
        ACL_MODE_IPV4_TENANT,
        ACL_MODE_IPV6_TENANT,
        ACL_MODE_NON_IP_VLAN,
        ACL_MODE_NON_IP_TENANT,
        ACL_MODE_ANY_VLAN,
        ACL_MODE_ANY_TENANT,
    } mode = ACL_MODE_UNKNOWN;
    int err = ROCKER_OK;

    if (!flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]) {
        return -ROCKER_EINVAL;
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID] &&
        flow_tlvs[ROCKER_TLV_OF_DPA_TUNNEL_ID]) {
        return -ROCKER_EINVAL;
    }

    key->tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
    key->width = FLOW_KEY_WIDTH(eth.type);

    key->in_pport = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT]);
    if (flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK]) {
        mask->in_pport =
            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK]);
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]) {
        memcpy(key->eth.src.a,
               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]),
               sizeof(key->eth.src.a));
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC_MASK]) {
        memcpy(mask->eth.src.a,
               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC_MASK]),
               sizeof(mask->eth.src.a));
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]) {
        memcpy(key->eth.dst.a,
               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]),
               sizeof(key->eth.dst.a));
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK]) {
        memcpy(mask->eth.dst.a,
               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK]),
               sizeof(mask->eth.dst.a));
    }

    key->eth.type = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]);
    if (key->eth.type) {
        mask->eth.type = 0xffff;
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
        key->eth.vlan_id =
            rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]) {
        mask->eth.vlan_id =
            rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]);
    }

    switch (ntohs(key->eth.type)) {
    case 0x0000:
        mode = (key->eth.vlan_id) ? ACL_MODE_ANY_VLAN : ACL_MODE_ANY_TENANT;
        break;
    case 0x0800:
        mode = (key->eth.vlan_id) ? ACL_MODE_IPV4_VLAN : ACL_MODE_IPV4_TENANT;
        break;
    case 0x86dd:
        mode = (key->eth.vlan_id) ? ACL_MODE_IPV6_VLAN : ACL_MODE_IPV6_TENANT;
        break;
    default:
        mode = (key->eth.vlan_id) ? ACL_MODE_NON_IP_VLAN :
                                    ACL_MODE_NON_IP_TENANT;
        break;
    }

    /* XXX only supporting VLAN modes for now */
    if (mode != ACL_MODE_IPV4_VLAN &&
        mode != ACL_MODE_IPV6_VLAN &&
        mode != ACL_MODE_NON_IP_VLAN &&
        mode != ACL_MODE_ANY_VLAN) {
        return -ROCKER_EINVAL;
    }

    switch (ntohs(key->eth.type)) {
    case 0x0800:
    case 0x86dd:
        err = of_dpa_cmd_add_acl_ip(key, mask, flow_tlvs);
        break;
    }

    if (err) {
        return err;
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]) {
        action->write.group_id =
            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]);
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]) {
        action->apply.copy_to_cpu =
            rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]);
    }

    return ROCKER_OK;
}

static int of_dpa_cmd_flow_add_mod(OfDpa *of_dpa, OfDpaFlow *flow,
                                   RockerTlv **flow_tlvs)
{
    enum rocker_of_dpa_table_id tbl;
    int err = ROCKER_OK;

    if (!flow_tlvs[ROCKER_TLV_OF_DPA_TABLE_ID] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_PRIORITY] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_HARDTIME]) {
        return -ROCKER_EINVAL;
    }

    tbl = rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_TABLE_ID]);
    flow->priority = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_PRIORITY]);
    flow->hardtime = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_HARDTIME]);

    if (flow_tlvs[ROCKER_TLV_OF_DPA_IDLETIME]) {
        if (tbl == ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT ||
            tbl == ROCKER_OF_DPA_TABLE_ID_VLAN ||
            tbl == ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC) {
            return -ROCKER_EINVAL;
        }
        flow->idletime =
            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IDLETIME]);
    }

    switch (tbl) {
    case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
        err = of_dpa_cmd_add_ig_port(flow, flow_tlvs);
        break;
    case ROCKER_OF_DPA_TABLE_ID_VLAN:
        err = of_dpa_cmd_add_vlan(flow, flow_tlvs);
        break;
    case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
        err = of_dpa_cmd_add_term_mac(flow, flow_tlvs);
        break;
    case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
        err = of_dpa_cmd_add_bridging(flow, flow_tlvs);
        break;
    case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
        err = of_dpa_cmd_add_unicast_routing(flow, flow_tlvs);
        break;
    case ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING:
        err = of_dpa_cmd_add_multicast_routing(flow, flow_tlvs);
        break;
    case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
        err = of_dpa_cmd_add_acl(flow, flow_tlvs);
        break;
    }

    return err;
}

static int of_dpa_cmd_flow_add(OfDpa *of_dpa, uint64_t cookie,
                               RockerTlv **flow_tlvs)
{
    OfDpaFlow *flow = of_dpa_flow_find(of_dpa, cookie);
    int err = ROCKER_OK;

    if (flow) {
        return -ROCKER_EEXIST;
    }

    flow = of_dpa_flow_alloc(cookie);
    if (!flow) {
        return -ROCKER_ENOMEM;
    }

    err = of_dpa_cmd_flow_add_mod(of_dpa, flow, flow_tlvs);
    if (err) {
        g_free(flow);
        return err;
    }

    return of_dpa_flow_add(of_dpa, flow);
}

static int of_dpa_cmd_flow_mod(OfDpa *of_dpa, uint64_t cookie,
                               RockerTlv **flow_tlvs)
{
    OfDpaFlow *flow = of_dpa_flow_find(of_dpa, cookie);

    if (!flow) {
        return -ROCKER_ENOENT;
    }

    return of_dpa_cmd_flow_add_mod(of_dpa, flow, flow_tlvs);
}

static int of_dpa_cmd_flow_del(OfDpa *of_dpa, uint64_t cookie)
{
    OfDpaFlow *flow = of_dpa_flow_find(of_dpa, cookie);

    if (!flow) {
        return -ROCKER_ENOENT;
    }

    of_dpa_flow_del(of_dpa, flow);

    return ROCKER_OK;
}

static int of_dpa_cmd_flow_get_stats(OfDpa *of_dpa, uint64_t cookie,
                                     struct desc_info *info, char *buf)
{
    OfDpaFlow *flow = of_dpa_flow_find(of_dpa, cookie);
    size_t tlv_size;
    int64_t now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) / 1000;
    int pos;

    if (!flow) {
        return -ROCKER_ENOENT;
    }

    tlv_size = rocker_tlv_total_size(sizeof(uint32_t)) + /* duration */
               rocker_tlv_total_size(sizeof(uint64_t)) + /* rx_pkts */
               rocker_tlv_total_size(sizeof(uint64_t));  /* tx_pkts */

    if (tlv_size > desc_buf_size(info)) {
        return -ROCKER_EMSGSIZE;
    }

    pos = 0;
    rocker_tlv_put_le32(buf, &pos, ROCKER_TLV_OF_DPA_FLOW_STAT_DURATION,
                        (int32_t)(now - flow->stats.install_time));
    rocker_tlv_put_le64(buf, &pos, ROCKER_TLV_OF_DPA_FLOW_STAT_RX_PKTS,
                        flow->stats.rx_pkts);
    rocker_tlv_put_le64(buf, &pos, ROCKER_TLV_OF_DPA_FLOW_STAT_TX_PKTS,
                        flow->stats.tx_pkts);

    return desc_set_buf(info, tlv_size);
}

static int of_dpa_flow_cmd(OfDpa *of_dpa, struct desc_info *info,
                           char *buf, uint16_t cmd,
                           RockerTlv **flow_tlvs)
{
    uint64_t cookie;

    if (!flow_tlvs[ROCKER_TLV_OF_DPA_COOKIE]) {
        return -ROCKER_EINVAL;
    }

    cookie = rocker_tlv_get_le64(flow_tlvs[ROCKER_TLV_OF_DPA_COOKIE]);

    switch (cmd) {
    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD:
        return of_dpa_cmd_flow_add(of_dpa, cookie, flow_tlvs);
    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD:
        return of_dpa_cmd_flow_mod(of_dpa, cookie, flow_tlvs);
    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL:
        return of_dpa_cmd_flow_del(of_dpa, cookie);
    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_GET_STATS:
        return of_dpa_cmd_flow_get_stats(of_dpa, cookie, info, buf);
    }

    return -ROCKER_ENOTSUP;
}

static int of_dpa_cmd_add_l2_interface(OfDpaGroup *group,
                                       RockerTlv **group_tlvs)
{
    if (!group_tlvs[ROCKER_TLV_OF_DPA_OUT_PPORT] ||
        !group_tlvs[ROCKER_TLV_OF_DPA_POP_VLAN]) {
        return -ROCKER_EINVAL;
    }

    group->l2_interface.out_pport =
        rocker_tlv_get_le32(group_tlvs[ROCKER_TLV_OF_DPA_OUT_PPORT]);
    group->l2_interface.pop_vlan =
        rocker_tlv_get_u8(group_tlvs[ROCKER_TLV_OF_DPA_POP_VLAN]);

    return ROCKER_OK;
}

static int of_dpa_cmd_add_l2_rewrite(OfDpa *of_dpa, OfDpaGroup *group,
                                     RockerTlv **group_tlvs)
{
    OfDpaGroup *l2_interface_group;

    if (!group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID_LOWER]) {
        return -ROCKER_EINVAL;
    }

    group->l2_rewrite.group_id =
        rocker_tlv_get_le32(group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID_LOWER]);

    l2_interface_group = of_dpa_group_find(of_dpa, group->l2_rewrite.group_id);
    if (!l2_interface_group ||
        ROCKER_GROUP_TYPE_GET(l2_interface_group->id) !=
        ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE) {
        DPRINTF("l2 rewrite group needs a valid l2 interface group\n");
        return -ROCKER_EINVAL;
    }

    if (group_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]) {
        memcpy(group->l2_rewrite.src_mac.a,
               rocker_tlv_data(group_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]),
               sizeof(group->l2_rewrite.src_mac.a));
    }

    if (group_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]) {
        memcpy(group->l2_rewrite.dst_mac.a,
               rocker_tlv_data(group_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]),
               sizeof(group->l2_rewrite.dst_mac.a));
    }

    if (group_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
        group->l2_rewrite.vlan_id =
            rocker_tlv_get_u16(group_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
        if (ROCKER_GROUP_VLAN_GET(l2_interface_group->id) !=
            (ntohs(group->l2_rewrite.vlan_id) & VLAN_VID_MASK)) {
            DPRINTF("Set VLAN ID must be same as L2 interface group\n");
            return -ROCKER_EINVAL;
        }
    }

    return ROCKER_OK;
}
static int of_dpa_cmd_add_l2_flood(OfDpa *of_dpa, OfDpaGroup *group,
                                   RockerTlv **group_tlvs)
{
    OfDpaGroup *l2_group;
    RockerTlv **tlvs;
    int err;
    int i;

    if (!group_tlvs[ROCKER_TLV_OF_DPA_GROUP_COUNT] ||
        !group_tlvs[ROCKER_TLV_OF_DPA_GROUP_IDS]) {
        return -ROCKER_EINVAL;
    }

    group->l2_flood.group_count =
        rocker_tlv_get_le16(group_tlvs[ROCKER_TLV_OF_DPA_GROUP_COUNT]);

    tlvs = g_new0(RockerTlv *, group->l2_flood.group_count + 1);

    g_free(group->l2_flood.group_ids);
    group->l2_flood.group_ids =
        g_new0(uint32_t, group->l2_flood.group_count);

    rocker_tlv_parse_nested(tlvs, group->l2_flood.group_count,
                            group_tlvs[ROCKER_TLV_OF_DPA_GROUP_IDS]);

    for (i = 0; i < group->l2_flood.group_count; i++) {
        group->l2_flood.group_ids[i] = rocker_tlv_get_le32(tlvs[i + 1]);
    }

    /* All of the L2 interface groups referenced by the L2 flood
     * group must have the same VLAN
     */
    for (i = 0; i < group->l2_flood.group_count; i++) {
        l2_group = of_dpa_group_find(of_dpa, group->l2_flood.group_ids[i]);
        if (!l2_group) {
            continue;
        }
        if ((ROCKER_GROUP_TYPE_GET(l2_group->id) ==
             ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE) &&
            (ROCKER_GROUP_VLAN_GET(l2_group->id) !=
             ROCKER_GROUP_VLAN_GET(group->id))) {
            DPRINTF("l2 interface group 0x%08x VLAN doesn't match l2 "
                    "flood group 0x%08x\n",
                    group->l2_flood.group_ids[i], group->id);
            err = -ROCKER_EINVAL;
            goto err_out;
        }
    }

    g_free(tlvs);
    return ROCKER_OK;

err_out:
    group->l2_flood.group_count = 0;
    g_free(group->l2_flood.group_ids);
    group->l2_flood.group_ids = NULL; /* don't leave a dangling pointer */
    g_free(tlvs);

    return err;
}

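/* L3 unicast (routed) group: references a lower-level group via
 * ROCKER_TLV_OF_DPA_GROUP_ID_LOWER (mandatory), plus optional MAC and
 * VLAN rewrites and a TTL-check flag for routed packets.
 */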
static int of_dpa_cmd_add_l3_unicast(OfDpaGroup *group, RockerTlv **group_tlvs)
{
    if (!group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID_LOWER]) {
        return -ROCKER_EINVAL;
    }

    group->l3_unicast.group_id =
        rocker_tlv_get_le32(group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID_LOWER]);

    if (group_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]) {
        memcpy(group->l3_unicast.src_mac.a,
               rocker_tlv_data(group_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]),
               sizeof(group->l3_unicast.src_mac.a));
    }

    if (group_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]) {
        memcpy(group->l3_unicast.dst_mac.a,
               rocker_tlv_data(group_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]),
               sizeof(group->l3_unicast.dst_mac.a));
    }

    if (group_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
        group->l3_unicast.vlan_id =
            rocker_tlv_get_u16(group_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
    }

    if (group_tlvs[ROCKER_TLV_OF_DPA_TTL_CHECK]) {
        group->l3_unicast.ttl_check =
            rocker_tlv_get_u8(group_tlvs[ROCKER_TLV_OF_DPA_TTL_CHECK]);
    }

    return ROCKER_OK;
}

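/* Populate a group from its TLVs according to the group type encoded
 * in the upper bits of the group ID.  Shared by group add and mod.
 */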
static int of_dpa_cmd_group_do(OfDpa *of_dpa, uint32_t group_id,
                               OfDpaGroup *group, RockerTlv **group_tlvs)
{
    uint8_t type = ROCKER_GROUP_TYPE_GET(group_id);

    switch (type) {
    case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
        return of_dpa_cmd_add_l2_interface(group, group_tlvs);
    case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
        return of_dpa_cmd_add_l2_rewrite(of_dpa, group, group_tlvs);
    case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
    /* Treat an L2 multicast group the same as an L2 flood group */
    case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
        return of_dpa_cmd_add_l2_flood(of_dpa, group, group_tlvs);
    case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
        return of_dpa_cmd_add_l3_unicast(group, group_tlvs);
    }

    return -ROCKER_ENOTSUP;
}

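/* Add a new group: reject duplicate IDs, build the group from its
 * TLVs, then install it in the group table.  On failure the half-built
 * group is freed via the err_cmd_add path.
 */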
static int of_dpa_cmd_group_add(OfDpa *of_dpa, uint32_t group_id,
                                RockerTlv **group_tlvs)
{
    OfDpaGroup *group = of_dpa_group_find(of_dpa, group_id);
    int err;

    if (group) {
        return -ROCKER_EEXIST;
    }

    group = of_dpa_group_alloc(group_id);
    if (!group) {
        return -ROCKER_ENOMEM;
    }

    err = of_dpa_cmd_group_do(of_dpa, group_id, group, group_tlvs);
    if (err) {
        goto err_cmd_add;
    }

    err = of_dpa_group_add(of_dpa, group);
    if (err) {
        goto err_cmd_add;
    }

    return ROCKER_OK;

err_cmd_add:
    g_free(group);
    return err;
}

static int of_dpa_cmd_group_mod(OfDpa *of_dpa, uint32_t group_id,
                                RockerTlv **group_tlvs)
{
    OfDpaGroup *group = of_dpa_group_find(of_dpa, group_id);

    if (!group) {
        return -ROCKER_ENOENT;
    }

    return of_dpa_cmd_group_do(of_dpa, group_id, group, group_tlvs);
}

static int of_dpa_cmd_group_del(OfDpa *of_dpa, uint32_t group_id)
{
    OfDpaGroup *group = of_dpa_group_find(of_dpa, group_id);

    if (!group) {
        return -ROCKER_ENOENT;
    }

    return of_dpa_group_del(of_dpa, group);
}

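/* Per-group statistics are not implemented, so GET_STATS is rejected
 * with -ROCKER_ENOTSUP.
 */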
static int of_dpa_cmd_group_get_stats(OfDpa *of_dpa, uint32_t group_id,
                                      struct desc_info *info, char *buf)
{
    return -ROCKER_ENOTSUP;
}

static int of_dpa_group_cmd(OfDpa *of_dpa, struct desc_info *info,
                            char *buf, uint16_t cmd, RockerTlv **group_tlvs)
{
    uint32_t group_id;

    if (!group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]) {
        return -ROCKER_EINVAL;
    }

    group_id = rocker_tlv_get_le32(group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]);

    switch (cmd) {
    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD:
        return of_dpa_cmd_group_add(of_dpa, group_id, group_tlvs);
    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD:
        return of_dpa_cmd_group_mod(of_dpa, group_id, group_tlvs);
    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL:
        return of_dpa_cmd_group_del(of_dpa, group_id);
    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_GET_STATS:
        return of_dpa_cmd_group_get_stats(of_dpa, group_id, info, buf);
    }

    return -ROCKER_ENOTSUP;
}

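/* World entry point for OF-DPA commands: parse the nested TLVs once,
 * then route flow-table commands and group-table commands to their
 * respective dispatchers.
 */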
static int of_dpa_cmd(World *world, struct desc_info *info,
                      char *buf, uint16_t cmd, RockerTlv *cmd_info_tlv)
{
    OfDpa *of_dpa = world_private(world);
    RockerTlv *tlvs[ROCKER_TLV_OF_DPA_MAX + 1];

    rocker_tlv_parse_nested(tlvs, ROCKER_TLV_OF_DPA_MAX, cmd_info_tlv);

    switch (cmd) {
    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD:
    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD:
    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL:
    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_GET_STATS:
        return of_dpa_flow_cmd(of_dpa, info, buf, cmd, tlvs);
    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD:
    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD:
    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL:
    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_GET_STATS:
        return of_dpa_group_cmd(of_dpa, info, buf, cmd, tlvs);
    }

    return -ROCKER_ENOTSUP;
}

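/* Hash/equal helpers for 64-bit flow cookies; glib's g_int_hash and
 * g_int_equal only cover 32-bit keys, which suffices for group IDs
 * but not for flow cookies.
 */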
static gboolean rocker_int64_equal(gconstpointer v1, gconstpointer v2)
{
    return *((const uint64_t *)v1) == *((const uint64_t *)v2);
}

static guint rocker_int64_hash(gconstpointer v)
{
    return (guint)*(const uint64_t *)v;
}

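/* Note that g_hash_table_new_full() cannot return NULL (glib aborts
 * on allocation failure), so the table constructors need no error
 * checking.
 */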
static int of_dpa_init(World *world)
{
    OfDpa *of_dpa = world_private(world);

    of_dpa->world = world;

    of_dpa->flow_tbl = g_hash_table_new_full(rocker_int64_hash,
                                             rocker_int64_equal,
                                             NULL, g_free);

    of_dpa->group_tbl = g_hash_table_new_full(g_int_hash, g_int_equal,
                                              NULL, g_free);

    /* XXX hardcode some artificial table max values */
    of_dpa->flow_tbl_max_size = 100;
    of_dpa->group_tbl_max_size = 100;

    return 0;
}

static void of_dpa_uninit(World *world)
{
    OfDpa *of_dpa = world_private(world);

    g_hash_table_destroy(of_dpa->group_tbl);
    g_hash_table_destroy(of_dpa->flow_tbl);
}

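/* Context threaded through g_hash_table_foreach() while building the
 * QMP flow list; a tbl_id of -1 means "dump all tables".
 */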
struct of_dpa_flow_fill_context {
    RockerOfDpaFlowList *list;
    uint32_t tbl_id;
};

static void of_dpa_flow_fill(void *cookie, void *value, void *user_data)
{
    struct of_dpa_flow *flow = value;
    struct of_dpa_flow_key *key = &flow->key;
    struct of_dpa_flow_key *mask = &flow->mask;
    struct of_dpa_flow_fill_context *flow_context = user_data;
    RockerOfDpaFlowList *new;
    RockerOfDpaFlow *nflow;
    RockerOfDpaFlowKey *nkey;
    RockerOfDpaFlowMask *nmask;
    RockerOfDpaFlowAction *naction;

    if (flow_context->tbl_id != -1 &&
        flow_context->tbl_id != key->tbl_id) {
        return;
    }

    new = g_malloc0(sizeof(*new));
    nflow = new->value = g_malloc0(sizeof(*nflow));
    nkey = nflow->key = g_malloc0(sizeof(*nkey));
    nmask = nflow->mask = g_malloc0(sizeof(*nmask));
    naction = nflow->action = g_malloc0(sizeof(*naction));

    nflow->cookie = flow->cookie;
    nflow->hits = flow->stats.hits;
    nkey->priority = flow->priority;
    nkey->tbl_id = key->tbl_id;

    if (key->in_pport || mask->in_pport) {
        nkey->has_in_pport = true;
        nkey->in_pport = key->in_pport;
    }

    if (nkey->has_in_pport && mask->in_pport != 0xffffffff) {
        nmask->has_in_pport = true;
        nmask->in_pport = mask->in_pport;
    }

    if (key->eth.vlan_id || mask->eth.vlan_id) {
        nkey->has_vlan_id = true;
        nkey->vlan_id = ntohs(key->eth.vlan_id);
    }

    if (nkey->has_vlan_id && mask->eth.vlan_id != 0xffff) {
        nmask->has_vlan_id = true;
        nmask->vlan_id = ntohs(mask->eth.vlan_id);
    }

    if (key->tunnel_id || mask->tunnel_id) {
        nkey->has_tunnel_id = true;
        nkey->tunnel_id = key->tunnel_id;
    }

    if (nkey->has_tunnel_id && mask->tunnel_id != 0xffffffff) {
        nmask->has_tunnel_id = true;
        nmask->tunnel_id = mask->tunnel_id;
    }

    if (memcmp(key->eth.src.a, zero_mac.a, ETH_ALEN) ||
        memcmp(mask->eth.src.a, zero_mac.a, ETH_ALEN)) {
        nkey->has_eth_src = true;
        nkey->eth_src = qemu_mac_strdup_printf(key->eth.src.a);
    }

    if (nkey->has_eth_src && memcmp(mask->eth.src.a, ff_mac.a, ETH_ALEN)) {
        nmask->has_eth_src = true;
        nmask->eth_src = qemu_mac_strdup_printf(mask->eth.src.a);
    }

    if (memcmp(key->eth.dst.a, zero_mac.a, ETH_ALEN) ||
        memcmp(mask->eth.dst.a, zero_mac.a, ETH_ALEN)) {
        nkey->has_eth_dst = true;
        nkey->eth_dst = qemu_mac_strdup_printf(key->eth.dst.a);
    }

    if (nkey->has_eth_dst && memcmp(mask->eth.dst.a, ff_mac.a, ETH_ALEN)) {
        nmask->has_eth_dst = true;
        nmask->eth_dst = qemu_mac_strdup_printf(mask->eth.dst.a);
    }

    if (key->eth.type) {
        nkey->has_eth_type = true;
        nkey->eth_type = ntohs(key->eth.type);

        switch (ntohs(key->eth.type)) {
        case 0x0800:
        case 0x86dd:
            if (key->ip.proto || mask->ip.proto) {
                nkey->has_ip_proto = true;
                nkey->ip_proto = key->ip.proto;
            }
            if (nkey->has_ip_proto && mask->ip.proto != 0xff) {
                nmask->has_ip_proto = true;
                nmask->ip_proto = mask->ip.proto;
            }
            if (key->ip.tos || mask->ip.tos) {
                nkey->has_ip_tos = true;
                nkey->ip_tos = key->ip.tos;
            }
            if (nkey->has_ip_tos && mask->ip.tos != 0xff) {
                nmask->has_ip_tos = true;
                nmask->ip_tos = mask->ip.tos;
            }
            break;
        }

        switch (ntohs(key->eth.type)) {
        case 0x0800:
            if (key->ipv4.addr.dst || mask->ipv4.addr.dst) {
                char *dst = inet_ntoa(*(struct in_addr *)&key->ipv4.addr.dst);
                int dst_len = of_dpa_mask2prefix(mask->ipv4.addr.dst);
                nkey->has_ip_dst = true;
                nkey->ip_dst = g_strdup_printf("%s/%d", dst, dst_len);
            }
            break;
        }
    }

    if (flow->action.goto_tbl) {
        naction->has_goto_tbl = true;
        naction->goto_tbl = flow->action.goto_tbl;
    }

    if (flow->action.write.group_id) {
        naction->has_group_id = true;
        naction->group_id = flow->action.write.group_id;
    }

    if (flow->action.apply.new_vlan_id) {
        naction->has_new_vlan_id = true;
        naction->new_vlan_id = flow->action.apply.new_vlan_id;
    }

    new->next = flow_context->list;
    flow_context->list = new;
}

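/* Back-end for the QMP query of OF-DPA flows.  A sketch, assuming the
 * usual QAPI naming maps this to query-rocker-of-dpa-flows:
 *
 *   { "execute": "query-rocker-of-dpa-flows",
 *     "arguments": { "name": "sw1", "tbl-id": 0 } }
 *
 * Callers pass tbl_id = -1 to dump all tables (see of_dpa_flow_fill
 * above).
 */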
RockerOfDpaFlowList *qmp_query_rocker_of_dpa_flows(const char *name,
                                                   bool has_tbl_id,
                                                   uint32_t tbl_id,
                                                   Error **errp)
{
    struct rocker *r;
    struct world *w;
    struct of_dpa *of_dpa;
    struct of_dpa_flow_fill_context fill_context = {
        .list = NULL,
        .tbl_id = tbl_id,
    };

    r = rocker_find(name);
    if (!r) {
        error_set(errp, ERROR_CLASS_GENERIC_ERROR,
                  "rocker %s not found", name);
        return NULL;
    }

    w = rocker_get_world(r, ROCKER_WORLD_TYPE_OF_DPA);
    if (!w) {
        error_set(errp, ERROR_CLASS_GENERIC_ERROR,
                  "rocker %s doesn't have OF-DPA world", name);
        return NULL;
    }

    of_dpa = world_private(w);

    g_hash_table_foreach(of_dpa->flow_tbl, of_dpa_flow_fill, &fill_context);

    return fill_context.list;
}

struct of_dpa_group_fill_context {
    RockerOfDpaGroupList *list;
    uint8_t type;
};

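/* Convert one installed group into its QMP representation.  A type of
 * 9 (not a valid OF-DPA group type) serves as the caller's "match any
 * group type" sentinel.
 */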
static void of_dpa_group_fill(void *key, void *value, void *user_data)
{
    struct of_dpa_group *group = value;
    struct of_dpa_group_fill_context *group_context = user_data;
    RockerOfDpaGroupList *new;
    RockerOfDpaGroup *ngroup;
    struct uint32List *id;
    int i;

    if (group_context->type != 9 &&
        group_context->type != ROCKER_GROUP_TYPE_GET(group->id)) {
        return;
    }

    new = g_malloc0(sizeof(*new));
    ngroup = new->value = g_malloc0(sizeof(*ngroup));

    ngroup->id = group->id;

    ngroup->type = ROCKER_GROUP_TYPE_GET(group->id);

    switch (ngroup->type) {
    case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
        ngroup->has_vlan_id = true;
        ngroup->vlan_id = ROCKER_GROUP_VLAN_GET(group->id);
        ngroup->has_pport = true;
        ngroup->pport = ROCKER_GROUP_PORT_GET(group->id);
        ngroup->has_out_pport = true;
        ngroup->out_pport = group->l2_interface.out_pport;
        ngroup->has_pop_vlan = true;
        ngroup->pop_vlan = group->l2_interface.pop_vlan;
        break;
    case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
        ngroup->has_index = true;
        ngroup->index = ROCKER_GROUP_INDEX_LONG_GET(group->id);
        ngroup->has_group_id = true;
        ngroup->group_id = group->l2_rewrite.group_id;
        if (group->l2_rewrite.vlan_id) {
            ngroup->has_set_vlan_id = true;
            ngroup->set_vlan_id = ntohs(group->l2_rewrite.vlan_id);
        }
        if (memcmp(group->l2_rewrite.src_mac.a, zero_mac.a, ETH_ALEN)) {
            ngroup->has_set_eth_src = true;
            ngroup->set_eth_src =
                qemu_mac_strdup_printf(group->l2_rewrite.src_mac.a);
        }
        if (memcmp(group->l2_rewrite.dst_mac.a, zero_mac.a, ETH_ALEN)) {
            ngroup->has_set_eth_dst = true;
            ngroup->set_eth_dst =
                qemu_mac_strdup_printf(group->l2_rewrite.dst_mac.a);
        }
        break;
    case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
    case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
        ngroup->has_vlan_id = true;
        ngroup->vlan_id = ROCKER_GROUP_VLAN_GET(group->id);
        ngroup->has_index = true;
        ngroup->index = ROCKER_GROUP_INDEX_GET(group->id);
        for (i = 0; i < group->l2_flood.group_count; i++) {
            ngroup->has_group_ids = true;
            id = g_malloc0(sizeof(*id));
            id->value = group->l2_flood.group_ids[i];
            id->next = ngroup->group_ids;
            ngroup->group_ids = id;
        }
        break;
    case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
        ngroup->has_index = true;
        ngroup->index = ROCKER_GROUP_INDEX_LONG_GET(group->id);
        ngroup->has_group_id = true;
        ngroup->group_id = group->l3_unicast.group_id;
        if (group->l3_unicast.vlan_id) {
            ngroup->has_set_vlan_id = true;
            ngroup->set_vlan_id = ntohs(group->l3_unicast.vlan_id);
        }
        if (memcmp(group->l3_unicast.src_mac.a, zero_mac.a, ETH_ALEN)) {
            ngroup->has_set_eth_src = true;
            ngroup->set_eth_src =
                qemu_mac_strdup_printf(group->l3_unicast.src_mac.a);
        }
        if (memcmp(group->l3_unicast.dst_mac.a, zero_mac.a, ETH_ALEN)) {
            ngroup->has_set_eth_dst = true;
            ngroup->set_eth_dst =
                qemu_mac_strdup_printf(group->l3_unicast.dst_mac.a);
        }
        if (group->l3_unicast.ttl_check) {
            ngroup->has_ttl_check = true;
            ngroup->ttl_check = group->l3_unicast.ttl_check;
        }
        break;
    }

    new->next = group_context->list;
    group_context->list = new;
}

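/* Back-end for the QMP query of OF-DPA groups.  A sketch, assuming the
 * usual QAPI naming maps this to query-rocker-of-dpa-groups:
 *
 *   { "execute": "query-rocker-of-dpa-groups",
 *     "arguments": { "name": "sw1", "type": 0 } }
 *
 * Callers pass type = 9 to list every group type (see
 * of_dpa_group_fill above).
 */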
RockerOfDpaGroupList *qmp_query_rocker_of_dpa_groups(const char *name,
                                                     bool has_type,
                                                     uint8_t type,
                                                     Error **errp)
{
    struct rocker *r;
    struct world *w;
    struct of_dpa *of_dpa;
    struct of_dpa_group_fill_context fill_context = {
        .list = NULL,
        .type = type,
    };

    r = rocker_find(name);
    if (!r) {
        error_set(errp, ERROR_CLASS_GENERIC_ERROR,
                  "rocker %s not found", name);
        return NULL;
    }

    w = rocker_get_world(r, ROCKER_WORLD_TYPE_OF_DPA);
    if (!w) {
        error_set(errp, ERROR_CLASS_GENERIC_ERROR,
                  "rocker %s doesn't have OF-DPA world", name);
        return NULL;
    }

    of_dpa = world_private(w);

    g_hash_table_foreach(of_dpa->group_tbl, of_dpa_group_fill, &fill_context);

    return fill_context.list;
}

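/* Hook the OF-DPA world into the rocker core: init/uninit manage the
 * flow and group tables, of_dpa_ig handles ingress packets, and
 * of_dpa_cmd handles descriptor-ring commands.
 */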
static WorldOps of_dpa_ops = {
    .init = of_dpa_init,
    .uninit = of_dpa_uninit,
    .ig = of_dpa_ig,
    .cmd = of_dpa_cmd,
};

World *of_dpa_world_alloc(Rocker *r)
{
    return world_alloc(r, sizeof(OfDpa), ROCKER_WORLD_TYPE_OF_DPA, &of_dpa_ops);
}