/*
 * NOTE(review): the lines below are gitweb page residue (commit subject,
 * repository path, blob hash) left over from extraction; preserved here
 * as a provenance comment rather than bare text.
 *
 * qapi: Use QAPI_LIST_PREPEND() where possible
 * [qemu/ar7.git] / hw / net / rocker / rocker_of_dpa.c
 * blob b3b8c5bb6d4b5700e4d22b72955af759f60be184
 */
1 /*
2 * QEMU rocker switch emulation - OF-DPA flow processing support
4 * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
17 #include "qemu/osdep.h"
18 #include "net/eth.h"
19 #include "qapi/error.h"
20 #include "qapi/qapi-commands-rocker.h"
21 #include "qemu/iov.h"
22 #include "qemu/timer.h"
24 #include "rocker.h"
25 #include "rocker_hw.h"
26 #include "rocker_fp.h"
27 #include "rocker_tlv.h"
28 #include "rocker_world.h"
29 #include "rocker_desc.h"
30 #include "rocker_of_dpa.h"
/* Wildcard helpers: all-zero MAC means "unset/don't care", all-ones MAC
 * is a full (exact) match mask. */
static const MACAddr zero_mac = { .a = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } };
static const MACAddr ff_mac = { .a = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff } };
/* Per-world OF-DPA state.  Flow and group tables are GHashTables keyed
 * by flow cookie (uint64_t) and group id (uint32_t) respectively; see
 * of_dpa_flow_add()/of_dpa_group_add(). */
typedef struct of_dpa {
    World *world;                   /* owning rocker world */
    GHashTable *flow_tbl;           /* cookie -> OfDpaFlow */
    GHashTable *group_tbl;          /* group id -> OfDpaGroup */
    unsigned int flow_tbl_max_size;   /* capacity limits; enforcement not
                                       * visible in this chunk — confirm */
    unsigned int group_tbl_max_size;
} OfDpa;
/* flow_key stolen mostly from OVS
 *
 * Note: fields that compare with network packet header fields
 * are stored in network order (BE) to avoid per-packet field
 * byte-swaps.
 */

typedef struct of_dpa_flow_key {
    uint32_t in_pport;             /* ingress port */
    uint32_t tunnel_id;            /* overlay tunnel id */
    uint32_t tbl_id;               /* table id */
    struct {
        __be16 vlan_id;            /* 0 if no VLAN */
        MACAddr src;               /* ethernet source address */
        MACAddr dst;               /* ethernet destination address */
        __be16 type;               /* ethernet frame type */
    } eth;
    struct {
        uint8_t proto;             /* IP protocol or ARP opcode */
        uint8_t tos;               /* IP ToS */
        uint8_t ttl;               /* IP TTL/hop limit */
        uint8_t frag;              /* one of FRAG_TYPE_* */
    } ip;
    union {
        struct {
            struct {
                __be32 src;        /* IP source address */
                __be32 dst;        /* IP destination address */
            } addr;
            union {
                struct {
                    __be16 src;    /* TCP/UDP/SCTP source port */
                    __be16 dst;    /* TCP/UDP/SCTP destination port */
                    __be16 flags;  /* TCP flags */
                } tp;
                struct {
                    MACAddr sha;   /* ARP source hardware address */
                    MACAddr tha;   /* ARP target hardware address */
                } arp;
            };
        } ipv4;
        struct {
            struct {
                Ipv6Addr src;      /* IPv6 source address */
                Ipv6Addr dst;      /* IPv6 destination address */
            } addr;
            __be32 label;          /* IPv6 flow label */
            struct {
                __be16 src;        /* TCP/UDP/SCTP source port */
                __be16 dst;        /* TCP/UDP/SCTP destination port */
                __be16 flags;      /* TCP flags */
            } tp;
            struct {
                Ipv6Addr target;   /* ND target address */
                MACAddr sll;       /* ND source link layer address */
                MACAddr tll;       /* ND target link layer address */
            } nd;
        } ipv6;
    };
    int width;                     /* how many uint64_t's in key? */
} OfDpaFlowKey;
/* Width of key which includes field 'f' in u64s, rounded up.  Used to set
 * OfDpaFlowKey.width so _of_dpa_flow_match() only compares the 64-bit
 * words actually populated for a given table. */
#define FLOW_KEY_WIDTH(f) \
    DIV_ROUND_UP(offsetof(OfDpaFlowKey, f) + sizeof_field(OfDpaFlowKey, f), \
    sizeof(uint64_t))
/* Actions attached to a flow entry.  "apply" actions take effect during
 * table processing; "write" actions are accumulated into the packet's
 * action set and executed at egress (of_dpa_eg()). */
typedef struct of_dpa_flow_action {
    uint32_t goto_tbl;             /* next table id; 0 = no goto */
    struct {
        uint32_t group_id;
        uint32_t tun_log_lport;
        __be16 vlan_id;
    } write;
    struct {
        __be16 new_vlan_id;
        uint32_t out_pport;
        uint8_t copy_to_cpu;
        __be16 vlan_id;
    } apply;
} OfDpaFlowAction;
/* A single flow-table entry: match key + mask, actions, and counters.
 * Keyed in OfDpa.flow_tbl by 'cookie'. */
typedef struct of_dpa_flow {
    uint32_t lpm;                  /* longest-prefix-match tiebreaker */
    uint32_t priority;             /* higher wins among matches */
    uint32_t hardtime;
    uint32_t idletime;
    uint64_t cookie;               /* unique id, hash-table key */
    OfDpaFlowKey key;
    OfDpaFlowKey mask;             /* 1-bits select key bits to compare */
    OfDpaFlowAction action;
    struct {
        uint64_t hits;
        int64_t install_time;      /* seconds (virtual clock) */
        int64_t refresh_time;      /* seconds; see of_dpa_bridging_learn() */
        uint64_t rx_pkts;
        uint64_t tx_pkts;
    } stats;
} OfDpaFlow;
/* Pointers into the parsed packet headers (set by of_dpa_flow_pkt_parse);
 * NULL pointers mean the corresponding header is absent. */
typedef struct of_dpa_flow_pkt_fields {
    uint32_t tunnel_id;
    struct eth_header *ethhdr;
    __be16 *h_proto;               /* points at the effective ethertype
                                    * (inside the VLAN header if tagged) */
    struct vlan_header *vlanhdr;
    struct ip_header *ipv4hdr;
    struct ip6_header *ipv6hdr;
    Ipv6Addr *ipv6_src_addr;
    Ipv6Addr *ipv6_dst_addr;
} OfDpaFlowPktFields;
/* Per-packet processing context carried through the table pipeline.
 * iov[0]/iov[1] are reserved for the (possibly rewritten) eth and vlan
 * headers — see of_dpa_flow_pkt_parse(). */
typedef struct of_dpa_flow_context {
    uint32_t in_pport;
    uint32_t tunnel_id;
    struct iovec *iov;             /* caller-provided copy, iovcnt entries */
    int iovcnt;
    struct eth_header ethhdr_rewrite;   /* scratch for hdr rewrite */
    struct vlan_header vlanhdr_rewrite; /* scratch for vlan rewrite */
    struct vlan_header vlanhdr;         /* scratch for vlan insertion */
    OfDpa *of_dpa;
    OfDpaFlowPktFields fields;
    OfDpaFlowAction action_set;    /* accumulated "write" actions */
} OfDpaFlowContext;
/* Lookup request/result pair: 'value' is the key to match, 'best' is the
 * highest-priority matching flow found so far (NULL if none). */
typedef struct of_dpa_flow_match {
    OfDpaFlowKey value;
    OfDpaFlow *best;
} OfDpaFlowMatch;
/* A group-table entry; the variant used is determined by the group type
 * encoded in 'id' (ROCKER_GROUP_TYPE_GET). */
typedef struct of_dpa_group {
    uint32_t id;                   /* encodes type + params, hash key */
    union {
        struct {
            uint32_t out_pport;
            uint8_t pop_vlan;      /* strip VLAN tag on egress */
        } l2_interface;
        struct {
            uint32_t group_id;     /* chained l2_interface group */
            MACAddr src_mac;
            MACAddr dst_mac;
            __be16 vlan_id;
        } l2_rewrite;
        struct {
            uint16_t group_count;
            uint32_t *group_ids;   /* owned array of member group ids */
        } l2_flood;
        struct {
            uint32_t group_id;     /* chained l2_interface group */
            MACAddr src_mac;
            MACAddr dst_mac;
            __be16 vlan_id;
            uint8_t ttl_check;
        } l3_unicast;
    };
} OfDpaGroup;
199 static int of_dpa_mask2prefix(__be32 mask)
201 int i;
202 int count = 32;
204 for (i = 0; i < 32; i++) {
205 if (!(ntohl(mask) & ((2 << i) - 1))) {
206 count--;
210 return count;
#if defined(DEBUG_ROCKER)
/* Debug-only pretty printer for a flow key and optional mask.  A mask
 * field is printed as a "/..." suffix only when it is not an exact-match
 * mask.
 *
 * Fix: the "/mask" suffixes for in_pport, tunnel_id and vlan_id printed
 * the *key* value instead of the mask value (the eth.type branch below
 * correctly prints the mask, which shows the intent).
 */
static void of_dpa_flow_key_dump(OfDpaFlowKey *key, OfDpaFlowKey *mask)
{
    char buf[512], *b = buf, *mac;

    b += sprintf(b, " tbl %2d", key->tbl_id);

    if (key->in_pport || (mask && mask->in_pport)) {
        b += sprintf(b, " in_pport %2d", key->in_pport);
        if (mask && mask->in_pport != 0xffffffff) {
            b += sprintf(b, "/0x%08x", mask->in_pport); /* was key->in_pport */
        }
    }

    if (key->tunnel_id || (mask && mask->tunnel_id)) {
        b += sprintf(b, " tun %8d", key->tunnel_id);
        if (mask && mask->tunnel_id != 0xffffffff) {
            b += sprintf(b, "/0x%08x", mask->tunnel_id); /* was key->tunnel_id */
        }
    }

    if (key->eth.vlan_id || (mask && mask->eth.vlan_id)) {
        b += sprintf(b, " vlan %4d", ntohs(key->eth.vlan_id));
        if (mask && mask->eth.vlan_id != 0xffff) {
            b += sprintf(b, "/0x%04x", ntohs(mask->eth.vlan_id)); /* was key */
        }
    }

    if (memcmp(key->eth.src.a, zero_mac.a, ETH_ALEN) ||
        (mask && memcmp(mask->eth.src.a, zero_mac.a, ETH_ALEN))) {
        mac = qemu_mac_strdup_printf(key->eth.src.a);
        b += sprintf(b, " src %s", mac);
        g_free(mac);
        if (mask && memcmp(mask->eth.src.a, ff_mac.a, ETH_ALEN)) {
            mac = qemu_mac_strdup_printf(mask->eth.src.a);
            b += sprintf(b, "/%s", mac);
            g_free(mac);
        }
    }

    if (memcmp(key->eth.dst.a, zero_mac.a, ETH_ALEN) ||
        (mask && memcmp(mask->eth.dst.a, zero_mac.a, ETH_ALEN))) {
        mac = qemu_mac_strdup_printf(key->eth.dst.a);
        b += sprintf(b, " dst %s", mac);
        g_free(mac);
        if (mask && memcmp(mask->eth.dst.a, ff_mac.a, ETH_ALEN)) {
            mac = qemu_mac_strdup_printf(mask->eth.dst.a);
            b += sprintf(b, "/%s", mac);
            g_free(mac);
        }
    }

    if (key->eth.type || (mask && mask->eth.type)) {
        b += sprintf(b, " type 0x%04x", ntohs(key->eth.type));
        if (mask && mask->eth.type != 0xffff) {
            b += sprintf(b, "/0x%04x", ntohs(mask->eth.type));
        }
        switch (ntohs(key->eth.type)) {
        case 0x0800:
        case 0x86dd:
            if (key->ip.proto || (mask && mask->ip.proto)) {
                b += sprintf(b, " ip proto %2d", key->ip.proto);
                if (mask && mask->ip.proto != 0xff) {
                    b += sprintf(b, "/0x%02x", mask->ip.proto);
                }
            }
            if (key->ip.tos || (mask && mask->ip.tos)) {
                b += sprintf(b, " ip tos %2d", key->ip.tos);
                if (mask && mask->ip.tos != 0xff) {
                    b += sprintf(b, "/0x%02x", mask->ip.tos);
                }
            }
            break;
        }
        switch (ntohs(key->eth.type)) {
        case 0x0800:
            if (key->ipv4.addr.dst || (mask && mask->ipv4.addr.dst)) {
                b += sprintf(b, " dst %s",
                    inet_ntoa(*(struct in_addr *)&key->ipv4.addr.dst));
                if (mask) {
                    b += sprintf(b, "/%d",
                                 of_dpa_mask2prefix(mask->ipv4.addr.dst));
                }
            }
            break;
        }
    }

    DPRINTF("%s\n", buf);
}
#else
#define of_dpa_flow_key_dump(k, m)
#endif
/* GHashTable foreach callback: test one flow against match->value and
 * record it in match->best if it matches and beats the current best.
 * The key/mask/value are compared as arrays of uint64_t words up to the
 * flow's key width. */
static void _of_dpa_flow_match(void *key, void *value, void *user_data)
{
    OfDpaFlow *flow = value;
    OfDpaFlowMatch *match = user_data;
    uint64_t *k = (uint64_t *)&flow->key;
    uint64_t *m = (uint64_t *)&flow->mask;
    uint64_t *v = (uint64_t *)&match->value;
    int i;

    if (flow->key.tbl_id == match->value.tbl_id) {
        of_dpa_flow_key_dump(&flow->key, &flow->mask);
    }

    /* a flow whose key is wider than the lookup value can't match */
    if (flow->key.width > match->value.width) {
        return;
    }

    for (i = 0; i < flow->key.width; i++, k++, m++, v++) {
        /* nonzero iff some masked bit differs between key and value */
        if ((~*k & *m & *v) | (*k & *m & ~*v)) {
            return;
        }
    }

    DPRINTF("match\n");

    /* keep the highest-priority (then longest-prefix) match */
    if (!match->best ||
        flow->priority > match->best->priority ||
        flow->lpm > match->best->lpm) {
        match->best = flow;
    }
}
/* Linear search of the whole flow table for the best match of
 * match->value; returns NULL if nothing matches. */
static OfDpaFlow *of_dpa_flow_match(OfDpa *of_dpa, OfDpaFlowMatch *match)
{
    DPRINTF("\nnew search\n");
    of_dpa_flow_key_dump(&match->value, NULL);

    g_hash_table_foreach(of_dpa->flow_tbl, _of_dpa_flow_match, match);

    return match->best;
}
/* Exact lookup by cookie; returns NULL if not present. */
static OfDpaFlow *of_dpa_flow_find(OfDpa *of_dpa, uint64_t cookie)
{
    return g_hash_table_lookup(of_dpa->flow_tbl, &cookie);
}
/* Insert a flow keyed by its cookie; the table takes ownership of 'flow'
 * (the key pointer aliases flow->cookie). */
static int of_dpa_flow_add(OfDpa *of_dpa, OfDpaFlow *flow)
{
    g_hash_table_insert(of_dpa->flow_tbl, &flow->cookie, flow);

    return ROCKER_OK;
}
/* Remove a flow from the table (frees it via the table's destroy func,
 * set at table creation — not visible in this chunk). */
static void of_dpa_flow_del(OfDpa *of_dpa, OfDpaFlow *flow)
{
    g_hash_table_remove(of_dpa->flow_tbl, &flow->cookie);
}
/* Allocate a zeroed flow with the given cookie.  tbl_id mask defaults to
 * exact-match; install/refresh times are stamped in whole seconds of the
 * virtual clock. */
static OfDpaFlow *of_dpa_flow_alloc(uint64_t cookie)
{
    OfDpaFlow *flow;
    int64_t now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) / 1000;

    flow = g_new0(OfDpaFlow, 1);

    flow->cookie = cookie;
    flow->mask.tbl_id = 0xffffffff;

    flow->stats.install_time = flow->stats.refresh_time = now;

    return flow;
}
/* Point iov[0]/iov[1] back at the packet's original eth/vlan headers,
 * undoing any rewrite/strip done during processing.  iov[1] is zero
 * length when the packet has no VLAN tag. */
static void of_dpa_flow_pkt_hdr_reset(OfDpaFlowContext *fc)
{
    OfDpaFlowPktFields *fields = &fc->fields;

    fc->iov[0].iov_base = fields->ethhdr;
    fc->iov[0].iov_len = sizeof(struct eth_header);
    fc->iov[1].iov_base = fields->vlanhdr;
    fc->iov[1].iov_len = fields->vlanhdr ? sizeof(struct vlan_header) : 0;
}
/* Parse eth/vlan/ip headers out of the first iov element, populating
 * fc->fields, then build fc->iov: two leading vectors for the eth and
 * vlan headers (to allow later rewrite/insertion without copying data)
 * followed by the rest of the payload.  Assumes all headers live in
 * iov[0] — underruns bail out early with fields partially set. */
static void of_dpa_flow_pkt_parse(OfDpaFlowContext *fc,
                                  const struct iovec *iov, int iovcnt)
{
    OfDpaFlowPktFields *fields = &fc->fields;
    size_t sofar = 0;
    int i;

    sofar += sizeof(struct eth_header);
    if (iov->iov_len < sofar) {
        DPRINTF("flow_pkt_parse underrun on eth_header\n");
        return;
    }

    fields->ethhdr = iov->iov_base;
    fields->h_proto = &fields->ethhdr->h_proto;

    if (ntohs(*fields->h_proto) == ETH_P_VLAN) {
        sofar += sizeof(struct vlan_header);
        if (iov->iov_len < sofar) {
            DPRINTF("flow_pkt_parse underrun on vlan_header\n");
            return;
        }
        fields->vlanhdr = (struct vlan_header *)(fields->ethhdr + 1);
        /* effective ethertype is now the one inside the VLAN header */
        fields->h_proto = &fields->vlanhdr->h_proto;
    }

    switch (ntohs(*fields->h_proto)) {
    case ETH_P_IP:
        sofar += sizeof(struct ip_header);
        if (iov->iov_len < sofar) {
            DPRINTF("flow_pkt_parse underrun on ip_header\n");
            return;
        }
        fields->ipv4hdr = (struct ip_header *)(fields->h_proto + 1);
        break;
    case ETH_P_IPV6:
        sofar += sizeof(struct ip6_header);
        if (iov->iov_len < sofar) {
            DPRINTF("flow_pkt_parse underrun on ip6_header\n");
            return;
        }
        fields->ipv6hdr = (struct ip6_header *)(fields->h_proto + 1);
        break;
    }

    /* To facilitate (potential) VLAN tag insertion, Make a
     * copy of the iov and insert two new vectors at the
     * beginning for eth hdr and vlan hdr. No data is copied,
     * just the vectors.
     */

    of_dpa_flow_pkt_hdr_reset(fc);

    fc->iov[2].iov_base = fields->h_proto + 1;
    fc->iov[2].iov_len = iov->iov_len - fc->iov[0].iov_len - fc->iov[1].iov_len;

    for (i = 1; i < iovcnt; i++) {
        fc->iov[i + 2] = iov[i];
    }

    fc->iovcnt = iovcnt + 2;
}
/* Insert a VLAN tag into the packet by rewriting the eth header's
 * ethertype to 802.1Q and pointing iov[1] at a context-local vlan header
 * carrying the old ethertype.  No payload bytes are moved.  No-op if the
 * packet is already tagged. */
static void of_dpa_flow_pkt_insert_vlan(OfDpaFlowContext *fc, __be16 vlan_id)
{
    OfDpaFlowPktFields *fields = &fc->fields;
    uint16_t h_proto = fields->ethhdr->h_proto;   /* save original ethertype */

    if (fields->vlanhdr) {
        DPRINTF("flow_pkt_insert_vlan packet already has vlan\n");
        return;
    }

    fields->ethhdr->h_proto = htons(ETH_P_VLAN);
    fields->vlanhdr = &fc->vlanhdr;
    fields->vlanhdr->h_tci = vlan_id;
    fields->vlanhdr->h_proto = h_proto;
    fields->h_proto = &fields->vlanhdr->h_proto;

    fc->iov[1].iov_base = fields->vlanhdr;
    fc->iov[1].iov_len = sizeof(struct vlan_header);
}
/* Strip the VLAN tag by iovec surgery alone: shorten iov[0] so it ends
 * before the outer ethertype, and retarget iov[1] at the inner ethertype
 * — the 4 tag bytes between them are simply skipped.  No-op if untagged. */
static void of_dpa_flow_pkt_strip_vlan(OfDpaFlowContext *fc)
{
    OfDpaFlowPktFields *fields = &fc->fields;

    if (!fields->vlanhdr) {
        return;
    }

    fc->iov[0].iov_len -= sizeof(fields->ethhdr->h_proto);
    fc->iov[1].iov_base = fields->h_proto;
    fc->iov[1].iov_len = sizeof(fields->ethhdr->h_proto);
}
/* Rewrite src/dst MAC and/or VLAN id by copying the headers into
 * context-local scratch buffers and pointing iov[0]/iov[1] at them; the
 * original packet memory is left untouched.  All-zero MACs mean "leave
 * unchanged"; the vlan rewrite applies only to already-tagged packets. */
static void of_dpa_flow_pkt_hdr_rewrite(OfDpaFlowContext *fc,
                                        uint8_t *src_mac, uint8_t *dst_mac,
                                        __be16 vlan_id)
{
    OfDpaFlowPktFields *fields = &fc->fields;

    if (src_mac || dst_mac) {
        memcpy(&fc->ethhdr_rewrite, fields->ethhdr, sizeof(struct eth_header));
        if (src_mac && memcmp(src_mac, zero_mac.a, ETH_ALEN)) {
            memcpy(fc->ethhdr_rewrite.h_source, src_mac, ETH_ALEN);
        }
        if (dst_mac && memcmp(dst_mac, zero_mac.a, ETH_ALEN)) {
            memcpy(fc->ethhdr_rewrite.h_dest, dst_mac, ETH_ALEN);
        }
        fc->iov[0].iov_base = &fc->ethhdr_rewrite;
    }

    if (vlan_id && fields->vlanhdr) {
        fc->vlanhdr_rewrite = fc->vlanhdr;
        fc->vlanhdr_rewrite.h_tci = vlan_id;
        fc->iov[1].iov_base = &fc->vlanhdr_rewrite;
    }
}
/* forward decl: table pipeline driver (mutually recursive with miss fns) */
static void of_dpa_flow_ig_tbl(OfDpaFlowContext *fc, uint32_t tbl_id);

/* Build the INGRESS_PORT table lookup key: match on in_pport only. */
static void of_dpa_ig_port_build_match(OfDpaFlowContext *fc,
                                       OfDpaFlowMatch *match)
{
    match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
    match->value.in_pport = fc->in_pport;
    match->value.width = FLOW_KEY_WIDTH(tbl_id);
}
static void of_dpa_ig_port_miss(OfDpaFlowContext *fc)
{
    uint32_t port;

    /* The default on miss is for packets from physical ports
     * to go to the VLAN Flow Table. There is no default rule
     * for packets from logical ports, which are dropped on miss.
     */

    if (fp_port_from_pport(fc->in_pport, &port)) {
        of_dpa_flow_ig_tbl(fc, ROCKER_OF_DPA_TABLE_ID_VLAN);
    }
}
/* Build the VLAN table key: in_pport plus the packet's VLAN id (0 when
 * untagged). */
static void of_dpa_vlan_build_match(OfDpaFlowContext *fc,
                                    OfDpaFlowMatch *match)
{
    match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
    match->value.in_pport = fc->in_pport;
    if (fc->fields.vlanhdr) {
        match->value.eth.vlan_id = fc->fields.vlanhdr->h_tci;
    }
    match->value.width = FLOW_KEY_WIDTH(eth.vlan_id);
}
/* VLAN table apply action: tag an untagged packet with the flow's
 * new_vlan_id, if one is set. */
static void of_dpa_vlan_insert(OfDpaFlowContext *fc,
                               OfDpaFlow *flow)
{
    if (flow->action.apply.new_vlan_id) {
        of_dpa_flow_pkt_insert_vlan(fc, flow->action.apply.new_vlan_id);
    }
}
/* Build the TERMINATION_MAC key: in_pport, ethertype, vlan and dst MAC.
 * NOTE(review): dereferences fc->fields.vlanhdr unconditionally — relies
 * on the packet being tagged (the VLAN table runs first); confirm. */
static void of_dpa_term_mac_build_match(OfDpaFlowContext *fc,
                                        OfDpaFlowMatch *match)
{
    match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
    match->value.in_pport = fc->in_pport;
    match->value.eth.type = *fc->fields.h_proto;
    match->value.eth.vlan_id = fc->fields.vlanhdr->h_tci;
    memcpy(match->value.eth.dst.a, fc->fields.ethhdr->h_dest,
           sizeof(match->value.eth.dst.a));
    match->value.width = FLOW_KEY_WIDTH(eth.type);
}
/* TERMINATION_MAC miss: continue with L2 bridging. */
static void of_dpa_term_mac_miss(OfDpaFlowContext *fc)
{
    of_dpa_flow_ig_tbl(fc, ROCKER_OF_DPA_TABLE_ID_BRIDGING);
}
/* Fold a flow's apply actions into the packet's action set: copy-to-cpu
 * flag plus the matched VLAN id (used later to pick the CPU group). */
static void of_dpa_apply_actions(OfDpaFlowContext *fc,
                                 OfDpaFlow *flow)
{
    fc->action_set.apply.copy_to_cpu = flow->action.apply.copy_to_cpu;
    fc->action_set.apply.vlan_id = flow->key.eth.vlan_id;
}
/* Build the BRIDGING key: dst MAC plus either VLAN id (tagged traffic)
 * or tunnel id (overlay traffic). */
static void of_dpa_bridging_build_match(OfDpaFlowContext *fc,
                                        OfDpaFlowMatch *match)
{
    match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
    if (fc->fields.vlanhdr) {
        match->value.eth.vlan_id = fc->fields.vlanhdr->h_tci;
    } else if (fc->tunnel_id) {
        match->value.tunnel_id = fc->tunnel_id;
    }
    memcpy(match->value.eth.dst.a, fc->fields.ethhdr->h_dest,
           sizeof(match->value.eth.dst.a));
    match->value.width = FLOW_KEY_WIDTH(eth.dst);
}
/* MAC learning: look up src_mac/vlan in the bridging table and notify
 * the driver of a new or re-seen station, rate-limited by refresh_delay
 * (1 second) for already-learned exact-match entries. */
static void of_dpa_bridging_learn(OfDpaFlowContext *fc,
                                  OfDpaFlow *dst_flow)
{
    OfDpaFlowMatch match = { { 0, }, };
    OfDpaFlow *flow;
    uint8_t *addr;
    uint16_t vlan_id;
    int64_t now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) / 1000;
    int64_t refresh_delay = 1;   /* seconds between driver notifications */

    /* Do a lookup in bridge table by src_mac/vlan */

    addr = fc->fields.ethhdr->h_source;
    vlan_id = fc->fields.vlanhdr->h_tci;

    match.value.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
    match.value.eth.vlan_id = vlan_id;
    memcpy(match.value.eth.dst.a, addr, sizeof(match.value.eth.dst.a));
    match.value.width = FLOW_KEY_WIDTH(eth.dst);

    flow = of_dpa_flow_match(fc->of_dpa, &match);
    if (flow) {
        if (!memcmp(flow->mask.eth.dst.a, ff_mac.a,
                    sizeof(flow->mask.eth.dst.a))) {
            /* src_mac/vlan already learned; if in_port and out_port
             * don't match, the end station has moved and the port
             * needs updating */
            /* XXX implement the in_port/out_port check */
            if (now - flow->stats.refresh_time < refresh_delay) {
                return;
            }
            flow->stats.refresh_time = now;
        }
    }

    /* Let driver know about mac/vlan. This may be a new mac/vlan
     * or a refresh of existing mac/vlan that's been hit after the
     * refresh_delay.
     */

    rocker_event_mac_vlan_seen(world_rocker(fc->of_dpa->world),
                               fc->in_pport, addr, vlan_id);
}
/* BRIDGING miss: still learn the source address, then fall through to
 * the ACL policy table. */
static void of_dpa_bridging_miss(OfDpaFlowContext *fc)
{
    of_dpa_bridging_learn(fc, NULL);
    of_dpa_flow_ig_tbl(fc, ROCKER_OF_DPA_TABLE_ID_ACL_POLICY);
}
/* BRIDGING write actions: record output group (if any) and logical
 * tunnel port in the action set. */
static void of_dpa_bridging_action_write(OfDpaFlowContext *fc,
                                         OfDpaFlow *flow)
{
    if (flow->action.write.group_id != ROCKER_GROUP_NONE) {
        fc->action_set.write.group_id = flow->action.write.group_id;
    }
    fc->action_set.write.tun_log_lport = flow->action.write.tun_log_lport;
}
/* Build the UNICAST_ROUTING key: ethertype + IPv4 or IPv6 destination. */
static void of_dpa_unicast_routing_build_match(OfDpaFlowContext *fc,
                                               OfDpaFlowMatch *match)
{
    match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
    match->value.eth.type = *fc->fields.h_proto;
    if (fc->fields.ipv4hdr) {
        match->value.ipv4.addr.dst = fc->fields.ipv4hdr->ip_dst;
    }
    if (fc->fields.ipv6_dst_addr) {
        memcpy(&match->value.ipv6.addr.dst, fc->fields.ipv6_dst_addr,
               sizeof(match->value.ipv6.addr.dst));
    }
    match->value.width = FLOW_KEY_WIDTH(ipv6.addr.dst);
}
/* UNICAST_ROUTING miss: fall through to ACL policy. */
static void of_dpa_unicast_routing_miss(OfDpaFlowContext *fc)
{
    of_dpa_flow_ig_tbl(fc, ROCKER_OF_DPA_TABLE_ID_ACL_POLICY);
}
/* UNICAST_ROUTING write action: record the (L3 unicast) output group. */
static void of_dpa_unicast_routing_action_write(OfDpaFlowContext *fc,
                                                OfDpaFlow *flow)
{
    if (flow->action.write.group_id != ROCKER_GROUP_NONE) {
        fc->action_set.write.group_id = flow->action.write.group_id;
    }
}
/* Build the MULTICAST_ROUTING key: ethertype, vlan, and IPv4/IPv6
 * src+dst addresses. */
static void
of_dpa_multicast_routing_build_match(OfDpaFlowContext *fc,
                                     OfDpaFlowMatch *match)
{
    match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
    match->value.eth.type = *fc->fields.h_proto;
    match->value.eth.vlan_id = fc->fields.vlanhdr->h_tci;
    if (fc->fields.ipv4hdr) {
        match->value.ipv4.addr.src = fc->fields.ipv4hdr->ip_src;
        match->value.ipv4.addr.dst = fc->fields.ipv4hdr->ip_dst;
    }
    if (fc->fields.ipv6_src_addr) {
        memcpy(&match->value.ipv6.addr.src, fc->fields.ipv6_src_addr,
               sizeof(match->value.ipv6.addr.src));
    }
    if (fc->fields.ipv6_dst_addr) {
        memcpy(&match->value.ipv6.addr.dst, fc->fields.ipv6_dst_addr,
               sizeof(match->value.ipv6.addr.dst));
    }
    match->value.width = FLOW_KEY_WIDTH(ipv6.addr.dst);
}
/* MULTICAST_ROUTING miss: fall through to ACL policy. */
static void of_dpa_multicast_routing_miss(OfDpaFlowContext *fc)
{
    of_dpa_flow_ig_tbl(fc, ROCKER_OF_DPA_TABLE_ID_ACL_POLICY);
}
/* MULTICAST_ROUTING write actions: output group plus VLAN rewrite. */
static void
of_dpa_multicast_routing_action_write(OfDpaFlowContext *fc,
                                      OfDpaFlow *flow)
{
    if (flow->action.write.group_id != ROCKER_GROUP_NONE) {
        fc->action_set.write.group_id = flow->action.write.group_id;
    }
    fc->action_set.write.vlan_id = flow->action.write.vlan_id;
}
/* Build the ACL_POLICY key: full L2 tuple plus IP proto/ToS when an
 * IPv4 or IPv6 header was parsed (widening the key accordingly). */
static void of_dpa_acl_build_match(OfDpaFlowContext *fc,
                                   OfDpaFlowMatch *match)
{
    match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
    match->value.in_pport = fc->in_pport;
    memcpy(match->value.eth.src.a, fc->fields.ethhdr->h_source,
           sizeof(match->value.eth.src.a));
    memcpy(match->value.eth.dst.a, fc->fields.ethhdr->h_dest,
           sizeof(match->value.eth.dst.a));
    match->value.eth.type = *fc->fields.h_proto;
    match->value.eth.vlan_id = fc->fields.vlanhdr->h_tci;
    match->value.width = FLOW_KEY_WIDTH(eth.type);
    if (fc->fields.ipv4hdr) {
        match->value.ip.proto = fc->fields.ipv4hdr->ip_p;
        match->value.ip.tos = fc->fields.ipv4hdr->ip_tos;
        match->value.width = FLOW_KEY_WIDTH(ip.tos);
    } else if (fc->fields.ipv6hdr) {
        match->value.ip.proto =
            fc->fields.ipv6hdr->ip6_ctlun.ip6_un1.ip6_un1_nxt;
        match->value.ip.tos = 0; /* XXX what goes here? */
        match->value.width = FLOW_KEY_WIDTH(ip.tos);
    }
}
/* forward decl: egress processing (executes the accumulated action set) */
static void of_dpa_eg(OfDpaFlowContext *fc);

/* ACL is the last table: a hit proceeds straight to egress. */
static void of_dpa_acl_hit(OfDpaFlowContext *fc,
                           OfDpaFlow *dst_flow)
{
    of_dpa_eg(fc);
}
/* ACL write action: record the output group. */
static void of_dpa_acl_action_write(OfDpaFlowContext *fc,
                                    OfDpaFlow *flow)
{
    if (flow->action.write.group_id != ROCKER_GROUP_NONE) {
        fc->action_set.write.group_id = flow->action.write.group_id;
    }
}
/* Intentional no-op: dropping a packet means simply not forwarding it. */
static void of_dpa_drop(OfDpaFlowContext *fc)
{
    /* drop packet */
}
/* Exact lookup by group id; returns NULL if not present. */
static OfDpaGroup *of_dpa_group_find(OfDpa *of_dpa,
                                     uint32_t group_id)
{
    return g_hash_table_lookup(of_dpa->group_tbl, &group_id);
}
/* Insert a group keyed by its id; the table takes ownership of 'group'
 * (the key pointer aliases group->id). */
static int of_dpa_group_add(OfDpa *of_dpa, OfDpaGroup *group)
{
    g_hash_table_insert(of_dpa->group_tbl, &group->id, group);

    return 0;
}
#if 0
/* Unfinished group-modify stub, compiled out; kept for future work. */
static int of_dpa_group_mod(OfDpa *of_dpa, OfDpaGroup *group)
{
    OfDpaGroup *old_group = of_dpa_group_find(of_dpa, group->id);

    if (!old_group) {
        return -ENOENT;
    }

    /* XXX */

    return 0;
}
#endif
/* Remove a group from the table (freed via the table's destroy func,
 * set at table creation — not visible in this chunk). */
static int of_dpa_group_del(OfDpa *of_dpa, OfDpaGroup *group)
{
    g_hash_table_remove(of_dpa->group_tbl, &group->id);

    return 0;
}
#if 0
/* Unfinished group-stats stub, compiled out; kept for future work. */
static int of_dpa_group_get_stats(OfDpa *of_dpa, uint32_t id)
{
    OfDpaGroup *group = of_dpa_group_find(of_dpa, id);

    if (!group) {
        return -ENOENT;
    }

    /* XXX get/return stats */

    return 0;
}
#endif
/* Allocate a zeroed group with the given id. */
static OfDpaGroup *of_dpa_group_alloc(uint32_t id)
{
    OfDpaGroup *group = g_new0(OfDpaGroup, 1);

    group->id = id;

    return group;
}
/* Emit the packet on an l2_interface group's port: pport 0 means deliver
 * to the CPU (host); otherwise egress the physical port, except back out
 * the ingress port. */
static void of_dpa_output_l2_interface(OfDpaFlowContext *fc,
                                       OfDpaGroup *group)
{
    uint8_t copy_to_cpu = fc->action_set.apply.copy_to_cpu;

    if (group->l2_interface.pop_vlan) {
        of_dpa_flow_pkt_strip_vlan(fc);
    }

    /* Note: By default, and as per the OpenFlow 1.3.1
     * specification, a packet cannot be forwarded back
     * to the IN_PORT from which it came in. An action
     * bucket that specifies the particular packet's
     * egress port is not evaluated.
     */

    if (group->l2_interface.out_pport == 0) {
        rx_produce(fc->of_dpa->world, fc->in_pport, fc->iov, fc->iovcnt,
                   copy_to_cpu);
    } else if (group->l2_interface.out_pport != fc->in_pport) {
        rocker_port_eg(world_rocker(fc->of_dpa->world),
                       group->l2_interface.out_pport,
                       fc->iov, fc->iovcnt);
    }
}
/* l2_rewrite group: rewrite MACs/VLAN then chain to the referenced
 * l2_interface group; silently drops if the chained group is missing. */
static void of_dpa_output_l2_rewrite(OfDpaFlowContext *fc,
                                     OfDpaGroup *group)
{
    OfDpaGroup *l2_group =
        of_dpa_group_find(fc->of_dpa, group->l2_rewrite.group_id);

    if (!l2_group) {
        return;
    }

    of_dpa_flow_pkt_hdr_rewrite(fc, group->l2_rewrite.src_mac.a,
                                group->l2_rewrite.dst_mac.a,
                                group->l2_rewrite.vlan_id);
    of_dpa_output_l2_interface(fc, l2_group);
}
/* l2_flood / l2_mcast group: replicate the packet to each member group,
 * resetting the header iovecs before each copy (a member may have
 * stripped/rewritten them).  Unknown member ids are skipped. */
static void of_dpa_output_l2_flood(OfDpaFlowContext *fc,
                                   OfDpaGroup *group)
{
    OfDpaGroup *l2_group;
    int i;

    for (i = 0; i < group->l2_flood.group_count; i++) {
        of_dpa_flow_pkt_hdr_reset(fc);
        l2_group = of_dpa_group_find(fc->of_dpa, group->l2_flood.group_ids[i]);
        if (!l2_group) {
            continue;
        }
        switch (ROCKER_GROUP_TYPE_GET(l2_group->id)) {
        case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
            of_dpa_output_l2_interface(fc, l2_group);
            break;
        case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
            of_dpa_output_l2_rewrite(fc, l2_group);
            break;
        }
    }
}
/* l3_unicast group: rewrite MACs/VLAN (routing hop) then chain to the
 * referenced l2_interface group; TTL check is still unimplemented. */
static void of_dpa_output_l3_unicast(OfDpaFlowContext *fc, OfDpaGroup *group)
{
    OfDpaGroup *l2_group =
        of_dpa_group_find(fc->of_dpa, group->l3_unicast.group_id);

    if (!l2_group) {
        return;
    }

    of_dpa_flow_pkt_hdr_rewrite(fc, group->l3_unicast.src_mac.a,
                                group->l3_unicast.dst_mac.a,
                                group->l3_unicast.vlan_id);
    /* XXX need ttl_check */
    of_dpa_output_l2_interface(fc, l2_group);
}
/* Egress: execute the packet's accumulated action set — first an
 * optional copy to the CPU via the per-vlan CPU l2_interface group, then
 * dispatch to the written output group by its encoded type. */
static void of_dpa_eg(OfDpaFlowContext *fc)
{
    OfDpaFlowAction *set = &fc->action_set;
    OfDpaGroup *group;
    uint32_t group_id;

    /* send a copy of pkt to CPU (controller)? */

    if (set->apply.copy_to_cpu) {
        group_id = ROCKER_GROUP_L2_INTERFACE(set->apply.vlan_id, 0);
        group = of_dpa_group_find(fc->of_dpa, group_id);
        if (group) {
            of_dpa_output_l2_interface(fc, group);
            of_dpa_flow_pkt_hdr_reset(fc);   /* undo any strip for next output */
        }
    }

    /* process group write actions */

    if (!set->write.group_id) {
        return;
    }

    group = of_dpa_group_find(fc->of_dpa, set->write.group_id);
    if (!group) {
        return;
    }

    switch (ROCKER_GROUP_TYPE_GET(group->id)) {
    case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
        of_dpa_output_l2_interface(fc, group);
        break;
    case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
        of_dpa_output_l2_rewrite(fc, group);
        break;
    case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
    case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
        of_dpa_output_l2_flood(fc, group);
        break;
    case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
        of_dpa_output_l3_unicast(fc, group);
        break;
    }
}
/* Per-table callbacks driven by of_dpa_flow_ig_tbl(); any member may be
 * NULL, in which case that step is skipped. */
typedef struct of_dpa_flow_tbl_ops {
    void (*build_match)(OfDpaFlowContext *fc, OfDpaFlowMatch *match);
    void (*hit)(OfDpaFlowContext *fc, OfDpaFlow *flow);
    void (*miss)(OfDpaFlowContext *fc);
    void (*hit_no_goto)(OfDpaFlowContext *fc);  /* hit but no goto_tbl set */
    void (*action_apply)(OfDpaFlowContext *fc, OfDpaFlow *flow);
    void (*action_write)(OfDpaFlowContext *fc, OfDpaFlow *flow);
} OfDpaFlowTblOps;
/* Dispatch table: one OfDpaFlowTblOps per OF-DPA table id. */
static OfDpaFlowTblOps of_dpa_tbl_ops[] = {
    [ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT] = {
        .build_match = of_dpa_ig_port_build_match,
        .miss = of_dpa_ig_port_miss,
        .hit_no_goto = of_dpa_drop,
    },
    [ROCKER_OF_DPA_TABLE_ID_VLAN] = {
        .build_match = of_dpa_vlan_build_match,
        .hit_no_goto = of_dpa_drop,
        .action_apply = of_dpa_vlan_insert,
    },
    [ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC] = {
        .build_match = of_dpa_term_mac_build_match,
        .miss = of_dpa_term_mac_miss,
        .hit_no_goto = of_dpa_drop,
        .action_apply = of_dpa_apply_actions,
    },
    [ROCKER_OF_DPA_TABLE_ID_BRIDGING] = {
        .build_match = of_dpa_bridging_build_match,
        .hit = of_dpa_bridging_learn,
        .miss = of_dpa_bridging_miss,
        .hit_no_goto = of_dpa_drop,
        .action_apply = of_dpa_apply_actions,
        .action_write = of_dpa_bridging_action_write,
    },
    [ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING] = {
        .build_match = of_dpa_unicast_routing_build_match,
        .miss = of_dpa_unicast_routing_miss,
        .hit_no_goto = of_dpa_drop,
        .action_write = of_dpa_unicast_routing_action_write,
    },
    [ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING] = {
        .build_match = of_dpa_multicast_routing_build_match,
        .miss = of_dpa_multicast_routing_miss,
        .hit_no_goto = of_dpa_drop,
        .action_write = of_dpa_multicast_routing_action_write,
    },
    [ROCKER_OF_DPA_TABLE_ID_ACL_POLICY] = {
        .build_match = of_dpa_acl_build_match,
        .hit = of_dpa_acl_hit,
        .miss = of_dpa_eg,          /* ACL is terminal: miss goes to egress */
        .action_apply = of_dpa_apply_actions,
        .action_write = of_dpa_acl_action_write,
    },
};
/* Run one pipeline table: build its match key, look up the best flow,
 * fire miss/apply/write/hit callbacks, then recurse into goto_tbl (or
 * fire hit_no_goto, typically drop, when no goto is set). */
static void of_dpa_flow_ig_tbl(OfDpaFlowContext *fc, uint32_t tbl_id)
{
    OfDpaFlowTblOps *ops = &of_dpa_tbl_ops[tbl_id];
    OfDpaFlowMatch match = { { 0, }, };
    OfDpaFlow *flow;

    if (ops->build_match) {
        ops->build_match(fc, &match);
    } else {
        return;
    }

    flow = of_dpa_flow_match(fc->of_dpa, &match);
    if (!flow) {
        if (ops->miss) {
            ops->miss(fc);
        }
        return;
    }

    flow->stats.hits++;

    if (ops->action_apply) {
        ops->action_apply(fc, flow);
    }

    if (ops->action_write) {
        ops->action_write(fc, flow);
    }

    if (ops->hit) {
        ops->hit(fc, flow);
    }

    if (flow->action.goto_tbl) {
        of_dpa_flow_ig_tbl(fc, flow->action.goto_tbl);
    } else if (ops->hit_no_goto) {
        ops->hit_no_goto(fc);
    }

    /* drop packet */
}
/* World ingress entry point: parse the packet into a fresh context
 * (iov_copy reserves two extra slots for eth/vlan headers — note the
 * VLA sized by caller-supplied iovcnt) and start at the INGRESS_PORT
 * table.  Always reports the full packet size as consumed. */
static ssize_t of_dpa_ig(World *world, uint32_t pport,
                         const struct iovec *iov, int iovcnt)
{
    struct iovec iov_copy[iovcnt + 2];
    OfDpaFlowContext fc = {
        .of_dpa = world_private(world),
        .in_pport = pport,
        .iov = iov_copy,
        .iovcnt = iovcnt + 2,
    };

    of_dpa_flow_pkt_parse(&fc, iov, iovcnt);
    of_dpa_flow_ig_tbl(&fc, ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT);

    return iov_size(iov, iovcnt);
}
/* Logical-port flag bit marking an overlay tunnel port. */
#define ROCKER_TUNNEL_LPORT 0x00010000

/* Parse TLVs for an INGRESS_PORT table entry into 'flow'.  Physical
 * ports must goto VLAN; tunnel logical ports must goto BRIDGING.
 * Returns ROCKER_OK or -ROCKER_EINVAL on bad/missing TLVs. */
static int of_dpa_cmd_add_ig_port(OfDpaFlow *flow, RockerTlv **flow_tlvs)
{
    OfDpaFlowKey *key = &flow->key;
    OfDpaFlowKey *mask = &flow->mask;
    OfDpaFlowAction *action = &flow->action;
    bool overlay_tunnel;

    if (!flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
        return -ROCKER_EINVAL;
    }

    key->tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
    key->width = FLOW_KEY_WIDTH(tbl_id);

    key->in_pport = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT]);
    if (flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK]) {
        mask->in_pport =
            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK]);
    }

    overlay_tunnel = !!(key->in_pport & ROCKER_TUNNEL_LPORT);

    action->goto_tbl =
        rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);

    if (!overlay_tunnel && action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_VLAN) {
        return -ROCKER_EINVAL;
    }

    if (overlay_tunnel && action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_BRIDGING) {
        return -ROCKER_EINVAL;
    }

    return ROCKER_OK;
}
/* Parse TLVs for a VLAN table entry into 'flow'.  Requires in_pport (a
 * front-panel port) and vlan_id; a zero vlan_id means the rule matches
 * untagged packets and must supply a new_vlan_id (1..4095, network
 * order) to tag them with.  goto, if present, must be TERMINATION_MAC. */
static int of_dpa_cmd_add_vlan(OfDpaFlow *flow, RockerTlv **flow_tlvs)
{
    OfDpaFlowKey *key = &flow->key;
    OfDpaFlowKey *mask = &flow->mask;
    OfDpaFlowAction *action = &flow->action;
    uint32_t port;
    bool untagged;

    if (!flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
        DPRINTF("Must give in_pport and vlan_id to install VLAN tbl entry\n");
        return -ROCKER_EINVAL;
    }

    key->tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
    key->width = FLOW_KEY_WIDTH(eth.vlan_id);

    key->in_pport = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT]);
    if (!fp_port_from_pport(key->in_pport, &port)) {
        DPRINTF("in_pport (%d) not a front-panel port\n", key->in_pport);
        return -ROCKER_EINVAL;
    }
    mask->in_pport = 0xffffffff;

    key->eth.vlan_id = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);

    if (flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]) {
        mask->eth.vlan_id =
            rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]);
    }

    if (key->eth.vlan_id) {
        untagged = false; /* filtering */
    } else {
        untagged = true;
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
        action->goto_tbl =
            rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);
        if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC) {
            DPRINTF("Goto tbl (%d) must be TERM_MAC\n", action->goto_tbl);
            return -ROCKER_EINVAL;
        }
    }

    if (untagged) {
        if (!flow_tlvs[ROCKER_TLV_OF_DPA_NEW_VLAN_ID]) {
            DPRINTF("Must specify new vlan_id if untagged\n");
            return -ROCKER_EINVAL;
        }
        action->apply.new_vlan_id =
            rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_NEW_VLAN_ID]);
        if (1 > ntohs(action->apply.new_vlan_id) ||
            ntohs(action->apply.new_vlan_id) > 4095) {
            DPRINTF("New vlan_id (%d) must be between 1 and 4095\n",
                    ntohs(action->apply.new_vlan_id));
            return -ROCKER_EINVAL;
        }
    }

    return ROCKER_OK;
}
/* Parse and validate a Termination MAC table entry (flow add/mod).
 *
 * Matches ingress port, ethertype (IPv4/IPv6 only) and destination MAC.
 * Unicast destinations may goto the unicast routing table; only the two
 * well-known IPv4/IPv6 multicast MAC wildcard patterns may goto the
 * multicast routing table.
 *
 * Returns ROCKER_OK, or -ROCKER_EINVAL for a malformed/unsupported entry.
 */
static int of_dpa_cmd_add_term_mac(OfDpaFlow *flow, RockerTlv **flow_tlvs)
{
    OfDpaFlowKey *key = &flow->key;
    OfDpaFlowKey *mask = &flow->mask;
    OfDpaFlowAction *action = &flow->action;
    /* canonical IPv4/IPv6 multicast MAC prefixes and the only masks the
     * table accepts for a multicast destination */
    const MACAddr ipv4_mcast = { .a = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 } };
    const MACAddr ipv4_mask =  { .a = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 } };
    const MACAddr ipv6_mcast = { .a = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 } };
    const MACAddr ipv6_mask =  { .a = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } };
    uint32_t port;
    bool unicast = false;
    bool multicast = false;

    /* every one of these TLVs is mandatory for a term-mac entry */
    if (!flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]) {
        return -ROCKER_EINVAL;
    }

    key->tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
    key->width = FLOW_KEY_WIDTH(eth.type);

    key->in_pport = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT]);
    /* ingress pport must map to a front-panel port */
    if (!fp_port_from_pport(key->in_pport, &port)) {
        return -ROCKER_EINVAL;
    }
    mask->in_pport =
        rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK]);

    /* only IPv4 (0x0800) and IPv6 (0x86dd) frames are terminated here;
     * key->eth.type is kept in network byte order */
    key->eth.type = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]);
    if (key->eth.type != htons(0x0800) && key->eth.type != htons(0x86dd)) {
        return -ROCKER_EINVAL;
    }
    mask->eth.type = htons(0xffff);

    memcpy(key->eth.dst.a,
           rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]),
           sizeof(key->eth.dst.a));
    memcpy(mask->eth.dst.a,
           rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK]),
           sizeof(mask->eth.dst.a));

    /* I/G bit clear => unicast destination MAC */
    if ((key->eth.dst.a[0] & 0x01) == 0x00) {
        unicast = true;
    }

    /* only two wildcard rules are acceptable for IPv4 and IPv6 multicast */
    if (memcmp(key->eth.dst.a, ipv4_mcast.a, sizeof(key->eth.dst.a)) == 0 &&
        memcmp(mask->eth.dst.a, ipv4_mask.a, sizeof(mask->eth.dst.a)) == 0) {
        multicast = true;
    }
    if (memcmp(key->eth.dst.a, ipv6_mcast.a, sizeof(key->eth.dst.a)) == 0 &&
        memcmp(mask->eth.dst.a, ipv6_mask.a, sizeof(mask->eth.dst.a)) == 0) {
        multicast = true;
    }

    if (!unicast && !multicast) {
        return -ROCKER_EINVAL;
    }

    key->eth.vlan_id = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
    mask->eth.vlan_id =
        rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]);

    if (flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
        action->goto_tbl =
            rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);

        /* goto table must agree with the cast-ness of the dst MAC */
        if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING &&
            action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING) {
            return -ROCKER_EINVAL;
        }

        if (unicast &&
            action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING) {
            return -ROCKER_EINVAL;
        }

        if (multicast &&
            action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING) {
            return -ROCKER_EINVAL;
        }
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]) {
        action->apply.copy_to_cpu =
            rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]);
    }

    return ROCKER_OK;
}
1259 static int of_dpa_cmd_add_bridging(OfDpaFlow *flow, RockerTlv **flow_tlvs)
1261 OfDpaFlowKey *key = &flow->key;
1262 OfDpaFlowKey *mask = &flow->mask;
1263 OfDpaFlowAction *action = &flow->action;
1264 bool unicast = false;
1265 bool dst_mac = false;
1266 bool dst_mac_mask = false;
1267 enum {
1268 BRIDGING_MODE_UNKNOWN,
1269 BRIDGING_MODE_VLAN_UCAST,
1270 BRIDGING_MODE_VLAN_MCAST,
1271 BRIDGING_MODE_VLAN_DFLT,
1272 BRIDGING_MODE_TUNNEL_UCAST,
1273 BRIDGING_MODE_TUNNEL_MCAST,
1274 BRIDGING_MODE_TUNNEL_DFLT,
1275 } mode = BRIDGING_MODE_UNKNOWN;
1277 key->tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
1279 if (flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
1280 key->eth.vlan_id =
1281 rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
1282 mask->eth.vlan_id = 0xffff;
1283 key->width = FLOW_KEY_WIDTH(eth.vlan_id);
1286 if (flow_tlvs[ROCKER_TLV_OF_DPA_TUNNEL_ID]) {
1287 key->tunnel_id =
1288 rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_TUNNEL_ID]);
1289 mask->tunnel_id = 0xffffffff;
1290 key->width = FLOW_KEY_WIDTH(tunnel_id);
1293 /* can't do VLAN bridging and tunnel bridging at same time */
1294 if (key->eth.vlan_id && key->tunnel_id) {
1295 DPRINTF("can't do VLAN bridging and tunnel bridging at same time\n");
1296 return -ROCKER_EINVAL;
1299 if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]) {
1300 memcpy(key->eth.dst.a,
1301 rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]),
1302 sizeof(key->eth.dst.a));
1303 key->width = FLOW_KEY_WIDTH(eth.dst);
1304 dst_mac = true;
1305 unicast = (key->eth.dst.a[0] & 0x01) == 0x00;
1308 if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK]) {
1309 memcpy(mask->eth.dst.a,
1310 rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK]),
1311 sizeof(mask->eth.dst.a));
1312 key->width = FLOW_KEY_WIDTH(eth.dst);
1313 dst_mac_mask = true;
1314 } else if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]) {
1315 memcpy(mask->eth.dst.a, ff_mac.a, sizeof(mask->eth.dst.a));
1318 if (key->eth.vlan_id) {
1319 if (dst_mac && !dst_mac_mask) {
1320 mode = unicast ? BRIDGING_MODE_VLAN_UCAST :
1321 BRIDGING_MODE_VLAN_MCAST;
1322 } else if ((dst_mac && dst_mac_mask) || !dst_mac) {
1323 mode = BRIDGING_MODE_VLAN_DFLT;
1325 } else if (key->tunnel_id) {
1326 if (dst_mac && !dst_mac_mask) {
1327 mode = unicast ? BRIDGING_MODE_TUNNEL_UCAST :
1328 BRIDGING_MODE_TUNNEL_MCAST;
1329 } else if ((dst_mac && dst_mac_mask) || !dst_mac) {
1330 mode = BRIDGING_MODE_TUNNEL_DFLT;
1334 if (mode == BRIDGING_MODE_UNKNOWN) {
1335 DPRINTF("Unknown bridging mode\n");
1336 return -ROCKER_EINVAL;
1339 if (flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
1340 action->goto_tbl =
1341 rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);
1342 if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_ACL_POLICY) {
1343 DPRINTF("Briding goto tbl must be ACL policy\n");
1344 return -ROCKER_EINVAL;
1348 if (flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]) {
1349 action->write.group_id =
1350 rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]);
1351 switch (mode) {
1352 case BRIDGING_MODE_VLAN_UCAST:
1353 if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
1354 ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE) {
1355 DPRINTF("Bridging mode vlan ucast needs L2 "
1356 "interface group (0x%08x)\n",
1357 action->write.group_id);
1358 return -ROCKER_EINVAL;
1360 break;
1361 case BRIDGING_MODE_VLAN_MCAST:
1362 if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
1363 ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST) {
1364 DPRINTF("Bridging mode vlan mcast needs L2 "
1365 "mcast group (0x%08x)\n",
1366 action->write.group_id);
1367 return -ROCKER_EINVAL;
1369 break;
1370 case BRIDGING_MODE_VLAN_DFLT:
1371 if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
1372 ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD) {
1373 DPRINTF("Bridging mode vlan dflt needs L2 "
1374 "flood group (0x%08x)\n",
1375 action->write.group_id);
1376 return -ROCKER_EINVAL;
1378 break;
1379 case BRIDGING_MODE_TUNNEL_MCAST:
1380 if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
1381 ROCKER_OF_DPA_GROUP_TYPE_L2_OVERLAY) {
1382 DPRINTF("Bridging mode tunnel mcast needs L2 "
1383 "overlay group (0x%08x)\n",
1384 action->write.group_id);
1385 return -ROCKER_EINVAL;
1387 break;
1388 case BRIDGING_MODE_TUNNEL_DFLT:
1389 if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
1390 ROCKER_OF_DPA_GROUP_TYPE_L2_OVERLAY) {
1391 DPRINTF("Bridging mode tunnel dflt needs L2 "
1392 "overlay group (0x%08x)\n",
1393 action->write.group_id);
1394 return -ROCKER_EINVAL;
1396 break;
1397 default:
1398 return -ROCKER_EINVAL;
1402 if (flow_tlvs[ROCKER_TLV_OF_DPA_TUNNEL_LPORT]) {
1403 action->write.tun_log_lport =
1404 rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_TUNNEL_LPORT]);
1405 if (mode != BRIDGING_MODE_TUNNEL_UCAST) {
1406 DPRINTF("Have tunnel logical port but not "
1407 "in bridging tunnel mode\n");
1408 return -ROCKER_EINVAL;
1412 if (flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]) {
1413 action->apply.copy_to_cpu =
1414 rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]);
1417 return ROCKER_OK;
/* Parse and validate a Unicast Routing table entry (flow add/mod).
 *
 * Keys on ethertype (IPv4/IPv6) and a non-multicast destination address;
 * an optional dst mask selects longest-prefix-match behaviour (IPv4 only
 * tracks the prefix length in flow->lpm).
 *
 * Returns ROCKER_OK, or -ROCKER_EINVAL for a malformed/unsupported entry.
 */
static int of_dpa_cmd_add_unicast_routing(OfDpaFlow *flow,
                                          RockerTlv **flow_tlvs)
{
    OfDpaFlowKey *key = &flow->key;
    OfDpaFlowKey *mask = &flow->mask;
    OfDpaFlowAction *action = &flow->action;
    enum {
        UNICAST_ROUTING_MODE_UNKNOWN,
        UNICAST_ROUTING_MODE_IPV4,
        UNICAST_ROUTING_MODE_IPV6,
    } mode = UNICAST_ROUTING_MODE_UNKNOWN;
    uint8_t type;

    if (!flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]) {
        return -ROCKER_EINVAL;
    }

    key->tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
    key->width = FLOW_KEY_WIDTH(ipv6.addr.dst);

    /* key->eth.type is stored in network byte order */
    key->eth.type = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]);
    switch (ntohs(key->eth.type)) {
    case 0x0800:
        mode = UNICAST_ROUTING_MODE_IPV4;
        break;
    case 0x86dd:
        mode = UNICAST_ROUTING_MODE_IPV6;
        break;
    default:
        return -ROCKER_EINVAL;
    }
    mask->eth.type = htons(0xffff);

    switch (mode) {
    case UNICAST_ROUTING_MODE_IPV4:
        if (!flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP]) {
            return -ROCKER_EINVAL;
        }
        key->ipv4.addr.dst =
            rocker_tlv_get_u32(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP]);
        /* multicast destinations belong in the multicast routing table */
        if (ipv4_addr_is_multicast(key->ipv4.addr.dst)) {
            return -ROCKER_EINVAL;
        }
        /* default to a /32 host route; an explicit mask overrides it */
        flow->lpm = of_dpa_mask2prefix(htonl(0xffffffff));
        if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP_MASK]) {
            mask->ipv4.addr.dst =
                rocker_tlv_get_u32(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP_MASK]);
            flow->lpm = of_dpa_mask2prefix(mask->ipv4.addr.dst);
        }
        break;
    case UNICAST_ROUTING_MODE_IPV6:
        if (!flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6]) {
            return -ROCKER_EINVAL;
        }
        memcpy(&key->ipv6.addr.dst,
               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6]),
               sizeof(key->ipv6.addr.dst));
        if (ipv6_addr_is_multicast(&key->ipv6.addr.dst)) {
            return -ROCKER_EINVAL;
        }
        if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6_MASK]) {
            memcpy(&mask->ipv6.addr.dst,
                   rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6_MASK]),
                   sizeof(mask->ipv6.addr.dst));
        }
        break;
    default:
        return -ROCKER_EINVAL;
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
        action->goto_tbl =
            rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);
        if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_ACL_POLICY) {
            return -ROCKER_EINVAL;
        }
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]) {
        action->write.group_id =
            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]);
        /* routed packets must resolve to an L2 interface, L3 unicast
         * or L3 ECMP group */
        type = ROCKER_GROUP_TYPE_GET(action->write.group_id);
        if (type != ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE &&
            type != ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST &&
            type != ROCKER_OF_DPA_GROUP_TYPE_L3_ECMP) {
            return -ROCKER_EINVAL;
        }
    }

    return ROCKER_OK;
}
1512 static int of_dpa_cmd_add_multicast_routing(OfDpaFlow *flow,
1513 RockerTlv **flow_tlvs)
1515 OfDpaFlowKey *key = &flow->key;
1516 OfDpaFlowKey *mask = &flow->mask;
1517 OfDpaFlowAction *action = &flow->action;
1518 enum {
1519 MULTICAST_ROUTING_MODE_UNKNOWN,
1520 MULTICAST_ROUTING_MODE_IPV4,
1521 MULTICAST_ROUTING_MODE_IPV6,
1522 } mode = MULTICAST_ROUTING_MODE_UNKNOWN;
1524 if (!flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE] ||
1525 !flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
1526 return -ROCKER_EINVAL;
1529 key->tbl_id = ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
1530 key->width = FLOW_KEY_WIDTH(ipv6.addr.dst);
1532 key->eth.type = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]);
1533 switch (ntohs(key->eth.type)) {
1534 case 0x0800:
1535 mode = MULTICAST_ROUTING_MODE_IPV4;
1536 break;
1537 case 0x86dd:
1538 mode = MULTICAST_ROUTING_MODE_IPV6;
1539 break;
1540 default:
1541 return -ROCKER_EINVAL;
1544 key->eth.vlan_id = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
1546 switch (mode) {
1547 case MULTICAST_ROUTING_MODE_IPV4:
1549 if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IP]) {
1550 key->ipv4.addr.src =
1551 rocker_tlv_get_u32(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IP]);
1554 if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IP_MASK]) {
1555 mask->ipv4.addr.src =
1556 rocker_tlv_get_u32(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IP_MASK]);
1559 if (!flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IP]) {
1560 if (mask->ipv4.addr.src != 0) {
1561 return -ROCKER_EINVAL;
1565 if (!flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP]) {
1566 return -ROCKER_EINVAL;
1569 key->ipv4.addr.dst =
1570 rocker_tlv_get_u32(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP]);
1571 if (!ipv4_addr_is_multicast(key->ipv4.addr.dst)) {
1572 return -ROCKER_EINVAL;
1575 break;
1577 case MULTICAST_ROUTING_MODE_IPV6:
1579 if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IPV6]) {
1580 memcpy(&key->ipv6.addr.src,
1581 rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IPV6]),
1582 sizeof(key->ipv6.addr.src));
1585 if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IPV6_MASK]) {
1586 memcpy(&mask->ipv6.addr.src,
1587 rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IPV6_MASK]),
1588 sizeof(mask->ipv6.addr.src));
1591 if (!flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IPV6]) {
1592 if (mask->ipv6.addr.src.addr32[0] != 0 &&
1593 mask->ipv6.addr.src.addr32[1] != 0 &&
1594 mask->ipv6.addr.src.addr32[2] != 0 &&
1595 mask->ipv6.addr.src.addr32[3] != 0) {
1596 return -ROCKER_EINVAL;
1600 if (!flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6]) {
1601 return -ROCKER_EINVAL;
1604 memcpy(&key->ipv6.addr.dst,
1605 rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6]),
1606 sizeof(key->ipv6.addr.dst));
1607 if (!ipv6_addr_is_multicast(&key->ipv6.addr.dst)) {
1608 return -ROCKER_EINVAL;
1611 break;
1613 default:
1614 return -ROCKER_EINVAL;
1617 if (flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
1618 action->goto_tbl =
1619 rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);
1620 if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_ACL_POLICY) {
1621 return -ROCKER_EINVAL;
1625 if (flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]) {
1626 action->write.group_id =
1627 rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]);
1628 if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
1629 ROCKER_OF_DPA_GROUP_TYPE_L3_MCAST) {
1630 return -ROCKER_EINVAL;
1632 action->write.vlan_id = key->eth.vlan_id;
1635 return ROCKER_OK;
1638 static int of_dpa_cmd_add_acl_ip(OfDpaFlowKey *key, OfDpaFlowKey *mask,
1639 RockerTlv **flow_tlvs)
1641 key->width = FLOW_KEY_WIDTH(ip.tos);
1643 key->ip.proto = 0;
1644 key->ip.tos = 0;
1645 mask->ip.proto = 0;
1646 mask->ip.tos = 0;
1648 if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_PROTO]) {
1649 key->ip.proto =
1650 rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_PROTO]);
1652 if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_PROTO_MASK]) {
1653 mask->ip.proto =
1654 rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_PROTO_MASK]);
1656 if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_DSCP]) {
1657 key->ip.tos =
1658 rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_DSCP]);
1660 if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_DSCP_MASK]) {
1661 mask->ip.tos =
1662 rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_DSCP_MASK]);
1664 if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_ECN]) {
1665 key->ip.tos |=
1666 rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_ECN]) << 6;
1668 if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_ECN_MASK]) {
1669 mask->ip.tos |=
1670 rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_ECN_MASK]) << 6;
1673 return ROCKER_OK;
/* Parse and validate an ACL Policy table entry (flow add/mod).
 *
 * Classifies the entry by ethertype and VLAN-vs-tenant (tunnel) into one
 * of eight ACL modes; only the VLAN modes are currently supported.  For
 * IPv4/IPv6 entries the IP proto/TOS sub-fields are parsed by
 * of_dpa_cmd_add_acl_ip().
 *
 * Returns ROCKER_OK, or -ROCKER_EINVAL for a malformed/unsupported entry.
 */
static int of_dpa_cmd_add_acl(OfDpaFlow *flow, RockerTlv **flow_tlvs)
{
    OfDpaFlowKey *key = &flow->key;
    OfDpaFlowKey *mask = &flow->mask;
    OfDpaFlowAction *action = &flow->action;
    enum {
        ACL_MODE_UNKNOWN,
        ACL_MODE_IPV4_VLAN,
        ACL_MODE_IPV6_VLAN,
        ACL_MODE_IPV4_TENANT,
        ACL_MODE_IPV6_TENANT,
        ACL_MODE_NON_IP_VLAN,
        ACL_MODE_NON_IP_TENANT,
        ACL_MODE_ANY_VLAN,
        ACL_MODE_ANY_TENANT,
    } mode = ACL_MODE_UNKNOWN;
    int err = ROCKER_OK;

    if (!flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]) {
        return -ROCKER_EINVAL;
    }

    /* VLAN and tunnel keys are mutually exclusive */
    if (flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID] &&
        flow_tlvs[ROCKER_TLV_OF_DPA_TUNNEL_ID]) {
        return -ROCKER_EINVAL;
    }

    key->tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
    key->width = FLOW_KEY_WIDTH(eth.type);

    key->in_pport = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT]);
    if (flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK]) {
        mask->in_pport =
            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK]);
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]) {
        memcpy(key->eth.src.a,
               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]),
               sizeof(key->eth.src.a));
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC_MASK]) {
        memcpy(mask->eth.src.a,
               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC_MASK]),
               sizeof(mask->eth.src.a));
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]) {
        memcpy(key->eth.dst.a,
               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]),
               sizeof(key->eth.dst.a));
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK]) {
        memcpy(mask->eth.dst.a,
               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK]),
               sizeof(mask->eth.dst.a));
    }

    /* ethertype 0 means "match any"; a non-zero value is an exact match */
    key->eth.type = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]);
    if (key->eth.type) {
        mask->eth.type = 0xffff;
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
        key->eth.vlan_id =
            rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]) {
        mask->eth.vlan_id =
            rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]);
    }

    /* classify: ethertype x (vlan present ? VLAN : TENANT) */
    switch (ntohs(key->eth.type)) {
    case 0x0000:
        mode = (key->eth.vlan_id) ? ACL_MODE_ANY_VLAN : ACL_MODE_ANY_TENANT;
        break;
    case 0x0800:
        mode = (key->eth.vlan_id) ? ACL_MODE_IPV4_VLAN : ACL_MODE_IPV4_TENANT;
        break;
    case 0x86dd:
        mode = (key->eth.vlan_id) ? ACL_MODE_IPV6_VLAN : ACL_MODE_IPV6_TENANT;
        break;
    default:
        mode = (key->eth.vlan_id) ? ACL_MODE_NON_IP_VLAN :
                                    ACL_MODE_NON_IP_TENANT;
        break;
    }

    /* XXX only supporting VLAN modes for now */
    if (mode != ACL_MODE_IPV4_VLAN &&
        mode != ACL_MODE_IPV6_VLAN &&
        mode != ACL_MODE_NON_IP_VLAN &&
        mode != ACL_MODE_ANY_VLAN) {
        return -ROCKER_EINVAL;
    }

    /* IP entries additionally carry proto/DSCP/ECN sub-keys */
    switch (ntohs(key->eth.type)) {
    case 0x0800:
    case 0x86dd:
        err = of_dpa_cmd_add_acl_ip(key, mask, flow_tlvs);
        break;
    }

    if (err) {
        return err;
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]) {
        action->write.group_id =
            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]);
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]) {
        action->apply.copy_to_cpu =
            rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]);
    }

    return ROCKER_OK;
}
/* Common parsing for flow add and flow mod: extract the shared header
 * TLVs (table id, priority, hard timeout, optional idle timeout) and
 * dispatch to the per-table parser.
 *
 * Returns ROCKER_OK or a negative ROCKER_* error from the table parser.
 */
static int of_dpa_cmd_flow_add_mod(OfDpa *of_dpa, OfDpaFlow *flow,
                                   RockerTlv **flow_tlvs)
{
    enum rocker_of_dpa_table_id tbl;
    int err = ROCKER_OK;

    if (!flow_tlvs[ROCKER_TLV_OF_DPA_TABLE_ID] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_PRIORITY] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_HARDTIME]) {
        return -ROCKER_EINVAL;
    }

    tbl = rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_TABLE_ID]);
    flow->priority = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_PRIORITY]);
    flow->hardtime = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_HARDTIME]);

    if (flow_tlvs[ROCKER_TLV_OF_DPA_IDLETIME]) {
        /* idle timeouts are not valid for the pre-bridging tables */
        if (tbl == ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT ||
            tbl == ROCKER_OF_DPA_TABLE_ID_VLAN ||
            tbl == ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC) {
            return -ROCKER_EINVAL;
        }
        flow->idletime =
            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IDLETIME]);
    }

    /* per-table parse/validate; an unknown table id leaves err ROCKER_OK
     * and the flow unparsed */
    switch (tbl) {
    case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
        err = of_dpa_cmd_add_ig_port(flow, flow_tlvs);
        break;
    case ROCKER_OF_DPA_TABLE_ID_VLAN:
        err = of_dpa_cmd_add_vlan(flow, flow_tlvs);
        break;
    case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
        err = of_dpa_cmd_add_term_mac(flow, flow_tlvs);
        break;
    case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
        err = of_dpa_cmd_add_bridging(flow, flow_tlvs);
        break;
    case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
        err = of_dpa_cmd_add_unicast_routing(flow, flow_tlvs);
        break;
    case ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING:
        err = of_dpa_cmd_add_multicast_routing(flow, flow_tlvs);
        break;
    case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
        err = of_dpa_cmd_add_acl(flow, flow_tlvs);
        break;
    }

    return err;
}
1853 static int of_dpa_cmd_flow_add(OfDpa *of_dpa, uint64_t cookie,
1854 RockerTlv **flow_tlvs)
1856 OfDpaFlow *flow = of_dpa_flow_find(of_dpa, cookie);
1857 int err = ROCKER_OK;
1859 if (flow) {
1860 return -ROCKER_EEXIST;
1863 flow = of_dpa_flow_alloc(cookie);
1865 err = of_dpa_cmd_flow_add_mod(of_dpa, flow, flow_tlvs);
1866 if (err) {
1867 g_free(flow);
1868 return err;
1871 return of_dpa_flow_add(of_dpa, flow);
1874 static int of_dpa_cmd_flow_mod(OfDpa *of_dpa, uint64_t cookie,
1875 RockerTlv **flow_tlvs)
1877 OfDpaFlow *flow = of_dpa_flow_find(of_dpa, cookie);
1879 if (!flow) {
1880 return -ROCKER_ENOENT;
1883 return of_dpa_cmd_flow_add_mod(of_dpa, flow, flow_tlvs);
1886 static int of_dpa_cmd_flow_del(OfDpa *of_dpa, uint64_t cookie)
1888 OfDpaFlow *flow = of_dpa_flow_find(of_dpa, cookie);
1890 if (!flow) {
1891 return -ROCKER_ENOENT;
1894 of_dpa_flow_del(of_dpa, flow);
1896 return ROCKER_OK;
/* Return flow stats (duration in seconds, rx/tx packet counts) for the
 * flow identified by cookie, encoded as TLVs into the descriptor buffer.
 *
 * Returns the result of desc_set_buf(), -ROCKER_ENOENT for an unknown
 * cookie, or -ROCKER_EMSGSIZE if the descriptor buffer is too small.
 */
static int of_dpa_cmd_flow_get_stats(OfDpa *of_dpa, uint64_t cookie,
                                     struct desc_info *info, char *buf)
{
    OfDpaFlow *flow = of_dpa_flow_find(of_dpa, cookie);
    size_t tlv_size;
    /* virtual-clock time in whole seconds, to match install_time units */
    int64_t now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) / 1000;
    int pos;

    if (!flow) {
        return -ROCKER_ENOENT;
    }

    tlv_size = rocker_tlv_total_size(sizeof(uint32_t)) +  /* duration */
               rocker_tlv_total_size(sizeof(uint64_t)) +  /* rx_pkts */
               rocker_tlv_total_size(sizeof(uint64_t));   /* tx_pkts */

    if (tlv_size > desc_buf_size(info)) {
        return -ROCKER_EMSGSIZE;
    }

    pos = 0;
    rocker_tlv_put_le32(buf, &pos, ROCKER_TLV_OF_DPA_FLOW_STAT_DURATION,
                        (int32_t)(now - flow->stats.install_time));
    rocker_tlv_put_le64(buf, &pos, ROCKER_TLV_OF_DPA_FLOW_STAT_RX_PKTS,
                        flow->stats.rx_pkts);
    rocker_tlv_put_le64(buf, &pos, ROCKER_TLV_OF_DPA_FLOW_STAT_TX_PKTS,
                        flow->stats.tx_pkts);

    return desc_set_buf(info, tlv_size);
}
1930 static int of_dpa_flow_cmd(OfDpa *of_dpa, struct desc_info *info,
1931 char *buf, uint16_t cmd,
1932 RockerTlv **flow_tlvs)
1934 uint64_t cookie;
1936 if (!flow_tlvs[ROCKER_TLV_OF_DPA_COOKIE]) {
1937 return -ROCKER_EINVAL;
1940 cookie = rocker_tlv_get_le64(flow_tlvs[ROCKER_TLV_OF_DPA_COOKIE]);
1942 switch (cmd) {
1943 case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD:
1944 return of_dpa_cmd_flow_add(of_dpa, cookie, flow_tlvs);
1945 case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD:
1946 return of_dpa_cmd_flow_mod(of_dpa, cookie, flow_tlvs);
1947 case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL:
1948 return of_dpa_cmd_flow_del(of_dpa, cookie);
1949 case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_GET_STATS:
1950 return of_dpa_cmd_flow_get_stats(of_dpa, cookie, info, buf);
1953 return -ROCKER_ENOTSUP;
1956 static int of_dpa_cmd_add_l2_interface(OfDpaGroup *group,
1957 RockerTlv **group_tlvs)
1959 if (!group_tlvs[ROCKER_TLV_OF_DPA_OUT_PPORT] ||
1960 !group_tlvs[ROCKER_TLV_OF_DPA_POP_VLAN]) {
1961 return -ROCKER_EINVAL;
1964 group->l2_interface.out_pport =
1965 rocker_tlv_get_le32(group_tlvs[ROCKER_TLV_OF_DPA_OUT_PPORT]);
1966 group->l2_interface.pop_vlan =
1967 rocker_tlv_get_u8(group_tlvs[ROCKER_TLV_OF_DPA_POP_VLAN]);
1969 return ROCKER_OK;
/* Parse an L2 rewrite group: rewrites src/dst MAC and/or VLAN, then
 * chains to an existing L2 interface group.  The lower group must exist,
 * be of L2 interface type, and — if a VLAN rewrite is given — carry the
 * same VLAN as the rewrite target.
 *
 * Returns ROCKER_OK, or -ROCKER_EINVAL for a malformed/unsupported group.
 */
static int of_dpa_cmd_add_l2_rewrite(OfDpa *of_dpa, OfDpaGroup *group,
                                     RockerTlv **group_tlvs)
{
    OfDpaGroup *l2_interface_group;

    if (!group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID_LOWER]) {
        return -ROCKER_EINVAL;
    }

    group->l2_rewrite.group_id =
        rocker_tlv_get_le32(group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID_LOWER]);

    /* the chained-to group must already exist and be L2 interface type */
    l2_interface_group = of_dpa_group_find(of_dpa, group->l2_rewrite.group_id);
    if (!l2_interface_group ||
        ROCKER_GROUP_TYPE_GET(l2_interface_group->id) !=
        ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE) {
        DPRINTF("l2 rewrite group needs a valid l2 interface group\n");
        return -ROCKER_EINVAL;
    }

    if (group_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]) {
        memcpy(group->l2_rewrite.src_mac.a,
               rocker_tlv_data(group_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]),
               sizeof(group->l2_rewrite.src_mac.a));
    }

    if (group_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]) {
        memcpy(group->l2_rewrite.dst_mac.a,
               rocker_tlv_data(group_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]),
               sizeof(group->l2_rewrite.dst_mac.a));
    }

    if (group_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
        group->l2_rewrite.vlan_id =
            rocker_tlv_get_u16(group_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
        /* the rewritten VLAN must match the interface group's VLAN */
        if (ROCKER_GROUP_VLAN_GET(l2_interface_group->id) !=
            (ntohs(group->l2_rewrite.vlan_id) & VLAN_VID_MASK)) {
            DPRINTF("Set VLAN ID must be same as L2 interface group\n");
            return -ROCKER_EINVAL;
        }
    }

    return ROCKER_OK;
}
2017 static int of_dpa_cmd_add_l2_flood(OfDpa *of_dpa, OfDpaGroup *group,
2018 RockerTlv **group_tlvs)
2020 OfDpaGroup *l2_group;
2021 RockerTlv **tlvs;
2022 int err;
2023 int i;
2025 if (!group_tlvs[ROCKER_TLV_OF_DPA_GROUP_COUNT] ||
2026 !group_tlvs[ROCKER_TLV_OF_DPA_GROUP_IDS]) {
2027 return -ROCKER_EINVAL;
2030 group->l2_flood.group_count =
2031 rocker_tlv_get_le16(group_tlvs[ROCKER_TLV_OF_DPA_GROUP_COUNT]);
2033 tlvs = g_new0(RockerTlv *, group->l2_flood.group_count + 1);
2035 g_free(group->l2_flood.group_ids);
2036 group->l2_flood.group_ids =
2037 g_new0(uint32_t, group->l2_flood.group_count);
2039 rocker_tlv_parse_nested(tlvs, group->l2_flood.group_count,
2040 group_tlvs[ROCKER_TLV_OF_DPA_GROUP_IDS]);
2042 for (i = 0; i < group->l2_flood.group_count; i++) {
2043 group->l2_flood.group_ids[i] = rocker_tlv_get_le32(tlvs[i + 1]);
2046 /* All of the L2 interface groups referenced by the L2 flood
2047 * must have same VLAN
2050 for (i = 0; i < group->l2_flood.group_count; i++) {
2051 l2_group = of_dpa_group_find(of_dpa, group->l2_flood.group_ids[i]);
2052 if (!l2_group) {
2053 continue;
2055 if ((ROCKER_GROUP_TYPE_GET(l2_group->id) ==
2056 ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE) &&
2057 (ROCKER_GROUP_VLAN_GET(l2_group->id) !=
2058 ROCKER_GROUP_VLAN_GET(group->id))) {
2059 DPRINTF("l2 interface group 0x%08x VLAN doesn't match l2 "
2060 "flood group 0x%08x\n",
2061 group->l2_flood.group_ids[i], group->id);
2062 err = -ROCKER_EINVAL;
2063 goto err_out;
2067 g_free(tlvs);
2068 return ROCKER_OK;
2070 err_out:
2071 group->l2_flood.group_count = 0;
2072 g_free(group->l2_flood.group_ids);
2073 g_free(tlvs);
2075 return err;
2078 static int of_dpa_cmd_add_l3_unicast(OfDpaGroup *group, RockerTlv **group_tlvs)
2080 if (!group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID_LOWER]) {
2081 return -ROCKER_EINVAL;
2084 group->l3_unicast.group_id =
2085 rocker_tlv_get_le32(group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID_LOWER]);
2087 if (group_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]) {
2088 memcpy(group->l3_unicast.src_mac.a,
2089 rocker_tlv_data(group_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]),
2090 sizeof(group->l3_unicast.src_mac.a));
2093 if (group_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]) {
2094 memcpy(group->l3_unicast.dst_mac.a,
2095 rocker_tlv_data(group_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]),
2096 sizeof(group->l3_unicast.dst_mac.a));
2099 if (group_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
2100 group->l3_unicast.vlan_id =
2101 rocker_tlv_get_u16(group_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
2104 if (group_tlvs[ROCKER_TLV_OF_DPA_TTL_CHECK]) {
2105 group->l3_unicast.ttl_check =
2106 rocker_tlv_get_u8(group_tlvs[ROCKER_TLV_OF_DPA_TTL_CHECK]);
2109 return ROCKER_OK;
2112 static int of_dpa_cmd_group_do(OfDpa *of_dpa, uint32_t group_id,
2113 OfDpaGroup *group, RockerTlv **group_tlvs)
2115 uint8_t type = ROCKER_GROUP_TYPE_GET(group_id);
2117 switch (type) {
2118 case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
2119 return of_dpa_cmd_add_l2_interface(group, group_tlvs);
2120 case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
2121 return of_dpa_cmd_add_l2_rewrite(of_dpa, group, group_tlvs);
2122 case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2123 /* Treat L2 multicast group same as a L2 flood group */
2124 case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
2125 return of_dpa_cmd_add_l2_flood(of_dpa, group, group_tlvs);
2126 case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
2127 return of_dpa_cmd_add_l3_unicast(group, group_tlvs);
2130 return -ROCKER_ENOTSUP;
2133 static int of_dpa_cmd_group_add(OfDpa *of_dpa, uint32_t group_id,
2134 RockerTlv **group_tlvs)
2136 OfDpaGroup *group = of_dpa_group_find(of_dpa, group_id);
2137 int err;
2139 if (group) {
2140 return -ROCKER_EEXIST;
2143 group = of_dpa_group_alloc(group_id);
2145 err = of_dpa_cmd_group_do(of_dpa, group_id, group, group_tlvs);
2146 if (err) {
2147 goto err_cmd_add;
2150 err = of_dpa_group_add(of_dpa, group);
2151 if (err) {
2152 goto err_cmd_add;
2155 return ROCKER_OK;
2157 err_cmd_add:
2158 g_free(group);
2159 return err;
2162 static int of_dpa_cmd_group_mod(OfDpa *of_dpa, uint32_t group_id,
2163 RockerTlv **group_tlvs)
2165 OfDpaGroup *group = of_dpa_group_find(of_dpa, group_id);
2167 if (!group) {
2168 return -ROCKER_ENOENT;
2171 return of_dpa_cmd_group_do(of_dpa, group_id, group, group_tlvs);
2174 static int of_dpa_cmd_group_del(OfDpa *of_dpa, uint32_t group_id)
2176 OfDpaGroup *group = of_dpa_group_find(of_dpa, group_id);
2178 if (!group) {
2179 return -ROCKER_ENOENT;
2182 return of_dpa_group_del(of_dpa, group);
/* Group stats retrieval is not implemented by the emulated device. */
static int of_dpa_cmd_group_get_stats(OfDpa *of_dpa, uint32_t group_id,
                                      struct desc_info *info, char *buf)
{
    return -ROCKER_ENOTSUP;
}
2191 static int of_dpa_group_cmd(OfDpa *of_dpa, struct desc_info *info,
2192 char *buf, uint16_t cmd, RockerTlv **group_tlvs)
2194 uint32_t group_id;
2196 if (!group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]) {
2197 return -ROCKER_EINVAL;
2200 group_id = rocker_tlv_get_le32(group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]);
2202 switch (cmd) {
2203 case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD:
2204 return of_dpa_cmd_group_add(of_dpa, group_id, group_tlvs);
2205 case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD:
2206 return of_dpa_cmd_group_mod(of_dpa, group_id, group_tlvs);
2207 case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL:
2208 return of_dpa_cmd_group_del(of_dpa, group_id);
2209 case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_GET_STATS:
2210 return of_dpa_cmd_group_get_stats(of_dpa, group_id, info, buf);
2213 return -ROCKER_ENOTSUP;
/* World entry point for OF-DPA commands: parse the nested command-info
 * TLV and route flow commands to of_dpa_flow_cmd() and group commands to
 * of_dpa_group_cmd().  Anything else is -ROCKER_ENOTSUP.
 */
static int of_dpa_cmd(World *world, struct desc_info *info,
                      char *buf, uint16_t cmd, RockerTlv *cmd_info_tlv)
{
    OfDpa *of_dpa = world_private(world);
    RockerTlv *tlvs[ROCKER_TLV_OF_DPA_MAX + 1];

    rocker_tlv_parse_nested(tlvs, ROCKER_TLV_OF_DPA_MAX, cmd_info_tlv);

    switch (cmd) {
    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD:
    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD:
    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL:
    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_GET_STATS:
        return of_dpa_flow_cmd(of_dpa, info, buf, cmd, tlvs);
    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD:
    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD:
    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL:
    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_GET_STATS:
        return of_dpa_group_cmd(of_dpa, info, buf, cmd, tlvs);
    }

    return -ROCKER_ENOTSUP;
}
2240 static gboolean rocker_int64_equal(gconstpointer v1, gconstpointer v2)
2242 return *((const uint64_t *)v1) == *((const uint64_t *)v2);
2245 static guint rocker_int64_hash(gconstpointer v)
2247 return (guint)*(const uint64_t *)v;
/* World init hook: create the flow table (keyed by 64-bit cookie) and
 * group table (keyed by 32-bit group id), both owning their values.
 *
 * Returns 0 on success, -ENOMEM on table creation failure.  (NOTE(review):
 * glib's g_hash_table_new_full is documented to abort rather than return
 * NULL on OOM, so these checks look like belt-and-braces — confirm.)
 */
static int of_dpa_init(World *world)
{
    OfDpa *of_dpa = world_private(world);

    of_dpa->world = world;

    /* keys are pointers into the value structs, so no key destructor */
    of_dpa->flow_tbl = g_hash_table_new_full(rocker_int64_hash,
                                             rocker_int64_equal,
                                             NULL, g_free);
    if (!of_dpa->flow_tbl) {
        return -ENOMEM;
    }

    of_dpa->group_tbl = g_hash_table_new_full(g_int_hash, g_int_equal,
                                              NULL, g_free);
    if (!of_dpa->group_tbl) {
        goto err_group_tbl;
    }

    /* XXX hardcode some artificial table max values */
    of_dpa->flow_tbl_max_size = 100;
    of_dpa->group_tbl_max_size = 100;

    return 0;

err_group_tbl:
    g_hash_table_destroy(of_dpa->flow_tbl);
    return -ENOMEM;
}
/* World teardown hook: destroy both tables; g_free value destructors
 * release the flows/groups themselves.
 */
static void of_dpa_uninit(World *world)
{
    OfDpa *of_dpa = world_private(world);

    g_hash_table_destroy(of_dpa->group_tbl);
    g_hash_table_destroy(of_dpa->flow_tbl);
}
/* Context passed to of_dpa_flow_fill() while walking the flow table to
 * build a QMP RockerOfDpaFlowList reply.
 */
struct of_dpa_flow_fill_context {
    RockerOfDpaFlowList *list;  /* result list being accumulated */
    uint32_t tbl_id;            /* table filter; -1 matches all tables */
};
/*
 * GHashTable iterator callback: convert one internal of_dpa_flow into a
 * RockerOfDpaFlow QAPI object and prepend it to the result list carried
 * in the of_dpa_flow_fill_context.
 *
 * A key field is emitted only when the key or its mask is non-zero; a
 * mask field is emitted only when the key field was emitted AND the mask
 * is not the trivial all-ones exact-match mask.  Packet-header fields
 * are stored in network byte order internally (see of_dpa_flow_key) and
 * converted with ntohs() for QAPI output.
 */
static void of_dpa_flow_fill(void *cookie, void *value, void *user_data)
{
    struct of_dpa_flow *flow = value;
    struct of_dpa_flow_key *key = &flow->key;
    struct of_dpa_flow_key *mask = &flow->mask;
    struct of_dpa_flow_fill_context *flow_context = user_data;
    RockerOfDpaFlow *nflow;
    RockerOfDpaFlowKey *nkey;
    RockerOfDpaFlowMask *nmask;
    RockerOfDpaFlowAction *naction;

    /* tbl_id of -1 means "all tables"; otherwise filter on table id */
    if (flow_context->tbl_id != -1 &&
        flow_context->tbl_id != key->tbl_id) {
        return;
    }

    nflow = g_malloc0(sizeof(*nflow));
    nkey = nflow->key = g_malloc0(sizeof(*nkey));
    nmask = nflow->mask = g_malloc0(sizeof(*nmask));
    naction = nflow->action = g_malloc0(sizeof(*naction));

    nflow->cookie = flow->cookie;
    nflow->hits = flow->stats.hits;
    nkey->priority = flow->priority;
    nkey->tbl_id = key->tbl_id;

    if (key->in_pport || mask->in_pport) {
        nkey->has_in_pport = true;
        nkey->in_pport = key->in_pport;
    }

    /* all-ones mask == exact match: don't report a redundant mask */
    if (nkey->has_in_pport && mask->in_pport != 0xffffffff) {
        nmask->has_in_pport = true;
        nmask->in_pport = mask->in_pport;
    }

    if (key->eth.vlan_id || mask->eth.vlan_id) {
        nkey->has_vlan_id = true;
        nkey->vlan_id = ntohs(key->eth.vlan_id);
    }

    if (nkey->has_vlan_id && mask->eth.vlan_id != 0xffff) {
        nmask->has_vlan_id = true;
        nmask->vlan_id = ntohs(mask->eth.vlan_id);
    }

    if (key->tunnel_id || mask->tunnel_id) {
        nkey->has_tunnel_id = true;
        nkey->tunnel_id = key->tunnel_id;
    }

    if (nkey->has_tunnel_id && mask->tunnel_id != 0xffffffff) {
        nmask->has_tunnel_id = true;
        nmask->tunnel_id = mask->tunnel_id;
    }

    if (memcmp(key->eth.src.a, zero_mac.a, ETH_ALEN) ||
        memcmp(mask->eth.src.a, zero_mac.a, ETH_ALEN)) {
        nkey->has_eth_src = true;
        nkey->eth_src = qemu_mac_strdup_printf(key->eth.src.a);
    }

    if (nkey->has_eth_src && memcmp(mask->eth.src.a, ff_mac.a, ETH_ALEN)) {
        nmask->has_eth_src = true;
        nmask->eth_src = qemu_mac_strdup_printf(mask->eth.src.a);
    }

    if (memcmp(key->eth.dst.a, zero_mac.a, ETH_ALEN) ||
        memcmp(mask->eth.dst.a, zero_mac.a, ETH_ALEN)) {
        nkey->has_eth_dst = true;
        nkey->eth_dst = qemu_mac_strdup_printf(key->eth.dst.a);
    }

    if (nkey->has_eth_dst && memcmp(mask->eth.dst.a, ff_mac.a, ETH_ALEN)) {
        nmask->has_eth_dst = true;
        nmask->eth_dst = qemu_mac_strdup_printf(mask->eth.dst.a);
    }

    if (key->eth.type) {

        nkey->has_eth_type = true;
        nkey->eth_type = ntohs(key->eth.type);

        /* L3 proto/tos fields apply to IPv4 (0x0800) and IPv6 (0x86dd) */
        switch (ntohs(key->eth.type)) {
        case 0x0800:
        case 0x86dd:
            if (key->ip.proto || mask->ip.proto) {
                nkey->has_ip_proto = true;
                nkey->ip_proto = key->ip.proto;
            }
            if (nkey->has_ip_proto && mask->ip.proto != 0xff) {
                nmask->has_ip_proto = true;
                nmask->ip_proto = mask->ip.proto;
            }
            if (key->ip.tos || mask->ip.tos) {
                nkey->has_ip_tos = true;
                nkey->ip_tos = key->ip.tos;
            }
            if (nkey->has_ip_tos && mask->ip.tos != 0xff) {
                nmask->has_ip_tos = true;
                nmask->ip_tos = mask->ip.tos;
            }
            break;
        }

        switch (ntohs(key->eth.type)) {
        case 0x0800:
            /* IPv4 only: report the dst address in CIDR "a.b.c.d/len"
             * form, folding the mask into the prefix length.
             * NOTE(review): inet_ntoa() returns a static buffer; safe
             * here only because the result is copied immediately. */
            if (key->ipv4.addr.dst || mask->ipv4.addr.dst) {
                char *dst = inet_ntoa(*(struct in_addr *)&key->ipv4.addr.dst);
                int dst_len = of_dpa_mask2prefix(mask->ipv4.addr.dst);
                nkey->has_ip_dst = true;
                nkey->ip_dst = g_strdup_printf("%s/%d", dst, dst_len);
            }
            break;
        }
    }

    if (flow->action.goto_tbl) {
        naction->has_goto_tbl = true;
        naction->goto_tbl = flow->action.goto_tbl;
    }

    if (flow->action.write.group_id) {
        naction->has_group_id = true;
        naction->group_id = flow->action.write.group_id;
    }

    if (flow->action.apply.new_vlan_id) {
        naction->has_new_vlan_id = true;
        naction->new_vlan_id = flow->action.apply.new_vlan_id;
    }

    QAPI_LIST_PREPEND(flow_context->list, nflow);
}
2428 RockerOfDpaFlowList *qmp_query_rocker_of_dpa_flows(const char *name,
2429 bool has_tbl_id,
2430 uint32_t tbl_id,
2431 Error **errp)
2433 struct rocker *r;
2434 struct world *w;
2435 struct of_dpa *of_dpa;
2436 struct of_dpa_flow_fill_context fill_context = {
2437 .list = NULL,
2438 .tbl_id = tbl_id,
2441 r = rocker_find(name);
2442 if (!r) {
2443 error_setg(errp, "rocker %s not found", name);
2444 return NULL;
2447 w = rocker_get_world(r, ROCKER_WORLD_TYPE_OF_DPA);
2448 if (!w) {
2449 error_setg(errp, "rocker %s doesn't have OF-DPA world", name);
2450 return NULL;
2453 of_dpa = world_private(w);
2455 g_hash_table_foreach(of_dpa->flow_tbl, of_dpa_flow_fill, &fill_context);
2457 return fill_context.list;
/* Per-query state passed to of_dpa_group_fill() via g_hash_table_foreach() */
struct of_dpa_group_fill_context {
    RockerOfDpaGroupList *list;  /* accumulated result list (prepend order) */
    uint8_t type;                /* group-type filter; 9 matches all types */
};
/*
 * GHashTable iterator callback: convert one internal of_dpa_group into a
 * RockerOfDpaGroup QAPI object and prepend it to the result list carried
 * in the of_dpa_group_fill_context.  Which optional fields are emitted
 * depends on the group type encoded in the group id.
 */
static void of_dpa_group_fill(void *key, void *value, void *user_data)
{
    struct of_dpa_group *group = value;
    struct of_dpa_group_fill_context *flow_context = user_data;
    RockerOfDpaGroup *ngroup;
    int i;

    /* 9 is the caller-side "match all group types" sentinel */
    if (flow_context->type != 9 &&
        flow_context->type != ROCKER_GROUP_TYPE_GET(group->id)) {
        return;
    }

    ngroup = g_malloc0(sizeof(*ngroup));

    ngroup->id = group->id;

    ngroup->type = ROCKER_GROUP_TYPE_GET(group->id);

    switch (ngroup->type) {
    case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
        /* vlan and pport are packed into the group id itself */
        ngroup->has_vlan_id = true;
        ngroup->vlan_id = ROCKER_GROUP_VLAN_GET(group->id);
        ngroup->has_pport = true;
        ngroup->pport = ROCKER_GROUP_PORT_GET(group->id);
        ngroup->has_out_pport = true;
        ngroup->out_pport = group->l2_interface.out_pport;
        ngroup->has_pop_vlan = true;
        ngroup->pop_vlan = group->l2_interface.pop_vlan;
        break;
    case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
        ngroup->has_index = true;
        ngroup->index = ROCKER_GROUP_INDEX_LONG_GET(group->id);
        ngroup->has_group_id = true;
        ngroup->group_id = group->l2_rewrite.group_id;
        if (group->l2_rewrite.vlan_id) {
            ngroup->has_set_vlan_id = true;
            ngroup->set_vlan_id = ntohs(group->l2_rewrite.vlan_id);
        }
        /* only report MAC rewrites that are actually set (non-zero) */
        if (memcmp(group->l2_rewrite.src_mac.a, zero_mac.a, ETH_ALEN)) {
            ngroup->has_set_eth_src = true;
            ngroup->set_eth_src =
                qemu_mac_strdup_printf(group->l2_rewrite.src_mac.a);
        }
        if (memcmp(group->l2_rewrite.dst_mac.a, zero_mac.a, ETH_ALEN)) {
            ngroup->has_set_eth_dst = true;
            ngroup->set_eth_dst =
                qemu_mac_strdup_printf(group->l2_rewrite.dst_mac.a);
        }
        break;
    case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
    case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
        ngroup->has_vlan_id = true;
        ngroup->vlan_id = ROCKER_GROUP_VLAN_GET(group->id);
        ngroup->has_index = true;
        ngroup->index = ROCKER_GROUP_INDEX_GET(group->id);
        /* member group ids; has_group_ids flips on the first member */
        for (i = 0; i < group->l2_flood.group_count; i++) {
            ngroup->has_group_ids = true;
            QAPI_LIST_PREPEND(ngroup->group_ids, group->l2_flood.group_ids[i]);
        }
        break;
    case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
        ngroup->has_index = true;
        ngroup->index = ROCKER_GROUP_INDEX_LONG_GET(group->id);
        ngroup->has_group_id = true;
        ngroup->group_id = group->l3_unicast.group_id;
        if (group->l3_unicast.vlan_id) {
            ngroup->has_set_vlan_id = true;
            ngroup->set_vlan_id = ntohs(group->l3_unicast.vlan_id);
        }
        if (memcmp(group->l3_unicast.src_mac.a, zero_mac.a, ETH_ALEN)) {
            ngroup->has_set_eth_src = true;
            ngroup->set_eth_src =
                qemu_mac_strdup_printf(group->l3_unicast.src_mac.a);
        }
        if (memcmp(group->l3_unicast.dst_mac.a, zero_mac.a, ETH_ALEN)) {
            ngroup->has_set_eth_dst = true;
            ngroup->set_eth_dst =
                qemu_mac_strdup_printf(group->l3_unicast.dst_mac.a);
        }
        if (group->l3_unicast.ttl_check) {
            ngroup->has_ttl_check = true;
            ngroup->ttl_check = group->l3_unicast.ttl_check;
        }
        break;
    }

    QAPI_LIST_PREPEND(flow_context->list, ngroup);
}
2554 RockerOfDpaGroupList *qmp_query_rocker_of_dpa_groups(const char *name,
2555 bool has_type,
2556 uint8_t type,
2557 Error **errp)
2559 struct rocker *r;
2560 struct world *w;
2561 struct of_dpa *of_dpa;
2562 struct of_dpa_group_fill_context fill_context = {
2563 .list = NULL,
2564 .type = type,
2567 r = rocker_find(name);
2568 if (!r) {
2569 error_setg(errp, "rocker %s not found", name);
2570 return NULL;
2573 w = rocker_get_world(r, ROCKER_WORLD_TYPE_OF_DPA);
2574 if (!w) {
2575 error_setg(errp, "rocker %s doesn't have OF-DPA world", name);
2576 return NULL;
2579 of_dpa = world_private(w);
2581 g_hash_table_foreach(of_dpa->group_tbl, of_dpa_group_fill, &fill_context);
2583 return fill_context.list;
/* World callbacks wiring the OF-DPA implementation into the rocker core */
static WorldOps of_dpa_ops = {
    .name = "ofdpa",
    .init = of_dpa_init,
    .uninit = of_dpa_uninit,
    .ig = of_dpa_ig,      /* ingress packet processing */
    .cmd = of_dpa_cmd,    /* descriptor-ring command dispatch */
};
/*
 * Allocate the OF-DPA world for rocker @r; per-world private storage is
 * sized for OfDpa and later initialized by of_dpa_init().
 */
World *of_dpa_world_alloc(Rocker *r)
{
    return world_alloc(r, sizeof(OfDpa), ROCKER_WORLD_TYPE_OF_DPA, &of_dpa_ops);
}