net/core/flow_offload.c
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/flow_offload.h>
#include <linux/rtnetlink.h>
#include <linux/mutex.h>
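/* Allocate a flow_rule with room for @num_actions action entries. */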
struct flow_rule *flow_rule_alloc(unsigned int num_actions)
{
	struct flow_rule *rule;

	rule = kzalloc(struct_size(rule, action.entries, num_actions),
		       GFP_KERNEL);
	if (!rule)
		return NULL;

	rule->action.num_entries = num_actions;

	return rule;
}
EXPORT_SYMBOL(flow_rule_alloc);
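/*
 * Resolve the key/mask pair for dissector key @__type into @__out.
 * Shared by all of the flow_rule_match_*() helpers below.
 */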
#define FLOW_DISSECTOR_MATCH(__rule, __type, __out)				\
	const struct flow_match *__m = &(__rule)->match;			\
	struct flow_dissector *__d = (__m)->dissector;				\
										\
	(__out)->key = skb_flow_dissector_target(__d, __type, (__m)->key);	\
	(__out)->mask = skb_flow_dissector_target(__d, __type, (__m)->mask)

void flow_rule_match_meta(const struct flow_rule *rule,
			  struct flow_match_meta *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_META, out);
}
EXPORT_SYMBOL(flow_rule_match_meta);

void flow_rule_match_basic(const struct flow_rule *rule,
			   struct flow_match_basic *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out);
}
EXPORT_SYMBOL(flow_rule_match_basic);

void flow_rule_match_control(const struct flow_rule *rule,
			     struct flow_match_control *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_control);

void flow_rule_match_eth_addrs(const struct flow_rule *rule,
			       struct flow_match_eth_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_eth_addrs);

void flow_rule_match_vlan(const struct flow_rule *rule,
			  struct flow_match_vlan *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_VLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_vlan);

void flow_rule_match_cvlan(const struct flow_rule *rule,
			   struct flow_match_vlan *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CVLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_cvlan);

void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
				struct flow_match_ipv4_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv4_addrs);

void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
				struct flow_match_ipv6_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv6_addrs);

void flow_rule_match_ip(const struct flow_rule *rule,
			struct flow_match_ip *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_ip);

void flow_rule_match_ports(const struct flow_rule *rule,
			   struct flow_match_ports *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_ports);

void flow_rule_match_tcp(const struct flow_rule *rule,
			 struct flow_match_tcp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_TCP, out);
}
EXPORT_SYMBOL(flow_rule_match_tcp);

void flow_rule_match_icmp(const struct flow_rule *rule,
			  struct flow_match_icmp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ICMP, out);
}
EXPORT_SYMBOL(flow_rule_match_icmp);

void flow_rule_match_mpls(const struct flow_rule *rule,
			  struct flow_match_mpls *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_MPLS, out);
}
EXPORT_SYMBOL(flow_rule_match_mpls);

void flow_rule_match_enc_control(const struct flow_rule *rule,
				 struct flow_match_control *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_control);

void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv4_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv4_addrs);

void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv6_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv6_addrs);

void flow_rule_match_enc_ip(const struct flow_rule *rule,
			    struct flow_match_ip *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ip);

void flow_rule_match_enc_ports(const struct flow_rule *rule,
			       struct flow_match_ports *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ports);

void flow_rule_match_enc_keyid(const struct flow_rule *rule,
			       struct flow_match_enc_keyid *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_keyid);

void flow_rule_match_enc_opts(const struct flow_rule *rule,
			      struct flow_match_enc_opts *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_OPTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_opts);
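/* flow_block_cb: one driver callback bound to an offload block. */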
struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
					  void *cb_ident, void *cb_priv,
					  void (*release)(void *cb_priv))
{
	struct flow_block_cb *block_cb;

	block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
	if (!block_cb)
		return ERR_PTR(-ENOMEM);

	block_cb->cb = cb;
	block_cb->cb_ident = cb_ident;
	block_cb->cb_priv = cb_priv;
	block_cb->release = release;

	return block_cb;
}
EXPORT_SYMBOL(flow_block_cb_alloc);
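/* Run the optional release hook before freeing the callback. */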
void flow_block_cb_free(struct flow_block_cb *block_cb)
{
	if (block_cb->release)
		block_cb->release(block_cb->cb_priv);

	kfree(block_cb);
}
EXPORT_SYMBOL(flow_block_cb_free);

struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
					   flow_setup_cb_t *cb, void *cb_ident)
{
	struct flow_block_cb *block_cb;

	list_for_each_entry(block_cb, &block->cb_list, list) {
		if (block_cb->cb == cb &&
		    block_cb->cb_ident == cb_ident)
			return block_cb;
	}

	return NULL;
}
EXPORT_SYMBOL(flow_block_cb_lookup);

void *flow_block_cb_priv(struct flow_block_cb *block_cb)
{
	return block_cb->cb_priv;
}
EXPORT_SYMBOL(flow_block_cb_priv);

void flow_block_cb_incref(struct flow_block_cb *block_cb)
{
	block_cb->refcnt++;
}
EXPORT_SYMBOL(flow_block_cb_incref);

unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb)
{
	return --block_cb->refcnt;
}
EXPORT_SYMBOL(flow_block_cb_decref);

bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
			   struct list_head *driver_block_list)
{
	struct flow_block_cb *block_cb;

	list_for_each_entry(block_cb, driver_block_list, driver_list) {
		if (block_cb->cb == cb &&
		    block_cb->cb_ident == cb_ident)
			return true;
	}

	return false;
}
EXPORT_SYMBOL(flow_block_cb_is_busy);
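/*
 * Typical driver usage (sketch): an ndo_setup_tc(TC_SETUP_BLOCK) handler
 * forwards the flow_block_offload here along with a driver-owned block
 * list and setup callback, letting this helper handle BIND/UNBIND.
 */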
int flow_block_cb_setup_simple(struct flow_block_offload *f,
			       struct list_head *driver_block_list,
			       flow_setup_cb_t *cb,
			       void *cb_ident, void *cb_priv,
			       bool ingress_only)
{
	struct flow_block_cb *block_cb;

	if (ingress_only &&
	    f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	f->driver_block_list = driver_block_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(cb, cb_ident, driver_block_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, driver_block_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, cb_ident);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL(flow_block_cb_setup_simple);
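/*
 * Indirect block offload: netdevs without offload support of their own
 * (e.g. tunnel devices) are tracked in a refcounted rhashtable keyed by
 * the net_device pointer, each entry carrying a list of bound callbacks.
 */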
static LIST_HEAD(block_ing_cb_list);

static struct rhashtable indr_setup_block_ht;

struct flow_indr_block_cb {
	struct list_head list;
	void *cb_priv;
	flow_indr_block_bind_cb_t *cb;
	void *cb_ident;
};

struct flow_indr_block_dev {
	struct rhash_head ht_node;
	struct net_device *dev;
	unsigned int refcnt;
	struct list_head cb_list;
};

static const struct rhashtable_params flow_indr_setup_block_ht_params = {
	.key_offset	= offsetof(struct flow_indr_block_dev, dev),
	.head_offset	= offsetof(struct flow_indr_block_dev, ht_node),
	.key_len	= sizeof(struct net_device *),
};

static struct flow_indr_block_dev *
flow_indr_block_dev_lookup(struct net_device *dev)
{
	return rhashtable_lookup_fast(&indr_setup_block_ht, &dev,
				      flow_indr_setup_block_ht_params);
}
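/* Look up the per-device entry, creating it on demand; takes a reference. */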
static struct flow_indr_block_dev *
flow_indr_block_dev_get(struct net_device *dev)
{
	struct flow_indr_block_dev *indr_dev;

	indr_dev = flow_indr_block_dev_lookup(dev);
	if (indr_dev)
		goto inc_ref;

	indr_dev = kzalloc(sizeof(*indr_dev), GFP_KERNEL);
	if (!indr_dev)
		return NULL;

	INIT_LIST_HEAD(&indr_dev->cb_list);
	indr_dev->dev = dev;
	if (rhashtable_insert_fast(&indr_setup_block_ht, &indr_dev->ht_node,
				   flow_indr_setup_block_ht_params)) {
		kfree(indr_dev);
		return NULL;
	}

inc_ref:
	indr_dev->refcnt++;
	return indr_dev;
}

static void flow_indr_block_dev_put(struct flow_indr_block_dev *indr_dev)
{
	if (--indr_dev->refcnt)
		return;

	rhashtable_remove_fast(&indr_setup_block_ht, &indr_dev->ht_node,
			       flow_indr_setup_block_ht_params);
	kfree(indr_dev);
}

static struct flow_indr_block_cb *
flow_indr_block_cb_lookup(struct flow_indr_block_dev *indr_dev,
			  flow_indr_block_bind_cb_t *cb, void *cb_ident)
{
	struct flow_indr_block_cb *indr_block_cb;

	list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
		if (indr_block_cb->cb == cb &&
		    indr_block_cb->cb_ident == cb_ident)
			return indr_block_cb;
	return NULL;
}

static struct flow_indr_block_cb *
flow_indr_block_cb_add(struct flow_indr_block_dev *indr_dev, void *cb_priv,
		       flow_indr_block_bind_cb_t *cb, void *cb_ident)
{
	struct flow_indr_block_cb *indr_block_cb;

	indr_block_cb = flow_indr_block_cb_lookup(indr_dev, cb, cb_ident);
	if (indr_block_cb)
		return ERR_PTR(-EEXIST);

	indr_block_cb = kzalloc(sizeof(*indr_block_cb), GFP_KERNEL);
	if (!indr_block_cb)
		return ERR_PTR(-ENOMEM);

	indr_block_cb->cb_priv = cb_priv;
	indr_block_cb->cb = cb;
	indr_block_cb->cb_ident = cb_ident;
	list_add(&indr_block_cb->list, &indr_dev->cb_list);

	return indr_block_cb;
}

static void flow_indr_block_cb_del(struct flow_indr_block_cb *indr_block_cb)
{
	list_del(&indr_block_cb->list);
	kfree(indr_block_cb);
}
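/*
 * flow_indr_block_ing_cb_lock protects block_ing_cb_list; flow_block_ing_cmd()
 * walks the registered entries, letting each propagate the BIND/UNBIND for
 * the given device and callback.
 */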
static DEFINE_MUTEX(flow_indr_block_ing_cb_lock);

static void flow_block_ing_cmd(struct net_device *dev,
			       flow_indr_block_bind_cb_t *cb,
			       void *cb_priv,
			       enum flow_block_command command)
{
	struct flow_indr_block_ing_entry *entry;

	mutex_lock(&flow_indr_block_ing_cb_lock);
	list_for_each_entry(entry, &block_ing_cb_list, list) {
		entry->cb(dev, cb, cb_priv, command);
	}
	mutex_unlock(&flow_indr_block_ing_cb_lock);
}

int __flow_indr_block_cb_register(struct net_device *dev, void *cb_priv,
				  flow_indr_block_bind_cb_t *cb,
				  void *cb_ident)
{
	struct flow_indr_block_cb *indr_block_cb;
	struct flow_indr_block_dev *indr_dev;
	int err;

	indr_dev = flow_indr_block_dev_get(dev);
	if (!indr_dev)
		return -ENOMEM;

	indr_block_cb = flow_indr_block_cb_add(indr_dev, cb_priv, cb, cb_ident);
	err = PTR_ERR_OR_ZERO(indr_block_cb);
	if (err)
		goto err_dev_put;

	flow_block_ing_cmd(dev, indr_block_cb->cb, indr_block_cb->cb_priv,
			   FLOW_BLOCK_BIND);

	return 0;

err_dev_put:
	flow_indr_block_dev_put(indr_dev);
	return err;
}
EXPORT_SYMBOL_GPL(__flow_indr_block_cb_register);

int flow_indr_block_cb_register(struct net_device *dev, void *cb_priv,
				flow_indr_block_bind_cb_t *cb,
				void *cb_ident)
{
	int err;

	rtnl_lock();
	err = __flow_indr_block_cb_register(dev, cb_priv, cb, cb_ident);
	rtnl_unlock();

	return err;
}
EXPORT_SYMBOL_GPL(flow_indr_block_cb_register);

void __flow_indr_block_cb_unregister(struct net_device *dev,
				     flow_indr_block_bind_cb_t *cb,
				     void *cb_ident)
{
	struct flow_indr_block_cb *indr_block_cb;
	struct flow_indr_block_dev *indr_dev;

	indr_dev = flow_indr_block_dev_lookup(dev);
	if (!indr_dev)
		return;

	indr_block_cb = flow_indr_block_cb_lookup(indr_dev, cb, cb_ident);
	if (!indr_block_cb)
		return;

	flow_block_ing_cmd(dev, indr_block_cb->cb, indr_block_cb->cb_priv,
			   FLOW_BLOCK_UNBIND);

	flow_indr_block_cb_del(indr_block_cb);
	flow_indr_block_dev_put(indr_dev);
}
EXPORT_SYMBOL_GPL(__flow_indr_block_cb_unregister);

void flow_indr_block_cb_unregister(struct net_device *dev,
				   flow_indr_block_bind_cb_t *cb,
				   void *cb_ident)
{
	rtnl_lock();
	__flow_indr_block_cb_unregister(dev, cb, cb_ident);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(flow_indr_block_cb_unregister);
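/* Relay a block offload request to every callback registered for @dev. */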
void flow_indr_block_call(struct net_device *dev,
			  struct flow_block_offload *bo,
			  enum flow_block_command command)
{
	struct flow_indr_block_cb *indr_block_cb;
	struct flow_indr_block_dev *indr_dev;

	indr_dev = flow_indr_block_dev_lookup(dev);
	if (!indr_dev)
		return;

	list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
		indr_block_cb->cb(dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,
				  bo);
}
EXPORT_SYMBOL_GPL(flow_indr_block_call);

void flow_indr_add_block_ing_cb(struct flow_indr_block_ing_entry *entry)
{
	mutex_lock(&flow_indr_block_ing_cb_lock);
	list_add_tail(&entry->list, &block_ing_cb_list);
	mutex_unlock(&flow_indr_block_ing_cb_lock);
}
EXPORT_SYMBOL_GPL(flow_indr_add_block_ing_cb);

void flow_indr_del_block_ing_cb(struct flow_indr_block_ing_entry *entry)
{
	mutex_lock(&flow_indr_block_ing_cb_lock);
	list_del(&entry->list);
	mutex_unlock(&flow_indr_block_ing_cb_lock);
}
EXPORT_SYMBOL_GPL(flow_indr_del_block_ing_cb);
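/* Set up the indirect-block device hashtable early during boot. */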
static int __init init_flow_indr_rhashtable(void)
{
	return rhashtable_init(&indr_setup_block_ht,
			       &flow_indr_setup_block_ht_params);
}
subsys_initcall(init_flow_indr_rhashtable);