// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/act_ipt.c		iptables target interface
 *
 * TODO: Add other tables. For now we only support the ipv4 table targets
 *
 * Copyright:	Jamal Hadi Salim (2002-13)
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <linux/tc_act/tc_ipt.h>
#include <net/tc_act/tc_ipt.h>
#include <linux/netfilter_ipv4/ip_tables.h>
static unsigned int ipt_net_id;
static struct tc_action_ops act_ipt_ops;

static unsigned int xt_net_id;
static struct tc_action_ops act_xt_ops;
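
/* Look up the requested xt target by name/revision and validate the
 * userspace-supplied target data with xt_check_target() against a
 * zeroed ipt_entry. On success, t->u.kernel.target is set and the
 * module reference taken by xt_request_find_target() is kept.
 */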
static int ipt_init_target(struct net *net, struct xt_entry_target *t,
			   char *table, unsigned int hook)
{
	struct xt_tgchk_param par;
	struct xt_target *target;
	struct ipt_entry e = {};
	int ret = 0;

	target = xt_request_find_target(AF_INET, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target))
		return PTR_ERR(target);

	t->u.kernel.target = target;
	memset(&par, 0, sizeof(par));
	par.net       = net;
	par.table     = table;
	par.entryinfo = &e;
	par.target    = target;
	par.targinfo  = t->data;
	par.hook_mask = hook;
	par.family    = NFPROTO_IPV4;

	ret = xt_check_target(&par, t->u.target_size - sizeof(*t), 0, false);
	if (ret < 0) {
		module_put(t->u.kernel.target->me);
		return ret;
	}
	return 0;
}
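
/* Undo ipt_init_target(): run the target's destructor, if it has one,
 * and drop the module reference held on the target.
 */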
static void ipt_destroy_target(struct xt_entry_target *t, struct net *net)
{
	struct xt_tgdtor_param par = {
		.target   = t->u.kernel.target,
		.targinfo = t->data,
		.family   = NFPROTO_IPV4,
		.net      = net,
	};

	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
}
static void tcf_ipt_release(struct tc_action *a)
{
	struct tcf_ipt *ipt = to_ipt(a);

	if (ipt->tcfi_t) {
		ipt_destroy_target(ipt->tcfi_t, a->idrinfo->net);
		kfree(ipt->tcfi_t);
	}
	kfree(ipt->tcfi_tname);
}
static const struct nla_policy ipt_policy[TCA_IPT_MAX + 1] = {
	[TCA_IPT_TABLE]	= { .type = NLA_STRING, .len = IFNAMSIZ },
	[TCA_IPT_HOOK]	= { .type = NLA_U32 },
	[TCA_IPT_INDEX]	= { .type = NLA_U32 },
	[TCA_IPT_TARG]	= { .len = sizeof(struct xt_entry_target) },
};
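
/* Common creation/update path shared by the "ipt" and "xt" actions:
 * parse the netlink attributes, create a new action instance or update
 * an existing one looked up by index, and initialize the embedded
 * xt_entry_target for the requested table and hook.
 */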
static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
			  struct nlattr *est, struct tc_action **a,
			  const struct tc_action_ops *ops, int ovr, int bind,
			  struct tcf_proto *tp, u32 flags)
{
	struct tc_action_net *tn = net_generic(net, id);
	struct nlattr *tb[TCA_IPT_MAX + 1];
	struct tcf_ipt *ipt;
	struct xt_entry_target *td, *t;
	char *tname;
	bool exists = false;
	int ret = 0, err;
	u32 hook = 0;
	u32 index = 0;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_IPT_MAX, nla, ipt_policy,
					  NULL);
	if (err < 0)
		return err;

	if (tb[TCA_IPT_INDEX] != NULL)
		index = nla_get_u32(tb[TCA_IPT_INDEX]);

	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;
	exists = err;
	if (exists && bind)
		return 0;

	if (tb[TCA_IPT_HOOK] == NULL || tb[TCA_IPT_TARG] == NULL) {
		if (exists)
			tcf_idr_release(*a, bind);
		else
			tcf_idr_cleanup(tn, index);
		return -EINVAL;
	}

	td = (struct xt_entry_target *)nla_data(tb[TCA_IPT_TARG]);
	if (nla_len(tb[TCA_IPT_TARG]) != td->u.target_size) {
		if (exists)
			tcf_idr_release(*a, bind);
		else
			tcf_idr_cleanup(tn, index);
		return -EINVAL;
	}
	if (!exists) {
		ret = tcf_idr_create(tn, index, est, a, ops, bind,
				     false, 0);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}

		ret = ACT_P_CREATED;
	} else {
		if (bind)	/* don't override defaults */
			return 0;

		if (!ovr) {
			tcf_idr_release(*a, bind);
			return -EEXIST;
		}
	}
	hook = nla_get_u32(tb[TCA_IPT_HOOK]);

	err = -ENOMEM;
	tname = kmalloc(IFNAMSIZ, GFP_KERNEL);
	if (unlikely(!tname))
		goto err1;
	if (tb[TCA_IPT_TABLE] == NULL ||
	    nla_strlcpy(tname, tb[TCA_IPT_TABLE], IFNAMSIZ) >= IFNAMSIZ)
		strcpy(tname, "mangle");

	t = kmemdup(td, td->u.target_size, GFP_KERNEL);
	if (unlikely(!t))
		goto err2;

	err = ipt_init_target(net, t, tname, hook);
	if (err < 0)
		goto err3;

	ipt = to_ipt(*a);

	spin_lock_bh(&ipt->tcf_lock);
	if (ret != ACT_P_CREATED) {
		ipt_destroy_target(ipt->tcfi_t, net);
		kfree(ipt->tcfi_tname);
		kfree(ipt->tcfi_t);
	}
	ipt->tcfi_tname = tname;
	ipt->tcfi_t     = t;
	ipt->tcfi_hook  = hook;
	spin_unlock_bh(&ipt->tcf_lock);
	if (ret == ACT_P_CREATED)
		tcf_idr_insert(tn, *a);
	return ret;

err3:
	kfree(t);
err2:
	kfree(tname);
err1:
	tcf_idr_release(*a, bind);
	return err;
}
static int tcf_ipt_init(struct net *net, struct nlattr *nla,
			struct nlattr *est, struct tc_action **a, int ovr,
			int bind, bool rtnl_held, struct tcf_proto *tp,
			u32 flags, struct netlink_ext_ack *extack)
{
	return __tcf_ipt_init(net, ipt_net_id, nla, est, a, &act_ipt_ops, ovr,
			      bind, tp, flags);
}
static int tcf_xt_init(struct net *net, struct nlattr *nla,
		       struct nlattr *est, struct tc_action **a, int ovr,
		       int bind, bool unlocked, struct tcf_proto *tp,
		       u32 flags, struct netlink_ext_ack *extack)
{
	return __tcf_ipt_init(net, xt_net_id, nla, est, a, &act_xt_ops, ovr,
			      bind, tp, flags);
}
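
/* Per-packet action handler shared by "ipt" and "xt": build a minimal
 * nf_hook_state, run the configured target under the action lock and
 * translate the netfilter verdict into a TC action code (NF_ACCEPT ->
 * TC_ACT_OK, NF_DROP -> TC_ACT_SHOT, XT_CONTINUE -> TC_ACT_PIPE).
 */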
static int tcf_ipt_act(struct sk_buff *skb, const struct tc_action *a,
		       struct tcf_result *res)
{
	int ret = 0, result = 0;
	struct tcf_ipt *ipt = to_ipt(a);
	struct xt_action_param par;
	struct nf_hook_state state = {
		.net	= dev_net(skb->dev),
		.in	= skb->dev,
		.hook	= ipt->tcfi_hook,
		.pf	= NFPROTO_IPV4,
	};

	if (skb_unclone(skb, GFP_ATOMIC))
		return TC_ACT_UNSPEC;

	spin_lock(&ipt->tcf_lock);

	tcf_lastuse_update(&ipt->tcf_tm);
	bstats_update(&ipt->tcf_bstats, skb);

	/* yes, we have to worry about both in and out dev
	 * worry later - danger - this API seems to have changed
	 * from earlier kernels
	 */
	par.state    = &state;
	par.target   = ipt->tcfi_t->u.kernel.target;
	par.targinfo = ipt->tcfi_t->data;
	ret = par.target->target(skb, &par);

	switch (ret) {
	case NF_ACCEPT:
		result = TC_ACT_OK;
		break;
	case NF_DROP:
		result = TC_ACT_SHOT;
		ipt->tcf_qstats.drops++;
		break;
	case XT_CONTINUE:
		result = TC_ACT_PIPE;
		break;
	default:
		net_notice_ratelimited("tc filter: Bogus netfilter code %d assume ACCEPT\n",
				       ret);
		result = TC_ACT_OK;
		break;
	}

	spin_unlock(&ipt->tcf_lock);
	return result;
}
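
/* Dump the action's netlink attributes. The target is duplicated under
 * the action lock so the user-visible name can be overwritten with the
 * kernel target's name before it is put on the message.
 */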
static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind,
			int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_ipt *ipt = to_ipt(a);
	struct xt_entry_target *t;
	struct tcf_t tm;
	struct tc_cnt c;

	/* for simple targets kernel size == user size
	 * user name = target name
	 * for foolproof you need to not assume this
	 */

	spin_lock_bh(&ipt->tcf_lock);
	t = kmemdup(ipt->tcfi_t, ipt->tcfi_t->u.user.target_size, GFP_ATOMIC);
	if (unlikely(!t))
		goto nla_put_failure;

	c.bindcnt = atomic_read(&ipt->tcf_bindcnt) - bind;
	c.refcnt = refcount_read(&ipt->tcf_refcnt) - ref;
	strcpy(t->u.user.name, ipt->tcfi_t->u.kernel.target->name);

	if (nla_put(skb, TCA_IPT_TARG, ipt->tcfi_t->u.user.target_size, t) ||
	    nla_put_u32(skb, TCA_IPT_INDEX, ipt->tcf_index) ||
	    nla_put_u32(skb, TCA_IPT_HOOK, ipt->tcfi_hook) ||
	    nla_put(skb, TCA_IPT_CNT, sizeof(struct tc_cnt), &c) ||
	    nla_put_string(skb, TCA_IPT_TABLE, ipt->tcfi_tname))
		goto nla_put_failure;

	tcf_tm_dump(&tm, &ipt->tcf_tm);
	if (nla_put_64bit(skb, TCA_IPT_TM, sizeof(tm), &tm, TCA_IPT_PAD))
		goto nla_put_failure;

	spin_unlock_bh(&ipt->tcf_lock);
	kfree(t);
	return skb->len;

nla_put_failure:
	spin_unlock_bh(&ipt->tcf_lock);
	nlmsg_trim(skb, b);
	kfree(t);
	return -1;
}
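
/* The walker and lookup callbacks are thin wrappers around the generic
 * per-netns action table helpers; only the net id differs between the
 * "ipt" and "xt" flavours.
 */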
static int tcf_ipt_walker(struct net *net, struct sk_buff *skb,
			  struct netlink_callback *cb, int type,
			  const struct tc_action_ops *ops,
			  struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, ipt_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_ipt_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, ipt_net_id);

	return tcf_idr_search(tn, a, index);
}
static struct tc_action_ops act_ipt_ops = {
	.kind		= "ipt",
	.id		= TCA_ID_IPT,
	.owner		= THIS_MODULE,
	.act		= tcf_ipt_act,
	.dump		= tcf_ipt_dump,
	.cleanup	= tcf_ipt_release,
	.init		= tcf_ipt_init,
	.walk		= tcf_ipt_walker,
	.lookup		= tcf_ipt_search,
	.size		= sizeof(struct tcf_ipt),
};
static __net_init int ipt_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, ipt_net_id);

	return tc_action_net_init(net, tn, &act_ipt_ops);
}

static void __net_exit ipt_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, ipt_net_id);
}

static struct pernet_operations ipt_net_ops = {
	.init = ipt_init_net,
	.exit_batch = ipt_exit_net,
	.id   = &ipt_net_id,
	.size = sizeof(struct tc_action_net),
};
static int tcf_xt_walker(struct net *net, struct sk_buff *skb,
			 struct netlink_callback *cb, int type,
			 const struct tc_action_ops *ops,
			 struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, xt_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_xt_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, xt_net_id);

	return tcf_idr_search(tn, a, index);
}
static struct tc_action_ops act_xt_ops = {
	.kind		= "xt",
	.id		= TCA_ID_XT,
	.owner		= THIS_MODULE,
	.act		= tcf_ipt_act,
	.dump		= tcf_ipt_dump,
	.cleanup	= tcf_ipt_release,
	.init		= tcf_xt_init,
	.walk		= tcf_xt_walker,
	.lookup		= tcf_xt_search,
	.size		= sizeof(struct tcf_ipt),
};
static __net_init int xt_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, xt_net_id);

	return tc_action_net_init(net, tn, &act_xt_ops);
}

static void __net_exit xt_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, xt_net_id);
}

static struct pernet_operations xt_net_ops = {
	.init = xt_init_net,
	.exit_batch = xt_exit_net,
	.id   = &xt_net_id,
	.size = sizeof(struct tc_action_net),
};
MODULE_AUTHOR("Jamal Hadi Salim(2002-13)");
MODULE_DESCRIPTION("Iptables target actions");
MODULE_LICENSE("GPL");
MODULE_ALIAS("act_xt");
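
/* Register both the "xt" and "ipt" flavours; module load only fails if
 * neither action could be registered.
 */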
static int __init ipt_init_module(void)
{
	int ret1, ret2;

	ret1 = tcf_register_action(&act_xt_ops, &xt_net_ops);
	if (ret1 < 0)
		pr_err("Failed to load xt action\n");

	ret2 = tcf_register_action(&act_ipt_ops, &ipt_net_ops);
	if (ret2 < 0)
		pr_err("Failed to load ipt action\n");

	if (ret1 < 0 && ret2 < 0) {
		return ret1;
	} else
		return 0;
}

static void __exit ipt_cleanup_module(void)
{
	tcf_unregister_action(&act_ipt_ops, &ipt_net_ops);
	tcf_unregister_action(&act_xt_ops, &xt_net_ops);
}
module_init(ipt_init_module);
module_exit(ipt_cleanup_module);