/* net/sched/act_vlan.c - Linux 4.19-rc7 */
/*
 * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>

#include <linux/tc_act/tc_vlan.h>
#include <net/tc_act/tc_vlan.h>
static unsigned int vlan_net_id;
static struct tc_action_ops act_vlan_ops;
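
/* Datapath handler: runs for every packet matched by a filter with a vlan
 * action attached. Depending on the configured sub-command it pops the
 * outermost VLAN header, pushes a new one, or rewrites the VID/priority of
 * an existing tag, then returns the configured control verdict
 * (v->tcf_action). It executes in the softirq datapath under RCU read-side
 * protection, hence the rcu_dereference_bh() of the parameter block.
 */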
static int tcf_vlan_act(struct sk_buff *skb, const struct tc_action *a,
			struct tcf_result *res)
{
	struct tcf_vlan *v = to_vlan(a);
	struct tcf_vlan_params *p;
	int action;
	int err;
	u16 tci;

	tcf_lastuse_update(&v->tcf_tm);
	bstats_cpu_update(this_cpu_ptr(v->common.cpu_bstats), skb);

	/* Ensure 'data' points at mac_header prior calling vlan manipulating
	 * functions.
	 */
	if (skb_at_tc_ingress(skb))
		skb_push_rcsum(skb, skb->mac_len);

	action = READ_ONCE(v->tcf_action);

	p = rcu_dereference_bh(v->vlan_p);

	switch (p->tcfv_action) {
	case TCA_VLAN_ACT_POP:
		err = skb_vlan_pop(skb);
		if (err)
			goto drop;
		break;
	case TCA_VLAN_ACT_PUSH:
		err = skb_vlan_push(skb, p->tcfv_push_proto, p->tcfv_push_vid |
				    (p->tcfv_push_prio << VLAN_PRIO_SHIFT));
		if (err)
			goto drop;
		break;
	case TCA_VLAN_ACT_MODIFY:
		/* No-op if no vlan tag (either hw-accel or in-payload) */
		if (!skb_vlan_tagged(skb))
			goto out;
		/* extract existing tag (and guarantee no hw-accel tag) */
		if (skb_vlan_tag_present(skb)) {
			tci = skb_vlan_tag_get(skb);
			skb->vlan_tci = 0;
		} else {
			/* in-payload vlan tag, pop it */
			err = __skb_vlan_pop(skb, &tci);
			if (err)
				goto drop;
		}
		/* replace the vid */
		tci = (tci & ~VLAN_VID_MASK) | p->tcfv_push_vid;
		/* replace prio bits, if tcfv_push_prio specified */
		if (p->tcfv_push_prio) {
			tci &= ~VLAN_PRIO_MASK;
			tci |= p->tcfv_push_prio << VLAN_PRIO_SHIFT;
		}
		/* put updated tci as hwaccel tag */
		__vlan_hwaccel_put_tag(skb, p->tcfv_push_proto, tci);
		break;
	default:
		BUG();
	}

out:
	if (skb_at_tc_ingress(skb))
		skb_pull_rcsum(skb, skb->mac_len);

	return action;

drop:
	qstats_drop_inc(this_cpu_ptr(v->common.cpu_qstats));
	return TC_ACT_SHOT;
}
static const struct nla_policy vlan_policy[TCA_VLAN_MAX + 1] = {
	[TCA_VLAN_PARMS]		= { .len = sizeof(struct tc_vlan) },
	[TCA_VLAN_PUSH_VLAN_ID]		= { .type = NLA_U16 },
	[TCA_VLAN_PUSH_VLAN_PROTOCOL]	= { .type = NLA_U16 },
	[TCA_VLAN_PUSH_VLAN_PRIORITY]	= { .type = NLA_U8 },
};
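
/* Control path: parse and validate the netlink attributes for a new or
 * updated vlan action (VID range, 802.1Q/802.1ad protocol), then allocate a
 * fresh parameter block and swap it in under the action lock so the datapath
 * only ever sees a complete configuration.
 *
 * Illustrative iproute2 usage (assumes a clsact/ingress qdisc is already
 * attached; not part of this file):
 *   tc filter add dev eth0 ingress matchall \
 *           action vlan push id 20 protocol 802.1Q priority 3
 *   tc filter add dev eth0 ingress matchall action vlan pop
 */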
static int tcf_vlan_init(struct net *net, struct nlattr *nla,
			 struct nlattr *est, struct tc_action **a,
			 int ovr, int bind, bool rtnl_held,
			 struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, vlan_net_id);
	struct nlattr *tb[TCA_VLAN_MAX + 1];
	struct tcf_vlan_params *p;
	struct tc_vlan *parm;
	struct tcf_vlan *v;
	int action;
	u16 push_vid = 0;
	__be16 push_proto = 0;
	u8 push_prio = 0;
	bool exists = false;
	int ret = 0, err;

	if (!nla)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_VLAN_MAX, nla, vlan_policy, NULL);
	if (err < 0)
		return err;

	if (!tb[TCA_VLAN_PARMS])
		return -EINVAL;
	parm = nla_data(tb[TCA_VLAN_PARMS]);
	err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
	if (err < 0)
		return err;
	exists = err;
	if (exists && bind)
		return 0;

	switch (parm->v_action) {
	case TCA_VLAN_ACT_POP:
		break;
	case TCA_VLAN_ACT_PUSH:
	case TCA_VLAN_ACT_MODIFY:
		if (!tb[TCA_VLAN_PUSH_VLAN_ID]) {
			if (exists)
				tcf_idr_release(*a, bind);
			else
				tcf_idr_cleanup(tn, parm->index);
			return -EINVAL;
		}
		push_vid = nla_get_u16(tb[TCA_VLAN_PUSH_VLAN_ID]);
		if (push_vid >= VLAN_VID_MASK) {
			if (exists)
				tcf_idr_release(*a, bind);
			else
				tcf_idr_cleanup(tn, parm->index);
			return -ERANGE;
		}

		if (tb[TCA_VLAN_PUSH_VLAN_PROTOCOL]) {
			push_proto = nla_get_be16(tb[TCA_VLAN_PUSH_VLAN_PROTOCOL]);
			switch (push_proto) {
			case htons(ETH_P_8021Q):
			case htons(ETH_P_8021AD):
				break;
			default:
				if (exists)
					tcf_idr_release(*a, bind);
				else
					tcf_idr_cleanup(tn, parm->index);
				return -EPROTONOSUPPORT;
			}
		} else {
			push_proto = htons(ETH_P_8021Q);
		}

		if (tb[TCA_VLAN_PUSH_VLAN_PRIORITY])
			push_prio = nla_get_u8(tb[TCA_VLAN_PUSH_VLAN_PRIORITY]);
		break;
	default:
		if (exists)
			tcf_idr_release(*a, bind);
		else
			tcf_idr_cleanup(tn, parm->index);
		return -EINVAL;
	}
	action = parm->v_action;

	if (!exists) {
		ret = tcf_idr_create(tn, parm->index, est, a,
				     &act_vlan_ops, bind, true);
		if (ret) {
			tcf_idr_cleanup(tn, parm->index);
			return ret;
		}

		ret = ACT_P_CREATED;
	} else if (!ovr) {
		tcf_idr_release(*a, bind);
		return -EEXIST;
	}

	v = to_vlan(*a);

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p) {
		tcf_idr_release(*a, bind);
		return -ENOMEM;
	}

	p->tcfv_action = action;
	p->tcfv_push_vid = push_vid;
	p->tcfv_push_prio = push_prio;
	p->tcfv_push_proto = push_proto;

	spin_lock_bh(&v->tcf_lock);
	v->tcf_action = parm->action;
	rcu_swap_protected(v->vlan_p, p, lockdep_is_held(&v->tcf_lock));
	spin_unlock_bh(&v->tcf_lock);

	if (p)
		kfree_rcu(p, rcu);

	if (ret == ACT_P_CREATED)
		tcf_idr_insert(tn, *a);
	return ret;
}
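
/* Called when the action instance is destroyed; frees the parameter block
 * after an RCU grace period so in-flight readers stay safe.
 */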
static void tcf_vlan_cleanup(struct tc_action *a)
{
	struct tcf_vlan *v = to_vlan(a);
	struct tcf_vlan_params *p;

	p = rcu_dereference_protected(v->vlan_p, 1);
	if (p)
		kfree_rcu(p, rcu);
}
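
/* Fill a netlink message with the current configuration so userspace
 * (e.g. "tc actions ls action vlan") can display it. The action lock is
 * held while reading the parameters, keeping the dump consistent with
 * concurrent updates from tcf_vlan_init().
 */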
static int tcf_vlan_dump(struct sk_buff *skb, struct tc_action *a,
			 int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_vlan *v = to_vlan(a);
	struct tcf_vlan_params *p;
	struct tc_vlan opt = {
		.index = v->tcf_index,
		.refcnt = refcount_read(&v->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&v->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	spin_lock_bh(&v->tcf_lock);
	opt.action = v->tcf_action;
	p = rcu_dereference_protected(v->vlan_p, lockdep_is_held(&v->tcf_lock));
	opt.v_action = p->tcfv_action;
	if (nla_put(skb, TCA_VLAN_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	if ((p->tcfv_action == TCA_VLAN_ACT_PUSH ||
	     p->tcfv_action == TCA_VLAN_ACT_MODIFY) &&
	    (nla_put_u16(skb, TCA_VLAN_PUSH_VLAN_ID, p->tcfv_push_vid) ||
	     nla_put_be16(skb, TCA_VLAN_PUSH_VLAN_PROTOCOL,
			  p->tcfv_push_proto) ||
	     (nla_put_u8(skb, TCA_VLAN_PUSH_VLAN_PRIORITY,
			 p->tcfv_push_prio))))
		goto nla_put_failure;

	tcf_tm_dump(&t, &v->tcf_tm);
	if (nla_put_64bit(skb, TCA_VLAN_TM, sizeof(t), &t, TCA_VLAN_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&v->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&v->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static int tcf_vlan_walker(struct net *net, struct sk_buff *skb,
			   struct netlink_callback *cb, int type,
			   const struct tc_action_ops *ops,
			   struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, vlan_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_vlan_search(struct net *net, struct tc_action **a, u32 index,
			   struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, vlan_net_id);

	return tcf_idr_search(tn, a, index);
}
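
/* Registration glue: the ops table below ties the callbacks above to the
 * "vlan" action kind, and the per-netns boilerplate that follows sets up
 * the shared tc_action_net state for each network namespace.
 */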
static struct tc_action_ops act_vlan_ops = {
	.kind		= "vlan",
	.type		= TCA_ACT_VLAN,
	.owner		= THIS_MODULE,
	.act		= tcf_vlan_act,
	.dump		= tcf_vlan_dump,
	.init		= tcf_vlan_init,
	.cleanup	= tcf_vlan_cleanup,
	.walk		= tcf_vlan_walker,
	.lookup		= tcf_vlan_search,
	.size		= sizeof(struct tcf_vlan),
};

static __net_init int vlan_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, vlan_net_id);

	return tc_action_net_init(tn, &act_vlan_ops);
}

static void __net_exit vlan_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, vlan_net_id);
}

static struct pernet_operations vlan_net_ops = {
	.init = vlan_init_net,
	.exit_batch = vlan_exit_net,
	.id = &vlan_net_id,
	.size = sizeof(struct tc_action_net),
};

static int __init vlan_init_module(void)
{
	return tcf_register_action(&act_vlan_ops, &vlan_net_ops);
}

static void __exit vlan_cleanup_module(void)
{
	tcf_unregister_action(&act_vlan_ops, &vlan_net_ops);
}

module_init(vlan_init_module);
module_exit(vlan_cleanup_module);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("vlan manipulation actions");
MODULE_LICENSE("GPL v2");