lwtunnel: fix autoload of lwt modules
net/core/lwtunnel.c

/*
 * lwtunnel	Infrastructure for light weight tunnels like mpls
 *
 * Authors:	Roopa Prabhu, <roopa@cumulusnetworks.com>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 */

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/lwtunnel.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/err.h>

#include <net/lwtunnel.h>
#include <net/rtnetlink.h>
#include <net/ip6_fib.h>
#include <net/nexthop.h>

#ifdef CONFIG_MODULES

static const char *lwtunnel_encap_str(enum lwtunnel_encap_types encap_type)
{
	/* Only lwt encaps implemented without using an interface for
	 * the encap need to return a string here.
	 */
	switch (encap_type) {
	case LWTUNNEL_ENCAP_MPLS:
		return "MPLS";
	case LWTUNNEL_ENCAP_ILA:
		return "ILA";
	case LWTUNNEL_ENCAP_SEG6:
		return "SEG6";
	case LWTUNNEL_ENCAP_BPF:
		return "BPF";
	case LWTUNNEL_ENCAP_IP6:
	case LWTUNNEL_ENCAP_IP:
	case LWTUNNEL_ENCAP_NONE:
	case __LWTUNNEL_ENCAP_MAX:
		/* should not have got here */
		WARN_ON(1);
		break;
	}
	return NULL;
}

#endif /* CONFIG_MODULES */

struct lwtunnel_state *lwtunnel_state_alloc(int encap_len)
{
	struct lwtunnel_state *lws;

	lws = kzalloc(sizeof(*lws) + encap_len, GFP_ATOMIC);

	return lws;
}
EXPORT_SYMBOL(lwtunnel_state_alloc);

static const struct lwtunnel_encap_ops __rcu *
		lwtun_encaps[LWTUNNEL_ENCAP_MAX + 1] __read_mostly;
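
/* Encap ops are registered per encap type; the slot is claimed with
 * cmpxchg() so only the first registration for a given type succeeds,
 * and it is cleared the same way on unregister.
 */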
int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *ops,
			   unsigned int num)
{
	if (num > LWTUNNEL_ENCAP_MAX)
		return -ERANGE;

	return !cmpxchg((const struct lwtunnel_encap_ops **)
			&lwtun_encaps[num],
			NULL, ops) ? 0 : -1;
}
EXPORT_SYMBOL(lwtunnel_encap_add_ops);

int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *ops,
			   unsigned int encap_type)
{
	int ret;

	if (encap_type == LWTUNNEL_ENCAP_NONE ||
	    encap_type > LWTUNNEL_ENCAP_MAX)
		return -ERANGE;

	ret = (cmpxchg((const struct lwtunnel_encap_ops **)
		       &lwtun_encaps[encap_type],
		       ops, NULL) == ops) ? 0 : -1;

	synchronize_net();

	return ret;
}
EXPORT_SYMBOL(lwtunnel_encap_del_ops);
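
/* Build per-route lwtunnel state from an RTA_ENCAP attribute by
 * dispatching to the ops registered for the requested encap type.
 */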
int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
			 struct nlattr *encap, unsigned int family,
			 const void *cfg, struct lwtunnel_state **lws)
{
	const struct lwtunnel_encap_ops *ops;
	int ret = -EINVAL;

	if (encap_type == LWTUNNEL_ENCAP_NONE ||
	    encap_type > LWTUNNEL_ENCAP_MAX)
		return ret;

	ret = -EOPNOTSUPP;
	rcu_read_lock();
	ops = rcu_dereference(lwtun_encaps[encap_type]);
	if (likely(ops && ops->build_state))
		ret = ops->build_state(dev, encap, family, cfg, lws);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(lwtunnel_build_state);
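
/* Check that ops for the given RTA_ENCAP_TYPE are available. If they are
 * not and the type is backed by a module, drop the RTNL lock, try to load
 * the "rtnl-lwt-<TYPE>" module alias and look the ops up again.
 */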
int lwtunnel_valid_encap_type(u16 encap_type)
{
	const struct lwtunnel_encap_ops *ops;
	int ret = -EINVAL;

	if (encap_type == LWTUNNEL_ENCAP_NONE ||
	    encap_type > LWTUNNEL_ENCAP_MAX)
		return ret;

	rcu_read_lock();
	ops = rcu_dereference(lwtun_encaps[encap_type]);
	rcu_read_unlock();
#ifdef CONFIG_MODULES
	if (!ops) {
		const char *encap_type_str = lwtunnel_encap_str(encap_type);

		if (encap_type_str) {
			__rtnl_unlock();
			request_module("rtnl-lwt-%s", encap_type_str);
			rtnl_lock();

			rcu_read_lock();
			ops = rcu_dereference(lwtun_encaps[encap_type]);
			rcu_read_unlock();
		}
	}
#endif
	return ops ? 0 : -EOPNOTSUPP;
}
EXPORT_SYMBOL(lwtunnel_valid_encap_type);
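
/* Validate the encap type of every nexthop in an RTA_MULTIPATH attribute. */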
int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int remaining)
{
	struct rtnexthop *rtnh = (struct rtnexthop *)attr;
	struct nlattr *nla_entype;
	struct nlattr *attrs;
	struct nlattr *nla;
	u16 encap_type;
	int attrlen;

	while (rtnh_ok(rtnh, remaining)) {
		attrlen = rtnh_attrlen(rtnh);
		if (attrlen > 0) {
			attrs = rtnh_attrs(rtnh);
			nla = nla_find(attrs, attrlen, RTA_ENCAP);
			nla_entype = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);

			if (nla_entype) {
				encap_type = nla_get_u16(nla_entype);

				if (lwtunnel_valid_encap_type(encap_type) != 0)
					return -EOPNOTSUPP;
			}
		}
		rtnh = rtnh_next(rtnh, &remaining);
	}

	return 0;
}
EXPORT_SYMBOL(lwtunnel_valid_encap_type_attr);

void lwtstate_free(struct lwtunnel_state *lws)
{
	const struct lwtunnel_encap_ops *ops = lwtun_encaps[lws->type];

	if (ops->destroy_state) {
		ops->destroy_state(lws);
		kfree_rcu(lws, rcu);
	} else {
		kfree(lws);
	}
}
EXPORT_SYMBOL(lwtstate_free);
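
/* Dump a route's encap state as a nested RTA_ENCAP attribute followed by
 * its RTA_ENCAP_TYPE.
 */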
int lwtunnel_fill_encap(struct sk_buff *skb, struct lwtunnel_state *lwtstate)
{
	const struct lwtunnel_encap_ops *ops;
	struct nlattr *nest;
	int ret = -EINVAL;

	if (!lwtstate)
		return 0;

	if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
	    lwtstate->type > LWTUNNEL_ENCAP_MAX)
		return 0;

	ret = -EOPNOTSUPP;
	nest = nla_nest_start(skb, RTA_ENCAP);
	rcu_read_lock();
	ops = rcu_dereference(lwtun_encaps[lwtstate->type]);
	if (likely(ops && ops->fill_encap))
		ret = ops->fill_encap(skb, lwtstate);
	rcu_read_unlock();

	if (ret)
		goto nla_put_failure;
	nla_nest_end(skb, nest);
	ret = nla_put_u16(skb, RTA_ENCAP_TYPE, lwtstate->type);
	if (ret)
		goto nla_put_failure;

	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);

	return (ret == -EOPNOTSUPP ? 0 : ret);
}
EXPORT_SYMBOL(lwtunnel_fill_encap);

int lwtunnel_get_encap_size(struct lwtunnel_state *lwtstate)
{
	const struct lwtunnel_encap_ops *ops;
	int ret = 0;

	if (!lwtstate)
		return 0;

	if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
	    lwtstate->type > LWTUNNEL_ENCAP_MAX)
		return 0;

	rcu_read_lock();
	ops = rcu_dereference(lwtun_encaps[lwtstate->type]);
	if (likely(ops && ops->get_encap_size))
		ret = nla_total_size(ops->get_encap_size(lwtstate));
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(lwtunnel_get_encap_size);

int lwtunnel_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b)
{
	const struct lwtunnel_encap_ops *ops;
	int ret = 0;

	if (!a && !b)
		return 0;

	if (!a || !b)
		return 1;

	if (a->type != b->type)
		return 1;

	if (a->type == LWTUNNEL_ENCAP_NONE ||
	    a->type > LWTUNNEL_ENCAP_MAX)
		return 0;

	rcu_read_lock();
	ops = rcu_dereference(lwtun_encaps[a->type]);
	if (likely(ops && ops->cmp_encap))
		ret = ops->cmp_encap(a, b);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(lwtunnel_cmp_encap);
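
/* Datapath entry points: lwtunnel_output(), lwtunnel_xmit() and
 * lwtunnel_input() hand the skb to the encap ops attached to its dst and
 * drop the packet when no handler is registered for the encap type.
 */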
int lwtunnel_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	const struct lwtunnel_encap_ops *ops;
	struct lwtunnel_state *lwtstate;
	int ret = -EINVAL;

	if (!dst)
		goto drop;
	lwtstate = dst->lwtstate;

	if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
	    lwtstate->type > LWTUNNEL_ENCAP_MAX)
		return 0;

	ret = -EOPNOTSUPP;
	rcu_read_lock();
	ops = rcu_dereference(lwtun_encaps[lwtstate->type]);
	if (likely(ops && ops->output))
		ret = ops->output(net, sk, skb);
	rcu_read_unlock();

	if (ret == -EOPNOTSUPP)
		goto drop;

	return ret;

drop:
	kfree_skb(skb);

	return ret;
}
EXPORT_SYMBOL(lwtunnel_output);

int lwtunnel_xmit(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	const struct lwtunnel_encap_ops *ops;
	struct lwtunnel_state *lwtstate;
	int ret = -EINVAL;

	if (!dst)
		goto drop;

	lwtstate = dst->lwtstate;

	if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
	    lwtstate->type > LWTUNNEL_ENCAP_MAX)
		return 0;

	ret = -EOPNOTSUPP;
	rcu_read_lock();
	ops = rcu_dereference(lwtun_encaps[lwtstate->type]);
	if (likely(ops && ops->xmit))
		ret = ops->xmit(skb);
	rcu_read_unlock();

	if (ret == -EOPNOTSUPP)
		goto drop;

	return ret;

drop:
	kfree_skb(skb);

	return ret;
}
EXPORT_SYMBOL(lwtunnel_xmit);

int lwtunnel_input(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	const struct lwtunnel_encap_ops *ops;
	struct lwtunnel_state *lwtstate;
	int ret = -EINVAL;

	if (!dst)
		goto drop;
	lwtstate = dst->lwtstate;

	if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
	    lwtstate->type > LWTUNNEL_ENCAP_MAX)
		return 0;

	ret = -EOPNOTSUPP;
	rcu_read_lock();
	ops = rcu_dereference(lwtun_encaps[lwtstate->type]);
	if (likely(ops && ops->input))
		ret = ops->input(skb);
	rcu_read_unlock();

	if (ret == -EOPNOTSUPP)
		goto drop;

	return ret;

drop:
	kfree_skb(skb);

	return ret;
}
EXPORT_SYMBOL(lwtunnel_input);