/*
 * DECnet       An implementation of the DECnet protocol suite for the LINUX
 *              operating system.  DECnet is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              DECnet Routing Forwarding Information Base (Rules)
 *
 * Author:      Steve Whitehouse <SteveW@ACM.org>
 *              Mostly copied from Alexey Kuznetsov's ipv4/fib_rules.c
 */

#include <linux/config.h>
#include <linux/string.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/netdevice.h>
#include <linux/timer.h>
#include <linux/spinlock.h>
#include <linux/in_route.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <asm/atomic.h>
#include <asm/uaccess.h>
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/dn.h>
#include <net/dn_fib.h>
#include <net/dn_neigh.h>
#include <net/dn_dev.h>

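/*
 * Each dn_fib_rule describes one policy routing rule: a source/destination
 * prefix (and optionally a firewall mark and input interface) that selects
 * which routing table a matching flow is looked up in.  Rules are kept on
 * the dn_fib_rules list in order of increasing preference and are walked
 * under RCU during route lookup.
 */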
struct dn_fib_rule
{
	struct hlist_node	r_hlist;
	atomic_t		r_clntref;
	u32			r_preference;
	unsigned char		r_table;
	unsigned char		r_action;
	unsigned char		r_dst_len;
	unsigned char		r_src_len;
	__le16			r_src;
	__le16			r_srcmap;
	__le16			r_srcmask;
	__le16			r_dst;
	__le16			r_dstmask;
	unsigned char		r_flags;
#ifdef CONFIG_DECNET_ROUTE_FWMARK
	u32			r_fwmark;
#endif
	int			r_ifindex;
	char			r_ifname[IFNAMSIZ];
	int			r_dead;
	struct rcu_head		rcu;
};

static struct dn_fib_rule default_rule = {
	.r_clntref =	ATOMIC_INIT(2),
	.r_preference =	0x7fff,
	.r_table =	RT_TABLE_MAIN,
	.r_action =	RTN_UNICAST
};

static struct hlist_head dn_fib_rules;

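/*
 * RTM_DELRULE: remove the first rule whose attributes (src/dst prefix, type,
 * priority, input interface and, where configured, fwmark) all match the
 * request.  The built-in default rule cannot be deleted.
 */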
int dn_fib_rtm_delrule(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct rtattr **rta = arg;
	struct rtmsg *rtm = NLMSG_DATA(nlh);
	struct dn_fib_rule *r;
	struct hlist_node *node;
	int err = -ESRCH;

	hlist_for_each_entry(r, node, &dn_fib_rules, r_hlist) {
		if ((!rta[RTA_SRC-1] || memcmp(RTA_DATA(rta[RTA_SRC-1]), &r->r_src, 2) == 0) &&
		    rtm->rtm_src_len == r->r_src_len &&
		    rtm->rtm_dst_len == r->r_dst_len &&
		    (!rta[RTA_DST-1] || memcmp(RTA_DATA(rta[RTA_DST-1]), &r->r_dst, 2) == 0) &&
#ifdef CONFIG_DECNET_ROUTE_FWMARK
		    (!rta[RTA_PROTOINFO-1] || memcmp(RTA_DATA(rta[RTA_PROTOINFO-1]), &r->r_fwmark, 4) == 0) &&
#endif
		    (!rtm->rtm_type || rtm->rtm_type == r->r_action) &&
		    (!rta[RTA_PRIORITY-1] || memcmp(RTA_DATA(rta[RTA_PRIORITY-1]), &r->r_preference, 4) == 0) &&
		    (!rta[RTA_IIF-1] || rtattr_strcmp(rta[RTA_IIF-1], r->r_ifname) == 0) &&
		    (!rtm->rtm_table || (r && rtm->rtm_table == r->r_table))) {

			err = -EPERM;
			if (r == &default_rule)
				break;

			hlist_del_rcu(&r->r_hlist);
			r->r_dead = 1;
			dn_fib_rule_put(r);
			err = 0;
			break;
		}
	}

	return err;
}

static inline void dn_fib_rule_put_rcu(struct rcu_head *head)
{
	struct dn_fib_rule *r = container_of(head, struct dn_fib_rule, rcu);
	kfree(r);
}

void dn_fib_rule_put(struct dn_fib_rule *r)
{
	if (atomic_dec_and_test(&r->r_clntref)) {
		if (r->r_dead)
			call_rcu(&r->rcu, dn_fib_rule_put_rcu);
		else
			printk(KERN_DEBUG "Attempt to free alive dn_fib_rule\n");
	}
}

/*
 * RTM_NEWRULE: build a dn_fib_rule from the netlink attributes and insert
 * it into the rule list in preference order.
 */
int dn_fib_rtm_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct rtattr **rta = arg;
	struct rtmsg *rtm = NLMSG_DATA(nlh);
	struct dn_fib_rule *r, *new_r, *last = NULL;
	struct hlist_node *node = NULL;
	unsigned char table_id;

	if (rtm->rtm_src_len > 16 || rtm->rtm_dst_len > 16)
		return -EINVAL;

	if (rta[RTA_IIF-1] && RTA_PAYLOAD(rta[RTA_IIF-1]) > IFNAMSIZ)
		return -EINVAL;

	if (rtm->rtm_type == RTN_NAT)
		return -EINVAL;

	table_id = rtm->rtm_table;
	if (table_id == RT_TABLE_UNSPEC) {
		struct dn_fib_table *tb;
		if (rtm->rtm_type == RTN_UNICAST) {
			if ((tb = dn_fib_empty_table()) == NULL)
				return -ENOBUFS;
			table_id = tb->n;
		}
	}

	new_r = kmalloc(sizeof(*new_r), GFP_KERNEL);
	if (!new_r)
		return -ENOMEM;
	memset(new_r, 0, sizeof(*new_r));

	if (rta[RTA_SRC-1])
		memcpy(&new_r->r_src, RTA_DATA(rta[RTA_SRC-1]), 2);
	if (rta[RTA_DST-1])
		memcpy(&new_r->r_dst, RTA_DATA(rta[RTA_DST-1]), 2);
	if (rta[RTA_GATEWAY-1])
		memcpy(&new_r->r_srcmap, RTA_DATA(rta[RTA_GATEWAY-1]), 2);
	new_r->r_src_len = rtm->rtm_src_len;
	new_r->r_dst_len = rtm->rtm_dst_len;
	new_r->r_srcmask = dnet_make_mask(rtm->rtm_src_len);
	new_r->r_dstmask = dnet_make_mask(rtm->rtm_dst_len);
#ifdef CONFIG_DECNET_ROUTE_FWMARK
	if (rta[RTA_PROTOINFO-1])
		memcpy(&new_r->r_fwmark, RTA_DATA(rta[RTA_PROTOINFO-1]), 4);
#endif
	new_r->r_action = rtm->rtm_type;
	new_r->r_flags = rtm->rtm_flags;
	if (rta[RTA_PRIORITY-1])
		memcpy(&new_r->r_preference, RTA_DATA(rta[RTA_PRIORITY-1]), 4);
	new_r->r_table = table_id;
	if (rta[RTA_IIF-1]) {
		struct net_device *dev;
		rtattr_strlcpy(new_r->r_ifname, rta[RTA_IIF-1], IFNAMSIZ);
		new_r->r_ifindex = -1;
		dev = dev_get_by_name(new_r->r_ifname);
		if (dev) {
			new_r->r_ifindex = dev->ifindex;
			dev_put(dev);
		}
	}

	/* If no preference was given, pick one just below that of the
	 * second rule in the list. */
	r = container_of(dn_fib_rules.first, struct dn_fib_rule, r_hlist);
	if (!new_r->r_preference) {
		if (r && r->r_hlist.next != NULL) {
			r = container_of(r->r_hlist.next, struct dn_fib_rule, r_hlist);
			if (r->r_preference)
				new_r->r_preference = r->r_preference - 1;
		}
	}

	hlist_for_each_entry(r, node, &dn_fib_rules, r_hlist) {
		if (r->r_preference > new_r->r_preference)
			break;
		last = r;
	}
	atomic_inc(&new_r->r_clntref);

	if (last)
		hlist_add_after_rcu(&last->r_hlist, &new_r->r_hlist);
	else
		hlist_add_before_rcu(&new_r->r_hlist, &r->r_hlist);
	return 0;
}

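/*
 * dn_fib_lookup() walks the rule list under RCU.  The first rule whose
 * source/destination masks (and optional fwmark/input interface) match the
 * flow decides what happens: unicast/NAT rules select a table to consult,
 * while unreachable and similar actions fail the lookup immediately.
 */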
int dn_fib_lookup(const struct flowi *flp, struct dn_fib_res *res)
{
	struct dn_fib_rule *r, *policy;
	struct dn_fib_table *tb;
	__le16 saddr = flp->fld_src;
	__le16 daddr = flp->fld_dst;
	struct hlist_node *node;
	int err;

	rcu_read_lock();

	hlist_for_each_entry_rcu(r, node, &dn_fib_rules, r_hlist) {
		if (((saddr ^ r->r_src) & r->r_srcmask) ||
		    ((daddr ^ r->r_dst) & r->r_dstmask) ||
#ifdef CONFIG_DECNET_ROUTE_FWMARK
		    (r->r_fwmark && r->r_fwmark != flp->fld_fwmark) ||
#endif
		    (r->r_ifindex && r->r_ifindex != flp->iif))
			continue;

		switch(r->r_action) {
		case RTN_UNICAST:
		case RTN_NAT:
			policy = r;
			break;
		case RTN_UNREACHABLE:
			rcu_read_unlock();
			return -ENETUNREACH;
		default:
		case RTN_BLACKHOLE:
			rcu_read_unlock();
			return -EINVAL;
		case RTN_PROHIBIT:
			rcu_read_unlock();
			return -EACCES;
		}

		if ((tb = dn_fib_get_table(r->r_table, 0)) == NULL)
			continue;
		err = tb->lookup(tb, flp, res);
		if (err == 0) {
			res->r = policy;
			if (policy)
				atomic_inc(&policy->r_clntref);
			rcu_read_unlock();
			return 0;
		}
		if (err < 0 && err != -EAGAIN) {
			rcu_read_unlock();
			return err;
		}
	}

	rcu_read_unlock();
	return -ESRCH;
}

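/*
 * Classify an address by looking it up in the local table; if no entry is
 * found the address is assumed to be an ordinary unicast destination.
 */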
unsigned dnet_addr_type(__le16 addr)
{
	struct flowi fl = { .nl_u = { .dn_u = { .daddr = addr } } };
	struct dn_fib_res res;
	unsigned ret = RTN_UNICAST;
	struct dn_fib_table *tb = dn_fib_tables[RT_TABLE_LOCAL];

	res.r = NULL;

	if (tb) {
		if (!tb->lookup(tb, &fl, &res)) {
			ret = res.type;
			dn_fib_res_put(&res);
		}
	}
	return ret;
}

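/*
 * For NAT rules, rewrite the source address according to r_srcmap: either
 * substitute the mapped prefix (SNAT) or, for local/zero mappings,
 * masquerade behind r_srcmap.
 */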
__le16 dn_fib_rules_policy(__le16 saddr, struct dn_fib_res *res, unsigned *flags)
{
	struct dn_fib_rule *r = res->r;

	if (r->r_action == RTN_NAT) {
		int addrtype = dnet_addr_type(r->r_srcmap);

		if (addrtype == RTN_NAT) {
			saddr = (saddr & ~r->r_srcmask) | r->r_srcmap;
			*flags |= RTCF_SNAT;
		} else if (addrtype == RTN_LOCAL || r->r_srcmap == 0) {
			saddr = r->r_srcmap;
			*flags |= RTCF_MASQ;
		}
	}
	return saddr;
}

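/*
 * Keep r_ifindex in sync with device hotplug: detach clears the index when
 * a device goes away, attach restores it when a device with a matching name
 * reappears.
 */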
static void dn_fib_rules_detach(struct net_device *dev)
{
	struct hlist_node *node;
	struct dn_fib_rule *r;

	hlist_for_each_entry(r, node, &dn_fib_rules, r_hlist) {
		if (r->r_ifindex == dev->ifindex)
			r->r_ifindex = -1;
	}
}

static void dn_fib_rules_attach(struct net_device *dev)
{
	struct hlist_node *node;
	struct dn_fib_rule *r;

	hlist_for_each_entry(r, node, &dn_fib_rules, r_hlist) {
		if (r->r_ifindex == -1 && strcmp(dev->name, r->r_ifname) == 0)
			r->r_ifindex = dev->ifindex;
	}
}

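/* Netdevice notifier: fix up interface-bound rules on (un)register. */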
static int dn_fib_rules_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	switch (event) {
	case NETDEV_UNREGISTER:
		dn_fib_rules_detach(dev);
		dn_fib_sync_down(0, dev, 1);
		break;
	case NETDEV_REGISTER:
		dn_fib_rules_attach(dev);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block dn_fib_rules_notifier = {
	.notifier_call = dn_fib_rules_event,
};

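/*
 * Netlink dump support: dn_fib_fill_rule() encodes one rule as an
 * RTM_NEWRULE message, dn_fib_dump_rules() walks the rule list when user
 * space dumps the rules, resuming from cb->args[0].
 */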
static int dn_fib_fill_rule(struct sk_buff *skb, struct dn_fib_rule *r,
			    struct netlink_callback *cb, unsigned int flags)
{
	struct rtmsg *rtm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb->tail;

	nlh = NLMSG_NEW_ANSWER(skb, cb, RTM_NEWRULE, sizeof(*rtm), flags);
	rtm = NLMSG_DATA(nlh);
	rtm->rtm_family = AF_DECnet;
	rtm->rtm_dst_len = r->r_dst_len;
	rtm->rtm_src_len = r->r_src_len;
	rtm->rtm_tos = 0;
#ifdef CONFIG_DECNET_ROUTE_FWMARK
	if (r->r_fwmark)
		RTA_PUT(skb, RTA_PROTOINFO, 4, &r->r_fwmark);
#endif
	rtm->rtm_table = r->r_table;
	rtm->rtm_protocol = 0;
	rtm->rtm_scope = 0;
	rtm->rtm_type = r->r_action;
	rtm->rtm_flags = r->r_flags;

	if (r->r_dst_len)
		RTA_PUT(skb, RTA_DST, 2, &r->r_dst);
	if (r->r_src_len)
		RTA_PUT(skb, RTA_SRC, 2, &r->r_src);
	if (r->r_ifname[0])
		RTA_PUT(skb, RTA_IIF, IFNAMSIZ, &r->r_ifname);
	if (r->r_preference)
		RTA_PUT(skb, RTA_PRIORITY, 4, &r->r_preference);
	if (r->r_srcmap)
		RTA_PUT(skb, RTA_GATEWAY, 2, &r->r_srcmap);
	nlh->nlmsg_len = skb->tail - b;
	return skb->len;

nlmsg_failure:
rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}

int dn_fib_dump_rules(struct sk_buff *skb, struct netlink_callback *cb)
{
	int idx = 0;
	int s_idx = cb->args[0];
	struct dn_fib_rule *r;
	struct hlist_node *node;

	rcu_read_lock();
	hlist_for_each_entry(r, node, &dn_fib_rules, r_hlist) {
		if (idx < s_idx)
			goto next;
		if (dn_fib_fill_rule(skb, r, cb, NLM_F_MULTI) < 0)
			break;
next:
		idx++;
	}
	rcu_read_unlock();
	cb->args[0] = idx;

	return skb->len;
}

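/* Module init/exit: install the default rule and the netdevice notifier. */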
void __init dn_fib_rules_init(void)
{
	INIT_HLIST_HEAD(&dn_fib_rules);
	hlist_add_head(&default_rule.r_hlist, &dn_fib_rules);
	register_netdevice_notifier(&dn_fib_rules_notifier);
}

void __exit dn_fib_rules_cleanup(void)
{
	unregister_netdevice_notifier(&dn_fib_rules_notifier);
}