/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Routing netlink socket interface: protocol independent part.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Fixes:
 *	Vitaly E. Lavrov		RTA_OK arithmetic was wrong.
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/security.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/string.h>

#include <linux/inet.h>
#include <linux/netdevice.h>

#include <net/protocol.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
DECLARE_MUTEX(rtnl_sem);

int rtnl_lock_interruptible(void)
{
	return down_interruptible(&rtnl_sem);
}
void rtnl_unlock(void)
{
	rtnl_shunlock();
	netdev_run_todo();
}
int rtattr_parse(struct rtattr *tb[], int maxattr, struct rtattr *rta, int len)
{
	memset(tb, 0, sizeof(struct rtattr *) * maxattr);

	while (RTA_OK(rta, len)) {
		unsigned flavor = rta->rta_type;
		if (flavor && flavor <= maxattr)
			tb[flavor - 1] = rta;
		rta = RTA_NEXT(rta, len);
	}

	return 0;
}
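/*
 * Usage sketch (illustrative only, not part of the original source): a
 * caller typically points rtattr_parse() at the attributes that follow the
 * fixed per-family header and then indexes the table by attribute type.
 * The helper name and the chosen message layout are assumptions for the
 * example.
 */
static inline void example_parse_link_attrs(struct nlmsghdr *nlh)
{
	struct rtattr *tb[IFLA_MAX];
	struct ifinfomsg *ifm = NLMSG_DATA(nlh);

	/* attributes start right after the aligned ifinfomsg header */
	rtattr_parse(tb, IFLA_MAX, IFLA_RTA(ifm),
		     nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*ifm)));

	if (tb[IFLA_IFNAME - 1])
		printk(KERN_DEBUG "IFLA_IFNAME attribute present\n");
}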
struct rtnetlink_link *rtnetlink_links[NPROTO];
static const int rtm_min[RTM_NR_FAMILIES] =
{
	[RTM_FAM(RTM_NEWLINK)]      = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
	[RTM_FAM(RTM_NEWADDR)]      = NLMSG_LENGTH(sizeof(struct ifaddrmsg)),
	[RTM_FAM(RTM_NEWROUTE)]     = NLMSG_LENGTH(sizeof(struct rtmsg)),
	[RTM_FAM(RTM_NEWNEIGH)]     = NLMSG_LENGTH(sizeof(struct ndmsg)),
	[RTM_FAM(RTM_NEWRULE)]      = NLMSG_LENGTH(sizeof(struct rtmsg)),
	[RTM_FAM(RTM_NEWQDISC)]     = NLMSG_LENGTH(sizeof(struct tcmsg)),
	[RTM_FAM(RTM_NEWTCLASS)]    = NLMSG_LENGTH(sizeof(struct tcmsg)),
	[RTM_FAM(RTM_NEWTFILTER)]   = NLMSG_LENGTH(sizeof(struct tcmsg)),
	[RTM_FAM(RTM_NEWACTION)]    = NLMSG_LENGTH(sizeof(struct tcamsg)),
	[RTM_FAM(RTM_NEWPREFIX)]    = NLMSG_LENGTH(sizeof(struct rtgenmsg)),
	[RTM_FAM(RTM_GETMULTICAST)] = NLMSG_LENGTH(sizeof(struct rtgenmsg)),
	[RTM_FAM(RTM_GETANYCAST)]   = NLMSG_LENGTH(sizeof(struct rtgenmsg)),
	[RTM_FAM(RTM_NEWNEIGHTBL)]  = NLMSG_LENGTH(sizeof(struct ndtmsg)),
};
static const int rta_max[RTM_NR_FAMILIES] =
{
	[RTM_FAM(RTM_NEWLINK)]      = IFLA_MAX,
	[RTM_FAM(RTM_NEWADDR)]      = IFA_MAX,
	[RTM_FAM(RTM_NEWROUTE)]     = RTA_MAX,
	[RTM_FAM(RTM_NEWNEIGH)]     = NDA_MAX,
	[RTM_FAM(RTM_NEWRULE)]      = RTA_MAX,
	[RTM_FAM(RTM_NEWQDISC)]     = TCA_MAX,
	[RTM_FAM(RTM_NEWTCLASS)]    = TCA_MAX,
	[RTM_FAM(RTM_NEWTFILTER)]   = TCA_MAX,
	[RTM_FAM(RTM_NEWACTION)]    = TCAA_MAX,
	[RTM_FAM(RTM_NEWNEIGHTBL)]  = NDTA_MAX,
};
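/*
 * Illustrative sketch (not part of the original source): both tables are
 * indexed by RTM_FAM(), so the NEW/DEL/GET/SET variants of one message
 * family share a single slot holding the fixed header length and the
 * highest attribute type.  The helper below is a hypothetical example of
 * such a lookup used for basic length validation.
 */
static inline int example_check_msg_len(struct nlmsghdr *nlh)
{
	int sz_idx = RTM_FAM(nlh->nlmsg_type);

	return (nlh->nlmsg_len >= rtm_min[sz_idx]) ? 0 : -EINVAL;
}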
void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const void *data)
{
	struct rtattr *rta;
	int size = RTA_LENGTH(attrlen);

	rta = (struct rtattr *)skb_put(skb, RTA_ALIGN(size));
	rta->rta_type = attrtype;
	rta->rta_len = size;
	memcpy(RTA_DATA(rta), data, attrlen);
	memset(RTA_DATA(rta) + attrlen, 0, RTA_ALIGN(size) - size);
}
size_t rtattr_strlcpy(char *dest, const struct rtattr *rta, size_t size)
{
	size_t ret = RTA_PAYLOAD(rta);
	char *src = RTA_DATA(rta);

	if (ret > 0 && src[ret - 1] == '\0')
		ret--;
	if (size > 0) {
		size_t len = (ret >= size) ? size - 1 : ret;
		memset(dest, 0, size);
		memcpy(dest, src, len);
	}

	return ret;
}
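/*
 * Usage sketch (illustrative, not part of the original source): copy a
 * string attribute into a fixed-size buffer and detect truncation from the
 * strlcpy-style return value, as do_setlink() below does for IFLA_IFNAME.
 */
static inline int example_copy_ifname(char *buf, const struct rtattr *rta)
{
	if (rtattr_strlcpy(buf, rta, IFNAMSIZ) >= IFNAMSIZ)
		return -EINVAL;	/* name did not fit */
	return 0;
}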
int rtnetlink_send(struct sk_buff *skb, u32 pid, unsigned group, int echo)
{
	int err = 0;

	NETLINK_CB(skb).dst_groups = group;
	if (echo)
		atomic_inc(&skb->users);
	netlink_broadcast(rtnl, skb, pid, group, GFP_KERNEL);
	if (echo)
		err = netlink_unicast(rtnl, skb, pid, MSG_DONTWAIT);
	return err;
}
int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
{
	struct rtattr *mx = (struct rtattr *)skb->tail;
	int i;

	RTA_PUT(skb, RTA_METRICS, 0, NULL);
	for (i = 0; i < RTAX_MAX; i++) {
		if (metrics[i])
			RTA_PUT(skb, i + 1, sizeof(u32), metrics + i);
	}
	mx->rta_len = skb->tail - (u8 *)mx;
	if (mx->rta_len == RTA_LENGTH(0))
		skb_trim(skb, (u8 *)mx - skb->data);
	return 0;

rtattr_failure:
	skb_trim(skb, (u8 *)mx - skb->data);
	return -1;
}
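/*
 * Usage sketch (illustrative, not part of the original source): a
 * per-protocol route dump would hand its dst metric array to
 * rtnetlink_put_metrics() while filling an RTM_NEWROUTE message.  The
 * helper name and the dst_entry access are assumptions for the example.
 */
static inline int example_put_dst_metrics(struct sk_buff *skb,
					  struct dst_entry *dst)
{
	return rtnetlink_put_metrics(skb, dst->metrics);
}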
static int rtnetlink_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
				 int type, u32 pid, u32 seq, u32 change,
				 unsigned int flags)
{
	struct ifinfomsg *r;
	struct nlmsghdr *nlh;
	unsigned char *b = skb->tail;

	nlh = NLMSG_NEW(skb, pid, seq, type, sizeof(*r), flags);
	r = NLMSG_DATA(nlh);
	r->ifi_family = AF_UNSPEC;
	r->ifi_type = dev->type;
	r->ifi_index = dev->ifindex;
	r->ifi_flags = dev_get_flags(dev);
	r->ifi_change = change;

	RTA_PUT(skb, IFLA_IFNAME, strlen(dev->name) + 1, dev->name);

	{
		u32 txqlen = dev->tx_queue_len;
		RTA_PUT(skb, IFLA_TXQLEN, sizeof(txqlen), &txqlen);
	}

	{
		u32 weight = dev->weight;
		RTA_PUT(skb, IFLA_WEIGHT, sizeof(weight), &weight);
	}

	{
		struct rtnl_link_ifmap map = {
			.mem_start = dev->mem_start,
			.mem_end   = dev->mem_end,
			.base_addr = dev->base_addr,
			.irq       = dev->irq,
			.dma       = dev->dma,
			.port      = dev->if_port,
		};
		RTA_PUT(skb, IFLA_MAP, sizeof(map), &map);
	}

	if (dev->addr_len) {
		RTA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr);
		RTA_PUT(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast);
	}

	{
		u32 mtu = dev->mtu;
		RTA_PUT(skb, IFLA_MTU, sizeof(mtu), &mtu);
	}

	if (dev->ifindex != dev->iflink) {
		u32 iflink = dev->iflink;
		RTA_PUT(skb, IFLA_LINK, sizeof(iflink), &iflink);
	}

	if (dev->qdisc_sleeping)
		RTA_PUT(skb, IFLA_QDISC,
			strlen(dev->qdisc_sleeping->ops->id) + 1,
			dev->qdisc_sleeping->ops->id);

	if (dev->master) {
		u32 master = dev->master->ifindex;
		RTA_PUT(skb, IFLA_MASTER, sizeof(master), &master);
	}

	if (dev->get_stats) {
		unsigned long *stats = (unsigned long *)dev->get_stats(dev);
		if (stats) {
			struct rtattr *a;
			__u32 *s;
			int i, n = sizeof(struct rtnl_link_stats) / 4;

			a = __RTA_PUT(skb, IFLA_STATS, n * 4);
			s = RTA_DATA(a);
			for (i = 0; i < n; i++)
				s[i] = stats[i];
		}
	}

	nlh->nlmsg_len = skb->tail - b;
	return skb->len;

nlmsg_failure:
rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}
static int rtnetlink_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
{
	int idx;
	int s_idx = cb->args[0];
	struct net_device *dev;

	read_lock(&dev_base_lock);
	for (dev = dev_base, idx = 0; dev; dev = dev->next, idx++) {
		if (idx < s_idx)
			continue;
		if (rtnetlink_fill_ifinfo(skb, dev, RTM_NEWLINK,
					  NETLINK_CB(cb->skb).pid,
					  cb->nlh->nlmsg_seq, 0,
					  NLM_F_MULTI) <= 0)
			break;
	}
	read_unlock(&dev_base_lock);
	cb->args[0] = idx;

	return skb->len;
}
static int do_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct ifinfomsg *ifm = NLMSG_DATA(nlh);
	struct rtattr **ida = arg;
	struct net_device *dev;
	int err, send_addr_notify = 0;

	if (ifm->ifi_index >= 0)
		dev = dev_get_by_index(ifm->ifi_index);
	else if (ida[IFLA_IFNAME - 1]) {
		char ifname[IFNAMSIZ];

		if (rtattr_strlcpy(ifname, ida[IFLA_IFNAME - 1],
				   IFNAMSIZ) >= IFNAMSIZ)
			return -EINVAL;
		dev = dev_get_by_name(ifname);
	} else
		return -EINVAL;

	if (!dev)
		return -ENODEV;

	err = -EINVAL;

	if (ifm->ifi_flags)
		dev_change_flags(dev, ifm->ifi_flags);

	if (ida[IFLA_MAP - 1]) {
		struct rtnl_link_ifmap *u_map;
		struct ifmap k_map;

		if (!dev->set_config) {
			err = -EOPNOTSUPP;
			goto out;
		}
		if (!netif_device_present(dev)) {
			err = -ENODEV;
			goto out;
		}
		if (ida[IFLA_MAP - 1]->rta_len != RTA_LENGTH(sizeof(*u_map)))
			goto out;

		u_map = RTA_DATA(ida[IFLA_MAP - 1]);

		k_map.mem_start = (unsigned long) u_map->mem_start;
		k_map.mem_end = (unsigned long) u_map->mem_end;
		k_map.base_addr = (unsigned short) u_map->base_addr;
		k_map.irq = (unsigned char) u_map->irq;
		k_map.dma = (unsigned char) u_map->dma;
		k_map.port = (unsigned char) u_map->port;

		err = dev->set_config(dev, &k_map);
		if (err)
			goto out;
	}

	if (ida[IFLA_ADDRESS - 1]) {
		if (!dev->set_mac_address) {
			err = -EOPNOTSUPP;
			goto out;
		}
		if (!netif_device_present(dev)) {
			err = -ENODEV;
			goto out;
		}
		if (ida[IFLA_ADDRESS - 1]->rta_len != RTA_LENGTH(dev->addr_len))
			goto out;

		err = dev->set_mac_address(dev, RTA_DATA(ida[IFLA_ADDRESS - 1]));
		if (err)
			goto out;
		send_addr_notify = 1;
	}

	if (ida[IFLA_BROADCAST - 1]) {
		if (ida[IFLA_BROADCAST - 1]->rta_len != RTA_LENGTH(dev->addr_len))
			goto out;
		memcpy(dev->broadcast, RTA_DATA(ida[IFLA_BROADCAST - 1]),
		       dev->addr_len);
		send_addr_notify = 1;
	}

	if (ida[IFLA_MTU - 1]) {
		if (ida[IFLA_MTU - 1]->rta_len != RTA_LENGTH(sizeof(u32)))
			goto out;
		err = dev_set_mtu(dev, *((u32 *) RTA_DATA(ida[IFLA_MTU - 1])));
		if (err)
			goto out;
	}

	if (ida[IFLA_TXQLEN - 1]) {
		if (ida[IFLA_TXQLEN - 1]->rta_len != RTA_LENGTH(sizeof(u32)))
			goto out;
		dev->tx_queue_len = *((u32 *) RTA_DATA(ida[IFLA_TXQLEN - 1]));
	}

	if (ida[IFLA_WEIGHT - 1]) {
		if (ida[IFLA_WEIGHT - 1]->rta_len != RTA_LENGTH(sizeof(u32)))
			goto out;
		dev->weight = *((u32 *) RTA_DATA(ida[IFLA_WEIGHT - 1]));
	}

	if (ifm->ifi_index >= 0 && ida[IFLA_IFNAME - 1]) {
		char ifname[IFNAMSIZ];

		if (rtattr_strlcpy(ifname, ida[IFLA_IFNAME - 1],
				   IFNAMSIZ) >= IFNAMSIZ)
			goto out;
		err = dev_change_name(dev, ifname);
		if (err)
			goto out;
	}

	err = 0;

out:
	if (send_addr_notify)
		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);

	dev_put(dev);
	return err;
}
static int rtnetlink_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
{
	int idx;
	int s_idx = cb->family;

	if (s_idx == 0)
		s_idx = 1;
	for (idx = 1; idx < NPROTO; idx++) {
		int type = cb->nlh->nlmsg_type - RTM_BASE;
		if (idx < s_idx || idx == PF_PACKET)
			continue;
		if (rtnetlink_links[idx] == NULL ||
		    rtnetlink_links[idx][type].dumpit == NULL)
			continue;
		if (idx > s_idx)
			memset(&cb->args[0], 0, sizeof(cb->args));
		if (rtnetlink_links[idx][type].dumpit(skb, cb))
			break;
	}
	cb->family = idx;

	return skb->len;
}
void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change)
{
	struct sk_buff *skb;
	int size = NLMSG_SPACE(sizeof(struct ifinfomsg) +
			       sizeof(struct rtnl_link_ifmap) +
			       sizeof(struct rtnl_link_stats) + 128);

	skb = alloc_skb(size, GFP_KERNEL);
	if (!skb)
		return;

	if (rtnetlink_fill_ifinfo(skb, dev, type, current->pid, 0, change, 0) < 0) {
		kfree_skb(skb);
		return;
	}
	NETLINK_CB(skb).dst_groups = RTMGRP_LINK;
	netlink_broadcast(rtnl, skb, 0, RTMGRP_LINK, GFP_KERNEL);
}
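/*
 * Usage sketch (illustrative, not part of the original source): any place
 * in the core that changes link state can announce it to RTMGRP_LINK
 * listeners through rtmsg_ifinfo(); the helper name and the chosen change
 * mask are assumptions for the example.
 */
static inline void example_announce_link_change(struct net_device *dev)
{
	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP | IFF_RUNNING);
}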
static int rtnetlink_done(struct netlink_callback *cb)
{
	return 0;
}

/* Protected by RTNL semaphore. */
static struct rtattr **rta_buf;
static int rtattr_max;
/* Process one rtnetlink message. */

static __inline__ int
rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *errp)
{
	struct rtnetlink_link *link;
	struct rtnetlink_link *link_tab;
	int sz_idx, kind;
	int min_len;
	int family;
	int type;
	int err;

	/* Only requests are handled by kernel now */
	if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
		return 0;

	type = nlh->nlmsg_type;

	/* A control message: ignore them */
	if (type < RTM_BASE)
		return 0;

	/* Unknown message: reply with EINVAL */
	if (type > RTM_MAX)
		goto err_inval;

	type -= RTM_BASE;

	/* All the messages must have at least 1 byte length */
	if (nlh->nlmsg_len < NLMSG_LENGTH(sizeof(struct rtgenmsg)))
		return 0;

	family = ((struct rtgenmsg *)NLMSG_DATA(nlh))->rtgen_family;
	if (family >= NPROTO) {
		*errp = -EAFNOSUPPORT;
		return -1;
	}

	link_tab = rtnetlink_links[family];
	if (link_tab == NULL)
		link_tab = rtnetlink_links[PF_UNSPEC];
	link = &link_tab[type];

	sz_idx = type >> 2;
	kind = type & 3;

	if (kind != 2 && security_netlink_recv(skb)) {
		*errp = -EPERM;
		return -1;
	}

	if (kind == 2 && nlh->nlmsg_flags & NLM_F_DUMP) {
		u32 rlen;

		if (link->dumpit == NULL)
			link = &(rtnetlink_links[PF_UNSPEC][type]);

		if (link->dumpit == NULL)
			goto err_inval;

		if ((*errp = netlink_dump_start(rtnl, skb, nlh,
						link->dumpit,
						rtnetlink_done)) != 0) {
			return -1;
		}
		rlen = NLMSG_ALIGN(nlh->nlmsg_len);
		if (rlen > skb->len)
			rlen = skb->len;
		skb_pull(skb, rlen);
		return -1;
	}

	memset(rta_buf, 0, (rtattr_max * sizeof(struct rtattr *)));

	min_len = rtm_min[sz_idx];
	if (nlh->nlmsg_len < min_len)
		goto err_inval;

	if (nlh->nlmsg_len > min_len) {
		int attrlen = nlh->nlmsg_len - NLMSG_ALIGN(min_len);
		struct rtattr *attr = (void *)nlh + NLMSG_ALIGN(min_len);

		while (RTA_OK(attr, attrlen)) {
			unsigned flavor = attr->rta_type;
			if (flavor) {
				if (flavor > rta_max[sz_idx])
					goto err_inval;
				rta_buf[flavor - 1] = attr;
			}
			attr = RTA_NEXT(attr, attrlen);
		}
	}

	if (link->doit == NULL)
		link = &(rtnetlink_links[PF_UNSPEC][type]);
	if (link->doit == NULL)
		goto err_inval;
	err = link->doit(skb, nlh, (void *)&rta_buf[0]);

	*errp = err;
	return err;

err_inval:
	*errp = -EINVAL;
	return -1;
}
/*
 * Process one packet of messages.
 * Malformed skbs with wrong lengths of messages are discarded silently.
 */

static inline int rtnetlink_rcv_skb(struct sk_buff *skb)
{
	int err;
	struct nlmsghdr *nlh;

	while (skb->len >= NLMSG_SPACE(0)) {
		u32 rlen;

		nlh = (struct nlmsghdr *)skb->data;
		if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len)
			return 0;
		rlen = NLMSG_ALIGN(nlh->nlmsg_len);
		if (rlen > skb->len)
			rlen = skb->len;
		if (rtnetlink_rcv_msg(skb, nlh, &err)) {
			/* Not an error, but we must interrupt processing
			 * here.  Note that in this case we do not pull the
			 * message from the skb; it will be processed later.
			 */
			if (err == 0)
				return -1;
			netlink_ack(skb, nlh, err);
		} else if (nlh->nlmsg_flags & NLM_F_ACK)
			netlink_ack(skb, nlh, 0);
		skb_pull(skb, rlen);
	}

	return 0;
}
/*
 * rtnetlink input queue processing routine:
 *  - process as much as there was in the queue upon entry;
 *  - feed skbs to rtnetlink_rcv_skb until it refuses a message,
 *    which happens when a dump has been started.
 */

static void rtnetlink_rcv(struct sock *sk, int len)
{
	unsigned int qlen = skb_queue_len(&sk->sk_receive_queue);

	do {
		struct sk_buff *skb;

		rtnl_lock();

		if (qlen > skb_queue_len(&sk->sk_receive_queue))
			qlen = skb_queue_len(&sk->sk_receive_queue);

		for (; qlen; qlen--) {
			skb = skb_dequeue(&sk->sk_receive_queue);
			if (rtnetlink_rcv_skb(skb)) {
				if (skb->len)
					skb_queue_head(&sk->sk_receive_queue,
						       skb);
				else {
					kfree_skb(skb);
					qlen--;
				}
				break;
			}
			kfree_skb(skb);
		}

		up(&rtnl_sem);

		netdev_run_todo();
	} while (qlen);
}
static struct rtnetlink_link link_rtnetlink_table[RTM_NR_MSGTYPES] =
{
	[RTM_GETLINK     - RTM_BASE] = { .dumpit = rtnetlink_dump_ifinfo },
	[RTM_SETLINK     - RTM_BASE] = { .doit   = do_setlink            },
	[RTM_GETADDR     - RTM_BASE] = { .dumpit = rtnetlink_dump_all    },
	[RTM_GETROUTE    - RTM_BASE] = { .dumpit = rtnetlink_dump_all    },
	[RTM_NEWNEIGH    - RTM_BASE] = { .doit   = neigh_add             },
	[RTM_DELNEIGH    - RTM_BASE] = { .doit   = neigh_delete          },
	[RTM_GETNEIGH    - RTM_BASE] = { .dumpit = neigh_dump_info       },
	[RTM_GETRULE     - RTM_BASE] = { .dumpit = rtnetlink_dump_all    },
	[RTM_GETNEIGHTBL - RTM_BASE] = { .dumpit = neightbl_dump_info    },
	[RTM_SETNEIGHTBL - RTM_BASE] = { .doit   = neightbl_set          },
};
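/*
 * Registration sketch (illustrative, not part of the original source): a
 * protocol family publishes its own handler table by storing it in
 * rtnetlink_links[] from its init path, under the RTNL.  The table and
 * helper names below are hypothetical.
 */
static struct rtnetlink_link example_inet_rtnetlink_table[RTM_NR_MSGTYPES];

static inline void example_register_inet_handlers(void)
{
	rtnetlink_links[PF_INET] = example_inet_rtnetlink_table;
}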
static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	switch (event) {
	case NETDEV_UNREGISTER:
		rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
		break;
	case NETDEV_REGISTER:
		rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
		break;
	case NETDEV_UP:
	case NETDEV_DOWN:
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
		break;
	case NETDEV_CHANGE:
	case NETDEV_GOING_DOWN:
		break;
	default:
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
		break;
	}
	return NOTIFY_DONE;
}
static struct notifier_block rtnetlink_dev_notifier = {
	.notifier_call = rtnetlink_event,
};
void __init rtnetlink_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(rta_max); i++)
		if (rta_max[i] > rtattr_max)
			rtattr_max = rta_max[i];
	rta_buf = kmalloc(rtattr_max * sizeof(struct rtattr *), GFP_KERNEL);
	if (!rta_buf)
		panic("rtnetlink_init: cannot allocate rta_buf\n");

	rtnl = netlink_kernel_create(NETLINK_ROUTE, rtnetlink_rcv);
	if (rtnl == NULL)
		panic("rtnetlink_init: cannot initialize rtnetlink\n");
	netlink_set_nonroot(NETLINK_ROUTE, NL_NONROOT_RECV);
	register_netdevice_notifier(&rtnetlink_dev_notifier);
	rtnetlink_links[PF_UNSPEC] = link_rtnetlink_table;
	rtnetlink_links[PF_PACKET] = link_rtnetlink_table;
}
EXPORT_SYMBOL(__rta_fill);
EXPORT_SYMBOL(rtattr_strlcpy);
EXPORT_SYMBOL(rtattr_parse);
EXPORT_SYMBOL(rtnetlink_links);
EXPORT_SYMBOL(rtnetlink_put_metrics);
EXPORT_SYMBOL(rtnl_lock);
EXPORT_SYMBOL(rtnl_lock_interruptible);
EXPORT_SYMBOL(rtnl_sem);
EXPORT_SYMBOL(rtnl_unlock);