/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Routing netlink socket interface: protocol independent part.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Vitaly E. Lavrov		RTA_OK arithmetics was wrong.
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/security.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/string.h>

#include <linux/inet.h>
#include <linux/netdevice.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/pkt_sched.h>
DECLARE_MUTEX(rtnl_sem);
void rtnl_unlock(void)
{
        rtnl_shunlock();

        netdev_run_todo();
}
int rtattr_parse(struct rtattr *tb[], int maxattr, struct rtattr *rta, int len)
{
        memset(tb, 0, sizeof(struct rtattr*)*maxattr);

        while (RTA_OK(rta, len)) {
                unsigned flavor = rta->rta_type;
                if (flavor && flavor <= maxattr)
                        tb[flavor-1] = rta;
                rta = RTA_NEXT(rta, len);
        }

        return 0;
}
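/*
 * Illustrative usage sketch (not part of the original file): a message
 * handler typically parses the attributes that follow an ifinfomsg header
 * into a table indexed by attribute type and then picks out individual
 * attributes with RTA_DATA(). The helper name example_get_mtu() and the
 * choice of IFLA_MTU are assumptions made for this example only.
 */
static int example_get_mtu(struct nlmsghdr *nlh)
{
        struct ifinfomsg *ifm = NLMSG_DATA(nlh);
        struct rtattr *tb[IFLA_MAX];
        int len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*ifm));

        rtattr_parse(tb, IFLA_MAX, IFLA_RTA(ifm), len);
        if (tb[IFLA_MTU - 1] == NULL)
                return -ENOENT;

        return *(u32 *) RTA_DATA(tb[IFLA_MTU - 1]);
}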
struct rtnetlink_link * rtnetlink_links[NPROTO];
static const int rtm_min[(RTM_MAX+1-RTM_BASE)/4] =
{
        NLMSG_LENGTH(sizeof(struct ifinfomsg)),
        NLMSG_LENGTH(sizeof(struct ifaddrmsg)),
        NLMSG_LENGTH(sizeof(struct rtmsg)),
        NLMSG_LENGTH(sizeof(struct ndmsg)),
        NLMSG_LENGTH(sizeof(struct rtmsg)),
        NLMSG_LENGTH(sizeof(struct tcmsg)),
        NLMSG_LENGTH(sizeof(struct tcmsg)),
        NLMSG_LENGTH(sizeof(struct tcmsg)),
        NLMSG_LENGTH(sizeof(struct tcamsg))
};
static const int rta_max[(RTM_MAX+1-RTM_BASE)/4] =
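/*
 * Illustrative sketch (assumption, mirroring the dispatch logic further
 * below): rtm_min[] and rta_max[] are indexed per message "family group",
 * obtained by dropping RTM_BASE and the two low bits (NEW/DEL/GET/SET) of
 * the RTM_* type. example_sz_idx() is a hypothetical helper name.
 */
static inline int example_sz_idx(int nlmsg_type)
{
        int type = nlmsg_type - RTM_BASE;

        return type >> 2;       /* e.g. RTM_NEWLINK..RTM_SETLINK all map to slot 0 */
}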
void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const void *data)
{
        struct rtattr *rta;
        int size = RTA_LENGTH(attrlen);

        rta = (struct rtattr*)skb_put(skb, RTA_ALIGN(size));
        rta->rta_type = attrtype;
        rta->rta_len = size;
        memcpy(RTA_DATA(rta), data, attrlen);
}
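/*
 * Illustrative usage sketch (not part of the original file): callers do not
 * normally invoke __rta_fill() directly; they use the RTA_PUT() macro, which
 * checks skb tailroom first and jumps to a local rtattr_failure label when
 * the buffer is full. The function name and the use of IFLA_IFNAME here are
 * example choices only.
 */
static int example_put_ifname(struct sk_buff *skb, struct net_device *dev)
{
        RTA_PUT(skb, IFLA_IFNAME, strlen(dev->name) + 1, dev->name);
        return 0;

rtattr_failure:
        return -ENOBUFS;
}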
int rtnetlink_send(struct sk_buff *skb, u32 pid, unsigned group, int echo)
{
        int err = 0;

        NETLINK_CB(skb).dst_groups = group;
        if (echo)
                atomic_inc(&skb->users);
        netlink_broadcast(rtnl, skb, pid, group, GFP_KERNEL);
        if (echo)
                err = netlink_unicast(rtnl, skb, pid, MSG_DONTWAIT);
        return err;
}
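/*
 * Illustrative usage sketch (assumption, modelled on how address-family
 * code notifies userspace): after building a notification skb, broadcast it
 * to the interested multicast group and echo it back to the sender only
 * when the original request asked for that with NLM_F_ECHO. The helper name
 * and group choice are examples only.
 */
static void example_notify(struct sk_buff *skb, struct nlmsghdr *nlh, u32 pid)
{
        rtnetlink_send(skb, pid, RTMGRP_IPV4_ROUTE, nlh->nlmsg_flags & NLM_F_ECHO);
}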
int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
{
        struct rtattr *mx = (struct rtattr*)skb->tail;
        int i;

        RTA_PUT(skb, RTA_METRICS, 0, NULL);
        for (i=0; i<RTAX_MAX; i++) {
                if (metrics[i])
                        RTA_PUT(skb, i+1, sizeof(u32), metrics+i);
        }
        mx->rta_len = skb->tail - (u8*)mx;
        if (mx->rta_len == RTA_LENGTH(0))
                skb_trim(skb, (u8*)mx - skb->data);

        return 0;

rtattr_failure:
        skb_trim(skb, (u8*)mx - skb->data);
        return -1;
}
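/*
 * Illustrative usage sketch (assumption): a routing dumper passes the
 * per-destination metrics array to rtnetlink_put_metrics() while filling an
 * RTM_NEWROUTE message, and bails out to its own rtattr_failure label if
 * the skb runs out of space. The helper name is hypothetical.
 */
static int example_put_route_metrics(struct sk_buff *skb, struct rtable *rt)
{
        if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0)
                goto rtattr_failure;
        return 0;

rtattr_failure:
        return -ENOBUFS;
}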
static int rtnetlink_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
                                 int type, u32 pid, u32 seq, u32 change)
{
        struct ifinfomsg *r;
        struct nlmsghdr *nlh;
        unsigned char *b = skb->tail;

        nlh = NLMSG_PUT(skb, pid, seq, type, sizeof(*r));
        if (pid) nlh->nlmsg_flags |= NLM_F_MULTI;
        r = NLMSG_DATA(nlh);
        r->ifi_family = AF_UNSPEC;
        r->ifi_type = dev->type;
        r->ifi_index = dev->ifindex;
        r->ifi_flags = dev_get_flags(dev);
        r->ifi_change = change;

        RTA_PUT(skb, IFLA_IFNAME, strlen(dev->name)+1, dev->name);

        {
                u32 txqlen = dev->tx_queue_len;
                RTA_PUT(skb, IFLA_TXQLEN, sizeof(txqlen), &txqlen);
        }

        {
                u32 weight = dev->weight;
                RTA_PUT(skb, IFLA_WEIGHT, sizeof(weight), &weight);
        }

        {
                struct rtnl_link_ifmap map = {
                        .mem_start = dev->mem_start,
                        .mem_end   = dev->mem_end,
                        .base_addr = dev->base_addr,
                        .irq       = dev->irq,
                        .dma       = dev->dma,
                        .port      = dev->if_port,
                };
                RTA_PUT(skb, IFLA_MAP, sizeof(map), &map);
        }

        if (dev->addr_len) {
                RTA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr);
                RTA_PUT(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast);
        }

        {
                u32 mtu = dev->mtu;
                RTA_PUT(skb, IFLA_MTU, sizeof(mtu), &mtu);
        }

        if (dev->ifindex != dev->iflink) {
                u32 iflink = dev->iflink;
                RTA_PUT(skb, IFLA_LINK, sizeof(iflink), &iflink);
        }

        if (dev->qdisc_sleeping)
                RTA_PUT(skb, IFLA_QDISC,
                        strlen(dev->qdisc_sleeping->ops->id) + 1,
                        dev->qdisc_sleeping->ops->id);

        if (dev->master) {
                u32 master = dev->master->ifindex;
                RTA_PUT(skb, IFLA_MASTER, sizeof(master), &master);
        }

        if (dev->get_stats) {
                unsigned long *stats = (unsigned long*)dev->get_stats(dev);
                if (stats) {
                        struct rtattr *a;
                        __u32 *s;
                        int i;
                        int n = sizeof(struct rtnl_link_stats)/4;

                        a = __RTA_PUT(skb, IFLA_STATS, n*4);
                        s = RTA_DATA(a);
                        /* Copy and narrow the device stats word by word. */
                        for (i = 0; i < n; i++)
                                s[i] = stats[i];
                }
        }
        nlh->nlmsg_len = skb->tail - b;
        return skb->len;

nlmsg_failure:
rtattr_failure:
        skb_trim(skb, b - skb->data);
        return -1;
}
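/*
 * Illustrative sketch (not part of the original file): fill routines built
 * on NLMSG_PUT()/RTA_PUT() all follow the pattern used above -- remember
 * skb->tail before starting, patch nlmsg_len once everything fits, and trim
 * the half-built message away again on overflow. example_fill() is a
 * hypothetical minimal version carrying one u32 in attribute type 1.
 */
static int example_fill(struct sk_buff *skb, u32 pid, u32 seq, int type, u32 value)
{
        struct nlmsghdr *nlh;
        struct rtgenmsg *g;
        unsigned char *b = skb->tail;

        nlh = NLMSG_PUT(skb, pid, seq, type, sizeof(*g));
        g = NLMSG_DATA(nlh);
        g->rtgen_family = AF_UNSPEC;

        RTA_PUT(skb, 1, sizeof(value), &value);

        nlh->nlmsg_len = skb->tail - b;
        return skb->len;

nlmsg_failure:
rtattr_failure:
        skb_trim(skb, b - skb->data);
        return -1;
}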
int rtnetlink_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
{
        int idx;
        int s_idx = cb->args[0];
        struct net_device *dev;

        read_lock(&dev_base_lock);
        for (dev=dev_base, idx=0; dev; dev = dev->next, idx++) {
                if (idx < s_idx)
                        continue;
                if (rtnetlink_fill_ifinfo(skb, dev, RTM_NEWLINK,
                                          NETLINK_CB(cb->skb).pid,
                                          cb->nlh->nlmsg_seq, 0) <= 0)
                        break;
        }
        read_unlock(&dev_base_lock);
        cb->args[0] = idx;

        return skb->len;
}
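/*
 * Illustrative sketch (not part of the original file): every dumpit
 * callback follows the continuation pattern of rtnetlink_dump_ifinfo()
 * above -- skip the cb->args[0] entries already sent, stop as soon as the
 * skb fills up, and record where to resume on the next pass driven by
 * netlink_dump_start(). EXAMPLE_COUNT and example_fill() (sketched earlier)
 * are hypothetical.
 */
#define EXAMPLE_COUNT 8

static int example_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
        int idx;
        int s_idx = cb->args[0];

        for (idx = s_idx; idx < EXAMPLE_COUNT; idx++) {
                if (example_fill(skb, NETLINK_CB(cb->skb).pid,
                                 cb->nlh->nlmsg_seq, RTM_BASE, idx) <= 0)
                        break;
        }
        cb->args[0] = idx;      /* resume point for the next pass */

        return skb->len;
}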
static int do_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
        struct ifinfomsg *ifm = NLMSG_DATA(nlh);
        struct rtattr **ida = arg;
        struct net_device *dev;
        int err, send_addr_notify = 0;

        dev = dev_get_by_index(ifm->ifi_index);
        if (!dev)
                return -ENODEV;

        err = -EINVAL;

        if (ifm->ifi_flags)
                dev_change_flags(dev, ifm->ifi_flags);

        if (ida[IFLA_MAP - 1]) {
                struct rtnl_link_ifmap *u_map;
                struct ifmap k_map;

                if (!dev->set_config) {
                        err = -EOPNOTSUPP;
                        goto out;
                }

                if (!netif_device_present(dev)) {
                        err = -ENODEV;
                        goto out;
                }

                if (ida[IFLA_MAP - 1]->rta_len != RTA_LENGTH(sizeof(*u_map)))
                        goto out;

                u_map = RTA_DATA(ida[IFLA_MAP - 1]);

                k_map.mem_start = (unsigned long) u_map->mem_start;
                k_map.mem_end = (unsigned long) u_map->mem_end;
                k_map.base_addr = (unsigned short) u_map->base_addr;
                k_map.irq = (unsigned char) u_map->irq;
                k_map.dma = (unsigned char) u_map->dma;
                k_map.port = (unsigned char) u_map->port;

                err = dev->set_config(dev, &k_map);
                if (err)
                        goto out;
        }

        if (ida[IFLA_ADDRESS - 1]) {
                if (!dev->set_mac_address) {
                        err = -EOPNOTSUPP;
                        goto out;
                }
                if (!netif_device_present(dev)) {
                        err = -ENODEV;
                        goto out;
                }
                if (ida[IFLA_ADDRESS - 1]->rta_len != RTA_LENGTH(dev->addr_len))
                        goto out;

                err = dev->set_mac_address(dev, RTA_DATA(ida[IFLA_ADDRESS - 1]));
                if (err)
                        goto out;
                send_addr_notify = 1;
        }

        if (ida[IFLA_BROADCAST - 1]) {
                if (ida[IFLA_BROADCAST - 1]->rta_len != RTA_LENGTH(dev->addr_len))
                        goto out;
                memcpy(dev->broadcast, RTA_DATA(ida[IFLA_BROADCAST - 1]),
                       dev->addr_len);
                send_addr_notify = 1;
        }

        if (ida[IFLA_MTU - 1]) {
                if (ida[IFLA_MTU - 1]->rta_len != RTA_LENGTH(sizeof(u32)))
                        goto out;
                err = dev_set_mtu(dev, *((u32 *) RTA_DATA(ida[IFLA_MTU - 1])));
                if (err)
                        goto out;
        }

        if (ida[IFLA_TXQLEN - 1]) {
                if (ida[IFLA_TXQLEN - 1]->rta_len != RTA_LENGTH(sizeof(u32)))
                        goto out;

                dev->tx_queue_len = *((u32 *) RTA_DATA(ida[IFLA_TXQLEN - 1]));
        }

        if (ida[IFLA_WEIGHT - 1]) {
                if (ida[IFLA_WEIGHT - 1]->rta_len != RTA_LENGTH(sizeof(u32)))
                        goto out;

                dev->weight = *((u32 *) RTA_DATA(ida[IFLA_WEIGHT - 1]));
        }

        if (ida[IFLA_IFNAME - 1]) {
                char ifname[IFNAMSIZ];

                if (ida[IFLA_IFNAME - 1]->rta_len > RTA_LENGTH(sizeof(ifname)))
                        goto out;

                memset(ifname, 0, sizeof(ifname));
                memcpy(ifname, RTA_DATA(ida[IFLA_IFNAME - 1]),
                       RTA_PAYLOAD(ida[IFLA_IFNAME - 1]));
                ifname[IFNAMSIZ - 1] = '\0';

                err = dev_change_name(dev, ifname);
                if (err)
                        goto out;
        }

        err = 0;

out:
        if (send_addr_notify)
                call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);

        dev_put(dev);
        return err;
}
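/*
 * Illustrative sketch (not part of the original file): the RTM_SETLINK
 * requests that reach do_setlink() are laid out as an nlmsghdr followed by
 * an ifinfomsg and a run of rtattrs. The builder below assembles such a
 * request that changes the MTU of one interface; the function name, buffer
 * handling and flag choices are example assumptions.
 */
static int example_build_setlink(void *buf, int bufsize, int ifindex, u32 mtu)
{
        struct nlmsghdr *nlh = buf;
        struct ifinfomsg *ifm;
        struct rtattr *rta;

        if (bufsize < NLMSG_SPACE(sizeof(*ifm)) + RTA_SPACE(sizeof(mtu)))
                return -EMSGSIZE;

        memset(buf, 0, bufsize);
        nlh->nlmsg_type = RTM_SETLINK;
        nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
        nlh->nlmsg_len = NLMSG_LENGTH(sizeof(*ifm));

        ifm = NLMSG_DATA(nlh);
        ifm->ifi_family = AF_UNSPEC;
        ifm->ifi_index = ifindex;

        /* Append a single IFLA_MTU attribute right after the header. */
        rta = (struct rtattr *)((char *)nlh + NLMSG_ALIGN(nlh->nlmsg_len));
        rta->rta_type = IFLA_MTU;
        rta->rta_len = RTA_LENGTH(sizeof(mtu));
        memcpy(RTA_DATA(rta), &mtu, sizeof(mtu));
        nlh->nlmsg_len = NLMSG_ALIGN(nlh->nlmsg_len) + RTA_LENGTH(sizeof(mtu));

        return nlh->nlmsg_len;
}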
static int rtnetlink_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
{
        int idx;
        int s_idx = cb->family;

        if (s_idx == 0)
                s_idx = 1;
        for (idx=1; idx<NPROTO; idx++) {
                int type = cb->nlh->nlmsg_type-RTM_BASE;
                if (idx < s_idx || idx == PF_PACKET)
                        continue;
                if (rtnetlink_links[idx] == NULL ||
                    rtnetlink_links[idx][type].dumpit == NULL)
                        continue;
                if (idx > s_idx)
                        memset(&cb->args[0], 0, sizeof(cb->args));
                if (rtnetlink_links[idx][type].dumpit(skb, cb))
                        break;
        }
        cb->family = idx;

        return skb->len;
}
void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change)
{
        struct sk_buff *skb;
        int size = NLMSG_SPACE(sizeof(struct ifinfomsg) +
                               sizeof(struct rtnl_link_ifmap) +
                               sizeof(struct rtnl_link_stats) + 128);

        skb = alloc_skb(size, GFP_KERNEL);
        if (!skb)
                return;

        if (rtnetlink_fill_ifinfo(skb, dev, type, 0, 0, change) < 0) {
                kfree_skb(skb);
                return;
        }
        NETLINK_CB(skb).dst_groups = RTMGRP_LINK;
        netlink_broadcast(rtnl, skb, 0, RTMGRP_LINK, GFP_KERNEL);
}
static int rtnetlink_done(struct netlink_callback *cb)
{
        return 0;
}
/* Protected by RTNL semaphore. */
static struct rtattr **rta_buf;
static int rtattr_max;
/* Process one rtnetlink message. */

static __inline__ int
rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *errp)
{
        struct rtnetlink_link *link;
        struct rtnetlink_link *link_tab;
        int sz_idx, kind;
        int min_len;
        int family;
        int type;
        int err;
        /* Only requests are handled by the kernel now. */
        if (!(nlh->nlmsg_flags&NLM_F_REQUEST))
                return 0;

        type = nlh->nlmsg_type;
        /* A control message: ignore it. */
        if (type < RTM_BASE)
                return 0;

        /* Unknown message: reply with EINVAL. */
        if (type > RTM_MAX)
                goto err_inval;

        type -= RTM_BASE;

        /* All the messages must have at least 1 byte length. */
        if (nlh->nlmsg_len < NLMSG_LENGTH(sizeof(struct rtgenmsg)))
                return 0;

        family = ((struct rtgenmsg*)NLMSG_DATA(nlh))->rtgen_family;
        if (family >= NPROTO) {
                *errp = -EAFNOSUPPORT;
                return -1;
        }

        link_tab = rtnetlink_links[family];
        if (link_tab == NULL)
                link_tab = rtnetlink_links[PF_UNSPEC];
        link = &link_tab[type];

        sz_idx = type>>2;
        kind = type&3;
        if (kind != 2 && security_netlink_recv(skb)) {
                *errp = -EPERM;
                return -1;
        }

        if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) {
                u32 rlen;

                if (link->dumpit == NULL)
                        link = &(rtnetlink_links[PF_UNSPEC][type]);

                if (link->dumpit == NULL)
                        goto err_inval;

                if ((*errp = netlink_dump_start(rtnl, skb, nlh,
                                                link->dumpit,
                                                rtnetlink_done)) != 0) {
                        return -1;
                }
                rlen = NLMSG_ALIGN(nlh->nlmsg_len);
                if (rlen > skb->len)
                        rlen = skb->len;
                skb_pull(skb, rlen);
                return -1;
        }
        memset(rta_buf, 0, (rtattr_max * sizeof(struct rtattr *)));

        min_len = rtm_min[sz_idx];
        if (nlh->nlmsg_len < min_len)
                goto err_inval;

        if (nlh->nlmsg_len > min_len) {
                int attrlen = nlh->nlmsg_len - NLMSG_ALIGN(min_len);
                struct rtattr *attr = (void*)nlh + NLMSG_ALIGN(min_len);

                while (RTA_OK(attr, attrlen)) {
                        unsigned flavor = attr->rta_type;
                        if (flavor) {
                                if (flavor > rta_max[sz_idx])
                                        goto err_inval;
                                rta_buf[flavor-1] = attr;
                        }
                        attr = RTA_NEXT(attr, attrlen);
                }
        }
        if (link->doit == NULL)
                link = &(rtnetlink_links[PF_UNSPEC][type]);
        if (link->doit == NULL)
                goto err_inval;
        err = link->doit(skb, nlh, (void *)&rta_buf[0]);

        *errp = err;
        return err;

err_inval:
        *errp = -EINVAL;
        return -1;
}
/*
 * Process one packet of messages.
 * Malformed skbs with wrong message lengths are discarded silently.
 */

static inline int rtnetlink_rcv_skb(struct sk_buff *skb)
{
        int err;
        struct nlmsghdr * nlh;

        while (skb->len >= NLMSG_SPACE(0)) {
                u32 rlen;

                nlh = (struct nlmsghdr *)skb->data;
                if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len)
                        return 0;
                rlen = NLMSG_ALIGN(nlh->nlmsg_len);
                if (rlen > skb->len)
                        rlen = skb->len;
                if (rtnetlink_rcv_msg(skb, nlh, &err)) {
                        /* Not an error, but we must interrupt processing
                         * here. Note that in this case we do not pull the
                         * message from the skb; it will be processed later.
                         */
                        if (err == 0)
                                return -1;
                        netlink_ack(skb, nlh, err);
                } else if (nlh->nlmsg_flags&NLM_F_ACK)
                        netlink_ack(skb, nlh, 0);
                skb_pull(skb, rlen);
        }

        return 0;
}
/*
 * rtnetlink input queue processing routine:
 *	- try to acquire the shared lock. If that fails, defer processing.
 *	- feed skbs to rtnetlink_rcv_skb until it refuses a message, which
 *	  happens when a dump has started and/or acquisition of the
 *	  exclusive lock failed.
 */

static void rtnetlink_rcv(struct sock *sk, int len)
{
        do {
                struct sk_buff *skb;

                if (rtnl_shlock_nowait())
                        return;

                while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
                        if (rtnetlink_rcv_skb(skb)) {
                                if (skb->len)
                                        skb_queue_head(&sk->sk_receive_queue,
                                                       skb);
                                else
                                        kfree_skb(skb);
                                break;
                        }
                        kfree_skb(skb);
                }

                up(&rtnl_sem);

                netdev_run_todo();
        } while (rtnl && rtnl->sk_receive_queue.qlen);
}
static struct rtnetlink_link link_rtnetlink_table[RTM_MAX-RTM_BASE+1] =
{
        [RTM_GETLINK  - RTM_BASE] = { .dumpit = rtnetlink_dump_ifinfo },
        [RTM_SETLINK  - RTM_BASE] = { .doit   = do_setlink             },
        [RTM_GETADDR  - RTM_BASE] = { .dumpit = rtnetlink_dump_all     },
        [RTM_GETROUTE - RTM_BASE] = { .dumpit = rtnetlink_dump_all     },
        [RTM_NEWNEIGH - RTM_BASE] = { .doit   = neigh_add              },
        [RTM_DELNEIGH - RTM_BASE] = { .doit   = neigh_delete           },
        [RTM_GETNEIGH - RTM_BASE] = { .dumpit = neigh_dump_info        }
};
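/*
 * Illustrative sketch (assumption, mirroring what the per-address-family
 * code does at init time): a protocol publishes its own handler table by
 * filling its slot in rtnetlink_links[]; message types it leaves NULL fall
 * back to the PF_UNSPEC table above. The table and function names below are
 * hypothetical; a real family would point the entries at its own
 * doit/dumpit handlers.
 */
static struct rtnetlink_link example_af_rtnetlink_table[RTM_MAX - RTM_BASE + 1];

static void __init example_af_init(void)
{
        rtnetlink_links[PF_INET] = example_af_rtnetlink_table;
}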
static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr)
{
        struct net_device *dev = ptr;

        switch (event) {
        case NETDEV_UNREGISTER:
                rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
                break;
        case NETDEV_REGISTER:
                rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
                break;
        case NETDEV_UP:
        case NETDEV_DOWN:
                rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
                break;
        case NETDEV_CHANGE:
        case NETDEV_GOING_DOWN:
                break;
        default:
                rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
                break;
        }
        return NOTIFY_DONE;
}
static struct notifier_block rtnetlink_dev_notifier = {
        .notifier_call  = rtnetlink_event,
};
void __init rtnetlink_init(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(rta_max); i++)
                if (rta_max[i] > rtattr_max)
                        rtattr_max = rta_max[i];
        rta_buf = kmalloc(rtattr_max * sizeof(struct rtattr *), GFP_KERNEL);
        if (!rta_buf)
                panic("rtnetlink_init: cannot allocate rta_buf\n");

        rtnl = netlink_kernel_create(NETLINK_ROUTE, rtnetlink_rcv);
        if (rtnl == NULL)
                panic("rtnetlink_init: cannot initialize rtnetlink\n");
        netlink_set_nonroot(NETLINK_ROUTE, NL_NONROOT_RECV);
        register_netdevice_notifier(&rtnetlink_dev_notifier);
        rtnetlink_links[PF_UNSPEC] = link_rtnetlink_table;
        rtnetlink_links[PF_PACKET] = link_rtnetlink_table;
}
EXPORT_SYMBOL(__rta_fill);
EXPORT_SYMBOL(rtattr_parse);
EXPORT_SYMBOL(rtnetlink_dump_ifinfo);
EXPORT_SYMBOL(rtnetlink_links);
EXPORT_SYMBOL(rtnetlink_put_metrics);
EXPORT_SYMBOL(rtnl_lock);
EXPORT_SYMBOL(rtnl_sem);
EXPORT_SYMBOL(rtnl_unlock);
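/*
 * Illustrative usage sketch (not part of the original file): code in other
 * modules relies on the exported rtnl_lock()/rtnl_unlock() pair to
 * serialize with rtnetlink operations while it touches device state. The
 * helper name and the new name template are hypothetical.
 */
static int example_rename(struct net_device *dev)
{
        char newname[IFNAMSIZ] = "example%d";
        int err;

        rtnl_lock();
        err = dev_change_name(dev, newname);
        rtnl_unlock();

        return err;
}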