net/core/rtnetlink.c

/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Routing netlink socket interface: protocol independent part.
 *
 * Authors:     Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 *      Fixes:
 *      Vitaly E. Lavrov                RTA_OK arithmetics was wrong.
 */

#include <linux/config.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/security.h>
#include <linux/mutex.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/string.h>

#include <linux/inet.h>
#include <linux/netdevice.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/udp.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <net/netlink.h>

static DEFINE_MUTEX(rtnl_mutex);
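
/*
 * rtnl_lock()/rtnl_unlock() serialize all rtnetlink configuration work
 * behind the global rtnl_mutex.  rtnl_unlock() additionally kicks the
 * rtnetlink socket if messages queued up while the lock was held and
 * flushes deferred netdevice unregistration via netdev_run_todo().
 */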
void rtnl_lock(void)
{
        mutex_lock(&rtnl_mutex);
}

void __rtnl_unlock(void)
{
        mutex_unlock(&rtnl_mutex);
}

void rtnl_unlock(void)
{
        mutex_unlock(&rtnl_mutex);
        if (rtnl && rtnl->sk_receive_queue.qlen)
                rtnl->sk_data_ready(rtnl, 0);
        netdev_run_todo();
}

int rtnl_trylock(void)
{
        return mutex_trylock(&rtnl_mutex);
}
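
/*
 * rtattr_parse - index a stream of struct rtattr by attribute type.
 * Types 1..maxattr are stored at tb[type - 1]; zero or out-of-range
 * types are skipped.  Illustrative caller sketch (hypothetical, not
 * part of this file):
 *
 *      struct rtattr *tb[IFLA_MAX];
 *      struct ifinfomsg *ifm = NLMSG_DATA(nlh);
 *
 *      rtattr_parse(tb, IFLA_MAX, IFLA_RTA(ifm), IFLA_PAYLOAD(nlh));
 *      if (tb[IFLA_MTU - 1])
 *              u32 mtu = *(u32 *) RTA_DATA(tb[IFLA_MTU - 1]);
 */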
int rtattr_parse(struct rtattr *tb[], int maxattr, struct rtattr *rta, int len)
{
        memset(tb, 0, sizeof(struct rtattr *) * maxattr);

        while (RTA_OK(rta, len)) {
                unsigned flavor = rta->rta_type;
                if (flavor && flavor <= maxattr)
                        tb[flavor-1] = rta;
                rta = RTA_NEXT(rta, len);
        }
        return 0;
}

struct sock *rtnl;

struct rtnetlink_link *rtnetlink_links[NPROTO];

static const int rtm_min[RTM_NR_FAMILIES] =
{
        [RTM_FAM(RTM_NEWLINK)]      = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
        [RTM_FAM(RTM_NEWADDR)]      = NLMSG_LENGTH(sizeof(struct ifaddrmsg)),
        [RTM_FAM(RTM_NEWROUTE)]     = NLMSG_LENGTH(sizeof(struct rtmsg)),
        [RTM_FAM(RTM_NEWNEIGH)]     = NLMSG_LENGTH(sizeof(struct ndmsg)),
        [RTM_FAM(RTM_NEWRULE)]      = NLMSG_LENGTH(sizeof(struct rtmsg)),
        [RTM_FAM(RTM_NEWQDISC)]     = NLMSG_LENGTH(sizeof(struct tcmsg)),
        [RTM_FAM(RTM_NEWTCLASS)]    = NLMSG_LENGTH(sizeof(struct tcmsg)),
        [RTM_FAM(RTM_NEWTFILTER)]   = NLMSG_LENGTH(sizeof(struct tcmsg)),
        [RTM_FAM(RTM_NEWACTION)]    = NLMSG_LENGTH(sizeof(struct tcamsg)),
        [RTM_FAM(RTM_NEWPREFIX)]    = NLMSG_LENGTH(sizeof(struct rtgenmsg)),
        [RTM_FAM(RTM_GETMULTICAST)] = NLMSG_LENGTH(sizeof(struct rtgenmsg)),
        [RTM_FAM(RTM_GETANYCAST)]   = NLMSG_LENGTH(sizeof(struct rtgenmsg)),
        [RTM_FAM(RTM_NEWNEIGHTBL)]  = NLMSG_LENGTH(sizeof(struct ndtmsg)),
};

static const int rta_max[RTM_NR_FAMILIES] =
{
        [RTM_FAM(RTM_NEWLINK)]      = IFLA_MAX,
        [RTM_FAM(RTM_NEWADDR)]      = IFA_MAX,
        [RTM_FAM(RTM_NEWROUTE)]     = RTA_MAX,
        [RTM_FAM(RTM_NEWNEIGH)]     = NDA_MAX,
        [RTM_FAM(RTM_NEWRULE)]      = RTA_MAX,
        [RTM_FAM(RTM_NEWQDISC)]     = TCA_MAX,
        [RTM_FAM(RTM_NEWTCLASS)]    = TCA_MAX,
        [RTM_FAM(RTM_NEWTFILTER)]   = TCA_MAX,
        [RTM_FAM(RTM_NEWACTION)]    = TCAA_MAX,
        [RTM_FAM(RTM_NEWNEIGHTBL)]  = NDTA_MAX,
};
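
/*
 * __rta_fill - append one attribute (type, length, payload) to an skb,
 * zero-padding up to RTA_ALIGN().  Callers are expected to have checked
 * tailroom first, as the RTA_PUT() wrappers do.
 */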
void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const void *data)
{
        struct rtattr *rta;
        int size = RTA_LENGTH(attrlen);

        rta = (struct rtattr *) skb_put(skb, RTA_ALIGN(size));
        rta->rta_type = attrtype;
        rta->rta_len = size;
        memcpy(RTA_DATA(rta), data, attrlen);
        memset(RTA_DATA(rta) + attrlen, 0, RTA_ALIGN(size) - size);
}
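
/*
 * rtattr_strlcpy - copy a string attribute into a fixed-size buffer with
 * strlcpy() semantics: the destination is always NUL-terminated and the
 * source string length is returned, so callers detect truncation by
 * comparing the result against the buffer size.
 */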
size_t rtattr_strlcpy(char *dest, const struct rtattr *rta, size_t size)
{
        size_t ret = RTA_PAYLOAD(rta);
        char *src = RTA_DATA(rta);

        if (ret > 0 && src[ret - 1] == '\0')
                ret--;
        if (size > 0) {
                size_t len = (ret >= size) ? size - 1 : ret;
                memset(dest, 0, size);
                memcpy(dest, src, len);
        }
        return ret;
}
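
/*
 * rtnetlink_send - multicast an rtnetlink message to @group; if @echo is
 * set, the message is also unicast back to the requesting @pid and that
 * unicast status is the return value.
 */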
int rtnetlink_send(struct sk_buff *skb, u32 pid, unsigned group, int echo)
{
        int err = 0;

        NETLINK_CB(skb).dst_group = group;
        if (echo)
                atomic_inc(&skb->users);
        netlink_broadcast(rtnl, skb, pid, group, GFP_KERNEL);
        if (echo)
                err = netlink_unicast(rtnl, skb, pid, MSG_DONTWAIT);
        return err;
}
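
/*
 * rtnetlink_put_metrics - emit a nested RTA_METRICS attribute holding one
 * u32 per nonzero routing metric.  An empty nest is trimmed away again;
 * running out of skb space rolls the message back and returns -1.
 */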
int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
{
        struct rtattr *mx = (struct rtattr *) skb->tail;
        int i;

        RTA_PUT(skb, RTA_METRICS, 0, NULL);
        for (i = 0; i < RTAX_MAX; i++) {
                if (metrics[i])
                        RTA_PUT(skb, i+1, sizeof(u32), metrics+i);
        }
        mx->rta_len = skb->tail - (u8 *) mx;
        if (mx->rta_len == RTA_LENGTH(0))
                skb_trim(skb, (u8 *) mx - skb->data);
        return 0;

rtattr_failure:
        skb_trim(skb, (u8 *) mx - skb->data);
        return -1;
}
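
/*
 * set_operstate - apply an IF_OPER_* transition to dev->operstate under
 * dev_base_lock and announce it via netdev_state_change() if the value
 * actually changed.
 */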
static void set_operstate(struct net_device *dev, unsigned char transition)
{
        unsigned char operstate = dev->operstate;

        switch (transition) {
        case IF_OPER_UP:
                if ((operstate == IF_OPER_DORMANT ||
                     operstate == IF_OPER_UNKNOWN) &&
                    !netif_dormant(dev))
                        operstate = IF_OPER_UP;
                break;

        case IF_OPER_DORMANT:
                if (operstate == IF_OPER_UP ||
                    operstate == IF_OPER_UNKNOWN)
                        operstate = IF_OPER_DORMANT;
                break;
        }

        if (dev->operstate != operstate) {
                write_lock_bh(&dev_base_lock);
                dev->operstate = operstate;
                write_unlock_bh(&dev_base_lock);
                netdev_state_change(dev);
        }
}
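
/*
 * rtnetlink_fill_ifinfo - build one ifinfomsg-based message (RTM_NEWLINK
 * and friends) for @dev, attaching the IFLA_* attributes: name, queue
 * length, operstate/link mode, hardware map, addresses, MTU, qdisc,
 * master device and link statistics.  Returns skb->len on success, or -1
 * with the partial message trimmed if the skb ran out of room.
 */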
static int rtnetlink_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
                                 int type, u32 pid, u32 seq, u32 change,
                                 unsigned int flags)
{
        struct ifinfomsg *r;
        struct nlmsghdr *nlh;
        unsigned char *b = skb->tail;

        nlh = NLMSG_NEW(skb, pid, seq, type, sizeof(*r), flags);
        r = NLMSG_DATA(nlh);
        r->ifi_family = AF_UNSPEC;
        r->__ifi_pad = 0;
        r->ifi_type = dev->type;
        r->ifi_index = dev->ifindex;
        r->ifi_flags = dev_get_flags(dev);
        r->ifi_change = change;

        RTA_PUT(skb, IFLA_IFNAME, strlen(dev->name)+1, dev->name);

        if (1) {
                u32 txqlen = dev->tx_queue_len;
                RTA_PUT(skb, IFLA_TXQLEN, sizeof(txqlen), &txqlen);
        }

        if (1) {
                u32 weight = dev->weight;
                RTA_PUT(skb, IFLA_WEIGHT, sizeof(weight), &weight);
        }

        if (1) {
                u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
                u8 link_mode = dev->link_mode;
                RTA_PUT(skb, IFLA_OPERSTATE, sizeof(operstate), &operstate);
                RTA_PUT(skb, IFLA_LINKMODE, sizeof(link_mode), &link_mode);
        }

        if (1) {
                struct rtnl_link_ifmap map = {
                        .mem_start = dev->mem_start,
                        .mem_end   = dev->mem_end,
                        .base_addr = dev->base_addr,
                        .irq       = dev->irq,
                        .dma       = dev->dma,
                        .port      = dev->if_port,
                };
                RTA_PUT(skb, IFLA_MAP, sizeof(map), &map);
        }

        if (dev->addr_len) {
                RTA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr);
                RTA_PUT(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast);
        }

        if (1) {
                u32 mtu = dev->mtu;
                RTA_PUT(skb, IFLA_MTU, sizeof(mtu), &mtu);
        }

        if (dev->ifindex != dev->iflink) {
                u32 iflink = dev->iflink;
                RTA_PUT(skb, IFLA_LINK, sizeof(iflink), &iflink);
        }

        if (dev->qdisc_sleeping)
                RTA_PUT(skb, IFLA_QDISC,
                        strlen(dev->qdisc_sleeping->ops->id) + 1,
                        dev->qdisc_sleeping->ops->id);

        if (dev->master) {
                u32 master = dev->master->ifindex;
                RTA_PUT(skb, IFLA_MASTER, sizeof(master), &master);
        }

        if (dev->get_stats) {
                unsigned long *stats = (unsigned long *) dev->get_stats(dev);
                if (stats) {
                        struct rtattr *a;
                        __u32 *s;
                        int i;
                        int n = sizeof(struct rtnl_link_stats) / 4;

                        a = __RTA_PUT(skb, IFLA_STATS, n*4);
                        s = RTA_DATA(a);
                        for (i = 0; i < n; i++)
                                s[i] = stats[i];
                }
        }
        nlh->nlmsg_len = skb->tail - b;
        return skb->len;

nlmsg_failure:
rtattr_failure:
        skb_trim(skb, b - skb->data);
        return -1;
}
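
/*
 * rtnetlink_dump_ifinfo - dump every registered net_device as an
 * RTM_NEWLINK message.  cb->args[0] records how far the previous pass
 * got, so a dump that overflows one skb resumes at the right device.
 */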
static int rtnetlink_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
{
        int idx;
        int s_idx = cb->args[0];
        struct net_device *dev;

        read_lock(&dev_base_lock);
        for (dev = dev_base, idx = 0; dev; dev = dev->next, idx++) {
                if (idx < s_idx)
                        continue;
                if (rtnetlink_fill_ifinfo(skb, dev, RTM_NEWLINK,
                                          NETLINK_CB(cb->skb).pid,
                                          cb->nlh->nlmsg_seq, 0,
                                          NLM_F_MULTI) <= 0)
                        break;
        }
        read_unlock(&dev_base_lock);
        cb->args[0] = idx;

        return skb->len;
}
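
/*
 * do_setlink - RTM_SETLINK handler.  The target device is found by
 * ifi_index or by the IFLA_IFNAME attribute; each attribute supplied by
 * userspace (flags, hardware map, MAC and broadcast address, MTU, queue
 * length, weight, operstate, link mode, new name) is validated and
 * applied in turn, bailing out on the first failure.
 */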
static int do_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
        struct ifinfomsg *ifm = NLMSG_DATA(nlh);
        struct rtattr **ida = arg;
        struct net_device *dev;
        int err, send_addr_notify = 0;

        if (ifm->ifi_index >= 0)
                dev = dev_get_by_index(ifm->ifi_index);
        else if (ida[IFLA_IFNAME - 1]) {
                char ifname[IFNAMSIZ];

                if (rtattr_strlcpy(ifname, ida[IFLA_IFNAME - 1],
                                   IFNAMSIZ) >= IFNAMSIZ)
                        return -EINVAL;
                dev = dev_get_by_name(ifname);
        } else
                return -EINVAL;

        if (!dev)
                return -ENODEV;

        err = -EINVAL;

        if (ifm->ifi_flags)
                dev_change_flags(dev, ifm->ifi_flags);

        if (ida[IFLA_MAP - 1]) {
                struct rtnl_link_ifmap *u_map;
                struct ifmap k_map;

                if (!dev->set_config) {
                        err = -EOPNOTSUPP;
                        goto out;
                }

                if (!netif_device_present(dev)) {
                        err = -ENODEV;
                        goto out;
                }

                if (ida[IFLA_MAP - 1]->rta_len != RTA_LENGTH(sizeof(*u_map)))
                        goto out;

                u_map = RTA_DATA(ida[IFLA_MAP - 1]);

                k_map.mem_start = (unsigned long) u_map->mem_start;
                k_map.mem_end = (unsigned long) u_map->mem_end;
                k_map.base_addr = (unsigned short) u_map->base_addr;
                k_map.irq = (unsigned char) u_map->irq;
                k_map.dma = (unsigned char) u_map->dma;
                k_map.port = (unsigned char) u_map->port;

                err = dev->set_config(dev, &k_map);

                if (err)
                        goto out;
        }

        if (ida[IFLA_ADDRESS - 1]) {
                if (!dev->set_mac_address) {
                        err = -EOPNOTSUPP;
                        goto out;
                }
                if (!netif_device_present(dev)) {
                        err = -ENODEV;
                        goto out;
                }
                if (ida[IFLA_ADDRESS - 1]->rta_len != RTA_LENGTH(dev->addr_len))
                        goto out;

                err = dev->set_mac_address(dev, RTA_DATA(ida[IFLA_ADDRESS - 1]));
                if (err)
                        goto out;
                send_addr_notify = 1;
        }

        if (ida[IFLA_BROADCAST - 1]) {
                if (ida[IFLA_BROADCAST - 1]->rta_len != RTA_LENGTH(dev->addr_len))
                        goto out;
                memcpy(dev->broadcast, RTA_DATA(ida[IFLA_BROADCAST - 1]),
                       dev->addr_len);
                send_addr_notify = 1;
        }

        if (ida[IFLA_MTU - 1]) {
                if (ida[IFLA_MTU - 1]->rta_len != RTA_LENGTH(sizeof(u32)))
                        goto out;
                err = dev_set_mtu(dev, *((u32 *) RTA_DATA(ida[IFLA_MTU - 1])));

                if (err)
                        goto out;
        }

        if (ida[IFLA_TXQLEN - 1]) {
                if (ida[IFLA_TXQLEN - 1]->rta_len != RTA_LENGTH(sizeof(u32)))
                        goto out;

                dev->tx_queue_len = *((u32 *) RTA_DATA(ida[IFLA_TXQLEN - 1]));
        }

        if (ida[IFLA_WEIGHT - 1]) {
                if (ida[IFLA_WEIGHT - 1]->rta_len != RTA_LENGTH(sizeof(u32)))
                        goto out;

                dev->weight = *((u32 *) RTA_DATA(ida[IFLA_WEIGHT - 1]));
        }

        if (ida[IFLA_OPERSTATE - 1]) {
                if (ida[IFLA_OPERSTATE - 1]->rta_len != RTA_LENGTH(sizeof(u8)))
                        goto out;

                set_operstate(dev, *((u8 *) RTA_DATA(ida[IFLA_OPERSTATE - 1])));
        }

        if (ida[IFLA_LINKMODE - 1]) {
                if (ida[IFLA_LINKMODE - 1]->rta_len != RTA_LENGTH(sizeof(u8)))
                        goto out;

                write_lock_bh(&dev_base_lock);
                dev->link_mode = *((u8 *) RTA_DATA(ida[IFLA_LINKMODE - 1]));
                write_unlock_bh(&dev_base_lock);
        }

        if (ifm->ifi_index >= 0 && ida[IFLA_IFNAME - 1]) {
                char ifname[IFNAMSIZ];

                if (rtattr_strlcpy(ifname, ida[IFLA_IFNAME - 1],
                                   IFNAMSIZ) >= IFNAMSIZ)
                        goto out;
                err = dev_change_name(dev, ifname);
                if (err)
                        goto out;
        }

        err = 0;

out:
        if (send_addr_notify)
                call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);

        dev_put(dev);
        return err;
}
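
/*
 * rtnetlink_dump_all - generic dump for message types such as RTM_GETADDR
 * and RTM_GETROUTE: walk all protocol families and call each family's
 * dumpit handler for this message type, using cb->family to resume where
 * the previous skb filled up.
 */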
static int rtnetlink_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
{
        int idx;
        int s_idx = cb->family;

        if (s_idx == 0)
                s_idx = 1;
        for (idx = 1; idx < NPROTO; idx++) {
                int type = cb->nlh->nlmsg_type - RTM_BASE;
                if (idx < s_idx || idx == PF_PACKET)
                        continue;
                if (rtnetlink_links[idx] == NULL ||
                    rtnetlink_links[idx][type].dumpit == NULL)
                        continue;
                if (idx > s_idx)
                        memset(&cb->args[0], 0, sizeof(cb->args));
                if (rtnetlink_links[idx][type].dumpit(skb, cb))
                        break;
        }
        cb->family = idx;

        return skb->len;
}
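
/*
 * rtmsg_ifinfo - allocate, fill and broadcast a link notification to the
 * RTNLGRP_LINK multicast group, e.g. from the netdevice notifier below
 * when an interface is registered, brought up or taken down.
 */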
void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change)
{
        struct sk_buff *skb;
        int size = NLMSG_SPACE(sizeof(struct ifinfomsg) +
                               sizeof(struct rtnl_link_ifmap) +
                               sizeof(struct rtnl_link_stats) + 128);

        skb = alloc_skb(size, GFP_KERNEL);
        if (!skb)
                return;

        if (rtnetlink_fill_ifinfo(skb, dev, type, 0, 0, change, 0) < 0) {
                kfree_skb(skb);
                return;
        }
        NETLINK_CB(skb).dst_group = RTNLGRP_LINK;
        netlink_broadcast(rtnl, skb, 0, RTNLGRP_LINK, GFP_KERNEL);
}

/* Protected by the RTNL semaphore. */
static struct rtattr **rta_buf;
static int rtattr_max;

/* Process one rtnetlink message. */

static __inline__ int
rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *errp)
{
        struct rtnetlink_link *link;
        struct rtnetlink_link *link_tab;
        int sz_idx, kind;
        int min_len;
        int family;
        int type;
        int err;

        /* Only requests are handled by the kernel now. */
        if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
                return 0;

        type = nlh->nlmsg_type;

        /* Control messages are ignored. */
        if (type < RTM_BASE)
                return 0;

        /* Unknown message: reply with EINVAL. */
        if (type > RTM_MAX)
                goto err_inval;

        type -= RTM_BASE;

        /* All messages must carry at least a struct rtgenmsg. */
        if (nlh->nlmsg_len < NLMSG_LENGTH(sizeof(struct rtgenmsg)))
                return 0;

        family = ((struct rtgenmsg *) NLMSG_DATA(nlh))->rtgen_family;
        if (family >= NPROTO) {
                *errp = -EAFNOSUPPORT;
                return -1;
        }

        link_tab = rtnetlink_links[family];
        if (link_tab == NULL)
                link_tab = rtnetlink_links[PF_UNSPEC];
        link = &link_tab[type];

        sz_idx = type >> 2;
        kind = type & 3;

        if (kind != 2 && security_netlink_recv(skb)) {
                *errp = -EPERM;
                return -1;
        }

        if (kind == 2 && nlh->nlmsg_flags & NLM_F_DUMP) {
                if (link->dumpit == NULL)
                        link = &(rtnetlink_links[PF_UNSPEC][type]);

                if (link->dumpit == NULL)
                        goto err_inval;

                if ((*errp = netlink_dump_start(rtnl, skb, nlh,
                                                link->dumpit, NULL)) != 0) {
                        return -1;
                }

                netlink_queue_skip(nlh, skb);
                return -1;
        }

        memset(rta_buf, 0, (rtattr_max * sizeof(struct rtattr *)));

        min_len = rtm_min[sz_idx];
        if (nlh->nlmsg_len < min_len)
                goto err_inval;

        if (nlh->nlmsg_len > min_len) {
                int attrlen = nlh->nlmsg_len - NLMSG_ALIGN(min_len);
                struct rtattr *attr = (void *) nlh + NLMSG_ALIGN(min_len);

                while (RTA_OK(attr, attrlen)) {
                        unsigned flavor = attr->rta_type;
                        if (flavor) {
                                if (flavor > rta_max[sz_idx])
                                        goto err_inval;
                                rta_buf[flavor-1] = attr;
                        }
                        attr = RTA_NEXT(attr, attrlen);
                }
        }

        if (link->doit == NULL)
                link = &(rtnetlink_links[PF_UNSPEC][type]);
        if (link->doit == NULL)
                goto err_inval;
        err = link->doit(skb, nlh, (void *)&rta_buf[0]);

        *errp = err;
        return err;

err_inval:
        *errp = -EINVAL;
        return -1;
}
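
/*
 * rtnetlink_rcv - data-ready callback of the NETLINK_ROUTE kernel socket.
 * The queue is drained under rtnl_mutex so that message handlers always
 * run with the RTNL lock held; deferred netdevice cleanup is flushed
 * between passes via netdev_run_todo().
 */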
static void rtnetlink_rcv(struct sock *sk, int len)
{
        unsigned int qlen = 0;

        do {
                mutex_lock(&rtnl_mutex);
                netlink_run_queue(sk, &qlen, &rtnetlink_rcv_msg);
                mutex_unlock(&rtnl_mutex);

                netdev_run_todo();
        } while (qlen);
}
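
/*
 * Default message-type to handler mapping, installed for PF_UNSPEC and
 * PF_PACKET by rtnetlink_init().  Other protocol families register their
 * own tables in rtnetlink_links[].
 */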
static struct rtnetlink_link link_rtnetlink_table[RTM_NR_MSGTYPES] =
{
        [RTM_GETLINK     - RTM_BASE] = { .dumpit = rtnetlink_dump_ifinfo },
        [RTM_SETLINK     - RTM_BASE] = { .doit   = do_setlink            },
        [RTM_GETADDR     - RTM_BASE] = { .dumpit = rtnetlink_dump_all    },
        [RTM_GETROUTE    - RTM_BASE] = { .dumpit = rtnetlink_dump_all    },
        [RTM_NEWNEIGH    - RTM_BASE] = { .doit   = neigh_add             },
        [RTM_DELNEIGH    - RTM_BASE] = { .doit   = neigh_delete          },
        [RTM_GETNEIGH    - RTM_BASE] = { .dumpit = neigh_dump_info       },
        [RTM_GETRULE     - RTM_BASE] = { .dumpit = rtnetlink_dump_all    },
        [RTM_GETNEIGHTBL - RTM_BASE] = { .dumpit = neightbl_dump_info    },
        [RTM_SETNEIGHTBL - RTM_BASE] = { .doit   = neightbl_set          },
};

static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr)
{
        struct net_device *dev = ptr;
        switch (event) {
        case NETDEV_UNREGISTER:
                rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
                break;
        case NETDEV_REGISTER:
                rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
                break;
        case NETDEV_UP:
        case NETDEV_DOWN:
                rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
                break;
        case NETDEV_CHANGE:
        case NETDEV_GOING_DOWN:
                break;
        default:
                rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
                break;
        }
        return NOTIFY_DONE;
}

static struct notifier_block rtnetlink_dev_notifier = {
        .notifier_call = rtnetlink_event,
};

void __init rtnetlink_init(void)
{
        int i;

        rtattr_max = 0;
        for (i = 0; i < ARRAY_SIZE(rta_max); i++)
                if (rta_max[i] > rtattr_max)
                        rtattr_max = rta_max[i];
        rta_buf = kmalloc(rtattr_max * sizeof(struct rtattr *), GFP_KERNEL);
        if (!rta_buf)
                panic("rtnetlink_init: cannot allocate rta_buf\n");

        rtnl = netlink_kernel_create(NETLINK_ROUTE, RTNLGRP_MAX, rtnetlink_rcv,
                                     THIS_MODULE);
        if (rtnl == NULL)
                panic("rtnetlink_init: cannot initialize rtnetlink\n");
        netlink_set_nonroot(NETLINK_ROUTE, NL_NONROOT_RECV);
        register_netdevice_notifier(&rtnetlink_dev_notifier);
        rtnetlink_links[PF_UNSPEC] = link_rtnetlink_table;
        rtnetlink_links[PF_PACKET] = link_rtnetlink_table;
}

EXPORT_SYMBOL(__rta_fill);
EXPORT_SYMBOL(rtattr_strlcpy);
EXPORT_SYMBOL(rtattr_parse);
EXPORT_SYMBOL(rtnetlink_links);
EXPORT_SYMBOL(rtnetlink_put_metrics);
EXPORT_SYMBOL(rtnl);
EXPORT_SYMBOL(rtnl_lock);
EXPORT_SYMBOL(rtnl_trylock);
EXPORT_SYMBOL(rtnl_unlock);