2 * NET3 Protocol independent device support routines.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
9 * Derived from the non IP parts of dev.c 1.0.19
10 * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
15 * Florian la Roche <rzsfl@rz.uni-sb.de>
16 * Alan Cox <gw4pts@gw4pts.ampr.org>
17 * David Hinds <dhinds@allegro.stanford.edu>
18 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
19 * Adam Sulmicki <adam@cfar.umd.edu>
22 * Alan Cox : device private ioctl copies fields back.
23 * Alan Cox : Transmit queue code does relevant stunts to
24 * keep the queue safe.
25 * Alan Cox : Fixed double lock.
26 * Alan Cox : Fixed promisc NULL pointer trap
27 * ???????? : Support the full private ioctl range
28 * Alan Cox : Moved ioctl permission check into drivers
29 * Tim Kordas : SIOCADDMULTI/SIOCDELMULTI
30 * Alan Cox : 100 backlog just doesn't cut it when
31 * you start doing multicast video 8)
32 * Alan Cox : Rewrote net_bh and list manager.
33 * Alan Cox : Fix ETH_P_ALL echoback lengths.
34 * Alan Cox : Took out transmit every packet pass
35 * Saved a few bytes in the ioctl handler
36 * Alan Cox : Network driver sets packet type before calling netif_rx. Saves
37 * a function call a packet.
38 * Alan Cox : Hashed net_bh()
39 * Richard Kooijman: Timestamp fixes.
40 * Alan Cox : Wrong field in SIOCGIFDSTADDR
41 * Alan Cox : Device lock protection.
42 * Alan Cox : Fixed nasty side effect of device close changes.
43 * Rudi Cilibrasi : Pass the right thing to set_mac_address()
44 * Dave Miller : 32bit quantity for the device lock to make it work out
46 * Bjorn Ekwall : Added KERNELD hack.
47 * Alan Cox : Cleaned up the backlog initialise.
48 * Craig Metz : SIOCGIFCONF fix if space for under
50 * Thomas Bogendoerfer : Return ENODEV for dev_open, if there
51 * is no device open function.
52 * Andi Kleen : Fix error reporting for SIOCGIFCONF
53 * Michael Chastain : Fix signed/unsigned for SIOCGIFCONF
54 * Cyrus Durgin : Cleaned for KMOD
55 * Adam Sulmicki : Bug Fix : Network Device Unload
56 * A network device unload needs to purge
58 * Paul Rusty Russell : SIOCSIFNAME
61 #include <asm/uaccess.h>
62 #include <asm/system.h>
63 #include <asm/bitops.h>
64 #include <linux/config.h>
65 #include <linux/types.h>
66 #include <linux/kernel.h>
67 #include <linux/sched.h>
68 #include <linux/string.h>
70 #include <linux/socket.h>
71 #include <linux/sockios.h>
72 #include <linux/errno.h>
73 #include <linux/interrupt.h>
74 #include <linux/if_ether.h>
75 #include <linux/netdevice.h>
76 #include <linux/etherdevice.h>
77 #include <linux/notifier.h>
78 #include <linux/skbuff.h>
80 #include <linux/rtnetlink.h>
82 #include <linux/proc_fs.h>
83 #include <linux/stat.h>
86 #include <net/pkt_sched.h>
87 #include <net/profile.h>
88 #include <linux/init.h>
89 #include <linux/kmod.h>
90 #ifdef CONFIG_NET_RADIO
91 #include <linux/wireless.h>
92 #endif /* CONFIG_NET_RADIO */
94 extern int plip_init(void);
97 NET_PROFILE_DEFINE(dev_queue_xmit
)
98 NET_PROFILE_DEFINE(net_bh
)
99 NET_PROFILE_DEFINE(net_bh_skb
)
102 const char *if_port_text
[] = {
113 * The list of packet types we will receive (as opposed to discard)
114 * and the routines to invoke.
116 * Why 16. Because with 16 the only overlap we get on a hash of the
117 * low nibble of the protocol value is RARP/SNAP/X.25.
132 struct packet_type
*ptype_base
[16]; /* 16 way hashed list */
133 struct packet_type
*ptype_all
= NULL
; /* Taps */
136 * Device list lock. Setting it provides that interface
137 * will not disappear unexpectedly while kernel sleeps.
140 atomic_t dev_lockct
= ATOMIC_INIT(0);
146 static struct notifier_block
*netdev_chain
=NULL
;
149 * Device drivers call our routines to queue packets here. We empty the
150 * queue in the bottom half handler.
153 static struct sk_buff_head backlog
;
155 #ifdef CONFIG_NET_FASTROUTE
156 int netdev_fastroute
;
157 int netdev_fastroute_obstacles
;
158 struct net_fastroute_stats dev_fastroute_stat
;
161 static void dev_clear_backlog(struct device
*dev
);
164 /******************************************************************************************
166 Protocol management and registration routines
168 *******************************************************************************************/
177 * Add a protocol ID to the list. Now that the input handler is
178 * smarter we can dispense with all the messy stuff that used to be
181 * BEWARE!!! Protocol handlers, mangling input packets,
182 * MUST BE last in hash buckets and checking protocol handlers
183 * MUST start from promiscuous ptype_all chain in net_bh.
184 * It is true now, do not change it.
185 * Explanation follows: if protocol handler, mangling packet, will
186 * be the first on list, it is not able to sense, that packet
187 * is cloned and should be copied-on-write, so that it will
188 * change it and subsequent readers will get broken packet.
/*
 * dev_add_pack - register a packet-type handler in the 16-way receive hash.
 * Hashes on the low nibble of the protocol; ETH_P_ALL handlers go on the
 * taps chain instead (that branch's body was lost below).
 * NOTE(review): extraction is damaged — original line numbers are fused
 * into the text and lines 193-194, 197, 200-201, 203-209 and 213 were
 * dropped (braces, hash declaration, the ETH_P_ALL branch). Restore from
 * upstream before compiling.
 */
192 void dev_add_pack(struct packet_type
*pt
)
195 #ifdef CONFIG_NET_FASTROUTE
196 /* Hack to detect packet socket */
198 netdev_fastroute_obstacles
++;
199 dev_clear_fastroute(pt
->dev
);
202 if(pt
->type
==htons(ETH_P_ALL
))
/* Non-ETH_P_ALL handlers: link into the hashed ptype_base chain. */
210 hash
=ntohs(pt
->type
)&15;
211 pt
->next
= ptype_base
[hash
];
212 ptype_base
[hash
] = pt
;
218 * Remove a protocol ID from the list.
/*
 * dev_remove_pack - unlink a packet-type handler previously added with
 * dev_add_pack; warns if the handler is not found on its chain.
 * NOTE(review): damaged extraction — lines 222, 225-229, 232-236, 238,
 * 240-243 and 245-246 were dropped (braces, ETH_P_ALL branch, the unlink
 * statement and loop body). Restore from upstream before compiling.
 */
221 void dev_remove_pack(struct packet_type
*pt
)
223 struct packet_type
**pt1
;
224 if(pt
->type
==htons(ETH_P_ALL
))
230 pt1
=&ptype_base
[ntohs(pt
->type
)&15];
231 for(; (*pt1
)!=NULL
; pt1
=&((*pt1
)->next
))
237 #ifdef CONFIG_NET_FASTROUTE
239 netdev_fastroute_obstacles
--;
/* Fell off the end of the chain without a match. */
244 printk(KERN_WARNING
"dev_remove_pack: %p not found.\n", pt
);
247 /*****************************************************************************************
249 Device Interface Subroutines
251 ******************************************************************************************/
254 * Find an interface by name.
/*
 * dev_get - look an interface up by name under dev_base_lock.
 * NOTE(review): damaged extraction — the braces, the `struct device *dev;`
 * declaration and both return statements (lines ~258-260, 264-266, 268)
 * were dropped. Restore from upstream before compiling.
 */
257 struct device
*dev_get(const char *name
)
261 read_lock_bh(&dev_base_lock
);
262 for (dev
= dev_base
; dev
!= NULL
; dev
= dev
->next
) {
263 if (strcmp(dev
->name
, name
) == 0)
267 read_unlock_bh(&dev_base_lock
);
/*
 * dev_get_by_index - look an interface up by ifindex under dev_base_lock.
 * NOTE(review): damaged extraction — braces, the local `dev` declaration
 * and the return statements (lines ~272-274, 278-280, 282) were dropped.
 */
271 struct device
* dev_get_by_index(int ifindex
)
275 read_lock_bh(&dev_base_lock
);
276 for (dev
= dev_base
; dev
!= NULL
; dev
= dev
->next
) {
277 if (dev
->ifindex
== ifindex
)
281 read_unlock_bh(&dev_base_lock
);
/*
 * dev_getbyhwaddr - look an interface up by hardware type + address,
 * comparing addr_len bytes, under dev_base_lock.
 * NOTE(review): damaged extraction — braces, the local declaration and
 * the return statements (lines ~286-288, 293-295, 297) were dropped.
 */
285 struct device
*dev_getbyhwaddr(unsigned short type
, char *ha
)
289 read_lock_bh(&dev_base_lock
);
290 for (dev
= dev_base
; dev
!= NULL
; dev
= dev
->next
) {
291 if (dev
->type
== type
&&
292 memcmp(dev
->dev_addr
, ha
, dev
->addr_len
) == 0)
296 read_unlock_bh(&dev_base_lock
);
301 * Passed a format string - eg "lt%d" it will try and find a suitable
302 * id. Not efficient for many devices, not called a lot..
/*
 * dev_alloc_name - fill dev->name from a printf-style template (e.g.
 * "eth%d"), probing indices until an unused name is found; -ENFILE when
 * the 100-index search space is exhausted.
 * NOTE(review): damaged extraction — braces, the loop header over `i`
 * and the success return (lines ~306-312, 315-316, 318) were dropped.
 */
305 int dev_alloc_name(struct device
*dev
, const char *name
)
/*
309 * If you need over 100 please also fix the algorithm...
 */
313 sprintf(dev
->name
,name
,i
);
314 if(dev_get(dev
->name
)==NULL
)
317 return -ENFILE
; /* Over 100 of the things .. bail out! */
/*
 * dev_alloc - kmalloc a struct device plus 16 bytes of trailing name
 * storage, name it via dev_alloc_name, and report failure through *err.
 * NOTE(review): damaged extraction — the NULL-check on the kmalloc
 * result and the return paths (lines ~321, 323-327, 330-337) were
 * dropped; as shown the allocation is dereferenced unchecked. Restore
 * from upstream before compiling.
 */
320 struct device
*dev_alloc(const char *name
, int *err
)
322 struct device
*dev
=kmalloc(sizeof(struct device
)+16, GFP_KERNEL
);
328 dev
->name
=(char *)(dev
+1); /* Name string space */
329 *err
=dev_alloc_name(dev
,name
);
338 void netdev_state_change(struct device
*dev
)
340 if (dev
->flags
&IFF_UP
)
341 notifier_call_chain(&netdev_chain
, NETDEV_CHANGE
, dev
);
346 * Find and possibly load an interface.
351 void dev_load(const char *name
)
353 if(!dev_get(name
) && capable(CAP_SYS_MODULE
))
354 request_module(name
);
/* Build without kmod: demand-loading of drivers is a no-op stub. */
extern inline void dev_load(const char *unused)
{
	;
}
/*
 * default_rebuild_header - bug-trap rebuild_header installed by dev_open
 * for drivers that leave the hook NULL; logs loudly if ever reached.
 * NOTE(review): damaged extraction — the braces and the statements after
 * the printk (lines ~364, 366-368, presumably freeing the skb and
 * returning — TODO confirm against upstream) were dropped.
 */
363 static int default_rebuild_header(struct sk_buff
*skb
)
365 printk(KERN_DEBUG
"%s: default_rebuild_header called -- BUG!\n", skb
->dev
? skb
->dev
->name
: "NULL!!!");
/*
371 * Prepare an interface for use.
 *
 * dev_open - bring an interface up: init the xmit lock, call the
 * driver's private ->open, install the bug-trap rebuild_header if the
 * driver left it NULL, set IFF_UP|IFF_RUNNING and announce NETDEV_UP.
 * NOTE(review): damaged extraction — many lines were dropped (braces,
 * the `ret` declaration, the error paths after ->open, the multicast
 * init and queue-wakeup statements hinted at by the surviving comment
 * fragments, and the return). Restore from upstream before compiling.
 */
374 int dev_open(struct device
*dev
)
/* Already up: presumably returns early here — dropped lines 383-384. */
382 if (dev
->flags
&IFF_UP
)
385 /* Setup the lock before we open the faucet. */
386 spin_lock_init(&dev
->xmit_lock
);
/*
389 * Call device private open method
 */
393 ret
= dev
->open(dev
);
/*
396 * If it went open OK then:
402 * nil rebuild_header routine,
403 * that should be never called and used as just bug trap.
 */
406 if (dev
->rebuild_header
== NULL
)
407 dev
->rebuild_header
= default_rebuild_header
;
412 dev
->flags
|= (IFF_UP
| IFF_RUNNING
);
/*
415 * Initialize multicasting status
420 * Wakeup transmit queue engine
425 * ... and announce new interface.
 */
427 notifier_call_chain(&netdev_chain
, NETDEV_UP
, dev
);
433 #ifdef CONFIG_NET_FASTROUTE
/*
 * dev_do_clear_fastroute - drop every cached fastpath dst entry of one
 * device (slots 0..NETDEV_FASTROUTE_HMASK), waiting out irq users.
 * NOTE(review): damaged extraction — braces and the `int i;` declaration
 * (lines ~436, 438-439, 442-443) were dropped.
 */
435 static __inline__
void dev_do_clear_fastroute(struct device
*dev
)
437 if (dev
->accept_fastpath
) {
440 for (i
=0; i
<=NETDEV_FASTROUTE_HMASK
; i
++)
441 dst_release_irqwait(xchg(dev
->fastpath
+i
, NULL
));
/*
 * dev_clear_fastroute - clear fastpath caches for one device, or walk
 * the whole dev_base list doing so; the lock is dropped around each
 * per-device call because dst release may sleep/wait for irq users.
 * NOTE(review): damaged extraction — braces and the branch selecting
 * single-device vs. walk-all (lines ~446-447, 449, 455, 457) were
 * dropped; as shown the two halves run unconditionally.
 */
445 void dev_clear_fastroute(struct device
*dev
)
448 dev_do_clear_fastroute(dev
);
450 read_lock_bh(&dev_base_lock
);
451 for (dev
= dev_base
; dev
; dev
= dev
->next
) {
452 read_unlock_bh(&dev_base_lock
);
453 dev_do_clear_fastroute(dev
);
454 read_lock_bh(&dev_base_lock
);
456 read_unlock_bh(&dev_base_lock
);
/*
462 * Completely shutdown an interface.
 *
 * dev_close - take an interface down: call the driver close hook, purge
 * its backlog packets, clear IFF_UP|IFF_RUNNING, clear fastroute state
 * and broadcast NETDEV_DOWN.
 * NOTE(review): damaged extraction — braces, the early return for an
 * already-down device, the driver ->stop call implied by the surviving
 * comment, the tbusy check around the printk, the #endif and the return
 * (lines ~466, 468-474, 477-482, 484-485, 487, 489, 493-495, 499-501)
 * were dropped. Restore from upstream before compiling.
 */
465 int dev_close(struct device
*dev
)
467 if (!(dev
->flags
&IFF_UP
))
/*
475 * Call the device specific close. This cannot fail.
476 * Only if device is UP
 */
483 printk("dev_close: bug %s still running\n", dev
->name
);
/*
486 * Device is now down.
 */
488 dev_clear_backlog(dev
);
490 dev
->flags
&=~(IFF_UP
|IFF_RUNNING
);
491 #ifdef CONFIG_NET_FASTROUTE
492 dev_clear_fastroute(dev
);
/*
496 * Tell people we are going down
 */
498 notifier_call_chain(&netdev_chain
, NETDEV_DOWN
, dev
);
505 * Device change register/unregister. These are not inline or static
506 * as we export them to the world.
509 int register_netdevice_notifier(struct notifier_block
*nb
)
511 return notifier_chain_register(&netdev_chain
, nb
);
514 int unregister_netdevice_notifier(struct notifier_block
*nb
)
516 return notifier_chain_unregister(&netdev_chain
,nb
);
/*
520 * Support routine. Sends outgoing frames to any network
521 * taps currently in use.
 *
 * dev_queue_xmit_nit - clone each outgoing skb to every ETH_P_ALL tap
 * (ptype_all) bound to this device (or to no device), skipping the
 * socket the packet originated from, after stamping the send time.
 * NOTE(review): damaged extraction — braces, the skb_get/loop-body
 * delimiters and several closing lines (e.g. 525, 528, 530, 533, 536,
 * 539-540, 548-549, 553, 555, 557, 562-563, 567-569) were dropped, and
 * three comment blocks below lost their closing markers (restored here).
 */
524 void dev_queue_xmit_nit(struct sk_buff
*skb
, struct device
*dev
)
526 struct packet_type
*ptype
;
527 get_fast_time(&skb
->stamp
);
529 for (ptype
= ptype_all
; ptype
!=NULL
; ptype
= ptype
->next
)
531 /* Never send packets back to the socket
532 * they originated from - MvS (miquels@drinkel.ow.org)
 */
534 if ((ptype
->dev
== dev
|| !ptype
->dev
) &&
535 ((struct sock
*)ptype
->data
!= skb
->sk
))
537 struct sk_buff
*skb2
;
538 if ((skb2
= skb_clone(skb
, GFP_ATOMIC
)) == NULL
)
541 /* Code, following below is wrong.
543 The only reason, why it does work is that
544 ONLY packet sockets receive outgoing
545 packets. If such a packet will be (occasionally)
546 received by normal packet handler, which expects
547 that mac header is pulled...
 */
550 /* More sensible variant. skb->nh should be correctly
551 set by sender, so that the second statement is
552 just protection against buggy protocols.
 */
554 skb2
->mac
.raw
= skb2
->data
;
556 if (skb2
->nh
.raw
< skb2
->data
|| skb2
->nh
.raw
>= skb2
->tail
) {
558 printk(KERN_DEBUG
"protocol %04x is buggy, dev %s\n", skb2
->protocol
, dev
->name
);
559 skb2
->nh
.raw
= skb2
->data
;
560 if (dev
->hard_header
)
561 skb2
->nh
.raw
+= dev
->hard_header_len
;
564 skb2
->h
.raw
= skb2
->nh
.raw
;
565 skb2
->pkt_type
= PACKET_OUTGOING
;
/* Hand the clone to the tap's receive function. */
566 ptype
->func(skb2
, skb
->dev
, ptype
);
/*
572 * Fast path for loopback frames.
 *
 * dev_loopback_xmit - clone an outgoing skb, rewind it to the network
 * header, mark it PACKET_LOOPBACK with checksum already verified, and
 * (per upstream) feed it back into the receive path.
 * NOTE(review): damaged extraction — braces, the NULL-check on the
 * clone, the dst handling and the final netif_rx()-style hand-off
 * (lines ~576, 578-580, 587-589) were dropped; as shown the clone is
 * used unchecked and never delivered.
 */
575 void dev_loopback_xmit(struct sk_buff
*skb
)
577 struct sk_buff
*newskb
=skb_clone(skb
, GFP_ATOMIC
);
581 newskb
->mac
.raw
= newskb
->data
;
582 skb_pull(newskb
, newskb
->nh
.raw
- newskb
->data
);
583 newskb
->pkt_type
= PACKET_LOOPBACK
;
584 newskb
->ip_summed
= CHECKSUM_UNNECESSARY
;
585 if (newskb
->dst
==NULL
)
586 printk(KERN_DEBUG
"BUG: packet without dst looped back 1\n");
/*
 * dev_queue_xmit - main transmit entry point. Under dev->xmit_lock,
 * either enqueues to the device's qdisc (that path's body was dropped
 * below) or, for queue-less software devices, taps the packet and calls
 * ->hard_start_xmit directly; failure on a virtual device is logged and
 * the packet is dropped.
 * NOTE(review): damaged extraction — braces, the qdisc enqueue/run
 * branch, the kfree_skb on failure and all return statements (lines
 * ~591, 593-594, 596, 598-599, 601-604, 606, 609-614, 617, 621-622,
 * 624, 628, 631-636, 638, 640-642, 645-651) were dropped. Restore from
 * upstream before compiling.
 */
590 int dev_queue_xmit(struct sk_buff
*skb
)
592 struct device
*dev
= skb
->dev
;
595 #ifdef CONFIG_NET_PROFILE
597 NET_PROFILE_ENTER(dev_queue_xmit
);
600 spin_lock_bh(&dev
->xmit_lock
);
605 spin_unlock_bh(&dev
->xmit_lock
);
607 #ifdef CONFIG_NET_PROFILE
608 NET_PROFILE_LEAVE(dev_queue_xmit
);
615 /* The device has no queue. Common case for software devices:
616 loopback, all the sorts of tunnels...
618 Really, it is unlikely that bh protection is necessary here:
619 virtual devices do not generate EOI events.
620 However, it is possible, that they rely on bh protection
 */
623 if (dev
->flags
&IFF_UP
) {
625 dev_queue_xmit_nit(skb
,dev
);
626 if (dev
->hard_start_xmit(skb
, dev
) == 0) {
627 spin_unlock_bh(&dev
->xmit_lock
);
629 #ifdef CONFIG_NET_PROFILE
630 NET_PROFILE_LEAVE(dev_queue_xmit
);
637 printk(KERN_DEBUG
"Virtual device %s asks to queue packet!\n", dev
->name
);
639 spin_unlock_bh(&dev
->xmit_lock
);
643 #ifdef CONFIG_NET_PROFILE
644 NET_PROFILE_LEAVE(dev_queue_xmit
);
652 /*=======================================================================
654 =======================================================================*/
656 int netdev_dropping
= 0;
657 int netdev_max_backlog
= 300;
658 atomic_t netdev_rx_dropped
;
659 #ifdef CONFIG_CPU_IS_SLOW
660 int net_cpu_congestion
;
663 #ifdef CONFIG_NET_HW_FLOWCONTROL
664 int netdev_throttle_events
;
665 static unsigned long netdev_fc_mask
= 1;
666 unsigned long netdev_fc_xoff
= 0;
670 void (*stimul
)(struct device
*);
672 } netdev_fc_slots
[32];
/*
 * netdev_register_fc - claim a free hardware-flow-control slot (first
 * zero bit of netdev_fc_mask), record the device and its wakeup
 * callback, and mark the slot active / not xoff'd.
 * NOTE(review): damaged extraction — braces, the flags/bit declarations,
 * the save_flags/cli pairing implied by the surviving restore_flags, and
 * the `return bit;`-style tail (lines ~675-680, 687, 689-691) were
 * dropped. Restore from upstream before compiling.
 */
674 int netdev_register_fc(struct device
*dev
, void (*stimul
)(struct device
*dev
))
681 if (netdev_fc_mask
!= ~0UL) {
682 bit
= ffz(netdev_fc_mask
);
683 netdev_fc_slots
[bit
].stimul
= stimul
;
684 netdev_fc_slots
[bit
].dev
= dev
;
685 set_bit(bit
, &netdev_fc_mask
);
686 clear_bit(bit
, &netdev_fc_xoff
);
688 restore_flags(flags
);
/*
 * netdev_unregister_fc - release a flow-control slot obtained from
 * netdev_register_fc: NULL out its callback/device and clear its bits
 * in both the active mask and the xoff mask.
 * NOTE(review): damaged extraction — braces, the flags declaration and
 * the save_flags/cli pairing implied by restore_flags (lines ~693-698,
 * 703, 705-706) were dropped.
 */
692 void netdev_unregister_fc(int bit
)
699 netdev_fc_slots
[bit
].stimul
= NULL
;
700 netdev_fc_slots
[bit
].dev
= NULL
;
701 clear_bit(bit
, &netdev_fc_mask
);
702 clear_bit(bit
, &netdev_fc_xoff
);
704 restore_flags(flags
);
/*
 * netdev_wakeup - run from net_bh when congestion clears: for each slot
 * whose xoff bit was set, count a throttle event and call the driver's
 * stimul() callback to restart its transmit.
 * NOTE(review): damaged extraction — braces, the xoff/i declarations,
 * the loop over set bits and the xoff-clearing (lines ~708-711, 713-714,
 * 716-718, 720-721) were dropped.
 */
707 static void netdev_wakeup(void)
712 xoff
= netdev_fc_xoff
;
715 netdev_throttle_events
++;
719 netdev_fc_slots
[i
].stimul(netdev_fc_slots
[i
].dev
);
/*
 * dev_clear_backlog - called from dev_close: walk the global backlog
 * queue and unlink (under skb_queue_lock) every skb belonging to the
 * device being shut down, so no stale pointers survive the close.
 * NOTE(review): damaged extraction — braces, the empty-queue early-out,
 * the cursor initialisation/advance statements, the kfree of unlinked
 * skbs and the flow-control tail (lines ~726, 728-730, 732, 736,
 * 739-743, 745-746, 748, 752-755, 757-763) were dropped. Restore from
 * upstream before compiling.
 */
725 static void dev_clear_backlog(struct device
*dev
)
727 struct sk_buff
*prev
, *curr
;
/*
731 * Let now clear backlog queue. -AS
733 * We are competing here both with netif_rx() and net_bh().
734 * We don't want either of those to mess with skb ptrs
735 * while we work on them, thus cli()/sti().
737 * It looks better to use net_bh trick, at least
738 * to be sure, that we keep interrupt latency really low. --ANK (980727)
 */
744 while ( curr
!= (struct sk_buff
*)(&backlog
) ) {
747 if ( curr
->prev
->dev
== dev
) {
749 spin_lock_irqsave(&skb_queue_lock
, flags
);
750 __skb_unlink(prev
, &backlog
);
751 spin_unlock_irqrestore(&skb_queue_lock
, flags
);
756 #ifdef CONFIG_NET_HW_FLOWCONTROL
/*
766 * Receive a packet from a device driver and queue it for the upper
767 * (protocol) levels. It always succeeds.
 *
 * netif_rx - driver receive entry: timestamp the skb (unless already
 * stamped), then queue it on the global backlog if below
 * netdev_max_backlog and not currently in drop mode; otherwise count it
 * in netdev_rx_dropped. A hardware-flow-control variant of the same
 * choice follows under CONFIG_NET_HW_FLOWCONTROL.
 * NOTE(review): damaged extraction — braces, #endif lines, the
 * mark_bh(NET_BH)-style wakeups after each enqueue, the kfree_skb on
 * the drop paths and the dropping-mode bookkeeping (lines ~771, 775-778,
 * 781-782, 784, 787-789, 791-793, 795-799, 801-804, 806-809) were
 * dropped. Restore from upstream before compiling.
 */
770 void netif_rx(struct sk_buff
*skb
)
772 #ifndef CONFIG_CPU_IS_SLOW
773 if(skb
->stamp
.tv_sec
==0)
774 get_fast_time(&skb
->stamp
);
779 /* The code is rearranged so that the path is the most
780 short when CPU is congested, but is still operating.
 */
783 if (backlog
.qlen
<= netdev_max_backlog
) {
785 if (netdev_dropping
== 0) {
786 skb_queue_tail(&backlog
,skb
);
790 atomic_inc(&netdev_rx_dropped
);
794 #ifdef CONFIG_NET_HW_FLOWCONTROL
800 skb_queue_tail(&backlog
,skb
);
805 atomic_inc(&netdev_rx_dropped
);
/*
 * handle_bridge - when the bridge is up and this protocol is bridgeable,
 * clone the skb, push the MAC header back on, and offer the full frame
 * to br_receive_frame(); consumed frames do not continue up the stack.
 * NOTE(review): damaged extraction — braces, the offset declaration, the
 * clone NULL-check and the post-br_receive_frame control flow (lines
 * ~811, 813-814, 817-820, 822-824, 827, 829-837) were dropped.
 */
810 static inline void handle_bridge(struct sk_buff
*skb
, unsigned short type
)
812 if (br_stats
.flags
& BR_UP
&& br_protocol_ok(ntohs(type
)))
/*
815 * We pass the bridge a complete frame. This means
816 * recovering the MAC header first.
 */
821 skb
=skb_clone(skb
, GFP_ATOMIC
);
825 offset
=skb
->data
-skb
->mac
.raw
;
826 skb_push(skb
,offset
); /* Put header back on for bridge */
828 if(br_receive_frame(skb
))
838 * When we are called the queue is ready to grab, the interrupts are
839 * on and hardware can interrupt and queue to the receive queue as we
840 * run with no problems.
841 * This is run as a bottom half after an interrupt handler that does
847 struct packet_type
*ptype
;
848 struct packet_type
*pt_prev
;
850 unsigned long start_time
= jiffies
;
851 #ifdef CONFIG_CPU_IS_SLOW
852 static unsigned long start_busy
= 0;
853 static unsigned long ave_busy
= 0;
856 start_busy
= start_time
;
857 net_cpu_congestion
= ave_busy
>>8;
860 NET_PROFILE_ENTER(net_bh
);
862 * Can we send anything now? We want to clear the
863 * decks for any more sends that get done as we
864 * process the input. This also minimises the
865 * latency on a transmit interrupt bh.
868 if (qdisc_head
.forw
!= &qdisc_head
)
872 * Any data left to process. This may occur because a
873 * mark_bh() is done after we empty the queue including
874 * that from the device which does a mark_bh() just after
878 * While the queue is not empty..
880 * Note that the queue never shrinks due to
881 * an interrupt, so we can do this test without
882 * disabling interrupts.
885 while (!skb_queue_empty(&backlog
))
887 struct sk_buff
* skb
;
889 /* Give chance to other bottom halves to run */
890 if (jiffies
- start_time
> 1)
894 * We have a packet. Therefore the queue has shrunk
896 skb
= skb_dequeue(&backlog
);
898 #ifdef CONFIG_CPU_IS_SLOW
899 if (ave_busy
> 128*16) {
901 while ((skb
= skb_dequeue(&backlog
)) != NULL
)
909 NET_PROFILE_SKB_PASSED(skb
, net_bh_skb
);
911 #ifdef CONFIG_NET_FASTROUTE
912 if (skb
->pkt_type
== PACKET_FASTROUTE
) {
919 * Bump the pointer to the next structure.
921 * On entry to the protocol layer. skb->data and
922 * skb->nh.raw point to the MAC and encapsulated data
925 /* XXX until we figure out every place to modify.. */
926 skb
->h
.raw
= skb
->nh
.raw
= skb
->data
;
928 if (skb
->mac
.raw
< skb
->head
|| skb
->mac
.raw
> skb
->data
) {
929 printk(KERN_CRIT
"%s: wrong mac.raw ptr, proto=%04x\n", skb
->dev
->name
, skb
->protocol
);
935 * Fetch the packet protocol ID.
938 type
= skb
->protocol
;
942 * If we are bridging then pass the frame up to the
943 * bridging code (if this protocol is to be bridged).
944 * If it is bridged then move on
946 handle_bridge(skb
, type
);
950 * We got a packet ID. Now loop over the "known protocols"
951 * list. There are two lists. The ptype_all list of taps (normally empty)
952 * and the main protocol list which is hashed perfectly for normal protocols.
956 for (ptype
= ptype_all
; ptype
!=NULL
; ptype
=ptype
->next
)
958 if (!ptype
->dev
|| ptype
->dev
== skb
->dev
) {
961 struct sk_buff
*skb2
=skb_clone(skb
, GFP_ATOMIC
);
963 pt_prev
->func(skb2
,skb
->dev
, pt_prev
);
969 for (ptype
= ptype_base
[ntohs(type
)&15]; ptype
!= NULL
; ptype
= ptype
->next
)
971 if (ptype
->type
== type
&& (!ptype
->dev
|| ptype
->dev
==skb
->dev
))
974 * We already have a match queued. Deliver
975 * to it and then remember the new match
979 struct sk_buff
*skb2
;
981 skb2
=skb_clone(skb
, GFP_ATOMIC
);
984 * Kick the protocol handler. This should be fast
985 * and efficient code.
989 pt_prev
->func(skb2
, skb
->dev
, pt_prev
);
991 /* Remember the current last to do */
994 } /* End of protocol list loop */
997 * Is there a last item to send to ?
1001 pt_prev
->func(skb
, skb
->dev
, pt_prev
);
1003 * Has an unknown packet has been received ?
1009 } /* End of queue loop */
1012 * We have emptied the queue
1016 * One last output flush.
1019 if (qdisc_head
.forw
!= &qdisc_head
)
1022 #ifdef CONFIG_CPU_IS_SLOW
1024 unsigned long start_idle
= jiffies
;
1025 ave_busy
+= ((start_idle
- start_busy
)<<3) - (ave_busy
>>4);
1029 #ifdef CONFIG_NET_HW_FLOWCONTROL
1030 if (netdev_dropping
)
1033 netdev_dropping
= 0;
1035 NET_PROFILE_LEAVE(net_bh
);
1040 NET_PROFILE_LEAVE(net_bh
);
1044 /* Protocol dependent address dumping routines */
1046 static gifconf_func_t
* gifconf_list
[NPROTO
];
/*
 * register_gifconf - install the per-family SIOCGIFCONF address-dump
 * callback into gifconf_list.
 * NOTE(review): damaged extraction — braces, the family-range validation
 * and the return (lines ~1049-1051, 1053-1054) were dropped; as shown
 * the `family` index is used unchecked.
 */
1048 int register_gifconf(unsigned int family
, gifconf_func_t
* gifconf
)
1052 gifconf_list
[family
] = gifconf
;
1058 * Map an interface index to its name (SIOCGIFNAME)
1062 * This call is useful, but I'd remove it too.
1064 * The reason is purely aesthetic, it is the only call
1065 * from SIOC* family using struct ifreq in reversed manner.
1066 * Besides that, it is pretty silly to put "drawing" facility
1067 * to kernel, it is useful only to print ifindices
1068 * in readable form, is not it? --ANK
1070 * We need this ioctl for efficient implementation of the
1071 * if_indextoname() function required by the IPv6 API. Without
1072 * it, we would have to search all the interfaces to find a
/*
 * dev_ifname - SIOCGIFNAME handler: copy the ifreq from userspace, map
 * ifr_ifindex to a device, write the device name back into the ifreq
 * and copy it out; -EFAULT on a failed copy-out.
 * NOTE(review): damaged extraction — braces, the local declarations, the
 * -EFAULT check after copy_from_user and the -ENODEV check after
 * dev_get_by_index (lines ~1077-1082, 1084-1085, 1087-1089, 1091-1093,
 * 1095) were dropped; as shown `dev` is dereferenced unchecked.
 */
1076 static int dev_ifname(struct ifreq
*arg
)
/*
1083 * Fetch the caller's info block.
 */
1086 err
= copy_from_user(&ifr
, arg
, sizeof(struct ifreq
));
1090 dev
= dev_get_by_index(ifr
.ifr_ifindex
);
1094 strcpy(ifr
.ifr_name
, dev
->name
);
1096 err
= copy_to_user(arg
, &ifr
, sizeof(struct ifreq
));
1097 return (err
)?-EFAULT
:0;
1101 * Perform a SIOCGIFCONF call. This structure will change
1102 * size eventually, and there is nothing I can do about it.
1103 * Thus we will need a 'compatibility mode'.
1106 static int dev_ifconf(char *arg
)
1116 * Fetch the caller's info block.
1119 if (copy_from_user(&ifc
, arg
, sizeof(struct ifconf
)))
1126 * Loop over the interfaces, and write an info block for each.
1130 read_lock_bh(&dev_base_lock
);
1131 for (dev
= dev_base
; dev
!= NULL
; dev
= dev
->next
) {
1132 read_unlock_bh(&dev_base_lock
);
1133 for (i
=0; i
<NPROTO
; i
++) {
1134 if (gifconf_list
[i
]) {
1137 done
= gifconf_list
[i
](dev
, NULL
, 0);
1139 done
= gifconf_list
[i
](dev
, pos
+total
, len
-total
);
1147 read_lock_bh(&dev_base_lock
);
1149 read_unlock_bh(&dev_base_lock
);
1152 * All done. Write the updated control block back to the caller.
1154 ifc
.ifc_len
= total
;
1156 if (copy_to_user(arg
, &ifc
, sizeof(struct ifconf
)))
1160 * Both BSD and Solaris return 0 here, so we do too.
1166 * This is invoked by the /proc filesystem handler to display a device
1170 #ifdef CONFIG_PROC_FS
1171 static int sprintf_stats(char *buffer
, struct device
*dev
)
1173 struct net_device_stats
*stats
= (dev
->get_stats
? dev
->get_stats(dev
): NULL
);
1177 size
= sprintf(buffer
, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu %8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
1180 stats
->rx_packets
, stats
->rx_errors
,
1181 stats
->rx_dropped
+ stats
->rx_missed_errors
,
1182 stats
->rx_fifo_errors
,
1183 stats
->rx_length_errors
+ stats
->rx_over_errors
1184 + stats
->rx_crc_errors
+ stats
->rx_frame_errors
,
1185 stats
->rx_compressed
, stats
->multicast
,
1187 stats
->tx_packets
, stats
->tx_errors
, stats
->tx_dropped
,
1188 stats
->tx_fifo_errors
, stats
->collisions
,
1189 stats
->tx_carrier_errors
+ stats
->tx_aborted_errors
1190 + stats
->tx_window_errors
+ stats
->tx_heartbeat_errors
,
1191 stats
->tx_compressed
);
1193 size
= sprintf(buffer
, "%6s: No statistics available.\n", dev
->name
);
1199 * Called from the PROCfs module. This now uses the new arbitrary sized /proc/net interface
1200 * to create /proc/net/dev
1203 int dev_get_info(char *buffer
, char **start
, off_t offset
, int length
, int dummy
)
1213 size
= sprintf(buffer
,
1214 "Inter-| Receive | Transmit\n"
1215 " face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed\n");
1221 read_lock_bh(&dev_base_lock
);
1222 for (dev
= dev_base
; dev
!= NULL
; dev
= dev
->next
) {
1223 size
= sprintf_stats(buffer
+len
, dev
);
1231 if(pos
>offset
+length
)
1234 read_unlock_bh(&dev_base_lock
);
1236 *start
=buffer
+(offset
-begin
); /* Start of wanted data */
1237 len
-=(offset
-begin
); /* Start slop */
1239 len
=length
; /* Ending slop */
1243 static int dev_proc_stats(char *buffer
, char **start
, off_t offset
,
1244 int length
, int *eof
, void *data
)
1248 len
= sprintf(buffer
, "%08x %08x %08x %08x %08x\n",
1249 atomic_read(&netdev_rx_dropped
),
1250 #ifdef CONFIG_NET_HW_FLOWCONTROL
1251 netdev_throttle_events
,
1255 #ifdef CONFIG_NET_FASTROUTE
1256 dev_fastroute_stat
.hits
,
1257 dev_fastroute_stat
.succeed
,
1258 dev_fastroute_stat
.deferred
1271 *start
= buffer
+ offset
;
1277 #endif /* CONFIG_PROC_FS */
1280 #ifdef CONFIG_NET_RADIO
1281 #ifdef CONFIG_PROC_FS
1284 * Print one entry of /proc/net/wireless
1285 * This is a clone of /proc/net/dev (just above)
1287 static int sprintf_wireless_stats(char *buffer
, struct device
*dev
)
1289 /* Get stats from the driver */
1290 struct iw_statistics
*stats
= (dev
->get_wireless_stats
?
1291 dev
->get_wireless_stats(dev
) :
1292 (struct iw_statistics
*) NULL
);
1295 if(stats
!= (struct iw_statistics
*) NULL
)
1296 size
= sprintf(buffer
,
1297 "%6s: %02x %3d%c %3d%c %3d%c %5d %5d %5d\n",
1301 stats
->qual
.updated
& 1 ? '.' : ' ',
1303 stats
->qual
.updated
& 2 ? '.' : ' ',
1305 stats
->qual
.updated
& 3 ? '.' : ' ',
1306 stats
->discard
.nwid
,
1307 stats
->discard
.code
,
1308 stats
->discard
.misc
);
1316 * Print info for /proc/net/wireless (print all entries)
1317 * This is a clone of /proc/net/dev (just above)
1319 int dev_get_wireless_info(char * buffer
, char **start
, off_t offset
,
1320 int length
, int dummy
)
1327 struct device
* dev
;
1329 size
= sprintf(buffer
,
1330 "Inter-|sta| Quality | Discarded packets\n"
1331 " face |tus|link level noise| nwid crypt misc\n");
1336 read_lock_bh(&dev_base_lock
);
1337 for(dev
= dev_base
; dev
!= NULL
; dev
= dev
->next
) {
1338 size
= sprintf_wireless_stats(buffer
+len
, dev
);
1346 if(pos
> offset
+ length
)
1349 read_unlock_bh(&dev_base_lock
);
1351 *start
= buffer
+ (offset
- begin
); /* Start of wanted data */
1352 len
-= (offset
- begin
); /* Start slop */
1354 len
= length
; /* Ending slop */
1358 #endif /* CONFIG_PROC_FS */
1359 #endif /* CONFIG_NET_RADIO */
/*
 * dev_set_promiscuity - adjust the reference-counted promiscuity count
 * by `inc`; IFF_PROMISC is set while the count is non-zero and cleared
 * when it returns to zero. On any flag transition the fastroute
 * obstacle count is updated and the change is logged; upstream also
 * re-uploads the multicast filter here (that line was dropped).
 * NOTE(review): damaged extraction — braces, the `else` of the
 * fastroute branch, the #endif and the dev_mc_upload tail (lines ~1362,
 * 1364, 1373, 1375-1376, 1379-1381) were dropped.
 */
1361 void dev_set_promiscuity(struct device
*dev
, int inc
)
1363 unsigned short old_flags
= dev
->flags
;
1365 dev
->flags
|= IFF_PROMISC
;
1366 if ((dev
->promiscuity
+= inc
) == 0)
1367 dev
->flags
&= ~IFF_PROMISC
;
1368 if (dev
->flags
^old_flags
) {
1369 #ifdef CONFIG_NET_FASTROUTE
1370 if (dev
->flags
&IFF_PROMISC
) {
1371 netdev_fastroute_obstacles
++;
1372 dev_clear_fastroute(dev
);
1374 netdev_fastroute_obstacles
--;
1377 printk(KERN_INFO
"device %s %s promiscuous mode\n",
1378 dev
->name
, (dev
->flags
&IFF_PROMISC
) ? "entered" : "left");
/*
 * dev_set_allmulti - adjust the reference-counted allmulti count by
 * `inc`; IFF_ALLMULTI is set while the count is non-zero and cleared
 * when it returns to zero. Mirrors dev_set_promiscuity.
 * NOTE(review): damaged extraction — braces and the body of the final
 * flag-transition branch (lines ~1383, 1385, 1390-1391 — presumably the
 * multicast-filter re-upload, as in dev_set_promiscuity upstream) were
 * dropped.
 */
1382 void dev_set_allmulti(struct device
*dev
, int inc
)
1384 unsigned short old_flags
= dev
->flags
;
1386 dev
->flags
|= IFF_ALLMULTI
;
1387 if ((dev
->allmulti
+= inc
) == 0)
1388 dev
->flags
&= ~IFF_ALLMULTI
;
1389 if (dev
->flags
^old_flags
)
1393 int dev_change_flags(struct device
*dev
, unsigned flags
)
1396 int old_flags
= dev
->flags
;
1399 * Set the flags on our device.
1402 dev
->flags
= (flags
& (IFF_DEBUG
|IFF_NOTRAILERS
|IFF_RUNNING
|IFF_NOARP
|
1403 IFF_SLAVE
|IFF_MASTER
|IFF_DYNAMIC
|
1404 IFF_MULTICAST
|IFF_PORTSEL
|IFF_AUTOMEDIA
)) |
1405 (dev
->flags
& (IFF_UP
|IFF_VOLATILE
|IFF_PROMISC
|IFF_ALLMULTI
));
1408 * Load in the correct multicast list now the flags have changed.
1414 * Have we downed the interface. We handle IFF_UP ourselves
1415 * according to user attempts to set it, rather than blindly
1420 if ((old_flags
^flags
)&IFF_UP
) /* Bit is different ? */
1422 ret
= ((old_flags
& IFF_UP
) ? dev_close
: dev_open
)(dev
);
1428 if (dev
->flags
&IFF_UP
&&
1429 ((old_flags
^dev
->flags
)&~(IFF_UP
|IFF_RUNNING
|IFF_PROMISC
|IFF_ALLMULTI
|IFF_VOLATILE
)))
1430 notifier_call_chain(&netdev_chain
, NETDEV_CHANGE
, dev
);
1432 if ((flags
^dev
->gflags
)&IFF_PROMISC
) {
1433 int inc
= (flags
&IFF_PROMISC
) ? +1 : -1;
1434 dev
->gflags
^= IFF_PROMISC
;
1435 dev_set_promiscuity(dev
, inc
);
1438 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
1439 is important. Some (broken) drivers set IFF_PROMISC, when
1440 IFF_ALLMULTI is requested not asking us and not reporting.
1442 if ((flags
^dev
->gflags
)&IFF_ALLMULTI
) {
1443 int inc
= (flags
&IFF_ALLMULTI
) ? +1 : -1;
1444 dev
->gflags
^= IFF_ALLMULTI
;
1445 dev_set_allmulti(dev
, inc
);
1452 * Perform the SIOCxIFxxx calls.
1455 static int dev_ifsioc(struct ifreq
*ifr
, unsigned int cmd
)
1460 if ((dev
= dev_get(ifr
->ifr_name
)) == NULL
)
1465 case SIOCGIFFLAGS
: /* Get interface flags */
1466 ifr
->ifr_flags
= (dev
->flags
&~(IFF_PROMISC
|IFF_ALLMULTI
))
1467 |(dev
->gflags
&(IFF_PROMISC
|IFF_ALLMULTI
));
1470 case SIOCSIFFLAGS
: /* Set interface flags */
1471 return dev_change_flags(dev
, ifr
->ifr_flags
);
1473 case SIOCGIFMETRIC
: /* Get the metric on the interface (currently unused) */
1474 ifr
->ifr_metric
= 0;
1477 case SIOCSIFMETRIC
: /* Set the metric on the interface (currently unused) */
1480 case SIOCGIFMTU
: /* Get the MTU of a device */
1481 ifr
->ifr_mtu
= dev
->mtu
;
1484 case SIOCSIFMTU
: /* Set the MTU of a device */
1485 if (ifr
->ifr_mtu
== dev
->mtu
)
1489 * MTU must be positive.
1495 if (dev
->change_mtu
)
1496 err
= dev
->change_mtu(dev
, ifr
->ifr_mtu
);
1498 dev
->mtu
= ifr
->ifr_mtu
;
1501 if (!err
&& dev
->flags
&IFF_UP
)
1502 notifier_call_chain(&netdev_chain
, NETDEV_CHANGEMTU
, dev
);
1506 memcpy(ifr
->ifr_hwaddr
.sa_data
,dev
->dev_addr
, MAX_ADDR_LEN
);
1507 ifr
->ifr_hwaddr
.sa_family
=dev
->type
;
1511 if(dev
->set_mac_address
==NULL
)
1513 if(ifr
->ifr_hwaddr
.sa_family
!=dev
->type
)
1515 err
=dev
->set_mac_address(dev
,&ifr
->ifr_hwaddr
);
1517 notifier_call_chain(&netdev_chain
, NETDEV_CHANGEADDR
, dev
);
1520 case SIOCSIFHWBROADCAST
:
1521 if(ifr
->ifr_hwaddr
.sa_family
!=dev
->type
)
1523 memcpy(dev
->broadcast
, ifr
->ifr_hwaddr
.sa_data
, MAX_ADDR_LEN
);
1524 notifier_call_chain(&netdev_chain
, NETDEV_CHANGEADDR
, dev
);
1528 ifr
->ifr_map
.mem_start
=dev
->mem_start
;
1529 ifr
->ifr_map
.mem_end
=dev
->mem_end
;
1530 ifr
->ifr_map
.base_addr
=dev
->base_addr
;
1531 ifr
->ifr_map
.irq
=dev
->irq
;
1532 ifr
->ifr_map
.dma
=dev
->dma
;
1533 ifr
->ifr_map
.port
=dev
->if_port
;
1537 if (dev
->set_config
)
1538 return dev
->set_config(dev
,&ifr
->ifr_map
);
1542 if(dev
->set_multicast_list
==NULL
||
1543 ifr
->ifr_hwaddr
.sa_family
!=AF_UNSPEC
)
1545 dev_mc_add(dev
,ifr
->ifr_hwaddr
.sa_data
, dev
->addr_len
, 1);
1549 if(dev
->set_multicast_list
==NULL
||
1550 ifr
->ifr_hwaddr
.sa_family
!=AF_UNSPEC
)
1552 dev_mc_delete(dev
,ifr
->ifr_hwaddr
.sa_data
,dev
->addr_len
, 1);
1556 ifr
->ifr_ifindex
= dev
->ifindex
;
1560 ifr
->ifr_qlen
= dev
->tx_queue_len
;
1566 dev
->tx_queue_len
= ifr
->ifr_qlen
;
1570 if (dev
->flags
&IFF_UP
)
1572 if (dev_get(ifr
->ifr_newname
))
1574 memcpy(dev
->name
, ifr
->ifr_newname
, IFNAMSIZ
);
1575 dev
->name
[IFNAMSIZ
-1] = 0;
1576 notifier_call_chain(&netdev_chain
, NETDEV_CHANGENAME
, dev
);
1580 * Unknown or private ioctl
1584 if(cmd
>= SIOCDEVPRIVATE
&&
1585 cmd
<= SIOCDEVPRIVATE
+ 15) {
1587 return dev
->do_ioctl(dev
, ifr
, cmd
);
1591 #ifdef CONFIG_NET_RADIO
1592 if(cmd
>= SIOCIWFIRST
&& cmd
<= SIOCIWLAST
) {
1594 return dev
->do_ioctl(dev
, ifr
, cmd
);
1597 #endif /* CONFIG_NET_RADIO */
1605 * This function handles all "interface"-type I/O control requests. The actual
1606 * 'doing' part of this is dev_ifsioc above.
1609 int dev_ioctl(unsigned int cmd
, void *arg
)
1615 /* One special case: SIOCGIFCONF takes ifconf argument
1616 and requires shared lock, because it sleeps writing
1620 if (cmd
== SIOCGIFCONF
) {
1622 ret
= dev_ifconf((char *) arg
);
1626 if (cmd
== SIOCGIFNAME
) {
1627 return dev_ifname((struct ifreq
*)arg
);
1630 if (copy_from_user(&ifr
, arg
, sizeof(struct ifreq
)))
1633 ifr
.ifr_name
[IFNAMSIZ
-1] = 0;
1635 colon
= strchr(ifr
.ifr_name
, ':');
1640 * See which interface the caller is talking about.
1646 * These ioctl calls:
1647 * - can be done by all.
1648 * - atomic and do not require locking.
1660 dev_load(ifr
.ifr_name
);
1661 ret
= dev_ifsioc(&ifr
, cmd
);
1665 if (copy_to_user(arg
, &ifr
, sizeof(struct ifreq
)))
1671 * These ioctl calls:
1672 * - require superuser power.
1673 * - require strict serialization.
1674 * - do not return a value
1685 case SIOCSIFHWBROADCAST
:
1688 if (!capable(CAP_NET_ADMIN
))
1690 dev_load(ifr
.ifr_name
);
1692 ret
= dev_ifsioc(&ifr
, cmd
);
1697 /* Get the per device memory space. We can add this but currently
1698 do not support it */
1700 /* Set the per device memory buffer space. Not applicable in our case */
1705 * Unknown or private ioctl.
1709 if (cmd
>= SIOCDEVPRIVATE
&&
1710 cmd
<= SIOCDEVPRIVATE
+ 15) {
1711 dev_load(ifr
.ifr_name
);
1713 ret
= dev_ifsioc(&ifr
, cmd
);
1715 if (!ret
&& copy_to_user(arg
, &ifr
, sizeof(struct ifreq
)))
1719 #ifdef CONFIG_NET_RADIO
1720 if (cmd
>= SIOCIWFIRST
&& cmd
<= SIOCIWLAST
) {
1721 dev_load(ifr
.ifr_name
);
1722 if (IW_IS_SET(cmd
)) {
1727 ret
= dev_ifsioc(&ifr
, cmd
);
1730 if (!ret
&& IW_IS_GET(cmd
) &&
1731 copy_to_user(arg
, &ifr
, sizeof(struct ifreq
)))
1735 #endif /* CONFIG_NET_RADIO */
1740 int dev_new_index(void)
1746 if (dev_get_by_index(ifindex
) == NULL
)
1751 static int dev_boot_phase
= 1;
1754 int register_netdevice(struct device
*dev
)
1756 struct device
*d
, **dp
;
1758 if (dev_boot_phase
) {
1759 /* This is NOT bug, but I am not sure, that all the
1760 devices, initialized before netdev module is started
1763 Now they are chained to device boot list
1764 and probed later. If a module is initialized
1765 before netdev, but assumes that dev->init
1766 is really called by register_netdev(), it will fail.
1768 So that this message should be printed for a while.
1770 printk(KERN_INFO
"early initialization of device %s is deferred\n", dev
->name
);
1772 /* Check for existence, and append to tail of chain */
1773 write_lock_bh(&dev_base_lock
);
1774 for (dp
=&dev_base
; (d
=*dp
) != NULL
; dp
=&d
->next
) {
1775 if (d
== dev
|| strcmp(d
->name
, dev
->name
) == 0) {
1776 write_unlock_bh(&dev_base_lock
);
1782 write_unlock_bh(&dev_base_lock
);
1788 /* Init, if this function is available */
1789 if (dev
->init
&& dev
->init(dev
) != 0)
1792 /* Check for existence, and append to tail of chain */
1793 write_lock_bh(&dev_base_lock
);
1794 for (dp
=&dev_base
; (d
=*dp
) != NULL
; dp
=&d
->next
) {
1795 if (d
== dev
|| strcmp(d
->name
, dev
->name
) == 0) {
1796 write_unlock_bh(&dev_base_lock
);
1801 dev_init_scheduler(dev
);
1803 write_unlock_bh(&dev_base_lock
);
1806 dev
->ifindex
= dev_new_index();
1807 if (dev
->iflink
== -1)
1808 dev
->iflink
= dev
->ifindex
;
1810 /* Notify protocols, that a new device appeared. */
1811 notifier_call_chain(&netdev_chain
, NETDEV_REGISTER
, dev
);
1816 int unregister_netdevice(struct device
*dev
)
1818 struct device
*d
, **dp
;
1820 if (dev_boot_phase
== 0) {
1821 /* If device is running, close it.
1822 It is very bad idea, really we should
1823 complain loudly here, but random hackery
1824 in linux/drivers/net likes it.
1826 if (dev
->flags
& IFF_UP
)
1829 #ifdef CONFIG_NET_FASTROUTE
1830 dev_clear_fastroute(dev
);
1833 /* Shutdown queueing discipline. */
1836 /* Notify protocols, that we are about to destroy
1837 this device. They should clean all the things.
1839 notifier_call_chain(&netdev_chain
, NETDEV_UNREGISTER
, dev
);
1842 * Flush the multicast chain
1844 dev_mc_discard(dev
);
1846 /* To avoid pointers looking to nowhere,
1847 we wait for end of critical section */
1851 /* And unlink it from device chain. */
1852 write_lock_bh(&dev_base_lock
);
1853 for (dp
= &dev_base
; (d
=*dp
) != NULL
; dp
=&d
->next
) {
1857 write_unlock_bh(&dev_base_lock
);
1859 if (dev
->destructor
)
1860 dev
->destructor(dev
);
1864 write_unlock_bh(&dev_base_lock
);
1870 * Initialize the DEV module. At boot time this walks the device list and
1871 * unhooks any devices that fail to initialise (normally hardware not
1872 * present) and leaves us with a valid list of present and active devices.
1875 extern int lance_init(void);
1876 extern int bpq_init(void);
1877 extern int scc_init(void);
1878 extern void sdla_setup(void);
1879 extern void dlci_setup(void);
1880 extern int dmascc_init(void);
1881 extern int sm_init(void);
1883 extern int baycom_ser_fdx_init(void);
1884 extern int baycom_ser_hdx_init(void);
1885 extern int baycom_par_init(void);
1887 extern int lapbeth_init(void);
1888 extern void arcnet_init(void);
1889 extern void ip_auto_config(void);
1891 extern int cpm_enet_init(void);
1892 #endif /* CONFIG_8xx */
1894 #ifdef CONFIG_PROC_FS
1895 static struct proc_dir_entry proc_net_dev
= {
1896 PROC_NET_DEV
, 3, "dev",
1897 S_IFREG
| S_IRUGO
, 1, 0, 0,
1898 0, &proc_net_inode_operations
,
1903 #ifdef CONFIG_NET_RADIO
1904 #ifdef CONFIG_PROC_FS
1905 static struct proc_dir_entry proc_net_wireless
= {
1906 PROC_NET_WIRELESS
, 8, "wireless",
1907 S_IFREG
| S_IRUGO
, 1, 0, 0,
1908 0, &proc_net_inode_operations
,
1909 dev_get_wireless_info
1911 #endif /* CONFIG_PROC_FS */
1912 #endif /* CONFIG_NET_RADIO */
1914 __initfunc(int net_dev_init(void))
1916 struct device
*dev
, **dp
;
1918 #ifdef CONFIG_NET_SCHED
1923 * Initialise the packet receive queue.
1926 skb_queue_head_init(&backlog
);
1929 * The bridge has to be up before the devices
1932 #ifdef CONFIG_BRIDGE
1937 * This is Very Ugly(tm).
1939 * Some devices want to be initialized early..
1942 #if defined(CONFIG_SCC)
1945 #if defined(CONFIG_DMASCC)
1948 #if defined(CONFIG_BPQETHER)
1951 #if defined(CONFIG_DLCI)
1954 #if defined(CONFIG_SDLA)
1957 #if defined(CONFIG_BAYCOM_PAR)
1960 #if defined(CONFIG_BAYCOM_SER_FDX)
1961 baycom_ser_fdx_init();
1963 #if defined(CONFIG_BAYCOM_SER_HDX)
1964 baycom_ser_hdx_init();
1966 #if defined(CONFIG_SOUNDMODEM)
1969 #if defined(CONFIG_LAPBETHER)
1972 #if defined(CONFIG_PLIP)
1975 #if defined(CONFIG_ARCNET)
1978 #if defined(CONFIG_8xx)
1982 * SLHC if present needs attaching so other people see it
1983 * even if not opened.
1987 #if (defined(CONFIG_SLIP) && defined(CONFIG_SLIP_COMPRESSED)) \
1988 || defined(CONFIG_PPP) \
1989 || (defined(CONFIG_ISDN) && defined(CONFIG_ISDN_PPP))
1994 #ifdef CONFIG_NET_PROFILE
1996 NET_PROFILE_REGISTER(dev_queue_xmit
);
1997 NET_PROFILE_REGISTER(net_bh
);
1999 NET_PROFILE_REGISTER(net_bh_skb
);
2004 * If the call to dev->init fails, the dev is removed
2005 * from the chain disconnecting the device until the
2010 write_lock_bh(&dev_base_lock
);
2011 while ((dev
= *dp
) != NULL
) {
2013 if (dev
->init
&& dev
->init(dev
)) {
2015 * It failed to come up. Unhook it.
2020 write_unlock_bh(&dev_base_lock
);
2021 dev
->ifindex
= dev_new_index();
2022 write_lock_bh(&dev_base_lock
);
2023 if (dev
->iflink
== -1)
2024 dev
->iflink
= dev
->ifindex
;
2025 dev_init_scheduler(dev
);
2028 write_unlock_bh(&dev_base_lock
);
2030 #ifdef CONFIG_PROC_FS
2031 proc_net_register(&proc_net_dev
);
2033 struct proc_dir_entry
*ent
= create_proc_entry("net/dev_stat", 0, 0);
2034 ent
->read_proc
= dev_proc_stats
;
2038 #ifdef CONFIG_NET_RADIO
2039 #ifdef CONFIG_PROC_FS
2040 proc_net_register(&proc_net_wireless
);
2041 #endif /* CONFIG_PROC_FS */
2042 #endif /* CONFIG_NET_RADIO */
2044 init_bh(NET_BH
, net_bh
);
2050 #ifdef CONFIG_IP_PNP