Import 2.3.5
[davej-history.git] / net / core / dev.c
1 /*
2 * NET3 Protocol independent device support routines.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
9 * Derived from the non IP parts of dev.c 1.0.19
10 * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
14 * Additional Authors:
15 * Florian la Roche <rzsfl@rz.uni-sb.de>
16 * Alan Cox <gw4pts@gw4pts.ampr.org>
17 * David Hinds <dhinds@allegro.stanford.edu>
18 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
19 * Adam Sulmicki <adam@cfar.umd.edu>
21 * Changes:
22 * Alan Cox : device private ioctl copies fields back.
23 * Alan Cox : Transmit queue code does relevant stunts to
24 * keep the queue safe.
25 * Alan Cox : Fixed double lock.
26 * Alan Cox : Fixed promisc NULL pointer trap
27 * ???????? : Support the full private ioctl range
28 * Alan Cox : Moved ioctl permission check into drivers
29 * Tim Kordas : SIOCADDMULTI/SIOCDELMULTI
30 * Alan Cox : 100 backlog just doesn't cut it when
31 * you start doing multicast video 8)
32 * Alan Cox : Rewrote net_bh and list manager.
33 * Alan Cox : Fix ETH_P_ALL echoback lengths.
34 * Alan Cox : Took out transmit every packet pass
35 * Saved a few bytes in the ioctl handler
36 * Alan Cox : Network driver sets packet type before calling netif_rx. Saves
37 * a function call a packet.
38 * Alan Cox : Hashed net_bh()
39 * Richard Kooijman: Timestamp fixes.
40 * Alan Cox : Wrong field in SIOCGIFDSTADDR
41 * Alan Cox : Device lock protection.
42 * Alan Cox : Fixed nasty side effect of device close changes.
43 * Rudi Cilibrasi : Pass the right thing to set_mac_address()
44 * Dave Miller : 32bit quantity for the device lock to make it work out
45 * on a Sparc.
46 * Bjorn Ekwall : Added KERNELD hack.
47 * Alan Cox : Cleaned up the backlog initialise.
48 * Craig Metz : SIOCGIFCONF fix if space for under
49 * 1 device.
50 * Thomas Bogendoerfer : Return ENODEV for dev_open, if there
51 * is no device open function.
52 * Andi Kleen : Fix error reporting for SIOCGIFCONF
53 * Michael Chastain : Fix signed/unsigned for SIOCGIFCONF
54 * Cyrus Durgin : Cleaned for KMOD
55 * Adam Sulmicki : Bug Fix : Network Device Unload
56 * A network device unload needs to purge
57 * the backlog queue.
58 * Paul Rusty Russell : SIOCSIFNAME
 */
61 #include <asm/uaccess.h>
62 #include <asm/system.h>
63 #include <asm/bitops.h>
64 #include <linux/config.h>
65 #include <linux/types.h>
66 #include <linux/kernel.h>
67 #include <linux/sched.h>
68 #include <linux/string.h>
69 #include <linux/mm.h>
70 #include <linux/socket.h>
71 #include <linux/sockios.h>
72 #include <linux/errno.h>
73 #include <linux/interrupt.h>
74 #include <linux/if_ether.h>
75 #include <linux/netdevice.h>
76 #include <linux/etherdevice.h>
77 #include <linux/notifier.h>
78 #include <linux/skbuff.h>
79 #include <net/sock.h>
80 #include <linux/rtnetlink.h>
81 #include <net/slhc.h>
82 #include <linux/proc_fs.h>
83 #include <linux/stat.h>
84 #include <net/br.h>
85 #include <net/dst.h>
86 #include <net/pkt_sched.h>
87 #include <net/profile.h>
88 #include <linux/init.h>
89 #include <linux/kmod.h>
90 #ifdef CONFIG_NET_RADIO
91 #include <linux/wireless.h>
92 #endif /* CONFIG_NET_RADIO */
93 #ifdef CONFIG_PLIP
94 extern int plip_init(void);
95 #endif
97 NET_PROFILE_DEFINE(dev_queue_xmit)
98 NET_PROFILE_DEFINE(net_bh)
99 NET_PROFILE_DEFINE(net_bh_skb)
102 const char *if_port_text[] = {
103 "unknown",
104 "BNC",
105 "10baseT",
106 "AUI",
107 "100baseT",
108 "100baseTX",
109 "100baseFX"
113 * The list of packet types we will receive (as opposed to discard)
114 * and the routines to invoke.
116 * Why 16? Because with 16 the only overlap we get on a hash of the
117 * low nibble of the protocol value is RARP/SNAP/X.25.
119 * 0800 IP
120 * 0001 802.3
121 * 0002 AX.25
122 * 0004 802.2
123 * 8035 RARP
124 * 0005 SNAP
125 * 0805 X.25
126 * 0806 ARP
127 * 8137 IPX
128 * 0009 Localtalk
129 * 86DD IPv6
132 static struct packet_type *ptype_base[16]; /* 16 way hashed list */
133 static struct packet_type *ptype_all = NULL; /* Taps */
134 static rwlock_t ptype_lock = RW_LOCK_UNLOCKED;
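/*
 * Worked example (added for illustration): the bucket index is the low
 * nibble of the host-order protocol value, i.e. ntohs(pt->type) & 15, so
 * ETH_P_IP (0x0800) lands in bucket 0 and ETH_P_ARP (0x0806) in bucket 6.
 */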
137 * Device list lock. Taking it guarantees that an interface
138 * will not disappear unexpectedly while the kernel sleeps.
141 atomic_t dev_lockct = ATOMIC_INIT(0);
144 * Our notifier list
147 static struct notifier_block *netdev_chain=NULL;
150 * Device drivers call our routines to queue packets here. We empty the
151 * queue in the bottom half handler.
154 static struct sk_buff_head backlog;
156 #ifdef CONFIG_NET_FASTROUTE
157 int netdev_fastroute;
158 int netdev_fastroute_obstacles;
159 struct net_fastroute_stats dev_fastroute_stat;
160 #endif
162 static void dev_clear_backlog(struct device *dev);
165 /******************************************************************************************
167 Protocol management and registration routines
169 *******************************************************************************************/
172 * For efficiency
175 int netdev_nit=0;
178 * Add a protocol ID to the list. Now that the input handler is
179 * smarter we can dispense with all the messy stuff that used to be
180 * here.
182 * BEWARE!!! Protocol handlers that mangle input packets
183 * MUST BE last in the hash buckets, and checking protocol handlers
184 * MUST start from the promiscuous ptype_all chain in net_bh.
185 * It is true now, do not change it.
186 * Explanation follows: if a protocol handler that mangles packets
187 * were first in the list, it could not sense that the packet
188 * is cloned and should be copied-on-write, so it would
189 * change it and subsequent readers would get a broken packet.
190 * --ANK (980803)
193 void dev_add_pack(struct packet_type *pt)
195 int hash;
196 #ifdef CONFIG_NET_FASTROUTE
197 /* Hack to detect packet socket */
198 if (pt->data) {
199 netdev_fastroute_obstacles++;
200 dev_clear_fastroute(pt->dev);
202 #endif
203 write_lock_bh(&ptype_lock);
204 if(pt->type==htons(ETH_P_ALL))
206 netdev_nit++;
207 pt->next=ptype_all;
208 ptype_all=pt;
210 else
212 hash=ntohs(pt->type)&15;
213 pt->next = ptype_base[hash];
214 ptype_base[hash] = pt;
216 write_unlock_bh(&ptype_lock);
221 * Remove a protocol ID from the list.
224 void dev_remove_pack(struct packet_type *pt)
226 struct packet_type **pt1;
227 if(pt->type==htons(ETH_P_ALL))
229 netdev_nit--;
230 pt1=&ptype_all;
232 else
233 pt1=&ptype_base[ntohs(pt->type)&15];
234 write_lock_bh(&ptype_lock);
235 for(; (*pt1)!=NULL; pt1=&((*pt1)->next))
237 if(pt==(*pt1))
239 *pt1=pt->next;
240 #ifdef CONFIG_NET_FASTROUTE
241 if (pt->data)
242 netdev_fastroute_obstacles--;
243 #endif
244 write_unlock_bh(&ptype_lock);
245 return;
248 write_unlock_bh(&ptype_lock);
249 printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
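/*
 * Illustrative sketch (not part of the original file): a protocol module
 * typically hooks the receive path as below. The handler name and the
 * static packet_type instance are hypothetical; the field order follows
 * struct packet_type (type, dev, func, data, next).
 */
#if 0
static int my_proto_rcv(struct sk_buff *skb, struct device *dev,
			struct packet_type *pt)
{
	/* Consume (or at least free) every skb handed to us. */
	kfree_skb(skb);
	return 0;
}

static struct packet_type my_proto_type = {
	0,		/* type: set with htons() before registering */
	NULL,		/* dev: NULL means "any device" */
	my_proto_rcv,	/* func: called from net_bh() */
	NULL,		/* data: non-NULL marks a packet-socket style tap */
	NULL		/* next: managed by dev_add_pack() */
};

static void my_proto_hook(void)
{
	my_proto_type.type = htons(ETH_P_IP);
	dev_add_pack(&my_proto_type);
	/* ... and dev_remove_pack(&my_proto_type) on unload */
}
#endif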
252 /*****************************************************************************************
254 Device Interface Subroutines
256 ******************************************************************************************/
259 * Find an interface by name.
262 struct device *dev_get(const char *name)
264 struct device *dev;
266 read_lock_bh(&dev_base_lock);
267 for (dev = dev_base; dev != NULL; dev = dev->next) {
268 if (strcmp(dev->name, name) == 0)
269 goto out;
271 out:
272 read_unlock_bh(&dev_base_lock);
273 return dev;
276 struct device * dev_get_by_index(int ifindex)
278 struct device *dev;
280 read_lock_bh(&dev_base_lock);
281 for (dev = dev_base; dev != NULL; dev = dev->next) {
282 if (dev->ifindex == ifindex)
283 goto out;
285 out:
286 read_unlock_bh(&dev_base_lock);
287 return dev;
290 struct device *dev_getbyhwaddr(unsigned short type, char *ha)
292 struct device *dev;
294 read_lock_bh(&dev_base_lock);
295 for (dev = dev_base; dev != NULL; dev = dev->next) {
296 if (dev->type == type &&
297 memcmp(dev->dev_addr, ha, dev->addr_len) == 0)
298 goto out;
300 out:
301 read_unlock_bh(&dev_base_lock);
302 return dev;
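/*
 * Illustrative sketch (not part of the original file): typical use of the
 * lookup helpers above. The interface name is just an example; note that
 * this kernel returns the device pointer without taking a reference.
 */
#if 0
static void my_lookup_example(void)
{
	struct device *dev = dev_get("eth0");

	if (dev != NULL)
		printk(KERN_DEBUG "eth0 is ifindex %d\n", dev->ifindex);
}
#endif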
306 * Passed a format string - eg "lt%d" - it will try to find a suitable
307 * id. Not efficient for many devices, but it is not called a lot..
310 int dev_alloc_name(struct device *dev, const char *name)
312 int i;
314 * If you need over 100 please also fix the algorithm...
316 for(i=0;i<100;i++)
318 sprintf(dev->name,name,i);
319 if(dev_get(dev->name)==NULL)
320 return i;
322 return -ENFILE; /* Over 100 of the things .. bail out! */
325 struct device *dev_alloc(const char *name, int *err)
327 struct device *dev=kmalloc(sizeof(struct device)+16, GFP_KERNEL);
328 if(dev==NULL)
330 *err=-ENOBUFS;
331 return NULL;
333 dev->name=(char *)(dev+1); /* Name string space */
334 *err=dev_alloc_name(dev,name);
335 if(*err<0)
337 kfree(dev);
338 return NULL;
340 return dev;
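/*
 * Illustrative sketch (not part of the original file): how a driver might
 * use dev_alloc() to get a device with a formatted name. The "mydev%d"
 * template and my_device_init() are hypothetical.
 */
#if 0
static struct device *my_create_device(void)
{
	int err;
	struct device *dev = dev_alloc("mydev%d", &err);

	if (dev == NULL)
		return NULL;		/* err is -ENOBUFS or -ENFILE */
	dev->init = my_device_init;	/* hypothetical init hook */
	if (register_netdevice(dev) != 0) {
		kfree(dev);
		return NULL;
	}
	return dev;
}
#endif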
343 void netdev_state_change(struct device *dev)
345 if (dev->flags&IFF_UP)
346 notifier_call_chain(&netdev_chain, NETDEV_CHANGE, dev);
351 * Find and possibly load an interface.
354 #ifdef CONFIG_KMOD
356 void dev_load(const char *name)
358 if(!dev_get(name) && capable(CAP_SYS_MODULE))
359 request_module(name);
362 #else
364 extern inline void dev_load(const char *unused){;}
366 #endif
368 static int default_rebuild_header(struct sk_buff *skb)
370 printk(KERN_DEBUG "%s: default_rebuild_header called -- BUG!\n", skb->dev ? skb->dev->name : "NULL!!!");
371 kfree_skb(skb);
372 return 1;
376 * Prepare an interface for use.
379 int dev_open(struct device *dev)
381 int ret = 0;
384 * Is it already up?
387 if (dev->flags&IFF_UP)
388 return 0;
390 /* Setup the lock before we open the faucet. */
391 spin_lock_init(&dev->xmit_lock);
394 * Call device private open method
397 if (dev->open)
398 ret = dev->open(dev);
401 * If it went open OK then:
404 if (ret == 0)
407 * Set a nil rebuild_header routine
408 * that should never be called; it is used just as a bug trap.
411 if (dev->rebuild_header == NULL)
412 dev->rebuild_header = default_rebuild_header;
415 * Set the flags.
417 dev->flags |= (IFF_UP | IFF_RUNNING);
420 * Initialize multicasting status
422 dev_mc_upload(dev);
425 * Wakeup transmit queue engine
427 dev_activate(dev);
430 * ... and announce new interface.
432 notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
435 return(ret);
438 #ifdef CONFIG_NET_FASTROUTE
440 static __inline__ void dev_do_clear_fastroute(struct device *dev)
442 if (dev->accept_fastpath) {
443 int i;
445 for (i=0; i<=NETDEV_FASTROUTE_HMASK; i++)
446 dst_release_irqwait(xchg(dev->fastpath+i, NULL));
450 void dev_clear_fastroute(struct device *dev)
452 if (dev) {
453 dev_do_clear_fastroute(dev);
454 } else {
455 read_lock_bh(&dev_base_lock);
456 for (dev = dev_base; dev; dev = dev->next)
457 dev_do_clear_fastroute(dev);
458 read_unlock_bh(&dev_base_lock);
461 #endif
464 * Completely shutdown an interface.
467 int dev_close(struct device *dev)
469 if (!(dev->flags&IFF_UP))
470 return 0;
472 dev_deactivate(dev);
474 dev_lock_wait();
477 * Call the device specific close. This cannot fail.
478 * Only called if the device is UP.
481 if (dev->stop)
482 dev->stop(dev);
484 if (dev->start)
485 printk("dev_close: bug %s still running\n", dev->name);
488 * Device is now down.
490 dev_clear_backlog(dev);
492 dev->flags&=~(IFF_UP|IFF_RUNNING);
493 #ifdef CONFIG_NET_FASTROUTE
494 dev_clear_fastroute(dev);
495 #endif
498 * Tell people we are going down
500 notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);
502 return(0);
507 * Device change register/unregister. These are not inline or static
508 * as we export them to the world.
511 int register_netdevice_notifier(struct notifier_block *nb)
513 return notifier_chain_register(&netdev_chain, nb);
516 int unregister_netdevice_notifier(struct notifier_block *nb)
518 return notifier_chain_unregister(&netdev_chain,nb);
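/*
 * Illustrative sketch (not part of the original file): a subsystem that
 * tracks interface state registers a notifier as below. The callback and
 * block names are hypothetical; the events are the NETDEV_* values that
 * this file posts on netdev_chain.
 */
#if 0
static int my_netdev_event(struct notifier_block *this,
			   unsigned long event, void *ptr)
{
	struct device *dev = (struct device *)ptr;

	switch (event) {
	case NETDEV_UP:
	case NETDEV_DOWN:
		printk(KERN_DEBUG "%s changed state\n", dev->name);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block my_netdev_notifier = {
	my_netdev_event,	/* notifier_call */
	NULL,			/* next */
	0			/* priority */
};

static void my_notifier_hook(void)
{
	register_netdevice_notifier(&my_netdev_notifier);
	/* ... and unregister_netdevice_notifier() on teardown */
}
#endif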
522 * Support routine. Sends outgoing frames to any network
523 * taps currently in use.
526 void dev_queue_xmit_nit(struct sk_buff *skb, struct device *dev)
528 struct packet_type *ptype;
529 get_fast_time(&skb->stamp);
531 read_lock(&ptype_lock);
532 for (ptype = ptype_all; ptype!=NULL; ptype = ptype->next)
534 /* Never send packets back to the socket
535 * they originated from - MvS (miquels@drinkel.ow.org)
537 if ((ptype->dev == dev || !ptype->dev) &&
538 ((struct sock *)ptype->data != skb->sk))
540 struct sk_buff *skb2;
541 if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL)
542 break;
544 /* The code following below is wrong.
546 The only reason it works is that
547 ONLY packet sockets receive outgoing
548 packets. If such a packet were (occasionally)
549 received by a normal packet handler, which expects
550 the mac header to have been pulled...
553 /* More sensible variant: skb->nh should be correctly
554 set by the sender, so the second statement is
555 just protection against buggy protocols.
557 skb2->mac.raw = skb2->data;
559 if (skb2->nh.raw < skb2->data || skb2->nh.raw >= skb2->tail) {
560 if (net_ratelimit())
561 printk(KERN_DEBUG "protocol %04x is buggy, dev %s\n", skb2->protocol, dev->name);
562 skb2->nh.raw = skb2->data;
563 if (dev->hard_header)
564 skb2->nh.raw += dev->hard_header_len;
567 skb2->h.raw = skb2->nh.raw;
568 skb2->pkt_type = PACKET_OUTGOING;
569 ptype->func(skb2, skb->dev, ptype);
572 read_unlock(&ptype_lock);
576 * Fast path for loopback frames.
579 void dev_loopback_xmit(struct sk_buff *skb)
581 struct sk_buff *newskb=skb_clone(skb, GFP_ATOMIC);
582 if (newskb==NULL)
583 return;
585 newskb->mac.raw = newskb->data;
586 skb_pull(newskb, newskb->nh.raw - newskb->data);
587 newskb->pkt_type = PACKET_LOOPBACK;
588 newskb->ip_summed = CHECKSUM_UNNECESSARY;
589 if (newskb->dst==NULL)
590 printk(KERN_DEBUG "BUG: packet without dst looped back 1\n");
591 netif_rx(newskb);
594 int dev_queue_xmit(struct sk_buff *skb)
596 struct device *dev = skb->dev;
597 struct Qdisc *q;
599 #ifdef CONFIG_NET_PROFILE
600 start_bh_atomic();
601 NET_PROFILE_ENTER(dev_queue_xmit);
602 #endif
604 spin_lock_bh(&dev->xmit_lock);
605 q = dev->qdisc;
606 if (q->enqueue) {
607 q->enqueue(skb, q);
608 qdisc_wakeup(dev);
609 spin_unlock_bh(&dev->xmit_lock);
611 #ifdef CONFIG_NET_PROFILE
612 NET_PROFILE_LEAVE(dev_queue_xmit);
613 end_bh_atomic();
614 #endif
616 return 0;
619 /* The device has no queue. Common case for software devices:
620 loopback, all the sorts of tunnels...
622 Really, it is unlikely that bh protection is necessary here:
623 virtual devices do not generate EOI events.
624 However, it is possible, that they rely on bh protection
625 made by us here.
627 if (dev->flags&IFF_UP) {
628 if (netdev_nit)
629 dev_queue_xmit_nit(skb,dev);
630 if (dev->hard_start_xmit(skb, dev) == 0) {
631 spin_unlock_bh(&dev->xmit_lock);
633 #ifdef CONFIG_NET_PROFILE
634 NET_PROFILE_LEAVE(dev_queue_xmit);
635 end_bh_atomic();
636 #endif
638 return 0;
640 if (net_ratelimit())
641 printk(KERN_DEBUG "Virtual device %s asks to queue packet!\n", dev->name);
643 spin_unlock_bh(&dev->xmit_lock);
645 kfree_skb(skb);
647 #ifdef CONFIG_NET_PROFILE
648 NET_PROFILE_LEAVE(dev_queue_xmit);
649 end_bh_atomic();
650 #endif
652 return 0;
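/*
 * Illustrative sketch (not part of the original file): a protocol sending a
 * fully built frame hands it to dev_queue_xmit() with skb->dev set; the
 * device's queueing discipline (or hard_start_xmit() for queueless virtual
 * devices) takes over from there. The helper name is hypothetical.
 */
#if 0
static int my_send_frame(struct sk_buff *skb, struct device *dev)
{
	/* The link-layer header must already be built at skb->data. */
	skb->dev = dev;
	skb->priority = 1;
	return dev_queue_xmit(skb);
}
#endif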
656 /*=======================================================================
657 Receiver routines
658 =======================================================================*/
660 int netdev_dropping = 0;
661 int netdev_max_backlog = 300;
662 atomic_t netdev_rx_dropped;
663 #ifdef CONFIG_CPU_IS_SLOW
664 int net_cpu_congestion;
665 #endif
667 #ifdef CONFIG_NET_HW_FLOWCONTROL
668 int netdev_throttle_events;
669 static unsigned long netdev_fc_mask = 1;
670 unsigned long netdev_fc_xoff = 0;
672 static struct
674 void (*stimul)(struct device *);
675 struct device *dev;
676 } netdev_fc_slots[32];
678 int netdev_register_fc(struct device *dev, void (*stimul)(struct device *dev))
680 int bit = 0;
681 unsigned long flags;
683 save_flags(flags);
684 cli();
685 if (netdev_fc_mask != ~0UL) {
686 bit = ffz(netdev_fc_mask);
687 netdev_fc_slots[bit].stimul = stimul;
688 netdev_fc_slots[bit].dev = dev;
689 set_bit(bit, &netdev_fc_mask);
690 clear_bit(bit, &netdev_fc_xoff);
692 restore_flags(flags);
693 return bit;
696 void netdev_unregister_fc(int bit)
698 unsigned long flags;
700 save_flags(flags);
701 cli();
702 if (bit > 0) {
703 netdev_fc_slots[bit].stimul = NULL;
704 netdev_fc_slots[bit].dev = NULL;
705 clear_bit(bit, &netdev_fc_mask);
706 clear_bit(bit, &netdev_fc_xoff);
708 restore_flags(flags);
711 static void netdev_wakeup(void)
713 unsigned long xoff;
715 cli();
716 xoff = netdev_fc_xoff;
717 netdev_fc_xoff = 0;
718 netdev_dropping = 0;
719 netdev_throttle_events++;
720 while (xoff) {
721 int i = ffz(~xoff);
722 xoff &= ~(1<<i);
723 netdev_fc_slots[i].stimul(netdev_fc_slots[i].dev);
725 sti();
727 #endif
729 static void dev_clear_backlog(struct device *dev)
731 struct sk_buff *prev, *curr;
735 * Let us now clear the backlog queue. -AS
737 * We are competing here both with netif_rx() and net_bh().
738 * We don't want either of those to mess with skb ptrs
739 * while we work on them, thus cli()/sti().
741 * It looks better to use net_bh trick, at least
742 * to be sure, that we keep interrupt latency really low. --ANK (980727)
745 if (backlog.qlen) {
746 start_bh_atomic();
747 curr = backlog.next;
748 while ( curr != (struct sk_buff *)(&backlog) ) {
749 unsigned long flags;
750 curr=curr->next;
751 if ( curr->prev->dev == dev ) {
752 prev = curr->prev;
753 spin_lock_irqsave(&backlog.lock, flags);
754 __skb_unlink(prev, &backlog);
755 spin_unlock_irqrestore(&backlog.lock, flags);
756 kfree_skb(prev);
759 end_bh_atomic();
760 #ifdef CONFIG_NET_HW_FLOWCONTROL
761 if (netdev_dropping)
762 netdev_wakeup();
763 #else
764 netdev_dropping = 0;
765 #endif
770 * Receive a packet from a device driver and queue it for the upper
771 * (protocol) levels. It always succeeds.
774 void netif_rx(struct sk_buff *skb)
776 #ifndef CONFIG_CPU_IS_SLOW
777 if(skb->stamp.tv_sec==0)
778 get_fast_time(&skb->stamp);
779 #else
780 skb->stamp = xtime;
781 #endif
783 /* The code is rearranged so that the path is shortest
784 when the CPU is congested but still operating.
787 if (backlog.qlen <= netdev_max_backlog) {
788 if (backlog.qlen) {
789 if (netdev_dropping == 0) {
790 skb_queue_tail(&backlog,skb);
791 mark_bh(NET_BH);
792 return;
794 atomic_inc(&netdev_rx_dropped);
795 kfree_skb(skb);
796 return;
798 #ifdef CONFIG_NET_HW_FLOWCONTROL
799 if (netdev_dropping)
800 netdev_wakeup();
801 #else
802 netdev_dropping = 0;
803 #endif
804 skb_queue_tail(&backlog,skb);
805 mark_bh(NET_BH);
806 return;
808 netdev_dropping = 1;
809 atomic_inc(&netdev_rx_dropped);
810 kfree_skb(skb);
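/*
 * Illustrative sketch (not part of the original file): the receive side of
 * an Ethernet driver feeds frames to netif_rx() like this. As the changelog
 * above notes, the driver sets skb->protocol (here via eth_type_trans())
 * before calling netif_rx(). The names and the 2-byte alignment pad are the
 * usual driver idiom, not anything mandated by this file.
 */
#if 0
static void my_driver_rx(struct device *dev, unsigned char *buf, int len)
{
	struct sk_buff *skb = dev_alloc_skb(len + 2);

	if (skb == NULL)
		return;				/* drop under memory pressure */
	skb->dev = dev;
	skb_reserve(skb, 2);			/* align the IP header */
	memcpy(skb_put(skb, len), buf, len);
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);				/* queue for net_bh() */
}
#endif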
813 #ifdef CONFIG_BRIDGE
814 static inline void handle_bridge(struct sk_buff *skb, unsigned short type)
816 if (br_stats.flags & BR_UP && br_protocol_ok(ntohs(type)))
819 * We pass the bridge a complete frame. This means
820 * recovering the MAC header first.
823 int offset;
825 skb=skb_clone(skb, GFP_ATOMIC);
826 if(skb==NULL)
827 return;
829 offset=skb->data-skb->mac.raw;
830 skb_push(skb,offset); /* Put header back on for bridge */
832 if(br_receive_frame(skb))
833 return;
834 kfree_skb(skb);
836 return;
838 #endif
842 * When we are called the queue is ready to grab, the interrupts are
843 * on and hardware can interrupt and queue to the receive queue as we
844 * run with no problems.
845 * This is run as a bottom half after an interrupt handler that does
846 * mark_bh(NET_BH);
849 void net_bh(void)
851 struct packet_type *ptype;
852 struct packet_type *pt_prev;
853 unsigned short type;
854 unsigned long start_time = jiffies;
855 #ifdef CONFIG_CPU_IS_SLOW
856 static unsigned long start_busy = 0;
857 static unsigned long ave_busy = 0;
859 if (start_busy == 0)
860 start_busy = start_time;
861 net_cpu_congestion = ave_busy>>8;
862 #endif
864 NET_PROFILE_ENTER(net_bh);
866 * Can we send anything now? We want to clear the
867 * decks for any more sends that get done as we
868 * process the input. This also minimises the
869 * latency on a transmit interrupt bh.
872 if (qdisc_head.forw != &qdisc_head)
873 qdisc_run_queues();
876 * Any data left to process. This may occur because a
877 * mark_bh() is done after we empty the queue including
878 * that from the device which does a mark_bh() just after
882 * While the queue is not empty..
884 * Note that the queue never shrinks due to
885 * an interrupt, so we can do this test without
886 * disabling interrupts.
889 while (!skb_queue_empty(&backlog))
891 struct sk_buff * skb;
893 /* Give chance to other bottom halves to run */
894 if (jiffies - start_time > 1)
895 goto net_bh_break;
898 * We have a packet. Therefore the queue has shrunk
900 skb = skb_dequeue(&backlog);
902 #ifdef CONFIG_CPU_IS_SLOW
903 if (ave_busy > 128*16) {
904 kfree_skb(skb);
905 while ((skb = skb_dequeue(&backlog)) != NULL)
906 kfree_skb(skb);
907 break;
909 #endif
912 #if 0
913 NET_PROFILE_SKB_PASSED(skb, net_bh_skb);
914 #endif
915 #ifdef CONFIG_NET_FASTROUTE
916 if (skb->pkt_type == PACKET_FASTROUTE) {
917 dev_queue_xmit(skb);
918 continue;
920 #endif
923 * Bump the pointer to the next structure.
925 * On entry to the protocol layer, skb->data and
926 * skb->nh.raw point to the MAC and encapsulated data.
929 /* XXX until we figure out every place to modify.. */
930 skb->h.raw = skb->nh.raw = skb->data;
932 if (skb->mac.raw < skb->head || skb->mac.raw > skb->data) {
933 printk(KERN_CRIT "%s: wrong mac.raw ptr, proto=%04x\n", skb->dev->name, skb->protocol);
934 kfree_skb(skb);
935 continue;
939 * Fetch the packet protocol ID.
942 type = skb->protocol;
944 #ifdef CONFIG_BRIDGE
946 * If we are bridging then pass the frame up to the
947 * bridging code (if this protocol is to be bridged).
948 * If it is bridged then move on
950 handle_bridge(skb, type);
951 #endif
954 * We got a packet ID. Now loop over the "known protocols"
955 * list. There are two lists. The ptype_all list of taps (normally empty)
956 * and the main protocol list which is hashed perfectly for normal protocols.
959 pt_prev = NULL;
960 read_lock(&ptype_lock);
961 for (ptype = ptype_all; ptype!=NULL; ptype=ptype->next)
963 if (!ptype->dev || ptype->dev == skb->dev) {
964 if(pt_prev)
966 struct sk_buff *skb2=skb_clone(skb, GFP_ATOMIC);
967 if(skb2)
968 pt_prev->func(skb2,skb->dev, pt_prev);
970 pt_prev=ptype;
974 for (ptype = ptype_base[ntohs(type)&15]; ptype != NULL; ptype = ptype->next)
976 if (ptype->type == type && (!ptype->dev || ptype->dev==skb->dev))
979 * We already have a match queued. Deliver
980 * to it and then remember the new match
982 if(pt_prev)
984 struct sk_buff *skb2;
986 skb2=skb_clone(skb, GFP_ATOMIC);
989 * Kick the protocol handler. This should be fast
990 * and efficient code.
993 if(skb2)
994 pt_prev->func(skb2, skb->dev, pt_prev);
996 /* Remember the current last to do */
997 pt_prev=ptype;
999 } /* End of protocol list loop */
1002 * Is there a last item to send to ?
1005 if(pt_prev)
1006 pt_prev->func(skb, skb->dev, pt_prev);
1008 * Has an unknown packet been received ?
1011 else {
1012 kfree_skb(skb);
1014 read_unlock(&ptype_lock);
1015 } /* End of queue loop */
1018 * We have emptied the queue
1022 * One last output flush.
1025 if (qdisc_head.forw != &qdisc_head)
1026 qdisc_run_queues();
1028 #ifdef CONFIG_CPU_IS_SLOW
1029 if (1) {
1030 unsigned long start_idle = jiffies;
1031 ave_busy += ((start_idle - start_busy)<<3) - (ave_busy>>4);
1032 start_busy = 0;
1034 #endif
1035 #ifdef CONFIG_NET_HW_FLOWCONTROL
1036 if (netdev_dropping)
1037 netdev_wakeup();
1038 #else
1039 netdev_dropping = 0;
1040 #endif
1041 NET_PROFILE_LEAVE(net_bh);
1042 return;
1044 net_bh_break:
1045 mark_bh(NET_BH);
1046 NET_PROFILE_LEAVE(net_bh);
1047 return;
1050 /* Protocol dependent address dumping routines */
1052 static gifconf_func_t * gifconf_list [NPROTO];
1054 int register_gifconf(unsigned int family, gifconf_func_t * gifconf)
1056 if (family>=NPROTO)
1057 return -EINVAL;
1058 gifconf_list[family] = gifconf;
1059 return 0;
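/*
 * Illustrative sketch (not part of the original file): an address family
 * exports its per-device SIOCGIFCONF entries by registering a dump callback.
 * The callback below is a hypothetical stub; a real one writes ifreq records
 * into buf (when buf is not NULL) and returns the number of bytes used.
 */
#if 0
static int my_gifconf(struct device *dev, char *buf, int len)
{
	return 0;	/* this family has no addresses on dev */
}

static void my_gifconf_hook(void)
{
	register_gifconf(AF_INET, my_gifconf);
}
#endif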
1064 * Map an interface index to its name (SIOCGIFNAME)
1068 * This call is useful, but I'd remove it too.
1070 * The reason is purely aesthetic: it is the only call
1071 * in the SIOC* family that uses struct ifreq in a reversed manner.
1072 * Besides that, it is pretty silly to put a "drawing" facility
1073 * into the kernel; it is useful only for printing ifindices
1074 * in readable form, isn't it? --ANK
1076 * We need this ioctl for efficient implementation of the
1077 * if_indextoname() function required by the IPv6 API. Without
1078 * it, we would have to search all the interfaces to find a
1079 * match. --pb
1082 static int dev_ifname(struct ifreq *arg)
1084 struct device *dev;
1085 struct ifreq ifr;
1086 int err;
1089 * Fetch the caller's info block.
1092 err = copy_from_user(&ifr, arg, sizeof(struct ifreq));
1093 if (err)
1094 return -EFAULT;
1096 dev = dev_get_by_index(ifr.ifr_ifindex);
1097 if (!dev)
1098 return -ENODEV;
1100 strcpy(ifr.ifr_name, dev->name);
1102 err = copy_to_user(arg, &ifr, sizeof(struct ifreq));
1103 return (err)?-EFAULT:0;
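/*
 * Illustrative user-space sketch (not part of the original file): this ioctl
 * is essentially what if_indextoname() boils down to. Error handling is
 * trimmed for brevity.
 *
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	ifr.ifr_ifindex = ifindex;
 *	if (ioctl(fd, SIOCGIFNAME, &ifr) == 0)
 *		printf("index %d is %s\n", ifindex, ifr.ifr_name);
 */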
1107 * Perform a SIOCGIFCONF call. This structure will change
1108 * size eventually, and there is nothing I can do about it.
1109 * Thus we will need a 'compatibility mode'.
1112 static int dev_ifconf(char *arg)
1114 struct ifconf ifc;
1115 struct device *dev;
1116 char *pos;
1117 int len;
1118 int total;
1119 int i;
1122 * Fetch the caller's info block.
1125 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
1126 return -EFAULT;
1128 len = ifc.ifc_len;
1129 if (ifc.ifc_buf) {
1130 pos = (char *) kmalloc(len, GFP_KERNEL);
1131 if(pos == NULL)
1132 return -ENOBUFS;
1133 } else
1134 pos = NULL;
1137 * Loop over the interfaces, and write an info block for each.
1140 total = 0;
1141 read_lock_bh(&dev_base_lock);
1142 for (dev = dev_base; dev != NULL; dev = dev->next) {
1143 for (i=0; i<NPROTO; i++) {
1144 if (gifconf_list[i]) {
1145 int done;
1146 if (pos==NULL) {
1147 done = gifconf_list[i](dev, NULL, 0);
1148 } else {
1149 done = gifconf_list[i](dev, pos+total, len-total);
1151 total += done;
1155 read_unlock_bh(&dev_base_lock);
1157 if(pos != NULL) {
1158 int err = copy_to_user(ifc.ifc_buf, pos, total);
1160 kfree(pos);
1161 if(err)
1162 return -EFAULT;
1166 * All done. Write the updated control block back to the caller.
1168 ifc.ifc_len = total;
1170 if (copy_to_user(arg, &ifc, sizeof(struct ifconf)))
1171 return -EFAULT;
1174 * Both BSD and Solaris return 0 here, so we do too.
1176 return 0;
1180 * This is invoked by the /proc filesystem handler to display a device
1181 * in detail.
1184 #ifdef CONFIG_PROC_FS
1185 static int sprintf_stats(char *buffer, struct device *dev)
1187 struct net_device_stats *stats = (dev->get_stats ? dev->get_stats(dev): NULL);
1188 int size;
1190 if (stats)
1191 size = sprintf(buffer, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu %8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
1192 dev->name,
1193 stats->rx_bytes,
1194 stats->rx_packets, stats->rx_errors,
1195 stats->rx_dropped + stats->rx_missed_errors,
1196 stats->rx_fifo_errors,
1197 stats->rx_length_errors + stats->rx_over_errors
1198 + stats->rx_crc_errors + stats->rx_frame_errors,
1199 stats->rx_compressed, stats->multicast,
1200 stats->tx_bytes,
1201 stats->tx_packets, stats->tx_errors, stats->tx_dropped,
1202 stats->tx_fifo_errors, stats->collisions,
1203 stats->tx_carrier_errors + stats->tx_aborted_errors
1204 + stats->tx_window_errors + stats->tx_heartbeat_errors,
1205 stats->tx_compressed);
1206 else
1207 size = sprintf(buffer, "%6s: No statistics available.\n", dev->name);
1209 return size;
1213 * Called from the PROCfs module. This now uses the new arbitrary sized /proc/net interface
1214 * to create /proc/net/dev
1217 int dev_get_info(char *buffer, char **start, off_t offset, int length, int dummy)
1219 int len=0;
1220 off_t begin=0;
1221 off_t pos=0;
1222 int size;
1224 struct device *dev;
1227 size = sprintf(buffer,
1228 "Inter-| Receive | Transmit\n"
1229 " face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed\n");
1231 pos+=size;
1232 len+=size;
1235 read_lock_bh(&dev_base_lock);
1236 for (dev = dev_base; dev != NULL; dev = dev->next) {
1237 size = sprintf_stats(buffer+len, dev);
1238 len+=size;
1239 pos=begin+len;
1241 if(pos<offset) {
1242 len=0;
1243 begin=pos;
1245 if(pos>offset+length)
1246 break;
1248 read_unlock_bh(&dev_base_lock);
1250 *start=buffer+(offset-begin); /* Start of wanted data */
1251 len-=(offset-begin); /* Start slop */
1252 if(len>length)
1253 len=length; /* Ending slop */
1254 return len;
1257 static int dev_proc_stats(char *buffer, char **start, off_t offset,
1258 int length, int *eof, void *data)
1260 int len;
1262 len = sprintf(buffer, "%08x %08x %08x %08x %08x\n",
1263 atomic_read(&netdev_rx_dropped),
1264 #ifdef CONFIG_NET_HW_FLOWCONTROL
1265 netdev_throttle_events,
1266 #else
1268 #endif
1269 #ifdef CONFIG_NET_FASTROUTE
1270 dev_fastroute_stat.hits,
1271 dev_fastroute_stat.succeed,
1272 dev_fastroute_stat.deferred
1273 #else
1274 0, 0, 0
1275 #endif
1278 len -= offset;
1280 if (len > length)
1281 len = length;
1282 if(len < 0)
1283 len = 0;
1285 *start = buffer + offset;
1286 *eof = 1;
1288 return len;
1291 #endif /* CONFIG_PROC_FS */
1294 #ifdef CONFIG_NET_RADIO
1295 #ifdef CONFIG_PROC_FS
1298 * Print one entry of /proc/net/wireless
1299 * This is a clone of /proc/net/dev (just above)
1301 static int sprintf_wireless_stats(char *buffer, struct device *dev)
1303 /* Get stats from the driver */
1304 struct iw_statistics *stats = (dev->get_wireless_stats ?
1305 dev->get_wireless_stats(dev) :
1306 (struct iw_statistics *) NULL);
1307 int size;
1309 if(stats != (struct iw_statistics *) NULL)
1310 size = sprintf(buffer,
1311 "%6s: %02x %3d%c %3d%c %3d%c %5d %5d %5d\n",
1312 dev->name,
1313 stats->status,
1314 stats->qual.qual,
1315 stats->qual.updated & 1 ? '.' : ' ',
1316 stats->qual.level,
1317 stats->qual.updated & 2 ? '.' : ' ',
1318 stats->qual.noise,
1319 stats->qual.updated & 3 ? '.' : ' ',
1320 stats->discard.nwid,
1321 stats->discard.code,
1322 stats->discard.misc);
1323 else
1324 size = 0;
1326 return size;
1330 * Print info for /proc/net/wireless (print all entries)
1331 * This is a clone of /proc/net/dev (just above)
1333 int dev_get_wireless_info(char * buffer, char **start, off_t offset,
1334 int length, int dummy)
1336 int len = 0;
1337 off_t begin = 0;
1338 off_t pos = 0;
1339 int size;
1341 struct device * dev;
1343 size = sprintf(buffer,
1344 "Inter-|sta| Quality | Discarded packets\n"
1345 " face |tus|link level noise| nwid crypt misc\n");
1347 pos+=size;
1348 len+=size;
1350 read_lock_bh(&dev_base_lock);
1351 for(dev = dev_base; dev != NULL; dev = dev->next) {
1352 size = sprintf_wireless_stats(buffer+len, dev);
1353 len+=size;
1354 pos=begin+len;
1356 if(pos < offset) {
1357 len=0;
1358 begin=pos;
1360 if(pos > offset + length)
1361 break;
1363 read_unlock_bh(&dev_base_lock);
1365 *start = buffer + (offset - begin); /* Start of wanted data */
1366 len -= (offset - begin); /* Start slop */
1367 if(len > length)
1368 len = length; /* Ending slop */
1370 return len;
1372 #endif /* CONFIG_PROC_FS */
1373 #endif /* CONFIG_NET_RADIO */
1375 void dev_set_promiscuity(struct device *dev, int inc)
1377 unsigned short old_flags = dev->flags;
1379 dev->flags |= IFF_PROMISC;
1380 if ((dev->promiscuity += inc) == 0)
1381 dev->flags &= ~IFF_PROMISC;
1382 if (dev->flags^old_flags) {
1383 #ifdef CONFIG_NET_FASTROUTE
1384 if (dev->flags&IFF_PROMISC) {
1385 netdev_fastroute_obstacles++;
1386 dev_clear_fastroute(dev);
1387 } else
1388 netdev_fastroute_obstacles--;
1389 #endif
1390 dev_mc_upload(dev);
1391 printk(KERN_INFO "device %s %s promiscuous mode\n",
1392 dev->name, (dev->flags&IFF_PROMISC) ? "entered" : "left");
1396 void dev_set_allmulti(struct device *dev, int inc)
1398 unsigned short old_flags = dev->flags;
1400 dev->flags |= IFF_ALLMULTI;
1401 if ((dev->allmulti += inc) == 0)
1402 dev->flags &= ~IFF_ALLMULTI;
1403 if (dev->flags^old_flags)
1404 dev_mc_upload(dev);
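/*
 * Illustrative sketch (not part of the original file): promiscuity and
 * allmulti are reference counts, so in-kernel users pair a +1 with a later
 * -1 instead of touching IFF_PROMISC directly. The tap hooks below are
 * hypothetical.
 */
#if 0
static void my_tap_open(struct device *dev)
{
	dev_set_promiscuity(dev, 1);	/* start seeing all frames on dev */
}

static void my_tap_close(struct device *dev)
{
	dev_set_promiscuity(dev, -1);	/* drop our reference */
}
#endif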
1407 int dev_change_flags(struct device *dev, unsigned flags)
1409 int ret;
1410 int old_flags = dev->flags;
1413 * Set the flags on our device.
1416 dev->flags = (flags & (IFF_DEBUG|IFF_NOTRAILERS|IFF_RUNNING|IFF_NOARP|
1417 IFF_SLAVE|IFF_MASTER|IFF_DYNAMIC|
1418 IFF_MULTICAST|IFF_PORTSEL|IFF_AUTOMEDIA)) |
1419 (dev->flags & (IFF_UP|IFF_VOLATILE|IFF_PROMISC|IFF_ALLMULTI));
1422 * Load in the correct multicast list now the flags have changed.
1425 dev_mc_upload(dev);
1428 * Have we downed the interface. We handle IFF_UP ourselves
1429 * according to user attempts to set it, rather than blindly
1430 * setting it.
1433 ret = 0;
1434 if ((old_flags^flags)&IFF_UP) /* Bit is different ? */
1436 ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
1438 if (ret == 0)
1439 dev_mc_upload(dev);
1442 if (dev->flags&IFF_UP &&
1443 ((old_flags^dev->flags)&~(IFF_UP|IFF_RUNNING|IFF_PROMISC|IFF_ALLMULTI|IFF_VOLATILE)))
1444 notifier_call_chain(&netdev_chain, NETDEV_CHANGE, dev);
1446 if ((flags^dev->gflags)&IFF_PROMISC) {
1447 int inc = (flags&IFF_PROMISC) ? +1 : -1;
1448 dev->gflags ^= IFF_PROMISC;
1449 dev_set_promiscuity(dev, inc);
1452 /* NOTE: the order of synchronization of IFF_PROMISC and IFF_ALLMULTI
1453 is important. Some (broken) drivers set IFF_PROMISC when
1454 IFF_ALLMULTI is requested, without asking us and without reporting it.
1456 if ((flags^dev->gflags)&IFF_ALLMULTI) {
1457 int inc = (flags&IFF_ALLMULTI) ? +1 : -1;
1458 dev->gflags ^= IFF_ALLMULTI;
1459 dev_set_allmulti(dev, inc);
1462 return ret;
1466 * Perform the SIOCxIFxxx calls.
1469 static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd)
1471 struct device *dev;
1472 int err;
1474 if ((dev = dev_get(ifr->ifr_name)) == NULL)
1475 return -ENODEV;
1477 switch(cmd)
1479 case SIOCGIFFLAGS: /* Get interface flags */
1480 ifr->ifr_flags = (dev->flags&~(IFF_PROMISC|IFF_ALLMULTI))
1481 |(dev->gflags&(IFF_PROMISC|IFF_ALLMULTI));
1482 return 0;
1484 case SIOCSIFFLAGS: /* Set interface flags */
1485 return dev_change_flags(dev, ifr->ifr_flags);
1487 case SIOCGIFMETRIC: /* Get the metric on the interface (currently unused) */
1488 ifr->ifr_metric = 0;
1489 return 0;
1491 case SIOCSIFMETRIC: /* Set the metric on the interface (currently unused) */
1492 return -EOPNOTSUPP;
1494 case SIOCGIFMTU: /* Get the MTU of a device */
1495 ifr->ifr_mtu = dev->mtu;
1496 return 0;
1498 case SIOCSIFMTU: /* Set the MTU of a device */
1499 if (ifr->ifr_mtu == dev->mtu)
1500 return 0;
1503 * MTU must be positive.
1506 if (ifr->ifr_mtu<0)
1507 return -EINVAL;
1509 if (dev->change_mtu)
1510 err = dev->change_mtu(dev, ifr->ifr_mtu);
1511 else {
1512 dev->mtu = ifr->ifr_mtu;
1513 err = 0;
1515 if (!err && dev->flags&IFF_UP)
1516 notifier_call_chain(&netdev_chain, NETDEV_CHANGEMTU, dev);
1517 return err;
1519 case SIOCGIFHWADDR:
1520 memcpy(ifr->ifr_hwaddr.sa_data,dev->dev_addr, MAX_ADDR_LEN);
1521 ifr->ifr_hwaddr.sa_family=dev->type;
1522 return 0;
1524 case SIOCSIFHWADDR:
1525 if(dev->set_mac_address==NULL)
1526 return -EOPNOTSUPP;
1527 if(ifr->ifr_hwaddr.sa_family!=dev->type)
1528 return -EINVAL;
1529 err=dev->set_mac_address(dev,&ifr->ifr_hwaddr);
1530 if (!err)
1531 notifier_call_chain(&netdev_chain, NETDEV_CHANGEADDR, dev);
1532 return err;
1534 case SIOCSIFHWBROADCAST:
1535 if(ifr->ifr_hwaddr.sa_family!=dev->type)
1536 return -EINVAL;
1537 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data, MAX_ADDR_LEN);
1538 notifier_call_chain(&netdev_chain, NETDEV_CHANGEADDR, dev);
1539 return 0;
1541 case SIOCGIFMAP:
1542 ifr->ifr_map.mem_start=dev->mem_start;
1543 ifr->ifr_map.mem_end=dev->mem_end;
1544 ifr->ifr_map.base_addr=dev->base_addr;
1545 ifr->ifr_map.irq=dev->irq;
1546 ifr->ifr_map.dma=dev->dma;
1547 ifr->ifr_map.port=dev->if_port;
1548 return 0;
1550 case SIOCSIFMAP:
1551 if (dev->set_config)
1552 return dev->set_config(dev,&ifr->ifr_map);
1553 return -EOPNOTSUPP;
1555 case SIOCADDMULTI:
1556 if(dev->set_multicast_list==NULL ||
1557 ifr->ifr_hwaddr.sa_family!=AF_UNSPEC)
1558 return -EINVAL;
1559 dev_mc_add(dev,ifr->ifr_hwaddr.sa_data, dev->addr_len, 1);
1560 return 0;
1562 case SIOCDELMULTI:
1563 if(dev->set_multicast_list==NULL ||
1564 ifr->ifr_hwaddr.sa_family!=AF_UNSPEC)
1565 return -EINVAL;
1566 dev_mc_delete(dev,ifr->ifr_hwaddr.sa_data,dev->addr_len, 1);
1567 return 0;
1569 case SIOCGIFINDEX:
1570 ifr->ifr_ifindex = dev->ifindex;
1571 return 0;
1573 case SIOCGIFTXQLEN:
1574 ifr->ifr_qlen = dev->tx_queue_len;
1575 return 0;
1577 case SIOCSIFTXQLEN:
1578 if(ifr->ifr_qlen<0)
1579 return -EINVAL;
1580 dev->tx_queue_len = ifr->ifr_qlen;
1581 return 0;
1583 case SIOCSIFNAME:
1584 if (dev->flags&IFF_UP)
1585 return -EBUSY;
1586 if (dev_get(ifr->ifr_newname))
1587 return -EEXIST;
1588 memcpy(dev->name, ifr->ifr_newname, IFNAMSIZ);
1589 dev->name[IFNAMSIZ-1] = 0;
1590 notifier_call_chain(&netdev_chain, NETDEV_CHANGENAME, dev);
1591 return 0;
1594 * Unknown or private ioctl
1597 default:
1598 if(cmd >= SIOCDEVPRIVATE &&
1599 cmd <= SIOCDEVPRIVATE + 15) {
1600 if (dev->do_ioctl)
1601 return dev->do_ioctl(dev, ifr, cmd);
1602 return -EOPNOTSUPP;
1605 #ifdef CONFIG_NET_RADIO
1606 if(cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) {
1607 if (dev->do_ioctl)
1608 return dev->do_ioctl(dev, ifr, cmd);
1609 return -EOPNOTSUPP;
1611 #endif /* CONFIG_NET_RADIO */
1614 return -EINVAL;
1619 * This function handles all "interface"-type I/O control requests. The actual
1620 * 'doing' part of this is dev_ifsioc above.
1623 int dev_ioctl(unsigned int cmd, void *arg)
1625 struct ifreq ifr;
1626 int ret;
1627 char *colon;
1629 /* One special case: SIOCGIFCONF takes ifconf argument
1630 and requires shared lock, because it sleeps writing
1631 to user space.
1634 if (cmd == SIOCGIFCONF) {
1635 rtnl_shlock();
1636 ret = dev_ifconf((char *) arg);
1637 rtnl_shunlock();
1638 return ret;
1640 if (cmd == SIOCGIFNAME) {
1641 return dev_ifname((struct ifreq *)arg);
1644 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
1645 return -EFAULT;
1647 ifr.ifr_name[IFNAMSIZ-1] = 0;
1649 colon = strchr(ifr.ifr_name, ':');
1650 if (colon)
1651 *colon = 0;
1654 * See which interface the caller is talking about.
1657 switch(cmd)
1660 * These ioctl calls:
1661 * - can be done by all.
1662 * - atomic and do not require locking.
1663 * - return a value
1666 case SIOCGIFFLAGS:
1667 case SIOCGIFMETRIC:
1668 case SIOCGIFMTU:
1669 case SIOCGIFHWADDR:
1670 case SIOCGIFSLAVE:
1671 case SIOCGIFMAP:
1672 case SIOCGIFINDEX:
1673 case SIOCGIFTXQLEN:
1674 dev_load(ifr.ifr_name);
1675 ret = dev_ifsioc(&ifr, cmd);
1676 if (!ret) {
1677 if (colon)
1678 *colon = ':';
1679 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
1680 return -EFAULT;
1682 return ret;
1685 * These ioctl calls:
1686 * - require superuser power.
1687 * - require strict serialization.
1688 * - do not return a value
1691 case SIOCSIFFLAGS:
1692 case SIOCSIFMETRIC:
1693 case SIOCSIFMTU:
1694 case SIOCSIFMAP:
1695 case SIOCSIFHWADDR:
1696 case SIOCSIFSLAVE:
1697 case SIOCADDMULTI:
1698 case SIOCDELMULTI:
1699 case SIOCSIFHWBROADCAST:
1700 case SIOCSIFTXQLEN:
1701 case SIOCSIFNAME:
1702 if (!capable(CAP_NET_ADMIN))
1703 return -EPERM;
1704 dev_load(ifr.ifr_name);
1705 rtnl_lock();
1706 ret = dev_ifsioc(&ifr, cmd);
1707 rtnl_unlock();
1708 return ret;
1710 case SIOCGIFMEM:
1711 /* Get the per device memory space. We can add this but currently
1712 do not support it */
1713 case SIOCSIFMEM:
1714 /* Set the per device memory buffer space. Not applicable in our case */
1715 case SIOCSIFLINK:
1716 return -EINVAL;
1719 * Unknown or private ioctl.
1722 default:
1723 if (cmd >= SIOCDEVPRIVATE &&
1724 cmd <= SIOCDEVPRIVATE + 15) {
1725 dev_load(ifr.ifr_name);
1726 rtnl_lock();
1727 ret = dev_ifsioc(&ifr, cmd);
1728 rtnl_unlock();
1729 if (!ret && copy_to_user(arg, &ifr, sizeof(struct ifreq)))
1730 return -EFAULT;
1731 return ret;
1733 #ifdef CONFIG_NET_RADIO
1734 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) {
1735 dev_load(ifr.ifr_name);
1736 if (IW_IS_SET(cmd)) {
1737 if (!suser())
1738 return -EPERM;
1739 rtnl_lock();
1741 ret = dev_ifsioc(&ifr, cmd);
1742 if (IW_IS_SET(cmd))
1743 rtnl_unlock();
1744 if (!ret && IW_IS_GET(cmd) &&
1745 copy_to_user(arg, &ifr, sizeof(struct ifreq)))
1746 return -EFAULT;
1747 return ret;
1749 #endif /* CONFIG_NET_RADIO */
1750 return -EINVAL;
1754 int dev_new_index(void)
1756 static int ifindex;
1757 for (;;) {
1758 if (++ifindex <= 0)
1759 ifindex=1;
1760 if (dev_get_by_index(ifindex) == NULL)
1761 return ifindex;
1765 static int dev_boot_phase = 1;
1768 int register_netdevice(struct device *dev)
1770 struct device *d, **dp;
1772 if (dev_boot_phase) {
1773 /* This is NOT a bug, but I am not sure that all the
1774 devices initialized before the netdev module is started
1775 are sane.
1777 For now they are chained onto the device boot list
1778 and probed later. If a module is initialized
1779 before netdev but assumes that dev->init
1780 is really called by register_netdev(), it will fail.
1782 That is why this message should be printed for a while.
1784 printk(KERN_INFO "early initialization of device %s is deferred\n", dev->name);
1786 /* Check for existence, and append to tail of chain */
1787 write_lock_bh(&dev_base_lock);
1788 for (dp=&dev_base; (d=*dp) != NULL; dp=&d->next) {
1789 if (d == dev || strcmp(d->name, dev->name) == 0) {
1790 write_unlock_bh(&dev_base_lock);
1791 return -EEXIST;
1794 dev->next = NULL;
1795 *dp = dev;
1796 write_unlock_bh(&dev_base_lock);
1797 return 0;
1800 dev->iflink = -1;
1802 /* Init, if this function is available */
1803 if (dev->init && dev->init(dev) != 0)
1804 return -EIO;
1806 /* Check for existence, and append to tail of chain */
1807 write_lock_bh(&dev_base_lock);
1808 for (dp=&dev_base; (d=*dp) != NULL; dp=&d->next) {
1809 if (d == dev || strcmp(d->name, dev->name) == 0) {
1810 write_unlock_bh(&dev_base_lock);
1811 return -EEXIST;
1814 dev->next = NULL;
1815 dev_init_scheduler(dev);
1816 *dp = dev;
1817 write_unlock_bh(&dev_base_lock);
1819 dev->ifindex = -1;
1820 dev->ifindex = dev_new_index();
1821 if (dev->iflink == -1)
1822 dev->iflink = dev->ifindex;
1824 /* Notify protocols, that a new device appeared. */
1825 notifier_call_chain(&netdev_chain, NETDEV_REGISTER, dev);
1827 return 0;
1830 int unregister_netdevice(struct device *dev)
1832 struct device *d, **dp;
1834 if (dev_boot_phase == 0) {
1835 /* If the device is running, close it.
1836 This is a very bad idea; really we should
1837 complain loudly here, but random hackery
1838 in linux/drivers/net likes it.
1840 if (dev->flags & IFF_UP)
1841 dev_close(dev);
1843 #ifdef CONFIG_NET_FASTROUTE
1844 dev_clear_fastroute(dev);
1845 #endif
1847 /* Shutdown queueing discipline. */
1848 dev_shutdown(dev);
1850 /* Notify protocols, that we are about to destroy
1851 this device. They should clean all the things.
1853 notifier_call_chain(&netdev_chain, NETDEV_UNREGISTER, dev);
1856 * Flush the multicast chain
1858 dev_mc_discard(dev);
1860 /* To avoid pointers pointing to nowhere,
1861 we wait for the end of the critical section */
1862 dev_lock_wait();
1865 /* And unlink it from device chain. */
1866 write_lock_bh(&dev_base_lock);
1867 for (dp = &dev_base; (d=*dp) != NULL; dp=&d->next) {
1868 if (d == dev) {
1869 *dp = d->next;
1870 d->next = NULL;
1871 write_unlock_bh(&dev_base_lock);
1873 if (dev->destructor)
1874 dev->destructor(dev);
1875 return 0;
1878 write_unlock_bh(&dev_base_lock);
1879 return -ENODEV;
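/*
 * Illustrative sketch (not part of the original file): a modular driver
 * usually reaches these functions through the register_netdev() /
 * unregister_netdev() wrappers from its module entry points. The device,
 * name and init routine below are hypothetical.
 */
#if 0
static char my_name[IFNAMSIZ];
static struct device my_dev = { my_name, };

int init_module(void)
{
	strcpy(my_name, "mydev0");
	my_dev.init = my_device_init;	/* hypothetical probe/init */
	return register_netdev(&my_dev);
}

void cleanup_module(void)
{
	unregister_netdev(&my_dev);
}
#endif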
1884 * Initialize the DEV module. At boot time this walks the device list and
1885 * unhooks any devices that fail to initialise (normally hardware not
1886 * present) and leaves us with a valid list of present and active devices.
1889 extern int lance_init(void);
1890 extern int bpq_init(void);
1891 extern int scc_init(void);
1892 extern void sdla_setup(void);
1893 extern void dlci_setup(void);
1894 extern int dmascc_init(void);
1895 extern int sm_init(void);
1897 extern int baycom_ser_fdx_init(void);
1898 extern int baycom_ser_hdx_init(void);
1899 extern int baycom_par_init(void);
1901 extern int lapbeth_init(void);
1902 extern void arcnet_init(void);
1903 extern void ip_auto_config(void);
1904 #ifdef CONFIG_8xx
1905 extern int cpm_enet_init(void);
1906 #endif /* CONFIG_8xx */
1908 #ifdef CONFIG_PROC_FS
1909 static struct proc_dir_entry proc_net_dev = {
1910 PROC_NET_DEV, 3, "dev",
1911 S_IFREG | S_IRUGO, 1, 0, 0,
1912 0, &proc_net_inode_operations,
1913 dev_get_info
1915 #endif
1917 #ifdef CONFIG_NET_RADIO
1918 #ifdef CONFIG_PROC_FS
1919 static struct proc_dir_entry proc_net_wireless = {
1920 PROC_NET_WIRELESS, 8, "wireless",
1921 S_IFREG | S_IRUGO, 1, 0, 0,
1922 0, &proc_net_inode_operations,
1923 dev_get_wireless_info
1925 #endif /* CONFIG_PROC_FS */
1926 #endif /* CONFIG_NET_RADIO */
1928 __initfunc(int net_dev_init(void))
1930 struct device *dev, **dp;
1932 #ifdef CONFIG_NET_SCHED
1933 pktsched_init();
1934 #endif
1937 * Initialise the packet receive queue.
1940 skb_queue_head_init(&backlog);
1943 * The bridge has to be up before the devices
1946 #ifdef CONFIG_BRIDGE
1947 br_init();
1948 #endif
1951 * This is Very Ugly(tm).
1953 * Some devices want to be initialized early..
1956 #if defined(CONFIG_SCC)
1957 scc_init();
1958 #endif
1959 #if defined(CONFIG_DMASCC)
1960 dmascc_init();
1961 #endif
1962 #if defined(CONFIG_BPQETHER)
1963 bpq_init();
1964 #endif
1965 #if defined(CONFIG_DLCI)
1966 dlci_setup();
1967 #endif
1968 #if defined(CONFIG_SDLA)
1969 sdla_setup();
1970 #endif
1971 #if defined(CONFIG_BAYCOM_PAR)
1972 baycom_par_init();
1973 #endif
1974 #if defined(CONFIG_BAYCOM_SER_FDX)
1975 baycom_ser_fdx_init();
1976 #endif
1977 #if defined(CONFIG_BAYCOM_SER_HDX)
1978 baycom_ser_hdx_init();
1979 #endif
1980 #if defined(CONFIG_SOUNDMODEM)
1981 sm_init();
1982 #endif
1983 #if defined(CONFIG_LAPBETHER)
1984 lapbeth_init();
1985 #endif
1986 #if defined(CONFIG_PLIP)
1987 plip_init();
1988 #endif
1989 #if defined(CONFIG_ARCNET)
1990 arcnet_init();
1991 #endif
1992 #if defined(CONFIG_8xx)
1993 cpm_enet_init();
1994 #endif
1996 * SLHC if present needs attaching so other people see it
1997 * even if not opened.
2000 #ifdef CONFIG_INET
2001 #if (defined(CONFIG_SLIP) && defined(CONFIG_SLIP_COMPRESSED)) \
2002 || defined(CONFIG_PPP) \
2003 || (defined(CONFIG_ISDN) && defined(CONFIG_ISDN_PPP))
2004 slhc_install();
2005 #endif
2006 #endif
2008 #ifdef CONFIG_NET_PROFILE
2009 net_profile_init();
2010 NET_PROFILE_REGISTER(dev_queue_xmit);
2011 NET_PROFILE_REGISTER(net_bh);
2012 #if 0
2013 NET_PROFILE_REGISTER(net_bh_skb);
2014 #endif
2015 #endif
2017 * Add the devices.
2018 * If the call to dev->init fails, the dev is removed
2019 * from the chain disconnecting the device until the
2020 * next reboot.
2023 dp = &dev_base;
2024 while ((dev = *dp) != NULL) {
2025 dev->iflink = -1;
2026 if (dev->init && dev->init(dev)) {
2028 * It failed to come up. Unhook it.
2030 *dp = dev->next;
2031 } else {
2032 dp = &dev->next;
2033 dev->ifindex = dev_new_index();
2034 if (dev->iflink == -1)
2035 dev->iflink = dev->ifindex;
2036 dev_init_scheduler(dev);
2040 #ifdef CONFIG_PROC_FS
2041 proc_net_register(&proc_net_dev);
2043 struct proc_dir_entry *ent = create_proc_entry("net/dev_stat", 0, 0);
2044 ent->read_proc = dev_proc_stats;
2046 #endif
2048 #ifdef CONFIG_NET_RADIO
2049 #ifdef CONFIG_PROC_FS
2050 proc_net_register(&proc_net_wireless);
2051 #endif /* CONFIG_PROC_FS */
2052 #endif /* CONFIG_NET_RADIO */
2054 init_bh(NET_BH, net_bh);
2056 dev_boot_phase = 0;
2058 dev_mcast_init();
2060 #ifdef CONFIG_IP_PNP
2061 ip_auto_config();
2062 #endif
2064 return 0;