/*
 *	NET3:	Token ring device handling subroutines
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:	3 Feb 97 Paul Norton <pnorton@cts.com> Minor routing fixes.
 *		Added rif table to /proc/net/tr_rif and rif timeout to
 *		/proc/sys/net/token-ring/rif_timeout.
 *		22 Jun 98 Paul Norton <p.norton@computer.org> Rearranged
 *		tr_header and tr_type_trans to handle passing IPX SNAP and
 *		802.2 through the correct layers. Eliminated tr_reformat.
 */
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/trdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/net.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <net/arp.h>
#include <net/net_namespace.h>
static void tr_add_rif_info(struct trh_hdr *trh, struct net_device *dev);
static void rif_check_expire(unsigned long dummy);

#define TR_SR_DEBUG 0
/*
 *	Each RIF entry we learn is kept this way
 */
struct rif_cache {
	unsigned char addr[TR_ALEN];
	int iface;
	__be16 rcf;
	__be16 rseg[8];
	struct rif_cache *next;
	unsigned long last_used;
	unsigned char local_ring;
};
#define RIF_TABLE_SIZE	32

/*
 *	We hash the RIF cache 32 ways. We do after all have to look it
 *	up a lot.
 */
static struct rif_cache *rif_table[RIF_TABLE_SIZE];

static DEFINE_SPINLOCK(rif_lock);
/*
 *	Garbage disposal timer.
 */
static struct timer_list rif_timer;

static int sysctl_tr_rif_timeout = 60*10*HZ;
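
/*
 * RIF entries idle for longer than this many jiffies (ten minutes by
 * default) are discarded by rif_check_expire().  The value is tunable at
 * run time through /proc/sys/net/token-ring/rif_timeout.
 */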
static inline unsigned long rif_hash(const unsigned char *addr)
{
	unsigned long x;

	x = addr[0];
	x = (x << 2) ^ addr[1];
	x = (x << 2) ^ addr[2];
	x = (x << 2) ^ addr[3];
	x = (x << 2) ^ addr[4];
	x = (x << 2) ^ addr[5];

	x ^= x >> 8;

	return x & (RIF_TABLE_SIZE - 1);
}
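
/*
 * The hash folds all six bytes of the MAC address together with shift/XOR
 * steps and keeps only the low bits, so the result is always a valid index
 * into the 32-entry rif_table (RIF_TABLE_SIZE - 1 acts as the mask).
 */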
/*
 *	Put the headers on a token ring packet. Token ring source routing
 *	makes this a little more exciting than on ethernet.
 */
static int tr_header(struct sk_buff *skb, struct net_device *dev,
		     unsigned short type,
		     const void *daddr, const void *saddr, unsigned len)
{
	struct trh_hdr *trh;
	int hdr_len;

	/*
	 * Add the 802.2 SNAP header if this is IP, since the IPv4/IPv6
	 * code calls dev->hard_header directly.
	 */
	if (type == ETH_P_IP || type == ETH_P_IPV6 || type == ETH_P_ARP)
	{
		struct trllc *trllc;

		hdr_len = sizeof(struct trh_hdr) + sizeof(struct trllc);
		trh = (struct trh_hdr *)skb_push(skb, hdr_len);
		trllc = (struct trllc *)(trh+1);
		trllc->dsap = trllc->ssap = EXTENDED_SAP;
		trllc->llc = UI_CMD;
		trllc->protid[0] = trllc->protid[1] = trllc->protid[2] = 0x00;
		trllc->ethertype = htons(type);
	}
	else
	{
		hdr_len = sizeof(struct trh_hdr);
		trh = (struct trh_hdr *)skb_push(skb, hdr_len);
	}

	trh->ac = AC;
	trh->fc = LLC_FRAME;

	if (saddr)
		memcpy(trh->saddr, saddr, dev->addr_len);
	else
		memcpy(trh->saddr, dev->dev_addr, dev->addr_len);

	/*
	 *	Build the destination and then source route the frame
	 */
	if (daddr)
	{
		memcpy(trh->daddr, daddr, dev->addr_len);
		tr_source_route(skb, trh, dev);
		return hdr_len;
	}

	return -hdr_len;
}
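
/*
 * Like other header_ops->create implementations, tr_header() returns the
 * number of bytes it pushed when the destination address is known, and the
 * negated header length when it is not, signalling that the header still has
 * to be completed later (see tr_rebuild_header() below).
 */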
/*
 *	A neighbour discovery of some species (eg arp) has completed. We
 *	can now send the packet.
 */
static int tr_rebuild_header(struct sk_buff *skb)
{
	struct trh_hdr *trh = (struct trh_hdr *)skb->data;
	struct trllc *trllc = (struct trllc *)(skb->data + sizeof(struct trh_hdr));
	struct net_device *dev = skb->dev;

	/*
	 *	FIXME: We don't yet support IPv6 over token rings
	 */
	if(trllc->ethertype != htons(ETH_P_IP)) {
		printk("tr_rebuild_header: Don't know how to resolve type %04X addresses ?\n", ntohs(trllc->ethertype));
		return 0;
	}

#ifdef CONFIG_INET
	if(arp_find(trh->daddr, skb)) {
		return 1;
	}
	else
#endif
	{
		tr_source_route(skb, trh, dev);
		return 0;
	}
}
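
/*
 * Note on the return value: arp_find() fills in trh->daddr and returns 0 once
 * the destination hardware address is known, and returns nonzero while
 * resolution is still outstanding; tr_rebuild_header() passes that result
 * back, only source-routing the frame when the destination is resolved.
 */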
/*
 *	Some of this is a bit hackish. We intercept RIF information
 *	used for source routing. We also grab IP directly and don't feed
 *	it via SNAP.
 */
__be16 tr_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	struct trh_hdr *trh;
	struct trllc *trllc;
	unsigned riflen = 0;

	skb->dev = dev;
	skb_reset_mac_header(skb);
	trh = tr_hdr(skb);

	if(trh->saddr[0] & TR_RII)
		riflen = (ntohs(trh->rcf) & TR_RCF_LEN_MASK) >> 8;

	trllc = (struct trllc *)(skb->data+sizeof(struct trh_hdr)-TR_MAXRIFLEN+riflen);

	skb_pull(skb,sizeof(struct trh_hdr)-TR_MAXRIFLEN+riflen);

	if(*trh->daddr & 0x80)
	{
		if(!memcmp(trh->daddr,dev->broadcast,TR_ALEN))
			skb->pkt_type=PACKET_BROADCAST;
		else
			skb->pkt_type=PACKET_MULTICAST;
	}
	else if ( (trh->daddr[0] & 0x01) && (trh->daddr[1] & 0x00) && (trh->daddr[2] & 0x5E))
	{
		skb->pkt_type=PACKET_MULTICAST;
	}
	else if(dev->flags & IFF_PROMISC)
	{
		if(memcmp(trh->daddr, dev->dev_addr, TR_ALEN))
			skb->pkt_type=PACKET_OTHERHOST;
	}

	if ((skb->pkt_type != PACKET_BROADCAST) &&
	    (skb->pkt_type != PACKET_MULTICAST))
		tr_add_rif_info(trh, dev);

	/*
	 * Strip the SNAP header from ARP packets since we don't
	 * pass them through to the 802.2/SNAP layers.
	 */
	if (trllc->dsap == EXTENDED_SAP &&
	    (trllc->ethertype == htons(ETH_P_IP) ||
	     trllc->ethertype == htons(ETH_P_IPV6) ||
	     trllc->ethertype == htons(ETH_P_ARP)))
	{
		skb_pull(skb, sizeof(struct trllc));
		return trllc->ethertype;
	}

	return htons(ETH_P_TR_802_2);
}
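
/*
 * Layout note: struct trh_hdr reserves room for the largest possible routing
 * information field (TR_MAXRIFLEN bytes).  When a frame carries a shorter RIF
 * of riflen bytes (taken from the length field of the RCF), the LLC header
 * starts TR_MAXRIFLEN - riflen bytes before the end of the nominal header,
 * which is why the trllc pointer and the skb_pull() above both subtract
 * TR_MAXRIFLEN and add riflen back in.
 */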
/*
 *	We try to do source routing...
 */
void tr_source_route(struct sk_buff *skb, struct trh_hdr *trh,
		     struct net_device *dev)
{
	int slack;
	unsigned int hash;
	struct rif_cache *entry;
	unsigned char *olddata;
	unsigned long flags;
	static const unsigned char mcast_func_addr[]
		= {0xC0,0x00,0x00,0x04,0x00,0x00};

	spin_lock_irqsave(&rif_lock, flags);

	/*
	 *	Broadcasts are single route as stated in RFC 1042
	 */
	if( (!memcmp(&(trh->daddr[0]),&(dev->broadcast[0]),TR_ALEN)) ||
	    (!memcmp(&(trh->daddr[0]),&(mcast_func_addr[0]), TR_ALEN)) )
	{
		trh->rcf=htons((((sizeof(trh->rcf)) << 8) & TR_RCF_LEN_MASK)
			       | TR_RCF_FRAME2K | TR_RCF_LIMITED_BROADCAST);
		trh->saddr[0]|=TR_RII;
	}
	else
	{
		hash = rif_hash(trh->daddr);
		/*
		 *	Walk the hash table and look for an entry
		 */
		for(entry=rif_table[hash];entry && memcmp(&(entry->addr[0]),&(trh->daddr[0]),TR_ALEN);entry=entry->next);

		/*
		 *	If we found an entry we can route the frame.
		 */
		if(entry)
		{
#if TR_SR_DEBUG
			printk("source routing for %pM\n", trh->daddr);
#endif
			if(!entry->local_ring && (ntohs(entry->rcf) & TR_RCF_LEN_MASK) >> 8)
			{
				trh->rcf=entry->rcf;
				memcpy(&trh->rseg[0],&entry->rseg[0],8*sizeof(unsigned short));
				trh->rcf^=htons(TR_RCF_DIR_BIT);
				trh->rcf&=htons(0x1fff);	/* Issam Chehab <ichehab@madge1.demon.co.uk> */
				trh->saddr[0]|=TR_RII;
#if TR_SR_DEBUG
				printk("entry found with rcf %04x\n", entry->rcf);
			}
			else
			{
				printk("entry found but without rcf length, local=%02x\n", entry->local_ring);
#endif
			}
			entry->last_used=jiffies;
		}
		else
		{
			/*
			 *	Without the information we simply have to shout
			 *	on the wire. The replies should rapidly clean this
			 *	situation up.
			 */
			trh->rcf=htons((((sizeof(trh->rcf)) << 8) & TR_RCF_LEN_MASK)
				       | TR_RCF_FRAME2K | TR_RCF_LIMITED_BROADCAST);
			trh->saddr[0]|=TR_RII;
#if TR_SR_DEBUG
			printk("no entry in rif table found - broadcasting frame\n");
#endif
		}
	}

	/* Compress the RIF here so we don't have to do it in the driver(s) */
	if (!(trh->saddr[0] & 0x80))
		slack = 18;
	else
		slack = 18 - ((ntohs(trh->rcf) & TR_RCF_LEN_MASK)>>8);
	olddata = skb->data;
	spin_unlock_irqrestore(&rif_lock, flags);

	skb_pull(skb, slack);
	memmove(skb->data, olddata, sizeof(struct trh_hdr) - slack);
}
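
/*
 * The "slack" computed above is the unused part of the 18-byte area that
 * struct trh_hdr reserves for routing information: all 18 bytes when no RIF
 * was attached, otherwise 18 minus the RIF length encoded in the RCF.
 * Pulling that many bytes and sliding the fixed part of the header forward
 * hands the driver a contiguous header with the RIF already compressed.
 */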
/*
 *	We have learned some new RIF information for our source
 *	routing.
 */
static void tr_add_rif_info(struct trh_hdr *trh, struct net_device *dev)
{
	unsigned int hash, rii_p = 0;
	unsigned long flags;
	struct rif_cache *entry;
	unsigned char saddr0;

	spin_lock_irqsave(&rif_lock, flags);
	saddr0 = trh->saddr[0];

	/*
	 *	Firstly see if the entry exists
	 */
	if(trh->saddr[0] & TR_RII)
	{
		trh->saddr[0]&=0x7f;
		if (((ntohs(trh->rcf) & TR_RCF_LEN_MASK) >> 8) > 2)
		{
			rii_p = 1;
		}
	}

	hash = rif_hash(trh->saddr);
	for(entry=rif_table[hash];entry && memcmp(&(entry->addr[0]),&(trh->saddr[0]),TR_ALEN);entry=entry->next);

	if(entry==NULL)
	{
#if TR_SR_DEBUG
		printk("adding rif_entry: addr:%pM rcf:%04X\n",
		       trh->saddr, ntohs(trh->rcf));
#endif
		/*
		 *	Allocate our new entry. A failure to allocate loses
		 *	us the information. This is harmless.
		 *
		 *	FIXME: We ought to keep some kind of cache size
		 *	limiting and adjust the timers to suit.
		 */
		entry=kmalloc(sizeof(struct rif_cache),GFP_ATOMIC);

		if(!entry)
		{
			printk(KERN_DEBUG "tr.c: Couldn't malloc rif cache entry !\n");
			spin_unlock_irqrestore(&rif_lock, flags);
			return;
		}

		memcpy(&(entry->addr[0]),&(trh->saddr[0]),TR_ALEN);
		entry->iface = dev->ifindex;
		entry->next=rif_table[hash];
		entry->last_used=jiffies;
		rif_table[hash]=entry;

		if (rii_p)
		{
			entry->rcf = trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK);
			memcpy(&(entry->rseg[0]),&(trh->rseg[0]),8*sizeof(unsigned short));
			entry->local_ring = 0;
		}
		else
		{
			entry->local_ring = 1;
		}
	}
	else	/* Y. Tahara added */
	{
		/*
		 *	Update existing entries
		 */
		if (!entry->local_ring)
			if (entry->rcf != (trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK)) &&
			    !(trh->rcf & htons(TR_RCF_BROADCAST_MASK)))
			{
#if TR_SR_DEBUG
				printk("updating rif_entry: addr:%pM rcf:%04X\n",
				       trh->saddr, ntohs(trh->rcf));
#endif
				entry->rcf = trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK);
				memcpy(&(entry->rseg[0]),&(trh->rseg[0]),8*sizeof(unsigned short));
			}
		entry->last_used=jiffies;
	}
	trh->saddr[0]=saddr0; /* put the routing indicator back for tcpdump */
	spin_unlock_irqrestore(&rif_lock, flags);
}
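
/*
 * rii_p is set only when the frame carried actual routing segments (an RCF
 * length greater than the two bytes of the RCF itself).  Frames without them
 * came from the local ring, so the entry is flagged local_ring and no route
 * descriptors are stored for it.
 */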
/*
 *	Scan the cache with a timer and see what we need to throw out.
 */
static void rif_check_expire(unsigned long dummy)
{
	int i;
	unsigned long flags, next_interval = jiffies + sysctl_tr_rif_timeout/2;

	spin_lock_irqsave(&rif_lock, flags);

	for(i = 0; i < RIF_TABLE_SIZE; i++) {
		struct rif_cache *entry, **pentry;

		pentry = rif_table+i;
		while((entry=*pentry) != NULL) {
			unsigned long expires
				= entry->last_used + sysctl_tr_rif_timeout;

			if (time_before_eq(expires, jiffies)) {
				*pentry = entry->next;
				kfree(entry);
			} else {
				pentry = &entry->next;

				if (time_before(expires, next_interval))
					next_interval = expires;
			}
		}
	}

	spin_unlock_irqrestore(&rif_lock, flags);

	mod_timer(&rif_timer, next_interval);
}
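
/*
 * The timer re-arms itself for whichever comes first: half a timeout period
 * from now, or the earliest expiry still pending in the table, so stale
 * entries are reaped close to the time they actually age out.
 */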
/*
 *	Generate the /proc/net information for the token ring RIF
 *	routing.
 */

#ifdef CONFIG_PROC_FS

static struct rif_cache *rif_get_idx(loff_t pos)
{
	int i;
	struct rif_cache *entry;
	loff_t off = 0;

	for(i = 0; i < RIF_TABLE_SIZE; i++)
		for(entry = rif_table[i]; entry; entry = entry->next) {
			if (off == pos)
				return entry;
			++off;
		}

	return NULL;
}
static void *rif_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(&rif_lock)
{
	spin_lock_irq(&rif_lock);

	return *pos ? rif_get_idx(*pos - 1) : SEQ_START_TOKEN;
}

static void *rif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	int i;
	struct rif_cache *ent = v;

	++*pos;

	if (v == SEQ_START_TOKEN) {
		i = -1;
		goto scan;
	}

	if (ent->next)
		return ent->next;

	i = rif_hash(ent->addr);
 scan:
	while (++i < RIF_TABLE_SIZE) {
		if ((ent = rif_table[i]) != NULL)
			return ent;
	}
	return NULL;
}

static void rif_seq_stop(struct seq_file *seq, void *v)
	__releases(&rif_lock)
{
	spin_unlock_irq(&rif_lock);
}
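
/*
 * Standard seq_file iteration: rif_seq_start() takes rif_lock for the whole
 * read pass and hands back SEQ_START_TOKEN first so that rif_seq_show() can
 * emit the column header, then rif_seq_next() walks each hash chain in turn.
 */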
static int rif_seq_show(struct seq_file *seq, void *v)
{
	int j, rcf_len, segment, brdgnmb;
	struct rif_cache *entry = v;

	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
			 "if     TR address       TTL   rcf   routing segments\n");
	else {
		struct net_device *dev = dev_get_by_index(&init_net, entry->iface);
		long ttl = (long) (entry->last_used + sysctl_tr_rif_timeout)
				- (long) jiffies;

		seq_printf(seq, "%s %pM %7li ",
			   dev ? dev->name : "?",
			   entry->addr,
			   ttl/HZ);

		if (entry->local_ring)
			seq_puts(seq, "local\n");
		else {
			seq_printf(seq, "%04X", ntohs(entry->rcf));
			rcf_len = ((ntohs(entry->rcf) & TR_RCF_LEN_MASK)>>8)-2;
			if (rcf_len)
				rcf_len >>= 1;
			for(j = 1; j < rcf_len; j++) {
				if(j==1) {
					segment=ntohs(entry->rseg[j-1])>>4;
					seq_printf(seq," %03X",segment);
				}
				segment=ntohs(entry->rseg[j])>>4;
				brdgnmb=ntohs(entry->rseg[j-1])&0x00f;
				seq_printf(seq,"-%01X-%03X",brdgnmb,segment);
			}
			seq_putc(seq, '\n');
		}

		if (dev)
			dev_put(dev);
	}
	return 0;
}
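
/*
 * Each route descriptor in rseg[] is 16 bits: a 12-bit ring number in the
 * high bits (hence the >> 4) and a 4-bit bridge number in the low nibble,
 * so the /proc/net/tr_rif output prints the path as ring-bridge-ring hops.
 */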
static const struct seq_operations rif_seq_ops = {
	.start = rif_seq_start,
	.next  = rif_seq_next,
	.stop  = rif_seq_stop,
	.show  = rif_seq_show,
};

static int rif_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rif_seq_ops);
}

static const struct file_operations rif_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rif_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};

#endif
static const struct header_ops tr_header_ops = {
	.create	 = tr_header,
	.rebuild = tr_rebuild_header,
};

static void tr_setup(struct net_device *dev)
{
	/*
	 *	Configure and register
	 */
	dev->header_ops		= &tr_header_ops;

	dev->type		= ARPHRD_IEEE802_TR;
	dev->hard_header_len	= TR_HLEN;
	dev->mtu		= 2000;
	dev->addr_len		= TR_ALEN;
	dev->tx_queue_len	= 100;	/* Long queues on tr */

	memset(dev->broadcast, 0xFF, TR_ALEN);

	/* New-style flags. */
	dev->flags		= IFF_BROADCAST | IFF_MULTICAST;
}
/**
 * alloc_trdev - Register token ring device
 * @sizeof_priv: Size of additional driver-private structure to be allocated
 *	for this token ring device
 *
 * Fill in the fields of the device structure with token ring-generic values.
 *
 * Constructs a new net device, complete with a private data area of
 * size @sizeof_priv.  A 32-byte (not bit) alignment is enforced for
 * this private data area.
 */
struct net_device *alloc_trdev(int sizeof_priv)
{
	return alloc_netdev(sizeof_priv, "tr%d", tr_setup);
}
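
/*
 * Typical driver usage (a sketch; "struct my_tr_priv" stands in for the
 * driver's own private state):
 *
 *	struct net_device *dev = alloc_trdev(sizeof(struct my_tr_priv));
 *
 *	if (!dev)
 *		return -ENOMEM;
 *	... set dev->netdev_ops, dev->dev_addr, dev->irq, etc. ...
 *	err = register_netdev(dev);
 */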
#ifdef CONFIG_SYSCTL
static struct ctl_table tr_table[] = {
	{
		.procname	= "rif_timeout",
		.data		= &sysctl_tr_rif_timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{ },
};

static __initdata struct ctl_path tr_path[] = {
	{ .procname = "net", },
	{ .procname = "token-ring", },
	{ }
};
#endif
/*
 *	Called during bootup.  We don't actually have to initialise
 *	too much for this.
 */
static int __init rif_init(void)
{
	rif_timer.expires = jiffies + sysctl_tr_rif_timeout;
	setup_timer(&rif_timer, rif_check_expire, 0);
	add_timer(&rif_timer);
#ifdef CONFIG_SYSCTL
	register_sysctl_paths(tr_path, tr_table);
#endif
	proc_net_fops_create(&init_net, "tr_rif", S_IRUGO, &rif_seq_fops);
	return 0;
}

module_init(rif_init);

EXPORT_SYMBOL(tr_type_trans);
EXPORT_SYMBOL(alloc_trdev);

MODULE_LICENSE("GPL");