/*
 * NET3:	Token ring device handling subroutines
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Fixes:	3 Feb 97 Paul Norton <pnorton@cts.com> Minor routing fixes.
 *		Added rif table to /proc/net/tr_rif and rif timeout to
 *		/proc/sys/net/token-ring/rif_timeout.
 *		22 Jun 98 Paul Norton <p.norton@computer.org> Rearranged
 *		tr_header and tr_type_trans to handle passing IPX SNAP and
 *		802.2 through the correct layers. Eliminated tr_reformat.
 */
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/trdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/net.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <net/arp.h>		/* for arp_find() used by tr_rebuild_header() */
static void tr_add_rif_info(struct trh_hdr *trh, struct net_device *dev);
static void rif_check_expire(unsigned long dummy);

#define TR_SR_DEBUG 0		/* set to 1 for verbose source-routing diagnostics */
/*
 *	Each RIF entry we learn is kept this way
 */

struct rif_cache {
	unsigned char addr[TR_ALEN];
	int iface;
	__u16 rcf;
	__u16 rseg[8];
	struct rif_cache *next;
	unsigned long last_used;
	unsigned char local_ring;
};
#define RIF_TABLE_SIZE 32

/*
 *	We hash the RIF cache 32 ways. We do after all have to look it
 *	up a lot.
 */

static struct rif_cache *rif_table[RIF_TABLE_SIZE];

static DEFINE_SPINLOCK(rif_lock);
/*
 *	Garbage disposal timer.
 */

static struct timer_list rif_timer;

int sysctl_tr_rif_timeout = 60*10*HZ;
static inline unsigned long rif_hash(const unsigned char *addr)
{
	unsigned long x;

	x = addr[0];
	x = (x << 2) ^ addr[1];
	x = (x << 2) ^ addr[2];
	x = (x << 2) ^ addr[3];
	x = (x << 2) ^ addr[4];
	x = (x << 2) ^ addr[5];

	x ^= x >> 8;

	return x & (RIF_TABLE_SIZE - 1);
}
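/*
 * Illustrative only: both the lookup and the insert paths below use the
 * hash purely as a bucket index and then walk the per-bucket chain, e.g.
 *
 *	hash = rif_hash(trh->daddr);
 *	for (entry = rif_table[hash];
 *	     entry && memcmp(entry->addr, trh->daddr, TR_ALEN);
 *	     entry = entry->next)
 *		;
 *
 * exactly as done in tr_source_route() and tr_add_rif_info().
 */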
/*
 *	Put the headers on a token ring packet. Token ring source routing
 *	makes this a little more exciting than on ethernet.
 */
static int tr_header(struct sk_buff *skb, struct net_device *dev,
		     unsigned short type,
		     void *daddr, void *saddr, unsigned len)
{
	struct trh_hdr *trh;
	int hdr_len;

	/*
	 * Add the 802.2 SNAP header if IP as the IPv4/IPv6 code calls
	 * dev->hard_header directly.
	 */
	if (type == ETH_P_IP || type == ETH_P_IPV6 || type == ETH_P_ARP)
	{
		struct trllc *trllc;

		hdr_len = sizeof(struct trh_hdr) + sizeof(struct trllc);
		trh = (struct trh_hdr *)skb_push(skb, hdr_len);
		trllc = (struct trllc *)(trh+1);
		trllc->dsap = trllc->ssap = EXTENDED_SAP;
		trllc->llc = UI_CMD;
		trllc->protid[0] = trllc->protid[1] = trllc->protid[2] = 0x00;
		trllc->ethertype = htons(type);
	}
	else
	{
		hdr_len = sizeof(struct trh_hdr);
		trh = (struct trh_hdr *)skb_push(skb, hdr_len);
	}

	trh->ac=AC;
	trh->fc=LLC_FRAME;

	if(saddr)
		memcpy(trh->saddr,saddr,dev->addr_len);
	else
		memcpy(trh->saddr,dev->dev_addr,dev->addr_len);

	/*
	 *	Build the destination and then source route the frame
	 */
	if(daddr)
	{
		memcpy(trh->daddr,daddr,dev->addr_len);
		tr_source_route(skb,trh,dev);
		return(hdr_len);
	}

	return -hdr_len;
}
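/*
 * Illustrative only: protocols normally reach this through the
 * hard_header hook that tr_setup() installs, roughly as in
 *
 *	dev->hard_header(skb, dev, ETH_P_IP, daddr, NULL, skb->len);
 *
 * For ETH_P_IP/ETH_P_IPV6/ETH_P_ARP this pushes a trh_hdr plus a SNAP
 * (trllc) header; anything else gets the bare trh_hdr.
 */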
/*
 *	A neighbour discovery of some species (eg arp) has completed. We
 *	can now send the packet.
 */
static int tr_rebuild_header(struct sk_buff *skb)
{
	struct trh_hdr *trh=(struct trh_hdr *)skb->data;
	struct trllc *trllc=(struct trllc *)(skb->data+sizeof(struct trh_hdr));
	struct net_device *dev = skb->dev;

	/*
	 *	FIXME: We don't yet support IPv6 over token rings
	 */

	if(trllc->ethertype != htons(ETH_P_IP)) {
		printk("tr_rebuild_header: Don't know how to resolve type %04X addresses ?\n",(unsigned int)htons(trllc->ethertype));
		return 0;
	}

#ifdef CONFIG_INET
	if(arp_find(trh->daddr, skb)) {
		return 1;
	}
	else
#endif
	{
		tr_source_route(skb,trh,dev);
		return 0;
	}
}
/*
 *	Some of this is a bit hackish. We intercept RIF information
 *	used for source routing. We also grab IP directly and don't feed
 *	it to LLC.
 */
unsigned short tr_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	struct trh_hdr *trh=(struct trh_hdr *)skb->data;
	struct trllc *trllc;
	unsigned riflen=0;

	skb->mac.raw = skb->data;

	if(trh->saddr[0] & TR_RII)
		riflen = (ntohs(trh->rcf) & TR_RCF_LEN_MASK) >> 8;

	trllc = (struct trllc *)(skb->data+sizeof(struct trh_hdr)-TR_MAXRIFLEN+riflen);

	skb_pull(skb,sizeof(struct trh_hdr)-TR_MAXRIFLEN+riflen);

	if(*trh->daddr & 0x80)
	{
		if(!memcmp(trh->daddr,dev->broadcast,TR_ALEN))
			skb->pkt_type=PACKET_BROADCAST;
		else
			skb->pkt_type=PACKET_MULTICAST;
	}
	else if ( (trh->daddr[0] & 0x01) && (trh->daddr[1] & 0x00) && (trh->daddr[2] & 0x5E))
	{
		skb->pkt_type=PACKET_MULTICAST;
	}
	else if(dev->flags & IFF_PROMISC)
	{
		if(memcmp(trh->daddr, dev->dev_addr, TR_ALEN))
			skb->pkt_type=PACKET_OTHERHOST;
	}

	if ((skb->pkt_type != PACKET_BROADCAST) &&
	    (skb->pkt_type != PACKET_MULTICAST))
		tr_add_rif_info(trh,dev);

	/*
	 * Strip the SNAP header from ARP packets since we don't
	 * pass them through to the 802.2/SNAP layers.
	 */
	if (trllc->dsap == EXTENDED_SAP &&
	    (trllc->ethertype == ntohs(ETH_P_IP) ||
	     trllc->ethertype == ntohs(ETH_P_IPV6) ||
	     trllc->ethertype == ntohs(ETH_P_ARP)))
	{
		skb_pull(skb, sizeof(struct trllc));
		return trllc->ethertype;
	}

	return ntohs(ETH_P_802_2);
}
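/*
 * Illustrative only: a token ring driver's receive path would typically
 * hand a freshly built skb to this helper and then push it up the stack:
 *
 *	skb->protocol = tr_type_trans(skb, dev);
 *	netif_rx(skb);
 */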
/*
 *	We try to do source routing...
 */
void tr_source_route(struct sk_buff *skb,struct trh_hdr *trh,struct net_device *dev)
{
	int slack;
	unsigned int hash;
	struct rif_cache *entry;
	unsigned char *olddata;
	unsigned long flags;
	static const unsigned char mcast_func_addr[]
		= {0xC0,0x00,0x00,0x04,0x00,0x00};

	spin_lock_irqsave(&rif_lock, flags);

	/*
	 *	Broadcasts are single route as stated in RFC 1042
	 */
	if( (!memcmp(&(trh->daddr[0]),&(dev->broadcast[0]),TR_ALEN)) ||
	    (!memcmp(&(trh->daddr[0]),&(mcast_func_addr[0]), TR_ALEN)) )
	{
		trh->rcf=htons((((sizeof(trh->rcf)) << 8) & TR_RCF_LEN_MASK)
			       | TR_RCF_FRAME2K | TR_RCF_LIMITED_BROADCAST);
		trh->saddr[0]|=TR_RII;
	}
	else
	{
		hash = rif_hash(trh->daddr);
		/*
		 *	Walk the hash table and look for an entry
		 */
		for(entry=rif_table[hash];entry && memcmp(&(entry->addr[0]),&(trh->daddr[0]),TR_ALEN);entry=entry->next);

		/*
		 *	If we found an entry we can route the frame.
		 */
		if(entry)
		{
#if TR_SR_DEBUG
			printk("source routing for %02X:%02X:%02X:%02X:%02X:%02X\n",trh->daddr[0],
			       trh->daddr[1],trh->daddr[2],trh->daddr[3],trh->daddr[4],trh->daddr[5]);
#endif
			if(!entry->local_ring && (ntohs(entry->rcf) & TR_RCF_LEN_MASK) >> 8)
			{
				trh->rcf=entry->rcf;
				memcpy(&trh->rseg[0],&entry->rseg[0],8*sizeof(unsigned short));
				trh->rcf^=htons(TR_RCF_DIR_BIT);
				trh->rcf&=htons(0x1fff);	/* Issam Chehab <ichehab@madge1.demon.co.uk> */

				trh->saddr[0]|=TR_RII;
#if TR_SR_DEBUG
				printk("entry found with rcf %04x\n", entry->rcf);
#endif
			}
#if TR_SR_DEBUG
			else
			{
				printk("entry found but without rcf length, local=%02x\n", entry->local_ring);
			}
#endif
			entry->last_used=jiffies;
		}
		else
		{
			/*
			 *	Without the information we simply have to shout
			 *	on the wire. The replies should rapidly clean this
			 *	situation up.
			 */
			trh->rcf=htons((((sizeof(trh->rcf)) << 8) & TR_RCF_LEN_MASK)
				       | TR_RCF_FRAME2K | TR_RCF_LIMITED_BROADCAST);
			trh->saddr[0]|=TR_RII;
#if TR_SR_DEBUG
			printk("no entry in rif table found - broadcasting frame\n");
#endif
		}
	}

	/* Compress the RIF here so we don't have to do it in the driver(s) */
	if (!(trh->saddr[0] & 0x80))
		slack = 18;
	else
		slack = 18 - ((ntohs(trh->rcf) & TR_RCF_LEN_MASK)>>8);
	olddata = skb->data;
	spin_unlock_irqrestore(&rif_lock, flags);

	skb_pull(skb, slack);
	memmove(skb->data, olddata, sizeof(struct trh_hdr) - slack);
}
/*
 *	We have learned some new RIF information for our source
 *	routing.
 */
static void tr_add_rif_info(struct trh_hdr *trh, struct net_device *dev)
{
	unsigned int hash, rii_p = 0;
	unsigned long flags;
	struct rif_cache *entry;

	spin_lock_irqsave(&rif_lock, flags);

	/*
	 *	Firstly see if the entry exists
	 */
	if(trh->saddr[0] & TR_RII)
	{
		trh->saddr[0]&=0x7f;
		if (((ntohs(trh->rcf) & TR_RCF_LEN_MASK) >> 8) > 2)
		{
			rii_p = 1;
		}
	}

	hash = rif_hash(trh->saddr);
	for(entry=rif_table[hash];entry && memcmp(&(entry->addr[0]),&(trh->saddr[0]),TR_ALEN);entry=entry->next);

	if(entry==NULL)
	{
#if TR_SR_DEBUG
		printk("adding rif_entry: addr:%02X:%02X:%02X:%02X:%02X:%02X rcf:%04X\n",
		       trh->saddr[0],trh->saddr[1],trh->saddr[2],
		       trh->saddr[3],trh->saddr[4],trh->saddr[5],
		       ntohs(trh->rcf));
#endif
		/*
		 *	Allocate our new entry. A failure to allocate loses
		 *	us the information. This is harmless.
		 *
		 *	FIXME: We ought to keep some kind of cache size
		 *	limiting and adjust the timers to suit.
		 */
		entry=kmalloc(sizeof(struct rif_cache),GFP_ATOMIC);

		if(!entry)
		{
			printk(KERN_DEBUG "tr.c: Couldn't malloc rif cache entry !\n");
			spin_unlock_irqrestore(&rif_lock, flags);
			return;
		}

		memcpy(&(entry->addr[0]),&(trh->saddr[0]),TR_ALEN);
		entry->iface = dev->ifindex;
		entry->next=rif_table[hash];
		entry->last_used=jiffies;
		rif_table[hash]=entry;

		if (rii_p)
		{
			entry->rcf = trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK);
			memcpy(&(entry->rseg[0]),&(trh->rseg[0]),8*sizeof(unsigned short));
			entry->local_ring = 0;
			trh->saddr[0]|=TR_RII; /* put the routing indicator back for tcpdump */
		}
		else
		{
			entry->local_ring = 1;
		}
	}
	else	/* Y. Tahara added */
	{
		/*
		 *	Update existing entries
		 */
		if (!entry->local_ring)
		    if (entry->rcf != (trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK)) &&
			!(trh->rcf & htons(TR_RCF_BROADCAST_MASK)))
		    {
#if TR_SR_DEBUG
			printk("updating rif_entry: addr:%02X:%02X:%02X:%02X:%02X:%02X rcf:%04X\n",
			       trh->saddr[0],trh->saddr[1],trh->saddr[2],
			       trh->saddr[3],trh->saddr[4],trh->saddr[5],
			       ntohs(trh->rcf));
#endif
			entry->rcf = trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK);
			memcpy(&(entry->rseg[0]),&(trh->rseg[0]),8*sizeof(unsigned short));
		    }
		entry->last_used=jiffies;
	}
	spin_unlock_irqrestore(&rif_lock, flags);
}
/*
 *	Scan the cache with a timer and see what we need to throw out.
 */
static void rif_check_expire(unsigned long dummy)
{
	int i;
	unsigned long flags, next_interval = jiffies + sysctl_tr_rif_timeout/2;

	spin_lock_irqsave(&rif_lock, flags);

	for(i = 0; i < RIF_TABLE_SIZE; i++) {
		struct rif_cache *entry, **pentry;

		pentry = rif_table+i;
		while((entry=*pentry) != NULL) {
			unsigned long expires
				= entry->last_used + sysctl_tr_rif_timeout;

			if (time_before_eq(expires, jiffies)) {
				*pentry = entry->next;
				kfree(entry);
			} else {
				pentry = &entry->next;

				if (time_before(expires, next_interval))
					next_interval = expires;
			}
		}
	}

	spin_unlock_irqrestore(&rif_lock, flags);

	mod_timer(&rif_timer, next_interval);
}
/*
 *	Generate the /proc/net information for the token ring RIF
 *	routing.
 */

#ifdef CONFIG_PROC_FS
static struct rif_cache *rif_get_idx(loff_t pos)
{
	int i;
	struct rif_cache *entry;
	loff_t off = 0;

	for(i = 0; i < RIF_TABLE_SIZE; i++)
		for(entry = rif_table[i]; entry; entry = entry->next) {
			if (off == pos)
				return entry;
			++off;
		}

	return NULL;
}
static void *rif_seq_start(struct seq_file *seq, loff_t *pos)
{
	spin_lock_irq(&rif_lock);

	return *pos ? rif_get_idx(*pos - 1) : SEQ_START_TOKEN;
}
static void *rif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	int i;
	struct rif_cache *ent = v;

	++*pos;

	if (v == SEQ_START_TOKEN) {
		i = -1;
		goto scan;
	}

	if (ent->next)
		return ent->next;

	i = rif_hash(ent->addr);
 scan:
	while (++i < RIF_TABLE_SIZE) {
		if ((ent = rif_table[i]) != NULL)
			return ent;
	}
	return NULL;
}
static void rif_seq_stop(struct seq_file *seq, void *v)
{
	spin_unlock_irq(&rif_lock);
}
static int rif_seq_show(struct seq_file *seq, void *v)
{
	int j, rcf_len, segment, brdgnmb;
	struct rif_cache *entry = v;

	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
			 "if     TR address       TTL   rcf   routing segments\n");
	else {
		struct net_device *dev = dev_get_by_index(entry->iface);
		long ttl = (long) (entry->last_used + sysctl_tr_rif_timeout)
				- (long) jiffies;

		seq_printf(seq, "%s %02X:%02X:%02X:%02X:%02X:%02X %7li ",
			   dev?dev->name:"?",
			   entry->addr[0],entry->addr[1],entry->addr[2],
			   entry->addr[3],entry->addr[4],entry->addr[5],
			   ttl/HZ);

		if (entry->local_ring)
			seq_puts(seq, "local\n");
		else {
			seq_printf(seq, "%04X", ntohs(entry->rcf));
			rcf_len = ((ntohs(entry->rcf) & TR_RCF_LEN_MASK)>>8)-2;
			if (rcf_len)
				rcf_len >>= 1;
			for(j = 1; j < rcf_len; j++) {
				if(j==1) {
					segment=ntohs(entry->rseg[j-1])>>4;
					seq_printf(seq,"  %03X",segment);
				}
				segment=ntohs(entry->rseg[j])>>4;
				brdgnmb=ntohs(entry->rseg[j-1])&0x00f;
				seq_printf(seq,"-%01X-%03X",brdgnmb,segment);
			}
			seq_puts(seq, "\n");
		}
	}
	return 0;
}
static struct seq_operations rif_seq_ops = {
	.start = rif_seq_start,
	.next  = rif_seq_next,
	.stop  = rif_seq_stop,
	.show  = rif_seq_show,
};
static int rif_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rif_seq_ops);
}
static struct file_operations rif_seq_fops = {
	.owner	 = THIS_MODULE,
	.open    = rif_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

#endif
static void tr_setup(struct net_device *dev)
{
	/*
	 *	Configure and register
	 */

	dev->hard_header	= tr_header;
	dev->rebuild_header	= tr_rebuild_header;

	dev->type		= ARPHRD_IEEE802_TR;
	dev->hard_header_len	= TR_HLEN;
	dev->mtu		= 2000;
	dev->addr_len		= TR_ALEN;
	dev->tx_queue_len	= 100;	/* Long queues on tr */

	memset(dev->broadcast,0xFF, TR_ALEN);

	/* New-style flags. */
	dev->flags		= IFF_BROADCAST | IFF_MULTICAST;
}
/**
 * alloc_trdev - Register token ring device
 * @sizeof_priv: Size of additional driver-private structure to be allocated
 *	for this token ring device
 *
 * Fill in the fields of the device structure with token ring-generic values.
 *
 * Constructs a new net device, complete with a private data area of
 * size @sizeof_priv.  A 32-byte (not bit) alignment is enforced for
 * this private data area.
 */
struct net_device *alloc_trdev(int sizeof_priv)
{
	return alloc_netdev(sizeof_priv, "tr%d", tr_setup);
}
/*
 *	Called during bootup.  We don't actually have to initialise
 *	anything here beyond starting the RIF expiry timer and creating
 *	the /proc/net/tr_rif entry.
 */
static int __init rif_init(void)
{
	init_timer(&rif_timer);
	rif_timer.expires  = sysctl_tr_rif_timeout;
	rif_timer.data     = 0L;
	rif_timer.function = rif_check_expire;
	add_timer(&rif_timer);

	proc_net_fops_create("tr_rif", S_IRUGO, &rif_seq_fops);
	return 0;
}
module_init(rif_init);
EXPORT_SYMBOL(tr_source_route);
EXPORT_SYMBOL(tr_type_trans);
EXPORT_SYMBOL(alloc_trdev);
);