/*
 * NET3:	Token ring device handling subroutines
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Fixes:	3 Feb 97 Paul Norton <pnorton@cts.com> Minor routing fixes.
 *		Added rif table to /proc/net/tr_rif and rif timeout to
 *		/proc/sys/net/token-ring/rif_timeout.
 *		22 Jun 98 Paul Norton <p.norton@computer.org> Rearranged
 *		tr_header and tr_type_trans to handle passing IPX SNAP and
 *		802.2 through the correct layers. Eliminated tr_reformat.
 */
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/trdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/net.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <net/arp.h>	/* for arp_find() used by tr_rebuild_header() */
static void tr_add_rif_info(struct trh_hdr *trh, struct net_device *dev);
static void rif_check_expire(unsigned long dummy);
/*
 *	Each RIF entry we learn is kept this way
 */

struct rif_cache {
	unsigned char addr[TR_ALEN];
	int iface;
	unsigned short rcf;
	unsigned short rseg[8];
	struct rif_cache *next;
	unsigned long last_used;
	unsigned char local_ring;
};
#define RIF_TABLE_SIZE	32

/*
 *	We hash the RIF cache 32 ways. We do after all have to look it
 *	up a lot.
 */

static struct rif_cache *rif_table[RIF_TABLE_SIZE];

static DEFINE_SPINLOCK(rif_lock);

/*
 *	Garbage disposal timer.
 */

static struct timer_list rif_timer;

int sysctl_tr_rif_timeout = 60*10*HZ;
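/*
 * 60*10*HZ is ten minutes expressed in jiffies: the default lifetime of a
 * learned RIF entry, tunable via /proc/sys/net/token-ring/rif_timeout.
 */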
static inline unsigned long rif_hash(const unsigned char *addr)
{
	unsigned long x;

	x = addr[0];
	x = (x << 2) ^ addr[1];
	x = (x << 2) ^ addr[2];
	x = (x << 2) ^ addr[3];
	x = (x << 2) ^ addr[4];
	x = (x << 2) ^ addr[5];

	return x & (RIF_TABLE_SIZE - 1);
}
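/*
 * The fold above shifts the accumulated value left by two bits before XORing
 * in each further address byte, then masks the result down to its low five
 * bits, selecting one of the RIF_TABLE_SIZE (32) hash buckets.
 */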
/*
 *	Put the headers on a token ring packet. Token ring source routing
 *	makes this a little more exciting than on ethernet.
 */

static int tr_header(struct sk_buff *skb, struct net_device *dev,
		     unsigned short type,
		     void *daddr, void *saddr, unsigned len)
{
	struct trh_hdr *trh;
	int hdr_len;

	/*
	 * Add the 802.2 SNAP header if IP as the IPv4/IPv6 code calls
	 * dev->hard_header directly.
	 */
	if (type == ETH_P_IP || type == ETH_P_IPV6 || type == ETH_P_ARP)
	{
		struct trllc *trllc;

		hdr_len = sizeof(struct trh_hdr) + sizeof(struct trllc);
		trh = (struct trh_hdr *)skb_push(skb, hdr_len);
		trllc = (struct trllc *)(trh+1);
		trllc->dsap = trllc->ssap = EXTENDED_SAP;
		trllc->llc = UI_CMD;
		trllc->protid[0] = trllc->protid[1] = trllc->protid[2] = 0x00;
		trllc->ethertype = htons(type);
	}
	else
	{
		hdr_len = sizeof(struct trh_hdr);
		trh = (struct trh_hdr *)skb_push(skb, hdr_len);
	}

	trh->ac = AC;
	trh->fc = LLC_FRAME;

	if (saddr)
		memcpy(trh->saddr, saddr, dev->addr_len);
	else
		memcpy(trh->saddr, dev->dev_addr, dev->addr_len);

	/*
	 *	Build the destination and then source route the frame
	 */

	if (daddr)
	{
		memcpy(trh->daddr, daddr, dev->addr_len);
		tr_source_route(skb, trh, dev);
		return hdr_len;
	}

	return -hdr_len;
}
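/*
 * Net result of tr_header() for IP, IPv6 and ARP: the frame starts with the
 * token ring MAC header (struct trh_hdr, RIF filled in by tr_source_route),
 * immediately followed by the LLC/SNAP header (struct trllc: DSAP/SSAP 0xAA,
 * UI control, zero OUI, then the ethertype) and the payload. Every other
 * protocol is handed the bare MAC header and must supply its own 802.2
 * header itself.
 */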
/*
 *	A neighbour discovery of some species (eg arp) has completed. We
 *	can now send the packet.
 */

static int tr_rebuild_header(struct sk_buff *skb)
{
	struct trh_hdr *trh = (struct trh_hdr *)skb->data;
	struct trllc *trllc = (struct trllc *)(skb->data + sizeof(struct trh_hdr));
	struct net_device *dev = skb->dev;

	/*
	 *	FIXME: We don't yet support IPv6 over token rings
	 */

	if (trllc->ethertype != htons(ETH_P_IP)) {
		printk("tr_rebuild_header: Don't know how to resolve type %04X addresses ?\n",
		       (unsigned int)htons(trllc->ethertype));
		return 0;
	}

#ifdef CONFIG_INET
	if (arp_find(trh->daddr, skb)) {
		return 1;
	}
	else
#endif
	{
		tr_source_route(skb, trh, dev);
		return 0;
	}
}
/*
 *	Some of this is a bit hackish. We intercept RIF information
 *	used for source routing. We also grab IP directly and don't feed
 *	it via normal 802.2 processing.
 */

unsigned short tr_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	struct trh_hdr *trh = (struct trh_hdr *)skb->data;
	struct trllc *trllc;
	unsigned riflen = 0;

	skb->mac.raw = skb->data;

	if (trh->saddr[0] & TR_RII)
		riflen = (ntohs(trh->rcf) & TR_RCF_LEN_MASK) >> 8;

	trllc = (struct trllc *)(skb->data + sizeof(struct trh_hdr) - TR_MAXRIFLEN + riflen);

	skb_pull(skb, sizeof(struct trh_hdr) - TR_MAXRIFLEN + riflen);

	if (*trh->daddr & 0x80)
	{
		if (!memcmp(trh->daddr, dev->broadcast, TR_ALEN))
			skb->pkt_type = PACKET_BROADCAST;
		else
			skb->pkt_type = PACKET_MULTICAST;
	}
	/*
	 * Note: the "& 0x00" term below always evaluates to zero, so this
	 * test never matches; it appears intended as an equality check for
	 * the 01:00:5E multicast prefix.
	 */
	else if ((trh->daddr[0] & 0x01) && (trh->daddr[1] & 0x00) && (trh->daddr[2] & 0x5E))
	{
		skb->pkt_type = PACKET_MULTICAST;
	}
	else if (dev->flags & IFF_PROMISC)
	{
		if (memcmp(trh->daddr, dev->dev_addr, TR_ALEN))
			skb->pkt_type = PACKET_OTHERHOST;
	}

	if ((skb->pkt_type != PACKET_BROADCAST) &&
	    (skb->pkt_type != PACKET_MULTICAST))
		tr_add_rif_info(trh, dev);

	/*
	 * Strip the SNAP header from ARP packets since we don't
	 * pass them through to the 802.2/SNAP layers.
	 */
	if (trllc->dsap == EXTENDED_SAP &&
	    (trllc->ethertype == ntohs(ETH_P_IP) ||
	     trllc->ethertype == ntohs(ETH_P_IPV6) ||
	     trllc->ethertype == ntohs(ETH_P_ARP)))
	{
		skb_pull(skb, sizeof(struct trllc));
		return trllc->ethertype;
	}

	return ntohs(ETH_P_802_2);
}
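/*
 * Illustrative example (assumes TR_RCF_LEN_MASK is 0x1f00 and TR_MAXRIFLEN
 * is 18, as defined in <linux/if_tr.h>): an rcf of 0x0620 carries a RIF
 * length field of 6, so riflen above becomes 6 and skb_pull() removes
 * sizeof(struct trh_hdr) - 18 + 6 bytes, i.e. the MAC header plus only the
 * six RIF bytes actually present, leaving skb->data at the LLC header.
 */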
/*
 *	We try to do source routing...
 */

void tr_source_route(struct sk_buff *skb, struct trh_hdr *trh, struct net_device *dev)
{
	int slack;
	unsigned int hash;
	struct rif_cache *entry;
	unsigned char *olddata;
	static const unsigned char mcast_func_addr[]
		= {0xC0,0x00,0x00,0x04,0x00,0x00};

	spin_lock_bh(&rif_lock);

	/*
	 *	Broadcasts are single route as stated in RFC 1042
	 */
	if( (!memcmp(&(trh->daddr[0]),&(dev->broadcast[0]),TR_ALEN)) ||
	    (!memcmp(&(trh->daddr[0]),&(mcast_func_addr[0]), TR_ALEN)) )
	{
		trh->rcf=htons((((sizeof(trh->rcf)) << 8) & TR_RCF_LEN_MASK)
			       | TR_RCF_FRAME2K | TR_RCF_LIMITED_BROADCAST);
		trh->saddr[0]|=TR_RII;
	}
	else
	{
		hash = rif_hash(trh->daddr);
		/*
		 *	Walk the hash table and look for an entry
		 */
		for(entry=rif_table[hash];entry && memcmp(&(entry->addr[0]),&(trh->daddr[0]),TR_ALEN);entry=entry->next);

		/*
		 *	If we found an entry we can route the frame.
		 */
		if(entry)
		{
			printk("source routing for %02X:%02X:%02X:%02X:%02X:%02X\n",trh->daddr[0],
			       trh->daddr[1],trh->daddr[2],trh->daddr[3],trh->daddr[4],trh->daddr[5]);

			if(!entry->local_ring && (ntohs(entry->rcf) & TR_RCF_LEN_MASK) >> 8)
			{
				trh->rcf=entry->rcf;
				memcpy(&trh->rseg[0],&entry->rseg[0],8*sizeof(unsigned short));
				trh->rcf^=htons(TR_RCF_DIR_BIT);
				trh->rcf&=htons(0x1fff);	/* Issam Chehab <ichehab@madge1.demon.co.uk> */

				trh->saddr[0]|=TR_RII;
				printk("entry found with rcf %04x\n", entry->rcf);
			}
			else
			{
				printk("entry found but without rcf length, local=%02x\n", entry->local_ring);
			}
			entry->last_used=jiffies;
		}
		else
		{
			/*
			 *	Without the information we simply have to shout
			 *	on the wire. The replies should rapidly clean this
			 *	situation up.
			 */
			trh->rcf=htons((((sizeof(trh->rcf)) << 8) & TR_RCF_LEN_MASK)
				       | TR_RCF_FRAME2K | TR_RCF_LIMITED_BROADCAST);
			trh->saddr[0]|=TR_RII;
			printk("no entry in rif table found - broadcasting frame\n");
		}
	}

	/* Compress the RIF here so we don't have to do it in the driver(s) */
	if (!(trh->saddr[0] & 0x80))
		slack = 18;
	else
		slack = 18 - ((ntohs(trh->rcf) & TR_RCF_LEN_MASK)>>8);
	olddata = skb->data;
	spin_unlock_bh(&rif_lock);

	skb_pull(skb, slack);
	memmove(skb->data, olddata, sizeof(struct trh_hdr) - slack);
}
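/*
 * On the slack calculation above: struct trh_hdr always reserves the full
 * TR_MAXRIFLEN (18) bytes for the routing field, so any RIF space that is
 * not actually used is "slack". The skb_pull() plus memmove() slide the
 * fixed part of the MAC header forward over that unused space, handing the
 * driver a contiguous header containing only the RIF bytes really present.
 */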
/*
 *	We have learned some new RIF information for our source
 *	routing.
 */

static void tr_add_rif_info(struct trh_hdr *trh, struct net_device *dev)
{
	unsigned int hash, rii_p = 0;
	struct rif_cache *entry;

	spin_lock_bh(&rif_lock);

	/*
	 *	Firstly see if the entry exists
	 */
	if(trh->saddr[0] & TR_RII)
	{
		trh->saddr[0]&=0x7f;
		if (((ntohs(trh->rcf) & TR_RCF_LEN_MASK) >> 8) > 2)
			rii_p = 1;
	}

	hash = rif_hash(trh->saddr);
	for(entry=rif_table[hash];entry && memcmp(&(entry->addr[0]),&(trh->saddr[0]),TR_ALEN);entry=entry->next);

	if(entry==NULL)
	{
		printk("adding rif_entry: addr:%02X:%02X:%02X:%02X:%02X:%02X rcf:%04X\n",
		       trh->saddr[0],trh->saddr[1],trh->saddr[2],
		       trh->saddr[3],trh->saddr[4],trh->saddr[5],
		       ntohs(trh->rcf));
		/*
		 *	Allocate our new entry. A failure to allocate loses
		 *	us the information. This is harmless.
		 *
		 *	FIXME: We ought to keep some kind of cache size
		 *	limiting and adjust the timers to suit.
		 */
		entry=kmalloc(sizeof(struct rif_cache),GFP_ATOMIC);

		if(!entry)
		{
			printk(KERN_DEBUG "tr.c: Couldn't malloc rif cache entry !\n");
			spin_unlock_bh(&rif_lock);
			return;
		}

		memcpy(&(entry->addr[0]),&(trh->saddr[0]),TR_ALEN);
		entry->iface = dev->ifindex;
		entry->next=rif_table[hash];
		entry->last_used=jiffies;
		rif_table[hash]=entry;

		if (rii_p)
		{
			entry->rcf = trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK);
			memcpy(&(entry->rseg[0]),&(trh->rseg[0]),8*sizeof(unsigned short));
			entry->local_ring = 0;
			trh->saddr[0]|=TR_RII; /* put the routing indicator back for tcpdump */
		}
		else
		{
			entry->local_ring = 1;
		}
	}
	else	/* Y. Tahara added */
	{
		/*
		 *	Update existing entries
		 */
		if (!entry->local_ring)
			if (entry->rcf != (trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK)) &&
			    !(trh->rcf & htons(TR_RCF_BROADCAST_MASK)))
			{
				printk("updating rif_entry: addr:%02X:%02X:%02X:%02X:%02X:%02X rcf:%04X\n",
				       trh->saddr[0],trh->saddr[1],trh->saddr[2],
				       trh->saddr[3],trh->saddr[4],trh->saddr[5],
				       ntohs(trh->rcf));
				entry->rcf = trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK);
				memcpy(&(entry->rseg[0]),&(trh->rseg[0]),8*sizeof(unsigned short));
			}
		entry->last_used=jiffies;
	}
	spin_unlock_bh(&rif_lock);
}
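/*
 * Learned RCFs are stored with the broadcast bits masked off, so that
 * tr_source_route() above only has to flip TR_RCF_DIR_BIT to send a frame
 * back along the reverse of the route it was learned from.
 */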
/*
 *	Scan the cache with a timer and see what we need to throw out.
 */

static void rif_check_expire(unsigned long dummy)
{
	int i;
	unsigned long next_interval = jiffies + sysctl_tr_rif_timeout/2;

	spin_lock_bh(&rif_lock);

	for(i = 0; i < RIF_TABLE_SIZE; i++) {
		struct rif_cache *entry, **pentry;

		pentry = rif_table+i;
		while((entry=*pentry) != NULL) {
			unsigned long expires
				= entry->last_used + sysctl_tr_rif_timeout;

			if (time_before_eq(expires, jiffies)) {
				*pentry = entry->next;
				kfree(entry);
			} else {
				pentry = &entry->next;

				if (time_before(expires, next_interval))
					next_interval = expires;
			}
		}
	}

	spin_unlock_bh(&rif_lock);

	mod_timer(&rif_timer, next_interval);
}
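/*
 * The timer therefore re-arms itself for half the configured timeout or the
 * earliest pending expiry in the table, whichever comes first.
 */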
/*
 *	Generate the /proc/net information for the token ring RIF
 *	routing.
 */

#ifdef CONFIG_PROC_FS

static struct rif_cache *rif_get_idx(loff_t pos)
{
	int i;
	struct rif_cache *entry;
	loff_t off = 0;

	for(i = 0; i < RIF_TABLE_SIZE; i++)
		for(entry = rif_table[i]; entry; entry = entry->next) {
			if (off == pos)
				return entry;
			++off;
		}

	return NULL;
}
static void *rif_seq_start(struct seq_file *seq, loff_t *pos)
{
	spin_lock_bh(&rif_lock);

	return *pos ? rif_get_idx(*pos - 1) : SEQ_START_TOKEN;
}
static void *rif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	int i;
	struct rif_cache *ent = v;

	++*pos;

	if (v == SEQ_START_TOKEN) {
		i = -1;
		goto scan;
	}

	if (ent->next)
		return ent->next;

	i = rif_hash(ent->addr);
 scan:
	while (++i < RIF_TABLE_SIZE) {
		if ((ent = rif_table[i]) != NULL)
			return ent;
	}
	return NULL;
}
static void rif_seq_stop(struct seq_file *seq, void *v)
{
	spin_unlock_bh(&rif_lock);
}
static int rif_seq_show(struct seq_file *seq, void *v)
{
	int j, rcf_len, segment, brdgnmb;
	struct rif_cache *entry = v;

	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
			 "if     TR address       TTL   rcf   routing segments\n");
	else {
		struct net_device *dev = dev_get_by_index(entry->iface);
		long ttl = (long) (entry->last_used + sysctl_tr_rif_timeout)
				- (long) jiffies;

		seq_printf(seq, "%s %02X:%02X:%02X:%02X:%02X:%02X %7li ",
			   dev ? dev->name : "?",
			   entry->addr[0],entry->addr[1],entry->addr[2],
			   entry->addr[3],entry->addr[4],entry->addr[5],
			   ttl/HZ);

		if (entry->local_ring)
			seq_puts(seq, "local\n");
		else {
			seq_printf(seq, "%04X", ntohs(entry->rcf));
			rcf_len = ((ntohs(entry->rcf) & TR_RCF_LEN_MASK)>>8)-2;
			if (rcf_len)
				rcf_len >>= 1;	/* route descriptors are two bytes each */
			for(j = 1; j < rcf_len; j++) {
				if(j==1) {
					segment=ntohs(entry->rseg[j-1])>>4;
					seq_printf(seq," %03X",segment);
				}
				segment=ntohs(entry->rseg[j])>>4;
				brdgnmb=ntohs(entry->rseg[j-1])&0x00f;
				seq_printf(seq,"-%01X-%03X",brdgnmb,segment);
			}
			seq_puts(seq, "\n");
		}
	}
	return 0;
}
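/*
 * Each /proc/net/tr_rif line thus shows the interface, the learned MAC
 * address, the remaining lifetime in seconds, the raw RCF, and the route
 * decoded as ring-bridge-ring hops (three hex digits per ring number, one
 * per bridge number), or "local" for stations on the local ring.
 */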
static struct seq_operations rif_seq_ops = {
	.start = rif_seq_start,
	.next  = rif_seq_next,
	.stop  = rif_seq_stop,
	.show  = rif_seq_show,
};
static int rif_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rif_seq_ops);
}
static struct file_operations rif_seq_fops = {
	.owner	 = THIS_MODULE,
	.open    = rif_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

#endif
static void tr_setup(struct net_device *dev)
{
	/*
	 *	Configure and register
	 */

	dev->hard_header	= tr_header;
	dev->rebuild_header	= tr_rebuild_header;

	dev->type		= ARPHRD_IEEE802_TR;
	dev->hard_header_len	= TR_HLEN;
	dev->addr_len		= TR_ALEN;
	dev->tx_queue_len	= 100;	/* Long queues on tr */

	memset(dev->broadcast, 0xFF, TR_ALEN);

	/* New-style flags. */
	dev->flags		= IFF_BROADCAST | IFF_MULTICAST;
}
/**
 * alloc_trdev - Register token ring device
 * @sizeof_priv: Size of additional driver-private structure to be allocated
 *	for this token ring device
 *
 * Fill in the fields of the device structure with token ring-generic values.
 *
 * Constructs a new net device, complete with a private data area of
 * size @sizeof_priv.  A 32-byte (not bit) alignment is enforced for
 * this private data area.
 */
struct net_device *alloc_trdev(int sizeof_priv)
{
	return alloc_netdev(sizeof_priv, "tr%d", tr_setup);
}
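/*
 * Typical driver-side usage (an illustrative sketch only; "struct foo_priv"
 * and foo_open are hypothetical names, not part of this file):
 *
 *	struct net_device *dev = alloc_trdev(sizeof(struct foo_priv));
 *	if (!dev)
 *		return -ENOMEM;
 *	dev->open = foo_open;		// driver fills in its own methods
 *	if (register_netdev(dev)) {
 *		free_netdev(dev);
 *		return -EIO;
 *	}
 */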
/*
 *	Called during bootup.  We don't actually have to initialise
 *	anything.
 */

static int __init rif_init(void)
{
	init_timer(&rif_timer);
	rif_timer.expires  = sysctl_tr_rif_timeout;
	rif_timer.function = rif_check_expire;
	add_timer(&rif_timer);

	proc_net_fops_create("tr_rif", S_IRUGO, &rif_seq_fops);
	return 0;
}
module_init(rif_init);

EXPORT_SYMBOL(tr_source_route);
EXPORT_SYMBOL(tr_type_trans);
EXPORT_SYMBOL(alloc_trdev);