/*
 * NET3:	Token ring device handling subroutines
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Fixes:	3 Feb 97 Paul Norton <pnorton@cts.com> Minor routing fixes.
 *		Added rif table to /proc/net/tr_rif and rif timeout to
 *		/proc/sys/net/token-ring/rif_timeout.
 *		22 Jun 98 Paul Norton <p.norton@computer.org> Rearranged
 *		tr_header and tr_type_trans to handle passing IPX SNAP and
 *		802.2 through the correct layers. Eliminated tr_reformat.
 */
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/trdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/net.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <net/arp.h>
static void tr_add_rif_info(struct trh_hdr *trh, struct net_device *dev);
static void rif_check_expire(unsigned long dummy);

#define TR_SR_DEBUG 0
/*
 * Each RIF entry we learn is kept this way
 */

struct rif_cache {
	unsigned char addr[TR_ALEN];
	int iface;
	__be16 rcf;
	__be16 rseg[8];
	struct rif_cache *next;
	unsigned long last_used;
	unsigned char local_ring;
};
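/*
 * Entries are keyed on the 6-byte station address, carry the learned
 * routing control field (rcf) plus up to eight route segments, and are
 * aged out of the table by rif_timer once they have gone unused for
 * sysctl_tr_rif_timeout jiffies.
 */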
#define RIF_TABLE_SIZE	32

/*
 * We hash the RIF cache 32 ways. We do after all have to look it
 * up a lot.
 */

static struct rif_cache *rif_table[RIF_TABLE_SIZE];

static DEFINE_SPINLOCK(rif_lock);
/*
 * Garbage disposal timer.
 */

static struct timer_list rif_timer;

int sysctl_tr_rif_timeout = 60*10*HZ;
static inline unsigned long rif_hash(const unsigned char *addr)
{
	unsigned long x;

	x = addr[0];
	x = (x << 2) ^ addr[1];
	x = (x << 2) ^ addr[2];
	x = (x << 2) ^ addr[3];
	x = (x << 2) ^ addr[4];
	x = (x << 2) ^ addr[5];

	x ^= x >> 8;

	return x & (RIF_TABLE_SIZE - 1);
}
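/*
 * rif_hash() folds the six address bytes down to a bucket index in
 * [0, RIF_TABLE_SIZE), so each rif_table[] slot holds a short chain of
 * rif_cache entries linked through ->next.
 */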
/*
 * Put the headers on a token ring packet. Token ring source routing
 * makes this a little more exciting than on ethernet.
 */

static int tr_header(struct sk_buff *skb, struct net_device *dev,
		     unsigned short type,
		     void *daddr, void *saddr, unsigned len)
{
	struct trh_hdr *trh;
	struct trllc *trllc;
	int hdr_len;

	/*
	 * Add the 802.2 SNAP header if IP as the IPv4/IPv6 code calls
	 * dev->hard_header directly.
	 */
	if (type == ETH_P_IP || type == ETH_P_IPV6 || type == ETH_P_ARP)
	{
		hdr_len = sizeof(struct trh_hdr) + sizeof(struct trllc);
		trh = (struct trh_hdr *)skb_push(skb, hdr_len);
		trllc = (struct trllc *)(trh + 1);
		trllc->dsap = trllc->ssap = EXTENDED_SAP;
		trllc->llc = UI_CMD;
		trllc->protid[0] = trllc->protid[1] = trllc->protid[2] = 0x00;
		trllc->ethertype = htons(type);
	}
	else
	{
		hdr_len = sizeof(struct trh_hdr);
		trh = (struct trh_hdr *)skb_push(skb, hdr_len);
	}

	trh->ac = AC;
	trh->fc = LLC_FRAME;

	if (saddr)
		memcpy(trh->saddr, saddr, dev->addr_len);
	else
		memcpy(trh->saddr, dev->dev_addr, dev->addr_len);

	/*
	 * Build the destination and then source route the frame
	 */
	if (daddr)
	{
		memcpy(trh->daddr, daddr, dev->addr_len);
		tr_source_route(skb, trh, dev);
		return hdr_len;
	}

	return -hdr_len;
}
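/*
 * Note on the layout built above: for IP, IPv6 and ARP the MAC header is
 * followed by an 802.2 LLC/SNAP header (DSAP/SSAP 0xAA, OUI 00-00-00,
 * ethertype); everything else gets the bare MAC header and is expected to
 * supply its own LLC. The positive/negative hdr_len return tells the
 * caller whether the destination address could be filled in yet.
 */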
/*
 * A neighbour discovery of some species (eg arp) has completed. We
 * can now send the packet.
 */

static int tr_rebuild_header(struct sk_buff *skb)
{
	struct trh_hdr *trh = (struct trh_hdr *)skb->data;
	struct trllc *trllc = (struct trllc *)(skb->data + sizeof(struct trh_hdr));
	struct net_device *dev = skb->dev;

	/*
	 * FIXME: We don't yet support IPv6 over token rings
	 */
	if (trllc->ethertype != htons(ETH_P_IP)) {
		printk("tr_rebuild_header: Don't know how to resolve type %04X addresses ?\n", ntohs(trllc->ethertype));
		return 0;
	}

#ifdef CONFIG_INET
	if (arp_find(trh->daddr, skb)) {
		return 1;
	}
	else
#endif
	{
		tr_source_route(skb, trh, dev);
		return 0;
	}
}
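/*
 * arp_find() returns nonzero while the destination hardware address could
 * not be resolved yet; on a zero return it has copied the resolved address
 * into the header and the frame only needs source routing before it can
 * be sent.
 */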
/*
 * Some of this is a bit hackish. We intercept RIF information
 * used for source routing. We also grab IP directly and don't feed
 * it via SNAP.
 */

__be16 tr_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	struct trh_hdr *trh;
	struct trllc *trllc;
	unsigned riflen = 0;

	skb->dev = dev;
	skb_reset_mac_header(skb);
	trh = tr_hdr(skb);

	if (trh->saddr[0] & TR_RII)
		riflen = (ntohs(trh->rcf) & TR_RCF_LEN_MASK) >> 8;

	trllc = (struct trllc *)(skb->data + sizeof(struct trh_hdr) - TR_MAXRIFLEN + riflen);

	skb_pull(skb, sizeof(struct trh_hdr) - TR_MAXRIFLEN + riflen);

	if (*trh->daddr & 0x80)
	{
		if (!memcmp(trh->daddr, dev->broadcast, TR_ALEN))
			skb->pkt_type = PACKET_BROADCAST;
		else
			skb->pkt_type = PACKET_MULTICAST;
	}
	/* XXX: the "& 0x00" term below is always zero, so this test never matches */
	else if ((trh->daddr[0] & 0x01) && (trh->daddr[1] & 0x00) && (trh->daddr[2] & 0x5E))
	{
		skb->pkt_type = PACKET_MULTICAST;
	}
	else if (dev->flags & IFF_PROMISC)
	{
		if (memcmp(trh->daddr, dev->dev_addr, TR_ALEN))
			skb->pkt_type = PACKET_OTHERHOST;
	}

	if ((skb->pkt_type != PACKET_BROADCAST) &&
	    (skb->pkt_type != PACKET_MULTICAST))
		tr_add_rif_info(trh, dev);

	/*
	 * Strip the SNAP header from ARP packets since we don't
	 * pass them through to the 802.2/SNAP layers.
	 */
	if (trllc->dsap == EXTENDED_SAP &&
	    (trllc->ethertype == htons(ETH_P_IP) ||
	     trllc->ethertype == htons(ETH_P_IPV6) ||
	     trllc->ethertype == htons(ETH_P_ARP)))
	{
		skb_pull(skb, sizeof(struct trllc));
		return trllc->ethertype;
	}

	return htons(ETH_P_TR_802_2);
}
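/*
 * The pull above removes the MAC header including however much of the
 * maximum-size RIF was actually present (trh_hdr is declared with the full
 * TR_MAXRIFLEN routing field). IP, IPv6 and ARP frames also lose their
 * LLC/SNAP header here and are returned as their inner ethertype; anything
 * else is handed up unchanged as ETH_P_TR_802_2.
 */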
/*
 * We try to do source routing...
 */

void tr_source_route(struct sk_buff *skb, struct trh_hdr *trh,
		     struct net_device *dev)
{
	int slack;
	unsigned int hash;
	struct rif_cache *entry;
	unsigned char *olddata;
	unsigned long flags;
	static const unsigned char mcast_func_addr[]
		= {0xC0, 0x00, 0x00, 0x04, 0x00, 0x00};

	spin_lock_irqsave(&rif_lock, flags);

	/*
	 * Broadcasts are single route as stated in RFC 1042
	 */
	if ((!memcmp(&(trh->daddr[0]), &(dev->broadcast[0]), TR_ALEN)) ||
	    (!memcmp(&(trh->daddr[0]), &(mcast_func_addr[0]), TR_ALEN)))
	{
		trh->rcf = htons((((sizeof(trh->rcf)) << 8) & TR_RCF_LEN_MASK)
			       | TR_RCF_FRAME2K | TR_RCF_LIMITED_BROADCAST);
		trh->saddr[0] |= TR_RII;
	}
	else
	{
		hash = rif_hash(trh->daddr);
		/*
		 * Walk the hash table and look for an entry
		 */
		for (entry = rif_table[hash];
		     entry && memcmp(&(entry->addr[0]), &(trh->daddr[0]), TR_ALEN);
		     entry = entry->next);

		/*
		 * If we found an entry we can route the frame.
		 */
		if (entry)
		{
#if TR_SR_DEBUG
			printk("source routing for %02X:%02X:%02X:%02X:%02X:%02X\n",
			       trh->daddr[0], trh->daddr[1], trh->daddr[2],
			       trh->daddr[3], trh->daddr[4], trh->daddr[5]);
#endif
			if (!entry->local_ring && (ntohs(entry->rcf) & TR_RCF_LEN_MASK) >> 8)
			{
				trh->rcf = entry->rcf;
				memcpy(&trh->rseg[0], &entry->rseg[0], 8*sizeof(unsigned short));
				trh->rcf ^= htons(TR_RCF_DIR_BIT);
				trh->rcf &= htons(0x1fff);	/* Issam Chehab <ichehab@madge1.demon.co.uk> */
				trh->saddr[0] |= TR_RII;
#if TR_SR_DEBUG
				printk("entry found with rcf %04x\n", entry->rcf);
#endif
			}
#if TR_SR_DEBUG
			else
				printk("entry found but without rcf length, local=%02x\n",
				       entry->local_ring);
#endif
			entry->last_used = jiffies;
		}
		else
		{
			/*
			 * Without the information we simply have to shout
			 * on the wire. The replies should rapidly clean this
			 * situation up.
			 */
			trh->rcf = htons((((sizeof(trh->rcf)) << 8) & TR_RCF_LEN_MASK)
				       | TR_RCF_FRAME2K | TR_RCF_LIMITED_BROADCAST);
			trh->saddr[0] |= TR_RII;
#if TR_SR_DEBUG
			printk("no entry in rif table found - broadcasting frame\n");
#endif
		}
	}

	/* Compress the RIF here so we don't have to do it in the driver(s) */
	if (!(trh->saddr[0] & 0x80))
		slack = 18;
	else
		slack = 18 - ((ntohs(trh->rcf) & TR_RCF_LEN_MASK) >> 8);
	olddata = skb->data;
	spin_unlock_irqrestore(&rif_lock, flags);

	skb_pull(skb, slack);
	memmove(skb->data, olddata, sizeof(struct trh_hdr) - slack);
}
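/*
 * The "compression" step above relies on trh_hdr always reserving the
 * maximum 18 bytes for the routing field: when the RIF actually used is
 * shorter (or absent), the header is slid forward by 'slack' bytes so the
 * driver sees a contiguous header of the real on-wire size.
 */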
/*
 * We have learned some new RIF information for our source
 * routing.
 */

static void tr_add_rif_info(struct trh_hdr *trh, struct net_device *dev)
{
	unsigned int hash, rii_p = 0;
	unsigned long flags;
	struct rif_cache *entry;
	unsigned char saddr0;

	spin_lock_irqsave(&rif_lock, flags);
	saddr0 = trh->saddr[0];

	/*
	 * Firstly see if the entry exists
	 */
	if (trh->saddr[0] & TR_RII)
	{
		trh->saddr[0] &= 0x7f;
		if (((ntohs(trh->rcf) & TR_RCF_LEN_MASK) >> 8) > 2)
		{
			rii_p = 1;
		}
	}

	hash = rif_hash(trh->saddr);
	for (entry = rif_table[hash];
	     entry && memcmp(&(entry->addr[0]), &(trh->saddr[0]), TR_ALEN);
	     entry = entry->next);

	if (entry == NULL)
	{
#if TR_SR_DEBUG
		printk("adding rif_entry: addr:%02X:%02X:%02X:%02X:%02X:%02X rcf:%04X\n",
		       trh->saddr[0], trh->saddr[1], trh->saddr[2],
		       trh->saddr[3], trh->saddr[4], trh->saddr[5],
		       ntohs(trh->rcf));
#endif
		/*
		 * Allocate our new entry. A failure to allocate loses
		 * us the information. This is harmless.
		 *
		 * FIXME: We ought to keep some kind of cache size
		 * limiting and adjust the timers to suit.
		 */
		entry = kmalloc(sizeof(struct rif_cache), GFP_ATOMIC);

		if (!entry)
		{
			printk(KERN_DEBUG "tr.c: Couldn't malloc rif cache entry !\n");
			spin_unlock_irqrestore(&rif_lock, flags);
			return;
		}

		memcpy(&(entry->addr[0]), &(trh->saddr[0]), TR_ALEN);
		entry->iface = dev->ifindex;
		entry->next = rif_table[hash];
		entry->last_used = jiffies;
		rif_table[hash] = entry;

		if (rii_p)
		{
			entry->rcf = trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK);
			memcpy(&(entry->rseg[0]), &(trh->rseg[0]), 8*sizeof(unsigned short));
			entry->local_ring = 0;
		}
		else
		{
			entry->local_ring = 1;
		}
	}
	else	/* Y. Tahara added */
	{
		/*
		 * Update existing entries
		 */
		if (!entry->local_ring)
			if (entry->rcf != (trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK)) &&
			    !(trh->rcf & htons(TR_RCF_BROADCAST_MASK)))
			{
#if TR_SR_DEBUG
				printk("updating rif_entry: addr:%02X:%02X:%02X:%02X:%02X:%02X rcf:%04X\n",
				       trh->saddr[0], trh->saddr[1], trh->saddr[2],
				       trh->saddr[3], trh->saddr[4], trh->saddr[5],
				       ntohs(trh->rcf));
#endif
				entry->rcf = trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK);
				memcpy(&(entry->rseg[0]), &(trh->rseg[0]), 8*sizeof(unsigned short));
			}
		entry->last_used = jiffies;
	}
	trh->saddr[0] = saddr0; /* put the routing indicator back for tcpdump */
	spin_unlock_irqrestore(&rif_lock, flags);
}
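/*
 * Learning notes: rii_p is only set when the incoming frame carried a RIF
 * longer than the bare 2-byte RCF, i.e. it actually crossed a bridge;
 * otherwise the source is recorded as being on the local ring. The
 * broadcast bits are masked out of the stored RCF so that all-routes and
 * single-route broadcast indicators are never replayed onto unicast
 * traffic.
 */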
/*
 * Scan the cache with a timer and see what we need to throw out.
 */

static void rif_check_expire(unsigned long dummy)
{
	int i;
	unsigned long flags, next_interval = jiffies + sysctl_tr_rif_timeout/2;

	spin_lock_irqsave(&rif_lock, flags);

	for (i = 0; i < RIF_TABLE_SIZE; i++) {
		struct rif_cache *entry, **pentry;

		pentry = rif_table + i;
		while ((entry = *pentry) != NULL) {
			unsigned long expires
				= entry->last_used + sysctl_tr_rif_timeout;

			if (time_before_eq(expires, jiffies)) {
				*pentry = entry->next;
				kfree(entry);
			} else {
				pentry = &entry->next;

				if (time_before(expires, next_interval))
					next_interval = expires;
			}
		}
	}

	spin_unlock_irqrestore(&rif_lock, flags);

	mod_timer(&rif_timer, next_interval);
}
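/*
 * The timer re-arms itself: by default the next sweep runs half a timeout
 * period from now, but it is pulled forward to the earliest expiry seen so
 * stale entries never outlive their TTL by much.
 */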
/*
 * Generate the /proc/net information for the token ring RIF
 * routing.
 */

#ifdef CONFIG_PROC_FS

static struct rif_cache *rif_get_idx(loff_t pos)
{
	int i;
	struct rif_cache *entry;
	loff_t off = 0;

	for (i = 0; i < RIF_TABLE_SIZE; i++)
		for (entry = rif_table[i]; entry; entry = entry->next) {
			if (off == pos)
				return entry;
			++off;
		}

	return NULL;
}
static void *rif_seq_start(struct seq_file *seq, loff_t *pos)
{
	spin_lock_irq(&rif_lock);

	return *pos ? rif_get_idx(*pos - 1) : SEQ_START_TOKEN;
}

static void *rif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	int i;
	struct rif_cache *ent = v;

	++*pos;

	if (v == SEQ_START_TOKEN) {
		i = -1;
		goto scan;
	}

	if (ent->next)
		return ent->next;

	i = rif_hash(ent->addr);
 scan:
	while (++i < RIF_TABLE_SIZE) {
		if ((ent = rif_table[i]) != NULL)
			return ent;
	}
	return NULL;
}

static void rif_seq_stop(struct seq_file *seq, void *v)
{
	spin_unlock_irq(&rif_lock);
}
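/*
 * Standard seq_file iterator: start() takes rif_lock and returns
 * SEQ_START_TOKEN for the header line, next() walks the current hash
 * chain and then advances to the next non-empty bucket, and stop()
 * drops the lock again.
 */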
static int rif_seq_show(struct seq_file *seq, void *v)
{
	int j, rcf_len, segment, brdgnmb;
	struct rif_cache *entry = v;

	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
			 "if     TR address       TTL   rcf   routing segments\n");
	else {
		struct net_device *dev = dev_get_by_index(entry->iface);
		long ttl = (long) (entry->last_used + sysctl_tr_rif_timeout)
				- (long) jiffies;

		seq_printf(seq, "%s %02X:%02X:%02X:%02X:%02X:%02X %7li ",
			   dev ? dev->name : "?",
			   entry->addr[0], entry->addr[1], entry->addr[2],
			   entry->addr[3], entry->addr[4], entry->addr[5],
			   ttl/HZ);

		if (entry->local_ring)
			seq_puts(seq, "local\n");
		else {
			seq_printf(seq, "%04X", ntohs(entry->rcf));
			rcf_len = ((ntohs(entry->rcf) & TR_RCF_LEN_MASK) >> 8) - 2;
			if (rcf_len)
				rcf_len >>= 1;
			for (j = 1; j < rcf_len; j++) {
				if (j == 1) {
					segment = ntohs(entry->rseg[j-1]) >> 4;
					seq_printf(seq, "  %03X", segment);
				}

				segment = ntohs(entry->rseg[j]) >> 4;
				brdgnmb = ntohs(entry->rseg[j-1]) & 0x00f;
				seq_printf(seq, "-%01X-%03X", brdgnmb, segment);
			}
			seq_putc(seq, '\n');
		}

		if (dev)
			dev_put(dev);
	}
	return 0;
}
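/*
 * Each /proc/net/tr_rif line shows the interface, the learned station
 * address, the remaining TTL in seconds, and either "local" or the RCF
 * followed by the decoded route: ring numbers come from the top 12 bits
 * of each segment descriptor, bridge numbers from the low 4 bits.
 */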
static const struct seq_operations rif_seq_ops = {
	.start = rif_seq_start,
	.next  = rif_seq_next,
	.stop  = rif_seq_stop,
	.show  = rif_seq_show,
};

static int rif_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rif_seq_ops);
}

static const struct file_operations rif_seq_fops = {
	.owner	 = THIS_MODULE,
	.open    = rif_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

#endif
static void tr_setup(struct net_device *dev)
{
	/*
	 * Configure and register
	 */

	dev->hard_header	= tr_header;
	dev->rebuild_header	= tr_rebuild_header;

	dev->type		= ARPHRD_IEEE802_TR;
	dev->hard_header_len	= TR_HLEN;
	dev->mtu		= 2000;
	dev->addr_len		= TR_ALEN;
	dev->tx_queue_len	= 100;	/* Long queues on tr */

	memset(dev->broadcast, 0xFF, TR_ALEN);

	/* New-style flags. */
	dev->flags		= IFF_BROADCAST | IFF_MULTICAST;
}
/**
 * alloc_trdev - Register token ring device
 * @sizeof_priv: Size of additional driver-private structure to be allocated
 *	for this token ring device
 *
 * Fill in the fields of the device structure with token ring-generic values.
 *
 * Constructs a new net device, complete with a private data area of
 * size @sizeof_priv. A 32-byte (not bit) alignment is enforced for
 * this private data area.
 */
struct net_device *alloc_trdev(int sizeof_priv)
{
	return alloc_netdev(sizeof_priv, "tr%d", tr_setup);
}
/*
 * Called during bootup. We don't actually have to initialise
 * too much for this.
 */

static int __init rif_init(void)
{
	init_timer(&rif_timer);
	rif_timer.expires  = sysctl_tr_rif_timeout;
	rif_timer.function = rif_check_expire;
	add_timer(&rif_timer);

	proc_net_fops_create("tr_rif", S_IRUGO, &rif_seq_fops);
	return 0;
}

module_init(rif_init);
EXPORT_SYMBOL(tr_type_trans);
EXPORT_SYMBOL(alloc_trdev);