/* myri_sbus.c: MyriCOM MyriNET SBUS card driver.
 *
 * Copyright (C) 1996, 1999, 2006, 2008 David S. Miller (davem@davemloft.net)
 */

static char version[] =
        "myri_sbus.c:v2.0 June 23, 2006 David S. Miller (davem@davemloft.net)\n";

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <net/dst.h>
#include <net/arp.h>
#include <net/sock.h>
#include <net/ipv6.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/byteorder.h>
#include <asm/idprom.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/auxio.h>
#include <asm/pgtable.h>
#include <asm/irq.h>

#include "myri_sbus.h"
#include "myri_code.h"
/* #define DEBUG_DETECT */
/* #define DEBUG_IRQ */
/* #define DEBUG_TRANSMIT */
/* #define DEBUG_RECEIVE */
/* #define DEBUG_HEADER */

#ifdef DEBUG_DETECT
#define DET(x)	printk x
#else
#define DET(x)
#endif

#ifdef DEBUG_IRQ
#define DIRQ(x)	printk x
#else
#define DIRQ(x)
#endif

#ifdef DEBUG_TRANSMIT
#define DTX(x)	printk x
#else
#define DTX(x)
#endif

#ifdef DEBUG_RECEIVE
#define DRX(x)	printk x
#else
#define DRX(x)
#endif

#ifdef DEBUG_HEADER
#define DHDR(x)	printk x
#else
#define DHDR(x)
#endif
static void myri_reset_off(void __iomem *lp, void __iomem *cregs)
{
	/* Clear IRQ mask. */
	sbus_writel(0, lp + LANAI_EIMASK);

	/* Turn RESET function off. */
	sbus_writel(CONTROL_ROFF, cregs + MYRICTRL_CTRL);
}

static void myri_reset_on(void __iomem *cregs)
{
	/* Enable RESET function. */
	sbus_writel(CONTROL_RON, cregs + MYRICTRL_CTRL);

	/* Disable IRQ's. */
	sbus_writel(CONTROL_DIRQ, cregs + MYRICTRL_CTRL);
}

static void myri_disable_irq(void __iomem *lp, void __iomem *cregs)
{
	sbus_writel(CONTROL_DIRQ, cregs + MYRICTRL_CTRL);
	sbus_writel(0, lp + LANAI_EIMASK);
	sbus_writel(ISTAT_HOST, lp + LANAI_ISTAT);
}

static void myri_enable_irq(void __iomem *lp, void __iomem *cregs)
{
	sbus_writel(CONTROL_EIRQ, cregs + MYRICTRL_CTRL);
	sbus_writel(ISTAT_HOST, lp + LANAI_EIMASK);
}
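
/* bang_the_chip() below is the transmit doorbell: it sets the 'send'
 * word in the shared-memory block and then writes CONTROL_WON to the
 * control register, which (as used elsewhere in this driver to wake
 * the board) makes the LANAI notice newly queued work.
 */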
static inline void bang_the_chip(struct myri_eth *mp)
{
	struct myri_shmem __iomem *shmem = mp->shmem;
	void __iomem *cregs = mp->cregs;

	sbus_writel(1, &shmem->send);
	sbus_writel(CONTROL_WON, cregs + MYRICTRL_CTRL);
}

static int myri_do_handshake(struct myri_eth *mp)
{
	struct myri_shmem __iomem *shmem = mp->shmem;
	void __iomem *cregs = mp->cregs;
	struct myri_channel __iomem *chan = &shmem->channel;
	int tick = 0;

	DET(("myri_do_handshake: "));
	if (sbus_readl(&chan->state) == STATE_READY) {
		DET(("Already STATE_READY, failed.\n"));
		return -1;	/* We're hosed... */
	}

	myri_disable_irq(mp->lregs, cregs);

	while (tick++ < 25) {
		u32 softstate;

		/* Wake it up. */
		DET(("shakedown, CONTROL_WON, "));
		sbus_writel(1, &shmem->shakedown);
		sbus_writel(CONTROL_WON, cregs + MYRICTRL_CTRL);

		softstate = sbus_readl(&chan->state);
		DET(("chanstate[%08x] ", softstate));
		if (softstate == STATE_READY) {
			DET(("wakeup successful, "));
			break;
		}

		if (softstate != STATE_WFN) {
			DET(("not WFN setting that, "));
			sbus_writel(STATE_WFN, &chan->state);
		}

		udelay(20);
	}

	myri_enable_irq(mp->lregs, cregs);

	if (tick > 25) {
		DET(("25 ticks we lose, failure.\n"));
		return -1;
	}
	DET(("success\n"));
	return 0;
}
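
/* Firmware download sequence: hold the LANAI in reset, zero its SRAM,
 * copy in the lanai4 code and data images (note the *2 scaling applied
 * to LANAI offsets when accessed from the host side), program the
 * station address, burst size and interrupt mask into shared memory,
 * then drop reset and handshake with the running firmware.
 */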
static int __devinit myri_load_lanai(struct myri_eth *mp)
{
	struct net_device *dev = mp->dev;
	struct myri_shmem __iomem *shmem = mp->shmem;
	void __iomem *rptr;
	int i;

	myri_disable_irq(mp->lregs, mp->cregs);
	myri_reset_on(mp->cregs);

	rptr = mp->lanai;
	for (i = 0; i < mp->eeprom.ramsz; i++)
		sbus_writeb(0, rptr + i);

	if (mp->eeprom.cpuvers >= CPUVERS_3_0)
		sbus_writel(mp->eeprom.cval, mp->lregs + LANAI_CVAL);

	/* Load executable code. */
	for (i = 0; i < sizeof(lanai4_code); i++)
		sbus_writeb(lanai4_code[i], rptr + (lanai4_code_off * 2) + i);

	/* Load data segment. */
	for (i = 0; i < sizeof(lanai4_data); i++)
		sbus_writeb(lanai4_data[i], rptr + (lanai4_data_off * 2) + i);

	/* Set device address. */
	sbus_writeb(0, &shmem->addr[0]);
	sbus_writeb(0, &shmem->addr[1]);
	for (i = 0; i < 6; i++)
		sbus_writeb(dev->dev_addr[i],
			    &shmem->addr[i + 2]);

	/* Set SBUS bursts and interrupt mask. */
	sbus_writel(((mp->myri_bursts & 0xf8) >> 3), &shmem->burst);
	sbus_writel(SHMEM_IMASK_RX, &shmem->imask);

	/* Release the LANAI. */
	myri_disable_irq(mp->lregs, mp->cregs);
	myri_reset_off(mp->lregs, mp->cregs);
	myri_disable_irq(mp->lregs, mp->cregs);

	/* Wait for the reset to complete. */
	for (i = 0; i < 5000; i++) {
		if (sbus_readl(&shmem->channel.state) != STATE_READY)
			break;
		else
			udelay(10);
	}

	if (i == 5000)
		printk(KERN_ERR "myricom: Chip would not reset after firmware load.\n");

	i = myri_do_handshake(mp);
	if (i)
		printk(KERN_ERR "myricom: Handshake with LANAI failed.\n");

	if (mp->eeprom.cpuvers == CPUVERS_4_0)
		sbus_writel(0, mp->lregs + LANAI_VERS);

	return i;
}
static void myri_clean_rings(struct myri_eth *mp)
{
	struct sendq __iomem *sq = mp->sq;
	struct recvq __iomem *rq = mp->rq;
	int i;

	sbus_writel(0, &rq->tail);
	sbus_writel(0, &rq->head);
	for (i = 0; i < (RX_RING_SIZE+1); i++) {
		if (mp->rx_skbs[i] != NULL) {
			struct myri_rxd __iomem *rxd = &rq->myri_rxd[i];
			u32 dma_addr;

			dma_addr = sbus_readl(&rxd->myri_scatters[0].addr);
			dma_unmap_single(&mp->myri_op->dev, dma_addr,
					 RX_ALLOC_SIZE, DMA_FROM_DEVICE);
			dev_kfree_skb(mp->rx_skbs[i]);
			mp->rx_skbs[i] = NULL;
		}
	}

	mp->tx_old = 0;
	sbus_writel(0, &sq->tail);
	sbus_writel(0, &sq->head);
	for (i = 0; i < TX_RING_SIZE; i++) {
		if (mp->tx_skbs[i] != NULL) {
			struct sk_buff *skb = mp->tx_skbs[i];
			struct myri_txd __iomem *txd = &sq->myri_txd[i];
			u32 dma_addr;

			dma_addr = sbus_readl(&txd->myri_gathers[0].addr);
			dma_unmap_single(&mp->myri_op->dev, dma_addr,
					 (skb->len + 3) & ~3,
					 DMA_TO_DEVICE);
			dev_kfree_skb(mp->tx_skbs[i]);
			mp->tx_skbs[i] = NULL;
		}
	}
}

static void myri_init_rings(struct myri_eth *mp, int from_irq)
{
	struct recvq __iomem *rq = mp->rq;
	struct myri_rxd __iomem *rxd = &rq->myri_rxd[0];
	struct net_device *dev = mp->dev;
	gfp_t gfp_flags = GFP_KERNEL;
	int i;

	if (from_irq || in_interrupt())
		gfp_flags = GFP_ATOMIC;

	myri_clean_rings(mp);
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = myri_alloc_skb(RX_ALLOC_SIZE, gfp_flags);
		u32 dma_addr;

		if (!skb)
			continue;
		mp->rx_skbs[i] = skb;
		skb->dev = dev;
		skb_put(skb, RX_ALLOC_SIZE);

		dma_addr = dma_map_single(&mp->myri_op->dev,
					  skb->data, RX_ALLOC_SIZE,
					  DMA_FROM_DEVICE);
		sbus_writel(dma_addr, &rxd[i].myri_scatters[0].addr);
		sbus_writel(RX_ALLOC_SIZE, &rxd[i].myri_scatters[0].len);
		sbus_writel(i, &rxd[i].ctx);
		sbus_writel(1, &rxd[i].num_sg);
	}
	sbus_writel(0, &rq->head);
	sbus_writel(RX_RING_SIZE, &rq->tail);
}

static int myri_init(struct myri_eth *mp, int from_irq)
{
	myri_init_rings(mp, from_irq);
	return 0;
}
static void myri_is_not_so_happy(struct myri_eth *mp)
{
}

#ifdef DEBUG_HEADER
static void dump_ehdr(struct ethhdr *ehdr)
{
	DECLARE_MAC_BUF(mac);
	DECLARE_MAC_BUF(mac2);
	printk("ehdr[h_dst(%s)"
	       "h_source(%s)"
	       "h_proto(%04x)]\n",
	       print_mac(mac, ehdr->h_dest), print_mac(mac2, ehdr->h_source),
	       ehdr->h_proto);
}

static void dump_ehdr_and_myripad(unsigned char *stuff)
{
	struct ethhdr *ehdr = (struct ethhdr *) (stuff + 2);

	printk("pad[%02x:%02x]", stuff[0], stuff[1]);
	dump_ehdr(ehdr);
}
#endif
static void myri_tx(struct myri_eth *mp, struct net_device *dev)
{
	struct sendq __iomem *sq = mp->sq;
	int entry = mp->tx_old;
	int limit = sbus_readl(&sq->head);

	DTX(("entry[%d] limit[%d] ", entry, limit));
	if (entry == limit)
		return;
	while (entry != limit) {
		struct sk_buff *skb = mp->tx_skbs[entry];
		u32 dma_addr;

		DTX(("SKB[%d] ", entry));
		dma_addr = sbus_readl(&sq->myri_txd[entry].myri_gathers[0].addr);
		dma_unmap_single(&mp->myri_op->dev, dma_addr,
				 skb->len, DMA_TO_DEVICE);
		dev_kfree_skb(skb);
		mp->tx_skbs[entry] = NULL;
		dev->stats.tx_packets++;
		entry = NEXT_TX(entry);
	}
	mp->tx_old = entry;
}
/* Determine the packet's protocol ID. The rule here is that we
 * assume 802.3 if the type field is short enough to be a length.
 * This is normal practice and works for any 'now in use' protocol.
 */
static __be16 myri_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	struct ethhdr *eth;
	unsigned char *rawp;

	skb_set_mac_header(skb, MYRI_PAD_LEN);
	skb_pull(skb, dev->hard_header_len);
	eth = eth_hdr(skb);

#ifdef DEBUG_HEADER
	DHDR(("myri_type_trans: "));
	dump_ehdr(eth);
#endif
	if (*eth->h_dest & 1) {
		if (memcmp(eth->h_dest, dev->broadcast, ETH_ALEN) == 0)
			skb->pkt_type = PACKET_BROADCAST;
		else
			skb->pkt_type = PACKET_MULTICAST;
	} else if (dev->flags & (IFF_PROMISC|IFF_ALLMULTI)) {
		if (memcmp(eth->h_dest, dev->dev_addr, ETH_ALEN))
			skb->pkt_type = PACKET_OTHERHOST;
	}

	if (ntohs(eth->h_proto) >= 1536)
		return eth->h_proto;

	rawp = skb->data;

	/* This is a magic hack to spot IPX packets. Older Novell breaks
	 * the protocol design and runs IPX over 802.3 without an 802.2 LLC
	 * layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
	 * won't work for fault tolerant netware but does for the rest.
	 */
	if (*(unsigned short *)rawp == 0xFFFF)
		return htons(ETH_P_802_3);

	/* Real 802.2 LLC */
	return htons(ETH_P_802_2);
}
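
/* Receive path: completed frames are consumed from the acknowledge
 * queue (rqack) between its head and tail pointers, and each entry is
 * acked by advancing rqa->head.  Frames larger than RX_COPY_THRESHOLD
 * keep their ring skb and a freshly mapped buffer takes its place;
 * smaller frames are copied into a new skb and the original buffer is
 * handed straight back to the LANAI on the receive queue.
 */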
static void myri_rx(struct myri_eth *mp, struct net_device *dev)
{
	struct recvq __iomem *rq = mp->rq;
	struct recvq __iomem *rqa = mp->rqack;
	int entry = sbus_readl(&rqa->head);
	int limit = sbus_readl(&rqa->tail);
	int drops;

	DRX(("entry[%d] limit[%d] ", entry, limit));
	if (entry == limit)
		return;
	drops = 0;
	DRX(("\n"));
	while (entry != limit) {
		struct myri_rxd __iomem *rxdack = &rqa->myri_rxd[entry];
		u32 csum = sbus_readl(&rxdack->csum);
		int len = sbus_readl(&rxdack->myri_scatters[0].len);
		int index = sbus_readl(&rxdack->ctx);
		struct myri_rxd __iomem *rxd = &rq->myri_rxd[sbus_readl(&rq->tail)];
		struct sk_buff *skb = mp->rx_skbs[index];

		/* Ack it. */
		sbus_writel(NEXT_RX(entry), &rqa->head);

		/* Check for errors. */
		DRX(("rxd[%d]: %p len[%d] csum[%08x] ", entry, rxd, len, csum));
		dma_sync_single_for_cpu(&mp->myri_op->dev,
					sbus_readl(&rxd->myri_scatters[0].addr),
					RX_ALLOC_SIZE, DMA_FROM_DEVICE);
		if (len < (ETH_HLEN + MYRI_PAD_LEN) || (skb->data[0] != MYRI_PAD_LEN)) {
			DRX(("ERROR["));
			dev->stats.rx_errors++;
			if (len < (ETH_HLEN + MYRI_PAD_LEN)) {
				DRX(("BAD_LENGTH] "));
				dev->stats.rx_length_errors++;
			} else {
				DRX(("NO_PADDING] "));
				dev->stats.rx_frame_errors++;
			}

			/* Return it to the LANAI. */
	drop_it:
			drops++;
			DRX(("DROP "));
			dev->stats.rx_dropped++;
			dma_sync_single_for_device(&mp->myri_op->dev,
						   sbus_readl(&rxd->myri_scatters[0].addr),
						   RX_ALLOC_SIZE,
						   DMA_FROM_DEVICE);
			sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len);
			sbus_writel(index, &rxd->ctx);
			sbus_writel(1, &rxd->num_sg);
			sbus_writel(NEXT_RX(sbus_readl(&rq->tail)), &rq->tail);
			goto next;
		}

		DRX(("len[%d] ", len));
		if (len > RX_COPY_THRESHOLD) {
			struct sk_buff *new_skb;
			u32 dma_addr;

			DRX(("BIGBUFF "));
			new_skb = myri_alloc_skb(RX_ALLOC_SIZE, GFP_ATOMIC);
			if (new_skb == NULL) {
				DRX(("skb_alloc(FAILED) "));
				goto drop_it;
			}
			dma_unmap_single(&mp->myri_op->dev,
					 sbus_readl(&rxd->myri_scatters[0].addr),
					 RX_ALLOC_SIZE,
					 DMA_FROM_DEVICE);
			mp->rx_skbs[index] = new_skb;
			new_skb->dev = dev;
			skb_put(new_skb, RX_ALLOC_SIZE);
			dma_addr = dma_map_single(&mp->myri_op->dev,
						  new_skb->data,
						  RX_ALLOC_SIZE,
						  DMA_FROM_DEVICE);
			sbus_writel(dma_addr, &rxd->myri_scatters[0].addr);
			sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len);
			sbus_writel(index, &rxd->ctx);
			sbus_writel(1, &rxd->num_sg);
			sbus_writel(NEXT_RX(sbus_readl(&rq->tail)), &rq->tail);

			/* Trim the original skb for the netif. */
			DRX(("trim(%d) ", len));
			skb_trim(skb, len);
		} else {
			struct sk_buff *copy_skb = dev_alloc_skb(len);

			DRX(("SMALLBUFF "));
			if (copy_skb == NULL) {
				DRX(("dev_alloc_skb(FAILED) "));
				goto drop_it;
			}
			/* DMA sync already done above. */
			copy_skb->dev = dev;
			DRX(("resv_and_put "));
			skb_put(copy_skb, len);
			skb_copy_from_linear_data(skb, copy_skb->data, len);

			/* Reuse original ring buffer. */
			DRX(("reuse "));
			dma_sync_single_for_device(&mp->myri_op->dev,
						   sbus_readl(&rxd->myri_scatters[0].addr),
						   RX_ALLOC_SIZE,
						   DMA_FROM_DEVICE);
			sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len);
			sbus_writel(index, &rxd->ctx);
			sbus_writel(1, &rxd->num_sg);
			sbus_writel(NEXT_RX(sbus_readl(&rq->tail)), &rq->tail);

			skb = copy_skb;
		}

		/* Just like the happy meal we get checksums from this card. */
		skb->csum = csum;
		skb->ip_summed = CHECKSUM_UNNECESSARY;	/* XXX */

		skb->protocol = myri_type_trans(skb, dev);
		DRX(("prot[%04x] netif_rx ", skb->protocol));
		netif_rx(skb);

		dev->last_rx = jiffies;
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
	next:
		DRX(("NEXT\n"));
		entry = NEXT_RX(entry);
	}
}
static irqreturn_t myri_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct myri_eth *mp = (struct myri_eth *) dev->priv;
	void __iomem *lregs = mp->lregs;
	struct myri_channel __iomem *chan = &mp->shmem->channel;
	unsigned long flags;
	u32 status;
	int handled = 0;

	spin_lock_irqsave(&mp->irq_lock, flags);

	status = sbus_readl(lregs + LANAI_ISTAT);
	DIRQ(("myri_interrupt: status[%08x] ", status));
	if (status & ISTAT_HOST) {
		u32 softstate;

		handled = 1;
		DIRQ(("IRQ_DISAB "));
		myri_disable_irq(lregs, mp->cregs);
		softstate = sbus_readl(&chan->state);
		DIRQ(("state[%08x] ", softstate));
		if (softstate != STATE_READY) {
			DIRQ(("myri_not_so_happy "));
			myri_is_not_so_happy(mp);
		}
		DIRQ(("\nmyri_rx: "));
		myri_rx(mp, dev);
		DIRQ(("\nistat=ISTAT_HOST "));
		sbus_writel(ISTAT_HOST, lregs + LANAI_ISTAT);
		DIRQ(("IRQ_ENAB "));
		myri_enable_irq(lregs, mp->cregs);
	}
	DIRQ(("\n"));

	spin_unlock_irqrestore(&mp->irq_lock, flags);

	return IRQ_RETVAL(handled);
}
static int myri_open(struct net_device *dev)
{
	struct myri_eth *mp = (struct myri_eth *) dev->priv;

	return myri_init(mp, in_interrupt());
}

static int myri_close(struct net_device *dev)
{
	struct myri_eth *mp = (struct myri_eth *) dev->priv;

	myri_clean_rings(mp);
	return 0;
}

static void myri_tx_timeout(struct net_device *dev)
{
	struct myri_eth *mp = (struct myri_eth *) dev->priv;

	printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);

	dev->stats.tx_errors++;
	myri_init(mp, 0);
	netif_wake_queue(dev);
}
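
/* Transmit path: reclaim completed descriptors via myri_tx(), make
 * sure TX_BUFFS_AVAIL() still holds, round the length up to a 4-byte
 * multiple, fill the descriptor's address words (all ones when the
 * destination has the multicast bit set, otherwise words packed from
 * the leading bytes of the frame), then ring the doorbell with
 * bang_the_chip().
 */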
static int myri_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct myri_eth *mp = (struct myri_eth *) dev->priv;
	struct sendq __iomem *sq = mp->sq;
	struct myri_txd __iomem *txd;
	unsigned long flags;
	unsigned int head, tail;
	int len, entry;
	u32 dma_addr;

	DTX(("myri_start_xmit: "));

	myri_tx(mp, dev);

	netif_stop_queue(dev);

	/* This is just to prevent multiple PIO reads for TX_BUFFS_AVAIL. */
	head = sbus_readl(&sq->head);
	tail = sbus_readl(&sq->tail);

	if (!TX_BUFFS_AVAIL(head, tail)) {
		DTX(("no buffs available, returning 1\n"));
		return 1;
	}

	spin_lock_irqsave(&mp->irq_lock, flags);

	DHDR(("xmit[skbdata(%p)]\n", skb->data));
#ifdef DEBUG_HEADER
	dump_ehdr_and_myripad(((unsigned char *) skb->data));
#endif

	/* XXX Maybe this can go as well. */
	len = skb->len;
	if (len & 3) {
		DTX(("len&3 "));
		len = (len + 4) & (~3);
	}

	entry = sbus_readl(&sq->tail);

	txd = &sq->myri_txd[entry];
	mp->tx_skbs[entry] = skb;

	/* Must do this before we sbus map it. */
	if (skb->data[MYRI_PAD_LEN] & 0x1) {
		sbus_writew(0xffff, &txd->addr[0]);
		sbus_writew(0xffff, &txd->addr[1]);
		sbus_writew(0xffff, &txd->addr[2]);
		sbus_writew(0xffff, &txd->addr[3]);
	} else {
		sbus_writew(0xffff, &txd->addr[0]);
		sbus_writew((skb->data[0] << 8) | skb->data[1], &txd->addr[1]);
		sbus_writew((skb->data[2] << 8) | skb->data[3], &txd->addr[2]);
		sbus_writew((skb->data[4] << 8) | skb->data[5], &txd->addr[3]);
	}

	dma_addr = dma_map_single(&mp->myri_op->dev, skb->data,
				  len, DMA_TO_DEVICE);
	sbus_writel(dma_addr, &txd->myri_gathers[0].addr);
	sbus_writel(len, &txd->myri_gathers[0].len);
	sbus_writel(1, &txd->num_sg);
	sbus_writel(KERNEL_CHANNEL, &txd->chan);
	sbus_writel(len, &txd->len);
	sbus_writel((u32)-1, &txd->csum_off);
	sbus_writel(0, &txd->csum_field);

	sbus_writel(NEXT_TX(entry), &sq->tail);
	DTX(("BangTheChip "));
	bang_the_chip(mp);

	DTX(("tbusy=0, returning 0\n"));
	netif_start_queue(dev);
	spin_unlock_irqrestore(&mp->irq_lock, flags);
	return 0;
}
/* Create the MyriNet MAC header for an arbitrary protocol layer
 *
 * saddr=NULL	means use device source address
 * daddr=NULL	means leave destination address (eg unresolved arp)
 */
static int myri_header(struct sk_buff *skb, struct net_device *dev,
		       unsigned short type, const void *daddr,
		       const void *saddr, unsigned len)
{
	struct ethhdr *eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
	unsigned char *pad = (unsigned char *) skb_push(skb, MYRI_PAD_LEN);

#ifdef DEBUG_HEADER
	DHDR(("myri_header: pad[%02x,%02x] ", pad[0], pad[1]));
	dump_ehdr(eth);
#endif

	/* Set the MyriNET padding identifier. */
	pad[0] = MYRI_PAD_LEN;
	pad[1] = 0xab;

	/* Set the protocol type. For a packet of type ETH_P_802_3 we put the
	 * length in here instead. It is up to the 802.2 layer to carry
	 * protocol information.
	 */
	if (type != ETH_P_802_3)
		eth->h_proto = htons(type);
	else
		eth->h_proto = htons(len);

	/* Set the source hardware address. */
	if (saddr)
		memcpy(eth->h_source, saddr, dev->addr_len);
	else
		memcpy(eth->h_source, dev->dev_addr, dev->addr_len);

	/* Anyway, the loopback-device should never use this function... */
	if (dev->flags & IFF_LOOPBACK) {
		int i;
		for (i = 0; i < dev->addr_len; i++)
			eth->h_dest[i] = 0;
		return dev->hard_header_len;
	}

	if (daddr) {
		memcpy(eth->h_dest, daddr, dev->addr_len);
		return dev->hard_header_len;
	}
	return -dev->hard_header_len;
}
/* Rebuild the MyriNet MAC header. This is called after an ARP
 * (or in future other address resolution) has completed on this
 * sk_buff. We now let ARP fill in the other fields.
 */
static int myri_rebuild_header(struct sk_buff *skb)
{
	unsigned char *pad = (unsigned char *) skb->data;
	struct ethhdr *eth = (struct ethhdr *) (pad + MYRI_PAD_LEN);
	struct net_device *dev = skb->dev;

#ifdef DEBUG_HEADER
	DHDR(("myri_rebuild_header: pad[%02x,%02x] ", pad[0], pad[1]));
	dump_ehdr(eth);
#endif

	/* Refill MyriNet padding identifiers, this is just being anal. */
	pad[0] = MYRI_PAD_LEN;
	pad[1] = 0xab;

	switch (eth->h_proto)
	{
#ifdef CONFIG_INET
	case __constant_htons(ETH_P_IP):
		return arp_find(eth->h_dest, skb);
#endif

	default:
		printk(KERN_DEBUG
		       "%s: unable to resolve type %X addresses.\n",
		       dev->name, (int)eth->h_proto);

		memcpy(eth->h_source, dev->dev_addr, dev->addr_len);
		return 0;
		break;
	}

	return 0;
}
static int myri_header_cache(const struct neighbour *neigh, struct hh_cache *hh)
{
	unsigned short type = hh->hh_type;
	unsigned char *pad;
	struct ethhdr *eth;
	const struct net_device *dev = neigh->dev;

	pad = ((unsigned char *) hh->hh_data) +
		HH_DATA_OFF(sizeof(*eth) + MYRI_PAD_LEN);
	eth = (struct ethhdr *) (pad + MYRI_PAD_LEN);

	if (type == htons(ETH_P_802_3))
		return -1;

	/* Refill MyriNet padding identifiers, this is just being anal. */
	pad[0] = MYRI_PAD_LEN;
	pad[1] = 0xab;

	eth->h_proto = type;
	memcpy(eth->h_source, dev->dev_addr, dev->addr_len);
	memcpy(eth->h_dest, neigh->ha, dev->addr_len);
	hh->hh_len = 16;
	return 0;
}

/* Called by Address Resolution module to notify changes in address. */
void myri_header_cache_update(struct hh_cache *hh,
			      const struct net_device *dev,
			      const unsigned char *haddr)
{
	memcpy(((u8 *)hh->hh_data) + HH_DATA_OFF(sizeof(struct ethhdr)),
	       haddr, dev->addr_len);
}
static int myri_change_mtu(struct net_device *dev, int new_mtu)
{
	if ((new_mtu < (ETH_HLEN + MYRI_PAD_LEN)) || (new_mtu > MYRINET_MTU))
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

static void myri_set_multicast(struct net_device *dev)
{
	/* Do nothing, all MyriCOM nodes transmit multicast frames
	 * as broadcast packets...
	 */
}
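
/* When the PROM carries no "myrinet-board-id" property, the probe code
 * falls back to set_boardid_from_idprom() below, which synthesizes the
 * station address from the host IDPROM (machine type and serial number)
 * plus the probe instance number.
 */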
static inline void set_boardid_from_idprom(struct myri_eth *mp, int num)
{
	mp->eeprom.id[0] = 0;
	mp->eeprom.id[1] = idprom->id_machtype;
	mp->eeprom.id[2] = (idprom->id_sernum >> 16) & 0xff;
	mp->eeprom.id[3] = (idprom->id_sernum >> 8) & 0xff;
	mp->eeprom.id[4] = (idprom->id_sernum >> 0) & 0xff;
	mp->eeprom.id[5] = num;
}

static inline void determine_reg_space_size(struct myri_eth *mp)
{
	switch(mp->eeprom.cpuvers) {
	case CPUVERS_2_3:
	case CPUVERS_3_0:
	case CPUVERS_3_1:
	case CPUVERS_3_2:
		mp->reg_size = (3 * 128 * 1024) + 4096;
		break;

	case CPUVERS_4_0:
	case CPUVERS_4_1:
		mp->reg_size = ((4096<<1) + mp->eeprom.ramsz);
		break;

	case CPUVERS_4_2:
	case CPUVERS_5_0:
	default:
		printk("myricom: AIEEE weird cpu version %04x assuming pre4.0\n",
		       mp->eeprom.cpuvers);
		mp->reg_size = (3 * 128 * 1024) + 4096;
	}
}
#ifdef DEBUG_DETECT
static void dump_eeprom(struct myri_eth *mp)
{
	printk("EEPROM: clockval[%08x] cpuvers[%04x] "
	       "id[%02x,%02x,%02x,%02x,%02x,%02x]\n",
	       mp->eeprom.cval, mp->eeprom.cpuvers,
	       mp->eeprom.id[0], mp->eeprom.id[1], mp->eeprom.id[2],
	       mp->eeprom.id[3], mp->eeprom.id[4], mp->eeprom.id[5]);
	printk("EEPROM: ramsz[%08x]\n", mp->eeprom.ramsz);
	printk("EEPROM: fvers[%02x,%02x,%02x,%02x,%02x,%02x,%02x,%02x\n",
	       mp->eeprom.fvers[0], mp->eeprom.fvers[1], mp->eeprom.fvers[2],
	       mp->eeprom.fvers[3], mp->eeprom.fvers[4], mp->eeprom.fvers[5],
	       mp->eeprom.fvers[6], mp->eeprom.fvers[7]);
	printk("EEPROM: %02x,%02x,%02x,%02x,%02x,%02x,%02x,%02x\n",
	       mp->eeprom.fvers[8], mp->eeprom.fvers[9], mp->eeprom.fvers[10],
	       mp->eeprom.fvers[11], mp->eeprom.fvers[12], mp->eeprom.fvers[13],
	       mp->eeprom.fvers[14], mp->eeprom.fvers[15]);
	printk("EEPROM: %02x,%02x,%02x,%02x,%02x,%02x,%02x,%02x\n",
	       mp->eeprom.fvers[16], mp->eeprom.fvers[17], mp->eeprom.fvers[18],
	       mp->eeprom.fvers[19], mp->eeprom.fvers[20], mp->eeprom.fvers[21],
	       mp->eeprom.fvers[22], mp->eeprom.fvers[23]);
	printk("EEPROM: %02x,%02x,%02x,%02x,%02x,%02x,%02x,%02x]\n",
	       mp->eeprom.fvers[24], mp->eeprom.fvers[25], mp->eeprom.fvers[26],
	       mp->eeprom.fvers[27], mp->eeprom.fvers[28], mp->eeprom.fvers[29],
	       mp->eeprom.fvers[30], mp->eeprom.fvers[31]);
	printk("EEPROM: mvers[%02x,%02x,%02x,%02x,%02x,%02x,%02x,%02x\n",
	       mp->eeprom.mvers[0], mp->eeprom.mvers[1], mp->eeprom.mvers[2],
	       mp->eeprom.mvers[3], mp->eeprom.mvers[4], mp->eeprom.mvers[5],
	       mp->eeprom.mvers[6], mp->eeprom.mvers[7]);
	printk("EEPROM: %02x,%02x,%02x,%02x,%02x,%02x,%02x,%02x]\n",
	       mp->eeprom.mvers[8], mp->eeprom.mvers[9], mp->eeprom.mvers[10],
	       mp->eeprom.mvers[11], mp->eeprom.mvers[12], mp->eeprom.mvers[13],
	       mp->eeprom.mvers[14], mp->eeprom.mvers[15]);
	printk("EEPROM: dlval[%04x] brd_type[%04x] bus_type[%04x] prod_code[%04x]\n",
	       mp->eeprom.dlval, mp->eeprom.brd_type, mp->eeprom.bus_type,
	       mp->eeprom.prod_code);
	printk("EEPROM: serial_num[%08x]\n", mp->eeprom.serial_num);
}
#endif
static const struct header_ops myri_header_ops = {
	.create		= myri_header,
	.rebuild	= myri_rebuild_header,
	.cache		= myri_header_cache,
	.cache_update	= myri_header_cache_update,
};
static int __devinit myri_sbus_probe(struct of_device *op, const struct of_device_id *match)
{
	struct device_node *dp = op->node;
	static unsigned version_printed;
	struct net_device *dev;
	DECLARE_MAC_BUF(mac);
	struct myri_eth *mp;
	const void *prop;
	static int num;
	int i, len;

	DET(("myri_ether_init(%p,%d):\n", op, num));
	dev = alloc_etherdev(sizeof(struct myri_eth));
	if (!dev)
		return -ENOMEM;

	if (version_printed++ == 0)
		printk(version);

	SET_NETDEV_DEV(dev, &op->dev);

	mp = netdev_priv(dev);
	spin_lock_init(&mp->irq_lock);
	mp->myri_op = op;

	/* Clean out skb arrays. */
	for (i = 0; i < (RX_RING_SIZE + 1); i++)
		mp->rx_skbs[i] = NULL;

	for (i = 0; i < TX_RING_SIZE; i++)
		mp->tx_skbs[i] = NULL;

	/* First check for EEPROM information. */
	prop = of_get_property(dp, "myrinet-eeprom-info", &len);

	if (prop)
		memcpy(&mp->eeprom, prop, sizeof(struct myri_eeprom));
	if (!prop) {
		/* No eeprom property, must cook up the values ourselves. */
		DET(("No EEPROM: "));
		mp->eeprom.bus_type = BUS_TYPE_SBUS;
		mp->eeprom.cpuvers =
			of_getintprop_default(dp, "cpu_version", 0);
		mp->eeprom.cval =
			of_getintprop_default(dp, "clock_value", 0);
		mp->eeprom.ramsz = of_getintprop_default(dp, "sram_size", 0);
		if (!mp->eeprom.cpuvers)
			mp->eeprom.cpuvers = CPUVERS_2_3;
		if (mp->eeprom.cpuvers < CPUVERS_3_0)
			mp->eeprom.cval = 0;
		if (!mp->eeprom.ramsz)
			mp->eeprom.ramsz = (128 * 1024);

		prop = of_get_property(dp, "myrinet-board-id", &len);
		if (prop)
			memcpy(&mp->eeprom.id[0], prop, 6);
		else
			set_boardid_from_idprom(mp, num);

		prop = of_get_property(dp, "fpga_version", &len);
		if (prop)
			memcpy(&mp->eeprom.fvers[0], prop, 32);
		else
			memset(&mp->eeprom.fvers[0], 0, 32);

		if (mp->eeprom.cpuvers == CPUVERS_4_1) {
			if (mp->eeprom.ramsz == (128 * 1024))
				mp->eeprom.ramsz = (256 * 1024);
			if ((mp->eeprom.cval == 0x40414041) ||
			    (mp->eeprom.cval == 0x90449044))
				mp->eeprom.cval = 0x50e450e4;
		}
	}
#ifdef DEBUG_DETECT
	dump_eeprom(mp);
#endif

	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = mp->eeprom.id[i];

	determine_reg_space_size(mp);

	/* Map in the MyriCOM register/localram set. */
	if (mp->eeprom.cpuvers < CPUVERS_4_0) {
		/* XXX Makes no sense, if control reg is non-existant this
		 * XXX driver cannot function at all... maybe pre-4.0 is
		 * XXX only a valid version for PCI cards?  Ask feldy...
		 */
		DET(("Mapping regs for cpuvers < CPUVERS_4_0\n"));
		mp->regs = of_ioremap(&op->resource[0], 0,
				      mp->reg_size, "MyriCOM Regs");
		if (!mp->regs) {
			printk("MyriCOM: Cannot map MyriCOM registers.\n");
			goto err;
		}
		mp->lanai = mp->regs + (256 * 1024);
		mp->lregs = mp->lanai + (0x10000 * 2);
	} else {
		DET(("Mapping regs for cpuvers >= CPUVERS_4_0\n"));
		mp->cregs = of_ioremap(&op->resource[0], 0,
				       PAGE_SIZE, "MyriCOM Control Regs");
		mp->lregs = of_ioremap(&op->resource[0], (256 * 1024),
				       PAGE_SIZE, "MyriCOM LANAI Regs");
		mp->lanai = of_ioremap(&op->resource[0], (512 * 1024),
				       mp->eeprom.ramsz, "MyriCOM SRAM");
	}
	DET(("Registers mapped: cregs[%p] lregs[%p] lanai[%p]\n",
	     mp->cregs, mp->lregs, mp->lanai));

	if (mp->eeprom.cpuvers >= CPUVERS_4_0)
		mp->shmem_base = 0xf000;
	else
		mp->shmem_base = 0x8000;

	DET(("Shared memory base is %04x, ", mp->shmem_base));

	mp->shmem = (struct myri_shmem __iomem *)
		(mp->lanai + (mp->shmem_base * 2));
	DET(("shmem mapped at %p\n", mp->shmem));

	mp->rqack	= &mp->shmem->channel.recvqa;
	mp->rq		= &mp->shmem->channel.recvq;
	mp->sq		= &mp->shmem->channel.sendq;

	/* Reset the board. */
	DET(("Resetting LANAI\n"));
	myri_reset_off(mp->lregs, mp->cregs);
	myri_reset_on(mp->cregs);

	/* Turn IRQ's off. */
	myri_disable_irq(mp->lregs, mp->cregs);

	/* Reset once more. */
	myri_reset_on(mp->cregs);

	/* Get the supported DVMA burst sizes from our SBUS. */
	mp->myri_bursts = of_getintprop_default(dp->parent,
						"burst-sizes", 0x00);
	if (!sbus_can_burst64())
		mp->myri_bursts &= ~(DMA_BURST64);

	DET(("MYRI bursts %02x\n", mp->myri_bursts));

	/* Encode SBUS interrupt level in second control register. */
	i = of_getintprop_default(dp, "interrupts", 0);
	if (i == 0)
		i = 4;
	DET(("prom_getint(interrupts)==%d, irqlvl set to %04x\n",
	     i, (1 << i)));

	sbus_writel((1 << i), mp->cregs + MYRICTRL_IRQLVL);

	mp->dev = dev;
	dev->open = &myri_open;
	dev->stop = &myri_close;
	dev->hard_start_xmit = &myri_start_xmit;
	dev->tx_timeout = &myri_tx_timeout;
	dev->watchdog_timeo = 5*HZ;
	dev->set_multicast_list = &myri_set_multicast;
	dev->irq = op->irqs[0];

	/* Register interrupt handler now. */
	DET(("Requesting MYRIcom IRQ line.\n"));
	if (request_irq(dev->irq, &myri_interrupt,
			IRQF_SHARED, "MyriCOM Ethernet", (void *) dev)) {
		printk("MyriCOM: Cannot register interrupt handler.\n");
		goto err;
	}

	dev->mtu		= MYRINET_MTU;
	dev->change_mtu		= myri_change_mtu;
	dev->header_ops		= &myri_header_ops;

	dev->hard_header_len	= (ETH_HLEN + MYRI_PAD_LEN);

	/* Load code onto the LANai. */
	DET(("Loading LANAI firmware\n"));
	myri_load_lanai(mp);

	if (register_netdev(dev)) {
		printk("MyriCOM: Cannot register device.\n");
		goto err_free_irq;
	}

	dev_set_drvdata(&op->dev, mp);

	num++;

	printk("%s: MyriCOM MyriNET Ethernet %s\n",
	       dev->name, print_mac(mac, dev->dev_addr));

	return 0;

err_free_irq:
	free_irq(dev->irq, dev);
err:
	/* This will also free the co-allocated 'dev->priv' */
	free_netdev(dev);
	return -ENODEV;
}
static int __devexit myri_sbus_remove(struct of_device *op)
{
	struct myri_eth *mp = dev_get_drvdata(&op->dev);
	struct net_device *net_dev = mp->dev;

	unregister_netdev(net_dev);

	free_irq(net_dev->irq, net_dev);

	if (mp->eeprom.cpuvers < CPUVERS_4_0) {
		of_iounmap(&op->resource[0], mp->regs, mp->reg_size);
	} else {
		of_iounmap(&op->resource[0], mp->cregs, PAGE_SIZE);
		of_iounmap(&op->resource[0], mp->lregs, (256 * 1024));
		of_iounmap(&op->resource[0], mp->lanai, (512 * 1024));
	}

	free_netdev(net_dev);

	dev_set_drvdata(&op->dev, NULL);

	return 0;
}
static const struct of_device_id myri_sbus_match[] = {
	{
		.name = "MYRICOM,mlanai",
	},
	{
		.name = "myri",
	},
	{},
};

MODULE_DEVICE_TABLE(of, myri_sbus_match);

static struct of_platform_driver myri_sbus_driver = {
	.name		= "myri",
	.match_table	= myri_sbus_match,
	.probe		= myri_sbus_probe,
	.remove		= __devexit_p(myri_sbus_remove),
};

static int __init myri_sbus_init(void)
{
	return of_register_driver(&myri_sbus_driver, &of_bus_type);
}

static void __exit myri_sbus_exit(void)
{
	of_unregister_driver(&myri_sbus_driver);
}

module_init(myri_sbus_init);
module_exit(myri_sbus_exit);

MODULE_LICENSE("GPL");