/* myri_sbus.c: MyriCOM MyriNET SBUS card driver.
 *
 * Copyright (C) 1996, 1999, 2006, 2008 David S. Miller (davem@davemloft.net)
 */

static char version[] =
	"myri_sbus.c:v2.0 June 23, 2006 David S. Miller (davem@davemloft.net)\n";

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <net/dst.h>
#include <net/arp.h>
#include <net/sock.h>
#include <net/ipv6.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/byteorder.h>
#include <asm/idprom.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/auxio.h>
#include <asm/pgtable.h>
#include <asm/irq.h>

#include "myri_sbus.h"
#include "myri_code.h"

/* #define DEBUG_DETECT */
/* #define DEBUG_IRQ */
/* #define DEBUG_TRANSMIT */
/* #define DEBUG_RECEIVE */
/* #define DEBUG_HEADER */

#ifdef DEBUG_DETECT
#define DET(x)	printk x
#else
#define DET(x)
#endif

#ifdef DEBUG_IRQ
#define DIRQ(x)	printk x
#else
#define DIRQ(x)
#endif

#ifdef DEBUG_TRANSMIT
#define DTX(x)	printk x
#else
#define DTX(x)
#endif

#ifdef DEBUG_RECEIVE
#define DRX(x)	printk x
#else
#define DRX(x)
#endif

#ifdef DEBUG_HEADER
#define DHDR(x)	printk x
#else
#define DHDR(x)
#endif

static void myri_reset_off(void __iomem *lp, void __iomem *cregs)
{
	/* Clear IRQ mask. */
	sbus_writel(0, lp + LANAI_EIMASK);

	/* Turn RESET function off. */
	sbus_writel(CONTROL_ROFF, cregs + MYRICTRL_CTRL);
}

static void myri_reset_on(void __iomem *cregs)
{
	/* Enable RESET function. */
	sbus_writel(CONTROL_RON, cregs + MYRICTRL_CTRL);

	/* Disable IRQ's. */
	sbus_writel(CONTROL_DIRQ, cregs + MYRICTRL_CTRL);
}

static void myri_disable_irq(void __iomem *lp, void __iomem *cregs)
{
	sbus_writel(CONTROL_DIRQ, cregs + MYRICTRL_CTRL);
	sbus_writel(0, lp + LANAI_EIMASK);
	sbus_writel(ISTAT_HOST, lp + LANAI_ISTAT);
}

static void myri_enable_irq(void __iomem *lp, void __iomem *cregs)
{
	sbus_writel(CONTROL_EIRQ, cregs + MYRICTRL_CTRL);
	sbus_writel(ISTAT_HOST, lp + LANAI_EIMASK);
}

static inline void bang_the_chip(struct myri_eth *mp)
{
	struct myri_shmem __iomem *shmem = mp->shmem;
	void __iomem *cregs = mp->cregs;

	sbus_writel(1, &shmem->send);
	sbus_writel(CONTROL_WON, cregs + MYRICTRL_CTRL);
}
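
/* Wakeup handshake with the LANAI processor.  The host repeatedly sets the
 * shared-memory "shakedown" word and pulses CONTROL_WON, then polls the
 * channel state for STATE_READY, apparently nudging the firmware back to
 * STATE_WFN whenever it has wandered elsewhere.  Up to 25 attempts are made,
 * 20 microseconds apart; -1 is returned on timeout, or immediately if the
 * channel was already READY before we started (treated as "we're hosed").
 */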
static int myri_do_handshake(struct myri_eth *mp)
{
	struct myri_shmem __iomem *shmem = mp->shmem;
	void __iomem *cregs = mp->cregs;
	struct myri_channel __iomem *chan = &shmem->channel;
	int tick = 0;

	DET(("myri_do_handshake: "));
	if (sbus_readl(&chan->state) == STATE_READY) {
		DET(("Already STATE_READY, failed.\n"));
		return -1;	/* We're hosed... */
	}

	myri_disable_irq(mp->lregs, cregs);

	while (tick++ < 25) {
		u32 softstate;

		/* Wake it up. */
		DET(("shakedown, CONTROL_WON, "));
		sbus_writel(1, &shmem->shakedown);
		sbus_writel(CONTROL_WON, cregs + MYRICTRL_CTRL);

		softstate = sbus_readl(&chan->state);
		DET(("chanstate[%08x] ", softstate));
		if (softstate == STATE_READY) {
			DET(("wakeup successful, "));
			break;
		}

		if (softstate != STATE_WFN) {
			DET(("not WFN setting that, "));
			sbus_writel(STATE_WFN, &chan->state);
		}

		udelay(20);
	}

	myri_enable_irq(mp->lregs, cregs);

	if (tick > 25) {
		DET(("25 ticks we lose, failure.\n"));
		return -1;
	}
	DET(("success\n"));
	return 0;
}
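
/* Firmware download.  With the LANAI held in reset, the on-board SRAM is
 * cleared, the clock value is programmed on CPU versions >= 3.0, and the
 * lanai4 code and data images from myri_code.h are copied in byte by byte
 * (the "* 2" on the section offsets presumably reflects how the SRAM is
 * strided within the SBUS mapping).  The station address, SBUS burst mask
 * and RX interrupt mask are then placed in shared memory, reset is dropped,
 * and the routine polls for the channel to leave STATE_READY before running
 * the wakeup handshake above.
 */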
static int __devinit myri_load_lanai(struct myri_eth *mp)
{
	struct net_device *dev = mp->dev;
	struct myri_shmem __iomem *shmem = mp->shmem;
	void __iomem *rptr;
	int i;

	myri_disable_irq(mp->lregs, mp->cregs);
	myri_reset_on(mp->cregs);

	rptr = mp->lanai;
	for (i = 0; i < mp->eeprom.ramsz; i++)
		sbus_writeb(0, rptr + i);

	if (mp->eeprom.cpuvers >= CPUVERS_3_0)
		sbus_writel(mp->eeprom.cval, mp->lregs + LANAI_CVAL);

	/* Load executable code. */
	for (i = 0; i < sizeof(lanai4_code); i++)
		sbus_writeb(lanai4_code[i], rptr + (lanai4_code_off * 2) + i);

	/* Load data segment. */
	for (i = 0; i < sizeof(lanai4_data); i++)
		sbus_writeb(lanai4_data[i], rptr + (lanai4_data_off * 2) + i);

	/* Set device address. */
	sbus_writeb(0, &shmem->addr[0]);
	sbus_writeb(0, &shmem->addr[1]);
	for (i = 0; i < 6; i++)
		sbus_writeb(dev->dev_addr[i],
			    &shmem->addr[i + 2]);

	/* Set SBUS bursts and interrupt mask. */
	sbus_writel(((mp->myri_bursts & 0xf8) >> 3), &shmem->burst);
	sbus_writel(SHMEM_IMASK_RX, &shmem->imask);

	/* Release the LANAI. */
	myri_disable_irq(mp->lregs, mp->cregs);
	myri_reset_off(mp->lregs, mp->cregs);
	myri_disable_irq(mp->lregs, mp->cregs);

	/* Wait for the reset to complete. */
	for (i = 0; i < 5000; i++) {
		if (sbus_readl(&shmem->channel.state) != STATE_READY)
			break;
		else
			udelay(10);
	}

	if (i == 5000)
		printk(KERN_ERR "myricom: Chip would not reset after firmware load.\n");

	i = myri_do_handshake(mp);
	if (i)
		printk(KERN_ERR "myricom: Handshake with LANAI failed.\n");

	if (mp->eeprom.cpuvers == CPUVERS_4_0)
		sbus_writel(0, mp->lregs + LANAI_VERS);

	return i;
}

static void myri_clean_rings(struct myri_eth *mp)
{
	struct sendq __iomem *sq = mp->sq;
	struct recvq __iomem *rq = mp->rq;
	int i;

	sbus_writel(0, &rq->tail);
	sbus_writel(0, &rq->head);
	for (i = 0; i < (RX_RING_SIZE+1); i++) {
		if (mp->rx_skbs[i] != NULL) {
			struct myri_rxd __iomem *rxd = &rq->myri_rxd[i];
			u32 dma_addr;

			dma_addr = sbus_readl(&rxd->myri_scatters[0].addr);
			dma_unmap_single(&mp->myri_op->dev, dma_addr,
					 RX_ALLOC_SIZE, DMA_FROM_DEVICE);
			dev_kfree_skb(mp->rx_skbs[i]);
			mp->rx_skbs[i] = NULL;
		}
	}

	mp->tx_old = 0;
	sbus_writel(0, &sq->tail);
	sbus_writel(0, &sq->head);
	for (i = 0; i < TX_RING_SIZE; i++) {
		if (mp->tx_skbs[i] != NULL) {
			struct sk_buff *skb = mp->tx_skbs[i];
			struct myri_txd __iomem *txd = &sq->myri_txd[i];
			u32 dma_addr;

			dma_addr = sbus_readl(&txd->myri_gathers[0].addr);
			dma_unmap_single(&mp->myri_op->dev, dma_addr,
					 (skb->len + 3) & ~3,
					 DMA_TO_DEVICE);
			dev_kfree_skb(mp->tx_skbs[i]);
			mp->tx_skbs[i] = NULL;
		}
	}
}
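
/* RX ring (re)population.  Every slot gets a fresh RX_ALLOC_SIZE skb mapped
 * for DMA_FROM_DEVICE, and the resulting bus address, length and slot index
 * are published in the receive descriptors; head and tail are then set so
 * the whole ring appears available to the LANAI.  GFP_ATOMIC is used when
 * called from interrupt context, GFP_KERNEL otherwise.
 */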
static void myri_init_rings(struct myri_eth *mp, int from_irq)
{
	struct recvq __iomem *rq = mp->rq;
	struct myri_rxd __iomem *rxd = &rq->myri_rxd[0];
	struct net_device *dev = mp->dev;
	gfp_t gfp_flags = GFP_KERNEL;
	int i;

	if (from_irq || in_interrupt())
		gfp_flags = GFP_ATOMIC;

	myri_clean_rings(mp);
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = myri_alloc_skb(RX_ALLOC_SIZE, gfp_flags);
		u32 dma_addr;

		if (!skb)
			continue;
		mp->rx_skbs[i] = skb;
		skb->dev = dev;
		skb_put(skb, RX_ALLOC_SIZE);

		dma_addr = dma_map_single(&mp->myri_op->dev,
					  skb->data, RX_ALLOC_SIZE,
					  DMA_FROM_DEVICE);
		sbus_writel(dma_addr, &rxd[i].myri_scatters[0].addr);
		sbus_writel(RX_ALLOC_SIZE, &rxd[i].myri_scatters[0].len);
		sbus_writel(i, &rxd[i].ctx);
		sbus_writel(1, &rxd[i].num_sg);
	}
	sbus_writel(0, &rq->head);
	sbus_writel(RX_RING_SIZE, &rq->tail);
}

static int myri_init(struct myri_eth *mp, int from_irq)
{
	myri_init_rings(mp, from_irq);
	return 0;
}

static void myri_is_not_so_happy(struct myri_eth *mp)
{
}

#ifdef DEBUG_HEADER
static void dump_ehdr(struct ethhdr *ehdr)
{
	printk("ehdr[h_dst(%pM)"
	       "h_source(%pM)"
	       "h_proto(%04x)]\n",
	       ehdr->h_dest, ehdr->h_source, ehdr->h_proto);
}

static void dump_ehdr_and_myripad(unsigned char *stuff)
{
	struct ethhdr *ehdr = (struct ethhdr *) (stuff + 2);

	printk("pad[%02x:%02x]", stuff[0], stuff[1]);
	dump_ehdr(ehdr);
}
#endif
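
/* TX completion reaping.  The firmware advances sq->head past descriptors it
 * has finished with; everything between the tx_old cursor and that head is
 * unmapped, freed and counted as transmitted.  In this driver the reaping is
 * driven from myri_start_xmit(); the interrupt path only services receive.
 */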
static void myri_tx(struct myri_eth *mp, struct net_device *dev)
{
	struct sendq __iomem *sq = mp->sq;
	int entry = mp->tx_old;
	int limit = sbus_readl(&sq->head);

	DTX(("entry[%d] limit[%d] ", entry, limit));
	if (entry == limit)
		return;
	while (entry != limit) {
		struct sk_buff *skb = mp->tx_skbs[entry];
		u32 dma_addr;

		DTX(("SKB[%d] ", entry));
		dma_addr = sbus_readl(&sq->myri_txd[entry].myri_gathers[0].addr);
		dma_unmap_single(&mp->myri_op->dev, dma_addr,
				 skb->len, DMA_TO_DEVICE);
		dev_kfree_skb(skb);
		mp->tx_skbs[entry] = NULL;
		dev->stats.tx_packets++;
		entry = NEXT_TX(entry);
	}
	mp->tx_old = entry;
}

/* Determine the packet's protocol ID. The rule here is that we
 * assume 802.3 if the type field is short enough to be a length.
 * This is normal practice and works for any 'now in use' protocol.
 */
static __be16 myri_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	struct ethhdr *eth;
	unsigned char *rawp;

	skb_set_mac_header(skb, MYRI_PAD_LEN);
	skb_pull(skb, dev->hard_header_len);
	eth = eth_hdr(skb);

#ifdef DEBUG_HEADER
	DHDR(("myri_type_trans: "));
	dump_ehdr(eth);
#endif
	if (*eth->h_dest & 1) {
		if (memcmp(eth->h_dest, dev->broadcast, ETH_ALEN) == 0)
			skb->pkt_type = PACKET_BROADCAST;
		else
			skb->pkt_type = PACKET_MULTICAST;
	} else if (dev->flags & (IFF_PROMISC|IFF_ALLMULTI)) {
		if (memcmp(eth->h_dest, dev->dev_addr, ETH_ALEN))
			skb->pkt_type = PACKET_OTHERHOST;
	}

	if (ntohs(eth->h_proto) >= 1536)
		return eth->h_proto;

	rawp = skb->data;

	/* This is a magic hack to spot IPX packets. Older Novell breaks
	 * the protocol design and runs IPX over 802.3 without an 802.2 LLC
	 * layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
	 * won't work for fault tolerant netware but does for the rest.
	 */
	if (*(unsigned short *)rawp == 0xFFFF)
		return htons(ETH_P_802_3);

	/* Real 802.2 LLC */
	return htons(ETH_P_802_2);
}
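
/* Receive path.  Completed buffers are reported through the "rqack" queue;
 * for each one the driver syncs the buffer for the CPU, sanity checks the
 * length and the leading MYRI_PAD_LEN marker byte, and then either swaps in
 * a newly allocated ring buffer (frames larger than RX_COPY_THRESHOLD) or
 * copies the frame into a small skb and recycles the original mapping.  Bad
 * frames and allocation failures take the drop_it path, which simply hands
 * the buffer back to the LANAI.
 */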
static void myri_rx(struct myri_eth *mp, struct net_device *dev)
{
	struct recvq __iomem *rq = mp->rq;
	struct recvq __iomem *rqa = mp->rqack;
	int entry = sbus_readl(&rqa->head);
	int limit = sbus_readl(&rqa->tail);
	int drops;

	DRX(("entry[%d] limit[%d] ", entry, limit));
	if (entry == limit)
		return;
	drops = 0;
	DRX(("\n"));
	while (entry != limit) {
		struct myri_rxd __iomem *rxdack = &rqa->myri_rxd[entry];
		u32 csum = sbus_readl(&rxdack->csum);
		int len = sbus_readl(&rxdack->myri_scatters[0].len);
		int index = sbus_readl(&rxdack->ctx);
		struct myri_rxd __iomem *rxd = &rq->myri_rxd[sbus_readl(&rq->tail)];
		struct sk_buff *skb = mp->rx_skbs[index];

		/* Ack it. */
		sbus_writel(NEXT_RX(entry), &rqa->head);

		/* Check for errors. */
		DRX(("rxd[%d]: %p len[%d] csum[%08x] ", entry, rxd, len, csum));
		dma_sync_single_for_cpu(&mp->myri_op->dev,
					sbus_readl(&rxd->myri_scatters[0].addr),
					RX_ALLOC_SIZE, DMA_FROM_DEVICE);
		if (len < (ETH_HLEN + MYRI_PAD_LEN) || (skb->data[0] != MYRI_PAD_LEN)) {
			DRX(("ERROR["));
			dev->stats.rx_errors++;
			if (len < (ETH_HLEN + MYRI_PAD_LEN)) {
				DRX(("BAD_LENGTH] "));
				dev->stats.rx_length_errors++;
			} else {
				DRX(("NO_PADDING] "));
				dev->stats.rx_frame_errors++;
			}

			/* Return it to the LANAI. */
	drop_it:
			drops++;
			DRX(("DROP "));
			dev->stats.rx_dropped++;
			dma_sync_single_for_device(&mp->myri_op->dev,
						   sbus_readl(&rxd->myri_scatters[0].addr),
						   RX_ALLOC_SIZE,
						   DMA_FROM_DEVICE);
			sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len);
			sbus_writel(index, &rxd->ctx);
			sbus_writel(1, &rxd->num_sg);
			sbus_writel(NEXT_RX(sbus_readl(&rq->tail)), &rq->tail);
			goto next;
		}

		DRX(("len[%d] ", len));
		if (len > RX_COPY_THRESHOLD) {
			struct sk_buff *new_skb;
			u32 dma_addr;

			DRX(("BIGBUFF "));
			new_skb = myri_alloc_skb(RX_ALLOC_SIZE, GFP_ATOMIC);
			if (new_skb == NULL) {
				DRX(("skb_alloc(FAILED) "));
				goto drop_it;
			}
			dma_unmap_single(&mp->myri_op->dev,
					 sbus_readl(&rxd->myri_scatters[0].addr),
					 RX_ALLOC_SIZE,
					 DMA_FROM_DEVICE);
			mp->rx_skbs[index] = new_skb;
			new_skb->dev = dev;
			skb_put(new_skb, RX_ALLOC_SIZE);
			dma_addr = dma_map_single(&mp->myri_op->dev,
						  new_skb->data,
						  RX_ALLOC_SIZE,
						  DMA_FROM_DEVICE);
			sbus_writel(dma_addr, &rxd->myri_scatters[0].addr);
			sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len);
			sbus_writel(index, &rxd->ctx);
			sbus_writel(1, &rxd->num_sg);
			sbus_writel(NEXT_RX(sbus_readl(&rq->tail)), &rq->tail);

			/* Trim the original skb for the netif. */
			DRX(("trim(%d) ", len));
			skb_trim(skb, len);
		} else {
			struct sk_buff *copy_skb = dev_alloc_skb(len);

			DRX(("SMALLBUFF "));
			if (copy_skb == NULL) {
				DRX(("dev_alloc_skb(FAILED) "));
				goto drop_it;
			}
			/* DMA sync already done above. */
			copy_skb->dev = dev;
			DRX(("resv_and_put "));
			skb_put(copy_skb, len);
			skb_copy_from_linear_data(skb, copy_skb->data, len);

			/* Reuse original ring buffer. */
			DRX(("reuse "));
			dma_sync_single_for_device(&mp->myri_op->dev,
						   sbus_readl(&rxd->myri_scatters[0].addr),
						   RX_ALLOC_SIZE,
						   DMA_FROM_DEVICE);
			sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len);
			sbus_writel(index, &rxd->ctx);
			sbus_writel(1, &rxd->num_sg);
			sbus_writel(NEXT_RX(sbus_readl(&rq->tail)), &rq->tail);

			skb = copy_skb;
		}

		/* Just like the happy meal we get checksums from this card. */
		skb->csum = csum;
		skb->ip_summed = CHECKSUM_UNNECESSARY; /* XXX */

		skb->protocol = myri_type_trans(skb, dev);
		DRX(("prot[%04x] netif_rx ", skb->protocol));
		netif_rx(skb);

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
	next:
		DRX(("NEXT\n"));
		entry = NEXT_RX(entry);
	}
}
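
/* Interrupt handler.  The only source the driver reacts to is ISTAT_HOST:
 * interrupts are masked, the channel state is checked (a non-READY state is
 * reported via myri_is_not_so_happy(), currently a no-op), the receive ring
 * is drained, the status bit is acked and interrupts are re-enabled.
 * Transmit completions are not handled here; see myri_tx() above.
 */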
static irqreturn_t myri_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct myri_eth *mp = netdev_priv(dev);
	void __iomem *lregs = mp->lregs;
	struct myri_channel __iomem *chan = &mp->shmem->channel;
	unsigned long flags;
	u32 status;
	int handled = 0;

	spin_lock_irqsave(&mp->irq_lock, flags);

	status = sbus_readl(lregs + LANAI_ISTAT);
	DIRQ(("myri_interrupt: status[%08x] ", status));
	if (status & ISTAT_HOST) {
		u32 softstate;

		handled = 1;
		DIRQ(("IRQ_DISAB "));
		myri_disable_irq(lregs, mp->cregs);
		softstate = sbus_readl(&chan->state);
		DIRQ(("state[%08x] ", softstate));
		if (softstate != STATE_READY) {
			DIRQ(("myri_not_so_happy "));
			myri_is_not_so_happy(mp);
		}
		DIRQ(("\nmyri_rx: "));
		myri_rx(mp, dev);
		DIRQ(("\nistat=ISTAT_HOST "));
		sbus_writel(ISTAT_HOST, lregs + LANAI_ISTAT);
		DIRQ(("IRQ_ENAB "));
		myri_enable_irq(lregs, mp->cregs);
	}
	DIRQ(("\n"));

	spin_unlock_irqrestore(&mp->irq_lock, flags);

	return IRQ_RETVAL(handled);
}

static int myri_open(struct net_device *dev)
{
	struct myri_eth *mp = netdev_priv(dev);

	return myri_init(mp, in_interrupt());
}

static int myri_close(struct net_device *dev)
{
	struct myri_eth *mp = netdev_priv(dev);

	myri_clean_rings(mp);
	return 0;
}

static void myri_tx_timeout(struct net_device *dev)
{
	struct myri_eth *mp = netdev_priv(dev);

	printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);

	dev->stats.tx_errors++;
	myri_init(mp, 0);
	netif_wake_queue(dev);
}
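
/* Transmit path.  After reaping finished descriptors the routine checks
 * TX_BUFFS_AVAIL and, under irq_lock, fills the next send descriptor: the
 * length is rounded up to a multiple of four, the txd->addr words are loaded
 * with all-ones for multicast/broadcast frames or with the leading bytes of
 * the frame otherwise (apparently the MyriNet route/address words), the
 * buffer is DMA-mapped as a single gather entry, sq->tail is advanced and
 * bang_the_chip() rings the doorbell.
 */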
static int myri_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct myri_eth *mp = netdev_priv(dev);
	struct sendq __iomem *sq = mp->sq;
	struct myri_txd __iomem *txd;
	unsigned long flags;
	unsigned int head, tail;
	int len, entry;
	u32 dma_addr;

	DTX(("myri_start_xmit: "));

	myri_tx(mp, dev);

	netif_stop_queue(dev);

	/* This is just to prevent multiple PIO reads for TX_BUFFS_AVAIL. */
	head = sbus_readl(&sq->head);
	tail = sbus_readl(&sq->tail);

	if (!TX_BUFFS_AVAIL(head, tail)) {
		DTX(("no buffs available, returning 1\n"));
		return 1;
	}

	spin_lock_irqsave(&mp->irq_lock, flags);

	DHDR(("xmit[skbdata(%p)]\n", skb->data));
#ifdef DEBUG_HEADER
	dump_ehdr_and_myripad(((unsigned char *) skb->data));
#endif

	/* XXX Maybe this can go as well. */
	len = skb->len;
	if (len & 3) {
		DTX(("len&3 "));
		len = (len + 4) & (~3);
	}

	entry = sbus_readl(&sq->tail);

	txd = &sq->myri_txd[entry];
	mp->tx_skbs[entry] = skb;

	/* Must do this before we sbus map it. */
	if (skb->data[MYRI_PAD_LEN] & 0x1) {
		sbus_writew(0xffff, &txd->addr[0]);
		sbus_writew(0xffff, &txd->addr[1]);
		sbus_writew(0xffff, &txd->addr[2]);
		sbus_writew(0xffff, &txd->addr[3]);
	} else {
		sbus_writew(0xffff, &txd->addr[0]);
		sbus_writew((skb->data[0] << 8) | skb->data[1], &txd->addr[1]);
		sbus_writew((skb->data[2] << 8) | skb->data[3], &txd->addr[2]);
		sbus_writew((skb->data[4] << 8) | skb->data[5], &txd->addr[3]);
	}

	dma_addr = dma_map_single(&mp->myri_op->dev, skb->data,
				  len, DMA_TO_DEVICE);
	sbus_writel(dma_addr, &txd->myri_gathers[0].addr);
	sbus_writel(len, &txd->myri_gathers[0].len);
	sbus_writel(1, &txd->num_sg);
	sbus_writel(KERNEL_CHANNEL, &txd->chan);
	sbus_writel(len, &txd->len);
	sbus_writel((u32)-1, &txd->csum_off);
	sbus_writel(0, &txd->csum_field);

	sbus_writel(NEXT_TX(entry), &sq->tail);
	DTX(("BangTheChip "));
	bang_the_chip(mp);

	DTX(("tbusy=0, returning 0\n"));
	netif_start_queue(dev);
	spin_unlock_irqrestore(&mp->irq_lock, flags);
	return 0;
}
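
/* On the wire every frame carries a MYRI_PAD_LEN byte pad in front of an
 * otherwise ordinary Ethernet header (the pad stores its own length followed
 * by 0xab), which is why hard_header_len is ETH_HLEN + MYRI_PAD_LEN and why
 * myri_type_trans() sets the mac header at offset MYRI_PAD_LEN.  The
 * header_ops below build and rebuild that layout.
 */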
/* Create the MyriNet MAC header for an arbitrary protocol layer
 *
 * saddr=NULL	means use device source address
 * daddr=NULL	means leave destination address (eg unresolved arp)
 */
static int myri_header(struct sk_buff *skb, struct net_device *dev,
		       unsigned short type, const void *daddr,
		       const void *saddr, unsigned len)
{
	struct ethhdr *eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
	unsigned char *pad = (unsigned char *) skb_push(skb, MYRI_PAD_LEN);

#ifdef DEBUG_HEADER
	DHDR(("myri_header: pad[%02x,%02x] ", pad[0], pad[1]));
	dump_ehdr(eth);
#endif

	/* Set the MyriNET padding identifier. */
	pad[0] = MYRI_PAD_LEN;
	pad[1] = 0xab;

	/* Set the protocol type. For a packet of type ETH_P_802_3 we put the length
	 * in here instead. It is up to the 802.2 layer to carry protocol information.
	 */
	if (type != ETH_P_802_3)
		eth->h_proto = htons(type);
	else
		eth->h_proto = htons(len);

	/* Set the source hardware address. */
	if (saddr)
		memcpy(eth->h_source, saddr, dev->addr_len);
	else
		memcpy(eth->h_source, dev->dev_addr, dev->addr_len);

	/* Anyway, the loopback-device should never use this function... */
	if (dev->flags & IFF_LOOPBACK) {
		int i;
		for (i = 0; i < dev->addr_len; i++)
			eth->h_dest[i] = 0;
		return dev->hard_header_len;
	}

	if (daddr) {
		memcpy(eth->h_dest, daddr, dev->addr_len);
		return dev->hard_header_len;
	}
	return -dev->hard_header_len;
}

/* Rebuild the MyriNet MAC header. This is called after an ARP
 * (or in future other address resolution) has completed on this
 * sk_buff. We now let ARP fill in the other fields.
 */
static int myri_rebuild_header(struct sk_buff *skb)
{
	unsigned char *pad = (unsigned char *) skb->data;
	struct ethhdr *eth = (struct ethhdr *) (pad + MYRI_PAD_LEN);
	struct net_device *dev = skb->dev;

#ifdef DEBUG_HEADER
	DHDR(("myri_rebuild_header: pad[%02x,%02x] ", pad[0], pad[1]));
	dump_ehdr(eth);
#endif

	/* Refill MyriNet padding identifiers, this is just being anal. */
	pad[0] = MYRI_PAD_LEN;
	pad[1] = 0xab;

	switch (eth->h_proto)
	{
#ifdef CONFIG_INET
	case cpu_to_be16(ETH_P_IP):
		return arp_find(eth->h_dest, skb);
#endif

	default:
		printk(KERN_DEBUG
		       "%s: unable to resolve type %X addresses.\n",
		       dev->name, (int)eth->h_proto);

		memcpy(eth->h_source, dev->dev_addr, dev->addr_len);
		return 0;
		break;
	}

	return 0;
}

static int myri_header_cache(const struct neighbour *neigh, struct hh_cache *hh)
{
	unsigned short type = hh->hh_type;
	unsigned char *pad;
	struct ethhdr *eth;
	const struct net_device *dev = neigh->dev;

	pad = ((unsigned char *) hh->hh_data) +
		HH_DATA_OFF(sizeof(*eth) + MYRI_PAD_LEN);
	eth = (struct ethhdr *) (pad + MYRI_PAD_LEN);

	if (type == htons(ETH_P_802_3))
		return -1;

	/* Refill MyriNet padding identifiers, this is just being anal. */
	pad[0] = MYRI_PAD_LEN;
	pad[1] = 0xab;

	eth->h_proto = type;
	memcpy(eth->h_source, dev->dev_addr, dev->addr_len);
	memcpy(eth->h_dest, neigh->ha, dev->addr_len);
	hh->hh_len = 16;
	return 0;
}

/* Called by Address Resolution module to notify changes in address. */
void myri_header_cache_update(struct hh_cache *hh,
			      const struct net_device *dev,
			      const unsigned char *haddr)
{
	memcpy(((u8 *) hh->hh_data) + HH_DATA_OFF(sizeof(struct ethhdr)),
	       haddr, dev->addr_len);
}

static int myri_change_mtu(struct net_device *dev, int new_mtu)
{
	if ((new_mtu < (ETH_HLEN + MYRI_PAD_LEN)) || (new_mtu > MYRINET_MTU))
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

static void myri_set_multicast(struct net_device *dev)
{
	/* Do nothing, all MyriCOM nodes transmit multicast frames
	 * as broadcast packets...
	 */
}

static inline void set_boardid_from_idprom(struct myri_eth *mp, int num)
{
	mp->eeprom.id[0] = 0;
	mp->eeprom.id[1] = idprom->id_machtype;
	mp->eeprom.id[2] = (idprom->id_sernum >> 16) & 0xff;
	mp->eeprom.id[3] = (idprom->id_sernum >> 8) & 0xff;
	mp->eeprom.id[4] = (idprom->id_sernum >> 0) & 0xff;
	mp->eeprom.id[5] = num;
}
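
/* The size of the register/SRAM area to map depends on the LANAI CPU
 * version: pre-4.0 boards expose one large region (3 x 128K plus a 4K page),
 * while 4.0/4.1 boards expose two 4K register pages plus the on-board SRAM.
 * Unknown versions are treated like pre-4.0 hardware.
 */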
static inline void determine_reg_space_size(struct myri_eth *mp)
{
	switch (mp->eeprom.cpuvers) {
	case CPUVERS_2_3:
	case CPUVERS_3_0:
	case CPUVERS_3_1:
	case CPUVERS_3_2:
		mp->reg_size = (3 * 128 * 1024) + 4096;
		break;

	case CPUVERS_4_0:
	case CPUVERS_4_1:
		mp->reg_size = ((4096 << 1) + mp->eeprom.ramsz);
		break;

	case CPUVERS_4_2:
	case CPUVERS_5_0:
	default:
		printk("myricom: AIEEE weird cpu version %04x assuming pre4.0\n",
		       mp->eeprom.cpuvers);
		mp->reg_size = (3 * 128 * 1024) + 4096;
	}
}

#ifdef DEBUG_DETECT
static void dump_eeprom(struct myri_eth *mp)
{
	printk("EEPROM: clockval[%08x] cpuvers[%04x] "
	       "id[%02x,%02x,%02x,%02x,%02x,%02x]\n",
	       mp->eeprom.cval, mp->eeprom.cpuvers,
	       mp->eeprom.id[0], mp->eeprom.id[1], mp->eeprom.id[2],
	       mp->eeprom.id[3], mp->eeprom.id[4], mp->eeprom.id[5]);
	printk("EEPROM: ramsz[%08x]\n", mp->eeprom.ramsz);
	printk("EEPROM: fvers[%02x,%02x,%02x,%02x,%02x,%02x,%02x,%02x\n",
	       mp->eeprom.fvers[0], mp->eeprom.fvers[1], mp->eeprom.fvers[2],
	       mp->eeprom.fvers[3], mp->eeprom.fvers[4], mp->eeprom.fvers[5],
	       mp->eeprom.fvers[6], mp->eeprom.fvers[7]);
	printk("EEPROM: %02x,%02x,%02x,%02x,%02x,%02x,%02x,%02x\n",
	       mp->eeprom.fvers[8], mp->eeprom.fvers[9], mp->eeprom.fvers[10],
	       mp->eeprom.fvers[11], mp->eeprom.fvers[12], mp->eeprom.fvers[13],
	       mp->eeprom.fvers[14], mp->eeprom.fvers[15]);
	printk("EEPROM: %02x,%02x,%02x,%02x,%02x,%02x,%02x,%02x\n",
	       mp->eeprom.fvers[16], mp->eeprom.fvers[17], mp->eeprom.fvers[18],
	       mp->eeprom.fvers[19], mp->eeprom.fvers[20], mp->eeprom.fvers[21],
	       mp->eeprom.fvers[22], mp->eeprom.fvers[23]);
	printk("EEPROM: %02x,%02x,%02x,%02x,%02x,%02x,%02x,%02x]\n",
	       mp->eeprom.fvers[24], mp->eeprom.fvers[25], mp->eeprom.fvers[26],
	       mp->eeprom.fvers[27], mp->eeprom.fvers[28], mp->eeprom.fvers[29],
	       mp->eeprom.fvers[30], mp->eeprom.fvers[31]);
	printk("EEPROM: mvers[%02x,%02x,%02x,%02x,%02x,%02x,%02x,%02x\n",
	       mp->eeprom.mvers[0], mp->eeprom.mvers[1], mp->eeprom.mvers[2],
	       mp->eeprom.mvers[3], mp->eeprom.mvers[4], mp->eeprom.mvers[5],
	       mp->eeprom.mvers[6], mp->eeprom.mvers[7]);
	printk("EEPROM: %02x,%02x,%02x,%02x,%02x,%02x,%02x,%02x]\n",
	       mp->eeprom.mvers[8], mp->eeprom.mvers[9], mp->eeprom.mvers[10],
	       mp->eeprom.mvers[11], mp->eeprom.mvers[12], mp->eeprom.mvers[13],
	       mp->eeprom.mvers[14], mp->eeprom.mvers[15]);
	printk("EEPROM: dlval[%04x] brd_type[%04x] bus_type[%04x] prod_code[%04x]\n",
	       mp->eeprom.dlval, mp->eeprom.brd_type, mp->eeprom.bus_type,
	       mp->eeprom.prod_code);
	printk("EEPROM: serial_num[%08x]\n", mp->eeprom.serial_num);
}
#endif

static const struct header_ops myri_header_ops = {
	.create		= myri_header,
	.rebuild	= myri_rebuild_header,
	.cache		= myri_header_cache,
	.cache_update	= myri_header_cache_update,
};

static const struct net_device_ops myri_ops = {
	.ndo_open		= myri_open,
	.ndo_stop		= myri_close,
	.ndo_start_xmit		= myri_start_xmit,
	.ndo_set_multicast_list	= myri_set_multicast,
	.ndo_tx_timeout		= myri_tx_timeout,
	.ndo_change_mtu		= myri_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
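
/* Probe: EEPROM info is taken from the "myrinet-eeprom-info" OF property
 * when present, otherwise reconstructed from individual properties and the
 * machine idprom.  Pre-4.0 boards are mapped as one big region (LANAI SRAM
 * at +256K, LANAI registers above it); 4.0+ boards get separate control,
 * LANAI register and SRAM mappings, with the shared memory block sitting
 * inside the SRAM at shmem_base * 2.  After a reset cycle the IRQ level is
 * programmed, the handler is registered and the lanai4 firmware is loaded
 * before the netdev is registered.
 */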
static int __devinit myri_sbus_probe(struct of_device *op, const struct of_device_id *match)
{
	struct device_node *dp = op->node;
	static unsigned version_printed;
	struct net_device *dev;
	struct myri_eth *mp;
	const void *prop;
	static int num;
	int i, len;

	DET(("myri_ether_init(%p,%d):\n", op, num));
	dev = alloc_etherdev(sizeof(struct myri_eth));
	if (!dev)
		return -ENOMEM;

	if (version_printed++ == 0)
		printk(version);

	SET_NETDEV_DEV(dev, &op->dev);

	mp = netdev_priv(dev);
	spin_lock_init(&mp->irq_lock);
	mp->myri_op = op;

	/* Clean out skb arrays. */
	for (i = 0; i < (RX_RING_SIZE + 1); i++)
		mp->rx_skbs[i] = NULL;

	for (i = 0; i < TX_RING_SIZE; i++)
		mp->tx_skbs[i] = NULL;

	/* First check for EEPROM information. */
	prop = of_get_property(dp, "myrinet-eeprom-info", &len);

	if (prop)
		memcpy(&mp->eeprom, prop, sizeof(struct myri_eeprom));
	if (!prop) {
		/* No eeprom property, must cook up the values ourselves. */
		DET(("No EEPROM: "));
		mp->eeprom.bus_type = BUS_TYPE_SBUS;
		mp->eeprom.cpuvers =
			of_getintprop_default(dp, "cpu_version", 0);
		mp->eeprom.cval =
			of_getintprop_default(dp, "clock_value", 0);
		mp->eeprom.ramsz = of_getintprop_default(dp, "sram_size", 0);
		if (!mp->eeprom.cpuvers)
			mp->eeprom.cpuvers = CPUVERS_2_3;
		if (mp->eeprom.cpuvers < CPUVERS_3_0)
			mp->eeprom.cval = 0;
		if (!mp->eeprom.ramsz)
			mp->eeprom.ramsz = (128 * 1024);

		prop = of_get_property(dp, "myrinet-board-id", &len);
		if (prop)
			memcpy(&mp->eeprom.id[0], prop, 6);
		else
			set_boardid_from_idprom(mp, num);

		prop = of_get_property(dp, "fpga_version", &len);
		if (prop)
			memcpy(&mp->eeprom.fvers[0], prop, 32);
		else
			memset(&mp->eeprom.fvers[0], 0, 32);

		if (mp->eeprom.cpuvers == CPUVERS_4_1) {
			if (mp->eeprom.ramsz == (128 * 1024))
				mp->eeprom.ramsz = (256 * 1024);
			if ((mp->eeprom.cval == 0x40414041) ||
			    (mp->eeprom.cval == 0x90449044))
				mp->eeprom.cval = 0x50e450e4;
		}
	}
#ifdef DEBUG_DETECT
	dump_eeprom(mp);
#endif

	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = mp->eeprom.id[i];

	determine_reg_space_size(mp);

	/* Map in the MyriCOM register/localram set. */
	if (mp->eeprom.cpuvers < CPUVERS_4_0) {
		/* XXX Makes no sense, if control reg is non-existent this
		 * XXX driver cannot function at all... maybe pre-4.0 is
		 * XXX only a valid version for PCI cards?  Ask feldy...
		 */
		DET(("Mapping regs for cpuvers < CPUVERS_4_0\n"));
		mp->regs = of_ioremap(&op->resource[0], 0,
				      mp->reg_size, "MyriCOM Regs");
		if (!mp->regs) {
			printk("MyriCOM: Cannot map MyriCOM registers.\n");
			goto err;
		}
		mp->lanai = mp->regs + (256 * 1024);
		mp->lregs = mp->lanai + (0x10000 * 2);
	} else {
		DET(("Mapping regs for cpuvers >= CPUVERS_4_0\n"));
		mp->cregs = of_ioremap(&op->resource[0], 0,
				       PAGE_SIZE, "MyriCOM Control Regs");
		mp->lregs = of_ioremap(&op->resource[0], (256 * 1024),
				       PAGE_SIZE, "MyriCOM LANAI Regs");
		mp->lanai = of_ioremap(&op->resource[0], (512 * 1024),
				       mp->eeprom.ramsz, "MyriCOM SRAM");
	}
	DET(("Registers mapped: cregs[%p] lregs[%p] lanai[%p]\n",
	     mp->cregs, mp->lregs, mp->lanai));

	if (mp->eeprom.cpuvers >= CPUVERS_4_0)
		mp->shmem_base = 0xf000;
	else
		mp->shmem_base = 0x8000;

	DET(("Shared memory base is %04x, ", mp->shmem_base));

	mp->shmem = (struct myri_shmem __iomem *)
		(mp->lanai + (mp->shmem_base * 2));
	DET(("shmem mapped at %p\n", mp->shmem));

	mp->rqack = &mp->shmem->channel.recvqa;
	mp->rq = &mp->shmem->channel.recvq;
	mp->sq = &mp->shmem->channel.sendq;

	/* Reset the board. */
	DET(("Resetting LANAI\n"));
	myri_reset_off(mp->lregs, mp->cregs);
	myri_reset_on(mp->cregs);

	/* Turn IRQ's off. */
	myri_disable_irq(mp->lregs, mp->cregs);

	/* Reset once more. */
	myri_reset_on(mp->cregs);

	/* Get the supported DVMA burst sizes from our SBUS. */
	mp->myri_bursts = of_getintprop_default(dp->parent,
						"burst-sizes", 0x00);
	if (!sbus_can_burst64())
		mp->myri_bursts &= ~(DMA_BURST64);

	DET(("MYRI bursts %02x\n", mp->myri_bursts));

	/* Encode SBUS interrupt level in second control register. */
	i = of_getintprop_default(dp, "interrupts", 0);
	if (i == 0)
		i = 4;
	DET(("prom_getint(interrupts)==%d, irqlvl set to %04x\n",
	     i, (1 << i)));

	sbus_writel((1 << i), mp->cregs + MYRICTRL_IRQLVL);

	mp->dev = dev;
	dev->watchdog_timeo = 5*HZ;
	dev->irq = op->irqs[0];
	dev->netdev_ops = &myri_ops;

	/* Register interrupt handler now. */
	DET(("Requesting MYRIcom IRQ line.\n"));
	if (request_irq(dev->irq, &myri_interrupt,
			IRQF_SHARED, "MyriCOM Ethernet", (void *) dev)) {
		printk("MyriCOM: Cannot register interrupt handler.\n");
		goto err;
	}

	dev->mtu = MYRINET_MTU;
	dev->header_ops = &myri_header_ops;

	dev->hard_header_len = (ETH_HLEN + MYRI_PAD_LEN);

	/* Load code onto the LANai. */
	DET(("Loading LANAI firmware\n"));
	myri_load_lanai(mp);

	if (register_netdev(dev)) {
		printk("MyriCOM: Cannot register device.\n");
		goto err_free_irq;
	}

	dev_set_drvdata(&op->dev, mp);

	num++;

	printk("%s: MyriCOM MyriNET Ethernet %pM\n",
	       dev->name, dev->dev_addr);

	return 0;

err_free_irq:
	free_irq(dev->irq, dev);
err:
	/* This will also free the co-allocated private data. */
	free_netdev(dev);
	return -ENODEV;
}

static int __devexit myri_sbus_remove(struct of_device *op)
{
	struct myri_eth *mp = dev_get_drvdata(&op->dev);
	struct net_device *net_dev = mp->dev;

	unregister_netdev(net_dev);

	free_irq(net_dev->irq, net_dev);

	if (mp->eeprom.cpuvers < CPUVERS_4_0) {
		of_iounmap(&op->resource[0], mp->regs, mp->reg_size);
	} else {
		of_iounmap(&op->resource[0], mp->cregs, PAGE_SIZE);
		of_iounmap(&op->resource[0], mp->lregs, (256 * 1024));
		of_iounmap(&op->resource[0], mp->lanai, (512 * 1024));
	}

	free_netdev(net_dev);

	dev_set_drvdata(&op->dev, NULL);

	return 0;
}

static const struct of_device_id myri_sbus_match[] = {
	{
		.name = "MYRICOM,mlanai",
	},
	{
		.name = "myri",
	},
	{},
};

MODULE_DEVICE_TABLE(of, myri_sbus_match);

static struct of_platform_driver myri_sbus_driver = {
	.name		= "myri",
	.match_table	= myri_sbus_match,
	.probe		= myri_sbus_probe,
	.remove		= __devexit_p(myri_sbus_remove),
};

static int __init myri_sbus_init(void)
{
	return of_register_driver(&myri_sbus_driver, &of_bus_type);
}

static void __exit myri_sbus_exit(void)
{
	of_unregister_driver(&myri_sbus_driver);
}

module_init(myri_sbus_init);
module_exit(myri_sbus_exit);

MODULE_LICENSE("GPL");