/*
 * sgiseeq.c: Seeq8003 ethernet driver for SGI machines.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

#include <asm/sgi/hpc3.h>
#include <asm/sgi/ip22.h>
#include <asm/sgi/seeq.h>

#include "sgiseeq.h"
static char *sgiseeqstr = "SGI Seeq8003";

/*
 * If you want speed, you do something silly, it always has worked for me. So,
 * with that in mind, I've decided to make this driver look completely like a
 * stupid Lance from a driver architecture perspective. Only difference is that
 * here our "ring buffer" looks and acts like a real Lance one does but is
 * laid out like how the HPC DMA and the Seeq want it to. You'd be surprised
 * how a stupid idea like this can pay off in performance, not to mention
 * making this driver 2,000 times easier to write. ;-)
 */

/* Tune these if we tend to run out often etc. */
#define SEEQ_RX_BUFFERS  16
#define SEEQ_TX_BUFFERS  16

#define PKT_BUF_SZ       1584

#define NEXT_RX(i)  (((i) + 1) & (SEEQ_RX_BUFFERS - 1))
#define NEXT_TX(i)  (((i) + 1) & (SEEQ_TX_BUFFERS - 1))
#define PREV_RX(i)  (((i) - 1) & (SEEQ_RX_BUFFERS - 1))
#define PREV_TX(i)  (((i) - 1) & (SEEQ_TX_BUFFERS - 1))

#define TX_BUFFS_AVAIL(sp) ((sp->tx_old <= sp->tx_new) ? \
			    sp->tx_old + (SEEQ_TX_BUFFERS - 1) - sp->tx_new : \
			    sp->tx_old - sp->tx_new - 1)
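
/*
 * Note that TX_BUFFS_AVAIL() keeps one slot in reserve: with tx_old ==
 * tx_new (empty ring) it reports SEEQ_TX_BUFFERS - 1, so a completely
 * full ring never becomes indistinguishable from an empty one.
 */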

#define VIRT_TO_DMA(sp, v) ((sp)->srings_dma +				\
			    (dma_addr_t)((unsigned long)(v) -		\
					 (unsigned long)((sp)->rx_desc)))
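
/*
 * VIRT_TO_DMA() converts a pointer to a descriptor inside the srings
 * block into its bus address: the descriptor's offset from the start of
 * the block (rx_desc points at the first RX descriptor, i.e. the start
 * of the init block) is added to srings_dma.
 */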

/* Copy frames shorter than rx_copybreak, otherwise pass on up in
 * a full sized sk_buff. Value of 100 stolen from tulip.c (!alpha).
 */
static int rx_copybreak = 100;

#define PAD_SIZE    (128 - sizeof(struct hpc_dma_desc) - sizeof(void *))
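
/*
 * PAD_SIZE pads each ring entry (HPC DMA descriptor + padding + skb
 * pointer) out to exactly 128 bytes, so consecutive descriptors sit in
 * their own 128-byte slots within the page-aligned srings block.
 */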

struct sgiseeq_rx_desc {
	volatile struct hpc_dma_desc rdma;
	u8 padding[PAD_SIZE];
	struct sk_buff *skb;
};

struct sgiseeq_tx_desc {
	volatile struct hpc_dma_desc tdma;
	u8 padding[PAD_SIZE];
	struct sk_buff *skb;
};

/*
 * Warning: This structure is laid out in a certain way because HPC dma
 *          descriptors must be 8-byte aligned. So don't touch this without
 *          some care.
 */
struct sgiseeq_init_block { /* Note the name ;-) */
	struct sgiseeq_rx_desc rxvector[SEEQ_RX_BUFFERS];
	struct sgiseeq_tx_desc txvector[SEEQ_TX_BUFFERS];
};

struct sgiseeq_private {
	struct sgiseeq_init_block *srings;
	dma_addr_t srings_dma;

	/* Ptrs to the descriptors in uncached space. */
	struct sgiseeq_rx_desc *rx_desc;
	struct sgiseeq_tx_desc *tx_desc;

	char *name;
	struct hpc3_ethregs *hregs;
	struct sgiseeq_regs *sregs;

	/* Ring entry counters. */
	unsigned int rx_new, tx_new;
	unsigned int rx_old, tx_old;

	int is_edlc;
	unsigned char control;
	unsigned char mode;

	spinlock_t tx_lock;
};

static inline void dma_sync_desc_cpu(struct net_device *dev, void *addr)
{
	dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
		       DMA_FROM_DEVICE);
}

static inline void dma_sync_desc_dev(struct net_device *dev, void *addr)
{
	dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
		       DMA_TO_DEVICE);
}
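
/*
 * The descriptor rings are allocated with dma_alloc_noncoherent() in
 * sgiseeq_probe(), so the driver syncs one descriptor's worth of memory
 * with dma_sync_desc_cpu() before the CPU inspects it and with
 * dma_sync_desc_dev() after updating it for the HPC.  RX and TX
 * descriptors are the same size, so one sync length covers both.
 */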

static inline void hpc3_eth_reset(struct hpc3_ethregs *hregs)
{
	hregs->reset = HPC3_ERST_CRESET | HPC3_ERST_CLRIRQ;
	udelay(20);
	hregs->reset = 0;
}

static inline void reset_hpc3_and_seeq(struct hpc3_ethregs *hregs,
				       struct sgiseeq_regs *sregs)
{
	hregs->rx_ctrl = hregs->tx_ctrl = 0;
	hpc3_eth_reset(hregs);
}

#define RSTAT_GO_BITS (SEEQ_RCMD_IGOOD | SEEQ_RCMD_IEOF | SEEQ_RCMD_ISHORT | \
		       SEEQ_RCMD_IDRIB | SEEQ_RCMD_ICRC)

static inline void seeq_go(struct sgiseeq_private *sp,
			   struct hpc3_ethregs *hregs,
			   struct sgiseeq_regs *sregs)
{
	sregs->rstat = sp->mode | RSTAT_GO_BITS;
	hregs->rx_ctrl = HPC3_ERXCTRL_ACTIVE;
}

static inline void __sgiseeq_set_mac_address(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct sgiseeq_regs *sregs = sp->sregs;
	int i;

	sregs->tstat = SEEQ_TCMD_RB0;
	for (i = 0; i < 6; i++)
		sregs->rw.eth_addr[i] = dev->dev_addr[i];
}

static int sgiseeq_set_mac_address(struct net_device *dev, void *addr)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct sockaddr *sa = addr;

	memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);

	spin_lock_irq(&sp->tx_lock);
	__sgiseeq_set_mac_address(dev);
	spin_unlock_irq(&sp->tx_lock);

	return 0;
}

#define TCNTINFO_INIT (HPCDMA_EOX | HPCDMA_ETXD)
#define RCNTCFG_INIT  (HPCDMA_OWN | HPCDMA_EORP | HPCDMA_XIE)
#define RCNTINFO_INIT (RCNTCFG_INIT | (PKT_BUF_SZ & HPCDMA_BCNT))
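
/*
 * seeq_init_ring() below stamps every TX slot with TCNTINFO_INIT (slot
 * idle, nothing pending) and hands every RX slot to the HPC with
 * RCNTINFO_INIT (descriptor owned by the HPC, interrupt enabled, byte
 * count set to the full PKT_BUF_SZ buffer).
 */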

static int seeq_init_ring(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	int i;

	netif_stop_queue(dev);
	sp->rx_new = sp->tx_new = 0;
	sp->rx_old = sp->tx_old = 0;

	__sgiseeq_set_mac_address(dev);

	/* Setup tx ring. */
	for (i = 0; i < SEEQ_TX_BUFFERS; i++) {
		sp->tx_desc[i].tdma.cntinfo = TCNTINFO_INIT;
		dma_sync_desc_dev(dev, &sp->tx_desc[i]);
	}

	/* And now the rx ring. */
	for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
		if (!sp->rx_desc[i].skb) {
			dma_addr_t dma_addr;
			struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ);

			if (skb == NULL)
				return -ENOMEM;
			skb_reserve(skb, 2);
			dma_addr = dma_map_single(dev->dev.parent,
						  skb->data - 2,
						  PKT_BUF_SZ, DMA_FROM_DEVICE);
			sp->rx_desc[i].skb = skb;
			sp->rx_desc[i].rdma.pbuf = dma_addr;
		}
		sp->rx_desc[i].rdma.cntinfo = RCNTINFO_INIT;
		dma_sync_desc_dev(dev, &sp->rx_desc[i]);
	}
	sp->rx_desc[i - 1].rdma.cntinfo |= HPCDMA_EOR;
	dma_sync_desc_dev(dev, &sp->rx_desc[i - 1]);
	return 0;
}

static void seeq_purge_ring(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	int i;

	/* clear tx ring. */
	for (i = 0; i < SEEQ_TX_BUFFERS; i++) {
		if (sp->tx_desc[i].skb) {
			dev_kfree_skb(sp->tx_desc[i].skb);
			sp->tx_desc[i].skb = NULL;
		}
	}

	/* And now the rx ring. */
	for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
		if (sp->rx_desc[i].skb) {
			dev_kfree_skb(sp->rx_desc[i].skb);
			sp->rx_desc[i].skb = NULL;
		}
	}
}

#ifdef DEBUG
static struct sgiseeq_private *gpriv;
static struct net_device *gdev;

static void sgiseeq_dump_rings(void)
{
	static int once;
	struct sgiseeq_rx_desc *r = gpriv->rx_desc;
	struct sgiseeq_tx_desc *t = gpriv->tx_desc;
	struct hpc3_ethregs *hregs = gpriv->hregs;
	int i;

	if (once)
		return;
	once++;
	printk("RING DUMP:\n");
	for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
		printk("RX [%d]: @(%p) [%08x,%08x,%08x] ",
		       i, (&r[i]), r[i].rdma.pbuf, r[i].rdma.cntinfo,
		       r[i].rdma.pnext);
		i += 1;
		printk("-- [%d]: @(%p) [%08x,%08x,%08x]\n",
		       i, (&r[i]), r[i].rdma.pbuf, r[i].rdma.cntinfo,
		       r[i].rdma.pnext);
	}
	for (i = 0; i < SEEQ_TX_BUFFERS; i++) {
		printk("TX [%d]: @(%p) [%08x,%08x,%08x] ",
		       i, (&t[i]), t[i].tdma.pbuf, t[i].tdma.cntinfo,
		       t[i].tdma.pnext);
		i += 1;
		printk("-- [%d]: @(%p) [%08x,%08x,%08x]\n",
		       i, (&t[i]), t[i].tdma.pbuf, t[i].tdma.cntinfo,
		       t[i].tdma.pnext);
	}
	printk("INFO: [rx_new = %d rx_old=%d] [tx_new = %d tx_old = %d]\n",
	       gpriv->rx_new, gpriv->rx_old, gpriv->tx_new, gpriv->tx_old);
	printk("RREGS: rx_cbptr[%08x] rx_ndptr[%08x] rx_ctrl[%08x]\n",
	       hregs->rx_cbptr, hregs->rx_ndptr, hregs->rx_ctrl);
	printk("TREGS: tx_cbptr[%08x] tx_ndptr[%08x] tx_ctrl[%08x]\n",
	       hregs->tx_cbptr, hregs->tx_ndptr, hregs->tx_ctrl);
}
#endif

#define TSTAT_INIT_SEEQ (SEEQ_TCMD_IPT|SEEQ_TCMD_I16|SEEQ_TCMD_IC|SEEQ_TCMD_IUF)
#define TSTAT_INIT_EDLC ((TSTAT_INIT_SEEQ) | SEEQ_TCMD_RB2)
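
/*
 * Going by the SEEQ_TCMD_* names, the plain Seeq setting enables the
 * transmit interrupt sources (packet transmitted, 16 collisions,
 * collision, underflow); the EDLC setting additionally selects register
 * bank 2, which init_seeq() only uses when sp->is_edlc is set.
 */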

static int init_seeq(struct net_device *dev, struct sgiseeq_private *sp,
		     struct sgiseeq_regs *sregs)
{
	struct hpc3_ethregs *hregs = sp->hregs;
	int err;

	reset_hpc3_and_seeq(hregs, sregs);
	err = seeq_init_ring(dev);
	if (err)
		return err;

	/* Setup to field the proper interrupt types. */
	if (sp->is_edlc) {
		sregs->tstat = TSTAT_INIT_EDLC;
		sregs->rw.wregs.control = sp->control;
		sregs->rw.wregs.frame_gap = 0;
	} else {
		sregs->tstat = TSTAT_INIT_SEEQ;
	}

	hregs->rx_ndptr = VIRT_TO_DMA(sp, sp->rx_desc);
	hregs->tx_ndptr = VIRT_TO_DMA(sp, sp->tx_desc);

	seeq_go(sp, hregs, sregs);
	return 0;
}

static void record_rx_errors(struct net_device *dev, unsigned char status)
{
	if (status & SEEQ_RSTAT_OVERF ||
	    status & SEEQ_RSTAT_SFRAME)
		dev->stats.rx_over_errors++;
	if (status & SEEQ_RSTAT_CERROR)
		dev->stats.rx_crc_errors++;
	if (status & SEEQ_RSTAT_DERROR)
		dev->stats.rx_frame_errors++;
	if (status & SEEQ_RSTAT_REOF)
		dev->stats.rx_errors++;
}

static inline void rx_maybe_restart(struct sgiseeq_private *sp,
				    struct hpc3_ethregs *hregs,
				    struct sgiseeq_regs *sregs)
{
	if (!(hregs->rx_ctrl & HPC3_ERXCTRL_ACTIVE)) {
		hregs->rx_ndptr = VIRT_TO_DMA(sp, sp->rx_desc + sp->rx_new);
		seeq_go(sp, hregs, sregs);
	}
}

static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp,
			      struct hpc3_ethregs *hregs,
			      struct sgiseeq_regs *sregs)
{
	struct sgiseeq_rx_desc *rd;
	struct sk_buff *skb = NULL;
	struct sk_buff *newskb;
	unsigned char pkt_status;
	int len = 0;
	unsigned int orig_end = PREV_RX(sp->rx_new);

	/* Service every received packet. */
	rd = &sp->rx_desc[sp->rx_new];
	dma_sync_desc_cpu(dev, rd);
	while (!(rd->rdma.cntinfo & HPCDMA_OWN)) {
		len = PKT_BUF_SZ - (rd->rdma.cntinfo & HPCDMA_BCNT) - 3;
		dma_unmap_single(dev->dev.parent, rd->rdma.pbuf,
				 PKT_BUF_SZ, DMA_FROM_DEVICE);
		pkt_status = rd->skb->data[len];
		if (pkt_status & SEEQ_RSTAT_FIG) {
			/* Packet is OK. */
			/* We don't want to receive our own packets */
			if (memcmp(rd->skb->data + 6, dev->dev_addr, ETH_ALEN)) {
				if (len > rx_copybreak) {
					skb = rd->skb;
					newskb = netdev_alloc_skb(dev, PKT_BUF_SZ);
					if (!newskb) {
						newskb = skb;
						skb = NULL;
						goto memory_squeeze;
					}
					skb_reserve(newskb, 2);
				} else {
					skb = netdev_alloc_skb_ip_align(dev, len);
					if (skb)
						skb_copy_to_linear_data(skb, rd->skb->data, len);

					newskb = rd->skb;
				}
memory_squeeze:
				if (skb) {
					skb_put(skb, len);
					skb->protocol = eth_type_trans(skb, dev);
					netif_rx(skb);
					dev->stats.rx_packets++;
					dev->stats.rx_bytes += len;
				} else {
					printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n",
					       dev->name);
					dev->stats.rx_dropped++;
				}
			} else {
				/* Silently drop my own packets */
				newskb = rd->skb;
			}
		} else {
			record_rx_errors(dev, pkt_status);
			newskb = rd->skb;
		}
		rd->skb = newskb;
		rd->rdma.pbuf = dma_map_single(dev->dev.parent,
					       newskb->data - 2,
					       PKT_BUF_SZ, DMA_FROM_DEVICE);

		/* Return the entry to the ring pool. */
		rd->rdma.cntinfo = RCNTINFO_INIT;
		sp->rx_new = NEXT_RX(sp->rx_new);
		dma_sync_desc_dev(dev, rd);
		rd = &sp->rx_desc[sp->rx_new];
		dma_sync_desc_cpu(dev, rd);
	}
	dma_sync_desc_cpu(dev, &sp->rx_desc[orig_end]);
	sp->rx_desc[orig_end].rdma.cntinfo &= ~(HPCDMA_EOR);
	dma_sync_desc_dev(dev, &sp->rx_desc[orig_end]);
	dma_sync_desc_cpu(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
	sp->rx_desc[PREV_RX(sp->rx_new)].rdma.cntinfo |= HPCDMA_EOR;
	dma_sync_desc_dev(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
	rx_maybe_restart(sp, hregs, sregs);
}

static inline void tx_maybe_reset_collisions(struct sgiseeq_private *sp,
					     struct sgiseeq_regs *sregs)
{
	if (sp->is_edlc) {
		sregs->rw.wregs.control = sp->control & ~(SEEQ_CTRL_XCNT);
		sregs->rw.wregs.control = sp->control;
	}
}

static inline void kick_tx(struct net_device *dev,
			   struct sgiseeq_private *sp,
			   struct hpc3_ethregs *hregs)
{
	struct sgiseeq_tx_desc *td;
	int i = sp->tx_old;

	/* If the HPC aint doin nothin, and there are more packets
	 * with ETXD cleared and XIU set we must make very certain
	 * that we restart the HPC else we risk locking up the
	 * adapter. The following code is only safe iff the HPCDMA
	 * is not active!
	 */
	td = &sp->tx_desc[i];
	dma_sync_desc_cpu(dev, td);
	while ((td->tdma.cntinfo & (HPCDMA_XIU | HPCDMA_ETXD)) ==
	      (HPCDMA_XIU | HPCDMA_ETXD)) {
		i = NEXT_TX(i);
		td = &sp->tx_desc[i];
		dma_sync_desc_cpu(dev, td);
	}
	if (td->tdma.cntinfo & HPCDMA_XIU) {
		hregs->tx_ndptr = VIRT_TO_DMA(sp, td);
		hregs->tx_ctrl = HPC3_ETXCTRL_ACTIVE;
	}
}

static inline void sgiseeq_tx(struct net_device *dev, struct sgiseeq_private *sp,
			      struct hpc3_ethregs *hregs,
			      struct sgiseeq_regs *sregs)
{
	struct sgiseeq_tx_desc *td;
	unsigned long status = hregs->tx_ctrl;
	int j;

	tx_maybe_reset_collisions(sp, sregs);

	if (!(status & (HPC3_ETXCTRL_ACTIVE | SEEQ_TSTAT_PTRANS))) {
		/* Oops, HPC detected some sort of error. */
		if (status & SEEQ_TSTAT_R16)
			dev->stats.tx_aborted_errors++;
		if (status & SEEQ_TSTAT_UFLOW)
			dev->stats.tx_fifo_errors++;
		if (status & SEEQ_TSTAT_LCLS)
			dev->stats.collisions++;
	}

	/* Ack 'em... */
	for (j = sp->tx_old; j != sp->tx_new; j = NEXT_TX(j)) {
		td = &sp->tx_desc[j];

		dma_sync_desc_cpu(dev, td);
		if (!(td->tdma.cntinfo & (HPCDMA_XIU)))
			break;
		if (!(td->tdma.cntinfo & (HPCDMA_ETXD))) {
			if (!(status & HPC3_ETXCTRL_ACTIVE)) {
				hregs->tx_ndptr = VIRT_TO_DMA(sp, td);
				hregs->tx_ctrl = HPC3_ETXCTRL_ACTIVE;
			}
			break;
		}
		dev->stats.tx_packets++;
		sp->tx_old = NEXT_TX(sp->tx_old);
		td->tdma.cntinfo &= ~(HPCDMA_XIU | HPCDMA_XIE);
		td->tdma.cntinfo |= HPCDMA_EOX;
		if (td->skb) {
			dev_kfree_skb_any(td->skb);
			td->skb = NULL;
		}
		dma_sync_desc_dev(dev, td);
	}
}

static irqreturn_t sgiseeq_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct hpc3_ethregs *hregs = sp->hregs;
	struct sgiseeq_regs *sregs = sp->sregs;

	spin_lock(&sp->tx_lock);

	/* Ack the IRQ and set software state. */
	hregs->reset = HPC3_ERST_CLRIRQ;

	/* Always check for received packets. */
	sgiseeq_rx(dev, sp, hregs, sregs);

	/* Only check for tx acks if we have something queued. */
	if (sp->tx_old != sp->tx_new)
		sgiseeq_tx(dev, sp, hregs, sregs);

	if ((TX_BUFFS_AVAIL(sp) > 0) && netif_queue_stopped(dev)) {
		netif_wake_queue(dev);
	}
	spin_unlock(&sp->tx_lock);

	return IRQ_HANDLED;
}

static int sgiseeq_open(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct sgiseeq_regs *sregs = sp->sregs;
	unsigned int irq = dev->irq;
	int err;

	if (request_irq(irq, sgiseeq_interrupt, 0, sgiseeqstr, dev)) {
		printk(KERN_ERR "Seeq8003: Can't get irq %d\n", dev->irq);
		err = -EAGAIN;
		return err;
	}

	err = init_seeq(dev, sp, sregs);
	if (err)
		goto out_free_irq;

	netif_start_queue(dev);

	return 0;

out_free_irq:
	free_irq(irq, dev);

	return err;
}

static int sgiseeq_close(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct sgiseeq_regs *sregs = sp->sregs;
	unsigned int irq = dev->irq;

	netif_stop_queue(dev);

	/* Shutdown the Seeq. */
	reset_hpc3_and_seeq(sp->hregs, sregs);
	free_irq(irq, dev);
	seeq_purge_ring(dev);

	return 0;
}

static inline int sgiseeq_reset(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct sgiseeq_regs *sregs = sp->sregs;
	int err;

	err = init_seeq(dev, sp, sregs);
	if (err)
		return err;

	dev->trans_start = jiffies;
	netif_wake_queue(dev);

	return 0;
}

static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct hpc3_ethregs *hregs = sp->hregs;
	unsigned long flags;
	struct sgiseeq_tx_desc *td;
	int len, entry;

	spin_lock_irqsave(&sp->tx_lock, flags);

	/* Setup... */
	len = skb->len;
	if (len < ETH_ZLEN) {
		if (skb_padto(skb, ETH_ZLEN)) {
			/* Padding failed; drop the lock before bailing out. */
			spin_unlock_irqrestore(&sp->tx_lock, flags);
			return NETDEV_TX_OK;
		}
		len = ETH_ZLEN;
	}

	dev->stats.tx_bytes += len;
	entry = sp->tx_new;
	td = &sp->tx_desc[entry];
	dma_sync_desc_cpu(dev, td);

	/* Create entry. There are so many races with adding a new
	 * descriptor to the chain:
	 * 1) Assume that the HPC is off processing a DMA chain while
	 *    we are changing all of the following.
	 * 2) Do not allow the HPC to look at a new descriptor until
	 *    we have completely set up its state. This means, do
	 *    not clear HPCDMA_EOX in the current last descriptor
	 *    until the one we are adding looks consistent and could
	 *    be processed right now.
	 * 3) The tx interrupt code must notice when we've added a new
	 *    entry and the HPC got to the end of the chain before we
	 *    added this new entry and restarted it.
	 */
	td->skb = skb;
	td->tdma.pbuf = dma_map_single(dev->dev.parent, skb->data,
				       len, DMA_TO_DEVICE);
	td->tdma.cntinfo = (len & HPCDMA_BCNT) |
			   HPCDMA_XIU | HPCDMA_EOXP | HPCDMA_XIE | HPCDMA_EOX;
	dma_sync_desc_dev(dev, td);
	if (sp->tx_old != sp->tx_new) {
		struct sgiseeq_tx_desc *backend;

		backend = &sp->tx_desc[PREV_TX(sp->tx_new)];
		dma_sync_desc_cpu(dev, backend);
		backend->tdma.cntinfo &= ~HPCDMA_EOX;
		dma_sync_desc_dev(dev, backend);
	}
	sp->tx_new = NEXT_TX(sp->tx_new);	/* Advance. */

	/* Maybe kick the HPC back into motion. */
	if (!(hregs->tx_ctrl & HPC3_ETXCTRL_ACTIVE))
		kick_tx(dev, sp, hregs);

	dev->trans_start = jiffies;

	if (!TX_BUFFS_AVAIL(sp))
		netif_stop_queue(dev);
	spin_unlock_irqrestore(&sp->tx_lock, flags);

	return NETDEV_TX_OK;
}

static void timeout(struct net_device *dev)
{
	printk(KERN_NOTICE "%s: transmit timed out, resetting\n", dev->name);
	sgiseeq_reset(dev);

	dev->trans_start = jiffies;
	netif_wake_queue(dev);
}

static void sgiseeq_set_multicast(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	unsigned char oldmode = sp->mode;

	if (dev->flags & IFF_PROMISC)
		sp->mode = SEEQ_RCMD_RANY;
	else if ((dev->flags & IFF_ALLMULTI) || dev->mc_count)
		sp->mode = SEEQ_RCMD_RBMCAST;
	else
		sp->mode = SEEQ_RCMD_RBCAST;

	/* XXX I know this sucks, but is there a better way to reprogram
	 * XXX the receiver? At least, this shouldn't happen too often.
	 */

	if (oldmode != sp->mode)
		sgiseeq_reset(dev);
}

static inline void setup_tx_ring(struct net_device *dev,
				 struct sgiseeq_tx_desc *buf,
				 int nbufs)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	int i = 0;

	while (i < (nbufs - 1)) {
		buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf + i + 1);
		buf[i].tdma.pbuf = 0;
		dma_sync_desc_dev(dev, &buf[i]);
		i++;
	}
	buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf);
	dma_sync_desc_dev(dev, &buf[i]);
}

static inline void setup_rx_ring(struct net_device *dev,
				 struct sgiseeq_rx_desc *buf,
				 int nbufs)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	int i = 0;

	while (i < (nbufs - 1)) {
		buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf + i + 1);
		buf[i].rdma.pbuf = 0;
		dma_sync_desc_dev(dev, &buf[i]);
		i++;
	}
	buf[i].rdma.pbuf = 0;
	buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf);
	dma_sync_desc_dev(dev, &buf[i]);
}

static const struct net_device_ops sgiseeq_netdev_ops = {
	.ndo_open		= sgiseeq_open,
	.ndo_stop		= sgiseeq_close,
	.ndo_start_xmit		= sgiseeq_start_xmit,
	.ndo_tx_timeout		= timeout,
	.ndo_set_multicast_list	= sgiseeq_set_multicast,
	.ndo_set_mac_address	= sgiseeq_set_mac_address,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
};

static int __devinit sgiseeq_probe(struct platform_device *pdev)
{
	struct sgiseeq_platform_data *pd = pdev->dev.platform_data;
	struct hpc3_regs *hpcregs = pd->hpc;
	struct sgiseeq_init_block *sr;
	unsigned int irq = pd->irq;
	struct sgiseeq_private *sp;
	struct net_device *dev;
	int err;

	dev = alloc_etherdev(sizeof (struct sgiseeq_private));
	if (!dev) {
		printk(KERN_ERR "Sgiseeq: Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out;
	}

	platform_set_drvdata(pdev, dev);
	sp = netdev_priv(dev);

	/* Make private data page aligned */
	sr = dma_alloc_noncoherent(&pdev->dev, sizeof(*sp->srings),
				   &sp->srings_dma, GFP_KERNEL);
	if (!sr) {
		printk(KERN_ERR "Sgiseeq: Page alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}
	sp->srings = sr;
	sp->rx_desc = sp->srings->rxvector;
	sp->tx_desc = sp->srings->txvector;

	/* A couple calculations now, saves many cycles later. */
	setup_rx_ring(dev, sp->rx_desc, SEEQ_RX_BUFFERS);
	setup_tx_ring(dev, sp->tx_desc, SEEQ_TX_BUFFERS);

	memcpy(dev->dev_addr, pd->mac, ETH_ALEN);

#ifdef DEBUG
	gpriv = sp;
	gdev = dev;
#endif
	sp->sregs = (struct sgiseeq_regs *) &hpcregs->eth_ext[0];
	sp->hregs = &hpcregs->ethregs;
	sp->name = sgiseeqstr;
	sp->mode = SEEQ_RCMD_RBCAST;

	/* Setup PIO and DMA transfer timing */
	sp->hregs->pconfig = 0x161;
	sp->hregs->dconfig = HPC3_EDCFG_FIRQ | HPC3_EDCFG_FEOP |
			     HPC3_EDCFG_FRXDC | HPC3_EDCFG_PTO | 0x026;

	/* Reset the chip. */
	hpc3_eth_reset(sp->hregs);

	sp->is_edlc = !(sp->sregs->rw.rregs.collision_tx[0] & 0xff);
	if (sp->is_edlc)
		sp->control = SEEQ_CTRL_XCNT | SEEQ_CTRL_ACCNT |
			      SEEQ_CTRL_SFLAG | SEEQ_CTRL_ESHORT |
			      SEEQ_CTRL_ENCARR;

	dev->netdev_ops = &sgiseeq_netdev_ops;
	dev->watchdog_timeo = (200 * HZ) / 1000;
	dev->irq = irq;

	if (register_netdev(dev)) {
		printk(KERN_ERR "Sgiseeq: Cannot register net device, "
		       "aborting.\n");
		err = -ENODEV;
		goto err_out_free_page;
	}

	printk(KERN_INFO "%s: %s %pM\n", dev->name, sgiseeqstr, dev->dev_addr);

	return 0;

err_out_free_page:
	dma_free_noncoherent(&pdev->dev, sizeof(*sp->srings), sp->srings,
			     sp->srings_dma);
err_out_free_dev:
	free_netdev(dev);

err_out:
	return err;
}

static int __exit sgiseeq_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct sgiseeq_private *sp = netdev_priv(dev);

	unregister_netdev(dev);
	dma_free_noncoherent(&pdev->dev, sizeof(*sp->srings), sp->srings,
			     sp->srings_dma);
	free_netdev(dev);
	platform_set_drvdata(pdev, NULL);

	return 0;
}

static struct platform_driver sgiseeq_driver = {
	.probe	= sgiseeq_probe,
	.remove	= __exit_p(sgiseeq_remove),
	.driver = {
		.name	= "sgiseeq",
		.owner	= THIS_MODULE,
	},
};

static int __init sgiseeq_module_init(void)
{
	if (platform_driver_register(&sgiseeq_driver)) {
		printk(KERN_ERR "Driver registration failed\n");
		return -ENODEV;
	}

	return 0;
}

static void __exit sgiseeq_module_exit(void)
{
	platform_driver_unregister(&sgiseeq_driver);
}

module_init(sgiseeq_module_init);
module_exit(sgiseeq_module_exit);

MODULE_DESCRIPTION("SGI Seeq 8003 driver");
MODULE_AUTHOR("Linux/MIPS Mailing List <linux-mips@linux-mips.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:sgiseeq");