/*
 * sgiseeq.c: Seeq8003 ethernet driver for SGI machines.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

#include <asm/sgi/hpc3.h>
#include <asm/sgi/ip22.h>
#include <asm/sgi/seeq.h>

static char *sgiseeqstr = "SGI Seeq8003";

/*
 * If you want speed, you do something silly, it always has worked for me. So,
 * with that in mind, I've decided to make this driver look completely like a
 * stupid Lance from a driver architecture perspective. Only difference is that
 * here our "ring buffer" looks and acts like a real Lance one does but is
 * laid out like how the HPC DMA and the Seeq want it to. You'd be surprised
 * how a stupid idea like this can pay off in performance, not to mention
 * making this driver 2,000 times easier to write. ;-)
 */

/* Tune these if we tend to run out often etc. */
#define SEEQ_RX_BUFFERS  16
#define SEEQ_TX_BUFFERS  16

#define PKT_BUF_SZ       1584

#define NEXT_RX(i)  (((i) + 1) & (SEEQ_RX_BUFFERS - 1))
#define NEXT_TX(i)  (((i) + 1) & (SEEQ_TX_BUFFERS - 1))
#define PREV_RX(i)  (((i) - 1) & (SEEQ_RX_BUFFERS - 1))
#define PREV_TX(i)  (((i) - 1) & (SEEQ_TX_BUFFERS - 1))

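/*
 * The masks above wrap correctly only because both ring sizes are
 * powers of two: e.g. NEXT_RX(15) == ((15 + 1) & 15) == 0 and
 * PREV_RX(0) == ((0 - 1) & 15) == 15.
 */
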
#define TX_BUFFS_AVAIL(sp) ((sp->tx_old <= sp->tx_new) ? \
			    sp->tx_old + (SEEQ_TX_BUFFERS - 1) - sp->tx_new : \
			    sp->tx_old - sp->tx_new - 1)

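/*
 * Worked example: with the ring empty (tx_old == tx_new) this yields
 * SEEQ_TX_BUFFERS - 1 == 15.  One slot is deliberately kept unused so
 * that a completely full ring (TX_BUFFS_AVAIL(sp) == 0) remains
 * distinguishable from an empty one.
 */
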
#define VIRT_TO_DMA(sp, v) ((sp)->srings_dma +                                 \
			    (dma_addr_t)((unsigned long)(v) -                  \
					 (unsigned long)((sp)->rx_desc)))

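/*
 * This works because all descriptors live in one contiguous DMA
 * allocation (struct sgiseeq_init_block below) whose first member is
 * the rx vector: a descriptor's bus address is simply its byte offset
 * from rx_desc added to the srings_dma handle.
 */
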
/* Copy frames shorter than rx_copybreak, otherwise pass on up in
 * a full sized sk_buff.  Value of 100 stolen from tulip.c (!alpha).
 */
static int rx_copybreak = 100;

#define PAD_SIZE    (128 - sizeof(struct hpc_dma_desc) - sizeof(void *))

struct sgiseeq_rx_desc {
	volatile struct hpc_dma_desc rdma;
	u8 padding[PAD_SIZE];
	struct sk_buff *skb;
};

struct sgiseeq_tx_desc {
	volatile struct hpc_dma_desc tdma;
	u8 padding[PAD_SIZE];
	struct sk_buff *skb;
};

/*
 * Warning: This structure is laid out in a certain way because HPC dma
 *          descriptors must be 8-byte aligned.  So don't touch this without
 *          some care.
 */
struct sgiseeq_init_block { /* Note the name ;-) */
	struct sgiseeq_rx_desc rxvector[SEEQ_RX_BUFFERS];
	struct sgiseeq_tx_desc txvector[SEEQ_TX_BUFFERS];
};

struct sgiseeq_private {
	struct sgiseeq_init_block *srings;
	dma_addr_t srings_dma;

	/* Ptrs to the descriptors in uncached space. */
	struct sgiseeq_rx_desc *rx_desc;
	struct sgiseeq_tx_desc *tx_desc;

	char *name;
	struct hpc3_ethregs *hregs;
	struct sgiseeq_regs *sregs;

	/* Ring entry counters. */
	unsigned int rx_new, tx_new;
	unsigned int rx_old, tx_old;

	int is_edlc;
	unsigned char control;
	unsigned char mode;

	spinlock_t tx_lock;
};

static inline void dma_sync_desc_cpu(struct net_device *dev, void *addr)
{
	dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
		       DMA_FROM_DEVICE);
}

static inline void dma_sync_desc_dev(struct net_device *dev, void *addr)
{
	dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
		       DMA_TO_DEVICE);
}

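/*
 * The rings live in memory obtained from dma_alloc_noncoherent(), so
 * each CPU read of a descriptor is bracketed by a sync "for cpu" and
 * each CPU write by a sync "for device".  Using sizeof(struct
 * sgiseeq_rx_desc) for tx descriptors too is safe since PAD_SIZE pads
 * both descriptor types to the same 128-byte footprint.
 */
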
static inline void hpc3_eth_reset(struct hpc3_ethregs *hregs)
{
	hregs->reset = HPC3_ERST_CRESET | HPC3_ERST_CLRIRQ;
	udelay(20);
	hregs->reset = 0;
}

static inline void reset_hpc3_and_seeq(struct hpc3_ethregs *hregs,
				       struct sgiseeq_regs *sregs)
{
	hregs->rx_ctrl = hregs->tx_ctrl = 0;
	hpc3_eth_reset(hregs);
}

#define RSTAT_GO_BITS (SEEQ_RCMD_IGOOD | SEEQ_RCMD_IEOF | SEEQ_RCMD_ISHORT | \
		       SEEQ_RCMD_IDRIB | SEEQ_RCMD_ICRC)

static inline void seeq_go(struct sgiseeq_private *sp,
			   struct hpc3_ethregs *hregs,
			   struct sgiseeq_regs *sregs)
{
	sregs->rstat = sp->mode | RSTAT_GO_BITS;
	hregs->rx_ctrl = HPC3_ERXCTRL_ACTIVE;
}

static inline void __sgiseeq_set_mac_address(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct sgiseeq_regs *sregs = sp->sregs;
	int i;

	sregs->tstat = SEEQ_TCMD_RB0;
	for (i = 0; i < 6; i++)
		sregs->rw.eth_addr[i] = dev->dev_addr[i];
}

static int sgiseeq_set_mac_address(struct net_device *dev, void *addr)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct sockaddr *sa = addr;

	memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);

	spin_lock_irq(&sp->tx_lock);
	__sgiseeq_set_mac_address(dev);
	spin_unlock_irq(&sp->tx_lock);

	return 0;
}

#define TCNTINFO_INIT (HPCDMA_EOX | HPCDMA_ETXD)
#define RCNTCFG_INIT  (HPCDMA_OWN | HPCDMA_EORP | HPCDMA_XIE)
#define RCNTINFO_INIT (RCNTCFG_INIT | (PKT_BUF_SZ & HPCDMA_BCNT))

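/*
 * RCNTINFO_INIT hands an rx descriptor to the HPC: HPCDMA_OWN marks it
 * hardware-owned, HPCDMA_XIE asks for an interrupt, and the low
 * HPCDMA_BCNT bits hold the buffer byte count.  Judging by the length
 * math in sgiseeq_rx(), the count left in BCNT after reception is the
 * unused remainder, from which the frame length is recovered.
 */
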
static int seeq_init_ring(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	int i;

	netif_stop_queue(dev);
	sp->rx_new = sp->tx_new = 0;
	sp->rx_old = sp->tx_old = 0;

	__sgiseeq_set_mac_address(dev);

	/* Setup tx ring. */
	for(i = 0; i < SEEQ_TX_BUFFERS; i++) {
		sp->tx_desc[i].tdma.cntinfo = TCNTINFO_INIT;
		dma_sync_desc_dev(dev, &sp->tx_desc[i]);
	}

	/* And now the rx ring. */
	for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
		if (!sp->rx_desc[i].skb) {
			dma_addr_t dma_addr;
			struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ);

			if (skb == NULL)
				return -ENOMEM;
			skb_reserve(skb, 2);
			dma_addr = dma_map_single(dev->dev.parent,
						  skb->data - 2,
						  PKT_BUF_SZ, DMA_FROM_DEVICE);
			sp->rx_desc[i].skb = skb;
			sp->rx_desc[i].rdma.pbuf = dma_addr;
		}
		sp->rx_desc[i].rdma.cntinfo = RCNTINFO_INIT;
		dma_sync_desc_dev(dev, &sp->rx_desc[i]);
	}
	sp->rx_desc[i - 1].rdma.cntinfo |= HPCDMA_EOR;
	dma_sync_desc_dev(dev, &sp->rx_desc[i - 1]);
	return 0;
}

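/*
 * HPCDMA_EOR is the receiver's end-of-ring fence.  It is not fixed:
 * sgiseeq_rx() below clears it from the old tail and sets it on
 * PREV_RX(rx_new) after each service pass, so the fence always trails
 * one slot behind the next descriptor the CPU expects to own.
 */
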
static void seeq_purge_ring(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	int i;

	/* Clear out the tx ring. */
	for (i = 0; i < SEEQ_TX_BUFFERS; i++) {
		if (sp->tx_desc[i].skb) {
			dev_kfree_skb(sp->tx_desc[i].skb);
			sp->tx_desc[i].skb = NULL;
		}
	}

	/* And now the rx ring. */
	for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
		if (sp->rx_desc[i].skb) {
			dev_kfree_skb(sp->rx_desc[i].skb);
			sp->rx_desc[i].skb = NULL;
		}
	}
}

#ifdef DEBUG
static struct sgiseeq_private *gpriv;
static struct net_device *gdev;

static void sgiseeq_dump_rings(void)
{
	static int once;
	struct sgiseeq_rx_desc *r = gpriv->rx_desc;
	struct sgiseeq_tx_desc *t = gpriv->tx_desc;
	struct hpc3_ethregs *hregs = gpriv->hregs;
	int i;

	if (once)
		return;
	once++;
	printk("RING DUMP:\n");
	for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
		printk("RX [%d]: @(%p) [%08x,%08x,%08x] ",
		       i, (&r[i]), r[i].rdma.pbuf, r[i].rdma.cntinfo,
		       r[i].rdma.pnext);
		i += 1;
		printk("-- [%d]: @(%p) [%08x,%08x,%08x]\n",
		       i, (&r[i]), r[i].rdma.pbuf, r[i].rdma.cntinfo,
		       r[i].rdma.pnext);
	}
	for (i = 0; i < SEEQ_TX_BUFFERS; i++) {
		printk("TX [%d]: @(%p) [%08x,%08x,%08x] ",
		       i, (&t[i]), t[i].tdma.pbuf, t[i].tdma.cntinfo,
		       t[i].tdma.pnext);
		i += 1;
		printk("-- [%d]: @(%p) [%08x,%08x,%08x]\n",
		       i, (&t[i]), t[i].tdma.pbuf, t[i].tdma.cntinfo,
		       t[i].tdma.pnext);
	}
	printk("INFO: [rx_new = %d rx_old=%d] [tx_new = %d tx_old = %d]\n",
	       gpriv->rx_new, gpriv->rx_old, gpriv->tx_new, gpriv->tx_old);
	printk("RREGS: rx_cbptr[%08x] rx_ndptr[%08x] rx_ctrl[%08x]\n",
	       hregs->rx_cbptr, hregs->rx_ndptr, hregs->rx_ctrl);
	printk("TREGS: tx_cbptr[%08x] tx_ndptr[%08x] tx_ctrl[%08x]\n",
	       hregs->tx_cbptr, hregs->tx_ndptr, hregs->tx_ctrl);
}
#endif

#define TSTAT_INIT_SEEQ (SEEQ_TCMD_IPT|SEEQ_TCMD_I16|SEEQ_TCMD_IC|SEEQ_TCMD_IUF)
#define TSTAT_INIT_EDLC ((TSTAT_INIT_SEEQ) | SEEQ_TCMD_RB2)

static int init_seeq(struct net_device *dev, struct sgiseeq_private *sp,
		     struct sgiseeq_regs *sregs)
{
	struct hpc3_ethregs *hregs = sp->hregs;
	int err;

	reset_hpc3_and_seeq(hregs, sregs);
	err = seeq_init_ring(dev);
	if (err)
		return err;

	/* Setup to field the proper interrupt types. */
	if (sp->is_edlc) {
		sregs->tstat = TSTAT_INIT_EDLC;
		sregs->rw.wregs.control = sp->control;
		sregs->rw.wregs.frame_gap = 0;
	} else {
		sregs->tstat = TSTAT_INIT_SEEQ;
	}

	hregs->rx_ndptr = VIRT_TO_DMA(sp, sp->rx_desc);
	hregs->tx_ndptr = VIRT_TO_DMA(sp, sp->tx_desc);

	seeq_go(sp, hregs, sregs);
	return 0;
}

static void record_rx_errors(struct net_device *dev, unsigned char status)
{
	if (status & SEEQ_RSTAT_OVERF ||
	    status & SEEQ_RSTAT_SFRAME)
		dev->stats.rx_over_errors++;
	if (status & SEEQ_RSTAT_CERROR)
		dev->stats.rx_crc_errors++;
	if (status & SEEQ_RSTAT_DERROR)
		dev->stats.rx_frame_errors++;
	if (status & SEEQ_RSTAT_REOF)
		dev->stats.rx_errors++;
}

static inline void rx_maybe_restart(struct sgiseeq_private *sp,
				    struct hpc3_ethregs *hregs,
				    struct sgiseeq_regs *sregs)
{
	if (!(hregs->rx_ctrl & HPC3_ERXCTRL_ACTIVE)) {
		hregs->rx_ndptr = VIRT_TO_DMA(sp, sp->rx_desc + sp->rx_new);
		seeq_go(sp, hregs, sregs);
	}
}

static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp,
			      struct hpc3_ethregs *hregs,
			      struct sgiseeq_regs *sregs)
{
	struct sgiseeq_rx_desc *rd;
	struct sk_buff *skb = NULL;
	struct sk_buff *newskb;
	unsigned char pkt_status;
	int len = 0;
	unsigned int orig_end = PREV_RX(sp->rx_new);

	/* Service every received packet. */
	rd = &sp->rx_desc[sp->rx_new];
	dma_sync_desc_cpu(dev, rd);
	while (!(rd->rdma.cntinfo & HPCDMA_OWN)) {
		len = PKT_BUF_SZ - (rd->rdma.cntinfo & HPCDMA_BCNT) - 3;
		dma_unmap_single(dev->dev.parent, rd->rdma.pbuf,
				 PKT_BUF_SZ, DMA_FROM_DEVICE);
		pkt_status = rd->skb->data[len];
		if (pkt_status & SEEQ_RSTAT_FIG) {
			/* Packet is OK. */
			/* We don't want to receive our own packets */
			if (memcmp(rd->skb->data + 6, dev->dev_addr, ETH_ALEN)) {
				if (len > rx_copybreak) {
					skb = rd->skb;
					newskb = netdev_alloc_skb(dev, PKT_BUF_SZ);
					if (!newskb) {
						newskb = skb;
						skb = NULL;
						goto memory_squeeze;
					}
					skb_reserve(newskb, 2);
				} else {
					skb = netdev_alloc_skb_ip_align(dev, len);
					if (skb)
						skb_copy_to_linear_data(skb, rd->skb->data, len);

					newskb = rd->skb;
				}
memory_squeeze:
				if (skb) {
					skb_put(skb, len);
					skb->protocol = eth_type_trans(skb, dev);
					netif_rx(skb);
					dev->stats.rx_packets++;
					dev->stats.rx_bytes += len;
				} else {
					printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n",
						dev->name);
					dev->stats.rx_dropped++;
				}
			} else {
				/* Silently drop my own packets */
				newskb = rd->skb;
			}
		} else {
			record_rx_errors(dev, pkt_status);
			newskb = rd->skb;
		}
		rd->skb = newskb;
		rd->rdma.pbuf = dma_map_single(dev->dev.parent,
					       newskb->data - 2,
					       PKT_BUF_SZ, DMA_FROM_DEVICE);

		/* Return the entry to the ring pool. */
		rd->rdma.cntinfo = RCNTINFO_INIT;
		sp->rx_new = NEXT_RX(sp->rx_new);
		dma_sync_desc_dev(dev, rd);
		rd = &sp->rx_desc[sp->rx_new];
		dma_sync_desc_cpu(dev, rd);
	}
	dma_sync_desc_cpu(dev, &sp->rx_desc[orig_end]);
	sp->rx_desc[orig_end].rdma.cntinfo &= ~(HPCDMA_EOR);
	dma_sync_desc_dev(dev, &sp->rx_desc[orig_end]);
	dma_sync_desc_cpu(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
	sp->rx_desc[PREV_RX(sp->rx_new)].rdma.cntinfo |= HPCDMA_EOR;
	dma_sync_desc_dev(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
	rx_maybe_restart(sp, hregs, sregs);
}

static inline void tx_maybe_reset_collisions(struct sgiseeq_private *sp,
					     struct sgiseeq_regs *sregs)
{
	if (sp->is_edlc) {
		sregs->rw.wregs.control = sp->control & ~(SEEQ_CTRL_XCNT);
		sregs->rw.wregs.control = sp->control;
	}
}

static inline void kick_tx(struct net_device *dev,
			   struct sgiseeq_private *sp,
			   struct hpc3_ethregs *hregs)
{
	struct sgiseeq_tx_desc *td;
	int i = sp->tx_old;

	/* If the HPC aint doin nothin, and there are more packets
	 * with ETXD cleared and XIU set we must make very certain
	 * that we restart the HPC else we risk locking up the
	 * adapter.  The following code is only safe iff the HPCDMA
	 * is not active!
	 */
	td = &sp->tx_desc[i];
	dma_sync_desc_cpu(dev, td);
	while ((td->tdma.cntinfo & (HPCDMA_XIU | HPCDMA_ETXD)) ==
	      (HPCDMA_XIU | HPCDMA_ETXD)) {
		i = NEXT_TX(i);
		td = &sp->tx_desc[i];
		dma_sync_desc_cpu(dev, td);
	}
	if (td->tdma.cntinfo & HPCDMA_XIU) {
		hregs->tx_ndptr = VIRT_TO_DMA(sp, td);
		hregs->tx_ctrl = HPC3_ETXCTRL_ACTIVE;
	}
}

static inline void sgiseeq_tx(struct net_device *dev, struct sgiseeq_private *sp,
			      struct hpc3_ethregs *hregs,
			      struct sgiseeq_regs *sregs)
{
	struct sgiseeq_tx_desc *td;
	unsigned long status = hregs->tx_ctrl;
	int j;

	tx_maybe_reset_collisions(sp, sregs);

	if (!(status & (HPC3_ETXCTRL_ACTIVE | SEEQ_TSTAT_PTRANS))) {
		/* Oops, HPC detected some sort of error. */
		if (status & SEEQ_TSTAT_R16)
			dev->stats.tx_aborted_errors++;
		if (status & SEEQ_TSTAT_UFLOW)
			dev->stats.tx_fifo_errors++;
		if (status & SEEQ_TSTAT_LCLS)
			dev->stats.collisions++;
	}

	/* Ack 'em... */
	for (j = sp->tx_old; j != sp->tx_new; j = NEXT_TX(j)) {
		td = &sp->tx_desc[j];

		dma_sync_desc_cpu(dev, td);
		if (!(td->tdma.cntinfo & (HPCDMA_XIU)))
			break;
		if (!(td->tdma.cntinfo & (HPCDMA_ETXD))) {
			if (!(status & HPC3_ETXCTRL_ACTIVE)) {
				hregs->tx_ndptr = VIRT_TO_DMA(sp, td);
				hregs->tx_ctrl = HPC3_ETXCTRL_ACTIVE;
			}
			break;
		}
		dev->stats.tx_packets++;
		sp->tx_old = NEXT_TX(sp->tx_old);
		td->tdma.cntinfo &= ~(HPCDMA_XIU | HPCDMA_XIE);
		td->tdma.cntinfo |= HPCDMA_EOX;
		if (td->skb) {
			dev_kfree_skb_any(td->skb);
			td->skb = NULL;
		}
		dma_sync_desc_dev(dev, td);
	}
}

static irqreturn_t sgiseeq_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct hpc3_ethregs *hregs = sp->hregs;
	struct sgiseeq_regs *sregs = sp->sregs;

	spin_lock(&sp->tx_lock);

	/* Ack the IRQ and set software state. */
	hregs->reset = HPC3_ERST_CLRIRQ;

	/* Always check for received packets. */
	sgiseeq_rx(dev, sp, hregs, sregs);

	/* Only check for tx acks if we have something queued. */
	if (sp->tx_old != sp->tx_new)
		sgiseeq_tx(dev, sp, hregs, sregs);

	if ((TX_BUFFS_AVAIL(sp) > 0) && netif_queue_stopped(dev)) {
		netif_wake_queue(dev);
	}
	spin_unlock(&sp->tx_lock);

	return IRQ_HANDLED;
}

static int sgiseeq_open(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct sgiseeq_regs *sregs = sp->sregs;
	unsigned int irq = dev->irq;
	int err;

	if (request_irq(irq, sgiseeq_interrupt, 0, sgiseeqstr, dev)) {
		printk(KERN_ERR "Seeq8003: Can't get irq %d\n", dev->irq);
		return -EAGAIN;
	}

	err = init_seeq(dev, sp, sregs);
	if (err)
		goto out_free_irq;

	netif_start_queue(dev);

	return 0;

out_free_irq:
	free_irq(irq, dev);

	return err;
}

static int sgiseeq_close(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct sgiseeq_regs *sregs = sp->sregs;
	unsigned int irq = dev->irq;

	netif_stop_queue(dev);

	/* Shutdown the Seeq. */
	reset_hpc3_and_seeq(sp->hregs, sregs);
	free_irq(irq, dev);
	seeq_purge_ring(dev);

	return 0;
}

static inline int sgiseeq_reset(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct sgiseeq_regs *sregs = sp->sregs;
	int err;

	err = init_seeq(dev, sp, sregs);
	if (err)
		return err;

	dev->trans_start = jiffies;
	netif_wake_queue(dev);

	return 0;
}

static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct hpc3_ethregs *hregs = sp->hregs;
	unsigned long flags;
	struct sgiseeq_tx_desc *td;
	int len, entry;

	spin_lock_irqsave(&sp->tx_lock, flags);

	/* Setup... */
	len = skb->len;
	if (len < ETH_ZLEN) {
		if (skb_padto(skb, ETH_ZLEN)) {
			spin_unlock_irqrestore(&sp->tx_lock, flags);
			return NETDEV_TX_OK;
		}
		len = ETH_ZLEN;
	}

	dev->stats.tx_bytes += len;
	entry = sp->tx_new;
	td = &sp->tx_desc[entry];
	dma_sync_desc_cpu(dev, td);

	/* Create entry.  There are so many races with adding a new
	 * descriptor to the chain:
	 * 1) Assume that the HPC is off processing a DMA chain while
	 *    we are changing all of the following.
	 * 2) Do not allow the HPC to look at a new descriptor until
	 *    we have completely set up its state.  This means, do
	 *    not clear HPCDMA_EOX in the current last descriptor
	 *    until the one we are adding looks consistent and could
	 *    be processed right now.
	 * 3) The tx interrupt code must notice when we've added a new
	 *    entry and the HPC got to the end of the chain before we
	 *    added this new entry and restarted it.
	 */
	td->skb = skb;
	td->tdma.pbuf = dma_map_single(dev->dev.parent, skb->data,
				       len, DMA_TO_DEVICE);
	td->tdma.cntinfo = (len & HPCDMA_BCNT) |
			   HPCDMA_XIU | HPCDMA_EOXP | HPCDMA_XIE | HPCDMA_EOX;
	dma_sync_desc_dev(dev, td);
	if (sp->tx_old != sp->tx_new) {
		struct sgiseeq_tx_desc *backend;

		backend = &sp->tx_desc[PREV_TX(sp->tx_new)];
		dma_sync_desc_cpu(dev, backend);
		backend->tdma.cntinfo &= ~HPCDMA_EOX;
		dma_sync_desc_dev(dev, backend);
	}
	sp->tx_new = NEXT_TX(sp->tx_new); /* Advance. */

	/* Maybe kick the HPC back into motion. */
	if (!(hregs->tx_ctrl & HPC3_ETXCTRL_ACTIVE))
		kick_tx(dev, sp, hregs);

	dev->trans_start = jiffies;

	if (!TX_BUFFS_AVAIL(sp))
		netif_stop_queue(dev);
	spin_unlock_irqrestore(&sp->tx_lock, flags);

	return NETDEV_TX_OK;
}

static void timeout(struct net_device *dev)
{
	printk(KERN_NOTICE "%s: transmit timed out, resetting\n", dev->name);
	sgiseeq_reset(dev);

	dev->trans_start = jiffies;
	netif_wake_queue(dev);
}

static void sgiseeq_set_multicast(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	unsigned char oldmode = sp->mode;

	if(dev->flags & IFF_PROMISC)
		sp->mode = SEEQ_RCMD_RANY;
	else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev))
		sp->mode = SEEQ_RCMD_RBMCAST;
	else
		sp->mode = SEEQ_RCMD_RBCAST;

	/* XXX I know this sucks, but is there a better way to reprogram
	 * XXX the receiver? At least, this shouldn't happen too often.
	 */

	if (oldmode != sp->mode)
		sgiseeq_reset(dev);
}

static inline void setup_tx_ring(struct net_device *dev,
				 struct sgiseeq_tx_desc *buf,
				 int nbufs)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	int i = 0;

	while (i < (nbufs - 1)) {
		buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf + i + 1);
		buf[i].tdma.pbuf = 0;
		dma_sync_desc_dev(dev, &buf[i]);
		i++;
	}
	buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf);
	dma_sync_desc_dev(dev, &buf[i]);
}

static inline void setup_rx_ring(struct net_device *dev,
				 struct sgiseeq_rx_desc *buf,
				 int nbufs)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	int i = 0;

	while (i < (nbufs - 1)) {
		buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf + i + 1);
		buf[i].rdma.pbuf = 0;
		dma_sync_desc_dev(dev, &buf[i]);
		i++;
	}
	buf[i].rdma.pbuf = 0;
	buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf);
	dma_sync_desc_dev(dev, &buf[i]);
}

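/*
 * Both rings are built as circular chains: each descriptor's pnext
 * holds the bus address of its successor and the last points back at
 * the first, so the HPC can walk descriptors without CPU intervention
 * until it reaches one it does not own (or the HPCDMA_EOR fence on rx).
 */
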
static const struct net_device_ops sgiseeq_netdev_ops = {
	.ndo_open		= sgiseeq_open,
	.ndo_stop		= sgiseeq_close,
	.ndo_start_xmit		= sgiseeq_start_xmit,
	.ndo_tx_timeout		= timeout,
	.ndo_set_multicast_list	= sgiseeq_set_multicast,
	.ndo_set_mac_address	= sgiseeq_set_mac_address,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
};

static int __devinit sgiseeq_probe(struct platform_device *pdev)
{
	struct sgiseeq_platform_data *pd = pdev->dev.platform_data;
	struct hpc3_regs *hpcregs = pd->hpc;
	struct sgiseeq_init_block *sr;
	unsigned int irq = pd->irq;
	struct sgiseeq_private *sp;
	struct net_device *dev;
	int err;

	dev = alloc_etherdev(sizeof (struct sgiseeq_private));
	if (!dev) {
		printk(KERN_ERR "Sgiseeq: Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out;
	}

	platform_set_drvdata(pdev, dev);
	sp = netdev_priv(dev);

	/* Make private data page aligned */
	sr = dma_alloc_noncoherent(&pdev->dev, sizeof(*sp->srings),
				&sp->srings_dma, GFP_KERNEL);
	if (!sr) {
		printk(KERN_ERR "Sgiseeq: Page alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}
	sp->srings = sr;
	sp->rx_desc = sp->srings->rxvector;
	sp->tx_desc = sp->srings->txvector;

	/* A couple calculations now, saves many cycles later. */
	setup_rx_ring(dev, sp->rx_desc, SEEQ_RX_BUFFERS);
	setup_tx_ring(dev, sp->tx_desc, SEEQ_TX_BUFFERS);

	memcpy(dev->dev_addr, pd->mac, ETH_ALEN);

#ifdef DEBUG
	gpriv = sp;
	gdev = dev;
#endif
	sp->sregs = (struct sgiseeq_regs *) &hpcregs->eth_ext[0];
	sp->hregs = &hpcregs->ethregs;
	sp->name = sgiseeqstr;
	sp->mode = SEEQ_RCMD_RBCAST;

	/* Setup PIO and DMA transfer timing */
	sp->hregs->pconfig = 0x161;
	sp->hregs->dconfig = HPC3_EDCFG_FIRQ | HPC3_EDCFG_FEOP |
			     HPC3_EDCFG_FRXDC | HPC3_EDCFG_PTO | 0x026;

	/* Reset the chip. */
	hpc3_eth_reset(sp->hregs);

	sp->is_edlc = !(sp->sregs->rw.rregs.collision_tx[0] & 0xff);
	if (sp->is_edlc)
		sp->control = SEEQ_CTRL_XCNT | SEEQ_CTRL_ACCNT |
			      SEEQ_CTRL_SFLAG | SEEQ_CTRL_ESHORT |
			      SEEQ_CTRL_ENCARR;

	dev->netdev_ops		= &sgiseeq_netdev_ops;
	dev->watchdog_timeo	= (200 * HZ) / 1000;
	dev->irq		= irq;

	if (register_netdev(dev)) {
		printk(KERN_ERR "Sgiseeq: Cannot register net device, "
		       "aborting.\n");
		err = -ENODEV;
		goto err_out_free_page;
	}

	printk(KERN_INFO "%s: %s %pM\n", dev->name, sgiseeqstr, dev->dev_addr);

	return 0;

err_out_free_page:
	free_page((unsigned long) sp->srings);
err_out_free_dev:
	free_netdev(dev);

err_out:
	return err;
}

static int __exit sgiseeq_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct sgiseeq_private *sp = netdev_priv(dev);

	unregister_netdev(dev);
	dma_free_noncoherent(&pdev->dev, sizeof(*sp->srings), sp->srings,
			     sp->srings_dma);
	free_netdev(dev);
	platform_set_drvdata(pdev, NULL);

	return 0;
}

static struct platform_driver sgiseeq_driver = {
	.probe	= sgiseeq_probe,
	.remove	= __exit_p(sgiseeq_remove),
	.driver = {
		.name	= "sgiseeq",
		.owner	= THIS_MODULE,
	}
};

static int __init sgiseeq_module_init(void)
{
	if (platform_driver_register(&sgiseeq_driver)) {
		printk(KERN_ERR "Driver registration failed\n");
		return -ENODEV;
	}

	return 0;
}

static void __exit sgiseeq_module_exit(void)
{
	platform_driver_unregister(&sgiseeq_driver);
}

module_init(sgiseeq_module_init);
module_exit(sgiseeq_module_exit);

MODULE_DESCRIPTION("SGI Seeq 8003 driver");
MODULE_AUTHOR("Linux/MIPS Mailing List <linux-mips@linux-mips.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:sgiseeq");