/*
 * Ethernet driver for Motorola MPC8260.
 * Copyright (c) 1999 Dan Malek (dmalek@jlc.net)
 * Copyright (c) 2000 MontaVista Software Inc. (source@mvista.com)
 *
 * I copied this from the 8xx CPM Ethernet driver, so follow the
 * credits back through that.
 *
 * This version of the driver is somewhat selectable for the different
 * processor/board combinations.  It works for the boards I know about
 * now, and should be easily modified to include others.  Some of the
 * configuration information is contained in <asm/cpm1.h> and the
 * board-specific header files.
 *
 * Buffer descriptors are kept in the CPM dual port RAM, and the frame
 * buffers are in the host memory.
 *
 * Right now, I am very wasteful with the buffers.  I allocate memory
 * pages and then divide them into 2K frame buffers.  This way I know I
 * have buffers large enough to hold one frame within one buffer descriptor.
 * Once I get this working, I will use 64 or 128 byte CPM buffers, which
 * will be much more memory efficient and will easily handle lots of
 * small packets.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

#include <asm/immap_cpm2.h>
#include <asm/pgtable.h>
#include <asm/mpc8260.h>
#include <asm/uaccess.h>
#include <asm/cpm2.h>
#include <asm/irq.h>
/*
 * The MPC8260 CPM performs the Ethernet processing on an SCC.  It can use
 * an arbitrary number of buffers on byte boundaries, but must have at
 * least two receive buffers to prevent constant overrun conditions.
 *
 * The buffer descriptors are allocated from the CPM dual port memory
 * with the data buffers allocated from host memory, just like all other
 * serial communication protocols.  The host memory buffers are allocated
 * from the free page pool, and then divided into smaller receive and
 * transmit buffers.  The size of the buffers should be a power of two,
 * since that nicely divides the page.  This creates a ring buffer
 * structure similar to the LANCE and other controllers.
 *
 * Like the LANCE driver:
 * The driver runs as two independent, single-threaded flows of control.  One
 * is the send-packet routine, which enforces single-threaded use by the
 * cep->tx_full flag.  The other thread is the interrupt handler, which is
 * single threaded by the hardware and other software.
 */
/* The transmitter timeout.
 */
#define TX_TIMEOUT      (2*HZ)
/* The number of Tx and Rx buffers.  These are allocated from the page
 * pool.  The code may assume these are power of two, so it is best
 * to keep them that size.
 * We don't need to allocate pages for the transmitter.  We just use
 * the skbuffer directly.
 */
#define CPM_ENET_RX_PAGES       4
#define CPM_ENET_RX_FRSIZE      2048
#define CPM_ENET_RX_FRPPG       (PAGE_SIZE / CPM_ENET_RX_FRSIZE)
#define RX_RING_SIZE            (CPM_ENET_RX_FRPPG * CPM_ENET_RX_PAGES)
#define TX_RING_SIZE            8       /* Must be power of two */
#define TX_RING_MOD_MASK        7       /*   for this to work */
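
/* Illustrative sketch, not part of the original driver: because
 * TX_RING_SIZE is a power of two, ring indices wrap with a mask instead
 * of a modulo.  This hypothetical helper just restates what the
 * skb_cur/skb_dirty updates below do inline.
 */
static inline ushort tx_ring_next(ushort idx)
{
        /* Equivalent to (idx + 1) % TX_RING_SIZE when the size is 2^n. */
        return (idx + 1) & TX_RING_MOD_MASK;
}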
/* The CPM stores dest/src/type, data, and checksum for receive packets.
 */
#define PKT_MAXBUF_SIZE         1518
#define PKT_MINBUF_SIZE         64
#define PKT_MAXBLR_SIZE         1520
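
/* Note: PKT_MAXBLR_SIZE is PKT_MAXBUF_SIZE rounded up to a multiple of
 * four, because the maximum receive buffer length register (MRBLR, set
 * during initialization below) must be a multiple of four.
 */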
/* The CPM buffer descriptors track the ring buffers.  The rx_bd_base and
 * tx_bd_base always point to the base of the buffer descriptors.  The
 * cur_rx and cur_tx point to the currently available buffer.
 * The dirty_tx tracks the current buffer that is being sent by the
 * controller.  The cur_tx and dirty_tx are equal under both completely
 * empty and completely full conditions.  The empty/ready indicator in
 * the buffer descriptor determines the actual condition.
 */
struct scc_enet_private {
        /* The saved address of a sent-in-place packet/buffer, for skfree(). */
        struct sk_buff *tx_skbuff[TX_RING_SIZE];
        ushort  skb_cur;
        ushort  skb_dirty;

        /* CPM dual port RAM relative addresses.
         */
        cbd_t   *rx_bd_base;            /* Address of Rx and Tx buffers. */
        cbd_t   *tx_bd_base;
        cbd_t   *cur_rx, *cur_tx;       /* The next free ring entry */
        cbd_t   *dirty_tx;              /* The ring entries to be free()ed. */
        volatile scc_t  *sccp;

        struct  net_device_stats stats;
        uint    tx_full;
        spinlock_t lock;
};
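
/* Illustrative note, not in the original driver: cur_tx == dirty_tx is
 * ambiguous between "ring empty" and "ring full", so the descriptor's
 * ready bit (together with tx_full) breaks the tie, as the comment
 * above describes:
 *
 *      ring empty: cur_tx == dirty_tx && !(cur_tx->cbd_sc & BD_ENET_TX_READY)
 *      ring full:  cur_tx == dirty_tx &&  (cur_tx->cbd_sc & BD_ENET_TX_READY)
 */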
static int scc_enet_open(struct net_device *dev);
static int scc_enet_start_xmit(struct sk_buff *skb, struct net_device *dev);
static int scc_enet_rx(struct net_device *dev);
static irqreturn_t scc_enet_interrupt(int irq, void *dev_id);
static int scc_enet_close(struct net_device *dev);
static struct net_device_stats *scc_enet_get_stats(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
/* These will be configurable for the SCC choice.
 */
#define CPM_ENET_BLOCK  CPM_CR_SCC1_SBLOCK
#define CPM_ENET_PAGE   CPM_CR_SCC1_PAGE
#define PROFF_ENET      PROFF_SCC1
#define SCC_ENET        0
#define SIU_INT_ENET    SIU_INT_SCC1
/* These are both board and SCC dependent....
 */
#define PD_ENET_RXD     ((uint)0x00000001)
#define PD_ENET_TXD     ((uint)0x00000002)
#define PD_ENET_TENA    ((uint)0x00000004)
#define PC_ENET_RENA    ((uint)0x00020000)
#define PC_ENET_CLSN    ((uint)0x00000004)
#define PC_ENET_TXCLK   ((uint)0x00000800)
#define PC_ENET_RXCLK   ((uint)0x00000400)
#define CMX_CLK_ROUTE   ((uint)0x25000000)
#define CMX_CLK_MASK    ((uint)0xff000000)
/* Specific to a board.
 */
#define PC_EST8260_ENET_LOOPBACK        ((uint)0x80000000)
#define PC_EST8260_ENET_SQE             ((uint)0x40000000)
#define PC_EST8260_ENET_NOTFD           ((uint)0x20000000)
static int
scc_enet_open(struct net_device *dev)
{
        /* I should reset the ring buffers here, but I don't yet know
         * a simple way to do that.
         */
        netif_start_queue(dev);
        return 0;                               /* Always succeed */
}
static int
scc_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct scc_enet_private *cep = (struct scc_enet_private *)dev->priv;
        volatile cbd_t *bdp;

        /* Fill in a Tx ring entry */
        bdp = cep->cur_tx;

#ifndef final_version
        if (bdp->cbd_sc & BD_ENET_TX_READY) {
                /* Ooops.  All transmit buffers are full.  Bail out.
                 * This should not happen, since cep->tx_full should be set.
                 */
                printk("%s: tx queue full!\n", dev->name);
                return 1;
        }
#endif

        /* Clear all of the status flags.
         */
        bdp->cbd_sc &= ~BD_ENET_TX_STATS;

        /* If the frame is short, tell CPM to pad it.
         */
        if (skb->len <= ETH_ZLEN)
                bdp->cbd_sc |= BD_ENET_TX_PAD;
        else
                bdp->cbd_sc &= ~BD_ENET_TX_PAD;

        /* Set buffer length and buffer pointer.
         */
        bdp->cbd_datlen = skb->len;
        bdp->cbd_bufaddr = __pa(skb->data);

        /* Save skb pointer.
         */
        cep->tx_skbuff[cep->skb_cur] = skb;

        cep->stats.tx_bytes += skb->len;
        cep->skb_cur = (cep->skb_cur + 1) & TX_RING_MOD_MASK;

        spin_lock_irq(&cep->lock);

        /* Send it on its way.  Tell CPM it's ready, interrupt when done,
         * it's the last BD of the frame, and to put the CRC on the end.
         */
        bdp->cbd_sc |= (BD_ENET_TX_READY | BD_ENET_TX_INTR |
                        BD_ENET_TX_LAST | BD_ENET_TX_TC);

        dev->trans_start = jiffies;

        /* If this was the last BD in the ring, start at the beginning again.
         */
        if (bdp->cbd_sc & BD_ENET_TX_WRAP)
                bdp = cep->tx_bd_base;
        else
                bdp++;

        if (bdp->cbd_sc & BD_ENET_TX_READY) {
                netif_stop_queue(dev);
                cep->tx_full = 1;
        }

        cep->cur_tx = (cbd_t *)bdp;

        spin_unlock_irq(&cep->lock);

        return 0;
}
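
/* Note on the handoff above: the CPM's DMA engine works with physical
 * addresses, which is why cbd_bufaddr is loaded with __pa(skb->data)
 * rather than a kernel virtual address; the receive path below undoes
 * this with __va() before copying the frame into an skb.
 */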
static void
scc_enet_timeout(struct net_device *dev)
{
        struct scc_enet_private *cep = (struct scc_enet_private *)dev->priv;

        printk("%s: transmit timed out.\n", dev->name);
        cep->stats.tx_errors++;
#ifndef final_version
        {
                int i;
                cbd_t *bdp;

                printk(" Ring data dump: cur_tx %p%s cur_rx %p.\n",
                       cep->cur_tx, cep->tx_full ? " (full)" : "",
                       cep->cur_rx);
                bdp = cep->tx_bd_base;
                printk(" Tx @base %p :\n", bdp);
                for (i = 0; i < TX_RING_SIZE; i++, bdp++)
                        printk("%04x %04x %08x\n",
                               bdp->cbd_sc, bdp->cbd_datlen, bdp->cbd_bufaddr);
                bdp = cep->rx_bd_base;
                printk(" Rx @base %p :\n", bdp);
                for (i = 0; i < RX_RING_SIZE; i++, bdp++)
                        printk("%04x %04x %08x\n",
                               bdp->cbd_sc, bdp->cbd_datlen, bdp->cbd_bufaddr);
        }
#endif
        if (!cep->tx_full)
                netif_wake_queue(dev);
}
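
/* A quick map of the SCCE event bits the handler below deals with, as
 * used in this file (the bit definitions themselves live in the CPM
 * headers):
 *      SCCE_ENET_RXF - a complete frame has been received
 *      SCCE_ENET_TXB - a transmit buffer has been processed
 *      SCCE_ENET_TXE - a transmit error occurred (may need a restart)
 *      SCCE_ENET_BSY - a frame arrived but no receive buffer was empty
 */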
/* The interrupt handler.
 * This is called from the CPM handler, not the MPC core interrupt.
 */
static irqreturn_t
scc_enet_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        volatile struct scc_enet_private *cep;
        volatile cbd_t *bdp;
        ushort int_events;
        int must_restart;

        cep = (struct scc_enet_private *)dev->priv;

        /* Get the interrupt events that caused us to be here.
         */
        int_events = cep->sccp->scc_scce;
        cep->sccp->scc_scce = int_events;
        must_restart = 0;

        /* Handle receive event in its own function.
         */
        if (int_events & SCCE_ENET_RXF)
                scc_enet_rx(dev);

        /* Check for a transmit error.  The manual is a little unclear
         * about this, so I keep the debug code here until I get it
         * figured out.  It appears that if TXE is set, then TXB is not
         * set.  However, if carrier sense is lost during frame
         * transmission, the TXE bit is set, "and continues the buffer
         * transmission normally."  I don't know if "normally" implies
         * TXB is set when the buffer descriptor is closed.....trial
         * and error :-).
         */

        /* Transmit OK, or non-fatal error.  Update the buffer descriptors.
         */
        if (int_events & (SCCE_ENET_TXE | SCCE_ENET_TXB)) {
                spin_lock(&cep->lock);
                bdp = cep->dirty_tx;
                while ((bdp->cbd_sc & BD_ENET_TX_READY) == 0) {
                        if ((bdp == cep->cur_tx) && (cep->tx_full == 0))
                                break;

                        if (bdp->cbd_sc & BD_ENET_TX_HB)   /* No heartbeat */
                                cep->stats.tx_heartbeat_errors++;
                        if (bdp->cbd_sc & BD_ENET_TX_LC)   /* Late collision */
                                cep->stats.tx_window_errors++;
                        if (bdp->cbd_sc & BD_ENET_TX_RL)   /* Retrans limit */
                                cep->stats.tx_aborted_errors++;
                        if (bdp->cbd_sc & BD_ENET_TX_UN)   /* Underrun */
                                cep->stats.tx_fifo_errors++;
                        if (bdp->cbd_sc & BD_ENET_TX_CSL)  /* Carrier lost */
                                cep->stats.tx_carrier_errors++;

                        /* No heartbeat or Lost carrier are not really bad errors.
                         * The others require a restart transmit command.
                         */
                        if (bdp->cbd_sc &
                            (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
                                must_restart = 1;
                                cep->stats.tx_errors++;
                        }

                        cep->stats.tx_packets++;

                        /* Deferred means some collisions occurred during transmit,
                         * but we eventually sent the packet OK.
                         */
                        if (bdp->cbd_sc & BD_ENET_TX_DEF)
                                cep->stats.collisions++;

                        /* Free the sk buffer associated with this last transmit.
                         */
                        dev_kfree_skb_irq(cep->tx_skbuff[cep->skb_dirty]);
                        cep->skb_dirty = (cep->skb_dirty + 1) & TX_RING_MOD_MASK;

                        /* Update pointer to next buffer descriptor to be transmitted.
                         */
                        if (bdp->cbd_sc & BD_ENET_TX_WRAP)
                                bdp = cep->tx_bd_base;
                        else
                                bdp++;

                        /* I don't know if we can be held off from processing these
                         * interrupts for more than one frame time.  I really hope
                         * not.  In such a case, we would now want to check the
                         * currently available BD (cur_tx) and determine if any
                         * buffers between the dirty_tx and cur_tx have also been
                         * sent.  We would want to process anything in between that
                         * does not have BD_ENET_TX_READY set.
                         */

                        /* Since we have freed up a buffer, the ring is no longer
                         * full.
                         */
                        if (cep->tx_full) {
                                cep->tx_full = 0;
                                if (netif_queue_stopped(dev)) {
                                        netif_wake_queue(dev);
                                }
                        }

                        cep->dirty_tx = (cbd_t *)bdp;
                }

                if (must_restart) {
                        volatile cpm_cpm2_t *cp;

                        /* Some transmit errors cause the transmitter to shut
                         * down.  We now issue a restart transmit.  Since the
                         * errors close the BD and update the pointers, the restart
                         * _should_ pick up without having to reset any of our
                         * pointers either.
                         */
                        cp = cpmp;
                        cp->cp_cpcr = mk_cr_cmd(CPM_ENET_PAGE, CPM_ENET_BLOCK, 0,
                                        CPM_CR_RESTART_TX) | CPM_CR_FLG;
                        while (cp->cp_cpcr & CPM_CR_FLG);
                }

                spin_unlock(&cep->lock);
        }

        /* Check for receive busy, i.e. packets coming but no place to
         * put them.  This "can't happen" because the receive interrupt
         * is tossing previous frames.
         */
        if (int_events & SCCE_ENET_BSY) {
                cep->stats.rx_dropped++;
                printk("SCC ENET: BSY can't happen.\n");
        }

        return IRQ_HANDLED;
}
/* During a receive, the cur_rx points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 */
static int
scc_enet_rx(struct net_device *dev)
{
        struct scc_enet_private *cep;
        volatile cbd_t *bdp;
        struct sk_buff *skb;
        ushort pkt_len;

        cep = (struct scc_enet_private *)dev->priv;

        /* First, grab all of the stats for the incoming packet.
         * These get messed up if we get called due to a busy condition.
         */
        bdp = cep->cur_rx;

        for (;;) {
                if (bdp->cbd_sc & BD_ENET_RX_EMPTY)
                        break;

#ifndef final_version
                /* Since we have allocated space to hold a complete frame, both
                 * the first and last indicators should be set.
                 */
                if ((bdp->cbd_sc & (BD_ENET_RX_FIRST | BD_ENET_RX_LAST)) !=
                    (BD_ENET_RX_FIRST | BD_ENET_RX_LAST))
                        printk("CPM ENET: rcv is not first+last\n");
#endif

                /* Frame too long or too short.
                 */
                if (bdp->cbd_sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
                        cep->stats.rx_length_errors++;
                if (bdp->cbd_sc & BD_ENET_RX_NO)        /* Frame alignment */
                        cep->stats.rx_frame_errors++;
                if (bdp->cbd_sc & BD_ENET_RX_CR)        /* CRC Error */
                        cep->stats.rx_crc_errors++;
                if (bdp->cbd_sc & BD_ENET_RX_OV)        /* FIFO overrun */
                        cep->stats.rx_crc_errors++;

                /* Report late collisions as a frame error.
                 * On this error, the BD is closed, but we don't know what we
                 * have in the buffer.  So, just drop this frame on the floor.
                 */
                if (bdp->cbd_sc & BD_ENET_RX_CL) {
                        cep->stats.rx_frame_errors++;
                } else {
                        /* Process the incoming frame.
                         */
                        cep->stats.rx_packets++;
                        pkt_len = bdp->cbd_datlen;
                        cep->stats.rx_bytes += pkt_len;

                        /* This does 16 byte alignment, much more than we need.
                         * The packet length includes FCS, but we don't want to
                         * include that when passing upstream as it messes up
                         * bridging applications.
                         */
                        skb = dev_alloc_skb(pkt_len - 4);

                        if (skb == NULL) {
                                printk("%s: Memory squeeze, dropping packet.\n",
                                       dev->name);
                                cep->stats.rx_dropped++;
                        } else {
                                skb_put(skb, pkt_len - 4);      /* Make room */
                                skb_copy_to_linear_data(skb,
                                        (unsigned char *)__va(bdp->cbd_bufaddr),
                                        pkt_len - 4);
                                skb->protocol = eth_type_trans(skb, dev);
                                netif_rx(skb);
                        }
                }

                /* Clear the status flags for this buffer.
                 */
                bdp->cbd_sc &= ~BD_ENET_RX_STATS;

                /* Mark the buffer empty.
                 */
                bdp->cbd_sc |= BD_ENET_RX_EMPTY;

                /* Update BD pointer to next entry.
                 */
                if (bdp->cbd_sc & BD_ENET_RX_WRAP)
                        bdp = cep->rx_bd_base;
                else
                        bdp++;
        }

        cep->cur_rx = (cbd_t *)bdp;

        return 0;
}
static int
scc_enet_close(struct net_device *dev)
{
        /* Don't know what to do yet.
         */
        netif_stop_queue(dev);

        return 0;
}
static struct net_device_stats *scc_enet_get_stats(struct net_device *dev)
{
        struct scc_enet_private *cep = (struct scc_enet_private *)dev->priv;

        return &cep->stats;
}
/* Set or clear the multicast filter for this adaptor.
 * Skeleton taken from sunlance driver.
 * The CPM Ethernet implementation allows Multicast as well as individual
 * MAC address filtering.  Some of the drivers check to make sure it is
 * a group multicast address, and discard those that are not.  I guess I
 * will do the same for now, but just remove the test if you want
 * individual filtering as well (do the upper net layers want or support
 * this kind of feature?).
 */
static void set_multicast_list(struct net_device *dev)
{
        struct scc_enet_private *cep;
        struct dev_mc_list *dmi;
        u_char *mcptr, *tdptr;
        volatile scc_enet_t *ep;
        int i, j;

        cep = (struct scc_enet_private *)dev->priv;

        /* Get pointer to SCC area in parameter RAM.
         */
        ep = (scc_enet_t *)dev->base_addr;

        if (dev->flags & IFF_PROMISC) {
                /* Log any net taps. */
                printk("%s: Promiscuous mode enabled.\n", dev->name);
                cep->sccp->scc_psmr |= SCC_PSMR_PRO;
        } else {
                cep->sccp->scc_psmr &= ~SCC_PSMR_PRO;

                if (dev->flags & IFF_ALLMULTI) {
                        /* Catch all multicast addresses, so set the
                         * filter to all 1's.
                         */
                        ep->sen_gaddr1 = 0xffff;
                        ep->sen_gaddr2 = 0xffff;
                        ep->sen_gaddr3 = 0xffff;
                        ep->sen_gaddr4 = 0xffff;
                } else {
                        /* Clear filter and add the addresses in the list.
                         */
                        ep->sen_gaddr1 = 0;
                        ep->sen_gaddr2 = 0;
                        ep->sen_gaddr3 = 0;
                        ep->sen_gaddr4 = 0;

                        dmi = dev->mc_list;

                        for (i = 0; i < dev->mc_count; i++, dmi = dmi->next) {
                                /* Only support group multicast for now.
                                 */
                                if (!(dmi->dmi_addr[0] & 1))
                                        continue;

                                /* The address in dmi_addr is LSB first,
                                 * and taddr is MSB first.  We have to
                                 * copy bytes MSB first from dmi_addr.
                                 */
                                mcptr = (u_char *)dmi->dmi_addr + 5;
                                tdptr = (u_char *)&ep->sen_taddrh;
                                for (j = 0; j < 6; j++)
                                        *tdptr++ = *mcptr--;

                                /* Ask CPM to run CRC and set bit in
                                 * filter mask.
                                 */
                                cpmp->cp_cpcr = mk_cr_cmd(CPM_ENET_PAGE,
                                                CPM_ENET_BLOCK, 0,
                                                CPM_CR_SET_GADDR) | CPM_CR_FLG;
                                /* this delay is necessary here -- Cort */
                                udelay(10);
                                while (cpmp->cp_cpcr & CPM_CR_FLG);
                        }
                }
        }
}
/* Initialize the CPM Ethernet on SCC.
 */
static int __init
scc_enet_init(void)
{
        struct net_device *dev;
        struct scc_enet_private *cep;
        int i, j, err;
        uint dp_offset;
        unsigned char *eap;
        unsigned long mem_addr;
        bd_t *bd;
        volatile cbd_t *bdp;
        volatile cpm_cpm2_t *cp;
        volatile scc_t *sccp;
        volatile scc_enet_t *ep;
        volatile cpm2_map_t *immap;
        volatile iop_cpm2_t *io;

        cp = cpmp;      /* Get pointer to Communication Processor */

        immap = (cpm2_map_t *)CPM_MAP_ADDR;     /* and to internal registers */
        io = &immap->im_ioport;

        bd = (bd_t *)__res;     /* Board information from the bootloader */
        /* Create an Ethernet device instance.
         */
        dev = alloc_etherdev(sizeof(*cep));
        if (!dev)
                return -ENOMEM;

        cep = dev->priv;
        spin_lock_init(&cep->lock);
        /* Get pointer to SCC area in parameter RAM.
         */
        ep = (scc_enet_t *)(&immap->im_dprambase[PROFF_ENET]);

        /* And another to the SCC register area.
         */
        sccp = (volatile scc_t *)(&immap->im_scc[SCC_ENET]);
        cep->sccp = (scc_t *)sccp;              /* Keep the pointer handy */

        /* Disable receive and transmit in case someone left it running.
         */
        sccp->scc_gsmrl &= ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT);
        /* Configure port C and D pins for SCC Ethernet.  This
         * won't work for all SCC possibilities....it will be
         * board/port specific.
         */
        io->iop_pparc |=
                (PC_ENET_RENA | PC_ENET_CLSN | PC_ENET_TXCLK | PC_ENET_RXCLK);
        io->iop_pdirc &=
                ~(PC_ENET_RENA | PC_ENET_CLSN | PC_ENET_TXCLK | PC_ENET_RXCLK);
        io->iop_psorc &=
                ~(PC_ENET_RENA | PC_ENET_TXCLK | PC_ENET_RXCLK);
        io->iop_psorc |= PC_ENET_CLSN;
        io->iop_ppard |= (PD_ENET_RXD | PD_ENET_TXD | PD_ENET_TENA);
        io->iop_pdird |= (PD_ENET_TXD | PD_ENET_TENA);
        io->iop_pdird &= ~PD_ENET_RXD;
        io->iop_psord |= PD_ENET_TXD;
        io->iop_psord &= ~(PD_ENET_RXD | PD_ENET_TENA);
        /* Configure Serial Interface clock routing.
         * First, clear all SCC bits to zero, then set the ones we want.
         */
        immap->im_cpmux.cmx_scr &= ~CMX_CLK_MASK;
        immap->im_cpmux.cmx_scr |= CMX_CLK_ROUTE;
        /* Allocate space for the buffer descriptors in the DP ram.
         * These are relative offsets in the DP ram address space.
         * Initialize base addresses for the buffer descriptors.
         */
        dp_offset = cpm_dpalloc(sizeof(cbd_t) * RX_RING_SIZE, 8);
        ep->sen_genscc.scc_rbase = dp_offset;
        cep->rx_bd_base = (cbd_t *)cpm_dpram_addr(dp_offset);

        dp_offset = cpm_dpalloc(sizeof(cbd_t) * TX_RING_SIZE, 8);
        ep->sen_genscc.scc_tbase = dp_offset;
        cep->tx_bd_base = (cbd_t *)cpm_dpram_addr(dp_offset);

        cep->dirty_tx = cep->cur_tx = cep->tx_bd_base;
        cep->cur_rx = cep->rx_bd_base;
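
        /* Note: the alignment argument of 8 passed to cpm_dpalloc() above
         * matches sizeof(cbd_t) (two ushorts plus a uint), so every
         * descriptor stays naturally aligned in the dual port RAM.
         */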
        ep->sen_genscc.scc_rfcr = CPMFCR_GBL | CPMFCR_EB;
        ep->sen_genscc.scc_tfcr = CPMFCR_GBL | CPMFCR_EB;

        /* Set maximum bytes per receive buffer.
         * This appears to be an Ethernet frame size, not the buffer
         * fragment size.  It must be a multiple of four.
         */
        ep->sen_genscc.scc_mrblr = PKT_MAXBLR_SIZE;

        /* Set CRC preset and mask.
         */
        ep->sen_cpres = 0xffffffff;
        ep->sen_cmask = 0xdebb20e3;

        ep->sen_crcec = 0;      /* CRC Error counter */
        ep->sen_alec = 0;       /* alignment error counter */
        ep->sen_disfc = 0;      /* discard frame counter */

        ep->sen_pads = 0x8888;  /* Tx short frame pad character */
        ep->sen_retlim = 15;    /* Retry limit threshold */

        ep->sen_maxflr = PKT_MAXBUF_SIZE;   /* maximum frame length register */
        ep->sen_minflr = PKT_MINBUF_SIZE;   /* minimum frame length register */

        ep->sen_maxd1 = PKT_MAXBLR_SIZE;    /* maximum DMA1 length */
        ep->sen_maxd2 = PKT_MAXBLR_SIZE;    /* maximum DMA2 length */
        /* Clear hash tables.
         */
        ep->sen_gaddr1 = 0;
        ep->sen_gaddr2 = 0;
        ep->sen_gaddr3 = 0;
        ep->sen_gaddr4 = 0;
        ep->sen_iaddr1 = 0;
        ep->sen_iaddr2 = 0;
        ep->sen_iaddr3 = 0;
        ep->sen_iaddr4 = 0;

        /* Set Ethernet station address.
         *
         * This is supplied in the board information structure, so we
         * copy that into the controller.
         */
        eap = (unsigned char *)&(ep->sen_paddrh);
        for (i = 5; i >= 0; i--)
                *eap++ = dev->dev_addr[i] = bd->bi_enetaddr[i];

        ep->sen_pper = 0;       /* 'cause the book says so */
        ep->sen_taddrl = 0;     /* temp address (LSB) */
        ep->sen_taddrm = 0;
        ep->sen_taddrh = 0;     /* temp address (MSB) */
        /* Now allocate the host memory pages and initialize the
         * buffer descriptors.
         */
        bdp = cep->tx_bd_base;
        for (i = 0; i < TX_RING_SIZE; i++) {

                /* Initialize the BD for every fragment in the page.
                 */
                bdp->cbd_sc = 0;
                bdp->cbd_bufaddr = 0;
                bdp++;
        }

        /* Set the last buffer to wrap.
         */
        bdp--;
        bdp->cbd_sc |= BD_SC_WRAP;
        bdp = cep->rx_bd_base;
        for (i = 0; i < CPM_ENET_RX_PAGES; i++) {

                /* Allocate a page.
                 */
                mem_addr = __get_free_page(GFP_KERNEL);
                /* BUG: no check for failure */

                /* Initialize the BD for every fragment in the page.
                 */
                for (j = 0; j < CPM_ENET_RX_FRPPG; j++) {
                        bdp->cbd_sc = BD_ENET_RX_EMPTY | BD_ENET_RX_INTR;
                        bdp->cbd_bufaddr = __pa(mem_addr);
                        mem_addr += CPM_ENET_RX_FRSIZE;
                        bdp++;
                }
        }

        /* Set the last buffer to wrap.
         */
        bdp--;
        bdp->cbd_sc |= BD_SC_WRAP;
        /* Let's re-initialize the channel now.  We have to do it later
         * than the manual describes because we have just now finished
         * the BD initialization.
         */
        cpmp->cp_cpcr = mk_cr_cmd(CPM_ENET_PAGE, CPM_ENET_BLOCK, 0,
                        CPM_CR_INIT_TRX) | CPM_CR_FLG;
        while (cp->cp_cpcr & CPM_CR_FLG);

        cep->skb_cur = cep->skb_dirty = 0;
        sccp->scc_scce = 0xffff;        /* Clear any pending events */

        /* Enable interrupts for transmit error, complete frame
         * received, and any transmit buffer we have also set the
         * interrupt flag.
         */
        sccp->scc_sccm = (SCCE_ENET_TXE | SCCE_ENET_RXF | SCCE_ENET_TXB);
        /* Install our interrupt handler.
         */
        request_irq(SIU_INT_ENET, scc_enet_interrupt, 0, "enet", dev);
        /* BUG: no check for failure */
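
        /* A hedged sketch of the missing check noted above (not in the
         * original driver):
         *
         *      if (request_irq(SIU_INT_ENET, scc_enet_interrupt, 0,
         *                      "enet", dev) != 0)
         *              printk("%s: could not allocate SCC Ethernet IRQ\n",
         *                     dev->name);
         */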
        /* Set GSMR_H to enable all normal operating modes.
         * Set GSMR_L to enable Ethernet to MC68160.
         */
        sccp->scc_gsmrh = 0;
        sccp->scc_gsmrl = (SCC_GSMRL_TCI | SCC_GSMRL_TPL_48 |
                           SCC_GSMRL_TPP_10 | SCC_GSMRL_MODE_ENET);
        /* Set sync/delimiters.
         */
        sccp->scc_dsr = 0xd555;

        /* Set processing mode.  Use Ethernet CRC, catch broadcast, and
         * start frame search 22 bit times after RENA.
         */
        sccp->scc_psmr = (SCC_PSMR_ENCRC | SCC_PSMR_NIB22);
        /* It is now OK to enable the Ethernet transmitter.
         * Unfortunately, there are board implementation differences here.
         */
        io->iop_pparc &= ~(PC_EST8260_ENET_LOOPBACK |
                           PC_EST8260_ENET_SQE | PC_EST8260_ENET_NOTFD);
        io->iop_psorc &= ~(PC_EST8260_ENET_LOOPBACK |
                           PC_EST8260_ENET_SQE | PC_EST8260_ENET_NOTFD);
        io->iop_pdirc |= (PC_EST8260_ENET_LOOPBACK |
                          PC_EST8260_ENET_SQE | PC_EST8260_ENET_NOTFD);
        io->iop_pdatc &= ~(PC_EST8260_ENET_LOOPBACK | PC_EST8260_ENET_SQE);
        io->iop_pdatc |= PC_EST8260_ENET_NOTFD;

        dev->base_addr = (unsigned long)ep;
        /* The CPM Ethernet specific entries in the device structure. */
        dev->open = scc_enet_open;
        dev->hard_start_xmit = scc_enet_start_xmit;
        dev->tx_timeout = scc_enet_timeout;
        dev->watchdog_timeo = TX_TIMEOUT;
        dev->stop = scc_enet_close;
        dev->get_stats = scc_enet_get_stats;
        dev->set_multicast_list = set_multicast_list;
        /* And last, enable the transmit and receive processing.
         */
        sccp->scc_gsmrl |= (SCC_GSMRL_ENR | SCC_GSMRL_ENT);

        err = register_netdev(dev);
        if (err) {
                free_netdev(dev);
                return err;
        }

        printk("%s: SCC ENET Version 0.1, ", dev->name);
        for (i = 0; i < 5; i++)
                printk("%02x:", dev->dev_addr[i]);
        printk("%02x\n", dev->dev_addr[5]);

        return 0;
}

module_init(scc_enet_init);
);