1 /* *********************************************************************
2 * Broadcom Common Firmware Environment (CFE)
3 *
4 * BCM5700/Tigon3 (10/100/1000 Ethernet MAC) driver File: dev_bcm5700.c
5 *
6 * Author: Ed Satterthwaite
7 *
8 *********************************************************************
10 * Copyright 2000,2001,2002,2003
11 * Broadcom Corporation. All rights reserved.
13 * This software is furnished under license and may be used and
14 * copied only in accordance with the following terms and
15 * conditions. Subject to these conditions, you may download,
16 * copy, install, use, modify and distribute modified or unmodified
17 * copies of this software in source and/or binary form. No title
18 * or ownership is transferred hereby.
20 * 1) Any source code used, modified or distributed must reproduce
21 * and retain this copyright notice and list of conditions
22 * as they appear in the source file.
24 * 2) No right is granted to use any trade name, trademark, or
25 * logo of Broadcom Corporation. The "Broadcom Corporation"
26 * name may not be used to endorse or promote products derived
27 * from this software without the prior written permission of
28 * Broadcom Corporation.
30 * 3) THIS SOFTWARE IS PROVIDED "AS-IS" AND ANY EXPRESS OR
31 * IMPLIED WARRANTIES, INCLUDING BUT NOT LIMITED TO, ANY IMPLIED
32 * WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
33 * PURPOSE, OR NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT
34 * SHALL BROADCOM BE LIABLE FOR ANY DAMAGES WHATSOEVER, AND IN
35 * PARTICULAR, BROADCOM SHALL NOT BE LIABLE FOR DIRECT, INDIRECT,
36 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
37 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
38 * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
39 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
40 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
41 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE), EVEN IF ADVISED OF
42 * THE POSSIBILITY OF SUCH DAMAGE.
43 ********************************************************************* */
45 #include "sbmips.h"
47 #ifndef _SB_MAKE64
48 #define _SB_MAKE64(x) ((uint64_t)(x))
49 #endif
50 #ifndef _SB_MAKEMASK1
51 #define _SB_MAKEMASK1(n) (_SB_MAKE64(1) << _SB_MAKE64(n))
52 #endif
54 #include "lib_types.h"
55 #include "lib_physio.h"
56 #include "lib_malloc.h"
57 #include "lib_string.h"
58 #include "lib_printf.h"
59 #include "lib_queue.h"
61 #include "cfe_iocb.h"
62 #include "cfe_device.h"
63 #include "cfe_ioctl.h"
64 #include "cfe_timer.h"
65 #include "cfe_error.h"
66 #include "cfe_irq.h"
68 #include "pcivar.h"
69 #include "pcireg.h"
71 #include "bcm5700.h"
72 #include "mii.h"
74 #include "bsp_config.h"
76 #include "proto/ethernet.h"
77 #include "bcmdevs.h"
78 #include "bcmutils.h"
79 #include "bcmnvram.h"
80 #include "hndsoc.h"
81 #include "siutils.h"
82 #include "hndgige.h"
83 #include "bcmrobo.h"
85 static int sigige = -1;
87 /* This is a driver for the Broadcom 570x ("Tigon 3") 10/100/1000 MAC.
88 Currently, the 5700, 5701, 5703C, 5704C and 5705 have been tested.
89 Only 10/100/1000 BASE-T PHYs are supported; variants with SerDes
90 PHYs are not supported.
92 Reference:
93 Host Programmer Interface Specification for the BCM570X Family
94 of Highly-Integrated Media Access Controllers, 570X-PG106-R.
95 Broadcom Corp., 16215 Alton Parkway, Irvine CA, 09/27/02
97 This driver takes advantage of DMA coherence in systems that
98 support it (e.g., SB1250). For systems without coherent DMA (e.g.,
99 BCM47xx SOCs), descriptor and packet buffer memory is explicitly
100 flushed.
102 The driver prefers "preserve bit lanes" mode for big-endian
103 systems that provide the option, but it can use "preserve byte
104 lanes" as well.
106 Note that the 5705 does not fully map all address ranges. Per
107 the manual, reads and writes of the unmapped regions are permitted
108 and do not fault; however, it apparently has some poisoned registers,
109 at least in early revs, that should not be touched. See the
110 conditionals in the code. */
112 /* PIOSWAP controls whether word-swapping takes place for transactions
113 in which the 570x is the target device. In theory, either value
114 should work (with access macros adjusted as below) and it should be
115 set to be consistent with the settings for 570x as initiator.
116 Empirically, however, some combinations only work with no swap.
117 For big-endian systems:
119 SWAP=0 SWAP=1
120 5700 32 PCI OK OK
121 5700 64 Sturgeon OK OK
122 5701-32 32 PCI OK OK
123 5701-32 64 Sturgeon OK OK
124 5701-32 64 Golem OK OK
125 5701-64 64 Sturgeon OK OK
126 5701-64 64 Golem OK FAIL
127 5705 32 PCI OK OK
128 5705 64 Sturgeon (OK)* FAIL
129 5705 64 Golem OK OK
131 For little-endian systems, only SWAP=1 appears to work.
133 * PCI status/interrupt ordering problem under load. */
135 #if __MIPSEL
136 #define PIOSWAP 1
137 #else
138 #define PIOSWAP 0
139 #endif
141 #ifndef T3_DEBUG
142 #define T3_DEBUG 0
143 #endif
145 #ifndef T3_BRINGUP
146 #define T3_BRINGUP 0
147 #endif
150 /* Broadcom recommends using PHY interrupts instead of autopolling,
151 but I haven't made it work yet. */
152 #define T3_AUTOPOLL 1
154 /* Set IPOLL to drive processing through the interrupt dispatcher.
155 Set XPOLL to drive processing by an external polling agent. One
156 must be set; setting both is ok. */
158 #ifndef IPOLL
159 #define IPOLL 0
160 #endif
161 #ifndef XPOLL
162 #define XPOLL 1
163 #endif
165 #define ENET_ADDR_LEN 6 /* size of an ethernet address */
166 #define MIN_ETHER_PACK 64 /* min size of a packet */
167 #define MAX_ETHER_PACK 1518 /* max size of a packet */
168 #define VLAN_TAG_LEN 4 /* VLAN type plus tag */
169 #define CRC_SIZE 4 /* size of CRC field */
171 /* Packet buffers. For the Tigon 3, packet buffer alignment is
172 arbitrary and can be to any byte boundary. We would like it
173 aligned to a cache line boundary for performance, although there is
174 a trade-off with IP/TCP header alignment. Jumbo frames are not
175 currently supported. */
177 #define ETH_PKTBUF_LEN (((MAX_ETHER_PACK+31)/32)*32)
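/* For reference: with MAX_ETHER_PACK = 1518, the expression above rounds up
   to ((1518+31)/32)*32 = 48*32 = 1536 bytes, i.e. the next multiple of a
   32-byte cache line. */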
179 #if __long64
180 typedef struct eth_pkt_s {
181 queue_t next; /* 16 */
182 uint8_t *buffer; /* 8 */
183 uint32_t flags; /* 4 */
184 int32_t length; /* 4 */
185 uint8_t data[ETH_PKTBUF_LEN];
186 } eth_pkt_t;
187 #else
188 typedef struct eth_pkt_s {
189 queue_t next; /* 8 */
190 uint8_t *buffer; /* 4 */
191 uint32_t flags; /* 4 */
192 int32_t length; /* 4 */
193 uint32_t unused[3]; /* 12 */
194 uint8_t data[ETH_PKTBUF_LEN];
195 } eth_pkt_t;
196 #endif
198 #define CACHE_ALIGN 32
199 #define ETH_PKTBUF_LINES ((sizeof(eth_pkt_t) + (CACHE_ALIGN-1))/CACHE_ALIGN)
200 #define ETH_PKTBUF_SIZE (ETH_PKTBUF_LINES*CACHE_ALIGN)
201 #define ETH_PKTBUF_OFFSET (offsetof(eth_pkt_t, data))
203 #define ETH_PKT_BASE(data) ((eth_pkt_t *)((data) - ETH_PKTBUF_OFFSET))
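/* Example of how the completion paths below recover the owning packet from a
   buffer descriptor's PCI address (see t3_procrxring/t3_proctxring; "bd" here
   stands for any receive or send buffer descriptor):

       eth_pkt_t *pkt = ETH_PKT_BASE(PCI_TO_PTR(bd->bufptr_lo));
*/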
205 static void
206 show_packet(char c, eth_pkt_t *pkt)
208 int i;
209 int n = (pkt->length < 32 ? pkt->length : 32);
211 xprintf("%c[%4d]:", c, (int)pkt->length);
212 for (i = 0; i < n; i++) {
213 if (i % 4 == 0)
214 xprintf(" ");
215 xprintf("%02x", pkt->buffer[i]);
217 xprintf("\n");
221 static void t3_ether_probe(cfe_driver_t *drv,
222 unsigned long probe_a, unsigned long probe_b,
223 void *probe_ptr);
227 /* Chip documentation numbers the rings with 1-origin. */
229 #define RI(n) ((n)-1)
231 /* BCM570x Ring Sizes (no external memory). Pages 97-98 */
233 #define TXP_MAX_RINGS 16
234 #define TXP_INTERNAL_RINGS 4
235 #define TXP_RING_ENTRIES 512
237 #define RXP_STD_ENTRIES 512
239 #define RXR_MAX_RINGS 16
240 #define RXR_RING_ENTRIES 1024
242 #define RXR_MAX_RINGS_05 1
243 #define RXR_RING_ENTRIES_05 512
246 /* BCM570x Send Buffer Descriptors as a struct. Pages 100-101 */
248 typedef struct t3_snd_bd_s {
249 uint32_t bufptr_hi;
250 uint32_t bufptr_lo;
251 #ifdef __MIPSEB
252 uint16_t length;
253 uint16_t flags;
254 uint16_t pad;
255 uint16_t vlan_tag;
256 #elif __MIPSEL
257 uint16_t flags;
258 uint16_t length;
259 uint16_t vlan_tag;
260 uint16_t pad;
261 #else
262 #error "bcm5700: endian not set"
263 #endif
264 } t3_snd_bd_t;
266 #define SND_BD_SIZE 16
268 #define TX_FLAG_TCP_CKSUM 0x0001
269 #define TX_FLAG_IP_CKSUM 0x0002
270 #define TX_FLAG_PACKET_END 0x0004
271 #define TX_FLAG_IP_FRAG 0x0008
272 #define TX_FLAG_IP_FRAG_END 0x0010
273 #define TX_FLAG_VLAN_TAG 0x0040
274 #define TX_FLAG_COAL_NOW 0x0080
275 #define TX_FLAG_CPU_PRE_DMA 0x0100
276 #define TX_FLAG_CPU_POST_DMA 0x0200
277 #define TX_FLAG_ADD_SRC 0x1000
278 #define TX_FLAG_SRC_ADDR_SEL 0x6000
279 #define TX_FLAG_NO_CRC 0x8000
281 /* BCM570x Receive Buffer Descriptors as a struct. Pages 105-107 */
283 typedef struct t3_rcv_bd_s {
284 uint32_t bufptr_hi;
285 uint32_t bufptr_lo;
286 #ifdef __MIPSEB
287 uint16_t index;
288 uint16_t length;
289 uint16_t type;
290 uint16_t flags;
291 uint16_t ip_cksum;
292 uint16_t tcp_cksum;
293 uint16_t error_flag;
294 uint16_t vlan_tag;
295 #elif __MIPSEL
296 uint16_t length;
297 uint16_t index;
298 uint16_t flags;
299 uint16_t type;
300 uint16_t tcp_cksum;
301 uint16_t ip_cksum;
302 uint16_t vlan_tag;
303 uint16_t error_flag;
304 #else
305 #error "bcm5700: endian not set"
306 #endif
307 uint32_t pad;
308 uint32_t opaque;
309 } t3_rcv_bd_t;
311 #define RCV_BD_SIZE 32
313 #define RX_FLAG_PACKET_END 0x0004
314 #define RX_FLAG_JUMBO_RING 0x0020
315 #define RX_FLAG_VLAN_TAG 0x0040
316 #define RX_FLAG_ERROR 0x0400
317 #define RX_FLAG_MINI_RING 0x0800
318 #define RX_FLAG_IP_CKSUM 0x1000
319 #define RX_FLAG_TCP_CKSUM 0x2000
320 #define RX_FLAG_IS_TCP 0x4000
322 #define RX_ERR_BAD_CRC 0x0001
323 #define RX_ERR_COLL_DETECT 0x0002
324 #define RX_ERR_LINK_LOST 0x0004
325 #define RX_ERR_PHY_DECODE 0x0008
326 #define RX_ERR_DRIBBLE 0x0010
327 #define RX_ERR_MAC_ABORT 0x0020
328 #define RX_ERR_SHORT_PKT 0x0040
329 #define RX_ERR_TRUNC_NO_RES 0x0080
330 #define RX_ERR_GIANT_PKT 0x0100
332 /* BCM570x Status Block format as a struct (not BCM5705). Pages 110-111. */
334 typedef struct t3_status_s {
335 uint32_t status;
336 uint32_t tag;
337 #ifdef __MIPSEB
338 uint16_t rxc_std_index;
339 uint16_t rxc_jumbo_index;
340 uint16_t reserved2;
341 uint16_t rxc_mini_index;
342 struct {
343 uint16_t send_c;
344 uint16_t return_p;
345 } index [16];
346 #elif __MIPSEL
347 uint16_t rxc_jumbo_index;
348 uint16_t rxc_std_index;
349 uint16_t rxc_mini_index;
350 uint16_t reserved2;
351 struct {
352 uint16_t return_p;
353 uint16_t send_c;
354 } index [16];
355 #else
356 #error "bcm5700: endian not set"
357 #endif
358 } t3_status_t;
360 #define M_STATUS_UPDATED 0x00000001
361 #define M_STATUS_LINKCHNG 0x00000002
362 #define M_STATUS_ERROR 0x00000004
364 /* BCM570x Statistics Block format as a struct. Pages 112-120 */
366 typedef struct t3_stats_s {
367 uint64_t stats[L_MAC_STATS/sizeof(uint64_t)];
368 } t3_stats_t;
370 /* Encoded status transfer block size (32, 64 or 80 bytes). Page 412 */
372 #define STATUS_BLOCK_SIZE(rings) \
373 ((rings) <= 4 ? K_HCM_SBSIZE_32 : \
374 (rings) <= 12 ? K_HCM_SBSIZE_64 : \
375 K_HCM_SBSIZE_80)
377 /* End of 570X defined data structures */
379 /* The maximum supported BD ring index (QOS) for transmit or receive. */
381 #define MAX_RI 1
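/* For reference: with MAX_RI = 1 this driver always falls into the
   "(rings) <= 4" case of STATUS_BLOCK_SIZE above, so t3_init_coalescing
   programs K_HCM_SBSIZE_32, i.e. only the first 32 bytes of the status
   block are transferred (except on the 5700, which does not support
   truncated transfers). */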
384 typedef enum {
385 eth_state_uninit,
386 eth_state_off,
387 eth_state_on,
388 } eth_state_t;
390 typedef struct t3_ether_s {
391 /* status block */
392 volatile t3_status_t *status; /* should be cache-aligned */
394 /* PCI access information */
395 uint32_t regbase;
396 uint32_t membase;
397 uint8_t irq;
398 pcitag_t tag; /* tag for configuration registers */
400 uint8_t hwaddr[6];
401 uint16_t device; /* chip device code */
402 uint8_t revision; /* chip revision */
403 uint16_t asic_revision; /* mask revision */
405 eth_state_t state; /* current state */
406 uint32_t intmask; /* interrupt mask */
408 int linkspeed; /* encodings from cfe_ioctl */
410 /* packet lists */
411 queue_t freelist;
412 uint8_t *pktpool;
413 queue_t rxqueue;
415 /* rings */
416 /* For now, support only the standard Rx Producer Ring */
417 t3_rcv_bd_t *rxp_std; /* Standard Rx Producer Ring */
418 uint32_t rxp_std_index;
419 uint32_t prev_rxp_std_index;
421 /* For now, support only 1 priority */
422 uint32_t rxr_entries;
423 t3_rcv_bd_t *rxr_1; /* Rx Return Ring 1 */
424 uint32_t rxr_1_index;
425 t3_snd_bd_t *txp_1; /* Send Ring 1 */
426 uint32_t txp_1_index;
427 uint32_t txc_1_index;
429 cfe_devctx_t *devctx;
431 /* PHY access */
432 int phy_addr;
433 uint16_t phy_status;
434 uint16_t phy_ability;
435 uint16_t phy_xability;
436 uint32_t phy_vendor;
437 uint16_t phy_device;
439 /* MII polling control */
440 int phy_change;
441 int mii_polling;
443 /* statistics block */
444 volatile t3_stats_t *stats; /* should be cache-aligned */
446 /* additional driver statistics */
447 uint32_t rx_interrupts;
448 uint32_t tx_interrupts;
449 uint32_t bogus_interrupts;
451 /* SB specific fields */
452 si_t *sih;
453 uint32_t siidx;
454 uint32_t flags;
455 #define T3_RGMII_MODE 0x1
456 #define T3_SB_CORE 0x2
457 #define T3_NO_PHY 0x4
458 } t3_ether_t;
461 /* Address mapping macros */
463 #define PCI_TO_PTR(a) (PHYS_TO_K1(a))
464 #define PTR_TO_PCI(x) (K1_TO_PHYS((uint32_t)x))
467 /* Chip access macros */
469 /* These macros attempt to be compatible with match-bits mode,
470 which may put the data and byte masks into the wrong 32-bit word
471 for 64-bit accesses. See the comment above on PIOSWAP.
472 Externally mastered DMA (control and data) uses match-bits and does
473 specify word-swaps when operating big endian. */
475 /* Most registers are 32 bits wide and are accessed by 32-bit
476 transactions. The mailbox registers and on-chip RAM are 64-bits
477 wide but are generally accessed by 32-bit transactions.
478 Furthermore, the documentation is ambiguous about which 32-bits of
479 the mailbox is significant. To localize the potential confusions,
480 we define macros for the 3 different cases. */
482 #define READCSR(sc,csr) phys_read32((sc)->regbase + (csr))
483 #define WRITECSR(sc,csr,val) phys_write32((sc)->regbase + (csr), (val))
485 #if PIOSWAP
486 #define READMBOX(sc,csr) phys_read32((sc)->regbase+((csr)^4))
487 #define WRITEMBOX(sc,csr,val) phys_write32((sc)->regbase+((csr)^4), (val))
489 #define READMEM(sc,csr) phys_read32((sc)->membase+(csr))
490 #define WRITEMEM(sc,csr,val) phys_write32((sc)->membase+(csr), (val))
492 #else
493 #define READMBOX(sc,csr) phys_read32((sc)->regbase+(csr))
494 #define WRITEMBOX(sc,csr,val) phys_write32((sc)->regbase+(csr), (val))
496 #define READMEM(sc,csr) phys_read32((sc)->membase+((csr) ^ 4))
497 #define WRITEMEM(sc,csr,val) phys_write32((sc)->membase+((csr) ^ 4), (val))
499 #endif
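/* Typical usage of the three access families, as seen throughout this driver:
   CSRs are read-modify-written, producer indices are posted via the mailbox
   macros, and on-chip RAM (e.g. the send RCBs) is written through the memory
   window:

       mode = READCSR(sc, R_MEM_MODE);
       mode |= M_MAM_ENABLE;
       WRITECSR(sc, R_MEM_MODE, mode);

       WRITEMBOX(sc, R_SND_BD_PI(1), sc->txp_1_index);
       WRITEMEM(sc, A_SND_RCB(1)+RCB_CTRL, V_RCB_MAXLEN(TXP_RING_ENTRIES));
*/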
502 /* Entry to and exit from critical sections (currently relative to
503 interrupts only, not SMP) */
505 #if CFG_INTERRUPTS
506 #define CS_ENTER(sc) cfe_disable_irq(sc->irq)
507 #define CS_EXIT(sc) cfe_enable_irq(sc->irq)
508 #else
509 #define CS_ENTER(sc) ((void)0)
510 #define CS_EXIT(sc) ((void)0)
511 #endif
514 static void
515 dumpseq(t3_ether_t *sc, int start, int next)
517 int offset, i, j;
518 int columns = 4;
519 int lines = (((next - start)/4 + 1) + 3)/columns;
520 int step = lines*4;
522 offset = start;
523 for (i = 0; i < lines; i++) {
524 xprintf("\nCSR");
525 for (j = 0; j < columns; j++) {
526 if (offset + j*step < next)
527 xprintf(" %04X: %08lX ",
528 offset+j*step, READCSR(sc, offset+j*step));
530 offset += 4;
532 xprintf("\n");
535 static void
536 dumpcsrs(t3_ether_t *sc, const char *legend)
538 xprintf("%s:\n", legend);
540 /* Some device-specific PCI configuration registers */
541 xprintf("-----PCI-----");
542 dumpseq(sc, 0x68, 0x78);
544 /* Some general control registers */
545 xprintf("---General---");
546 dumpseq(sc, 0x6800, 0x6810);
548 xprintf("-------------\n");
552 /* Memory allocation */
554 static void *
555 kmalloc_uncached( unsigned int size, unsigned int align )
557 void * ptr;
559 if ((ptr = KMALLOC(size, align)) == NULL)
560 return NULL;
562 cfe_flushcache(CFE_CACHE_FLUSH_D);
564 return (void *)UNCADDR(PHYSADDR((uint32_t)ptr));
567 static void
568 kfree_uncached( void * ptr )
570 KFREE((void *)KERNADDR(PHYSADDR((uint32_t)ptr)));
574 /* Packet management */
576 #define ETH_PKTPOOL_SIZE 64
577 #define MIN_RXP_STD_BDS 32
580 static eth_pkt_t *
581 eth_alloc_pkt(t3_ether_t *sc)
583 eth_pkt_t *pkt;
585 CS_ENTER(sc);
586 pkt = (eth_pkt_t *) q_deqnext(&sc->freelist);
587 CS_EXIT(sc);
588 if (!pkt) return NULL;
590 pkt->buffer = pkt->data;
591 pkt->length = ETH_PKTBUF_LEN;
592 pkt->flags = 0;
594 return pkt;
598 static void
599 eth_free_pkt(t3_ether_t *sc, eth_pkt_t *pkt)
601 CS_ENTER(sc);
602 q_enqueue(&sc->freelist, &pkt->next);
603 CS_EXIT(sc);
606 static void
607 eth_initfreelist(t3_ether_t *sc)
609 int idx;
610 uint8_t *ptr;
611 eth_pkt_t *pkt;
613 q_init(&sc->freelist);
615 ptr = sc->pktpool;
616 for (idx = 0; idx < ETH_PKTPOOL_SIZE; idx++) {
617 pkt = (eth_pkt_t *) ptr;
618 eth_free_pkt(sc, pkt);
619 ptr += ETH_PKTBUF_SIZE;
624 /* Utilities */
626 static const char *
627 t3_devname(t3_ether_t *sc)
629 return (sc->devctx != NULL ? cfe_device_name(sc->devctx) : "eth?");
633 /* CRCs */
635 #define IEEE_CRC32_POLY 0xEDB88320UL /* CRC-32 Poly -- either endian */
637 uint32_t eth_crc32(const uint8_t *databuf, unsigned int datalen);
638 /*static*/ uint32_t
639 eth_crc32(const uint8_t *databuf, unsigned int datalen)
641 unsigned int idx, bit, data;
642 uint32_t crc;
644 crc = 0xFFFFFFFFUL;
645 for (idx = 0; idx < datalen; idx++)
646 for (data = *databuf++, bit = 0; bit < 8; bit++, data >>= 1)
647 crc = (crc >> 1) ^ (((crc ^ data) & 1) ? IEEE_CRC32_POLY : 0);
648 return crc;
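/* One common use of this CRC on 570x-class MACs is multicast hash filtering.
   A sketch only -- this driver currently clears the R_MAC_HASH registers, and
   the exact bit-selection scheme below is an assumption, not taken from this
   file:

       uint32_t crc = eth_crc32(mcaddr, ENET_ADDR_LEN);
       unsigned slot = ~crc & 0x7F;        (assumed 128-entry hash table)
       set bit (slot & 0x1F) in R_MAC_HASH(slot >> 5)
*/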
652 /* Descriptor ring management */
654 static int
655 t3_add_rcvbuf(t3_ether_t *sc, eth_pkt_t *pkt)
657 t3_rcv_bd_t *rxp;
659 rxp = &(sc->rxp_std[sc->rxp_std_index]);
660 rxp->bufptr_lo = PTR_TO_PCI(pkt->buffer);
661 rxp->length = ETH_PKTBUF_LEN;
662 sc->rxp_std_index++;
663 if (sc->rxp_std_index == RXP_STD_ENTRIES)
664 sc->rxp_std_index = 0;
665 return 0;
668 static void
669 t3_fillrxring(t3_ether_t *sc)
671 eth_pkt_t *pkt;
672 unsigned rxp_ci, rxp_onring;
674 rxp_ci = sc->status->rxc_std_index; /* Get a snapshot */
676 if (sc->rxp_std_index >= rxp_ci)
677 rxp_onring = sc->rxp_std_index - rxp_ci;
678 else
679 rxp_onring = (sc->rxp_std_index + RXP_STD_ENTRIES) - rxp_ci;
681 while (rxp_onring < MIN_RXP_STD_BDS) {
682 pkt = eth_alloc_pkt(sc);
683 if (pkt == NULL) {
684 /* could not allocate a buffer */
685 break;
689 * Ensure that the packet memory is flushed out of the data cache
690 * before posting it to receive an incoming packet.
692 cfe_flushcache(CFE_CACHE_FLUSH_D);
694 if (t3_add_rcvbuf(sc, pkt) != 0) {
695 /* could not add buffer to ring */
696 eth_free_pkt(sc, pkt);
697 break;
699 rxp_onring++;
703 static void
704 t3_rx_callback(t3_ether_t *sc, eth_pkt_t *pkt)
706 if (T3_DEBUG) show_packet('>', pkt); /* debug */
708 CS_ENTER(sc);
709 q_enqueue(&sc->rxqueue, &pkt->next);
710 CS_EXIT(sc);
713 static void
714 t3_procrxring(t3_ether_t *sc)
716 eth_pkt_t *pkt;
717 t3_rcv_bd_t *rxc;
718 volatile t3_status_t *status = sc->status;
720 rxc = &(sc->rxr_1[sc->rxr_1_index]);
721 do {
722 pkt = ETH_PKT_BASE(PCI_TO_PTR(rxc->bufptr_lo));
723 pkt->length = rxc->length;
724 if ((rxc->flags & RX_FLAG_ERROR) == 0)
725 t3_rx_callback(sc, pkt);
726 else {
727 #if T3_BRINGUP
728 xprintf("%s: rx error %04X\n", t3_devname(sc), rxc->error_flag);
729 #endif
730 eth_free_pkt(sc, pkt); /* Could optimize */
732 sc->rxr_1_index++;
733 rxc++;
734 if (sc->rxr_1_index == sc->rxr_entries) {
735 sc->rxr_1_index = 0;
736 rxc = &(sc->rxr_1[0]);
738 } while (status->index[RI(1)].return_p != sc->rxr_1_index);
740 /* Update the return ring */
741 WRITEMBOX(sc, R_RCV_BD_RTN_CI(1), sc->rxr_1_index);
743 /* Refill the producer ring */
744 t3_fillrxring(sc);
748 static int
749 t3_transmit(t3_ether_t *sc, eth_pkt_t *pkt)
751 t3_snd_bd_t *txp;
753 if (T3_DEBUG) show_packet('<', pkt); /* debug */
756 txp = &(sc->txp_1[sc->txp_1_index]);
757 txp->bufptr_hi = 0;
758 txp->bufptr_lo = PTR_TO_PCI(pkt->buffer);
759 txp->length = pkt->length;
760 txp->flags = TX_FLAG_PACKET_END;
762 sc->txp_1_index++;
763 if (sc->txp_1_index == TXP_RING_ENTRIES)
764 sc->txp_1_index = 0;
766 WRITEMBOX(sc, R_SND_BD_PI(1), sc->txp_1_index);
768 return 0;
772 static void
773 t3_proctxring(t3_ether_t *sc)
775 eth_pkt_t *pkt;
776 t3_snd_bd_t *txc;
777 volatile t3_status_t *status = sc->status;
779 txc = &(sc->txp_1[sc->txc_1_index]);
780 do {
781 pkt = ETH_PKT_BASE(PCI_TO_PTR(txc->bufptr_lo));
782 eth_free_pkt(sc, pkt);
783 sc->txc_1_index++;
784 txc++;
785 if (sc->txc_1_index == TXP_RING_ENTRIES) {
786 sc->txc_1_index = 0;
787 txc = &(sc->txp_1[0]);
789 } while (status->index[RI(1)].send_c != sc->txc_1_index);
793 static void
794 t3_initrings(t3_ether_t *sc)
796 int i;
797 t3_rcv_bd_t *rxp;
798 volatile t3_status_t *status = sc->status;
800 /* Clear all Producer BDs */
801 rxp = &(sc->rxp_std[0]);
802 for (i = 0; i < RXP_STD_ENTRIES; i++) {
803 rxp->bufptr_hi = rxp->bufptr_lo = 0;
804 rxp->length = 0;
805 rxp->index = i;
806 rxp->flags = 0;
807 rxp->type = 0;
808 rxp->ip_cksum = rxp->tcp_cksum = 0;
809 rxp++;
812 /* Init the ring pointers */
814 sc->rxp_std_index = 0; status->rxc_std_index = 0;
815 sc->rxr_1_index = 0; status->index[RI(1)].return_p = 0;
816 sc->txp_1_index = 0; status->index[RI(1)].send_c = 0;
818 /* Allocate some initial buffers for the Producer BD ring */
819 sc->prev_rxp_std_index = 0;
820 t3_fillrxring(sc);
822 /* Nothing consumed yet */
823 sc->txc_1_index = 0;
826 static void
827 t3_init(t3_ether_t *sc)
829 /* Allocate buffer pool */
830 sc->pktpool = KMALLOC(ETH_PKTPOOL_SIZE*ETH_PKTBUF_SIZE, CACHE_ALIGN);
831 eth_initfreelist(sc);
832 q_init(&sc->rxqueue);
833 t3_initrings(sc);
836 static void
837 t3_reinit(t3_ether_t *sc)
839 eth_initfreelist(sc);
840 q_init(&sc->rxqueue);
842 t3_initrings(sc);
846 #ifdef __MIPSEB
847 /* Byte swap utilities. */
849 #define SWAP4(x) \
850 ((((x) & 0x00FF) << 24) | \
851 (((x) & 0xFF00) << 8) | \
852 (((x) >> 8) & 0xFF00) | \
853 (((x) >> 24) & 0x00FF))
855 static uint32_t
856 swap4(uint32_t x)
858 uint32_t t;
860 t = ((x & 0xFF00FF00) >> 8) | ((x & 0x00FF00FF) << 8);
861 return (t >> 16) | ((t & 0xFFFF) << 16);
863 #endif /* __MIPSEB */
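/* For reference: both SWAP4() and swap4() perform a full byte reversal,
   e.g. swap4(0x12345678) == 0x78563412. They are used below to fix up the
   little-endian SEEPROM data when running big-endian. */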
866 /* EEPROM access functions (BCM5700 and BCM5701 version) */
868 /* The 570x chips support multiple access methods. We use "Auto Access",
869 which requires that
870 Miscellaneous_Local_Control.Auto_SEEPROM_Access be set,
871 Serial_EEprom.Address.HalfClock be programmed for <= 400 Hz.
872 (both done by initialization code) */
874 #define EP_MAX_RETRIES 500
875 #define EP_DEVICE_ID 0x00 /* default ATMEL device ID */
877 static void
878 eeprom_access_init(t3_ether_t *sc)
880 uint32_t mlctl;
882 if (sc->flags & T3_SB_CORE)
883 return;
885 WRITECSR(sc, R_EEPROM_ADDR, M_EPADDR_RESET | V_EPADDR_HPERIOD(0x60));
887 mlctl = READCSR(sc, R_MISC_LOCAL_CTRL);
888 mlctl |= M_MLCTL_EPAUTOACCESS;
889 WRITECSR(sc, R_MISC_LOCAL_CTRL, mlctl);
893 static uint32_t
894 eeprom_read_word(t3_ether_t *sc, unsigned int offset)
896 /* Assumes that SEEPROM is already set up for auto access. */
897 uint32_t epaddr, epdata;
898 volatile uint32_t temp;
899 int i;
901 if (sc->flags & T3_SB_CORE)
902 return 0xffffffff;
904 epaddr = READCSR(sc, R_EEPROM_ADDR);
905 epaddr &= M_EPADDR_HPERIOD;
906 epaddr |= (V_EPADDR_ADDR(offset) | V_EPADDR_DEVID(EP_DEVICE_ID)
907 | M_EPADDR_RW | M_EPADDR_START | M_EPADDR_COMPLETE);
908 WRITECSR(sc, R_EEPROM_ADDR, epaddr);
909 temp = READCSR(sc, R_EEPROM_ADDR); /* push */
911 for (i = 0; i < EP_MAX_RETRIES; i++) {
912 temp = READCSR(sc, R_EEPROM_ADDR);
913 if ((temp & M_EPADDR_COMPLETE) != 0)
914 break;
915 cfe_usleep(10);
917 if (i == EP_MAX_RETRIES)
918 xprintf("%s: eeprom_read_word: no SEEPROM response @ %x\n",
919 t3_devname(sc), offset);
921 epdata = READCSR(sc, R_EEPROM_DATA); /* little endian */
922 #ifdef __MIPSEB
923 return swap4(epdata);
924 #else
925 return epdata;
926 #endif
929 static int
930 eeprom_read_range(t3_ether_t *sc, unsigned int offset, unsigned int len,
931 uint32_t buf[])
933 int index;
935 offset &= ~3; len &= ~3; /* 4-byte words only */
936 index = 0;
938 while (len > 0) {
939 buf[index++] = eeprom_read_word(sc, offset);
940 offset += 4; len -= 4;
943 return index;
946 static void
947 eeprom_dump_range(const char *label,
948 uint32_t buf[], unsigned int offset, unsigned int len)
950 int index;
952 xprintf("EEPROM: %s", label);
954 offset &= ~3; len &= ~3; /* 4-byte words only */
955 index = 0;
957 for (index = 0; len > 0; index++) {
958 if (index % 8 == 0)
959 xprintf("\n %04x: ", offset);
960 xprintf(" %08lx", buf[offset/4]);
961 offset += 4; len -= 4;
963 xprintf("\n");
967 /* MII access functions. */
969 /* BCM5401 device specific registers */
971 #define MII_ISR 0x1A /* Interrupt Status Register */
972 #define MII_IMR 0x1B /* Interrupt Mask Register */
974 #define M_INT_LINKCHNG 0x0002
977 /* The 570x chips support multiple access methods. We use "Auto
978 Access", which requires that MDI_Control_Register.MDI_Select be
979 clear (done by initialization code) */
981 #define MII_MAX_RETRIES 5000
983 static void
984 mii_access_init(t3_ether_t *sc)
986 WRITECSR(sc, R_MDI_CTRL, 0); /* here for now */
987 #if !T3_AUTOPOLL
988 WRITECSR(sc, R_MI_MODE, V_MIMODE_CLKCNT(0x1F)); /* max divider */
989 #endif
993 static uint16_t
994 mii_read_register(t3_ether_t *sc, int phy, int index)
996 uint32_t mode;
997 uint32_t comm, val;
998 int i;
1000 mode = READCSR(sc, R_MI_MODE);
1002 comm = (V_MICOMM_CMD_RD | V_MICOMM_PHY(phy) | V_MICOMM_REG(index)
1003 | M_MICOMM_BUSY);
1004 WRITECSR(sc, R_MI_COMM, comm);
1006 for (i = 0; i < MII_MAX_RETRIES; i++) {
1007 val = READCSR(sc, R_MI_COMM);
1008 if ((val & M_MICOMM_BUSY) == 0)
1009 break;
1011 if (i == MII_MAX_RETRIES)
1012 xprintf("%s: mii_read_register: MII always busy\n", t3_devname(sc));
1015 return G_MICOMM_DATA(val);
1018 /* Register reads occasionally return spurious 0's. Verify a zero by
1019 doing a second read, or spinning when a zero is "impossible". */
1020 static uint16_t
1021 mii_read_register_v(t3_ether_t *sc, int phy, int index, int spin)
1023 uint32_t val;
1025 val = mii_read_register(sc, phy, index);
1026 if (val == 0) {
1027 do {
1028 val = mii_read_register(sc, phy, index);
1029 } while (spin && val == 0);
1031 return val;
1034 static void
1035 mii_write_register(t3_ether_t *sc, int phy, int index, uint16_t value)
1037 uint32_t mode;
1038 uint32_t comm, val;
1039 int i;
1041 mode = READCSR(sc, R_MI_MODE);
1043 comm = (V_MICOMM_CMD_WR | V_MICOMM_PHY(phy) | V_MICOMM_REG(index)
1044 | V_MICOMM_DATA(value) | M_MICOMM_BUSY);
1045 WRITECSR(sc, R_MI_COMM, comm);
1047 for (i = 0; i < MII_MAX_RETRIES; i++) {
1048 val = READCSR(sc, R_MI_COMM);
1049 if ((val & M_MICOMM_BUSY) == 0)
1050 break;
1052 if (i == MII_MAX_RETRIES)
1053 xprintf("%s: mii_write_register: MII always busy\n", t3_devname(sc));
1057 static int
1058 mii_probe(t3_ether_t *sc)
1060 #if T3_AUTOPOLL /* With autopolling, the code below is not reliable. */
1061 return 1; /* Guaranteed for integrated PHYs */
1062 #else
1063 int i;
1064 uint16_t id1, id2;
1066 for (i = 0; i < 32; i++) {
1067 id1 = mii_read_register(sc, i, MII_PHYIDR1);
1068 id2 = mii_read_register(sc, i, MII_PHYIDR2);
1069 if ((id1 != 0x0000 && id1 != 0xFFFF) ||
1070 (id2 != 0x0000 && id2 != 0xFFFF)) {
1071 if (id1 != id2) return i;
1074 return -1;
1075 #endif
1078 static uint16_t
1079 mii_read_shadow_register(t3_ether_t *sc, int index, int shadow_addr)
1081 uint16_t val;
1083 #if T3_DEBUG
1084 xprintf("\nmii_read_shadow_register: reg=0x%X shadow=0x%X\n", index, shadow_addr);
1085 #endif
1087 /* write to the shadow register first with the correct shadow address and write disabled */
1088 mii_write_register(sc, sc->phy_addr, index, (shadow_addr & ~SHDW_WR_EN) );
1090 /* read from the shadow register */
1091 val = mii_read_register(sc, sc->phy_addr, index);
1093 #if T3_DEBUG
1094 xprintf("mii_read_shadow_register: reg=0x%X shadow=0x%X value=0x%X\n", index, shadow_addr, val);
1095 #endif
1097 return(val);
1100 static void
1101 mii_write_shadow_register(t3_ether_t *sc, int index, int shadow_val)
1103 uint16_t val;
1105 #if T3_DEBUG
1106 xprintf("\nmii_write_shadow_register: reg=0x%X shadow=0x%X\n", index, (shadow_val | SHDW_WR_EN) );
1107 #endif
1109 /* write to the shadow register first with the correct shadow address and write enabled */
1110 mii_write_register(sc, sc->phy_addr, index, (shadow_val | SHDW_WR_EN));
1112 /* read from the shadow register */
1113 val = mii_read_shadow_register(sc, index, shadow_val);
1115 #if T3_DEBUG
1116 xprintf("mii_write_shadow_register: reg=0x%X shadow=0x%X val=0x%X\n", index, shadow_val, val);
1117 #endif
1120 #if T3_DEBUG
1121 #define OUI_BCM 0x001018
1122 #define IDR_BCM 0x000818
1123 /* 5400: 4, 5401: 5, 5411: 6, 5421: e, 5701: 11 */
1125 static void
1126 mii_dump(t3_ether_t *sc, const char *label)
1128 int i;
1129 uint16_t r;
1130 uint32_t idr, part;
1132 xprintf("%s, MII:\n", label);
1133 idr = part = 0;
1135 /* Required registers */
1136 for (i = 0x0; i <= 0x6; ++i) {
1137 r = mii_read_register(sc, sc->phy_addr, i);
1138 xprintf(" REG%02X: %04X", i, r);
1139 if (i == 3 || i == 6)
1140 xprintf("\n");
1141 if (i == MII_PHYIDR1) {
1142 idr |= r << 6;
1144 else if (i == MII_PHYIDR2) {
1145 idr |= (r >> 10) & 0x3F;
1146 part = (r >> 4) & 0x3F;
1150 /* GMII extensions */
1151 for (i = 0x9; i <= 0xA; ++i) {
1152 r = mii_read_register(sc, sc->phy_addr, i);
1153 xprintf(" REG%02X: %04X", i, r);
1155 r = mii_read_register(sc, sc->phy_addr, 0xF);
1156 xprintf(" REG%02X: %04X\n", 0xF, r);
1158 /* Broadcom extensions (54xx family) */
1159 if (idr == IDR_BCM) {
1160 for (i = 0x10; i <= 0x14; i++) {
1161 r = mii_read_register(sc, sc->phy_addr, i);
1162 xprintf(" REG%02X: %04X", i, r);
1164 xprintf("\n");
1165 for (i = 0x18; i <= 0x1A; i++) {
1166 r = mii_read_register(sc, sc->phy_addr, i);
1167 xprintf(" REG%02X: %04X", i, r);
1169 xprintf("\n");
1172 #else
1173 #define mii_dump(sc,label)
1174 #endif
1176 static void
1177 mii_enable_interrupts(t3_ether_t *sc)
1179 mii_write_register(sc, sc->phy_addr, MII_IMR, ~M_INT_LINKCHNG);
1183 /* For 5700/5701, LINKCHNG is read-only in the status register and
1184 cleared by writing to CFGCHNG | SYNCCHNG. For the 5705
1185 (empirically), LINKCHNG is cleared by writing a one, while CFGCHNG
1186 and SYNCCHNG are unimplemented. Thus we can safely clear the
1187 interrupt by writing ones to all the above bits. */
1189 #define M_LINKCHNG_CLR \
1190 (M_EVT_LINKCHNG | M_MACSTAT_CFGCHNG | M_MACSTAT_SYNCCHNG)
1192 static int
1193 mii_poll(t3_ether_t *sc)
1195 uint32_t macstat;
1196 uint16_t status, ability, xability;
1197 uint16_t isr;
1199 macstat = READCSR(sc, R_MAC_STATUS);
1200 if ((macstat & (M_EVT_LINKCHNG | M_EVT_MIINT)) != 0)
1201 WRITECSR(sc, R_MAC_STATUS, M_LINKCHNG_CLR);
1203 /* BMSR has read-to-clear bits; read twice. */
1205 status = mii_read_register(sc, sc->phy_addr, MII_BMSR);
1206 status = mii_read_register_v(sc, sc->phy_addr, MII_BMSR, 1);
1207 ability = mii_read_register_v(sc, sc->phy_addr, MII_ANLPAR, 0);
1208 if (status & BMSR_1000BT_XSR)
1209 xability = mii_read_register_v(sc, sc->phy_addr, MII_K1STSR, 0);
1210 else
1211 xability = 0;
1212 isr = mii_read_register(sc, sc->phy_addr, MII_ISR);
1214 if (status != sc->phy_status
1215 || ability != sc->phy_ability || xability != sc->phy_xability) {
1216 #if T3_DEBUG
1217 xprintf("[%04x]", isr);
1218 xprintf((macstat & (M_EVT_LINKCHNG | M_EVT_MIINT)) != 0 ? "+" : "-");
1220 if (status != sc->phy_status)
1221 xprintf(" ST: %04x %04x", sc->phy_status, status);
1222 if (ability != sc->phy_ability)
1223 xprintf(" AB: %04x %04x", sc->phy_ability, ability);
1224 if (xability != sc->phy_xability)
1225 xprintf(" XA: %04x %04x", sc->phy_xability, xability);
1226 xprintf("\n");
1227 #endif
1228 sc->phy_status = status;
1229 sc->phy_ability = ability;
1230 sc->phy_xability = xability;
1231 return 1;
1233 else if ((macstat & (M_EVT_LINKCHNG | M_EVT_MIINT)) != 0) {
1234 isr = mii_read_register(sc, sc->phy_addr, MII_ISR);
1236 return 0;
1239 static void
1240 mii_set_speed(t3_ether_t *sc, int speed)
1242 uint16_t control;
1244 control = mii_read_register(sc, sc->phy_addr, MII_BMCR);
1246 control &= ~(BMCR_ANENABLE | BMCR_RESTARTAN);
1247 mii_write_register(sc, sc->phy_addr, MII_BMCR, control);
1248 control &= ~(BMCR_SPEED0 | BMCR_SPEED1 | BMCR_DUPLEX);
1250 switch (speed) {
1251 case ETHER_SPEED_10HDX:
1252 default:
1253 break;
1254 case ETHER_SPEED_10FDX:
1255 control |= BMCR_DUPLEX;
1256 break;
1257 case ETHER_SPEED_100HDX:
1258 control |= BMCR_SPEED100;
1259 break;
1260 case ETHER_SPEED_100FDX:
1261 control |= BMCR_SPEED100 | BMCR_DUPLEX ;
1262 break;
1265 mii_write_register(sc, sc->phy_addr, MII_BMCR, control);
1268 static void
1269 mii_autonegotiate(t3_ether_t *sc)
1271 uint16_t control, status, remote, xremote;
1272 unsigned int timeout;
1273 int linkspeed;
1274 uint32_t mode, ledCtrl;
1276 linkspeed = ETHER_SPEED_UNKNOWN;
1278 /* Read twice to clear latching bits */
1279 status = mii_read_register(sc, sc->phy_addr, MII_BMSR);
1280 status = mii_read_register_v(sc, sc->phy_addr, MII_BMSR, 1);
1281 mii_dump(sc, "query PHY");
1283 if ((status & (BMSR_AUTONEG | BMSR_LINKSTAT)) ==
1284 (BMSR_AUTONEG | BMSR_LINKSTAT))
1285 control = mii_read_register(sc, sc->phy_addr, MII_BMCR);
1286 else {
1287 for (timeout = 4*CFE_HZ; timeout > 0; timeout -= CFE_HZ/2) {
1288 status = mii_read_register(sc, sc->phy_addr, MII_BMSR);
1289 if ((status & BMSR_ANCOMPLETE) != 0)
1290 break;
1291 cfe_sleep(CFE_HZ/2);
1295 remote = mii_read_register_v(sc, sc->phy_addr, MII_ANLPAR, 0);
1297 mode = READCSR(sc, R_MAC_MODE);
1299 xprintf("%s: Link speed: ", t3_devname(sc));
1300 if ((status & BMSR_ANCOMPLETE) != 0) {
1301 /* A link partner was negotiated... */
1303 if (status & BMSR_1000BT_XSR)
1304 xremote = mii_read_register_v(sc, sc->phy_addr, MII_K1STSR, 0);
1305 else
1306 xremote = 0;
1308 mode &= ~(M_MACM_PORTMODE | M_MACM_HALFDUPLEX);
1310 if ((xremote & K1STSR_LP1KFD) != 0) {
1311 xprintf("1000BaseT FDX\n");
1312 linkspeed = ETHER_SPEED_1000FDX;
1313 mode |= V_MACM_PORTMODE(K_MACM_PORTMODE_GMII);
1315 else if ((xremote & K1STSR_LP1KHD) != 0) {
1316 xprintf("1000BaseT HDX\n");
1317 linkspeed = ETHER_SPEED_1000HDX;
1318 mode |= V_MACM_PORTMODE(K_MACM_PORTMODE_GMII) | M_MACM_HALFDUPLEX;
1320 else if ((remote & ANLPAR_TXFD) != 0) {
1321 xprintf("100BaseT FDX\n");
1322 linkspeed = ETHER_SPEED_100FDX;
1323 mode |= V_MACM_PORTMODE(K_MACM_PORTMODE_MII);
1325 else if ((remote & ANLPAR_TXHD) != 0) {
1326 xprintf("100BaseT HDX\n");
1327 linkspeed = ETHER_SPEED_100HDX;
1328 mode |= V_MACM_PORTMODE(K_MACM_PORTMODE_MII) | M_MACM_HALFDUPLEX;
1330 else if ((remote & ANLPAR_10FD) != 0) {
1331 xprintf("10BaseT FDX\n");
1332 linkspeed = ETHER_SPEED_10FDX;
1333 mode |= V_MACM_PORTMODE(K_MACM_PORTMODE_MII);
1335 else if ((remote & ANLPAR_10HD) != 0) {
1336 xprintf("10BaseT HDX\n");
1337 linkspeed = ETHER_SPEED_10HDX;
1338 mode |= V_MACM_PORTMODE(K_MACM_PORTMODE_MII) | M_MACM_HALFDUPLEX;
1341 /* In order for the 5750 core in the BCM4785 chip to work properly
1342 * in RGMII mode, the LED Control Register must be set up.
1344 if ((sc->flags & (T3_SB_CORE | T3_RGMII_MODE)) == (T3_SB_CORE | T3_RGMII_MODE)) {
1345 ledCtrl = READCSR(sc, R_MAC_LED_CTRL);
1346 ledCtrl &= ~(M_LEDCTRL_1000MBPS | M_LEDCTRL_100MBPS);
1348 if((linkspeed == ETHER_SPEED_10FDX) || (linkspeed == ETHER_SPEED_10HDX))
1349 ledCtrl |= M_LEDCTRL_OVERRIDE;
1350 else if ((linkspeed == ETHER_SPEED_100FDX) || (linkspeed == ETHER_SPEED_100HDX))
1351 ledCtrl |= (M_LEDCTRL_OVERRIDE | M_LEDCTRL_100MBPS);
1352 else /* 1000MBPS */
1353 ledCtrl |= (M_LEDCTRL_OVERRIDE | M_LEDCTRL_1000MBPS);
1355 WRITECSR(sc, R_MAC_LED_CTRL, ledCtrl);
1357 cfe_usleep(40);
1360 WRITECSR(sc, R_MAC_MODE, mode);
1362 else {
1363 /* no link partner convergence */
1364 xprintf("Unknown\n");
1365 linkspeed = ETHER_SPEED_UNKNOWN;
1366 remote = xremote = 0;
1368 /* If 5750 core in RGMII mode, set the speed to 1000 Mbps */
1369 if ((sc->flags & (T3_SB_CORE | T3_RGMII_MODE)) == (T3_SB_CORE | T3_RGMII_MODE)) {
1370 ledCtrl = READCSR(sc, R_MAC_LED_CTRL);
1371 ledCtrl &= ~(M_LEDCTRL_1000MBPS | M_LEDCTRL_100MBPS);
1372 ledCtrl |= (M_LEDCTRL_OVERRIDE | M_LEDCTRL_1000MBPS);
1373 WRITECSR(sc, R_MAC_LED_CTRL, ledCtrl);
1374 cfe_usleep(40);
1376 mode |= V_MACM_PORTMODE(K_MACM_PORTMODE_GMII);
1377 WRITECSR(sc, R_MAC_MODE, mode);
1380 sc->linkspeed = linkspeed;
1382 status = mii_read_register_v(sc, sc->phy_addr, MII_BMSR, 1);
1383 (void)mii_read_register(sc, sc->phy_addr, MII_ISR);
1385 sc->phy_status = status;
1386 sc->phy_ability = remote;
1387 sc->phy_xability = xremote;
1389 mii_dump(sc, "final PHY");
1392 static void
1393 t3_force_speed(t3_ether_t *sc, int linkspeed)
1395 uint32_t mode, ledCtrl;
1398 mode = READCSR(sc, R_MAC_MODE);
1399 mode &= ~(M_MACM_PORTMODE | M_MACM_HALFDUPLEX);
1401 xprintf("%s: Link speed: ", t3_devname(sc));
1403 switch (linkspeed)
1405 case ETHER_SPEED_1000FDX:
1406 xprintf("1000BaseT FDX\n");
1407 mode |= V_MACM_PORTMODE(K_MACM_PORTMODE_GMII);
1408 break;
1409 case ETHER_SPEED_1000HDX:
1410 xprintf("1000BaseT HDX\n");
1411 mode |= V_MACM_PORTMODE(K_MACM_PORTMODE_GMII) | M_MACM_HALFDUPLEX;
1412 break;
1413 case ETHER_SPEED_100FDX:
1414 xprintf("100BaseT FDX\n");
1415 mode |= V_MACM_PORTMODE(K_MACM_PORTMODE_MII);
1416 break;
1417 case ETHER_SPEED_100HDX:
1418 xprintf("100BaseT HDX\n");
1419 mode |= V_MACM_PORTMODE(K_MACM_PORTMODE_MII) | M_MACM_HALFDUPLEX;
1420 break;
1421 case ETHER_SPEED_10FDX:
1422 xprintf("10BaseT FDX\n");
1423 mode |= V_MACM_PORTMODE(K_MACM_PORTMODE_MII);
1424 break;
1425 case ETHER_SPEED_10HDX:
1426 xprintf("10BaseT HDX\n");
1427 mode |= V_MACM_PORTMODE(K_MACM_PORTMODE_MII) | M_MACM_HALFDUPLEX;
1428 break;
1429 default:
1430 xprintf("Unknown\n");
1431 break;
1434 /* In order for the 5750 core in the BCM4785 chip to work properly
1435 * in RGMII mode, the LED Control Register must be set up.
1437 if ((sc->flags & (T3_SB_CORE | T3_RGMII_MODE)) == (T3_SB_CORE | T3_RGMII_MODE)) {
1438 ledCtrl = READCSR(sc, R_MAC_LED_CTRL);
1439 ledCtrl &= ~(M_LEDCTRL_1000MBPS | M_LEDCTRL_100MBPS);
1441 if((linkspeed == ETHER_SPEED_10FDX) || (linkspeed == ETHER_SPEED_10HDX))
1442 ledCtrl |= M_LEDCTRL_OVERRIDE;
1443 else if ((linkspeed == ETHER_SPEED_100FDX) || (linkspeed == ETHER_SPEED_100HDX))
1444 ledCtrl |= (M_LEDCTRL_OVERRIDE | M_LEDCTRL_100MBPS);
1445 else /* 1000MBPS */
1446 ledCtrl |= (M_LEDCTRL_OVERRIDE | M_LEDCTRL_1000MBPS);
1448 WRITECSR(sc, R_MAC_LED_CTRL, ledCtrl);
1450 cfe_usleep(40);
1453 WRITECSR(sc, R_MAC_MODE, mode);
1455 sc->linkspeed = linkspeed;
1456 sc->phy_status = 0;
1457 sc->phy_ability = 0;
1458 sc->phy_xability = 0;
1461 static void
1462 t3_clear(t3_ether_t *sc, unsigned reg, uint32_t mask)
1464 uint32_t val;
1465 int timeout;
1467 val = READCSR(sc, reg);
1468 val &= ~mask;
1469 WRITECSR(sc, reg, val);
1470 val = READCSR(sc, reg);
1472 for (timeout = 4000; (val & mask) != 0 && timeout > 0; timeout -= 100) {
1473 cfe_usleep(100);
1474 val = READCSR(sc, reg);
1476 if (timeout <= 0)
1477 xprintf("%s: cannot clear %04X/%08X\n", t3_devname(sc), reg, (unsigned int)mask);
1481 /* The following functions collectively implement the recommended
1482 BCM5700 Initialization Procedure (Section 8: Device Control) */
1484 static int
1485 t3_coldreset(t3_ether_t *sc)
1487 pcireg_t cmd;
1488 pcireg_t bhlc, subsysid;
1489 pcireg_t bar0, bar1;
1490 pcireg_t cmdx;
1491 uint32_t mhc, mcr, mcfg;
1492 uint32_t mode;
1493 int timeout;
1494 uint32_t magic;
1496 /* Steps 1-18 */
1497 /* Enable memory, also clear R/WC status bits (1) */
1498 cmd = pci_conf_read(sc->tag, PCI_COMMAND_STATUS_REG);
1499 cmd |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE;
1500 cmd &= ~PCI_COMMAND_PARITY_ENABLE;
1501 cmd &= ~PCI_COMMAND_SERR_ENABLE;
1502 pci_conf_write(sc->tag, PCI_COMMAND_STATUS_REG, cmd);
1504 /* Clear and disable INTA output. (2) */
1505 mhc = READCSR(sc, R_MISC_HOST_CTRL);
1506 mhc |= M_MHC_MASKPCIINT | M_MHC_CLEARINTA;
1507 WRITECSR(sc, R_MISC_HOST_CTRL, mhc);
1509 /* Save some config registers modified by core clock reset (3). */
1510 bhlc = pci_conf_read(sc->tag, PCI_BHLC_REG);
1511 subsysid = pci_conf_read(sc->tag, PCI_SUBSYS_ID_REG);
1512 /* Empirically, these are clobbered too. */
1513 bar0 = pci_conf_read(sc->tag, PCI_MAPREG(0));
1514 bar1 = pci_conf_read(sc->tag, PCI_MAPREG(1));
1516 /* Reset the core clocks (4, 5). */
1517 mcfg = READCSR(sc, R_MISC_CFG);
1518 mcfg |= M_MCFG_CORERESET;
1519 WRITECSR(sc, R_MISC_CFG, mcfg);
1520 cfe_usleep(100); /* 100 usec delay */
1522 /* NB: Until the BARs are restored and reenabled, only PCI
1523 configuration reads and writes will succeed. */
1525 /* Reenable MAC memory (7) */
1526 pci_conf_write(sc->tag, PCI_MAPREG(0), bar0);
1527 pci_conf_write(sc->tag, PCI_MAPREG(1), bar1);
1528 (void)pci_conf_read(sc->tag, PCI_MAPREG(1)); /* push */
1529 pci_conf_write(sc->tag, PCI_COMMAND_STATUS_REG, cmd);
1530 (void)pci_conf_read(sc->tag, PCI_COMMAND_STATUS_REG); /* push */
1532 /* Undo some of the resets (6) */
1533 mhc = READCSR(sc, R_MISC_HOST_CTRL);
1534 mhc |= M_MHC_MASKPCIINT;
1535 WRITECSR(sc, R_MISC_HOST_CTRL, mhc);
1537 /* Verify that core clock resets completed and autocleared. */
1538 mcfg = READCSR(sc, R_MISC_CFG);
1539 if ((mcfg & M_MCFG_CORERESET) != 0) {
1540 xprintf("bcm5700: core clocks stuck in reset\n");
1543 /* Configure PCI-X (8) */
1544 if (!(sc->device == K_PCI_ID_BCM5705 || sc->device == K_PCI_ID_BCM5750)) {
1545 cmdx = pci_conf_read(sc->tag, PCI_PCIX_CMD_REG);
1546 cmdx &= ~PCIX_CMD_RLXORDER_ENABLE;
1547 pci_conf_write(sc->tag, PCI_PCIX_CMD_REG, cmdx);
1550 if (sc->flags & T3_SB_CORE) {
1551 #define HALT_CPU 0x400
1552 int ndx;
1554 /* Bang on halt request until it sticks */
1555 for (ndx = 0; ndx < 10000; ++ndx) {
1556 WRITECSR(sc, R_RX_RISC_STATE, 0xffffffff);
1557 WRITECSR(sc, R_RX_RISC_MODE, HALT_CPU);
1559 if ((READCSR(sc, R_RX_RISC_MODE) & HALT_CPU) == HALT_CPU)
1560 break;
1563 /* One more time */
1564 WRITECSR(sc, R_RX_RISC_STATE, 0xffffffff);
1565 WRITECSR(sc, R_RX_RISC_MODE, HALT_CPU);
1566 (void)READCSR(sc, R_RX_RISC_MODE);
1568 cfe_usleep(10);
1570 #undef HALT_CPU
1573 /* Enable memory arbiter (9) */
1574 mode = READCSR(sc, R_MEM_MODE);
1575 mode |= M_MAM_ENABLE; /* enable memory arbiter */
1576 WRITECSR(sc, R_MEM_MODE, mode);
1578 /* Assume no external SRAM for now (10) */
1580 /* Set up MHC for endianness and write enables (11-15) */
1581 mhc = READCSR(sc, R_MISC_HOST_CTRL);
1582 /* Since we use match-bits for Direct PCI access, don't swap bytes. */
1583 mhc &= ~M_MHC_ENBYTESWAP;
1584 #ifdef __MIPSEL
1585 mhc |= M_MHC_ENWORDSWAP;
1586 #endif
1587 #ifdef __MIPSEB
1588 #if PIOSWAP
1589 mhc |= M_MHC_ENWORDSWAP;
1590 #endif
1591 #endif
1592 mhc |= M_MHC_ENINDIRECT | M_MHC_ENPCISTATERW | M_MHC_ENCLKCTRLRW;
1593 WRITECSR(sc, R_MISC_HOST_CTRL, mhc);
1595 /* Set byte swapping (16, 17) */
1596 mcr = READCSR(sc, R_MODE_CTRL);
1597 #ifdef __MIPSEL
1598 mcr |= M_MCTL_BSWAPDATA | M_MCTL_WSWAPDATA;
1599 mcr |= M_MCTL_WSWAPCTRL;
1600 #endif
1601 #ifdef __MIPSEB
1602 #if MATCH_BYTES
1603 mcr |= M_MCTL_BSWAPDATA | M_MCTL_WSWAPDATA;
1604 mcr |= M_MCTL_BSWAPCTRL | M_MCTL_WSWAPCTRL;
1605 #else
1606 mcr &= ~(M_MCTL_BSWAPCTRL | M_MCTL_BSWAPDATA);
1607 mcr |= M_MCTL_WSWAPCTRL | M_MCTL_WSWAPDATA;
1608 #endif
1609 #endif
1610 WRITECSR(sc, R_MODE_CTRL, mcr);
1612 /* no firmware in BCM4785 */
1613 if (!(sc->flags & T3_SB_CORE)) {
1614 /* Disable PXE restart, wait for firmware (18, 19) */
1615 for (timeout = 2 * CFE_HZ; timeout > 0; timeout--) {
1616 WRITECSR(sc, R_MEMWIN_BASE_ADDR, A_PXE_MAILBOX);
1617 magic = READCSR(sc, R_MEMWIN_DATA);
1619 if (magic == ~T3_MAGIC_NUMBER)
1620 break;
1622 cfe_sleep(1);
1625 if (timeout == 0)
1626 xprintf("bcm5700: no firmware rendezvous\n");
1629 WRITECSR(sc, R_MEMWIN_BASE_ADDR, 0); /* restore default memory window */
1632 /* Clear Ethernet MAC Mode (20) */
1633 WRITECSR(sc, R_MAC_MODE, 0x00000000);
1634 (void)READCSR(sc, R_MAC_MODE);
1635 cfe_usleep(40);
1637 /* Restore remaining config registers (21) */
1638 pci_conf_write(sc->tag, PCI_BHLC_REG, bhlc);
1639 pci_conf_write(sc->tag, PCI_SUBSYS_ID_REG, subsysid);
1641 return(0);
1644 static int
1645 t3_warmreset(t3_ether_t *sc)
1647 uint32_t mode;
1649 /* Enable memory arbiter (9) */
1650 mode = READCSR(sc, R_MEM_MODE);
1651 mode |= M_MAM_ENABLE; /* enable memory arbiter */
1652 WRITECSR(sc, R_MEM_MODE, mode);
1654 /* Clear Ethernet MAC Mode (20) */
1655 WRITECSR(sc, R_MAC_MODE, 0x00000000);
1657 return 0;
1661 static int
1662 t3_init_registers(t3_ether_t *sc)
1664 unsigned offset;
1665 uint32_t dmac, mcr, mcfg;
1667 /* Steps 22-29 */
1669 /* Clear MAC statistics block (22) */
1670 if(!(sc->device == K_PCI_ID_BCM5705 ||
1671 sc->device == K_PCI_ID_BCM5750)) {
1672 for (offset = A_MAC_STATS; offset < A_MAC_STATS+L_MAC_STATS; offset += 4) {
1673 WRITEMEM(sc, offset, 0);
1677 /* Clear driver status memory region (23) */
1678 /* ASSERT (sizeof(t3_status_t) == L_MAC_STATUS) */
1679 memset((uint8_t *)sc->status, 0, sizeof(t3_status_t));
1681 /* Set up PCI DMA control (24) */
1682 dmac = READCSR(sc, R_DMA_RW_CTRL);
1683 dmac &= ~(M_DMAC_RDCMD | M_DMAC_WRCMD | M_DMAC_MINDMA);
1684 dmac |= V_DMAC_RDCMD(K_PCI_MEMRD) | V_DMAC_WRCMD(K_PCI_MEMWR);
1685 switch (sc->device) {
1686 case K_PCI_ID_BCM5700:
1687 case K_PCI_ID_BCM5701:
1688 case K_PCI_ID_BCM5702:
1689 dmac |= V_DMAC_MINDMA(0xF); /* "Recommended" */
1690 break;
1691 default:
1692 dmac |= V_DMAC_MINDMA(0x0);
1693 break;
1695 if (sc->flags & T3_SB_CORE) {
1696 if ((sc->sih->chip == BCM4785_CHIP_ID) && (sc->sih->chiprev < 2))
1697 dmac |= V_DMAC_ONEDMA(1);
1699 WRITECSR(sc, R_DMA_RW_CTRL, dmac);
1701 mcr = READCSR(sc, R_MODE_CTRL);
1702 #ifdef __MIPSEL
1703 mcr |= M_MCTL_BSWAPDATA | M_MCTL_WSWAPDATA;
1704 mcr |= M_MCTL_WSWAPCTRL;
1705 #endif
1706 #ifdef __MIPSEB
1707 #if MATCH_BYTES
1708 mcr |= M_MCTL_BSWAPDATA | M_MCTL_WSWAPDATA;
1709 mcr |= M_MCTL_BSWAPCTRL | M_MCTL_WSWAPCTRL;
1710 #else
1711 mcr &= ~(M_MCTL_BSWAPCTRL | M_MCTL_BSWAPDATA);
1712 mcr |= M_MCTL_WSWAPCTRL | M_MCTL_WSWAPDATA;
1713 #endif
1714 #endif
1715 WRITECSR(sc, R_MODE_CTRL, mcr);
1717 /* Configure host rings (26) */
1718 mcr |= M_MCTL_HOSTBDS;
1719 WRITECSR(sc, R_MODE_CTRL, mcr);
1721 /* Indicate driver ready, disable checksums (27, 28) */
1722 mcr |= M_MCTL_HOSTUP;
1723 mcr |= (M_MCTL_NOTXPHSUM | M_MCTL_NORXPHSUM);
1724 WRITECSR(sc, R_MODE_CTRL, mcr);
1726 /* Configure timer (29) */
1727 mcfg = READCSR(sc, R_MISC_CFG);
1728 mcfg &= ~M_MCFG_PRESCALER;
1729 mcfg |= V_MCFG_PRESCALER(66-1); /* 66 MHz */
1730 WRITECSR(sc, R_MISC_CFG, mcfg);
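/* For reference: a prescaler value of (66-1) divides the 66 MHz core clock
   down to a 1 MHz local timer (1 usec ticks); on that assumption the
   R_STATS_TICKS value of 1000000 programmed in t3_init_coalescing works out
   to a statistics update roughly once per second. */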
1732 return 0;
1735 static int
1736 t3_init_pools(t3_ether_t *sc)
1738 uint32_t mode;
1739 int timeout;
1741 /* Steps 30-36. These use "recommended" settings (p 150) */
1743 /* Configure the MAC memory pool (30) */
1744 if(!(sc->device == K_PCI_ID_BCM5705 ||
1745 sc->device == K_PCI_ID_BCM5750))
1747 WRITECSR(sc, R_BMGR_MBUF_BASE, A_BUFFER_POOL);
1748 WRITECSR(sc, R_BMGR_MBUF_LEN, L_BUFFER_POOL);
1750 else
1752 /* Note: manual appears to recommend not even writing these (?) */
1753 /* WRITECSR(sc, R_BMGR_MBUF_BASE, A_RXMBUF); */
1754 /* WRITECSR(sc, R_BMGR_MBUF_LEN, 0x8000); */
1757 /* Configure the MAC DMA resource pool (31) */
1758 WRITECSR(sc, R_BMGR_DMA_BASE, A_DMA_DESCS);
1759 WRITECSR(sc, R_BMGR_DMA_LEN, L_DMA_DESCS);
1761 /* Configure the MAC memory watermarks (32) */
1762 if(sc->device == K_PCI_ID_BCM5705 ||
1763 sc->device == K_PCI_ID_BCM5750)
1765 WRITECSR(sc, R_BMGR_MBUF_DMA_LOW, 0x0);
1766 WRITECSR(sc, R_BMGR_MBUF_RX_LOW, 0x10);
1767 WRITECSR(sc, R_BMGR_MBUF_HIGH, 0x60);
1769 else
1771 WRITECSR(sc, R_BMGR_MBUF_DMA_LOW, 0x50);
1772 WRITECSR(sc, R_BMGR_MBUF_RX_LOW, 0x20);
1773 WRITECSR(sc, R_BMGR_MBUF_HIGH, 0x60);
1776 /* Configure the DMA resource watermarks (33) */
1777 WRITECSR(sc, R_BMGR_DMA_LOW, 5);
1778 WRITECSR(sc, R_BMGR_DMA_HIGH, 10);
1780 /* Enable the buffer manager (34, 35) */
1781 mode = READCSR(sc, R_BMGR_MODE);
1782 mode |= (M_BMODE_ENABLE | M_BMODE_MBUFLOWATTN);
1783 WRITECSR(sc, R_BMGR_MODE, mode);
1784 for (timeout = CFE_HZ/2; timeout > 0; timeout -= CFE_HZ/10) {
1785 mode = READCSR(sc, R_BMGR_MODE);
1786 if ((mode & M_BMODE_ENABLE) != 0)
1787 break;
1788 cfe_sleep(CFE_HZ/10);
1790 if ((mode & M_BMODE_ENABLE) == 0)
1791 xprintf("bcm5700: buffer manager not enabled\n");
1793 /* Enable internal queues (36) */
1794 WRITECSR(sc, R_FTQ_RESET, 0xFFFFFFFF);
1795 cfe_sleep(1);
1796 WRITECSR(sc, R_FTQ_RESET, 0x00000000);
1798 return(0);
1801 static int
1802 t3_init_rings(t3_ether_t *sc)
1804 unsigned rcbp;
1805 int i;
1807 /* Steps 37-46 */
1809 /* Initialize RCBs for Standard Receive Buffer Ring (37) */
1810 WRITECSR(sc, R_STD_RCV_BD_RCB+RCB_HOST_ADDR_HIGH, 0);
1811 WRITECSR(sc, R_STD_RCV_BD_RCB+RCB_HOST_ADDR_LOW, PTR_TO_PCI(sc->rxp_std));
1812 WRITECSR(sc, R_STD_RCV_BD_RCB+RCB_NIC_ADDR, A_STD_RCV_RINGS);
1813 if(sc->device == K_PCI_ID_BCM5705 ||
1814 sc->device == K_PCI_ID_BCM5750)
1816 WRITECSR(sc, R_STD_RCV_BD_RCB+RCB_CTRL, V_RCB_MAXLEN(512));
1818 else
1820 WRITECSR(sc, R_STD_RCV_BD_RCB+RCB_CTRL, V_RCB_MAXLEN(ETH_PKTBUF_LEN));
1823 /* Disable RCBs for Jumbo and Mini Receive Buffer Rings (38,39) */
1824 if(!(sc->device == K_PCI_ID_BCM5705 ||
1825 sc->device == K_PCI_ID_BCM5750))
1827 WRITECSR(sc, R_JUMBO_RCV_BD_RCB+RCB_CTRL,
1828 RCB_FLAG_USE_EXT_RCV_BD | RCB_FLAG_RING_DISABLED);
1829 WRITECSR(sc, R_MINI_RCV_BD_RCB+RCB_CTRL, RCB_FLAG_RING_DISABLED);
1832 /* Set BD ring replenish thresholds (40) */
1833 WRITECSR(sc, R_MINI_RCV_BD_THRESH, 128);
1834 #if T3_BRINGUP
1835 WRITECSR(sc, R_STD_RCV_BD_THRESH, 1);
1836 #else
1837 WRITECSR(sc, R_STD_RCV_BD_THRESH, 25);
1838 #endif
1839 WRITECSR(sc, R_JUMBO_RCV_BD_THRESH, 16);
1841 /* Disable all send producer rings (41) */
1842 if(!(sc->device == K_PCI_ID_BCM5705 ||
1843 sc->device == K_PCI_ID_BCM5750))
1845 for (rcbp = A_SND_RCB(1); rcbp <= A_SND_RCB(16); rcbp += RCB_SIZE)
1846 WRITEMEM(sc, rcbp+RCB_CTRL, RCB_FLAG_RING_DISABLED);
1849 /* Initialize send producer index registers (42) */
1850 for (i = 1; i <= TXP_MAX_RINGS; i++) {
1851 WRITEMBOX(sc, R_SND_BD_PI(i), 0);
1852 WRITEMBOX(sc, R_SND_BD_NIC_PI(i), 0);
1855 /* Initialize send producer ring 1 (43) */
1856 WRITEMEM(sc, A_SND_RCB(1)+RCB_HOST_ADDR_HIGH, 0);
1857 WRITEMEM(sc, A_SND_RCB(1)+RCB_HOST_ADDR_LOW, PTR_TO_PCI(sc->txp_1));
1858 WRITEMEM(sc, A_SND_RCB(1)+RCB_CTRL, V_RCB_MAXLEN(TXP_RING_ENTRIES));
1859 if (!(sc->device == K_PCI_ID_BCM5705 ||
1860 sc->device == K_PCI_ID_BCM5750))
1861 WRITEMEM(sc, A_SND_RCB(1)+RCB_NIC_ADDR, A_SND_RINGS);
1863 /* Disable unused receive return rings (44) */
1864 for (rcbp = A_RTN_RCB(1); rcbp <= A_RTN_RCB(16); rcbp += RCB_SIZE)
1865 WRITEMEM(sc, rcbp+RCB_CTRL, RCB_FLAG_RING_DISABLED);
1867 /* Initialize receive return ring 1 (45) */
1868 WRITEMEM(sc, A_RTN_RCB(1)+RCB_HOST_ADDR_HIGH, 0);
1869 WRITEMEM(sc, A_RTN_RCB(1)+RCB_HOST_ADDR_LOW, PTR_TO_PCI(sc->rxr_1));
1870 WRITEMEM(sc, A_RTN_RCB(1)+RCB_CTRL, V_RCB_MAXLEN(sc->rxr_entries));
1871 WRITEMEM(sc, A_RTN_RCB(1)+RCB_NIC_ADDR, 0x0000);
1873 /* Initialize receive producer ring mailboxes (46) */
1874 WRITEMBOX(sc, R_RCV_BD_STD_PI, 0);
1875 WRITEMBOX(sc, R_RCV_BD_JUMBO_PI, 0);
1876 WRITEMBOX(sc, R_RCV_BD_MINI_PI, 0);
1878 return(0);
1881 static int
1882 t3_configure_mac(t3_ether_t *sc)
1884 uint32_t low, high;
1885 uint32_t seed;
1886 int i;
1888 /* Steps 47-52 */
1890 /* Configure the MAC unicast address (47) */
1891 high = (sc->hwaddr[0] << 8) | (sc->hwaddr[1]);
1892 low = ((sc->hwaddr[2] << 24) | (sc->hwaddr[3] << 16)
1893 | (sc->hwaddr[4] << 8) | sc->hwaddr[5]);
1894 /* For now, use a single MAC address */
1895 WRITECSR(sc, R_MAC_ADDR1_HIGH, high); WRITECSR(sc, R_MAC_ADDR1_LOW, low);
1896 WRITECSR(sc, R_MAC_ADDR2_HIGH, high); WRITECSR(sc, R_MAC_ADDR2_LOW, low);
1897 WRITECSR(sc, R_MAC_ADDR3_HIGH, high); WRITECSR(sc, R_MAC_ADDR3_LOW, low);
1898 WRITECSR(sc, R_MAC_ADDR4_HIGH, high); WRITECSR(sc, R_MAC_ADDR4_LOW, low);
1900 /* Configure the random backoff seed (48) */
1901 seed = 0;
1902 for (i = 0; i < 6; i++)
1903 seed += sc->hwaddr[i];
1904 seed &= 0x3FF;
1905 WRITECSR(sc, R_TX_BACKOFF, seed);
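/* Worked example (hypothetical MAC address, for illustration only): for
   00:10:18:2F:60:91 the byte sum is 0x148, and 0x148 & 0x3FF = 0x148, so
   that value would be written as the random backoff seed. */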
1907 /* Configure the MTU (49) */
1908 WRITECSR(sc, R_RX_MTU, MAX_ETHER_PACK+VLAN_TAG_LEN);
1910 /* Configure the tx IPG (50) */
1911 WRITECSR(sc, R_TX_LENS,
1912 V_TXLEN_SLOT(0x20) | V_TXLEN_IPG(0x6) | V_TXLEN_IPGCRS(0x2));
1914 /* Configure the default rx return ring 1 (51) */
1915 WRITECSR(sc, R_RX_RULES_CFG, V_RULESCFG_DEFAULT(1));
1917 /* Configure the receive lists and enable statistics (52) */
1918 WRITECSR(sc, R_RCV_LIST_CFG,
1919 V_LISTCFG_GROUP(1) | V_LISTCFG_ACTIVE(1) | V_LISTCFG_BAD(1));
1920 /* was V_LISTCFG_DEFAULT(1) | V_LISTCFG_ACTIVE(16) | V_LISTCFG_BAD(1) */
1922 return 0;
1925 static int
1926 t3_enable_stats(t3_ether_t *sc)
1928 uint32_t ctrl;
1930 /* Steps 53-56 */
1932 /* Enable rx stats (53,54) */
1933 WRITECSR(sc, R_RCV_LIST_STATS_ENB, 0xFFFFFF);
1934 ctrl = READCSR(sc, R_RCV_LIST_STATS_CTRL);
1935 ctrl |= M_STATS_ENABLE;
1936 WRITECSR(sc, R_RCV_LIST_STATS_CTRL, ctrl);
1938 /* Enable tx stats (55,56) */
1939 WRITECSR(sc, R_SND_DATA_STATS_ENB, 0xFFFFFF);
1940 ctrl = READCSR(sc, R_SND_DATA_STATS_CTRL);
1941 ctrl |= (M_STATS_ENABLE | M_STATS_FASTUPDATE);
1942 WRITECSR(sc, R_SND_DATA_STATS_CTRL, ctrl);
1944 return 0;
1947 static int
1948 t3_init_coalescing(t3_ether_t *sc)
1950 uint32_t mode = 0;
1951 int timeout;
1953 /* Steps 57-68 */
1955 /* Disable the host coalescing engine (57, 58) */
1956 WRITECSR(sc, R_HOST_COAL_MODE, 0);
1957 for (timeout = CFE_HZ/2; timeout > 0; timeout -= CFE_HZ/10) {
1958 mode = READCSR(sc, R_HOST_COAL_MODE);
1959 if (mode == 0)
1960 break;
1961 cfe_sleep(CFE_HZ/10);
1963 if (mode != 0)
1964 xprintf("bcm5700: coalescing engine not disabled\n");
1966 /* Set coalescing parameters (59-62) */
1967 #if T3_BRINGUP
1968 WRITECSR(sc, R_RCV_COAL_TICKS, 0);
1969 WRITECSR(sc, R_RCV_COAL_MAX_CNT, 1);
1970 #else
1971 WRITECSR(sc, R_RCV_COAL_TICKS, 150);
1972 WRITECSR(sc, R_RCV_COAL_MAX_CNT, 10);
1973 #endif
1974 if(!(sc->device == K_PCI_ID_BCM5705 ||
1975 sc->device == K_PCI_ID_BCM5750))
1976 WRITECSR(sc, R_RCV_COAL_INT_TICKS, 0);
1977 WRITECSR(sc, R_RCV_COAL_INT_CNT, 0);
1978 #if T3_BRINGUP
1979 WRITECSR(sc, R_SND_COAL_TICKS, 0);
1980 WRITECSR(sc, R_SND_COAL_MAX_CNT, 1);
1981 #else
1982 WRITECSR(sc, R_SND_COAL_TICKS, 150);
1983 WRITECSR(sc, R_SND_COAL_MAX_CNT, 10);
1984 #endif
1985 if(!(sc->device == K_PCI_ID_BCM5705 ||
1986 sc->device == K_PCI_ID_BCM5750))
1987 WRITECSR(sc, R_SND_COAL_INT_TICKS, 0);
1988 WRITECSR(sc, R_SND_COAL_INT_CNT, 0);
1990 /* Initialize host status block address (63) */
1991 WRITECSR(sc, R_STATUS_HOST_ADDR, 0);
1992 WRITECSR(sc, R_STATUS_HOST_ADDR+4, PTR_TO_PCI(sc->status));
1994 if(!(sc->device == K_PCI_ID_BCM5705 ||
1995 sc->device == K_PCI_ID_BCM5750))
1997 /* Initialize host statistics block address (64) */
1998 WRITECSR(sc, R_STATS_HOST_ADDR, 0);
1999 WRITECSR(sc, R_STATS_HOST_ADDR+4, PTR_TO_PCI(sc->stats));
2001 /* Set statistics block NIC address and tick count (65, 66) */
2002 WRITECSR(sc, R_STATS_TICKS, 1000000);
2003 WRITECSR(sc, R_STATS_BASE_ADDR, A_MAC_STATS);
2005 /* Set status block NIC address (67) */
2006 WRITECSR(sc, R_STATUS_BASE_ADDR, A_MAC_STATUS);
2009 /* Select the status block transfer size. */
2010 if (sc->device == K_PCI_ID_BCM5700)
2011 mode = 0; /* Truncated transfers not supported */
2012 else
2013 mode = V_HCM_SBSIZE(STATUS_BLOCK_SIZE(MAX_RI));
2015 /* Enable the host coalescing engine (68) */
2016 mode |= M_HCM_ENABLE;
2017 WRITECSR(sc, R_HOST_COAL_MODE, mode);
2019 return(0);
2022 static int
2023 t3_init_dma(t3_ether_t *sc)
2025 uint32_t mode;
2027 /* Steps 69-87 */
2029 /* Enable receive BD completion, placement, and selector blocks (69-71) */
2030 WRITECSR(sc, R_RCV_BD_COMP_MODE, M_MODE_ENABLE | M_MODE_ATTNENABLE);
2031 WRITECSR(sc, R_RCV_LIST_MODE, M_MODE_ENABLE);
2032 if(!(sc->device == K_PCI_ID_BCM5705 ||
2033 sc->device == K_PCI_ID_BCM5750))
2035 WRITECSR(sc, R_RCV_LIST_SEL_MODE, M_MODE_ENABLE | M_MODE_ATTNENABLE);
2038 /* Enable DMA engines, enable and clear statistics (72, 73) */
2039 mode = READCSR(sc, R_MAC_MODE);
2040 mode |= (M_MACM_FHDEENB | M_MACM_RDEENB | M_MACM_TDEENB |
2041 M_MACM_RXSTATSENB | M_MACM_RXSTATSCLR |
2042 M_MACM_TXSTATSENB | M_MACM_TXSTATSCLR);
2044 WRITECSR(sc, R_MAC_MODE, (mode | M_MACM_RXSTATSCLR |M_MACM_TXSTATSCLR) );
2046 if(!(sc->flags & T3_NO_PHY))
2048 #if T3_AUTOPOLL
2049 WRITECSR(sc, R_MISC_LOCAL_CTRL, M_MLCTL_INTATTN);
2050 #endif
2053 /* Configure GPIOs (74) - skipped */
2055 /* Clear interrupt mailbox (75) */
2056 WRITEMBOX(sc, R_INT_MBOX(0), 0);
2058 /* Enable DMA completion block (76) */
2059 if(!(sc->device == K_PCI_ID_BCM5705 ||
2060 sc->device == K_PCI_ID_BCM5750))
2062 WRITECSR(sc, R_DMA_COMP_MODE, M_MODE_ENABLE);
2065 /* Configure write and read DMA modes (77, 78) */
2066 WRITECSR(sc, R_WR_DMA_MODE, M_MODE_ENABLE | M_ATTN_ALL);
2067 WRITECSR(sc, R_RD_DMA_MODE, M_MODE_ENABLE | M_ATTN_ALL);
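    /* M_ATTN_ALL presumably turns on every attention (error) source in the
       write and read DMA engines, so DMA faults surface through the MAC
       attention status instead of passing silently. */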
2069 return(0);
2072 static int
2073 t3_init_enable(t3_ether_t *sc)
2075 uint32_t mhc;
2076 uint32_t pmcs;
2077 #if T3_AUTOPOLL
2078 uint32_t mode, mask;
2079 #endif
2080 int i;
2082 /* Steps 79-97 */
2084 /* Enable completion functional blocks (79-82) */
2085 WRITECSR(sc, R_RCV_COMP_MODE, M_MODE_ENABLE | M_MODE_ATTNENABLE);
2086 if(!(sc->device == K_PCI_ID_BCM5705 ||
2087 sc->device == K_PCI_ID_BCM5750))
2089 WRITECSR(sc, R_MBUF_FREE_MODE, M_MODE_ENABLE);
2091 WRITECSR(sc, R_SND_DATA_COMP_MODE, M_MODE_ENABLE);
2092 WRITECSR(sc, R_SND_BD_COMP_MODE, M_MODE_ENABLE | M_MODE_ATTNENABLE);
2094 /* Enable initiator functional blocks (83-86) */
2095 WRITECSR(sc, R_RCV_BD_INIT_MODE, M_MODE_ENABLE | M_MODE_ATTNENABLE);
2096 WRITECSR(sc, R_RCV_DATA_INIT_MODE, M_MODE_ENABLE | M_RCVINITMODE_RTNSIZE);
2097 WRITECSR(sc, R_SND_DATA_MODE, M_MODE_ENABLE);
2098 WRITECSR(sc, R_SND_BD_INIT_MODE, M_MODE_ENABLE | M_MODE_ATTNENABLE);
2100 /* Enable the send BD selector (87) */
2101 WRITECSR(sc, R_SND_BD_SEL_MODE, M_MODE_ENABLE | M_MODE_ATTNENABLE);
2103 /* Download firmware (88) - skipped */
2105 /* Enable the MAC (89,90) */
2106 WRITECSR(sc, R_TX_MODE, M_MODE_ENABLE); /* optional flow control */
2107 WRITECSR(sc, R_RX_MODE, M_MODE_ENABLE); /* other options */
2109 /* Disable auto-polling (91) */
2110 mii_access_init(sc);
2112 /* Configure power state (92) */
2113 pmcs = READCSR(sc, PCI_PMCSR_REG);
2114 pmcs &= ~PCI_PMCSR_STATE_MASK;
2115 pmcs |= PCI_PMCSR_STATE_D0;
2116 WRITECSR(sc, PCI_PMCSR_REG, pmcs);
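    /* Force the device into power state D0 (fully on) through the standard
       PCI power-management control/status register before the MAC is used. */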
2118 /* Some chips require a little time to power up */
2119 cfe_sleep(1);
2121 if(!(sc->flags & T3_NO_PHY))
2123 #if T3_AUTOPOLL
2124 /* Program hardware LED control (93) */
2125 WRITECSR(sc, R_MAC_LED_CTRL, 0x00); /* LEDs at PHY layer */
2126 #endif
2128 #if T3_AUTOPOLL
2129 /* Ack/clear link change events */
2130 WRITECSR(sc, R_MAC_STATUS, M_LINKCHNG_CLR);
2131 WRITECSR(sc, R_MI_STATUS, 0);
2133 /* Enable autopolling */
2134 mode = READCSR(sc, R_MI_MODE);
2135 mode &= ~(0x1f << 16);
2136 mode |= M_MIMODE_POLLING | (0x0c << 16);
2137 WRITECSR(sc, R_MI_MODE, mode);
2139 /* Enable link state attentions */
2140 mask = READCSR(sc, R_MAC_EVENT_ENB);
2141 mask |= M_EVT_LINKCHNG;
2142 WRITECSR(sc, R_MAC_EVENT_ENB, mask);
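    /* With autopolling the MAC reads the PHY status registers on its own and
       reports link transitions through the MAC event/attention mechanism, so
       the host does not have to drive MII management cycles after this point. */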
2143 #else
2144 /* Initialize link (94) */
2145 WRITECSR(sc, R_MI_STATUS, M_MISTAT_LINKED);
2147 /* Start autonegotiation (95) - see t3_initlink below */
2149 /* Setup multicast filters (96) */
2150 for (i = 0; i < 4; i++)
2151 WRITECSR(sc, R_MAC_HASH(i), 0);
2152 #endif /* T3_AUTOPOLL */
2154 else
2156 /* Initialize link (94) */
2157 WRITECSR(sc, R_MI_STATUS, M_MISTAT_LINKED);
2159 /* Start autonegotiation (95) - see t3_initlink below */
2161 /* Setup multicast filters (96) */
2162 for (i = 0; i < 4; i++)
2163 WRITECSR(sc, R_MAC_HASH(i), 0);
2166 /* Enable interrupts (97) */
2167 mhc = READCSR(sc, R_MISC_HOST_CTRL);
2168 mhc &= ~M_MHC_MASKPCIINT;
2169 WRITECSR(sc, R_MISC_HOST_CTRL, mhc);
2171 if ((sc->flags & T3_NO_PHY))
2172 cfe_sleep(1);
2174 return(0);
2177 static void
2178 t3_initlink(t3_ether_t *sc)
2180 uint32_t mcr;
2182 if (!(sc->flags & T3_NO_PHY))
2184 sc->phy_addr = mii_probe(sc);
2185 if (sc->phy_addr < 0)
2187 xprintf("%s: no PHY found\n", t3_devname(sc));
2188 return;
2190 #if T3_DEBUG
2191 xprintf("%s: PHY addr %d\n", t3_devname(sc), sc->phy_addr);
2192 #endif
2194 if (1)
2195 mii_autonegotiate(sc);
2196 else
2197 mii_set_speed(sc, ETHER_SPEED_10HDX);
2200    /* Change the 5461 PHY INTR/ENERGYDET LED pin to function as ENERGY DET
2201       by writing shadow control register 0x1c with the value 00100 | masks. */
2203    mii_write_shadow_register(sc, MII_SHADOW, (SHDW_SPR_CTRL | SHDW_NRG_DET));
2205 mii_enable_interrupts(sc);
2207 mcr = READCSR(sc, R_MODE_CTRL);
2208 mcr |= M_MCTL_MACINT;
2209 WRITECSR(sc, R_MODE_CTRL, mcr);
2211 else
2213 /* T3_NO_PHY means there is a ROBO switch, configure it */
2214 robo_info_t *robo;
2216 robo = bcm_robo_attach(sc->sih, sc, NULL,
2217 (miird_f)mii_read_register, (miiwr_f)mii_write_register);
2218 if (robo == NULL) {
2219 xprintf("robo_setup: failed to attach robo switch \n");
2220 goto robo_fail;
2223 if (robo->devid == DEVID5325)
2225 t3_force_speed(sc, ETHER_SPEED_100FDX);
2227 else
2229 t3_force_speed(sc, ETHER_SPEED_1000FDX);
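    /* With a ROBO switch on the MAC's MII/RGMII port there is no link partner
       to autonegotiate with, so the MAC is forced to a fixed speed and duplex
       matching the switch's CPU port: 100FDX for the 5325 family, 1000FDX
       otherwise. */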
2232 if (bcm_robo_enable_device(robo)) {
2233 xprintf("robo_setup: failed to enable robo switch \n");
2234 goto robo_fail;
2237 /* Configure the switch to do VLAN */
2238 if (bcm_robo_config_vlan(robo, sc->hwaddr)) {
2239 xprintf("robo_setup: robo_config_vlan failed\n");
2240 goto robo_fail;
2243 /* Enable the switch */
2244 if (bcm_robo_enable_switch(robo)) {
2245 xprintf("robo_setup: robo_enable_switch failed\n");
2246 robo_fail:
2247 bcm_robo_detach(robo);
2251 sc->mii_polling = 0;
2252 sc->phy_change = 0;
2255 static void
2256 t3_shutdownlink(t3_ether_t *sc)
2258 uint32_t mcr;
2260 mcr = READCSR(sc, R_MODE_CTRL);
2261 mcr &= ~M_MCTL_MACINT;
2262 WRITECSR(sc, R_MODE_CTRL, mcr);
2264 WRITECSR(sc, R_MAC_EVENT_ENB, 0);
2266 /* The manual is fuzzy about what to do with the PHY at this
2267 point. Empirically, resetting the 5705 PHY (but not others)
2268 will cause it to get stuck in 10/100 MII mode. */
2269 if (!(sc->flags & T3_NO_PHY))
2271 if (sc->device != K_PCI_ID_BCM5705)
2272 mii_write_register(sc, sc->phy_addr, MII_BMCR, BMCR_RESET);
2274 sc->mii_polling = 0;
2275 sc->phy_change = 0;
2280 static void
2281 t3_hwinit(t3_ether_t *sc)
2283 if (sc->state != eth_state_on) {
2285 if (sc->state == eth_state_uninit) {
2286 t3_coldreset(sc);
2288 else
2289 t3_warmreset(sc);
2291 t3_init_registers(sc);
2292 t3_init_pools(sc);
2293 t3_init_rings(sc);
2294 t3_configure_mac(sc);
2295 t3_enable_stats(sc);
2296 t3_init_coalescing(sc);
2297 t3_init_dma(sc);
2298 t3_init_enable(sc);
2299 #if T3_DEBUG
2300 dumpcsrs(sc, "end init");
2301 #else
2302 (void)dumpcsrs;
2303 #endif
2305 eeprom_access_init(sc);
2306 #if T3_DEBUG
2308 uint32_t eeprom[0x100/4];
2309 int i;
2311 cfe_sleep(1);
2312 for (i = 0; i < 4; i++) {
2313 eeprom_read_range(sc, 0, 4, eeprom);
2316 eeprom_read_range(sc, 0, sizeof(eeprom), eeprom);
2317 eeprom_dump_range("Boot Strap", eeprom, 0x00, 20);
2318 eeprom_dump_range("Manufacturing Info", eeprom, 0x74, 140);
2320 #else
2321 (void)eeprom_read_range;
2322 (void)eeprom_dump_range;
2323 #endif
2325 t3_initlink(sc);
2327 sc->state = eth_state_off;
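    /* The hardware is now initialized but left administratively off;
       t3_start() (reached from the open entry point) attaches the ISR or sets
       up polling, posts receive buffers, and only then marks the interface on. */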
2332 static void
2333 t3_hwshutdown(t3_ether_t *sc)
2335 /* Receive path shutdown */
2336 t3_clear(sc, R_RX_MODE, M_MODE_ENABLE);
2337 t3_clear(sc, R_RCV_BD_INIT_MODE, M_MODE_ENABLE);
2338 t3_clear(sc, R_RCV_LIST_MODE, M_MODE_ENABLE);
2339 if(!(sc->device == K_PCI_ID_BCM5705 ||
2340 sc->device == K_PCI_ID_BCM5750))
2342 t3_clear(sc, R_RCV_LIST_SEL_MODE, M_MODE_ENABLE);
2344 t3_clear(sc, R_RCV_DATA_INIT_MODE, M_MODE_ENABLE);
2345 t3_clear(sc, R_RCV_COMP_MODE, M_MODE_ENABLE);
2346 t3_clear(sc, R_RCV_BD_COMP_MODE, M_MODE_ENABLE);
2348 /* Transmit path shutdown */
2349 t3_clear(sc, R_SND_BD_SEL_MODE, M_MODE_ENABLE);
2350 t3_clear(sc, R_SND_BD_INIT_MODE, M_MODE_ENABLE);
2351 t3_clear(sc, R_SND_DATA_MODE, M_MODE_ENABLE);
2352 t3_clear(sc, R_RD_DMA_MODE, M_MODE_ENABLE);
2353 t3_clear(sc, R_SND_DATA_COMP_MODE, M_MODE_ENABLE);
2354 if(!(sc->device == K_PCI_ID_BCM5705 ||
2355 sc->device == K_PCI_ID_BCM5750))
2357 t3_clear(sc, R_DMA_COMP_MODE, M_MODE_ENABLE);
2359 t3_clear(sc, R_SND_BD_COMP_MODE, M_MODE_ENABLE);
2360 t3_clear(sc, R_TX_MODE, M_MODE_ENABLE);
2362 /* Memory shutdown */
2363 t3_clear(sc, R_HOST_COAL_MODE, M_HCM_ENABLE);
2364 t3_clear(sc, R_WR_DMA_MODE, M_MODE_ENABLE);
2365 if(!(sc->device == K_PCI_ID_BCM5705 ||
2366 sc->device == K_PCI_ID_BCM5750))
2368 t3_clear(sc, R_MBUF_FREE_MODE, M_MODE_ENABLE);
2370 WRITECSR(sc, R_FTQ_RESET, 0xFFFFFFFF);
2371 cfe_sleep(1);
2372 WRITECSR(sc, R_FTQ_RESET, 0x00000000);
2373 t3_clear(sc, R_BMGR_MODE, M_BMODE_ENABLE);
2374 t3_clear(sc, R_MEM_MODE, M_MAM_ENABLE);
2376 t3_shutdownlink(sc);
2378 t3_coldreset(sc);
2380 sc->state = eth_state_uninit;
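    /* Shutdown runs roughly in reverse of bring-up: receive path first, then
       transmit, then the FTQs, buffer manager and memory arbiter, finishing
       with a cold reset so the next t3_hwinit() starts from a clean state. */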
2384 static void
2385 t3_isr(void *arg)
2387 t3_ether_t *sc = (t3_ether_t *)arg;
2388 volatile t3_status_t *status = sc->status;
2389 uint32_t mac_status;
2390 int handled;
2392 do {
2393 WRITEMBOX(sc, R_INT_MBOX(0), 1);
2395 handled = 0;
2396 mac_status = READCSR(sc, R_MAC_STATUS); /* force ordering */
2397 status->status &= ~M_STATUS_UPDATED;
2399 if (status->index[RI(1)].return_p != sc->rxr_1_index) {
2400 handled = 1;
2401 if (IPOLL) sc->rx_interrupts++;
2402 t3_procrxring(sc);
2405 if (status->index[RI(1)].send_c != sc->txc_1_index) {
2406 handled = 1;
2407 if (IPOLL) sc->tx_interrupts++;
2408 t3_proctxring(sc);
2411 if ((status->status & M_STATUS_LINKCHNG) != 0) {
2412 handled = 1;
2414 if (!(sc->flags & T3_NO_PHY))
2416 #if T3_AUTOPOLL
2417 WRITECSR(sc, R_MAC_STATUS, M_LINKCHNG_CLR);
2418 #endif
2421 WRITECSR(sc, R_MAC_STATUS, M_EVT_MICOMPLETE);
2423 status->status &= ~M_STATUS_LINKCHNG;
2424 sc->phy_change = 1;
2427 WRITEMBOX(sc, R_INT_MBOX(0), 0);
2428 (void)READMBOX(sc, R_INT_MBOX(0)); /* push */
2430 #if !XPOLL
2431 if (!handled)
2432 sc->bogus_interrupts++;
2433 #endif
2435 } while ((status->status & M_STATUS_UPDATED) != 0);
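    /* The mailbox write of 1 above tells the chip the host is servicing the
       status block (masking further interrupts); writing 0 and reading the
       mailbox back re-arms interrupts and pushes the posted write.  The loop
       repeats if the chip marked the status block updated again meanwhile. */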
2437 if (sc->rxp_std_index != sc->prev_rxp_std_index) {
2438 sc->prev_rxp_std_index = sc->rxp_std_index;
2439 WRITEMBOX(sc, R_RCV_BD_STD_PI, sc->rxp_std_index);
2444 static void
2445 t3_clear_stats(t3_ether_t *sc)
2447 t3_stats_t zeros;
2449 if (sc->device == K_PCI_ID_BCM5705 ||
2450 sc->device == K_PCI_ID_BCM5750)
2451 return;
2453 memset(&zeros, 0, sizeof(t3_stats_t));
2454 WRITEMBOX(sc, R_RELOAD_STATS_MBOX + 4, 0);
2455 WRITEMBOX(sc, R_RELOAD_STATS_MBOX, PTR_TO_PCI(&zeros));
2459 static void
2460 t3_start(t3_ether_t *sc)
2462 t3_hwinit(sc);
2464 sc->intmask = 0;
2466 #if IPOLL
2467 cfe_request_irq(sc->irq, t3_isr, sc, CFE_IRQ_FLAGS_SHARED, 0);
2469 if (!(sc->flags & T3_NO_PHY))
2471 #if T3_AUTOPOLL
2472 sc->intmask |= M_EVT_LINKCHNG;
2473 #else
2474 sc->intmask |= M_EVT_LINKCHNG | M_EVT_MIINT;
2475 #endif
2477 else
2479 sc->intmask |= M_EVT_LINKCHNG | M_EVT_MIINT;
2481 WRITECSR(sc, R_MAC_EVENT_ENB, sc->intmask);
2482 #endif
2484 /* Post some Rcv Producer buffers */
2485 sc->prev_rxp_std_index = sc->rxp_std_index;
2486 WRITEMBOX(sc, R_RCV_BD_STD_PI, sc->rxp_std_index);
2488 sc->state = eth_state_on;
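    /* Under IPOLL a shared interrupt handler drives the rings; with XPOLL the
       driver instead calls t3_isr() directly from the read/write/inpstat
       entry points, so it can run without an interrupt handler at all. */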
2491 static void
2492 t3_stop(t3_ether_t *sc)
2494 WRITECSR(sc, R_MAC_EVENT_ENB, 0);
2495 sc->intmask = 0;
2496 #if IPOLL
2497 cfe_free_irq(sc->irq, 0);
2498 #endif
2500 if (sc->state == eth_state_on) {
2501 sc->state = eth_state_off;
2502 t3_hwshutdown(sc);
2503 t3_reinit(sc);
2508 static int t3_ether_open(cfe_devctx_t *ctx);
2509 static int t3_ether_read(cfe_devctx_t *ctx,iocb_buffer_t *buffer);
2510 static int t3_ether_inpstat(cfe_devctx_t *ctx,iocb_inpstat_t *inpstat);
2511 static int t3_ether_write(cfe_devctx_t *ctx,iocb_buffer_t *buffer);
2512 static int t3_ether_ioctl(cfe_devctx_t *ctx,iocb_buffer_t *buffer);
2513 static int t3_ether_close(cfe_devctx_t *ctx);
2514 static void t3_ether_poll(cfe_devctx_t *ctx, int64_t ticks);
2515 static void t3_ether_reset(void *softc);
2517 const static cfe_devdisp_t t3_ether_dispatch = {
2518 t3_ether_open,
2519 t3_ether_read,
2520 t3_ether_inpstat,
2521 t3_ether_write,
2522 t3_ether_ioctl,
2523 t3_ether_close,
2524 t3_ether_poll,
2525 t3_ether_reset
2528 cfe_driver_t bcm5700drv = {
2529 "BCM570x Ethernet",
2530 "eth",
2531 CFE_DEV_NETWORK,
2532 &t3_ether_dispatch,
2533 t3_ether_probe
2537 static void
2538 t3_delete_sc(t3_ether_t *sc)
2540 xprintf("BCM570x attach: No memory to complete probe\n");
2541 if (sc != NULL) {
2542 if (sc->txp_1 != NULL)
2543 kfree_uncached(sc->txp_1);
2544 if (sc->rxr_1 != NULL)
2545 kfree_uncached(sc->rxr_1);
2546 if (sc->rxp_std != NULL)
2547 kfree_uncached(sc->rxp_std);
2548 if (sc->stats != NULL)
2549 kfree_uncached((t3_stats_t *)sc->stats);
2550 if (sc->status != NULL)
2551        kfree_uncached((t3_status_t *)sc->status);
2552 KFREE(sc);
2556 static int
2557 t3_ether_attach(cfe_driver_t *drv, pcitag_t tag, int index)
2559 t3_ether_t *sc;
2560 char descr[80];
2561 phys_addr_t pa;
2562 uint32_t base;
2563 uint32_t pcictrl;
2564 uint32_t addr;
2565 pcireg_t device, class;
2566 const char *devname;
2567 bool rgmii = FALSE;
2568 si_t *sih = NULL;
2569 int i;
2571 device = pci_conf_read(tag, PCI_ID_REG);
2572 class = pci_conf_read(tag, PCI_CLASS_REG);
2574 if (PCI_PRODUCT(device) == K_PCI_ID_BCM471F) {
2575 sih = si_kattach(SI_OSH);
2576 hndgige_init(sih, ++sigige, &rgmii);
2579 pci_map_mem(tag, PCI_MAPREG(0), PCI_MATCH_BITS, &pa);
2580 base = (uint32_t)pa;
2582 sc = (t3_ether_t *) KMALLOC(sizeof(t3_ether_t), 0);
2583 if (sc == NULL) {
2584 t3_delete_sc(sc);
2585 return 0;
2588 memset(sc, 0, sizeof(*sc));
2590 sc->status = NULL;
2591 sc->stats = NULL;
2593 sc->tag = tag;
2594 sc->device = PCI_PRODUCT(device);
2595 sc->revision = PCI_REVISION(class);
2596 /* (Some?) 5700s report the 5701 device code */
2597 sc->asic_revision = G_MHC_ASICREV(pci_conf_read(tag, R_MISC_HOST_CTRL));
2598 if (sc->device == K_PCI_ID_BCM5701
2599 && (sc->asic_revision & 0xF000) == 0x7000)
2600 sc->device = K_PCI_ID_BCM5700;
2601    /* From now on this core identifies itself as a BCM5750 */
2602 if (sih) {
2603 sc->flags |= rgmii ? T3_RGMII_MODE : 0;
2604 sc->flags |= T3_SB_CORE;
2605 if (getintvar(NULL, "boardflags") & BFL_ENETROBO)
2606 sc->flags |= T3_NO_PHY;
2607 sc->device = K_PCI_ID_BCM5750;
2608 sc->sih = sih;
2609 sc->siidx = sigige;
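    /* A GigE core found behind the SoC backplane (probed above via the 471F
       bridge ID) is handled as if it were a BCM5750: its MAC address comes
       from the et<N>macaddr NVRAM variable rather than from registers already
       set by boot firmware, and an attached ROBO switch replaces the PHY. */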
2612 sc->status = (t3_status_t *) kmalloc_uncached(sizeof(t3_status_t), CACHE_ALIGN);
2613 if (sc->status == NULL) {
2614 t3_delete_sc(sc);
2615 return 0;
2618 sc->stats = (t3_stats_t *) kmalloc_uncached(sizeof(t3_stats_t), CACHE_ALIGN);
2619 if (sc->stats == NULL) {
2620 t3_delete_sc(sc);
2621 return 0;
2624 if (sc->device == K_PCI_ID_BCM5705 ||
2625 sc->device == K_PCI_ID_BCM5750)
2626 sc->rxr_entries = RXR_RING_ENTRIES_05;
2627 else
2628 sc->rxr_entries = RXR_RING_ENTRIES;
2630 sc->rxp_std =
2631 (t3_rcv_bd_t *) kmalloc_uncached(RXP_STD_ENTRIES*RCV_BD_SIZE, CACHE_ALIGN);
2632 sc->rxr_1 =
2633 (t3_rcv_bd_t *) kmalloc_uncached(sc->rxr_entries*RCV_BD_SIZE, CACHE_ALIGN);
2634 sc->txp_1 =
2635 (t3_snd_bd_t *) kmalloc_uncached(TXP_RING_ENTRIES*SND_BD_SIZE, CACHE_ALIGN);
2636 if (sc->rxp_std == NULL || sc->rxr_1 == NULL || sc->txp_1 == NULL) {
2637 t3_delete_sc(sc);
2638 return 0;
2641 sc->regbase = base;
2643 /* NB: the relative base of memory depends on the access model */
2644 pcictrl = pci_conf_read(tag, R_PCI_STATE);
2645 sc->membase = base + 0x8000; /* Normal mode: 32K window */
2646 sc->irq = pci_conf_read(tag, PCI_BPARAM_INTERRUPT_REG) & 0xFF;
2648 sc->devctx = NULL;
2650 if (sc->flags & T3_SB_CORE) {
2651 char etXmacaddr[] = "etXXXXmacaddr";
2652 uint32_t low, high;
2654 sprintf(etXmacaddr, "et%umacaddr", (unsigned int)sc->siidx);
2655 bcm_ether_atoe(getvar(NULL, etXmacaddr),
2656 (struct ether_addr *)sc->hwaddr);
2657 high = (sc->hwaddr[0] << 8) | (sc->hwaddr[1]);
2658 low = ((sc->hwaddr[2] << 24) | (sc->hwaddr[3] << 16)
2659 | (sc->hwaddr[4] << 8) | sc->hwaddr[5]);
2660 /* For now, use a single MAC address */
2661 WRITECSR(sc, R_MAC_ADDR1_HIGH, high); WRITECSR(sc, R_MAC_ADDR1_LOW, low);
2662 WRITECSR(sc, R_MAC_ADDR2_HIGH, high); WRITECSR(sc, R_MAC_ADDR2_LOW, low);
2663 WRITECSR(sc, R_MAC_ADDR3_HIGH, high); WRITECSR(sc, R_MAC_ADDR3_LOW, low);
2664 WRITECSR(sc, R_MAC_ADDR4_HIGH, high); WRITECSR(sc, R_MAC_ADDR4_LOW, low);
2665 } else {
2666 /* Assume on-chip firmware has initialized the MAC address. */
2667 addr = READCSR(sc, R_MAC_ADDR1_HIGH);
2668 for (i = 0; i < 2; i++)
2669 sc->hwaddr[i] = (addr >> (8*(1-i))) & 0xff;
2670 addr = READCSR(sc, R_MAC_ADDR1_LOW);
2671 for (i = 0; i < 4; i++)
2672 sc->hwaddr[2+i] = (addr >> (8*(3-i))) & 0xff;
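    /* The MAC address registers pack the address big-endian: ADDR_HIGH holds
       bytes 0-1 in its low 16 bits and ADDR_LOW holds bytes 2-5.  For example
       00:10:18:aa:bb:cc reads back as high = 0x00000010, low = 0x18aabbcc,
       matching the packing used on the write path above. */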
2675 t3_init(sc);
2677 sc->state = eth_state_uninit;
2679 /* print device info */
2680 switch (sc->device) {
2681 case K_PCI_ID_BCM5700:
2682 devname = "BCM5700"; break;
2683 case K_PCI_ID_BCM5701:
2684 devname = "BCM5701"; break;
2685 case K_PCI_ID_BCM5702:
2686 devname = "BCM5702"; break;
2687 case K_PCI_ID_BCM5703:
2688 devname = "BCM5703"; break;
2689 case K_PCI_ID_BCM5705:
2690 devname = "BCM5705"; break;
2691 case K_PCI_ID_BCM5750:
2692 devname = "BCM5750"; break;
2693 default:
2694 devname = "BCM570x"; break;
2696 xsprintf(descr, "%s Ethernet at 0x%X", devname, (unsigned int)sc->regbase);
2697 printf("ge%d: %s\n", index, descr);
2699 cfe_attach(drv, sc, NULL, descr);
2700 return 1;
2703 static void
2704 t3_ether_probe(cfe_driver_t *drv,
2705 unsigned long probe_a, unsigned long probe_b,
2706 void *probe_ptr)
2708 int index;
2709 int n;
2711 n = 0;
2712 index = 0;
2713 for (;;) {
2714 pcitag_t tag;
2715 pcireg_t device;
2717 if (pci_find_class(PCI_CLASS_NETWORK, index, &tag) != 0)
2718 break;
2720 index++;
2722 device = pci_conf_read(tag, PCI_ID_REG);
2723 if (PCI_VENDOR(device) == K_PCI_VENDOR_BROADCOM) {
2724 switch (PCI_PRODUCT(device)) {
2725 case K_PCI_ID_BCM5700:
2726 case K_PCI_ID_BCM5701:
2727 case K_PCI_ID_BCM5702:
2728 case K_PCI_ID_BCM5703:
2729 case K_PCI_ID_BCM5703a:
2730 case K_PCI_ID_BCM5703b:
2731 case K_PCI_ID_BCM5704C:
2732 case K_PCI_ID_BCM5705:
2733 case K_PCI_ID_BCM5750:
2734 case K_PCI_ID_BCM471F:
2735 t3_ether_attach(drv, tag, n);
2736 n++;
2737 break;
2738 default:
2739 break;
2746 /* The functions below are called via the dispatch vector for the Tigon 3 */
2748 static int
2749 t3_ether_open(cfe_devctx_t *ctx)
2751 t3_ether_t *sc = ctx->dev_softc;
2752 volatile t3_stats_t *stats = sc->stats;
2753 int i;
2755 if (sc->state == eth_state_on)
2756 t3_stop(sc);
2758 sc->devctx = ctx;
2760 for (i = 0; i < L_MAC_STATS/sizeof(uint64_t); i++)
2762 stats->stats[i] = 0;
2765 t3_start(sc);
2767 sc->rx_interrupts = sc->tx_interrupts = sc->bogus_interrupts = 0;
2768 t3_clear_stats(sc);
2770 if (XPOLL)
2771 t3_isr(sc);
2773 return(0);
2776 static int
2777 t3_ether_read(cfe_devctx_t *ctx, iocb_buffer_t *buffer)
2779 t3_ether_t *sc = ctx->dev_softc;
2780 eth_pkt_t *pkt;
2781 int blen;
2783 if (XPOLL) t3_isr(sc);
2785 if (sc->state != eth_state_on) return -1;
2787 CS_ENTER(sc);
2788 pkt = (eth_pkt_t *) q_deqnext(&(sc->rxqueue));
2789 CS_EXIT(sc);
2791 if (pkt == NULL) {
2792 buffer->buf_retlen = 0;
2793 return 0;
2796 blen = buffer->buf_length;
2797 if (blen > pkt->length) blen = pkt->length;
2799 memcpy(buffer->buf_ptr, pkt->buffer, blen);
2800 buffer->buf_retlen = blen;
2802 eth_free_pkt(sc, pkt);
2804 if (XPOLL) t3_isr(sc);
2805 return 0;
2808 static int
2809 t3_ether_inpstat(cfe_devctx_t *ctx, iocb_inpstat_t *inpstat)
2811 t3_ether_t *sc = ctx->dev_softc;
2813 if (XPOLL) t3_isr(sc);
2815 if (sc->state != eth_state_on) return -1;
2817 /* We avoid an interlock here because the result is a hint and an
2818 interrupt cannot turn a non-empty queue into an empty one. */
2819 inpstat->inp_status = (q_isempty(&(sc->rxqueue))) ? 0 : 1;
2821 return 0;
2824 static int
2825 t3_ether_write(cfe_devctx_t *ctx, iocb_buffer_t *buffer)
2827 t3_ether_t *sc = ctx->dev_softc;
2828 eth_pkt_t *pkt;
2829 int blen;
2831 if (XPOLL) t3_isr(sc);
2833 if (sc->state != eth_state_on) return -1;
2835 pkt = eth_alloc_pkt(sc);
2836 if (!pkt) return CFE_ERR_NOMEM;
2838 blen = buffer->buf_length;
2839 if (blen > pkt->length) blen = pkt->length;
2841 memcpy(pkt->buffer, buffer->buf_ptr, blen);
2842 pkt->length = blen;
2845    /* Ensure that the packet memory is flushed out of the data cache
2846       before posting it for transmission. */
2848 cfe_flushcache(CFE_CACHE_FLUSH_D);
2850 if (t3_transmit(sc, pkt) != 0) {
2851 eth_free_pkt(sc,pkt);
2852 return CFE_ERR_IOERR;
2855 if (XPOLL) t3_isr(sc);
2856 return 0;
2859 static int
2860 t3_ether_ioctl(cfe_devctx_t *ctx, iocb_buffer_t *buffer)
2862 t3_ether_t *sc = ctx->dev_softc;
2864 switch ((int)buffer->buf_ioctlcmd) {
2865 case IOCTL_ETHER_GETHWADDR:
2866 memcpy(buffer->buf_ptr, sc->hwaddr, sizeof(sc->hwaddr));
2867 return 0;
2869 default:
2870 return -1;
2874 static int
2875 t3_ether_close(cfe_devctx_t *ctx)
2877 t3_ether_t *sc = ctx->dev_softc;
2878 volatile t3_stats_t *stats = sc->stats;
2879 uint32_t inpkts, outpkts, interrupts;
2880 int i;
2882 t3_stop(sc);
2884 #if T3_BRINGUP
2885 for (i = 0; i < L_MAC_STATS/sizeof(uint64_t); i++) {
2886 if (stats->stats[i] != 0)
2887 xprintf(" stats[%d] = %8lld\n", i, stats->stats[i]);
2889 #else
2890 (void) i;
2891 #endif
2893 inpkts = stats->stats[ifHCInUcastPkts]
2894 + stats->stats[ifHCInMulticastPkts]
2895 + stats->stats[ifHCInBroadcastPkts];
2896 outpkts = stats->stats[ifHCOutUcastPkts]
2897 + stats->stats[ifHCOutMulticastPkts]
2898 + stats->stats[ifHCOutBroadcastPkts];
2899 interrupts = stats->stats[nicInterrupts];
2901 /* Empirically, counters on the 5705 are always zero. */
2902 if (!(sc->device == K_PCI_ID_BCM5705 ||
2903 sc->device == K_PCI_ID_BCM5750)) {
2904 xprintf("%s: %d sent, %d received, %d interrupts\n",
2905 t3_devname(sc), (int)outpkts, (int)inpkts, (int)interrupts);
2906 if (IPOLL) {
2907 xprintf(" %d rx interrupts, %d tx interrupts",
2908 (int)sc->rx_interrupts, (int)sc->tx_interrupts);
2909 if (sc->bogus_interrupts != 0)
2910 xprintf(", %d bogus interrupts", (int)sc->bogus_interrupts);
2911 xprintf("\n");
2915 sc->devctx = NULL;
2916 return 0;
2919 static void
2920 t3_ether_poll(cfe_devctx_t *ctx, int64_t ticks)
2922 t3_ether_t *sc = ctx->dev_softc;
2923 int changed;
2925 if(!(sc->flags & T3_NO_PHY)) {
2926 if (sc->phy_change && sc->state != eth_state_uninit && !sc->mii_polling) {
2927 uint32_t mask;
2929 sc->mii_polling++;
2930 mask = READCSR(sc, R_MAC_EVENT_ENB);
2931 WRITECSR(sc, R_MAC_EVENT_ENB, 0);
2933 changed = mii_poll(sc);
2934 if (changed) {
2935 mii_autonegotiate(sc);
2937 sc->phy_change = 0;
2938 sc->mii_polling--;
2940 WRITECSR(sc, R_MAC_EVENT_ENB, mask);
2945 static void
2946 t3_ether_reset(void *softc)
2948 t3_ether_t *sc = (t3_ether_t *)softc;
2950 /* Turn off the Ethernet interface. */
2952 if (sc->state == eth_state_on)
2953 t3_stop(sc);
2955 sc->state = eth_state_uninit;