/*
 * Copyright (C) 2001,2002,2003,2004 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * This driver is designed for the Broadcom SiByte SOC built-in
 * Ethernet controllers. Written by Mitch Lichtenberg at Broadcom Corp.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <asm/processor.h>		/* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/cache.h>
/* This is only here until the firmware is ready.  In that case,
   the firmware leaves the ethernet address in the register for us. */
#ifdef CONFIG_SIBYTE_STANDALONE
#define SBMAC_ETH0_HWADDR "40:00:00:00:01:00"
#define SBMAC_ETH1_HWADDR "40:00:00:00:01:01"
#define SBMAC_ETH2_HWADDR "40:00:00:00:01:02"
#define SBMAC_ETH3_HWADDR "40:00:00:00:01:03"
#endif
/* These identify the driver base version and may not be removed. */
static char version1[] __devinitdata =
"sb1250-mac.c:1.00  1/11/2001  Written by Mitch Lichtenberg\n";
/* Operational parameters that usually are not changed. */

#define CONFIG_SBMAC_COALESCE

#define MAX_UNITS 4		/* More are supported, limit only on options */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (2*HZ)

MODULE_AUTHOR("Mitch Lichtenberg (Broadcom Corp.)");
MODULE_DESCRIPTION("Broadcom SiByte SOC GB Ethernet driver");
/* A few user-configurable values which may be modified when a driver
   module is loaded. */

/* 1 normal messages, 0 quiet .. 7 verbose. */
static int debug = 1;
module_param(debug, int, S_IRUGO);
MODULE_PARM_DESC(debug, "Debug messages");

static int noisy_mii = 1;
module_param(noisy_mii, int, S_IRUGO);
MODULE_PARM_DESC(noisy_mii, "MII status messages");
/* Used to pass the media type, etc.
   Both 'options[]' and 'full_duplex[]' should exist for driver
   interoperability.
   The media type is usually passed in 'options[]'.
*/

static int options[MAX_UNITS] = {-1, -1, -1, -1};
module_param_array(options, int, NULL, S_IRUGO);
MODULE_PARM_DESC(options, "1-" __MODULE_STRING(MAX_UNITS));

static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1};
module_param_array(full_duplex, int, NULL, S_IRUGO);
MODULE_PARM_DESC(full_duplex, "1-" __MODULE_STRING(MAX_UNITS));
#ifdef CONFIG_SBMAC_COALESCE
static int int_pktcnt_tx = 255;
module_param(int_pktcnt_tx, int, S_IRUGO);
MODULE_PARM_DESC(int_pktcnt_tx, "TX packet count");

static int int_timeout_tx = 255;
module_param(int_timeout_tx, int, S_IRUGO);
MODULE_PARM_DESC(int_timeout_tx, "TX timeout value");

static int int_pktcnt_rx = 64;
module_param(int_pktcnt_rx, int, S_IRUGO);
MODULE_PARM_DESC(int_pktcnt_rx, "RX packet count");

static int int_timeout_rx = 64;
module_param(int_timeout_rx, int, S_IRUGO);
MODULE_PARM_DESC(int_timeout_rx, "RX timeout value");
#endif
#include <asm/sibyte/sb1250.h>
#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
#include <asm/sibyte/bcm1480_regs.h>
#include <asm/sibyte/bcm1480_int.h>
#define R_MAC_DMA_OODPKTLOST_RX	R_MAC_DMA_OODPKTLOST
#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
#include <asm/sibyte/sb1250_regs.h>
#include <asm/sibyte/sb1250_int.h>
#else
#error invalid SiByte MAC configuration
#endif

#include <asm/sibyte/sb1250_scd.h>
#include <asm/sibyte/sb1250_mac.h>
#include <asm/sibyte/sb1250_dma.h>

#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
#define UNIT_INT(n)		(K_BCM1480_INT_MAC_0 + ((n) * 2))
#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
#define UNIT_INT(n)		(K_INT_MAC_0 + (n))
#else
#error invalid SiByte MAC configuration
#endif
/**********************************************************************
 *  Simple types
 ********************************************************************* */

typedef enum { sbmac_speed_auto, sbmac_speed_10,
	       sbmac_speed_100, sbmac_speed_1000 } sbmac_speed_t;

typedef enum { sbmac_duplex_auto, sbmac_duplex_half,
	       sbmac_duplex_full } sbmac_duplex_t;

typedef enum { sbmac_fc_auto, sbmac_fc_disabled, sbmac_fc_frame,
	       sbmac_fc_collision, sbmac_fc_carrier } sbmac_fc_t;

typedef enum { sbmac_state_uninit, sbmac_state_off, sbmac_state_on,
	       sbmac_state_broken } sbmac_state_t;
/**********************************************************************
 *  Macros
 ********************************************************************* */

#define SBDMA_NEXTBUF(d,f) ((((d)->f+1) == (d)->sbdma_dscrtable_end) ? \
			  (d)->sbdma_dscrtable : (d)->f+1)

#define NUMCACHEBLKS(x) (((x)+SMP_CACHE_BYTES-1)/SMP_CACHE_BYTES)

#define SBMAC_MAX_TXDESCR	256
#define SBMAC_MAX_RXDESCR	256

#define ETHER_ALIGN	2
#define ETHER_ADDR_LEN	6
#define ENET_PACKET_SIZE	1518
/*#define ENET_PACKET_SIZE	9216 */
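
/*
 * A note on the two ring helpers above: SBDMA_NEXTBUF advances a
 * descriptor pointer and wraps it back to the start of the table when
 * it reaches sbdma_dscrtable_end, and NUMCACHEBLKS rounds a byte count
 * up to whole cache lines, which is the granularity the DMA engine
 * works in.  For example, assuming a 32-byte cache line,
 * NUMCACHEBLKS(ENET_PACKET_SIZE + ETHER_ALIGN) = (1520 + 31) / 32 = 48
 * cache blocks for a default-sized receive buffer.
 */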
/**********************************************************************
 *  DMA Descriptor structure
 ********************************************************************* */

typedef struct sbdmadscr_s {
	uint64_t  dscr_a;
	uint64_t  dscr_b;
} sbdmadscr_t;

typedef unsigned long paddr_t;
/**********************************************************************
 *  DMA Controller structure
 ********************************************************************* */

typedef struct sbmacdma_s {

	/*
	 * This stuff is used to identify the channel and the registers
	 * associated with it.
	 */

	struct sbmac_softc *sbdma_eth;	    /* back pointer to associated MAC */
	int              sbdma_channel;	    /* channel number */
	int		 sbdma_txdir;       /* direction (1=transmit) */
	int		 sbdma_maxdescr;    /* total # of descriptors in ring */
#ifdef CONFIG_SBMAC_COALESCE
	int		 sbdma_int_pktcnt;  /* # descriptors rx/tx before interrupt*/
	int		 sbdma_int_timeout; /* # usec rx/tx interrupt */
#endif

	volatile void __iomem *sbdma_config0;	/* DMA config register 0 */
	volatile void __iomem *sbdma_config1;	/* DMA config register 1 */
	volatile void __iomem *sbdma_dscrbase;	/* Descriptor base address */
	volatile void __iomem *sbdma_dscrcnt;	/* Descriptor count register */
	volatile void __iomem *sbdma_curdscr;	/* current descriptor address */
	volatile void __iomem *sbdma_oodpktlost;/* pkt drop (rx only) */

	/*
	 * This stuff is for maintenance of the ring
	 */

	sbdmadscr_t     *sbdma_dscrtable_unaligned;
	sbdmadscr_t     *sbdma_dscrtable;	/* base of descriptor table */
	sbdmadscr_t     *sbdma_dscrtable_end;	/* end of descriptor table */

	struct sk_buff **sbdma_ctxtable;	/* context table, one per descr */

	paddr_t          sbdma_dscrtable_phys;	/* and also the phys addr */
	sbdmadscr_t     *sbdma_addptr;		/* next dscr for sw to add */
	sbdmadscr_t     *sbdma_remptr;		/* next dscr for sw to remove */
} sbmacdma_t;
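
/*
 * Ring bookkeeping convention used throughout this file: software
 * queues new buffers at sbdma_addptr and reclaims completed ones at
 * sbdma_remptr, while the hardware's own position is read back from
 * the sbdma_curdscr register.  The ring is treated as full when
 * advancing sbdma_addptr would make it equal to sbdma_remptr (see
 * SBDMA_NEXTBUF and the add_rcvbuffer/add_txbuffer routines below).
 */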
/**********************************************************************
 *  Ethernet softc structure
 ********************************************************************* */

struct sbmac_softc {

	/*
	 * Linux-specific things
	 */

	struct net_device *sbm_dev;		/* pointer to linux device */
	spinlock_t sbm_lock;			/* spin lock */
	struct timer_list sbm_timer;		/* for monitoring MII */
	struct net_device_stats sbm_stats;
	int sbm_devflags;			/* current device flags */

	int	     sbm_phy_oldbmsr;
	int	     sbm_phy_oldanlpar;
	int	     sbm_phy_oldk1stsr;
	int	     sbm_phy_oldlinkstat;
	int sbm_buffersize;

	unsigned char sbm_phys[2];

	/*
	 * Controller-specific things
	 */

	void __iomem	*sbm_base;		/* MAC's base address */
	sbmac_state_t	 sbm_state;		/* current state */

	volatile void __iomem	*sbm_macenable;	/* MAC Enable Register */
	volatile void __iomem	*sbm_maccfg;	/* MAC Configuration Register */
	volatile void __iomem	*sbm_fifocfg;	/* FIFO configuration register */
	volatile void __iomem	*sbm_framecfg;	/* Frame configuration register */
	volatile void __iomem	*sbm_rxfilter;	/* receive filter register */
	volatile void __iomem	*sbm_isr;	/* Interrupt status register */
	volatile void __iomem	*sbm_imr;	/* Interrupt mask register */
	volatile void __iomem	*sbm_mdio;	/* MDIO register */

	sbmac_speed_t	 sbm_speed;		/* current speed */
	sbmac_duplex_t	 sbm_duplex;		/* current duplex */
	sbmac_fc_t	 sbm_fc;		/* current flow control setting */

	unsigned char	 sbm_hwaddr[ETHER_ADDR_LEN];

	sbmacdma_t	 sbm_txdma;		/* for now, only use channel 0 */
	sbmacdma_t	 sbm_rxdma;
	int		 rx_hw_checksum;
	int		 sbe_idx;
};
/**********************************************************************
 *  Externs
 ********************************************************************* */

/**********************************************************************
 *  Prototypes
 ********************************************************************* */
static void sbdma_initctx(sbmacdma_t *d,
			  struct sbmac_softc *s,
			  int chan,
			  int txrx,
			  int maxdescr);
static void sbdma_channel_start(sbmacdma_t *d, int rxtx);
static int sbdma_add_rcvbuffer(sbmacdma_t *d,struct sk_buff *m);
static int sbdma_add_txbuffer(sbmacdma_t *d,struct sk_buff *m);
static void sbdma_emptyring(sbmacdma_t *d);
static void sbdma_fillring(sbmacdma_t *d);
static int sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d, int work_to_do, int poll);
static void sbdma_tx_process(struct sbmac_softc *sc,sbmacdma_t *d, int poll);
static int sbmac_initctx(struct sbmac_softc *s);
static void sbmac_channel_start(struct sbmac_softc *s);
static void sbmac_channel_stop(struct sbmac_softc *s);
static sbmac_state_t sbmac_set_channel_state(struct sbmac_softc *,sbmac_state_t);
static void sbmac_promiscuous_mode(struct sbmac_softc *sc,int onoff);
static uint64_t sbmac_addr2reg(unsigned char *ptr);
static irqreturn_t sbmac_intr(int irq,void *dev_instance);
static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev);
static void sbmac_setmulti(struct sbmac_softc *sc);
static int sbmac_init(struct net_device *dev, int idx);
static int sbmac_set_speed(struct sbmac_softc *s,sbmac_speed_t speed);
static int sbmac_set_duplex(struct sbmac_softc *s,sbmac_duplex_t duplex,sbmac_fc_t fc);

static int sbmac_open(struct net_device *dev);
static void sbmac_timer(unsigned long data);
static void sbmac_tx_timeout (struct net_device *dev);
static struct net_device_stats *sbmac_get_stats(struct net_device *dev);
static void sbmac_set_rx_mode(struct net_device *dev);
static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int sbmac_close(struct net_device *dev);
static int sbmac_poll(struct net_device *poll_dev, int *budget);

static int sbmac_mii_poll(struct sbmac_softc *s,int noisy);
static int sbmac_mii_probe(struct net_device *dev);

static void sbmac_mii_sync(struct sbmac_softc *s);
static void sbmac_mii_senddata(struct sbmac_softc *s,unsigned int data, int bitcnt);
static unsigned int sbmac_mii_read(struct sbmac_softc *s,int phyaddr,int regidx);
static void sbmac_mii_write(struct sbmac_softc *s,int phyaddr,int regidx,
			    unsigned int regval);
/**********************************************************************
 *  Globals
 ********************************************************************* */

static uint64_t sbmac_orig_hwaddr[MAX_UNITS];
/**********************************************************************
 *  MDIO constants
 ********************************************************************* */

#define	MII_COMMAND_START	0x01
#define	MII_COMMAND_READ	0x02
#define	MII_COMMAND_WRITE	0x01
#define	MII_COMMAND_ACK		0x02

#define BMCR_RESET	0x8000
#define BMCR_LOOPBACK	0x4000
#define BMCR_SPEED0	0x2000
#define BMCR_ANENABLE	0x1000
#define BMCR_POWERDOWN	0x0800
#define BMCR_ISOLATE	0x0400
#define BMCR_RESTARTAN	0x0200
#define BMCR_DUPLEX	0x0100
#define BMCR_COLTEST	0x0080
#define BMCR_SPEED1	0x0040
#define BMCR_SPEED1000	BMCR_SPEED1
#define BMCR_SPEED100	BMCR_SPEED0
#define BMCR_SPEED10	0

#define BMSR_100BT4	0x8000
#define BMSR_100BT_FDX	0x4000
#define BMSR_100BT_HDX	0x2000
#define BMSR_10BT_FDX	0x1000
#define BMSR_10BT_HDX	0x0800
#define BMSR_100BT2_FDX	0x0400
#define BMSR_100BT2_HDX	0x0200
#define BMSR_1000BT_XSR	0x0100
#define BMSR_PRESUP	0x0040
#define BMSR_ANCOMPLT	0x0020
#define BMSR_REMFAULT	0x0010
#define BMSR_AUTONEG	0x0008
#define BMSR_LINKSTAT	0x0004
#define BMSR_JABDETECT	0x0002
#define BMSR_EXTCAPAB	0x0001

#define PHYIDR1		0x2000
#define PHYIDR2		0x5C60

#define ANAR_NP		0x8000
#define ANAR_RF		0x2000
#define ANAR_ASYPAUSE	0x0800
#define ANAR_PAUSE	0x0400
#define ANAR_T4		0x0200
#define ANAR_TXFD	0x0100
#define ANAR_TXHD	0x0080
#define ANAR_10FD	0x0040
#define ANAR_10HD	0x0020
#define ANAR_PSB	0x0001

#define ANLPAR_NP	0x8000
#define ANLPAR_ACK	0x4000
#define ANLPAR_RF	0x2000
#define ANLPAR_ASYPAUSE	0x0800
#define ANLPAR_PAUSE	0x0400
#define ANLPAR_T4	0x0200
#define ANLPAR_TXFD	0x0100
#define ANLPAR_TXHD	0x0080
#define ANLPAR_10FD	0x0040
#define ANLPAR_10HD	0x0020
#define ANLPAR_PSB	0x0001	/* 802.3 */

#define ANER_PDF	0x0010
#define ANER_LPNPABLE	0x0008
#define ANER_NPABLE	0x0004
#define ANER_PAGERX	0x0002
#define ANER_LPANABLE	0x0001

#define ANNPTR_NP	0x8000
#define ANNPTR_MP	0x2000
#define ANNPTR_ACK2	0x1000
#define ANNPTR_TOGTX	0x0800
#define ANNPTR_CODE	0x0008

#define ANNPRR_NP	0x8000
#define ANNPRR_MP	0x2000
#define ANNPRR_ACK3	0x1000
#define ANNPRR_TOGTX	0x0800
#define ANNPRR_CODE	0x0008

#define K1TCR_TESTMODE	0x0000
#define K1TCR_MSMCE	0x1000
#define K1TCR_MSCV	0x0800
#define K1TCR_RPTR	0x0400
#define K1TCR_1000BT_FDX 0x200
#define K1TCR_1000BT_HDX 0x100

#define K1STSR_MSMCFLT	0x8000
#define K1STSR_MSCFGRES	0x4000
#define K1STSR_LRSTAT	0x2000
#define K1STSR_RRSTAT	0x1000
#define K1STSR_LP1KFD	0x0800
#define K1STSR_LP1KHD	0x0400
#define K1STSR_LPASMDIR	0x0200

#define K1SCR_1KX_FDX	0x8000
#define K1SCR_1KX_HDX	0x4000
#define K1SCR_1KT_FDX	0x2000
#define K1SCR_1KT_HDX	0x1000

#define STRAP_PHY1	0x0800
#define STRAP_NCMODE	0x0400
#define STRAP_MANMSCFG	0x0200
#define STRAP_ANENABLE	0x0100
#define STRAP_MSVAL	0x0080
#define STRAP_1KHDXADV	0x0010
#define STRAP_1KFDXADV	0x0008
#define STRAP_100ADV	0x0004
#define STRAP_SPEEDSEL	0x0000
#define STRAP_SPEED100	0x0001

#define PHYSUP_SPEED1000 0x10
#define PHYSUP_SPEED100	0x08
#define PHYSUP_SPEED10	0x00
#define PHYSUP_LINKUP	0x04
#define PHYSUP_FDX	0x02

#define	MII_BMCR	0x00	/* Basic mode control register (rw) */
#define	MII_BMSR	0x01	/* Basic mode status register (ro) */
#define	MII_PHYIDR1	0x02
#define	MII_PHYIDR2	0x03

#define MII_K1STSR	0x0A	/* 1K Status Register (ro) */
#define	MII_ANLPAR	0x05	/* Autonegotiation lnk partner abilities (rw) */

#define M_MAC_MDIO_DIR_OUTPUT	0	/* for clarity */

#define ENABLE	1
#define DISABLE	0
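
/*
 * The PHY register numbers and bit masks above follow the standard
 * IEEE 802.3 clause-22 MII layout (BMCR/BMSR/ANAR/ANLPAR plus the
 * 1000BASE-T and vendor-specific strap/status registers); they are
 * accessed by bit-banging the MAC's MDIO register in the routines
 * below rather than through a generic MII library.
 */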
/**********************************************************************
 *  SBMAC_MII_SYNC(s)
 *
 *  Synchronize with the MII - send a pattern of bits to the MII
 *  that will guarantee that it is ready to accept a command.
 *
 *  Input parameters:
 *  	   s - sbmac structure
 *
 *  Return value:
 *  	   nothing
 ********************************************************************* */

static void sbmac_mii_sync(struct sbmac_softc *s)
{
	int cnt;
	uint64_t bits;
	int mac_mdio_genc;

	mac_mdio_genc = __raw_readq(s->sbm_mdio) & M_MAC_GENC;

	bits = M_MAC_MDIO_DIR_OUTPUT | M_MAC_MDIO_OUT;

	__raw_writeq(bits | mac_mdio_genc, s->sbm_mdio);

	for (cnt = 0; cnt < 32; cnt++) {
		__raw_writeq(bits | M_MAC_MDC | mac_mdio_genc, s->sbm_mdio);
		__raw_writeq(bits | mac_mdio_genc, s->sbm_mdio);
	}
}
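
/*
 * A complete MDIO transaction as issued by the read/write routines
 * below looks roughly like this on the wire: the 32-bit sync pattern
 * generated by sbmac_mii_sync(), then a start field (01), an opcode
 * (10 for read, 01 for write), 5 PHY address bits, 5 register index
 * bits, a turnaround, and finally 16 data bits clocked one at a time.
 */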
/**********************************************************************
 *  SBMAC_MII_SENDDATA(s,data,bitcnt)
 *
 *  Send some bits to the MII.  The bits to be sent are right-
 *  justified in the 'data' parameter.
 *
 *  Input parameters:
 *  	   s - sbmac structure
 *	   data - data to send
 *	   bitcnt - number of bits to send
 ********************************************************************* */

static void sbmac_mii_senddata(struct sbmac_softc *s,unsigned int data, int bitcnt)
{
	int i;
	uint64_t bits;
	unsigned int curmask;
	int mac_mdio_genc;

	mac_mdio_genc = __raw_readq(s->sbm_mdio) & M_MAC_GENC;

	bits = M_MAC_MDIO_DIR_OUTPUT;
	__raw_writeq(bits | mac_mdio_genc, s->sbm_mdio);

	curmask = 1 << (bitcnt - 1);

	for (i = 0; i < bitcnt; i++) {
		if (data & curmask)
			bits |= M_MAC_MDIO_OUT;
		else bits &= ~M_MAC_MDIO_OUT;
		__raw_writeq(bits | mac_mdio_genc, s->sbm_mdio);
		__raw_writeq(bits | M_MAC_MDC | mac_mdio_genc, s->sbm_mdio);
		__raw_writeq(bits | mac_mdio_genc, s->sbm_mdio);
		curmask >>= 1;
	}
}
/**********************************************************************
 *  SBMAC_MII_READ(s,phyaddr,regidx)
 *
 *  Read a PHY register.
 *
 *  Input parameters:
 *  	   s - sbmac structure
 *  	   phyaddr - PHY's address
 *  	   regidx = index of register to read
 *
 *  Return value:
 *  	   value read, or 0 if an error occurred.
 ********************************************************************* */

static unsigned int sbmac_mii_read(struct sbmac_softc *s,int phyaddr,int regidx)
{
	int idx;
	int error;
	int regval;
	int mac_mdio_genc;

	/*
	 * Synchronize ourselves so that the PHY knows the next
	 * thing coming down is a command
	 */

	sbmac_mii_sync(s);

	/*
	 * Send the data to the PHY.  The sequence is
	 *       a "start" command (2 bits)
	 *       a "read" command (2 bits)
	 *       the PHY addr (5 bits)
	 *       the register index (5 bits)
	 */

	sbmac_mii_senddata(s,MII_COMMAND_START, 2);
	sbmac_mii_senddata(s,MII_COMMAND_READ, 2);
	sbmac_mii_senddata(s,phyaddr, 5);
	sbmac_mii_senddata(s,regidx, 5);

	mac_mdio_genc = __raw_readq(s->sbm_mdio) & M_MAC_GENC;

	/*
	 * Switch the port around without a clock transition.
	 */
	__raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, s->sbm_mdio);

	/*
	 * Send out a clock pulse to signal we want the status
	 */

	__raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc, s->sbm_mdio);
	__raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, s->sbm_mdio);

	/*
	 * If an error occurred, the PHY will signal '1' back
	 */
	error = __raw_readq(s->sbm_mdio) & M_MAC_MDIO_IN;

	/*
	 * Issue an 'idle' clock pulse, but keep the direction
	 * the same.
	 */
	__raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc, s->sbm_mdio);
	__raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, s->sbm_mdio);

	regval = 0;

	for (idx = 0; idx < 16; idx++) {
		regval <<= 1;

		if (error == 0) {
			if (__raw_readq(s->sbm_mdio) & M_MAC_MDIO_IN)
				regval |= 1;
		}

		__raw_writeq(M_MAC_MDIO_DIR_INPUT|M_MAC_MDC | mac_mdio_genc, s->sbm_mdio);
		__raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, s->sbm_mdio);
	}

	/* Switch back to output */
	__raw_writeq(M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc, s->sbm_mdio);

	if (error == 0)
		return regval;
	return 0;
}
/**********************************************************************
 *  SBMAC_MII_WRITE(s,phyaddr,regidx,regval)
 *
 *  Write a value to a PHY register.
 *
 *  Input parameters:
 *  	   s - sbmac structure
 *  	   phyaddr - PHY to use
 *  	   regidx - register within the PHY
 *  	   regval - data to write to register
 *
 *  Return value:
 *  	   nothing
 ********************************************************************* */

static void sbmac_mii_write(struct sbmac_softc *s,int phyaddr,int regidx,
			    unsigned int regval)
{
	int mac_mdio_genc;

	sbmac_mii_sync(s);

	sbmac_mii_senddata(s,MII_COMMAND_START,2);
	sbmac_mii_senddata(s,MII_COMMAND_WRITE,2);
	sbmac_mii_senddata(s,phyaddr, 5);
	sbmac_mii_senddata(s,regidx, 5);
	sbmac_mii_senddata(s,MII_COMMAND_ACK,2);
	sbmac_mii_senddata(s,regval,16);

	mac_mdio_genc = __raw_readq(s->sbm_mdio) & M_MAC_GENC;

	__raw_writeq(M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc, s->sbm_mdio);
}
/**********************************************************************
 *  SBDMA_INITCTX(d,s,chan,txrx,maxdescr)
 *
 *  Initialize a DMA channel context.  Since there are potentially
 *  eight DMA channels per MAC, it's nice to do this in a standard
 *  way.
 *
 *  Input parameters:
 *  	   d - sbmacdma_t structure (DMA channel context)
 *  	   s - sbmac_softc structure (pointer to a MAC)
 *  	   chan - channel number (0..1 right now)
 *  	   txrx - Identifies DMA_TX or DMA_RX for channel direction
 *  	   maxdescr - number of descriptors
 *
 *  Return value:
 *  	   nothing
 ********************************************************************* */

static void sbdma_initctx(sbmacdma_t *d,
			  struct sbmac_softc *s,
			  int chan,
			  int txrx,
			  int maxdescr)
{
#ifdef CONFIG_SBMAC_COALESCE
	int int_pktcnt, int_timeout;
#endif

	/*
	 * Save away interesting stuff in the structure
	 */

	d->sbdma_eth       = s;
	d->sbdma_channel   = chan;
	d->sbdma_txdir     = txrx;

	/* RMON clearing */
	s->sbe_idx =(s->sbm_base - A_MAC_BASE_0)/MAC_SPACING;

	__raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_BYTES)));
	__raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_COLLISIONS)));
	__raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_LATE_COL)));
	__raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_EX_COL)));
	__raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_FCS_ERROR)));
	__raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_ABORT)));
	__raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_BAD)));
	__raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_GOOD)));
	__raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_RUNT)));
	__raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_OVERSIZE)));
	__raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_BYTES)));
	__raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_MCAST)));
	__raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_BCAST)));
	__raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_BAD)));
	__raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_GOOD)));
	__raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_RUNT)));
	__raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_OVERSIZE)));
	__raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_FCS_ERROR)));
	__raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_LENGTH_ERROR)));
	__raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_CODE_ERROR)));
	__raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_ALIGN_ERROR)));

	/*
	 * initialize register pointers
	 */

	d->sbdma_config0 =
		s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CONFIG0);
	d->sbdma_config1 =
		s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CONFIG1);
	d->sbdma_dscrbase =
		s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_DSCR_BASE);
	d->sbdma_dscrcnt =
		s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_DSCR_CNT);
	d->sbdma_curdscr =
		s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CUR_DSCRADDR);
	if (d->sbdma_txdir)
		d->sbdma_oodpktlost = NULL;
	else
		d->sbdma_oodpktlost =
			s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_OODPKTLOST_RX);

	/*
	 * Allocate memory for the ring
	 */

	d->sbdma_maxdescr = maxdescr;

	d->sbdma_dscrtable_unaligned =
	d->sbdma_dscrtable = (sbdmadscr_t *)
		kmalloc((d->sbdma_maxdescr+1)*sizeof(sbdmadscr_t), GFP_KERNEL);

	/*
	 * The descriptor table must be aligned to at least 16 bytes or the
	 * MAC will corrupt it.
	 */
	d->sbdma_dscrtable = (sbdmadscr_t *)
		ALIGN((unsigned long)d->sbdma_dscrtable, sizeof(sbdmadscr_t));

	memset(d->sbdma_dscrtable,0,d->sbdma_maxdescr*sizeof(sbdmadscr_t));

	d->sbdma_dscrtable_end = d->sbdma_dscrtable + d->sbdma_maxdescr;

	d->sbdma_dscrtable_phys = virt_to_phys(d->sbdma_dscrtable);

	/*
	 * And context table
	 */

	d->sbdma_ctxtable = (struct sk_buff **)
		kmalloc(d->sbdma_maxdescr*sizeof(struct sk_buff *), GFP_KERNEL);

	memset(d->sbdma_ctxtable,0,d->sbdma_maxdescr*sizeof(struct sk_buff *));

#ifdef CONFIG_SBMAC_COALESCE
	/*
	 * Setup Rx/Tx DMA coalescing defaults
	 */

	int_pktcnt = (txrx == DMA_TX) ? int_pktcnt_tx : int_pktcnt_rx;
	if ( int_pktcnt ) {
		d->sbdma_int_pktcnt = int_pktcnt;
	} else {
		d->sbdma_int_pktcnt = 1;
	}

	int_timeout = (txrx == DMA_TX) ? int_timeout_tx : int_timeout_rx;
	if ( int_timeout ) {
		d->sbdma_int_timeout = int_timeout;
	} else {
		d->sbdma_int_timeout = 0;
	}
#endif

}
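
/*
 * With CONFIG_SBMAC_COALESCE enabled, the values saved above mean
 * "raise an interrupt only after sbdma_int_pktcnt descriptors have
 * completed, or after the sbdma_int_timeout interval has elapsed",
 * instead of one interrupt per packet; they come from the
 * int_pktcnt_tx/rx and int_timeout_tx/rx module parameters and are
 * programmed into the DMA config registers in sbdma_channel_start().
 */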
/**********************************************************************
 *  SBDMA_CHANNEL_START(d)
 *
 *  Initialize the hardware registers for a DMA channel.
 *
 *  Input parameters:
 *  	   d - DMA channel to init (context must be previously init'd
 *  	   rxtx - DMA_RX or DMA_TX depending on what type of channel
 *
 *  Return value:
 *  	   nothing
 ********************************************************************* */

static void sbdma_channel_start(sbmacdma_t *d, int rxtx)
{
	/*
	 * Turn on the DMA channel
	 */

#ifdef CONFIG_SBMAC_COALESCE
	__raw_writeq(V_DMA_INT_TIMEOUT(d->sbdma_int_timeout) |
		       0, d->sbdma_config1);
	__raw_writeq(M_DMA_EOP_INT_EN |
		       V_DMA_RINGSZ(d->sbdma_maxdescr) |
		       V_DMA_INT_PKTCNT(d->sbdma_int_pktcnt) |
		       0, d->sbdma_config0);
#else
	__raw_writeq(0, d->sbdma_config1);
	__raw_writeq(V_DMA_RINGSZ(d->sbdma_maxdescr) |
		       0, d->sbdma_config0);
#endif

	__raw_writeq(d->sbdma_dscrtable_phys, d->sbdma_dscrbase);

	/*
	 * Initialize ring pointers
	 */

	d->sbdma_addptr = d->sbdma_dscrtable;
	d->sbdma_remptr = d->sbdma_dscrtable;
}
/**********************************************************************
 *  SBDMA_CHANNEL_STOP(d)
 *
 *  Initialize the hardware registers for a DMA channel.
 *
 *  Input parameters:
 *  	   d - DMA channel to init (context must be previously init'd
 *
 *  Return value:
 *  	   nothing
 ********************************************************************* */

static void sbdma_channel_stop(sbmacdma_t *d)
{
	/*
	 * Turn off the DMA channel
	 */

	__raw_writeq(0, d->sbdma_config1);

	__raw_writeq(0, d->sbdma_dscrbase);

	__raw_writeq(0, d->sbdma_config0);

	/*
	 * Zero ring pointers
	 */

	d->sbdma_addptr = NULL;
	d->sbdma_remptr = NULL;
}
static void sbdma_align_skb(struct sk_buff *skb,int power2,int offset)
{
	unsigned long addr;
	unsigned long newaddr;

	addr = (unsigned long) skb->data;

	newaddr = (addr + power2 - 1) & ~(power2 - 1);

	skb_reserve(skb,newaddr-addr+offset);
}
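
/*
 * Example of the arithmetic above, assuming a 32-byte cache line and
 * the usual call sbdma_align_skb(skb, SMP_CACHE_BYTES, ETHER_ALIGN):
 * if skb->data initially starts at an address ending in 0x21, it is
 * first rounded up to ...0x40 and then offset by 2, so the Ethernet
 * header lands at ...0x42 and the IP header that follows the 14-byte
 * Ethernet header is naturally aligned.
 */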
/**********************************************************************
 *  SBDMA_ADD_RCVBUFFER(d,sb)
 *
 *  Add a buffer to the specified DMA channel.   For receive channels,
 *  this queues a buffer for inbound packets.
 *
 *  Input parameters:
 *  	   d - DMA channel descriptor
 * 	   sb - sk_buff to add, or NULL if we should allocate one
 *
 *  Return value:
 *  	   0 if buffer added successfully
 *  	   nonzero if the ring is full or no buffer could be allocated
 ********************************************************************* */

static int sbdma_add_rcvbuffer(sbmacdma_t *d,struct sk_buff *sb)
{
	sbdmadscr_t *dsc;
	sbdmadscr_t *nextdsc;
	struct sk_buff *sb_new = NULL;
	int pktsize = ENET_PACKET_SIZE;

	/* get pointer to our current place in the ring */

	dsc = d->sbdma_addptr;
	nextdsc = SBDMA_NEXTBUF(d,sbdma_addptr);

	/*
	 * figure out if the ring is full - if the next descriptor
	 * is the same as the one that we're going to remove from
	 * the ring, the ring is full
	 */

	if (nextdsc == d->sbdma_remptr) {
		return -ENOSPC;
	}

	/*
	 * Allocate a sk_buff if we don't already have one.
	 * If we do have an sk_buff, reset it so that it's empty.
	 *
	 * Note: sk_buffs don't seem to be guaranteed to have any sort
	 * of alignment when they are allocated.  Therefore, allocate enough
	 * extra space to make sure that:
	 *
	 *    1. the data does not start in the middle of a cache line.
	 *    2. The data does not end in the middle of a cache line
	 *    3. The buffer can be aligned such that the IP addresses are
	 *       naturally aligned.
	 *
	 *  Remember, the SOCs MAC writes whole cache lines at a time,
	 *  without reading the old contents first.  So, if the sk_buff's
	 *  data portion starts in the middle of a cache line, the SOC
	 *  DMA will trash the beginning (and ending) portions.
	 */

	if (sb == NULL) {
		sb_new = dev_alloc_skb(ENET_PACKET_SIZE + SMP_CACHE_BYTES * 2 + ETHER_ALIGN);
		if (sb_new == NULL) {
			printk(KERN_INFO "%s: sk_buff allocation failed\n",
			       d->sbdma_eth->sbm_dev->name);
			return -ENOBUFS;
		}

		sbdma_align_skb(sb_new, SMP_CACHE_BYTES, ETHER_ALIGN);
	}
	else {
		sb_new = sb;
		/*
		 * nothing special to reinit buffer, it's already aligned
		 * and sb->data already points to a good place.
		 */
	}

	/*
	 * fill in the descriptor
	 */

#ifdef CONFIG_SBMAC_COALESCE
	/*
	 * Do not interrupt per DMA transfer.
	 */
	dsc->dscr_a = virt_to_phys(sb_new->data) |
		V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) | 0;
#else
	dsc->dscr_a = virt_to_phys(sb_new->data) |
		V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) |
		M_DMA_DSCRA_INTERRUPT;
#endif

	/* receiving: no options */
	dsc->dscr_b = 0;

	/*
	 * fill in the context
	 */

	d->sbdma_ctxtable[dsc-d->sbdma_dscrtable] = sb_new;

	/*
	 * point at next packet
	 */

	d->sbdma_addptr = nextdsc;

	/*
	 * Give the buffer to the DMA engine.
	 */

	__raw_writeq(1, d->sbdma_dscrcnt);

	return 0;					/* we did it */
}
/**********************************************************************
 *  SBDMA_ADD_TXBUFFER(d,sb)
 *
 *  Add a transmit buffer to the specified DMA channel, causing a
 *  transmit to start.
 *
 *  Input parameters:
 *  	   d - DMA channel descriptor
 * 	   sb - sk_buff to add
 *
 *  Return value:
 *  	   0 transmit queued successfully
 *  	   otherwise error code
 ********************************************************************* */

static int sbdma_add_txbuffer(sbmacdma_t *d,struct sk_buff *sb)
{
	sbdmadscr_t *dsc;
	sbdmadscr_t *nextdsc;
	uint64_t phys;
	uint64_t ncb;
	int length;

	/* get pointer to our current place in the ring */

	dsc = d->sbdma_addptr;
	nextdsc = SBDMA_NEXTBUF(d,sbdma_addptr);

	/*
	 * figure out if the ring is full - if the next descriptor
	 * is the same as the one that we're going to remove from
	 * the ring, the ring is full
	 */

	if (nextdsc == d->sbdma_remptr) {
		return -ENOSPC;
	}

	/*
	 * Under Linux, it's not necessary to copy/coalesce buffers
	 * like it is on NetBSD.  We think they're all contiguous,
	 * but that may not be true for GBE.
	 */

	length = sb->len;

	/*
	 * fill in the descriptor.  Note that the number of cache
	 * blocks in the descriptor is the number of blocks
	 * *spanned*, so we need to add in the offset (if any)
	 * while doing the calculation.
	 */

	phys = virt_to_phys(sb->data);
	ncb = NUMCACHEBLKS(length+(phys & (SMP_CACHE_BYTES - 1)));

	dsc->dscr_a = phys |
		V_DMA_DSCRA_A_SIZE(ncb) |
#ifndef CONFIG_SBMAC_COALESCE
		M_DMA_DSCRA_INTERRUPT |
#endif
		M_DMA_ETHTX_SOP;

	/* transmitting: set outbound options and length */

	dsc->dscr_b = V_DMA_DSCRB_OPTIONS(K_DMA_ETHTX_APPENDCRC_APPENDPAD) |
		V_DMA_DSCRB_PKT_SIZE(length);

	/*
	 * fill in the context
	 */

	d->sbdma_ctxtable[dsc-d->sbdma_dscrtable] = sb;

	/*
	 * point at next packet
	 */

	d->sbdma_addptr = nextdsc;

	/*
	 * Give the buffer to the DMA engine.
	 */

	__raw_writeq(1, d->sbdma_dscrcnt);

	return 0;					/* we did it */
}
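
/*
 * Worked example of the "blocks spanned" calculation above, assuming
 * a 32-byte cache line: a 100-byte frame whose data starts 30 bytes
 * into a cache line spans NUMCACHEBLKS(100 + 30) = 5 cache lines,
 * one more than NUMCACHEBLKS(100) = 4 would suggest if the starting
 * offset were ignored.
 */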
/**********************************************************************
 *  SBDMA_EMPTYRING(d)
 *
 *  Free all allocated sk_buffs on the specified DMA channel;
 *
 *  Input parameters:
 *  	   d - DMA channel
 *
 *  Return value:
 *  	   nothing
 ********************************************************************* */

static void sbdma_emptyring(sbmacdma_t *d)
{
	int idx;
	struct sk_buff *sb;

	for (idx = 0; idx < d->sbdma_maxdescr; idx++) {
		sb = d->sbdma_ctxtable[idx];
		if (sb) {
			dev_kfree_skb(sb);
			d->sbdma_ctxtable[idx] = NULL;
		}
	}
}
/**********************************************************************
 *  SBDMA_FILLRING(d)
 *
 *  Fill the specified DMA channel (must be receive channel)
 *  with sk_buffs
 *
 *  Input parameters:
 *  	   d - DMA channel
 *
 *  Return value:
 *  	   nothing
 ********************************************************************* */

static void sbdma_fillring(sbmacdma_t *d)
{
	int idx;

	for (idx = 0; idx < SBMAC_MAX_RXDESCR-1; idx++) {
		if (sbdma_add_rcvbuffer(d,NULL) != 0)
			break;
	}
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void sbmac_netpoll(struct net_device *netdev)
{
	struct sbmac_softc *sc = netdev_priv(netdev);
	int irq = sc->sbm_dev->irq;

	__raw_writeq(0, sc->sbm_imr);

	sbmac_intr(irq, netdev);

#ifdef CONFIG_SBMAC_COALESCE
	__raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) |
	((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0),
	sc->sbm_imr);
#else
	__raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) |
	(M_MAC_INT_CHANNEL << S_MAC_RX_CH0), sc->sbm_imr);
#endif
}
#endif
/**********************************************************************
 *  SBDMA_RX_PROCESS(sc,d,work_to_do,poll)
 *
 *  Process "completed" receive buffers on the specified DMA channel.
 *
 *  Input parameters:
 *            sc - softc structure
 *  	       d - DMA channel context
 *    work_to_do - no. of packets to process before enabling interrupt
 *                 again (for NAPI)
 *          poll - 1: using polling (for NAPI)
 *
 *  Return value:
 *  	   number of packets handled
 ********************************************************************* */

static int sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d,
			    int work_to_do, int poll)
{
	int curidx;
	int hwidx;
	sbdmadscr_t *dsc;
	struct sk_buff *sb;
	int len;
	int work_done = 0;
	int dropped = 0;

again:
	/* Check if the HW dropped any frames */
	sc->sbm_stats.rx_fifo_errors
	    += __raw_readq(sc->sbm_rxdma.sbdma_oodpktlost) & 0xffff;
	__raw_writeq(0, sc->sbm_rxdma.sbdma_oodpktlost);

	while (work_to_do-- > 0) {
		/*
		 * figure out where we are (as an index) and where
		 * the hardware is (also as an index)
		 *
		 * This could be done faster if (for example) the
		 * descriptor table was page-aligned and contiguous in
		 * both virtual and physical memory -- you could then
		 * just compare the low-order bits of the virtual address
		 * (sbdma_remptr) and the physical address (sbdma_curdscr CSR)
		 */

		dsc = d->sbdma_remptr;
		curidx = dsc - d->sbdma_dscrtable;

		prefetch(dsc);
		prefetch(&d->sbdma_ctxtable[curidx]);

		hwidx = (int) (((__raw_readq(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) -
				d->sbdma_dscrtable_phys) / sizeof(sbdmadscr_t));

		/*
		 * If they're the same, that means we've processed all
		 * of the descriptors up to (but not including) the one that
		 * the hardware is working on right now.
		 */

		if (curidx == hwidx)
			goto done;

		/*
		 * Otherwise, get the packet's sk_buff ptr back
		 */

		sb = d->sbdma_ctxtable[curidx];
		d->sbdma_ctxtable[curidx] = NULL;

		len = (int)G_DMA_DSCRB_PKT_SIZE(dsc->dscr_b) - 4;

		/*
		 * Check packet status.  If good, process it.
		 * If not, silently drop it and put it back on the
		 * receive ring.
		 */

		if (likely (!(dsc->dscr_a & M_DMA_ETHRX_BAD))) {

			/*
			 * Add a new buffer to replace the old one.  If we fail
			 * to allocate a buffer, we're going to drop this
			 * packet and put it right back on the receive ring.
			 */

			if (unlikely (sbdma_add_rcvbuffer(d,NULL) ==
				      -ENOBUFS)) {
				sc->sbm_stats.rx_dropped++;
				sbdma_add_rcvbuffer(d,sb); /* re-add old buffer */
				/* No point in continuing at the moment */
				printk(KERN_ERR "dropped packet (1)\n");
				d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
				goto done;
			} else {
				/*
				 * Set length into the packet
				 */
				skb_put(sb,len);

				/*
				 * Buffer has been replaced on the
				 * receive ring.  Pass the buffer to
				 * the kernel
				 */
				sb->protocol = eth_type_trans(sb,d->sbdma_eth->sbm_dev);
				/* Check hw IPv4/TCP checksum if supported */
				if (sc->rx_hw_checksum == ENABLE) {
					if (!((dsc->dscr_a) & M_DMA_ETHRX_BADIP4CS) &&
					    !((dsc->dscr_a) & M_DMA_ETHRX_BADTCPCS)) {
						sb->ip_summed = CHECKSUM_UNNECESSARY;
						/* don't need to set sb->csum */
					} else {
						sb->ip_summed = CHECKSUM_NONE;
					}
				}
				prefetch((const void *)(((char *)sb->data)+32));
				if (poll)
					dropped = netif_receive_skb(sb);
				else
					dropped = netif_rx(sb);

				if (dropped == NET_RX_DROP) {
					sc->sbm_stats.rx_dropped++;
					d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
					goto done;
				}
				else {
					sc->sbm_stats.rx_bytes += len;
					sc->sbm_stats.rx_packets++;
				}
			}
		} else {
			/*
			 * Packet was mangled somehow.  Just drop it and
			 * put it back on the receive ring.
			 */
			sc->sbm_stats.rx_errors++;
			sbdma_add_rcvbuffer(d,sb);
		}

		/*
		 * .. and advance to the next buffer.
		 */

		d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
		work_done++;
	}
	if (!poll) {
		work_to_do = 32;
		goto again; /* collect fifo drop statistics again */
	}
done:
	return work_done;
}
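
/*
 * The poll argument selects between the two receive paths: the NAPI
 * path (poll != 0) delivers packets with netif_receive_skb() and
 * stops after work_to_do packets so the caller can honour its budget,
 * while the interrupt path (poll == 0) uses netif_rx() and takes an
 * extra pass mainly to pick up the out-of-descriptor drop counter.
 */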
/**********************************************************************
 *  SBDMA_TX_PROCESS(sc,d)
 *
 *  Process "completed" transmit buffers on the specified DMA channel.
 *  This is normally called within the interrupt service routine.
 *  Note that this isn't really ideal for priority channels, since
 *  it processes all of the packets on a given channel before
 *  returning.
 *
 *  Input parameters:
 *      sc - softc structure
 *  	 d - DMA channel context
 *    poll - 1: using polling (for NAPI)
 *
 *  Return value:
 *  	   nothing
 ********************************************************************* */

static void sbdma_tx_process(struct sbmac_softc *sc,sbmacdma_t *d, int poll)
{
	int curidx;
	int hwidx;
	sbdmadscr_t *dsc;
	struct sk_buff *sb;
	unsigned long flags;
	int packets_handled = 0;

	spin_lock_irqsave(&(sc->sbm_lock), flags);

	if (d->sbdma_remptr == d->sbdma_addptr)
		goto end_unlock;

	hwidx = (int) (((__raw_readq(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) -
			d->sbdma_dscrtable_phys) / sizeof(sbdmadscr_t));

	for (;;) {
		/*
		 * figure out where we are (as an index) and where
		 * the hardware is (also as an index)
		 *
		 * This could be done faster if (for example) the
		 * descriptor table was page-aligned and contiguous in
		 * both virtual and physical memory -- you could then
		 * just compare the low-order bits of the virtual address
		 * (sbdma_remptr) and the physical address (sbdma_curdscr CSR)
		 */

		curidx = d->sbdma_remptr - d->sbdma_dscrtable;

		/*
		 * If they're the same, that means we've processed all
		 * of the descriptors up to (but not including) the one that
		 * the hardware is working on right now.
		 */

		if (curidx == hwidx)
			break;

		/*
		 * Otherwise, get the packet's sk_buff ptr back
		 */

		dsc = &(d->sbdma_dscrtable[curidx]);
		sb = d->sbdma_ctxtable[curidx];
		d->sbdma_ctxtable[curidx] = NULL;

		/*
		 * Stats
		 */

		sc->sbm_stats.tx_bytes += sb->len;
		sc->sbm_stats.tx_packets++;

		/*
		 * for transmits, we just free buffers.
		 */

		dev_kfree_skb_irq(sb);

		/*
		 * .. and advance to the next buffer.
		 */

		d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);

		packets_handled++;
	}

	/*
	 * Decide if we should wake up the protocol or not.
	 * Other drivers seem to do this when we reach a low
	 * watermark on the transmit queue.
	 */

	if (packets_handled)
		netif_wake_queue(d->sbdma_eth->sbm_dev);

end_unlock:
	spin_unlock_irqrestore(&(sc->sbm_lock), flags);

}
/**********************************************************************
 *  SBMAC_INITCTX(s)
 *
 *  Initialize an Ethernet context structure - this is called
 *  once per MAC on the 1250.  Memory is allocated here, so don't
 *  call it again from inside the ioctl routines that bring the
 *  interface up/down
 *
 *  Input parameters:
 *  	   s - sbmac context structure
 *
 *  Return value:
 *  	   0
 ********************************************************************* */

static int sbmac_initctx(struct sbmac_softc *s)
{

	/*
	 * figure out the addresses of some ports
	 */

	s->sbm_macenable = s->sbm_base + R_MAC_ENABLE;
	s->sbm_maccfg    = s->sbm_base + R_MAC_CFG;
	s->sbm_fifocfg   = s->sbm_base + R_MAC_THRSH_CFG;
	s->sbm_framecfg  = s->sbm_base + R_MAC_FRAMECFG;
	s->sbm_rxfilter  = s->sbm_base + R_MAC_ADFILTER_CFG;
	s->sbm_isr       = s->sbm_base + R_MAC_STATUS;
	s->sbm_imr       = s->sbm_base + R_MAC_INT_MASK;
	s->sbm_mdio      = s->sbm_base + R_MAC_MDIO;

	s->sbm_phy_oldbmsr = 0;
	s->sbm_phy_oldanlpar = 0;
	s->sbm_phy_oldk1stsr = 0;
	s->sbm_phy_oldlinkstat = 0;

	/*
	 * Initialize the DMA channels.  Right now, only one per MAC is used
	 * Note: Only do this _once_, as it allocates memory from the kernel!
	 */

	sbdma_initctx(&(s->sbm_txdma),s,0,DMA_TX,SBMAC_MAX_TXDESCR);
	sbdma_initctx(&(s->sbm_rxdma),s,0,DMA_RX,SBMAC_MAX_RXDESCR);

	/*
	 * initial state is OFF
	 */

	s->sbm_state = sbmac_state_off;

	/*
	 * Initial speed is (XXX TEMP) 10MBit/s HDX no FC
	 */

	s->sbm_speed = sbmac_speed_10;
	s->sbm_duplex = sbmac_duplex_half;
	s->sbm_fc = sbmac_fc_disabled;

	return 0;
}


static void sbdma_uninitctx(struct sbmacdma_s *d)
{
	if (d->sbdma_dscrtable_unaligned) {
		kfree(d->sbdma_dscrtable_unaligned);
		d->sbdma_dscrtable_unaligned = d->sbdma_dscrtable = NULL;
	}

	if (d->sbdma_ctxtable) {
		kfree(d->sbdma_ctxtable);
		d->sbdma_ctxtable = NULL;
	}
}


static void sbmac_uninitctx(struct sbmac_softc *sc)
{
	sbdma_uninitctx(&(sc->sbm_txdma));
	sbdma_uninitctx(&(sc->sbm_rxdma));
}
/**********************************************************************
 *  SBMAC_CHANNEL_START(s)
 *
 *  Start packet processing on this MAC.
 *
 *  Input parameters:
 *  	   s - sbmac structure
 *
 *  Return value:
 *  	   nothing
 ********************************************************************* */

static void sbmac_channel_start(struct sbmac_softc *s)
{
	uint64_t reg;
	volatile void __iomem *port;
	uint64_t cfg,fifo,framecfg;
	int idx, th_value;

	/*
	 * Don't do this if running
	 */

	if (s->sbm_state == sbmac_state_on)
		return;

	/*
	 * Bring the controller out of reset, but leave it off.
	 */

	__raw_writeq(0, s->sbm_macenable);

	/*
	 * Ignore all received packets
	 */

	__raw_writeq(0, s->sbm_rxfilter);

	/*
	 * Calculate values for various control registers.
	 */

	cfg = M_MAC_RETRY_EN |
		M_MAC_TX_HOLD_SOP_EN |
		V_MAC_TX_PAUSE_CNT_16K |
		M_MAC_AP_STAT_EN |
		M_MAC_FAST_SYNC |
		M_MAC_SS_EN |
		0;

	/*
	 * Be sure that RD_THRSH+WR_THRSH <= 32 for pass1 parts
	 * and make sure that RD_THRSH + WR_THRSH <=128 for pass2 and above
	 * Use a larger RD_THRSH for gigabit
	 */
	if (soc_type == K_SYS_SOC_TYPE_BCM1250 && periph_rev < 2)
		th_value = 28;
	else
		th_value = 64;

	fifo = V_MAC_TX_WR_THRSH(4) |	/* Must be '4' or '8' */
		((s->sbm_speed == sbmac_speed_1000)
		 ? V_MAC_TX_RD_THRSH(th_value) : V_MAC_TX_RD_THRSH(4)) |
		V_MAC_TX_RL_THRSH(4) |
		V_MAC_RX_PL_THRSH(4) |
		V_MAC_RX_RD_THRSH(4) |	/* Must be '4' */
		V_MAC_RX_PL_THRSH(4) |
		V_MAC_RX_RL_THRSH(8) |
		0;

	framecfg = V_MAC_MIN_FRAMESZ_DEFAULT |
		V_MAC_MAX_FRAMESZ_DEFAULT |
		V_MAC_BACKOFF_SEL(1);

	/*
	 * Clear out the hash address map
	 */

	port = s->sbm_base + R_MAC_HASH_BASE;
	for (idx = 0; idx < MAC_HASH_COUNT; idx++) {
		__raw_writeq(0, port);
		port += sizeof(uint64_t);
	}

	/*
	 * Clear out the exact-match table
	 */

	port = s->sbm_base + R_MAC_ADDR_BASE;
	for (idx = 0; idx < MAC_ADDR_COUNT; idx++) {
		__raw_writeq(0, port);
		port += sizeof(uint64_t);
	}

	/*
	 * Clear out the DMA Channel mapping table registers
	 */

	port = s->sbm_base + R_MAC_CHUP0_BASE;
	for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) {
		__raw_writeq(0, port);
		port += sizeof(uint64_t);
	}

	port = s->sbm_base + R_MAC_CHLO0_BASE;
	for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) {
		__raw_writeq(0, port);
		port += sizeof(uint64_t);
	}

	/*
	 * Program the hardware address.  It goes into the hardware-address
	 * register as well as the first filter register.
	 */

	reg = sbmac_addr2reg(s->sbm_hwaddr);

	port = s->sbm_base + R_MAC_ADDR_BASE;
	__raw_writeq(reg, port);
	port = s->sbm_base + R_MAC_ETHERNET_ADDR;

#ifdef CONFIG_SB1_PASS_1_WORKAROUNDS
	/*
	 * Pass1 SOCs do not receive packets addressed to the
	 * destination address in the R_MAC_ETHERNET_ADDR register.
	 * Set the value to zero.
	 */
	__raw_writeq(0, port);
#else
	__raw_writeq(reg, port);
#endif

	/*
	 * Set the receive filter for no packets, and write values
	 * to the various config registers
	 */

	__raw_writeq(0, s->sbm_rxfilter);
	__raw_writeq(0, s->sbm_imr);
	__raw_writeq(framecfg, s->sbm_framecfg);
	__raw_writeq(fifo, s->sbm_fifocfg);
	__raw_writeq(cfg, s->sbm_maccfg);

	/*
	 * Initialize DMA channels (rings should be ok now)
	 */

	sbdma_channel_start(&(s->sbm_rxdma), DMA_RX);
	sbdma_channel_start(&(s->sbm_txdma), DMA_TX);

	/*
	 * Configure the speed, duplex, and flow control
	 */

	sbmac_set_speed(s,s->sbm_speed);
	sbmac_set_duplex(s,s->sbm_duplex,s->sbm_fc);

	/*
	 * Fill the receive ring
	 */

	sbdma_fillring(&(s->sbm_rxdma));

	/*
	 * Turn on the rest of the bits in the enable register
	 */

#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
	__raw_writeq(M_MAC_RXDMA_EN0 |
		       M_MAC_TXDMA_EN0, s->sbm_macenable);
#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
	__raw_writeq(M_MAC_RXDMA_EN0 |
		       M_MAC_TXDMA_EN0 |
		       M_MAC_RX_ENABLE |
		       M_MAC_TX_ENABLE, s->sbm_macenable);
#else
#error invalid SiByte MAC configuration
#endif

#ifdef CONFIG_SBMAC_COALESCE
	__raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) |
		       ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0), s->sbm_imr);
#else
	__raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) |
		       (M_MAC_INT_CHANNEL << S_MAC_RX_CH0), s->sbm_imr);
#endif

	/*
	 * Enable receiving unicasts and broadcasts
	 */

	__raw_writeq(M_MAC_UCAST_EN | M_MAC_BCAST_EN, s->sbm_rxfilter);

	/*
	 * we're running now.
	 */

	s->sbm_state = sbmac_state_on;

	/*
	 * Program multicast addresses
	 */

	sbmac_setmulti(s);

	/*
	 * If channel was in promiscuous mode before, turn that on
	 */

	if (s->sbm_devflags & IFF_PROMISC) {
		sbmac_promiscuous_mode(s,1);
	}

}
/**********************************************************************
 *  SBMAC_CHANNEL_STOP(s)
 *
 *  Stop packet processing on this MAC.
 *
 *  Input parameters:
 *  	   s - sbmac structure
 *
 *  Return value:
 *  	   nothing
 ********************************************************************* */

static void sbmac_channel_stop(struct sbmac_softc *s)
{
	/* don't do this if already stopped */

	if (s->sbm_state == sbmac_state_off)
		return;

	/* don't accept any packets, disable all interrupts */

	__raw_writeq(0, s->sbm_rxfilter);
	__raw_writeq(0, s->sbm_imr);

	/* Turn off ticker */

	/* turn off receiver and transmitter */

	__raw_writeq(0, s->sbm_macenable);

	/* We're stopped now. */

	s->sbm_state = sbmac_state_off;

	/*
	 * Stop DMA channels (rings should be ok now)
	 */

	sbdma_channel_stop(&(s->sbm_rxdma));
	sbdma_channel_stop(&(s->sbm_txdma));

	/* Empty the receive and transmit rings */

	sbdma_emptyring(&(s->sbm_rxdma));
	sbdma_emptyring(&(s->sbm_txdma));

}
/**********************************************************************
 *  SBMAC_SET_CHANNEL_STATE(state)
 *
 *  Set the channel's state ON or OFF
 *
 *  Input parameters:
 *  	   state - new state
 *
 *  Return value:
 *  	   old state
 ********************************************************************* */
static sbmac_state_t sbmac_set_channel_state(struct sbmac_softc *sc,
					     sbmac_state_t state)
{
	sbmac_state_t oldstate = sc->sbm_state;

	/*
	 * If same as previous state, return
	 */

	if (state == oldstate) {
		return oldstate;
	}

	/*
	 * If new state is ON, turn channel on
	 */

	if (state == sbmac_state_on) {
		sbmac_channel_start(sc);
	}
	else {
		sbmac_channel_stop(sc);
	}

	/*
	 * Return previous state
	 */

	return oldstate;
}
/**********************************************************************
 *  SBMAC_PROMISCUOUS_MODE(sc,onoff)
 *
 *  Turn on or off promiscuous mode
 *
 *  Input parameters:
 *  	   sc - softc
 *  	   onoff - 1 to turn on, 0 to turn off
 *
 *  Return value:
 *  	   nothing
 ********************************************************************* */

static void sbmac_promiscuous_mode(struct sbmac_softc *sc,int onoff)
{
	uint64_t reg;

	if (sc->sbm_state != sbmac_state_on)
		return;

	if (onoff) {
		reg = __raw_readq(sc->sbm_rxfilter);
		reg |= M_MAC_ALLPKT_EN;
		__raw_writeq(reg, sc->sbm_rxfilter);
	}
	else {
		reg = __raw_readq(sc->sbm_rxfilter);
		reg &= ~M_MAC_ALLPKT_EN;
		__raw_writeq(reg, sc->sbm_rxfilter);
	}
}
/**********************************************************************
 *  SBMAC_SETIPHDR_OFFSET(sc,onoff)
 *
 *  Set the iphdr offset as 15 assuming ethernet encapsulation
 *
 *  Input parameters:
 *  	   sc - softc
 *
 *  Return value:
 *  	   nothing
 ********************************************************************* */

static void sbmac_set_iphdr_offset(struct sbmac_softc *sc)
{
	uint64_t reg;

	/* Hard code the off set to 15 for now */
	reg = __raw_readq(sc->sbm_rxfilter);
	reg &= ~M_MAC_IPHDR_OFFSET | V_MAC_IPHDR_OFFSET(15);
	__raw_writeq(reg, sc->sbm_rxfilter);

	/* BCM1250 pass1 didn't have hardware checksum.  Everything
	   later does. */
	if (soc_type == K_SYS_SOC_TYPE_BCM1250 && periph_rev < 2) {
		sc->rx_hw_checksum = DISABLE;
	} else {
		sc->rx_hw_checksum = ENABLE;
	}
}
/**********************************************************************
 *  SBMAC_ADDR2REG(ptr)
 *
 *  Convert six bytes into the 64-bit register value that
 *  we typically write into the SBMAC's address/mcast registers
 *
 *  Input parameters:
 *  	   ptr - pointer to 6 bytes
 *
 *  Return value:
 *  	   register value
 ********************************************************************* */

static uint64_t sbmac_addr2reg(unsigned char *ptr)
{
	uint64_t reg = 0;

	ptr += 6;

	reg |= (uint64_t) *(--ptr);
	reg <<= 8;
	reg |= (uint64_t) *(--ptr);
	reg <<= 8;
	reg |= (uint64_t) *(--ptr);
	reg <<= 8;
	reg |= (uint64_t) *(--ptr);
	reg <<= 8;
	reg |= (uint64_t) *(--ptr);
	reg <<= 8;
	reg |= (uint64_t) *(--ptr);

	return reg;
}
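
/*
 * For example, the sample address 40:00:00:00:01:00 used for
 * SBMAC_ETH0_HWADDR above would be packed as 0x0000000100000040,
 * i.e. the first octet of the Ethernet address ends up in the
 * least-significant byte of the register value.
 */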
/**********************************************************************
 *  SBMAC_SET_SPEED(s,speed)
 *
 *  Configure LAN speed for the specified MAC.
 *  Warning: must be called when MAC is off!
 *
 *  Input parameters:
 *  	   s - sbmac structure
 *  	   speed - speed to set MAC to (see sbmac_speed_t enum)
 *
 *  Return value:
 *  	   1 if successful
 *  	   0 indicates invalid parameters
 ********************************************************************* */

static int sbmac_set_speed(struct sbmac_softc *s,sbmac_speed_t speed)
{
	uint64_t cfg;
	uint64_t framecfg;

	/*
	 * Save new current values
	 */

	s->sbm_speed = speed;

	if (s->sbm_state == sbmac_state_on)
		return 0;	/* save for next restart */

	/*
	 * Read current register values
	 */

	cfg = __raw_readq(s->sbm_maccfg);
	framecfg = __raw_readq(s->sbm_framecfg);

	/*
	 * Mask out the stuff we want to change
	 */

	cfg &= ~(M_MAC_BURST_EN | M_MAC_SPEED_SEL);
	framecfg &= ~(M_MAC_IFG_RX | M_MAC_IFG_TX | M_MAC_IFG_THRSH |
		      M_MAC_SLOT_SIZE);

	/*
	 * Now add in the new bits
	 */

	switch (speed) {
	case sbmac_speed_10:
		framecfg |= V_MAC_IFG_RX_10 |
			V_MAC_IFG_TX_10 |
			K_MAC_IFG_THRSH_10 |
			V_MAC_SLOT_SIZE_10;
		cfg |= V_MAC_SPEED_SEL_10MBPS;
		break;

	case sbmac_speed_100:
		framecfg |= V_MAC_IFG_RX_100 |
			V_MAC_IFG_TX_100 |
			V_MAC_IFG_THRSH_100 |
			V_MAC_SLOT_SIZE_100;
		cfg |= V_MAC_SPEED_SEL_100MBPS;
		break;

	case sbmac_speed_1000:
		framecfg |= V_MAC_IFG_RX_1000 |
			V_MAC_IFG_TX_1000 |
			V_MAC_IFG_THRSH_1000 |
			V_MAC_SLOT_SIZE_1000;
		cfg |= V_MAC_SPEED_SEL_1000MBPS | M_MAC_BURST_EN;
		break;

	case sbmac_speed_auto:		/* XXX not implemented */
		/* fall through */
	default:
		return 0;
	}

	/*
	 * Send the bits back to the hardware
	 */

	__raw_writeq(framecfg, s->sbm_framecfg);
	__raw_writeq(cfg, s->sbm_maccfg);

	return 1;
}
/**********************************************************************
 *  SBMAC_SET_DUPLEX(s,duplex,fc)
 *
 *  Set Ethernet duplex and flow control options for this MAC
 *  Warning: must be called when MAC is off!
 *
 *  Input parameters:
 *  	   s - sbmac structure
 *  	   duplex - duplex setting (see sbmac_duplex_t)
 *  	   fc - flow control setting (see sbmac_fc_t)
 *
 *  Return value:
 *  	   1 if ok
 *  	   0 if an invalid parameter combination was specified
 ********************************************************************* */

static int sbmac_set_duplex(struct sbmac_softc *s,sbmac_duplex_t duplex,sbmac_fc_t fc)
{
	uint64_t cfg;

	/*
	 * Save new current values
	 */

	s->sbm_duplex = duplex;
	s->sbm_fc = fc;

	if (s->sbm_state == sbmac_state_on)
		return 0;	/* save for next restart */

	/*
	 * Read current register values
	 */

	cfg = __raw_readq(s->sbm_maccfg);

	/*
	 * Mask off the stuff we're about to change
	 */

	cfg &= ~(M_MAC_FC_SEL | M_MAC_FC_CMD | M_MAC_HDX_EN);

	switch (duplex) {
	case sbmac_duplex_half:
		switch (fc) {
		case sbmac_fc_disabled:
			cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_DISABLED;
			break;

		case sbmac_fc_collision:
			cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENABLED;
			break;

		case sbmac_fc_carrier:
			cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENAB_FALSECARR;
			break;

		case sbmac_fc_auto:		/* XXX not implemented */
			/* fall through */
		case sbmac_fc_frame:		/* not valid in half duplex */
		default:			/* invalid selection */
			return 0;
		}
		break;

	case sbmac_duplex_full:
		switch (fc) {
		case sbmac_fc_disabled:
			cfg |= V_MAC_FC_CMD_DISABLED;
			break;

		case sbmac_fc_frame:
			cfg |= V_MAC_FC_CMD_ENABLED;
			break;

		case sbmac_fc_collision:	/* not valid in full duplex */
		case sbmac_fc_carrier:		/* not valid in full duplex */
		case sbmac_fc_auto:		/* XXX not implemented */
		default:
			return 0;
		}
		break;

	case sbmac_duplex_auto:
		/* XXX not implemented */
		break;
	}

	/*
	 * Send the bits back to the hardware
	 */

	__raw_writeq(cfg, s->sbm_maccfg);

	return 1;
}
/**********************************************************************
 *  SBMAC_INTR()
 *
 *  Interrupt handler for MAC interrupts
 *
 *  Input parameters:
 *  	   MAC structure
 *
 *  Return value:
 *  	   whether the interrupt was handled
 ********************************************************************* */
static irqreturn_t sbmac_intr(int irq,void *dev_instance)
{
	struct net_device *dev = (struct net_device *) dev_instance;
	struct sbmac_softc *sc = netdev_priv(dev);
	uint64_t isr;
	int handled = 0;

	/*
	 * Read the ISR (this clears the bits in the real
	 * register, except for counter addr)
	 */

	isr = __raw_readq(sc->sbm_isr) & ~M_MAC_COUNTER_ADDR;

	if (isr == 0)
		return IRQ_RETVAL(0);

	handled = 1;

	/*
	 * Transmits on channel 0
	 */

	if (isr & (M_MAC_INT_CHANNEL << S_MAC_TX_CH0)) {
		sbdma_tx_process(sc,&(sc->sbm_txdma), 0);
#ifdef CONFIG_NETPOLL_TRAP
		if (netpoll_trap()) {
			if (test_and_clear_bit(__LINK_STATE_XOFF, &dev->state))
				__netif_schedule(dev);
		}
#endif
	}

	if (isr & (M_MAC_INT_CHANNEL << S_MAC_RX_CH0)) {
		if (netif_rx_schedule_prep(dev)) {
			__raw_writeq(0, sc->sbm_imr);
			__netif_rx_schedule(dev);
			/* Depend on the exit from poll to reenable intr */
		}
		else {
			/* may leave some packets behind */
			sbdma_rx_process(sc,&(sc->sbm_rxdma),
					 SBMAC_MAX_RXDESCR * 2, 0);
		}
	}
	return IRQ_RETVAL(handled);
}
/**********************************************************************
 *  SBMAC_START_TX(skb,dev)
 *
 *  Start output on the specified interface.  Basically, we
 *  queue as many buffers as we can until the ring fills up, or
 *  we run off the end of the queue, whichever comes first.
 *
 *  Input parameters:
 *  	   skb - sk_buff to transmit
 *  	   dev - net device
 *
 *  Return value:
 *  	   0 if ok, 1 if the transmit ring was full
 ********************************************************************* */
static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct sbmac_softc *sc = netdev_priv(dev);

	/* lock eth irq */
	spin_lock_irq (&sc->sbm_lock);

	/*
	 * Put the buffer on the transmit ring.  If we
	 * don't have room, stop the queue.
	 */

	if (sbdma_add_txbuffer(&(sc->sbm_txdma),skb)) {
		/* XXX save skb that we could not send */
		netif_stop_queue(dev);
		spin_unlock_irq(&sc->sbm_lock);

		return 1;
	}

	dev->trans_start = jiffies;

	spin_unlock_irq (&sc->sbm_lock);

	return 0;
}
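
/*
 * Returning nonzero here tells the network stack that the packet was
 * not consumed and that the queue has been stopped; sbdma_tx_process()
 * restarts it with netif_wake_queue() once descriptors are reclaimed,
 * at which point the stack retransmits the skb.
 */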
/**********************************************************************
 *  SBMAC_SETMULTI(sc)
 *
 *  Reprogram the multicast table into the hardware, given
 *  the list of multicasts associated with the interface
 *  structure.
 *
 *  Input parameters:
 *  	   sc - softc
 *
 *  Return value:
 *  	   nothing
 ********************************************************************* */

static void sbmac_setmulti(struct sbmac_softc *sc)
{
	uint64_t reg;
	volatile void __iomem *port;
	int idx;
	struct dev_mc_list *mclist;
	struct net_device *dev = sc->sbm_dev;

	/*
	 * Clear out entire multicast table.  We do this by nuking
	 * the entire hash table and all the direct matches except
	 * the first one, which is used for our station address
	 */

	for (idx = 1; idx < MAC_ADDR_COUNT; idx++) {
		port = sc->sbm_base + R_MAC_ADDR_BASE+(idx*sizeof(uint64_t));
		__raw_writeq(0, port);
	}

	for (idx = 0; idx < MAC_HASH_COUNT; idx++) {
		port = sc->sbm_base + R_MAC_HASH_BASE+(idx*sizeof(uint64_t));
		__raw_writeq(0, port);
	}

	/*
	 * Clear the filter to say we don't want any multicasts.
	 */

	reg = __raw_readq(sc->sbm_rxfilter);
	reg &= ~(M_MAC_MCAST_INV | M_MAC_MCAST_EN);
	__raw_writeq(reg, sc->sbm_rxfilter);

	if (dev->flags & IFF_ALLMULTI) {
		/*
		 * Enable ALL multicasts.  Do this by inverting the
		 * multicast enable bit.
		 */
		reg = __raw_readq(sc->sbm_rxfilter);
		reg |= (M_MAC_MCAST_INV | M_MAC_MCAST_EN);
		__raw_writeq(reg, sc->sbm_rxfilter);
		return;
	}


	/*
	 * Program new multicast entries.  For now, only use the
	 * perfect filter.  In the future we'll need to use the
	 * hash filter if the perfect filter overflows
	 */

	/* XXX only using perfect filter for now, need to use hash
	 * XXX if the table overflows */

	idx = 1;		/* skip station address */
	mclist = dev->mc_list;
	while (mclist && (idx < MAC_ADDR_COUNT)) {
		reg = sbmac_addr2reg(mclist->dmi_addr);
		port = sc->sbm_base + R_MAC_ADDR_BASE+(idx * sizeof(uint64_t));
		__raw_writeq(reg, port);
		idx++;
		mclist = mclist->next;
	}

	/*
	 * Enable the "accept multicast bits" if we programmed at least one
	 * multicast.
	 */

	if (idx > 1) {
		reg = __raw_readq(sc->sbm_rxfilter);
		reg |= M_MAC_MCAST_EN;
		__raw_writeq(reg, sc->sbm_rxfilter);
	}
}
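
/*
 * Only the exact-match ("perfect") filter is used above: slot 0 holds
 * the station address programmed in sbmac_channel_start(), and slots
 * 1..MAC_ADDR_COUNT-1 are filled from the multicast list.  Addresses
 * beyond that are simply not programmed, since the hash filter is not
 * implemented yet (see the XXX note in the function).
 */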
#if defined(SBMAC_ETH0_HWADDR) || defined(SBMAC_ETH1_HWADDR) || defined(SBMAC_ETH2_HWADDR) || defined(SBMAC_ETH3_HWADDR)

/**********************************************************************
 *  SBMAC_PARSE_XDIGIT(str)
 *
 *  Parse a hex digit, returning its value
 *
 *  Return value:
 *  	   hex value, or -1 if invalid
 ********************************************************************* */

static int sbmac_parse_xdigit(char str)
{
	int digit;

	if ((str >= '0') && (str <= '9'))
		digit = str - '0';
	else if ((str >= 'a') && (str <= 'f'))
		digit = str - 'a' + 10;
	else if ((str >= 'A') && (str <= 'F'))
		digit = str - 'A' + 10;
	else
		return -1;

	return digit;
}
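
/*
 * For example, sbmac_parse_xdigit('7') returns 7, while 'a' and 'A'
 * both return 10; any non-hex character such as ':' or 'g' yields -1,
 * which sbmac_parse_hwaddr() below treats as a malformed address.
 */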
/**********************************************************************
 *  SBMAC_PARSE_HWADDR(str,hwaddr)
 *
 *  Convert a string in the form xx:xx:xx:xx:xx:xx into a 6-byte
 *  Ethernet address.
 *
 *  Input parameters:
 *  	   str - string to parse
 *  	   hwaddr - pointer to hardware address
 *
 *  Return value:
 *  	   0 if ok, -1 if the string could not be parsed
 ********************************************************************* */

static int sbmac_parse_hwaddr(char *str, unsigned char *hwaddr)
{
	int digit1, digit2;
	int idx = 6;

	while (*str && (idx > 0)) {
		digit1 = sbmac_parse_xdigit(*str);
		if (digit1 < 0)
			return -1;
		str++;
		if (!*str)
			return -1;
		if ((*str == ':') || (*str == '-')) {
			digit2 = digit1;
			digit1 = 0;
		}
		else {
			digit2 = sbmac_parse_xdigit(*str);
			if (digit2 < 0)
				return -1;
			str++;
		}

		*hwaddr++ = (digit1 << 4) | digit2;
		idx--;

		if (*str == '-')
			str++;
		if (*str == ':')
			str++;
	}
	return 0;
}
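
/*
 * For example, a string such as "02:00:00:12:34:56" fills hwaddr[] with
 * { 0x02, 0x00, 0x00, 0x12, 0x34, 0x56 }; either ':' or '-' is accepted
 * as the separator between bytes.
 */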
static int sb1250_change_mtu(struct net_device *_dev, int new_mtu)
{
	if (new_mtu > ENET_PACKET_SIZE)
		return -EINVAL;
	_dev->mtu = new_mtu;
	printk(KERN_INFO "changing the mtu to %d\n", new_mtu);
	return 0;
}
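
/*
 * The cap at ENET_PACKET_SIZE exists presumably because the receive
 * buffers are sized from ENET_PACKET_SIZE when the ring is set up (see
 * sbm_buffersize in sbmac_init()), so a larger MTU could not be
 * received without reallocating the ring.
 */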
/**********************************************************************
 *  SBMAC_INIT(dev,idx)
 *
 *  Attach routine - init hardware and hook ourselves into linux
 *
 *  Input parameters:
 *  	   dev - net_device structure
 *  	   idx - unit index
 *
 *  Return value:
 *  	   status
 ********************************************************************* */

static int sbmac_init(struct net_device *dev, int idx)
{
	struct sbmac_softc *sc;
	unsigned char *eaddr;
	uint64_t ea_reg;
	int i;
	int err;

	sc = netdev_priv(dev);

	/* Determine controller base address */

	sc->sbm_base = IOADDR(dev->base_addr);
	sc->sbm_dev = dev;

	eaddr = sc->sbm_hwaddr;

	/*
	 * Read the ethernet address.  The firmware left this programmed
	 * for us in the ethernet address register for each mac.
	 */

	ea_reg = __raw_readq(sc->sbm_base + R_MAC_ETHERNET_ADDR);
	__raw_writeq(0, sc->sbm_base + R_MAC_ETHERNET_ADDR);
	for (i = 0; i < 6; i++) {
		eaddr[i] = (uint8_t) (ea_reg & 0xFF);
		ea_reg >>= 8;
	}

	for (i = 0; i < 6; i++) {
		dev->dev_addr[i] = eaddr[i];
	}

	/* Size of the receive buffers */

	sc->sbm_buffersize = ENET_PACKET_SIZE + SMP_CACHE_BYTES * 2 + ETHER_ALIGN;

	/*
	 * Initialize context (get pointers to registers and stuff), then
	 * allocate the memory for the descriptor tables.
	 */

	sbmac_initctx(sc);

	/*
	 * Set up Linux device callins
	 */

	spin_lock_init(&(sc->sbm_lock));

	dev->open               = sbmac_open;
	dev->hard_start_xmit    = sbmac_start_tx;
	dev->stop               = sbmac_close;
	dev->get_stats          = sbmac_get_stats;
	dev->set_multicast_list = sbmac_set_rx_mode;
	dev->do_ioctl           = sbmac_mii_ioctl;
	dev->tx_timeout         = sbmac_tx_timeout;
	dev->watchdog_timeo     = TX_TIMEOUT;
	dev->poll               = sbmac_poll;
	dev->change_mtu         = sb1250_change_mtu;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller    = sbmac_netpoll;
#endif

	/* This is needed for PASS2 for Rx H/W checksum feature */
	sbmac_set_iphdr_offset(sc);

	err = register_netdev(dev);
	if (err)
		goto out_uninit;

	if (sc->rx_hw_checksum == ENABLE) {
		printk(KERN_INFO "%s: enabling TCP rcv checksum\n",
		       dev->name);
	}

	/*
	 * Display Ethernet address (this is called during the config
	 * process so we need to finish off the config message that
	 * was being displayed)
	 */
	printk(KERN_INFO
	       "%s: SiByte Ethernet at 0x%08lX, address: %02X:%02X:%02X:%02X:%02X:%02X\n",
	       dev->name, dev->base_addr,
	       eaddr[0], eaddr[1], eaddr[2], eaddr[3], eaddr[4], eaddr[5]);

	return 0;

out_uninit:
	sbmac_uninitctx(sc);

	return err;
}
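
/*
 * Note that the address is pulled out of R_MAC_ETHERNET_ADDR least
 * significant byte first, so eaddr[0] (the first octet printed above)
 * comes from bits 7:0 of the register; sbmac_addr2reg() is expected to
 * pack addresses the same way when they are written back.
 */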
static int sbmac_open(struct net_device *dev)
{
	struct sbmac_softc *sc = netdev_priv(dev);

	printk(KERN_DEBUG "%s: sbmac_open() irq %d.\n", dev->name, dev->irq);

	/*
	 * map/route interrupt (clear status first, in case something
	 * weird is pending; we haven't initialized the mac registers
	 * yet)
	 */

	__raw_readq(sc->sbm_isr);
	if (request_irq(dev->irq, &sbmac_intr, IRQF_SHARED, dev->name, dev))
		return -EBUSY;

	if (sbmac_mii_probe(dev) == -1) {
		printk("%s: failed to probe PHY.\n", dev->name);
		return -EINVAL;
	}

	/*
	 * Configure default speed
	 */

	sbmac_mii_poll(sc, noisy_mii);

	/*
	 * Turn on the channel
	 */

	sbmac_set_channel_state(sc, sbmac_state_on);

	/*
	 * XXX Station address is in dev->dev_addr
	 */

	if (dev->if_port == 0)
		dev->if_port = 0;

	netif_start_queue(dev);

	sbmac_set_rx_mode(dev);

	/* Set the timer to check for link beat. */
	init_timer(&sc->sbm_timer);
	sc->sbm_timer.expires = jiffies + 2 * HZ / 100;
	sc->sbm_timer.data = (unsigned long)dev;
	sc->sbm_timer.function = &sbmac_timer;
	add_timer(&sc->sbm_timer);

	return 0;
}
static int sbmac_mii_probe(struct net_device *dev)
{
	struct sbmac_softc *s = netdev_priv(dev);
	u16 bmsr, id1, id2;
	u32 vendor, device;
	int i;

	for (i = 1; i < 31; i++) {
		bmsr = sbmac_mii_read(s, i, MII_BMSR);
		if (bmsr != 0) {
			s->sbm_phys[0] = i;
			id1 = sbmac_mii_read(s, i, MII_PHYIDR1);
			id2 = sbmac_mii_read(s, i, MII_PHYIDR2);
			vendor = ((u32)id1 << 6) | ((id2 >> 10) & 0x3f);
			device = (id2 >> 4) & 0x3f;

			printk(KERN_INFO "%s: found phy %d, vendor %06x part %02x\n",
			       dev->name, i, vendor, device);
			return i;
		}
	}
	return -1;
}
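
/*
 * The vendor/part fields above are just a repack of the IEEE PHY ID
 * registers.  As a made-up example, id1 = 0x0040 and id2 = 0x61e4 would
 * print "vendor 001018 part 1e":
 *   vendor = (0x0040 << 6) | ((0x61e4 >> 10) & 0x3f) = 0x1018
 *   device = (0x61e4 >> 4) & 0x3f = 0x1e
 */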
static int sbmac_mii_poll(struct sbmac_softc *s, int noisy)
{
	int bmsr, bmcr, k1stsr, anlpar;
	int chg;
	char buffer[100];
	char *p = buffer;

	/* Read the mode status and mode control registers. */
	bmsr = sbmac_mii_read(s, s->sbm_phys[0], MII_BMSR);
	bmcr = sbmac_mii_read(s, s->sbm_phys[0], MII_BMCR);

	/* get the link partner status */
	anlpar = sbmac_mii_read(s, s->sbm_phys[0], MII_ANLPAR);

	/* if supported, read the 1000baseT register */
	if (bmsr & BMSR_1000BT_XSR) {
		k1stsr = sbmac_mii_read(s, s->sbm_phys[0], MII_K1STSR);
	}
	else {
		k1stsr = 0;
	}

	chg = 0;

	if ((bmsr & BMSR_LINKSTAT) == 0) {
		/*
		 * If link status is down, clear out old info so that when
		 * it comes back up it will force us to reconfigure speed
		 */
		s->sbm_phy_oldbmsr = 0;
		s->sbm_phy_oldanlpar = 0;
		s->sbm_phy_oldk1stsr = 0;
		return 0;
	}

	if ((s->sbm_phy_oldbmsr != bmsr) ||
	    (s->sbm_phy_oldanlpar != anlpar) ||
	    (s->sbm_phy_oldk1stsr != k1stsr)) {
		printk(KERN_DEBUG "%s: bmsr:%x/%x anlpar:%x/%x k1stsr:%x/%x\n",
		       s->sbm_dev->name,
		       s->sbm_phy_oldbmsr, bmsr,
		       s->sbm_phy_oldanlpar, anlpar,
		       s->sbm_phy_oldk1stsr, k1stsr);
		s->sbm_phy_oldbmsr = bmsr;
		s->sbm_phy_oldanlpar = anlpar;
		s->sbm_phy_oldk1stsr = k1stsr;
		chg = 1;
	}

	if (chg == 0)
		return 0;

	p += sprintf(p, "Link speed: ");

	if (k1stsr & K1STSR_LP1KFD) {
		s->sbm_speed = sbmac_speed_1000;
		s->sbm_duplex = sbmac_duplex_full;
		s->sbm_fc = sbmac_fc_frame;
		p += sprintf(p, "1000BaseT FDX");
	}
	else if (k1stsr & K1STSR_LP1KHD) {
		s->sbm_speed = sbmac_speed_1000;
		s->sbm_duplex = sbmac_duplex_half;
		s->sbm_fc = sbmac_fc_disabled;
		p += sprintf(p, "1000BaseT HDX");
	}
	else if (anlpar & ANLPAR_TXFD) {
		s->sbm_speed = sbmac_speed_100;
		s->sbm_duplex = sbmac_duplex_full;
		s->sbm_fc = (anlpar & ANLPAR_PAUSE) ? sbmac_fc_frame : sbmac_fc_disabled;
		p += sprintf(p, "100BaseT FDX");
	}
	else if (anlpar & ANLPAR_TXHD) {
		s->sbm_speed = sbmac_speed_100;
		s->sbm_duplex = sbmac_duplex_half;
		s->sbm_fc = sbmac_fc_disabled;
		p += sprintf(p, "100BaseT HDX");
	}
	else if (anlpar & ANLPAR_10FD) {
		s->sbm_speed = sbmac_speed_10;
		s->sbm_duplex = sbmac_duplex_full;
		s->sbm_fc = sbmac_fc_frame;
		p += sprintf(p, "10BaseT FDX");
	}
	else if (anlpar & ANLPAR_10HD) {
		s->sbm_speed = sbmac_speed_10;
		s->sbm_duplex = sbmac_duplex_half;
		s->sbm_fc = sbmac_fc_collision;
		p += sprintf(p, "10BaseT HDX");
	}
	else {
		p += sprintf(p, "Unknown");
	}

	if (noisy) {
		printk(KERN_INFO "%s: %s\n", s->sbm_dev->name, buffer);
	}

	return 1;
}
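
/*
 * The selection above is strictly by precedence: gigabit full duplex is
 * preferred over gigabit half, then 100BaseT full/half, then 10BaseT,
 * based purely on what the link partner advertised (K1STSR/ANLPAR); the
 * local BMCR value read above is not otherwise consulted here.
 */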
static void sbmac_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct sbmac_softc *sc = netdev_priv(dev);
	int next_tick = HZ;
	int mii_status;

	spin_lock_irq(&sc->sbm_lock);

	/* make IFF_RUNNING follow the MII status bit "Link established" */
	mii_status = sbmac_mii_read(sc, sc->sbm_phys[0], MII_BMSR);

	if ((mii_status & BMSR_LINKSTAT) != (sc->sbm_phy_oldlinkstat)) {
		sc->sbm_phy_oldlinkstat = mii_status & BMSR_LINKSTAT;
		if (mii_status & BMSR_LINKSTAT) {
			netif_carrier_on(dev);
		}
		else {
			netif_carrier_off(dev);
		}
	}

	/*
	 * Poll the PHY to see what speed we should be running at
	 */

	if (sbmac_mii_poll(sc, noisy_mii)) {
		if (sc->sbm_state != sbmac_state_off) {
			/*
			 * something changed, restart the channel
			 */
			printk("%s: restarting channel because speed changed\n",
			       sc->sbm_dev->name);
			sbmac_channel_stop(sc);
			sbmac_channel_start(sc);
		}
	}

	spin_unlock_irq(&sc->sbm_lock);

	sc->sbm_timer.expires = jiffies + next_tick;
	add_timer(&sc->sbm_timer);
}
static void sbmac_tx_timeout(struct net_device *dev)
{
	struct sbmac_softc *sc = netdev_priv(dev);

	spin_lock_irq(&sc->sbm_lock);

	dev->trans_start = jiffies;
	sc->sbm_stats.tx_errors++;

	spin_unlock_irq(&sc->sbm_lock);

	printk(KERN_WARNING "%s: Transmit timed out\n", dev->name);
}
static struct net_device_stats *sbmac_get_stats(struct net_device *dev)
{
	struct sbmac_softc *sc = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&sc->sbm_lock, flags);

	/* XXX update other stats here */

	spin_unlock_irqrestore(&sc->sbm_lock, flags);

	return &sc->sbm_stats;
}
static void sbmac_set_rx_mode(struct net_device *dev)
{
	unsigned long flags;
	struct sbmac_softc *sc = netdev_priv(dev);

	spin_lock_irqsave(&sc->sbm_lock, flags);
	if ((dev->flags ^ sc->sbm_devflags) & IFF_PROMISC) {
		/*
		 * Promiscuous changed.
		 */

		if (dev->flags & IFF_PROMISC) {
			sbmac_promiscuous_mode(sc, 1);
		}
		else {
			sbmac_promiscuous_mode(sc, 0);
		}
	}
	spin_unlock_irqrestore(&sc->sbm_lock, flags);

	/*
	 * Program the multicasts.  Do this every time.
	 */

	sbmac_setmulti(sc);
}
static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct sbmac_softc *sc = netdev_priv(dev);
	u16 *data = (u16 *)&rq->ifr_ifru;
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&sc->sbm_lock, flags);
	retval = 0;

	switch (cmd) {
	case SIOCDEVPRIVATE:		/* Get the address of the PHY in use. */
		data[0] = sc->sbm_phys[0] & 0x1f;
		break;
	case SIOCDEVPRIVATE+1:		/* Read the specified MII register. */
		data[3] = sbmac_mii_read(sc, data[0] & 0x1f, data[1] & 0x1f);
		break;
	case SIOCDEVPRIVATE+2:		/* Write the specified MII register */
		if (!capable(CAP_NET_ADMIN)) {
			retval = -EPERM;
			break;
		}

		printk(KERN_DEBUG "%s: sbmac_mii_ioctl: write %02X %02X %02X\n", dev->name,
		       data[0], data[1], data[2]);
		sbmac_mii_write(sc, data[0] & 0x1f, data[1] & 0x1f, data[2]);
		break;
	default:
		retval = -EOPNOTSUPP;
		break;
	}

	spin_unlock_irqrestore(&sc->sbm_lock, flags);
	return retval;
}
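
/*
 * The private ioctls mirror the generic MII convention: data[0] holds
 * the PHY address, data[1] the register number, data[2] the value to
 * write and data[3] the value read back, all overlaid on rq->ifr_ifru.
 */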
static int sbmac_close(struct net_device *dev)
{
	struct sbmac_softc *sc = netdev_priv(dev);
	unsigned long flags;
	int irq;

	sbmac_set_channel_state(sc, sbmac_state_off);

	del_timer_sync(&sc->sbm_timer);

	spin_lock_irqsave(&sc->sbm_lock, flags);

	netif_stop_queue(dev);

	printk(KERN_DEBUG "%s: Shutting down ethercard\n", dev->name);

	spin_unlock_irqrestore(&sc->sbm_lock, flags);

	irq = dev->irq;
	synchronize_irq(irq);
	free_irq(irq, dev);

	sbdma_emptyring(&(sc->sbm_txdma));
	sbdma_emptyring(&(sc->sbm_rxdma));

	return 0;
}
static int sbmac_poll(struct net_device *dev, int *budget)
{
	int work_to_do;
	int work_done;
	struct sbmac_softc *sc = netdev_priv(dev);

	work_to_do = min(*budget, dev->quota);
	work_done = sbdma_rx_process(sc, &(sc->sbm_rxdma), work_to_do, 1);

	if (work_done > work_to_do)
		printk(KERN_ERR "%s exceeded work_to_do budget=%d quota=%d work-done=%d\n",
		       sc->sbm_dev->name, *budget, dev->quota, work_done);

	sbdma_tx_process(sc, &(sc->sbm_txdma), 1);

	*budget -= work_done;
	dev->quota -= work_done;

	if (work_done < work_to_do) {
		netif_rx_complete(dev);

#ifdef CONFIG_SBMAC_COALESCE
		__raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) |
			     ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0),
			     sc->sbm_imr);
#else
		__raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) |
			     (M_MAC_INT_CHANNEL << S_MAC_RX_CH0), sc->sbm_imr);
#endif
	}

	return (work_done >= work_to_do);
}
#if defined(SBMAC_ETH0_HWADDR) || defined(SBMAC_ETH1_HWADDR) || defined(SBMAC_ETH2_HWADDR) || defined(SBMAC_ETH3_HWADDR)
static void
sbmac_setup_hwaddr(int chan, char *addr)
{
	unsigned char eaddr[6];
	uint64_t val;
	unsigned long port;

	port = A_MAC_CHANNEL_BASE(chan);
	sbmac_parse_hwaddr(addr, eaddr);
	val = sbmac_addr2reg(eaddr);
	__raw_writeq(val, IOADDR(port + R_MAC_ETHERNET_ADDR));
	val = __raw_readq(IOADDR(port + R_MAC_ETHERNET_ADDR));
}
#endif
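
/*
 * The read-back of R_MAC_ETHERNET_ADDR after the write appears to be
 * there only to push the posted write out to the MAC before the value
 * is relied upon; the value read is discarded.
 */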
static struct net_device *dev_sbmac[MAX_UNITS];
static int __init
sbmac_init_module(void)
{
	int idx;
	struct net_device *dev;
	unsigned long port;
	int chip_max_units = 0;

	/* Set the number of available units based on the SOC type. */
	switch (soc_type) {
	case K_SYS_SOC_TYPE_BCM1250:
	case K_SYS_SOC_TYPE_BCM1250_ALT:
		chip_max_units = 3;
		break;
	case K_SYS_SOC_TYPE_BCM1120:
	case K_SYS_SOC_TYPE_BCM1125:
	case K_SYS_SOC_TYPE_BCM1125H:
	case K_SYS_SOC_TYPE_BCM1250_ALT2:	/* Hybrid */
		chip_max_units = 2;
		break;
	case K_SYS_SOC_TYPE_BCM1x55:
	case K_SYS_SOC_TYPE_BCM1x80:
		chip_max_units = 4;
		break;
	default:
		chip_max_units = 0;
		break;
	}
	if (chip_max_units > MAX_UNITS)
		chip_max_units = MAX_UNITS;

	/*
	 * For bringup when not using the firmware, we can pre-fill
	 * the MAC addresses using the environment variables
	 * specified in this file (or maybe from the config file?)
	 */
#ifdef SBMAC_ETH0_HWADDR
	if (chip_max_units > 0)
		sbmac_setup_hwaddr(0, SBMAC_ETH0_HWADDR);
#endif
#ifdef SBMAC_ETH1_HWADDR
	if (chip_max_units > 1)
		sbmac_setup_hwaddr(1, SBMAC_ETH1_HWADDR);
#endif
#ifdef SBMAC_ETH2_HWADDR
	if (chip_max_units > 2)
		sbmac_setup_hwaddr(2, SBMAC_ETH2_HWADDR);
#endif
#ifdef SBMAC_ETH3_HWADDR
	if (chip_max_units > 3)
		sbmac_setup_hwaddr(3, SBMAC_ETH3_HWADDR);
#endif

	/*
	 * Walk through the Ethernet controllers and find
	 * those who have their MAC addresses set.
	 */
	for (idx = 0; idx < chip_max_units; idx++) {

		/*
		 * This is the base address of the MAC.
		 */

		port = A_MAC_CHANNEL_BASE(idx);

		/*
		 * The R_MAC_ETHERNET_ADDR register will be set to some nonzero
		 * value for us by the firmware if we are going to use this MAC.
		 * If we find a zero, skip this MAC.
		 */

		sbmac_orig_hwaddr[idx] = __raw_readq(IOADDR(port + R_MAC_ETHERNET_ADDR));
		if (sbmac_orig_hwaddr[idx] == 0) {
			printk(KERN_DEBUG "sbmac: not configuring MAC at "
			       "%lx\n", port);
			continue;
		}

		/*
		 * Okay, cool.  Initialize this MAC.
		 */

		dev = alloc_etherdev(sizeof(struct sbmac_softc));
		if (!dev)
			return -ENOMEM;

		printk(KERN_DEBUG "sbmac: configuring MAC at %lx\n", port);

		dev->irq = UNIT_INT(idx);
		dev->base_addr = port;

		if (sbmac_init(dev, idx)) {
			port = A_MAC_CHANNEL_BASE(idx);
			__raw_writeq(sbmac_orig_hwaddr[idx], IOADDR(port + R_MAC_ETHERNET_ADDR));
			free_netdev(dev);
			continue;
		}
		dev_sbmac[idx] = dev;
	}

	return 0;
}
static void __exit
sbmac_cleanup_module(void)
{
	struct net_device *dev;
	int idx;

	for (idx = 0; idx < MAX_UNITS; idx++) {
		struct sbmac_softc *sc;

		dev = dev_sbmac[idx];
		if (!dev)
			continue;

		sc = netdev_priv(dev);
		unregister_netdev(dev);
		sbmac_uninitctx(sc);
		free_netdev(dev);
	}
}
module_init(sbmac_init_module);
module_exit(sbmac_cleanup_module);