microblaze: added kernel patch for Xilinx AXI Ethernet
[ana-net.git] / microblaze / xilinx_axi.patch
blob 3c8147fe28ce98f1f6b504ff93fde315c8aa67cf
1 Against net-next, rev 356b95424cfb456e14a59eaa579422ce014c424b
3 Signed-off-by: Ariane Keller <ariane.keller@tik.ee.ethz.ch>
4 Signed-off-by: Daniel Borkmann <daniel.borkmann@tik.ee.ethz.ch>
6 diff --git a/MAINTAINERS b/MAINTAINERS
7 index 4babed7..4d0b9f7 100644
8 --- a/MAINTAINERS
9 +++ b/MAINTAINERS
10 @@ -7459,6 +7459,13 @@ S: Supported
11 F: Documentation/filesystems/xfs.txt
12 F: fs/xfs/
14 +XILINX AXI ETHERNET DRIVER
15 +M: Ariane Keller <ariane.keller@tik.ee.ethz.ch>
16 +M: Daniel Borkmann <daniel.borkmann@tik.ee.ethz.ch>
17 +W: http://www.gnumaniacs.org/
18 +S: Maintained
19 +F: drivers/net/ethernet/xilinx/xilinx_axienet*
21 XILINX SYSTEMACE DRIVER
22 M: Grant Likely <grant.likely@secretlab.ca>
23 W: http://www.secretlab.ca/
24 diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
25 index d5a8260..634e705 100644
26 --- a/drivers/net/ethernet/xilinx/Kconfig
27 +++ b/drivers/net/ethernet/xilinx/Kconfig
28 @@ -25,6 +25,14 @@ config XILINX_EMACLITE
29 ---help---
30 This driver supports the 10/100 Ethernet Lite from Xilinx.
32 +config XILINX_AXI_EMAC
33 + tristate "Xilinx 10/100/1000 AXI Ethernet support"
34 + depends on (PPC32 || MICROBLAZE)
35 + select PHYLIB
36 + ---help---
37 + This driver supports the 10/100/1000 Ethernet from Xilinx for the
38 + AXI bus interface used in Xilinx Virtex FPGAs.
40 config XILINX_LL_TEMAC
41 tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver"
42 depends on (PPC || MICROBLAZE)
43 diff --git a/drivers/net/ethernet/xilinx/Makefile b/drivers/net/ethernet/xilinx/Makefile
44 index 5feac73..214205e 100644
45 --- a/drivers/net/ethernet/xilinx/Makefile
46 +++ b/drivers/net/ethernet/xilinx/Makefile
47 @@ -5,3 +5,5 @@
48 ll_temac-objs := ll_temac_main.o ll_temac_mdio.o
49 obj-$(CONFIG_XILINX_LL_TEMAC) += ll_temac.o
50 obj-$(CONFIG_XILINX_EMACLITE) += xilinx_emaclite.o
51 +xilinx_emac-objs := xilinx_axienet_main.o xilinx_axienet_mdio.o
52 +obj-$(CONFIG_XILINX_AXI_EMAC) += xilinx_emac.o
53 diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
54 new file mode 100644
55 index 0000000..cc83af0
56 --- /dev/null
57 +++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
58 @@ -0,0 +1,508 @@
59 +/*
60 + * Definitions for Xilinx Axi Ethernet device driver.
61 + *
62 + * Copyright (c) 2009 Secret Lab Technologies, Ltd.
63 + * Copyright (c) 2010 Xilinx, Inc. All rights reserved.
64 + * Copyright (c) 2012 Daniel Borkmann, <daniel.borkmann@tik.ee.ethz.ch>
65 + * Copyright (c) 2012 Ariane Keller, <ariane.keller@tik.ee.ethz.ch>
66 + */
68 +#ifndef XILINX_AXIENET_H
69 +#define XILINX_AXIENET_H
71 +#include <linux/netdevice.h>
72 +#include <linux/spinlock.h>
73 +#include <linux/interrupt.h>
75 +/* Packet size info */
76 +#define XAE_HDR_SIZE 14 /* Size of Ethernet header */
77 +#define XAE_HDR_VLAN_SIZE 18 /* Size of an Ethernet hdr + VLAN */
78 +#define XAE_TRL_SIZE 4 /* Size of Ethernet trailer (FCS) */
79 +#define XAE_MTU 1500 /* Max MTU of an Ethernet frame */
80 +#define XAE_JUMBO_MTU 9000 /* Max MTU of a jumbo Eth. frame */
82 +#define XAE_MAX_FRAME_SIZE (XAE_MTU + XAE_HDR_SIZE + XAE_TRL_SIZE)
83 +#define XAE_MAX_VLAN_FRAME_SIZE (XAE_MTU + XAE_HDR_VLAN_SIZE + XAE_TRL_SIZE)
84 +#define XAE_MAX_JUMBO_FRAME_SIZE (XAE_JUMBO_MTU + XAE_HDR_SIZE + XAE_TRL_SIZE)
86 +/* Configuration options */
88 +/* Accept all incoming packets. Default: disabled (cleared) */
89 +#define XAE_OPTION_PROMISC (1 << 0)
91 +/* Jumbo frame support for Tx & Rx. Default: disabled (cleared) */
92 +#define XAE_OPTION_JUMBO (1 << 1)
94 +/* VLAN Rx & Tx frame support. Default: disabled (cleared) */
95 +#define XAE_OPTION_VLAN (1 << 2)
97 +/* Enable recognition of flow control frames on Rx. Default: enabled (set) */
98 +#define XAE_OPTION_FLOW_CONTROL (1 << 4)
100 +/* Strip FCS and PAD from incoming frames. Note: PAD from VLAN frames is not
101 + * stripped. Default: disabled (cleared) */
102 +#define XAE_OPTION_FCS_STRIP (1 << 5)
104 +/* Generate FCS field and add PAD automatically for outgoing frames.
105 + * Default: enabled (set) */
106 +#define XAE_OPTION_FCS_INSERT (1 << 6)
108 +/* Enable Length/Type error checking for incoming frames. When this option is
109 + * set, the MAC will filter frames that have a mismatched type/length field
110 + * and if XAE_OPTION_REPORT_RXERR is set, the user is notified when these
111 + * types of frames are encountered. When this option is cleared, the MAC will
112 + * allow these types of frames to be received. Default: enabled (set) */
113 +#define XAE_OPTION_LENTYPE_ERR (1 << 7)
115 +/* Enable the transmitter. Default: enabled (set) */
116 +#define XAE_OPTION_TXEN (1 << 11)
118 +/* Enable the receiver. Default: enabled (set) */
119 +#define XAE_OPTION_RXEN (1 << 12)
121 +/* Default options set when device is initialized or reset */
122 +#define XAE_OPTION_DEFAULTS \
123 + (XAE_OPTION_TXEN | \
124 + XAE_OPTION_FLOW_CONTROL | \
125 + XAE_OPTION_RXEN)
127 +/* Axi DMA Register definitions */
129 +#define XAXIDMA_TX_CR_OFFSET 0x00000000 /* Channel control */
130 +#define XAXIDMA_TX_SR_OFFSET 0x00000004 /* Status */
131 +#define XAXIDMA_TX_CDESC_OFFSET 0x00000008 /* Current descriptor pointer */
132 +#define XAXIDMA_TX_TDESC_OFFSET 0x00000010 /* Tail descriptor pointer */
134 +#define XAXIDMA_RX_CR_OFFSET 0x00000030 /* Channel control */
135 +#define XAXIDMA_RX_SR_OFFSET 0x00000034 /* Status */
136 +#define XAXIDMA_RX_CDESC_OFFSET 0x00000038 /* Current descriptor pointer */
137 +#define XAXIDMA_RX_TDESC_OFFSET 0x00000040 /* Tail descriptor pointer */
139 +#define XAXIDMA_CR_RUNSTOP_MASK 0x00000001 /* Start/stop DMA channel */
140 +#define XAXIDMA_CR_RESET_MASK 0x00000004 /* Reset DMA engine */
142 +#define XAXIDMA_BD_NDESC_OFFSET 0x00 /* Next descriptor pointer */
143 +#define XAXIDMA_BD_BUFA_OFFSET 0x08 /* Buffer address */
144 +#define XAXIDMA_BD_CTRL_LEN_OFFSET 0x18 /* Control/buffer length */
145 +#define XAXIDMA_BD_STS_OFFSET 0x1C /* Status */
146 +#define XAXIDMA_BD_USR0_OFFSET 0x20 /* User IP specific word0 */
147 +#define XAXIDMA_BD_USR1_OFFSET 0x24 /* User IP specific word1 */
148 +#define XAXIDMA_BD_USR2_OFFSET 0x28 /* User IP specific word2 */
149 +#define XAXIDMA_BD_USR3_OFFSET 0x2C /* User IP specific word3 */
150 +#define XAXIDMA_BD_USR4_OFFSET 0x30 /* User IP specific word4 */
151 +#define XAXIDMA_BD_ID_OFFSET 0x34 /* Sw ID */
152 +#define XAXIDMA_BD_HAS_STSCNTRL_OFFSET 0x38 /* Whether has stscntrl strm */
153 +#define XAXIDMA_BD_HAS_DRE_OFFSET 0x3C /* Whether has DRE */
155 +#define XAXIDMA_BD_HAS_DRE_SHIFT 8 /* Whether has DRE shift */
156 +#define XAXIDMA_BD_HAS_DRE_MASK 0xF00 /* Whether has DRE mask */
157 +#define XAXIDMA_BD_WORDLEN_MASK 0xFF /* Word length mask */
159 +#define XAXIDMA_BD_CTRL_LENGTH_MASK 0x007FFFFF /* Requested len */
160 +#define XAXIDMA_BD_CTRL_TXSOF_MASK 0x08000000 /* First tx packet */
161 +#define XAXIDMA_BD_CTRL_TXEOF_MASK 0x04000000 /* Last tx packet */
162 +#define XAXIDMA_BD_CTRL_ALL_MASK 0x0C000000 /* All control bits */
164 +#define XAXIDMA_DELAY_MASK 0xFF000000 /* Delay timeout counter */
165 +#define XAXIDMA_COALESCE_MASK 0x00FF0000 /* Coalesce counter */
167 +#define XAXIDMA_DELAY_SHIFT 24
168 +#define XAXIDMA_COALESCE_SHIFT 16
170 +#define XAXIDMA_IRQ_IOC_MASK 0x00001000 /* Completion intr */
171 +#define XAXIDMA_IRQ_DELAY_MASK 0x00002000 /* Delay interrupt */
172 +#define XAXIDMA_IRQ_ERROR_MASK 0x00004000 /* Error interrupt */
173 +#define XAXIDMA_IRQ_ALL_MASK 0x00007000 /* All interrupts */
175 +/* Default TX/RX Threshold and waitbound values for SGDMA mode */
176 +#define XAXIDMA_DFT_TX_THRESHOLD 24
177 +#define XAXIDMA_DFT_TX_WAITBOUND 254
178 +#define XAXIDMA_DFT_RX_THRESHOLD 24
179 +#define XAXIDMA_DFT_RX_WAITBOUND 254
181 +#define XAXIDMA_BD_CTRL_TXSOF_MASK 0x08000000 /* First tx packet */
182 +#define XAXIDMA_BD_CTRL_TXEOF_MASK 0x04000000 /* Last tx packet */
183 +#define XAXIDMA_BD_CTRL_ALL_MASK 0x0C000000 /* All control bits */
185 +#define XAXIDMA_BD_STS_ACTUAL_LEN_MASK 0x007FFFFF /* Actual len */
186 +#define XAXIDMA_BD_STS_COMPLETE_MASK 0x80000000 /* Completed */
187 +#define XAXIDMA_BD_STS_DEC_ERR_MASK 0x40000000 /* Decode error */
188 +#define XAXIDMA_BD_STS_SLV_ERR_MASK 0x20000000 /* Slave error */
189 +#define XAXIDMA_BD_STS_INT_ERR_MASK 0x10000000 /* Internal err */
190 +#define XAXIDMA_BD_STS_ALL_ERR_MASK 0x70000000 /* All errors */
191 +#define XAXIDMA_BD_STS_RXSOF_MASK 0x08000000 /* First rx pkt */
192 +#define XAXIDMA_BD_STS_RXEOF_MASK 0x04000000 /* Last rx pkt */
193 +#define XAXIDMA_BD_STS_ALL_MASK 0xFC000000 /* All status bits */
195 +#define XAXIDMA_BD_MINIMUM_ALIGNMENT 0x40
197 +/* Axi Ethernet registers definition */
198 +#define XAE_RAF_OFFSET 0x00000000 /* Reset and Address filter */
199 +#define XAE_TPF_OFFSET 0x00000004 /* Tx Pause Frame */
200 +#define XAE_IFGP_OFFSET 0x00000008 /* Tx Inter-frame gap adjustment*/
201 +#define XAE_IS_OFFSET 0x0000000C /* Interrupt status */
202 +#define XAE_IP_OFFSET 0x00000010 /* Interrupt pending */
203 +#define XAE_IE_OFFSET 0x00000014 /* Interrupt enable */
204 +#define XAE_TTAG_OFFSET 0x00000018 /* Tx VLAN TAG */
205 +#define XAE_RTAG_OFFSET 0x0000001C /* Rx VLAN TAG */
206 +#define XAE_UAWL_OFFSET 0x00000020 /* Unicast address word lower */
207 +#define XAE_UAWU_OFFSET 0x00000024 /* Unicast address word upper */
208 +#define XAE_TPID0_OFFSET 0x00000028 /* VLAN TPID0 register */
209 +#define XAE_TPID1_OFFSET 0x0000002C /* VLAN TPID1 register */
210 +#define XAE_PPST_OFFSET 0x00000030 /* PCS PMA Soft Temac Status Reg */
211 +#define XAE_RCW0_OFFSET 0x00000400 /* Rx Configuration Word 0 */
212 +#define XAE_RCW1_OFFSET 0x00000404 /* Rx Configuration Word 1 */
213 +#define XAE_TC_OFFSET 0x00000408 /* Tx Configuration */
214 +#define XAE_FCC_OFFSET 0x0000040C /* Flow Control Configuration */
215 +#define XAE_EMMC_OFFSET 0x00000410 /* EMAC mode configuration */
216 +#define XAE_PHYC_OFFSET 0x00000414 /* RGMII/SGMII configuration */
217 +#define XAE_MDIO_MC_OFFSET 0x00000500 /* MII Management Config */
218 +#define XAE_MDIO_MCR_OFFSET 0x00000504 /* MII Management Control */
219 +#define XAE_MDIO_MWD_OFFSET 0x00000508 /* MII Management Write Data */
220 +#define XAE_MDIO_MRD_OFFSET 0x0000050C /* MII Management Read Data */
221 +#define XAE_MDIO_MIS_OFFSET 0x00000600 /* MII Management Interrupt Status */
222 +#define XAE_MDIO_MIP_OFFSET 0x00000620 /* MII Mgmt Interrupt Pending
223 + * register offset */
224 +#define XAE_MDIO_MIE_OFFSET 0x00000640 /* MII Management Interrupt Enable
225 + * register offset */
226 +#define XAE_MDIO_MIC_OFFSET 0x00000660 /* MII Management Interrupt Clear
227 + * register offset. */
228 +#define XAE_UAW0_OFFSET 0x00000700 /* Unicast address word 0 */
229 +#define XAE_UAW1_OFFSET 0x00000704 /* Unicast address word 1 */
230 +#define XAE_FMI_OFFSET 0x00000708 /* Filter Mask Index */
231 +#define XAE_AF0_OFFSET 0x00000710 /* Address Filter 0 */
232 +#define XAE_AF1_OFFSET 0x00000714 /* Address Filter 1 */
234 +#define XAE_TX_VLAN_DATA_OFFSET 0x00004000 /* TX VLAN data table address */
235 +#define XAE_RX_VLAN_DATA_OFFSET 0x00008000 /* RX VLAN data table address */
236 +#define XAE_MCAST_TABLE_OFFSET 0x00020000 /* Multicast table address */
238 +/* Bit Masks for Axi Ethernet RAF register */
239 +#define XAE_RAF_MCSTREJ_MASK 0x00000002 /* Reject receive multicast
240 + * destination address */
241 +#define XAE_RAF_BCSTREJ_MASK 0x00000004 /* Reject receive broadcast
242 + * destination address */
243 +#define XAE_RAF_TXVTAGMODE_MASK 0x00000018 /* Tx VLAN TAG mode */
244 +#define XAE_RAF_RXVTAGMODE_MASK 0x00000060 /* Rx VLAN TAG mode */
245 +#define XAE_RAF_TXVSTRPMODE_MASK 0x00000180 /* Tx VLAN STRIP mode */
246 +#define XAE_RAF_RXVSTRPMODE_MASK 0x00000600 /* Rx VLAN STRIP mode */
247 +#define XAE_RAF_NEWFNCENBL_MASK 0x00000800 /* New function mode */
248 +#define XAE_RAF_EMULTIFLTRENBL_MASK 0x00001000 /* Exteneded Multicast
249 + * Filtering mode
250 + */
251 +#define XAE_RAF_STATSRST_MASK 0x00002000 /* Stats. Counter Reset */
252 +#define XAE_RAF_RXBADFRMEN_MASK 0x00004000 /* Recv Bad Frame Enable */
253 +#define XAE_RAF_TXVTAGMODE_SHIFT 3 /* Tx Tag mode shift bits */
254 +#define XAE_RAF_RXVTAGMODE_SHIFT 5 /* Rx Tag mode shift bits */
255 +#define XAE_RAF_TXVSTRPMODE_SHIFT 7 /* Tx strip mode shift bits*/
256 +#define XAE_RAF_RXVSTRPMODE_SHIFT 9 /* Rx Strip mode shift bits*/
258 +/* Bit Masks for Axi Ethernet TPF and IFGP registers */
259 +#define XAE_TPF_TPFV_MASK 0x0000FFFF /* Tx pause frame value */
260 +#define XAE_IFGP0_IFGP_MASK 0x0000007F /* Transmit inter-frame
261 + * gap adjustment value */
263 +/* Bit Masks for Axi Ethernet IS, IE and IP registers, Same masks apply
264 + * for all 3 registers. */
265 +#define XAE_INT_HARDACSCMPLT_MASK 0x00000001 /* Hard register access
266 + * complete */
267 +#define XAE_INT_AUTONEG_MASK 0x00000002 /* Auto negotiation
268 + * complete */
269 +#define XAE_INT_RXCMPIT_MASK 0x00000004 /* Rx complete */
270 +#define XAE_INT_RXRJECT_MASK 0x00000008 /* Rx frame rejected */
271 +#define XAE_INT_RXFIFOOVR_MASK 0x00000010 /* Rx fifo overrun */
272 +#define XAE_INT_TXCMPIT_MASK 0x00000020 /* Tx complete */
273 +#define XAE_INT_RXDCMLOCK_MASK 0x00000040 /* Rx Dcm Lock */
274 +#define XAE_INT_MGTRDY_MASK 0x00000080 /* MGT clock Lock */
275 +#define XAE_INT_PHYRSTCMPLT_MASK 0x00000100 /* Phy Reset complete */
276 +#define XAE_INT_ALL_MASK 0x0000003F /* All the ints */
278 +#define XAE_INT_RECV_ERROR_MASK \
279 + (XAE_INT_RXRJECT_MASK | XAE_INT_RXFIFOOVR_MASK) /* INT bits that
280 + * indicate receive
281 + * errors */
283 +/* Bit masks for Axi Ethernet VLAN TPID Word 0 register */
284 +#define XAE_TPID_0_MASK 0x0000FFFF /* TPID 0 */
285 +#define XAE_TPID_1_MASK 0xFFFF0000 /* TPID 1 */
287 +/* Bit masks for Axi Ethernet VLAN TPID Word 1 register */
288 +#define XAE_TPID_2_MASK 0x0000FFFF /* TPID 2 */
289 +#define XAE_TPID_3_MASK 0xFFFF0000 /* TPID 3 */
291 +/* Bit masks for Axi Ethernet RCW1 register */
292 +#define XAE_RCW1_RST_MASK 0x80000000 /* Reset */
293 +#define XAE_RCW1_JUM_MASK 0x40000000 /* Jumbo frame enable */
294 +#define XAE_RCW1_FCS_MASK 0x20000000 /* In-Band FCS enable
295 + * (FCS not stripped) */
296 +#define XAE_RCW1_RX_MASK 0x10000000 /* Receiver enable */
297 +#define XAE_RCW1_VLAN_MASK 0x08000000 /* VLAN frame enable */
298 +#define XAE_RCW1_LT_DIS_MASK 0x02000000 /* Length/type field valid check
299 + * disable */
300 +#define XAE_RCW1_CL_DIS_MASK 0x01000000 /* Control frame Length check
301 + * disable */
302 +#define XAE_RCW1_PAUSEADDR_MASK 0x0000FFFF /* Pause frame source address
303 + * bits [47:32]. Bits [31:0] are
304 + * stored in register RCW0 */
306 +/* Bit masks for Axi Ethernet TC register */
307 +#define XAE_TC_RST_MASK 0x80000000 /* Reset */
308 +#define XAE_TC_JUM_MASK 0x40000000 /* Jumbo frame enable */
309 +#define XAE_TC_FCS_MASK 0x20000000 /* In-Band FCS enable
310 + * (FCS not generated) */
311 +#define XAE_TC_TX_MASK 0x10000000 /* Transmitter enable */
312 +#define XAE_TC_VLAN_MASK 0x08000000 /* VLAN frame enable */
313 +#define XAE_TC_IFG_MASK 0x02000000 /* Inter-frame gap adjustment
314 + * enable */
316 +/* Bit masks for Axi Ethernet FCC register */
317 +#define XAE_FCC_FCRX_MASK 0x20000000 /* Rx flow control enable */
318 +#define XAE_FCC_FCTX_MASK 0x40000000 /* Tx flow control enable */
320 +/* Bit masks for Axi Ethernet EMMC register */
321 +#define XAE_EMMC_LINKSPEED_MASK 0xC0000000 /* Link speed */
322 +#define XAE_EMMC_RGMII_MASK 0x20000000 /* RGMII mode enable */
323 +#define XAE_EMMC_SGMII_MASK 0x10000000 /* SGMII mode enable */
324 +#define XAE_EMMC_GPCS_MASK 0x08000000 /* 1000BaseX mode enable */
325 +#define XAE_EMMC_HOST_MASK 0x04000000 /* Host interface enable */
326 +#define XAE_EMMC_TX16BIT 0x02000000 /* 16 bit Tx client enable */
327 +#define XAE_EMMC_RX16BIT 0x01000000 /* 16 bit Rx client enable */
328 +#define XAE_EMMC_LINKSPD_10 0x00000000 /* Link Speed mask for 10 Mbit */
329 +#define XAE_EMMC_LINKSPD_100 0x40000000 /* Link Speed mask for 100 Mbit */
330 +#define XAE_EMMC_LINKSPD_1000 0x80000000 /* Link Speed mask for 1000 Mbit */
332 +/* Bit masks for Axi Ethernet PHYC register */
333 +#define XAE_PHYC_SGMIILINKSPEED_MASK 0xC0000000 /* SGMII link speed mask*/
334 +#define XAE_PHYC_RGMIILINKSPEED_MASK 0x0000000C /* RGMII link speed */
335 +#define XAE_PHYC_RGMIIHD_MASK 0x00000002 /* RGMII Half-duplex */
336 +#define XAE_PHYC_RGMIILINK_MASK 0x00000001 /* RGMII link status */
337 +#define XAE_PHYC_RGLINKSPD_10 0x00000000 /* RGMII link 10 Mbit */
338 +#define XAE_PHYC_RGLINKSPD_100 0x00000004 /* RGMII link 100 Mbit */
339 +#define XAE_PHYC_RGLINKSPD_1000 0x00000008 /* RGMII link 1000 Mbit */
340 +#define XAE_PHYC_SGLINKSPD_10 0x00000000 /* SGMII link 10 Mbit */
341 +#define XAE_PHYC_SGLINKSPD_100 0x40000000 /* SGMII link 100 Mbit */
342 +#define XAE_PHYC_SGLINKSPD_1000 0x80000000 /* SGMII link 1000 Mbit */
344 +/* Bit masks for Axi Ethernet MDIO interface MC register */
345 +#define XAE_MDIO_MC_MDIOEN_MASK 0x00000040 /* MII management enable */
346 +#define XAE_MDIO_MC_CLOCK_DIVIDE_MAX 0x3F /* Maximum MDIO divisor */
348 +/* Bit masks for Axi Ethernet MDIO interface MCR register */
349 +#define XAE_MDIO_MCR_PHYAD_MASK 0x1F000000 /* Phy Address Mask */
350 +#define XAE_MDIO_MCR_PHYAD_SHIFT 24 /* Phy Address Shift */
351 +#define XAE_MDIO_MCR_REGAD_MASK 0x001F0000 /* Reg Address Mask */
352 +#define XAE_MDIO_MCR_REGAD_SHIFT 16 /* Reg Address Shift */
353 +#define XAE_MDIO_MCR_OP_MASK 0x0000C000 /* Operation Code Mask */
354 +#define XAE_MDIO_MCR_OP_SHIFT 13 /* Operation Code Shift */
355 +#define XAE_MDIO_MCR_OP_READ_MASK 0x00008000 /* Op Code Read Mask */
356 +#define XAE_MDIO_MCR_OP_WRITE_MASK 0x00004000 /* Op Code Write Mask */
357 +#define XAE_MDIO_MCR_INITIATE_MASK 0x00000800 /* Initiate Mask */
358 +#define XAE_MDIO_MCR_READY_MASK 0x00000080 /* Ready Mask */
360 +/* Bit masks for Axi Ethernet MDIO interface MIS, MIP, MIE, MIC registers */
361 +#define XAE_MDIO_INT_MIIM_RDY_MASK 0x00000001 /* MIIM Interrupt */
363 +/* Bit masks for Axi Ethernet UAW1 register */
364 +#define XAE_UAW1_UNICASTADDR_MASK 0x0000FFFF /* Station address bits
365 + * [47:32]; Station address
366 + * bits [31:0] are stored in
367 + * register UAW0 */
369 +/* Bit masks for Axi Ethernet FMI register */
370 +#define XAE_FMI_PM_MASK 0x80000000 /* Promis. mode enable */
371 +#define XAE_FMI_IND_MASK 0x00000003 /* Index Mask */
373 +#define XAE_MDIO_DIV_DFT 29 /* Default MDIO clock divisor */
375 +/* Defines for different options for C_PHY_TYPE parameter in Axi Ethernet IP */
376 +#define XAE_PHY_TYPE_MII 0
377 +#define XAE_PHY_TYPE_GMII 1
378 +#define XAE_PHY_TYPE_RGMII_1_3 2
379 +#define XAE_PHY_TYPE_RGMII_2_0 3
380 +#define XAE_PHY_TYPE_SGMII 4
381 +#define XAE_PHY_TYPE_1000BASE_X 5
383 +#define XAE_MULTICAST_CAM_TABLE_NUM 4 /* Total number of entries in the
384 + * hardware multicast table. */
386 +/* Axi Ethernet Synthesis features */
387 +#define XAE_FEATURE_PARTIAL_RX_CSUM (1 << 0)
388 +#define XAE_FEATURE_PARTIAL_TX_CSUM (1 << 1)
389 +#define XAE_FEATURE_FULL_RX_CSUM (1 << 2)
390 +#define XAE_FEATURE_FULL_TX_CSUM (1 << 3)
392 +#define XAE_NO_CSUM_OFFLOAD 0
394 +#define XAE_FULL_CSUM_STATUS_MASK 0x00000038
395 +#define XAE_IP_UDP_CSUM_VALIDATED 0x00000003
396 +#define XAE_IP_TCP_CSUM_VALIDATED 0x00000002
398 +#define DELAY_OF_ONE_MILLISEC 1000
400 +/**
401 + * struct axidma_bd - Axi Dma buffer descriptor layout
402 + * @next: MM2S/S2MM Next Descriptor Pointer
403 + * @reserved1: Reserved and not used
404 + * @phys: MM2S/S2MM Buffer Address
405 + * @reserved2: Reserved and not used
406 + * @reserved3: Reserved and not used
407 + * @reserved4: Reserved and not used
408 + * @cntrl: MM2S/S2MM Control value
409 + * @status: MM2S/S2MM Status value
410 + * @app0: MM2S/S2MM User Application Field 0.
411 + * @app1: MM2S/S2MM User Application Field 1.
412 + * @app2: MM2S/S2MM User Application Field 2.
413 + * @app3: MM2S/S2MM User Application Field 3.
414 + * @app4: MM2S/S2MM User Application Field 4.
415 + * @sw_id_offset: MM2S/S2MM Sw ID
416 + * @reserved5: Reserved and not used
417 + * @reserved6: Reserved and not used
418 + */
419 +struct axidma_bd {
420 + u32 next; /* Physical address of next buffer descriptor */
421 + u32 reserved1;
422 + u32 phys;
423 + u32 reserved2;
424 + u32 reserved3;
425 + u32 reserved4;
426 + u32 cntrl;
427 + u32 status;
428 + u32 app0;
429 + u32 app1; /* TX start << 16 | insert */
430 + u32 app2; /* TX csum seed */
431 + u32 app3;
432 + u32 app4;
433 + u32 sw_id_offset;
434 + u32 reserved5;
435 + u32 reserved6;
438 +/**
439 + * struct axienet_local - axienet private per device data
440 + * @ndev: Pointer for net_device to which it will be attached.
441 + * @dev: Pointer to device structure
442 + * @phy_dev: Pointer to PHY device structure attached to the axienet_local
443 + * @phy_node: Pointer to device node structure
444 + * @mii_bus: Pointer to MII bus structure
445 + * @mdio_irqs: IRQs table for MDIO bus required in mii_bus structure
446 + * @regs: Base address for the axienet_local device address space
447 + * @dma_regs: Base address for the axidma device address space
448 + * @dma_err_tasklet: Tasklet structure to process Axi DMA errors
449 + * @tx_irq: Axidma TX IRQ number
450 + * @rx_irq: Axidma RX IRQ number
451 + * @temac_type: axienet type to identify between soft and hard temac
452 + * @phy_type: Phy type to identify between MII/GMII/RGMII/SGMII/1000 Base-X
453 + * @options: AxiEthernet option word
454 + * @last_link: Phy link state in which the PHY was negotiated earlier
455 + * @features: Stores the extended features supported by the axienet hw
456 + * @tx_bd_v: Virtual address of the TX buffer descriptor ring
457 + * @tx_bd_p: Physical address(start address) of the TX buffer descr. ring
458 + * @rx_bd_v: Virtual address of the RX buffer descriptor ring
459 + * @rx_bd_p: Physical address(start address) of the RX buffer descr. ring
460 + * @tx_bd_ci: Stores the index of the Tx buffer descriptor in the ring being
461 + * accessed currently. Used while alloc. BDs before a TX starts
462 + * @tx_bd_tail: Stores the index of the Tx buffer descriptor in the ring being
463 + * accessed currently. Used while processing BDs after the TX
464 + * completed.
465 + * @rx_bd_ci: Stores the index of the Rx buffer descriptor in the ring being
466 + * accessed currently.
467 + * @max_frm_size: Stores the maximum size of the frame that can be that
468 + * Txed/Rxed in the existing hardware. If jumbo option is
469 + * supported, the maximum frame size would be 9k. Else it is
470 + * 1522 bytes (assuming support for basic VLAN)
471 + * @jumbo_support: Stores hardware configuration for jumbo support. If hardware
472 + * can handle jumbo packets, this entry will be 1, else 0.
473 + */
474 +struct axienet_local {
475 + struct net_device *ndev;
476 + struct device *dev;
478 + /* Connection to PHY device */
479 + struct phy_device *phy_dev; /* Pointer to PHY device */
480 + struct device_node *phy_node;
482 + /* MDIO bus data */
483 + struct mii_bus *mii_bus; /* MII bus reference */
484 + int mdio_irqs[PHY_MAX_ADDR]; /* IRQs table for MDIO bus */
486 + /* IO registers, dma functions and IRQs */
487 + void __iomem *regs;
488 + void __iomem *dma_regs;
490 + struct tasklet_struct dma_err_tasklet;
492 + int tx_irq;
493 + int rx_irq;
494 + u32 temac_type;
495 + u32 phy_type;
497 + u32 options; /* Current options word */
498 + u32 last_link;
499 + u32 features;
501 + /* Buffer descriptors */
502 + struct axidma_bd *tx_bd_v;
503 + dma_addr_t tx_bd_p;
504 + struct axidma_bd *rx_bd_v;
505 + dma_addr_t rx_bd_p;
506 + u32 tx_bd_ci;
507 + u32 tx_bd_tail;
508 + u32 rx_bd_ci;
510 + u32 max_frm_size;
511 + u32 jumbo_support;
513 + int csum_offload_on_tx_path;
514 + int csum_offload_on_rx_path;
516 + u32 coalesce_count_rx;
517 + u32 coalesce_count_tx;
520 +/**
521 + * struct axiethernet_option - Used to set axi ethernet hardware options
522 + * @opt: Option to be set.
523 + * @reg: Register offset to be written for setting the option
524 + * @m_or: Mask to be ORed for setting the option in the register
525 + */
526 +struct axienet_option {
527 + u32 opt;
528 + u32 reg;
529 + u32 m_or;
532 +/**
533 + * axienet_ior - Memory mapped Axi Ethernet register read
534 + * @lp: Pointer to axienet local structure
535 + * @offset: Address offset from the base address of Axi Ethernet core
537 + * returns: The contents of the Axi Ethernet register
539 + * This function returns the contents of the corresponding register.
540 + */
541 +static inline u32 axienet_ior(struct axienet_local *lp, off_t offset)
543 + return in_be32(lp->regs + offset);
546 +/**
547 + * axienet_iow - Memory mapped Axi Ethernet register write
548 + * @lp: Pointer to axienet local structure
549 + * @offset: Address offset from the base address of Axi Ethernet core
550 + * @value: Value to be written into the Axi Ethernet register
552 + * This function writes the desired value into the corresponding Axi Ethernet
553 + * register.
554 + */
555 +static inline void axienet_iow(struct axienet_local *lp, off_t offset,
556 + u32 value)
558 + out_be32((lp->regs + offset), value);
561 +/* Function prototypes visible in xilinx_axienet_mdio.c for other files */
562 +int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np);
563 +int axienet_mdio_wait_until_ready(struct axienet_local *lp);
564 +void axienet_mdio_teardown(struct axienet_local *lp);
566 +#endif /* XILINX_AXIENET_H */
567 diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
568 new file mode 100644
569 index 0000000..7fe9a1b
570 --- /dev/null
571 +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
572 @@ -0,0 +1,1683 @@
574 + * Xilinx Axi Ethernet device driver
576 + * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
577 + * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
578 + * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
579 + * Copyright (c) 2010 Xilinx, Inc. All rights reserved.
580 + * Copyright (c) 2012 Daniel Borkmann, <daniel.borkmann@tik.ee.ethz.ch>
581 + * Copyright (c) 2012 Ariane Keller, <ariane.keller@tik.ee.ethz.ch>
583 + * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
584 + * and Spartan6.
586 + * TODO:
587 + * - Add Axi Fifo support.
588 + * - Factor out Axi DMA code into separate driver.
589 + * - Test and fix basic multicast filtering.
590 + * - Add support for extended multicast filtering.
591 + * - Test basic VLAN support.
592 + * - Add support for extended VLAN support.
593 + */
595 +#include <linux/delay.h>
596 +#include <linux/etherdevice.h>
597 +#include <linux/init.h>
598 +#include <linux/module.h>
599 +#include <linux/netdevice.h>
600 +#include <linux/of_mdio.h>
601 +#include <linux/of_platform.h>
602 +#include <linux/of_address.h>
603 +#include <linux/skbuff.h>
604 +#include <linux/spinlock.h>
605 +#include <linux/phy.h>
606 +#include <linux/mii.h>
607 +#include <linux/ethtool.h>
609 +#include "xilinx_axienet.h"
611 +/* Descriptors defines for Tx and Rx DMA - 2^n for the best performance */
612 +#define TX_BD_NUM 64
613 +#define RX_BD_NUM 128
615 +/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
616 +#define DRIVER_NAME "xaxienet"
617 +#define DRIVER_DESCRIPTION "Xilinx Axi Ethernet driver"
618 +#define DRIVER_VERSION "1.00a"
620 +#define AXIENET_REGS_N 32
622 +/* Match table for of_platform binding */
623 +static struct of_device_id axienet_of_match[] __devinitdata = {
624 + { .compatible = "xlnx,axi-ethernet-1.00.a", },
625 + { .compatible = "xlnx,axi-ethernet-1.01.a", },
626 + { .compatible = "xlnx,axi-ethernet-2.01.a", },
627 + {},
630 +MODULE_DEVICE_TABLE(of, axienet_of_match);
632 +/* Option table for setting up Axi Ethernet hardware options */
633 +static struct axienet_option axienet_options[] = {
634 + /* Turn on jumbo packet support for both Rx and Tx */
636 + .opt = XAE_OPTION_JUMBO,
637 + .reg = XAE_TC_OFFSET,
638 + .m_or = XAE_TC_JUM_MASK,
639 + }, {
640 + .opt = XAE_OPTION_JUMBO,
641 + .reg = XAE_RCW1_OFFSET,
642 + .m_or = XAE_RCW1_JUM_MASK,
643 + }, { /* Turn on VLAN packet support for both Rx and Tx */
644 + .opt = XAE_OPTION_VLAN,
645 + .reg = XAE_TC_OFFSET,
646 + .m_or = XAE_TC_VLAN_MASK,
647 + }, {
648 + .opt = XAE_OPTION_VLAN,
649 + .reg = XAE_RCW1_OFFSET,
650 + .m_or = XAE_RCW1_VLAN_MASK,
651 + }, { /* Turn on FCS stripping on receive packets */
652 + .opt = XAE_OPTION_FCS_STRIP,
653 + .reg = XAE_RCW1_OFFSET,
654 + .m_or = XAE_RCW1_FCS_MASK,
655 + }, { /* Turn on FCS insertion on transmit packets */
656 + .opt = XAE_OPTION_FCS_INSERT,
657 + .reg = XAE_TC_OFFSET,
658 + .m_or = XAE_TC_FCS_MASK,
659 + }, { /* Turn off length/type field checking on receive packets */
660 + .opt = XAE_OPTION_LENTYPE_ERR,
661 + .reg = XAE_RCW1_OFFSET,
662 + .m_or = XAE_RCW1_LT_DIS_MASK,
663 + }, { /* Turn on Rx flow control */
664 + .opt = XAE_OPTION_FLOW_CONTROL,
665 + .reg = XAE_FCC_OFFSET,
666 + .m_or = XAE_FCC_FCRX_MASK,
667 + }, { /* Turn on Tx flow control */
668 + .opt = XAE_OPTION_FLOW_CONTROL,
669 + .reg = XAE_FCC_OFFSET,
670 + .m_or = XAE_FCC_FCTX_MASK,
671 + }, { /* Turn on promiscuous frame filtering */
672 + .opt = XAE_OPTION_PROMISC,
673 + .reg = XAE_FMI_OFFSET,
674 + .m_or = XAE_FMI_PM_MASK,
675 + }, { /* Enable transmitter */
676 + .opt = XAE_OPTION_TXEN,
677 + .reg = XAE_TC_OFFSET,
678 + .m_or = XAE_TC_TX_MASK,
679 + }, { /* Enable receiver */
680 + .opt = XAE_OPTION_RXEN,
681 + .reg = XAE_RCW1_OFFSET,
682 + .m_or = XAE_RCW1_RX_MASK,
683 + },
684 + {}
687 +/**
688 + * axienet_dma_in32 - Memory mapped Axi DMA register read
689 + * @lp: Pointer to axienet local structure
690 + * @reg: Address offset from the base address of the Axi DMA core
692 + * returns: The contents of the Axi DMA register
694 + * This function returns the contents of the corresponding Axi DMA register.
695 + */
696 +static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
698 + return in_be32(lp->dma_regs + reg);
701 +/**
702 + * axienet_dma_out32 - Memory mapped Axi DMA register write.
703 + * @lp: Pointer to axienet local structure
704 + * @reg: Address offset from the base address of the Axi DMA core
705 + * @value: Value to be written into the Axi DMA register
707 + * This function writes the desired value into the corresponding Axi DMA
708 + * register.
709 + */
710 +static inline void axienet_dma_out32(struct axienet_local *lp,
711 + off_t reg, u32 value)
713 + out_be32((lp->dma_regs + reg), value);
716 +/**
717 + * axienet_dma_bd_release - Release buffer descriptor rings
718 + * @ndev: Pointer to the net_device structure
720 + * This function is used to release the descriptors allocated in
721 + * axienet_dma_bd_init. axienet_dma_bd_release is called when Axi Ethernet
722 + * driver stop api is called.
723 + */
724 +static void axienet_dma_bd_release(struct net_device *ndev)
726 + int i;
727 + struct axienet_local *lp = netdev_priv(ndev);
729 + for (i = 0; i < RX_BD_NUM; i++) {
730 + dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
731 + lp->max_frm_size, DMA_FROM_DEVICE);
732 + dev_kfree_skb((struct sk_buff *)
733 + (lp->rx_bd_v[i].sw_id_offset));
736 + if (lp->rx_bd_v) {
737 + dma_free_coherent(ndev->dev.parent,
738 + sizeof(*lp->rx_bd_v) * RX_BD_NUM,
739 + lp->rx_bd_v,
740 + lp->rx_bd_p);
742 + if (lp->tx_bd_v) {
743 + dma_free_coherent(ndev->dev.parent,
744 + sizeof(*lp->tx_bd_v) * TX_BD_NUM,
745 + lp->tx_bd_v,
746 + lp->tx_bd_p);
750 +/**
751 + * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
752 + * @ndev: Pointer to the net_device structure
754 + * returns: 0, on success
755 + * -ENOMEM, on failure
757 + * This function is called to initialize the Rx and Tx DMA descriptor
758 + * rings. This initializes the descriptors with required default values
759 + * and is called when Axi Ethernet driver reset is called.
760 + */
761 +static int axienet_dma_bd_init(struct net_device *ndev)
763 + u32 cr;
764 + int i;
765 + struct sk_buff *skb;
766 + struct axienet_local *lp = netdev_priv(ndev);
768 + /* Reset the indexes which are used for accessing the BDs */
769 + lp->tx_bd_ci = 0;
770 + lp->tx_bd_tail = 0;
771 + lp->rx_bd_ci = 0;
773 + /*
774 + * Allocate the Tx and Rx buffer descriptors.
775 + */
776 + lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
777 + sizeof(*lp->tx_bd_v) * TX_BD_NUM,
778 + &lp->tx_bd_p,
779 + GFP_KERNEL);
780 + if (!lp->tx_bd_v) {
781 + dev_err(&ndev->dev, "unable to allocate DMA Tx buffer "
782 + "descriptors");
783 + goto out;
786 + lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
787 + sizeof(*lp->rx_bd_v) * RX_BD_NUM,
788 + &lp->rx_bd_p,
789 + GFP_KERNEL);
790 + if (!lp->rx_bd_v) {
791 + dev_err(&ndev->dev, "unable to allocate DMA Rx buffer "
792 + "descriptors");
793 + goto out;
796 + memset(lp->tx_bd_v, 0, sizeof(*lp->tx_bd_v) * TX_BD_NUM);
797 + for (i = 0; i < TX_BD_NUM; i++) {
798 + lp->tx_bd_v[i].next = lp->tx_bd_p +
799 + sizeof(*lp->tx_bd_v) *
800 + ((i + 1) % TX_BD_NUM);
803 + memset(lp->rx_bd_v, 0, sizeof(*lp->rx_bd_v) * RX_BD_NUM);
804 + for (i = 0; i < RX_BD_NUM; i++) {
805 + lp->rx_bd_v[i].next = lp->rx_bd_p +
806 + sizeof(*lp->rx_bd_v) *
807 + ((i + 1) % RX_BD_NUM);
809 + skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
810 + if (!skb) {
811 + dev_err(&ndev->dev, "alloc_skb error %d\n", i);
812 + goto out;
815 + lp->rx_bd_v[i].sw_id_offset = (u32) skb;
816 + lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
817 + skb->data,
818 + lp->max_frm_size,
819 + DMA_FROM_DEVICE);
820 + lp->rx_bd_v[i].cntrl = lp->max_frm_size;
823 + /* Start updating the Rx channel control register */
824 + cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
825 + /* Update the interrupt coalesce count */
826 + cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
827 + ((lp->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT));
828 + /* Update the delay timer count */
829 + cr = ((cr & ~XAXIDMA_DELAY_MASK) |
830 + (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
831 + /* Enable coalesce, delay timer and error interrupts */
832 + cr |= XAXIDMA_IRQ_ALL_MASK;
833 + /* Write to the Rx channel control register */
834 + axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
836 + /* Start updating the Tx channel control register */
837 + cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
838 + /* Update the interrupt coalesce count */
839 + cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
840 + ((lp->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT));
841 + /* Update the delay timer count */
842 + cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
843 + (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
844 + /* Enable coalesce, delay timer and error interrupts */
845 + cr |= XAXIDMA_IRQ_ALL_MASK;
846 + /* Write to the Tx channel control register */
847 + axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
849 + /* Populate the tail pointer and bring the Rx Axi DMA engine out of
850 + * halted state. This will make the Rx side ready for reception.*/
851 + axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
852 + cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
853 + axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
854 + cr | XAXIDMA_CR_RUNSTOP_MASK);
855 + axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
856 + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
858 + /* Write to the RS (Run-stop) bit in the Tx channel control register.
859 + * Tx channel is now ready to run. But only after we write to the
860 + * tail pointer register that the Tx channel will start transmitting */
861 + axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
862 + cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
863 + axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
864 + cr | XAXIDMA_CR_RUNSTOP_MASK);
866 + return 0;
867 +out:
868 + axienet_dma_bd_release(ndev);
869 + return -ENOMEM;
872 +/**
873 + * axienet_set_mac_address - Write the MAC address
874 + * @ndev: Pointer to the net_device structure
875 + * @address: 6 byte Address to be written as MAC address
877 + * This function is called to initialize the MAC address of the Axi Ethernet
878 + * core. It writes to the UAW0 and UAW1 registers of the core.
879 + */
880 +static void axienet_set_mac_address(struct net_device *ndev, void *address)
882 + struct axienet_local *lp = netdev_priv(ndev);
884 + if (address)
885 + memcpy(ndev->dev_addr, address, ETH_ALEN);
886 + if (!is_valid_ether_addr(ndev->dev_addr))
887 + random_ether_addr(ndev->dev_addr);
889 +	/* Set up the unicast MAC address filter; set its MAC address */
890 + axienet_iow(lp, XAE_UAW0_OFFSET,
891 + (ndev->dev_addr[0]) |
892 + (ndev->dev_addr[1] << 8) |
893 + (ndev->dev_addr[2] << 16) |
894 + (ndev->dev_addr[3] << 24));
895 + axienet_iow(lp, XAE_UAW1_OFFSET,
896 + (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
897 + ~XAE_UAW1_UNICASTADDR_MASK) |
898 + (ndev->dev_addr[4] |
899 + (ndev->dev_addr[5] << 8))));
902 +/**
903 + * netdev_set_mac_address - Write the MAC address (from outside the driver)
904 + * @ndev: Pointer to the net_device structure
905 + * @p: 6 byte Address to be written as MAC address
907 + * returns: 0 for all conditions. Presently, there is no failure case.
909 + * This function is called to initialize the MAC address of the Axi Ethernet
910 + * core. It calls the core specific axienet_set_mac_address. This is the
911 + * function that goes into net_device_ops structure entry ndo_set_mac_address.
912 + */
913 +static int netdev_set_mac_address(struct net_device *ndev, void *p)
915 + struct sockaddr *addr = p;
916 + axienet_set_mac_address(ndev, addr->sa_data);
917 + return 0;
920 +/**
921 + * axienet_set_multicast_list - Prepare the multicast table
922 + * @ndev: Pointer to the net_device structure
924 + * This function is called to initialize the multicast table during
925 + * initialization. The Axi Ethernet basic multicast support has a four-entry
926 + * multicast table which is initialized here. Additionally this function
927 + * goes into the net_device_ops structure entry ndo_set_multicast_list. This
928 + * means whenever the multicast table entries need to be updated this
929 + * function gets called.
930 + */
931 +static void axienet_set_multicast_list(struct net_device *ndev)
933 + int i;
934 + u32 reg, af0reg, af1reg;
935 + struct axienet_local *lp = netdev_priv(ndev);
937 + if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
938 + netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
939 + /* We must make the kernel realize we had to move into
940 + * promiscuous mode. If it was a promiscuous mode request
941 + * the flag is already set. If not we set it. */
942 + ndev->flags |= IFF_PROMISC;
943 + reg = axienet_ior(lp, XAE_FMI_OFFSET);
944 + reg |= XAE_FMI_PM_MASK;
945 + axienet_iow(lp, XAE_FMI_OFFSET, reg);
946 + dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
947 + } else if (!netdev_mc_empty(ndev)) {
948 + struct netdev_hw_addr *ha;
950 + i = 0;
951 + netdev_for_each_mc_addr(ha, ndev) {
952 + if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
953 + break;
955 + af0reg = (ha->addr[0]);
956 + af0reg |= (ha->addr[1] << 8);
957 + af0reg |= (ha->addr[2] << 16);
958 + af0reg |= (ha->addr[3] << 24);
960 + af1reg = (ha->addr[4]);
961 + af1reg |= (ha->addr[5] << 8);
963 + reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
964 + reg |= i;
966 + axienet_iow(lp, XAE_FMI_OFFSET, reg);
967 + axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
968 + axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
969 + i++;
971 + } else {
972 + reg = axienet_ior(lp, XAE_FMI_OFFSET);
973 + reg &= ~XAE_FMI_PM_MASK;
975 + axienet_iow(lp, XAE_FMI_OFFSET, reg);
977 + for (i = 0; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
978 + reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
979 + reg |= i;
981 + axienet_iow(lp, XAE_FMI_OFFSET, reg);
982 + axienet_iow(lp, XAE_AF0_OFFSET, 0);
983 + axienet_iow(lp, XAE_AF1_OFFSET, 0);
986 + dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
990 +/**
991 + * axienet_setoptions - Set an Axi Ethernet option
992 + * @ndev: Pointer to the net_device structure
993 + * @options: Option to be enabled/disabled
995 + * The Axi Ethernet core has multiple features which can be selectively turned
996 + * on or off. The typical options could be jumbo frame option, basic VLAN
997 + * option, promiscuous mode option etc. This function is used to set or clear
998 + * these options in the Axi Ethernet hardware. This is done through
999 + * axienet_option structure.
1000 + */
1001 +static void axienet_setoptions(struct net_device *ndev, u32 options)
1003 + int reg;
1004 + struct axienet_local *lp = netdev_priv(ndev);
1005 + struct axienet_option *tp = &axienet_options[0];
1007 + while (tp->opt) {
1008 + reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
1009 + if (options & tp->opt)
1010 + reg |= tp->m_or;
1011 + axienet_iow(lp, tp->reg, reg);
1012 + tp++;
1015 + lp->options |= options;
1018 +static void __axienet_device_reset(struct axienet_local *lp,
1019 + struct device *dev, off_t offset)
1021 + u32 timeout;
1022 + /* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
1023 + * process of Axi DMA takes a while to complete as all pending
1024 + * commands/transfers will be flushed or completed during this
1025 + * reset process. */
1026 + axienet_dma_out32(lp, offset, XAXIDMA_CR_RESET_MASK);
1027 + timeout = DELAY_OF_ONE_MILLISEC;
1028 + while (axienet_dma_in32(lp, offset) & XAXIDMA_CR_RESET_MASK) {
1029 + udelay(1);
1030 + if (--timeout == 0) {
1031 + dev_err(dev, "axienet_device_reset DMA "
1032 + "reset timeout!\n");
1033 + break;
1038 +/**
1039 + * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
1040 + * @ndev: Pointer to the net_device structure
1042 + * This function is called to reset and initialize the Axi Ethernet core. This
1043 + * is typically called during initialization. It does a reset of the Axi DMA
1044 + * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
1045 + * are connected to Axi Ethernet reset lines, this in turn resets the Axi
1046 + * Ethernet core. No separate hardware reset is done for the Axi Ethernet
1047 + * core.
1048 + */
1049 +static void axienet_device_reset(struct net_device *ndev)
1051 + u32 axienet_status;
1052 + struct axienet_local *lp = netdev_priv(ndev);
1054 + __axienet_device_reset(lp, &ndev->dev, XAXIDMA_TX_CR_OFFSET);
1055 + __axienet_device_reset(lp, &ndev->dev, XAXIDMA_RX_CR_OFFSET);
1057 + lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
1058 + lp->options &= (~XAE_OPTION_JUMBO);
1060 + if ((ndev->mtu > XAE_MTU) &&
1061 + (ndev->mtu <= XAE_JUMBO_MTU) &&
1062 + (lp->jumbo_support)) {
1063 + lp->max_frm_size = ndev->mtu + XAE_HDR_VLAN_SIZE +
1064 + XAE_TRL_SIZE;
1065 + lp->options |= XAE_OPTION_JUMBO;
1068 + if (axienet_dma_bd_init(ndev)) {
1069 + dev_err(&ndev->dev, "axienet_device_reset descriptor "
1070 + "allocation failed\n");
1073 + axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
1074 + axienet_status &= ~XAE_RCW1_RX_MASK;
1075 + axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
1077 + axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
1078 + if (axienet_status & XAE_INT_RXRJECT_MASK)
1079 + axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
1081 + axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
1083 + /* Sync default options with HW but leave receiver and
1084 + * transmitter disabled.*/
1085 + axienet_setoptions(ndev, lp->options &
1086 + ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
1087 + axienet_set_mac_address(ndev, NULL);
1088 + axienet_set_multicast_list(ndev);
1089 + axienet_setoptions(ndev, lp->options);
1091 + ndev->trans_start = jiffies;
1094 +/**
1095 + * axienet_adjust_link - Adjust the PHY link speed/duplex.
1096 + * @ndev: Pointer to the net_device structure
1098 + * This function is called to change the speed and duplex setting after
1099 + * auto negotiation is done by the PHY. This is the function that gets
1100 + * registered with the PHY interface through the "of_phy_connect" call.
1101 + */
1102 +static void axienet_adjust_link(struct net_device *ndev)
1104 + u32 emmc_reg;
1105 + u32 link_state;
1106 + u32 setspeed = 1;
1107 + struct axienet_local *lp = netdev_priv(ndev);
1108 + struct phy_device *phy = lp->phy_dev;
1110 + link_state = phy->speed | (phy->duplex << 1) | phy->link;
1111 + if (lp->last_link != link_state) {
1112 + if ((phy->speed == SPEED_10) || (phy->speed == SPEED_100)) {
1113 + if (lp->phy_type == XAE_PHY_TYPE_1000BASE_X)
1114 + setspeed = 0;
1115 + } else {
1116 + if ((phy->speed == SPEED_1000) &&
1117 + (lp->phy_type == XAE_PHY_TYPE_MII))
1118 + setspeed = 0;
1121 + if (setspeed == 1) {
1122 + emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
1123 + emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;
1125 + switch (phy->speed) {
1126 + case SPEED_1000:
1127 + emmc_reg |= XAE_EMMC_LINKSPD_1000;
1128 + break;
1129 + case SPEED_100:
1130 + emmc_reg |= XAE_EMMC_LINKSPD_100;
1131 + break;
1132 + case SPEED_10:
1133 + emmc_reg |= XAE_EMMC_LINKSPD_10;
1134 + break;
1135 + default:
1136 + dev_err(&ndev->dev, "Speed other than 10, 100 "
1137 + "or 1Gbps is not supported\n");
1138 + break;
1141 + axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
1142 + lp->last_link = link_state;
1143 + phy_print_status(phy);
1144 + } else {
1145 + dev_err(&ndev->dev, "Error setting Axi Ethernet "
1146 + "mac speed\n");
1151 +/**
1152 + * axienet_start_xmit_done - Invoked once a transmit is completed by the
1153 + * Axi DMA Tx channel.
1154 + * @ndev: Pointer to the net_device structure
1156 + * This function is invoked from the Axi DMA Tx isr to notify the completion
1157 + * of transmit operation. It clears fields in the corresponding Tx BDs and
1158 + * unmaps the corresponding buffer so that CPU can regain ownership of the
1159 + * buffer. It finally invokes "netif_wake_queue" to restart transmission if
1160 + * required.
1161 + */
1162 +static void axienet_start_xmit_done(struct net_device *ndev)
1164 + u32 size = 0;
1165 + u32 packets = 0;
1166 + struct axienet_local *lp = netdev_priv(ndev);
1167 + struct axidma_bd *cur_p;
1168 + unsigned int status = 0;
1170 + cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
1171 + status = cur_p->status;
1172 + while (status & XAXIDMA_BD_STS_COMPLETE_MASK) {
1173 + dma_unmap_single(ndev->dev.parent, cur_p->phys,
1174 + (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
1175 + DMA_TO_DEVICE);
1176 + if (cur_p->app4)
1177 + dev_kfree_skb_irq((struct sk_buff *)cur_p->app4);
1178 + /*cur_p->phys = 0;*/
1179 + cur_p->app0 = 0;
1180 + cur_p->app1 = 0;
1181 + cur_p->app2 = 0;
1182 + cur_p->app4 = 0;
1183 + cur_p->status = 0;
1185 + size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
1186 + packets++;
1188 + lp->tx_bd_ci = ++lp->tx_bd_ci % TX_BD_NUM;
1189 + cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
1190 + status = cur_p->status;
1193 + ndev->stats.tx_packets += packets;
1194 + ndev->stats.tx_bytes += size;
1195 + netif_wake_queue(ndev);
1198 +/**
1199 + * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
1200 + * @lp: Pointer to the axienet_local structure
1201 + * @num_frag: The number of BDs to check for
1203 + * returns: 0, on success
1204 + * NETDEV_TX_BUSY, if any of the descriptors are not free
1206 + * This function is invoked before BDs are allocated and transmission starts.
1207 + * This function returns 0 if a BD or group of BDs can be allocated for
1208 + * transmission. If the BD or any of the BDs are not free the function
1209 + * returns a busy status. This is invoked from axienet_start_xmit.
1210 + */
1211 +static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
1212 + int num_frag)
1214 + struct axidma_bd *cur_p;
1215 + cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % TX_BD_NUM];
1216 + if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
1217 + return NETDEV_TX_BUSY;
1218 + return 0;
1221 +/**
1222 + * axienet_start_xmit - Starts the transmission.
1223 + * @skb: sk_buff pointer that contains data to be Txed.
1224 + * @ndev: Pointer to net_device structure.
1226 + * returns: NETDEV_TX_OK, on success
1227 + * NETDEV_TX_BUSY, if any of the descriptors are not free
1229 + * This function is invoked from upper layers to initiate transmission. The
1230 + * function uses the next available free BDs and populates their fields to
1231 + * start the transmission. Additionally if checksum offloading is supported,
1232 + * it populates AXI Stream Control fields with appropriate values.
1233 + */
1234 +static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1236 + u32 ii;
1237 + u32 num_frag;
1238 + u32 csum_start_off;
1239 + u32 csum_index_off;
1240 + skb_frag_t *frag;
1241 + dma_addr_t tail_p;
1242 + struct axienet_local *lp = netdev_priv(ndev);
1243 + struct axidma_bd *cur_p;
1245 + num_frag = skb_shinfo(skb)->nr_frags;
1246 + cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
1248 + if (axienet_check_tx_bd_space(lp, num_frag)) {
1249 + if (!netif_queue_stopped(ndev))
1250 + netif_stop_queue(ndev);
1251 + return NETDEV_TX_BUSY;
1254 + if (skb->ip_summed == CHECKSUM_PARTIAL) {
1255 + if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
1256 + /* Tx Full Checksum Offload Enabled */
1257 + cur_p->app0 |= 2;
1258 + } else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
1259 + csum_start_off = skb_transport_offset(skb);
1260 + csum_index_off = csum_start_off + skb->csum_offset;
1261 + /* Tx Partial Checksum Offload Enabled */
1262 + cur_p->app0 |= 1;
1263 + cur_p->app1 = (csum_start_off << 16) | csum_index_off;
1265 + } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1266 + cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
1269 + cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
1270 + cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
1271 + skb_headlen(skb), DMA_TO_DEVICE);
1273 + for (ii = 0; ii < num_frag; ii++) {
1274 + lp->tx_bd_tail = ++lp->tx_bd_tail % TX_BD_NUM;
1275 + cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
1276 + frag = &skb_shinfo(skb)->frags[ii];
1277 + cur_p->phys = dma_map_single(ndev->dev.parent,
1278 + skb_frag_address(frag),
1279 + skb_frag_size(frag),
1280 + DMA_TO_DEVICE);
1281 + cur_p->cntrl = skb_frag_size(frag);
1284 + cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
1285 + cur_p->app4 = (unsigned long)skb;
1287 + tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
1288 + /* Start the transfer */
1289 + axienet_dma_out32(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
1290 + lp->tx_bd_tail = ++lp->tx_bd_tail % TX_BD_NUM;
1292 + return NETDEV_TX_OK;
1295 +/**
1296 + * axienet_recv - Is called from Axi DMA Rx Isr to complete the received
1297 + * BD processing.
1298 + * @ndev: Pointer to net_device structure.
1300 + * This function is invoked from the Axi DMA Rx isr to process the Rx BDs. It
1301 + * does minimal processing and invokes "netif_rx" to complete further
1302 + * processing.
1303 + */
1304 +static void axienet_recv(struct net_device *ndev)
1306 + u32 length;
1307 + u32 csumstatus;
1308 + u32 size = 0;
1309 + u32 packets = 0;
1310 + dma_addr_t tail_p;
1311 + struct axienet_local *lp = netdev_priv(ndev);
1312 + struct sk_buff *skb, *new_skb;
1313 + struct axidma_bd *cur_p;
1315 + tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
1316 + cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
1318 + while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
1319 + skb = (struct sk_buff *) (cur_p->sw_id_offset);
1320 + length = cur_p->app4 & 0x0000FFFF;
1322 + dma_unmap_single(ndev->dev.parent, cur_p->phys,
1323 + lp->max_frm_size,
1324 + DMA_FROM_DEVICE);
1326 + skb_put(skb, length);
1327 + skb->protocol = eth_type_trans(skb, ndev);
1328 + /*skb_checksum_none_assert(skb);*/
1329 + skb->ip_summed = CHECKSUM_NONE;
1331 + /* if we're doing Rx csum offload, set it up */
1332 + if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
1333 + csumstatus = (cur_p->app2 &
1334 + XAE_FULL_CSUM_STATUS_MASK) >> 3;
1335 + if ((csumstatus == XAE_IP_TCP_CSUM_VALIDATED) ||
1336 + (csumstatus == XAE_IP_UDP_CSUM_VALIDATED)) {
1337 + skb->ip_summed = CHECKSUM_UNNECESSARY;
1339 + } else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
1340 + skb->protocol == __constant_htons(ETH_P_IP) &&
1341 + skb->len > 64) {
1342 + skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
1343 + skb->ip_summed = CHECKSUM_COMPLETE;
1346 + netif_rx(skb);
1348 + size += length;
1349 + packets++;
1351 + new_skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
1352 + if (!new_skb) {
1353 + dev_err(&ndev->dev, "no memory for new sk_buff\n");
1354 + return;
1356 + cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
1357 + lp->max_frm_size,
1358 + DMA_FROM_DEVICE);
1359 + cur_p->cntrl = lp->max_frm_size;
1360 + cur_p->status = 0;
1361 + cur_p->sw_id_offset = (u32) new_skb;
1363 + lp->rx_bd_ci = ++lp->rx_bd_ci % RX_BD_NUM;
1364 + cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
1367 + ndev->stats.rx_packets += packets;
1368 + ndev->stats.rx_bytes += size;
1370 + axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
1373 +/**
1374 + * axienet_tx_irq - Tx Done Isr.
1375 + * @irq: irq number
1376 + * @_ndev: net_device pointer
1378 + * returns: IRQ_HANDLED for all cases.
1380 + * This is the Axi DMA Tx done Isr. It invokes "axienet_start_xmit_done"
1381 + * to complete the BD processing.
1382 + */
1383 +static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
1385 + u32 cr;
1386 + unsigned int status;
1387 + struct net_device *ndev = _ndev;
1388 + struct axienet_local *lp = netdev_priv(ndev);
1390 + status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
1391 + if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
1392 + axienet_start_xmit_done(lp->ndev);
1393 + goto out;
1395 + if (!(status & XAXIDMA_IRQ_ALL_MASK))
1396 + dev_err(&ndev->dev, "No interrupts asserted in Tx path");
1397 + if (status & XAXIDMA_IRQ_ERROR_MASK) {
1398 + dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
1399 + dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
1400 + (lp->tx_bd_v[lp->tx_bd_ci]).phys);
1402 + cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
1403 + /* Disable coalesce, delay timer and error interrupts */
1404 + cr &= (~XAXIDMA_IRQ_ALL_MASK);
1405 + /* Write to the Tx channel control register */
1406 + axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
1408 + cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
1409 + /* Disable coalesce, delay timer and error interrupts */
1410 + cr &= (~XAXIDMA_IRQ_ALL_MASK);
1411 + /* Write to the Rx channel control register */
1412 + axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
1414 + tasklet_schedule(&lp->dma_err_tasklet);
1416 +out:
1417 + axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
1418 + return IRQ_HANDLED;
1421 +/**
1422 + * axienet_rx_irq - Rx Isr.
1423 + * @irq: irq number
1424 + * @_ndev: net_device pointer
1426 + * returns: IRQ_HANDLED for all cases.
1428 + * This is the Axi DMA Rx Isr. It invokes "axienet_recv" to complete the BD
1429 + * processing.
1430 + */
1431 +static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
1433 + u32 cr;
1434 + unsigned int status;
1435 + struct net_device *ndev = _ndev;
1436 + struct axienet_local *lp = netdev_priv(ndev);
1438 + status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
1439 + if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
1440 + axienet_recv(lp->ndev);
1441 + goto out;
1443 + if (!(status & XAXIDMA_IRQ_ALL_MASK))
1444 + dev_err(&ndev->dev, "No interrupts asserted in Rx path");
1445 + if (status & XAXIDMA_IRQ_ERROR_MASK) {
1446 + dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
1447 + dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
1448 + (lp->rx_bd_v[lp->rx_bd_ci]).phys);
1450 + cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
1451 + /* Disable coalesce, delay timer and error interrupts */
1452 + cr &= (~XAXIDMA_IRQ_ALL_MASK);
1453 + /* Finally write to the Tx channel control register */
1454 + axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
1456 + cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
1457 + /* Disable coalesce, delay timer and error interrupts */
1458 + cr &= (~XAXIDMA_IRQ_ALL_MASK);
1459 + /* write to the Rx channel control register */
1460 + axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
1462 + tasklet_schedule(&lp->dma_err_tasklet);
1464 +out:
1465 + axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
1466 + return IRQ_HANDLED;
1469 +/**
1470 + * axienet_open - Driver open routine.
1471 + * @ndev: Pointer to net_device structure
1473 + * returns: 0, on success.
1474 + * -ENODEV, if PHY cannot be connected to
1475 + * non-zero error value on failure
1477 + * This is the driver open routine. It calls phy_start to start the PHY device.
1478 + * It also allocates interrupt service routines, enables the interrupt lines
1479 + * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
1480 + * descriptors are initialized.
1481 + */
1482 +static int axienet_open(struct net_device *ndev)
1484 + int ret, mdio_mcreg;
1485 + struct axienet_local *lp = netdev_priv(ndev);
1487 + dev_dbg(&ndev->dev, "axienet_open()\n");
1489 + mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
1490 + ret = axienet_mdio_wait_until_ready(lp);
1491 + if (ret < 0)
1492 + return ret;
1493 + /* Disable the MDIO interface till Axi Ethernet Reset is completed.
1494 + * When we do an Axi Ethernet reset, it resets the complete core
1495 + * including the MDIO. If MDIO is not disabled when the reset
1496 + * process is started, MDIO will be broken afterwards. */
1497 + axienet_iow(lp, XAE_MDIO_MC_OFFSET,
1498 + (mdio_mcreg & (~XAE_MDIO_MC_MDIOEN_MASK)));
1499 + axienet_device_reset(ndev);
1500 + /* Enable the MDIO */
1501 + axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
1502 + ret = axienet_mdio_wait_until_ready(lp);
1503 + if (ret < 0)
1504 + return ret;
1506 + if (lp->phy_node) {
1507 + lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
1508 + axienet_adjust_link, 0,
1509 + PHY_INTERFACE_MODE_GMII);
1510 + if (!lp->phy_dev) {
1511 + dev_err(lp->dev, "of_phy_connect() failed\n");
1512 + return -ENODEV;
1514 + phy_start(lp->phy_dev);
1517 + /* Enable interrupts for Axi DMA Tx */
1518 + ret = request_irq(lp->tx_irq, axienet_tx_irq, 0, ndev->name, ndev);
1519 + if (ret)
1520 + goto err_tx_irq;
1521 + /* Enable interrupts for Axi DMA Rx */
1522 + ret = request_irq(lp->rx_irq, axienet_rx_irq, 0, ndev->name, ndev);
1523 + if (ret)
1524 + goto err_rx_irq;
1525 + /* Enable tasklets for Axi DMA error handling */
1526 + tasklet_enable(&lp->dma_err_tasklet);
1527 + return 0;
1529 +err_rx_irq:
1530 + free_irq(lp->tx_irq, ndev);
1531 +err_tx_irq:
1532 + if (lp->phy_dev)
1533 + phy_disconnect(lp->phy_dev);
1534 + lp->phy_dev = NULL;
1535 + dev_err(lp->dev, "request_irq() failed\n");
1536 + return ret;
1539 +/**
1540 + * axienet_stop - Driver stop routine.
1541 + * @ndev: Pointer to net_device structure
1543 + * returns: 0, on success.
1545 + * This is the driver stop routine. It calls phy_disconnect to stop the PHY
1546 + * device. It also removes the interrupt handlers and disables the interrupts.
1547 + * The Axi DMA Tx/Rx BDs are released.
1548 + */
1549 +static int axienet_stop(struct net_device *ndev)
1551 + u32 cr;
1552 + struct axienet_local *lp = netdev_priv(ndev);
1554 + dev_dbg(&ndev->dev, "axienet_close()\n");
1556 + cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
1557 + axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
1558 + cr & (~XAXIDMA_CR_RUNSTOP_MASK));
1559 + cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
1560 + axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
1561 + cr & (~XAXIDMA_CR_RUNSTOP_MASK));
1562 + axienet_setoptions(ndev, lp->options &
1563 + ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
1565 + tasklet_disable(&lp->dma_err_tasklet);
1567 + free_irq(lp->tx_irq, ndev);
1568 + free_irq(lp->rx_irq, ndev);
1570 + if (lp->phy_dev)
1571 + phy_disconnect(lp->phy_dev);
1572 + lp->phy_dev = NULL;
1574 + axienet_dma_bd_release(ndev);
1575 + return 0;
1578 +/**
1579 + * axienet_change_mtu - Driver change mtu routine.
1580 + * @ndev: Pointer to net_device structure
1581 + * @new_mtu: New mtu value to be applied
1583 + * returns: Always returns 0 (success).
1585 + * This is the change mtu driver routine. It checks if the Axi Ethernet
1586 + * hardware supports jumbo frames before changing the mtu. This can be
1587 + * called only when the device is not up.
1588 + */
1589 +static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
1591 + struct axienet_local *lp = netdev_priv(ndev);
1593 + if (netif_running(ndev))
1594 + return -EBUSY;
1595 + if (lp->jumbo_support) {
1596 + if ((new_mtu > XAE_JUMBO_MTU) || (new_mtu < 64))
1597 + return -EINVAL;
1598 + ndev->mtu = new_mtu;
1599 + } else {
1600 + if ((new_mtu > XAE_MTU) || (new_mtu < 64))
1601 + return -EINVAL;
1602 + ndev->mtu = new_mtu;
1605 + return 0;
1608 +#ifdef CONFIG_NET_POLL_CONTROLLER
1609 +/**
1610 + * axienet_poll_controller - Axi Ethernet poll mechanism.
1611 + * @ndev: Pointer to net_device structure
1613 + * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior
1614 + * to polling the ISRs and are enabled back after the polling is done.
1615 + */
1616 +static void axienet_poll_controller(struct net_device *ndev)
1618 + struct axienet_local *lp = netdev_priv(ndev);
1619 + disable_irq(lp->tx_irq);
1620 + disable_irq(lp->rx_irq);
1621 + axienet_rx_irq(lp->tx_irq, ndev);
1622 + axienet_tx_irq(lp->rx_irq, ndev);
1623 + enable_irq(lp->tx_irq);
1624 + enable_irq(lp->rx_irq);
1626 +#endif
1628 +static const struct net_device_ops axienet_netdev_ops = {
1629 + .ndo_open = axienet_open,
1630 + .ndo_stop = axienet_stop,
1631 + .ndo_start_xmit = axienet_start_xmit,
1632 + .ndo_change_mtu = axienet_change_mtu,
1633 + .ndo_set_mac_address = netdev_set_mac_address,
1634 + .ndo_validate_addr = eth_validate_addr,
1635 + .ndo_set_rx_mode = axienet_set_multicast_list,
1636 +#ifdef CONFIG_NET_POLL_CONTROLLER
1637 + .ndo_poll_controller = axienet_poll_controller,
1638 +#endif
1641 +/**
1642 + * axienet_ethtools_get_settings - Get Axi Ethernet settings related to PHY.
1643 + * @ndev: Pointer to net_device structure
1644 + * @ecmd: Pointer to ethtool_cmd structure
1646 + * This implements ethtool command for getting PHY settings. If PHY could
1647 + * not be found, the function returns -ENODEV. This function calls the
1648 + * relevant PHY ethtool API to get the PHY settings.
1649 + * Issue "ethtool ethX" under linux prompt to execute this function.
1650 + */
1651 +static int axienet_ethtools_get_settings(struct net_device *ndev,
1652 + struct ethtool_cmd *ecmd)
1654 + struct axienet_local *lp = netdev_priv(ndev);
1655 + struct phy_device *phydev = lp->phy_dev;
1656 + if (!phydev)
1657 + return -ENODEV;
1658 + return phy_ethtool_gset(phydev, ecmd);
1661 +/**
1662 + * axienet_ethtools_set_settings - Set PHY settings as passed in the argument.
1663 + * @ndev: Pointer to net_device structure
1664 + * @ecmd: Pointer to ethtool_cmd structure
1666 + * This implements ethtool command for setting various PHY settings. If PHY
1667 + * could not be found, the function returns -ENODEV. This function calls the
1668 + * relevant PHY ethtool API to set the PHY.
1669 + * Issue e.g. "ethtool -s ethX speed 1000" under linux prompt to execute this
1670 + * function.
1671 + */
1672 +static int axienet_ethtools_set_settings(struct net_device *ndev,
1673 + struct ethtool_cmd *ecmd)
1675 + struct axienet_local *lp = netdev_priv(ndev);
1676 + struct phy_device *phydev = lp->phy_dev;
1677 + if (!phydev)
1678 + return -ENODEV;
1679 + return phy_ethtool_sset(phydev, ecmd);
1682 +/**
1683 + * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
1684 + * @ndev: Pointer to net_device structure
1685 + * @ed: Pointer to ethtool_drvinfo structure
1687 + * This implements ethtool command for getting the driver information.
1688 + * Issue "ethtool -i ethX" under linux prompt to execute this function.
1689 + */
1690 +static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
1691 + struct ethtool_drvinfo *ed)
1693 + memset(ed, 0, sizeof(struct ethtool_drvinfo));
1694 + strcpy(ed->driver, DRIVER_NAME);
1695 + strcpy(ed->version, DRIVER_VERSION);
1696 + ed->regdump_len = sizeof(u32) * AXIENET_REGS_N;
1699 +/**
1700 + * axienet_ethtools_get_regs_len - Get the total regs length present in the
1701 + * AxiEthernet core.
1702 + * @ndev: Pointer to net_device structure
1704 + * This implements ethtool command for getting the total register length
1705 + * information.
1706 + */
1707 +static int axienet_ethtools_get_regs_len(struct net_device *ndev)
1709 + return sizeof(u32) * AXIENET_REGS_N;
1712 +/**
1713 + * axienet_ethtools_get_regs - Dump the contents of all registers present
1714 + * in AxiEthernet core.
1715 + * @ndev: Pointer to net_device structure
1716 + * @regs: Pointer to ethtool_regs structure
1717 + * @ret: Void pointer used to return the contents of the registers.
1719 + * This implements ethtool command for getting the Axi Ethernet register dump.
1720 + * Issue "ethtool -d ethX" to execute this function.
1721 + */
1722 +static void axienet_ethtools_get_regs(struct net_device *ndev,
1723 + struct ethtool_regs *regs, void *ret)
1725 + u32 *data = (u32 *) ret;
1726 + size_t len = sizeof(u32) * AXIENET_REGS_N;
1727 + struct axienet_local *lp = netdev_priv(ndev);
1729 + regs->version = 0;
1730 + regs->len = len;
1732 + memset(data, 0, len);
1733 + data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
1734 + data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
1735 + data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
1736 + data[3] = axienet_ior(lp, XAE_IS_OFFSET);
1737 + data[4] = axienet_ior(lp, XAE_IP_OFFSET);
1738 + data[5] = axienet_ior(lp, XAE_IE_OFFSET);
1739 + data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
1740 + data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
1741 + data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
1742 + data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
1743 + data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
1744 + data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
1745 + data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
1746 + data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
1747 + data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
1748 + data[15] = axienet_ior(lp, XAE_TC_OFFSET);
1749 + data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
1750 + data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
1751 + data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
1752 + data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
1753 + data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
1754 + data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
1755 + data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
1756 + data[23] = axienet_ior(lp, XAE_MDIO_MIS_OFFSET);
1757 + data[24] = axienet_ior(lp, XAE_MDIO_MIP_OFFSET);
1758 + data[25] = axienet_ior(lp, XAE_MDIO_MIE_OFFSET);
1759 + data[26] = axienet_ior(lp, XAE_MDIO_MIC_OFFSET);
1760 + data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
1761 + data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
1762 + data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
1763 + data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
1764 + data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
1767 +/**
1768 + * axienet_ethtools_get_pauseparam - Get the pause parameter setting for
1769 + * Tx and Rx paths.
1770 + * @ndev: Pointer to net_device structure
1771 + * @epauseparm: Pointer to ethtool_pauseparam structure.
1773 + * This implements ethtool command for getting axi ethernet pause frame
1774 + * setting. Issue "ethtool -a ethX" to execute this function.
1775 + */
1776 +static void
1777 +axienet_ethtools_get_pauseparam(struct net_device *ndev,
1778 + struct ethtool_pauseparam *epauseparm)
1780 + u32 regval;
1781 + struct axienet_local *lp = netdev_priv(ndev);
1782 + epauseparm->autoneg = 0;
1783 + regval = axienet_ior(lp, XAE_FCC_OFFSET);
1784 + epauseparm->tx_pause = regval & XAE_FCC_FCTX_MASK;
1785 + epauseparm->rx_pause = regval & XAE_FCC_FCRX_MASK;
1788 +/**
1789 + * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
1790 + * settings.
1791 + * @ndev: Pointer to net_device structure
1792 + * @epauseparm:  Pointer to ethtool_pauseparam structure
1794 + * This implements ethtool command for enabling flow control on Rx and Tx
1795 + * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this
1796 + * function.
1797 + */
1798 +static int
1799 +axienet_ethtools_set_pauseparam(struct net_device *ndev,
1800 + struct ethtool_pauseparam *epauseparm)
1802 + u32 regval = 0;
1803 + struct axienet_local *lp = netdev_priv(ndev);
1805 + if (netif_running(ndev)) {
1806 + printk(KERN_ERR "%s: Please stop netif before applying "
1807 + "configuration\n", ndev->name);
1808 + return -EFAULT;
1811 + regval = axienet_ior(lp, XAE_FCC_OFFSET);
1812 + if (epauseparm->tx_pause)
1813 + regval |= XAE_FCC_FCTX_MASK;
1814 + else
1815 + regval &= ~XAE_FCC_FCTX_MASK;
1816 + if (epauseparm->rx_pause)
1817 + regval |= XAE_FCC_FCRX_MASK;
1818 + else
1819 + regval &= ~XAE_FCC_FCRX_MASK;
1820 + axienet_iow(lp, XAE_FCC_OFFSET, regval);
1822 + return 0;
1825 +/**
1826 + * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
1827 + * @ndev: Pointer to net_device structure
1828 + * @ecoalesce: Pointer to ethtool_coalesce structure
1830 + * This implements ethtool command for getting the DMA interrupt coalescing
1831 + * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to
1832 + * execute this function.
1833 + */
1834 +static int axienet_ethtools_get_coalesce(struct net_device *ndev,
1835 + struct ethtool_coalesce *ecoalesce)
1837 + u32 regval = 0;
1838 + struct axienet_local *lp = netdev_priv(ndev);
1839 + regval = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
1840 + ecoalesce->rx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
1841 + >> XAXIDMA_COALESCE_SHIFT;
1842 + regval = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
1843 + ecoalesce->tx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
1844 + >> XAXIDMA_COALESCE_SHIFT;
1845 + return 0;
1848 +/**
1849 + * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
1850 + * @ndev: Pointer to net_device structure
1851 + * @ecoalesce: Pointer to ethtool_coalesce structure
1853 + * This implements ethtool command for setting the DMA interrupt coalescing
1854 + * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux
1855 + * prompt to execute this function.
1856 + */
1857 +static int axienet_ethtools_set_coalesce(struct net_device *ndev,
1858 + struct ethtool_coalesce *ecoalesce)
1860 + struct axienet_local *lp = netdev_priv(ndev);
1862 + if (netif_running(ndev)) {
1863 + printk(KERN_ERR "%s: Please stop netif before applying "
1864 + "configuration\n", ndev->name);
1865 + return -EFAULT;
1868 + if ((ecoalesce->rx_coalesce_usecs) ||
1869 + (ecoalesce->rx_coalesce_usecs_irq) ||
1870 + (ecoalesce->rx_max_coalesced_frames_irq) ||
1871 + (ecoalesce->tx_coalesce_usecs) ||
1872 + (ecoalesce->tx_coalesce_usecs_irq) ||
1873 + (ecoalesce->tx_max_coalesced_frames_irq) ||
1874 + (ecoalesce->stats_block_coalesce_usecs) ||
1875 + (ecoalesce->use_adaptive_rx_coalesce) ||
1876 + (ecoalesce->use_adaptive_tx_coalesce) ||
1877 + (ecoalesce->pkt_rate_low) ||
1878 + (ecoalesce->rx_coalesce_usecs_low) ||
1879 + (ecoalesce->rx_max_coalesced_frames_low) ||
1880 + (ecoalesce->tx_coalesce_usecs_low) ||
1881 + (ecoalesce->tx_max_coalesced_frames_low) ||
1882 + (ecoalesce->pkt_rate_high) ||
1883 + (ecoalesce->rx_coalesce_usecs_high) ||
1884 + (ecoalesce->rx_max_coalesced_frames_high) ||
1885 + (ecoalesce->tx_coalesce_usecs_high) ||
1886 + (ecoalesce->tx_max_coalesced_frames_high) ||
1887 + (ecoalesce->rate_sample_interval))
1888 + return -EOPNOTSUPP;
1889 + if (ecoalesce->rx_max_coalesced_frames)
1890 + lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
1891 + if (ecoalesce->tx_max_coalesced_frames)
1892 + lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
1894 + return 0;
1897 +static struct ethtool_ops axienet_ethtool_ops = {
1898 + .get_settings = axienet_ethtools_get_settings,
1899 + .set_settings = axienet_ethtools_set_settings,
1900 + .get_drvinfo = axienet_ethtools_get_drvinfo,
1901 + .get_regs_len = axienet_ethtools_get_regs_len,
1902 + .get_regs = axienet_ethtools_get_regs,
1903 + .get_link = ethtool_op_get_link,
1904 + .get_pauseparam = axienet_ethtools_get_pauseparam,
1905 + .set_pauseparam = axienet_ethtools_set_pauseparam,
1906 + .get_coalesce = axienet_ethtools_get_coalesce,
1907 + .set_coalesce = axienet_ethtools_set_coalesce,
1910 +/**
1911 + * axienet_dma_err_handler - Tasklet handler for Axi DMA Error
1912 + * @data: Data passed
1914 + * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
1915 + * Tx/Rx BDs.
1916 + */
1917 +static void axienet_dma_err_handler(unsigned long data)
1919 + u32 axienet_status;
1920 + u32 cr, i;
1921 + int mdio_mcreg;
1922 + struct axienet_local *lp = (struct axienet_local *) data;
1923 + struct net_device *ndev = lp->ndev;
1924 + struct axidma_bd *cur_p;
1926 + axienet_setoptions(ndev, lp->options &
1927 + ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
1928 + mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
1929 + axienet_mdio_wait_until_ready(lp);
1930 + /* Disable the MDIO interface till Axi Ethernet Reset is completed.
1931 + * When we do an Axi Ethernet reset, it resets the complete core
1932 + * including the MDIO. So if MDIO is not disabled when the reset
1933 + * process is started, MDIO will be broken afterwards. */
1934 + axienet_iow(lp, XAE_MDIO_MC_OFFSET, (mdio_mcreg &
1935 + ~XAE_MDIO_MC_MDIOEN_MASK));
1937 + __axienet_device_reset(lp, &ndev->dev, XAXIDMA_TX_CR_OFFSET);
1938 + __axienet_device_reset(lp, &ndev->dev, XAXIDMA_RX_CR_OFFSET);
1940 + axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
1941 + axienet_mdio_wait_until_ready(lp);
1943 + for (i = 0; i < TX_BD_NUM; i++) {
1944 + cur_p = &lp->tx_bd_v[i];
1945 + if (cur_p->phys)
1946 + dma_unmap_single(ndev->dev.parent, cur_p->phys,
1947 + (cur_p->cntrl &
1948 + XAXIDMA_BD_CTRL_LENGTH_MASK),
1949 + DMA_TO_DEVICE);
1950 + if (cur_p->app4)
1951 + dev_kfree_skb_irq((struct sk_buff *) cur_p->app4);
1952 + cur_p->phys = 0;
1953 + cur_p->cntrl = 0;
1954 + cur_p->status = 0;
1955 + cur_p->app0 = 0;
1956 + cur_p->app1 = 0;
1957 + cur_p->app2 = 0;
1958 + cur_p->app3 = 0;
1959 + cur_p->app4 = 0;
1960 + cur_p->sw_id_offset = 0;
1963 + for (i = 0; i < RX_BD_NUM; i++) {
1964 + cur_p = &lp->rx_bd_v[i];
1965 + cur_p->status = 0;
1966 + cur_p->app0 = 0;
1967 + cur_p->app1 = 0;
1968 + cur_p->app2 = 0;
1969 + cur_p->app3 = 0;
1970 + cur_p->app4 = 0;
1973 + lp->tx_bd_ci = 0;
1974 + lp->tx_bd_tail = 0;
1975 + lp->rx_bd_ci = 0;
1977 + /* Start updating the Rx channel control register */
1978 + cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
1979 + /* Update the interrupt coalesce count */
1980 + cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
1981 + (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
1982 + /* Update the delay timer count */
1983 + cr = ((cr & ~XAXIDMA_DELAY_MASK) |
1984 + (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
1985 + /* Enable coalesce, delay timer and error interrupts */
1986 + cr |= XAXIDMA_IRQ_ALL_MASK;
1987 + /* Finally write to the Rx channel control register */
1988 + axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
1990 + /* Start updating the Tx channel control register */
1991 + cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
1992 + /* Update the interrupt coalesce count */
1993 + cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
1994 + (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
1995 + /* Update the delay timer count */
1996 + cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
1997 + (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
1998 + /* Enable coalesce, delay timer and error interrupts */
1999 + cr |= XAXIDMA_IRQ_ALL_MASK;
2000 + /* Finally write to the Tx channel control register */
2001 + axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
2003 + /* Populate the tail pointer and bring the Rx Axi DMA engine out of
2004 + * halted state. This will make the Rx side ready for reception.*/
2005 + axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
2006 + cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
2007 + axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
2008 + cr | XAXIDMA_CR_RUNSTOP_MASK);
2009 + axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
2010 + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
2012 + /* Write to the RS (Run-stop) bit in the Tx channel control register.
2013 + * Tx channel is now ready to run. But only after we write to the
2014 + * tail pointer register that the Tx channel will start transmitting */
2015 + axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
2016 + cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
2017 + axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
2018 + cr | XAXIDMA_CR_RUNSTOP_MASK);
2020 + axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
2021 + axienet_status &= ~XAE_RCW1_RX_MASK;
2022 + axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
2024 + axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
2025 + if (axienet_status & XAE_INT_RXRJECT_MASK)
2026 + axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
2027 + axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
2029 + /* Sync default options with HW but leave receiver and
2030 + * transmitter disabled.*/
2031 + axienet_setoptions(ndev, lp->options &
2032 + ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
2033 + axienet_set_mac_address(ndev, NULL);
2034 + axienet_set_multicast_list(ndev);
2035 + axienet_setoptions(ndev, lp->options);
2038 +/**
2039 + * axienet_of_probe - Axi Ethernet probe function.
2040 + * @op: Pointer to platform device structure.
2041 + * @match: Pointer to device id structure
2043 + * returns: 0, on success
2044 + * Non-zero error value on failure.
2046 + * This is the probe routine for Axi Ethernet driver. This is called before
2047 + * any other driver routines are invoked. It allocates and sets up the Ethernet
2048 + * device. Parses through device tree and populates fields of
2049 + * axienet_local. It registers the Ethernet device.
2050 + */
2051 +static int __devinit axienet_of_probe(struct platform_device *op)
2053 + __be32 *p;
2054 + int size, ret = 0;
2055 + struct device_node *np;
2056 + struct axienet_local *lp;
2057 + struct net_device *ndev;
2058 + const void *addr;
2060 + ndev = alloc_etherdev(sizeof(*lp));
2061 + if (!ndev) {
2062 + dev_err(&op->dev, "could not allocate device.\n");
2063 + return -ENOMEM;
2066 + ether_setup(ndev);
2067 + dev_set_drvdata(&op->dev, ndev);
2069 + SET_NETDEV_DEV(ndev, &op->dev);
2070 + ndev->flags &= ~IFF_MULTICAST; /* clear multicast */
2071 + ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST;
2072 + ndev->netdev_ops = &axienet_netdev_ops;
2073 + ndev->ethtool_ops = &axienet_ethtool_ops;
2075 + lp = netdev_priv(ndev);
2076 + lp->ndev = ndev;
2077 + lp->dev = &op->dev;
2078 + lp->options = XAE_OPTION_DEFAULTS;
2079 + /* Map device registers */
2080 + lp->regs = of_iomap(op->dev.of_node, 0);
2081 + if (!lp->regs) {
2082 + dev_err(&op->dev, "could not map Axi Ethernet regs.\n");
2083 + goto nodev;
2085 + /* Setup checksum offload, but default to off if not specified */
2086 + lp->features = 0;
2088 + p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,txcsum", NULL);
2089 + if (p) {
2090 + switch (be32_to_cpup(p)) {
2091 + case 1:
2092 + lp->csum_offload_on_tx_path =
2093 + XAE_FEATURE_PARTIAL_TX_CSUM;
2094 + lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
2095 + /* Can checksum TCP/UDP over IPv4. */
2096 + ndev->features |= NETIF_F_IP_CSUM;
2097 + break;
2098 + case 2:
2099 + lp->csum_offload_on_tx_path =
2100 + XAE_FEATURE_FULL_TX_CSUM;
2101 + lp->features |= XAE_FEATURE_FULL_TX_CSUM;
2102 + /* Can checksum TCP/UDP over IPv4. */
2103 + ndev->features |= NETIF_F_IP_CSUM;
2104 + break;
2105 + default:
2106 + lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD;
2109 + p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,rxcsum", NULL);
2110 + if (p) {
2111 + switch (be32_to_cpup(p)) {
2112 + case 1:
2113 + lp->csum_offload_on_rx_path =
2114 + XAE_FEATURE_PARTIAL_RX_CSUM;
2115 + lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
2116 + break;
2117 + case 2:
2118 + lp->csum_offload_on_rx_path =
2119 + XAE_FEATURE_FULL_RX_CSUM;
2120 + lp->features |= XAE_FEATURE_FULL_RX_CSUM;
2121 + break;
2122 + default:
2123 + lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD;
2126 + /* For supporting jumbo frames, the Axi Ethernet hardware must have
2127 + * a larger Rx/Tx Memory. Typically, the size must be more than or
2128 + * equal to 16384 bytes, so that we can enable jumbo option and start
2129 + * supporting jumbo frames. Here we check for memory allocated for
2130 + * Rx/Tx in the hardware from the device-tree and accordingly set
2131 + * flags. */
2132 + p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,rxmem", NULL);
2133 + if (p) {
2134 + if ((be32_to_cpup(p)) >= 0x4000)
2135 + lp->jumbo_support = 1;
2137 + p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,temac-type",
2138 + NULL);
2139 + if (p)
2140 + lp->temac_type = be32_to_cpup(p);
2141 + p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,phy-type", NULL);
2142 + if (p)
2143 + lp->phy_type = be32_to_cpup(p);
2145 + /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
2146 + np = of_parse_phandle(op->dev.of_node, "axistream-connected", 0);
2147 + if (!np) {
2148 + dev_err(&op->dev, "could not find DMA node\n");
2149 + goto err_iounmap;
2151 + lp->dma_regs = of_iomap(np, 0);
2152 + if (lp->dma_regs) {
2153 + dev_dbg(&op->dev, "MEM base: %p\n", lp->dma_regs);
2154 + } else {
2155 + dev_err(&op->dev, "unable to map DMA registers\n");
2156 + of_node_put(np);
2158 + lp->rx_irq = irq_of_parse_and_map(np, 1);
2159 + lp->tx_irq = irq_of_parse_and_map(np, 0);
2160 + of_node_put(np);
2161 + if ((lp->rx_irq == NO_IRQ) || (lp->tx_irq == NO_IRQ)) {
2162 + dev_err(&op->dev, "could not determine irqs\n");
2163 + ret = -ENOMEM;
2164 + goto err_iounmap_2;
2167 + /* Retrieve the MAC address */
2168 + addr = of_get_property(op->dev.of_node, "local-mac-address", &size);
2169 + if ((!addr) || (size != 6)) {
2170 + dev_err(&op->dev, "could not find MAC address\n");
2171 + ret = -ENODEV;
2172 + goto err_iounmap_2;
2174 + axienet_set_mac_address(ndev, (void *) addr);
2176 + lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
2177 + lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
2179 + lp->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0);
2180 + ret = axienet_mdio_setup(lp, op->dev.of_node);
2181 + if (ret)
2182 + dev_warn(&op->dev, "error registering MDIO bus\n");
2184 + ret = register_netdev(lp->ndev);
2185 + if (ret) {
2186 + dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
2187 + goto err_iounmap_2;
2190 + tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler,
2191 + (unsigned long) lp);
2192 + tasklet_disable(&lp->dma_err_tasklet);
2194 + return 0;
2196 +err_iounmap_2:
2197 + if (lp->dma_regs)
2198 + iounmap(lp->dma_regs);
2199 +err_iounmap:
2200 + iounmap(lp->regs);
2201 +nodev:
2202 + free_netdev(ndev);
2203 + ndev = NULL;
2204 + return ret;
2207 +static int __devexit axienet_of_remove(struct platform_device *op)
2209 + struct net_device *ndev = dev_get_drvdata(&op->dev);
2210 + struct axienet_local *lp = netdev_priv(ndev);
2212 + axienet_mdio_teardown(lp);
2213 + unregister_netdev(ndev);
2215 + if (lp->phy_node)
2216 + of_node_put(lp->phy_node);
2217 + lp->phy_node = NULL;
2219 + dev_set_drvdata(&op->dev, NULL);
2221 + iounmap(lp->regs);
2222 + if (lp->dma_regs)
2223 + iounmap(lp->dma_regs);
2224 + free_netdev(ndev);
2226 + return 0;
2229 +static struct platform_driver axienet_of_driver = {
2230 + .probe = axienet_of_probe,
2231 + .remove = __devexit_p(axienet_of_remove),
2232 + .driver = {
2233 + .owner = THIS_MODULE,
2234 + .name = "xilinx_axienet",
2235 + .of_match_table = axienet_of_match,
2236 + },
2239 +static int __init axienet_init(void)
2241 + return platform_driver_register(&axienet_of_driver);
2244 +static void __exit axienet_exit(void)
2246 + platform_driver_unregister(&axienet_of_driver);
2249 +module_init(axienet_init);
2250 +module_exit(axienet_exit);
2252 +MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
2253 +MODULE_AUTHOR("Xilinx");
2254 +MODULE_LICENSE("GPL");
2256 diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
2257 new file mode 100644
2258 index 0000000..d70b6e7
2259 --- /dev/null
2260 +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
2261 @@ -0,0 +1,238 @@
2263 + * MDIO bus driver for the Xilinx Axi Ethernet device
2265 + * Copyright (c) 2009 Secret Lab Technologies, Ltd.
2266 + * Copyright (c) 2010 Xilinx, Inc. All rights reserved.
2267 + * Copyright (c) 2012 Daniel Borkmann, <daniel.borkmann@tik.ee.ethz.ch>
2268 + * Copyright (c) 2012 Ariane Keller, <ariane.keller@tik.ee.ethz.ch>
2269 + */
2271 +#include <linux/of_address.h>
2272 +#include <linux/of_mdio.h>
2273 +#include <linux/jiffies.h>
2275 +#include "xilinx_axienet.h"
2277 +#define MAX_MDIO_FREQ 2500000 /* 2.5 MHz */
2278 +#define DEFAULT_CLOCK_DIVISOR XAE_MDIO_DIV_DFT
2280 +/* Wait till MDIO interface is ready to accept a new transaction.*/
2281 +int axienet_mdio_wait_until_ready(struct axienet_local *lp)
2283 + long end = jiffies + 2;
2284 + while (!(axienet_ior(lp, XAE_MDIO_MCR_OFFSET) &
2285 + XAE_MDIO_MCR_READY_MASK)) {
2286 + if (end - jiffies <= 0) {
2287 + WARN_ON(1);
2288 + return -ETIMEDOUT;
2290 + udelay(1);
2292 + return 0;
2295 +/**
2296 + * axienet_mdio_read - MDIO interface read function
2297 + * @bus: Pointer to mii bus structure
2298 + * @phy_id: Address of the PHY device
2299 + * @reg: PHY register to read
2301 + * returns: The register contents on success, -ETIMEDOUT on a timeout
2303 + * Reads the contents of the requested register from the requested PHY
2304 + * address by first writing the details into MCR register. After a while
2305 + * the register MRD is read to obtain the PHY register content.
2306 + */
2307 +static int axienet_mdio_read(struct mii_bus *bus, int phy_id, int reg)
2309 + u32 rc;
2310 + int ret;
2311 + struct axienet_local *lp = bus->priv;
2313 + ret = axienet_mdio_wait_until_ready(lp);
2314 + if (ret < 0)
2315 + return ret;
2317 + axienet_iow(lp, XAE_MDIO_MCR_OFFSET,
2318 + (((phy_id << XAE_MDIO_MCR_PHYAD_SHIFT) &
2319 + XAE_MDIO_MCR_PHYAD_MASK) |
2320 + ((reg << XAE_MDIO_MCR_REGAD_SHIFT) &
2321 + XAE_MDIO_MCR_REGAD_MASK) |
2322 + XAE_MDIO_MCR_INITIATE_MASK |
2323 + XAE_MDIO_MCR_OP_READ_MASK));
2325 + ret = axienet_mdio_wait_until_ready(lp);
2326 + if (ret < 0)
2327 + return ret;
2329 + rc = axienet_ior(lp, XAE_MDIO_MRD_OFFSET) & 0x0000FFFF;
2331 + dev_dbg(lp->dev, "axienet_mdio_read(phy_id=%i, reg=%x) == %x\n",
2332 + phy_id, reg, rc);
2334 + return rc;
2337 +/**
2338 + * axienet_mdio_write - MDIO interface write function
2339 + * @bus: Pointer to mii bus structure
2340 + * @phy_id: Address of the PHY device
2341 + * @reg: PHY register to write to
2342 + * @val: Value to be written into the register
2344 + * returns: 0 on success, -ETIMEDOUT on a timeout
2346 + * Writes the value to the requested register by first writing the value
2347 + * into MWD register. The MCR register is then appropriately set up
2348 + * to finish the write operation.
2349 + */
2350 +static int axienet_mdio_write(struct mii_bus *bus, int phy_id, int reg,
2351 + u16 val)
2353 + int ret;
2354 + struct axienet_local *lp = bus->priv;
2356 + dev_dbg(lp->dev, "axienet_mdio_write(phy_id=%i, reg=%x, val=%x)\n",
2357 + phy_id, reg, val);
2359 + ret = axienet_mdio_wait_until_ready(lp);
2360 + if (ret < 0)
2361 + return ret;
2363 + axienet_iow(lp, XAE_MDIO_MWD_OFFSET, (u32) val);
2364 + axienet_iow(lp, XAE_MDIO_MCR_OFFSET,
2365 + (((phy_id << XAE_MDIO_MCR_PHYAD_SHIFT) &
2366 + XAE_MDIO_MCR_PHYAD_MASK) |
2367 + ((reg << XAE_MDIO_MCR_REGAD_SHIFT) &
2368 + XAE_MDIO_MCR_REGAD_MASK) |
2369 + XAE_MDIO_MCR_INITIATE_MASK |
2370 + XAE_MDIO_MCR_OP_WRITE_MASK));
2372 + ret = axienet_mdio_wait_until_ready(lp);
2373 + if (ret < 0)
2374 + return ret;
2375 + return 0;
2378 +/**
2379 + * axienet_mdio_setup - MDIO setup function
2380 + * @lp: Pointer to axienet local data structure.
2381 + * @np: Pointer to device node
2383 + * returns: 0 on success, -ETIMEDOUT on a timeout, -ENOMEM when
2384 + * mdiobus_alloc (to allocate memory for mii bus structure) fails.
2386 + * Sets up the MDIO interface by initializing the MDIO clock and enabling the
2387 + * MDIO interface in hardware. Register the MDIO interface.
2388 + **/
2389 +int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np)
2391 + int ret;
2392 + u32 clk_div, host_clock;
2393 + u32 *property_p;
2394 + struct mii_bus *bus;
2395 + struct resource res;
2396 + struct device_node *np1;
2398 + /* clk_div can be calculated by deriving it from the equation:
2399 + * fMDIO = fHOST / ((1 + clk_div) * 2)
2401 + * Where fMDIO <= 2500000, so we get:
2402 + * fHOST / ((1 + clk_div) * 2) <= 2500000
2404 + * Then we get:
2405 + * 1 / ((1 + clk_div) * 2) <= (2500000 / fHOST)
2407 + * Then we get:
2408 + * 1 / (1 + clk_div) <= ((2500000 * 2) / fHOST)
2410 + * Then we get:
2411 + * 1 / (1 + clk_div) <= (5000000 / fHOST)
2413 + * So:
2414 + * (1 + clk_div) >= (fHOST / 5000000)
2416 + * And finally:
2417 + * clk_div >= (fHOST / 5000000) - 1
2419 + * fHOST can be read from the flattened device tree as property
2420 + * "clock-frequency" from the CPU
2421 + */
2423 + np1 = of_find_node_by_name(NULL, "cpu");
2424 + if (!np1) {
2425 + printk(KERN_WARNING "%s(): Could not find CPU device node.",
2426 + __func__);
2427 + printk(KERN_WARNING "Setting MDIO clock divisor to "
2428 + "default %d\n", DEFAULT_CLOCK_DIVISOR);
2429 + clk_div = DEFAULT_CLOCK_DIVISOR;
2430 + goto issue;
2432 + property_p = (u32 *) of_get_property(np1, "clock-frequency", NULL);
2433 + if (!property_p) {
2434 + printk(KERN_WARNING "%s(): Could not find CPU property: "
2435 + "clock-frequency.", __func__);
2436 + printk(KERN_WARNING "Setting MDIO clock divisor to "
2437 + "default %d\n", DEFAULT_CLOCK_DIVISOR);
2438 + clk_div = DEFAULT_CLOCK_DIVISOR;
2439 + goto issue;
2442 + host_clock = be32_to_cpup(property_p);
2443 + clk_div = (host_clock / (MAX_MDIO_FREQ * 2)) - 1;
2444 + /* If there is any remainder from the division of
2445 + * fHOST / (MAX_MDIO_FREQ * 2), then we need to add
2446 + * 1 to the clock divisor or we will surely be above 2.5 MHz */
2447 + if (host_clock % (MAX_MDIO_FREQ * 2))
2448 + clk_div++;
2450 + printk(KERN_DEBUG "%s(): Setting MDIO clock divisor to %u based "
2451 + "on %u Hz host clock.\n", __func__, clk_div, host_clock);
2453 + of_node_put(np1);
2454 +issue:
2455 + axienet_iow(lp, XAE_MDIO_MC_OFFSET,
2456 + (((u32) clk_div) | XAE_MDIO_MC_MDIOEN_MASK));
2458 + ret = axienet_mdio_wait_until_ready(lp);
2459 + if (ret < 0)
2460 + return ret;
2462 + bus = mdiobus_alloc();
2463 + if (!bus)
2464 + return -ENOMEM;
2466 + np1 = of_get_parent(lp->phy_node);
2467 + of_address_to_resource(np1, 0, &res);
2468 + snprintf(bus->id, MII_BUS_ID_SIZE, "%.8llx",
2469 + (unsigned long long) res.start);
2471 + bus->priv = lp;
2472 + bus->name = "Xilinx Axi Ethernet MDIO";
2473 + bus->read = axienet_mdio_read;
2474 + bus->write = axienet_mdio_write;
2475 + bus->parent = lp->dev;
2476 + bus->irq = lp->mdio_irqs; /* preallocated IRQ table */
2477 + lp->mii_bus = bus;
2479 + ret = of_mdiobus_register(bus, np1);
2480 + if (ret) {
2481 + mdiobus_free(bus);
2482 + return ret;
2484 + return 0;
2487 +/**
2488 + * axienet_mdio_teardown - MDIO remove function
2489 + * @lp: Pointer to axienet local data structure.
2491 + * Unregisters the MDIO and frees any associate memory for mii bus.
2492 + */
2493 +void axienet_mdio_teardown(struct axienet_local *lp)
2495 + mdiobus_unregister(lp->mii_bus);
2496 + kfree(lp->mii_bus->irq);
2497 + mdiobus_free(lp->mii_bus);
2498 + lp->mii_bus = NULL;