r8169: ephy, eri and efuse functions signature changes.
[linux-2.6.git] / drivers / net / ethernet / realtek / r8169.c
blob 7c6c4b253ea1805755519965fc38644fada75b80
1 /*
2 * r8169.c: RealTek 8169/8168/8101 ethernet driver.
4 * Copyright (c) 2002 ShuChen <shuchen@realtek.com.tw>
5 * Copyright (c) 2003 - 2007 Francois Romieu <romieu@fr.zoreil.com>
6 * Copyright (c) a lot of people too. Please respect their work.
8 * See MAINTAINERS file for support contact information.
9 */
11 #include <linux/module.h>
12 #include <linux/moduleparam.h>
13 #include <linux/pci.h>
14 #include <linux/netdevice.h>
15 #include <linux/etherdevice.h>
16 #include <linux/delay.h>
17 #include <linux/ethtool.h>
18 #include <linux/mii.h>
19 #include <linux/if_vlan.h>
20 #include <linux/crc32.h>
21 #include <linux/in.h>
22 #include <linux/ip.h>
23 #include <linux/tcp.h>
24 #include <linux/init.h>
25 #include <linux/interrupt.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/pm_runtime.h>
28 #include <linux/firmware.h>
29 #include <linux/pci-aspm.h>
30 #include <linux/prefetch.h>
32 #include <asm/io.h>
33 #include <asm/irq.h>
/* Driver identity and the firmware blobs requested per chip revision. */
#define RTL8169_VERSION "2.3LK-NAPI"
#define MODULENAME "r8169"
#define PFX MODULENAME ": "

#define FIRMWARE_8168D_1	"rtl_nic/rtl8168d-1.fw"
#define FIRMWARE_8168D_2	"rtl_nic/rtl8168d-2.fw"
#define FIRMWARE_8168E_1	"rtl_nic/rtl8168e-1.fw"
#define FIRMWARE_8168E_2	"rtl_nic/rtl8168e-2.fw"
#define FIRMWARE_8168E_3	"rtl_nic/rtl8168e-3.fw"
#define FIRMWARE_8168F_1	"rtl_nic/rtl8168f-1.fw"
#define FIRMWARE_8168F_2	"rtl_nic/rtl8168f-2.fw"
#define FIRMWARE_8105E_1	"rtl_nic/rtl8105e-1.fw"
#define FIRMWARE_8402_1		"rtl_nic/rtl8402-1.fw"
#define FIRMWARE_8411_1		"rtl_nic/rtl8411-1.fw"
#define FIRMWARE_8106E_1	"rtl_nic/rtl8106e-1.fw"
#ifdef RTL8169_DEBUG
/* Debug build: a printing assert and a KERN_DEBUG printk helper. */
#define assert(expr) \
	if (!(expr)) {						\
		printk( "Assertion failed! %s,%s,%s,line=%d\n",	\
		#expr,__FILE__,__func__,__LINE__);		\
	}
#define dprintk(fmt, args...) \
	do { printk(KERN_DEBUG PFX fmt, ## args); } while (0)
#else
/* Non-debug build: both compile away to empty statements. */
#define assert(expr) do {} while (0)
#define dprintk(fmt, args...) do {} while (0)
#endif /* RTL8169_DEBUG */

#define R8169_MSG_DEFAULT \
	(NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)

#define TX_SLOTS_AVAIL(tp) \
	(tp->dirty_tx + NUM_TX_DESC - tp->cur_tx)

/* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
#define TX_FRAGS_READY_FOR(tp,nr_frags) \
	(TX_SLOTS_AVAIL(tp) >= (nr_frags + 1))
/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
   The RTL chips use a 64 element hash table based on the Ethernet CRC. */
static const int multicast_filter_limit = 32;

#define MAX_READ_REQUEST_SHIFT	12
#define TX_DMA_BURST		6	/* Maximum PCI burst, '6' is 1024 */
#define SafeMtu			0x1c20	/* ... actually life sucks beyond ~7k */
#define InterFrameGap		0x03	/* 3 means InterFrameGap = the shortest one */

#define R8169_REGS_SIZE		256
#define R8169_NAPI_WEIGHT	64
#define NUM_TX_DESC		64	/* Number of Tx descriptor registers */
#define NUM_RX_DESC		256	/* Number of Rx descriptor registers */
#define RX_BUF_SIZE		1536	/* Rx Buffer size */
#define R8169_TX_RING_BYTES	(NUM_TX_DESC * sizeof(struct TxDesc))
#define R8169_RX_RING_BYTES	(NUM_RX_DESC * sizeof(struct RxDesc))

#define RTL8169_TX_TIMEOUT	(6*HZ)
#define RTL8169_PHY_TIMEOUT	(10*HZ)

#define RTL_EEPROM_SIG		cpu_to_le32(0x8129)
#define RTL_EEPROM_SIG_MASK	cpu_to_le32(0xffff)
#define RTL_EEPROM_SIG_ADDR	0x0000

/* write/read MMIO register; all expect a local "ioaddr" in scope */
#define RTL_W8(reg, val8)	writeb ((val8), ioaddr + (reg))
#define RTL_W16(reg, val16)	writew ((val16), ioaddr + (reg))
#define RTL_W32(reg, val32)	writel ((val32), ioaddr + (reg))
#define RTL_R8(reg)		readb (ioaddr + (reg))
#define RTL_R16(reg)		readw (ioaddr + (reg))
#define RTL_R32(reg)		readl (ioaddr + (reg))
/* Every supported silicon revision; indexes rtl_chip_infos[] below. */
enum mac_version {
	RTL_GIGA_MAC_VER_01 = 0,
	RTL_GIGA_MAC_VER_02, RTL_GIGA_MAC_VER_03, RTL_GIGA_MAC_VER_04,
	RTL_GIGA_MAC_VER_05, RTL_GIGA_MAC_VER_06, RTL_GIGA_MAC_VER_07,
	RTL_GIGA_MAC_VER_08, RTL_GIGA_MAC_VER_09, RTL_GIGA_MAC_VER_10,
	RTL_GIGA_MAC_VER_11, RTL_GIGA_MAC_VER_12, RTL_GIGA_MAC_VER_13,
	RTL_GIGA_MAC_VER_14, RTL_GIGA_MAC_VER_15, RTL_GIGA_MAC_VER_16,
	RTL_GIGA_MAC_VER_17, RTL_GIGA_MAC_VER_18, RTL_GIGA_MAC_VER_19,
	RTL_GIGA_MAC_VER_20, RTL_GIGA_MAC_VER_21, RTL_GIGA_MAC_VER_22,
	RTL_GIGA_MAC_VER_23, RTL_GIGA_MAC_VER_24, RTL_GIGA_MAC_VER_25,
	RTL_GIGA_MAC_VER_26, RTL_GIGA_MAC_VER_27, RTL_GIGA_MAC_VER_28,
	RTL_GIGA_MAC_VER_29, RTL_GIGA_MAC_VER_30, RTL_GIGA_MAC_VER_31,
	RTL_GIGA_MAC_VER_32, RTL_GIGA_MAC_VER_33, RTL_GIGA_MAC_VER_34,
	RTL_GIGA_MAC_VER_35, RTL_GIGA_MAC_VER_36, RTL_GIGA_MAC_VER_37,
	RTL_GIGA_MAC_VER_38, RTL_GIGA_MAC_VER_39,
	RTL_GIGA_MAC_NONE   = 0xff,	/* chip not identified */
};

/* Two incompatible Tx descriptor layouts exist across the family. */
enum rtl_tx_desc_version {
	RTL_TD_0 = 0,
	RTL_TD_1 = 1,
};

/* Jumbo frame payload limits per chip generation. */
#define JUMBO_1K	ETH_DATA_LEN
#define JUMBO_4K	(4*1024 - ETH_HLEN - 2)
#define JUMBO_6K	(6*1024 - ETH_HLEN - 2)
#define JUMBO_7K	(7*1024 - ETH_HLEN - 2)
#define JUMBO_9K	(9*1024 - ETH_HLEN - 2)
160 #define _R(NAME,TD,FW,SZ,B) { \
161 .name = NAME, \
162 .txd_version = TD, \
163 .fw_name = FW, \
164 .jumbo_max = SZ, \
165 .jumbo_tx_csum = B \
168 static const struct {
169 const char *name;
170 enum rtl_tx_desc_version txd_version;
171 const char *fw_name;
172 u16 jumbo_max;
173 bool jumbo_tx_csum;
174 } rtl_chip_infos[] = {
175 /* PCI devices. */
176 [RTL_GIGA_MAC_VER_01] =
177 _R("RTL8169", RTL_TD_0, NULL, JUMBO_7K, true),
178 [RTL_GIGA_MAC_VER_02] =
179 _R("RTL8169s", RTL_TD_0, NULL, JUMBO_7K, true),
180 [RTL_GIGA_MAC_VER_03] =
181 _R("RTL8110s", RTL_TD_0, NULL, JUMBO_7K, true),
182 [RTL_GIGA_MAC_VER_04] =
183 _R("RTL8169sb/8110sb", RTL_TD_0, NULL, JUMBO_7K, true),
184 [RTL_GIGA_MAC_VER_05] =
185 _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true),
186 [RTL_GIGA_MAC_VER_06] =
187 _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true),
188 /* PCI-E devices. */
189 [RTL_GIGA_MAC_VER_07] =
190 _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
191 [RTL_GIGA_MAC_VER_08] =
192 _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
193 [RTL_GIGA_MAC_VER_09] =
194 _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
195 [RTL_GIGA_MAC_VER_10] =
196 _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
197 [RTL_GIGA_MAC_VER_11] =
198 _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false),
199 [RTL_GIGA_MAC_VER_12] =
200 _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false),
201 [RTL_GIGA_MAC_VER_13] =
202 _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
203 [RTL_GIGA_MAC_VER_14] =
204 _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true),
205 [RTL_GIGA_MAC_VER_15] =
206 _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true),
207 [RTL_GIGA_MAC_VER_16] =
208 _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
209 [RTL_GIGA_MAC_VER_17] =
210 _R("RTL8168b/8111b", RTL_TD_1, NULL, JUMBO_4K, false),
211 [RTL_GIGA_MAC_VER_18] =
212 _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
213 [RTL_GIGA_MAC_VER_19] =
214 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
215 [RTL_GIGA_MAC_VER_20] =
216 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
217 [RTL_GIGA_MAC_VER_21] =
218 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
219 [RTL_GIGA_MAC_VER_22] =
220 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
221 [RTL_GIGA_MAC_VER_23] =
222 _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
223 [RTL_GIGA_MAC_VER_24] =
224 _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
225 [RTL_GIGA_MAC_VER_25] =
226 _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_1,
227 JUMBO_9K, false),
228 [RTL_GIGA_MAC_VER_26] =
229 _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_2,
230 JUMBO_9K, false),
231 [RTL_GIGA_MAC_VER_27] =
232 _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
233 [RTL_GIGA_MAC_VER_28] =
234 _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
235 [RTL_GIGA_MAC_VER_29] =
236 _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1,
237 JUMBO_1K, true),
238 [RTL_GIGA_MAC_VER_30] =
239 _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1,
240 JUMBO_1K, true),
241 [RTL_GIGA_MAC_VER_31] =
242 _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
243 [RTL_GIGA_MAC_VER_32] =
244 _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_1,
245 JUMBO_9K, false),
246 [RTL_GIGA_MAC_VER_33] =
247 _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_2,
248 JUMBO_9K, false),
249 [RTL_GIGA_MAC_VER_34] =
250 _R("RTL8168evl/8111evl",RTL_TD_1, FIRMWARE_8168E_3,
251 JUMBO_9K, false),
252 [RTL_GIGA_MAC_VER_35] =
253 _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_1,
254 JUMBO_9K, false),
255 [RTL_GIGA_MAC_VER_36] =
256 _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_2,
257 JUMBO_9K, false),
258 [RTL_GIGA_MAC_VER_37] =
259 _R("RTL8402", RTL_TD_1, FIRMWARE_8402_1,
260 JUMBO_1K, true),
261 [RTL_GIGA_MAC_VER_38] =
262 _R("RTL8411", RTL_TD_1, FIRMWARE_8411_1,
263 JUMBO_9K, false),
264 [RTL_GIGA_MAC_VER_39] =
265 _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_1,
266 JUMBO_1K, true),
268 #undef _R
270 enum cfg_version {
271 RTL_CFG_0 = 0x00,
272 RTL_CFG_1,
273 RTL_CFG_2
276 static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = {
277 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
278 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
279 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
280 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
281 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
282 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 },
283 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302), 0, 0, RTL_CFG_0 },
284 { PCI_DEVICE(PCI_VENDOR_ID_AT, 0xc107), 0, 0, RTL_CFG_0 },
285 { PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0 },
286 { PCI_VENDOR_ID_LINKSYS, 0x1032,
287 PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 },
288 { 0x0001, 0x8168,
289 PCI_ANY_ID, 0x2410, 0, 0, RTL_CFG_2 },
290 {0,},
293 MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
295 static int rx_buf_sz = 16383;
296 static int use_dac;
297 static struct {
298 u32 msg_enable;
299 } debug = { -1 };
/* MMIO register offsets common to the whole 8169/8168/810x family. */
enum rtl_registers {
	MAC0		= 0,	/* Ethernet hardware address. */
	MAC4		= 4,
	MAR0		= 8,	/* Multicast filter. */
	CounterAddrLow		= 0x10,
	CounterAddrHigh		= 0x14,
	TxDescStartAddrLow	= 0x20,
	TxDescStartAddrHigh	= 0x24,
	TxHDescStartAddrLow	= 0x28,
	TxHDescStartAddrHigh	= 0x2c,
	FLASH		= 0x30,
	ERSR		= 0x36,
	ChipCmd		= 0x37,
	TxPoll		= 0x38,
	IntrMask	= 0x3c,
	IntrStatus	= 0x3e,

	TxConfig	= 0x40,
#define	TXCFG_AUTO_FIFO			(1 << 7)	/* 8111e-vl */
#define	TXCFG_EMPTY			(1 << 11)	/* 8111e-vl */

	RxConfig	= 0x44,
#define	RX128_INT_EN			(1 << 15)	/* 8111c and later */
#define	RX_MULTI_EN			(1 << 14)	/* 8111c only */
#define	RXCFG_FIFO_SHIFT		13
					/* No threshold before first PCI xfer */
#define	RX_FIFO_THRESH			(7 << RXCFG_FIFO_SHIFT)
#define	RXCFG_DMA_SHIFT			8
					/* Unlimited maximum PCI burst. */
#define	RX_DMA_BURST			(7 << RXCFG_DMA_SHIFT)

	RxMissed	= 0x4c,
	Cfg9346		= 0x50,
	Config0		= 0x51,
	Config1		= 0x52,
	Config2		= 0x53,
#define PME_SIGNAL			(1 << 5)	/* 8168c and later */

	Config3		= 0x54,
	Config4		= 0x55,
	Config5		= 0x56,
	MultiIntr	= 0x5c,
	PHYAR		= 0x60,
	PHYstatus	= 0x6c,
	RxMaxSize	= 0xda,
	CPlusCmd	= 0xe0,
	IntrMitigate	= 0xe2,
	RxDescAddrLow	= 0xe4,
	RxDescAddrHigh	= 0xe8,
	EarlyTxThres	= 0xec,	/* 8169. Unit of 32 bytes. */

#define NoEarlyTx	0x3f	/* Max value : no early transmit. */

	MaxTxPacketSize	= 0xec,	/* 8101/8168. Unit of 128 bytes. */

#define TxPacketMax	(8064 >> 7)
#define EarlySize	0x27

	FuncEvent	= 0xf0,
	FuncEventMask	= 0xf4,
	FuncPresetState	= 0xf8,
	FuncForceEvent	= 0xfc,
};
/* Registers present only on the PCI 8110/8169 parts (TBI interface). */
enum rtl8110_registers {
	TBICSR			= 0x64,
	TBI_ANAR		= 0x68,
	TBI_LPAR		= 0x6a,
};

/* Registers shared by the PCI-E 8168 and 8101 parts. */
enum rtl8168_8101_registers {
	CSIDR			= 0x64,
	CSIAR			= 0x68,
#define	CSIAR_FLAG			0x80000000
#define	CSIAR_WRITE_CMD			0x80000000
#define	CSIAR_BYTE_ENABLE		0x0f
#define	CSIAR_BYTE_ENABLE_SHIFT		12
#define	CSIAR_ADDR_MASK			0x0fff
#define CSIAR_FUNC_CARD			0x00000000
#define CSIAR_FUNC_SDIO			0x00010000
#define CSIAR_FUNC_NIC			0x00020000
	PMCH			= 0x6f,
	EPHYAR			= 0x80,
#define	EPHYAR_FLAG			0x80000000
#define	EPHYAR_WRITE_CMD		0x80000000
#define	EPHYAR_REG_MASK			0x1f
#define	EPHYAR_REG_SHIFT		16
#define	EPHYAR_DATA_MASK		0xffff
	DLLPR			= 0xd0,
#define	PFM_EN				(1 << 6)
	DBG_REG			= 0xd1,
#define	FIX_NAK_1			(1 << 4)
#define	FIX_NAK_2			(1 << 3)
	TWSI			= 0xd2,
	MCU			= 0xd3,
#define	NOW_IS_OOB			(1 << 7)
#define	EN_NDP				(1 << 3)
#define	EN_OOB_RESET			(1 << 2)
	EFUSEAR			= 0xdc,
#define	EFUSEAR_FLAG			0x80000000
#define	EFUSEAR_WRITE_CMD		0x80000000
#define	EFUSEAR_READ_CMD		0x00000000
#define	EFUSEAR_REG_MASK		0x03ff
#define	EFUSEAR_REG_SHIFT		8
#define	EFUSEAR_DATA_MASK		0xff
};
/* Registers specific to the 8168 family (ERI, GPHY OCP, MISC). */
enum rtl8168_registers {
	LED_FREQ		= 0x1a,
	EEE_LED			= 0x1b,
	ERIDR			= 0x70,
	ERIAR			= 0x74,
#define ERIAR_FLAG			0x80000000
#define ERIAR_WRITE_CMD			0x80000000
#define ERIAR_READ_CMD			0x00000000
#define ERIAR_ADDR_BYTE_ALIGN		4
#define ERIAR_TYPE_SHIFT		16
#define ERIAR_EXGMAC			(0x00 << ERIAR_TYPE_SHIFT)
#define ERIAR_MSIX			(0x01 << ERIAR_TYPE_SHIFT)
#define ERIAR_ASF			(0x02 << ERIAR_TYPE_SHIFT)
#define ERIAR_MASK_SHIFT		12
#define ERIAR_MASK_0001			(0x1 << ERIAR_MASK_SHIFT)
#define ERIAR_MASK_0011			(0x3 << ERIAR_MASK_SHIFT)
#define ERIAR_MASK_1111			(0xf << ERIAR_MASK_SHIFT)
	EPHY_RXER_NUM		= 0x7c,
	OCPDR			= 0xb0,	/* OCP GPHY access */
#define OCPDR_WRITE_CMD			0x80000000
#define OCPDR_READ_CMD			0x00000000
#define OCPDR_REG_MASK			0x7f
#define OCPDR_GPHY_REG_SHIFT		16
#define OCPDR_DATA_MASK			0xffff
	OCPAR			= 0xb4,
#define OCPAR_FLAG			0x80000000
#define OCPAR_GPHY_WRITE_CMD		0x8000f060
#define OCPAR_GPHY_READ_CMD		0x0000f060
	RDSAR1			= 0xd0,	/* 8168c only. Undocumented on 8168dp */
	MISC			= 0xf0,	/* 8168e only. */
#define TXPLA_RST			(1 << 29)
#define DISABLE_LAN_EN			(1 << 23) /* Enable GPIO pin */
#define PWM_EN				(1 << 22)
#define EARLY_TALLY_EN			(1 << 16)
};
/* Bit definitions for the registers above, grouped by register. */
enum rtl_register_content {
	/* InterruptStatusBits */
	SYSErr		= 0x8000,
	PCSTimeout	= 0x4000,
	SWInt		= 0x0100,
	TxDescUnavail	= 0x0080,
	RxFIFOOver	= 0x0040,
	LinkChg		= 0x0020,
	RxOverflow	= 0x0010,
	TxErr		= 0x0008,
	TxOK		= 0x0004,
	RxErr		= 0x0002,
	RxOK		= 0x0001,

	/* RxStatusDesc */
	RxBOVF	= (1 << 24),
	RxFOVF	= (1 << 23),
	RxRWT	= (1 << 22),
	RxRES	= (1 << 21),
	RxRUNT	= (1 << 20),
	RxCRC	= (1 << 19),

	/* ChipCmdBits */
	StopReq		= 0x80,
	CmdReset	= 0x10,
	CmdRxEnb	= 0x08,
	CmdTxEnb	= 0x04,
	RxBufEmpty	= 0x01,

	/* TXPoll register p.5 */
	HPQ		= 0x80,	/* Poll cmd on the high prio queue */
	NPQ		= 0x40,	/* Poll cmd on the low prio queue */
	FSWInt		= 0x01,	/* Forced software interrupt */

	/* Cfg9346Bits */
	Cfg9346_Lock	= 0x00,
	Cfg9346_Unlock	= 0xc0,

	/* rx_mode_bits */
	AcceptErr	= 0x20,
	AcceptRunt	= 0x10,
	AcceptBroadcast	= 0x08,
	AcceptMulticast	= 0x04,
	AcceptMyPhys	= 0x02,
	AcceptAllPhys	= 0x01,
#define RX_CONFIG_ACCEPT_MASK		0x3f

	/* TxConfigBits */
	TxInterFrameGapShift = 24,
	TxDMAShift = 8,	/* DMA burst value (0-7) is shift this many bits */

	/* Config1 register p.24 */
	LEDS1		= (1 << 7),
	LEDS0		= (1 << 6),
	Speed_down	= (1 << 4),
	MEMMAP		= (1 << 3),
	IOMAP		= (1 << 2),
	VPD		= (1 << 1),
	PMEnable	= (1 << 0),	/* Power Management Enable */

	/* Config2 register p. 25 */
	MSIEnable	= (1 << 5),	/* 8169 only. Reserved in the 8168. */
	PCI_Clock_66MHz = 0x01,
	PCI_Clock_33MHz = 0x00,

	/* Config3 register p.25 */
	MagicPacket	= (1 << 5),	/* Wake up when receives a Magic Packet */
	LinkUp		= (1 << 4),	/* Wake up when the cable connection is re-established */
	Jumbo_En0	= (1 << 2),	/* 8168 only. Reserved in the 8168b */
	Beacon_en	= (1 << 0),	/* 8168 only. Reserved in the 8168b */

	/* Config4 register */
	Jumbo_En1	= (1 << 1),	/* 8168 only. Reserved in the 8168b */

	/* Config5 register p.27 */
	BWF		= (1 << 6),	/* Accept Broadcast wakeup frame */
	MWF		= (1 << 5),	/* Accept Multicast wakeup frame */
	UWF		= (1 << 4),	/* Accept Unicast wakeup frame */
	Spi_en		= (1 << 3),
	LanWake		= (1 << 1),	/* LanWake enable/disable */
	PMEStatus	= (1 << 0),	/* PME status can be reset by PCI RST# */

	/* TBICSR p.28 */
	TBIReset	= 0x80000000,
	TBILoopback	= 0x40000000,
	TBINwEnable	= 0x20000000,
	TBINwRestart	= 0x10000000,
	TBILinkOk	= 0x02000000,
	TBINwComplete	= 0x01000000,

	/* CPlusCmd p.31 */
	EnableBist	= (1 << 15),	// 8168 8101
	Mac_dbgo_oe	= (1 << 14),	// 8168 8101
	Normal_mode	= (1 << 13),	// unused
	Force_half_dup	= (1 << 12),	// 8168 8101
	Force_rxflow_en	= (1 << 11),	// 8168 8101
	Force_txflow_en	= (1 << 10),	// 8168 8101
	Cxpl_dbg_sel	= (1 << 9),	// 8168 8101
	ASF		= (1 << 8),	// 8168 8101
	PktCntrDisable	= (1 << 7),	// 8168 8101
	Mac_dbgo_sel	= 0x001c,	// 8168
	RxVlan		= (1 << 6),
	RxChkSum	= (1 << 5),
	PCIDAC		= (1 << 4),
	PCIMulRW	= (1 << 3),
	INTT_0		= 0x0000,	// 8168
	INTT_1		= 0x0001,	// 8168
	INTT_2		= 0x0002,	// 8168
	INTT_3		= 0x0003,	// 8168

	/* rtl8169_PHYstatus */
	TBI_Enable	= 0x80,
	TxFlowCtrl	= 0x40,
	RxFlowCtrl	= 0x20,
	_1000bpsF	= 0x10,
	_100bps		= 0x08,
	_10bps		= 0x04,
	LinkStatus	= 0x02,
	FullDup		= 0x01,

	/* _TBICSRBit */
	TBILinkOK	= 0x02000000,

	/* DumpCounterCommand */
	CounterDump	= 0x8,
};
/* Descriptor bits common to both Tx descriptor layouts. */
enum rtl_desc_bit {
	/* First doubleword. */
	DescOwn		= (1 << 31), /* Descriptor is owned by NIC */
	RingEnd		= (1 << 30), /* End of descriptor ring */
	FirstFrag	= (1 << 29), /* First segment of a packet */
	LastFrag	= (1 << 28), /* Final segment of a packet */
};

/* Generic case. */
enum rtl_tx_desc_bit {
	/* First doubleword. */
	TD_LSO		= (1 << 27),		/* Large Send Offload */
#define TD_MSS_MAX			0x07ffu	/* MSS value */

	/* Second doubleword. */
	TxVlanTag	= (1 << 17),		/* Add VLAN tag */
};

/* 8169, 8168b and 810x except 8102e. */
enum rtl_tx_desc_bit_0 {
	/* First doubleword. */
#define TD0_MSS_SHIFT			16	/* MSS position (11 bits) */
	TD0_TCP_CS	= (1 << 16),		/* Calculate TCP/IP checksum */
	TD0_UDP_CS	= (1 << 17),		/* Calculate UDP/IP checksum */
	TD0_IP_CS	= (1 << 18),		/* Calculate IP checksum */
};

/* 8102e, 8168c and beyond. */
enum rtl_tx_desc_bit_1 {
	/* Second doubleword. */
#define TD1_MSS_SHIFT			18	/* MSS position (11 bits) */
	TD1_IP_CS	= (1 << 29),		/* Calculate IP checksum */
	TD1_TCP_CS	= (1 << 30),		/* Calculate TCP/IP checksum */
	TD1_UDP_CS	= (1 << 31),		/* Calculate UDP/IP checksum */
};
607 static const struct rtl_tx_desc_info {
608 struct {
609 u32 udp;
610 u32 tcp;
611 } checksum;
612 u16 mss_shift;
613 u16 opts_offset;
614 } tx_desc_info [] = {
615 [RTL_TD_0] = {
616 .checksum = {
617 .udp = TD0_IP_CS | TD0_UDP_CS,
618 .tcp = TD0_IP_CS | TD0_TCP_CS
620 .mss_shift = TD0_MSS_SHIFT,
621 .opts_offset = 0
623 [RTL_TD_1] = {
624 .checksum = {
625 .udp = TD1_IP_CS | TD1_UDP_CS,
626 .tcp = TD1_IP_CS | TD1_TCP_CS
628 .mss_shift = TD1_MSS_SHIFT,
629 .opts_offset = 1
enum rtl_rx_desc_bit {
	/* Rx private */
	PID1		= (1 << 18), /* Protocol ID bit 1/2 */
	PID0		= (1 << 17), /* Protocol ID bit 2/2 */

#define RxProtoUDP	(PID1)
#define RxProtoTCP	(PID0)
#define RxProtoIP	(PID1 | PID0)
#define RxProtoMask	RxProtoIP

	IPFail		= (1 << 16), /* IP checksum failed */
	UDPFail		= (1 << 15), /* UDP/IP checksum failed */
	TCPFail		= (1 << 14), /* TCP/IP checksum failed */
	RxVlanTag	= (1 << 16), /* VLAN tag available */
};

#define RsvdMask	0x3fffc000
651 struct TxDesc {
652 __le32 opts1;
653 __le32 opts2;
654 __le64 addr;
657 struct RxDesc {
658 __le32 opts1;
659 __le32 opts2;
660 __le64 addr;
663 struct ring_info {
664 struct sk_buff *skb;
665 u32 len;
666 u8 __pad[sizeof(void *) - sizeof(u32)];
669 enum features {
670 RTL_FEATURE_WOL = (1 << 0),
671 RTL_FEATURE_MSI = (1 << 1),
672 RTL_FEATURE_GMII = (1 << 2),
675 struct rtl8169_counters {
676 __le64 tx_packets;
677 __le64 rx_packets;
678 __le64 tx_errors;
679 __le32 rx_errors;
680 __le16 rx_missed;
681 __le16 align_errors;
682 __le32 tx_one_collision;
683 __le32 tx_multi_collision;
684 __le64 rx_unicast;
685 __le64 rx_broadcast;
686 __le32 rx_multicast;
687 __le16 tx_aborted;
688 __le16 tx_underun;
691 enum rtl_flag {
692 RTL_FLAG_TASK_ENABLED,
693 RTL_FLAG_TASK_SLOW_PENDING,
694 RTL_FLAG_TASK_RESET_PENDING,
695 RTL_FLAG_TASK_PHY_PENDING,
696 RTL_FLAG_MAX
699 struct rtl8169_stats {
700 u64 packets;
701 u64 bytes;
702 struct u64_stats_sync syncp;
705 struct rtl8169_private {
706 void __iomem *mmio_addr; /* memory map physical address */
707 struct pci_dev *pci_dev;
708 struct net_device *dev;
709 struct napi_struct napi;
710 u32 msg_enable;
711 u16 txd_version;
712 u16 mac_version;
713 u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
714 u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */
715 u32 dirty_rx;
716 u32 dirty_tx;
717 struct rtl8169_stats rx_stats;
718 struct rtl8169_stats tx_stats;
719 struct TxDesc *TxDescArray; /* 256-aligned Tx descriptor ring */
720 struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */
721 dma_addr_t TxPhyAddr;
722 dma_addr_t RxPhyAddr;
723 void *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */
724 struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */
725 struct timer_list timer;
726 u16 cp_cmd;
728 u16 event_slow;
730 struct mdio_ops {
731 void (*write)(struct rtl8169_private *, int, int);
732 int (*read)(struct rtl8169_private *, int);
733 } mdio_ops;
735 struct pll_power_ops {
736 void (*down)(struct rtl8169_private *);
737 void (*up)(struct rtl8169_private *);
738 } pll_power_ops;
740 struct jumbo_ops {
741 void (*enable)(struct rtl8169_private *);
742 void (*disable)(struct rtl8169_private *);
743 } jumbo_ops;
745 struct csi_ops {
746 void (*write)(struct rtl8169_private *, int, int);
747 u32 (*read)(struct rtl8169_private *, int);
748 } csi_ops;
750 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
751 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
752 void (*phy_reset_enable)(struct rtl8169_private *tp);
753 void (*hw_start)(struct net_device *);
754 unsigned int (*phy_reset_pending)(struct rtl8169_private *tp);
755 unsigned int (*link_ok)(void __iomem *);
756 int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd);
758 struct {
759 DECLARE_BITMAP(flags, RTL_FLAG_MAX);
760 struct mutex mutex;
761 struct work_struct work;
762 } wk;
764 unsigned features;
766 struct mii_if_info mii;
767 struct rtl8169_counters counters;
768 u32 saved_wolopts;
769 u32 opts1_mask;
771 struct rtl_fw {
772 const struct firmware *fw;
774 #define RTL_VER_SIZE 32
776 char version[RTL_VER_SIZE];
778 struct rtl_fw_phy_action {
779 __le32 *code;
780 size_t size;
781 } phy_action;
782 } *rtl_fw;
783 #define RTL_FIRMWARE_UNKNOWN ERR_PTR(-EAGAIN)
786 MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
787 MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
788 module_param(use_dac, int, 0);
789 MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
790 module_param_named(debug, debug.msg_enable, int, 0);
791 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
792 MODULE_LICENSE("GPL");
793 MODULE_VERSION(RTL8169_VERSION);
794 MODULE_FIRMWARE(FIRMWARE_8168D_1);
795 MODULE_FIRMWARE(FIRMWARE_8168D_2);
796 MODULE_FIRMWARE(FIRMWARE_8168E_1);
797 MODULE_FIRMWARE(FIRMWARE_8168E_2);
798 MODULE_FIRMWARE(FIRMWARE_8168E_3);
799 MODULE_FIRMWARE(FIRMWARE_8105E_1);
800 MODULE_FIRMWARE(FIRMWARE_8168F_1);
801 MODULE_FIRMWARE(FIRMWARE_8168F_2);
802 MODULE_FIRMWARE(FIRMWARE_8402_1);
803 MODULE_FIRMWARE(FIRMWARE_8411_1);
804 MODULE_FIRMWARE(FIRMWARE_8106E_1);
806 static void rtl_lock_work(struct rtl8169_private *tp)
808 mutex_lock(&tp->wk.mutex);
811 static void rtl_unlock_work(struct rtl8169_private *tp)
813 mutex_unlock(&tp->wk.mutex);
816 static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
818 int cap = pci_pcie_cap(pdev);
820 if (cap) {
821 u16 ctl;
823 pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl);
824 ctl = (ctl & ~PCI_EXP_DEVCTL_READRQ) | force;
825 pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl);
829 static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
831 void __iomem *ioaddr = tp->mmio_addr;
832 int i;
834 RTL_W32(OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
835 for (i = 0; i < 20; i++) {
836 udelay(100);
837 if (RTL_R32(OCPAR) & OCPAR_FLAG)
838 break;
840 return RTL_R32(OCPDR);
843 static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data)
845 void __iomem *ioaddr = tp->mmio_addr;
846 int i;
848 RTL_W32(OCPDR, data);
849 RTL_W32(OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
850 for (i = 0; i < 20; i++) {
851 udelay(100);
852 if ((RTL_R32(OCPAR) & OCPAR_FLAG) == 0)
853 break;
857 static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
859 void __iomem *ioaddr = tp->mmio_addr;
860 int i;
862 RTL_W8(ERIDR, cmd);
863 RTL_W32(ERIAR, 0x800010e8);
864 msleep(2);
865 for (i = 0; i < 5; i++) {
866 udelay(100);
867 if (!(RTL_R32(ERIAR) & ERIAR_FLAG))
868 break;
871 ocp_write(tp, 0x1, 0x30, 0x00000001);
874 #define OOB_CMD_RESET 0x00
875 #define OOB_CMD_DRIVER_START 0x05
876 #define OOB_CMD_DRIVER_STOP 0x06
878 static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp)
880 return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10;
883 static void rtl8168_driver_start(struct rtl8169_private *tp)
885 u16 reg;
886 int i;
888 rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START);
890 reg = rtl8168_get_ocp_reg(tp);
892 for (i = 0; i < 10; i++) {
893 msleep(10);
894 if (ocp_read(tp, 0x0f, reg) & 0x00000800)
895 break;
899 static void rtl8168_driver_stop(struct rtl8169_private *tp)
901 u16 reg;
902 int i;
904 rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP);
906 reg = rtl8168_get_ocp_reg(tp);
908 for (i = 0; i < 10; i++) {
909 msleep(10);
910 if ((ocp_read(tp, 0x0f, reg) & 0x00000800) == 0)
911 break;
915 static int r8168dp_check_dash(struct rtl8169_private *tp)
917 u16 reg = rtl8168_get_ocp_reg(tp);
919 return (ocp_read(tp, 0x0f, reg) & 0x00008000) ? 1 : 0;
922 static void r8169_mdio_write(struct rtl8169_private *tp, int reg, int value)
924 void __iomem *ioaddr = tp->mmio_addr;
925 int i;
927 RTL_W32(PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff));
929 for (i = 20; i > 0; i--) {
931 * Check if the RTL8169 has completed writing to the specified
932 * MII register.
934 if (!(RTL_R32(PHYAR) & 0x80000000))
935 break;
936 udelay(25);
939 * According to hardware specs a 20us delay is required after write
940 * complete indication, but before sending next command.
942 udelay(20);
945 static int r8169_mdio_read(struct rtl8169_private *tp, int reg)
947 void __iomem *ioaddr = tp->mmio_addr;
948 int i, value = -1;
950 RTL_W32(PHYAR, 0x0 | (reg & 0x1f) << 16);
952 for (i = 20; i > 0; i--) {
954 * Check if the RTL8169 has completed retrieving data from
955 * the specified MII register.
957 if (RTL_R32(PHYAR) & 0x80000000) {
958 value = RTL_R32(PHYAR) & 0xffff;
959 break;
961 udelay(25);
964 * According to hardware specs a 20us delay is required after read
965 * complete indication, but before sending next command.
967 udelay(20);
969 return value;
972 static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data)
974 void __iomem *ioaddr = tp->mmio_addr;
975 int i;
977 RTL_W32(OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT));
978 RTL_W32(OCPAR, OCPAR_GPHY_WRITE_CMD);
979 RTL_W32(EPHY_RXER_NUM, 0);
981 for (i = 0; i < 100; i++) {
982 mdelay(1);
983 if (!(RTL_R32(OCPAR) & OCPAR_FLAG))
984 break;
988 static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value)
990 r8168dp_1_mdio_access(tp, reg,
991 OCPDR_WRITE_CMD | (value & OCPDR_DATA_MASK));
994 static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg)
996 void __iomem *ioaddr = tp->mmio_addr;
997 int i;
999 r8168dp_1_mdio_access(tp, reg, OCPDR_READ_CMD);
1001 mdelay(1);
1002 RTL_W32(OCPAR, OCPAR_GPHY_READ_CMD);
1003 RTL_W32(EPHY_RXER_NUM, 0);
1005 for (i = 0; i < 100; i++) {
1006 mdelay(1);
1007 if (RTL_R32(OCPAR) & OCPAR_FLAG)
1008 break;
1011 return RTL_R32(OCPDR) & OCPDR_DATA_MASK;
1014 #define R8168DP_1_MDIO_ACCESS_BIT 0x00020000
1016 static void r8168dp_2_mdio_start(void __iomem *ioaddr)
1018 RTL_W32(0xd0, RTL_R32(0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT);
1021 static void r8168dp_2_mdio_stop(void __iomem *ioaddr)
1023 RTL_W32(0xd0, RTL_R32(0xd0) | R8168DP_1_MDIO_ACCESS_BIT);
1026 static void r8168dp_2_mdio_write(struct rtl8169_private *tp, int reg, int value)
1028 void __iomem *ioaddr = tp->mmio_addr;
1030 r8168dp_2_mdio_start(ioaddr);
1032 r8169_mdio_write(tp, reg, value);
1034 r8168dp_2_mdio_stop(ioaddr);
1037 static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg)
1039 void __iomem *ioaddr = tp->mmio_addr;
1040 int value;
1042 r8168dp_2_mdio_start(ioaddr);
1044 value = r8169_mdio_read(tp, reg);
1046 r8168dp_2_mdio_stop(ioaddr);
1048 return value;
1051 static void rtl_writephy(struct rtl8169_private *tp, int location, u32 val)
1053 tp->mdio_ops.write(tp, location, val);
1056 static int rtl_readphy(struct rtl8169_private *tp, int location)
1058 return tp->mdio_ops.read(tp, location);
/* OR @value into a PHY register (read-modify-write). */
static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value)
{
	int cur = rtl_readphy(tp, reg_addr);

	rtl_writephy(tp, reg_addr, cur | value);
}
/* Set bits @p then clear bits @m in a PHY register.
 *
 * NOTE(review): bits present in both @p and @m end up CLEARED here
 * ((val | p) & ~m), whereas rtl_w1w0_eri() gives the set-bits
 * precedence ((val & ~m) | p).  The orderings differ only when p and m
 * overlap; preserved as-is since PHY init sequences depend on it.
 */
static void rtl_w1w0_phy(struct rtl8169_private *tp, int reg_addr, int p, int m)
{
	int val = rtl_readphy(tp, reg_addr);

	rtl_writephy(tp, reg_addr, (val | p) & ~m);
}
/* mii_if_info callback: write a PHY register (phy_id is ignored,
 * there is a single internal PHY).
 */
static void rtl_mdio_write(struct net_device *dev, int phy_id, int location,
			   int val)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_writephy(tp, location, val);
}
/* mii_if_info callback: read a PHY register (phy_id is ignored). */
static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	return rtl_readphy(tp, location);
}
1089 static void rtl_ephy_write(struct rtl8169_private *tp, int reg_addr, int value)
1091 void __iomem *ioaddr = tp->mmio_addr;
1092 unsigned int i;
1094 RTL_W32(EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) |
1095 (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
1097 for (i = 0; i < 100; i++) {
1098 if (!(RTL_R32(EPHYAR) & EPHYAR_FLAG))
1099 break;
1100 udelay(10);
1104 static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr)
1106 void __iomem *ioaddr = tp->mmio_addr;
1107 u16 value = 0xffff;
1108 unsigned int i;
1110 RTL_W32(EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
1112 for (i = 0; i < 100; i++) {
1113 if (RTL_R32(EPHYAR) & EPHYAR_FLAG) {
1114 value = RTL_R32(EPHYAR) & EPHYAR_DATA_MASK;
1115 break;
1117 udelay(10);
1120 return value;
1123 static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
1124 u32 val, int type)
1126 void __iomem *ioaddr = tp->mmio_addr;
1127 unsigned int i;
1129 BUG_ON((addr & 3) || (mask == 0));
1130 RTL_W32(ERIDR, val);
1131 RTL_W32(ERIAR, ERIAR_WRITE_CMD | type | mask | addr);
1133 for (i = 0; i < 100; i++) {
1134 if (!(RTL_R32(ERIAR) & ERIAR_FLAG))
1135 break;
1136 udelay(100);
1140 static u32 rtl_eri_read(struct rtl8169_private *tp, int addr, int type)
1142 void __iomem *ioaddr = tp->mmio_addr;
1143 u32 value = ~0x00;
1144 unsigned int i;
1146 RTL_W32(ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr);
1148 for (i = 0; i < 100; i++) {
1149 if (RTL_R32(ERIAR) & ERIAR_FLAG) {
1150 value = RTL_R32(ERIDR);
1151 break;
1153 udelay(100);
1156 return value;
1159 static void rtl_w1w0_eri(struct rtl8169_private *tp, int addr, u32 mask, u32 p,
1160 u32 m, int type)
1162 u32 val;
1164 val = rtl_eri_read(tp, addr, type);
1165 rtl_eri_write(tp, addr, mask, (val & ~m) | p, type);
/* One entry of a batched EXGMAC (ERI) register initialisation table. */
struct exgmac_reg {
	u16 addr;	/* ERI register address */
	u16 mask;	/* byte-lane mask (ERIAR_MASK_*) */
	u32 val;	/* value to write */
};
1174 static void rtl_write_exgmac_batch(struct rtl8169_private *tp,
1175 const struct exgmac_reg *r, int len)
1177 while (len-- > 0) {
1178 rtl_eri_write(tp, r->addr, r->mask, r->val, ERIAR_EXGMAC);
1179 r++;
/*
 * Read one byte from the 8168d efuse at @reg_addr.
 * Returns 0xff if the chip does not signal completion within
 * 300 * 100us.
 */
static u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr)
{
	void __iomem *ioaddr = tp->mmio_addr;
	u8 value = 0xff;	/* sentinel returned on timeout */
	unsigned int i;

	RTL_W32(EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);

	for (i = 0; i < 300; i++) {
		/* EFUSEAR_FLAG set means the data byte is valid. */
		if (RTL_R32(EFUSEAR) & EFUSEAR_FLAG) {
			value = RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK;
			break;
		}
		udelay(100);
	}

	return value;
}
/* Return the currently pending interrupt event bits. */
static u16 rtl_get_events(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R16(IntrStatus);
}
/* Acknowledge (clear) the interrupt events in @bits (write-1-to-clear). */
static void rtl_ack_events(struct rtl8169_private *tp, u16 bits)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W16(IntrStatus, bits);
	mmiowb();	/* order the MMIO write before lock release */
}
/* Mask all chip interrupt sources. */
static void rtl_irq_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W16(IntrMask, 0);
	mmiowb();	/* order the MMIO write before lock release */
}
/* Unmask the interrupt sources selected by @bits. */
static void rtl_irq_enable(struct rtl8169_private *tp, u16 bits)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W16(IntrMask, bits);
}
/* Interrupt events serviced from the NAPI poll path. */
#define RTL_EVENT_NAPI_RX	(RxOK | RxErr)
#define RTL_EVENT_NAPI_TX	(TxOK | TxErr)
#define RTL_EVENT_NAPI		(RTL_EVENT_NAPI_RX | RTL_EVENT_NAPI_TX)
/* Unmask the NAPI events plus the chip-specific slow-path events. */
static void rtl_irq_enable_all(struct rtl8169_private *tp)
{
	rtl_irq_enable(tp, RTL_EVENT_NAPI | tp->event_slow);
}
/* Mask all interrupts and acknowledge anything already pending. */
static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_irq_disable(tp);
	rtl_ack_events(tp, RTL_EVENT_NAPI | tp->event_slow);
	RTL_R8(ChipCmd);	/* read back to flush posted PCI writes */
}
/* TBI (fiber) variant: non-zero while the TBI reset is still running. */
static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(TBICSR) & TBIReset;
}
/* XMII (copper) variant: non-zero while the PHY reset is still running. */
static unsigned int rtl8169_xmii_reset_pending(struct rtl8169_private *tp)
{
	return rtl_readphy(tp, MII_BMCR) & BMCR_RESET;
}
/* TBI (fiber) variant: non-zero when the link is up. */
static unsigned int rtl8169_tbi_link_ok(void __iomem *ioaddr)
{
	return RTL_R32(TBICSR) & TBILinkOk;
}
/* XMII (copper) variant: non-zero when the link is up. */
static unsigned int rtl8169_xmii_link_ok(void __iomem *ioaddr)
{
	return RTL_R8(PHYstatus) & LinkStatus;
}
/* TBI (fiber) variant: kick off a TBI reset. */
static void rtl8169_tbi_reset_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(TBICSR, RTL_R32(TBICSR) | TBIReset);
}
1279 static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp)
1281 unsigned int val;
1283 val = rtl_readphy(tp, MII_BMCR) | BMCR_RESET;
1284 rtl_writephy(tp, MII_BMCR, val & 0xffff);
/*
 * Apply chip-specific ERI fixups after a link state change.  The magic
 * register values come from Realtek and depend on both the chip family
 * and the negotiated speed (read back from PHYstatus).
 */
static void rtl_link_chg_patch(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct net_device *dev = tp->dev;

	if (!netif_running(dev))
		return;

	if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_38) {
		if (RTL_R8(PHYstatus) & _1000bpsF) {
			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
				      ERIAR_EXGMAC);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
				      ERIAR_EXGMAC);
		} else if (RTL_R8(PHYstatus) & _100bps) {
			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
				      ERIAR_EXGMAC);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
				      ERIAR_EXGMAC);
		} else {
			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
				      ERIAR_EXGMAC);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f,
				      ERIAR_EXGMAC);
		}
		/* Reset packet filter */
		rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01,
			     ERIAR_EXGMAC);
		rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00,
			     ERIAR_EXGMAC);
	} else if (tp->mac_version == RTL_GIGA_MAC_VER_35 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_36) {
		if (RTL_R8(PHYstatus) & _1000bpsF) {
			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
				      ERIAR_EXGMAC);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
				      ERIAR_EXGMAC);
		} else {
			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
				      ERIAR_EXGMAC);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f,
				      ERIAR_EXGMAC);
		}
	} else if (tp->mac_version == RTL_GIGA_MAC_VER_37) {
		if (RTL_R8(PHYstatus) & _10bps) {
			rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02,
				      ERIAR_EXGMAC);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060,
				      ERIAR_EXGMAC);
		} else {
			rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000,
				      ERIAR_EXGMAC);
		}
	}
}
/*
 * Propagate the hardware link state to the net core.  When @pm is true
 * a runtime-PM resume is requested on link-up (cancelling any scheduled
 * suspend) and a 5s-delayed suspend is scheduled on link-down.
 */
static void __rtl8169_check_link_status(struct net_device *dev,
					struct rtl8169_private *tp,
					void __iomem *ioaddr, bool pm)
{
	if (tp->link_ok(ioaddr)) {
		rtl_link_chg_patch(tp);
		/* This is to cancel a scheduled suspend if there's one. */
		if (pm)
			pm_request_resume(&tp->pci_dev->dev);
		netif_carrier_on(dev);
		if (net_ratelimit())
			netif_info(tp, ifup, dev, "link up\n");
	} else {
		netif_carrier_off(dev);
		netif_info(tp, ifdown, dev, "link down\n");
		if (pm)
			pm_schedule_suspend(&tp->pci_dev->dev, 5000);
	}
}
/* Link-state check without any runtime-PM interaction. */
static void rtl8169_check_link_status(struct net_device *dev,
				      struct rtl8169_private *tp,
				      void __iomem *ioaddr)
{
	__rtl8169_check_link_status(dev, tp, ioaddr, false);
}
/* All wake sources this driver can expose through ethtool. */
#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)

/*
 * Translate the chip's Config1/3/5 WoL bits into a WAKE_* bitmap.
 * Returns 0 when PME is disabled altogether.
 */
static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	u8 options;
	u32 wolopts = 0;

	options = RTL_R8(Config1);
	if (!(options & PMEnable))
		return 0;

	options = RTL_R8(Config3);
	if (options & LinkUp)
		wolopts |= WAKE_PHY;
	if (options & MagicPacket)
		wolopts |= WAKE_MAGIC;

	options = RTL_R8(Config5);
	if (options & UWF)
		wolopts |= WAKE_UCAST;
	if (options & BWF)
		wolopts |= WAKE_BCAST;
	if (options & MWF)
		wolopts |= WAKE_MCAST;

	return wolopts;
}
/* ethtool get_wol handler: report supported and active wake sources. */
static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_lock_work(tp);

	wol->supported = WAKE_ANY;
	wol->wolopts = __rtl8169_get_wol(tp);

	rtl_unlock_work(tp);
}
/*
 * Program the chip's WoL configuration from a WAKE_* bitmap.  The
 * Config registers are EEPROM-locked, so the sequence is bracketed by
 * Cfg9346 unlock/lock.  PME enablement lives in Config1 on old chips
 * and in Config2 on newer ones.
 */
static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
{
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int i;
	/* Mapping of each WAKE_* option to its config register and bit. */
	static const struct {
		u32 opt;
		u16 reg;
		u8  mask;
	} cfg[] = {
		{ WAKE_PHY,   Config3, LinkUp },
		{ WAKE_MAGIC, Config3, MagicPacket },
		{ WAKE_UCAST, Config5, UWF },
		{ WAKE_BCAST, Config5, BWF },
		{ WAKE_MCAST, Config5, MWF },
		{ WAKE_ANY,   Config5, LanWake }
	};
	u8 options;

	RTL_W8(Cfg9346, Cfg9346_Unlock);

	for (i = 0; i < ARRAY_SIZE(cfg); i++) {
		options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
		if (wolopts & cfg[i].opt)
			options |= cfg[i].mask;
		RTL_W8(cfg[i].reg, options);
	}

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_17:
		options = RTL_R8(Config1) & ~PMEnable;
		if (wolopts)
			options |= PMEnable;
		RTL_W8(Config1, options);
		break;
	default:
		options = RTL_R8(Config2) & ~PME_SIGNAL;
		if (wolopts)
			options |= PME_SIGNAL;
		RTL_W8(Config2, options);
		break;
	}

	RTL_W8(Cfg9346, Cfg9346_Lock);
}
/*
 * ethtool set_wol handler: program the chip, track the WoL feature flag
 * and tell the PM core whether this device may wake the system.
 */
static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_lock_work(tp);

	if (wol->wolopts)
		tp->features |= RTL_FEATURE_WOL;
	else
		tp->features &= ~RTL_FEATURE_WOL;
	__rtl8169_set_wol(tp, wol->wolopts);

	rtl_unlock_work(tp);

	device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);

	return 0;
}
/* Firmware blob name for this chip, or NULL if none is required. */
static const char *rtl_lookup_firmware_name(struct rtl8169_private *tp)
{
	return rtl_chip_infos[tp->mac_version].fw_name;
}
/*
 * ethtool get_drvinfo handler.  The firmware version is only reported
 * once a firmware image has actually been loaded (tp->rtl_fw may be
 * NULL or an ERR_PTR sentinel before/after a failed load).
 */
static void rtl8169_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct rtl_fw *rtl_fw = tp->rtl_fw;

	strlcpy(info->driver, MODULENAME, sizeof(info->driver));
	strlcpy(info->version, RTL8169_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
	BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version));
	if (!IS_ERR_OR_NULL(rtl_fw))
		strlcpy(info->fw_version, rtl_fw->version,
			sizeof(info->fw_version));
}
/* ethtool get_regs_len handler: size of the register dump. */
static int rtl8169_get_regs_len(struct net_device *dev)
{
	return R8169_REGS_SIZE;
}
/*
 * Speed setting for the TBI (fiber) interface.  Only 1000/full forced
 * mode or autonegotiation make sense on fiber; anything else is
 * rejected with -EOPNOTSUPP.  @ignored keeps the signature in line with
 * the xmii variant.
 */
static int rtl8169_set_speed_tbi(struct net_device *dev,
				 u8 autoneg, u16 speed, u8 duplex, u32 ignored)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	int ret = 0;
	u32 reg;

	reg = RTL_R32(TBICSR);
	if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) &&
	    (duplex == DUPLEX_FULL)) {
		RTL_W32(TBICSR, reg & ~(TBINwEnable | TBINwRestart));
	} else if (autoneg == AUTONEG_ENABLE)
		RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart);
	else {
		netif_warn(tp, link, dev,
			   "incorrect speed setting refused in TBI mode\n");
		ret = -EOPNOTSUPP;
	}

	return ret;
}
/*
 * Speed/duplex/advertisement setup for the XMII (copper) interface.
 * With autoneg enabled the ADVERTISE and CTRL1000 registers are built
 * from @adv; otherwise BMCR is forced to the requested 10/100 setting
 * (1000 cannot be forced).  Returns 0 on success, -EINVAL on a request
 * the PHY cannot satisfy.
 */
static int rtl8169_set_speed_xmii(struct net_device *dev,
				  u8 autoneg, u16 speed, u8 duplex, u32 adv)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	int giga_ctrl, bmcr;
	int rc = -EINVAL;

	/* Select PHY page 0 before touching the standard registers. */
	rtl_writephy(tp, 0x1f, 0x0000);

	if (autoneg == AUTONEG_ENABLE) {
		int auto_nego;

		auto_nego = rtl_readphy(tp, MII_ADVERTISE);
		auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
			       ADVERTISE_100HALF | ADVERTISE_100FULL);

		if (adv & ADVERTISED_10baseT_Half)
			auto_nego |= ADVERTISE_10HALF;
		if (adv & ADVERTISED_10baseT_Full)
			auto_nego |= ADVERTISE_10FULL;
		if (adv & ADVERTISED_100baseT_Half)
			auto_nego |= ADVERTISE_100HALF;
		if (adv & ADVERTISED_100baseT_Full)
			auto_nego |= ADVERTISE_100FULL;

		auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

		giga_ctrl = rtl_readphy(tp, MII_CTRL1000);
		giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);

		/* The 8100e/8101e/8102e do Fast Ethernet only. */
		if (tp->mii.supports_gmii) {
			if (adv & ADVERTISED_1000baseT_Half)
				giga_ctrl |= ADVERTISE_1000HALF;
			if (adv & ADVERTISED_1000baseT_Full)
				giga_ctrl |= ADVERTISE_1000FULL;
		} else if (adv & (ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full)) {
			netif_info(tp, link, dev,
				   "PHY does not support 1000Mbps\n");
			goto out;
		}

		bmcr = BMCR_ANENABLE | BMCR_ANRESTART;

		rtl_writephy(tp, MII_ADVERTISE, auto_nego);
		rtl_writephy(tp, MII_CTRL1000, giga_ctrl);
	} else {
		giga_ctrl = 0;

		if (speed == SPEED_10)
			bmcr = 0;
		else if (speed == SPEED_100)
			bmcr = BMCR_SPEED100;
		else
			goto out;

		if (duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}

	rtl_writephy(tp, MII_BMCR, bmcr);

	if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_03) {
		/* Vendor magic for forced 100Mbps on these two chips. */
		if ((speed == SPEED_100) && (autoneg != AUTONEG_ENABLE)) {
			rtl_writephy(tp, 0x17, 0x2138);
			rtl_writephy(tp, 0x0e, 0x0260);
		} else {
			rtl_writephy(tp, 0x17, 0x2108);
			rtl_writephy(tp, 0x0e, 0x0000);
		}
	}

	rc = 0;
out:
	return rc;
}
/*
 * Dispatch the speed change to the TBI or XMII implementation, then
 * arm the PHY timer when gigabit autoneg was requested (the timer
 * re-checks/renegotiates until the link settles).
 */
static int rtl8169_set_speed(struct net_device *dev,
			     u8 autoneg, u16 speed, u8 duplex, u32 advertising)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	int ret;

	ret = tp->set_speed(dev, autoneg, speed, duplex, advertising);
	if (ret < 0)
		goto out;

	if (netif_running(dev) && (autoneg == AUTONEG_ENABLE) &&
	    (advertising & ADVERTISED_1000baseT_Full)) {
		mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT);
	}
out:
	return ret;
}
/* ethtool set_settings handler: stop the PHY timer, apply under lock. */
static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	int ret;

	del_timer_sync(&tp->timer);

	rtl_lock_work(tp);
	ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd),
				cmd->duplex, cmd->advertising);
	rtl_unlock_work(tp);

	return ret;
}
1636 static netdev_features_t rtl8169_fix_features(struct net_device *dev,
1637 netdev_features_t features)
1639 struct rtl8169_private *tp = netdev_priv(dev);
1641 if (dev->mtu > TD_MSS_MAX)
1642 features &= ~NETIF_F_ALL_TSO;
1644 if (dev->mtu > JUMBO_1K &&
1645 !rtl_chip_infos[tp->mac_version].jumbo_tx_csum)
1646 features &= ~NETIF_F_IP_CSUM;
1648 return features;
1651 static void __rtl8169_set_features(struct net_device *dev,
1652 netdev_features_t features)
1654 struct rtl8169_private *tp = netdev_priv(dev);
1655 netdev_features_t changed = features ^ dev->features;
1656 void __iomem *ioaddr = tp->mmio_addr;
1658 if (!(changed & (NETIF_F_RXALL | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX)))
1659 return;
1661 if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX)) {
1662 if (features & NETIF_F_RXCSUM)
1663 tp->cp_cmd |= RxChkSum;
1664 else
1665 tp->cp_cmd &= ~RxChkSum;
1667 if (dev->features & NETIF_F_HW_VLAN_RX)
1668 tp->cp_cmd |= RxVlan;
1669 else
1670 tp->cp_cmd &= ~RxVlan;
1672 RTL_W16(CPlusCmd, tp->cp_cmd);
1673 RTL_R16(CPlusCmd);
1675 if (changed & NETIF_F_RXALL) {
1676 int tmp = (RTL_R32(RxConfig) & ~(AcceptErr | AcceptRunt));
1677 if (features & NETIF_F_RXALL)
1678 tmp |= (AcceptErr | AcceptRunt);
1679 RTL_W32(RxConfig, tmp);
/* ndo_set_features: apply the new feature set under the driver lock. */
static int rtl8169_set_features(struct net_device *dev,
				netdev_features_t features)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_lock_work(tp);
	__rtl8169_set_features(dev, features);
	rtl_unlock_work(tp);

	return 0;
}
/*
 * Build the opts2 VLAN field for a TX descriptor: tag-insert flag plus
 * the byte-swapped tag (the chip expects big-endian order), or 0 when
 * the skb carries no VLAN tag.
 */
static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
				      struct sk_buff *skb)
{
	return (vlan_tx_tag_present(skb)) ?
		TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
}
/*
 * Extract a hardware-stripped VLAN tag from an RX descriptor into the
 * skb, then clear opts2 so the recycled descriptor starts clean.
 */
static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
{
	u32 opts2 = le32_to_cpu(desc->opts2);

	if (opts2 & RxVlanTag)
		__vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));

	desc->opts2 = 0;
}
/*
 * ethtool get_settings for the TBI (fiber) interface: always reports
 * 1000/full over fibre; only the autoneg bit varies with TBICSR.
 */
static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u32 status;

	cmd->supported =
		SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE;
	cmd->port = PORT_FIBRE;
	cmd->transceiver = XCVR_INTERNAL;

	status = RTL_R32(TBICSR);
	cmd->advertising = (status & TBINwEnable) ?  ADVERTISED_Autoneg : 0;
	cmd->autoneg = !!(status & TBINwEnable);

	ethtool_cmd_speed_set(cmd, SPEED_1000);
	cmd->duplex = DUPLEX_FULL; /* Always set */

	return 0;
}
/* ethtool get_settings for copper: delegate to the generic mii layer. */
static int rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	return mii_ethtool_gset(&tp->mii, cmd);
}
/* ethtool get_settings handler: dispatch to TBI/XMII under the lock. */
static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	int rc;

	rtl_lock_work(tp);
	rc = tp->get_settings(dev, cmd);
	rtl_unlock_work(tp);

	return rc;
}
/* ethtool get_regs handler: dump the MMIO register window into @p. */
static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			     void *p)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	/* Clamp to the real window size in case the core asks for more. */
	if (regs->len > R8169_REGS_SIZE)
		regs->len = R8169_REGS_SIZE;

	rtl_lock_work(tp);
	memcpy_fromio(p, tp->mmio_addr, regs->len);
	rtl_unlock_work(tp);
}
/* ethtool get_msglevel handler. */
static u32 rtl8169_get_msglevel(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	return tp->msg_enable;
}
/* ethtool set_msglevel handler. */
static void rtl8169_set_msglevel(struct net_device *dev, u32 value)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	tp->msg_enable = value;
}
/*
 * ethtool statistics names; order must match the data[] indices filled
 * in rtl8169_get_ethtool_stats().
 */
static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = {
	"tx_packets",
	"rx_packets",
	"tx_errors",
	"rx_errors",
	"rx_missed",
	"align_errors",
	"tx_single_collisions",
	"tx_multi_collisions",
	"unicast",
	"broadcast",
	"multicast",
	"tx_aborted",
	"tx_underrun",
};
1796 static int rtl8169_get_sset_count(struct net_device *dev, int sset)
1798 switch (sset) {
1799 case ETH_SS_STATS:
1800 return ARRAY_SIZE(rtl8169_gstrings);
1801 default:
1802 return -EOPNOTSUPP;
/*
 * Ask the chip to DMA its hardware tally counters into a coherent
 * buffer and snapshot them into tp->counters.  Polls up to
 * 1000 * 10us for the dump to complete; on timeout the snapshot is
 * simply not updated.
 */
static void rtl8169_update_counters(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct device *d = &tp->pci_dev->dev;
	struct rtl8169_counters *counters;
	dma_addr_t paddr;
	u32 cmd;
	int wait = 1000;

	/*
	 * Some chips are unable to dump tally counters when the receiver
	 * is disabled.
	 */
	if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0)
		return;

	counters = dma_alloc_coherent(d, sizeof(*counters), &paddr, GFP_KERNEL);
	if (!counters)
		return;

	/* Program the 64-bit DMA address, then set the dump-start bit. */
	RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
	cmd = (u64)paddr & DMA_BIT_MASK(32);
	RTL_W32(CounterAddrLow, cmd);
	RTL_W32(CounterAddrLow, cmd | CounterDump);

	while (wait--) {
		/* CounterDump self-clears when the DMA is done. */
		if ((RTL_R32(CounterAddrLow) & CounterDump) == 0) {
			memcpy(&tp->counters, counters, sizeof(*counters));
			break;
		}
		udelay(10);
	}

	RTL_W32(CounterAddrLow, 0);
	RTL_W32(CounterAddrHigh, 0);

	dma_free_coherent(d, sizeof(*counters), counters, paddr);
}
/*
 * ethtool get_ethtool_stats handler: refresh the hardware tally
 * snapshot and copy it out in rtl8169_gstrings order.
 */
static void rtl8169_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	ASSERT_RTNL();

	rtl8169_update_counters(dev);

	data[0] = le64_to_cpu(tp->counters.tx_packets);
	data[1] = le64_to_cpu(tp->counters.rx_packets);
	data[2] = le64_to_cpu(tp->counters.tx_errors);
	data[3] = le32_to_cpu(tp->counters.rx_errors);
	data[4] = le16_to_cpu(tp->counters.rx_missed);
	data[5] = le16_to_cpu(tp->counters.align_errors);
	data[6] = le32_to_cpu(tp->counters.tx_one_collision);
	data[7] = le32_to_cpu(tp->counters.tx_multi_collision);
	data[8] = le64_to_cpu(tp->counters.rx_unicast);
	data[9] = le64_to_cpu(tp->counters.rx_broadcast);
	data[10] = le32_to_cpu(tp->counters.rx_multicast);
	data[11] = le16_to_cpu(tp->counters.tx_aborted);
	/* "tx_underun" is the historical (misspelled) struct field name. */
	data[12] = le16_to_cpu(tp->counters.tx_underun);
}
1870 static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1872 switch(stringset) {
1873 case ETH_SS_STATS:
1874 memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings));
1875 break;
/* ethtool operations table for the r8169 driver. */
static const struct ethtool_ops rtl8169_ethtool_ops = {
	.get_drvinfo		= rtl8169_get_drvinfo,
	.get_regs_len		= rtl8169_get_regs_len,
	.get_link		= ethtool_op_get_link,
	.get_settings		= rtl8169_get_settings,
	.set_settings		= rtl8169_set_settings,
	.get_msglevel		= rtl8169_get_msglevel,
	.set_msglevel		= rtl8169_set_msglevel,
	.get_regs		= rtl8169_get_regs,
	.get_wol		= rtl8169_get_wol,
	.set_wol		= rtl8169_set_wol,
	.get_strings		= rtl8169_get_strings,
	.get_sset_count		= rtl8169_get_sset_count,
	.get_ethtool_stats	= rtl8169_get_ethtool_stats,
	.get_ts_info		= ethtool_op_get_ts_info,
};
/*
 * Identify the chip revision from the TxConfig register.  The table is
 * walked top-down and the first (mask, val) pair that matches wins, so
 * more specific masks must precede the family catch-alls.  Unknown
 * chips fall back to @default_version with a notice.
 */
static void rtl8169_get_mac_version(struct rtl8169_private *tp,
				    struct net_device *dev, u8 default_version)
{
	void __iomem *ioaddr = tp->mmio_addr;
	/*
	 * The driver currently handles the 8168Bf and the 8168Be identically
	 * but they can be identified more specifically through the test below
	 * if needed:
	 *
	 * (RTL_R32(TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be
	 *
	 * Same thing for the 8101Eb and the 8101Ec:
	 *
	 * (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
	 */
	static const struct rtl_mac_info {
		u32 mask;
		u32 val;
		int mac_version;
	} mac_info[] = {
		/* 8168F family. */
		{ 0x7c800000, 0x48800000,	RTL_GIGA_MAC_VER_38 },
		{ 0x7cf00000, 0x48100000,	RTL_GIGA_MAC_VER_36 },
		{ 0x7cf00000, 0x48000000,	RTL_GIGA_MAC_VER_35 },

		/* 8168E family. */
		{ 0x7c800000, 0x2c800000,	RTL_GIGA_MAC_VER_34 },
		{ 0x7cf00000, 0x2c200000,	RTL_GIGA_MAC_VER_33 },
		{ 0x7cf00000, 0x2c100000,	RTL_GIGA_MAC_VER_32 },
		{ 0x7c800000, 0x2c000000,	RTL_GIGA_MAC_VER_33 },

		/* 8168D family. */
		{ 0x7cf00000, 0x28300000,	RTL_GIGA_MAC_VER_26 },
		{ 0x7cf00000, 0x28100000,	RTL_GIGA_MAC_VER_25 },
		{ 0x7c800000, 0x28000000,	RTL_GIGA_MAC_VER_26 },

		/* 8168DP family. */
		{ 0x7cf00000, 0x28800000,	RTL_GIGA_MAC_VER_27 },
		{ 0x7cf00000, 0x28a00000,	RTL_GIGA_MAC_VER_28 },
		{ 0x7cf00000, 0x28b00000,	RTL_GIGA_MAC_VER_31 },

		/* 8168C family. */
		{ 0x7cf00000, 0x3cb00000,	RTL_GIGA_MAC_VER_24 },
		{ 0x7cf00000, 0x3c900000,	RTL_GIGA_MAC_VER_23 },
		{ 0x7cf00000, 0x3c800000,	RTL_GIGA_MAC_VER_18 },
		{ 0x7c800000, 0x3c800000,	RTL_GIGA_MAC_VER_24 },
		{ 0x7cf00000, 0x3c000000,	RTL_GIGA_MAC_VER_19 },
		{ 0x7cf00000, 0x3c200000,	RTL_GIGA_MAC_VER_20 },
		{ 0x7cf00000, 0x3c300000,	RTL_GIGA_MAC_VER_21 },
		{ 0x7cf00000, 0x3c400000,	RTL_GIGA_MAC_VER_22 },
		{ 0x7c800000, 0x3c000000,	RTL_GIGA_MAC_VER_22 },

		/* 8168B family. */
		{ 0x7cf00000, 0x38000000,	RTL_GIGA_MAC_VER_12 },
		{ 0x7cf00000, 0x38500000,	RTL_GIGA_MAC_VER_17 },
		{ 0x7c800000, 0x38000000,	RTL_GIGA_MAC_VER_17 },
		{ 0x7c800000, 0x30000000,	RTL_GIGA_MAC_VER_11 },

		/* 8101 family. */
		{ 0x7cf00000, 0x44900000,	RTL_GIGA_MAC_VER_39 },
		{ 0x7c800000, 0x44800000,	RTL_GIGA_MAC_VER_39 },
		{ 0x7c800000, 0x44000000,	RTL_GIGA_MAC_VER_37 },
		{ 0x7cf00000, 0x40b00000,	RTL_GIGA_MAC_VER_30 },
		{ 0x7cf00000, 0x40a00000,	RTL_GIGA_MAC_VER_30 },
		{ 0x7cf00000, 0x40900000,	RTL_GIGA_MAC_VER_29 },
		{ 0x7c800000, 0x40800000,	RTL_GIGA_MAC_VER_30 },
		{ 0x7cf00000, 0x34a00000,	RTL_GIGA_MAC_VER_09 },
		{ 0x7cf00000, 0x24a00000,	RTL_GIGA_MAC_VER_09 },
		{ 0x7cf00000, 0x34900000,	RTL_GIGA_MAC_VER_08 },
		{ 0x7cf00000, 0x24900000,	RTL_GIGA_MAC_VER_08 },
		{ 0x7cf00000, 0x34800000,	RTL_GIGA_MAC_VER_07 },
		{ 0x7cf00000, 0x24800000,	RTL_GIGA_MAC_VER_07 },
		{ 0x7cf00000, 0x34000000,	RTL_GIGA_MAC_VER_13 },
		{ 0x7cf00000, 0x34300000,	RTL_GIGA_MAC_VER_10 },
		{ 0x7cf00000, 0x34200000,	RTL_GIGA_MAC_VER_16 },
		{ 0x7c800000, 0x34800000,	RTL_GIGA_MAC_VER_09 },
		{ 0x7c800000, 0x24800000,	RTL_GIGA_MAC_VER_09 },
		{ 0x7c800000, 0x34000000,	RTL_GIGA_MAC_VER_16 },
		/* FIXME: where did these entries come from ? -- FR */
		{ 0xfc800000, 0x38800000,	RTL_GIGA_MAC_VER_15 },
		{ 0xfc800000, 0x30800000,	RTL_GIGA_MAC_VER_14 },

		/* 8110 family. */
		{ 0xfc800000, 0x98000000,	RTL_GIGA_MAC_VER_06 },
		{ 0xfc800000, 0x18000000,	RTL_GIGA_MAC_VER_05 },
		{ 0xfc800000, 0x10000000,	RTL_GIGA_MAC_VER_04 },
		{ 0xfc800000, 0x04000000,	RTL_GIGA_MAC_VER_03 },
		{ 0xfc800000, 0x00800000,	RTL_GIGA_MAC_VER_02 },
		{ 0xfc800000, 0x00000000,	RTL_GIGA_MAC_VER_01 },

		/* Catch-all */
		{ 0x00000000, 0x00000000,	RTL_GIGA_MAC_NONE   }
	};
	const struct rtl_mac_info *p = mac_info;
	u32 reg;

	reg = RTL_R32(TxConfig);
	/* The catch-all entry guarantees this loop terminates. */
	while ((reg & p->mask) != p->val)
		p++;
	tp->mac_version = p->mac_version;

	if (tp->mac_version == RTL_GIGA_MAC_NONE) {
		netif_notice(tp, probe, dev,
			     "unknown MAC, using family default\n");
		tp->mac_version = default_version;
	}
}
/* Debug helper: print the detected MAC version (RTL8169_DEBUG only). */
static void rtl8169_print_mac_version(struct rtl8169_private *tp)
{
	dprintk("mac_version = 0x%02x\n", tp->mac_version);
}
/* One entry of a batched PHY register initialisation table. */
struct phy_reg {
	u16 reg;	/* PHY register index (0x1f selects the page) */
	u16 val;	/* value to write */
};
2014 static void rtl_writephy_batch(struct rtl8169_private *tp,
2015 const struct phy_reg *regs, int len)
2017 while (len-- > 0) {
2018 rtl_writephy(tp, regs->reg, regs->val);
2019 regs++;
/*
 * Opcodes of the PHY firmware bytecode interpreted by
 * rtl_phy_write_fw().  Each 32-bit instruction encodes the opcode in
 * bits 31-28, a register/count operand in bits 27-16 and immediate
 * data in bits 15-0.  The three opcodes rejected by rtl_fw_data_ok()
 * (READ_MAC_BYTE, WRITE_MAC_BYTE, WRITE_ERI_WORD) are defined but not
 * implemented.
 */
#define PHY_READ		0x00000000
#define PHY_DATA_OR		0x10000000
#define PHY_DATA_AND		0x20000000
#define PHY_BJMPN		0x30000000
#define PHY_READ_EFUSE		0x40000000
#define PHY_READ_MAC_BYTE	0x50000000
#define PHY_WRITE_MAC_BYTE	0x60000000
#define PHY_CLEAR_READCOUNT	0x70000000
#define PHY_WRITE		0x80000000
#define PHY_READCOUNT_EQ_SKIP	0x90000000
#define PHY_COMP_EQ_SKIPN	0xa0000000
#define PHY_COMP_NEQ_SKIPN	0xb0000000
#define PHY_WRITE_PREVIOUS	0xc0000000
#define PHY_SKIPN		0xd0000000
#define PHY_DELAY_MS		0xe0000000
#define PHY_WRITE_ERI_WORD	0xf0000000
/* On-disk header of a new-style firmware image (magic == 0). */
struct fw_info {
	u32	magic;			/* 0 for this header format */
	char	version[RTL_VER_SIZE];	/* human-readable version string */
	__le32	fw_start;		/* byte offset of the opcode stream */
	__le32	fw_len;			/* opcode count */
	u8	chksum;			/* makes the whole file sum to 0 */
} __packed;
/* Size in bytes of one firmware opcode. */
#define FW_OPCODE_SIZE	sizeof(typeof(*((struct rtl_fw_phy_action *)0)->code))

/*
 * Validate the firmware image layout and locate the opcode stream.
 * New-style images (fw_info.magic == 0) carry a header with version,
 * offset, length and a whole-file checksum; legacy images are a bare
 * opcode stream.  Fills rtl_fw->version and rtl_fw->phy_action.
 * Returns false on any structural problem.
 */
static bool rtl_fw_format_ok(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
{
	const struct firmware *fw = rtl_fw->fw;
	struct fw_info *fw_info = (struct fw_info *)fw->data;
	struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
	char *version = rtl_fw->version;
	bool rc = false;

	if (fw->size < FW_OPCODE_SIZE)
		goto out;

	if (!fw_info->magic) {
		size_t i, size, start;
		u8 checksum = 0;

		if (fw->size < sizeof(*fw_info))
			goto out;

		/* All bytes of the file must sum to zero (mod 256). */
		for (i = 0; i < fw->size; i++)
			checksum += fw->data[i];
		if (checksum != 0)
			goto out;

		start = le32_to_cpu(fw_info->fw_start);
		if (start > fw->size)
			goto out;

		size = le32_to_cpu(fw_info->fw_len);
		if (size > (fw->size - start) / FW_OPCODE_SIZE)
			goto out;

		memcpy(version, fw_info->version, RTL_VER_SIZE);

		pa->code = (__le32 *)(fw->data + start);
		pa->size = size;
	} else {
		/* Legacy format: the file is the raw opcode stream. */
		if (fw->size % FW_OPCODE_SIZE)
			goto out;

		strlcpy(version, rtl_lookup_firmware_name(tp), RTL_VER_SIZE);

		pa->code = (__le32 *)fw->data;
		pa->size = fw->size / FW_OPCODE_SIZE;
	}
	version[RTL_VER_SIZE - 1] = 0;

	rc = true;
out:
	return rc;
}
/*
 * Static sanity check of the firmware opcode stream: every jump/skip
 * target must stay inside the program and only implemented opcodes are
 * accepted.  Returns false (and logs) on the first invalid instruction.
 */
static bool rtl_fw_data_ok(struct rtl8169_private *tp, struct net_device *dev,
			   struct rtl_fw_phy_action *pa)
{
	bool rc = false;
	size_t index;

	for (index = 0; index < pa->size; index++) {
		u32 action = le32_to_cpu(pa->code[index]);
		u32 regno = (action & 0x0fff0000) >> 16;

		switch(action & 0xf0000000) {
		case PHY_READ:
		case PHY_DATA_OR:
		case PHY_DATA_AND:
		case PHY_READ_EFUSE:
		case PHY_CLEAR_READCOUNT:
		case PHY_WRITE:
		case PHY_WRITE_PREVIOUS:
		case PHY_DELAY_MS:
			break;

		case PHY_BJMPN:
			/* Backward jump must not go before the program start. */
			if (regno > index) {
				netif_err(tp, ifup, tp->dev,
					  "Out of range of firmware\n");
				goto out;
			}
			break;
		case PHY_READCOUNT_EQ_SKIP:
			/* May skip two instructions forward. */
			if (index + 2 >= pa->size) {
				netif_err(tp, ifup, tp->dev,
					  "Out of range of firmware\n");
				goto out;
			}
			break;
		case PHY_COMP_EQ_SKIPN:
		case PHY_COMP_NEQ_SKIPN:
		case PHY_SKIPN:
			/* May skip regno instructions forward. */
			if (index + 1 + regno >= pa->size) {
				netif_err(tp, ifup, tp->dev,
					  "Out of range of firmware\n");
				goto out;
			}
			break;

		case PHY_READ_MAC_BYTE:
		case PHY_WRITE_MAC_BYTE:
		case PHY_WRITE_ERI_WORD:
		default:
			netif_err(tp, ifup, tp->dev,
				  "Invalid action 0x%08x\n", action);
			goto out;
		}
	}
	rc = true;
out:
	return rc;
}
2160 static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2162 struct net_device *dev = tp->dev;
2163 int rc = -EINVAL;
2165 if (!rtl_fw_format_ok(tp, rtl_fw)) {
2166 netif_err(tp, ifup, dev, "invalid firwmare\n");
2167 goto out;
2170 if (rtl_fw_data_ok(tp, dev, &rtl_fw->phy_action))
2171 rc = 0;
2172 out:
2173 return rc;
/*
 * Interpreter for the validated PHY firmware bytecode.  State:
 * @predata holds the last value read (PHY_READ/PHY_READ_EFUSE) and is
 * the operand of OR/AND/WRITE_PREVIOUS/COMP_* opcodes; @count tracks
 * the number of reads since the last PHY_CLEAR_READCOUNT and drives
 * PHY_READCOUNT_EQ_SKIP.  A zero opcode terminates the program early.
 * Unimplemented opcodes BUG() — rtl_check_firmware() rejected them
 * before this runs.
 */
static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
{
	struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
	u32 predata, count;
	size_t index;

	predata = count = 0;

	for (index = 0; index < pa->size; ) {
		u32 action = le32_to_cpu(pa->code[index]);
		u32 data = action & 0x0000ffff;
		u32 regno = (action & 0x0fff0000) >> 16;

		if (!action)
			break;

		switch(action & 0xf0000000) {
		case PHY_READ:
			predata = rtl_readphy(tp, regno);
			count++;
			index++;
			break;
		case PHY_DATA_OR:
			predata |= data;
			index++;
			break;
		case PHY_DATA_AND:
			predata &= data;
			index++;
			break;
		case PHY_BJMPN:
			/* Branch backward by regno instructions. */
			index -= regno;
			break;
		case PHY_READ_EFUSE:
			predata = rtl8168d_efuse_read(tp, regno);
			index++;
			break;
		case PHY_CLEAR_READCOUNT:
			count = 0;
			index++;
			break;
		case PHY_WRITE:
			rtl_writephy(tp, regno, data);
			index++;
			break;
		case PHY_READCOUNT_EQ_SKIP:
			/* Skip the next instruction when count == data. */
			index += (count == data) ? 2 : 1;
			break;
		case PHY_COMP_EQ_SKIPN:
			if (predata == data)
				index += regno;
			index++;
			break;
		case PHY_COMP_NEQ_SKIPN:
			if (predata != data)
				index += regno;
			index++;
			break;
		case PHY_WRITE_PREVIOUS:
			rtl_writephy(tp, regno, predata);
			index++;
			break;
		case PHY_SKIPN:
			index += regno + 1;
			break;
		case PHY_DELAY_MS:
			mdelay(data);
			index++;
			break;

		case PHY_READ_MAC_BYTE:
		case PHY_WRITE_MAC_BYTE:
		case PHY_WRITE_ERI_WORD:
		default:
			BUG();
		}
	}
}
/*
 * Drop any loaded firmware image and reset tp->rtl_fw to the
 * "not yet requested" sentinel.
 */
static void rtl_release_firmware(struct rtl8169_private *tp)
{
	if (!IS_ERR_OR_NULL(tp->rtl_fw)) {
		release_firmware(tp->rtl_fw->fw);
		kfree(tp->rtl_fw);
	}
	tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
}
/* Run the PHY firmware program, if a valid image has been loaded. */
static void rtl_apply_firmware(struct rtl8169_private *tp)
{
	struct rtl_fw *rtl_fw = tp->rtl_fw;

	/* TODO: release firmware once rtl_phy_write_fw signals failures. */
	if (!IS_ERR_OR_NULL(rtl_fw))
		rtl_phy_write_fw(tp, rtl_fw);
}
2273 static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
2275 if (rtl_readphy(tp, reg) != val)
2276 netif_warn(tp, hw, tp->dev, "chipset not ready for firmware\n");
2277 else
2278 rtl_apply_firmware(tp);
/*
 * PHY initialisation for the RTL8169s.  Vendor-provided register
 * values; 0x1f writes select the PHY page.
 */
static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x06, 0x006e },
		{ 0x08, 0x0708 },
		{ 0x15, 0x4000 },
		{ 0x18, 0x65c7 },

		{ 0x1f, 0x0001 },
		{ 0x03, 0x00a1 },
		{ 0x02, 0x0008 },
		{ 0x01, 0x0120 },
		{ 0x00, 0x1000 },
		{ 0x04, 0x0800 },
		{ 0x04, 0x0000 },

		{ 0x03, 0xff41 },
		{ 0x02, 0xdf60 },
		{ 0x01, 0x0140 },
		{ 0x00, 0x0077 },
		{ 0x04, 0x7800 },
		{ 0x04, 0x7000 },

		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf0f9 },
		{ 0x04, 0x9800 },
		{ 0x04, 0x9000 },

		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0xff95 },
		{ 0x00, 0xba00 },
		{ 0x04, 0xa800 },
		{ 0x04, 0xa000 },

		{ 0x03, 0xff41 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x0140 },
		{ 0x00, 0x00bb },
		{ 0x04, 0xb800 },
		{ 0x04, 0xb000 },

		{ 0x03, 0xdf41 },
		{ 0x02, 0xdc60 },
		{ 0x01, 0x6340 },
		{ 0x00, 0x007d },
		{ 0x04, 0xd800 },
		{ 0x04, 0xd000 },

		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x100a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0xf000 },

		{ 0x1f, 0x0000 },
		{ 0x0b, 0x0000 },
		{ 0x00, 0x9200 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
/* PHY initialisation for the RTL8169sb: a single vendor tweak. */
static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0002 },
		{ 0x01, 0x90d0 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
/* Extra PHY tweak for Gigabyte boards (subsystem id 0xe000) only. */
static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp)
{
	struct pci_dev *pdev = tp->pci_dev;

	if ((pdev->subsystem_vendor != PCI_VENDOR_ID_GIGABYTE) ||
	    (pdev->subsystem_device != 0xe000))
		return;

	rtl_writephy(tp, 0x1f, 0x0001);
	rtl_writephy(tp, 0x10, 0xf01b);
	rtl_writephy(tp, 0x1f, 0x0000);
}
/*
 * PHY initialisation for the RTL8169scd, plus the Gigabyte board
 * quirk.  Vendor-provided register values.
 */
static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x04, 0x0000 },
		{ 0x03, 0x00a1 },
		{ 0x02, 0x0008 },
		{ 0x01, 0x0120 },
		{ 0x00, 0x1000 },
		{ 0x04, 0x0800 },
		{ 0x04, 0x9000 },
		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf099 },
		{ 0x04, 0x9800 },
		{ 0x04, 0xa000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0xff95 },
		{ 0x00, 0xba00 },
		{ 0x04, 0xa800 },
		{ 0x04, 0xf000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x101a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x10, 0xf41b },
		{ 0x14, 0xfb54 },
		{ 0x18, 0xf5c7 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl8169scd_hw_phy_config_quirk(tp);
}
2419 static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp)
2421 static const struct phy_reg phy_reg_init[] = {
2422 { 0x1f, 0x0001 },
2423 { 0x04, 0x0000 },
2424 { 0x03, 0x00a1 },
2425 { 0x02, 0x0008 },
2426 { 0x01, 0x0120 },
2427 { 0x00, 0x1000 },
2428 { 0x04, 0x0800 },
2429 { 0x04, 0x9000 },
2430 { 0x03, 0x802f },
2431 { 0x02, 0x4f02 },
2432 { 0x01, 0x0409 },
2433 { 0x00, 0xf099 },
2434 { 0x04, 0x9800 },
2435 { 0x04, 0xa000 },
2436 { 0x03, 0xdf01 },
2437 { 0x02, 0xdf20 },
2438 { 0x01, 0xff95 },
2439 { 0x00, 0xba00 },
2440 { 0x04, 0xa800 },
2441 { 0x04, 0xf000 },
2442 { 0x03, 0xdf01 },
2443 { 0x02, 0xdf20 },
2444 { 0x01, 0x101a },
2445 { 0x00, 0xa0ff },
2446 { 0x04, 0xf800 },
2447 { 0x04, 0x0000 },
2448 { 0x1f, 0x0000 },
2450 { 0x1f, 0x0001 },
2451 { 0x0b, 0x8480 },
2452 { 0x1f, 0x0000 },
2454 { 0x1f, 0x0001 },
2455 { 0x18, 0x67c7 },
2456 { 0x04, 0x2000 },
2457 { 0x03, 0x002f },
2458 { 0x02, 0x4360 },
2459 { 0x01, 0x0109 },
2460 { 0x00, 0x3022 },
2461 { 0x04, 0x2800 },
2462 { 0x1f, 0x0000 },
2464 { 0x1f, 0x0001 },
2465 { 0x17, 0x0cc0 },
2466 { 0x1f, 0x0000 }
2469 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2472 static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp)
2474 static const struct phy_reg phy_reg_init[] = {
2475 { 0x10, 0xf41b },
2476 { 0x1f, 0x0000 }
2479 rtl_writephy(tp, 0x1f, 0x0001);
2480 rtl_patchphy(tp, 0x16, 1 << 0);
2482 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2485 static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp)
2487 static const struct phy_reg phy_reg_init[] = {
2488 { 0x1f, 0x0001 },
2489 { 0x10, 0xf41b },
2490 { 0x1f, 0x0000 }
2493 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2496 static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp)
2498 static const struct phy_reg phy_reg_init[] = {
2499 { 0x1f, 0x0000 },
2500 { 0x1d, 0x0f00 },
2501 { 0x1f, 0x0002 },
2502 { 0x0c, 0x1ec8 },
2503 { 0x1f, 0x0000 }
2506 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2509 static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp)
2511 static const struct phy_reg phy_reg_init[] = {
2512 { 0x1f, 0x0001 },
2513 { 0x1d, 0x3d98 },
2514 { 0x1f, 0x0000 }
2517 rtl_writephy(tp, 0x1f, 0x0000);
2518 rtl_patchphy(tp, 0x14, 1 << 5);
2519 rtl_patchphy(tp, 0x0d, 1 << 5);
2521 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2524 static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp)
2526 static const struct phy_reg phy_reg_init[] = {
2527 { 0x1f, 0x0001 },
2528 { 0x12, 0x2300 },
2529 { 0x1f, 0x0002 },
2530 { 0x00, 0x88d4 },
2531 { 0x01, 0x82b1 },
2532 { 0x03, 0x7002 },
2533 { 0x08, 0x9e30 },
2534 { 0x09, 0x01f0 },
2535 { 0x0a, 0x5500 },
2536 { 0x0c, 0x00c8 },
2537 { 0x1f, 0x0003 },
2538 { 0x12, 0xc096 },
2539 { 0x16, 0x000a },
2540 { 0x1f, 0x0000 },
2541 { 0x1f, 0x0000 },
2542 { 0x09, 0x2000 },
2543 { 0x09, 0x0000 }
2546 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2548 rtl_patchphy(tp, 0x14, 1 << 5);
2549 rtl_patchphy(tp, 0x0d, 1 << 5);
2550 rtl_writephy(tp, 0x1f, 0x0000);
2553 static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp)
2555 static const struct phy_reg phy_reg_init[] = {
2556 { 0x1f, 0x0001 },
2557 { 0x12, 0x2300 },
2558 { 0x03, 0x802f },
2559 { 0x02, 0x4f02 },
2560 { 0x01, 0x0409 },
2561 { 0x00, 0xf099 },
2562 { 0x04, 0x9800 },
2563 { 0x04, 0x9000 },
2564 { 0x1d, 0x3d98 },
2565 { 0x1f, 0x0002 },
2566 { 0x0c, 0x7eb8 },
2567 { 0x06, 0x0761 },
2568 { 0x1f, 0x0003 },
2569 { 0x16, 0x0f0a },
2570 { 0x1f, 0x0000 }
2573 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2575 rtl_patchphy(tp, 0x16, 1 << 0);
2576 rtl_patchphy(tp, 0x14, 1 << 5);
2577 rtl_patchphy(tp, 0x0d, 1 << 5);
2578 rtl_writephy(tp, 0x1f, 0x0000);
2581 static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp)
2583 static const struct phy_reg phy_reg_init[] = {
2584 { 0x1f, 0x0001 },
2585 { 0x12, 0x2300 },
2586 { 0x1d, 0x3d98 },
2587 { 0x1f, 0x0002 },
2588 { 0x0c, 0x7eb8 },
2589 { 0x06, 0x5461 },
2590 { 0x1f, 0x0003 },
2591 { 0x16, 0x0f0a },
2592 { 0x1f, 0x0000 }
2595 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2597 rtl_patchphy(tp, 0x16, 1 << 0);
2598 rtl_patchphy(tp, 0x14, 1 << 5);
2599 rtl_patchphy(tp, 0x0d, 1 << 5);
2600 rtl_writephy(tp, 0x1f, 0x0000);
/* The fourth RTL8168C flavor uses the same PHY init as the third. */
static void rtl8168c_4_hw_phy_config(struct rtl8169_private *tp)
{
	rtl8168c_3_hw_phy_config(tp);
}
2608 static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
2610 static const struct phy_reg phy_reg_init_0[] = {
2611 /* Channel Estimation */
2612 { 0x1f, 0x0001 },
2613 { 0x06, 0x4064 },
2614 { 0x07, 0x2863 },
2615 { 0x08, 0x059c },
2616 { 0x09, 0x26b4 },
2617 { 0x0a, 0x6a19 },
2618 { 0x0b, 0xdcc8 },
2619 { 0x10, 0xf06d },
2620 { 0x14, 0x7f68 },
2621 { 0x18, 0x7fd9 },
2622 { 0x1c, 0xf0ff },
2623 { 0x1d, 0x3d9c },
2624 { 0x1f, 0x0003 },
2625 { 0x12, 0xf49f },
2626 { 0x13, 0x070b },
2627 { 0x1a, 0x05ad },
2628 { 0x14, 0x94c0 },
2631 * Tx Error Issue
2632 * Enhance line driver power
2634 { 0x1f, 0x0002 },
2635 { 0x06, 0x5561 },
2636 { 0x1f, 0x0005 },
2637 { 0x05, 0x8332 },
2638 { 0x06, 0x5561 },
2641 * Can not link to 1Gbps with bad cable
2642 * Decrease SNR threshold form 21.07dB to 19.04dB
2644 { 0x1f, 0x0001 },
2645 { 0x17, 0x0cc0 },
2647 { 0x1f, 0x0000 },
2648 { 0x0d, 0xf880 }
2651 rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
2654 * Rx Error Issue
2655 * Fine Tune Switching regulator parameter
2657 rtl_writephy(tp, 0x1f, 0x0002);
2658 rtl_w1w0_phy(tp, 0x0b, 0x0010, 0x00ef);
2659 rtl_w1w0_phy(tp, 0x0c, 0xa200, 0x5d00);
2661 if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
2662 static const struct phy_reg phy_reg_init[] = {
2663 { 0x1f, 0x0002 },
2664 { 0x05, 0x669a },
2665 { 0x1f, 0x0005 },
2666 { 0x05, 0x8330 },
2667 { 0x06, 0x669a },
2668 { 0x1f, 0x0002 }
2670 int val;
2672 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2674 val = rtl_readphy(tp, 0x0d);
2676 if ((val & 0x00ff) != 0x006c) {
2677 static const u32 set[] = {
2678 0x0065, 0x0066, 0x0067, 0x0068,
2679 0x0069, 0x006a, 0x006b, 0x006c
2681 int i;
2683 rtl_writephy(tp, 0x1f, 0x0002);
2685 val &= 0xff00;
2686 for (i = 0; i < ARRAY_SIZE(set); i++)
2687 rtl_writephy(tp, 0x0d, val | set[i]);
2689 } else {
2690 static const struct phy_reg phy_reg_init[] = {
2691 { 0x1f, 0x0002 },
2692 { 0x05, 0x6662 },
2693 { 0x1f, 0x0005 },
2694 { 0x05, 0x8330 },
2695 { 0x06, 0x6662 }
2698 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2701 /* RSET couple improve */
2702 rtl_writephy(tp, 0x1f, 0x0002);
2703 rtl_patchphy(tp, 0x0d, 0x0300);
2704 rtl_patchphy(tp, 0x0f, 0x0010);
2706 /* Fine tune PLL performance */
2707 rtl_writephy(tp, 0x1f, 0x0002);
2708 rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
2709 rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);
2711 rtl_writephy(tp, 0x1f, 0x0005);
2712 rtl_writephy(tp, 0x05, 0x001b);
2714 rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xbf00);
2716 rtl_writephy(tp, 0x1f, 0x0000);
2719 static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
2721 static const struct phy_reg phy_reg_init_0[] = {
2722 /* Channel Estimation */
2723 { 0x1f, 0x0001 },
2724 { 0x06, 0x4064 },
2725 { 0x07, 0x2863 },
2726 { 0x08, 0x059c },
2727 { 0x09, 0x26b4 },
2728 { 0x0a, 0x6a19 },
2729 { 0x0b, 0xdcc8 },
2730 { 0x10, 0xf06d },
2731 { 0x14, 0x7f68 },
2732 { 0x18, 0x7fd9 },
2733 { 0x1c, 0xf0ff },
2734 { 0x1d, 0x3d9c },
2735 { 0x1f, 0x0003 },
2736 { 0x12, 0xf49f },
2737 { 0x13, 0x070b },
2738 { 0x1a, 0x05ad },
2739 { 0x14, 0x94c0 },
2742 * Tx Error Issue
2743 * Enhance line driver power
2745 { 0x1f, 0x0002 },
2746 { 0x06, 0x5561 },
2747 { 0x1f, 0x0005 },
2748 { 0x05, 0x8332 },
2749 { 0x06, 0x5561 },
2752 * Can not link to 1Gbps with bad cable
2753 * Decrease SNR threshold form 21.07dB to 19.04dB
2755 { 0x1f, 0x0001 },
2756 { 0x17, 0x0cc0 },
2758 { 0x1f, 0x0000 },
2759 { 0x0d, 0xf880 }
2762 rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
2764 if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
2765 static const struct phy_reg phy_reg_init[] = {
2766 { 0x1f, 0x0002 },
2767 { 0x05, 0x669a },
2768 { 0x1f, 0x0005 },
2769 { 0x05, 0x8330 },
2770 { 0x06, 0x669a },
2772 { 0x1f, 0x0002 }
2774 int val;
2776 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2778 val = rtl_readphy(tp, 0x0d);
2779 if ((val & 0x00ff) != 0x006c) {
2780 static const u32 set[] = {
2781 0x0065, 0x0066, 0x0067, 0x0068,
2782 0x0069, 0x006a, 0x006b, 0x006c
2784 int i;
2786 rtl_writephy(tp, 0x1f, 0x0002);
2788 val &= 0xff00;
2789 for (i = 0; i < ARRAY_SIZE(set); i++)
2790 rtl_writephy(tp, 0x0d, val | set[i]);
2792 } else {
2793 static const struct phy_reg phy_reg_init[] = {
2794 { 0x1f, 0x0002 },
2795 { 0x05, 0x2642 },
2796 { 0x1f, 0x0005 },
2797 { 0x05, 0x8330 },
2798 { 0x06, 0x2642 }
2801 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2804 /* Fine tune PLL performance */
2805 rtl_writephy(tp, 0x1f, 0x0002);
2806 rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
2807 rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);
2809 /* Switching regulator Slew rate */
2810 rtl_writephy(tp, 0x1f, 0x0002);
2811 rtl_patchphy(tp, 0x0f, 0x0017);
2813 rtl_writephy(tp, 0x1f, 0x0005);
2814 rtl_writephy(tp, 0x05, 0x001b);
2816 rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xb300);
2818 rtl_writephy(tp, 0x1f, 0x0000);
2821 static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp)
2823 static const struct phy_reg phy_reg_init[] = {
2824 { 0x1f, 0x0002 },
2825 { 0x10, 0x0008 },
2826 { 0x0d, 0x006c },
2828 { 0x1f, 0x0000 },
2829 { 0x0d, 0xf880 },
2831 { 0x1f, 0x0001 },
2832 { 0x17, 0x0cc0 },
2834 { 0x1f, 0x0001 },
2835 { 0x0b, 0xa4d8 },
2836 { 0x09, 0x281c },
2837 { 0x07, 0x2883 },
2838 { 0x0a, 0x6b35 },
2839 { 0x1d, 0x3da4 },
2840 { 0x1c, 0xeffd },
2841 { 0x14, 0x7f52 },
2842 { 0x18, 0x7fc6 },
2843 { 0x08, 0x0601 },
2844 { 0x06, 0x4063 },
2845 { 0x10, 0xf074 },
2846 { 0x1f, 0x0003 },
2847 { 0x13, 0x0789 },
2848 { 0x12, 0xf4bd },
2849 { 0x1a, 0x04fd },
2850 { 0x14, 0x84b0 },
2851 { 0x1f, 0x0000 },
2852 { 0x00, 0x9200 },
2854 { 0x1f, 0x0005 },
2855 { 0x01, 0x0340 },
2856 { 0x1f, 0x0001 },
2857 { 0x04, 0x4000 },
2858 { 0x03, 0x1d21 },
2859 { 0x02, 0x0c32 },
2860 { 0x01, 0x0200 },
2861 { 0x00, 0x5554 },
2862 { 0x04, 0x4800 },
2863 { 0x04, 0x4000 },
2864 { 0x04, 0xf000 },
2865 { 0x03, 0xdf01 },
2866 { 0x02, 0xdf20 },
2867 { 0x01, 0x101a },
2868 { 0x00, 0xa0ff },
2869 { 0x04, 0xf800 },
2870 { 0x04, 0xf000 },
2871 { 0x1f, 0x0000 },
2873 { 0x1f, 0x0007 },
2874 { 0x1e, 0x0023 },
2875 { 0x16, 0x0000 },
2876 { 0x1f, 0x0000 }
2879 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2882 static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp)
2884 static const struct phy_reg phy_reg_init[] = {
2885 { 0x1f, 0x0001 },
2886 { 0x17, 0x0cc0 },
2888 { 0x1f, 0x0007 },
2889 { 0x1e, 0x002d },
2890 { 0x18, 0x0040 },
2891 { 0x1f, 0x0000 }
2894 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2895 rtl_patchphy(tp, 0x0d, 1 << 5);
2898 static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp)
2900 static const struct phy_reg phy_reg_init[] = {
2901 /* Enable Delay cap */
2902 { 0x1f, 0x0005 },
2903 { 0x05, 0x8b80 },
2904 { 0x06, 0xc896 },
2905 { 0x1f, 0x0000 },
2907 /* Channel estimation fine tune */
2908 { 0x1f, 0x0001 },
2909 { 0x0b, 0x6c20 },
2910 { 0x07, 0x2872 },
2911 { 0x1c, 0xefff },
2912 { 0x1f, 0x0003 },
2913 { 0x14, 0x6420 },
2914 { 0x1f, 0x0000 },
2916 /* Update PFM & 10M TX idle timer */
2917 { 0x1f, 0x0007 },
2918 { 0x1e, 0x002f },
2919 { 0x15, 0x1919 },
2920 { 0x1f, 0x0000 },
2922 { 0x1f, 0x0007 },
2923 { 0x1e, 0x00ac },
2924 { 0x18, 0x0006 },
2925 { 0x1f, 0x0000 }
2928 rtl_apply_firmware(tp);
2930 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2932 /* DCO enable for 10M IDLE Power */
2933 rtl_writephy(tp, 0x1f, 0x0007);
2934 rtl_writephy(tp, 0x1e, 0x0023);
2935 rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
2936 rtl_writephy(tp, 0x1f, 0x0000);
2938 /* For impedance matching */
2939 rtl_writephy(tp, 0x1f, 0x0002);
2940 rtl_w1w0_phy(tp, 0x08, 0x8000, 0x7f00);
2941 rtl_writephy(tp, 0x1f, 0x0000);
2943 /* PHY auto speed down */
2944 rtl_writephy(tp, 0x1f, 0x0007);
2945 rtl_writephy(tp, 0x1e, 0x002d);
2946 rtl_w1w0_phy(tp, 0x18, 0x0050, 0x0000);
2947 rtl_writephy(tp, 0x1f, 0x0000);
2948 rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
2950 rtl_writephy(tp, 0x1f, 0x0005);
2951 rtl_writephy(tp, 0x05, 0x8b86);
2952 rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
2953 rtl_writephy(tp, 0x1f, 0x0000);
2955 rtl_writephy(tp, 0x1f, 0x0005);
2956 rtl_writephy(tp, 0x05, 0x8b85);
2957 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
2958 rtl_writephy(tp, 0x1f, 0x0007);
2959 rtl_writephy(tp, 0x1e, 0x0020);
2960 rtl_w1w0_phy(tp, 0x15, 0x0000, 0x1100);
2961 rtl_writephy(tp, 0x1f, 0x0006);
2962 rtl_writephy(tp, 0x00, 0x5a00);
2963 rtl_writephy(tp, 0x1f, 0x0000);
2964 rtl_writephy(tp, 0x0d, 0x0007);
2965 rtl_writephy(tp, 0x0e, 0x003c);
2966 rtl_writephy(tp, 0x0d, 0x4007);
2967 rtl_writephy(tp, 0x0e, 0x0000);
2968 rtl_writephy(tp, 0x0d, 0x0000);
2971 static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
2973 static const struct phy_reg phy_reg_init[] = {
2974 /* Enable Delay cap */
2975 { 0x1f, 0x0004 },
2976 { 0x1f, 0x0007 },
2977 { 0x1e, 0x00ac },
2978 { 0x18, 0x0006 },
2979 { 0x1f, 0x0002 },
2980 { 0x1f, 0x0000 },
2981 { 0x1f, 0x0000 },
2983 /* Channel estimation fine tune */
2984 { 0x1f, 0x0003 },
2985 { 0x09, 0xa20f },
2986 { 0x1f, 0x0000 },
2987 { 0x1f, 0x0000 },
2989 /* Green Setting */
2990 { 0x1f, 0x0005 },
2991 { 0x05, 0x8b5b },
2992 { 0x06, 0x9222 },
2993 { 0x05, 0x8b6d },
2994 { 0x06, 0x8000 },
2995 { 0x05, 0x8b76 },
2996 { 0x06, 0x8000 },
2997 { 0x1f, 0x0000 }
3000 rtl_apply_firmware(tp);
3002 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3004 /* For 4-corner performance improve */
3005 rtl_writephy(tp, 0x1f, 0x0005);
3006 rtl_writephy(tp, 0x05, 0x8b80);
3007 rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
3008 rtl_writephy(tp, 0x1f, 0x0000);
3010 /* PHY auto speed down */
3011 rtl_writephy(tp, 0x1f, 0x0004);
3012 rtl_writephy(tp, 0x1f, 0x0007);
3013 rtl_writephy(tp, 0x1e, 0x002d);
3014 rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
3015 rtl_writephy(tp, 0x1f, 0x0002);
3016 rtl_writephy(tp, 0x1f, 0x0000);
3017 rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
3019 /* improve 10M EEE waveform */
3020 rtl_writephy(tp, 0x1f, 0x0005);
3021 rtl_writephy(tp, 0x05, 0x8b86);
3022 rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
3023 rtl_writephy(tp, 0x1f, 0x0000);
3025 /* Improve 2-pair detection performance */
3026 rtl_writephy(tp, 0x1f, 0x0005);
3027 rtl_writephy(tp, 0x05, 0x8b85);
3028 rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
3029 rtl_writephy(tp, 0x1f, 0x0000);
3031 /* EEE setting */
3032 rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003, ERIAR_EXGMAC);
3033 rtl_writephy(tp, 0x1f, 0x0005);
3034 rtl_writephy(tp, 0x05, 0x8b85);
3035 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
3036 rtl_writephy(tp, 0x1f, 0x0004);
3037 rtl_writephy(tp, 0x1f, 0x0007);
3038 rtl_writephy(tp, 0x1e, 0x0020);
3039 rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100);
3040 rtl_writephy(tp, 0x1f, 0x0002);
3041 rtl_writephy(tp, 0x1f, 0x0000);
3042 rtl_writephy(tp, 0x0d, 0x0007);
3043 rtl_writephy(tp, 0x0e, 0x003c);
3044 rtl_writephy(tp, 0x0d, 0x4007);
3045 rtl_writephy(tp, 0x0e, 0x0000);
3046 rtl_writephy(tp, 0x0d, 0x0000);
3048 /* Green feature */
3049 rtl_writephy(tp, 0x1f, 0x0003);
3050 rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
3051 rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
3052 rtl_writephy(tp, 0x1f, 0x0000);
/* Common RTL8168F PHY tweaks shared by the F-1, F-2 and 8411 paths. */
static void rtl8168f_hw_phy_config(struct rtl8169_private *tp)
{
	/* For 4-corner performance improve */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b80);
	rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* PHY auto speed down */
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x002d);
	rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);

	/* Improve 10M EEE waveform */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b86);
	rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
}
3077 static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
3079 static const struct phy_reg phy_reg_init[] = {
3080 /* Channel estimation fine tune */
3081 { 0x1f, 0x0003 },
3082 { 0x09, 0xa20f },
3083 { 0x1f, 0x0000 },
3085 /* Modify green table for giga & fnet */
3086 { 0x1f, 0x0005 },
3087 { 0x05, 0x8b55 },
3088 { 0x06, 0x0000 },
3089 { 0x05, 0x8b5e },
3090 { 0x06, 0x0000 },
3091 { 0x05, 0x8b67 },
3092 { 0x06, 0x0000 },
3093 { 0x05, 0x8b70 },
3094 { 0x06, 0x0000 },
3095 { 0x1f, 0x0000 },
3096 { 0x1f, 0x0007 },
3097 { 0x1e, 0x0078 },
3098 { 0x17, 0x0000 },
3099 { 0x19, 0x00fb },
3100 { 0x1f, 0x0000 },
3102 /* Modify green table for 10M */
3103 { 0x1f, 0x0005 },
3104 { 0x05, 0x8b79 },
3105 { 0x06, 0xaa00 },
3106 { 0x1f, 0x0000 },
3108 /* Disable hiimpedance detection (RTCT) */
3109 { 0x1f, 0x0003 },
3110 { 0x01, 0x328a },
3111 { 0x1f, 0x0000 }
3114 rtl_apply_firmware(tp);
3116 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3118 rtl8168f_hw_phy_config(tp);
3120 /* Improve 2-pair detection performance */
3121 rtl_writephy(tp, 0x1f, 0x0005);
3122 rtl_writephy(tp, 0x05, 0x8b85);
3123 rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
3124 rtl_writephy(tp, 0x1f, 0x0000);
/* PHY setup for the RTL8168F, second flavor: firmware plus common F tweaks. */
static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
{
	rtl_apply_firmware(tp);

	rtl8168f_hw_phy_config(tp);
}
3134 static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
3136 static const struct phy_reg phy_reg_init[] = {
3137 /* Channel estimation fine tune */
3138 { 0x1f, 0x0003 },
3139 { 0x09, 0xa20f },
3140 { 0x1f, 0x0000 },
3142 /* Modify green table for giga & fnet */
3143 { 0x1f, 0x0005 },
3144 { 0x05, 0x8b55 },
3145 { 0x06, 0x0000 },
3146 { 0x05, 0x8b5e },
3147 { 0x06, 0x0000 },
3148 { 0x05, 0x8b67 },
3149 { 0x06, 0x0000 },
3150 { 0x05, 0x8b70 },
3151 { 0x06, 0x0000 },
3152 { 0x1f, 0x0000 },
3153 { 0x1f, 0x0007 },
3154 { 0x1e, 0x0078 },
3155 { 0x17, 0x0000 },
3156 { 0x19, 0x00aa },
3157 { 0x1f, 0x0000 },
3159 /* Modify green table for 10M */
3160 { 0x1f, 0x0005 },
3161 { 0x05, 0x8b79 },
3162 { 0x06, 0xaa00 },
3163 { 0x1f, 0x0000 },
3165 /* Disable hiimpedance detection (RTCT) */
3166 { 0x1f, 0x0003 },
3167 { 0x01, 0x328a },
3168 { 0x1f, 0x0000 }
3172 rtl_apply_firmware(tp);
3174 rtl8168f_hw_phy_config(tp);
3176 /* Improve 2-pair detection performance */
3177 rtl_writephy(tp, 0x1f, 0x0005);
3178 rtl_writephy(tp, 0x05, 0x8b85);
3179 rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
3180 rtl_writephy(tp, 0x1f, 0x0000);
3182 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3184 /* Modify green table for giga */
3185 rtl_writephy(tp, 0x1f, 0x0005);
3186 rtl_writephy(tp, 0x05, 0x8b54);
3187 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800);
3188 rtl_writephy(tp, 0x05, 0x8b5d);
3189 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800);
3190 rtl_writephy(tp, 0x05, 0x8a7c);
3191 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
3192 rtl_writephy(tp, 0x05, 0x8a7f);
3193 rtl_w1w0_phy(tp, 0x06, 0x0100, 0x0000);
3194 rtl_writephy(tp, 0x05, 0x8a82);
3195 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
3196 rtl_writephy(tp, 0x05, 0x8a85);
3197 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
3198 rtl_writephy(tp, 0x05, 0x8a88);
3199 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
3200 rtl_writephy(tp, 0x1f, 0x0000);
3202 /* uc same-seed solution */
3203 rtl_writephy(tp, 0x1f, 0x0005);
3204 rtl_writephy(tp, 0x05, 0x8b85);
3205 rtl_w1w0_phy(tp, 0x06, 0x8000, 0x0000);
3206 rtl_writephy(tp, 0x1f, 0x0000);
3208 /* eee setting */
3209 rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x00, 0x03, ERIAR_EXGMAC);
3210 rtl_writephy(tp, 0x1f, 0x0005);
3211 rtl_writephy(tp, 0x05, 0x8b85);
3212 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
3213 rtl_writephy(tp, 0x1f, 0x0004);
3214 rtl_writephy(tp, 0x1f, 0x0007);
3215 rtl_writephy(tp, 0x1e, 0x0020);
3216 rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100);
3217 rtl_writephy(tp, 0x1f, 0x0000);
3218 rtl_writephy(tp, 0x0d, 0x0007);
3219 rtl_writephy(tp, 0x0e, 0x003c);
3220 rtl_writephy(tp, 0x0d, 0x4007);
3221 rtl_writephy(tp, 0x0e, 0x0000);
3222 rtl_writephy(tp, 0x0d, 0x0000);
3224 /* Green feature */
3225 rtl_writephy(tp, 0x1f, 0x0003);
3226 rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
3227 rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
3228 rtl_writephy(tp, 0x1f, 0x0000);
3231 static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
3233 static const struct phy_reg phy_reg_init[] = {
3234 { 0x1f, 0x0003 },
3235 { 0x08, 0x441d },
3236 { 0x01, 0x9100 },
3237 { 0x1f, 0x0000 }
3240 rtl_writephy(tp, 0x1f, 0x0000);
3241 rtl_patchphy(tp, 0x11, 1 << 12);
3242 rtl_patchphy(tp, 0x19, 1 << 13);
3243 rtl_patchphy(tp, 0x10, 1 << 15);
3245 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3248 static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
3250 static const struct phy_reg phy_reg_init[] = {
3251 { 0x1f, 0x0005 },
3252 { 0x1a, 0x0000 },
3253 { 0x1f, 0x0000 },
3255 { 0x1f, 0x0004 },
3256 { 0x1c, 0x0000 },
3257 { 0x1f, 0x0000 },
3259 { 0x1f, 0x0001 },
3260 { 0x15, 0x7701 },
3261 { 0x1f, 0x0000 }
3264 /* Disable ALDPS before ram code */
3265 rtl_writephy(tp, 0x1f, 0x0000);
3266 rtl_writephy(tp, 0x18, 0x0310);
3267 msleep(100);
3269 rtl_apply_firmware(tp);
3271 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3274 static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
3276 /* Disable ALDPS before setting firmware */
3277 rtl_writephy(tp, 0x1f, 0x0000);
3278 rtl_writephy(tp, 0x18, 0x0310);
3279 msleep(20);
3281 rtl_apply_firmware(tp);
3283 /* EEE setting */
3284 rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
3285 rtl_writephy(tp, 0x1f, 0x0004);
3286 rtl_writephy(tp, 0x10, 0x401f);
3287 rtl_writephy(tp, 0x19, 0x7030);
3288 rtl_writephy(tp, 0x1f, 0x0000);
3291 static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
3293 static const struct phy_reg phy_reg_init[] = {
3294 { 0x1f, 0x0004 },
3295 { 0x10, 0xc07f },
3296 { 0x19, 0x7030 },
3297 { 0x1f, 0x0000 }
3300 /* Disable ALDPS before ram code */
3301 rtl_writephy(tp, 0x1f, 0x0000);
3302 rtl_writephy(tp, 0x18, 0x0310);
3303 msleep(100);
3305 rtl_apply_firmware(tp);
3307 rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
3308 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3310 rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
3313 static void rtl_hw_phy_config(struct net_device *dev)
3315 struct rtl8169_private *tp = netdev_priv(dev);
3317 rtl8169_print_mac_version(tp);
3319 switch (tp->mac_version) {
3320 case RTL_GIGA_MAC_VER_01:
3321 break;
3322 case RTL_GIGA_MAC_VER_02:
3323 case RTL_GIGA_MAC_VER_03:
3324 rtl8169s_hw_phy_config(tp);
3325 break;
3326 case RTL_GIGA_MAC_VER_04:
3327 rtl8169sb_hw_phy_config(tp);
3328 break;
3329 case RTL_GIGA_MAC_VER_05:
3330 rtl8169scd_hw_phy_config(tp);
3331 break;
3332 case RTL_GIGA_MAC_VER_06:
3333 rtl8169sce_hw_phy_config(tp);
3334 break;
3335 case RTL_GIGA_MAC_VER_07:
3336 case RTL_GIGA_MAC_VER_08:
3337 case RTL_GIGA_MAC_VER_09:
3338 rtl8102e_hw_phy_config(tp);
3339 break;
3340 case RTL_GIGA_MAC_VER_11:
3341 rtl8168bb_hw_phy_config(tp);
3342 break;
3343 case RTL_GIGA_MAC_VER_12:
3344 rtl8168bef_hw_phy_config(tp);
3345 break;
3346 case RTL_GIGA_MAC_VER_17:
3347 rtl8168bef_hw_phy_config(tp);
3348 break;
3349 case RTL_GIGA_MAC_VER_18:
3350 rtl8168cp_1_hw_phy_config(tp);
3351 break;
3352 case RTL_GIGA_MAC_VER_19:
3353 rtl8168c_1_hw_phy_config(tp);
3354 break;
3355 case RTL_GIGA_MAC_VER_20:
3356 rtl8168c_2_hw_phy_config(tp);
3357 break;
3358 case RTL_GIGA_MAC_VER_21:
3359 rtl8168c_3_hw_phy_config(tp);
3360 break;
3361 case RTL_GIGA_MAC_VER_22:
3362 rtl8168c_4_hw_phy_config(tp);
3363 break;
3364 case RTL_GIGA_MAC_VER_23:
3365 case RTL_GIGA_MAC_VER_24:
3366 rtl8168cp_2_hw_phy_config(tp);
3367 break;
3368 case RTL_GIGA_MAC_VER_25:
3369 rtl8168d_1_hw_phy_config(tp);
3370 break;
3371 case RTL_GIGA_MAC_VER_26:
3372 rtl8168d_2_hw_phy_config(tp);
3373 break;
3374 case RTL_GIGA_MAC_VER_27:
3375 rtl8168d_3_hw_phy_config(tp);
3376 break;
3377 case RTL_GIGA_MAC_VER_28:
3378 rtl8168d_4_hw_phy_config(tp);
3379 break;
3380 case RTL_GIGA_MAC_VER_29:
3381 case RTL_GIGA_MAC_VER_30:
3382 rtl8105e_hw_phy_config(tp);
3383 break;
3384 case RTL_GIGA_MAC_VER_31:
3385 /* None. */
3386 break;
3387 case RTL_GIGA_MAC_VER_32:
3388 case RTL_GIGA_MAC_VER_33:
3389 rtl8168e_1_hw_phy_config(tp);
3390 break;
3391 case RTL_GIGA_MAC_VER_34:
3392 rtl8168e_2_hw_phy_config(tp);
3393 break;
3394 case RTL_GIGA_MAC_VER_35:
3395 rtl8168f_1_hw_phy_config(tp);
3396 break;
3397 case RTL_GIGA_MAC_VER_36:
3398 rtl8168f_2_hw_phy_config(tp);
3399 break;
3401 case RTL_GIGA_MAC_VER_37:
3402 rtl8402_hw_phy_config(tp);
3403 break;
3405 case RTL_GIGA_MAC_VER_38:
3406 rtl8411_hw_phy_config(tp);
3407 break;
3409 case RTL_GIGA_MAC_VER_39:
3410 rtl8106e_hw_phy_config(tp);
3411 break;
3413 default:
3414 break;
3418 static void rtl_phy_work(struct rtl8169_private *tp)
3420 struct timer_list *timer = &tp->timer;
3421 void __iomem *ioaddr = tp->mmio_addr;
3422 unsigned long timeout = RTL8169_PHY_TIMEOUT;
3424 assert(tp->mac_version > RTL_GIGA_MAC_VER_01);
3426 if (tp->phy_reset_pending(tp)) {
3428 * A busy loop could burn quite a few cycles on nowadays CPU.
3429 * Let's delay the execution of the timer for a few ticks.
3431 timeout = HZ/10;
3432 goto out_mod_timer;
3435 if (tp->link_ok(ioaddr))
3436 return;
3438 netif_warn(tp, link, tp->dev, "PHY reset until link up\n");
3440 tp->phy_reset_enable(tp);
3442 out_mod_timer:
3443 mod_timer(timer, jiffies + timeout);
3446 static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
3448 if (!test_and_set_bit(flag, tp->wk.flags))
3449 schedule_work(&tp->wk.work);
3452 static void rtl8169_phy_timer(unsigned long __opaque)
3454 struct net_device *dev = (struct net_device *)__opaque;
3455 struct rtl8169_private *tp = netdev_priv(dev);
3457 rtl_schedule_task(tp, RTL_FLAG_TASK_PHY_PENDING);
3460 static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
3461 void __iomem *ioaddr)
3463 iounmap(ioaddr);
3464 pci_release_regions(pdev);
3465 pci_clear_mwi(pdev);
3466 pci_disable_device(pdev);
3467 free_netdev(dev);
3470 static void rtl8169_phy_reset(struct net_device *dev,
3471 struct rtl8169_private *tp)
3473 unsigned int i;
3475 tp->phy_reset_enable(tp);
3476 for (i = 0; i < 100; i++) {
3477 if (!tp->phy_reset_pending(tp))
3478 return;
3479 msleep(1);
3481 netif_err(tp, link, dev, "PHY reset failed\n");
3484 static bool rtl_tbi_enabled(struct rtl8169_private *tp)
3486 void __iomem *ioaddr = tp->mmio_addr;
3488 return (tp->mac_version == RTL_GIGA_MAC_VER_01) &&
3489 (RTL_R8(PHYstatus) & TBI_Enable);
3492 static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
3494 void __iomem *ioaddr = tp->mmio_addr;
3496 rtl_hw_phy_config(dev);
3498 if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
3499 dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
3500 RTL_W8(0x82, 0x01);
3503 pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);
3505 if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
3506 pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);
3508 if (tp->mac_version == RTL_GIGA_MAC_VER_02) {
3509 dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
3510 RTL_W8(0x82, 0x01);
3511 dprintk("Set PHY Reg 0x0bh = 0x00h\n");
3512 rtl_writephy(tp, 0x0b, 0x0000); //w 0x0b 15 0 0
3515 rtl8169_phy_reset(dev, tp);
3517 rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
3518 ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
3519 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
3520 (tp->mii.supports_gmii ?
3521 ADVERTISED_1000baseT_Half |
3522 ADVERTISED_1000baseT_Full : 0));
3524 if (rtl_tbi_enabled(tp))
3525 netif_info(tp, link, dev, "TBI auto-negotiating\n");
3528 static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
3530 void __iomem *ioaddr = tp->mmio_addr;
3531 u32 high;
3532 u32 low;
3534 low = addr[0] | (addr[1] << 8) | (addr[2] << 16) | (addr[3] << 24);
3535 high = addr[4] | (addr[5] << 8);
3537 rtl_lock_work(tp);
3539 RTL_W8(Cfg9346, Cfg9346_Unlock);
3541 RTL_W32(MAC4, high);
3542 RTL_R32(MAC4);
3544 RTL_W32(MAC0, low);
3545 RTL_R32(MAC0);
3547 if (tp->mac_version == RTL_GIGA_MAC_VER_34) {
3548 const struct exgmac_reg e[] = {
3549 { .addr = 0xe0, ERIAR_MASK_1111, .val = low },
3550 { .addr = 0xe4, ERIAR_MASK_1111, .val = high },
3551 { .addr = 0xf0, ERIAR_MASK_1111, .val = low << 16 },
3552 { .addr = 0xf4, ERIAR_MASK_1111, .val = high << 16 |
3553 low >> 16 },
3556 rtl_write_exgmac_batch(tp, e, ARRAY_SIZE(e));
3559 RTL_W8(Cfg9346, Cfg9346_Lock);
3561 rtl_unlock_work(tp);
3564 static int rtl_set_mac_address(struct net_device *dev, void *p)
3566 struct rtl8169_private *tp = netdev_priv(dev);
3567 struct sockaddr *addr = p;
3569 if (!is_valid_ether_addr(addr->sa_data))
3570 return -EADDRNOTAVAIL;
3572 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3574 rtl_rar_set(tp, dev->dev_addr);
3576 return 0;
3579 static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3581 struct rtl8169_private *tp = netdev_priv(dev);
3582 struct mii_ioctl_data *data = if_mii(ifr);
3584 return netif_running(dev) ? tp->do_ioctl(tp, data, cmd) : -ENODEV;
3587 static int rtl_xmii_ioctl(struct rtl8169_private *tp,
3588 struct mii_ioctl_data *data, int cmd)
3590 switch (cmd) {
3591 case SIOCGMIIPHY:
3592 data->phy_id = 32; /* Internal PHY */
3593 return 0;
3595 case SIOCGMIIREG:
3596 data->val_out = rtl_readphy(tp, data->reg_num & 0x1f);
3597 return 0;
3599 case SIOCSMIIREG:
3600 rtl_writephy(tp, data->reg_num & 0x1f, data->val_in);
3601 return 0;
3603 return -EOPNOTSUPP;
3606 static int rtl_tbi_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd)
3608 return -EOPNOTSUPP;
3611 static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp)
3613 if (tp->features & RTL_FEATURE_MSI) {
3614 pci_disable_msi(pdev);
3615 tp->features &= ~RTL_FEATURE_MSI;
3619 static void __devinit rtl_init_mdio_ops(struct rtl8169_private *tp)
3621 struct mdio_ops *ops = &tp->mdio_ops;
3623 switch (tp->mac_version) {
3624 case RTL_GIGA_MAC_VER_27:
3625 ops->write = r8168dp_1_mdio_write;
3626 ops->read = r8168dp_1_mdio_read;
3627 break;
3628 case RTL_GIGA_MAC_VER_28:
3629 case RTL_GIGA_MAC_VER_31:
3630 ops->write = r8168dp_2_mdio_write;
3631 ops->read = r8168dp_2_mdio_read;
3632 break;
3633 default:
3634 ops->write = r8169_mdio_write;
3635 ops->read = r8169_mdio_read;
3636 break;
3640 static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
3642 void __iomem *ioaddr = tp->mmio_addr;
3644 switch (tp->mac_version) {
3645 case RTL_GIGA_MAC_VER_29:
3646 case RTL_GIGA_MAC_VER_30:
3647 case RTL_GIGA_MAC_VER_32:
3648 case RTL_GIGA_MAC_VER_33:
3649 case RTL_GIGA_MAC_VER_34:
3650 case RTL_GIGA_MAC_VER_37:
3651 case RTL_GIGA_MAC_VER_38:
3652 case RTL_GIGA_MAC_VER_39:
3653 RTL_W32(RxConfig, RTL_R32(RxConfig) |
3654 AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
3655 break;
3656 default:
3657 break;
/*
 * If any Wake-on-LAN source is armed, prepare the chip for WoL instead of a
 * full power-down: select PHY page 0, clear BMCR (releases power-down /
 * restarts nothing), and apply the suspend RxConfig quirk.
 * Returns true when WoL handling was performed, false otherwise.
 */
3661 static bool rtl_wol_pll_power_down(struct rtl8169_private *tp)
3663 if (!(__rtl8169_get_wol(tp) & WAKE_ANY))
3664 return false;
3666 rtl_writephy(tp, 0x1f, 0x0000);
3667 rtl_writephy(tp, MII_BMCR, 0x0000);
3669 rtl_wol_suspend_quirk(tp);
3671 return true;
/* Power down the 810x PHY: select page 0, then set BMCR power-down. */
3674 static void r810x_phy_power_down(struct rtl8169_private *tp)
3676 rtl_writephy(tp, 0x1f, 0x0000);
3677 rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
/* Power up the 810x PHY: select page 0, re-enable autonegotiation. */
3680 static void r810x_phy_power_up(struct rtl8169_private *tp)
3682 rtl_writephy(tp, 0x1f, 0x0000);
3683 rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
/*
 * 810x PLL power-down.  Skipped entirely when WoL is armed (the WoL path
 * takes over).  The PMCH bit 0x80 is left untouched on VER_07..10/13/16.
 */
3686 static void r810x_pll_power_down(struct rtl8169_private *tp)
3688 void __iomem *ioaddr = tp->mmio_addr;
3690 if (rtl_wol_pll_power_down(tp))
3691 return;
3693 r810x_phy_power_down(tp);
3695 switch (tp->mac_version) {
3696 case RTL_GIGA_MAC_VER_07:
3697 case RTL_GIGA_MAC_VER_08:
3698 case RTL_GIGA_MAC_VER_09:
3699 case RTL_GIGA_MAC_VER_10:
3700 case RTL_GIGA_MAC_VER_13:
3701 case RTL_GIGA_MAC_VER_16:
3702 break;
3703 default:
3704 RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
3705 break;
/*
 * 810x PLL power-up: bring the PHY back first, then set PMCH bit 0x80 on
 * the chips where r810x_pll_power_down() cleared it.
 */
3709 static void r810x_pll_power_up(struct rtl8169_private *tp)
3711 void __iomem *ioaddr = tp->mmio_addr;
3713 r810x_phy_power_up(tp);
3715 switch (tp->mac_version) {
3716 case RTL_GIGA_MAC_VER_07:
3717 case RTL_GIGA_MAC_VER_08:
3718 case RTL_GIGA_MAC_VER_09:
3719 case RTL_GIGA_MAC_VER_10:
3720 case RTL_GIGA_MAC_VER_13:
3721 case RTL_GIGA_MAC_VER_16:
3722 break;
3723 default:
3724 RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
3725 break;
/*
 * 8168 PHY power-up: page 0, then on the listed older variants clear PHY
 * reg 0x0e (undoes the 0x0200 written by r8168_phy_power_down()), and
 * finally re-enable autonegotiation via BMCR.
 */
3729 static void r8168_phy_power_up(struct rtl8169_private *tp)
3731 rtl_writephy(tp, 0x1f, 0x0000);
3732 switch (tp->mac_version) {
3733 case RTL_GIGA_MAC_VER_11:
3734 case RTL_GIGA_MAC_VER_12:
3735 case RTL_GIGA_MAC_VER_17:
3736 case RTL_GIGA_MAC_VER_18:
3737 case RTL_GIGA_MAC_VER_19:
3738 case RTL_GIGA_MAC_VER_20:
3739 case RTL_GIGA_MAC_VER_21:
3740 case RTL_GIGA_MAC_VER_22:
3741 case RTL_GIGA_MAC_VER_23:
3742 case RTL_GIGA_MAC_VER_24:
3743 case RTL_GIGA_MAC_VER_25:
3744 case RTL_GIGA_MAC_VER_26:
3745 case RTL_GIGA_MAC_VER_27:
3746 case RTL_GIGA_MAC_VER_28:
3747 case RTL_GIGA_MAC_VER_31:
3748 rtl_writephy(tp, 0x0e, 0x0000);
3749 break;
3750 default:
3751 break;
3753 rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
/*
 * 8168 PHY power-down.  VER_32/33 need BMCR_ANENABLE kept alongside
 * BMCR_PDOWN; the listed older variants additionally write 0x0200 to PHY
 * reg 0x0e before falling through to the plain BMCR power-down.
 */
3756 static void r8168_phy_power_down(struct rtl8169_private *tp)
3758 rtl_writephy(tp, 0x1f, 0x0000);
3759 switch (tp->mac_version) {
3760 case RTL_GIGA_MAC_VER_32:
3761 case RTL_GIGA_MAC_VER_33:
3762 rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN);
3763 break;
3765 case RTL_GIGA_MAC_VER_11:
3766 case RTL_GIGA_MAC_VER_12:
3767 case RTL_GIGA_MAC_VER_17:
3768 case RTL_GIGA_MAC_VER_18:
3769 case RTL_GIGA_MAC_VER_19:
3770 case RTL_GIGA_MAC_VER_20:
3771 case RTL_GIGA_MAC_VER_21:
3772 case RTL_GIGA_MAC_VER_22:
3773 case RTL_GIGA_MAC_VER_23:
3774 case RTL_GIGA_MAC_VER_24:
3775 case RTL_GIGA_MAC_VER_25:
3776 case RTL_GIGA_MAC_VER_26:
3777 case RTL_GIGA_MAC_VER_27:
3778 case RTL_GIGA_MAC_VER_28:
3779 case RTL_GIGA_MAC_VER_31:
3780 rtl_writephy(tp, 0x0e, 0x0200);
/* fall through - these variants also need the BMCR power-down below */
3781 default:
3782 rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
3783 break;
/*
 * 8168 PLL power-down.  Bail out early when the DASH management firmware
 * (8168DP variants) is active or when ASF is enabled on VER_23/24 - in
 * both cases the management side still needs the link.  VER_32/33 get an
 * EPHY tweak first; WoL, when armed, replaces the full power-down.
 */
3787 static void r8168_pll_power_down(struct rtl8169_private *tp)
3789 void __iomem *ioaddr = tp->mmio_addr;
3791 if ((tp->mac_version == RTL_GIGA_MAC_VER_27 ||
3792 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
3793 tp->mac_version == RTL_GIGA_MAC_VER_31) &&
3794 r8168dp_check_dash(tp)) {
3795 return;
3798 if ((tp->mac_version == RTL_GIGA_MAC_VER_23 ||
3799 tp->mac_version == RTL_GIGA_MAC_VER_24) &&
3800 (RTL_R16(CPlusCmd) & ASF)) {
3801 return;
3804 if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
3805 tp->mac_version == RTL_GIGA_MAC_VER_33)
3806 rtl_ephy_write(tp, 0x19, 0xff64);
3808 if (rtl_wol_pll_power_down(tp))
3809 return;
3811 r8168_phy_power_down(tp);
3813 switch (tp->mac_version) {
3814 case RTL_GIGA_MAC_VER_25:
3815 case RTL_GIGA_MAC_VER_26:
3816 case RTL_GIGA_MAC_VER_27:
3817 case RTL_GIGA_MAC_VER_28:
3818 case RTL_GIGA_MAC_VER_31:
3819 case RTL_GIGA_MAC_VER_32:
3820 case RTL_GIGA_MAC_VER_33:
3821 RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
3822 break;
/*
 * 8168 PLL power-up: restore PMCH bit 0x80 on the chips where
 * r8168_pll_power_down() cleared it, then bring the PHY back up.
 */
3826 static void r8168_pll_power_up(struct rtl8169_private *tp)
3828 void __iomem *ioaddr = tp->mmio_addr;
3830 switch (tp->mac_version) {
3831 case RTL_GIGA_MAC_VER_25:
3832 case RTL_GIGA_MAC_VER_26:
3833 case RTL_GIGA_MAC_VER_27:
3834 case RTL_GIGA_MAC_VER_28:
3835 case RTL_GIGA_MAC_VER_31:
3836 case RTL_GIGA_MAC_VER_32:
3837 case RTL_GIGA_MAC_VER_33:
3838 RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
3839 break;
3842 r8168_phy_power_up(tp);
/* Invoke an optional per-chip callback; a NULL op means "no action needed". */
3845 static void rtl_generic_op(struct rtl8169_private *tp,
3846 void (*op)(struct rtl8169_private *))
3848 if (op)
3849 op(tp);
/* Dispatch to the chip-specific PLL power-down handler, if any. */
3852 static void rtl_pll_power_down(struct rtl8169_private *tp)
3854 rtl_generic_op(tp, tp->pll_power_ops.down);
/* Dispatch to the chip-specific PLL power-up handler, if any. */
3857 static void rtl_pll_power_up(struct rtl8169_private *tp)
3859 rtl_generic_op(tp, tp->pll_power_ops.up);
/*
 * Choose the PLL power management handlers for this chip: 810x-class parts
 * use the r810x pair, 8168-class parts use the r8168 pair, and the
 * remaining (oldest) chips have no PLL power management at all.
 */
3862 static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp)
3864 struct pll_power_ops *ops = &tp->pll_power_ops;
3866 switch (tp->mac_version) {
3867 case RTL_GIGA_MAC_VER_07:
3868 case RTL_GIGA_MAC_VER_08:
3869 case RTL_GIGA_MAC_VER_09:
3870 case RTL_GIGA_MAC_VER_10:
3871 case RTL_GIGA_MAC_VER_16:
3872 case RTL_GIGA_MAC_VER_29:
3873 case RTL_GIGA_MAC_VER_30:
3874 case RTL_GIGA_MAC_VER_37:
3875 case RTL_GIGA_MAC_VER_39:
3876 ops->down = r810x_pll_power_down;
3877 ops->up = r810x_pll_power_up;
3878 break;
3880 case RTL_GIGA_MAC_VER_11:
3881 case RTL_GIGA_MAC_VER_12:
3882 case RTL_GIGA_MAC_VER_17:
3883 case RTL_GIGA_MAC_VER_18:
3884 case RTL_GIGA_MAC_VER_19:
3885 case RTL_GIGA_MAC_VER_20:
3886 case RTL_GIGA_MAC_VER_21:
3887 case RTL_GIGA_MAC_VER_22:
3888 case RTL_GIGA_MAC_VER_23:
3889 case RTL_GIGA_MAC_VER_24:
3890 case RTL_GIGA_MAC_VER_25:
3891 case RTL_GIGA_MAC_VER_26:
3892 case RTL_GIGA_MAC_VER_27:
3893 case RTL_GIGA_MAC_VER_28:
3894 case RTL_GIGA_MAC_VER_31:
3895 case RTL_GIGA_MAC_VER_32:
3896 case RTL_GIGA_MAC_VER_33:
3897 case RTL_GIGA_MAC_VER_34:
3898 case RTL_GIGA_MAC_VER_35:
3899 case RTL_GIGA_MAC_VER_36:
3900 case RTL_GIGA_MAC_VER_38:
3901 ops->down = r8168_pll_power_down;
3902 ops->up = r8168_pll_power_up;
3903 break;
3905 default:
3906 ops->down = NULL;
3907 ops->up = NULL;
3908 break;
/*
 * Program the baseline RxConfig for the chip generation: old 8169/8101
 * parts take a FIFO threshold, 8168C-class parts additionally need
 * RX_MULTI_EN, and newer parts just need RX128_INT_EN + DMA burst.
 */
3912 static void rtl_init_rxcfg(struct rtl8169_private *tp)
3914 void __iomem *ioaddr = tp->mmio_addr;
3916 switch (tp->mac_version) {
3917 case RTL_GIGA_MAC_VER_01:
3918 case RTL_GIGA_MAC_VER_02:
3919 case RTL_GIGA_MAC_VER_03:
3920 case RTL_GIGA_MAC_VER_04:
3921 case RTL_GIGA_MAC_VER_05:
3922 case RTL_GIGA_MAC_VER_06:
3923 case RTL_GIGA_MAC_VER_10:
3924 case RTL_GIGA_MAC_VER_11:
3925 case RTL_GIGA_MAC_VER_12:
3926 case RTL_GIGA_MAC_VER_13:
3927 case RTL_GIGA_MAC_VER_14:
3928 case RTL_GIGA_MAC_VER_15:
3929 case RTL_GIGA_MAC_VER_16:
3930 case RTL_GIGA_MAC_VER_17:
3931 RTL_W32(RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
3932 break;
3933 case RTL_GIGA_MAC_VER_18:
3934 case RTL_GIGA_MAC_VER_19:
3935 case RTL_GIGA_MAC_VER_20:
3936 case RTL_GIGA_MAC_VER_21:
3937 case RTL_GIGA_MAC_VER_22:
3938 case RTL_GIGA_MAC_VER_23:
3939 case RTL_GIGA_MAC_VER_24:
3940 case RTL_GIGA_MAC_VER_34:
3941 RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
3942 break;
3943 default:
3944 RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST);
3945 break;
/* Reset all TX/RX descriptor ring indexes to the start of the rings. */
3949 static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
3951 tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
/* Enable jumbo frames via the chip-specific handler, with Cfg9346 unlocked. */
3954 static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
3956 void __iomem *ioaddr = tp->mmio_addr;
3958 RTL_W8(Cfg9346, Cfg9346_Unlock);
3959 rtl_generic_op(tp, tp->jumbo_ops.enable);
3960 RTL_W8(Cfg9346, Cfg9346_Lock);
/* Disable jumbo frames via the chip-specific handler, with Cfg9346 unlocked. */
3963 static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
3965 void __iomem *ioaddr = tp->mmio_addr;
3967 RTL_W8(Cfg9346, Cfg9346_Unlock);
3968 rtl_generic_op(tp, tp->jumbo_ops.disable);
3969 RTL_W8(Cfg9346, Cfg9346_Lock);
/* 8168C jumbo enable: set both jumbo bits and lower the PCIe read request size. */
3972 static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp)
3974 void __iomem *ioaddr = tp->mmio_addr;
3976 RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
3977 RTL_W8(Config4, RTL_R8(Config4) | Jumbo_En1);
3978 rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT);
/* 8168C jumbo disable: clear both jumbo bits, restore the PCIe read request size. */
3981 static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp)
3983 void __iomem *ioaddr = tp->mmio_addr;
3985 RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
3986 RTL_W8(Config4, RTL_R8(Config4) & ~Jumbo_En1);
3987 rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
/* 8168DP jumbo enable: only the Config3 jumbo bit is needed. */
3990 static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp)
3992 void __iomem *ioaddr = tp->mmio_addr;
3994 RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
/* 8168DP jumbo disable: clear the Config3 jumbo bit. */
3997 static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp)
3999 void __iomem *ioaddr = tp->mmio_addr;
4001 RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
/* 8168E jumbo enable: raise the TX packet size limit, set the jumbo bits,
 * and lower the PCIe read request size. */
4004 static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
4006 void __iomem *ioaddr = tp->mmio_addr;
4008 RTL_W8(MaxTxPacketSize, 0x3f);
4009 RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
4010 RTL_W8(Config4, RTL_R8(Config4) | 0x01);
4011 rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT);
/* 8168E jumbo disable: restore the standard TX size limit and clear the bits. */
4014 static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
4016 void __iomem *ioaddr = tp->mmio_addr;
4018 RTL_W8(MaxTxPacketSize, 0x0c);
4019 RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
4020 RTL_W8(Config4, RTL_R8(Config4) & ~0x01);
4021 rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
/* 8168B rev 0 jumbo enable: handled purely via PCIe device control tuning. */
4024 static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp)
4026 rtl_tx_performance_tweak(tp->pci_dev,
4027 (0x2 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
/* 8168B rev 0 jumbo disable: restore the default PCIe read request size. */
4030 static void r8168b_0_hw_jumbo_disable(struct rtl8169_private *tp)
4032 rtl_tx_performance_tweak(tp->pci_dev,
4033 (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN)
/* 8168B rev 1 jumbo enable: rev 0 handling plus Config4 bit 0. */
4036 static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp)
4038 void __iomem *ioaddr = tp->mmio_addr;
4040 r8168b_0_hw_jumbo_enable(tp);
4042 RTL_W8(Config4, RTL_R8(Config4) | (1 << 0));
/* 8168B rev 1 jumbo disable: rev 0 handling plus clearing Config4 bit 0. */
4045 static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp)
4047 void __iomem *ioaddr = tp->mmio_addr;
4049 r8168b_0_hw_jumbo_disable(tp);
4051 RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
/*
 * Choose the jumbo-frame enable/disable handlers for this chip generation.
 * Chips with NULL handlers either need no action (8169) or do not support
 * jumbo frames at all (810x).
 */
4054 static void __devinit rtl_init_jumbo_ops(struct rtl8169_private *tp)
4056 struct jumbo_ops *ops = &tp->jumbo_ops;
4058 switch (tp->mac_version) {
4059 case RTL_GIGA_MAC_VER_11:
4060 ops->disable = r8168b_0_hw_jumbo_disable;
4061 ops->enable = r8168b_0_hw_jumbo_enable;
4062 break;
4063 case RTL_GIGA_MAC_VER_12:
4064 case RTL_GIGA_MAC_VER_17:
4065 ops->disable = r8168b_1_hw_jumbo_disable;
4066 ops->enable = r8168b_1_hw_jumbo_enable;
4067 break;
4068 case RTL_GIGA_MAC_VER_18: /* Wild guess. Needs info from Realtek. */
4069 case RTL_GIGA_MAC_VER_19:
4070 case RTL_GIGA_MAC_VER_20:
4071 case RTL_GIGA_MAC_VER_21: /* Wild guess. Needs info from Realtek. */
4072 case RTL_GIGA_MAC_VER_22:
4073 case RTL_GIGA_MAC_VER_23:
4074 case RTL_GIGA_MAC_VER_24:
4075 case RTL_GIGA_MAC_VER_25:
4076 case RTL_GIGA_MAC_VER_26:
4077 ops->disable = r8168c_hw_jumbo_disable;
4078 ops->enable = r8168c_hw_jumbo_enable;
4079 break;
4080 case RTL_GIGA_MAC_VER_27:
4081 case RTL_GIGA_MAC_VER_28:
4082 ops->disable = r8168dp_hw_jumbo_disable;
4083 ops->enable = r8168dp_hw_jumbo_enable;
4084 break;
4085 case RTL_GIGA_MAC_VER_31: /* Wild guess. Needs info from Realtek. */
4086 case RTL_GIGA_MAC_VER_32:
4087 case RTL_GIGA_MAC_VER_33:
4088 case RTL_GIGA_MAC_VER_34:
4089 ops->disable = r8168e_hw_jumbo_disable;
4090 ops->enable = r8168e_hw_jumbo_enable;
4091 break;
4094 * No action needed for jumbo frames with 8169.
4095 * No jumbo for 810x at all.
4097 default:
4098 ops->disable = NULL;
4099 ops->enable = NULL;
4100 break;
/*
 * Issue a soft chip reset and poll (up to 100 x 100us = 10ms) for the
 * CmdReset bit to self-clear.  A timeout is silently ignored.
 */
4104 static void rtl_hw_reset(struct rtl8169_private *tp)
4106 void __iomem *ioaddr = tp->mmio_addr;
4107 int i;
4109 /* Soft reset the chip. */
4110 RTL_W8(ChipCmd, CmdReset);
4112 /* Check that the chip has finished the reset. */
4113 for (i = 0; i < 100; i++) {
4114 if ((RTL_R8(ChipCmd) & CmdReset) == 0)
4115 break;
4116 udelay(100);
/*
 * Load and validate the firmware patch for this chip, storing the result
 * in tp->rtl_fw (NULL on any failure).  Failure is non-fatal: the chip
 * runs without the patch, with only a warning logged.  Uses the classic
 * goto-based cleanup ladder for the partially-acquired resources.
 */
4120 static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
4122 struct rtl_fw *rtl_fw;
4123 const char *name;
4124 int rc = -ENOMEM;
4126 name = rtl_lookup_firmware_name(tp);
4127 if (!name)
4128 goto out_no_firmware;
4130 rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL);
4131 if (!rtl_fw)
4132 goto err_warn;
4134 rc = request_firmware(&rtl_fw->fw, name, &tp->pci_dev->dev);
4135 if (rc < 0)
4136 goto err_free;
4138 rc = rtl_check_firmware(tp, rtl_fw);
4139 if (rc < 0)
4140 goto err_release_firmware;
4142 tp->rtl_fw = rtl_fw;
4143 out:
4144 return;
4146 err_release_firmware:
4147 release_firmware(rtl_fw->fw);
4148 err_free:
4149 kfree(rtl_fw);
4150 err_warn:
4151 netif_warn(tp, ifup, tp->dev, "unable to load firmware patch %s (%d)\n",
4152 name, rc);
4153 out_no_firmware:
4154 tp->rtl_fw = NULL;
4155 goto out;
/* Fetch the firmware only if it has not been loaded yet (IS_ERR = "not tried"). */
4158 static void rtl_request_firmware(struct rtl8169_private *tp)
4160 if (IS_ERR(tp->rtl_fw))
4161 rtl_request_uncached_firmware(tp);
/* Stop the receiver from accepting any frame type (clears the accept mask). */
4164 static void rtl_rx_close(struct rtl8169_private *tp)
4166 void __iomem *ioaddr = tp->mmio_addr;
4168 RTL_W32(RxConfig, RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK);
/*
 * Full controller quiesce + reset: mask IRQs, close RX, then wait for
 * in-flight TX to drain in the chip-specific way (NPQ poll on the DP
 * parts, StopReq + TXCFG_EMPTY poll on VER_34..38, StopReq + fixed delay
 * elsewhere) before issuing the soft reset.  Note: the NPQ and
 * TXCFG_EMPTY polls have no timeout.
 */
4171 static void rtl8169_hw_reset(struct rtl8169_private *tp)
4173 void __iomem *ioaddr = tp->mmio_addr;
4175 /* Disable interrupts */
4176 rtl8169_irq_mask_and_ack(tp);
4178 rtl_rx_close(tp);
4180 if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
4181 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
4182 tp->mac_version == RTL_GIGA_MAC_VER_31) {
4183 while (RTL_R8(TxPoll) & NPQ)
4184 udelay(20);
4185 } else if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
4186 tp->mac_version == RTL_GIGA_MAC_VER_35 ||
4187 tp->mac_version == RTL_GIGA_MAC_VER_36 ||
4188 tp->mac_version == RTL_GIGA_MAC_VER_37 ||
4189 tp->mac_version == RTL_GIGA_MAC_VER_38) {
4190 RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
4191 while (!(RTL_R32(TxConfig) & TXCFG_EMPTY))
4192 udelay(100);
4193 } else {
4194 RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
4195 udelay(100);
4198 rtl_hw_reset(tp);
/* Program TxConfig with the DMA burst size and inter-frame gap. */
4201 static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
4203 void __iomem *ioaddr = tp->mmio_addr;
4205 /* Set DMA burst size and Interframe Gap Time */
4206 RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
4207 (InterFrameGap << TxInterFrameGapShift));
/* Run the per-generation hardware bring-up, then unmask all interrupts. */
4210 static void rtl_hw_start(struct net_device *dev)
4212 struct rtl8169_private *tp = netdev_priv(dev);
4214 tp->hw_start(dev);
4216 rtl_irq_enable_all(tp);
/*
 * Tell the chip where the TX/RX descriptor rings live.  The high dword
 * must be written before the low dword (see comment below).
 */
4219 static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp,
4220 void __iomem *ioaddr)
4223 * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh
4224 * register to be written before TxDescAddrLow to work.
4225 * Switching from MMIO to I/O access fixes the issue as well.
4227 RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32);
4228 RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32));
4229 RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32);
4230 RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32));
/*
 * Read CPlusCmd and write the same value back, returning the value read.
 * NOTE(review): the rewrite is presumably needed to latch the register -
 * long-standing driver behavior, exact HW reason not documented here.
 */
4233 static u16 rtl_rw_cpluscmd(void __iomem *ioaddr)
4235 u16 cmd;
4237 cmd = RTL_R16(CPlusCmd);
4238 RTL_W16(CPlusCmd, cmd);
4239 return cmd;
/* Set the RX size filter just above the buffer size (tight limits hurt). */
4242 static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz)
4244 /* Low hurts. Let's disable the filtering. */
4245 RTL_W16(RxMaxSize, rx_buf_sz + 1);
/*
 * Write an undocumented per-chip value to register 0x7c, selected by MAC
 * version and the sampled PCI clock speed (8110SCd/SCe only); other chips
 * are left untouched.
 */
4248 static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
4250 static const struct rtl_cfg2_info {
4251 u32 mac_version;
4252 u32 clk;
4253 u32 val;
4254 } cfg2_info [] = {
4255 { RTL_GIGA_MAC_VER_05, PCI_Clock_33MHz, 0x000fff00 }, // 8110SCd
4256 { RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff },
4257 { RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, // 8110SCe
4258 { RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff }
4260 const struct rtl_cfg2_info *p = cfg2_info;
4261 unsigned int i;
4262 u32 clk;
4264 clk = RTL_R8(Config2) & PCI_Clock_66MHz;
4265 for (i = 0; i < ARRAY_SIZE(cfg2_info); i++, p++) {
4266 if ((p->mac_version == mac_version) && (p->clk == clk)) {
4267 RTL_W32(0x7c, p->val);
4268 break;
/*
 * Program the RX accept mode and multicast hash filter from the netdev
 * flags and multicast list: promiscuous accepts everything, too-many-
 * multicast or ALLMULTI opens the hash fully, otherwise a CRC-based
 * 64-bit hash is built.  NETIF_F_RXALL additionally accepts bad frames.
 */
4273 static void rtl_set_rx_mode(struct net_device *dev)
4275 struct rtl8169_private *tp = netdev_priv(dev);
4276 void __iomem *ioaddr = tp->mmio_addr;
4277 u32 mc_filter[2]; /* Multicast hash filter */
4278 int rx_mode;
4279 u32 tmp = 0;
4281 if (dev->flags & IFF_PROMISC) {
4282 /* Unconditionally log net taps. */
4283 netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
4284 rx_mode =
4285 AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
4286 AcceptAllPhys;
4287 mc_filter[1] = mc_filter[0] = 0xffffffff;
4288 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
4289 (dev->flags & IFF_ALLMULTI)) {
4290 /* Too many to filter perfectly -- accept all multicasts. */
4291 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
4292 mc_filter[1] = mc_filter[0] = 0xffffffff;
4293 } else {
4294 struct netdev_hw_addr *ha;
4296 rx_mode = AcceptBroadcast | AcceptMyPhys;
4297 mc_filter[1] = mc_filter[0] = 0;
4298 netdev_for_each_mc_addr(ha, dev) {
4299 int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
4300 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
4301 rx_mode |= AcceptMulticast;
4305 if (dev->features & NETIF_F_RXALL)
4306 rx_mode |= (AcceptErr | AcceptRunt);
4308 tmp = (RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode;
4310 if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
4311 u32 data = mc_filter[0];
/* Chips after VER_06 expect the hash dwords swapped and byte-swapped. */
4313 mc_filter[0] = swab32(mc_filter[1]);
4314 mc_filter[1] = swab32(data);
4317 RTL_W32(MAR0 + 4, mc_filter[1]);
4318 RTL_W32(MAR0 + 0, mc_filter[0]);
4320 RTL_W32(RxConfig, tmp);
/*
 * Hardware bring-up for the original 8169 family: cache line tweak for
 * VER_05, rxcfg/txcfg programming, CPlusCmd quirks for VER_02/03,
 * descriptor ring setup and RX mode.  Note VER_01..04 must enable
 * TX/RX *before* the config registers are written; later chips do it after.
 */
4323 static void rtl_hw_start_8169(struct net_device *dev)
4325 struct rtl8169_private *tp = netdev_priv(dev);
4326 void __iomem *ioaddr = tp->mmio_addr;
4327 struct pci_dev *pdev = tp->pci_dev;
4329 if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
4330 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | PCIMulRW);
4331 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
4334 RTL_W8(Cfg9346, Cfg9346_Unlock);
4335 if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
4336 tp->mac_version == RTL_GIGA_MAC_VER_02 ||
4337 tp->mac_version == RTL_GIGA_MAC_VER_03 ||
4338 tp->mac_version == RTL_GIGA_MAC_VER_04)
4339 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
4341 rtl_init_rxcfg(tp);
4343 RTL_W8(EarlyTxThres, NoEarlyTx);
4345 rtl_set_rx_max_size(ioaddr, rx_buf_sz);
4347 if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
4348 tp->mac_version == RTL_GIGA_MAC_VER_02 ||
4349 tp->mac_version == RTL_GIGA_MAC_VER_03 ||
4350 tp->mac_version == RTL_GIGA_MAC_VER_04)
4351 rtl_set_rx_tx_config_registers(tp);
4353 tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;
4355 if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
4356 tp->mac_version == RTL_GIGA_MAC_VER_03) {
4357 dprintk("Set MAC Reg C+CR Offset 0xE0. "
4358 "Bit-3 and bit-14 MUST be 1\n");
4359 tp->cp_cmd |= (1 << 14);
4362 RTL_W16(CPlusCmd, tp->cp_cmd);
4364 rtl8169_set_magic_reg(ioaddr, tp->mac_version);
4367 * Undocumented corner. Supposedly:
4368 * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets
4370 RTL_W16(IntrMitigate, 0x0000);
4372 rtl_set_rx_tx_desc_registers(tp, ioaddr);
4374 if (tp->mac_version != RTL_GIGA_MAC_VER_01 &&
4375 tp->mac_version != RTL_GIGA_MAC_VER_02 &&
4376 tp->mac_version != RTL_GIGA_MAC_VER_03 &&
4377 tp->mac_version != RTL_GIGA_MAC_VER_04) {
4378 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
4379 rtl_set_rx_tx_config_registers(tp);
4382 RTL_W8(Cfg9346, Cfg9346_Lock);
4384 /* Initially a 10 us delay. Turned it into a PCI commit. - FR */
4385 RTL_R8(IntrMask);
4387 RTL_W32(RxMissed, 0);
4389 rtl_set_rx_mode(dev);
4391 /* no early-rx interrupts */
4392 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
/* CSI write via the chip-specific handler; no-op if the chip lacks CSI. */
4395 static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value)
4397 if (tp->csi_ops.write)
4398 tp->csi_ops.write(tp, addr, value);
4401 static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
4403 return tp->csi_ops.read ? tp->csi_ops.read(tp, addr) : ~0;
/* Read-modify-write CSI config reg 0x070c: keep the low 24 bits, OR in @bits. */
4406 static void rtl_csi_access_enable(struct rtl8169_private *tp, u32 bits)
4408 u32 csi;
4410 csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff;
4411 rtl_csi_write(tp, 0x070c, csi | bits);
/* CSI access enable, variant 1 (top byte 0x17). */
4414 static void rtl_csi_access_enable_1(struct rtl8169_private *tp)
4416 rtl_csi_access_enable(tp, 0x17000000);
/* CSI access enable, variant 2 (top byte 0x27). */
4419 static void rtl_csi_access_enable_2(struct rtl8169_private *tp)
4421 rtl_csi_access_enable(tp, 0x27000000);
/*
 * Standard CSI write: load CSIDR, kick CSIAR, then poll (up to 100 x 10us)
 * for the busy flag to clear.  A timeout is silently ignored.
 */
4424 static void r8169_csi_write(struct rtl8169_private *tp, int addr, int value)
4426 void __iomem *ioaddr = tp->mmio_addr;
4427 unsigned int i;
4429 RTL_W32(CSIDR, value);
4430 RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
4431 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
4433 for (i = 0; i < 100; i++) {
4434 if (!(RTL_R32(CSIAR) & CSIAR_FLAG))
4435 break;
4436 udelay(10);
/*
 * Standard CSI read: start the transaction, then poll (up to 100 x 10us)
 * for completion.  Returns ~0 on timeout.
 */
4440 static u32 r8169_csi_read(struct rtl8169_private *tp, int addr)
4442 void __iomem *ioaddr = tp->mmio_addr;
4443 u32 value = ~0x00;
4444 unsigned int i;
4446 RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) |
4447 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
4449 for (i = 0; i < 100; i++) {
4450 if (RTL_R32(CSIAR) & CSIAR_FLAG) {
4451 value = RTL_R32(CSIDR);
4452 break;
4454 udelay(10);
4457 return value;
/* 8402-family CSI write: like r8169_csi_write() but targets the NIC function. */
4460 static void r8402_csi_write(struct rtl8169_private *tp, int addr, int value)
4462 void __iomem *ioaddr = tp->mmio_addr;
4463 unsigned int i;
4465 RTL_W32(CSIDR, value);
4466 RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
4467 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT |
4468 CSIAR_FUNC_NIC);
4470 for (i = 0; i < 100; i++) {
4471 if (!(RTL_R32(CSIAR) & CSIAR_FLAG))
4472 break;
4473 udelay(10);
/* 8402-family CSI read: like r8169_csi_read() but targets the NIC function.
 * Returns ~0 on timeout. */
4477 static u32 r8402_csi_read(struct rtl8169_private *tp, int addr)
4479 void __iomem *ioaddr = tp->mmio_addr;
4480 u32 value = ~0x00;
4481 unsigned int i;
4483 RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC |
4484 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
4486 for (i = 0; i < 100; i++) {
4487 if (RTL_R32(CSIAR) & CSIAR_FLAG) {
4488 value = RTL_R32(CSIDR);
4489 break;
4491 udelay(10);
4494 return value;
/*
 * Choose the CSI accessors for this chip: none for the pre-PCIe parts,
 * the NIC-function variant for VER_37/38 (8402/8411), standard otherwise.
 */
4497 static void __devinit rtl_init_csi_ops(struct rtl8169_private *tp)
4499 struct csi_ops *ops = &tp->csi_ops;
4501 switch (tp->mac_version) {
4502 case RTL_GIGA_MAC_VER_01:
4503 case RTL_GIGA_MAC_VER_02:
4504 case RTL_GIGA_MAC_VER_03:
4505 case RTL_GIGA_MAC_VER_04:
4506 case RTL_GIGA_MAC_VER_05:
4507 case RTL_GIGA_MAC_VER_06:
4508 case RTL_GIGA_MAC_VER_10:
4509 case RTL_GIGA_MAC_VER_11:
4510 case RTL_GIGA_MAC_VER_12:
4511 case RTL_GIGA_MAC_VER_13:
4512 case RTL_GIGA_MAC_VER_14:
4513 case RTL_GIGA_MAC_VER_15:
4514 case RTL_GIGA_MAC_VER_16:
4515 case RTL_GIGA_MAC_VER_17:
4516 ops->write = NULL;
4517 ops->read = NULL;
4518 break;
4520 case RTL_GIGA_MAC_VER_37:
4521 case RTL_GIGA_MAC_VER_38:
4522 ops->write = r8402_csi_write;
4523 ops->read = r8402_csi_read;
4524 break;
4526 default:
4527 ops->write = r8169_csi_write;
4528 ops->read = r8169_csi_read;
4529 break;
/*
 * One EPHY register patch.  rtl_ephy_init() interprets it as: clear the
 * bits in @mask at register @offset, then OR in @bits.
 */
4533 struct ephy_info {
4534 unsigned int offset;
4535 u16 mask;
4536 u16 bits;
4539 static void rtl_ephy_init(struct rtl8169_private *tp, const struct ephy_info *e,
4540 int len)
4542 u16 w;
4544 while (len-- > 0) {
4545 w = (rtl_ephy_read(tp, e->offset) & ~e->mask) | e->bits;
4546 rtl_ephy_write(tp, e->offset, w);
4547 e++;
/* Clear the CLKREQ enable bit in the PCIe Link Control register, if PCIe. */
4551 static void rtl_disable_clock_request(struct pci_dev *pdev)
4553 int cap = pci_pcie_cap(pdev);
4555 if (cap) {
4556 u16 ctl;
4558 pci_read_config_word(pdev, cap + PCI_EXP_LNKCTL, &ctl);
4559 ctl &= ~PCI_EXP_LNKCTL_CLKREQ_EN;
4560 pci_write_config_word(pdev, cap + PCI_EXP_LNKCTL, ctl);
/* Set the CLKREQ enable bit in the PCIe Link Control register, if PCIe. */
4564 static void rtl_enable_clock_request(struct pci_dev *pdev)
4566 int cap = pci_pcie_cap(pdev);
4568 if (cap) {
4569 u16 ctl;
4571 pci_read_config_word(pdev, cap + PCI_EXP_LNKCTL, &ctl);
4572 ctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
4573 pci_write_config_word(pdev, cap + PCI_EXP_LNKCTL, ctl);
/* CPlusCmd debug/test/forcing/ASF bits that must be cleared during 8168 init. */
4577 #define R8168_CPCMD_QUIRK_MASK (\
4578 EnableBist | \
4579 Mac_dbgo_oe | \
4580 Force_half_dup | \
4581 Force_rxflow_en | \
4582 Force_txflow_en | \
4583 Cxpl_dbg_sel | \
4584 ASF | \
4585 PktCntrDisable | \
4586 Mac_dbgo_sel)
/* 8168B bring-up: clear Beacon_en and the CPlusCmd quirk bits, tune PCIe TX. */
4588 static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
4590 void __iomem *ioaddr = tp->mmio_addr;
4591 struct pci_dev *pdev = tp->pci_dev;
4593 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4595 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
4597 rtl_tx_performance_tweak(pdev,
4598 (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
/* 8168B E/F revisions: base 8168bb setup plus TX size limit and Config4 bit 0. */
4601 static void rtl_hw_start_8168bef(struct rtl8169_private *tp)
4603 void __iomem *ioaddr = tp->mmio_addr;
4605 rtl_hw_start_8168bb(tp);
4607 RTL_W8(MaxTxPacketSize, TxPacketMax);
4609 RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
/* Common 8168CP tail: speed-down enable, no beacon, PCIe tweak, no CLKREQ. */
4612 static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
4614 void __iomem *ioaddr = tp->mmio_addr;
4615 struct pci_dev *pdev = tp->pci_dev;
4617 RTL_W8(Config1, RTL_R8(Config1) | Speed_down);
4619 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4621 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4623 rtl_disable_clock_request(pdev);
4625 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
/* 8168CP rev 1: CSI enable, EPHY patch table, then the common 8168CP tail. */
4628 static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
4630 static const struct ephy_info e_info_8168cp[] = {
4631 { 0x01, 0, 0x0001 },
4632 { 0x02, 0x0800, 0x1000 },
4633 { 0x03, 0, 0x0042 },
4634 { 0x06, 0x0080, 0x0000 },
4635 { 0x07, 0, 0x2000 }
4638 rtl_csi_access_enable_2(tp);
4640 rtl_ephy_init(tp, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));
4642 __rtl_hw_start_8168cp(tp);
/* 8168CP rev 2: CSI enable, no beacon, PCIe tweak, clear CPlusCmd quirks. */
4645 static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
4647 void __iomem *ioaddr = tp->mmio_addr;
4648 struct pci_dev *pdev = tp->pci_dev;
4650 rtl_csi_access_enable_2(tp);
4652 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4654 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4656 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
/* 8168CP rev 3: like rev 2 plus an undocumented DBG_REG write and TX size limit. */
4659 static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
4661 void __iomem *ioaddr = tp->mmio_addr;
4662 struct pci_dev *pdev = tp->pci_dev;
4664 rtl_csi_access_enable_2(tp);
4666 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4668 /* Magic. */
4669 RTL_W8(DBG_REG, 0x20);
4671 RTL_W8(MaxTxPacketSize, TxPacketMax);
4673 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4675 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
/* 8168C rev 1: CSI enable, NAK workaround in DBG_REG, EPHY patches, CP tail. */
4678 static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
4680 void __iomem *ioaddr = tp->mmio_addr;
4681 static const struct ephy_info e_info_8168c_1[] = {
4682 { 0x02, 0x0800, 0x1000 },
4683 { 0x03, 0, 0x0002 },
4684 { 0x06, 0x0080, 0x0000 }
4687 rtl_csi_access_enable_2(tp);
4689 RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);
4691 rtl_ephy_init(tp, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1));
4693 __rtl_hw_start_8168cp(tp);
/* 8168C rev 2: CSI enable, smaller EPHY patch table, common 8168CP tail. */
4696 static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
4698 static const struct ephy_info e_info_8168c_2[] = {
4699 { 0x01, 0, 0x0001 },
4700 { 0x03, 0x0400, 0x0220 }
4703 rtl_csi_access_enable_2(tp);
4705 rtl_ephy_init(tp, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));
4707 __rtl_hw_start_8168cp(tp);
/* 8168C rev 3: identical to rev 2. */
4710 static void rtl_hw_start_8168c_3(struct rtl8169_private *tp)
4712 rtl_hw_start_8168c_2(tp);
/* 8168C rev 4: CSI enable and the common 8168CP tail, no EPHY patches. */
4715 static void rtl_hw_start_8168c_4(struct rtl8169_private *tp)
4717 rtl_csi_access_enable_2(tp);
4719 __rtl_hw_start_8168cp(tp);
/* 8168D: CSI enable, CLKREQ off, TX size limit, PCIe tweak, CPlusCmd quirks. */
4722 static void rtl_hw_start_8168d(struct rtl8169_private *tp)
4724 void __iomem *ioaddr = tp->mmio_addr;
4725 struct pci_dev *pdev = tp->pci_dev;
4727 rtl_csi_access_enable_2(tp);
4729 rtl_disable_clock_request(pdev);
4731 RTL_W8(MaxTxPacketSize, TxPacketMax);
4733 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4735 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
/* 8168DP: CSI enable (variant 1), PCIe tweak, TX size limit, CLKREQ off. */
4738 static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
4740 void __iomem *ioaddr = tp->mmio_addr;
4741 struct pci_dev *pdev = tp->pci_dev;
4743 rtl_csi_access_enable_1(tp);
4745 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4747 RTL_W8(MaxTxPacketSize, TxPacketMax);
4749 rtl_disable_clock_request(pdev);
4752 static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
4754 void __iomem *ioaddr = tp->mmio_addr;
4755 struct pci_dev *pdev = tp->pci_dev;
4756 static const struct ephy_info e_info_8168d_4[] = {
4757 { 0x0b, ~0, 0x48 },
4758 { 0x19, 0x20, 0x50 },
4759 { 0x0c, ~0, 0x20 }
4761 int i;
4763 rtl_csi_access_enable_1(tp);
4765 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4767 RTL_W8(MaxTxPacketSize, TxPacketMax);
4769 for (i = 0; i < ARRAY_SIZE(e_info_8168d_4); i++) {
4770 const struct ephy_info *e = e_info_8168d_4 + i;
4771 u16 w;
4773 w = rtl_ephy_read(tp, e->offset);
4774 rtl_ephy_write(tp, 0x03, (w & e->mask) | e->bits);
4777 rtl_enable_clock_request(pdev);
/*
 * 8168E rev 1: CSI enable, large EPHY patch table, PCIe tweak, TX size
 * limit, CLKREQ off, TX FIFO pointer reset via MISC, SPI disable.
 */
4780 static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
4782 void __iomem *ioaddr = tp->mmio_addr;
4783 struct pci_dev *pdev = tp->pci_dev;
4784 static const struct ephy_info e_info_8168e_1[] = {
4785 { 0x00, 0x0200, 0x0100 },
4786 { 0x00, 0x0000, 0x0004 },
4787 { 0x06, 0x0002, 0x0001 },
4788 { 0x06, 0x0000, 0x0030 },
4789 { 0x07, 0x0000, 0x2000 },
4790 { 0x00, 0x0000, 0x0020 },
4791 { 0x03, 0x5800, 0x2000 },
4792 { 0x03, 0x0000, 0x0001 },
4793 { 0x01, 0x0800, 0x1000 },
4794 { 0x07, 0x0000, 0x4000 },
4795 { 0x1e, 0x0000, 0x2000 },
4796 { 0x19, 0xffff, 0xfe6c },
4797 { 0x0a, 0x0000, 0x0040 }
4800 rtl_csi_access_enable_2(tp);
4802 rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));
4804 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4806 RTL_W8(MaxTxPacketSize, TxPacketMax);
4808 rtl_disable_clock_request(pdev);
4810 /* Reset tx FIFO pointer */
4811 RTL_W32(MISC, RTL_R32(MISC) | TXPLA_RST);
4812 RTL_W32(MISC, RTL_R32(MISC) & ~TXPLA_RST);
4814 RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
/*
 * 8168E rev 2: CSI enable, EPHY patches, PCIe tweak, then a series of ERI
 * (extended register interface) writes, early-TX size, CLKREQ off, TX
 * auto-FIFO, EEE LED frequency adjust and power-management bits.
 */
4817 static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
4819 void __iomem *ioaddr = tp->mmio_addr;
4820 struct pci_dev *pdev = tp->pci_dev;
4821 static const struct ephy_info e_info_8168e_2[] = {
4822 { 0x09, 0x0000, 0x0080 },
4823 { 0x19, 0x0000, 0x0224 }
4826 rtl_csi_access_enable_1(tp);
4828 rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));
4830 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4832 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
4833 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
4834 rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
4835 rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
4836 rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
4837 rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC);
4838 rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
4839 rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);
4841 RTL_W8(MaxTxPacketSize, EarlySize);
4843 rtl_disable_clock_request(pdev);
4845 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
4846 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
4848 /* Adjust EEE LED frequency */
4849 RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
4851 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
4852 RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
4853 RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
/*
 * Common 8168F bring-up: CSI enable, PCIe tweak, a block of ERI writes,
 * early-TX size, CLKREQ off, TX auto-FIFO and power-management bits.
 * Shared by the 8168F rev 1 and 8411 variants below.
 */
4856 static void rtl_hw_start_8168f(struct rtl8169_private *tp)
4858 void __iomem *ioaddr = tp->mmio_addr;
4859 struct pci_dev *pdev = tp->pci_dev;
4861 rtl_csi_access_enable_2(tp);
4863 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4865 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
4866 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
4867 rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
4868 rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
4869 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
4870 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
4871 rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
4872 rtl_w1w0_eri(tp, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
4873 rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
4874 rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC);
4876 RTL_W8(MaxTxPacketSize, EarlySize);
4878 rtl_disable_clock_request(pdev);
4880 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
4881 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
4882 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
4883 RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
4884 RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
/*
 * 8168F (MAC versions 35/36) start: common 8168f init plus chip-specific
 * EPHY fixups, an extra ERI tweak and the EEE LED frequency adjustment.
 */
static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	/* { reg, bits to clear, bits to set } per rtl_ephy_init(). */
	static const struct ephy_info e_info_8168f_1[] = {
		{ 0x06, 0x00c0, 0x0020 },
		{ 0x08, 0x0001, 0x0002 },
		{ 0x09, 0x0000, 0x0080 },
		{ 0x19, 0x0000, 0x0224 }
	};

	rtl_hw_start_8168f(tp);

	rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));

	rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);

	/* Adjust EEE LED frequency */
	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
}
/*
 * 8411 (MAC version 38) start: reuses the 8168f common init with its own
 * EPHY table (note: the local array keeps the historical e_info_8168f_1
 * name) and a different 0x0d4 ERI mask than the 8168F proper.
 */
static void rtl_hw_start_8411(struct rtl8169_private *tp)
{
	static const struct ephy_info e_info_8168f_1[] = {
		{ 0x06, 0x00c0, 0x0020 },
		{ 0x0f, 0xffff, 0x5200 },
		{ 0x1e, 0x0000, 0x4000 },
		{ 0x19, 0x0000, 0x0224 }
	};

	rtl_hw_start_8168f(tp);

	rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));

	rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000, ERIAR_EXGMAC);
}
/*
 * Top-level hardware start for the 8168 family: programs the common MAC
 * registers, then dispatches on tp->mac_version to the chip-specific
 * start routine, and finally enables Tx/Rx and relocks the config space.
 */
static void rtl_hw_start_8168(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	/* Unlock Config registers for the duration of the setup. */
	RTL_W8(Cfg9346, Cfg9346_Unlock);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_set_rx_max_size(ioaddr, rx_buf_sz);

	tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1;

	RTL_W16(CPlusCmd, tp->cp_cmd);

	RTL_W16(IntrMitigate, 0x5151);

	/* Work around for RxFIFO overflow. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
		tp->event_slow |= RxFIFOOver | PCSTimeout;
		tp->event_slow &= ~RxOverflow;
	}

	rtl_set_rx_tx_desc_registers(tp, ioaddr);

	rtl_set_rx_mode(dev);

	RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
		(InterFrameGap << TxInterFrameGapShift));

	/* Dummy read; presumably flushes the posted writes above — keep. */
	RTL_R8(IntrMask);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_11:
		rtl_hw_start_8168bb(tp);
		break;

	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
		rtl_hw_start_8168bef(tp);
		break;

	case RTL_GIGA_MAC_VER_18:
		rtl_hw_start_8168cp_1(tp);
		break;

	case RTL_GIGA_MAC_VER_19:
		rtl_hw_start_8168c_1(tp);
		break;

	case RTL_GIGA_MAC_VER_20:
		rtl_hw_start_8168c_2(tp);
		break;

	case RTL_GIGA_MAC_VER_21:
		rtl_hw_start_8168c_3(tp);
		break;

	case RTL_GIGA_MAC_VER_22:
		rtl_hw_start_8168c_4(tp);
		break;

	case RTL_GIGA_MAC_VER_23:
		rtl_hw_start_8168cp_2(tp);
		break;

	case RTL_GIGA_MAC_VER_24:
		rtl_hw_start_8168cp_3(tp);
		break;

	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
		rtl_hw_start_8168d(tp);
		break;

	case RTL_GIGA_MAC_VER_28:
		rtl_hw_start_8168d_4(tp);
		break;

	case RTL_GIGA_MAC_VER_31:
		rtl_hw_start_8168dp(tp);
		break;

	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
		rtl_hw_start_8168e_1(tp);
		break;
	case RTL_GIGA_MAC_VER_34:
		rtl_hw_start_8168e_2(tp);
		break;

	case RTL_GIGA_MAC_VER_35:
	case RTL_GIGA_MAC_VER_36:
		rtl_hw_start_8168f_1(tp);
		break;

	case RTL_GIGA_MAC_VER_38:
		rtl_hw_start_8411(tp);
		break;

	default:
		/* Unrecognized chip: common setup was done, no per-chip init. */
		printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
		       dev->name, tp->mac_version);
		break;
	}

	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);

	RTL_W8(Cfg9346, Cfg9346_Lock);

	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
}
/*
 * CPlusCmd bits that rtl_hw_start_8101() clears from tp->cp_cmd before
 * programming the register (BIST, MAC debug and forced duplex/flow-control
 * test modes, by name).
 */
#define R810X_CPCMD_QUIRK_MASK (\
	EnableBist | \
	Mac_dbgo_oe | \
	Force_half_dup | \
	Force_rxflow_en | \
	Force_txflow_en | \
	Cxpl_dbg_sel | \
	ASF | \
	PktCntrDisable | \
	Mac_dbgo_sel)
/*
 * 8102E (MAC version 07) start: NAK fix, PCIe tweak, Config1/3 setup and
 * the vendor EPHY write sequence (register 0x01 is deliberately written
 * three times — keep the order).
 */
static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	static const struct ephy_info e_info_8102e_1[] = {
		{ 0x01,	0, 0x6e65 },
		{ 0x02,	0, 0x091f },
		{ 0x03,	0, 0xc2f9 },
		{ 0x06,	0, 0xafb5 },
		{ 0x07,	0, 0x0e00 },
		{ 0x19,	0, 0xec80 },
		{ 0x01,	0, 0x2e65 },
		{ 0x01,	0, 0x6e65 }
	};
	u8 cfg1;

	rtl_csi_access_enable_2(tp);

	RTL_W8(DBG_REG, FIX_NAK_1);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(Config1,
	       LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable);
	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	/* If both LED bits stuck set, drop LEDS0 again. */
	cfg1 = RTL_R8(Config1);
	if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
		RTL_W8(Config1, cfg1 & ~LEDS0);

	rtl_ephy_init(tp, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
}
/*
 * 8102E (MAC version 09) start: minimal variant — CSI access, PCIe read
 * request tweak, Config1/Config3 programming only.
 */
static void rtl_hw_start_8102e_2(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_2(tp);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable);
	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
}
/* 8102E (MAC version 08): 8102e_2 init plus one extra EPHY write. */
static void rtl_hw_start_8102e_3(struct rtl8169_private *tp)
{
	rtl_hw_start_8102e_2(tp);

	rtl_ephy_write(tp, 0x03, 0xc2f9);
}
/*
 * 8105E (MAC version 29) start: ASPM exit forcing, early tally counter
 * disable, MCU flags, then the chip EPHY fixups.
 */
static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	static const struct ephy_info e_info_8105e_1[] = {
		{ 0x07,	0, 0x4000 },
		{ 0x19,	0, 0x0200 },
		{ 0x19,	0, 0x0020 },
		{ 0x1e,	0, 0x2000 },
		{ 0x03,	0, 0x0001 },
		{ 0x19,	0, 0x0100 },
		{ 0x19,	0, 0x0004 },
		{ 0x0a,	0, 0x0020 }
	};

	/* Force LAN exit from ASPM if Rx/Tx are not idle */
	RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);

	/* Disable Early Tally Counter */
	RTL_W32(FuncEvent, RTL_R32(FuncEvent) & ~0x010000);

	RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);

	rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
}
/* 8105E (MAC version 30): 8105e_1 init plus EPHY 0x1e bit 15 set. */
static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
{
	rtl_hw_start_8105e_1(tp);
	rtl_ephy_write(tp, 0x1e, rtl_ephy_read(tp, 0x1e) | 0x8000);
}
/*
 * 8402 (MAC version 37) start: CSI access, ASPM exit forcing, auto-FIFO
 * Tx config, EPHY fixups and the vendor ERI sequence (including the 0xdc
 * bit0 clear-then-set toggle).
 */
static void rtl_hw_start_8402(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	static const struct ephy_info e_info_8402[] = {
		{ 0x19,	0xffff, 0xff64 },
		{ 0x1e,	0, 0x4000 }
	};

	rtl_csi_access_enable_2(tp);

	/* Force LAN exit from ASPM if Rx/Tx are not idle */
	RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);

	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);

	rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402));

	rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);

	rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC);
}
/*
 * 8106E (MAC version 39) start: force ASPM exit, disable LAN/early tally
 * via MISC, set MCU flags and clear PFM.
 */
static void rtl_hw_start_8106(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	/* Force LAN exit from ASPM if Rx/Tx are not idle */
	RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);

	RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN);
	RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
	RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
}
/*
 * Top-level hardware start for the 810x/8105/8402/8106 family: applies
 * family-wide quirks, dispatches to the chip-specific start routine, then
 * programs the common MAC registers and enables Tx/Rx.
 */
static void rtl_hw_start_8101(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	/* Newer chips in this family do not use the RxFIFOOver event. */
	if (tp->mac_version >= RTL_GIGA_MAC_VER_30)
		tp->event_slow &= ~RxFIFOOver;

	/* Ver 13/16: disable PCIe snooping via the Device Control register. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_16) {
		int cap = pci_pcie_cap(pdev);

		if (cap) {
			pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL,
					      PCI_EXP_DEVCTL_NOSNOOP_EN);
		}
	}

	RTL_W8(Cfg9346, Cfg9346_Unlock);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_07:
		rtl_hw_start_8102e_1(tp);
		break;

	case RTL_GIGA_MAC_VER_08:
		rtl_hw_start_8102e_3(tp);
		break;

	case RTL_GIGA_MAC_VER_09:
		rtl_hw_start_8102e_2(tp);
		break;

	case RTL_GIGA_MAC_VER_29:
		rtl_hw_start_8105e_1(tp);
		break;
	case RTL_GIGA_MAC_VER_30:
		rtl_hw_start_8105e_2(tp);
		break;

	case RTL_GIGA_MAC_VER_37:
		rtl_hw_start_8402(tp);
		break;

	case RTL_GIGA_MAC_VER_39:
		rtl_hw_start_8106(tp);
		break;
	}

	RTL_W8(Cfg9346, Cfg9346_Lock);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_set_rx_max_size(ioaddr, rx_buf_sz);

	tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
	RTL_W16(CPlusCmd, tp->cp_cmd);

	RTL_W16(IntrMitigate, 0x0000);

	rtl_set_rx_tx_desc_registers(tp, ioaddr);

	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
	rtl_set_rx_tx_config_registers(tp);

	/* Dummy read; presumably flushes the posted writes above — keep. */
	RTL_R8(IntrMask);

	rtl_set_rx_mode(dev);

	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
}
5247 static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
5249 struct rtl8169_private *tp = netdev_priv(dev);
5251 if (new_mtu < ETH_ZLEN ||
5252 new_mtu > rtl_chip_infos[tp->mac_version].jumbo_max)
5253 return -EINVAL;
5255 if (new_mtu > ETH_DATA_LEN)
5256 rtl_hw_jumbo_enable(tp);
5257 else
5258 rtl_hw_jumbo_disable(tp);
5260 dev->mtu = new_mtu;
5261 netdev_update_features(dev);
5263 return 0;
/*
 * Poison an Rx descriptor so the NIC will not DMA into it: a recognizable
 * bogus address and cleared DescOwn/size bits.
 */
static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
{
	desc->addr = cpu_to_le64(0x0badbadbadbadbadull);
	desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask);
}
/*
 * Unmap and free one Rx buffer, NULL out the caller's slot and poison the
 * matching descriptor so the hardware cannot reuse it.
 */
static void rtl8169_free_rx_databuff(struct rtl8169_private *tp,
				     void **data_buff, struct RxDesc *desc)
{
	dma_unmap_single(&tp->pci_dev->dev, le64_to_cpu(desc->addr), rx_buf_sz,
			 DMA_FROM_DEVICE);

	kfree(*data_buff);
	*data_buff = NULL;
	rtl8169_make_unusable_by_asic(desc);
}
/*
 * Hand an Rx descriptor (back) to the NIC: set DescOwn and the buffer
 * size while preserving the RingEnd flag.
 */
static inline void rtl8169_mark_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
{
	u32 eor = le32_to_cpu(desc->opts1) & RingEnd;

	desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz);
}
/*
 * Publish a freshly mapped buffer to the NIC.  The wmb() orders the
 * address store before the DescOwn store in rtl8169_mark_to_asic() so the
 * hardware never sees ownership without a valid address — do not reorder.
 */
static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
				       u32 rx_buf_sz)
{
	desc->addr = cpu_to_le64(mapping);
	wmb();
	rtl8169_mark_to_asic(desc, rx_buf_sz);
}
/* Round a buffer pointer up to the next 16 byte boundary. */
static inline void *rtl8169_align(void *data)
{
	uintptr_t addr = (uintptr_t)data;

	return (void *)((addr + 15) & ~(uintptr_t)15);
}
/*
 * Allocate and DMA-map one Rx buffer on the device's NUMA node and attach
 * it to @desc.  Returns the buffer pointer on success, NULL on failure.
 *
 * NOTE(review): the declared return type is struct sk_buff * but the
 * function actually returns the raw kmalloc'd buffer; callers (see
 * rtl8169_rx_fill()) store it as void *.  Confirm before relying on it
 * being an skb.
 */
static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
					     struct RxDesc *desc)
{
	void *data;
	dma_addr_t mapping;
	struct device *d = &tp->pci_dev->dev;
	struct net_device *dev = tp->dev;
	int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;

	data = kmalloc_node(rx_buf_sz, GFP_KERNEL, node);
	if (!data)
		return NULL;

	/* Hardware wants 16 byte alignment; over-allocate and align by
	 * hand if kmalloc did not already give an aligned buffer. */
	if (rtl8169_align(data) != data) {
		kfree(data);
		data = kmalloc_node(rx_buf_sz + 15, GFP_KERNEL, node);
		if (!data)
			return NULL;
	}

	mapping = dma_map_single(d, rtl8169_align(data), rx_buf_sz,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(d, mapping))) {
		if (net_ratelimit())
			netif_err(tp, drv, tp->dev, "Failed to map RX DMA!\n");
		goto err_out;
	}

	rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
	return data;

err_out:
	kfree(data);
	return NULL;
}
5339 static void rtl8169_rx_clear(struct rtl8169_private *tp)
5341 unsigned int i;
5343 for (i = 0; i < NUM_RX_DESC; i++) {
5344 if (tp->Rx_databuff[i]) {
5345 rtl8169_free_rx_databuff(tp, tp->Rx_databuff + i,
5346 tp->RxDescArray + i);
/* Set RingEnd on a descriptor so the NIC wraps back to the ring start. */
static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc)
{
	desc->opts1 |= cpu_to_le32(RingEnd);
}
/*
 * Populate every empty Rx ring slot with a mapped buffer and terminate
 * the ring.  On any allocation failure the whole ring is torn down and
 * -ENOMEM is returned (all-or-nothing).
 */
static int rtl8169_rx_fill(struct rtl8169_private *tp)
{
	unsigned int i;

	for (i = 0; i < NUM_RX_DESC; i++) {
		void *data;

		/* Slot already populated (e.g. refill after partial clear). */
		if (tp->Rx_databuff[i])
			continue;

		data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i);
		if (!data) {
			rtl8169_make_unusable_by_asic(tp->RxDescArray + i);
			goto err_out;
		}
		tp->Rx_databuff[i] = data;
	}

	rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
	return 0;

err_out:
	rtl8169_rx_clear(tp);
	return -ENOMEM;
}
/*
 * Reset ring indexes, zero the Tx/Rx bookkeeping arrays and fill the Rx
 * ring.  Returns 0 or the rtl8169_rx_fill() error.
 */
static int rtl8169_init_ring(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_init_ring_indexes(tp);

	memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
	memset(tp->Rx_databuff, 0x0, NUM_RX_DESC * sizeof(void *));

	return rtl8169_rx_fill(tp);
}
/*
 * Unmap one Tx segment and scrub both the hardware descriptor and the
 * driver-side length so the slot reads as free.
 */
static void rtl8169_unmap_tx_skb(struct device *d, struct ring_info *tx_skb,
				 struct TxDesc *desc)
{
	unsigned int len = tx_skb->len;

	dma_unmap_single(d, le64_to_cpu(desc->addr), len, DMA_TO_DEVICE);

	desc->opts1 = 0x00;
	desc->opts2 = 0x00;
	desc->addr = 0x00;
	tx_skb->len = 0;
}
/*
 * Drop @n Tx ring entries beginning at @start (modulo ring size):
 * unmap each occupied slot and free the skb on the slot that owns it,
 * counting each freed packet as dropped.
 */
static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
				   unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		unsigned int entry = (start + i) % NUM_TX_DESC;
		struct ring_info *tx_skb = tp->tx_skb + entry;
		unsigned int len = tx_skb->len;

		/* len == 0 means the slot is already free. */
		if (len) {
			struct sk_buff *skb = tx_skb->skb;

			rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
					     tp->TxDescArray + entry);
			/* Only the slot holding the skb pointer frees it;
			 * fragment slots have skb == NULL. */
			if (skb) {
				tp->dev->stats.tx_dropped++;
				dev_kfree_skb(skb);
				tx_skb->skb = NULL;
			}
		}
	}
}
/* Drop the whole Tx ring, reset its indexes and the BQL queue state. */
static void rtl8169_tx_clear(struct rtl8169_private *tp)
{
	rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC);
	tp->cur_tx = tp->dirty_tx = 0;
	netdev_reset_queue(tp->dev);
}
/*
 * Full datapath reset (workqueue context): quiesce NAPI and the queue,
 * reset the chip, return all Rx descriptors to the hardware, drop pending
 * Tx, then restart.  The step order matters — the hardware must be reset
 * before descriptors are recycled.
 */
static void rtl_reset_work(struct rtl8169_private *tp)
{
	struct net_device *dev = tp->dev;
	int i;

	napi_disable(&tp->napi);
	netif_stop_queue(dev);
	/* Let in-flight xmit/irq paths drain before touching the rings. */
	synchronize_sched();

	rtl8169_hw_reset(tp);

	for (i = 0; i < NUM_RX_DESC; i++)
		rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz);

	rtl8169_tx_clear(tp);
	rtl8169_init_ring_indexes(tp);

	napi_enable(&tp->napi);
	rtl_hw_start(dev);
	netif_wake_queue(dev);
	rtl8169_check_link_status(dev, tp, tp->mmio_addr);
}
/* ndo_tx_timeout handler: defer a full reset to the workqueue. */
static void rtl8169_tx_timeout(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
/*
 * Map and describe every skb fragment into the Tx ring slots after
 * tp->cur_tx.  Returns the number of fragments queued, or -EIO after
 * rolling back already-mapped fragments on a DMA mapping failure.
 * The last fragment's slot records the skb pointer and LastFrag.
 */
static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
			      u32 *opts)
{
	struct skb_shared_info *info = skb_shinfo(skb);
	unsigned int cur_frag, entry;
	struct TxDesc * uninitialized_var(txd);
	struct device *d = &tp->pci_dev->dev;

	entry = tp->cur_tx;
	for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
		const skb_frag_t *frag = info->frags + cur_frag;
		dma_addr_t mapping;
		u32 status, len;
		void *addr;

		entry = (entry + 1) % NUM_TX_DESC;

		txd = tp->TxDescArray + entry;
		len = skb_frag_size(frag);
		addr = skb_frag_address(frag);
		mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(d, mapping))) {
			if (net_ratelimit())
				netif_err(tp, drv, tp->dev,
					  "Failed to map TX fragments DMA!\n");
			goto err_out;
		}

		/* Anti gcc 2.95.3 bugware (sic) */
		/* RingEnd is set only on the last ring slot. */
		status = opts[0] | len |
			 (RingEnd * !((entry + 1) % NUM_TX_DESC));

		txd->opts1 = cpu_to_le32(status);
		txd->opts2 = cpu_to_le32(opts[1]);
		txd->addr = cpu_to_le64(mapping);

		tp->tx_skb[entry].len = len;
	}

	if (cur_frag) {
		tp->tx_skb[entry].skb = skb;
		txd->opts1 |= cpu_to_le32(LastFrag);
	}

	return cur_frag;

err_out:
	/* Unwind the fragments mapped so far (slots cur_tx+1 .. entry). */
	rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag);
	return -EIO;
}
/*
 * Fill the TSO/checksum-offload bits of the Tx descriptor opts words.
 * The opts word that carries them (index 0 or 1) depends on the chip's
 * descriptor version via info->opts_offset.
 */
static inline void rtl8169_tso_csum(struct rtl8169_private *tp,
				    struct sk_buff *skb, u32 *opts)
{
	const struct rtl_tx_desc_info *info = tx_desc_info + tp->txd_version;
	u32 mss = skb_shinfo(skb)->gso_size;
	int offset = info->opts_offset;

	if (mss) {
		/* Large send: hardware segments using the clamped MSS. */
		opts[0] |= TD_LSO;
		opts[offset] |= min(mss, TD_MSS_MAX) << info->mss_shift;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		const struct iphdr *ip = ip_hdr(skb);

		if (ip->protocol == IPPROTO_TCP)
			opts[offset] |= info->checksum.tcp;
		else if (ip->protocol == IPPROTO_UDP)
			opts[offset] |= info->checksum.udp;
		else
			WARN_ON_ONCE(1);
	}
}
/*
 * ndo_start_xmit: map the skb head, queue any fragments, then publish the
 * first descriptor to the NIC by setting DescOwn *last* (after a wmb())
 * and kick TxPoll.  The barrier choreography against rtl_tx() is spelled
 * out inline — do not reorder statements around the wmb()/smp_mb() calls.
 */
static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	unsigned int entry = tp->cur_tx % NUM_TX_DESC;
	struct TxDesc *txd = tp->TxDescArray + entry;
	void __iomem *ioaddr = tp->mmio_addr;
	struct device *d = &tp->pci_dev->dev;
	dma_addr_t mapping;
	u32 status, len;
	u32 opts[2];
	int frags;

	if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
		netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
		goto err_stop_0;
	}

	if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
		goto err_stop_0;

	len = skb_headlen(skb);
	mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(d, mapping))) {
		if (net_ratelimit())
			netif_err(tp, drv, dev, "Failed to map TX DMA!\n");
		goto err_dma_0;
	}

	tp->tx_skb[entry].len = len;
	txd->addr = cpu_to_le64(mapping);

	opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(tp, skb));
	opts[0] = DescOwn;

	rtl8169_tso_csum(tp, skb, opts);

	frags = rtl8169_xmit_frags(tp, skb, opts);
	if (frags < 0)
		goto err_dma_1;
	else if (frags)
		opts[0] |= FirstFrag;
	else {
		opts[0] |= FirstFrag | LastFrag;
		/* Single-segment packet: this slot owns the skb. */
		tp->tx_skb[entry].skb = skb;
	}

	txd->opts2 = cpu_to_le32(opts[1]);

	netdev_sent_queue(dev, skb->len);

	skb_tx_timestamp(skb);

	wmb();

	/* Anti gcc 2.95.3 bugware (sic) */
	status = opts[0] | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
	txd->opts1 = cpu_to_le32(status);

	tp->cur_tx += frags + 1;

	wmb();

	RTL_W8(TxPoll, NPQ);

	mmiowb();

	if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
		/* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
		 * not miss a ring update when it notices a stopped queue.
		 */
		smp_wmb();
		netif_stop_queue(dev);
		/* Sync with rtl_tx:
		 * - publish queue status and cur_tx ring index (write barrier)
		 * - refresh dirty_tx ring index (read barrier).
		 * May the current thread have a pessimistic view of the ring
		 * status and forget to wake up queue, a racing rtl_tx thread
		 * can't.
		 */
		smp_mb();
		if (TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS))
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;

err_dma_1:
	rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
err_dma_0:
	dev_kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;

err_stop_0:
	netif_stop_queue(dev);
	dev->stats.tx_dropped++;
	return NETDEV_TX_BUSY;
}
/*
 * Handle a PCI system error reported by the chip: log the PCI command and
 * status words, rewrite them to clear the error bits, optionally disable
 * DAC (64-bit addressing) on an early failure, then reset the chip and
 * schedule the reset task.
 */
static void rtl8169_pcierr_interrupt(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;
	u16 pci_status, pci_cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
	pci_read_config_word(pdev, PCI_STATUS, &pci_status);

	netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status = 0x%04x)\n",
		  pci_cmd, pci_status);

	/*
	 * The recovery sequence below admits a very elaborated explanation:
	 * - it seems to work;
	 * - I did not see what else could be done;
	 * - it makes iop3xx happy.
	 *
	 * Feel free to adjust to your needs.
	 */
	if (pdev->broken_parity_status)
		pci_cmd &= ~PCI_COMMAND_PARITY;
	else
		pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY;

	pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);

	/* Write-one-to-clear the sticky error bits in PCI_STATUS. */
	pci_write_config_word(pdev, PCI_STATUS,
		pci_status & (PCI_STATUS_DETECTED_PARITY |
		PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT |
		PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));

	/* The infamous DAC f*ckup only happens at boot time */
	if ((tp->cp_cmd & PCIDAC) && !tp->dirty_rx && !tp->cur_rx) {
		void __iomem *ioaddr = tp->mmio_addr;

		netif_info(tp, intr, dev, "disabling PCI DAC\n");
		tp->cp_cmd &= ~PCIDAC;
		RTL_W16(CPlusCmd, tp->cp_cmd);
		dev->features &= ~NETIF_F_HIGHDMA;
	}

	rtl8169_hw_reset(tp);

	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
/* Per-poll Tx completion tally (packets/bytes reclaimed by rtl_tx()). */
struct rtl_txc {
	int packets;
	int bytes;
};
/*
 * Reclaim completed Tx descriptors: walk from dirty_tx to cur_tx, free
 * skbs whose LastFrag slot the hardware has released (DescOwn clear),
 * update stats and BQL, then wake the queue if xmit stopped it.  The
 * barrier pairing with rtl8169_start_xmit() is documented inline.
 */
static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
{
	struct rtl8169_stats *tx_stats = &tp->tx_stats;
	unsigned int dirty_tx, tx_left;
	struct rtl_txc txc = { 0, 0 };

	dirty_tx = tp->dirty_tx;
	/* Read dirty_tx before cur_tx (pairs with xmit's publication). */
	smp_rmb();
	tx_left = tp->cur_tx - dirty_tx;

	while (tx_left > 0) {
		unsigned int entry = dirty_tx % NUM_TX_DESC;
		struct ring_info *tx_skb = tp->tx_skb + entry;
		u32 status;

		rmb();
		status = le32_to_cpu(tp->TxDescArray[entry].opts1);
		if (status & DescOwn)
			break;

		rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
				     tp->TxDescArray + entry);
		/* Only the LastFrag slot carries the skb pointer. */
		if (status & LastFrag) {
			struct sk_buff *skb = tx_skb->skb;

			txc.packets++;
			txc.bytes += skb->len;
			dev_kfree_skb(skb);
			tx_skb->skb = NULL;
		}
		dirty_tx++;
		tx_left--;
	}

	u64_stats_update_begin(&tx_stats->syncp);
	tx_stats->packets += txc.packets;
	tx_stats->bytes += txc.bytes;
	u64_stats_update_end(&tx_stats->syncp);

	netdev_completed_queue(dev, txc.packets, txc.bytes);

	if (tp->dirty_tx != dirty_tx) {
		tp->dirty_tx = dirty_tx;
		/* Sync with rtl8169_start_xmit:
		 * - publish dirty_tx ring index (write barrier)
		 * - refresh cur_tx ring index and queue status (read barrier)
		 * May the current thread miss the stopped queue condition,
		 * a racing xmit thread can only have a right view of the
		 * ring status.
		 */
		smp_mb();
		if (netif_queue_stopped(dev) &&
		    TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
			netif_wake_queue(dev);
		}
		/*
		 * 8168 hack: TxPoll requests are lost when the Tx packets are
		 * too close. Let's kick an extra TxPoll request when a burst
		 * of start_xmit activity is detected (if it is not detected,
		 * it is slow enough). -- FR
		 */
		if (tp->cur_tx != dirty_tx) {
			void __iomem *ioaddr = tp->mmio_addr;

			RTL_W8(TxPoll, NPQ);
		}
	}
}
5762 static inline int rtl8169_fragmented_frame(u32 status)
5764 return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag);
5767 static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
5769 u32 status = opts1 & RxProtoMask;
5771 if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
5772 ((status == RxProtoUDP) && !(opts1 & UDPFail)))
5773 skb->ip_summed = CHECKSUM_UNNECESSARY;
5774 else
5775 skb_checksum_none_assert(skb);
/*
 * Copy a received frame out of the DMA buffer into a freshly allocated
 * skb (copy-break path: the DMA buffer stays in the ring).  The buffer is
 * synced to the CPU for the copy and back to the device afterwards.
 * Returns NULL if skb allocation fails.
 */
static struct sk_buff *rtl8169_try_rx_copy(void *data,
					   struct rtl8169_private *tp,
					   int pkt_size,
					   dma_addr_t addr)
{
	struct sk_buff *skb;
	struct device *d = &tp->pci_dev->dev;

	data = rtl8169_align(data);
	dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE);
	prefetch(data);
	skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
	if (skb)
		memcpy(skb->data, data, pkt_size);
	dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);

	return skb;
}
/*
 * NAPI Rx path: walk up to @budget descriptors the hardware has released,
 * account errors, copy good frames into skbs and feed them to GRO, and
 * return every descriptor to the NIC.  Returns the number of descriptors
 * consumed.
 */
static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget)
{
	unsigned int cur_rx, rx_left;
	unsigned int count;

	cur_rx = tp->cur_rx;
	rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
	rx_left = min(rx_left, budget);

	for (; rx_left > 0; rx_left--, cur_rx++) {
		unsigned int entry = cur_rx % NUM_RX_DESC;
		struct RxDesc *desc = tp->RxDescArray + entry;
		u32 status;

		/* Read opts1 after the ownership check data is visible. */
		rmb();
		status = le32_to_cpu(desc->opts1) & tp->opts1_mask;

		if (status & DescOwn)
			break;
		if (unlikely(status & RxRES)) {
			netif_info(tp, rx_err, dev, "Rx ERROR. status = %08x\n",
				   status);
			dev->stats.rx_errors++;
			if (status & (RxRWT | RxRUNT))
				dev->stats.rx_length_errors++;
			if (status & RxCRC)
				dev->stats.rx_crc_errors++;
			if (status & RxFOVF) {
				rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
				dev->stats.rx_fifo_errors++;
			}
			/* RXALL: deliver runt/CRC-error frames anyway. */
			if ((status & (RxRUNT | RxCRC)) &&
			    !(status & (RxRWT | RxFOVF)) &&
			    (dev->features & NETIF_F_RXALL))
				goto process_pkt;

			rtl8169_mark_to_asic(desc, rx_buf_sz);
		} else {
			struct sk_buff *skb;
			dma_addr_t addr;
			int pkt_size;

process_pkt:
			addr = le64_to_cpu(desc->addr);
			/* Strip the 4-byte FCS unless the user asked for it. */
			if (likely(!(dev->features & NETIF_F_RXFCS)))
				pkt_size = (status & 0x00003fff) - 4;
			else
				pkt_size = status & 0x00003fff;

			/*
			 * The driver does not support incoming fragmented
			 * frames. They are seen as a symptom of over-mtu
			 * sized frames.
			 */
			if (unlikely(rtl8169_fragmented_frame(status))) {
				dev->stats.rx_dropped++;
				dev->stats.rx_length_errors++;
				rtl8169_mark_to_asic(desc, rx_buf_sz);
				continue;
			}

			skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry],
						  tp, pkt_size, addr);
			rtl8169_mark_to_asic(desc, rx_buf_sz);
			if (!skb) {
				dev->stats.rx_dropped++;
				continue;
			}

			rtl8169_rx_csum(skb, status);
			skb_put(skb, pkt_size);
			skb->protocol = eth_type_trans(skb, dev);

			rtl8169_rx_vlan_tag(desc, skb);

			napi_gro_receive(&tp->napi, skb);

			u64_stats_update_begin(&tp->rx_stats.syncp);
			tp->rx_stats.packets++;
			tp->rx_stats.bytes += pkt_size;
			u64_stats_update_end(&tp->rx_stats.syncp);
		}

		/* Work around for AMD plateform. */
		if ((desc->opts2 & cpu_to_le32(0xfffe000)) &&
		    (tp->mac_version == RTL_GIGA_MAC_VER_05)) {
			desc->opts2 = 0;
			cur_rx++;
		}
	}

	count = cur_rx - tp->cur_rx;
	tp->cur_rx = cur_rx;

	tp->dirty_rx += count;

	return count;
}
/*
 * Hard IRQ handler: if any event of interest is pending (and the status
 * is not the 0xffff "device gone" pattern), mask the chip's interrupts
 * and punt all processing to NAPI.
 */
static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct rtl8169_private *tp = netdev_priv(dev);
	int handled = 0;
	u16 status;

	status = rtl_get_events(tp);

	/* 0xffff usually means the device was removed or is powered off. */
	if (status && status != 0xffff) {
		status &= RTL_EVENT_NAPI | tp->event_slow;
		if (status) {
			handled = 1;

			rtl_irq_disable(tp);
			napi_schedule(&tp->napi);
		}
	}
	return IRQ_RETVAL(handled);
}
5917 * Workqueue context.
/*
 * Workqueue context.
 *
 * Handle the slow (non-datapath) events NAPI deferred here: Rx FIFO
 * overflow (chip reset on VER_11), PCI system errors and link changes,
 * then re-enable all interrupts.
 */
static void rtl_slow_event_work(struct rtl8169_private *tp)
{
	struct net_device *dev = tp->dev;
	u16 status;

	status = rtl_get_events(tp) & tp->event_slow;
	rtl_ack_events(tp, status);

	if (unlikely(status & RxFIFOOver)) {
		switch (tp->mac_version) {
		/* Work around for rx fifo overflow */
		case RTL_GIGA_MAC_VER_11:
			netif_stop_queue(dev);
			/* XXX - Hack alert. See rtl_task(). */
			set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags);
			/* fall through */
		default:
			break;
		}
	}

	if (unlikely(status & SYSErr))
		rtl8169_pcierr_interrupt(dev);

	if (status & LinkChg)
		__rtl8169_check_link_status(dev, tp, tp->mmio_addr, true);

	rtl_irq_enable_all(tp);
}
/*
 * Workqueue entry point: under the work lock, run every handler whose
 * pending bit is set, in table order.  Does nothing once the device is
 * down or tasks are disabled (RTL_FLAG_TASK_ENABLED cleared on close).
 */
static void rtl_task(struct work_struct *work)
{
	static const struct {
		int bitnr;
		void (*action)(struct rtl8169_private *);
	} rtl_work[] = {
		/* XXX - keep rtl_slow_event_work() as first element. */
		{ RTL_FLAG_TASK_SLOW_PENDING,	rtl_slow_event_work },
		{ RTL_FLAG_TASK_RESET_PENDING,	rtl_reset_work },
		{ RTL_FLAG_TASK_PHY_PENDING,	rtl_phy_work }
	};
	struct rtl8169_private *tp =
		container_of(work, struct rtl8169_private, wk.work);
	struct net_device *dev = tp->dev;
	int i;

	rtl_lock_work(tp);

	if (!netif_running(dev) ||
	    !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags))
		goto out_unlock;

	for (i = 0; i < ARRAY_SIZE(rtl_work); i++) {
		bool pending;

		pending = test_and_clear_bit(rtl_work[i].bitnr, tp->wk.flags);
		if (pending)
			rtl_work[i].action(tp);
	}

out_unlock:
	rtl_unlock_work(tp);
}
/*
 * NAPI poll: ack and process fast events (Rx/Tx) inline; slow events are
 * left unacked, masked out of the re-enable set and punted to the
 * workqueue.  Interrupts are re-enabled only when the budget was not
 * exhausted.
 */
static int rtl8169_poll(struct napi_struct *napi, int budget)
{
	struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
	struct net_device *dev = tp->dev;
	u16 enable_mask = RTL_EVENT_NAPI | tp->event_slow;
	int work_done = 0;
	u16 status;

	status = rtl_get_events(tp);
	rtl_ack_events(tp, status & ~tp->event_slow);

	if (status & RTL_EVENT_NAPI_RX)
		work_done = rtl_rx(dev, tp, (u32) budget);

	if (status & RTL_EVENT_NAPI_TX)
		rtl_tx(dev, tp);

	if (status & tp->event_slow) {
		/* Keep slow events masked until the work handler ran. */
		enable_mask &= ~tp->event_slow;

		rtl_schedule_task(tp, RTL_FLAG_TASK_SLOW_PENDING);
	}

	if (work_done < budget) {
		napi_complete(napi);

		rtl_irq_enable(tp, enable_mask);
		mmiowb();
	}

	return work_done;
}
/*
 * Fold the hardware RxMissed counter into netdev stats and clear it.
 * Only the original 8169 chips (<= MAC version 06) expose this register.
 */
static void rtl8169_rx_missed(struct net_device *dev, void __iomem *ioaddr)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	if (tp->mac_version > RTL_GIGA_MAC_VER_06)
		return;

	/* Counter is 24 bits wide; clearing restarts it. */
	dev->stats.rx_missed_errors += (RTL_R32(RxMissed) & 0xffffff);
	RTL_W32(RxMissed, 0);
}
/*
 * Tear the interface down: stop the timer, NAPI and the queue, reset the
 * chip, then free both rings and power the PLL down.  Ordering matters:
 * the hardware is reset before any descriptor memory is released.
 */
static void rtl8169_down(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	del_timer_sync(&tp->timer);

	napi_disable(&tp->napi);
	netif_stop_queue(dev);

	rtl8169_hw_reset(tp);
	/*
	 * At this point device interrupts can not be enabled in any function,
	 * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task)
	 * and napi is disabled (rtl8169_poll).
	 */
	rtl8169_rx_missed(dev, ioaddr);

	/* Give a racing hard_start_xmit a few cycles to complete. */
	synchronize_sched();

	rtl8169_tx_clear(tp);

	rtl8169_rx_clear(tp);

	rtl_pll_power_down(tp);
}
/*
 * ndo_stop: snapshot the hardware counters, disable deferred tasks, bring
 * the datapath down, release the IRQ and free both descriptor rings.
 * Bracketed by a runtime-PM get/put so the device stays awake throughout.
 */
static int rtl8169_close(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	pm_runtime_get_sync(&pdev->dev);

	/* Update counters before going down */
	rtl8169_update_counters(dev);

	rtl_lock_work(tp);
	/* Prevent rtl_task() from running any more handlers. */
	clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);

	rtl8169_down(dev);
	rtl_unlock_work(tp);

	free_irq(pdev->irq, dev);

	dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
			  tp->RxPhyAddr);
	dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
			  tp->TxPhyAddr);
	tp->TxDescArray = NULL;
	tp->RxDescArray = NULL;

	pm_runtime_put_sync(&pdev->dev);

	return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook: run the IRQ handler synchronously (e.g. for netconsole). */
static void rtl8169_netpoll(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_interrupt(tp->pci_dev->irq, dev);
}
#endif
/*
 * ndo_open: allocate both descriptor rings, fill the Rx ring, load
 * firmware, request the IRQ and start the hardware.  Error paths unwind
 * in strict reverse order of acquisition (goto-cleanup chain).
 */
static int rtl_open(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	int retval = -ENOMEM;

	pm_runtime_get_sync(&pdev->dev);

	/*
	 * Rx and Tx descriptors needs 256 bytes alignment.
	 * dma_alloc_coherent provides more.
	 */
	tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES,
					     &tp->TxPhyAddr, GFP_KERNEL);
	if (!tp->TxDescArray)
		goto err_pm_runtime_put;

	tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES,
					     &tp->RxPhyAddr, GFP_KERNEL);
	if (!tp->RxDescArray)
		goto err_free_tx_0;

	retval = rtl8169_init_ring(dev);
	if (retval < 0)
		goto err_free_rx_1;

	INIT_WORK(&tp->wk.work, rtl_task);

	smp_mb();

	rtl_request_firmware(tp);

	/* Shared IRQ unless MSI is in use. */
	retval = request_irq(pdev->irq, rtl8169_interrupt,
			     (tp->features & RTL_FEATURE_MSI) ? 0 : IRQF_SHARED,
			     dev->name, dev);
	if (retval < 0)
		goto err_release_fw_2;

	rtl_lock_work(tp);

	set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);

	napi_enable(&tp->napi);

	rtl8169_init_phy(dev, tp);

	__rtl8169_set_features(dev, dev->features);

	rtl_pll_power_up(tp);

	rtl_hw_start(dev);

	netif_start_queue(dev);

	rtl_unlock_work(tp);

	tp->saved_wolopts = 0;
	pm_runtime_put_noidle(&pdev->dev);

	rtl8169_check_link_status(dev, tp, ioaddr);
out:
	return retval;

err_release_fw_2:
	rtl_release_firmware(tp);
	rtl8169_rx_clear(tp);
err_free_rx_1:
	dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
			  tp->RxPhyAddr);
	tp->RxDescArray = NULL;
err_free_tx_0:
	dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
			  tp->TxPhyAddr);
	tp->TxDescArray = NULL;
err_pm_runtime_put:
	pm_runtime_put_noidle(&pdev->dev);
	goto out;
}
6173 static struct rtnl_link_stats64 *
6174 rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6176 struct rtl8169_private *tp = netdev_priv(dev);
6177 void __iomem *ioaddr = tp->mmio_addr;
6178 unsigned int start;
6180 if (netif_running(dev))
6181 rtl8169_rx_missed(dev, ioaddr);
6183 do {
6184 start = u64_stats_fetch_begin_bh(&tp->rx_stats.syncp);
6185 stats->rx_packets = tp->rx_stats.packets;
6186 stats->rx_bytes = tp->rx_stats.bytes;
6187 } while (u64_stats_fetch_retry_bh(&tp->rx_stats.syncp, start));
6190 do {
6191 start = u64_stats_fetch_begin_bh(&tp->tx_stats.syncp);
6192 stats->tx_packets = tp->tx_stats.packets;
6193 stats->tx_bytes = tp->tx_stats.bytes;
6194 } while (u64_stats_fetch_retry_bh(&tp->tx_stats.syncp, start));
6196 stats->rx_dropped = dev->stats.rx_dropped;
6197 stats->tx_dropped = dev->stats.tx_dropped;
6198 stats->rx_length_errors = dev->stats.rx_length_errors;
6199 stats->rx_errors = dev->stats.rx_errors;
6200 stats->rx_crc_errors = dev->stats.rx_crc_errors;
6201 stats->rx_fifo_errors = dev->stats.rx_fifo_errors;
6202 stats->rx_missed_errors = dev->stats.rx_missed_errors;
6204 return stats;
6207 static void rtl8169_net_suspend(struct net_device *dev)
6209 struct rtl8169_private *tp = netdev_priv(dev);
6211 if (!netif_running(dev))
6212 return;
6214 netif_device_detach(dev);
6215 netif_stop_queue(dev);
6217 rtl_lock_work(tp);
6218 napi_disable(&tp->napi);
6219 clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
6220 rtl_unlock_work(tp);
6222 rtl_pll_power_down(tp);
6225 #ifdef CONFIG_PM
/* dev_pm_ops .suspend/.freeze/.poweroff callback. */
static int rtl8169_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);

	rtl8169_net_suspend(dev);

	return 0;
}
6237 static void __rtl8169_resume(struct net_device *dev)
6239 struct rtl8169_private *tp = netdev_priv(dev);
6241 netif_device_attach(dev);
6243 rtl_pll_power_up(tp);
6245 rtl_lock_work(tp);
6246 napi_enable(&tp->napi);
6247 set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
6248 rtl_unlock_work(tp);
6250 rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
/* dev_pm_ops .resume/.thaw/.restore callback. */
static int rtl8169_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_init_phy(dev, tp);

	if (netif_running(dev))
		__rtl8169_resume(dev);

	return 0;
}
6267 static int rtl8169_runtime_suspend(struct device *device)
6269 struct pci_dev *pdev = to_pci_dev(device);
6270 struct net_device *dev = pci_get_drvdata(pdev);
6271 struct rtl8169_private *tp = netdev_priv(dev);
6273 if (!tp->TxDescArray)
6274 return 0;
6276 rtl_lock_work(tp);
6277 tp->saved_wolopts = __rtl8169_get_wol(tp);
6278 __rtl8169_set_wol(tp, WAKE_ANY);
6279 rtl_unlock_work(tp);
6281 rtl8169_net_suspend(dev);
6283 return 0;
6286 static int rtl8169_runtime_resume(struct device *device)
6288 struct pci_dev *pdev = to_pci_dev(device);
6289 struct net_device *dev = pci_get_drvdata(pdev);
6290 struct rtl8169_private *tp = netdev_priv(dev);
6292 if (!tp->TxDescArray)
6293 return 0;
6295 rtl_lock_work(tp);
6296 __rtl8169_set_wol(tp, tp->saved_wolopts);
6297 tp->saved_wolopts = 0;
6298 rtl_unlock_work(tp);
6300 rtl8169_init_phy(dev, tp);
6302 __rtl8169_resume(dev);
6304 return 0;
6307 static int rtl8169_runtime_idle(struct device *device)
6309 struct pci_dev *pdev = to_pci_dev(device);
6310 struct net_device *dev = pci_get_drvdata(pdev);
6311 struct rtl8169_private *tp = netdev_priv(dev);
6313 return tp->TxDescArray ? -EBUSY : 0;
6316 static const struct dev_pm_ops rtl8169_pm_ops = {
6317 .suspend = rtl8169_suspend,
6318 .resume = rtl8169_resume,
6319 .freeze = rtl8169_suspend,
6320 .thaw = rtl8169_resume,
6321 .poweroff = rtl8169_suspend,
6322 .restore = rtl8169_resume,
6323 .runtime_suspend = rtl8169_runtime_suspend,
6324 .runtime_resume = rtl8169_runtime_resume,
6325 .runtime_idle = rtl8169_runtime_idle,
6328 #define RTL8169_PM_OPS (&rtl8169_pm_ops)
6330 #else /* !CONFIG_PM */
6332 #define RTL8169_PM_OPS NULL
6334 #endif /* !CONFIG_PM */
6336 static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp)
6338 void __iomem *ioaddr = tp->mmio_addr;
6340 /* WoL fails with 8168b when the receiver is disabled. */
6341 switch (tp->mac_version) {
6342 case RTL_GIGA_MAC_VER_11:
6343 case RTL_GIGA_MAC_VER_12:
6344 case RTL_GIGA_MAC_VER_17:
6345 pci_clear_master(tp->pci_dev);
6347 RTL_W8(ChipCmd, CmdRxEnb);
6348 /* PCI commit */
6349 RTL_R8(ChipCmd);
6350 break;
6351 default:
6352 break;
6356 static void rtl_shutdown(struct pci_dev *pdev)
6358 struct net_device *dev = pci_get_drvdata(pdev);
6359 struct rtl8169_private *tp = netdev_priv(dev);
6360 struct device *d = &pdev->dev;
6362 pm_runtime_get_sync(d);
6364 rtl8169_net_suspend(dev);
6366 /* Restore original MAC address */
6367 rtl_rar_set(tp, dev->perm_addr);
6369 rtl8169_hw_reset(tp);
6371 if (system_state == SYSTEM_POWER_OFF) {
6372 if (__rtl8169_get_wol(tp) & WAKE_ANY) {
6373 rtl_wol_suspend_quirk(tp);
6374 rtl_wol_shutdown_quirk(tp);
6377 pci_wake_from_d3(pdev, true);
6378 pci_set_power_state(pdev, PCI_D3hot);
6381 pm_runtime_put_noidle(d);
6384 static void __devexit rtl_remove_one(struct pci_dev *pdev)
6386 struct net_device *dev = pci_get_drvdata(pdev);
6387 struct rtl8169_private *tp = netdev_priv(dev);
6389 if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
6390 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
6391 tp->mac_version == RTL_GIGA_MAC_VER_31) {
6392 rtl8168_driver_stop(tp);
6395 cancel_work_sync(&tp->wk.work);
6397 netif_napi_del(&tp->napi);
6399 unregister_netdev(dev);
6401 rtl_release_firmware(tp);
6403 if (pci_dev_run_wake(pdev))
6404 pm_runtime_get_noresume(&pdev->dev);
6406 /* restore original MAC address */
6407 rtl_rar_set(tp, dev->perm_addr);
6409 rtl_disable_msi(pdev, tp);
6410 rtl8169_release_board(pdev, dev, tp->mmio_addr);
6411 pci_set_drvdata(pdev, NULL);
6414 static const struct net_device_ops rtl_netdev_ops = {
6415 .ndo_open = rtl_open,
6416 .ndo_stop = rtl8169_close,
6417 .ndo_get_stats64 = rtl8169_get_stats64,
6418 .ndo_start_xmit = rtl8169_start_xmit,
6419 .ndo_tx_timeout = rtl8169_tx_timeout,
6420 .ndo_validate_addr = eth_validate_addr,
6421 .ndo_change_mtu = rtl8169_change_mtu,
6422 .ndo_fix_features = rtl8169_fix_features,
6423 .ndo_set_features = rtl8169_set_features,
6424 .ndo_set_mac_address = rtl_set_mac_address,
6425 .ndo_do_ioctl = rtl8169_ioctl,
6426 .ndo_set_rx_mode = rtl_set_rx_mode,
6427 #ifdef CONFIG_NET_POLL_CONTROLLER
6428 .ndo_poll_controller = rtl8169_netpoll,
6429 #endif
6433 static const struct rtl_cfg_info {
6434 void (*hw_start)(struct net_device *);
6435 unsigned int region;
6436 unsigned int align;
6437 u16 event_slow;
6438 unsigned features;
6439 u8 default_ver;
6440 } rtl_cfg_infos [] = {
6441 [RTL_CFG_0] = {
6442 .hw_start = rtl_hw_start_8169,
6443 .region = 1,
6444 .align = 0,
6445 .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver,
6446 .features = RTL_FEATURE_GMII,
6447 .default_ver = RTL_GIGA_MAC_VER_01,
6449 [RTL_CFG_1] = {
6450 .hw_start = rtl_hw_start_8168,
6451 .region = 2,
6452 .align = 8,
6453 .event_slow = SYSErr | LinkChg | RxOverflow,
6454 .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI,
6455 .default_ver = RTL_GIGA_MAC_VER_11,
6457 [RTL_CFG_2] = {
6458 .hw_start = rtl_hw_start_8101,
6459 .region = 2,
6460 .align = 8,
6461 .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver |
6462 PCSTimeout,
6463 .features = RTL_FEATURE_MSI,
6464 .default_ver = RTL_GIGA_MAC_VER_13,
6468 /* Cfg9346_Unlock assumed. */
6469 static unsigned rtl_try_msi(struct rtl8169_private *tp,
6470 const struct rtl_cfg_info *cfg)
6472 void __iomem *ioaddr = tp->mmio_addr;
6473 unsigned msi = 0;
6474 u8 cfg2;
6476 cfg2 = RTL_R8(Config2) & ~MSIEnable;
6477 if (cfg->features & RTL_FEATURE_MSI) {
6478 if (pci_enable_msi(tp->pci_dev)) {
6479 netif_info(tp, hw, tp->dev, "no MSI. Back to INTx.\n");
6480 } else {
6481 cfg2 |= MSIEnable;
6482 msi = RTL_FEATURE_MSI;
6485 if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
6486 RTL_W8(Config2, cfg2);
6487 return msi;
6490 static int __devinit
6491 rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6493 const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
6494 const unsigned int region = cfg->region;
6495 struct rtl8169_private *tp;
6496 struct mii_if_info *mii;
6497 struct net_device *dev;
6498 void __iomem *ioaddr;
6499 int chipset, i;
6500 int rc;
6502 if (netif_msg_drv(&debug)) {
6503 printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
6504 MODULENAME, RTL8169_VERSION);
6507 dev = alloc_etherdev(sizeof (*tp));
6508 if (!dev) {
6509 rc = -ENOMEM;
6510 goto out;
6513 SET_NETDEV_DEV(dev, &pdev->dev);
6514 dev->netdev_ops = &rtl_netdev_ops;
6515 tp = netdev_priv(dev);
6516 tp->dev = dev;
6517 tp->pci_dev = pdev;
6518 tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
6520 mii = &tp->mii;
6521 mii->dev = dev;
6522 mii->mdio_read = rtl_mdio_read;
6523 mii->mdio_write = rtl_mdio_write;
6524 mii->phy_id_mask = 0x1f;
6525 mii->reg_num_mask = 0x1f;
6526 mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
6528 /* disable ASPM completely as that cause random device stop working
6529 * problems as well as full system hangs for some PCIe devices users */
6530 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
6531 PCIE_LINK_STATE_CLKPM);
6533 /* enable device (incl. PCI PM wakeup and hotplug setup) */
6534 rc = pci_enable_device(pdev);
6535 if (rc < 0) {
6536 netif_err(tp, probe, dev, "enable failure\n");
6537 goto err_out_free_dev_1;
6540 if (pci_set_mwi(pdev) < 0)
6541 netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n");
6543 /* make sure PCI base addr 1 is MMIO */
6544 if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
6545 netif_err(tp, probe, dev,
6546 "region #%d not an MMIO resource, aborting\n",
6547 region);
6548 rc = -ENODEV;
6549 goto err_out_mwi_2;
6552 /* check for weird/broken PCI region reporting */
6553 if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
6554 netif_err(tp, probe, dev,
6555 "Invalid PCI region size(s), aborting\n");
6556 rc = -ENODEV;
6557 goto err_out_mwi_2;
6560 rc = pci_request_regions(pdev, MODULENAME);
6561 if (rc < 0) {
6562 netif_err(tp, probe, dev, "could not request regions\n");
6563 goto err_out_mwi_2;
6566 tp->cp_cmd = RxChkSum;
6568 if ((sizeof(dma_addr_t) > 4) &&
6569 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
6570 tp->cp_cmd |= PCIDAC;
6571 dev->features |= NETIF_F_HIGHDMA;
6572 } else {
6573 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
6574 if (rc < 0) {
6575 netif_err(tp, probe, dev, "DMA configuration failed\n");
6576 goto err_out_free_res_3;
6580 /* ioremap MMIO region */
6581 ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
6582 if (!ioaddr) {
6583 netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
6584 rc = -EIO;
6585 goto err_out_free_res_3;
6587 tp->mmio_addr = ioaddr;
6589 if (!pci_is_pcie(pdev))
6590 netif_info(tp, probe, dev, "not PCI Express\n");
6592 /* Identify chip attached to board */
6593 rtl8169_get_mac_version(tp, dev, cfg->default_ver);
6595 rtl_init_rxcfg(tp);
6597 rtl_irq_disable(tp);
6599 rtl_hw_reset(tp);
6601 rtl_ack_events(tp, 0xffff);
6603 pci_set_master(pdev);
6606 * Pretend we are using VLANs; This bypasses a nasty bug where
6607 * Interrupts stop flowing on high load on 8110SCd controllers.
6609 if (tp->mac_version == RTL_GIGA_MAC_VER_05)
6610 tp->cp_cmd |= RxVlan;
6612 rtl_init_mdio_ops(tp);
6613 rtl_init_pll_power_ops(tp);
6614 rtl_init_jumbo_ops(tp);
6615 rtl_init_csi_ops(tp);
6617 rtl8169_print_mac_version(tp);
6619 chipset = tp->mac_version;
6620 tp->txd_version = rtl_chip_infos[chipset].txd_version;
6622 RTL_W8(Cfg9346, Cfg9346_Unlock);
6623 RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
6624 RTL_W8(Config5, RTL_R8(Config5) & PMEStatus);
6625 if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0)
6626 tp->features |= RTL_FEATURE_WOL;
6627 if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
6628 tp->features |= RTL_FEATURE_WOL;
6629 tp->features |= rtl_try_msi(tp, cfg);
6630 RTL_W8(Cfg9346, Cfg9346_Lock);
6632 if (rtl_tbi_enabled(tp)) {
6633 tp->set_speed = rtl8169_set_speed_tbi;
6634 tp->get_settings = rtl8169_gset_tbi;
6635 tp->phy_reset_enable = rtl8169_tbi_reset_enable;
6636 tp->phy_reset_pending = rtl8169_tbi_reset_pending;
6637 tp->link_ok = rtl8169_tbi_link_ok;
6638 tp->do_ioctl = rtl_tbi_ioctl;
6639 } else {
6640 tp->set_speed = rtl8169_set_speed_xmii;
6641 tp->get_settings = rtl8169_gset_xmii;
6642 tp->phy_reset_enable = rtl8169_xmii_reset_enable;
6643 tp->phy_reset_pending = rtl8169_xmii_reset_pending;
6644 tp->link_ok = rtl8169_xmii_link_ok;
6645 tp->do_ioctl = rtl_xmii_ioctl;
6648 mutex_init(&tp->wk.mutex);
6650 /* Get MAC address */
6651 for (i = 0; i < ETH_ALEN; i++)
6652 dev->dev_addr[i] = RTL_R8(MAC0 + i);
6653 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
6655 SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
6656 dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
6658 netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT);
6660 /* don't enable SG, IP_CSUM and TSO by default - it might not work
6661 * properly for all devices */
6662 dev->features |= NETIF_F_RXCSUM |
6663 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6665 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
6666 NETIF_F_RXCSUM | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6667 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
6668 NETIF_F_HIGHDMA;
6670 if (tp->mac_version == RTL_GIGA_MAC_VER_05)
6671 /* 8110SCd requires hardware Rx VLAN - disallow toggling */
6672 dev->hw_features &= ~NETIF_F_HW_VLAN_RX;
6674 dev->hw_features |= NETIF_F_RXALL;
6675 dev->hw_features |= NETIF_F_RXFCS;
6677 tp->hw_start = cfg->hw_start;
6678 tp->event_slow = cfg->event_slow;
6680 tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ?
6681 ~(RxBOVF | RxFOVF) : ~0;
6683 init_timer(&tp->timer);
6684 tp->timer.data = (unsigned long) dev;
6685 tp->timer.function = rtl8169_phy_timer;
6687 tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
6689 rc = register_netdev(dev);
6690 if (rc < 0)
6691 goto err_out_msi_4;
6693 pci_set_drvdata(pdev, dev);
6695 netif_info(tp, probe, dev, "%s at 0x%p, %pM, XID %08x IRQ %d\n",
6696 rtl_chip_infos[chipset].name, ioaddr, dev->dev_addr,
6697 (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), pdev->irq);
6698 if (rtl_chip_infos[chipset].jumbo_max != JUMBO_1K) {
6699 netif_info(tp, probe, dev, "jumbo features [frames: %d bytes, "
6700 "tx checksumming: %s]\n",
6701 rtl_chip_infos[chipset].jumbo_max,
6702 rtl_chip_infos[chipset].jumbo_tx_csum ? "ok" : "ko");
6705 if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
6706 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
6707 tp->mac_version == RTL_GIGA_MAC_VER_31) {
6708 rtl8168_driver_start(tp);
6711 device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
6713 if (pci_dev_run_wake(pdev))
6714 pm_runtime_put_noidle(&pdev->dev);
6716 netif_carrier_off(dev);
6718 out:
6719 return rc;
6721 err_out_msi_4:
6722 netif_napi_del(&tp->napi);
6723 rtl_disable_msi(pdev, tp);
6724 iounmap(ioaddr);
6725 err_out_free_res_3:
6726 pci_release_regions(pdev);
6727 err_out_mwi_2:
6728 pci_clear_mwi(pdev);
6729 pci_disable_device(pdev);
6730 err_out_free_dev_1:
6731 free_netdev(dev);
6732 goto out;
6735 static struct pci_driver rtl8169_pci_driver = {
6736 .name = MODULENAME,
6737 .id_table = rtl8169_pci_tbl,
6738 .probe = rtl_init_one,
6739 .remove = __devexit_p(rtl_remove_one),
6740 .shutdown = rtl_shutdown,
6741 .driver.pm = RTL8169_PM_OPS,
6744 static int __init rtl8169_init_module(void)
6746 return pci_register_driver(&rtl8169_pci_driver);
6749 static void __exit rtl8169_cleanup_module(void)
6751 pci_unregister_driver(&rtl8169_pci_driver);
6754 module_init(rtl8169_init_module);
6755 module_exit(rtl8169_cleanup_module);