linux: update to latest 4.4.x
openadk.git: target/linux/patches/4.4.211/crisv32_ethernet_driver.patch
1 diff -Nur linux-4.7.3.orig/arch/cris/arch-v32/drivers/Kconfig linux-4.7.3/arch/cris/arch-v32/drivers/Kconfig
2 --- linux-4.7.3.orig/arch/cris/arch-v32/drivers/Kconfig 2016-09-07 08:35:12.000000000 +0200
3 +++ linux-4.7.3/arch/cris/arch-v32/drivers/Kconfig 2016-09-13 01:47:09.507717605 +0200
4 @@ -8,9 +8,18 @@
5 This option enables the ETRAX FS built-in 10/100Mbit Ethernet
6 controller.
8 +config ETRAX_HAVE_PHY
9 + bool "PHY present"
10 + default y
11 + help
12 + Search and use the first PHY available on the MDIO bus. Fail
13 + if none is found. Say Y here if you are not in a switched
14 + environment (single port device).
16 config ETRAX_NO_PHY
17 bool "PHY not present"
18 depends on ETRAX_ETHERNET
19 + default n
20 help
21 This option disables all MDIO communication with an ethernet
22 transceiver connected to the MII interface. This option shall
23 @@ -18,6 +27,70 @@
24 switch. This option should normally be disabled. If enabled,
25 speed and duplex will be locked to 100 Mbit and full duplex.
27 +config ETRAX_PHY_FALLBACK
28 + bool "Fixed PHY fallback"
29 + depends on ETRAX_ETHERNET
30 + default n
31 + help
32 + If no PHY is found on the MDIO bus, fall back on a fixed
33 + 100/Full PHY. Say Y here if you need dynamic PHY
34 + presence detection (switch connection where some but not
35 + all ports have integrated PHYs), otherwise say N.
37 +config ETRAX_ETHERNET_IFACE0
38 + depends on ETRAX_ETHERNET
39 + bool "Enable network interface 0"
41 +config ETRAX_ETHERNET_IFACE1
42 + depends on (ETRAX_ETHERNET && ETRAXFS)
43 + bool "Enable network interface 1 (uses DMA6 and DMA7)"
45 +choice
46 + prompt "Eth0 led group"
47 + depends on ETRAX_ETHERNET_IFACE0
48 + default ETRAX_ETH0_USE_LEDGRP0
50 +config ETRAX_ETH0_USE_LEDGRP0
51 + bool "Use LED grp 0"
52 + depends on ETRAX_NBR_LED_GRP_ONE || ETRAX_NBR_LED_GRP_TWO
53 + help
54 + Use LED grp 0 for eth0
56 +config ETRAX_ETH0_USE_LEDGRP1
57 + bool "Use LED grp 1"
58 + depends on ETRAX_NBR_LED_GRP_TWO
59 + help
60 + Use LED grp 1 for eth0
62 +config ETRAX_ETH0_USE_LEDGRPNULL
63 + bool "Use no LEDs for eth0"
64 + help
65 + Use no LEDs for eth0
66 +endchoice
68 +choice
69 + prompt "Eth1 led group"
70 + depends on ETRAX_ETHERNET_IFACE1
71 + default ETRAX_ETH1_USE_LEDGRP1
73 +config ETRAX_ETH1_USE_LEDGRP0
74 + bool "Use LED grp 0"
75 + depends on ETRAX_NBR_LED_GRP_ONE || ETRAX_NBR_LED_GRP_TWO
76 + help
77 + Use LED grp 0 for eth1
79 +config ETRAX_ETH1_USE_LEDGRP1
80 + bool "Use LED grp 1"
81 + depends on ETRAX_NBR_LED_GRP_TWO
82 + help
83 + Use LED grp 1 for eth1
85 +config ETRAX_ETH1_USE_LEDGRPNULL
86 + bool "Use no LEDs for eth1"
87 + help
88 + Use no LEDs for eth1
89 +endchoice
91 config ETRAXFS_SERIAL
92 bool "Serial-port support"
93 depends on ETRAX_ARCH_V32
94 diff -Nur linux-4.7.3.orig/arch/cris/include/arch-v32/arch/hwregs/eth_defs.h linux-4.7.3/arch/cris/include/arch-v32/arch/hwregs/eth_defs.h
95 --- linux-4.7.3.orig/arch/cris/include/arch-v32/arch/hwregs/eth_defs.h 2016-09-07 08:35:12.000000000 +0200
96 +++ linux-4.7.3/arch/cris/include/arch-v32/arch/hwregs/eth_defs.h 2016-09-13 01:47:09.527718381 +0200
97 @@ -2,69 +2,64 @@
98 #define __eth_defs_h
101 - * This file is autogenerated from
102 - * file: eth.r
103 - * id: eth_regs.r,v 1.16 2005/05/20 15:41:22 perz Exp
104 - * last modfied: Mon Jan 9 06:06:41 2006
106 - * by /n/asic/design/tools/rdesc/rdes2c eth.r
107 - * id: $Id: eth_defs.h,v 1.7 2006/01/26 13:45:30 karljope Exp $
108 - * Any changes here will be lost.
110 - * -*- buffer-read-only: t -*-
111 + * Note: Previously this was autogenerated code from the hardware
112 + * implementation. However, to enable the same file to be used
113 + * for both ARTPEC-3 and ETRAX FS this file is now hand-edited.
114 + * Be careful.
117 /* Main access macros */
118 #ifndef REG_RD
119 #define REG_RD( scope, inst, reg ) \
120 - REG_READ( reg_##scope##_##reg, \
121 - (inst) + REG_RD_ADDR_##scope##_##reg )
122 + REG_READ( reg_##scope##_##reg, \
123 + (inst) + REG_RD_ADDR_##scope##_##reg )
124 #endif
126 #ifndef REG_WR
127 #define REG_WR( scope, inst, reg, val ) \
128 - REG_WRITE( reg_##scope##_##reg, \
129 - (inst) + REG_WR_ADDR_##scope##_##reg, (val) )
130 + REG_WRITE( reg_##scope##_##reg, \
131 + (inst) + REG_WR_ADDR_##scope##_##reg, (val) )
132 #endif
134 #ifndef REG_RD_VECT
135 #define REG_RD_VECT( scope, inst, reg, index ) \
136 - REG_READ( reg_##scope##_##reg, \
137 - (inst) + REG_RD_ADDR_##scope##_##reg + \
138 - (index) * STRIDE_##scope##_##reg )
139 + REG_READ( reg_##scope##_##reg, \
140 + (inst) + REG_RD_ADDR_##scope##_##reg + \
141 + (index) * STRIDE_##scope##_##reg )
142 #endif
144 #ifndef REG_WR_VECT
145 #define REG_WR_VECT( scope, inst, reg, index, val ) \
146 - REG_WRITE( reg_##scope##_##reg, \
147 - (inst) + REG_WR_ADDR_##scope##_##reg + \
148 - (index) * STRIDE_##scope##_##reg, (val) )
149 + REG_WRITE( reg_##scope##_##reg, \
150 + (inst) + REG_WR_ADDR_##scope##_##reg + \
151 + (index) * STRIDE_##scope##_##reg, (val) )
152 #endif
154 #ifndef REG_RD_INT
155 #define REG_RD_INT( scope, inst, reg ) \
156 - REG_READ( int, (inst) + REG_RD_ADDR_##scope##_##reg )
157 + REG_READ( int, (inst) + REG_RD_ADDR_##scope##_##reg )
158 #endif
160 #ifndef REG_WR_INT
161 #define REG_WR_INT( scope, inst, reg, val ) \
162 - REG_WRITE( int, (inst) + REG_WR_ADDR_##scope##_##reg, (val) )
163 + REG_WRITE( int, (inst) + REG_WR_ADDR_##scope##_##reg, (val) )
164 #endif
166 #ifndef REG_RD_INT_VECT
167 #define REG_RD_INT_VECT( scope, inst, reg, index ) \
168 - REG_READ( int, (inst) + REG_RD_ADDR_##scope##_##reg + \
169 - (index) * STRIDE_##scope##_##reg )
170 + REG_READ( int, (inst) + REG_RD_ADDR_##scope##_##reg + \
171 + (index) * STRIDE_##scope##_##reg )
172 #endif
174 #ifndef REG_WR_INT_VECT
175 #define REG_WR_INT_VECT( scope, inst, reg, index, val ) \
176 - REG_WRITE( int, (inst) + REG_WR_ADDR_##scope##_##reg + \
177 - (index) * STRIDE_##scope##_##reg, (val) )
178 + REG_WRITE( int, (inst) + REG_WR_ADDR_##scope##_##reg + \
179 + (index) * STRIDE_##scope##_##reg, (val) )
180 #endif
182 #ifndef REG_TYPE_CONV
183 #define REG_TYPE_CONV( type, orgtype, val ) \
184 - ( { union { orgtype o; type n; } r; r.o = val; r.n; } )
185 + ( { union { orgtype o; type n; } r; r.o = val; r.n; } )
186 #endif
188 #ifndef reg_page_size
189 @@ -73,306 +68,332 @@
191 #ifndef REG_ADDR
192 #define REG_ADDR( scope, inst, reg ) \
193 - ( (inst) + REG_RD_ADDR_##scope##_##reg )
194 + ( (inst) + REG_RD_ADDR_##scope##_##reg )
195 #endif
197 #ifndef REG_ADDR_VECT
198 #define REG_ADDR_VECT( scope, inst, reg, index ) \
199 - ( (inst) + REG_RD_ADDR_##scope##_##reg + \
200 - (index) * STRIDE_##scope##_##reg )
201 + ( (inst) + REG_RD_ADDR_##scope##_##reg + \
202 + (index) * STRIDE_##scope##_##reg )
203 #endif
205 /* C-code for register scope eth */
207 /* Register rw_ma0_lo, scope eth, type rw */
208 typedef struct {
209 - unsigned int addr : 32;
210 + unsigned int addr : 32;
211 } reg_eth_rw_ma0_lo;
212 #define REG_RD_ADDR_eth_rw_ma0_lo 0
213 #define REG_WR_ADDR_eth_rw_ma0_lo 0
215 /* Register rw_ma0_hi, scope eth, type rw */
216 typedef struct {
217 - unsigned int addr : 16;
218 - unsigned int dummy1 : 16;
219 + unsigned int addr : 16;
220 + unsigned int dummy1 : 16;
221 } reg_eth_rw_ma0_hi;
222 #define REG_RD_ADDR_eth_rw_ma0_hi 4
223 #define REG_WR_ADDR_eth_rw_ma0_hi 4
225 /* Register rw_ma1_lo, scope eth, type rw */
226 typedef struct {
227 - unsigned int addr : 32;
228 + unsigned int addr : 32;
229 } reg_eth_rw_ma1_lo;
230 #define REG_RD_ADDR_eth_rw_ma1_lo 8
231 #define REG_WR_ADDR_eth_rw_ma1_lo 8
233 /* Register rw_ma1_hi, scope eth, type rw */
234 typedef struct {
235 - unsigned int addr : 16;
236 - unsigned int dummy1 : 16;
237 + unsigned int addr : 16;
238 + unsigned int dummy1 : 16;
239 } reg_eth_rw_ma1_hi;
240 #define REG_RD_ADDR_eth_rw_ma1_hi 12
241 #define REG_WR_ADDR_eth_rw_ma1_hi 12
243 /* Register rw_ga_lo, scope eth, type rw */
244 typedef struct {
245 - unsigned int tbl : 32;
246 + unsigned int table : 32;
247 } reg_eth_rw_ga_lo;
248 #define REG_RD_ADDR_eth_rw_ga_lo 16
249 #define REG_WR_ADDR_eth_rw_ga_lo 16
251 /* Register rw_ga_hi, scope eth, type rw */
252 typedef struct {
253 - unsigned int tbl : 32;
254 + unsigned int table : 32;
255 } reg_eth_rw_ga_hi;
256 #define REG_RD_ADDR_eth_rw_ga_hi 20
257 #define REG_WR_ADDR_eth_rw_ga_hi 20
259 /* Register rw_gen_ctrl, scope eth, type rw */
260 typedef struct {
261 - unsigned int en : 1;
262 - unsigned int phy : 2;
263 - unsigned int protocol : 1;
264 - unsigned int loopback : 1;
265 - unsigned int flow_ctrl : 1;
266 - unsigned int gtxclk_out : 1;
267 - unsigned int phyrst_n : 1;
268 - unsigned int dummy1 : 24;
269 + unsigned int en : 1;
270 + unsigned int phy : 2;
271 + unsigned int protocol : 1;
272 + unsigned int loopback : 1;
273 + unsigned int flow_ctrl : 1;
274 + unsigned int gtxclk_out : 1;
275 + unsigned int phyrst_n : 1;
276 + unsigned int dummy1 : 24;
277 } reg_eth_rw_gen_ctrl;
278 #define REG_RD_ADDR_eth_rw_gen_ctrl 24
279 #define REG_WR_ADDR_eth_rw_gen_ctrl 24
281 /* Register rw_rec_ctrl, scope eth, type rw */
282 typedef struct {
283 - unsigned int ma0 : 1;
284 - unsigned int ma1 : 1;
285 - unsigned int individual : 1;
286 - unsigned int broadcast : 1;
287 - unsigned int undersize : 1;
288 - unsigned int oversize : 1;
289 - unsigned int bad_crc : 1;
290 - unsigned int duplex : 1;
291 - unsigned int max_size : 16;
292 - unsigned int dummy1 : 8;
293 + unsigned int ma0 : 1;
294 + unsigned int ma1 : 1;
295 + unsigned int individual : 1;
296 + unsigned int broadcast : 1;
297 + unsigned int undersize : 1;
298 + unsigned int oversize : 1;
299 + unsigned int bad_crc : 1;
300 + unsigned int duplex : 1;
301 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
302 + unsigned int max_size : 16;
303 + unsigned int dummy1 : 8;
304 +#else
305 + unsigned int max_size : 1;
306 + unsigned int dummy1 : 23;
307 +#endif
308 } reg_eth_rw_rec_ctrl;
309 #define REG_RD_ADDR_eth_rw_rec_ctrl 28
310 #define REG_WR_ADDR_eth_rw_rec_ctrl 28
312 /* Register rw_tr_ctrl, scope eth, type rw */
313 typedef struct {
314 - unsigned int crc : 1;
315 - unsigned int pad : 1;
316 - unsigned int retry : 1;
317 - unsigned int ignore_col : 1;
318 - unsigned int cancel : 1;
319 - unsigned int hsh_delay : 1;
320 - unsigned int ignore_crs : 1;
321 - unsigned int carrier_ext : 1;
322 - unsigned int dummy1 : 24;
323 + unsigned int crc : 1;
324 + unsigned int pad : 1;
325 + unsigned int retry : 1;
326 + unsigned int ignore_col : 1;
327 + unsigned int cancel : 1;
328 + unsigned int hsh_delay : 1;
329 + unsigned int ignore_crs : 1;
330 + unsigned int carrier_ext : 1;
331 + unsigned int dummy1 : 24;
332 } reg_eth_rw_tr_ctrl;
333 #define REG_RD_ADDR_eth_rw_tr_ctrl 32
334 #define REG_WR_ADDR_eth_rw_tr_ctrl 32
336 /* Register rw_clr_err, scope eth, type rw */
337 typedef struct {
338 - unsigned int clr : 1;
339 - unsigned int dummy1 : 31;
340 + unsigned int clr : 1;
341 + unsigned int dummy1 : 31;
342 } reg_eth_rw_clr_err;
343 #define REG_RD_ADDR_eth_rw_clr_err 36
344 #define REG_WR_ADDR_eth_rw_clr_err 36
346 /* Register rw_mgm_ctrl, scope eth, type rw */
347 typedef struct {
348 - unsigned int mdio : 1;
349 - unsigned int mdoe : 1;
350 - unsigned int mdc : 1;
351 - unsigned int dummy1 : 29;
352 + unsigned int mdio : 1;
353 + unsigned int mdoe : 1;
354 + unsigned int mdc : 1;
355 + unsigned int phyclk : 1;
356 + unsigned int txdata : 4;
357 + unsigned int txen : 1;
358 + unsigned int dummy1 : 23;
359 } reg_eth_rw_mgm_ctrl;
360 #define REG_RD_ADDR_eth_rw_mgm_ctrl 40
361 #define REG_WR_ADDR_eth_rw_mgm_ctrl 40
363 /* Register r_stat, scope eth, type r */
364 typedef struct {
365 - unsigned int mdio : 1;
366 - unsigned int exc_col : 1;
367 - unsigned int urun : 1;
368 - unsigned int clk_125 : 1;
369 - unsigned int dummy1 : 28;
370 + unsigned int mdio : 1;
371 + unsigned int exc_col : 1;
372 + unsigned int urun : 1;
373 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
374 + unsigned int clk_125 : 1;
375 +#else
376 + unsigned int phyclk : 1;
377 +#endif
378 + unsigned int txdata : 4;
379 + unsigned int txen : 1;
380 + unsigned int col : 1;
381 + unsigned int crs : 1;
382 + unsigned int txclk : 1;
383 + unsigned int rxdata : 4;
384 + unsigned int rxer : 1;
385 + unsigned int rxdv : 1;
386 + unsigned int rxclk : 1;
387 + unsigned int dummy1 : 13;
388 } reg_eth_r_stat;
389 #define REG_RD_ADDR_eth_r_stat 44
391 /* Register rs_rec_cnt, scope eth, type rs */
392 typedef struct {
393 - unsigned int crc_err : 8;
394 - unsigned int align_err : 8;
395 - unsigned int oversize : 8;
396 - unsigned int congestion : 8;
397 + unsigned int crc_err : 8;
398 + unsigned int align_err : 8;
399 + unsigned int oversize : 8;
400 + unsigned int congestion : 8;
401 } reg_eth_rs_rec_cnt;
402 #define REG_RD_ADDR_eth_rs_rec_cnt 48
404 /* Register r_rec_cnt, scope eth, type r */
405 typedef struct {
406 - unsigned int crc_err : 8;
407 - unsigned int align_err : 8;
408 - unsigned int oversize : 8;
409 - unsigned int congestion : 8;
410 + unsigned int crc_err : 8;
411 + unsigned int align_err : 8;
412 + unsigned int oversize : 8;
413 + unsigned int congestion : 8;
414 } reg_eth_r_rec_cnt;
415 #define REG_RD_ADDR_eth_r_rec_cnt 52
417 /* Register rs_tr_cnt, scope eth, type rs */
418 typedef struct {
419 - unsigned int single_col : 8;
420 - unsigned int mult_col : 8;
421 - unsigned int late_col : 8;
422 - unsigned int deferred : 8;
423 + unsigned int single_col : 8;
424 + unsigned int mult_col : 8;
425 + unsigned int late_col : 8;
426 + unsigned int deferred : 8;
427 } reg_eth_rs_tr_cnt;
428 #define REG_RD_ADDR_eth_rs_tr_cnt 56
430 /* Register r_tr_cnt, scope eth, type r */
431 typedef struct {
432 - unsigned int single_col : 8;
433 - unsigned int mult_col : 8;
434 - unsigned int late_col : 8;
435 - unsigned int deferred : 8;
436 + unsigned int single_col : 8;
437 + unsigned int mult_col : 8;
438 + unsigned int late_col : 8;
439 + unsigned int deferred : 8;
440 } reg_eth_r_tr_cnt;
441 #define REG_RD_ADDR_eth_r_tr_cnt 60
443 /* Register rs_phy_cnt, scope eth, type rs */
444 typedef struct {
445 - unsigned int carrier_loss : 8;
446 - unsigned int sqe_err : 8;
447 - unsigned int dummy1 : 16;
448 + unsigned int carrier_loss : 8;
449 + unsigned int sqe_err : 8;
450 + unsigned int dummy1 : 16;
451 } reg_eth_rs_phy_cnt;
452 #define REG_RD_ADDR_eth_rs_phy_cnt 64
454 /* Register r_phy_cnt, scope eth, type r */
455 typedef struct {
456 - unsigned int carrier_loss : 8;
457 - unsigned int sqe_err : 8;
458 - unsigned int dummy1 : 16;
459 + unsigned int carrier_loss : 8;
460 + unsigned int sqe_err : 8;
461 + unsigned int dummy1 : 16;
462 } reg_eth_r_phy_cnt;
463 #define REG_RD_ADDR_eth_r_phy_cnt 68
465 /* Register rw_test_ctrl, scope eth, type rw */
466 typedef struct {
467 - unsigned int snmp_inc : 1;
468 - unsigned int snmp : 1;
469 - unsigned int backoff : 1;
470 - unsigned int dummy1 : 29;
471 + unsigned int snmp_inc : 1;
472 + unsigned int snmp : 1;
473 + unsigned int backoff : 1;
474 + unsigned int dummy1 : 29;
475 } reg_eth_rw_test_ctrl;
476 #define REG_RD_ADDR_eth_rw_test_ctrl 72
477 #define REG_WR_ADDR_eth_rw_test_ctrl 72
479 /* Register rw_intr_mask, scope eth, type rw */
480 typedef struct {
481 - unsigned int crc : 1;
482 - unsigned int align : 1;
483 - unsigned int oversize : 1;
484 - unsigned int congestion : 1;
485 - unsigned int single_col : 1;
486 - unsigned int mult_col : 1;
487 - unsigned int late_col : 1;
488 - unsigned int deferred : 1;
489 - unsigned int carrier_loss : 1;
490 - unsigned int sqe_test_err : 1;
491 - unsigned int orun : 1;
492 - unsigned int urun : 1;
493 - unsigned int exc_col : 1;
494 - unsigned int mdio : 1;
495 - unsigned int dummy1 : 18;
496 + unsigned int crc : 1;
497 + unsigned int align : 1;
498 + unsigned int oversize : 1;
499 + unsigned int congestion : 1;
500 + unsigned int single_col : 1;
501 + unsigned int mult_col : 1;
502 + unsigned int late_col : 1;
503 + unsigned int deferred : 1;
504 + unsigned int carrier_loss : 1;
505 + unsigned int sqe_test_err : 1;
506 + unsigned int orun : 1;
507 + unsigned int urun : 1;
508 + unsigned int exc_col : 1;
509 + unsigned int mdio : 1;
510 + unsigned int dummy1 : 18;
511 } reg_eth_rw_intr_mask;
512 #define REG_RD_ADDR_eth_rw_intr_mask 76
513 #define REG_WR_ADDR_eth_rw_intr_mask 76
515 /* Register rw_ack_intr, scope eth, type rw */
516 typedef struct {
517 - unsigned int crc : 1;
518 - unsigned int align : 1;
519 - unsigned int oversize : 1;
520 - unsigned int congestion : 1;
521 - unsigned int single_col : 1;
522 - unsigned int mult_col : 1;
523 - unsigned int late_col : 1;
524 - unsigned int deferred : 1;
525 - unsigned int carrier_loss : 1;
526 - unsigned int sqe_test_err : 1;
527 - unsigned int orun : 1;
528 - unsigned int urun : 1;
529 - unsigned int exc_col : 1;
530 - unsigned int mdio : 1;
531 - unsigned int dummy1 : 18;
532 + unsigned int crc : 1;
533 + unsigned int align : 1;
534 + unsigned int oversize : 1;
535 + unsigned int congestion : 1;
536 + unsigned int single_col : 1;
537 + unsigned int mult_col : 1;
538 + unsigned int late_col : 1;
539 + unsigned int deferred : 1;
540 + unsigned int carrier_loss : 1;
541 + unsigned int sqe_test_err : 1;
542 + unsigned int orun : 1;
543 + unsigned int urun : 1;
544 + unsigned int exc_col : 1;
545 + unsigned int mdio : 1;
546 + unsigned int dummy1 : 18;
547 } reg_eth_rw_ack_intr;
548 #define REG_RD_ADDR_eth_rw_ack_intr 80
549 #define REG_WR_ADDR_eth_rw_ack_intr 80
551 /* Register r_intr, scope eth, type r */
552 typedef struct {
553 - unsigned int crc : 1;
554 - unsigned int align : 1;
555 - unsigned int oversize : 1;
556 - unsigned int congestion : 1;
557 - unsigned int single_col : 1;
558 - unsigned int mult_col : 1;
559 - unsigned int late_col : 1;
560 - unsigned int deferred : 1;
561 - unsigned int carrier_loss : 1;
562 - unsigned int sqe_test_err : 1;
563 - unsigned int orun : 1;
564 - unsigned int urun : 1;
565 - unsigned int exc_col : 1;
566 - unsigned int mdio : 1;
567 - unsigned int dummy1 : 18;
568 + unsigned int crc : 1;
569 + unsigned int align : 1;
570 + unsigned int oversize : 1;
571 + unsigned int congestion : 1;
572 + unsigned int single_col : 1;
573 + unsigned int mult_col : 1;
574 + unsigned int late_col : 1;
575 + unsigned int deferred : 1;
576 + unsigned int carrier_loss : 1;
577 + unsigned int sqe_test_err : 1;
578 + unsigned int orun : 1;
579 + unsigned int urun : 1;
580 + unsigned int exc_col : 1;
581 + unsigned int mdio : 1;
582 + unsigned int dummy1 : 18;
583 } reg_eth_r_intr;
584 #define REG_RD_ADDR_eth_r_intr 84
586 /* Register r_masked_intr, scope eth, type r */
587 typedef struct {
588 - unsigned int crc : 1;
589 - unsigned int align : 1;
590 - unsigned int oversize : 1;
591 - unsigned int congestion : 1;
592 - unsigned int single_col : 1;
593 - unsigned int mult_col : 1;
594 - unsigned int late_col : 1;
595 - unsigned int deferred : 1;
596 - unsigned int carrier_loss : 1;
597 - unsigned int sqe_test_err : 1;
598 - unsigned int orun : 1;
599 - unsigned int urun : 1;
600 - unsigned int exc_col : 1;
601 - unsigned int mdio : 1;
602 - unsigned int dummy1 : 18;
603 + unsigned int crc : 1;
604 + unsigned int align : 1;
605 + unsigned int oversize : 1;
606 + unsigned int congestion : 1;
607 + unsigned int single_col : 1;
608 + unsigned int mult_col : 1;
609 + unsigned int late_col : 1;
610 + unsigned int deferred : 1;
611 + unsigned int carrier_loss : 1;
612 + unsigned int sqe_test_err : 1;
613 + unsigned int orun : 1;
614 + unsigned int urun : 1;
615 + unsigned int exc_col : 1;
616 + unsigned int mdio : 1;
617 + unsigned int dummy1 : 18;
618 } reg_eth_r_masked_intr;
619 #define REG_RD_ADDR_eth_r_masked_intr 88
622 /* Constants */
623 enum {
624 - regk_eth_discard = 0x00000000,
625 - regk_eth_ether = 0x00000000,
626 - regk_eth_full = 0x00000001,
627 - regk_eth_gmii = 0x00000003,
628 - regk_eth_gtxclk = 0x00000001,
629 - regk_eth_half = 0x00000000,
630 - regk_eth_hsh = 0x00000001,
631 - regk_eth_mii = 0x00000001,
632 - regk_eth_mii_arec = 0x00000002,
633 - regk_eth_mii_clk = 0x00000000,
634 - regk_eth_no = 0x00000000,
635 - regk_eth_phyrst = 0x00000000,
636 - regk_eth_rec = 0x00000001,
637 - regk_eth_rw_ga_hi_default = 0x00000000,
638 - regk_eth_rw_ga_lo_default = 0x00000000,
639 - regk_eth_rw_gen_ctrl_default = 0x00000000,
640 - regk_eth_rw_intr_mask_default = 0x00000000,
641 - regk_eth_rw_ma0_hi_default = 0x00000000,
642 - regk_eth_rw_ma0_lo_default = 0x00000000,
643 - regk_eth_rw_ma1_hi_default = 0x00000000,
644 - regk_eth_rw_ma1_lo_default = 0x00000000,
645 - regk_eth_rw_mgm_ctrl_default = 0x00000000,
646 - regk_eth_rw_test_ctrl_default = 0x00000000,
647 - regk_eth_size1518 = 0x000005ee,
648 - regk_eth_size1522 = 0x000005f2,
649 - regk_eth_yes = 0x00000001
650 + regk_eth_discard = 0x00000000,
651 + regk_eth_ether = 0x00000000,
652 + regk_eth_full = 0x00000001,
653 + regk_eth_gmii = 0x00000003,
654 + regk_eth_gtxclk = 0x00000001,
655 + regk_eth_half = 0x00000000,
656 + regk_eth_hsh = 0x00000001,
657 + regk_eth_mii = 0x00000001,
658 + regk_eth_mii_arec = 0x00000002,
659 + regk_eth_mii_clk = 0x00000000,
660 + regk_eth_no = 0x00000000,
661 + regk_eth_phyrst = 0x00000000,
662 + regk_eth_rec = 0x00000001,
663 + regk_eth_rw_ga_hi_default = 0x00000000,
664 + regk_eth_rw_ga_lo_default = 0x00000000,
665 + regk_eth_rw_gen_ctrl_default = 0x00000000,
666 + regk_eth_rw_intr_mask_default = 0x00000000,
667 + regk_eth_rw_ma0_hi_default = 0x00000000,
668 + regk_eth_rw_ma0_lo_default = 0x00000000,
669 + regk_eth_rw_ma1_hi_default = 0x00000000,
670 + regk_eth_rw_ma1_lo_default = 0x00000000,
671 + regk_eth_rw_mgm_ctrl_default = 0x00000000,
672 + regk_eth_rw_test_ctrl_default = 0x00000000,
673 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
674 + regk_eth_size1518 = 0x000005ee,
675 + regk_eth_size1522 = 0x000005f2,
676 +#else
677 + regk_eth_size1518 = 0x00000000,
678 + regk_eth_size1522 = 0x00000001,
679 +#endif
680 + regk_eth_yes = 0x00000001
683 #endif /* __eth_defs_h */
684 diff -Nur linux-4.7.3.orig/drivers/net/cris/eth_v32.c linux-4.7.3/drivers/net/cris/eth_v32.c
685 --- linux-4.7.3.orig/drivers/net/cris/eth_v32.c 1970-01-01 01:00:00.000000000 +0100
686 +++ linux-4.7.3/drivers/net/cris/eth_v32.c 2016-09-13 01:48:05.953910422 +0200
687 @@ -0,0 +1,3060 @@
689 + * Driver for the ETRAX FS/Artpec-3 network controller.
691 + * Copyright (c) 2003-2008 Axis Communications AB.
693 + * TODO:
694 + * * Decrease the amount of code running with interrupts disabled.
695 + * * Rework the error handling so that we do not need to touch the tx
696 + * ring from the error interrupts. When done, we should be able to
697 + * do tx completion from the NAPI loop without disabling interrupts.
698 + * * Remove the gigabit code. It's probably never going to be used.
699 + */
701 +#include <linux/module.h>
703 +#include <linux/kernel.h>
704 +#include <linux/sched.h>
705 +#include <linux/delay.h>
706 +#include <linux/types.h>
707 +#include <linux/fcntl.h>
708 +#include <linux/interrupt.h>
709 +#include <linux/spinlock.h>
710 +#include <linux/errno.h>
711 +#include <linux/init.h>
713 +#include <linux/netdevice.h>
714 +#include <linux/etherdevice.h>
715 +#include <linux/skbuff.h>
716 +#include <linux/ethtool.h>
717 +#include <linux/mii.h>
719 +#include <asm/io.h> /* CRIS_LED_* I/O functions */
720 +#include <asm/irq.h>
721 +#include <hwregs/reg_map.h>
722 +#include <hwregs/reg_rdwr.h>
723 +#include <hwregs/dma.h>
724 +#include <hwregs/eth_defs.h>
725 +#ifdef CONFIG_ETRAXFS
726 +#include <hwregs/config_defs.h>
727 +#else
728 +#include <hwregs/clkgen_defs.h>
729 +#endif
730 +#include <hwregs/intr_vect_defs.h>
731 +#include <hwregs/strmux_defs.h>
732 +#include <asm/bitops.h>
733 +#include <asm/ethernet.h>
734 +#include <mach/dma.h>
735 +#include <pinmux.h>
737 +#include "eth_v32.h"
739 +#ifndef CONFIG_ETRAXFS
740 +#define ETH0_INTR_VECT ETH_INTR_VECT
741 +#define ETH1_INTR_VECT ETH_INTR_VECT
742 +#define regi_eth0 regi_eth
743 +#define regi_eth1 regi_
744 +#endif
746 +#define DEBUG(x)
747 +#define GET_BIT(bit,val) (((val) >> (bit)) & 0x01)
749 +#if defined(CONFIG_ETRAX_HAVE_PHY) || defined(CONFIG_ETRAX_PHY_FALLBACK)
750 +#define RESET_PHY 1
751 +#else
752 +#define RESET_PHY 0
753 +#endif
755 +enum {
756 + HAVE_PHY,
757 + NO_PHY,
758 + FALLBACK_PHY,
760 +#if defined(CONFIG_ETRAX_PHY_FALLBACK)
761 +#define PHY_MODE (FALLBACK_PHY)
762 +#elif defined(CONFIG_ETRAX_NO_PHY)
763 +#define PHY_MODE (NO_PHY)
764 +#elif defined(CONFIG_ETRAX_HAVE_PHY)
765 +#define PHY_MODE (HAVE_PHY)
766 +#else
767 +#error Unknown PHY behaviour
768 +#endif
770 +static struct {
771 + const char str[ETH_GSTRING_LEN];
772 +} const ethtool_stats_keys[] = {
773 + { "tx_dma_restarts" },
774 + { "tx_mac_resets" },
775 + { "rx_dma_restarts" },
776 + { "rx_dma_timeouts" },
777 + { " dropped_rx" }
780 +static void crisv32_eth_check_speed(unsigned long idev);
781 +static void crisv32_eth_check_duplex(unsigned long idev);
782 +static void update_rx_stats(struct crisv32_ethernet_local *np);
783 +static void update_tx_stats(struct crisv32_ethernet_local *np);
784 +static int crisv32_eth_poll(struct napi_struct *napi, int budget);
785 +static void crisv32_eth_setup_controller(struct net_device *dev);
786 +static int crisv32_eth_request_irqdma(struct net_device *dev);
787 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
788 +static void
789 +crisv32_eth_restart_rx_dma(struct net_device* dev,
790 + struct crisv32_ethernet_local *np);
791 +#endif
792 +#if 0
793 +static void crisv32_ethernet_bug(struct net_device *dev);
794 +#endif
797 + * The name of the card. Is used for messages and in the requests for
798 + * io regions, irqs and dma channels.
799 + */
800 +#ifdef CONFIG_ETRAXFS
801 +static const char cardname[] = "ETRAX FS built-in ethernet controller";
802 +#else
803 +static const char cardname[] = "ARTPEC-3 built-in ethernet controller";
804 +#endif
806 +/* Some chipset needs special care. */
807 +#ifndef CONFIG_ETRAX_NO_PHY
808 +struct transceiver_ops transceivers[] = {
809 + {0x1018, broadcom_check_speed, broadcom_check_duplex},
810 + {0x50EF, broadcom_check_speed, broadcom_check_duplex},
811 + /* TDK 2120 and TDK 2120C */
812 + {0xC039, tdk_check_speed, tdk_check_duplex},
813 + {0x039C, tdk_check_speed, tdk_check_duplex},
814 + /* Intel LXT972A*/
815 + {0x04de, intel_check_speed, intel_check_duplex},
816 + /* National Semiconductor DP83865 */
817 + {0x0017, national_check_speed, national_check_duplex},
818 + /* Vitesse VCS8641 */
819 + {0x01c1, vitesse_check_speed, vitesse_check_duplex},
820 + /* Davicom DM9161 */
821 + {0x606E, davicom_check_speed, davicom_check_duplex},
822 + /* Generic, must be last. */
823 + {0x0000, generic_check_speed, generic_check_duplex}
825 +#endif
827 +static struct net_device *crisv32_dev[2];
828 +static struct crisv32_eth_leds *crisv32_leds[3];
830 +/* Default MAC address for interface 0.
831 + * The real one will be set later. */
832 +static struct sockaddr default_mac_iface0 =
833 + {0, {0x00, 0x40, 0x8C, 0xCD, 0x00, 0x00}};
835 +#ifdef CONFIG_CPU_FREQ
836 +static int
837 +crisv32_ethernet_freq_notifier(struct notifier_block *nb, unsigned long val,
838 + void *data);
840 +static struct notifier_block crisv32_ethernet_freq_notifier_block = {
841 + .notifier_call = crisv32_ethernet_freq_notifier
843 +#endif
845 +static void receive_timeout(unsigned long arg);
846 +static void receive_timeout_work(struct work_struct* work);
847 +static void transmit_timeout(unsigned long arg);
850 + * mask in and out tx/rx interrupts.
851 + */
852 +static inline void crisv32_disable_tx_ints(struct crisv32_ethernet_local *np)
854 + reg_dma_rw_intr_mask intr_mask_tx = { .data = regk_dma_no };
855 + REG_WR(dma, np->dma_out_inst, rw_intr_mask, intr_mask_tx);
858 +static inline void crisv32_enable_tx_ints(struct crisv32_ethernet_local *np)
860 + reg_dma_rw_intr_mask intr_mask_tx = { .data = regk_dma_yes };
861 + REG_WR(dma, np->dma_out_inst, rw_intr_mask, intr_mask_tx);
864 +static inline void crisv32_disable_rx_ints(struct crisv32_ethernet_local *np)
866 + reg_dma_rw_intr_mask intr_mask_rx = { .in_eop = regk_dma_no };
867 + REG_WR(dma, np->dma_in_inst, rw_intr_mask, intr_mask_rx);
870 +static inline void crisv32_enable_rx_ints(struct crisv32_ethernet_local *np)
872 + reg_dma_rw_intr_mask intr_mask_rx = { .in_eop = regk_dma_yes };
873 + REG_WR(dma, np->dma_in_inst, rw_intr_mask, intr_mask_rx);
876 +static inline void crisv32_disable_eth_ints(struct crisv32_ethernet_local *np)
878 + int intr_mask_nw = 0x0;
879 + REG_WR_INT(eth, np->eth_inst, rw_intr_mask, intr_mask_nw);
882 +static inline void crisv32_enable_eth_ints(struct crisv32_ethernet_local *np)
884 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
885 + /* For Artpec-3 we use overrun to workaround voodoo TR 87 */
886 + int intr_mask_nw = 0x1c00;
887 +#else
888 + int intr_mask_nw = 0x1800;
889 +#endif
890 + REG_WR_INT(eth, np->eth_inst, rw_intr_mask, intr_mask_nw);
893 +static inline int crisv32_eth_gigabit(struct crisv32_ethernet_local *np)
895 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
896 + return np->gigabit_mode;
897 +#else
898 + return 0;
899 +#endif
902 +static inline void crisv32_eth_set_gigabit(struct crisv32_ethernet_local *np,
903 + int g)
905 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
906 + np->gigabit_mode = g;
907 +#endif
910 +/* start/stop receiver */
911 +static inline void crisv32_start_receiver(struct crisv32_ethernet_local *np)
913 + reg_eth_rw_rec_ctrl rec_ctrl;
915 + rec_ctrl = REG_RD(eth, np->eth_inst, rw_rec_ctrl);
916 + rec_ctrl.ma0 = regk_eth_yes;
917 + rec_ctrl.broadcast = regk_eth_rec;
918 + REG_WR(eth, np->eth_inst, rw_rec_ctrl, rec_ctrl);
921 +static inline void crisv32_stop_receiver(struct crisv32_ethernet_local *np)
923 + reg_eth_rw_rec_ctrl rec_ctrl;
925 + rec_ctrl = REG_RD(eth, np->eth_inst, rw_rec_ctrl);
926 + rec_ctrl.ma0 = regk_eth_no;
927 + rec_ctrl.broadcast = regk_eth_discard;
928 + REG_WR(eth, np->eth_inst, rw_rec_ctrl, rec_ctrl);
931 +static inline void crisv32_eth_reset(struct crisv32_ethernet_local *np)
933 + reg_eth_rw_gen_ctrl gen_ctrl = { 0 };
935 + gen_ctrl = REG_RD(eth, np->eth_inst, rw_gen_ctrl);
936 + gen_ctrl.en = regk_eth_no;
937 + REG_WR(eth, np->eth_inst, rw_gen_ctrl, gen_ctrl);
938 + gen_ctrl.en = regk_eth_yes;
939 + REG_WR(eth, np->eth_inst, rw_gen_ctrl, gen_ctrl);
942 +static void crisv32_eth_tx_cancel_frame(struct crisv32_ethernet_local *np)
944 + reg_eth_rw_tr_ctrl tr_ctrl;
946 + /* Cancel any pending transmits. This should bring us to the
947 + excessive collisions state but it doesn't always do it. */
948 + tr_ctrl = REG_RD(eth, np->eth_inst, rw_tr_ctrl);
949 + tr_ctrl.cancel = 1;
950 + REG_WR(eth, np->eth_inst, rw_tr_ctrl, tr_ctrl);
951 + tr_ctrl.cancel = 0;
952 + REG_WR(eth, np->eth_inst, rw_tr_ctrl, tr_ctrl);
956 + * Hack to disconnect/reconnect the dma from the ethernet block while we reset
957 + * things. TODO: verify that we don't need to disconnect out channels and
958 + * remove that code.
960 + * ARTPEC-3 has only a single ethernet block so np->eth_inst is always eth0.
961 + * The strmux values are named slightly different, redefine to avoid #ifdefs
962 + * in the code blocks. For artpec3 only regk_strmux_eth0 and channel 0/1
963 + * should be used.
964 + */
965 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
966 +#define regk_strmux_eth0 regk_strmux_eth
967 +#define regk_strmux_eth1 regk_strmux_eth
968 +#endif
969 +static inline void
970 +crisv32_disconnect_eth_tx_dma(struct crisv32_ethernet_local *np)
972 + reg_strmux_rw_cfg strmux_cfg;
974 + strmux_cfg = REG_RD(strmux, regi_strmux, rw_cfg);
975 + if (np->eth_inst == regi_eth0)
976 + strmux_cfg.dma0 = regk_strmux_off;
977 + else
978 + strmux_cfg.dma6 = regk_strmux_off;
979 + REG_WR(strmux, regi_strmux, rw_cfg, strmux_cfg);
982 +static inline void crisv32_connect_eth_tx_dma(struct crisv32_ethernet_local *np)
984 + reg_strmux_rw_cfg strmux_cfg;
986 + strmux_cfg = REG_RD(strmux, regi_strmux, rw_cfg);
987 + if (np->eth_inst == regi_eth0)
988 + strmux_cfg.dma0 = regk_strmux_eth0;
989 + else
990 + strmux_cfg.dma6 = regk_strmux_eth1;
991 + REG_WR(strmux, regi_strmux, rw_cfg, strmux_cfg);
994 +static inline void
995 +crisv32_disconnect_eth_rx_dma(struct crisv32_ethernet_local *np)
997 + reg_strmux_rw_cfg strmux_cfg;
999 + strmux_cfg = REG_RD(strmux, regi_strmux, rw_cfg);
1000 + if (np->eth_inst == regi_eth0)
1001 + strmux_cfg.dma1 = regk_strmux_off;
1002 + else
1003 + strmux_cfg.dma7 = regk_strmux_off;
1004 + REG_WR(strmux, regi_strmux, rw_cfg, strmux_cfg);
1007 +static inline void crisv32_connect_eth_rx_dma(struct crisv32_ethernet_local *np)
1009 + reg_strmux_rw_cfg strmux_cfg;
1011 + strmux_cfg = REG_RD(strmux, regi_strmux, rw_cfg);
1012 + if (np->eth_inst == regi_eth0)
1013 + strmux_cfg.dma1 = regk_strmux_eth0;
1014 + else
1015 + strmux_cfg.dma7 = regk_strmux_eth1;
1016 + REG_WR(strmux, regi_strmux, rw_cfg, strmux_cfg);
1019 +static int dma_wait_busy(int inst, int timeout)
1021 + reg_dma_rw_stream_cmd dma_sc;
1023 + do {
1024 + dma_sc = REG_RD(dma, inst, rw_stream_cmd);
1025 + } while (timeout-- > 0 && dma_sc.busy);
1026 + return dma_sc.busy;
1029 +static int __init crisv32_eth_request_irqdma(struct net_device *dev)
1031 + struct crisv32_ethernet_local *np = netdev_priv(dev);
1033 + /* Allocate IRQs and DMAs. */
1034 + if (np->eth_inst == regi_eth0) {
1035 + if (request_irq(DMA0_INTR_VECT, crisv32tx_eth_interrupt,
1036 + 0, "Ethernet TX", dev)) {
1037 + return -EAGAIN;
1040 + if (request_irq(DMA1_INTR_VECT, crisv32rx_eth_interrupt,
1041 + 0, "Ethernet RX", dev))
1042 + goto err0_1;
1044 + if (crisv32_request_dma(0, cardname, DMA_VERBOSE_ON_ERROR,
1045 + 12500000, dma_eth0))
1046 + goto err0_2;
1048 + if (crisv32_request_dma(1, cardname, DMA_VERBOSE_ON_ERROR,
1049 + 12500000, dma_eth0))
1050 + goto err0_3;
1052 + if (request_irq(ETH0_INTR_VECT, crisv32nw_eth_interrupt, 0,
1053 + cardname, dev)) {
1054 + crisv32_free_dma(1);
1055 +err0_3:
1056 + crisv32_free_dma(0);
1057 +err0_2:
1058 + free_irq(DMA1_INTR_VECT, dev);
1059 +err0_1:
1060 + free_irq(DMA0_INTR_VECT, dev);
1061 + return -EAGAIN;
1063 + } else {
1064 + if (request_irq(DMA6_INTR_VECT, crisv32tx_eth_interrupt,
1065 + 0, cardname, dev))
1066 + return -EAGAIN;
1068 + if (request_irq(DMA7_INTR_VECT, crisv32rx_eth_interrupt,
1069 + 0, cardname, dev))
1070 + goto err1_1;
1072 + if (crisv32_request_dma(6, cardname, DMA_VERBOSE_ON_ERROR,
1073 + 0, dma_eth1))
1074 + goto err1_2;
1076 + if (crisv32_request_dma(7, cardname, DMA_VERBOSE_ON_ERROR,
1077 + 0, dma_eth1))
1078 + goto err1_3;
1080 + if (request_irq(ETH1_INTR_VECT, crisv32nw_eth_interrupt, 0,
1081 + cardname, dev)) {
1082 + crisv32_free_dma(7);
1083 +err1_3:
1084 + crisv32_free_dma(6);
1085 +err1_2:
1086 + free_irq(DMA7_INTR_VECT, dev);
1087 +err1_1:
1088 + free_irq(DMA6_INTR_VECT, dev);
1089 + return -EAGAIN;
1092 + return 0;
1095 +static int __init crisv32_eth_init_phy(struct net_device *dev)
1097 + struct crisv32_ethernet_local *np = netdev_priv(dev);
1098 + struct timer_list timer_init = TIMER_INITIALIZER(NULL, 0, 0);
1100 + if (RESET_PHY) {
1101 +#ifdef CONFIG_ETRAXFS
1102 + reg_config_rw_pad_ctrl pad_ctrl;
1103 + pad_ctrl = REG_RD(config, regi_config, rw_pad_ctrl);
1104 + pad_ctrl.phyrst_n = 0;
1105 + REG_WR(config, regi_config, rw_pad_ctrl, pad_ctrl);
1107 + udelay(500); /* RESET_LEN */
1109 + pad_ctrl.phyrst_n = 1;
1110 + REG_WR(config, regi_config, rw_pad_ctrl, pad_ctrl);
1111 +#else
1112 + reg_eth_rw_gen_ctrl gen_ctrl = REG_RD(eth, np->eth_inst, rw_gen_ctrl);
1113 + gen_ctrl.phyrst_n = 0;
1114 + REG_WR(eth, np->eth_inst, rw_gen_ctrl, gen_ctrl);
1116 + udelay(500); /* RESET_LEN */
1118 + gen_ctrl.phyrst_n = 1;
1119 + REG_WR(eth, np->eth_inst, rw_gen_ctrl, gen_ctrl);
1120 +#endif
1122 + udelay(200); /* RESET_WAIT */
1125 + switch (PHY_MODE) {
1126 + case FALLBACK_PHY:
1127 + /* Fall back on using fixed iff there is no PHY on */
1128 + /* the MDIO bus */
1129 + np->fixed_phy = crisv32_eth_probe_transceiver(dev) != 0;
1130 + if (np->fixed_phy)
1131 + printk(KERN_WARNING
1132 + "eth: No transceiver found, falling back "
1133 + "to fixed phy mode\n");
1134 + break;
1136 + case NO_PHY:
1137 + /* Don't even bother looking for a PHY, always rely */
1138 + /* on fixed PHY */
1139 + np->fixed_phy = 1;
1140 + break;
1142 + default: /* HAVE_PHY */
1143 + /* Look for a PHY and abort if there is none, */
1144 + /* otherwise just carry on */
1145 + if (crisv32_eth_probe_transceiver(dev)) {
1146 + printk(KERN_WARNING
1147 + "eth: No transceiver found, "
1148 + "removing interface\n");
1149 + return -ENODEV;
1151 + np->fixed_phy = 0;
1154 + if (np->fixed_phy) {
1155 + reg_eth_rw_rec_ctrl rec_ctrl;
1157 + /* speed */
1158 + np->current_speed = 100;
1159 + np->current_speed_selection = 100; /* Auto. */
1161 + /* duplex */
1162 + np->full_duplex = 1;
1163 + np->current_duplex = full;
1165 + rec_ctrl = REG_RD(eth, np->eth_inst, rw_rec_ctrl);
1166 + rec_ctrl.duplex = regk_eth_full;
1167 + REG_WR(eth, np->eth_inst, rw_rec_ctrl, rec_ctrl);
1168 + } else {
1169 + np->mii_if.supports_gmii = mii_check_gmii_support(&np->mii_if);
1171 + /* speed */
1172 + np->current_speed = 10;
1173 + np->current_speed_selection = 0; /* Auto. */
1174 + np->speed_timer = timer_init;
1175 + np->speed_timer.expires = jiffies + NET_LINK_UP_CHECK_INTERVAL;
1176 + np->speed_timer.data = (unsigned long) dev;
1177 + np->speed_timer.function = crisv32_eth_check_speed;
1179 + /* duplex */
1180 + np->full_duplex = 0;
1181 + np->current_duplex = autoneg;
1182 + np->duplex_timer = timer_init;
1183 + np->duplex_timer.expires = jiffies + NET_DUPLEX_CHECK_INTERVAL;
1184 + np->duplex_timer.data = (unsigned long) dev;
1185 + np->duplex_timer.function = crisv32_eth_check_duplex;
1188 + return 0;
1191 +static void __init crisv32_eth_setup_controller(struct net_device *dev)
1193 + struct crisv32_ethernet_local *np = netdev_priv(dev);
1194 + reg_eth_rw_gen_ctrl gen_ctrl;
1196 + reg_eth_rw_tr_ctrl tr_ctrl = {
1197 + /* SW retransmits to avoid transmitter bugs. */
1198 + .retry = regk_eth_no,
1199 + .pad = regk_eth_yes,
1200 + .crc = regk_eth_yes
1201 + };
1203 + reg_eth_rw_rec_ctrl rec_ctrl = {
1204 + .ma0 = regk_eth_no, /* enable at open() */
1205 + .broadcast = regk_eth_no,
1206 + .max_size = regk_eth_size1522
1207 + };
1209 + reg_eth_rw_ga_lo ga_lo = { 0 };
1210 + reg_eth_rw_ga_hi ga_hi = { 0 };
1212 + /*
1213 + * Initialize group address registers to make sure that no
1214 + * unwanted addresses are matched.
1215 + */
1216 + REG_WR(eth, np->eth_inst, rw_ga_lo, ga_lo);
1217 + REG_WR(eth, np->eth_inst, rw_ga_hi, ga_hi);
1219 + /* Configure receiver and transmitter */
1220 + REG_WR(eth, np->eth_inst, rw_rec_ctrl, rec_ctrl);
1221 + REG_WR(eth, np->eth_inst, rw_tr_ctrl, tr_ctrl);
1223 + /*
1224 + * Read from rw_gen_ctrl so that we don't override any previous
1225 + * configuration.
1226 + */
1227 + gen_ctrl = REG_RD(eth, np->eth_inst, rw_gen_ctrl);
1228 + gen_ctrl.phy = regk_eth_mii_clk;
1229 +#ifdef CONFIG_ETRAXFS
1230 + /* On ETRAX FS, this bit has reversed meaning */
1231 + gen_ctrl.flow_ctrl = regk_eth_no;
1232 +#else
1233 + gen_ctrl.flow_ctrl = regk_eth_yes;
1234 +#endif
1236 + /* Enable ethernet controller with mii clk. */
1237 + REG_WR(eth, np->eth_inst, rw_gen_ctrl, gen_ctrl);
1238 + gen_ctrl.en = regk_eth_yes;
1239 + REG_WR(eth, np->eth_inst, rw_gen_ctrl, gen_ctrl);
1242 +static void crisv32_eth_reset_rx_ring(struct net_device *dev)
1244 + struct crisv32_ethernet_local *np = netdev_priv(dev);
1245 + int i;
1247 + /* cleanup the rx-ring */
1248 + for (i = 0; i < NBR_RX_DESC; i++) {
1249 + struct sk_buff *skb;
1250 + skb = np->dma_rx_descr_list[i].skb;
1251 + if (!skb
1252 + || (np->dma_rx_descr_list[i].descr.buf !=
1253 + (void *)virt_to_phys(skb->data))) {
1254 + if (skb)
1255 + dev_kfree_skb(skb);
1256 + skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE);
1257 + np->dma_rx_descr_list[i].skb = skb;
1258 + np->dma_rx_descr_list[i].descr.buf =
1259 + (char*)virt_to_phys(skb->data);
1261 + if (np->dma_rx_descr_list[i].descr.in_eop)
1262 + np->rx_restarts_dropped++;
1263 + np->dma_rx_descr_list[i].descr.after =
1264 + (char*)virt_to_phys(skb->data
1265 + + MAX_MEDIA_DATA_SIZE);
1266 + np->dma_rx_descr_list[i].descr.eol = 0;
1267 + np->dma_rx_descr_list[i].descr.in_eop = 0;
1268 + /* Workaround cache bug */
1269 + flush_dma_descr(&np->dma_rx_descr_list[i].descr, 1);
1272 + /* reset rx-ring */
1273 + np->active_rx_desc = &np->dma_rx_descr_list[0];
1274 + np->prev_rx_desc = &np->dma_rx_descr_list[NBR_RX_DESC - 1];
1275 + np->last_rx_desc = np->prev_rx_desc;
1276 + np->dma_rx_descr_list[NBR_RX_DESC - 1].descr.eol = 1;
1277 + flush_dma_descr(&np->dma_rx_descr_list[NBR_RX_DESC - 1].descr, 0);
1278 + /* ready to accept new packets. */
1279 + np->new_rx_package = 1;
1281 + /* Fill context descriptors. */
1282 + np->ctxt_in.next = 0;
1283 + np->ctxt_in.saved_data =
1284 + (void *)virt_to_phys(&np->active_rx_desc->descr);
1285 + np->ctxt_in.saved_data_buf = np->active_rx_desc->descr.buf;
1288 +static inline int crisv32_eth_tx_ring_full(struct crisv32_ethernet_local *np)
1290 + crisv32_eth_descr *active = np->active_tx_desc;
1292 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
1293 + active = phys_to_virt((unsigned long)active->descr.next);
1294 +#endif
1295 + if (active == np->catch_tx_desc)
1296 + return 1;
1297 + return 0;
1300 +static void crisv32_eth_reset_tx_ring(struct net_device *dev)
1302 + struct crisv32_ethernet_local *np = netdev_priv(dev);
1304 + /* free un-handled tx packets */
1305 + while (np->txpackets || np->catch_tx_desc != np->active_tx_desc) {
1306 + np->txpackets--;
1307 + if (np->catch_tx_desc->skb)
1308 + dev_kfree_skb(np->catch_tx_desc->skb);
1310 + np->catch_tx_desc->skb = 0;
1311 + np->catch_tx_desc =
1312 + phys_to_virt((int)np->catch_tx_desc->descr.next);
1315 + WARN_ON(np->txpackets != 0);
1316 + np->txpackets = 0;
1318 + /* reset tx-ring */
1319 + np->dma_tx_descr_list[0].descr.buf =
1320 + np->dma_tx_descr_list[0].descr.after = 0;
1321 + np->dma_tx_descr_list[0].descr.eol = 1;
1323 + np->active_tx_desc = &np->dma_tx_descr_list[0];
1324 + np->prev_tx_desc = &np->dma_tx_descr_list[NBR_TX_DESC - 1];
1325 + np->catch_tx_desc = &np->dma_tx_descr_list[0];
1327 + np->ctxt_out.next = 0;
1328 + np->ctxt_out.saved_data =
1329 + (void *)virt_to_phys(&np->dma_tx_descr_list[0].descr);
1333 +static void crisv32_eth_reset_rings(struct net_device *dev)
1335 + crisv32_eth_reset_tx_ring(dev);
1336 + crisv32_eth_reset_rx_ring(dev);
1340 + * Really advance the receive ring. RX interrupts must be off.
1341 + */
1342 +static void __crisv32_eth_rx_ring_advance(struct crisv32_ethernet_local *np)
1344 + if (np->newbuf)
1345 + np->active_rx_desc->descr.buf = (void *) np->newbuf;
1346 + np->active_rx_desc->descr.after =
1347 + np->active_rx_desc->descr.buf + MAX_MEDIA_DATA_SIZE;
1348 + np->active_rx_desc->descr.eol = 1;
1349 + np->active_rx_desc->descr.in_eop = 0;
1350 + np->active_rx_desc = phys_to_virt((int)np->active_rx_desc->descr.next);
1351 + barrier();
1352 + np->prev_rx_desc->descr.eol = 0;
1354 + /* Workaround cache bug. */
1355 + flush_dma_descr(&np->prev_rx_desc->descr, 0);
1356 + np->prev_rx_desc = phys_to_virt((int)np->prev_rx_desc->descr.next);
1357 + flush_dma_descr(&np->prev_rx_desc->descr, 1);
1361 + * Advance the receive ring. RX interrupts must be off.
1362 + */
1363 +static inline void
1364 +crisv32_eth_rx_ring_advance(struct crisv32_ethernet_local *np)
1366 + /*
1367 + * When the input DMA reaches eol precaution must be taken, otherwise
1368 + * the DMA could stop. The problem occurs if the eol flag is re-placed
1369 + * on the descriptor that the DMA stands on before the DMA proceed to
1370 + * the next descriptor. This case could, for example, happen if there
1371 + * is a traffic burst and then the network goes silent. To prevent this
1372 + * we make sure that we do not set the eol flag on the descriptor that
1373 + * the DMA stands on.
1374 + */
1375 + unsigned long dma_pos;
1377 + /* Get the current input dma position. */
1378 + dma_pos = REG_RD_INT(dma, np->dma_in_inst, rw_saved_data);
1380 + if (virt_to_phys(&np->active_rx_desc->descr) != dma_pos) {
1381 + crisv32_eth_descr *cur, *nxt;
1383 + /* Now really advance the ring one step. */
1384 + __crisv32_eth_rx_ring_advance(np);
1386 + cur = np->active_rx_desc;
1387 + nxt = (void *)phys_to_virt((unsigned long)cur->descr.next);
1388 + flush_dma_descr(&cur->descr, 0);
1389 + flush_dma_descr(&nxt->descr, 0);
1390 + if (!cur->descr.in_eop && nxt->descr.in_eop) {
1391 + /* TODO: Investigate this more. The DMA seems to have
1392 + skipped a descriptor, possibly due to incoherence
1393 + between the CPU L1 cache and the DMA updates to the
1394 + descriptor. */
1395 + np->newbuf = (unsigned long) np->active_rx_desc->descr.buf;
1396 + __crisv32_eth_rx_ring_advance(np);
1398 + /* flush after peek. */
1399 + flush_dma_descr(&cur->descr, 0);
1400 + flush_dma_descr(&nxt->descr, 0);
1401 + } else {
1402 + /* delay the advancing of the ring. */
1403 + np->new_rx_package = 0;
1407 +static void __init crisv32_eth_init_rings(struct net_device *dev)
1409 + struct crisv32_ethernet_local *np = netdev_priv(dev);
1410 + int i;
1412 + /* Initialise receive descriptors for interface. */
1413 + for (i = 0; i < NBR_RX_DESC; i++) {
1414 + struct sk_buff *skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE);
1416 + np->dma_rx_descr_list[i].skb = skb;
1417 + np->dma_rx_descr_list[i].descr.buf =
1418 + (char*)virt_to_phys(skb->data);
1419 + np->dma_rx_descr_list[i].descr.after =
1420 + (char*)virt_to_phys(skb->data + MAX_MEDIA_DATA_SIZE);
1422 + np->dma_rx_descr_list[i].descr.eol = 0;
1423 + np->dma_rx_descr_list[i].descr.in_eop = 0;
1424 + np->dma_rx_descr_list[i].descr.next =
1425 + (void *) virt_to_phys(&np->dma_rx_descr_list[i + 1].descr);
1427 + /* bend the list into a ring */
1428 + np->dma_rx_descr_list[NBR_RX_DESC - 1].descr.next =
1429 + (void *) virt_to_phys(&np->dma_rx_descr_list[0].descr);
1431 + /* Initialize transmit descriptors. */
1432 + for (i = 0; i < NBR_TX_DESC; i++) {
1433 + np->dma_tx_descr_list[i].descr.wait = 1;
1434 + np->dma_tx_descr_list[i].descr.eol = 0;
1435 + np->dma_tx_descr_list[i].descr.out_eop = 0;
1436 + np->dma_tx_descr_list[i].descr.next =
1437 + (void*)virt_to_phys(&np->dma_tx_descr_list[i+1].descr);
1439 + /* bend the list into a ring */
1440 + np->dma_tx_descr_list[NBR_TX_DESC - 1].descr.next =
1441 + (void *) virt_to_phys(&np->dma_tx_descr_list[0].descr);
1443 + crisv32_eth_reset_rings(dev);
1446 +static void __init crisv32_init_leds(int ledgrp, struct net_device *dev)
1448 + struct timer_list timer_init = TIMER_INITIALIZER(NULL, 0, 0);
1449 + struct crisv32_ethernet_local *np = netdev_priv(dev);
1451 + /* Use already allocated led grp if initialized */
1452 + if (crisv32_leds[ledgrp] != NULL) {
1453 + np->leds = crisv32_leds[ledgrp];
1454 + return;
1457 + crisv32_leds[ledgrp] =
1458 + kmalloc(sizeof(struct crisv32_eth_leds), GFP_KERNEL);
1460 + crisv32_leds[ledgrp]->ledgrp = ledgrp;
1461 + crisv32_leds[ledgrp]->led_active = 0;
1462 + crisv32_leds[ledgrp]->ifisup[0] = 0;
1463 + crisv32_leds[ledgrp]->ifisup[1] = 0;
1464 + /* NOTE: Should this value be set to zero as the jiffies timer
1465 + can wrap? */
1466 + crisv32_leds[ledgrp]->led_next_time = jiffies;
1468 + crisv32_leds[ledgrp]->clear_led_timer = timer_init;
1469 + crisv32_leds[ledgrp]->clear_led_timer.function =
1470 + crisv32_clear_network_leds;
1471 + crisv32_leds[ledgrp]->clear_led_timer.data = (unsigned long) dev;
1473 + spin_lock_init(&crisv32_leds[ledgrp]->led_lock);
1475 + np->leds = crisv32_leds[ledgrp];
1478 +static int __init crisv32_ethernet_init(void)
1480 + struct crisv32_ethernet_local *np;
1481 + int ret = 0;
1483 +#ifdef CONFIG_ETRAXFS
1484 + printk("ETRAX FS 10/100MBit ethernet v0.01 (c)"
1485 + " 2003 Axis Communications AB\n");
1486 +#else
1487 + printk("ARTPEC-3 10/100 MBit ethernet (c)"
1488 + " 2003-2009 Axis Communications AB\n");
1489 +#endif
1491 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
1493 + reg_clkgen_rw_clk_ctrl clk_ctrl = REG_RD(clkgen, regi_clkgen,
1494 + rw_clk_ctrl);
1495 + clk_ctrl.eth = clk_ctrl.dma0_1_eth = regk_clkgen_yes;
1496 + REG_WR(clkgen, regi_clkgen, rw_clk_ctrl, clk_ctrl);
1498 +#endif
1500 + int iface0 = 0;
1502 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
1503 + if (crisv32_pinmux_alloc_fixed(pinmux_eth))
1504 + panic("Eth pinmux\n");
1505 +#endif
1507 + if (!(crisv32_dev[iface0] = alloc_etherdev(sizeof *np)))
1508 + return -ENOMEM;
1510 + ret |= crisv32_ethernet_device_init(crisv32_dev[iface0]);
1512 + crisv32_init_leds(CRIS_LED_GRP_NONE,crisv32_dev[iface0]);
1514 + np = (struct crisv32_ethernet_local *) netdev_priv(crisv32_dev[iface0]);
1515 + np->eth_inst = regi_eth0;
1516 + np->dma_out_inst = regi_dma0;
1517 + np->dma_in_inst = regi_dma1;
1519 + np->mii_if.dev = crisv32_dev[iface0];
1520 + np->mii_if.mdio_read = crisv32_eth_get_mdio_reg;
1521 + np->mii_if.mdio_write = crisv32_eth_set_mdio_reg;
1522 + np->mii_if.phy_id_mask = 0x1f;
1523 + np->mii_if.reg_num_mask = 0x1f;
1525 + np->use_leds = 1;
1526 + np->autoneg_normal = 1;
1529 + register_netdev(crisv32_dev[iface0]);
1531 + /* Set up default MAC address */
1532 + memcpy(crisv32_dev[iface0]->dev_addr, default_mac_iface0.sa_data, 6);
1533 + crisv32_eth_set_mac_address(crisv32_dev[iface0], &default_mac_iface0);
1534 + if (crisv32_eth_request_irqdma(crisv32_dev[iface0]))
1535 + printk("%s: eth0 unable to allocate IRQ and DMA resources\n",
1536 + __func__);
1537 + np->txpackets = 0;
1538 + crisv32_eth_init_rings(crisv32_dev[iface0]);
1539 + crisv32_eth_setup_controller(crisv32_dev[iface0]);
1540 + ret |= crisv32_eth_init_phy(crisv32_dev[iface0]);
1541 + if (ret) {
1542 + unregister_netdev(crisv32_dev[iface0]);
1543 + return ret;
1547 +#ifdef CONFIG_ETRAX_ETHERNET_IFACE1
1549 + int iface1 = 0;
1550 + /* Default MAC address for interface 1.
1551 + * The real one will be set later. */
1552 + static struct sockaddr default_mac_iface1 =
1553 + {0, {0x00, 0x40, 0x8C, 0xCD, 0x00, 0x01}};
1555 + if (crisv32_pinmux_alloc_fixed(pinmux_eth1))
1556 + panic("Eth pinmux\n");
1558 + /* Increase index to device array if interface 0 is enabled as well.*/
1559 +#ifdef CONFIG_ETRAX_ETHERNET_IFACE0
1560 + iface1++;
1561 +#endif
1562 + if (!(crisv32_dev[iface1] = alloc_etherdev(sizeof *np)))
1563 + return -ENOMEM;
1565 + ret |= crisv32_ethernet_device_init(crisv32_dev[iface1]);
1567 + crisv32_init_leds(CRIS_LED_GRP_NONE,crisv32_dev[iface1]);
1569 + np = (struct crisv32_ethernet_local *) netdev_priv(crisv32_dev[iface1]);
1570 + np->eth_inst = regi_eth1;
1571 + np->dma_out_inst = regi_dma6;
1572 + np->dma_in_inst = regi_dma7;
1574 + np->mii_if.dev = crisv32_dev[iface1];
1575 + np->mii_if.mdio_read = crisv32_eth_get_mdio_reg;
1576 + np->mii_if.mdio_write = crisv32_eth_set_mdio_reg;
1577 + np->mii_if.phy_id_mask = 0x1f;
1578 + np->mii_if.reg_num_mask = 0x1f;
1581 + register_netdev(crisv32_dev[iface1]);
1583 + /* Set up default MAC address */
1584 + memcpy(crisv32_dev[iface1]->dev_addr, default_mac_iface1.sa_data, 6);
1585 + crisv32_eth_set_mac_address(crisv32_dev[iface1], &default_mac_iface1);
1587 + if (crisv32_eth_request_irqdma(crisv32_dev[iface1]))
1588 + printk("%s: eth1 unable to allocate IRQ and DMA resources\n",
1589 + __func__);
1590 + np->txpackets = 0;
1591 + crisv32_eth_init_rings(crisv32_dev[iface1]);
1592 + crisv32_eth_setup_controller(crisv32_dev[iface1]);
1593 + ret |= crisv32_eth_init_phy(crisv32_dev[iface1]);
1594 + if (ret) {
1595 + unregister_netdev(crisv32_dev[iface1]);
1596 + return ret;
1599 +#endif /* CONFIG_ETRAX_ETHERNET_IFACE1 */
1601 +#ifdef CONFIG_CPU_FREQ
1602 + cpufreq_register_notifier(&crisv32_ethernet_freq_notifier_block,
1603 + CPUFREQ_TRANSITION_NOTIFIER);
1604 +#endif
1606 + return ret;
1609 +static struct net_device_ops crisv32_netdev_ops = {
1610 + .ndo_open = crisv32_eth_open,
1611 + .ndo_stop = crisv32_eth_close,
1612 + .ndo_start_xmit = crisv32_eth_send_packet,
1613 + .ndo_set_rx_mode = crisv32_eth_set_rx_mode,
1614 + .ndo_validate_addr = eth_validate_addr,
1615 + .ndo_set_mac_address = crisv32_eth_set_mac_address,
1616 + .ndo_do_ioctl = crisv32_eth_ioctl,
1617 + .ndo_get_stats = crisv32_get_stats,
1618 + .ndo_tx_timeout = crisv32_eth_do_tx_recovery,
1619 + .ndo_set_config = crisv32_eth_set_config,
1622 +static int __init crisv32_ethernet_device_init(struct net_device *dev)
1624 + struct crisv32_ethernet_local *np;
1625 + struct timer_list timer_init = TIMER_INITIALIZER(NULL, 0, 0);
1627 + dev->base_addr = 0; /* Just to have something to show. */
1629 + /* we do our own locking */
1630 + dev->features |= NETIF_F_LLTX;
1632 + /* We use several IRQs and DMAs so just report 0 here. */
1633 + dev->irq = 0;
1634 + dev->dma = 0;
1636 + /*
1637 + * Fill in our handlers so the network layer can talk to us in the
1638 + * future.
1639 + */
1640 + dev->netdev_ops = &crisv32_netdev_ops;
1641 + dev->ethtool_ops = &crisv32_ethtool_ops;
1642 + dev->watchdog_timeo = HZ * 10;
1643 +#ifdef CONFIG_NET_POLL_CONTROLLER
1644 + dev->poll_controller = crisv32_netpoll;
1645 +#endif
1646 + np = netdev_priv(dev);
1647 + np->dev = dev;
1649 + /*
1650 + * 8 skbs keeps the system very responsive even under high load.
1651 + * At 64 the system locks, pretty much the same way as without NAPI.
1653 + * TODO: measure with 2 interfaces
1654 + */
1655 + netif_napi_add(dev, &np->napi, crisv32_eth_poll, 8);
1657 + spin_lock_init(&np->lock);
1658 + spin_lock_init(&np->transceiver_lock);
1660 + np->receive_timer = timer_init;
1661 + np->receive_timer.data = (unsigned)dev;
1662 + np->receive_timer.function = receive_timeout;
1664 + INIT_WORK(&np->receive_work, receive_timeout_work);
1666 + np->transmit_timer = timer_init;
1667 + np->transmit_timer.data = (unsigned)dev;
1668 + np->transmit_timer.function = transmit_timeout;
1670 + return 0;
1673 +static int crisv32_eth_open(struct net_device *dev)
1675 + struct sockaddr mac_addr;
1676 + reg_dma_rw_ack_intr ack_intr = { .data = 1, .in_eop = 1 };
1677 + reg_eth_rw_clr_err clr_err = {.clr = regk_eth_yes};
1678 + /*
1679 + * dont interrupt us at any stat counter thresholds, only at urun
1680 + * and exc_col.
1681 + */
1682 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
1683 + /* For Artpec-3 we use overrun to workaround voodoo TR 87 */
1684 + int intr_mask_nw = 0x1c00;
1685 +#else
1686 + int intr_mask_nw = 0x1800;
1687 +#endif
1688 + int eth_ack_intr = 0xffff;
1689 + struct crisv32_ethernet_local *np = netdev_priv(dev);
1691 + spin_lock(&np->lock);
1692 + crisv32_eth_set_gigabit(np, 0);
1694 + crisv32_disable_tx_ints(np);
1695 + crisv32_disable_rx_ints(np);
1697 + REG_WR(eth, np->eth_inst, rw_clr_err, clr_err);
1698 + REG_WR_INT(eth, np->eth_inst, rw_ack_intr, eth_ack_intr);
1699 + REG_WR_INT(eth, np->eth_inst, rw_intr_mask, intr_mask_nw);
1700 + crisv32_eth_reset_rings(dev);
1702 + /* Give the hardware an idea of what MAC address we want. */
1703 + memcpy(mac_addr.sa_data, dev->dev_addr, dev->addr_len);
1704 + crisv32_eth_set_mac_address(dev, &mac_addr);
1706 + /* Enable irq and make sure that the irqs are cleared. */
1707 + REG_WR(dma, np->dma_out_inst, rw_ack_intr, ack_intr);
1708 + REG_WR(dma, np->dma_in_inst, rw_ack_intr, ack_intr);
1710 + crisv32_disconnect_eth_rx_dma(np);
1712 + /* Prepare input DMA. */
1713 + DMA_RESET(np->dma_in_inst);
1714 + DMA_ENABLE(np->dma_in_inst);
1715 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
1716 + DMA_WR_CMD(np->dma_in_inst, regk_dma_set_w_size2);
1717 +#endif
1718 + DMA_START_CONTEXT(np->dma_in_inst, virt_to_phys(&np->ctxt_in));
1719 + DMA_CONTINUE(np->dma_in_inst);
1720 + crisv32_enable_rx_ints(np);
1721 + crisv32_start_receiver(np);
1723 + /* Prepare output DMA. */
1724 + DMA_RESET(np->dma_out_inst);
1725 + DMA_ENABLE(np->dma_out_inst);
1726 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
1727 + DMA_WR_CMD(np->dma_out_inst, regk_dma_set_w_size4);
1728 +#endif
1729 + crisv32_connect_eth_rx_dma(np);
1731 + netif_start_queue(dev);
1732 + crisv32_enable_tx_ints(np);
1734 + if (!np->fixed_phy) {
1735 + /* Start duplex/speed timers */
1736 + if (!timer_pending(&np->speed_timer))
1737 + add_timer(&np->speed_timer);
1738 + if (!timer_pending(&np->duplex_timer))
1739 + add_timer(&np->duplex_timer);
1742 + spin_unlock(&np->lock);
1743 + /*
1744 + * We are now ready to accept transmit requests from the queueing
1745 + * layer of the networking.
1746 + */
1747 + np->link = 1;
1748 + netif_carrier_on(dev);
1749 + napi_enable(&np->napi);
1751 + return 0;
1754 +static int crisv32_eth_close(struct net_device *dev)
1756 + reg_dma_rw_ack_intr ack_intr = {0};
1758 + struct crisv32_ethernet_local *np = netdev_priv(dev);
1759 + unsigned long flags;
1761 + del_timer(&np->transmit_timer);
1762 + spin_lock_irqsave(&np->lock, flags);
1764 + /* stop the receiver before the DMA channels to avoid overruns. */
1765 + crisv32_disable_rx_ints(np);
1766 + napi_disable(&np->napi);
1767 + crisv32_stop_receiver(np);
1769 + netif_stop_queue(dev);
1771 + /* Reset the TX DMA in case it has hung on something. */
1772 + DMA_RESET(np->dma_in_inst);
1774 + /* Stop DMA */
1775 + DMA_STOP(np->dma_in_inst);
1776 + DMA_STOP(np->dma_out_inst);
1778 + /* Disable irq and make sure that the irqs are cleared. */
1779 + crisv32_disable_tx_ints(np);
1780 + ack_intr.data = 1;
1781 + REG_WR(dma, np->dma_out_inst, rw_ack_intr, ack_intr);
1783 + ack_intr.in_eop = 1;
1784 + REG_WR(dma, np->dma_in_inst, rw_ack_intr, ack_intr);
1786 + np->sender_started = 0;
1787 + spin_unlock_irqrestore(&np->lock, flags);
1789 + /* Update the statistics. */
1790 + update_rx_stats(np);
1791 + update_tx_stats(np);
1793 + if (!np->fixed_phy) {
1794 + /* Stop speed/duplex timers */
1795 + del_timer(&np->speed_timer);
1796 + del_timer(&np->duplex_timer);
1799 + return 0;
1802 +static int crisv32_eth_set_mac_address(struct net_device *dev, void *vpntr)
1804 + int i;
1805 + static int first = 1;
1807 + unsigned char *addr = ((struct sockaddr*)vpntr)->sa_data;
1809 + reg_eth_rw_ma0_lo ma0_lo =
1810 + { addr[0] | (addr[1] << 8) | (addr[2] << 16) | (addr[3] << 24)};
1812 + reg_eth_rw_ma0_hi ma0_hi = { addr[4] | (addr[5] << 8) };
1814 + struct crisv32_ethernet_local *np = netdev_priv(dev);
1816 + /* Remember the address. */
1817 + memcpy(dev->dev_addr, addr, dev->addr_len);
1819 + /*
1820 + * Write the address to the hardware.
1821 + * Note the way the address is wrapped:
1822 + * ma0_l0 = a0_0 | (a0_1 << 8) | (a0_2 << 16) | (a0_3 << 24);
1823 + * ma0_hi = a0_4 | (a0_5 << 8);
1824 + */
1825 + REG_WR(eth, np->eth_inst, rw_ma0_lo, ma0_lo);
1826 + REG_WR(eth, np->eth_inst, rw_ma0_hi, ma0_hi);
1828 + if (first) {
1829 + printk(KERN_INFO "%s: changed MAC to ", dev->name);
1831 + for (i = 0; i < 5; i++)
1832 + printk("%02X:", dev->dev_addr[i]);
1833 + printk("%02X\n", dev->dev_addr[i]);
1835 + first = 0;
1838 + return 0;
1841 +static irqreturn_t crisv32rx_eth_interrupt(int irq, void *dev_id)
1843 + struct net_device *dev = (struct net_device *) dev_id;
1844 + struct crisv32_ethernet_local *np = netdev_priv(dev);
1845 + reg_dma_r_masked_intr masked_in;
1847 + masked_in = REG_RD(dma, np->dma_in_inst, r_masked_intr);
1849 + if (masked_in.in_eop) {
1850 + reg_dma_rw_ack_intr ack_intr = {0};
1852 + /*
1853 + * Ack the rx irq even if we are not prepared to start
 1854 +		 * polling. This is needed to handle incoming packets
1855 + * during the stop sequence.
1856 + */
1857 + ack_intr.in_eop = 1;
1858 + REG_WR(dma, np->dma_in_inst, rw_ack_intr, ack_intr);
1860 + mod_timer(&np->receive_timer, jiffies + HZ);
1861 + np->do_rx_recovery = 0;
1863 + if (napi_schedule_prep(&np->napi)) {
1864 + crisv32_disable_rx_ints(np);
1865 + crisv32_disable_tx_ints(np);
1866 + /* put us onto the poll list */
1867 + __napi_schedule(&np->napi);
1869 + } else {
1870 + /* Unexpected, ACK it and hope for the best. */
1871 + reg_dma_rw_ack_intr ack_intr = {
1872 + .group = 1,
1873 + .ctxt = 1,
1874 + .data = 1,
1875 + .in_eop = 0,
1876 + .stream_cmd = 1,
1877 + .dummy1 = ~0
1878 + };
1879 + REG_WR(dma, np->dma_in_inst, rw_ack_intr, ack_intr);
1882 + return IRQ_HANDLED;
1885 +static inline void crisv32_eth_roll_tx_timer(struct crisv32_ethernet_local *np)
1887 + /* If there are more packets in the ring, roll the tx timer. */
1888 + if (np->txpackets) {
1889 + /* Eth pause frames may halt us for up to 320ms (100mbit). */
1890 + unsigned long timeout = jiffies + (HZ / 3) + 1;
1891 + mod_timer(&np->transmit_timer, timeout);
1893 + else
1894 + del_timer(&np->transmit_timer);
1897 +/* Call with np->lock held. */
1898 +static void _crisv32_tx_ring_advance(struct crisv32_ethernet_local *np,
1899 + int cleanup)
1901 + reg_dma_rw_stat stat;
1902 + dma_descr_data *dma_pos;
1903 + struct net_device *dev = np->dev;
1904 + int eol;
1906 + /* Get the current output dma position. */
1907 + dma_pos = phys_to_virt(REG_RD_INT(dma, np->dma_out_inst, rw_data));
1908 + stat = REG_RD(dma, np->dma_out_inst, rw_stat);
1909 + eol = stat.list_state == regk_dma_data_at_eol;
1910 + if (cleanup || eol)
1911 + dma_pos = &np->active_tx_desc->descr;
 1913 +	/* Take care of transmitted DMA descriptors and report sent packets. */
1914 + while (np->txpackets && (&np->catch_tx_desc->descr != dma_pos)) {
1915 + /* Update sent packet statistics. */
1916 + np->stats.tx_bytes += np->catch_tx_desc->skb->len;
1917 + np->stats.tx_packets++;
1919 + dev_kfree_skb_any(np->catch_tx_desc->skb);
1920 + np->catch_tx_desc->skb = 0;
1921 + np->txpackets--;
1922 + np->catch_tx_desc->descr.buf = 0;
1923 + np->catch_tx_desc =
1924 + phys_to_virt((int)np->catch_tx_desc->descr.next);
1925 + np->do_tx_recovery = 0;
1926 + np->retrans = 0;
1928 + netif_wake_queue(dev);
1932 +static inline void crisv32_tx_ring_advance(struct crisv32_ethernet_local *np)
1934 + unsigned long flags;
1936 + spin_lock_irqsave(&np->lock, flags);
1937 + _crisv32_tx_ring_advance(np, 0);
1938 + crisv32_eth_roll_tx_timer(np);
1939 + spin_unlock_irqrestore(&np->lock, flags);
1942 +static inline int crisv32_tx_complete(struct crisv32_ethernet_local *np)
1944 + reg_dma_rw_ack_intr ack_intr = { .data = 1 };
1945 + reg_dma_r_intr ints;
1946 + int r = 0;
1948 + /* We are interested in the unmasked raw interrupt source here. When
1949 + polling with tx interrupts masked off we still want to do
 1950 +	   tx completion when the DMA makes progress. */
1951 + ints = REG_RD(dma, np->dma_out_inst, r_intr);
1952 + if (ints.data)
1954 + /* ack the interrupt, if it was active */
1955 + REG_WR(dma, np->dma_out_inst, rw_ack_intr, ack_intr);
1956 + crisv32_tx_ring_advance(np);
1957 + r = 1;
1959 + return r;
1962 +static irqreturn_t crisv32tx_eth_interrupt(int irq, void *dev_id)
1964 + struct crisv32_ethernet_local *np = netdev_priv(dev_id);
1966 + crisv32_tx_complete(np);
1967 + return IRQ_HANDLED;
1971 +/* Update receive errors. */
1972 +static void
1973 +update_rx_stats(struct crisv32_ethernet_local *np)
1975 + reg_eth_rs_rec_cnt r;
1977 + r = REG_RD(eth, np->eth_inst, rs_rec_cnt);
1979 + np->stats.rx_over_errors += r.congestion;
1980 + np->stats.rx_crc_errors += r.crc_err;
1981 + np->stats.rx_frame_errors += r.align_err;
1982 + np->stats.rx_length_errors += r.oversize;
1983 + np->stats.rx_errors += r.crc_err + r.align_err +
1984 + r.oversize + r.congestion;
1987 +/* Update transmit errors. */
1988 +static void update_tx_stats(struct crisv32_ethernet_local *np)
1990 + reg_eth_rs_tr_cnt r;
1991 + reg_eth_rs_phy_cnt rp;
1993 + r = REG_RD(eth, np->eth_inst, rs_tr_cnt);
1994 + rp = REG_RD(eth, np->eth_inst, rs_phy_cnt);
1996 + /* r.deferred is not good for counting collisions because it also
1997 + includes frames that have to wait for the interframe gap. That
1998 + means we get deferred frames even when in full duplex.
1999 + Here we don't actually count the number of collisions that
 2000 +	   occurred (artpec3 seems to lack such a counter); instead we count
2001 + the number of frames that collide once or more. */
2002 + np->stats.collisions += r.mult_col + r.single_col;
2003 + np->stats.tx_window_errors += r.late_col;
2004 + np->stats.tx_carrier_errors += rp.carrier_loss;
2006 + /* Ordinary collisions are not errors, they are just part of
2007 + ethernet's bus arbitration and congestion control mechanisms.
2008 + Late collisions are serious errors though. */
2009 + np->stats.tx_errors += r.late_col;
2012 +/* Get current statistics. */
2013 +static struct net_device_stats *crisv32_get_stats(struct net_device *dev)
2015 + unsigned long flags;
2016 + struct crisv32_ethernet_local *np = netdev_priv(dev);
2018 + spin_lock_irqsave(&np->lock, flags);
2020 + update_rx_stats(np);
2021 + update_tx_stats(np);
2023 + spin_unlock_irqrestore(&np->lock, flags);
2025 + return &np->stats;
 2028 +/* Check for network errors. This acknowledges the received interrupt. */
2029 +static irqreturn_t crisv32nw_eth_interrupt(int irq, void *dev_id)
2031 + struct net_device *dev = (struct net_device *) dev_id;
2032 + struct crisv32_ethernet_local *np = netdev_priv(dev);
2033 + reg_eth_r_masked_intr intr_mask;
2034 + int ack_intr = 0xffff;
2035 + reg_eth_rw_clr_err clr_err;
2037 + intr_mask = REG_RD(eth, np->eth_inst, r_masked_intr);
2039 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
2040 + /* Only apply the workaround if it is not already pending.
2041 + enable_eth_ints will re-enable the orun interrupt regardless
2042 + of pending_overrun. */
2043 + if (intr_mask.orun && !np->pending_overrun) {
2044 + reg_eth_rw_rec_ctrl rec_ctrl =
2045 + REG_RD(eth, np->eth_inst, rw_rec_ctrl);
2046 + np->saved_rec_ctrl = rec_ctrl;
2047 + np->overrun_set = 1;
2048 + DMA_STOP(np->dma_in_inst);
2049 + rec_ctrl.ma0 = regk_eth_no;
2050 + rec_ctrl.broadcast = regk_eth_no;
2051 + REG_WR(eth, np->eth_inst, rw_rec_ctrl, rec_ctrl);
2052 + np->saved_ga_lo = REG_RD_INT(eth, np->eth_inst, rw_ga_lo);
2053 + np->saved_ga_hi = REG_RD_INT(eth, np->eth_inst, rw_ga_hi);
2054 + REG_WR_INT(eth, np->eth_inst, rw_ga_lo, 0);
2055 + REG_WR_INT(eth, np->eth_inst, rw_ga_hi, 0);
2056 + REG_WR_INT(eth, np->eth_inst, rw_intr_mask,
2057 + REG_RD_INT(eth, np->eth_inst, rw_intr_mask) & 0xfbff);
2058 + REG_WR_INT(eth, np->eth_inst, rw_ack_intr, 0x400);
2059 + intr_mask.orun = 0;
2060 + np->pending_overrun = 1;
2061 + if (!np->napi_processing)
2062 + crisv32_eth_restart_rx_dma(np->dev, np);
2064 + return IRQ_HANDLED;
2066 +#endif
2068 + /*
2069 + * Check for underrun and/or excessive collisions. Note that the
2070 + * rw_clr_err register clears both underrun and excessive collision
2071 + * errors, so there's no need to check them separately.
2072 + */
2073 + if (np->sender_started
2074 + && (intr_mask.urun || intr_mask.exc_col)) {
2075 + unsigned long flags;
 2077 +		/* Underruns are considered tx errors. */
2078 + np->stats.tx_errors += intr_mask.urun;
2079 + np->stats.tx_fifo_errors += intr_mask.urun;
2081 + /*
2082 + * Protect against the tx-interrupt messing with
2083 + * the tx-ring.
2084 + */
2085 + spin_lock_irqsave(&np->lock, flags);
 2087 +		/* The DMA should have stopped by now; eat from the ring before
2088 + removing anything due to tx errors. */
2089 + _crisv32_tx_ring_advance(np, 0);
2091 + /*
2092 + * Drop packets after 15 retries.
2093 + * TODO: Add backoff.
2094 + */
2095 + if (np->retrans > 15 && np->txpackets) {
2096 + dev_kfree_skb_irq(np->catch_tx_desc->skb);
2097 + np->catch_tx_desc->skb = 0;
2098 + np->catch_tx_desc->descr.buf = 0;
2099 + np->catch_tx_desc =
2100 + phys_to_virt((int)
2101 + np->catch_tx_desc->descr.next);
2102 + flush_dma_descr(&np->catch_tx_desc->descr, 0);
2104 + np->txpackets--;
2105 + np->retrans = 0;
2106 + netif_wake_queue(dev);
2107 + np->stats.tx_dropped++;
2109 + np->ctxt_out.next = 0;
2110 + if (np->txpackets) {
2111 + np->retrans++;
2112 + np->ctxt_out.saved_data = (void *)
2113 + virt_to_phys(&np->catch_tx_desc->descr);
2114 + np->ctxt_out.saved_data_buf =
2115 + np->catch_tx_desc->descr.buf;
2116 + WARN_ON(!np->ctxt_out.saved_data_buf);
2117 + flush_dma_descr(&np->catch_tx_desc->descr, 0);
2118 + cris_flush_cache_range(&np->ctxt_out,
2119 + sizeof np->ctxt_out);
2121 + /* restart the DMA */
2122 + DMA_START_CONTEXT(np->dma_out_inst,
2123 + (int) virt_to_phys(&np->ctxt_out));
2124 + np->sender_started = 1;
2126 + else {
2127 + /* Load dummy context but do not load the data
2128 + descriptor nor start the burst. This brings the
2129 + buggy eth transmitter back in sync with the DMA
 2130 +			   and avoids malformed frames. */
2131 + REG_WR(dma, np->dma_out_inst, rw_group_down,
2132 + (int) virt_to_phys(&np->ctxt_out));
2133 + DMA_WR_CMD(np->dma_out_inst, regk_dma_load_c);
2134 + np->sender_started = 0;
2136 + crisv32_eth_roll_tx_timer(np);
2137 + spin_unlock_irqrestore(&np->lock, flags);
2140 + ack_intr = *(u32 *)&intr_mask;
2141 + REG_WR_INT(eth, np->eth_inst, rw_ack_intr, ack_intr);
2142 + clr_err.clr = 1;
2143 + REG_WR(eth, np->eth_inst, rw_clr_err, clr_err);
2145 + update_rx_stats(np);
2146 + update_tx_stats(np);
2148 + return IRQ_HANDLED;
2151 +/* We have a good packet(s), get it/them out of the buffers. */
2152 +static int crisv32_eth_receive_packet(struct net_device *dev)
2154 + int length;
2155 + struct sk_buff *skb;
2156 + struct crisv32_ethernet_local *np = netdev_priv(dev);
2157 + struct sk_buff *tmp;
2158 + unsigned long flags;
2160 + DEBUG(printk("crisv32_receive_packet\n"));
2162 + /* Roll the rx bug timer. */
2163 + mod_timer(&np->receive_timer, jiffies + HZ);
2165 + /* Activate LED */
2166 + spin_lock_irqsave(&np->leds->led_lock, flags);
2167 + if (!np->leds->led_active && time_after(jiffies,
2168 + np->leds->led_next_time)) {
2169 + /* light the network leds depending on the current speed. */
2170 + crisv32_set_network_leds(CRIS_LED_ACTIVITY, dev);
2172 + /* Set the earliest time we may clear the LED */
2173 + np->leds->led_next_time = jiffies + NET_FLASH_TIME;
2174 + np->leds->led_active = 1;
2175 + np->leds->clear_led_timer.data = (unsigned long) dev;
2176 + mod_timer(&np->leds->clear_led_timer, jiffies + HZ/10);
2178 + spin_unlock_irqrestore(&np->leds->led_lock, flags);
2180 + /* Discard CRC (4 bytes). */
2181 + length = (np->active_rx_desc->descr.after) -
2182 + (np->active_rx_desc->descr.buf) - 4;
2184 + tmp = dev_alloc_skb(MAX_MEDIA_DATA_SIZE);
2185 + if (!tmp) {
2186 + np->stats.rx_errors++;
2187 + printk(KERN_NOTICE "%s: memory squeeze,"
 2188 +		       " dropping packet.\n",
2189 + dev->name);
2190 + return 0;
2192 + skb = np->active_rx_desc->skb;
2193 + np->active_rx_desc->skb = tmp;
2194 + skb_put(skb, length);
2196 + np->newbuf = virt_to_phys(np->active_rx_desc->skb->data);
2198 + skb->dev = dev;
2199 + skb->protocol = eth_type_trans(skb, dev);
2200 + skb->ip_summed = CHECKSUM_NONE;
2202 + np->stats.multicast += skb->pkt_type == PACKET_MULTICAST;
2203 + /* Send the packet to the upper layer. */
2204 + netif_receive_skb(skb);
2205 + np->last_rx_desc =
2206 + phys_to_virt((int)
2207 + np->last_rx_desc->descr.next);
2209 + /* Forward rotate the receive ring. */
2210 + crisv32_eth_rx_ring_advance(np);
2211 + return length;
2214 +/* Must be called with the np-lock held. */
2215 +static void
2216 +__crisv32_eth_restart_rx_dma(struct net_device* dev,
2217 + struct crisv32_ethernet_local *np)
2219 + reg_dma_rw_ack_intr ack_intr = {0};
2220 + reg_dma_rw_stream_cmd dma_sc = {0};
2221 + reg_dma_rw_stat stat;
2222 + int resets = 0;
2223 + reg_eth_rw_intr_mask eth_intr_mask;
2225 + np->rx_dma_restarts++;
2227 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
2228 + if (np->pending_overrun) {
2229 + np->pending_overrun = 0;
2230 + REG_WR_INT(eth, np->eth_inst, rw_ga_lo, np->saved_ga_lo);
2231 + REG_WR_INT(eth, np->eth_inst, rw_ga_hi, np->saved_ga_hi);
2232 + REG_WR(eth, np->eth_inst, rw_rec_ctrl, np->saved_rec_ctrl);
2233 + REG_WR_INT(eth, np->eth_inst, rw_intr_mask,
2234 + REG_RD_INT(eth, regi_eth, rw_intr_mask) | 0x400);
2235 + DMA_CONTINUE(np->dma_in_inst);
2237 +#endif
2238 + /* Bring down the receiver. */
2239 + crisv32_disable_rx_ints(np);
2240 + crisv32_disconnect_eth_rx_dma(np);
2242 + /* Stop DMA and ack possible ints. */
2243 + DMA_STOP(np->dma_in_inst);
2244 + ack_intr.in_eop = 1;
2245 + REG_WR(dma, np->dma_in_inst, rw_ack_intr, ack_intr);
2247 + crisv32_stop_receiver(np);
2249 + /* Disable overrun interrupts while receive is shut off. */
2250 + eth_intr_mask = REG_RD(eth, np->eth_inst, rw_intr_mask);
2251 + eth_intr_mask.orun = regk_eth_no;
2252 + REG_WR(eth, np->eth_inst, rw_intr_mask, eth_intr_mask);
2253 + /* ACK overrun. */
2254 + REG_WR_INT(eth, np->eth_inst, rw_ack_intr, 0x400);
2256 + crisv32_eth_reset_rx_ring(dev);
2257 + reset:
 2258 +	/* TODO: if the number of resets grows too high we should reboot. */
2259 + if (resets++ > 0)
2260 + printk("reset DMA %d.\n", resets);
2262 + DMA_RESET(np->dma_in_inst);
2263 + /* Wait for the channel to reset. */
2264 + do {
2265 + stat = REG_RD(dma, np->dma_in_inst, rw_stat);
2266 + } while (stat.mode != regk_dma_rst);
2268 + /* Now bring the rx path back up. */
2269 + DMA_ENABLE(np->dma_in_inst);
2270 + if (dma_wait_busy(np->dma_in_inst, 100))
2271 + goto reset;
2273 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
2274 +// DMA_WR_CMD(np->dma_in_inst, regk_dma_set_w_size2);
2275 + dma_sc.cmd = (regk_dma_set_w_size2);
2276 + REG_WR(dma, np->dma_in_inst, rw_stream_cmd, dma_sc);
2277 + if (dma_wait_busy(np->dma_in_inst, 100))
2278 + goto reset;
2279 +#endif
2281 +// DMA_START_CONTEXT(np->dma_in_inst, virt_to_phys(&np->ctxt_in));
2282 + REG_WR_INT(dma, np->dma_in_inst, rw_group_down, (int)&np->ctxt_in);
2284 +// DMA_WR_CMD(np->dma_in_inst, regk_dma_load_c);
2285 + dma_sc.cmd = (regk_dma_load_c);
2286 + REG_WR(dma, np->dma_in_inst, rw_stream_cmd, dma_sc);
2287 + if (dma_wait_busy(np->dma_in_inst, 100))
2288 + goto reset;
2290 +// DMA_WR_CMD(np->dma_in_inst, regk_dma_load_d | regk_dma_burst);
2291 + dma_sc.cmd = (regk_dma_load_d | regk_dma_burst);
2292 + REG_WR(dma, np->dma_in_inst, rw_stream_cmd, dma_sc);
2294 + if (dma_wait_busy(np->dma_in_inst, 100))
2295 + goto reset;
2297 + /* Now things get critical again. Don't give us any interrupts until
2298 + the following sequence is complete. */
2299 + DMA_CONTINUE(np->dma_in_inst);
2300 + np->overrun_set = 0;
2301 + crisv32_enable_rx_ints(np);
2302 + crisv32_start_receiver(np);
2304 + /* Reenable overrun interrupts when receive is started again. */
2305 + eth_intr_mask = REG_RD(eth, np->eth_inst, rw_intr_mask);
2306 + eth_intr_mask.orun = regk_eth_yes;
2307 + REG_WR(eth, np->eth_inst, rw_intr_mask, eth_intr_mask);
2309 + crisv32_connect_eth_rx_dma(np);
2312 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
2313 +static void
2314 +crisv32_eth_restart_rx_dma(struct net_device* dev,
2315 + struct crisv32_ethernet_local *np)
2317 + unsigned long flags;
2319 + spin_lock_irqsave(&np->lock, flags);
2320 + __crisv32_eth_restart_rx_dma(dev, np);
2321 + spin_unlock_irqrestore(&np->lock, flags);
2323 +#endif
2326 + * Is there work to do in the rx-path?
2327 + */
2328 +static inline int crisv32_has_rx_work(struct crisv32_ethernet_local *np,
2329 + dma_descr_data *active)
2331 + int mw;
2332 + mw = (active->in_eop && np->new_rx_package);
2333 + return mw;
2336 +static void crisv32_eth_do_rx_recovery(struct net_device* dev,
2337 + struct crisv32_ethernet_local *np)
2339 + unsigned long flags;
2340 + static int r = 0;
2342 + r++;
2344 + /* Bring down the receiver. */
2345 + spin_lock_irqsave(&np->lock, flags);
2346 + if (!np->do_rx_recovery)
2347 + goto done;
2349 + napi_disable(&np->napi);
2351 + np->rx_dma_timeouts++;
2353 + __crisv32_eth_restart_rx_dma(dev, np);
2355 + np->do_rx_recovery = 0;
2357 + napi_enable(&np->napi);
2358 + done:
2359 + spin_unlock_irqrestore(&np->lock, flags);
2361 + WARN_ON(r != 1);
2362 + r--;
2365 +static void receive_timeout_work(struct work_struct* work)
2367 + struct dma_descr_data* descr;
2368 + struct dma_descr_data* descr2;
2369 + struct net_device* dev = crisv32_dev[0];
2370 + struct crisv32_ethernet_local *np = netdev_priv(dev);
2371 + reg_eth_r_intr intr_mask;
2373 + descr = &np->active_rx_desc->descr;
2374 + descr2 = phys_to_virt(REG_RD_INT(dma, np->dma_in_inst, rw_data));
2376 + intr_mask = REG_RD(eth, np->eth_inst, r_intr);
2378 + if (!np->overrun_set
2379 + && !intr_mask.orun
2380 + && !descr->in_eop
2381 + && !descr2->in_eop)
2382 + return;
2384 + crisv32_eth_do_rx_recovery(dev, np);
2387 +static void receive_timeout(unsigned long arg)
2389 + struct net_device* dev = (struct net_device*)arg;
2390 + struct crisv32_ethernet_local *np = netdev_priv(dev);
2392 + np->do_rx_recovery++;
2393 + schedule_work(&np->receive_work);
2394 + mod_timer(&np->receive_timer, jiffies + 1*HZ);
2397 +static void transmit_timeout(unsigned long arg)
2399 + struct net_device* dev = (struct net_device*)arg;
2400 + crisv32_eth_do_tx_recovery(dev);
2404 + * NAPI poll
2406 + * We are allowed to pull up to budget number of frames from the rx ring.
2407 + * If we are done, remove us from the poll list and re-enable rx interrupts.
2408 + * Always return number of pulled frames from the rx ring.
2409 + */
2410 +static int crisv32_eth_poll(struct napi_struct *napi, int budget)
2412 + struct crisv32_ethernet_local *np;
2413 + int work_done = 0;
2414 + int morework;
2415 + int rx_bytes = 0;
2416 + reg_dma_rw_ack_intr ack_intr = {0};
2418 + np = container_of(napi, struct crisv32_ethernet_local, napi);
2419 + crisv32_disable_eth_ints(np);
2420 + np->napi_processing = 1;
2421 + ack_intr.in_eop = 1;
2423 + if (np->new_rx_package == 0) {
2424 + /*
2425 + * In the previous round we pulled a packet from the ring but
 2426 +		 * we didn't advance the ring due to a hw DMA bug. Try to do it
2427 + * now.
2428 + */
2429 + np->new_rx_package = 1;
2430 + crisv32_eth_rx_ring_advance(np);
2433 + morework = crisv32_has_rx_work(np, &np->active_rx_desc->descr);
2435 + /* See if tx needs attention. */
2436 + crisv32_tx_complete(np);
2438 + while (morework)
2440 + rx_bytes += crisv32_eth_receive_packet(np->dev);
2441 + work_done++;
2443 + /* Ack irq and restart rx dma */
2444 + REG_WR(dma, np->dma_in_inst, rw_ack_intr, ack_intr);
2445 + DMA_CONTINUE_DATA(np->dma_in_inst);
2447 + if (unlikely(work_done >= budget))
2448 + break;
2450 + /* See if tx needs attention. */
2451 + crisv32_tx_complete(np);
2453 + morework = crisv32_has_rx_work(np, &np->active_rx_desc->descr);
2455 + crisv32_enable_eth_ints(np);
2457 + if (!morework) {
2458 + np->napi_processing = 0;
2459 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
2460 + if (np->pending_overrun) {
2461 + crisv32_eth_restart_rx_dma(np->dev, np);
2463 +#endif
2464 + if (irqs_disabled())
2465 + printk("WARNING: %s irqs disabled!\n", __func__);
2467 + if (work_done < budget) {
2468 + /* first mark as done, then enable irq's */
2469 + napi_complete(napi);
2470 + crisv32_enable_rx_ints(np);
2471 + crisv32_enable_tx_ints(np);
2474 + np->napi_processing = 0;
2476 + np->stats.rx_bytes += rx_bytes;
2477 + np->stats.rx_packets += work_done;
2478 + update_rx_stats(np);
2479 + return work_done;
 2483 + * This function (i.e. hard_start_xmit) is protected from concurrent calls by a
2484 + * spinlock (xmit_lock) in the net_device structure.
2485 + */
2486 +static int
2487 +crisv32_eth_send_packet(struct sk_buff *skb, struct net_device *dev)
2489 + struct crisv32_ethernet_local *np = netdev_priv(dev);
2490 + unsigned char *buf = skb->data;
2491 + unsigned long flags;
2493 + /*
2494 + * Need to disable irq to avoid updating pointer in interrupt while
2495 + * sending packets.
2496 + */
2497 + spin_lock_irqsave(&np->lock, flags);
2499 + np->active_tx_desc->skb = skb;
2500 + crisv32_eth_hw_send_packet(buf, skb->len, np);
2502 + /* Stop queue if full. */
2503 + if (crisv32_eth_tx_ring_full(np))
2504 + netif_stop_queue(dev);
2506 + np->txpackets++;
2507 + crisv32_eth_roll_tx_timer(np);
2508 + spin_unlock_irqrestore(&np->lock, flags);
2510 + spin_lock_irqsave(&np->leds->led_lock, flags);
2511 + if (!np->leds->led_active && time_after(jiffies,
2512 + np->leds->led_next_time)) {
2513 + /* light the network leds depending on the current speed. */
2514 + crisv32_set_network_leds(CRIS_LED_ACTIVITY, dev);
2516 + /* Set the earliest time we may clear the LED */
2517 + np->leds->led_next_time = jiffies + NET_FLASH_TIME;
2518 + np->leds->led_active = 1;
2519 + np->leds->clear_led_timer.data = (unsigned long) dev;
2520 + mod_timer(&np->leds->clear_led_timer, jiffies + HZ/10);
2522 + spin_unlock_irqrestore(&np->leds->led_lock, flags);
2524 + return 0;
2528 +static void
2529 +crisv32_eth_hw_send_packet(unsigned char *buf, int length, void *priv)
2531 + struct crisv32_ethernet_local *np =
2532 + (struct crisv32_ethernet_local *) priv;
2534 + /* Configure the tx dma descriptor. */
2535 + np->active_tx_desc->descr.buf = (unsigned char *)virt_to_phys(buf);
2537 + np->active_tx_desc->descr.after = np->active_tx_desc->descr.buf +
2538 + length;
2539 + np->active_tx_desc->descr.intr = 1;
2540 + np->active_tx_desc->descr.out_eop = 1;
2542 + /* Move eol. */
2543 + np->active_tx_desc->descr.eol = 1;
2544 + flush_dma_descr(&np->active_tx_desc->descr, 1);
2546 + if (np->sender_started)
2547 + WARN_ON(!np->prev_tx_desc->descr.eol);
2548 + np->prev_tx_desc->descr.eol = 0;
2549 + flush_dma_descr(&np->prev_tx_desc->descr, 0);
2551 + /* Update pointers. */
2552 + np->prev_tx_desc = np->active_tx_desc;
2553 + np->active_tx_desc = phys_to_virt((int)np->active_tx_desc->descr.next);
2555 + /* Start DMA. */
2556 + crisv32_start_dma_out(np);
2559 +static void crisv32_start_dma_out(struct crisv32_ethernet_local *np)
2561 + if (!np->sender_started) {
2562 + /* Start DMA for the first time. */
2563 + np->ctxt_out.saved_data =
2564 + (void *)virt_to_phys(&np->prev_tx_desc->descr);
2565 + np->ctxt_out.saved_data_buf = np->prev_tx_desc->descr.buf;
2566 + WARN_ON(!np->ctxt_out.saved_data_buf);
2568 + cris_flush_cache_range(&np->ctxt_out, sizeof np->ctxt_out);
2569 + REG_WR(dma, np->dma_out_inst, rw_group_down,
2570 + (int) virt_to_phys(&np->ctxt_out));
2571 + DMA_WR_CMD(np->dma_out_inst, regk_dma_load_c);
2572 + DMA_WR_CMD(np->dma_out_inst, regk_dma_load_d | regk_dma_burst);
2573 + np->sender_started = 1;
2574 + } else {
2575 + DMA_CONTINUE_DATA(np->dma_out_inst);
2580 + * Bring the transmitter back to life.
2581 + */
2582 +static void
2583 +crisv32_eth_do_tx_recovery(struct net_device *dev)
2585 + struct crisv32_ethernet_local *np = netdev_priv(dev);
2586 + reg_eth_rw_clr_err clr_err;
2587 + reg_dma_rw_stat stat = {0};
2588 + unsigned long flags;
2589 + /* ACK urun and exc_col. */
2590 + int ack_intr = 0x1800;
2591 + int do_full;
2593 + /* Give the tx recovery some time without link state polling. */
2594 + if (!np->fixed_phy)
2595 + mod_timer(&np->speed_timer, jiffies + 4 * HZ);
2597 + np->tx_dma_restarts++;
2599 + spin_lock_irqsave(&np->lock, flags);
2601 + do_full = 1;
2602 + update_tx_stats(np);
2604 + /* Cancel ongoing frame. */
2605 + crisv32_eth_tx_cancel_frame(np);
2607 + /* In case TR 125 just hit us. */
2608 + DMA_WR_CMD(np->dma_out_inst, regk_dma_ack_pkt);
2609 + dma_wait_busy(np->dma_out_inst, 100);
2611 + /* At this point, the transmit block should be idle or waiting for us
2612 + to clear the excessive collision error. Let's reset the DMA. */
2613 + DMA_STOP(np->dma_out_inst);
2615 + crisv32_disconnect_eth_tx_dma(np);
2617 + /* Eat from the tx ring. */
2618 + _crisv32_tx_ring_advance(np, 1);
2619 + np->do_tx_recovery++;
2621 + DMA_RESET(np->dma_out_inst);
2622 + do {
2623 + stat = REG_RD(dma, np->dma_out_inst, rw_stat);
2624 + } while (stat.mode != regk_dma_rst);
2626 + /* Next packet will restart output DMA. */
2627 + np->sender_started = 0;
2629 + crisv32_enable_tx_ints(np);
2631 + DMA_ENABLE(np->dma_out_inst);
2632 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
2633 + DMA_WR_CMD(np->dma_out_inst, regk_dma_set_w_size4);
2634 +#endif
2635 + DMA_CONTINUE(np->dma_out_inst);
2637 + /* Clear pending errors. */
2638 + REG_WR_INT(eth, np->eth_inst, rw_ack_intr, ack_intr);
2639 + clr_err.clr = 1;
2640 + REG_WR(eth, np->eth_inst, rw_clr_err, clr_err);
2642 + /* Do a full reset of the MAC block. */
2643 + if (do_full) {
2644 + np->tx_mac_resets++;
2645 + crisv32_eth_reset(np);
2648 + crisv32_connect_eth_tx_dma(np);
2650 + if (np->txpackets) {
2651 + WARN_ON(!np->catch_tx_desc->skb);
2652 + np->catch_tx_desc->descr.intr = 1;
2653 + np->catch_tx_desc->descr.out_eop = 1;
2655 + /* Start DMA for the first time. */
2656 + np->ctxt_out.saved_data =
2657 + (void *)virt_to_phys(&np->catch_tx_desc->descr);
2658 + np->ctxt_out.saved_data_buf = np->catch_tx_desc->descr.buf;
2659 + WARN_ON(!np->ctxt_out.saved_data_buf);
2660 + flush_dma_descr(&np->catch_tx_desc->descr, 0);
2661 + cris_flush_cache_range(&np->ctxt_out, sizeof np->ctxt_out);
2663 + REG_WR(dma, np->dma_out_inst, rw_group_down,
2664 + (int) virt_to_phys(&np->ctxt_out));
2665 + DMA_WR_CMD(np->dma_out_inst, regk_dma_load_c);
2666 + DMA_WR_CMD(np->dma_out_inst, regk_dma_load_d | regk_dma_burst);
2667 + crisv32_eth_roll_tx_timer(np);
2668 + np->sender_started = 1;
2671 + if (np->txpackets && crisv32_eth_tx_ring_full(np))
2672 + netif_stop_queue(dev);
2673 + else
2674 + netif_wake_queue(dev);
2676 + spin_unlock_irqrestore(&np->lock, flags);
2680 + * Set or clear the multicast filter for this adaptor.
2681 + * num_addrs == -1 Promiscuous mode, receive all packets
2682 + * num_addrs == 0 Normal mode, clear multicast list
2683 + * num_addrs > 0 Multicast mode, receive normal and MC packets,
2684 + * and do best-effort filtering.
2685 + */
2686 +static void crisv32_eth_set_rx_mode(struct net_device *dev)
2688 + int num_addr = netdev_mc_count(dev);
2689 + unsigned long int lo_bits;
2690 + unsigned long int hi_bits;
2691 + reg_eth_rw_rec_ctrl rec_ctrl = {0};
2692 + reg_eth_rw_ga_lo ga_lo = {0};
2693 + reg_eth_rw_ga_hi ga_hi = {0};
2694 + struct crisv32_ethernet_local *np = netdev_priv(dev);
2696 + if (dev->flags & IFF_PROMISC) {
2697 + /* Promiscuous mode. */
2698 + lo_bits = 0xfffffffful;
2699 + hi_bits = 0xfffffffful;
2701 + /* Enable individual receive. */
2702 + rec_ctrl = (reg_eth_rw_rec_ctrl) REG_RD(eth, np->eth_inst,
2703 + rw_rec_ctrl);
2704 + rec_ctrl.individual = regk_eth_yes;
2705 + REG_WR(eth, np->eth_inst, rw_rec_ctrl, rec_ctrl);
2706 + } else if (dev->flags & IFF_ALLMULTI) {
2707 + /* Enable all multicasts. */
2708 + lo_bits = 0xfffffffful;
2709 + hi_bits = 0xfffffffful;
2711 + /* Disable individual receive */
2712 + rec_ctrl =
2713 + (reg_eth_rw_rec_ctrl) REG_RD(eth, np->eth_inst, rw_rec_ctrl);
2714 + rec_ctrl.individual = regk_eth_no;
2715 + REG_WR(eth, np->eth_inst, rw_rec_ctrl, rec_ctrl);
2716 + } else if (num_addr == 0) {
2717 + /* Normal, clear the mc list. */
2718 + lo_bits = 0x00000000ul;
2719 + hi_bits = 0x00000000ul;
2721 + /* Disable individual receive */
2722 + rec_ctrl =
2723 + (reg_eth_rw_rec_ctrl) REG_RD(eth, np->eth_inst, rw_rec_ctrl);
2724 + rec_ctrl.individual = regk_eth_no;
2725 + REG_WR(eth, np->eth_inst, rw_rec_ctrl, rec_ctrl);
2726 + } else {
2727 + /* MC mode, receive normal and MC packets. */
2728 + char hash_ix;
2729 + struct netdev_hw_addr *ha;
2730 + char *baddr;
2731 + lo_bits = 0x00000000ul;
2732 + hi_bits = 0x00000000ul;
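 +		/* Fold the 48 address bits into a 6-bit hash index by XOR;
 +		 * the index selects one bit in the 64-bit group-address
 +		 * filter (ga_hi:ga_lo). */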
2734 + netdev_for_each_mc_addr(ha, dev) {
2735 + /* Calculate the hash index for the GA registers. */
2736 + hash_ix = 0;
2737 + baddr = ha->addr;
2738 + hash_ix ^= (*baddr) & 0x3f;
2739 + hash_ix ^= ((*baddr) >> 6) & 0x03;
2740 + ++baddr;
2741 + hash_ix ^= ((*baddr) << 2) & 0x03c;
2742 + hash_ix ^= ((*baddr) >> 4) & 0xf;
2743 + ++baddr;
2744 + hash_ix ^= ((*baddr) << 4) & 0x30;
2745 + hash_ix ^= ((*baddr) >> 2) & 0x3f;
2746 + ++baddr;
2747 + hash_ix ^= (*baddr) & 0x3f;
2748 + hash_ix ^= ((*baddr) >> 6) & 0x03;
2749 + ++baddr;
2750 + hash_ix ^= ((*baddr) << 2) & 0x03c;
2751 + hash_ix ^= ((*baddr) >> 4) & 0xf;
2752 + ++baddr;
2753 + hash_ix ^= ((*baddr) << 4) & 0x30;
2754 + hash_ix ^= ((*baddr) >> 2) & 0x3f;
2756 + hash_ix &= 0x3f;
 2758 +			if (hash_ix >= 32)
2759 + hi_bits |= (1 << (hash_ix - 32));
2760 + else
2761 + lo_bits |= (1 << hash_ix);
2764 + /* Disable individual receive. */
2765 + rec_ctrl =
2766 + (reg_eth_rw_rec_ctrl) REG_RD(eth, np->eth_inst, rw_rec_ctrl);
2767 + rec_ctrl.individual = regk_eth_no;
2768 + REG_WR(eth, np->eth_inst, rw_rec_ctrl, rec_ctrl);
2771 + ga_lo.table = (unsigned int) lo_bits;
2772 + ga_hi.table = (unsigned int) hi_bits;
2774 + REG_WR(eth, np->eth_inst, rw_ga_lo, ga_lo);
2775 + REG_WR(eth, np->eth_inst, rw_ga_hi, ga_hi);
2778 +static int
2779 +crisv32_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2781 + struct mii_ioctl_data *data = if_mii(ifr);
2782 + struct crisv32_ethernet_local *np = netdev_priv(dev);
2783 + int old_autoneg;
2784 + int rc = 0;
2786 + spin_lock(&np->lock); /* Preempt protection */
2787 + switch (cmd) {
2788 + case SET_ETH_ENABLE_LEDS:
2789 + np->use_leds = 1;
2790 + break;
2791 + case SET_ETH_DISABLE_LEDS:
2792 + np->use_leds = 0;
2793 + break;
2794 + case SET_ETH_AUTONEG:
2795 + old_autoneg = np->autoneg_normal;
2796 + np->autoneg_normal = *(int*)data;
2797 + if (np->autoneg_normal != old_autoneg)
2798 + crisv32_eth_negotiate(dev);
2799 + break;
2800 + default:
2801 + rc = generic_mii_ioctl(&np->mii_if,
2802 + if_mii(ifr), cmd, NULL);
2803 + break;
2805 + spin_unlock(&np->lock);
2806 + return rc;
2809 +static int crisv32_eth_get_settings(struct net_device *dev,
2810 + struct ethtool_cmd *cmd)
2812 + struct crisv32_ethernet_local *np = netdev_priv(dev);
2813 + int err;
2815 + spin_lock_irq(&np->lock);
2816 + err = mii_ethtool_gset(&np->mii_if, cmd);
2817 + spin_unlock_irq(&np->lock);
2819 + /* The PHY may support 1000baseT, but the EtraxFS does not. */
2820 + cmd->supported &= ~(SUPPORTED_1000baseT_Half
2821 + | SUPPORTED_1000baseT_Full);
2822 + return err;
2825 +static int crisv32_eth_set_settings(struct net_device *dev,
2826 + struct ethtool_cmd *ecmd)
2828 + if (ecmd->autoneg == AUTONEG_ENABLE) {
2829 + crisv32_eth_set_duplex(dev, autoneg);
2830 + crisv32_eth_set_speed(dev, 0);
2831 + } else {
2832 + crisv32_eth_set_duplex(dev, ecmd->duplex);
2833 + crisv32_eth_set_speed(dev, ecmd->speed);
2836 + return 0;
2839 +static void crisv32_eth_get_drvinfo(struct net_device *dev,
2840 + struct ethtool_drvinfo *info)
2842 +#ifdef CONFIG_ETRAXFS
2843 + strncpy(info->driver, "ETRAX FS", sizeof(info->driver) - 1);
2844 +#else
2845 + strncpy(info->driver, "ARTPEC-3", sizeof(info->driver) - 1);
2846 +#endif
2847 + strncpy(info->version, "$Revision: 1.197 $", sizeof(info->version) - 1);
2848 + strncpy(info->fw_version, "N/A", sizeof(info->fw_version) - 1);
2849 + strncpy(info->bus_info, "N/A", sizeof(info->bus_info) - 1);
2852 +static int crisv32_eth_get_ethtool_sset_count(struct net_device *dev,
2853 + int stringset)
2855 + if (stringset != ETH_SS_STATS)
2856 + return -EINVAL;
2858 + return ARRAY_SIZE(ethtool_stats_keys);
2861 +static void crisv32_eth_get_ethtool_stats(struct net_device *dev,
2862 + struct ethtool_stats *stats,
2863 + u64 *data)
2865 + struct crisv32_ethernet_local *np = netdev_priv(dev);
2867 + data[0] = np->tx_dma_restarts;
2868 + data[1] = np->tx_mac_resets;
2869 + data[2] = np->rx_dma_restarts;
2870 + data[3] = np->rx_dma_timeouts;
2871 + data[4] = np->rx_restarts_dropped;
2874 +static void crisv32_eth_get_strings(struct net_device *dev,
2875 + u32 stringset, u8 *data)
2877 + switch (stringset) {
2878 + case ETH_SS_STATS:
2879 + memcpy(data, &ethtool_stats_keys,
2880 + sizeof(ethtool_stats_keys));
2881 + break;
2882 + default:
2883 + WARN_ON(1);
2884 + break;
2888 +static int crisv32_eth_nway_reset(struct net_device *dev)
2890 + struct crisv32_ethernet_local *np = netdev_priv(dev);
2892 + if (np->current_duplex == autoneg && np->current_speed_selection == 0)
2893 + crisv32_eth_negotiate(dev);
2894 + return 0;
2896 +/* The FS/A3 ethernet block has 23 32-bit config registers. */
2897 +/* plus 2 dma_descr_context */
2898 +/* plus 2 sets of ring pointers (active, prev, last) */
2899 +/* plus 2 sets of DMA registers 40*4 bytes = 0xA0 */
2900 +#define ETRAX_ETH_REGDUMP_LEN (23 * 4 + 2 * sizeof (dma_descr_context) + 2*3*4 + 2*0xA0)
2901 +static int crisv32_eth_get_regs_len(struct net_device *dev)
2903 + return ETRAX_ETH_REGDUMP_LEN;
2906 +static void crisv32_eth_get_regs(struct net_device *dev,
2907 + struct ethtool_regs *regs, void *_p)
2909 + u32 *p = _p;
2910 + struct crisv32_ethernet_local *np = netdev_priv(dev);
2911 + int i;
2913 + /* Let's call this major version 0, minor version 1 with some
2914 + * undecided field separation in the version data. Previously
2915 + * only the eth regs were dumped (version=0: maj 0, min 0).*/
2916 + regs->version = 1;
2917 + memset(p, 0, ETRAX_ETH_REGDUMP_LEN);
2919 +#define GET_REG32_LOOP(base, len) \
2920 + do { \
2921 + for (i = 0; i < len; i += 4) \
2922 + *(p)++ = REG_READ(u32, (base) + i); \
2923 + } while (0)
2925 + GET_REG32_LOOP(np->eth_inst, 0x30);
2926 + /* Do not dump registers with read side effects. */
2927 + GET_REG32_LOOP(np->eth_inst + 0x34, 1);
2928 + GET_REG32_LOOP(np->eth_inst + 0x3c, 1);
2929 + GET_REG32_LOOP(np->eth_inst + 0x44, 0x5c - 0x44);
2932 + memcpy(p, &np->ctxt_out, sizeof (dma_descr_context));
2933 + p += sizeof (dma_descr_context)/4;
2934 + *(p++) = (u32) np->active_tx_desc;
2935 + *(p++) = (u32) np->prev_tx_desc;
2936 + *(p++) = (u32) np->catch_tx_desc;
2938 + GET_REG32_LOOP(np->dma_out_inst, 0xa0);
2940 + memcpy(p, &np->ctxt_in, sizeof (dma_descr_context));
2941 + p += sizeof (dma_descr_context)/4;
2942 + *(p++) = (u32)np->active_rx_desc;
2943 + *(p++) = (u32)np->prev_rx_desc;
2944 + *(p++) = (u32)np->last_rx_desc;
2946 + GET_REG32_LOOP(np->dma_in_inst, 0xa0);
2947 +#undef GET_REG32_LOOP
2950 +static struct ethtool_ops crisv32_ethtool_ops = {
2951 + .get_settings = crisv32_eth_get_settings,
2952 + .set_settings = crisv32_eth_set_settings,
2953 + .get_drvinfo = crisv32_eth_get_drvinfo,
2954 + .get_regs_len = crisv32_eth_get_regs_len,
2955 + .get_regs = crisv32_eth_get_regs,
2956 + .nway_reset = crisv32_eth_nway_reset,
2957 + .get_link = ethtool_op_get_link,
2958 + .get_strings = crisv32_eth_get_strings,
2959 + .get_ethtool_stats = crisv32_eth_get_ethtool_stats,
2960 + .get_sset_count = crisv32_eth_get_ethtool_sset_count
2963 +/* Is this function really needed? Use ethtool instead? */
2964 +static int crisv32_eth_set_config(struct net_device *dev, struct ifmap *map)
2966 + struct crisv32_ethernet_local *np = netdev_priv(dev);
2968 + spin_lock(&np->lock); /* Preempt protection */
2970 + switch (map->port) {
2971 + case IF_PORT_UNKNOWN:
2972 + /* Use autoneg */
2973 + crisv32_eth_set_speed(dev, 0);
2974 + crisv32_eth_set_duplex(dev, autoneg);
2975 + break;
2976 + case IF_PORT_10BASET:
2977 + crisv32_eth_set_speed(dev, 10);
2978 + crisv32_eth_set_duplex(dev, autoneg);
2979 + break;
2980 + case IF_PORT_100BASET:
2981 + case IF_PORT_100BASETX:
2982 + crisv32_eth_set_speed(dev, 100);
2983 + crisv32_eth_set_duplex(dev, autoneg);
2984 + break;
2985 + case IF_PORT_100BASEFX:
2986 + case IF_PORT_10BASE2:
2987 + case IF_PORT_AUI:
2988 + spin_unlock(&np->lock);
2989 + return -EOPNOTSUPP;
2990 + break;
2991 + default:
 2992 +		printk(KERN_ERR "%s: Invalid media selected\n",
2993 + dev->name);
2994 + spin_unlock(&np->lock);
2995 + return -EINVAL;
2997 + spin_unlock(&np->lock);
2998 + return 0;
3001 +static void crisv32_eth_negotiate(struct net_device *dev)
3003 + unsigned short data;
3004 + unsigned short ctrl1000;
3005 + struct crisv32_ethernet_local *np = netdev_priv(dev);
3007 + data = crisv32_eth_get_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE);
3008 + ctrl1000 = crisv32_eth_get_mdio_reg(dev, np->mii_if.phy_id,
3009 + MII_CTRL1000);
3011 + /* Make all capabilities available */
3012 + data |= ADVERTISE_10HALF | ADVERTISE_10FULL |
3013 + ADVERTISE_100HALF | ADVERTISE_100FULL;
3014 + ctrl1000 |= ADVERTISE_1000HALF | ADVERTISE_1000FULL;
 3016 +	/* Remove the speed capabilities that we do not want. */
3017 + switch (np->current_speed_selection) {
3018 + case 10 :
3019 + data &= ~(ADVERTISE_100HALF | ADVERTISE_100FULL);
3020 + ctrl1000 &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
3021 + break;
3022 + case 100 :
3023 + data &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL);
3024 + ctrl1000 &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
3025 + break;
3026 + case 1000 :
3027 + data &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
3028 + ADVERTISE_100HALF | ADVERTISE_100FULL);
3029 + break;
 3032 +	/* Remove the duplex capabilities that we do not want. */
3033 + if (np->current_duplex == full) {
3034 + data &= ~(ADVERTISE_10HALF | ADVERTISE_100HALF);
3035 + ctrl1000 &= ~(ADVERTISE_1000HALF);
3037 + else if (np->current_duplex == half) {
3038 + data &= ~(ADVERTISE_10FULL | ADVERTISE_100FULL);
3039 + ctrl1000 &= ~(ADVERTISE_1000FULL);
3042 + crisv32_eth_set_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE, data);
3043 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
3044 + crisv32_eth_set_mdio_reg(dev, np->mii_if.phy_id,
3045 + MII_CTRL1000, ctrl1000);
3046 +#endif
3048 + data = crisv32_eth_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR);
3049 + if (np->autoneg_normal) {
3050 + /* Renegotiate with link partner */
3051 + data |= BMCR_ANENABLE | BMCR_ANRESTART;
3052 + } else {
 3053 +		/* Don't negotiate speed or duplex. */
3054 + data &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
 3056 +		/* Set speed and duplex statically. */
3057 + if (np->current_speed_selection == 10) {
3058 + data &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
3060 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
3061 + else if (np->current_speed_selection == 1000) {
3062 + data &= ~BMCR_SPEED100;
3063 + data |= BMCR_SPEED1000;
3065 +#endif
3066 + else {
3067 + data |= BMCR_SPEED100;
3068 + data &= ~BMCR_SPEED1000;
3071 + if (np->current_duplex != full) {
3072 + data &= ~BMCR_FULLDPLX;
3073 + } else {
3074 + data |= BMCR_FULLDPLX;
3077 + crisv32_eth_set_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR, data);
3080 +static void crisv32_eth_check_speed(unsigned long idev)
3082 +#ifndef CONFIG_ETRAX_NO_PHY
3083 + static int led_initiated = 0;
3084 + struct net_device *dev = (struct net_device *) idev;
3085 + struct crisv32_ethernet_local *np = netdev_priv(dev);
3087 + unsigned long data;
3088 + int old_speed;
3089 + unsigned long flags;
3091 + BUG_ON(!np);
3092 + BUG_ON(!np->transceiver);
3094 + spin_lock(&np->transceiver_lock);
3096 + old_speed = np->current_speed;
3098 + /* Do a fake read. This is needed for DM9161, otherwise the link will
3099 + * go up and down all the time.
3100 + */
3101 + data = crisv32_eth_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMSR);
3102 + data = crisv32_eth_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMSR);
3104 + if (!(data & BMSR_LSTATUS))
3105 + np->current_speed = 0;
3106 + else
3107 + np->transceiver->check_speed(dev);
3109 + spin_lock_irqsave(&np->leds->led_lock, flags);
3110 + if ((old_speed != np->current_speed) || !led_initiated) {
3111 + led_initiated = 1;
3112 + np->leds->clear_led_timer.data = (unsigned long) dev;
3113 + if (np->current_speed) {
3114 + if (!np->link)
3115 + netif_carrier_on(dev);
3116 + crisv32_set_network_leds(CRIS_LED_LINK, dev);
3117 + np->link = 1;
3118 + } else {
3119 + if (np->link)
3120 + netif_carrier_off(dev);
3121 + crisv32_set_network_leds(CRIS_LED_NOLINK, dev);
3122 + np->link = 0;
3125 + spin_unlock_irqrestore(&np->leds->led_lock, flags);
3127 + /* Reinitialize the timer. */
3128 + np->speed_timer.expires = jiffies + NET_LINK_UP_CHECK_INTERVAL;
3129 + add_timer(&np->speed_timer);
3131 + spin_unlock(&np->transceiver_lock);
3132 +#endif
3135 +static void crisv32_eth_set_speed(struct net_device *dev, unsigned long speed)
3137 + struct crisv32_ethernet_local *np = netdev_priv(dev);
3139 + spin_lock(&np->transceiver_lock);
3140 + if (np->current_speed_selection != speed) {
3141 + np->current_speed_selection = speed;
3142 + crisv32_eth_negotiate(dev);
3144 + spin_unlock(&np->transceiver_lock);
3147 +static void crisv32_eth_check_duplex(unsigned long idev)
3149 +#ifndef CONFIG_ETRAX_NO_PHY
3150 + struct net_device *dev = (struct net_device *) idev;
3151 + struct crisv32_ethernet_local *np = netdev_priv(dev);
3152 + reg_eth_rw_rec_ctrl rec_ctrl;
3153 + int old_duplex = np->full_duplex;
3155 + np->transceiver->check_duplex(dev);
3157 + if (old_duplex != np->full_duplex) {
3158 + /* Duplex changed. */
3159 + rec_ctrl = (reg_eth_rw_rec_ctrl) REG_RD(eth, np->eth_inst,
3160 + rw_rec_ctrl);
3161 + rec_ctrl.duplex = np->full_duplex;
3162 + REG_WR(eth, np->eth_inst, rw_rec_ctrl, rec_ctrl);
3165 + /* Reinitialize the timer. */
3166 + np->duplex_timer.expires = jiffies + NET_DUPLEX_CHECK_INTERVAL;
3167 + add_timer(&np->duplex_timer);
3168 +#endif
3171 +static void
3172 +crisv32_eth_set_duplex(struct net_device *dev, enum duplex new_duplex)
3174 + struct crisv32_ethernet_local *np = netdev_priv(dev);
3175 + spin_lock(&np->transceiver_lock);
3176 + if (np->current_duplex != new_duplex) {
3177 + np->current_duplex = new_duplex;
3178 + crisv32_eth_negotiate(dev);
3180 + spin_unlock(&np->transceiver_lock);
3183 +static int crisv32_eth_probe_transceiver(struct net_device *dev)
3185 +#ifndef CONFIG_ETRAX_NO_PHY
3186 + unsigned int phyid_high;
3187 + unsigned int phyid_low;
3188 + unsigned int oui;
3189 + struct transceiver_ops *ops = NULL;
3190 + struct crisv32_ethernet_local *np = netdev_priv(dev);
3192 + /* Probe MDIO physical address. */
3193 + for (np->mii_if.phy_id = 0;
3194 + np->mii_if.phy_id <= 31; np->mii_if.phy_id++) {
3195 + if (crisv32_eth_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMSR)
3196 + != 0xffff)
3197 + break;
3200 + if (np->mii_if.phy_id == 32)
3201 + return -ENODEV;
3203 + /* Get manufacturer. */
3204 + phyid_high = crisv32_eth_get_mdio_reg(dev, np->mii_if.phy_id,
3205 + MII_PHYSID1);
3206 + phyid_low = crisv32_eth_get_mdio_reg(dev, np->mii_if.phy_id,
3207 + MII_PHYSID2);
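 +	/* The vendor OUI is split across the two PHY ID registers; combine
 +	 * PHYSID1 with the top six bits of PHYSID2 and match the result
 +	 * against the transceiver table below. */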
3209 + oui = (phyid_high << 6) | (phyid_low >> 10);
3211 + for (ops = &transceivers[0]; ops->oui; ops++) {
3212 + if (ops->oui == oui)
3213 + break;
3216 + np->transceiver = ops;
3218 + if (oui == DM9161_OUI) {
 3219 +		/* Do not bypass the scrambler/descrambler; this is needed
3220 + * to make 10Mbit work.
3221 + */
3222 + crisv32_eth_set_mdio_reg(dev, np->mii_if.phy_id,
3223 + MII_DM9161_SCR,MII_DM9161_SCR_INIT);
3224 + /* Clear 10BTCSR to default */
3225 + crisv32_eth_set_mdio_reg(dev, np->mii_if.phy_id,
3226 + MII_DM9161_10BTCSR,
3227 + MII_DM9161_10BTCSR_INIT);
3229 + return 0;
3230 +#else
3231 + return -ENODEV;
3232 +#endif
3235 +#ifndef CONFIG_ETRAX_NO_PHY
3236 +static void generic_check_speed(struct net_device *dev)
3238 + unsigned long data;
3239 + struct crisv32_ethernet_local *np = netdev_priv(dev);
3241 + data = crisv32_eth_get_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE);
3242 + if ((data & ADVERTISE_100FULL) ||
3243 + (data & ADVERTISE_100HALF))
3244 + np->current_speed = 100;
3245 + else
3246 + np->current_speed = 10;
3249 +static void generic_check_duplex(struct net_device *dev)
3251 + unsigned long data;
3252 + struct crisv32_ethernet_local *np = netdev_priv(dev);
3254 + data = crisv32_eth_get_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE);
3255 + if ((data & ADVERTISE_10FULL) ||
3256 + (data & ADVERTISE_100FULL))
3257 + np->full_duplex = 1;
3258 + else
3259 + np->full_duplex = 0;
3262 +static void broadcom_check_speed(struct net_device *dev)
3264 + unsigned long data;
3265 + struct crisv32_ethernet_local *np = netdev_priv(dev);
3267 + data = crisv32_eth_get_mdio_reg(dev, np->mii_if.phy_id,
3268 + MDIO_AUX_CTRL_STATUS_REG);
3269 + np->current_speed = (data & MDIO_BC_SPEED ? 100 : 10);
3272 +static void broadcom_check_duplex(struct net_device *dev)
3274 + unsigned long data;
3275 + struct crisv32_ethernet_local *np = netdev_priv(dev);
3277 + data = crisv32_eth_get_mdio_reg(dev, np->mii_if.phy_id,
3278 + MDIO_AUX_CTRL_STATUS_REG);
3279 + np->full_duplex = (data & MDIO_BC_FULL_DUPLEX_IND) ? 1 : 0;
3282 +static void tdk_check_speed(struct net_device *dev)
3284 + unsigned long data;
3285 + struct crisv32_ethernet_local *np = netdev_priv(dev);
3287 + data = crisv32_eth_get_mdio_reg(dev, np->mii_if.phy_id,
3288 + MDIO_TDK_DIAGNOSTIC_REG);
3289 + np->current_speed = (data & MDIO_TDK_DIAGNOSTIC_RATE ? 100 : 10);
3292 +static void tdk_check_duplex(struct net_device *dev)
3294 + unsigned long data;
3295 + struct crisv32_ethernet_local *np = netdev_priv(dev);
3297 + data = crisv32_eth_get_mdio_reg(dev, np->mii_if.phy_id,
3298 + MDIO_TDK_DIAGNOSTIC_REG);
3299 + np->full_duplex = (data & MDIO_TDK_DIAGNOSTIC_DPLX) ? 1 : 0;
3303 +static void intel_check_speed(struct net_device *dev)
3305 + unsigned long data;
3306 + struct crisv32_ethernet_local *np = netdev_priv(dev);
3307 + data = crisv32_eth_get_mdio_reg(dev, np->mii_if.phy_id,
3308 + MDIO_INT_STATUS_REG_2);
3309 + np->current_speed = (data & MDIO_INT_SPEED ? 100 : 10);
3312 +static void intel_check_duplex(struct net_device *dev)
3314 + unsigned long data;
3315 + struct crisv32_ethernet_local *np = netdev_priv(dev);
3317 + data = crisv32_eth_get_mdio_reg(dev, np->mii_if.phy_id,
3318 + MDIO_INT_STATUS_REG_2);
3319 + np->full_duplex = (data & MDIO_INT_FULL_DUPLEX_IND) ? 1 : 0;
3322 +static void national_check_speed(struct net_device *dev)
3324 + unsigned long data;
3325 + struct crisv32_ethernet_local *np = netdev_priv(dev);
3327 + data = crisv32_eth_get_mdio_reg(dev, np->mii_if.phy_id,
3328 + MDIO_NAT_LINK_AN_REG);
3329 + if (data & MDIO_NAT_1000)
3330 + np->current_speed = 1000;
3331 + else if (data & MDIO_NAT_100)
3332 + np->current_speed = 100;
3333 + else
3334 + np->current_speed = 10;
3337 +static void national_check_duplex(struct net_device *dev)
3339 + unsigned long data;
3340 + struct crisv32_ethernet_local *np = netdev_priv(dev);
3342 + data = crisv32_eth_get_mdio_reg(dev, np->mii_if.phy_id,
3343 + MDIO_NAT_LINK_AN_REG);
3344 + if (data & MDIO_NAT_FULL_DUPLEX_IND)
3345 + np->full_duplex = 1;
3346 + else
3347 + np->full_duplex = 0;
3350 +static void vitesse_check_speed(struct net_device *dev)
3352 + unsigned long data;
3353 + struct crisv32_ethernet_local *np = netdev_priv(dev);
3355 + data = crisv32_eth_get_mdio_reg(dev, np->mii_if.phy_id,
3356 + MDIO_VIT_AUX_STAT);
3357 + if ((data & 0x18) == MDIO_VIT_1000)
3358 + np->current_speed = 1000;
3359 + else if ((data & 0x18) == MDIO_VIT_100)
3360 + np->current_speed = 100;
3361 + else
3362 + np->current_speed = 10;
3365 +static void vitesse_check_duplex(struct net_device *dev)
3367 + unsigned long data;
3368 + struct crisv32_ethernet_local *np = netdev_priv(dev);
3370 + data = crisv32_eth_get_mdio_reg(dev, np->mii_if.phy_id,
3371 + MDIO_VIT_AUX_STAT);
3372 + if (data & 0x20)
3373 + np->full_duplex = 1;
3374 + else
3375 + np->full_duplex = 0;
3378 +static void davicom_check_speed(struct net_device *dev)
3380 + unsigned long data;
3381 + struct crisv32_ethernet_local *np = netdev_priv(dev);
3383 + data = crisv32_eth_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR);
3384 + np->current_speed = (data & BMCR_SPEED100) ? 100 : 10;
3387 +static void davicom_check_duplex(struct net_device *dev)
3389 + unsigned long data;
3390 + struct crisv32_ethernet_local *np = netdev_priv(dev);
3392 + data = crisv32_eth_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR);
3393 + np->full_duplex = (data & BMCR_FULLDPLX) ? 1 : 0;
3395 +#endif
3397 +#if 0
3398 +static void crisv32_eth_reset_tranceiver(struct net_device *dev)
3400 + int i;
3401 + unsigned short cmd;
3402 + unsigned short data;
3403 + struct crisv32_ethernet_local *np = netdev_priv(dev);
3405 + data = crisv32_eth_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR);
3407 + cmd = (MDIO_START << 14)
3408 + | (MDIO_WRITE << 12)
3409 + | (np->mii_if.phy_id << 7)
3410 + | (MII_BMCR << 2);
3412 + crisv32_eth_send_mdio_cmd(dev, cmd, 1);
3414 + data |= 0x8000;
3416 + /* Magic value is number of bits. */
3417 + for (i = 15; i >= 0; i--)
3418 + crisv32_eth_send_mdio_bit(dev, GET_BIT(i, data));
3420 +#endif
3422 +static int
3423 +crisv32_eth_get_mdio_reg(struct net_device *dev, int phyid, int reg_num)
3425 + int i;
3426 + unsigned short cmd; /* Data to be sent on MDIO port. */
3427 + unsigned short data; /* Data read from MDIO. */
3429 +#ifdef CONFIG_ETRAX_NO_PHY
3430 + return 0;
3431 +#endif
3433 + /* Start of frame, OP Code, Physical Address, Register Address. */
3434 + cmd = (MDIO_START << 14)
3435 + | (MDIO_READ << 12)
3436 + | (phyid << 7)
3437 + | (reg_num << 2);
3439 + crisv32_eth_send_mdio_cmd(dev, cmd, 0);
3441 + data = 0;
3443 + /* Receive data. Magic value is number of bits. */
3444 + for (i = 15; i >= 0; i--)
3445 + data |= (crisv32_eth_receive_mdio_bit(dev) << i);
3447 + return data;
3450 +static void
3451 +crisv32_eth_set_mdio_reg(struct net_device *dev, int phyid, int reg, int value)
3453 + int bitCounter;
3454 + unsigned short cmd;
3456 +#ifdef CONFIG_ETRAX_NO_PHY
3457 + return;
3458 +#endif
3459 + cmd = (MDIO_START << 14)
3460 + | (MDIO_WRITE << 12)
3461 + | (phyid << 7)
3462 + | (reg << 2);
3464 + crisv32_eth_send_mdio_cmd(dev, cmd, 1);
3466 + /* Data... */
3467 + for (bitCounter=15; bitCounter>=0 ; bitCounter--) {
3468 + crisv32_eth_send_mdio_bit(dev, GET_BIT(bitCounter, value));
3472 +static void
3473 +crisv32_eth_send_mdio_cmd(struct net_device *dev, unsigned short cmd,
3474 + int write_cmd)
3476 + int i;
3477 + unsigned char data = 0x2;
3479 + /* Preamble. Magic value is number of bits. */
3480 + for (i = 31; i >= 0; i--)
3481 + crisv32_eth_send_mdio_bit(dev, GET_BIT(i, MDIO_PREAMBLE));
3483 + for (i = 15; i >= 2; i--)
3484 + crisv32_eth_send_mdio_bit(dev, GET_BIT(i, cmd));
3486 + /* Turnaround. */
3487 + for (i = 1; i >= 0; i--)
3488 + if (write_cmd)
3489 + crisv32_eth_send_mdio_bit(dev, GET_BIT(i, data));
3490 + else
3491 + crisv32_eth_receive_mdio_bit(dev);
3494 +static void crisv32_eth_send_mdio_bit(struct net_device *dev, unsigned char bit)
3496 + struct crisv32_ethernet_local *np = netdev_priv(dev);
3498 + reg_eth_rw_mgm_ctrl mgm_ctrl = {
3499 + .mdoe = regk_eth_yes,
3500 + .mdio = bit & 1
3501 + };
3503 + REG_WR(eth, np->eth_inst, rw_mgm_ctrl, mgm_ctrl);
3505 + udelay(1);
3507 + mgm_ctrl.mdc = 1;
3508 + REG_WR(eth, np->eth_inst, rw_mgm_ctrl, mgm_ctrl);
3510 + udelay(1);
3513 +static unsigned char crisv32_eth_receive_mdio_bit(struct net_device *dev)
3515 + reg_eth_r_stat stat;
3516 + reg_eth_rw_mgm_ctrl mgm_ctrl = {0};
3517 + struct crisv32_ethernet_local *np = netdev_priv(dev);
3519 + REG_WR(eth, np->eth_inst, rw_mgm_ctrl, mgm_ctrl);
3520 + stat = REG_RD(eth, np->eth_inst, r_stat);
3522 + udelay(1);
3524 + mgm_ctrl.mdc = 1;
3525 + REG_WR(eth, np->eth_inst, rw_mgm_ctrl, mgm_ctrl);
3527 + udelay(1);
3528 + return stat.mdio;
3531 +static void crisv32_clear_network_leds(unsigned long priv)
3533 + struct net_device *dev = (struct net_device *)priv;
3534 + struct crisv32_ethernet_local *np = netdev_priv(dev);
3535 + unsigned long flags;
3537 + spin_lock_irqsave(&np->leds->led_lock, flags);
3538 + if (np->leds->led_active && time_after(jiffies,
3539 + np->leds->led_next_time)) {
3540 + crisv32_set_network_leds(CRIS_LED_NOACTIVITY, dev);
3542 + /* Set the earliest time we may set the LED */
3543 + np->leds->led_next_time = jiffies + NET_FLASH_PAUSE;
3544 + np->leds->led_active = 0;
3546 + spin_unlock_irqrestore(&np->leds->led_lock, flags);
3549 +static void crisv32_set_network_leds(int active, struct net_device *dev)
3551 + struct crisv32_ethernet_local *np = netdev_priv(dev);
3552 + int light_leds = 0;
3554 + if (np->leds->ledgrp == CRIS_LED_GRP_NONE)
3555 + return;
3557 + if (!np->use_leds)
3558 + return;
3560 + if (active == CRIS_LED_NOLINK) {
3561 + if (dev == crisv32_dev[0])
3562 + np->leds->ifisup[0] = 0;
3563 + else
3564 + np->leds->ifisup[1] = 0;
3566 + else if (active == CRIS_LED_LINK) {
3567 + if (dev == crisv32_dev[0])
3568 + np->leds->ifisup[0] = 1;
3569 + else
3570 + np->leds->ifisup[1] = 1;
3571 +#if defined(CONFIG_ETRAX_NETWORK_LED_ON_WHEN_LINK)
3572 + light_leds = 1;
3573 + } else {
3574 + light_leds = (active == CRIS_LED_NOACTIVITY);
3575 +#elif defined(CONFIG_ETRAX_NETWORK_LED_ON_WHEN_ACTIVITY)
3576 + light_leds = 0;
3577 + } else {
3578 + light_leds = (active == CRIS_LED_ACTIVITY);
3579 +#else
3580 +#error "Define either CONFIG_ETRAX_NETWORK_LED_ON_WHEN_LINK or CONFIG_ETRAX_NETWORK_LED_ON_WHEN_ACTIVITY"
3581 +#endif
3585 +#ifdef CONFIG_NET_POLL_CONTROLLER
3586 +static void crisv32_netpoll(struct net_device *netdev)
3588 + crisv32rx_eth_interrupt(DMA0_INTR_VECT, netdev);
3590 +#endif
3592 +#ifdef CONFIG_CPU_FREQ
3593 +static int crisv32_ethernet_freq_notifier(struct notifier_block *nb,
3594 + unsigned long val, void *data)
3596 + struct cpufreq_freqs *freqs = data;
3597 + int i;
3598 + if (val != CPUFREQ_POSTCHANGE)
3599 + return 0;
3601 + for (i = 0; i < 2; i++) {
3602 + struct net_device *dev = crisv32_dev[i];
 3603 +		unsigned short data;
 +		struct crisv32_ethernet_local *np;
3604 + if (dev == NULL)
3605 + continue;
 +		np = netdev_priv(dev);
 3607 +		data = crisv32_eth_get_mdio_reg(dev, np->mii_if.phy_id,
3608 + MII_BMCR);
3609 + if (freqs->new == 200000)
3610 + data &= ~BMCR_PDOWN;
3611 + else
3612 + data |= BMCR_PDOWN;
3613 + crisv32_eth_set_mdio_reg(dev, np->mii_if.phy_id,
3614 + MII_BMCR, data);
3616 + return 0;
3618 +#endif
3620 +#if 0
3622 + * Must be called with the np->lock held.
3623 + */
3624 +static void crisv32_ethernet_bug(struct net_device *dev)
3626 + struct crisv32_ethernet_local *np = netdev_priv(dev);
3627 + dma_descr_data *dma_pos;
3628 + dma_descr_data *in_dma_pos;
3629 + reg_dma_rw_stat stat = {0};
3630 + reg_dma_rw_stat in_stat = {0};
3631 + int i;
3633 + /* Get the current output dma position. */
3634 + stat = REG_RD(dma, np->dma_out_inst, rw_stat);
3636 + dma_pos = phys_to_virt(REG_RD_INT(dma, np->dma_out_inst, rw_data));
3637 + in_dma_pos = phys_to_virt(REG_RD_INT(dma, np->dma_in_inst, rw_data));
3638 + in_stat = REG_RD(dma, np->dma_in_inst, rw_stat);
3640 + printk("%s:\n"
3641 + "stat.list_state=%x\n"
3642 + "stat.mode=%x\n"
3643 + "stat.stream_cmd_src=%x\n"
3644 + "dma_pos=%x\n"
3645 + "tx catch=%x active=%x\n"
3646 + "packets=%d queue=%d sender_started=%d\n"
3647 + "intr_vect.r_vect=%x\n"
3648 + "dma.r_masked_intr=%x dma.rw_ack_intr=%x "
3649 + "dma.r_intr=%x dma.rw_intr_masked=%x\n"
3650 + "eth.r_stat=%x\n",
3651 + __func__,
3652 + stat.list_state, stat.mode, stat.stream_cmd_src,
3653 + (unsigned int)dma_pos,
3654 + (unsigned int)&np->catch_tx_desc->descr,
3655 + (unsigned int)&np->active_tx_desc->descr,
3656 + np->txpackets,
3657 + netif_queue_stopped(dev), np->sender_started,
3658 + REG_RD_INT(intr_vect, regi_irq, r_vect),
3659 + REG_RD_INT(dma, np->dma_out_inst, r_masked_intr),
3660 + REG_RD_INT(dma, np->dma_out_inst, rw_ack_intr),
3661 + REG_RD_INT(dma, np->dma_out_inst, r_intr),
3662 + REG_RD_INT(dma, np->dma_out_inst, rw_intr_mask),
3663 + REG_RD_INT(eth, np->eth_inst, r_stat));
3665 + printk("in_stat.list_state=%x\n"
3666 + "in_stat.mode=%x\n"
3667 + "in_stat.stream_cmd_src=%x\n"
3668 + "in_dma_pos=%x\n"
3669 + "rx last=%x prev=%x active=%x\n",
3670 + in_stat.list_state, in_stat.mode, in_stat.stream_cmd_src,
3671 + (unsigned int)in_dma_pos,
3672 + (unsigned int)&np->last_rx_desc->descr,
3673 + (unsigned int)&np->prev_rx_desc->descr,
3674 + (unsigned int)&np->active_rx_desc->descr);
3676 +#if 0
3677 + printk("rx-descriptors:\n");
3678 + for (i = 0; i < NBR_RX_DESC; i++) {
3679 + printk("rxdesc[%d]=0x%x\n", i, (unsigned int)
3680 + virt_to_phys(&np->dma_rx_descr_list[i].descr));
3681 + printk("rxdesc[%d].skb=0x%x\n", i,
3682 + (unsigned int)np->dma_rx_descr_list[i].skb);
3683 + printk("rxdesc[%d].buf=0x%x\n", i,
3684 + (unsigned int)np->dma_rx_descr_list[i].descr.buf);
3685 + printk("rxdesc[%d].after=0x%x\n", i,
3686 + (unsigned int)np->dma_rx_descr_list[i].descr.after);
3687 + printk("rxdesc[%d].intr=%x\n", i,
3688 + np->dma_rx_descr_list[i].descr.intr);
3689 + printk("rxdesc[%d].eol=%x\n", i,
3690 + np->dma_rx_descr_list[i].descr.eol);
3691 + printk("rxdesc[%d].out_eop=%x\n", i,
3692 + np->dma_rx_descr_list[i].descr.out_eop);
3693 + printk("rxdesc[%d].in_eop=%x\n", i,
3694 + np->dma_rx_descr_list[i].descr.in_eop);
3695 + printk("rxdesc[%d].wait=%x\n", i,
3696 + np->dma_rx_descr_list[i].descr.wait);
3697 + }
3698 +#endif
3700 +#if 1
3701 + printk("tx-descriptors:\n");
3702 + for (i = 0; i < NBR_TX_DESC; i++) {
3703 + printk("txdesc[%d]=0x%x\n", i, (unsigned int)
3704 + virt_to_phys(&np->dma_tx_descr_list[i].descr));
3705 + printk("txdesc[%d].skb=0x%x\n", i,
3706 + (unsigned int)np->dma_tx_descr_list[i].skb);
3707 + printk("txdesc[%d].buf=0x%x\n", i,
3708 + (unsigned int)np->dma_tx_descr_list[i].descr.buf);
3709 + printk("txdesc[%d].after=0x%x\n", i,
3710 + (unsigned int)np->dma_tx_descr_list[i].descr.after);
3711 + printk("txdesc[%d].intr=%x\n", i,
3712 + np->dma_tx_descr_list[i].descr.intr);
3713 + printk("txdesc[%d].eol=%x\n", i,
3714 + np->dma_tx_descr_list[i].descr.eol);
3715 + printk("txdesc[%d].out_eop=%x\n", i,
3716 + np->dma_tx_descr_list[i].descr.out_eop);
3717 + printk("txdesc[%d].in_eop=%x\n", i,
3718 + np->dma_tx_descr_list[i].descr.in_eop);
3719 + printk("txdesc[%d].wait=%x\n", i,
3720 + np->dma_tx_descr_list[i].descr.wait);
3721 + }
3722 +#endif
3723 +}
3724 +#endif
3725 +
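+/*
+ * "crisv32_eth=xx:xx:xx:xx:xx:xx" kernel command line option: parse a
+ * colon-separated station address and install it as the default MAC
+ * address for interface 0.
+ */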
3726 +static int __init crisv32_boot_setup(char *str)
3727 +{
3728 + struct sockaddr sa = {0};
3729 + int i;
3731 + /* Parse the colon separated Ethernet station address */
3732 + for (i = 0; i < ETH_ALEN; i++) {
3733 + unsigned int tmp;
3734 + if (sscanf(str + 3*i, "%2x", &tmp) != 1) {
3735 + printk(KERN_WARNING "Malformed station address");
3736 + return 0;
3737 + }
3738 + sa.sa_data[i] = (char)tmp;
3739 + }
3740 +
3741 + default_mac_iface0 = sa;
3742 + return 1;
3743 +}
3744 +
3745 +__setup("crisv32_eth=", crisv32_boot_setup);
3746 +
3747 +module_init(crisv32_ethernet_init);
3748 diff -Nur linux-4.7.3.orig/drivers/net/cris/eth_v32.h linux-4.7.3/drivers/net/cris/eth_v32.h
3749 --- linux-4.7.3.orig/drivers/net/cris/eth_v32.h 1970-01-01 01:00:00.000000000 +0100
3750 +++ linux-4.7.3/drivers/net/cris/eth_v32.h 2016-09-13 01:47:09.527718381 +0200
3751 @@ -0,0 +1,291 @@
3752 +/*
3753 + * Definitions for ETRAX FS ethernet driver.
3754 + *
3755 + * Copyright (C) 2003, 2004, 2005 Axis Communications.
3756 + */
3758 +#ifndef _ETRAX_ETHERNET_H_
3759 +#define _ETRAX_ETHERNET_H_
3761 +#include <hwregs/dma.h>
3763 +#define MAX_MEDIA_DATA_SIZE 1522 /* Max packet size. */
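+ /* 1522 bytes = 1500 byte payload + 14 byte header
+ * + 4 byte VLAN tag + 4 byte FCS. */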
3765 +#define NBR_RX_DESC 128 /* Number of RX descriptors. */
3766 +#define NBR_TX_DESC 16 /* Number of TX descriptors. */
3767 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
3768 +#define NBR_INTMEM_RX_DESC 16 /* Number of RX descriptors in int. mem.
3769 + * when running in gigabit mode.
3770 + * Should be less than NBR_RX_DESC
3771 + */
3772 +#define NBR_INTMEM_TX_BUF 4 /* Number of TX buffers in int. mem
3773 + * when running in gigabit mode.
3774 + * Should be less than NBR_TX_DESC
3775 + */
3776 +#endif
3778 +/* Large packets are sent directly to upper layers while small packets
3779 + * are copied (to reduce memory waste). The following constant
3780 + * decides the breakpoint.
3781 + */
3782 +#define RX_COPYBREAK (256)
3784 +#define ETHER_HEAD_LEN (14)
3785 +
3786 +/*
3787 + * MDIO constants.
3788 + */
3789 +#define MDIO_START 0x1
3790 +#define MDIO_READ 0x2
3791 +#define MDIO_WRITE 0x1
3792 +#define MDIO_PREAMBLE 0xfffffffful
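+/* The values above encode the clause 22 MDIO frame fields: a preamble
+ * of 32 ones, the 01 start-of-frame pattern and the two-bit opcode
+ * (10 = read, 01 = write). */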
3794 +/* Broadcom specific */
3795 +#define MDIO_AUX_CTRL_STATUS_REG 0x18
3796 +#define MDIO_BC_FULL_DUPLEX_IND 0x1
3797 +#define MDIO_BC_SPEED 0x2
3799 +/* TDK specific */
3800 +#define MDIO_TDK_DIAGNOSTIC_REG 18
3801 +#define MDIO_TDK_DIAGNOSTIC_RATE 0x400
3802 +#define MDIO_TDK_DIAGNOSTIC_DPLX 0x800
3804 +/*Intel LXT972A specific*/
3805 +#define MDIO_INT_STATUS_REG_2 0x0011
3806 +#define MDIO_INT_FULL_DUPLEX_IND ( 0x0001 << 9 )
3807 +#define MDIO_INT_SPEED ( 0x0001 << 14 )
3809 +/*National Semiconductor DP83865 specific*/
3810 +#define MDIO_NAT_LINK_AN_REG 0x11
3811 +#define MDIO_NAT_1000 (0x0001 << 4)
3812 +#define MDIO_NAT_100 (0x0001 << 3)
3813 +#define MDIO_NAT_FULL_DUPLEX_IND (0x0001 << 1)
3815 +/* Vitesse VCS8641 specific */
3816 +#define MDIO_VIT_AUX_STAT 0x1c
3817 +#define MDIO_VIT_1000 (0x2 << 3)
3818 +#define MDIO_VIT_100 (0x1 << 3)
3819 +#define MDIO_VIT_10 0
3820 +#define MDIO_VIT_FD (0x1 << 5)
3822 +/* Davicom DM9161 specific */
3823 +#define DM9161_OUI 0x606E
3824 +#define MII_DM9161_SCR 0x10
3825 +#define MII_DM9161_SCR_INIT 0x0610
3826 +#define MII_DM9161_SCR_RMII 0x0100
3827 +#define MII_DM9161_10BTCSR 0x12
3828 +#define MII_DM9161_10BTCSR_INIT 0x7800
3830 +/* Network flash constants */
3831 +#define NET_FLASH_TIME (HZ/50) /* 20 ms */
3832 +#define NET_FLASH_PAUSE (HZ/100) /* 10 ms */
3833 +#define NET_LINK_UP_CHECK_INTERVAL (2*HZ) /* 2 seconds. */
3834 +#define NET_DUPLEX_CHECK_INTERVAL (2*HZ) /* 2 seconds. */
3836 +/* Duplex settings. */
3837 +enum duplex {
3838 + half,
3839 + full,
3840 + autoneg
3841 +};
3842 +
3843 +/* Some transceivers require special handling. */
3844 +struct transceiver_ops {
3845 + unsigned int oui;
3846 + void (*check_speed) (struct net_device * dev);
3847 + void (*check_duplex) (struct net_device * dev);
3848 +};
3849 +
3850 +typedef struct crisv32_eth_descr {
3851 + dma_descr_data descr __attribute__ ((__aligned__(32)));
3852 + struct sk_buff *skb;
3853 + unsigned char *linearized_packet;
3854 +} crisv32_eth_descr;
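+/* One ring entry: the hardware DMA descriptor together with the
+ * bookkeeping (sk_buff and optional linearized copy) for that slot. */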
3856 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
3857 +struct tx_buffer_list {
3858 + struct tx_buffer_list *next;
3859 + unsigned char *buf;
3860 + char free;
3861 +};
3862 +#endif
3864 +/* LED stuff */
3865 +#define CRIS_LED_GRP_0 0
3866 +#define CRIS_LED_GRP_1 1
3867 +#define CRIS_LED_GRP_NONE 2
3869 +#define CRIS_LED_ACTIVITY 0
3870 +#define CRIS_LED_NOACTIVITY 1
3871 +#define CRIS_LED_LINK 2
3872 +#define CRIS_LED_NOLINK 3
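+/* LED states passed as the "active" argument to crisv32_set_network_leds(). */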
3874 +struct crisv32_eth_leds {
3875 + unsigned int ledgrp;
3876 + int led_active;
3877 + unsigned long led_next_time;
3878 + struct timer_list clear_led_timer;
3879 + spinlock_t led_lock; /* Protect LED state */
3880 + int ifisup[2];
3881 +};
3882 +
3883 +/* Information that needs to be kept for each device. */
3884 +struct crisv32_ethernet_local {
3885 + /* FIXME: These align attributes don't really help. If they are really
3886 + * needed, alignment has to be enforced at runtime, since these
3887 + * objects are dynamically allocated. */
3888 + dma_descr_context ctxt_in __attribute__ ((__aligned__(32)));
3889 + dma_descr_context ctxt_out __attribute__ ((__aligned__(32)));
3891 + crisv32_eth_descr dma_rx_descr_list[NBR_RX_DESC];
3892 + crisv32_eth_descr dma_tx_descr_list[NBR_TX_DESC];
3893 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
3894 + struct tx_buffer_list tx_intmem_buf_list[NBR_INTMEM_TX_BUF];
3895 + struct tx_buffer_list *intmem_tx_buf_active;
3896 + struct tx_buffer_list *intmem_tx_buf_catch;
3897 + int gigabit_mode;
3898 +#endif
3899 + /* Transmit data path. */
3900 + int dma_out_inst;
3901 + int sender_started;
3903 + /* TX-ring state. */
3904 + crisv32_eth_descr *active_tx_desc;
3905 + crisv32_eth_descr *prev_tx_desc;
3906 + crisv32_eth_descr *catch_tx_desc;
3907 + int txpackets;
3908 + int retrans;
3909 + int do_tx_recovery;
3910 + struct timer_list transmit_timer;
3912 + /* Receive data path. */
3913 + struct napi_struct napi;
3914 + int dma_in_inst;
3916 + /* RX-ring state. */
3917 + crisv32_eth_descr *active_rx_desc;
3918 + crisv32_eth_descr *prev_rx_desc;
3919 + crisv32_eth_descr *last_rx_desc;
3921 + unsigned long newbuf;
3922 + u8 new_rx_package;
3923 + u8 pending_overrun;
3924 + u8 overrun_set;
3925 + u8 link;
3926 + int napi_processing;
3927 + struct timer_list receive_timer;
3928 + struct work_struct receive_work;
3929 + reg_eth_rw_rec_ctrl saved_rec_ctrl;
3930 + int saved_ga_lo;
3931 + int saved_ga_hi;
3932 + int do_rx_recovery;
3934 + /* Control paths. */
3935 + spinlock_t lock;
3936 + struct net_device *dev;
3937 + int eth_inst;
3939 + /* Toggle network LEDs usage at runtime */
3940 + int use_leds;
3941 + struct crisv32_eth_leds *leds;
3943 + /* PHY control. */
3944 + int fixed_phy;
3945 + spinlock_t transceiver_lock; /* Protect transceiver state. */
3946 + struct transceiver_ops *transceiver;
3947 + struct mii_if_info mii_if;
3949 + /* Specifies if we should do autonegotiation or not.
3950 + * TODO: This ad-hoc hack should be removed. Ethtool already supports
3951 + * this kind of control.
3952 + */
3953 + int autoneg_normal;
3955 + struct timer_list duplex_timer;
3956 + int full_duplex;
3957 + enum duplex current_duplex;
3959 + struct timer_list speed_timer;
3960 + int current_speed; /* Speed read from transceiver */
3961 + int current_speed_selection; /* Speed selected by user */
3963 + /* Statistics. */
3964 + u64 tx_dma_restarts;
3965 + u64 tx_mac_resets;
3966 + u64 rx_dma_restarts;
3967 + u64 rx_dma_timeouts;
3968 + u64 rx_restarts_dropped;
3970 + struct net_device_stats stats;
3971 +};
3972 +
3973 +/* Function prototypes. */
3974 +static int crisv32_ethernet_init(void);
3975 +static int crisv32_ethernet_device_init(struct net_device *dev);
3976 +static int crisv32_eth_open(struct net_device *dev);
3977 +static int crisv32_eth_close(struct net_device *dev);
3978 +static int crisv32_eth_set_mac_address(struct net_device *dev, void *vpntr);
3979 +static irqreturn_t crisv32rx_eth_interrupt(int irq, void *dev_id);
3980 +static irqreturn_t crisv32tx_eth_interrupt(int irq, void *dev_id);
3981 +static irqreturn_t crisv32nw_eth_interrupt(int irq, void *dev_id);
3982 +static int crisv32_eth_send_packet(struct sk_buff *skb, struct net_device *dev);
3983 +static void crisv32_eth_hw_send_packet(unsigned char *buf, int length,
3984 + void *priv);
3985 +static void crisv32_eth_do_tx_recovery(struct net_device *dev);
3986 +static void crisv32_eth_set_rx_mode(struct net_device *dev);
3987 +static int crisv32_eth_ioctl(struct net_device *dev, struct ifreq *ifr,
3988 + int cmd);
3989 +static int crisv32_eth_set_config(struct net_device *dev, struct ifmap *map);
3990 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
3991 +static void crisv32_eth_switch_intmem_usage(struct net_device *dev);
3992 +#endif
3993 +static void crisv32_eth_negotiate(struct net_device *dev);
3994 +static void crisv32_eth_set_speed(struct net_device *dev, unsigned long speed);
3995 +#ifndef CONFIG_ETRAX_NO_PHY
3996 +static void crisv32_eth_check_duplex(unsigned long idev);
3997 +static void crisv32_eth_check_speed(unsigned long idev);
3998 +#endif
4000 +static void crisv32_eth_set_duplex(struct net_device *dev, enum duplex);
4001 +static int crisv32_eth_probe_transceiver(struct net_device *dev);
4003 +static struct ethtool_ops crisv32_ethtool_ops;
4005 +#ifndef CONFIG_ETRAX_NO_PHY
4006 +static void generic_check_speed(struct net_device *dev);
4007 +static void generic_check_duplex(struct net_device *dev);
4008 +static void broadcom_check_speed(struct net_device *dev);
4009 +static void broadcom_check_duplex(struct net_device *dev);
4010 +static void tdk_check_speed(struct net_device *dev);
4011 +static void tdk_check_duplex(struct net_device *dev);
4012 +static void intel_check_speed(struct net_device *dev);
4013 +static void intel_check_duplex(struct net_device *dev);
4014 +static void national_check_speed(struct net_device *dev);
4015 +static void national_check_duplex(struct net_device *dev);
4016 +static void vitesse_check_speed(struct net_device *dev);
4017 +static void vitesse_check_duplex(struct net_device *dev);
4018 +static void davicom_check_speed(struct net_device *dev);
4019 +static void davicom_check_duplex(struct net_device *dev);
4020 +#endif
4022 +#ifdef CONFIG_NET_POLL_CONTROLLER
4023 +static void crisv32_netpoll(struct net_device *dev);
4024 +#endif
4026 +static void crisv32_clear_network_leds(unsigned long dummy);
4027 +static void crisv32_set_network_leds(int active, struct net_device *dev);
4029 +static int crisv32_eth_get_mdio_reg(struct net_device *dev,
4030 + int phyid, int reg_num);
4031 +static void crisv32_eth_set_mdio_reg(struct net_device *dev,
4032 + int phyid, int reg_num, int val);
4033 +static void crisv32_eth_send_mdio_cmd(struct net_device *dev,
4034 + unsigned short cmd, int write_cmd);
4035 +static void crisv32_eth_send_mdio_bit(struct net_device *dev,
4036 + unsigned char bit);
4037 +static unsigned char crisv32_eth_receive_mdio_bit(struct net_device *dev);
4039 +static struct net_device_stats *crisv32_get_stats(struct net_device *dev);
4040 +static void crisv32_start_dma_out(struct crisv32_ethernet_local *np);
4042 +#endif /* _ETRAX_ETHERNET_H_ */
4043 diff -Nur linux-4.7.3.orig/drivers/net/cris/Makefile linux-4.7.3/drivers/net/cris/Makefile
4044 --- linux-4.7.3.orig/drivers/net/cris/Makefile 2016-09-07 08:35:12.000000000 +0200
4045 +++ linux-4.7.3/drivers/net/cris/Makefile 2016-09-13 01:47:09.527718381 +0200
4046 @@ -1 +1,2 @@
4047 obj-$(CONFIG_ETRAX_ARCH_V10) += eth_v10.o
4048 +obj-$(CONFIG_ETRAX_ARCH_V32) += eth_v32.o