/* drivers/net/ethernet/freescale/gianfar_ethtool.c
 *
 *  Gianfar Ethernet Driver
 *  Ethtool support for Gianfar Enet
 *  Based on e1000 ethtool support
 *
 *  Maintainer: Kumar Gala
 *  Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 *  Copyright 2003-2006, 2008-2009, 2011 Freescale Semiconductor, Inc.
 *
 *  This software may be used and distributed according to
 *  the terms of the GNU Public License, Version 2, incorporated herein
 *  by reference.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/net_tstamp.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/crc32.h>
#include <asm/types.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/sort.h>
#include <linux/if_vlan.h>

#include "gianfar.h"
extern void gfar_start(struct net_device *dev);
extern int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue,
			      int rx_work_limit);
#define GFAR_MAX_COAL_USECS 0xffff
#define GFAR_MAX_COAL_FRAMES 0xff
static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
			    u64 *buf);
static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 *buf);
static int gfar_gcoalesce(struct net_device *dev,
			  struct ethtool_coalesce *cvals);
static int gfar_scoalesce(struct net_device *dev,
			  struct ethtool_coalesce *cvals);
static void gfar_gringparam(struct net_device *dev,
			    struct ethtool_ringparam *rvals);
static int gfar_sringparam(struct net_device *dev,
			   struct ethtool_ringparam *rvals);
static void gfar_gdrvinfo(struct net_device *dev,
			  struct ethtool_drvinfo *drvinfo);
static const char stat_gstrings[][ETH_GSTRING_LEN] = {
	"rx-dropped-by-kernel",
	"rx-large-frame-errors",
	"rx-short-frame-errors",
	"rx-non-octet-errors",
	"rx-truncated-frames",
	"rx-skb-missing-errors",
	"tx-rx-65-127-frames",
	"tx-rx-128-255-frames",
	"tx-rx-256-511-frames",
	"tx-rx-512-1023-frames",
	"tx-rx-1024-1518-frames",
	"tx-rx-1519-1522-good-vlan",
	"receive-multicast-packet",
	"receive-broadcast-packet",
	"rx-control-frame-packets",
	"rx-pause-frame-packets",
	"rx-frame-length-error",
	"rx-carrier-sense-error",
	"rx-undersize-packets",
	"rx-oversize-packets",
	"rx-fragmented-frames",
	"tx-multicast-packets",
	"tx-broadcast-packets",
	"tx-pause-control-frames",
	"tx-deferral-packets",
	"tx-excessive-deferral-packets",
	"tx-single-collision-packets",
	"tx-multiple-collision-packets",
	"tx-late-collision-packets",
	"tx-excessive-collision-packets",
	"tx-total-collision",
	"tx-oversize-frames",
	"tx-undersize-frames",
	"tx-fragmented-frames",
};
/* Fill in a buffer with the strings which correspond to the
 * stats */
static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON)
		memcpy(buf, stat_gstrings, GFAR_STATS_LEN * ETH_GSTRING_LEN);
	else
		memcpy(buf, stat_gstrings,
		       GFAR_EXTRA_STATS_LEN * ETH_GSTRING_LEN);
}
/* Fill in an array of 64-bit statistics from various sources.
 * This array will be appended to the end of the ethtool_stats
 * structure, and returned to user space
 */
static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
			    u64 *buf)
{
	int i;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u64 *extra = (u64 *) &priv->extra_stats;

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		u32 __iomem *rmon = (u32 __iomem *) &regs->rmon;
		struct gfar_stats *stats = (struct gfar_stats *) buf;

		for (i = 0; i < GFAR_RMON_LEN; i++)
			stats->rmon[i] = (u64) gfar_read(&rmon[i]);

		for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++)
			stats->extra[i] = extra[i];
	} else {
		for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++)
			buf[i] = extra[i];
	}
}
static int gfar_sset_count(struct net_device *dev, int sset)
{
	struct gfar_private *priv = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON)
			return GFAR_STATS_LEN;
		else
			return GFAR_EXTRA_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}
/* Fills in the drvinfo structure with some basic info */
static void gfar_gdrvinfo(struct net_device *dev,
			  struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, gfar_driver_version,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, "N/A", sizeof(drvinfo->bus_info));
	drvinfo->regdump_len = 0;
	drvinfo->eedump_len = 0;
}
static int gfar_ssettings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_device *phydev = priv->phydev;

	if (NULL == phydev)
		return -ENODEV;

	return phy_ethtool_sset(phydev, cmd);
}
/* Return the current settings in the ethtool_cmd structure */
static int gfar_gsettings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_device *phydev = priv->phydev;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct gfar_priv_tx_q *tx_queue = NULL;

	if (NULL == phydev)
		return -ENODEV;
	tx_queue = priv->tx_queue[0];
	rx_queue = priv->rx_queue[0];

	/* etsec-1.7 and older versions have only one txic
	 * and rxic regs although they support multiple queues */
	cmd->maxtxpkt = get_icft_value(tx_queue->txic);
	cmd->maxrxpkt = get_icft_value(rx_queue->rxic);

	return phy_ethtool_gset(phydev, cmd);
}
/* Return the length of the register structure */
static int gfar_reglen(struct net_device *dev)
{
	return sizeof (struct gfar);
}
/* Return a dump of the GFAR register space */
static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			  void *regbuf)
{
	int i;
	struct gfar_private *priv = netdev_priv(dev);
	u32 __iomem *theregs = (u32 __iomem *) priv->gfargrp[0].regs;
	u32 *buf = (u32 *) regbuf;

	for (i = 0; i < sizeof (struct gfar) / sizeof (u32); i++)
		buf[i] = gfar_read(&theregs[i]);
}
/* Convert microseconds to ethernet clock ticks, which changes
 * depending on what speed the controller is running at */
static unsigned int gfar_usecs2ticks(struct gfar_private *priv,
				     unsigned int usecs)
{
	unsigned int count;

	/* The timer is different, depending on the interface speed */
	switch (priv->phydev->speed) {
	case SPEED_1000:
		count = GFAR_GBIT_TIME;
		break;
	case SPEED_100:
		count = GFAR_100_TIME;
		break;
	case SPEED_10:
	default:
		count = GFAR_10_TIME;
		break;
	}

	/* Make sure we return a number greater than 0
	 * if usecs is > 0 */
	return (usecs * 1000 + count - 1) / count;
}
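/* A worked example of the conversion above (illustrative only; it assumes
 * GFAR_GBIT_TIME is 512, i.e. one coalescing tick per 64 byte times at
 * 1 Gbit/s): requesting 100 us gives (100 * 1000 + 511) / 512 = 196 ticks.
 * The round-up keeps any non-zero request from collapsing to 0 ticks.
 */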
/* Convert ethernet clock ticks to microseconds */
static unsigned int gfar_ticks2usecs(struct gfar_private *priv,
				     unsigned int ticks)
{
	unsigned int count;

	/* The timer is different, depending on the interface speed */
	switch (priv->phydev->speed) {
	case SPEED_1000:
		count = GFAR_GBIT_TIME;
		break;
	case SPEED_100:
		count = GFAR_100_TIME;
		break;
	case SPEED_10:
	default:
		count = GFAR_10_TIME;
		break;
	}

	/* Make sure we return a number greater than 0 */
	/* if ticks is > 0 */
	return (ticks * count) / 1000;
}
/* Get the coalescing parameters, and put them in the cvals
 * structure.
 */
static int gfar_gcoalesce(struct net_device *dev,
			  struct ethtool_coalesce *cvals)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct gfar_priv_tx_q *tx_queue = NULL;
	unsigned long rxtime;
	unsigned long rxcount;
	unsigned long txtime;
	unsigned long txcount;

	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
		return -EOPNOTSUPP;

	if (NULL == priv->phydev)
		return -ENODEV;

	rx_queue = priv->rx_queue[0];
	tx_queue = priv->tx_queue[0];

	rxtime  = get_ictt_value(rx_queue->rxic);
	rxcount = get_icft_value(rx_queue->rxic);
	txtime  = get_ictt_value(tx_queue->txic);
	txcount = get_icft_value(tx_queue->txic);
	cvals->rx_coalesce_usecs = gfar_ticks2usecs(priv, rxtime);
	cvals->rx_max_coalesced_frames = rxcount;

	cvals->tx_coalesce_usecs = gfar_ticks2usecs(priv, txtime);
	cvals->tx_max_coalesced_frames = txcount;

	cvals->use_adaptive_rx_coalesce = 0;
	cvals->use_adaptive_tx_coalesce = 0;

	cvals->pkt_rate_low = 0;
	cvals->rx_coalesce_usecs_low = 0;
	cvals->rx_max_coalesced_frames_low = 0;
	cvals->tx_coalesce_usecs_low = 0;
	cvals->tx_max_coalesced_frames_low = 0;

	/* When the packet rate is below pkt_rate_high but above
	 * pkt_rate_low (both measured in packets per second) the
	 * normal {rx,tx}_* coalescing parameters are used.
	 */

	/* When the packet rate (measured in packets per second)
	 * is above pkt_rate_high, the {rx,tx}_*_high parameters are
	 * used.
	 */
	cvals->pkt_rate_high = 0;
	cvals->rx_coalesce_usecs_high = 0;
	cvals->rx_max_coalesced_frames_high = 0;
	cvals->tx_coalesce_usecs_high = 0;
	cvals->tx_max_coalesced_frames_high = 0;

	/* How often to do adaptive coalescing packet rate sampling,
	 * measured in seconds. Must not be zero.
	 */
	cvals->rate_sample_interval = 0;

	return 0;
}
/* Change the coalescing values.
 * Both cvals->*_usecs and cvals->*_frames have to be > 0
 * in order for coalescing to be active
 */
static int gfar_scoalesce(struct net_device *dev,
			  struct ethtool_coalesce *cvals)
{
	struct gfar_private *priv = netdev_priv(dev);
	int i = 0;

	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
		return -EOPNOTSUPP;

	/* Set up rx coalescing */
	/* As of now, we will enable/disable coalescing for all
	 * queues together in case of eTSEC2, this will be modified
	 * along with the ethtool interface
	 */
	if ((cvals->rx_coalesce_usecs == 0) ||
	    (cvals->rx_max_coalesced_frames == 0)) {
		for (i = 0; i < priv->num_rx_queues; i++)
			priv->rx_queue[i]->rxcoalescing = 0;
	} else {
		for (i = 0; i < priv->num_rx_queues; i++)
			priv->rx_queue[i]->rxcoalescing = 1;
	}

	if (NULL == priv->phydev)
		return -ENODEV;

	/* Check the bounds of the values */
	if (cvals->rx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
		pr_info("Coalescing is limited to %d microseconds\n",
			GFAR_MAX_COAL_USECS);
		return -EINVAL;
	}

	if (cvals->rx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
		pr_info("Coalescing is limited to %d frames\n",
			GFAR_MAX_COAL_FRAMES);
		return -EINVAL;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i]->rxic = mk_ic_value(
			cvals->rx_max_coalesced_frames,
			gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs));
	}

	/* Set up tx coalescing */
	if ((cvals->tx_coalesce_usecs == 0) ||
	    (cvals->tx_max_coalesced_frames == 0)) {
		for (i = 0; i < priv->num_tx_queues; i++)
			priv->tx_queue[i]->txcoalescing = 0;
	} else {
		for (i = 0; i < priv->num_tx_queues; i++)
			priv->tx_queue[i]->txcoalescing = 1;
	}

	/* Check the bounds of the values */
	if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
		pr_info("Coalescing is limited to %d microseconds\n",
			GFAR_MAX_COAL_USECS);
		return -EINVAL;
	}

	if (cvals->tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
		pr_info("Coalescing is limited to %d frames\n",
			GFAR_MAX_COAL_FRAMES);
		return -EINVAL;
	}

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i]->txic = mk_ic_value(
			cvals->tx_max_coalesced_frames,
			gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs));
	}

	gfar_configure_coalescing(priv, 0xFF, 0xFF);

	return 0;
}
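/* From user space these values are typically exercised through ethtool's
 * coalescing interface, e.g. (example command, not part of this driver):
 *
 *	ethtool -C eth0 rx-usecs 100 rx-frames 16 tx-usecs 100 tx-frames 16
 *
 * Setting rx-usecs or rx-frames to 0 disables rx coalescing on all queues,
 * as implemented above.
 */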
/* Fills in rvals with the current ring parameters. Currently,
 * rx, rx_mini, and rx_jumbo rings are the same size, as mini and
 * jumbo are ignored by the driver */
static void gfar_gringparam(struct net_device *dev,
			    struct ethtool_ringparam *rvals)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	tx_queue = priv->tx_queue[0];
	rx_queue = priv->rx_queue[0];

	rvals->rx_max_pending = GFAR_RX_MAX_RING_SIZE;
	rvals->rx_mini_max_pending = GFAR_RX_MAX_RING_SIZE;
	rvals->rx_jumbo_max_pending = GFAR_RX_MAX_RING_SIZE;
	rvals->tx_max_pending = GFAR_TX_MAX_RING_SIZE;

	/* Values changeable by the user. The valid values are
	 * in the range 1 to the "*_max_pending" counterpart above.
	 */
	rvals->rx_pending = rx_queue->rx_ring_size;
	rvals->rx_mini_pending = rx_queue->rx_ring_size;
	rvals->rx_jumbo_pending = rx_queue->rx_ring_size;
	rvals->tx_pending = tx_queue->tx_ring_size;
}
/* Change the current ring parameters, stopping the controller if
 * necessary so that we don't mess things up while we're in
 * motion. We wait for the ring to be clean before reallocating
 * the rings.
 */
static int gfar_sringparam(struct net_device *dev,
			   struct ethtool_ringparam *rvals)
{
	struct gfar_private *priv = netdev_priv(dev);
	int err = 0, i = 0;

	if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE)
		return -EINVAL;

	if (!is_power_of_2(rvals->rx_pending)) {
		netdev_err(dev, "Ring sizes must be a power of 2\n");
		return -EINVAL;
	}

	if (rvals->tx_pending > GFAR_TX_MAX_RING_SIZE)
		return -EINVAL;

	if (!is_power_of_2(rvals->tx_pending)) {
		netdev_err(dev, "Ring sizes must be a power of 2\n");
		return -EINVAL;
	}

	if (dev->flags & IFF_UP) {
		unsigned long flags;

		/* Halt TX and RX, and process the frames which
		 * have already been received
		 */
		local_irq_save(flags);
		lock_tx_qs(priv);

		gfar_halt(dev);

		unlock_tx_qs(priv);
		local_irq_restore(flags);

		for (i = 0; i < priv->num_rx_queues; i++)
			gfar_clean_rx_ring(priv->rx_queue[i],
					   priv->rx_queue[i]->rx_ring_size);

		/* Now we take down the rings to rebuild them */
		stop_gfar(dev);
	}

	/* Change the size */
	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i]->rx_ring_size = rvals->rx_pending;
		priv->tx_queue[i]->tx_ring_size = rvals->tx_pending;
		priv->tx_queue[i]->num_txbdfree =
			priv->tx_queue[i]->tx_ring_size;
	}

	/* Rebuild the rings with the new size */
	if (dev->flags & IFF_UP) {
		err = startup_gfar(dev);
		netif_tx_wake_all_queues(dev);
	}
	return err;
}
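/* Usage sketch (example command, not part of this driver): ring sizes are
 * changed from user space via
 *
 *	ethtool -G eth0 rx 512 tx 512
 *
 * Values must be powers of 2 and no larger than the *_MAX_RING_SIZE
 * limits checked above.
 */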
int gfar_set_features(struct net_device *dev, netdev_features_t features)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long flags;
	int err = 0, i = 0;
	netdev_features_t changed = dev->features ^ features;

	if (changed & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX))
		gfar_vlan_mode(dev, features);

	if (!(changed & NETIF_F_RXCSUM))
		return 0;

	if (dev->flags & IFF_UP) {
		/* Halt TX and RX, and process the frames which
		 * have already been received
		 */
		local_irq_save(flags);
		lock_tx_qs(priv);

		gfar_halt(dev);

		unlock_tx_qs(priv);
		local_irq_restore(flags);

		for (i = 0; i < priv->num_rx_queues; i++)
			gfar_clean_rx_ring(priv->rx_queue[i],
					   priv->rx_queue[i]->rx_ring_size);

		/* Now we take down the rings to rebuild them */
		stop_gfar(dev);

		dev->features = features;

		err = startup_gfar(dev);
		netif_tx_wake_all_queues(dev);
	}
	return err;
}
static uint32_t gfar_get_msglevel(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	return priv->msg_enable;
}
static void gfar_set_msglevel(struct net_device *dev, uint32_t data)
{
	struct gfar_private *priv = netdev_priv(dev);

	priv->msg_enable = data;
}
static void gfar_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) {
		wol->supported = WAKE_MAGIC;
		wol->wolopts = priv->wol_en ? WAKE_MAGIC : 0;
	} else {
		wol->supported = wol->wolopts = 0;
	}
}
static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long flags;

	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
	    wol->wolopts != 0)
		return -EINVAL;

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC);

	spin_lock_irqsave(&priv->bflock, flags);
	priv->wol_en = !!device_may_wakeup(&dev->dev);
	spin_unlock_irqrestore(&priv->bflock, flags);

	return 0;
}
static void ethflow_to_filer_rules(struct gfar_private *priv, u64 ethflow)
{
	u32 fcr = 0x0, fpr = FPR_FILER_MASK;

	if (ethflow & RXH_L2DA) {
		fcr = RQFCR_PID_DAH | RQFCR_CMP_NOMATCH |
		      RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;

		fcr = RQFCR_PID_DAL | RQFCR_CMP_NOMATCH |
		      RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

	if (ethflow & RXH_VLAN) {
		fcr = RQFCR_PID_VID | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

	if (ethflow & RXH_IP_SRC) {
		fcr = RQFCR_PID_SIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

	if (ethflow & (RXH_IP_DST)) {
		fcr = RQFCR_PID_DIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

	if (ethflow & RXH_L3_PROTO) {
		fcr = RQFCR_PID_L4P | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

	if (ethflow & RXH_L4_B_0_1) {
		fcr = RQFCR_PID_SPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

	if (ethflow & RXH_L4_B_2_3) {
		fcr = RQFCR_PID_DPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}
}
static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow,
				       u64 class)
{
	unsigned int last_rule_idx = priv->cur_filer_idx;
	unsigned int cmp_rqfpr;
	unsigned int *local_rqfpr;
	unsigned int *local_rqfcr;
	int i = 0x0, k = 0x0;
	int j = MAX_FILER_IDX, l = 0x0;
	int ret = 1;

	local_rqfpr = kmalloc_array(MAX_FILER_IDX + 1, sizeof(unsigned int),
				    GFP_KERNEL);
	local_rqfcr = kmalloc_array(MAX_FILER_IDX + 1, sizeof(unsigned int),
				    GFP_KERNEL);
	if (!local_rqfpr || !local_rqfcr) {
		ret = 0;
		goto err;
	}

	switch (class) {
	case TCP_V4_FLOW:
		cmp_rqfpr = RQFPR_IPV4 | RQFPR_TCP;
		break;
	case UDP_V4_FLOW:
		cmp_rqfpr = RQFPR_IPV4 | RQFPR_UDP;
		break;
	case TCP_V6_FLOW:
		cmp_rqfpr = RQFPR_IPV6 | RQFPR_TCP;
		break;
	case UDP_V6_FLOW:
		cmp_rqfpr = RQFPR_IPV6 | RQFPR_UDP;
		break;
	default:
		pr_err("Right now this class is not supported\n");
		ret = 0;
		goto err;
	}

	for (i = 0; i < MAX_FILER_IDX + 1; i++) {
		local_rqfpr[j] = priv->ftp_rqfpr[i];
		local_rqfcr[j] = priv->ftp_rqfcr[i];
		j--;
		if ((priv->ftp_rqfcr[i] ==
		     (RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND)) &&
		    (priv->ftp_rqfpr[i] == cmp_rqfpr))
			break;
	}

	if (i == MAX_FILER_IDX + 1) {
		pr_err("No parse rule found, can't create hash rules\n");
		ret = 0;
		goto err;
	}

	/* If a match was found, then it begins the starting of a cluster rule
	 * if it was already programmed, we need to overwrite these rules
	 */
	for (l = i+1; l < MAX_FILER_IDX; l++) {
		if ((priv->ftp_rqfcr[l] & RQFCR_CLE) &&
		    !(priv->ftp_rqfcr[l] & RQFCR_AND)) {
			priv->ftp_rqfcr[l] = RQFCR_CLE | RQFCR_CMP_EXACT |
					     RQFCR_HASHTBL_0 | RQFCR_PID_MASK;
			priv->ftp_rqfpr[l] = FPR_FILER_MASK;
			gfar_write_filer(priv, l, priv->ftp_rqfcr[l],
					 priv->ftp_rqfpr[l]);
			break;
		}

		if (!(priv->ftp_rqfcr[l] & RQFCR_CLE) &&
		    (priv->ftp_rqfcr[l] & RQFCR_AND))
			continue;
		else {
			local_rqfpr[j] = priv->ftp_rqfpr[l];
			local_rqfcr[j] = priv->ftp_rqfcr[l];
			j--;
		}
	}

	priv->cur_filer_idx = l - 1;
	last_rule_idx = l;

	/* Hash rules */
	ethflow_to_filer_rules(priv, ethflow);

	/* Write back the popped out rules again */
	for (k = j+1; k < MAX_FILER_IDX; k++) {
		priv->ftp_rqfpr[priv->cur_filer_idx] = local_rqfpr[k];
		priv->ftp_rqfcr[priv->cur_filer_idx] = local_rqfcr[k];
		gfar_write_filer(priv, priv->cur_filer_idx,
				 local_rqfcr[k], local_rqfpr[k]);
		if (!priv->cur_filer_idx)
			break;
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

err:
	kfree(local_rqfcr);
	kfree(local_rqfpr);
	return ret;
}
static int gfar_set_hash_opts(struct gfar_private *priv,
			      struct ethtool_rxnfc *cmd)
{
	/* write the filer rules here */
	if (!gfar_ethflow_to_filer_table(priv, cmd->data, cmd->flow_type))
		return -EINVAL;

	return 0;
}
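/* From user space this path is reached through ethtool's RX flow hash
 * interface, e.g. (example command, not part of this driver):
 *
 *	ethtool -N eth0 rx-flow-hash tcp4 sdfn
 *
 * which requests hashing on src/dst IP and src/dst port for TCP over IPv4.
 */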
static int gfar_check_filer_hardware(struct gfar_private *priv)
{
	struct gfar __iomem *regs = NULL;
	u32 i;

	regs = priv->gfargrp[0].regs;

	/* Check if we are in FIFO mode */
	i = gfar_read(&regs->ecntrl);
	i &= ECNTRL_FIFM;
	if (i == ECNTRL_FIFM) {
		netdev_notice(priv->ndev, "Interface in FIFO mode\n");
		i = gfar_read(&regs->rctrl);
		i &= RCTRL_PRSDEP_MASK | RCTRL_PRSFM;
		if (i == (RCTRL_PRSDEP_MASK | RCTRL_PRSFM)) {
			netdev_info(priv->ndev,
				    "Receive Queue Filtering enabled\n");
		} else {
			netdev_warn(priv->ndev,
				    "Receive Queue Filtering disabled\n");
			return -EOPNOTSUPP;
		}
	} else {
		/* Or in standard mode */
		i = gfar_read(&regs->rctrl);
		i &= RCTRL_PRSDEP_MASK;
		if (i == RCTRL_PRSDEP_MASK) {
			netdev_info(priv->ndev,
				    "Receive Queue Filtering enabled\n");
		} else {
			netdev_warn(priv->ndev,
				    "Receive Queue Filtering disabled\n");
			return -EOPNOTSUPP;
		}
	}

	/* Sets the properties for arbitrary filer rule
	 * to the first 4 Layer 4 Bytes
	 */
	regs->rbifx = 0xC0C1C2C3;
	return 0;
}
static int gfar_comp_asc(const void *a, const void *b)
{
	return memcmp(a, b, 4);
}
static int gfar_comp_desc(const void *a, const void *b)
{
	return -memcmp(a, b, 4);
}
static void gfar_swap(void *a, void *b, int size)
{
	u32 t[4];

	/* Swap the two entries via a temporary buffer; size is at most
	 * sizeof(struct gfar_mask_entry), which fits in t[].
	 */
	memcpy(t, a, size);
	memcpy(a, b, size);
	memcpy(b, t, size);
}
/* Write a mask to filer cache */
static void gfar_set_mask(u32 mask, struct filer_table *tab)
{
	tab->fe[tab->index].ctrl = RQFCR_AND | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	tab->fe[tab->index].prop = mask;
	tab->index++;
}
/* Sets parse bits (e.g. IP or TCP) */
static void gfar_set_parse_bits(u32 value, u32 mask, struct filer_table *tab)
{
	gfar_set_mask(mask, tab);
	tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_PID_PARSE |
				   RQFCR_AND;
	tab->fe[tab->index].prop = value;
	tab->index++;
}
static void gfar_set_general_attribute(u32 value, u32 mask, u32 flag,
				       struct filer_table *tab)
{
	gfar_set_mask(mask, tab);
	tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_AND | flag;
	tab->fe[tab->index].prop = value;
	tab->index++;
}
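/* Note on the encoding above: each attribute occupies two consecutive
 * filer-cache entries, a mask entry (written by gfar_set_mask()) followed
 * by a value entry carrying the property ID in 'flag', chained to the
 * following rule with RQFCR_AND. So a rule built from N attributes
 * consumes 2*N entries plus the parse-bit pair.
 */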
/* For setting a tuple of value and mask of type flag
 * Example:
 * IP-Src = 10.0.0.0/255.0.0.0
 * value: 0x0A000000 mask: FF000000 flag: RQFPR_IPV4
 *
 * Ethtool gives us a value=0 and mask=~0 for a don't-care tuple
 * For a don't-care mask it gives us a 0
 *
 * The don't-care check and the mask adjustment for mask=0 are done for VLAN
 * and MAC stuff on an upper level (due to missing information on this level).
 * For these guys we can discard them if they are value=0 and mask=0.
 *
 * Further, all masks are one-padded for better hardware efficiency.
 */
static void gfar_set_attribute(u32 value, u32 mask, u32 flag,
			       struct filer_table *tab)
{
	switch (flag) {
		/* 3bit */
	case RQFCR_PID_PRI:
		if (!(value | mask))
			return;
		mask |= RQFCR_PID_PRI_MASK;
		break;
		/* 8bit */
	case RQFCR_PID_L4P:
	case RQFCR_PID_TOS:
		if (!~(mask | RQFCR_PID_L4P_MASK))
			return;
		if (!mask)
			mask = ~0;
		else
			mask |= RQFCR_PID_L4P_MASK;
		break;
		/* 12bit */
	case RQFCR_PID_VID:
		if (!(value | mask))
			return;
		mask |= RQFCR_PID_VID_MASK;
		break;
		/* 16bit */
	case RQFCR_PID_DPT:
	case RQFCR_PID_SPT:
		if (!~(mask | RQFCR_PID_PORT_MASK))
			return;
		if (!mask)
			mask = ~0;
		else
			mask |= RQFCR_PID_PORT_MASK;
		break;
		/* 24bit */
	case RQFCR_PID_DAH:
	case RQFCR_PID_DAL:
	case RQFCR_PID_SAH:
	case RQFCR_PID_SAL:
		if (!(value | mask))
			return;
		mask |= RQFCR_PID_MAC_MASK;
		break;
		/* for all real 32bit masks */
	default:
		if (!~mask)
			return;
		if (!mask)
			mask = ~0;
		break;
	}
	gfar_set_general_attribute(value, mask, flag, tab);
}
/* Translates value and mask for UDP, TCP or SCTP */
static void gfar_set_basic_ip(struct ethtool_tcpip4_spec *value,
			      struct ethtool_tcpip4_spec *mask,
			      struct filer_table *tab)
{
	gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab);
	gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab);
	gfar_set_attribute(value->pdst, mask->pdst, RQFCR_PID_DPT, tab);
	gfar_set_attribute(value->psrc, mask->psrc, RQFCR_PID_SPT, tab);
	gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
}
/* Translates value and mask for RAW-IP4 */
static void gfar_set_user_ip(struct ethtool_usrip4_spec *value,
			     struct ethtool_usrip4_spec *mask,
			     struct filer_table *tab)
{
	gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab);
	gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab);
	gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
	gfar_set_attribute(value->proto, mask->proto, RQFCR_PID_L4P, tab);
	gfar_set_attribute(value->l4_4_bytes, mask->l4_4_bytes, RQFCR_PID_ARB,
			   tab);
}
/* Translates value and mask for ETHER spec */
static void gfar_set_ether(struct ethhdr *value, struct ethhdr *mask,
			   struct filer_table *tab)
{
	u32 upper_temp_mask = 0;
	u32 lower_temp_mask = 0;

	/* Source address */
	if (!is_broadcast_ether_addr(mask->h_source)) {
		if (is_zero_ether_addr(mask->h_source)) {
			upper_temp_mask = 0xFFFFFFFF;
			lower_temp_mask = 0xFFFFFFFF;
		} else {
			upper_temp_mask = mask->h_source[0] << 16 |
					  mask->h_source[1] << 8  |
					  mask->h_source[2];
			lower_temp_mask = mask->h_source[3] << 16 |
					  mask->h_source[4] << 8  |
					  mask->h_source[5];
		}
		/* Upper 24bit */
		gfar_set_attribute(value->h_source[0] << 16 |
				   value->h_source[1] << 8  |
				   value->h_source[2],
				   upper_temp_mask, RQFCR_PID_SAH, tab);
		/* And the same for the lower part */
		gfar_set_attribute(value->h_source[3] << 16 |
				   value->h_source[4] << 8  |
				   value->h_source[5],
				   lower_temp_mask, RQFCR_PID_SAL, tab);
	}
	/* Destination address */
	if (!is_broadcast_ether_addr(mask->h_dest)) {
		/* Special for destination is limited broadcast */
		if ((is_broadcast_ether_addr(value->h_dest) &&
		     is_zero_ether_addr(mask->h_dest))) {
			gfar_set_parse_bits(RQFPR_EBC, RQFPR_EBC, tab);
		} else {
			if (is_zero_ether_addr(mask->h_dest)) {
				upper_temp_mask = 0xFFFFFFFF;
				lower_temp_mask = 0xFFFFFFFF;
			} else {
				upper_temp_mask = mask->h_dest[0] << 16 |
						  mask->h_dest[1] << 8  |
						  mask->h_dest[2];
				lower_temp_mask = mask->h_dest[3] << 16 |
						  mask->h_dest[4] << 8  |
						  mask->h_dest[5];
			}
			/* Upper 24bit */
			gfar_set_attribute(value->h_dest[0] << 16 |
					   value->h_dest[1] << 8  |
					   value->h_dest[2],
					   upper_temp_mask, RQFCR_PID_DAH, tab);
			/* And the same for the lower part */
			gfar_set_attribute(value->h_dest[3] << 16 |
					   value->h_dest[4] << 8  |
					   value->h_dest[5],
					   lower_temp_mask, RQFCR_PID_DAL, tab);
		}
	}

	gfar_set_attribute(value->h_proto, mask->h_proto, RQFCR_PID_ETY, tab);
}
/* Convert a rule to binary filter format of gianfar */
static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
				 struct filer_table *tab)
{
	u32 vlan = 0, vlan_mask = 0;
	u32 id = 0, id_mask = 0;
	u32 cfi = 0, cfi_mask = 0;
	u32 prio = 0, prio_mask = 0;
	u32 old_index = tab->index;

	/* Check if vlan is wanted */
	if ((rule->flow_type & FLOW_EXT) && (rule->m_ext.vlan_tci != 0xFFFF)) {
		if (!rule->m_ext.vlan_tci)
			rule->m_ext.vlan_tci = 0xFFFF;

		vlan = RQFPR_VLN;
		vlan_mask = RQFPR_VLN;

		/* Separate the fields */
		id = rule->h_ext.vlan_tci & VLAN_VID_MASK;
		id_mask = rule->m_ext.vlan_tci & VLAN_VID_MASK;
		cfi = rule->h_ext.vlan_tci & VLAN_CFI_MASK;
		cfi_mask = rule->m_ext.vlan_tci & VLAN_CFI_MASK;
		prio = (rule->h_ext.vlan_tci & VLAN_PRIO_MASK) >>
		       VLAN_PRIO_SHIFT;
		prio_mask = (rule->m_ext.vlan_tci & VLAN_PRIO_MASK) >>
			    VLAN_PRIO_SHIFT;

		if (cfi == VLAN_TAG_PRESENT && cfi_mask == VLAN_TAG_PRESENT) {
			vlan |= RQFPR_CFI;
			vlan_mask |= RQFPR_CFI;
		} else if (cfi != VLAN_TAG_PRESENT &&
			   cfi_mask == VLAN_TAG_PRESENT) {
			vlan_mask |= RQFPR_CFI;
		}
	}

	switch (rule->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_TCP | vlan,
				    RQFPR_IPV4 | RQFPR_TCP | vlan_mask, tab);
		gfar_set_basic_ip(&rule->h_u.tcp_ip4_spec,
				  &rule->m_u.tcp_ip4_spec, tab);
		break;
	case UDP_V4_FLOW:
		gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_UDP | vlan,
				    RQFPR_IPV4 | RQFPR_UDP | vlan_mask, tab);
		gfar_set_basic_ip(&rule->h_u.udp_ip4_spec,
				  &rule->m_u.udp_ip4_spec, tab);
		break;
	case SCTP_V4_FLOW:
		gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
				    tab);
		gfar_set_attribute(132, 0, RQFCR_PID_L4P, tab);
		gfar_set_basic_ip((struct ethtool_tcpip4_spec *)&rule->h_u,
				  (struct ethtool_tcpip4_spec *)&rule->m_u,
				  tab);
		break;
	case IP_USER_FLOW:
		gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
				    tab);
		gfar_set_user_ip((struct ethtool_usrip4_spec *) &rule->h_u,
				 (struct ethtool_usrip4_spec *) &rule->m_u,
				 tab);
		break;
	case ETHER_FLOW:
		gfar_set_parse_bits(vlan, vlan_mask, tab);
		gfar_set_ether((struct ethhdr *) &rule->h_u,
			       (struct ethhdr *) &rule->m_u, tab);
		break;
	default:
		return -1;
	}

	/* Set the vlan attributes in the end */
	if (vlan) {
		gfar_set_attribute(id, id_mask, RQFCR_PID_VID, tab);
		gfar_set_attribute(prio, prio_mask, RQFCR_PID_PRI, tab);
	}

	/* If there has been nothing written till now, it must be a default */
	if (tab->index == old_index) {
		gfar_set_mask(0xFFFFFFFF, tab);
		tab->fe[tab->index].ctrl = 0x20;
		tab->fe[tab->index].prop = 0x0;
		tab->index++;
	}

	/* Remove last AND */
	tab->fe[tab->index - 1].ctrl &= (~RQFCR_AND);

	/* Specify which queue to use or to drop */
	if (rule->ring_cookie == RX_CLS_FLOW_DISC)
		tab->fe[tab->index - 1].ctrl |= RQFCR_RJE;
	else
		tab->fe[tab->index - 1].ctrl |= (rule->ring_cookie << 10);

	/* Only big enough entries can be clustered */
	if (tab->index > (old_index + 2)) {
		tab->fe[old_index + 1].ctrl |= RQFCR_CLE;
		tab->fe[tab->index - 1].ctrl |= RQFCR_CLE;
	}

	/* In rare cases the cache can be full while there is
	 * free space in hw
	 */
	if (tab->index > MAX_FILER_CACHE_IDX - 1)
		return -EBUSY;

	return 0;
}
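/* Illustrative walk-through (hypothetical rule, not driver output): a
 * "flow-type tcp4 dst-port 80 action 1" rule becomes a parse-bits pair
 * matching the IPv4+TCP parse results, followed by mask/value pairs for
 * the TCP/IPv4 tuple fields that are not don't-care, with the last entry
 * carrying the queue number in the RQFCR queue field (ring_cookie << 10).
 */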
/* Copy size filer entries */
static void gfar_copy_filer_entries(struct gfar_filer_entry dst[0],
				    struct gfar_filer_entry src[0], s32 size)
{
	while (size > 0) {
		size--;
		dst[size].ctrl = src[size].ctrl;
		dst[size].prop = src[size].prop;
	}
}
/* Delete the contents of the filer-table between start and end
 * and collapse them
 */
static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab)
{
	int length;

	if (end > MAX_FILER_CACHE_IDX || end < begin)
		return -EINVAL;

	end++;
	length = end - begin;

	/* Copy the rest of the rules to the front */
	while (end < tab->index) {
		tab->fe[begin].ctrl = tab->fe[end].ctrl;
		tab->fe[begin++].prop = tab->fe[end++].prop;
	}

	/* Fill up with don't cares */
	while (begin < tab->index) {
		tab->fe[begin].ctrl = 0x60;
		tab->fe[begin].prop = 0xFFFFFFFF;
		begin++;
	}

	tab->index -= length;
	return 0;
}
/* Make space on the wanted location */
static int gfar_expand_filer_entries(u32 begin, u32 length,
				     struct filer_table *tab)
{
	if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX ||
	    begin > MAX_FILER_CACHE_IDX)
		return -EINVAL;

	gfar_copy_filer_entries(&(tab->fe[begin + length]), &(tab->fe[begin]),
				tab->index - length + 1);

	tab->index += length;
	return 0;
}
static int gfar_get_next_cluster_start(int start, struct filer_table *tab)
{
	for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
	     start++) {
		if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
		    (RQFCR_AND | RQFCR_CLE))
			return start;
	}
	return -1;
}
static int gfar_get_next_cluster_end(int start, struct filer_table *tab)
{
	for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
	     start++) {
		if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
		    (RQFCR_CLE))
			return start;
	}
	return -1;
}
/* Uses hardware's clustering option to reduce
 * the number of filer table entries
 */
static void gfar_cluster_filer(struct filer_table *tab)
{
	s32 i = -1, j, iend, jend;

	while ((i = gfar_get_next_cluster_start(++i, tab)) != -1) {
		j = i;
		while ((j = gfar_get_next_cluster_start(++j, tab)) != -1) {
			/* The cluster entries self and the previous one
			 * (a mask) must be identical!
			 */
			if (tab->fe[i].ctrl != tab->fe[j].ctrl)
				break;
			if (tab->fe[i].prop != tab->fe[j].prop)
				break;
			if (tab->fe[i - 1].ctrl != tab->fe[j - 1].ctrl)
				break;
			if (tab->fe[i - 1].prop != tab->fe[j - 1].prop)
				break;

			iend = gfar_get_next_cluster_end(i, tab);
			jend = gfar_get_next_cluster_end(j, tab);
			if (jend == -1 || iend == -1)
				break;

			/* First we make some free space, where our cluster
			 * element should be. Then we copy it there and finally
			 * delete it from its old location.
			 */
			if (gfar_expand_filer_entries(iend, (jend - j), tab) ==
			    -EINVAL)
				break;

			gfar_copy_filer_entries(&(tab->fe[iend + 1]),
						&(tab->fe[jend + 1]), jend - j);

			if (gfar_trim_filer_entries(jend - 1,
						    jend + (jend - j),
						    tab) == -EINVAL)
				return;

			/* Mask out cluster bit */
			tab->fe[iend].ctrl &= ~(RQFCR_CLE);
		}
	}
}
/* Swaps the masked bits of a1<>a2 and b1<>b2 */
static void gfar_swap_bits(struct gfar_filer_entry *a1,
			   struct gfar_filer_entry *a2,
			   struct gfar_filer_entry *b1,
			   struct gfar_filer_entry *b2, u32 mask)
{
	u32 temp[4];

	temp[0] = a1->ctrl & mask;
	temp[1] = a2->ctrl & mask;
	temp[2] = b1->ctrl & mask;
	temp[3] = b2->ctrl & mask;

	a1->ctrl &= ~mask;
	a2->ctrl &= ~mask;
	b1->ctrl &= ~mask;
	b2->ctrl &= ~mask;

	a1->ctrl |= temp[1];
	a2->ctrl |= temp[0];
	b1->ctrl |= temp[3];
	b2->ctrl |= temp[2];
}
/* Generate a list consisting of masks values with their start and
 * end of validity and block as indicator for parts belonging
 * together (glued by ANDs) in mask_table
 */
static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table,
				    struct filer_table *tab)
{
	u32 i, and_index = 0, block_index = 1;

	for (i = 0; i < tab->index; i++) {

		/* LSByte of control = 0 sets a mask */
		if (!(tab->fe[i].ctrl & 0xF)) {
			mask_table[and_index].mask = tab->fe[i].prop;
			mask_table[and_index].start = i;
			mask_table[and_index].block = block_index;
			if (and_index >= 1)
				mask_table[and_index - 1].end = i - 1;
			and_index++;
		}
		/* cluster starts and ends will be separated because they
		 * should hold their position
		 */
		if (tab->fe[i].ctrl & RQFCR_CLE)
			block_index++;
		/* A not set AND indicates the end of a depended block */
		if (!(tab->fe[i].ctrl & RQFCR_AND))
			block_index++;
	}

	mask_table[and_index - 1].end = i - 1;

	return and_index;
}
/* Sorts the entries of mask_table by the values of the masks.
 * Important: The 0xFF80 flags of the first and last entry of a
 * block must hold their position (which queue, CLusterEnable, ReJEct,
 * AND)
 */
static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
				 struct filer_table *temp_table, u32 and_index)
{
	/* Pointer to compare function (_asc or _desc) */
	int (*gfar_comp)(const void *, const void *);

	u32 i, size = 0, start = 0, prev = 1;
	u32 old_first, old_last, new_first, new_last;

	gfar_comp = &gfar_comp_desc;

	for (i = 0; i < and_index; i++) {
		if (prev != mask_table[i].block) {
			old_first = mask_table[start].start + 1;
			old_last = mask_table[i - 1].end;
			sort(mask_table + start, size,
			     sizeof(struct gfar_mask_entry),
			     gfar_comp, &gfar_swap);

			/* Toggle order for every block. This makes the
			 * thing more efficient!
			 */
			if (gfar_comp == gfar_comp_desc)
				gfar_comp = &gfar_comp_asc;
			else
				gfar_comp = &gfar_comp_desc;

			new_first = mask_table[start].start + 1;
			new_last = mask_table[i - 1].end;

			gfar_swap_bits(&temp_table->fe[new_first],
				       &temp_table->fe[old_first],
				       &temp_table->fe[new_last],
				       &temp_table->fe[old_last],
				       RQFCR_QUEUE | RQFCR_CLE |
				       RQFCR_RJE | RQFCR_AND);

			start = i;
			size = 0;
		}
		size++;
		prev = mask_table[i].block;
	}
}
/* Reduces the number of masks needed in the filer table to save entries
 * This is done by sorting the masks of a depended block. A depended block is
 * identified by gluing ANDs or CLE. The sorting order toggles after every
 * block. Of course entries in scope of a mask must change their location with
 * it.
 */
static int gfar_optimize_filer_masks(struct filer_table *tab)
{
	struct filer_table *temp_table;
	struct gfar_mask_entry *mask_table;

	u32 and_index = 0, previous_mask = 0, i = 0, j = 0, size = 0;
	s32 ret = 0;

	/* We need a copy of the filer table because
	 * we want to change its order
	 */
	temp_table = kmemdup(tab, sizeof(*temp_table), GFP_KERNEL);
	if (temp_table == NULL)
		return -ENOMEM;

	mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1,
			     sizeof(struct gfar_mask_entry), GFP_KERNEL);

	if (mask_table == NULL) {
		ret = -ENOMEM;
		goto end;
	}

	and_index = gfar_generate_mask_table(mask_table, tab);

	gfar_sort_mask_table(mask_table, temp_table, and_index);

	/* Now we can copy the data from our duplicated filer table to
	 * the real one in the order the mask table says
	 */
	for (i = 0; i < and_index; i++) {
		size = mask_table[i].end - mask_table[i].start + 1;
		gfar_copy_filer_entries(&(tab->fe[j]),
					&(temp_table->fe[mask_table[i].start]),
					size);
		j += size;
	}

	/* And finally we just have to check for duplicated masks and drop the
	 * second ones
	 */
	for (i = 0; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
		if (tab->fe[i].ctrl == 0x80) {
			previous_mask = i++;
			break;
		}
	}
	for (; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
		if (tab->fe[i].ctrl == 0x80) {
			if (tab->fe[i].prop == tab->fe[previous_mask].prop) {
				/* Two identical ones found!
				 * So drop the second one!
				 */
				gfar_trim_filer_entries(i, i, tab);
			} else
				/* Not identical! */
				previous_mask = i;
		}
	}

	kfree(mask_table);
end:	kfree(temp_table);
	return ret;
}
/* Write the bit-pattern from software's buffer to hardware registers */
static int gfar_write_filer_table(struct gfar_private *priv,
				  struct filer_table *tab)
{
	u32 i = 0;

	if (tab->index > MAX_FILER_IDX - 1)
		return -EBUSY;

	/* Avoid inconsistent filer table to be processed */
	lock_rx_qs(priv);

	/* Fill regular entries */
	for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].prop);
	     i++)
		gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
	/* Fill the rest with fall-throughs */
	for (; i < MAX_FILER_IDX - 1; i++)
		gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF);
	/* Last entry must be default accept
	 * because that's what people expect
	 */
	gfar_write_filer(priv, i, 0x20, 0x0);

	unlock_rx_qs(priv);

	return 0;
}
static int gfar_check_capability(struct ethtool_rx_flow_spec *flow,
				 struct gfar_private *priv)
{
	if (flow->flow_type & FLOW_EXT) {
		if (~flow->m_ext.data[0] || ~flow->m_ext.data[1])
			netdev_warn(priv->ndev,
				    "User-specific data not supported!\n");
		if (~flow->m_ext.vlan_etype)
			netdev_warn(priv->ndev,
				    "VLAN-etype not supported!\n");
	}

	if (flow->flow_type == IP_USER_FLOW)
		if (flow->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
			netdev_warn(priv->ndev,
				    "IP-Version differing from IPv4 not supported!\n");

	return 0;
}
static int gfar_process_filer_changes(struct gfar_private *priv)
{
	struct ethtool_flow_spec_container *j;
	struct filer_table *tab;
	s32 i = 0;
	s32 ret = 0;

	/* So index is set to zero, too! */
	tab = kzalloc(sizeof(*tab), GFP_KERNEL);
	if (tab == NULL)
		return -ENOMEM;

	/* Now convert the existing filer data from flow_spec into
	 * filer tables binary format
	 */
	list_for_each_entry(j, &priv->rx_list.list, list) {
		ret = gfar_convert_to_filer(&j->fs, tab);
		if (ret == -EBUSY) {
			netdev_err(priv->ndev,
				   "Rule not added: No free space!\n");
			goto end;
		}
		if (ret == -1) {
			netdev_err(priv->ndev,
				   "Rule not added: Unsupported Flow-type!\n");
			goto end;
		}
	}

	i = tab->index;

	/* Optimizations to save entries */
	gfar_cluster_filer(tab);
	gfar_optimize_filer_masks(tab);

	pr_debug("\n\tSummary:\n"
		 "\tData on hardware: %d\n"
		 "\tCompression rate: %d%%\n",
		 tab->index, 100 - (100 * tab->index) / i);

	/* Write everything to hardware */
	ret = gfar_write_filer_table(priv, tab);
	if (ret == -EBUSY) {
		netdev_err(priv->ndev, "Rule not added: No free space!\n");
		goto end;
	}

end:
	kfree(tab);
	return ret;
}
static void gfar_invert_masks(struct ethtool_rx_flow_spec *flow)
{
	u32 i = 0;

	for (i = 0; i < sizeof(flow->m_u); i++)
		flow->m_u.hdata[i] ^= 0xFF;

	flow->m_ext.vlan_etype ^= 0xFFFF;
	flow->m_ext.vlan_tci ^= 0xFFFF;
	flow->m_ext.data[0] ^= ~0;
	flow->m_ext.data[1] ^= ~0;
}
static int gfar_add_cls(struct gfar_private *priv,
			struct ethtool_rx_flow_spec *flow)
{
	struct ethtool_flow_spec_container *temp, *comp;
	int ret = 0;

	temp = kmalloc(sizeof(*temp), GFP_KERNEL);
	if (temp == NULL)
		return -ENOMEM;
	memcpy(&temp->fs, flow, sizeof(temp->fs));

	gfar_invert_masks(&temp->fs);
	ret = gfar_check_capability(&temp->fs, priv);
	if (ret)
		goto clean_mem;
	/* Link in the new element at the right @location */
	if (list_empty(&priv->rx_list.list)) {
		ret = gfar_check_filer_hardware(priv);
		if (ret != 0)
			goto clean_mem;
		list_add(&temp->list, &priv->rx_list.list);
		goto process;
	} else {
		list_for_each_entry(comp, &priv->rx_list.list, list) {
			if (comp->fs.location > flow->location) {
				list_add_tail(&temp->list, &comp->list);
				goto process;
			}
			if (comp->fs.location == flow->location) {
				netdev_err(priv->ndev,
					   "Rule not added: ID %d not free!\n",
					   flow->location);
				ret = -EBUSY;
				goto clean_mem;
			}
		}
		list_add_tail(&temp->list, &priv->rx_list.list);
	}

process:
	ret = gfar_process_filer_changes(priv);
	if (ret)
		goto clean_list;
	priv->rx_list.count++;
	return ret;

clean_list:
	list_del(&temp->list);
clean_mem:
	kfree(temp);
	return ret;
}
static int gfar_del_cls(struct gfar_private *priv, u32 loc)
{
	struct ethtool_flow_spec_container *comp;
	u32 ret = -EINVAL;

	if (list_empty(&priv->rx_list.list))
		return ret;

	list_for_each_entry(comp, &priv->rx_list.list, list) {
		if (comp->fs.location == loc) {
			list_del(&comp->list);
			kfree(comp);
			priv->rx_list.count--;
			gfar_process_filer_changes(priv);
			ret = 0;
			break;
		}
	}

	return ret;
}
static int gfar_get_cls(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
{
	struct ethtool_flow_spec_container *comp;
	u32 ret = -EINVAL;

	list_for_each_entry(comp, &priv->rx_list.list, list) {
		if (comp->fs.location == cmd->fs.location) {
			memcpy(&cmd->fs, &comp->fs, sizeof(cmd->fs));
			gfar_invert_masks(&cmd->fs);
			ret = 0;
			break;
		}
	}

	return ret;
}
static int gfar_get_cls_all(struct gfar_private *priv,
			    struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct ethtool_flow_spec_container *comp;
	u32 i = 0;

	list_for_each_entry(comp, &priv->rx_list.list, list) {
		if (i == cmd->rule_cnt)
			return -EMSGSIZE;
		rule_locs[i] = comp->fs.location;
		i++;
	}

	cmd->data = MAX_FILER_IDX;
	cmd->rule_cnt = i;

	return 0;
}
static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct gfar_private *priv = netdev_priv(dev);
	int ret = 0;

	mutex_lock(&priv->rx_queue_access);

	switch (cmd->cmd) {
	case ETHTOOL_SRXFH:
		ret = gfar_set_hash_opts(priv, cmd);
		break;
	case ETHTOOL_SRXCLSRLINS:
		if ((cmd->fs.ring_cookie != RX_CLS_FLOW_DISC &&
		     cmd->fs.ring_cookie >= priv->num_rx_queues) ||
		    cmd->fs.location >= MAX_FILER_IDX) {
			ret = -EINVAL;
			break;
		}
		ret = gfar_add_cls(priv, &cmd->fs);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = gfar_del_cls(priv, cmd->fs.location);
		break;
	default:
		ret = -EINVAL;
	}

	mutex_unlock(&priv->rx_queue_access);

	return ret;
}
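/* Usage sketch (example commands, not part of this driver): classification
 * rules arrive here via ethtool's nfc/ntuple interface, e.g.
 *
 *	ethtool -N eth0 flow-type tcp4 dst-port 80 action 1
 *	ethtool -N eth0 delete 10
 *
 * where "action 1" selects RX queue 1 and "action -1" would drop the flow
 * (RX_CLS_FLOW_DISC, handled above).
 */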
static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			u32 *rule_locs)
{
	struct gfar_private *priv = netdev_priv(dev);
	int ret = 0;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = priv->num_rx_queues;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = priv->rx_list.count;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = gfar_get_cls(priv, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = gfar_get_cls_all(priv, cmd, rule_locs);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
int gfar_phc_index = -1;
EXPORT_SYMBOL(gfar_phc_index);
static int gfar_get_ts_info(struct net_device *dev,
			    struct ethtool_ts_info *info)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) {
		info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
					SOF_TIMESTAMPING_SOFTWARE;
		info->phc_index = -1;
		return 0;
	}
	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;
	info->phc_index = gfar_phc_index;
	info->tx_types = (1 << HWTSTAMP_TX_OFF) |
			 (1 << HWTSTAMP_TX_ON);
	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_ALL);
	return 0;
}
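/* The capabilities reported above are what user space sees from e.g.
 * "ethtool -T eth0" (example command): software-only timestamping when the
 * eTSEC timer block is absent, raw hardware timestamping plus a PTP clock
 * index when it is present.
 */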
const struct ethtool_ops gfar_ethtool_ops = {
	.get_settings = gfar_gsettings,
	.set_settings = gfar_ssettings,
	.get_drvinfo = gfar_gdrvinfo,
	.get_regs_len = gfar_reglen,
	.get_regs = gfar_get_regs,
	.get_link = ethtool_op_get_link,
	.get_coalesce = gfar_gcoalesce,
	.set_coalesce = gfar_scoalesce,
	.get_ringparam = gfar_gringparam,
	.set_ringparam = gfar_sringparam,
	.get_strings = gfar_gstrings,
	.get_sset_count = gfar_sset_count,
	.get_ethtool_stats = gfar_fill_stats,
	.get_msglevel = gfar_get_msglevel,
	.set_msglevel = gfar_set_msglevel,
	.get_wol = gfar_get_wol,
	.set_wol = gfar_set_wol,
	.set_rxnfc = gfar_set_nfc,
	.get_rxnfc = gfar_get_nfc,
	.get_ts_info = gfar_get_ts_info,
};