linux-2.6-openrd.git: drivers/staging/octeon/ethernet.c

/**********************************************************************
 * Author: Cavium Networks
 *
 * Contact: support@caviumnetworks.com
 * This file is part of the OCTEON SDK
 *
 * Copyright (c) 2003-2007 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this file; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * or visit http://www.gnu.org/licenses/.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium Networks for more information
 **********************************************************************/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/mii.h>

#include <net/dst.h>

#include <asm/octeon/octeon.h>

#include "ethernet-defines.h"
#include "octeon-ethernet.h"
#include "ethernet-mem.h"
#include "ethernet-rx.h"
#include "ethernet-tx.h"
#include "ethernet-mdio.h"
#include "ethernet-util.h"
#include "ethernet-proc.h"

#include "cvmx-pip.h"
#include "cvmx-pko.h"
#include "cvmx-fau.h"
#include "cvmx-ipd.h"
#include "cvmx-helper.h"

#include "cvmx-gmxx-defs.h"
#include "cvmx-smix-defs.h"

#if defined(CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS) \
	&& CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS
int num_packet_buffers = CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS;
#else
int num_packet_buffers = 1024;
#endif
module_param(num_packet_buffers, int, 0444);
MODULE_PARM_DESC(num_packet_buffers, "\n"
	"\tNumber of packet buffers to allocate and store in the\n"
	"\tFPA. By default, 1024 packet buffers are used unless\n"
	"\tCONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS is defined.");

int pow_receive_group = 15;
module_param(pow_receive_group, int, 0444);
MODULE_PARM_DESC(pow_receive_group, "\n"
	"\tPOW group to receive packets from. All ethernet hardware\n"
	"\twill be configured to send incoming packets to this POW\n"
	"\tgroup. Also any other software can submit packets to this\n"
	"\tgroup for the kernel to process.");

int pow_send_group = -1;
module_param(pow_send_group, int, 0644);
MODULE_PARM_DESC(pow_send_group, "\n"
	"\tPOW group to send packets to other software on. This\n"
	"\tcontrols the creation of the virtual device pow0.\n"
	"\talways_use_pow also depends on this value.");

int always_use_pow;
module_param(always_use_pow, int, 0444);
MODULE_PARM_DESC(always_use_pow, "\n"
	"\tWhen set, always send to the pow group. This will cause\n"
	"\tpackets sent to real ethernet devices to be sent to the\n"
	"\tPOW group instead of the hardware. Unless some other\n"
	"\tapplication changes the config, packets will still be\n"
	"\treceived from the low level hardware. Use this option\n"
	"\tto allow a CVMX app to intercept all packets from the\n"
	"\tLinux kernel. You must specify pow_send_group along with\n"
	"\tthis option.");

char pow_send_list[128] = "";
module_param_string(pow_send_list, pow_send_list, sizeof(pow_send_list), 0444);
MODULE_PARM_DESC(pow_send_list, "\n"
	"\tComma separated list of ethernet devices that should use the\n"
	"\tPOW for transmit instead of the actual ethernet hardware. This\n"
	"\tis a per port version of always_use_pow. always_use_pow takes\n"
	"\tprecedence over this list. For example, setting this to\n"
	"\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n"
	"\tusing the pow_send_group.");

static int disable_core_queueing = 1;
module_param(disable_core_queueing, int, 0444);
MODULE_PARM_DESC(disable_core_queueing, "\n"
	"\tWhen set, the networking core's tx_queue_len is set to zero. This\n"
	"\tallows packets to be sent without lock contention in the packet\n"
	"\tscheduler, which in some cases improves throughput.\n");

/**
 * Periodic timer to check auto negotiation
 */
static struct timer_list cvm_oct_poll_timer;

/**
 * Array of every ethernet device owned by this driver indexed by
 * the ipd input port number.
 */
struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS];

extern struct semaphore mdio_sem;

/**
 * Periodic timer tick for slow management operations
 *
 * @arg:    Device to check
 */
static void cvm_do_timer(unsigned long arg)
{
	int32_t skb_to_free, undo;
	int queues_per_port;
	int qos;
	struct octeon_ethernet *priv;
	static int port;

	if (port >= CVMX_PIP_NUM_INPUT_PORTS) {
		/*
		 * All ports have been polled. Start the next
		 * iteration through the ports in one second.
		 */
		port = 0;
		mod_timer(&cvm_oct_poll_timer, jiffies + HZ);
		return;
	}
	if (!cvm_oct_device[port])
		goto out;

	priv = netdev_priv(cvm_oct_device[port]);
	if (priv->poll) {
		/* skip polling if we don't get the lock */
		if (!down_trylock(&mdio_sem)) {
			priv->poll(cvm_oct_device[port]);
			up(&mdio_sem);
		}
	}

	queues_per_port = cvmx_pko_get_num_queues(port);
	/* Drain any pending packets in the free list */
	for (qos = 0; qos < queues_per_port; qos++) {
		if (skb_queue_len(&priv->tx_free_list[qos]) == 0)
			continue;
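		/*
		 * Note (an interpretation of the arithmetic below, not an
		 * original comment): the per-queue FAU location tracks how
		 * many transmitted skbs are waiting to be freed. The
		 * fetch-and-add claims up to MAX_SKB_TO_FREE of them, and
		 * the "undo" add returns whatever part of that claim was
		 * not actually pending.
		 */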
		skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
						       MAX_SKB_TO_FREE);
		undo = skb_to_free > 0 ?
			MAX_SKB_TO_FREE : skb_to_free + MAX_SKB_TO_FREE;
		if (undo > 0)
			cvmx_fau_atomic_add32(priv->fau + qos * 4, -undo);
		skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ?
			MAX_SKB_TO_FREE : -skb_to_free;
		cvm_oct_free_tx_skbs(priv, skb_to_free, qos, 1);
	}
	cvm_oct_device[port]->netdev_ops->ndo_get_stats(cvm_oct_device[port]);

out:
	port++;
	/*
	 * Poll the next port in a 50th of a second.
	 * This spreads the polling of ports out a little bit.
	 */
	mod_timer(&cvm_oct_poll_timer, jiffies + HZ / 50);
}

/**
 * Configure common hardware for all interfaces
 */
static __init void cvm_oct_configure_common_hw(void)
{
	int r;

	/* Setup the FPA */
	cvmx_fpa_enable();
	cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
			     num_packet_buffers);
	cvm_oct_mem_fill_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
			     num_packet_buffers);
	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
		cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
				     CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);

	if (USE_RED)
		cvmx_helper_setup_red(num_packet_buffers / 4,
				      num_packet_buffers / 8);

	/* Enable the MII interface */
	if (!octeon_is_simulation())
		cvmx_write_csr(CVMX_SMIX_EN(0), 1);

	/* Register an IRQ handler to receive POW interrupts */
	r = request_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group,
			cvm_oct_do_interrupt, IRQF_SHARED, "Ethernet",
			cvm_oct_device);

#if defined(CONFIG_SMP) && 0
	if (USE_MULTICORE_RECEIVE) {
		irq_set_affinity(OCTEON_IRQ_WORKQ0 + pow_receive_group,
				 cpu_online_mask);
	}
#endif
}

/**
 * Free a work queue entry received in an intercept callback.
 *
 * @work_queue_entry:
 *               Work queue entry to free
 * Returns Zero on success, Negative on failure.
 */
int cvm_oct_free_work(void *work_queue_entry)
{
	cvmx_wqe_t *work = work_queue_entry;

	int segments = work->word2.s.bufs;
	union cvmx_buf_ptr segment_ptr = work->packet_ptr;

	while (segments--) {
		union cvmx_buf_ptr next_ptr = *(union cvmx_buf_ptr *)
			cvmx_phys_to_ptr(segment_ptr.s.addr - 8);
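		/*
		 * The pointer to the next buffer is assumed to live in the
		 * 8 bytes immediately before the data in each buffer, which
		 * is what the "- 8" above reads (an assumption based on the
		 * buffer-chaining convention used elsewhere in this driver).
		 */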
		if (unlikely(!segment_ptr.s.i))
			cvmx_fpa_free(cvm_oct_get_buffer_ptr(segment_ptr),
				      segment_ptr.s.pool,
				      DONT_WRITEBACK(CVMX_FPA_PACKET_POOL_SIZE /
						     128));
		segment_ptr = next_ptr;
	}
	cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1));

	return 0;
}
EXPORT_SYMBOL(cvm_oct_free_work);

/**
 * Get the low level ethernet statistics
 *
 * @dev:    Device to get the statistics from
 * Returns Pointer to the statistics
 */
static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
{
	cvmx_pip_port_status_t rx_status;
	cvmx_pko_port_status_t tx_status;
	struct octeon_ethernet *priv = netdev_priv(dev);

	if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) {
		if (octeon_is_simulation()) {
			/* The simulator doesn't support statistics */
			memset(&rx_status, 0, sizeof(rx_status));
			memset(&tx_status, 0, sizeof(tx_status));
		} else {
			cvmx_pip_get_port_status(priv->port, 1, &rx_status);
			cvmx_pko_get_port_status(priv->port, 1, &tx_status);
		}
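
		/*
		 * The second argument to the two calls above is presumed to
		 * be a "clear" flag (an assumption about the CVMX helper
		 * API), so the hardware counters reset on each read and the
		 * deltas are accumulated into priv->stats below.
		 */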
		priv->stats.rx_packets += rx_status.inb_packets;
		priv->stats.tx_packets += tx_status.packets;
		priv->stats.rx_bytes += rx_status.inb_octets;
		priv->stats.tx_bytes += tx_status.octets;
		priv->stats.multicast += rx_status.multicast_packets;
		priv->stats.rx_crc_errors += rx_status.inb_errors;
		priv->stats.rx_frame_errors += rx_status.fcs_align_err_packets;

		/*
		 * The drop counter must be incremented atomically
		 * since the RX tasklet also increments it.
		 */
#ifdef CONFIG_64BIT
		atomic64_add(rx_status.dropped_packets,
			     (atomic64_t *)&priv->stats.rx_dropped);
#else
		atomic_add(rx_status.dropped_packets,
			   (atomic_t *)&priv->stats.rx_dropped);
#endif
	}

	return &priv->stats;
}

/**
 * Change the link MTU
 *
 * @dev:     Device to change
 * @new_mtu: The new MTU
 *
 * Returns Zero on success
 */
static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
	int vlan_bytes = 4;
#else
	int vlan_bytes = 0;
#endif
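
	/*
	 * Frame overhead used in the checks below: 14 bytes of Ethernet
	 * header plus 4 bytes of FCS, plus an optional 4-byte 802.1Q VLAN
	 * tag when VLAN support is configured.
	 */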
	/*
	 * Limit the MTU to make sure the ethernet packets are between
	 * 64 bytes and 65392 bytes.
	 */
	if ((new_mtu + 14 + 4 + vlan_bytes < 64)
	    || (new_mtu + 14 + 4 + vlan_bytes > 65392)) {
		pr_err("MTU must be between %d and %d.\n",
		       64 - 14 - 4 - vlan_bytes, 65392 - 14 - 4 - vlan_bytes);
		return -EINVAL;
	}
	dev->mtu = new_mtu;

	if ((interface < 2)
	    && (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		/* Add ethernet header and FCS, and VLAN if configured. */
		int max_packet = new_mtu + 14 + 4 + vlan_bytes;

		if (OCTEON_IS_MODEL(OCTEON_CN3XXX)
		    || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
			/* Signal errors on packets larger than the MTU */
			cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface),
				       max_packet);
		} else {
			/*
			 * Set the hardware to truncate packets larger
			 * than the MTU and smaller than 64 bytes.
			 */
			union cvmx_pip_frm_len_chkx frm_len_chk;
			frm_len_chk.u64 = 0;
			frm_len_chk.s.minlen = 64;
			frm_len_chk.s.maxlen = max_packet;
			cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface),
				       frm_len_chk.u64);
		}
		/*
		 * Set the hardware to truncate packets larger than
		 * the MTU. The jabber register must be set to a
		 * multiple of 8 bytes, so round up.
		 */
		cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface),
			       (max_packet + 7) & ~7u);
	}
	return 0;
}

/**
 * Set the multicast list
 *
 * @dev:    Device to work on
 */
static void cvm_oct_common_set_multicast_list(struct net_device *dev)
{
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);

	if ((interface < 2)
	    && (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		union cvmx_gmxx_rxx_adr_ctl control;
		control.u64 = 0;
		control.s.bcst = 1;	/* Allow broadcast MAC addresses */

		if (dev->mc_list || (dev->flags & IFF_ALLMULTI) ||
		    (dev->flags & IFF_PROMISC))
			/* Force accept multicast packets */
			control.s.mcst = 2;
		else
			/* Force reject multicast packets */
			control.s.mcst = 1;

		if (dev->flags & IFF_PROMISC)
			/*
			 * Reject matches if promisc. Since CAM is
			 * shut off, should accept everything.
			 */
			control.s.cam_mode = 0;
		else
			/* Filter packets based on the CAM */
			control.s.cam_mode = 1;

		gmx_cfg.u64 =
		    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64 & ~1ull);
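
		/*
		 * Bit 0 of GMX_PRT_CFG is assumed to be the port enable
		 * bit: the port is held disabled while the address filter
		 * is reprogrammed and is re-enabled by the final write
		 * below.
		 */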
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface),
			       control.u64);
		if (dev->flags & IFF_PROMISC)
			cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
				       (index, interface), 0);
		else
			cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
				       (index, interface), 1);

		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64);
	}
}

/**
 * Set the hardware MAC address for a device
 *
 * @dev:    Device to change the MAC address for
 * @addr:   Address structure to change it to. MAC address is addr + 2.
 * Returns Zero on success
 */
static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);
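
	/*
	 * addr points at a struct sockaddr, so the six MAC bytes start at
	 * offset 2, after the sa_family field.
	 */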
	memcpy(dev->dev_addr, addr + 2, 6);

	if ((interface < 2)
	    && (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		int i;
		uint8_t *ptr = addr;
		uint64_t mac = 0;
		for (i = 0; i < 6; i++)
			mac = (mac << 8) | (uint64_t) (ptr[i + 2]);

		gmx_cfg.u64 =
		    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64 & ~1ull);

		cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface),
			       ptr[2]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface),
			       ptr[3]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface),
			       ptr[4]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface),
			       ptr[5]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface),
			       ptr[6]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface),
			       ptr[7]);
		cvm_oct_common_set_multicast_list(dev);
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64);
	}
	return 0;
}

/**
 * Per network device initialization
 *
 * @dev:    Device to initialize
 * Returns Zero on success
 */
int cvm_oct_common_init(struct net_device *dev)
{
	static int count;
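	/*
	 * Build the address in struct sockaddr layout (two leading bytes
	 * for sa_family, then the six MAC bytes) so it can be handed
	 * straight to ndo_set_mac_address() below; each device gets the
	 * board's base MAC with the last byte offset by count.
	 */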
	char mac[8] = { 0x00, 0x00,
		octeon_bootinfo->mac_addr_base[0],
		octeon_bootinfo->mac_addr_base[1],
		octeon_bootinfo->mac_addr_base[2],
		octeon_bootinfo->mac_addr_base[3],
		octeon_bootinfo->mac_addr_base[4],
		octeon_bootinfo->mac_addr_base[5] + count
	};
	struct octeon_ethernet *priv = netdev_priv(dev);

	/*
	 * Force the interface to use the POW send if always_use_pow
	 * was specified or it is in the pow send list.
	 */
	if ((pow_send_group != -1)
	    && (always_use_pow || strstr(pow_send_list, dev->name)))
		priv->queue = -1;

	if (priv->queue != -1 && USE_HW_TCPUDP_CHECKSUM)
		dev->features |= NETIF_F_IP_CSUM;

	count++;

	/* We do our own locking, Linux doesn't need to */
	dev->features |= NETIF_F_LLTX;
	SET_ETHTOOL_OPS(dev, &cvm_oct_ethtool_ops);

	cvm_oct_mdio_setup_device(dev);
	dev->netdev_ops->ndo_set_mac_address(dev, mac);
	dev->netdev_ops->ndo_change_mtu(dev, dev->mtu);

	/*
	 * Zero out stats for port so we won't mistakenly show
	 * counters from the bootloader.
	 */
	memset(dev->netdev_ops->ndo_get_stats(dev), 0,
	       sizeof(struct net_device_stats));

	return 0;
}

void cvm_oct_common_uninit(struct net_device *dev)
{
	/* Currently nothing to do */
}

static const struct net_device_ops cvm_oct_npi_netdev_ops = {
	.ndo_init		= cvm_oct_common_init,
	.ndo_uninit		= cvm_oct_common_uninit,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_multicast_list	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_xaui_netdev_ops = {
	.ndo_init		= cvm_oct_xaui_init,
	.ndo_uninit		= cvm_oct_xaui_uninit,
	.ndo_open		= cvm_oct_xaui_open,
	.ndo_stop		= cvm_oct_xaui_stop,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_multicast_list	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_sgmii_netdev_ops = {
	.ndo_init		= cvm_oct_sgmii_init,
	.ndo_uninit		= cvm_oct_sgmii_uninit,
	.ndo_open		= cvm_oct_sgmii_open,
	.ndo_stop		= cvm_oct_sgmii_stop,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_multicast_list	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_spi_netdev_ops = {
	.ndo_init		= cvm_oct_spi_init,
	.ndo_uninit		= cvm_oct_spi_uninit,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_multicast_list	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_rgmii_netdev_ops = {
	.ndo_init		= cvm_oct_rgmii_init,
	.ndo_uninit		= cvm_oct_rgmii_uninit,
	.ndo_open		= cvm_oct_rgmii_open,
	.ndo_stop		= cvm_oct_rgmii_stop,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_multicast_list	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_pow_netdev_ops = {
	.ndo_init		= cvm_oct_common_init,
	.ndo_start_xmit		= cvm_oct_xmit_pow,
	.ndo_set_multicast_list	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};

/**
 * Module/driver initialization. Creates the Linux network
 * devices.
 *
 * Returns Zero on success
 */
static int __init cvm_oct_init_module(void)
{
	int num_interfaces;
	int interface;
	int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
	int qos;

	pr_notice("cavium-ethernet %s\n", OCTEON_ETHERNET_VERSION);

	cvm_oct_proc_initialize();
	cvm_oct_rx_initialize();
	cvm_oct_configure_common_hw();

	cvmx_helper_initialize_packet_io_global();

	/* Change the input group for all ports before input is enabled */
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port++) {
			union cvmx_pip_prt_tagx pip_prt_tagx;
			pip_prt_tagx.u64 =
			    cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));
			pip_prt_tagx.s.grp = pow_receive_group;
			cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
				       pip_prt_tagx.u64);
		}
	}

	cvmx_helper_ipd_and_packet_input_enable();

	memset(cvm_oct_device, 0, sizeof(cvm_oct_device));

	/*
	 * Initialize the FAU used for counting packet buffers that
	 * need to be freed.
	 */
	cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	if (pow_send_group != -1) {
		struct net_device *dev;

		pr_info("\tConfiguring device for POW only access\n");
		dev = alloc_etherdev(sizeof(struct octeon_ethernet));
		if (dev) {
			/* Initialize the device private structure. */
			struct octeon_ethernet *priv = netdev_priv(dev);
			memset(priv, 0, sizeof(struct octeon_ethernet));

			dev->netdev_ops = &cvm_oct_pow_netdev_ops;
			priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
			priv->port = CVMX_PIP_NUM_INPUT_PORTS;
			priv->queue = -1;
			strcpy(dev->name, "pow%d");
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);

			if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet "
				       "device for POW\n");
				kfree(dev);
			} else {
				cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
				pr_info("%s: POW send group %d, receive "
					"group %d\n",
					dev->name, pow_send_group,
					pow_receive_group);
			}
		} else {
			pr_err("Failed to allocate ethernet device "
			       "for POW\n");
		}
	}

	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		cvmx_helper_interface_mode_t imode =
		    cvmx_helper_interface_get_mode(interface);
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port++) {
			struct octeon_ethernet *priv;
			struct net_device *dev =
			    alloc_etherdev(sizeof(struct octeon_ethernet));
			if (!dev) {
				pr_err("Failed to allocate ethernet device "
				       "for port %d\n", port);
				continue;
			}
			if (disable_core_queueing)
				dev->tx_queue_len = 0;

			/* Initialize the device private structure. */
			priv = netdev_priv(dev);
			memset(priv, 0, sizeof(struct octeon_ethernet));

			priv->imode = imode;
			priv->port = port;
			priv->queue = cvmx_pko_get_base_queue(priv->port);
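			/*
			 * Carve this device's block of 32-bit counters out
			 * of the FAU region: one counter per PKO queue,
			 * 4 bytes each. fau itself is decremented further
			 * down once the device registers successfully.
			 */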
			priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);
			for (qos = 0; qos < cvmx_pko_get_num_queues(port);
			     qos++)
				cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);

			switch (priv->imode) {

			/* These types don't support ports to IPD/PKO */
			case CVMX_HELPER_INTERFACE_MODE_DISABLED:
			case CVMX_HELPER_INTERFACE_MODE_PCIE:
			case CVMX_HELPER_INTERFACE_MODE_PICMG:
				break;

			case CVMX_HELPER_INTERFACE_MODE_NPI:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "npi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_XAUI:
				dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
				strcpy(dev->name, "xaui%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_LOOP:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "loop%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SGMII:
				dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SPI:
				dev->netdev_ops = &cvm_oct_spi_netdev_ops;
				strcpy(dev->name, "spi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_RGMII:
			case CVMX_HELPER_INTERFACE_MODE_GMII:
				dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				break;
			}

			if (!dev->netdev_ops) {
				kfree(dev);
			} else if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device "
				       "for interface %d, port %d\n",
				       interface, priv->port);
				kfree(dev);
			} else {
				cvm_oct_device[priv->port] = dev;
				fau -=
				    cvmx_pko_get_num_queues(priv->port) *
				    sizeof(uint32_t);
			}
		}
	}

	if (INTERRUPT_LIMIT) {
		/*
		 * Set the POW timer rate to give an interrupt at most
		 * INTERRUPT_LIMIT times per second.
		 */
		cvmx_write_csr(CVMX_POW_WQ_INT_PC,
			       octeon_bootinfo->eclock_hz / (INTERRUPT_LIMIT *
							     16 * 256) << 8);

		/*
		 * Enable POW timer interrupt. It will count when
		 * there are packets available.
		 */
		cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group),
			       0x1ful << 24);
	} else {
		/* Enable POW interrupt when our port has at least one packet */
		cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0x1001);
	}

	/* Enable the poll timer for checking RGMII status */
	init_timer(&cvm_oct_poll_timer);
	cvm_oct_poll_timer.data = 0;
	cvm_oct_poll_timer.function = cvm_do_timer;
	mod_timer(&cvm_oct_poll_timer, jiffies + HZ);

	return 0;
}

/**
 * Module / driver shutdown
 */
static void __exit cvm_oct_cleanup_module(void)
{
	int port;

	/* Disable POW interrupt */
	cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0);

	cvmx_ipd_disable();

	/* Free the interrupt handler */
	free_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group, cvm_oct_device);

	del_timer(&cvm_oct_poll_timer);
	cvm_oct_rx_shutdown();
	cvmx_pko_disable();

	/* Free the ethernet devices */
	for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
		if (cvm_oct_device[port]) {
			cvm_oct_tx_shutdown(cvm_oct_device[port]);
			unregister_netdev(cvm_oct_device[port]);
			kfree(cvm_oct_device[port]);
			cvm_oct_device[port] = NULL;
		}
	}

	cvmx_pko_shutdown();
	cvm_oct_proc_shutdown();

	cvmx_ipd_free_ptr();

	/* Free the HW pools */
	cvm_oct_mem_empty_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
			      num_packet_buffers);
	cvm_oct_mem_empty_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
			      num_packet_buffers);
	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
		cvm_oct_mem_empty_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
				      CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
MODULE_DESCRIPTION("Cavium Networks Octeon ethernet driver.");
module_init(cvm_oct_init_module);
module_exit(cvm_oct_cleanup_module);