/**********************************************************************
 * Author: Cavium Networks
 *
 * Contact: support@caviumnetworks.com
 * This file is part of the OCTEON SDK
 *
 * Copyright (c) 2003-2007 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this file; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * or visit http://www.gnu.org/licenses/.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium Networks for more information
 **********************************************************************/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>

#include <net/dst.h>

#include <asm/octeon/octeon.h>

#include "ethernet-defines.h"
#include "octeon-ethernet.h"
#include "ethernet-mem.h"
#include "ethernet-rx.h"
#include "ethernet-tx.h"
#include "ethernet-mdio.h"
#include "ethernet-util.h"

#include "cvmx-pip.h"
#include "cvmx-pko.h"
#include "cvmx-fau.h"
#include "cvmx-ipd.h"
#include "cvmx-helper.h"

#include "cvmx-gmxx-defs.h"
#include "cvmx-smix-defs.h"
#if defined(CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS) \
	&& CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS
int num_packet_buffers = CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS;
#else
int num_packet_buffers = 1024;
#endif
module_param(num_packet_buffers, int, 0444);
MODULE_PARM_DESC(num_packet_buffers, "\n"
	"\tNumber of packet buffers to allocate and store in the\n"
	"\tFPA. By default, 1024 packet buffers are used unless\n"
	"\tCONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS is defined.");
int pow_receive_group = 15;
module_param(pow_receive_group, int, 0444);
MODULE_PARM_DESC(pow_receive_group, "\n"
	"\tPOW group to receive packets from. All ethernet hardware\n"
	"\twill be configured to send incoming packets to this POW\n"
	"\tgroup. Also any other software can submit packets to this\n"
	"\tgroup for the kernel to process.");
int pow_send_group = -1;
module_param(pow_send_group, int, 0644);
MODULE_PARM_DESC(pow_send_group, "\n"
	"\tPOW group to send packets to other software on. This\n"
	"\tcontrols the creation of the virtual device pow0.\n"
	"\talways_use_pow also depends on this value.");
int always_use_pow;
module_param(always_use_pow, int, 0444);
MODULE_PARM_DESC(always_use_pow, "\n"
	"\tWhen set, always send to the pow group. This will cause\n"
	"\tpackets sent to real ethernet devices to be sent to the\n"
	"\tPOW group instead of the hardware. Unless some other\n"
	"\tapplication changes the config, packets will still be\n"
	"\treceived from the low level hardware. Use this option\n"
	"\tto allow a CVMX app to intercept all packets from the\n"
	"\tlinux kernel. You must specify pow_send_group along with\n"
	"\tthis option.");
char pow_send_list[128] = "";
module_param_string(pow_send_list, pow_send_list, sizeof(pow_send_list), 0444);
MODULE_PARM_DESC(pow_send_list, "\n"
	"\tComma separated list of ethernet devices that should use the\n"
	"\tPOW for transmit instead of the actual ethernet hardware. This\n"
	"\tis a per port version of always_use_pow. always_use_pow takes\n"
	"\tprecedence over this list. For example, setting this to\n"
	"\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n"
	"\tusing the pow_send_group.");
int max_rx_cpus = -1;
module_param(max_rx_cpus, int, 0444);
MODULE_PARM_DESC(max_rx_cpus, "\n"
	"\t\tThe maximum number of CPUs to use for packet reception.\n"
	"\t\tUse -1 to use all available CPUs.");

int rx_napi_weight = 32;
module_param(rx_napi_weight, int, 0444);
MODULE_PARM_DESC(rx_napi_weight, "The NAPI WEIGHT parameter.");
/**
 * The offset from mac_addr_base that should be used for the next port
 * that is configured. By convention, if any mgmt ports exist on the
 * chip, they get the first mac addresses. The ports controlled by
 * this driver are numbered sequentially following any mgmt addresses
 * that may exist.
 */
static unsigned int cvm_oct_mac_addr_offset;
/**
 * cvm_oct_poll_queue - Workqueue for polling operations.
 */
struct workqueue_struct *cvm_oct_poll_queue;

/**
 * cvm_oct_poll_queue_stopping - flag to indicate polling should stop.
 *
 * Set to one right before cvm_oct_poll_queue is destroyed.
 */
atomic_t cvm_oct_poll_queue_stopping = ATOMIC_INIT(0);

/**
 * Array of every ethernet device owned by this driver indexed by
 * the ipd input port number.
 */
struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS];
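/**
 * cvm_oct_tx_poll_interval - interval between TX cleanup polls, in core
 * clock cycles (computed in cvm_oct_init_module).
 */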
u64 cvm_oct_tx_poll_interval;

static void cvm_oct_rx_refill_worker(struct work_struct *work);
static DECLARE_DELAYED_WORK(cvm_oct_rx_refill_work, cvm_oct_rx_refill_worker);
static void cvm_oct_rx_refill_worker(struct work_struct *work)
{
	/*
	 * FPA 0 may have been drained, try to refill it if we need
	 * more than num_packet_buffers / 2, otherwise normal receive
	 * processing will refill it.  If it were drained, no packets
	 * could be received so cvm_oct_napi_poll would never be
	 * invoked to do the refill.
	 */
	cvm_oct_rx_refill_pool(num_packet_buffers / 2);

	if (!atomic_read(&cvm_oct_poll_queue_stopping))
		queue_delayed_work(cvm_oct_poll_queue,
				   &cvm_oct_rx_refill_work, HZ);
}
static void cvm_oct_periodic_worker(struct work_struct *work)
{
	struct octeon_ethernet *priv = container_of(work,
						    struct octeon_ethernet,
						    port_periodic_work.work);

	if (priv->poll)
		priv->poll(cvm_oct_device[priv->port]);

	cvm_oct_device[priv->port]->netdev_ops->ndo_get_stats(cvm_oct_device[priv->port]);

	if (!atomic_read(&cvm_oct_poll_queue_stopping))
		queue_delayed_work(cvm_oct_poll_queue,
				   &priv->port_periodic_work, HZ);
}
static __init void cvm_oct_configure_common_hw(void)
{
	/* Setup the FPA */
	cvmx_fpa_enable();
	cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
			     num_packet_buffers);
	cvm_oct_mem_fill_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
			     num_packet_buffers);
	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
		cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
				     CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
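	/*
	 * A reading of the thresholds below, assuming the helper takes
	 * cvmx_helper_setup_red(pass_thresh, drop_thresh): with more than
	 * a quarter of the packet buffers still free, RED passes
	 * everything; below an eighth it drops everything; in between it
	 * drops randomly.
	 */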
	if (USE_RED)
		cvmx_helper_setup_red(num_packet_buffers / 4,
				      num_packet_buffers / 8);
}
/**
 * cvm_oct_free_work - Free a work queue entry
 *
 * @work_queue_entry: Work queue entry to free
 *
 * Returns Zero on success, Negative on failure.
 */
int cvm_oct_free_work(void *work_queue_entry)
{
	cvmx_wqe_t *work = work_queue_entry;

	int segments = work->word2.s.bufs;
	union cvmx_buf_ptr segment_ptr = work->packet_ptr;
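	/*
	 * Walk the buffer chain: the pointer to the next buffer is kept
	 * in the 8 bytes immediately before each buffer's data area,
	 * hence the read from segment_ptr.s.addr - 8 below.
	 */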
	while (segments--) {
		union cvmx_buf_ptr next_ptr = *(union cvmx_buf_ptr *)
			cvmx_phys_to_ptr(segment_ptr.s.addr - 8);
		if (unlikely(!segment_ptr.s.i))
			cvmx_fpa_free(cvm_oct_get_buffer_ptr(segment_ptr),
				      segment_ptr.s.pool,
				      DONT_WRITEBACK(CVMX_FPA_PACKET_POOL_SIZE /
						     128));
		segment_ptr = next_ptr;
	}
	cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1));

	return 0;
}
EXPORT_SYMBOL(cvm_oct_free_work);
/**
 * cvm_oct_common_get_stats - get the low level ethernet statistics
 * @dev:    Device to get the statistics from
 *
 * Returns Pointer to the statistics
 */
static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
{
	cvmx_pip_port_status_t rx_status;
	cvmx_pko_port_status_t tx_status;
	struct octeon_ethernet *priv = netdev_priv(dev);

	if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) {
		if (octeon_is_simulation()) {
			/* The simulator doesn't support statistics */
			memset(&rx_status, 0, sizeof(rx_status));
			memset(&tx_status, 0, sizeof(tx_status));
		} else {
			cvmx_pip_get_port_status(priv->port, 1, &rx_status);
			cvmx_pko_get_port_status(priv->port, 1, &tx_status);
		}

		priv->stats.rx_packets += rx_status.inb_packets;
		priv->stats.tx_packets += tx_status.packets;
		priv->stats.rx_bytes += rx_status.inb_octets;
		priv->stats.tx_bytes += tx_status.octets;
		priv->stats.multicast += rx_status.multicast_packets;
		priv->stats.rx_crc_errors += rx_status.inb_errors;
		priv->stats.rx_frame_errors += rx_status.fcs_align_err_packets;

		/*
		 * The drop counter must be incremented atomically
		 * since the RX tasklet also increments it.
		 */
#ifdef CONFIG_64BIT
		atomic64_add(rx_status.dropped_packets,
			     (atomic64_t *)&priv->stats.rx_dropped);
#else
		atomic_add(rx_status.dropped_packets,
			   (atomic_t *)&priv->stats.rx_dropped);
#endif
	}

	return &priv->stats;
}
/**
 * cvm_oct_common_change_mtu - change the link MTU
 * @dev:     Device to change
 * @new_mtu: The new MTU
 *
 * Returns Zero on success
 */
static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
	int vlan_bytes = 4;
#else
	int vlan_bytes = 0;
#endif
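	/*
	 * Worked example: with 802.1Q support compiled in, a 1500-byte
	 * MTU corresponds to a 1522-byte frame on the wire
	 * (1500 + 14-byte ethernet header + 4-byte FCS + 4-byte VLAN tag).
	 */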
	/*
	 * Limit the MTU to make sure the resulting ethernet frames are
	 * between 64 bytes and 65392 bytes.
	 */
	if ((new_mtu + 14 + 4 + vlan_bytes < 64)
	    || (new_mtu + 14 + 4 + vlan_bytes > 65392)) {
		pr_err("MTU must be between %d and %d.\n",
		       64 - 14 - 4 - vlan_bytes, 65392 - 14 - 4 - vlan_bytes);
		return -EINVAL;
	}
	dev->mtu = new_mtu;
	if ((interface < 2)
	    && (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		/* Add ethernet header and FCS, and VLAN if configured. */
		int max_packet = new_mtu + 14 + 4 + vlan_bytes;

		if (OCTEON_IS_MODEL(OCTEON_CN3XXX)
		    || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
			/* Signal errors on packets larger than the MTU */
			cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface),
				       max_packet);
		} else {
			/*
			 * Set the hardware to truncate packets larger
			 * than the MTU and smaller than 64 bytes.
			 */
			union cvmx_pip_frm_len_chkx frm_len_chk;
			frm_len_chk.u64 = 0;
			frm_len_chk.s.minlen = 64;
			frm_len_chk.s.maxlen = max_packet;
			cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface),
				       frm_len_chk.u64);
		}
		/*
		 * Set the hardware to truncate packets larger than
		 * the MTU. The jabber register must be set to a
		 * multiple of 8 bytes, so round up.
		 */
		cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface),
			       (max_packet + 7) & ~7u);
	}
	return 0;
}
/**
 * cvm_oct_common_set_multicast_list - set the multicast list
 * @dev:    Device to work on
 */
static void cvm_oct_common_set_multicast_list(struct net_device *dev)
{
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);

	if ((interface < 2)
	    && (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		union cvmx_gmxx_rxx_adr_ctl control;
		control.u64 = 0;
		control.s.bcst = 1;	/* Allow broadcast MAC addresses */

		if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI) ||
		    (dev->flags & IFF_PROMISC))
			/* Force accept multicast packets */
			control.s.mcst = 2;
		else
			/* Force reject multicast packets */
			control.s.mcst = 1;
		if (dev->flags & IFF_PROMISC)
			/*
			 * Reject matches if promisc. Since CAM is
			 * shut off, should accept everything.
			 */
			control.s.cam_mode = 0;
		else
			/* Filter packets based on the CAM */
			control.s.cam_mode = 1;

		gmx_cfg.u64 =
		    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64 & ~1ull);

		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface),
			       control.u64);
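		/*
		 * GMX_RXX_ADR_CAM_EN is (assuming the usual GMX register
		 * layout) a bitmask of CAM entries; only entry 0, the one
		 * programmed by cvm_oct_common_set_mac_address(), is used
		 * here, so promiscuous mode simply clears the whole mask.
		 */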
		if (dev->flags & IFF_PROMISC)
			cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
				       (index, interface), 0);
		else
			cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
				       (index, interface), 1);

		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64);
	}
}
/**
 * cvm_oct_common_set_mac_address - set the hardware MAC address for a device
 * @dev:    The device in question.
 * @addr:   Address structure to change it to.
 *
 * Returns Zero on success
 */
static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);
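	/*
	 * addr points at a struct sockaddr, so the MAC itself starts at
	 * sa_data, 2 bytes in (after the sa_family field); hence the
	 * addr + 2 and ptr[i + 2] offsets below.
	 */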
	memcpy(dev->dev_addr, addr + 2, 6);

	if ((interface < 2)
	    && (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		int i;
		uint8_t *ptr = addr;
		uint64_t mac = 0;
		for (i = 0; i < 6; i++)
			mac = (mac << 8) | (uint64_t)(ptr[i + 2]);

		gmx_cfg.u64 =
		    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64 & ~1ull);

		cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface),
			       ptr[2]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface),
			       ptr[3]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface),
			       ptr[4]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface),
			       ptr[5]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface),
			       ptr[6]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface),
			       ptr[7]);
		cvm_oct_common_set_multicast_list(dev);
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64);
	}
	return 0;
}
/**
 * cvm_oct_common_init - per network device initialization
 * @dev:    Device to initialize
 *
 * Returns Zero on success
 */
int cvm_oct_common_init(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	struct sockaddr sa;
	u64 mac = ((u64)(octeon_bootinfo->mac_addr_base[0] & 0xff) << 40) |
		((u64)(octeon_bootinfo->mac_addr_base[1] & 0xff) << 32) |
		((u64)(octeon_bootinfo->mac_addr_base[2] & 0xff) << 24) |
		((u64)(octeon_bootinfo->mac_addr_base[3] & 0xff) << 16) |
		((u64)(octeon_bootinfo->mac_addr_base[4] & 0xff) << 8) |
		(u64)(octeon_bootinfo->mac_addr_base[5] & 0xff);

	mac += cvm_oct_mac_addr_offset;
	sa.sa_data[0] = (mac >> 40) & 0xff;
	sa.sa_data[1] = (mac >> 32) & 0xff;
	sa.sa_data[2] = (mac >> 24) & 0xff;
	sa.sa_data[3] = (mac >> 16) & 0xff;
	sa.sa_data[4] = (mac >> 8) & 0xff;
	sa.sa_data[5] = mac & 0xff;

	if (cvm_oct_mac_addr_offset >= octeon_bootinfo->mac_addr_count)
		printk(KERN_DEBUG "%s: Using MAC outside of the assigned range:"
		       " %02x:%02x:%02x:%02x:%02x:%02x\n", dev->name,
		       sa.sa_data[0] & 0xff, sa.sa_data[1] & 0xff,
		       sa.sa_data[2] & 0xff, sa.sa_data[3] & 0xff,
		       sa.sa_data[4] & 0xff, sa.sa_data[5] & 0xff);
	cvm_oct_mac_addr_offset++;

	/*
	 * Force the interface to use the POW send if always_use_pow
	 * was specified or it is in the pow send list.
	 */
	if ((pow_send_group != -1)
	    && (always_use_pow || strstr(pow_send_list, dev->name)))
		priv->queue = -1;

	if (priv->queue != -1) {
		dev->features |= NETIF_F_SG;
		if (USE_HW_TCPUDP_CHECKSUM)
			dev->features |= NETIF_F_IP_CSUM;
	}

	/* We do our own locking, Linux doesn't need to */
	dev->features |= NETIF_F_LLTX;
	SET_ETHTOOL_OPS(dev, &cvm_oct_ethtool_ops);

	cvm_oct_phy_setup_device(dev);
	dev->netdev_ops->ndo_set_mac_address(dev, &sa);
	dev->netdev_ops->ndo_change_mtu(dev, dev->mtu);

	/*
	 * Zero out stats for port so we won't mistakenly show
	 * counters from the bootloader.
	 */
	memset(dev->netdev_ops->ndo_get_stats(dev), 0,
	       sizeof(struct net_device_stats));

	return 0;
}
void cvm_oct_common_uninit(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);

	if (priv->phydev)
		phy_disconnect(priv->phydev);
}
static const struct net_device_ops cvm_oct_npi_netdev_ops = {
	.ndo_init		= cvm_oct_common_init,
	.ndo_uninit		= cvm_oct_common_uninit,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_multicast_list	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_xaui_netdev_ops = {
	.ndo_init		= cvm_oct_xaui_init,
	.ndo_uninit		= cvm_oct_xaui_uninit,
	.ndo_open		= cvm_oct_xaui_open,
	.ndo_stop		= cvm_oct_xaui_stop,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_multicast_list	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_sgmii_netdev_ops = {
	.ndo_init		= cvm_oct_sgmii_init,
	.ndo_uninit		= cvm_oct_sgmii_uninit,
	.ndo_open		= cvm_oct_sgmii_open,
	.ndo_stop		= cvm_oct_sgmii_stop,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_multicast_list	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_spi_netdev_ops = {
	.ndo_init		= cvm_oct_spi_init,
	.ndo_uninit		= cvm_oct_spi_uninit,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_multicast_list	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_rgmii_netdev_ops = {
	.ndo_init		= cvm_oct_rgmii_init,
	.ndo_uninit		= cvm_oct_rgmii_uninit,
	.ndo_open		= cvm_oct_rgmii_open,
	.ndo_stop		= cvm_oct_rgmii_stop,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_multicast_list	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_pow_netdev_ops = {
	.ndo_init		= cvm_oct_common_init,
	.ndo_start_xmit		= cvm_oct_xmit_pow,
	.ndo_set_multicast_list	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};
extern void octeon_mdiobus_force_mod_depencency(void);

static int __init cvm_oct_init_module(void)
{
	int num_interfaces;
	int interface;
	int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
	int qos;

	octeon_mdiobus_force_mod_depencency();
	pr_notice("cavium-ethernet %s\n", OCTEON_ETHERNET_VERSION);

	if (OCTEON_IS_MODEL(OCTEON_CN52XX))
		cvm_oct_mac_addr_offset = 2; /* First two are the mgmt ports. */
	else if (OCTEON_IS_MODEL(OCTEON_CN56XX))
		cvm_oct_mac_addr_offset = 1; /* First one is the mgmt port. */
	else
		cvm_oct_mac_addr_offset = 0;
	cvm_oct_poll_queue = create_singlethread_workqueue("octeon-ethernet");
	if (cvm_oct_poll_queue == NULL) {
		pr_err("octeon-ethernet: Cannot create workqueue");
		return -ENOMEM;
	}

	cvm_oct_configure_common_hw();

	cvmx_helper_initialize_packet_io_global();

	/* Change the input group for all ports before input is enabled */
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port++) {
			union cvmx_pip_prt_tagx pip_prt_tagx;
			pip_prt_tagx.u64 =
			    cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));
			pip_prt_tagx.s.grp = pow_receive_group;
			cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
				       pip_prt_tagx.u64);
		}
	}

	cvmx_helper_ipd_and_packet_input_enable();

	memset(cvm_oct_device, 0, sizeof(cvm_oct_device));

	/*
	 * Initialize the FAU used for counting packet buffers that
	 * need to be freed.
	 */
	cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	/* Initialize the FAU used for counting tx SKBs that need to be freed */
	cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);
	if (pow_send_group != -1) {
		struct net_device *dev;
		pr_info("\tConfiguring device for POW only access\n");
		dev = alloc_etherdev(sizeof(struct octeon_ethernet));
		if (dev) {
			/* Initialize the device private structure. */
			struct octeon_ethernet *priv = netdev_priv(dev);

			dev->netdev_ops = &cvm_oct_pow_netdev_ops;
			priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
			priv->port = CVMX_PIP_NUM_INPUT_PORTS;
			priv->queue = -1;
			strcpy(dev->name, "pow%d");
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);

			if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for POW\n");
				free_netdev(dev);
			} else {
				cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
				pr_info("%s: POW send group %d, receive group %d\n",
					dev->name, pow_send_group,
					pow_receive_group);
			}
		} else {
			pr_err("Failed to allocate ethernet device for POW\n");
		}
	}
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		cvmx_helper_interface_mode_t imode =
		    cvmx_helper_interface_get_mode(interface);
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port++) {
			struct octeon_ethernet *priv;
			struct net_device *dev =
			    alloc_etherdev(sizeof(struct octeon_ethernet));
			if (!dev) {
				pr_err("Failed to allocate ethernet device for port %d\n", port);
				continue;
			}

			/* Initialize the device private structure. */
			priv = netdev_priv(dev);

			INIT_DELAYED_WORK(&priv->port_periodic_work,
					  cvm_oct_periodic_worker);
			priv->imode = imode;
			priv->port = port;
			priv->queue = cvmx_pko_get_base_queue(priv->port);
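			/*
			 * Each PKO queue owned by this port gets its own
			 * 32-bit FAU (fetch-and-add unit) counter, so the
			 * per-port FAU base steps down 4 bytes per queue.
			 */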
			priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);
			for (qos = 0; qos < cvmx_pko_get_num_queues(port);
			     qos++)
				cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);
			switch (priv->imode) {
			/* These types don't support ports to IPD/PKO */
			case CVMX_HELPER_INTERFACE_MODE_DISABLED:
			case CVMX_HELPER_INTERFACE_MODE_PCIE:
			case CVMX_HELPER_INTERFACE_MODE_PICMG:
				break;

			case CVMX_HELPER_INTERFACE_MODE_NPI:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "npi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_XAUI:
				dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
				strcpy(dev->name, "xaui%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_LOOP:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "loop%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SGMII:
				dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SPI:
				dev->netdev_ops = &cvm_oct_spi_netdev_ops;
				strcpy(dev->name, "spi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_RGMII:
			case CVMX_HELPER_INTERFACE_MODE_GMII:
				dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				break;
			}
			if (!dev->netdev_ops) {
				free_netdev(dev);
			} else if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device "
				       "for interface %d, port %d\n",
				       interface, priv->port);
				free_netdev(dev);
			} else {
				cvm_oct_device[priv->port] = dev;
				fau -=
				    cvmx_pko_get_num_queues(priv->port) *
				    sizeof(uint32_t);
				queue_delayed_work(cvm_oct_poll_queue,
						   &priv->port_periodic_work, HZ);
			}
		}
	}
	cvm_oct_tx_initialize();
	cvm_oct_rx_initialize();

	/*
	 * 150 uS: about 10 1500-byte packets at 1GE. The clock rate is
	 * in Hz, so dividing by 1000000 gives core cycles per uS.
	 */
	cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);

	queue_delayed_work(cvm_oct_poll_queue, &cvm_oct_rx_refill_work, HZ);

	return 0;
}
static void __exit cvm_oct_cleanup_module(void)
{
	int port;

	/* Disable POW interrupt */
	cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0);

	cvmx_ipd_disable();

	/* Free the interrupt handler */
	free_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group, cvm_oct_device);

	atomic_inc_return(&cvm_oct_poll_queue_stopping);
	cancel_delayed_work_sync(&cvm_oct_rx_refill_work);

	cvm_oct_rx_shutdown();
	cvm_oct_tx_shutdown();

	cvmx_pko_disable();

	/* Free the ethernet devices */
	for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
		if (cvm_oct_device[port]) {
			struct net_device *dev = cvm_oct_device[port];
			struct octeon_ethernet *priv = netdev_priv(dev);
			cancel_delayed_work_sync(&priv->port_periodic_work);

			cvm_oct_tx_shutdown_dev(dev);
			unregister_netdev(dev);
			free_netdev(dev);
			cvm_oct_device[port] = NULL;
		}
	}

	destroy_workqueue(cvm_oct_poll_queue);

	cvmx_pko_shutdown();

	cvmx_ipd_free_ptr();

	/* Free the HW pools */
	cvm_oct_mem_empty_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
			      num_packet_buffers);
	cvm_oct_mem_empty_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
			      num_packet_buffers);
	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
		cvm_oct_mem_empty_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
				      CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
}
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
MODULE_DESCRIPTION("Cavium Networks Octeon ethernet driver.");
module_init(cvm_oct_init_module);
module_exit(cvm_oct_cleanup_module);