S2io: Updating transceiver information in ethtool function
linux-2.6/kvm.git: drivers/net/s2io.c
1 /************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3 * Copyright(c) 2002-2007 Neterion Inc.
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
13 * Credits:
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
18 * questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
21 * Francois Romieu : For pointing out all code part that were
22 * deprecated and also styling related comments.
23 * Grant Grundler : For helping me get rid of some Architecture
24 * dependent code.
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
27 * The module loadable parameters that are supported by the driver and a brief
28 * explanation of all the variables.
30 * rx_ring_num : This can be used to program the number of receive rings used
31 * in the driver.
32 * rx_ring_sz: This defines the number of receive blocks each ring can have.
33 * This is also an array of size 8.
34 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
35 * values are 1, 2.
36 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
37 * tx_fifo_len: This too is an array of 8. Each element defines the number of
38 * Tx descriptors that can be associated with each corresponding FIFO.
39 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40 * 2(MSI_X). Default value is '2(MSI_X)'
41 * lro: Specifies whether to enable Large Receive Offload (LRO) or not.
42 * Possible values '1' for enable and '0' for disable. Default is '0'
43 * lro_max_pkts: This parameter defines the maximum number of packets that can
44 * be aggregated as a single large packet
45 * napi: This parameter is used to enable/disable NAPI (polling Rx)
46 * Possible values '1' for enable and '0' for disable. Default is '1'
47 * ufo: This parameter is used to enable/disable UDP Fragmentation Offload (UFO)
48 * Possible values '1' for enable and '0' for disable. Default is '0'
49 * vlan_tag_strip: This can be used to enable or disable VLAN stripping.
50 * Possible values '1' for enable, '0' for disable.
51 * Default is '2' - which means disable in promisc mode
52 * and enable in non-promiscuous mode.
53 ************************************************************************/
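/*
 * Illustrative usage (editorial note, not part of the original source):
 * the variables above are standard module parameters, so a typical load
 * line could look like the following. The parameter names are real; the
 * values are examples only.
 *
 *   modprobe s2io intr_type=2 napi=1 lro=1 lro_max_pkts=20 \
 *           tx_fifo_num=1 rx_ring_num=1
 */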
55 #include <linux/module.h>
56 #include <linux/types.h>
57 #include <linux/errno.h>
58 #include <linux/ioport.h>
59 #include <linux/pci.h>
60 #include <linux/dma-mapping.h>
61 #include <linux/kernel.h>
62 #include <linux/netdevice.h>
63 #include <linux/etherdevice.h>
64 #include <linux/skbuff.h>
65 #include <linux/init.h>
66 #include <linux/delay.h>
67 #include <linux/stddef.h>
68 #include <linux/ioctl.h>
69 #include <linux/timex.h>
70 #include <linux/ethtool.h>
71 #include <linux/workqueue.h>
72 #include <linux/if_vlan.h>
73 #include <linux/ip.h>
74 #include <linux/tcp.h>
75 #include <net/tcp.h>
77 #include <asm/system.h>
78 #include <asm/uaccess.h>
79 #include <asm/io.h>
80 #include <asm/div64.h>
81 #include <asm/irq.h>
83 /* local include */
84 #include "s2io.h"
85 #include "s2io-regs.h"
87 #define DRV_VERSION "2.0.26.5"
89 /* S2io Driver name & version. */
90 static char s2io_driver_name[] = "Neterion";
91 static char s2io_driver_version[] = DRV_VERSION;
93 static int rxd_size[2] = {32,48};
94 static int rxd_count[2] = {127,85};
96 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
98 int ret;
100 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
101 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
103 return ret;
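/*
 * Annotation (editorial): as the two checks above suggest, an RxD counts
 * as "up to date" once the adapter has handed it back to the host
 * (RXD_OWN_XENA cleared) and the marker the driver stamps into Control_2
 * when posting the buffer has been overwritten by the write-back.
 */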
107 * Cards with the following subsystem_ids have a link state indication
108 * problem: 600B, 600C, 600D, 640B, 640C and 640D.
109 * The macro below identifies these cards given the subsystem_id.
111 #define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
112 (dev_type == XFRAME_I_DEVICE) ? \
113 ((((subid >= 0x600B) && (subid <= 0x600D)) || \
114 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
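/*
 * Worked example (editorial): for an XFRAME_I device with subid 0x600C
 * the first range check matches and the macro evaluates to 1; for any
 * XFRAME_II device it evaluates to 0 regardless of the subid.
 */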
116 #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
117 ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
118 #define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
119 #define PANIC 1
120 #define LOW 2
121 static inline int rx_buffer_level(struct s2io_nic * sp, int rxb_size, int ring)
123 struct mac_info *mac_control;
125 mac_control = &sp->mac_control;
126 if (rxb_size <= rxd_count[sp->rxd_mode])
127 return PANIC;
128 else if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16)
129 return LOW;
130 return 0;
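/*
 * Worked example (editorial): a ring whose rxb_size is at or below
 * rxd_count[] (127 in 1-buffer mode) returns PANIC; if the gap between
 * the ring's pkt_cnt and rxb_size exceeds 16 it returns LOW; otherwise
 * it returns 0 (healthy).
 */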
133 static inline int is_s2io_card_up(const struct s2io_nic * sp)
135 return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
138 /* Ethtool related variables and Macros. */
139 static char s2io_gstrings[][ETH_GSTRING_LEN] = {
140 "Register test\t(offline)",
141 "Eeprom test\t(offline)",
142 "Link test\t(online)",
143 "RLDRAM test\t(offline)",
144 "BIST Test\t(offline)"
147 static char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
148 {"tmac_frms"},
149 {"tmac_data_octets"},
150 {"tmac_drop_frms"},
151 {"tmac_mcst_frms"},
152 {"tmac_bcst_frms"},
153 {"tmac_pause_ctrl_frms"},
154 {"tmac_ttl_octets"},
155 {"tmac_ucst_frms"},
156 {"tmac_nucst_frms"},
157 {"tmac_any_err_frms"},
158 {"tmac_ttl_less_fb_octets"},
159 {"tmac_vld_ip_octets"},
160 {"tmac_vld_ip"},
161 {"tmac_drop_ip"},
162 {"tmac_icmp"},
163 {"tmac_rst_tcp"},
164 {"tmac_tcp"},
165 {"tmac_udp"},
166 {"rmac_vld_frms"},
167 {"rmac_data_octets"},
168 {"rmac_fcs_err_frms"},
169 {"rmac_drop_frms"},
170 {"rmac_vld_mcst_frms"},
171 {"rmac_vld_bcst_frms"},
172 {"rmac_in_rng_len_err_frms"},
173 {"rmac_out_rng_len_err_frms"},
174 {"rmac_long_frms"},
175 {"rmac_pause_ctrl_frms"},
176 {"rmac_unsup_ctrl_frms"},
177 {"rmac_ttl_octets"},
178 {"rmac_accepted_ucst_frms"},
179 {"rmac_accepted_nucst_frms"},
180 {"rmac_discarded_frms"},
181 {"rmac_drop_events"},
182 {"rmac_ttl_less_fb_octets"},
183 {"rmac_ttl_frms"},
184 {"rmac_usized_frms"},
185 {"rmac_osized_frms"},
186 {"rmac_frag_frms"},
187 {"rmac_jabber_frms"},
188 {"rmac_ttl_64_frms"},
189 {"rmac_ttl_65_127_frms"},
190 {"rmac_ttl_128_255_frms"},
191 {"rmac_ttl_256_511_frms"},
192 {"rmac_ttl_512_1023_frms"},
193 {"rmac_ttl_1024_1518_frms"},
194 {"rmac_ip"},
195 {"rmac_ip_octets"},
196 {"rmac_hdr_err_ip"},
197 {"rmac_drop_ip"},
198 {"rmac_icmp"},
199 {"rmac_tcp"},
200 {"rmac_udp"},
201 {"rmac_err_drp_udp"},
202 {"rmac_xgmii_err_sym"},
203 {"rmac_frms_q0"},
204 {"rmac_frms_q1"},
205 {"rmac_frms_q2"},
206 {"rmac_frms_q3"},
207 {"rmac_frms_q4"},
208 {"rmac_frms_q5"},
209 {"rmac_frms_q6"},
210 {"rmac_frms_q7"},
211 {"rmac_full_q0"},
212 {"rmac_full_q1"},
213 {"rmac_full_q2"},
214 {"rmac_full_q3"},
215 {"rmac_full_q4"},
216 {"rmac_full_q5"},
217 {"rmac_full_q6"},
218 {"rmac_full_q7"},
219 {"rmac_pause_cnt"},
220 {"rmac_xgmii_data_err_cnt"},
221 {"rmac_xgmii_ctrl_err_cnt"},
222 {"rmac_accepted_ip"},
223 {"rmac_err_tcp"},
224 {"rd_req_cnt"},
225 {"new_rd_req_cnt"},
226 {"new_rd_req_rtry_cnt"},
227 {"rd_rtry_cnt"},
228 {"wr_rtry_rd_ack_cnt"},
229 {"wr_req_cnt"},
230 {"new_wr_req_cnt"},
231 {"new_wr_req_rtry_cnt"},
232 {"wr_rtry_cnt"},
233 {"wr_disc_cnt"},
234 {"rd_rtry_wr_ack_cnt"},
235 {"txp_wr_cnt"},
236 {"txd_rd_cnt"},
237 {"txd_wr_cnt"},
238 {"rxd_rd_cnt"},
239 {"rxd_wr_cnt"},
240 {"txf_rd_cnt"},
241 {"rxf_wr_cnt"}
244 static char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
245 {"rmac_ttl_1519_4095_frms"},
246 {"rmac_ttl_4096_8191_frms"},
247 {"rmac_ttl_8192_max_frms"},
248 {"rmac_ttl_gt_max_frms"},
249 {"rmac_osized_alt_frms"},
250 {"rmac_jabber_alt_frms"},
251 {"rmac_gt_max_alt_frms"},
252 {"rmac_vlan_frms"},
253 {"rmac_len_discard"},
254 {"rmac_fcs_discard"},
255 {"rmac_pf_discard"},
256 {"rmac_da_discard"},
257 {"rmac_red_discard"},
258 {"rmac_rts_discard"},
259 {"rmac_ingm_full_discard"},
260 {"link_fault_cnt"}
263 static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
264 {"\n DRIVER STATISTICS"},
265 {"single_bit_ecc_errs"},
266 {"double_bit_ecc_errs"},
267 {"parity_err_cnt"},
268 {"serious_err_cnt"},
269 {"soft_reset_cnt"},
270 {"fifo_full_cnt"},
271 {"ring_0_full_cnt"},
272 {"ring_1_full_cnt"},
273 {"ring_2_full_cnt"},
274 {"ring_3_full_cnt"},
275 {"ring_4_full_cnt"},
276 {"ring_5_full_cnt"},
277 {"ring_6_full_cnt"},
278 {"ring_7_full_cnt"},
279 ("alarm_transceiver_temp_high"),
280 ("alarm_transceiver_temp_low"),
281 ("alarm_laser_bias_current_high"),
282 ("alarm_laser_bias_current_low"),
283 ("alarm_laser_output_power_high"),
284 ("alarm_laser_output_power_low"),
285 ("warn_transceiver_temp_high"),
286 ("warn_transceiver_temp_low"),
287 ("warn_laser_bias_current_high"),
288 ("warn_laser_bias_current_low"),
289 ("warn_laser_output_power_high"),
290 ("warn_laser_output_power_low"),
291 ("lro_aggregated_pkts"),
292 ("lro_flush_both_count"),
293 ("lro_out_of_sequence_pkts"),
294 ("lro_flush_due_to_max_pkts"),
295 ("lro_avg_aggr_pkts"),
296 ("mem_alloc_fail_cnt"),
297 ("pci_map_fail_cnt"),
298 ("watchdog_timer_cnt"),
299 ("mem_allocated"),
300 ("mem_freed"),
301 ("link_up_cnt"),
302 ("link_down_cnt"),
303 ("link_up_time"),
304 ("link_down_time"),
305 ("tx_tcode_buf_abort_cnt"),
306 ("tx_tcode_desc_abort_cnt"),
307 ("tx_tcode_parity_err_cnt"),
308 ("tx_tcode_link_loss_cnt"),
309 ("tx_tcode_list_proc_err_cnt"),
310 ("rx_tcode_parity_err_cnt"),
311 ("rx_tcode_abort_cnt"),
312 ("rx_tcode_parity_abort_cnt"),
313 ("rx_tcode_rda_fail_cnt"),
314 ("rx_tcode_unkn_prot_cnt"),
315 ("rx_tcode_fcs_err_cnt"),
316 ("rx_tcode_buf_size_err_cnt"),
317 ("rx_tcode_rxd_corrupt_cnt"),
318 ("rx_tcode_unkn_err_cnt"),
319 {"tda_err_cnt"},
320 {"pfc_err_cnt"},
321 {"pcc_err_cnt"},
322 {"tti_err_cnt"},
323 {"tpa_err_cnt"},
324 {"sm_err_cnt"},
325 {"lso_err_cnt"},
326 {"mac_tmac_err_cnt"},
327 {"mac_rmac_err_cnt"},
328 {"xgxs_txgxs_err_cnt"},
329 {"xgxs_rxgxs_err_cnt"},
330 {"rc_err_cnt"},
331 {"prc_pcix_err_cnt"},
332 {"rpa_err_cnt"},
333 {"rda_err_cnt"},
334 {"rti_err_cnt"},
335 {"mc_err_cnt"}
338 #define S2IO_XENA_STAT_LEN (sizeof(ethtool_xena_stats_keys) / ETH_GSTRING_LEN)
339 #define S2IO_ENHANCED_STAT_LEN (sizeof(ethtool_enhanced_stats_keys) / \
340 ETH_GSTRING_LEN)
341 #define S2IO_DRIVER_STAT_LEN (sizeof(ethtool_driver_stats_keys) / ETH_GSTRING_LEN)
343 #define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN )
344 #define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN )
346 #define XFRAME_I_STAT_STRINGS_LEN ( XFRAME_I_STAT_LEN * ETH_GSTRING_LEN )
347 #define XFRAME_II_STAT_STRINGS_LEN ( XFRAME_II_STAT_LEN * ETH_GSTRING_LEN )
349 #define S2IO_TEST_LEN (sizeof(s2io_gstrings) / ETH_GSTRING_LEN)
350 #define S2IO_STRINGS_LEN (S2IO_TEST_LEN * ETH_GSTRING_LEN)
352 #define S2IO_TIMER_CONF(timer, handle, arg, exp) \
353 init_timer(&timer); \
354 timer.function = handle; \
355 timer.data = (unsigned long) arg; \
356 mod_timer(&timer, (jiffies + exp)) \
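/*
 * Usage sketch (editorial): arm a timer that calls handle(arg) after exp
 * jiffies, e.g. a half-second alarm poll. The timer field and handler
 * names below are assumptions for illustration:
 *
 *   S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ / 2));
 *
 * Note the macro expands to multiple statements, so it must not be used
 * as the lone body of an if/else without braces.
 */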
358 /* copy mac addr to def_mac_addr array */
359 static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
361 sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
362 sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
363 sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
364 sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
365 sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
366 sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
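/*
 * Worked example (editorial): the 48-bit address 0x001122334455 is stored
 * most-significant byte first, i.e. mac_addr[0..5] become
 * 00:11:22:33:44:55 (byte 0 from bits 47..40, byte 5 from bits 7..0).
 */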
368 /* Add the vlan */
369 static void s2io_vlan_rx_register(struct net_device *dev,
370 struct vlan_group *grp)
372 struct s2io_nic *nic = dev->priv;
373 unsigned long flags;
375 spin_lock_irqsave(&nic->tx_lock, flags);
376 nic->vlgrp = grp;
377 spin_unlock_irqrestore(&nic->tx_lock, flags);
380 /* A flag indicating whether 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not */
381 static int vlan_strip_flag;
384 * Constants to be programmed into the Xena's registers, to configure
385 * the XAUI.
388 #define END_SIGN 0x0
389 static const u64 herc_act_dtx_cfg[] = {
390 /* Set address */
391 0x8000051536750000ULL, 0x80000515367500E0ULL,
392 /* Write data */
393 0x8000051536750004ULL, 0x80000515367500E4ULL,
394 /* Set address */
395 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
396 /* Write data */
397 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
398 /* Set address */
399 0x801205150D440000ULL, 0x801205150D4400E0ULL,
400 /* Write data */
401 0x801205150D440004ULL, 0x801205150D4400E4ULL,
402 /* Set address */
403 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
404 /* Write data */
405 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
406 /* Done */
407 END_SIGN
410 static const u64 xena_dtx_cfg[] = {
411 /* Set address */
412 0x8000051500000000ULL, 0x80000515000000E0ULL,
413 /* Write data */
414 0x80000515D9350004ULL, 0x80000515D93500E4ULL,
415 /* Set address */
416 0x8001051500000000ULL, 0x80010515000000E0ULL,
417 /* Write data */
418 0x80010515001E0004ULL, 0x80010515001E00E4ULL,
419 /* Set address */
420 0x8002051500000000ULL, 0x80020515000000E0ULL,
421 /* Write data */
422 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
423 END_SIGN
427 * Constants for fixing the MAC address problem seen mostly on
428 * Alpha machines.
430 static const u64 fix_mac[] = {
431 0x0060000000000000ULL, 0x0060600000000000ULL,
432 0x0040600000000000ULL, 0x0000600000000000ULL,
433 0x0020600000000000ULL, 0x0060600000000000ULL,
434 0x0020600000000000ULL, 0x0060600000000000ULL,
435 0x0020600000000000ULL, 0x0060600000000000ULL,
436 0x0020600000000000ULL, 0x0060600000000000ULL,
437 0x0020600000000000ULL, 0x0060600000000000ULL,
438 0x0020600000000000ULL, 0x0060600000000000ULL,
439 0x0020600000000000ULL, 0x0060600000000000ULL,
440 0x0020600000000000ULL, 0x0060600000000000ULL,
441 0x0020600000000000ULL, 0x0060600000000000ULL,
442 0x0020600000000000ULL, 0x0060600000000000ULL,
443 0x0020600000000000ULL, 0x0000600000000000ULL,
444 0x0040600000000000ULL, 0x0060600000000000ULL,
445 END_SIGN
448 MODULE_LICENSE("GPL");
449 MODULE_VERSION(DRV_VERSION);
452 /* Module Loadable parameters. */
453 S2IO_PARM_INT(tx_fifo_num, 1);
454 S2IO_PARM_INT(rx_ring_num, 1);
457 S2IO_PARM_INT(rx_ring_mode, 1);
458 S2IO_PARM_INT(use_continuous_tx_intrs, 1);
459 S2IO_PARM_INT(rmac_pause_time, 0x100);
460 S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
461 S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
462 S2IO_PARM_INT(shared_splits, 0);
463 S2IO_PARM_INT(tmac_util_period, 5);
464 S2IO_PARM_INT(rmac_util_period, 5);
465 S2IO_PARM_INT(l3l4hdr_size, 128);
466 /* Frequency of Rx desc syncs expressed as power of 2 */
467 S2IO_PARM_INT(rxsync_frequency, 3);
468 /* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
469 S2IO_PARM_INT(intr_type, 2);
470 /* Large receive offload feature */
471 S2IO_PARM_INT(lro, 0);
472 /* Max pkts to be aggregated by LRO at one time. If not specified,
473 * aggregation happens until we hit max IP pkt size(64K)
475 S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
476 S2IO_PARM_INT(indicate_max_pkts, 0);
478 S2IO_PARM_INT(napi, 1);
479 S2IO_PARM_INT(ufo, 0);
480 S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
482 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
483 {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
484 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
485 {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
486 static unsigned int rts_frm_len[MAX_RX_RINGS] =
487 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
489 module_param_array(tx_fifo_len, uint, NULL, 0);
490 module_param_array(rx_ring_sz, uint, NULL, 0);
491 module_param_array(rts_frm_len, uint, NULL, 0);
494 * S2IO device table.
495 * This table lists all the devices that this driver supports.
497 static struct pci_device_id s2io_tbl[] __devinitdata = {
498 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
499 PCI_ANY_ID, PCI_ANY_ID},
500 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
501 PCI_ANY_ID, PCI_ANY_ID},
502 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
503 PCI_ANY_ID, PCI_ANY_ID},
504 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
505 PCI_ANY_ID, PCI_ANY_ID},
506 {0,}
509 MODULE_DEVICE_TABLE(pci, s2io_tbl);
511 static struct pci_error_handlers s2io_err_handler = {
512 .error_detected = s2io_io_error_detected,
513 .slot_reset = s2io_io_slot_reset,
514 .resume = s2io_io_resume,
517 static struct pci_driver s2io_driver = {
518 .name = "S2IO",
519 .id_table = s2io_tbl,
520 .probe = s2io_init_nic,
521 .remove = __devexit_p(s2io_rem_nic),
522 .err_handler = &s2io_err_handler,
525 /* A helper macro used by both the init and free shared_mem functions. */
526 #define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
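/*
 * Worked example (editorial): this is a ceiling division. With
 * len = 100 TxD lists and per_each = 30 lists fitting in one page,
 * (100 + 30 - 1) / 30 = 4 pages are allocated.
 */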
529 * init_shared_mem - Allocation and Initialization of Memory
530 * @nic: Device private variable.
531 * Description: The function allocates all the memory areas shared
532 * between the NIC and the driver. This includes Tx descriptors,
533 * Rx descriptors and the statistics block.
536 static int init_shared_mem(struct s2io_nic *nic)
538 u32 size;
539 void *tmp_v_addr, *tmp_v_addr_next;
540 dma_addr_t tmp_p_addr, tmp_p_addr_next;
541 struct RxD_block *pre_rxd_blk = NULL;
542 int i, j, blk_cnt;
543 int lst_size, lst_per_page;
544 struct net_device *dev = nic->dev;
545 unsigned long tmp;
546 struct buffAdd *ba;
548 struct mac_info *mac_control;
549 struct config_param *config;
550 unsigned long long mem_allocated = 0;
552 mac_control = &nic->mac_control;
553 config = &nic->config;
556 /* Allocation and initialization of TXDLs in FIFOs */
557 size = 0;
558 for (i = 0; i < config->tx_fifo_num; i++) {
559 size += config->tx_cfg[i].fifo_len;
561 if (size > MAX_AVAILABLE_TXDS) {
562 DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
563 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
564 return -EINVAL;
567 lst_size = (sizeof(struct TxD) * config->max_txds);
568 lst_per_page = PAGE_SIZE / lst_size;
570 for (i = 0; i < config->tx_fifo_num; i++) {
571 int fifo_len = config->tx_cfg[i].fifo_len;
572 int list_holder_size = fifo_len * sizeof(struct list_info_hold);
573 mac_control->fifos[i].list_info = kzalloc(list_holder_size,
574 GFP_KERNEL);
575 if (!mac_control->fifos[i].list_info) {
576 DBG_PRINT(INFO_DBG,
577 "Malloc failed for list_info\n");
578 return -ENOMEM;
580 mem_allocated += list_holder_size;
582 for (i = 0; i < config->tx_fifo_num; i++) {
583 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
584 lst_per_page);
585 mac_control->fifos[i].tx_curr_put_info.offset = 0;
586 mac_control->fifos[i].tx_curr_put_info.fifo_len =
587 config->tx_cfg[i].fifo_len - 1;
588 mac_control->fifos[i].tx_curr_get_info.offset = 0;
589 mac_control->fifos[i].tx_curr_get_info.fifo_len =
590 config->tx_cfg[i].fifo_len - 1;
591 mac_control->fifos[i].fifo_no = i;
592 mac_control->fifos[i].nic = nic;
593 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;
595 for (j = 0; j < page_num; j++) {
596 int k = 0;
597 dma_addr_t tmp_p;
598 void *tmp_v;
599 tmp_v = pci_alloc_consistent(nic->pdev,
600 PAGE_SIZE, &tmp_p);
601 if (!tmp_v) {
602 DBG_PRINT(INFO_DBG,
603 "pci_alloc_consistent ");
604 DBG_PRINT(INFO_DBG, "failed for TxDL\n");
605 return -ENOMEM;
607 /* If we got a zero DMA address (can happen on
608 * certain platforms like PPC), reallocate.
609 * Store the virtual address of the page we don't
610 * want; it will be freed later.
612 if (!tmp_p) {
613 mac_control->zerodma_virt_addr = tmp_v;
614 DBG_PRINT(INIT_DBG,
615 "%s: Zero DMA address for TxDL. ", dev->name);
616 DBG_PRINT(INIT_DBG,
617 "Virtual address %p\n", tmp_v);
618 tmp_v = pci_alloc_consistent(nic->pdev,
619 PAGE_SIZE, &tmp_p);
620 if (!tmp_v) {
621 DBG_PRINT(INFO_DBG,
622 "pci_alloc_consistent ");
623 DBG_PRINT(INFO_DBG, "failed for TxDL\n");
624 return -ENOMEM;
626 mem_allocated += PAGE_SIZE;
628 while (k < lst_per_page) {
629 int l = (j * lst_per_page) + k;
630 if (l == config->tx_cfg[i].fifo_len)
631 break;
632 mac_control->fifos[i].list_info[l].list_virt_addr =
633 tmp_v + (k * lst_size);
634 mac_control->fifos[i].list_info[l].list_phy_addr =
635 tmp_p + (k * lst_size);
636 k++;
641 nic->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
642 if (!nic->ufo_in_band_v)
643 return -ENOMEM;
644 mem_allocated += (size * sizeof(u64));
646 /* Allocation and initialization of RXDs in Rings */
647 size = 0;
648 for (i = 0; i < config->rx_ring_num; i++) {
649 if (config->rx_cfg[i].num_rxd %
650 (rxd_count[nic->rxd_mode] + 1)) {
651 DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
652 DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
654 DBG_PRINT(ERR_DBG, "RxDs per Block");
655 return FAILURE;
657 size += config->rx_cfg[i].num_rxd;
658 mac_control->rings[i].block_count =
659 config->rx_cfg[i].num_rxd /
660 (rxd_count[nic->rxd_mode] + 1 );
661 mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
662 mac_control->rings[i].block_count;
664 if (nic->rxd_mode == RXD_MODE_1)
665 size = (size * (sizeof(struct RxD1)));
666 else
667 size = (size * (sizeof(struct RxD3)));
669 for (i = 0; i < config->rx_ring_num; i++) {
670 mac_control->rings[i].rx_curr_get_info.block_index = 0;
671 mac_control->rings[i].rx_curr_get_info.offset = 0;
672 mac_control->rings[i].rx_curr_get_info.ring_len =
673 config->rx_cfg[i].num_rxd - 1;
674 mac_control->rings[i].rx_curr_put_info.block_index = 0;
675 mac_control->rings[i].rx_curr_put_info.offset = 0;
676 mac_control->rings[i].rx_curr_put_info.ring_len =
677 config->rx_cfg[i].num_rxd - 1;
678 mac_control->rings[i].nic = nic;
679 mac_control->rings[i].ring_no = i;
681 blk_cnt = config->rx_cfg[i].num_rxd /
682 (rxd_count[nic->rxd_mode] + 1);
683 /* Allocating all the Rx blocks */
684 for (j = 0; j < blk_cnt; j++) {
685 struct rx_block_info *rx_blocks;
686 int l;
688 rx_blocks = &mac_control->rings[i].rx_blocks[j];
689 size = SIZE_OF_BLOCK; /* size is always page size */
690 tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
691 &tmp_p_addr);
692 if (tmp_v_addr == NULL) {
694 * In case of failure, free_shared_mem()
695 * is called, which should free any
696 * memory that was allocated till the
697 * failure happened.
699 rx_blocks->block_virt_addr = tmp_v_addr;
700 return -ENOMEM;
702 mem_allocated += size;
703 memset(tmp_v_addr, 0, size);
704 rx_blocks->block_virt_addr = tmp_v_addr;
705 rx_blocks->block_dma_addr = tmp_p_addr;
706 rx_blocks->rxds = kmalloc(sizeof(struct rxd_info)*
707 rxd_count[nic->rxd_mode],
708 GFP_KERNEL);
709 if (!rx_blocks->rxds)
710 return -ENOMEM;
711 mem_allocated +=
712 (sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
713 for (l=0; l<rxd_count[nic->rxd_mode];l++) {
714 rx_blocks->rxds[l].virt_addr =
715 rx_blocks->block_virt_addr +
716 (rxd_size[nic->rxd_mode] * l);
717 rx_blocks->rxds[l].dma_addr =
718 rx_blocks->block_dma_addr +
719 (rxd_size[nic->rxd_mode] * l);
722 /* Interlinking all Rx Blocks */
723 for (j = 0; j < blk_cnt; j++) {
724 tmp_v_addr =
725 mac_control->rings[i].rx_blocks[j].block_virt_addr;
726 tmp_v_addr_next =
727 mac_control->rings[i].rx_blocks[(j + 1) %
728 blk_cnt].block_virt_addr;
729 tmp_p_addr =
730 mac_control->rings[i].rx_blocks[j].block_dma_addr;
731 tmp_p_addr_next =
732 mac_control->rings[i].rx_blocks[(j + 1) %
733 blk_cnt].block_dma_addr;
735 pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
736 pre_rxd_blk->reserved_2_pNext_RxD_block =
737 (unsigned long) tmp_v_addr_next;
738 pre_rxd_blk->pNext_RxD_Blk_physical =
739 (u64) tmp_p_addr_next;
742 if (nic->rxd_mode == RXD_MODE_3B) {
744 * Allocation of storage for buffer addresses in 2BUFF mode
745 * and the buffers as well.
747 for (i = 0; i < config->rx_ring_num; i++) {
748 blk_cnt = config->rx_cfg[i].num_rxd /
749 (rxd_count[nic->rxd_mode]+ 1);
750 mac_control->rings[i].ba =
751 kmalloc((sizeof(struct buffAdd *) * blk_cnt),
752 GFP_KERNEL);
753 if (!mac_control->rings[i].ba)
754 return -ENOMEM;
755 mem_allocated +=(sizeof(struct buffAdd *) * blk_cnt);
756 for (j = 0; j < blk_cnt; j++) {
757 int k = 0;
758 mac_control->rings[i].ba[j] =
759 kmalloc((sizeof(struct buffAdd) *
760 (rxd_count[nic->rxd_mode] + 1)),
761 GFP_KERNEL);
762 if (!mac_control->rings[i].ba[j])
763 return -ENOMEM;
764 mem_allocated += (sizeof(struct buffAdd) * \
765 (rxd_count[nic->rxd_mode] + 1));
766 while (k != rxd_count[nic->rxd_mode]) {
767 ba = &mac_control->rings[i].ba[j][k];
769 ba->ba_0_org = (void *) kmalloc
770 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
771 if (!ba->ba_0_org)
772 return -ENOMEM;
773 mem_allocated +=
774 (BUF0_LEN + ALIGN_SIZE);
775 tmp = (unsigned long)ba->ba_0_org;
776 tmp += ALIGN_SIZE;
777 tmp &= ~((unsigned long) ALIGN_SIZE);
778 ba->ba_0 = (void *) tmp;
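/* Editorial note: the add-then-mask sequence above rounds ba_0_org up to
 * the next (ALIGN_SIZE + 1)-byte boundary. It relies on ALIGN_SIZE being
 * of the form 2^n - 1 (e.g. 127), so adding it and clearing the low bits
 * yields an address aligned to 2^n. The same trick is repeated for ba_1
 * below.
 */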
780 ba->ba_1_org = (void *) kmalloc
781 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
782 if (!ba->ba_1_org)
783 return -ENOMEM;
784 mem_allocated
785 += (BUF1_LEN + ALIGN_SIZE);
786 tmp = (unsigned long) ba->ba_1_org;
787 tmp += ALIGN_SIZE;
788 tmp &= ~((unsigned long) ALIGN_SIZE);
789 ba->ba_1 = (void *) tmp;
790 k++;
796 /* Allocation and initialization of Statistics block */
797 size = sizeof(struct stat_block);
798 mac_control->stats_mem = pci_alloc_consistent
799 (nic->pdev, size, &mac_control->stats_mem_phy);
801 if (!mac_control->stats_mem) {
803 * In case of failure, free_shared_mem() is called, which
804 * should free any memory that was allocated till the
805 * failure happened.
807 return -ENOMEM;
809 mem_allocated += size;
810 mac_control->stats_mem_sz = size;
812 tmp_v_addr = mac_control->stats_mem;
813 mac_control->stats_info = (struct stat_block *) tmp_v_addr;
814 memset(tmp_v_addr, 0, size);
815 DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
816 (unsigned long long) tmp_p_addr);
817 mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
818 return SUCCESS;
822 * free_shared_mem - Free the allocated Memory
823 * @nic: Device private variable.
824 * Description: This function frees all memory allocated by
825 * init_shared_mem() and returns it to the kernel.
828 static void free_shared_mem(struct s2io_nic *nic)
830 int i, j, blk_cnt, size;
831 u32 ufo_size = 0;
832 void *tmp_v_addr;
833 dma_addr_t tmp_p_addr;
834 struct mac_info *mac_control;
835 struct config_param *config;
836 int lst_size, lst_per_page;
837 struct net_device *dev;
838 int page_num = 0;
840 if (!nic)
841 return;
843 dev = nic->dev;
845 mac_control = &nic->mac_control;
846 config = &nic->config;
848 lst_size = (sizeof(struct TxD) * config->max_txds);
849 lst_per_page = PAGE_SIZE / lst_size;
851 for (i = 0; i < config->tx_fifo_num; i++) {
852 ufo_size += config->tx_cfg[i].fifo_len;
853 page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
854 lst_per_page);
855 for (j = 0; j < page_num; j++) {
856 int mem_blks = (j * lst_per_page);
857 if (!mac_control->fifos[i].list_info)
858 return;
859 if (!mac_control->fifos[i].list_info[mem_blks].
860 list_virt_addr)
861 break;
862 pci_free_consistent(nic->pdev, PAGE_SIZE,
863 mac_control->fifos[i].
864 list_info[mem_blks].
865 list_virt_addr,
866 mac_control->fifos[i].
867 list_info[mem_blks].
868 list_phy_addr);
869 nic->mac_control.stats_info->sw_stat.mem_freed
870 += PAGE_SIZE;
872 /* If we got a zero DMA address during allocation,
873 * free the page now
875 if (mac_control->zerodma_virt_addr) {
876 pci_free_consistent(nic->pdev, PAGE_SIZE,
877 mac_control->zerodma_virt_addr,
878 (dma_addr_t)0);
879 DBG_PRINT(INIT_DBG,
880 "%s: Freeing TxDL with zero DMA addr. ",
881 dev->name);
882 DBG_PRINT(INIT_DBG, "Virtual address %p\n",
883 mac_control->zerodma_virt_addr);
884 nic->mac_control.stats_info->sw_stat.mem_freed
885 += PAGE_SIZE;
887 kfree(mac_control->fifos[i].list_info);
888 nic->mac_control.stats_info->sw_stat.mem_freed +=
889 (nic->config.tx_cfg[i].fifo_len *sizeof(struct list_info_hold));
892 size = SIZE_OF_BLOCK;
893 for (i = 0; i < config->rx_ring_num; i++) {
894 blk_cnt = mac_control->rings[i].block_count;
895 for (j = 0; j < blk_cnt; j++) {
896 tmp_v_addr = mac_control->rings[i].rx_blocks[j].
897 block_virt_addr;
898 tmp_p_addr = mac_control->rings[i].rx_blocks[j].
899 block_dma_addr;
900 if (tmp_v_addr == NULL)
901 break;
902 pci_free_consistent(nic->pdev, size,
903 tmp_v_addr, tmp_p_addr);
904 nic->mac_control.stats_info->sw_stat.mem_freed += size;
905 kfree(mac_control->rings[i].rx_blocks[j].rxds);
906 nic->mac_control.stats_info->sw_stat.mem_freed +=
907 ( sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
911 if (nic->rxd_mode == RXD_MODE_3B) {
912 /* Freeing buffer storage addresses in 2BUFF mode. */
913 for (i = 0; i < config->rx_ring_num; i++) {
914 blk_cnt = config->rx_cfg[i].num_rxd /
915 (rxd_count[nic->rxd_mode] + 1);
916 for (j = 0; j < blk_cnt; j++) {
917 int k = 0;
918 if (!mac_control->rings[i].ba[j])
919 continue;
920 while (k != rxd_count[nic->rxd_mode]) {
921 struct buffAdd *ba =
922 &mac_control->rings[i].ba[j][k];
923 kfree(ba->ba_0_org);
924 nic->mac_control.stats_info->sw_stat.\
925 mem_freed += (BUF0_LEN + ALIGN_SIZE);
926 kfree(ba->ba_1_org);
927 nic->mac_control.stats_info->sw_stat.\
928 mem_freed += (BUF1_LEN + ALIGN_SIZE);
929 k++;
931 kfree(mac_control->rings[i].ba[j]);
932 nic->mac_control.stats_info->sw_stat.mem_freed +=
933 (sizeof(struct buffAdd) *
934 (rxd_count[nic->rxd_mode] + 1));
936 kfree(mac_control->rings[i].ba);
937 nic->mac_control.stats_info->sw_stat.mem_freed +=
938 (sizeof(struct buffAdd *) * blk_cnt);
942 if (mac_control->stats_mem) {
943 pci_free_consistent(nic->pdev,
944 mac_control->stats_mem_sz,
945 mac_control->stats_mem,
946 mac_control->stats_mem_phy);
947 nic->mac_control.stats_info->sw_stat.mem_freed +=
948 mac_control->stats_mem_sz;
950 if (nic->ufo_in_band_v) {
951 kfree(nic->ufo_in_band_v);
952 nic->mac_control.stats_info->sw_stat.mem_freed
953 += (ufo_size * sizeof(u64));
958 * s2io_verify_pci_mode - Return the PCI/PCI-X bus mode of the adapter, or -1 if unknown.
961 static int s2io_verify_pci_mode(struct s2io_nic *nic)
963 struct XENA_dev_config __iomem *bar0 = nic->bar0;
964 register u64 val64 = 0;
965 int mode;
967 val64 = readq(&bar0->pci_mode);
968 mode = (u8)GET_PCI_MODE(val64);
970 if ( val64 & PCI_MODE_UNKNOWN_MODE)
971 return -1; /* Unknown PCI mode */
972 return mode;
975 #define NEC_VENID 0x1033
976 #define NEC_DEVID 0x0125
977 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
979 struct pci_dev *tdev = NULL;
980 while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
981 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
982 if (tdev->bus == s2io_pdev->bus->parent)
983 pci_dev_put(tdev);
984 return 1;
987 return 0;
990 static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
992 * s2io_print_pci_mode - Print (and return) the PCI/PCI-X bus mode of the adapter.
994 static int s2io_print_pci_mode(struct s2io_nic *nic)
996 struct XENA_dev_config __iomem *bar0 = nic->bar0;
997 register u64 val64 = 0;
998 int mode;
999 struct config_param *config = &nic->config;
1001 val64 = readq(&bar0->pci_mode);
1002 mode = (u8)GET_PCI_MODE(val64);
1004 if ( val64 & PCI_MODE_UNKNOWN_MODE)
1005 return -1; /* Unknown PCI mode */
1007 config->bus_speed = bus_speed[mode];
1009 if (s2io_on_nec_bridge(nic->pdev)) {
1010 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
1011 nic->dev->name);
1012 return mode;
1015 if (val64 & PCI_MODE_32_BITS) {
1016 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
1017 } else {
1018 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
1021 switch(mode) {
1022 case PCI_MODE_PCI_33:
1023 DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
1024 break;
1025 case PCI_MODE_PCI_66:
1026 DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
1027 break;
1028 case PCI_MODE_PCIX_M1_66:
1029 DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
1030 break;
1031 case PCI_MODE_PCIX_M1_100:
1032 DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
1033 break;
1034 case PCI_MODE_PCIX_M1_133:
1035 DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
1036 break;
1037 case PCI_MODE_PCIX_M2_66:
1038 DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
1039 break;
1040 case PCI_MODE_PCIX_M2_100:
1041 DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
1042 break;
1043 case PCI_MODE_PCIX_M2_133:
1044 DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
1045 break;
1046 default:
1047 return -1; /* Unsupported bus speed */
1050 return mode;
1054 * init_nic - Initialization of hardware
1055 * @nic: device private variable
1056 * Description: The function sequentially configures every block
1057 * of the H/W from their reset values.
1058 * Return Value: SUCCESS on success and
1059 * '-1' on failure (endian settings incorrect).
1062 static int init_nic(struct s2io_nic *nic)
1064 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1065 struct net_device *dev = nic->dev;
1066 register u64 val64 = 0;
1067 void __iomem *add;
1068 u32 time;
1069 int i, j;
1070 struct mac_info *mac_control;
1071 struct config_param *config;
1072 int dtx_cnt = 0;
1073 unsigned long long mem_share;
1074 int mem_size;
1076 mac_control = &nic->mac_control;
1077 config = &nic->config;
1079 /* Set the swapper control on the card */
1080 if(s2io_set_swapper(nic)) {
1081 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
1082 return -1;
1086 * Herc requires EOI to be removed from reset before XGXS, so..
1088 if (nic->device_type & XFRAME_II_DEVICE) {
1089 val64 = 0xA500000000ULL;
1090 writeq(val64, &bar0->sw_reset);
1091 msleep(500);
1092 val64 = readq(&bar0->sw_reset);
1095 /* Remove XGXS from reset state */
1096 val64 = 0;
1097 writeq(val64, &bar0->sw_reset);
1098 msleep(500);
1099 val64 = readq(&bar0->sw_reset);
1101 /* Enable Receiving broadcasts */
1102 add = &bar0->mac_cfg;
1103 val64 = readq(&bar0->mac_cfg);
1104 val64 |= MAC_RMAC_BCAST_ENABLE;
1105 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1106 writel((u32) val64, add);
1107 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1108 writel((u32) (val64 >> 32), (add + 4));
1110 /* Read registers in all blocks */
1111 val64 = readq(&bar0->mac_int_mask);
1112 val64 = readq(&bar0->mc_int_mask);
1113 val64 = readq(&bar0->xgxs_int_mask);
1115 /* Set MTU */
1116 val64 = dev->mtu;
1117 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1119 if (nic->device_type & XFRAME_II_DEVICE) {
1120 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1121 SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1122 &bar0->dtx_control, UF);
1123 if (dtx_cnt & 0x1)
1124 msleep(1); /* Necessary!! */
1125 dtx_cnt++;
1127 } else {
1128 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1129 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1130 &bar0->dtx_control, UF);
1131 val64 = readq(&bar0->dtx_control);
1132 dtx_cnt++;
1136 /* Tx DMA Initialization */
1137 val64 = 0;
1138 writeq(val64, &bar0->tx_fifo_partition_0);
1139 writeq(val64, &bar0->tx_fifo_partition_1);
1140 writeq(val64, &bar0->tx_fifo_partition_2);
1141 writeq(val64, &bar0->tx_fifo_partition_3);
1144 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1145 val64 |=
1146 vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
1147 13) | vBIT(config->tx_cfg[i].fifo_priority,
1148 ((i * 32) + 5), 3);
1150 if (i == (config->tx_fifo_num - 1)) {
1151 if (i % 2 == 0)
1152 i++;
1155 switch (i) {
1156 case 1:
1157 writeq(val64, &bar0->tx_fifo_partition_0);
1158 val64 = 0;
1159 break;
1160 case 3:
1161 writeq(val64, &bar0->tx_fifo_partition_1);
1162 val64 = 0;
1163 break;
1164 case 5:
1165 writeq(val64, &bar0->tx_fifo_partition_2);
1166 val64 = 0;
1167 break;
1168 case 7:
1169 writeq(val64, &bar0->tx_fifo_partition_3);
1170 break;
1175 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1176 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1178 if ((nic->device_type == XFRAME_I_DEVICE) &&
1179 (nic->pdev->revision < 4))
1180 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1182 val64 = readq(&bar0->tx_fifo_partition_0);
1183 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1184 &bar0->tx_fifo_partition_0, (unsigned long long) val64);
1187 * Initialization of Tx_PA_CONFIG register to ignore packet
1188 * integrity checking.
1190 val64 = readq(&bar0->tx_pa_cfg);
1191 val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
1192 TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
1193 writeq(val64, &bar0->tx_pa_cfg);
1195 /* Rx DMA initialization. */
1196 val64 = 0;
1197 for (i = 0; i < config->rx_ring_num; i++) {
1198 val64 |=
1199 vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
1202 writeq(val64, &bar0->rx_queue_priority);
1205 * Allocating equal share of memory to all the
1206 * configured Rings.
1208 val64 = 0;
1209 if (nic->device_type & XFRAME_II_DEVICE)
1210 mem_size = 32;
1211 else
1212 mem_size = 64;
1214 for (i = 0; i < config->rx_ring_num; i++) {
1215 switch (i) {
1216 case 0:
1217 mem_share = (mem_size / config->rx_ring_num +
1218 mem_size % config->rx_ring_num);
1219 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1220 continue;
1221 case 1:
1222 mem_share = (mem_size / config->rx_ring_num);
1223 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1224 continue;
1225 case 2:
1226 mem_share = (mem_size / config->rx_ring_num);
1227 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1228 continue;
1229 case 3:
1230 mem_share = (mem_size / config->rx_ring_num);
1231 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1232 continue;
1233 case 4:
1234 mem_share = (mem_size / config->rx_ring_num);
1235 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1236 continue;
1237 case 5:
1238 mem_share = (mem_size / config->rx_ring_num);
1239 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1240 continue;
1241 case 6:
1242 mem_share = (mem_size / config->rx_ring_num);
1243 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1244 continue;
1245 case 7:
1246 mem_share = (mem_size / config->rx_ring_num);
1247 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1248 continue;
1251 writeq(val64, &bar0->rx_queue_cfg);
1254 * Filling Tx round robin registers
1255 * as per the number of FIFOs
1257 switch (config->tx_fifo_num) {
1258 case 1:
1259 val64 = 0x0000000000000000ULL;
1260 writeq(val64, &bar0->tx_w_round_robin_0);
1261 writeq(val64, &bar0->tx_w_round_robin_1);
1262 writeq(val64, &bar0->tx_w_round_robin_2);
1263 writeq(val64, &bar0->tx_w_round_robin_3);
1264 writeq(val64, &bar0->tx_w_round_robin_4);
1265 break;
1266 case 2:
1267 val64 = 0x0000010000010000ULL;
1268 writeq(val64, &bar0->tx_w_round_robin_0);
1269 val64 = 0x0100000100000100ULL;
1270 writeq(val64, &bar0->tx_w_round_robin_1);
1271 val64 = 0x0001000001000001ULL;
1272 writeq(val64, &bar0->tx_w_round_robin_2);
1273 val64 = 0x0000010000010000ULL;
1274 writeq(val64, &bar0->tx_w_round_robin_3);
1275 val64 = 0x0100000000000000ULL;
1276 writeq(val64, &bar0->tx_w_round_robin_4);
1277 break;
1278 case 3:
1279 val64 = 0x0001000102000001ULL;
1280 writeq(val64, &bar0->tx_w_round_robin_0);
1281 val64 = 0x0001020000010001ULL;
1282 writeq(val64, &bar0->tx_w_round_robin_1);
1283 val64 = 0x0200000100010200ULL;
1284 writeq(val64, &bar0->tx_w_round_robin_2);
1285 val64 = 0x0001000102000001ULL;
1286 writeq(val64, &bar0->tx_w_round_robin_3);
1287 val64 = 0x0001020000000000ULL;
1288 writeq(val64, &bar0->tx_w_round_robin_4);
1289 break;
1290 case 4:
1291 val64 = 0x0001020300010200ULL;
1292 writeq(val64, &bar0->tx_w_round_robin_0);
1293 val64 = 0x0100000102030001ULL;
1294 writeq(val64, &bar0->tx_w_round_robin_1);
1295 val64 = 0x0200010000010203ULL;
1296 writeq(val64, &bar0->tx_w_round_robin_2);
1297 val64 = 0x0001020001000001ULL;
1298 writeq(val64, &bar0->tx_w_round_robin_3);
1299 val64 = 0x0203000100000000ULL;
1300 writeq(val64, &bar0->tx_w_round_robin_4);
1301 break;
1302 case 5:
1303 val64 = 0x0001000203000102ULL;
1304 writeq(val64, &bar0->tx_w_round_robin_0);
1305 val64 = 0x0001020001030004ULL;
1306 writeq(val64, &bar0->tx_w_round_robin_1);
1307 val64 = 0x0001000203000102ULL;
1308 writeq(val64, &bar0->tx_w_round_robin_2);
1309 val64 = 0x0001020001030004ULL;
1310 writeq(val64, &bar0->tx_w_round_robin_3);
1311 val64 = 0x0001000000000000ULL;
1312 writeq(val64, &bar0->tx_w_round_robin_4);
1313 break;
1314 case 6:
1315 val64 = 0x0001020304000102ULL;
1316 writeq(val64, &bar0->tx_w_round_robin_0);
1317 val64 = 0x0304050001020001ULL;
1318 writeq(val64, &bar0->tx_w_round_robin_1);
1319 val64 = 0x0203000100000102ULL;
1320 writeq(val64, &bar0->tx_w_round_robin_2);
1321 val64 = 0x0304000102030405ULL;
1322 writeq(val64, &bar0->tx_w_round_robin_3);
1323 val64 = 0x0001000200000000ULL;
1324 writeq(val64, &bar0->tx_w_round_robin_4);
1325 break;
1326 case 7:
1327 val64 = 0x0001020001020300ULL;
1328 writeq(val64, &bar0->tx_w_round_robin_0);
1329 val64 = 0x0102030400010203ULL;
1330 writeq(val64, &bar0->tx_w_round_robin_1);
1331 val64 = 0x0405060001020001ULL;
1332 writeq(val64, &bar0->tx_w_round_robin_2);
1333 val64 = 0x0304050000010200ULL;
1334 writeq(val64, &bar0->tx_w_round_robin_3);
1335 val64 = 0x0102030000000000ULL;
1336 writeq(val64, &bar0->tx_w_round_robin_4);
1337 break;
1338 case 8:
1339 val64 = 0x0001020300040105ULL;
1340 writeq(val64, &bar0->tx_w_round_robin_0);
1341 val64 = 0x0200030106000204ULL;
1342 writeq(val64, &bar0->tx_w_round_robin_1);
1343 val64 = 0x0103000502010007ULL;
1344 writeq(val64, &bar0->tx_w_round_robin_2);
1345 val64 = 0x0304010002060500ULL;
1346 writeq(val64, &bar0->tx_w_round_robin_3);
1347 val64 = 0x0103020400000000ULL;
1348 writeq(val64, &bar0->tx_w_round_robin_4);
1349 break;
1352 /* Enable all configured Tx FIFO partitions */
1353 val64 = readq(&bar0->tx_fifo_partition_0);
1354 val64 |= (TX_FIFO_PARTITION_EN);
1355 writeq(val64, &bar0->tx_fifo_partition_0);
1357 /* Filling the Rx round robin registers as per the
1358 * number of Rings and steering based on QoS.
1360 switch (config->rx_ring_num) {
1361 case 1:
1362 val64 = 0x8080808080808080ULL;
1363 writeq(val64, &bar0->rts_qos_steering);
1364 break;
1365 case 2:
1366 val64 = 0x0000010000010000ULL;
1367 writeq(val64, &bar0->rx_w_round_robin_0);
1368 val64 = 0x0100000100000100ULL;
1369 writeq(val64, &bar0->rx_w_round_robin_1);
1370 val64 = 0x0001000001000001ULL;
1371 writeq(val64, &bar0->rx_w_round_robin_2);
1372 val64 = 0x0000010000010000ULL;
1373 writeq(val64, &bar0->rx_w_round_robin_3);
1374 val64 = 0x0100000000000000ULL;
1375 writeq(val64, &bar0->rx_w_round_robin_4);
1377 val64 = 0x8080808040404040ULL;
1378 writeq(val64, &bar0->rts_qos_steering);
1379 break;
1380 case 3:
1381 val64 = 0x0001000102000001ULL;
1382 writeq(val64, &bar0->rx_w_round_robin_0);
1383 val64 = 0x0001020000010001ULL;
1384 writeq(val64, &bar0->rx_w_round_robin_1);
1385 val64 = 0x0200000100010200ULL;
1386 writeq(val64, &bar0->rx_w_round_robin_2);
1387 val64 = 0x0001000102000001ULL;
1388 writeq(val64, &bar0->rx_w_round_robin_3);
1389 val64 = 0x0001020000000000ULL;
1390 writeq(val64, &bar0->rx_w_round_robin_4);
1392 val64 = 0x8080804040402020ULL;
1393 writeq(val64, &bar0->rts_qos_steering);
1394 break;
1395 case 4:
1396 val64 = 0x0001020300010200ULL;
1397 writeq(val64, &bar0->rx_w_round_robin_0);
1398 val64 = 0x0100000102030001ULL;
1399 writeq(val64, &bar0->rx_w_round_robin_1);
1400 val64 = 0x0200010000010203ULL;
1401 writeq(val64, &bar0->rx_w_round_robin_2);
1402 val64 = 0x0001020001000001ULL;
1403 writeq(val64, &bar0->rx_w_round_robin_3);
1404 val64 = 0x0203000100000000ULL;
1405 writeq(val64, &bar0->rx_w_round_robin_4);
1407 val64 = 0x8080404020201010ULL;
1408 writeq(val64, &bar0->rts_qos_steering);
1409 break;
1410 case 5:
1411 val64 = 0x0001000203000102ULL;
1412 writeq(val64, &bar0->rx_w_round_robin_0);
1413 val64 = 0x0001020001030004ULL;
1414 writeq(val64, &bar0->rx_w_round_robin_1);
1415 val64 = 0x0001000203000102ULL;
1416 writeq(val64, &bar0->rx_w_round_robin_2);
1417 val64 = 0x0001020001030004ULL;
1418 writeq(val64, &bar0->rx_w_round_robin_3);
1419 val64 = 0x0001000000000000ULL;
1420 writeq(val64, &bar0->rx_w_round_robin_4);
1422 val64 = 0x8080404020201008ULL;
1423 writeq(val64, &bar0->rts_qos_steering);
1424 break;
1425 case 6:
1426 val64 = 0x0001020304000102ULL;
1427 writeq(val64, &bar0->rx_w_round_robin_0);
1428 val64 = 0x0304050001020001ULL;
1429 writeq(val64, &bar0->rx_w_round_robin_1);
1430 val64 = 0x0203000100000102ULL;
1431 writeq(val64, &bar0->rx_w_round_robin_2);
1432 val64 = 0x0304000102030405ULL;
1433 writeq(val64, &bar0->rx_w_round_robin_3);
1434 val64 = 0x0001000200000000ULL;
1435 writeq(val64, &bar0->rx_w_round_robin_4);
1437 val64 = 0x8080404020100804ULL;
1438 writeq(val64, &bar0->rts_qos_steering);
1439 break;
1440 case 7:
1441 val64 = 0x0001020001020300ULL;
1442 writeq(val64, &bar0->rx_w_round_robin_0);
1443 val64 = 0x0102030400010203ULL;
1444 writeq(val64, &bar0->rx_w_round_robin_1);
1445 val64 = 0x0405060001020001ULL;
1446 writeq(val64, &bar0->rx_w_round_robin_2);
1447 val64 = 0x0304050000010200ULL;
1448 writeq(val64, &bar0->rx_w_round_robin_3);
1449 val64 = 0x0102030000000000ULL;
1450 writeq(val64, &bar0->rx_w_round_robin_4);
1452 val64 = 0x8080402010080402ULL;
1453 writeq(val64, &bar0->rts_qos_steering);
1454 break;
1455 case 8:
1456 val64 = 0x0001020300040105ULL;
1457 writeq(val64, &bar0->rx_w_round_robin_0);
1458 val64 = 0x0200030106000204ULL;
1459 writeq(val64, &bar0->rx_w_round_robin_1);
1460 val64 = 0x0103000502010007ULL;
1461 writeq(val64, &bar0->rx_w_round_robin_2);
1462 val64 = 0x0304010002060500ULL;
1463 writeq(val64, &bar0->rx_w_round_robin_3);
1464 val64 = 0x0103020400000000ULL;
1465 writeq(val64, &bar0->rx_w_round_robin_4);
1467 val64 = 0x8040201008040201ULL;
1468 writeq(val64, &bar0->rts_qos_steering);
1469 break;
1472 /* UDP Fix */
1473 val64 = 0;
1474 for (i = 0; i < 8; i++)
1475 writeq(val64, &bar0->rts_frm_len_n[i]);
1477 /* Set the default rts frame length for the rings configured */
1478 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1479 for (i = 0 ; i < config->rx_ring_num ; i++)
1480 writeq(val64, &bar0->rts_frm_len_n[i]);
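/* Editorial note: the "+22" presumably covers the 14-byte Ethernet
 * header, a 4-byte VLAN tag and the 4-byte FCS on top of the IP MTU.
 */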
1482 /* Set the frame length for the configured rings
1483 * desired by the user
1485 for (i = 0; i < config->rx_ring_num; i++) {
1486 /* If rts_frm_len[i] == 0 then it is assumed that the user has
1487 * not specified frame length steering.
1488 * If the user provides the frame length then program
1489 * the rts_frm_len register for those values, or else
1490 * leave it as it is.
1492 if (rts_frm_len[i] != 0) {
1493 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1494 &bar0->rts_frm_len_n[i]);
1498 /* Disable differentiated services steering logic */
1499 for (i = 0; i < 64; i++) {
1500 if (rts_ds_steer(nic, i, 0) == FAILURE) {
1501 DBG_PRINT(ERR_DBG, "%s: failed rts ds steering",
1502 dev->name);
1503 DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i);
1504 return FAILURE;
1508 /* Program statistics memory */
1509 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1511 if (nic->device_type == XFRAME_II_DEVICE) {
1512 val64 = STAT_BC(0x320);
1513 writeq(val64, &bar0->stat_byte_cnt);
1517 * Initializing the sampling rate for the device to calculate the
1518 * bandwidth utilization.
1520 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1521 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1522 writeq(val64, &bar0->mac_link_util);
1526 * Initializing the Transmit and Receive Traffic Interrupt
1527 * Scheme.
1530 * TTI Initialization. Default Tx timer gets us about
1531 * 250 interrupts per sec. Continuous interrupts are enabled
1532 * by default.
1534 if (nic->device_type == XFRAME_II_DEVICE) {
1535 int count = (nic->config.bus_speed * 125)/2;
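/* Editorial note: e.g. on a 266MHz PCI-X bus this gives
 * 266 * 125 / 2 = 16625 timer ticks, scaled against the ~250
 * interrupts/sec noted above.
 */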
1536 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1537 } else {
1539 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1541 val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1542 TTI_DATA1_MEM_TX_URNG_B(0x10) |
1543 TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1544 if (use_continuous_tx_intrs)
1545 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1546 writeq(val64, &bar0->tti_data1_mem);
1548 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1549 TTI_DATA2_MEM_TX_UFC_B(0x20) |
1550 TTI_DATA2_MEM_TX_UFC_C(0x40) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1551 writeq(val64, &bar0->tti_data2_mem);
1553 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1554 writeq(val64, &bar0->tti_command_mem);
1557 * Once the operation completes, the Strobe bit of the command
1558 * register will be reset. We poll for this particular condition.
1559 * We wait for a maximum of 500ms for the operation to complete;
1560 * if it's not complete by then we return an error.
1562 time = 0;
1563 while (TRUE) {
1564 val64 = readq(&bar0->tti_command_mem);
1565 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1566 break;
1568 if (time > 10) {
1569 DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1570 dev->name);
1571 return -1;
1573 msleep(50);
1574 time++;
1577 /* RTI Initialization */
1578 if (nic->device_type == XFRAME_II_DEVICE) {
1580 * Programmed to generate approximately 500 interrupts per
1581 * second
1583 int count = (nic->config.bus_speed * 125)/4;
1584 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1585 } else
1586 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1587 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1588 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1589 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1591 writeq(val64, &bar0->rti_data1_mem);
1593 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1594 RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1595 if (nic->config.intr_type == MSI_X)
1596 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1597 RTI_DATA2_MEM_RX_UFC_D(0x40));
1598 else
1599 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1600 RTI_DATA2_MEM_RX_UFC_D(0x80));
1601 writeq(val64, &bar0->rti_data2_mem);
1603 for (i = 0; i < config->rx_ring_num; i++) {
1604 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1605 | RTI_CMD_MEM_OFFSET(i);
1606 writeq(val64, &bar0->rti_command_mem);
1609 * Once the operation completes, the Strobe bit of the
1610 * command register will be reset. We poll for this
1611 * particular condition. We wait for a maximum of 500ms
1612 * for the operation to complete, if it's not complete
1613 * by then we return error.
1615 time = 0;
1616 while (TRUE) {
1617 val64 = readq(&bar0->rti_command_mem);
1618 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
1619 break;
1621 if (time > 10) {
1622 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1623 dev->name);
1624 return -1;
1626 time++;
1627 msleep(50);
1632 * Initializing proper values as pause thresholds into all
1633 * the 8 queues on the Rx side.
1635 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1636 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1638 /* Disable RMAC PAD STRIPPING */
1639 add = &bar0->mac_cfg;
1640 val64 = readq(&bar0->mac_cfg);
1641 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1642 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1643 writel((u32) (val64), add);
1644 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1645 writel((u32) (val64 >> 32), (add + 4));
1646 val64 = readq(&bar0->mac_cfg);
1648 /* Enable FCS stripping by adapter */
1649 add = &bar0->mac_cfg;
1650 val64 = readq(&bar0->mac_cfg);
1651 val64 |= MAC_CFG_RMAC_STRIP_FCS;
1652 if (nic->device_type == XFRAME_II_DEVICE)
1653 writeq(val64, &bar0->mac_cfg);
1654 else {
1655 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1656 writel((u32) (val64), add);
1657 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1658 writel((u32) (val64 >> 32), (add + 4));
1662 * Set the time value to be inserted in the pause frame
1663 * generated by xena.
1665 val64 = readq(&bar0->rmac_pause_cfg);
1666 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1667 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1668 writeq(val64, &bar0->rmac_pause_cfg);
1671 * Set the threshold limit for generating the pause frame.
1672 * If the amount of data in any queue exceeds the ratio
1673 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256,
1674 * a pause frame is generated.
1676 val64 = 0;
1677 for (i = 0; i < 4; i++) {
1678 val64 |=
1679 (((u64) 0xFF00 | nic->mac_control.
1680 mc_pause_threshold_q0q3)
1681 << (i * 2 * 8));
1683 writeq(val64, &bar0->mc_pause_thresh_q0q3);
1685 val64 = 0;
1686 for (i = 0; i < 4; i++) {
1687 val64 |=
1688 (((u64) 0xFF00 | nic->mac_control.
1689 mc_pause_threshold_q4q7)
1690 << (i * 2 * 8));
1692 writeq(val64, &bar0->mc_pause_thresh_q4q7);
1695 * TxDMA will stop issuing read requests if the number of read
1696 * splits has exceeded the limit set by shared_splits.
1698 val64 = readq(&bar0->pic_control);
1699 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1700 writeq(val64, &bar0->pic_control);
1702 if (nic->config.bus_speed == 266) {
1703 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1704 writeq(0x0, &bar0->read_retry_delay);
1705 writeq(0x0, &bar0->write_retry_delay);
1709 * Programming the Herc to split every write transaction
1710 * that does not start on an ADB to reduce disconnects.
1712 if (nic->device_type == XFRAME_II_DEVICE) {
1713 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1714 MISC_LINK_STABILITY_PRD(3);
1715 writeq(val64, &bar0->misc_control);
1716 val64 = readq(&bar0->pic_control2);
1717 val64 &= ~(BIT(13)|BIT(14)|BIT(15));
1718 writeq(val64, &bar0->pic_control2);
1720 if (strstr(nic->product_name, "CX4")) {
1721 val64 = TMAC_AVG_IPG(0x17);
1722 writeq(val64, &bar0->tmac_avg_ipg);
1725 return SUCCESS;
1727 #define LINK_UP_DOWN_INTERRUPT 1
1728 #define MAC_RMAC_ERR_TIMER 2
1730 static int s2io_link_fault_indication(struct s2io_nic *nic)
1732 if (nic->config.intr_type != INTA)
1733 return MAC_RMAC_ERR_TIMER;
1734 if (nic->device_type == XFRAME_II_DEVICE)
1735 return LINK_UP_DOWN_INTERRUPT;
1736 else
1737 return MAC_RMAC_ERR_TIMER;
1741 * do_s2io_write_bits - update alarm bits in alarm register
1742 * @value: alarm bits
1743 * @flag: interrupt status
1744 * @addr: address value
1745 * Description: update alarm bits in alarm register
1746 * Return Value:
1747 * NONE.
1749 static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1751 u64 temp64;
1753 temp64 = readq(addr);
1755 if(flag == ENABLE_INTRS)
1756 temp64 &= ~((u64) value);
1757 else
1758 temp64 |= ((u64) value);
1759 writeq(temp64, addr);
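/*
 * Usage sketch (editorial): clearing bits unmasks (enables) the
 * corresponding alarms, while setting them masks (disables) them, e.g.:
 *
 *   do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM,
 *                      ENABLE_INTRS, &bar0->pfc_err_mask);
 *
 * which mirrors the calls made from en_dis_err_alarms() below.
 */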
1762 void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
1764 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1765 register u64 gen_int_mask = 0;
1767 if (mask & TX_DMA_INTR) {
1769 gen_int_mask |= TXDMA_INT_M;
1771 do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
1772 TXDMA_PCC_INT | TXDMA_TTI_INT |
1773 TXDMA_LSO_INT | TXDMA_TPA_INT |
1774 TXDMA_SM_INT, flag, &bar0->txdma_int_mask);
1776 do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
1777 PFC_MISC_0_ERR | PFC_MISC_1_ERR |
1778 PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
1779 &bar0->pfc_err_mask);
1781 do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
1782 TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
1783 TDA_PCIX_ERR, flag, &bar0->tda_err_mask);
1785 do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
1786 PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
1787 PCC_N_SERR | PCC_6_COF_OV_ERR |
1788 PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
1789 PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
1790 PCC_TXB_ECC_SG_ERR, flag, &bar0->pcc_err_mask);
1792 do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
1793 TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);
1795 do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
1796 LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
1797 LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
1798 flag, &bar0->lso_err_mask);
1800 do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
1801 flag, &bar0->tpa_err_mask);
1803 do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
1807 if (mask & TX_MAC_INTR) {
1808 gen_int_mask |= TXMAC_INT_M;
1809 do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
1810 &bar0->mac_int_mask);
1811 do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
1812 TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
1813 TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
1814 flag, &bar0->mac_tmac_err_mask);
1817 if (mask & TX_XGXS_INTR) {
1818 gen_int_mask |= TXXGXS_INT_M;
1819 do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
1820 &bar0->xgxs_int_mask);
1821 do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
1822 TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
1823 flag, &bar0->xgxs_txgxs_err_mask);
1826 if (mask & RX_DMA_INTR) {
1827 gen_int_mask |= RXDMA_INT_M;
1828 do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
1829 RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
1830 flag, &bar0->rxdma_int_mask);
1831 do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
1832 RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
1833 RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
1834 RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
1835 do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
1836 PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
1837 PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
1838 &bar0->prc_pcix_err_mask);
1839 do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
1840 RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
1841 &bar0->rpa_err_mask);
1842 do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
1843 RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
1844 RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
1845 RDA_FRM_ECC_SG_ERR | RDA_MISC_ERR|RDA_PCIX_ERR,
1846 flag, &bar0->rda_err_mask);
1847 do_s2io_write_bits(RTI_SM_ERR_ALARM |
1848 RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
1849 flag, &bar0->rti_err_mask);
1852 if (mask & RX_MAC_INTR) {
1853 gen_int_mask |= RXMAC_INT_M;
1854 do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
1855 &bar0->mac_int_mask);
1856 do_s2io_write_bits(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
1857 RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
1858 RMAC_DOUBLE_ECC_ERR |
1859 RMAC_LINK_STATE_CHANGE_INT,
1860 flag, &bar0->mac_rmac_err_mask);
1863 if (mask & RX_XGXS_INTR)
1865 gen_int_mask |= RXXGXS_INT_M;
1866 do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
1867 &bar0->xgxs_int_mask);
1868 do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
1869 &bar0->xgxs_rxgxs_err_mask);
1872 if (mask & MC_INTR) {
1873 gen_int_mask |= MC_INT_M;
1874 do_s2io_write_bits(MC_INT_MASK_MC_INT, flag, &bar0->mc_int_mask);
1875 do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
1876 MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
1877 &bar0->mc_err_mask);
1879 nic->general_int_mask = gen_int_mask;
1881 /* Remove this line when alarm interrupts are enabled */
1882 nic->general_int_mask = 0;
1885 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1886 * @nic: device private variable,
1887 * @mask: A mask indicating which Intr block must be modified and,
1888 * @flag: A flag indicating whether to enable or disable the Intrs.
1889 * Description: This function will either disable or enable the interrupts
1890 * depending on the flag argument. The mask argument can be used to
1891 * enable/disable any Intr block.
1892 * Return Value: NONE.
1895 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1897 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1898 register u64 temp64 = 0, intr_mask = 0;
1900 intr_mask = nic->general_int_mask;
1902 /* Top level interrupt classification */
1903 /* PIC Interrupts */
1904 if (mask & TX_PIC_INTR) {
1905 /* Enable PIC Intrs in the general intr mask register */
1906 intr_mask |= TXPIC_INT_M;
1907 if (flag == ENABLE_INTRS) {
1909 * If this is a Hercules adapter, enable the GPIO link
1910 * interrupt; otherwise disable all PCIX, Flash, MDIO,
1911 * IIC and GPIO interrupts for now.
1912 * TODO
1914 if (s2io_link_fault_indication(nic) ==
1915 LINK_UP_DOWN_INTERRUPT ) {
1916 do_s2io_write_bits(PIC_INT_GPIO, flag,
1917 &bar0->pic_int_mask);
1918 do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
1919 &bar0->gpio_int_mask);
1920 } else
1921 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1922 } else if (flag == DISABLE_INTRS) {
1924 * Disable PIC Intrs in the general
1925 * intr mask register
1927 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1931 /* Tx traffic interrupts */
1932 if (mask & TX_TRAFFIC_INTR) {
1933 intr_mask |= TXTRAFFIC_INT_M;
1934 if (flag == ENABLE_INTRS) {
1936 * Enable all the Tx side interrupts
1937 * writing 0 Enables all 64 TX interrupt levels
1939 writeq(0x0, &bar0->tx_traffic_mask);
1940 } else if (flag == DISABLE_INTRS) {
1942 * Disable Tx Traffic Intrs in the general intr mask
1943 * register.
1945 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1949 /* Rx traffic interrupts */
1950 if (mask & RX_TRAFFIC_INTR) {
1951 intr_mask |= RXTRAFFIC_INT_M;
1952 if (flag == ENABLE_INTRS) {
1953 /* writing 0 Enables all 8 RX interrupt levels */
1954 writeq(0x0, &bar0->rx_traffic_mask);
1955 } else if (flag == DISABLE_INTRS) {
1957 * Disable Rx Traffic Intrs in the general intr mask
1958 * register.
1960 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1964 temp64 = readq(&bar0->general_int_mask);
1965 if (flag == ENABLE_INTRS)
1966 temp64 &= ~((u64) intr_mask);
1967 else
1968 temp64 = DISABLE_ALL_INTRS;
1969 writeq(temp64, &bar0->general_int_mask);
1971 nic->general_int_mask = readq(&bar0->general_int_mask);
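/*
 * Usage sketch (illustrative only): any combination of the *_INTR mask
 * bits may be passed in one call, as stop_nic() below does when it
 * tears the device down:
 *
 *	en_dis_able_nic_intrs(nic, TX_TRAFFIC_INTR | RX_TRAFFIC_INTR,
 *			      DISABLE_INTRS);
 */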
1975 * verify_pcc_quiescent - Checks for PCC quiescent state
1976 * Return: 1 if PCC is quiescent
1977 * 0 if PCC is not quiescent
1979 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
1981 int ret = 0, herc;
1982 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1983 u64 val64 = readq(&bar0->adapter_status);
1985 herc = (sp->device_type == XFRAME_II_DEVICE);
1987 if (flag == FALSE) {
1988 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
1989 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
1990 ret = 1;
1991 } else {
1992 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
1993 ret = 1;
1995 } else {
1996 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
1997 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1998 ADAPTER_STATUS_RMAC_PCC_IDLE))
1999 ret = 1;
2000 } else {
2001 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
2002 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2003 ret = 1;
2007 return ret;
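/*
 * Note: the flag argument mirrors the expected adapter-enable state.
 * With flag FALSE the PCC counts as quiescent while its IDLE bits are
 * still clear; with flag TRUE it is quiescent once they are set.
 * Xframe I adapters below revision 4 expose only the PCC_FOUR_IDLE
 * subset, hence the two register views above.
 */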
2010 * verify_xena_quiescence - Checks whether the H/W is ready
2011 * Description: Returns whether the H/W is ready to go or not. Depending
2012 * on whether the adapter enable bit was written or not, the comparison
2013 * differs and the calling function passes the input argument flag to
2014 * indicate this.
2015 * Return: 1 if Xena is quiescent
2016 * 0 if Xena is not quiescent
2019 static int verify_xena_quiescence(struct s2io_nic *sp)
2021 int mode;
2022 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2023 u64 val64 = readq(&bar0->adapter_status);
2024 mode = s2io_verify_pci_mode(sp);
2026 if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
2027 DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
2028 return 0;
2030 if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
2031 DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
2032 return 0;
2034 if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
2035 DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
2036 return 0;
2038 if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
2039 DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
2040 return 0;
2042 if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
2043 DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
2044 return 0;
2046 if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
2047 DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
2048 return 0;
2050 if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
2051 DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
2052 return 0;
2054 if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
2055 DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
2056 return 0;
2060 * In PCI 33 mode, the P_PLL is not used, and therefore,
2061 * the P_PLL_LOCK bit in the adapter_status register will
2062 * not be asserted.
2064 if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
2065 sp->device_type == XFRAME_II_DEVICE && mode !=
2066 PCI_MODE_PCI_33) {
2067 DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
2068 return 0;
2070 if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
2071 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2072 DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
2073 return 0;
2075 return 1;
2079 * fix_mac_address - Fix for MAC address problem on Alpha platforms
2080 * @sp: Pointer to device specific structure
2081 * Description:
2082 * New procedure to clear MAC address reading problems on Alpha platforms
2086 static void fix_mac_address(struct s2io_nic * sp)
2088 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2089 u64 val64;
2090 int i = 0;
2092 while (fix_mac[i] != END_SIGN) {
2093 writeq(fix_mac[i++], &bar0->gpio_control);
2094 udelay(10);
2095 val64 = readq(&bar0->gpio_control);
2100 * start_nic - Turns the device on
2101 * @nic : device private variable.
2102 * Description:
2103 * This function actually turns the device on. Before this function is
2104 * called, all registers are configured from their reset states
2105 * and shared memory is allocated but the NIC is still quiescent. On
2106 * calling this function, the device interrupts are cleared and the NIC is
2107 * literally switched on by writing into the adapter control register.
2108 * Return Value:
2109 * SUCCESS on success and -1 on failure.
2112 static int start_nic(struct s2io_nic *nic)
2114 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2115 struct net_device *dev = nic->dev;
2116 register u64 val64 = 0;
2117 u16 subid, i;
2118 struct mac_info *mac_control;
2119 struct config_param *config;
2121 mac_control = &nic->mac_control;
2122 config = &nic->config;
2124 /* PRC Initialization and configuration */
2125 for (i = 0; i < config->rx_ring_num; i++) {
2126 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
2127 &bar0->prc_rxd0_n[i]);
2129 val64 = readq(&bar0->prc_ctrl_n[i]);
2130 if (nic->rxd_mode == RXD_MODE_1)
2131 val64 |= PRC_CTRL_RC_ENABLED;
2132 else
2133 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
2134 if (nic->device_type == XFRAME_II_DEVICE)
2135 val64 |= PRC_CTRL_GROUP_READS;
2136 val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
2137 val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
2138 writeq(val64, &bar0->prc_ctrl_n[i]);
2141 if (nic->rxd_mode == RXD_MODE_3B) {
2142 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
2143 val64 = readq(&bar0->rx_pa_cfg);
2144 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
2145 writeq(val64, &bar0->rx_pa_cfg);
2148 if (vlan_tag_strip == 0) {
2149 val64 = readq(&bar0->rx_pa_cfg);
2150 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
2151 writeq(val64, &bar0->rx_pa_cfg);
2152 vlan_strip_flag = 0;
2156 * Enabling MC-RLDRAM. After enabling the device, we timeout
2157 * for around 100ms, which is approximately the time required
2158 * for the device to be ready for operation.
2160 val64 = readq(&bar0->mc_rldram_mrs);
2161 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
2162 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
2163 val64 = readq(&bar0->mc_rldram_mrs);
2165 msleep(100); /* Delay by around 100 ms. */
2167 /* Enabling ECC Protection. */
2168 val64 = readq(&bar0->adapter_control);
2169 val64 &= ~ADAPTER_ECC_EN;
2170 writeq(val64, &bar0->adapter_control);
2173 * Verify if the device is ready to be enabled, if so enable
2174 * it.
2176 val64 = readq(&bar0->adapter_status);
2177 if (!verify_xena_quiescence(nic)) {
2178 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
2179 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
2180 (unsigned long long) val64);
2181 return FAILURE;
2185 * With some switches, link might be already up at this point.
2186 * Because of this weird behavior, when we enable laser,
2187 * we may not get link. We need to handle this. We cannot
2188 * figure out which switch is misbehaving. So we are forced to
2189 * make a global change.
2192 /* Enabling Laser. */
2193 val64 = readq(&bar0->adapter_control);
2194 val64 |= ADAPTER_EOI_TX_ON;
2195 writeq(val64, &bar0->adapter_control);
2197 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
2199 * Don't see link state interrupts initially on some switches,
2200 * so directly scheduling the link state task here.
2202 schedule_work(&nic->set_link_task);
2204 /* SXE-002: Initialize link and activity LED */
2205 subid = nic->pdev->subsystem_device;
2206 if (((subid & 0xFF) >= 0x07) &&
2207 (nic->device_type == XFRAME_I_DEVICE)) {
2208 val64 = readq(&bar0->gpio_control);
2209 val64 |= 0x0000800000000000ULL;
2210 writeq(val64, &bar0->gpio_control);
2211 val64 = 0x0411040400000000ULL;
2212 writeq(val64, (void __iomem *)bar0 + 0x2700);
2215 return SUCCESS;
2218 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2220 static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
2221 struct TxD *txdlp, int get_off)
2223 struct s2io_nic *nic = fifo_data->nic;
2224 struct sk_buff *skb;
2225 struct TxD *txds;
2226 u16 j, frg_cnt;
2228 txds = txdlp;
2229 if (txds->Host_Control == (u64)(long)nic->ufo_in_band_v) {
2230 pci_unmap_single(nic->pdev, (dma_addr_t)
2231 txds->Buffer_Pointer, sizeof(u64),
2232 PCI_DMA_TODEVICE);
2233 txds++;
2236 skb = (struct sk_buff *) ((unsigned long)
2237 txds->Host_Control);
2238 if (!skb) {
2239 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2240 return NULL;
2242 pci_unmap_single(nic->pdev, (dma_addr_t)
2243 txds->Buffer_Pointer,
2244 skb->len - skb->data_len,
2245 PCI_DMA_TODEVICE);
2246 frg_cnt = skb_shinfo(skb)->nr_frags;
2247 if (frg_cnt) {
2248 txds++;
2249 for (j = 0; j < frg_cnt; j++, txds++) {
2250 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2251 if (!txds->Buffer_Pointer)
2252 break;
2253 pci_unmap_page(nic->pdev, (dma_addr_t)
2254 txds->Buffer_Pointer,
2255 frag->size, PCI_DMA_TODEVICE);
2258 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2259 return skb;
2263 * free_tx_buffers - Free all queued Tx buffers
2264 * @nic : device private variable.
2265 * Description:
2266 * Free all queued Tx buffers.
2267 * Return Value: void
2270 static void free_tx_buffers(struct s2io_nic *nic)
2272 struct net_device *dev = nic->dev;
2273 struct sk_buff *skb;
2274 struct TxD *txdp;
2275 int i, j;
2276 struct mac_info *mac_control;
2277 struct config_param *config;
2278 int cnt = 0;
2280 mac_control = &nic->mac_control;
2281 config = &nic->config;
2283 for (i = 0; i < config->tx_fifo_num; i++) {
2284 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
2285 txdp = (struct TxD *)
2286 mac_control->fifos[i].list_info[j].list_virt_addr;
2287 skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2288 if (skb) {
2289 nic->mac_control.stats_info->sw_stat.mem_freed
2290 += skb->truesize;
2291 dev_kfree_skb(skb);
2292 cnt++;
2295 DBG_PRINT(INTR_DBG,
2296 "%s:forcibly freeing %d skbs on FIFO%d\n",
2297 dev->name, cnt, i);
2298 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2299 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2304 * stop_nic - To stop the nic
2305 * @nic : device private variable.
2306 * Description:
2307 * This function does exactly the opposite of what the start_nic()
2308 * function does. This function is called to stop the device.
2309 * Return Value:
2310 * void.
2313 static void stop_nic(struct s2io_nic *nic)
2315 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2316 register u64 val64 = 0;
2317 u16 interruptible;
2318 struct mac_info *mac_control;
2319 struct config_param *config;
2321 mac_control = &nic->mac_control;
2322 config = &nic->config;
2324 /* Disable all interrupts */
2325 en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2326 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2327 interruptible |= TX_PIC_INTR;
2328 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2330 /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2331 val64 = readq(&bar0->adapter_control);
2332 val64 &= ~(ADAPTER_CNTL_EN);
2333 writeq(val64, &bar0->adapter_control);
2337 * fill_rx_buffers - Allocates the Rx side skbs
2338 * @nic: device private variable
2339 * @ring_no: ring number
2340 * Description:
2341 * The function allocates Rx side skbs and puts the physical
2342 * address of these buffers into the RxD buffer pointers, so that the NIC
2343 * can DMA the received frame into these locations.
2344 * The NIC supports 3 receive modes, viz
2345 * 1. single buffer,
2346 * 2. three buffer and
2347 * 3. five buffer modes.
2348 * Each mode defines how many fragments the received frame will be split
2349 * up into by the NIC. The frame is split into L3 header, L4 header and
2350 * L4 payload in three buffer mode, and in five buffer mode the L4 payload
2351 * itself is split into 3 fragments. As of now only the single buffer and
2352 * two buffer (3B) modes are supported.
2353 * Return Value:
2354 * SUCCESS on success or an appropriate -ve value on failure.
2357 static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2359 struct net_device *dev = nic->dev;
2360 struct sk_buff *skb;
2361 struct RxD_t *rxdp;
2362 int off, off1, size, block_no, block_no1;
2363 u32 alloc_tab = 0;
2364 u32 alloc_cnt;
2365 struct mac_info *mac_control;
2366 struct config_param *config;
2367 u64 tmp;
2368 struct buffAdd *ba;
2369 unsigned long flags;
2370 struct RxD_t *first_rxdp = NULL;
2371 u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
2372 struct RxD1 *rxdp1;
2373 struct RxD3 *rxdp3;
2374 struct swStat *stats = &nic->mac_control.stats_info->sw_stat;
2376 mac_control = &nic->mac_control;
2377 config = &nic->config;
2378 alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
2379 atomic_read(&nic->rx_bufs_left[ring_no]);
2381 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index;
2382 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
2383 while (alloc_tab < alloc_cnt) {
2384 block_no = mac_control->rings[ring_no].rx_curr_put_info.
2385 block_index;
2386 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
2388 rxdp = mac_control->rings[ring_no].
2389 rx_blocks[block_no].rxds[off].virt_addr;
2391 if ((block_no == block_no1) && (off == off1) &&
2392 (rxdp->Host_Control)) {
2393 DBG_PRINT(INTR_DBG, "%s: Get and Put",
2394 dev->name);
2395 DBG_PRINT(INTR_DBG, " info equated\n");
2396 goto end;
2398 if (off && (off == rxd_count[nic->rxd_mode])) {
2399 mac_control->rings[ring_no].rx_curr_put_info.
2400 block_index++;
2401 if (mac_control->rings[ring_no].rx_curr_put_info.
2402 block_index == mac_control->rings[ring_no].
2403 block_count)
2404 mac_control->rings[ring_no].rx_curr_put_info.
2405 block_index = 0;
2406 block_no = mac_control->rings[ring_no].
2407 rx_curr_put_info.block_index;
2408 if (off == rxd_count[nic->rxd_mode])
2409 off = 0;
2410 mac_control->rings[ring_no].rx_curr_put_info.
2411 offset = off;
2412 rxdp = mac_control->rings[ring_no].
2413 rx_blocks[block_no].block_virt_addr;
2414 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2415 dev->name, rxdp);
2417 if (!napi) {
2418 spin_lock_irqsave(&nic->put_lock, flags);
2419 mac_control->rings[ring_no].put_pos =
2420 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2421 spin_unlock_irqrestore(&nic->put_lock, flags);
2422 } else {
2423 mac_control->rings[ring_no].put_pos =
2424 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2426 if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2427 ((nic->rxd_mode == RXD_MODE_3B) &&
2428 (rxdp->Control_2 & BIT(0)))) {
2429 mac_control->rings[ring_no].rx_curr_put_info.
2430 offset = off;
2431 goto end;
2433 /* calculate size of skb based on ring mode */
2434 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2435 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2436 if (nic->rxd_mode == RXD_MODE_1)
2437 size += NET_IP_ALIGN;
2438 else
2439 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2441 /* allocate skb */
2442 skb = dev_alloc_skb(size);
2443 if (!skb) {
2444 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
2445 DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n");
2446 if (first_rxdp) {
2447 wmb();
2448 first_rxdp->Control_1 |= RXD_OWN_XENA;
2450 nic->mac_control.stats_info->sw_stat. \
2451 mem_alloc_fail_cnt++;
2452 return -ENOMEM ;
2454 nic->mac_control.stats_info->sw_stat.mem_allocated
2455 += skb->truesize;
2456 if (nic->rxd_mode == RXD_MODE_1) {
2457 /* 1 buffer mode - normal operation mode */
2458 rxdp1 = (struct RxD1*)rxdp;
2459 memset(rxdp, 0, sizeof(struct RxD1));
2460 skb_reserve(skb, NET_IP_ALIGN);
2461 rxdp1->Buffer0_ptr = pci_map_single
2462 (nic->pdev, skb->data, size - NET_IP_ALIGN,
2463 PCI_DMA_FROMDEVICE);
2464 if ((rxdp1->Buffer0_ptr == 0) ||
2465 (rxdp1->Buffer0_ptr ==
2466 DMA_ERROR_CODE))
2467 goto pci_map_failed;
2469 rxdp->Control_2 =
2470 SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
2472 } else if (nic->rxd_mode == RXD_MODE_3B) {
2474 * 2 buffer mode -
2475 * 2 buffer mode provides 128
2476 * byte aligned receive buffers.
2479 rxdp3 = (struct RxD3*)rxdp;
2480 /* save buffer pointers to avoid frequent dma mapping */
2481 Buffer0_ptr = rxdp3->Buffer0_ptr;
2482 Buffer1_ptr = rxdp3->Buffer1_ptr;
2483 memset(rxdp, 0, sizeof(struct RxD3));
2484 /* restore the buffer pointers for dma sync*/
2485 rxdp3->Buffer0_ptr = Buffer0_ptr;
2486 rxdp3->Buffer1_ptr = Buffer1_ptr;
2488 ba = &mac_control->rings[ring_no].ba[block_no][off];
2489 skb_reserve(skb, BUF0_LEN);
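/*
 * Round skb->data up to the next (ALIGN_SIZE + 1) boundary -- 128 bytes
 * assuming ALIGN_SIZE is 127 -- so that the Buffer2 DMA address set up
 * below is 128-byte aligned, as two buffer mode expects.
 */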
2490 tmp = (u64)(unsigned long) skb->data;
2491 tmp += ALIGN_SIZE;
2492 tmp &= ~ALIGN_SIZE;
2493 skb->data = (void *) (unsigned long)tmp;
2494 skb_reset_tail_pointer(skb);
2496 if (!(rxdp3->Buffer0_ptr))
2497 rxdp3->Buffer0_ptr =
2498 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
2499 PCI_DMA_FROMDEVICE);
2500 else
2501 pci_dma_sync_single_for_device(nic->pdev,
2502 (dma_addr_t) rxdp3->Buffer0_ptr,
2503 BUF0_LEN, PCI_DMA_FROMDEVICE);
2504 if ((rxdp3->Buffer0_ptr == 0) ||
2505 (rxdp3->Buffer0_ptr == DMA_ERROR_CODE))
2506 goto pci_map_failed;
2508 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2509 if (nic->rxd_mode == RXD_MODE_3B) {
2510 /* Two buffer mode */
2513 * Buffer2 will have L3/L4 header plus
2514 * L4 payload
2516 rxdp3->Buffer2_ptr = pci_map_single
2517 (nic->pdev, skb->data, dev->mtu + 4,
2518 PCI_DMA_FROMDEVICE);
2520 if ((rxdp3->Buffer2_ptr == 0) ||
2521 (rxdp3->Buffer2_ptr == DMA_ERROR_CODE))
2522 goto pci_map_failed;
2524 rxdp3->Buffer1_ptr =
2525 pci_map_single(nic->pdev,
2526 ba->ba_1, BUF1_LEN,
2527 PCI_DMA_FROMDEVICE);
2528 if ((rxdp3->Buffer1_ptr == 0) ||
2529 (rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
2530 pci_unmap_single
2531 (nic->pdev,
2532 (dma_addr_t)rxdp3->Buffer2_ptr,
2533 dev->mtu + 4,
2534 PCI_DMA_FROMDEVICE);
2535 goto pci_map_failed;
2537 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2538 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2539 (dev->mtu + 4);
2541 rxdp->Control_2 |= BIT(0);
2543 rxdp->Host_Control = (unsigned long) (skb);
2544 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2545 rxdp->Control_1 |= RXD_OWN_XENA;
2546 off++;
2547 if (off == (rxd_count[nic->rxd_mode] + 1))
2548 off = 0;
2549 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
2551 rxdp->Control_2 |= SET_RXD_MARKER;
2552 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2553 if (first_rxdp) {
2554 wmb();
2555 first_rxdp->Control_1 |= RXD_OWN_XENA;
2557 first_rxdp = rxdp;
2559 atomic_inc(&nic->rx_bufs_left[ring_no]);
2560 alloc_tab++;
2563 end:
2564 /* Transfer ownership of first descriptor to adapter just before
2565 * exiting. Before that, use memory barrier so that ownership
2566 * and other fields are seen by adapter correctly.
2568 if (first_rxdp) {
2569 wmb();
2570 first_rxdp->Control_1 |= RXD_OWN_XENA;
2573 return SUCCESS;
2574 pci_map_failed:
2575 stats->pci_map_fail_cnt++;
2576 stats->mem_freed += skb->truesize;
2577 dev_kfree_skb_irq(skb);
2578 return -ENOMEM;
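/*
 * Recap of the RxD layouts filled in above: in RXD_MODE_1 Buffer0
 * carries the whole frame (MTU plus L2 headers), while in RXD_MODE_3B
 * Buffer0 holds the BUF0_LEN header area, Buffer1 points at a small
 * dummy buffer (programmed size 1) and Buffer2 receives the frame
 * proper (mtu + 4) at a 128-byte aligned address.
 */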
2581 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2583 struct net_device *dev = sp->dev;
2584 int j;
2585 struct sk_buff *skb;
2586 struct RxD_t *rxdp;
2587 struct mac_info *mac_control;
2588 struct buffAdd *ba;
2589 struct RxD1 *rxdp1;
2590 struct RxD3 *rxdp3;
2592 mac_control = &sp->mac_control;
2593 for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2594 rxdp = mac_control->rings[ring_no].
2595 rx_blocks[blk].rxds[j].virt_addr;
2596 skb = (struct sk_buff *)
2597 ((unsigned long) rxdp->Host_Control);
2598 if (!skb) {
2599 continue;
2601 if (sp->rxd_mode == RXD_MODE_1) {
2602 rxdp1 = (struct RxD1*)rxdp;
2603 pci_unmap_single(sp->pdev, (dma_addr_t)
2604 rxdp1->Buffer0_ptr,
2605 dev->mtu +
2606 HEADER_ETHERNET_II_802_3_SIZE
2607 + HEADER_802_2_SIZE +
2608 HEADER_SNAP_SIZE,
2609 PCI_DMA_FROMDEVICE);
2610 memset(rxdp, 0, sizeof(struct RxD1));
2611 } else if (sp->rxd_mode == RXD_MODE_3B) {
2612 rxdp3 = (struct RxD3*)rxdp;
2613 ba = &mac_control->rings[ring_no].
2614 ba[blk][j];
2615 pci_unmap_single(sp->pdev, (dma_addr_t)
2616 rxdp3->Buffer0_ptr,
2617 BUF0_LEN,
2618 PCI_DMA_FROMDEVICE);
2619 pci_unmap_single(sp->pdev, (dma_addr_t)
2620 rxdp3->Buffer1_ptr,
2621 BUF1_LEN,
2622 PCI_DMA_FROMDEVICE);
2623 pci_unmap_single(sp->pdev, (dma_addr_t)
2624 rxdp3->Buffer2_ptr,
2625 dev->mtu + 4,
2626 PCI_DMA_FROMDEVICE);
2627 memset(rxdp, 0, sizeof(struct RxD3));
2629 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
2630 dev_kfree_skb(skb);
2631 atomic_dec(&sp->rx_bufs_left[ring_no]);
2636 * free_rx_buffers - Frees all Rx buffers
2637 * @sp: device private variable.
2638 * Description:
2639 * This function will free all Rx buffers allocated by host.
2640 * Return Value:
2641 * NONE.
2644 static void free_rx_buffers(struct s2io_nic *sp)
2646 struct net_device *dev = sp->dev;
2647 int i, blk = 0, buf_cnt = 0;
2648 struct mac_info *mac_control;
2649 struct config_param *config;
2651 mac_control = &sp->mac_control;
2652 config = &sp->config;
2654 for (i = 0; i < config->rx_ring_num; i++) {
2655 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2656 free_rxd_blk(sp,i,blk);
2658 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2659 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2660 mac_control->rings[i].rx_curr_put_info.offset = 0;
2661 mac_control->rings[i].rx_curr_get_info.offset = 0;
2662 atomic_set(&sp->rx_bufs_left[i], 0);
2663 DBG_PRINT(INIT_DBG, "%s: Freed 0x%x Rx Buffers on ring%d\n",
2664 dev->name, buf_cnt, i);
2669 * s2io_poll - Rx interrupt handler for NAPI support
2670 * @napi : pointer to the napi structure.
2671 * @budget : The number of packets that were budgeted to be processed
2672 * during one pass through the 'Poll' function.
2673 * Description:
2674 * Comes into picture only if NAPI support has been incorporated. It does
2675 * the same thing that rx_intr_handler does, but not in an interrupt
2676 * context; also it will process only a given number of packets.
2677 * Return value:
2678 * The number of packets processed during this poll.
2681 static int s2io_poll(struct napi_struct *napi, int budget)
2683 struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2684 struct net_device *dev = nic->dev;
2685 int pkt_cnt = 0, org_pkts_to_process;
2686 struct mac_info *mac_control;
2687 struct config_param *config;
2688 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2689 int i;
2691 if (!is_s2io_card_up(nic))
2692 return 0;
2694 mac_control = &nic->mac_control;
2695 config = &nic->config;
2697 nic->pkts_to_process = budget;
2698 org_pkts_to_process = nic->pkts_to_process;
2700 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
2701 readl(&bar0->rx_traffic_int);
2703 for (i = 0; i < config->rx_ring_num; i++) {
2704 rx_intr_handler(&mac_control->rings[i]);
2705 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2706 if (!nic->pkts_to_process) {
2707 /* Quota for the current iteration has been met */
2708 goto no_rx;
2712 netif_rx_complete(dev, napi);
2714 for (i = 0; i < config->rx_ring_num; i++) {
2715 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2716 DBG_PRINT(INFO_DBG, "%s: Out of memory", dev->name);
2717 DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
2718 break;
2721 /* Re enable the Rx interrupts. */
2722 writeq(0x0, &bar0->rx_traffic_mask);
2723 readl(&bar0->rx_traffic_mask);
2724 return pkt_cnt;
2726 no_rx:
2727 for (i = 0; i < config->rx_ring_num; i++) {
2728 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2729 DBG_PRINT(INFO_DBG, "%s: Out of memory", dev->name);
2730 DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
2731 break;
2734 return pkt_cnt;
2737 #ifdef CONFIG_NET_POLL_CONTROLLER
2739 * s2io_netpoll - netpoll event handler entry point
2740 * @dev : pointer to the device structure.
2741 * Description:
2742 * This function will be called by upper layer to check for events on the
2743 * interface in situations where interrupts are disabled. It is used for
2744 * specific in-kernel networking tasks, such as remote consoles and kernel
2745 * debugging over the network (example netdump in RedHat).
2747 static void s2io_netpoll(struct net_device *dev)
2749 struct s2io_nic *nic = dev->priv;
2750 struct mac_info *mac_control;
2751 struct config_param *config;
2752 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2753 u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2754 int i;
2756 if (pci_channel_offline(nic->pdev))
2757 return;
2759 disable_irq(dev->irq);
2761 mac_control = &nic->mac_control;
2762 config = &nic->config;
2764 writeq(val64, &bar0->rx_traffic_int);
2765 writeq(val64, &bar0->tx_traffic_int);
2767 /* we need to free up the transmitted skbufs or else netpoll will
2768 * run out of skbs and will fail, and eventually netpoll applications
2769 * such as netdump will fail.
2771 for (i = 0; i < config->tx_fifo_num; i++)
2772 tx_intr_handler(&mac_control->fifos[i]);
2774 /* check for received packet and indicate up to network */
2775 for (i = 0; i < config->rx_ring_num; i++)
2776 rx_intr_handler(&mac_control->rings[i]);
2778 for (i = 0; i < config->rx_ring_num; i++) {
2779 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2780 DBG_PRINT(INFO_DBG, "%s: Out of memory", dev->name);
2781 DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
2782 break;
2785 enable_irq(dev->irq);
2786 return;
2788 #endif
2791 * rx_intr_handler - Rx interrupt handler
2792 * @nic: device private variable.
2793 * Description:
2794 * If the interrupt is because of a received frame or if the
2795 * receive ring contains fresh, as yet un-processed frames, this function is
2796 * called. It picks out the RxD at which the last Rx processing had
2797 * stopped and sends the skb to the OSM's Rx handler and then increments
2798 * the offset.
2799 * Return Value:
2800 * NONE.
2802 static void rx_intr_handler(struct ring_info *ring_data)
2804 struct s2io_nic *nic = ring_data->nic;
2805 struct net_device *dev = (struct net_device *) nic->dev;
2806 int get_block, put_block, put_offset;
2807 struct rx_curr_get_info get_info, put_info;
2808 struct RxD_t *rxdp;
2809 struct sk_buff *skb;
2810 int pkt_cnt = 0;
2811 int i;
2812 struct RxD1* rxdp1;
2813 struct RxD3* rxdp3;
2815 spin_lock(&nic->rx_lock);
2817 get_info = ring_data->rx_curr_get_info;
2818 get_block = get_info.block_index;
2819 memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
2820 put_block = put_info.block_index;
2821 rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
2822 if (!napi) {
2823 spin_lock(&nic->put_lock);
2824 put_offset = ring_data->put_pos;
2825 spin_unlock(&nic->put_lock);
2826 } else
2827 put_offset = ring_data->put_pos;
2829 while (RXD_IS_UP2DT(rxdp)) {
2831 * If you are next to the put index then it's a
2832 * FIFO full condition
2834 if ((get_block == put_block) &&
2835 (get_info.offset + 1) == put_info.offset) {
2836 DBG_PRINT(INTR_DBG, "%s: Ring Full\n", dev->name);
2837 break;
2839 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2840 if (skb == NULL) {
2841 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2842 dev->name);
2843 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2844 spin_unlock(&nic->rx_lock);
2845 return;
2847 if (nic->rxd_mode == RXD_MODE_1) {
2848 rxdp1 = (struct RxD1*)rxdp;
2849 pci_unmap_single(nic->pdev, (dma_addr_t)
2850 rxdp1->Buffer0_ptr,
2851 dev->mtu +
2852 HEADER_ETHERNET_II_802_3_SIZE +
2853 HEADER_802_2_SIZE +
2854 HEADER_SNAP_SIZE,
2855 PCI_DMA_FROMDEVICE);
2856 } else if (nic->rxd_mode == RXD_MODE_3B) {
2857 rxdp3 = (struct RxD3*)rxdp;
2858 pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
2859 rxdp3->Buffer0_ptr,
2860 BUF0_LEN, PCI_DMA_FROMDEVICE);
2861 pci_unmap_single(nic->pdev, (dma_addr_t)
2862 rxdp3->Buffer2_ptr,
2863 dev->mtu + 4,
2864 PCI_DMA_FROMDEVICE);
2866 prefetch(skb->data);
2867 rx_osm_handler(ring_data, rxdp);
2868 get_info.offset++;
2869 ring_data->rx_curr_get_info.offset = get_info.offset;
2870 rxdp = ring_data->rx_blocks[get_block].
2871 rxds[get_info.offset].virt_addr;
2872 if (get_info.offset == rxd_count[nic->rxd_mode]) {
2873 get_info.offset = 0;
2874 ring_data->rx_curr_get_info.offset = get_info.offset;
2875 get_block++;
2876 if (get_block == ring_data->block_count)
2877 get_block = 0;
2878 ring_data->rx_curr_get_info.block_index = get_block;
2879 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2882 nic->pkts_to_process -= 1;
2883 if ((napi) && (!nic->pkts_to_process))
2884 break;
2885 pkt_cnt++;
2886 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2887 break;
2889 if (nic->lro) {
2890 /* Clear all LRO sessions before exiting */
2891 for (i = 0; i < MAX_LRO_SESSIONS; i++) {
2892 struct lro *lro = &nic->lro0_n[i];
2893 if (lro->in_use) {
2894 update_L3L4_header(nic, lro);
2895 queue_rx_frame(lro->parent);
2896 clear_lro_session(lro);
2901 spin_unlock(&nic->rx_lock);
2905 * tx_intr_handler - Transmit interrupt handler
2906 * @nic : device private variable
2907 * Description:
2908 * If an interrupt was raised to indicate DMA complete of the
2909 * Tx packet, this function is called. It identifies the last TxD
2910 * whose buffer was freed and frees all skbs whose data have already
2911 * been DMA'ed into the NIC's internal memory.
2912 * Return Value:
2913 * NONE
2916 static void tx_intr_handler(struct fifo_info *fifo_data)
2918 struct s2io_nic *nic = fifo_data->nic;
2919 struct net_device *dev = (struct net_device *) nic->dev;
2920 struct tx_curr_get_info get_info, put_info;
2921 struct sk_buff *skb;
2922 struct TxD *txdlp;
2923 u8 err_mask;
2925 get_info = fifo_data->tx_curr_get_info;
2926 memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
2927 txdlp = (struct TxD *) fifo_data->list_info[get_info.offset].
2928 list_virt_addr;
2929 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2930 (get_info.offset != put_info.offset) &&
2931 (txdlp->Host_Control)) {
2932 /* Check for TxD errors */
2933 if (txdlp->Control_1 & TXD_T_CODE) {
2934 unsigned long long err;
2935 err = txdlp->Control_1 & TXD_T_CODE;
2936 if (err & 0x1) {
2937 nic->mac_control.stats_info->sw_stat.
2938 parity_err_cnt++;
2941 /* update t_code statistics */
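/* the transfer code occupies bits 51:48 of Control_1, hence the
 * shift below (an inference from the mask and the values tested)
 */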
2942 err_mask = err >> 48;
2943 switch (err_mask) {
2944 case 2:
2945 nic->mac_control.stats_info->sw_stat.
2946 tx_buf_abort_cnt++;
2947 break;
2949 case 3:
2950 nic->mac_control.stats_info->sw_stat.
2951 tx_desc_abort_cnt++;
2952 break;
2954 case 7:
2955 nic->mac_control.stats_info->sw_stat.
2956 tx_parity_err_cnt++;
2957 break;
2959 case 10:
2960 nic->mac_control.stats_info->sw_stat.
2961 tx_link_loss_cnt++;
2962 break;
2964 case 15:
2965 nic->mac_control.stats_info->sw_stat.
2966 tx_list_proc_err_cnt++;
2967 break;
2971 skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
2972 if (skb == NULL) {
2973 DBG_PRINT(ERR_DBG, "%s: Null skb ",
2974 __FUNCTION__);
2975 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2976 return;
2979 /* Updating the statistics block */
2980 nic->stats.tx_bytes += skb->len;
2981 nic->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
2982 dev_kfree_skb_irq(skb);
2984 get_info.offset++;
2985 if (get_info.offset == get_info.fifo_len + 1)
2986 get_info.offset = 0;
2987 txdlp = (struct TxD *) fifo_data->list_info
2988 [get_info.offset].list_virt_addr;
2989 fifo_data->tx_curr_get_info.offset =
2990 get_info.offset;
2993 spin_lock(&nic->tx_lock);
2994 if (netif_queue_stopped(dev))
2995 netif_wake_queue(dev);
2996 spin_unlock(&nic->tx_lock);
3000 * s2io_mdio_write - Function to write into the MDIO registers
3001 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3002 * @addr : address value
3003 * @value : data value
3004 * @dev : pointer to net_device structure
3005 * Description:
3006 * This function is used to write values to the MDIO registers.
3007 * Return Value: NONE
3009 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
3011 u64 val64 = 0x0;
3012 struct s2io_nic *sp = dev->priv;
3013 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3015 /* address transaction */
3016 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3017 | MDIO_MMD_DEV_ADDR(mmd_type)
3018 | MDIO_MMS_PRT_ADDR(0x0);
3019 writeq(val64, &bar0->mdio_control);
3020 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3021 writeq(val64, &bar0->mdio_control);
3022 udelay(100);
3024 /* Data transaction */
3025 val64 = 0x0;
3026 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3027 | MDIO_MMD_DEV_ADDR(mmd_type)
3028 | MDIO_MMS_PRT_ADDR(0x0)
3029 | MDIO_MDIO_DATA(value)
3030 | MDIO_OP(MDIO_OP_WRITE_TRANS);
3031 writeq(val64, &bar0->mdio_control);
3032 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3033 writeq(val64, &bar0->mdio_control);
3034 udelay(100);
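/*
 * Read-back transaction: this appears to re-read the register just
 * written; the result is not checked here.
 */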
3036 val64 = 0x0;
3037 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3038 | MDIO_MMD_DEV_ADDR(mmd_type)
3039 | MDIO_MMS_PRT_ADDR(0x0)
3040 | MDIO_OP(MDIO_OP_READ_TRANS);
3041 writeq(val64, &bar0->mdio_control);
3042 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3043 writeq(val64, &bar0->mdio_control);
3044 udelay(100);
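/*
 * Usage sketch (illustrative only, cf. s2io_updt_xpak_counter() below):
 * selecting the XPAK DOM page by writing PMA register 0xA100 looks like
 *
 *	s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, 0xA100, 0, dev);
 */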
3049 * s2io_mdio_read - Function to read from the MDIO registers
3050 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3051 * @addr : address value
3052 * @dev : pointer to net_device structure
3053 * Description:
3054 * This function is used to read values from the MDIO registers.
3055 * Return Value: the data value read
3057 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3059 u64 val64 = 0x0;
3060 u64 rval64 = 0x0;
3061 struct s2io_nic *sp = dev->priv;
3062 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3064 /* address transaction */
3065 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3066 | MDIO_MMD_DEV_ADDR(mmd_type)
3067 | MDIO_MMS_PRT_ADDR(0x0);
3068 writeq(val64, &bar0->mdio_control);
3069 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3070 writeq(val64, &bar0->mdio_control);
3071 udelay(100);
3073 /* Data transaction */
3074 val64 = 0x0;
3075 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3076 | MDIO_MMD_DEV_ADDR(mmd_type)
3077 | MDIO_MMS_PRT_ADDR(0x0)
3078 | MDIO_OP(MDIO_OP_READ_TRANS);
3079 writeq(val64, &bar0->mdio_control);
3080 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3081 writeq(val64, &bar0->mdio_control);
3082 udelay(100);
3084 /* Read the value from regs */
3085 rval64 = readq(&bar0->mdio_control);
3086 rval64 = rval64 & 0xFFFF0000;
3087 rval64 = rval64 >> 16;
3088 return rval64;
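/*
 * Usage sketch (illustrative only): reading the PMA identifier
 * register, which s2io_updt_xpak_counter() below expects to return
 * 0x2040:
 *
 *	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, 0x0000, dev);
 */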
3091 * s2io_chk_xpak_counter - Function to check the status of the xpak counters
3092 * @counter : counter value to be updated
3093 * @flag : flag to indicate the status
3094 * @type : counter type
3095 * Description:
3096 * This function checks and updates the status of the xpak counters
3097 * Return Value: NONE
3100 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
3102 u64 mask = 0x3;
3103 u64 val64;
3104 int i;
3105 for (i = 0; i < index; i++)
3106 mask = mask << 0x2;
3108 if (flag > 0)
3110 *counter = *counter + 1;
3111 val64 = *regs_stat & mask;
3112 val64 = val64 >> (index * 0x2);
3113 val64 = val64 + 1;
3114 if (val64 == 3)
3116 switch (type)
3118 case 1:
3119 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3120 "service. Excessive temperatures may "
3121 "result in premature transceiver "
3122 "failure \n");
3123 break;
3124 case 2:
3125 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3126 "service Excessive bias currents may "
3127 "indicate imminent laser diode "
3128 "failure \n");
3129 break;
3130 case 3:
3131 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3132 "service Excessive laser output "
3133 "power may saturate far-end "
3134 "receiver\n");
3135 break;
3136 default:
3137 DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
3138 "type \n");
3140 val64 = 0x0;
3142 val64 = val64 << (index * 0x2);
3143 *regs_stat = (*regs_stat & (~mask)) | (val64);
3145 } else {
3146 *regs_stat = *regs_stat & (~mask);
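/*
 * Worked example: each alarm source owns a 2-bit saturating field in
 * *regs_stat at bit offset (index * 2), so index 2 uses mask 0x30.
 * Consecutive polls with the alarm asserted step the field
 * 0 -> 1 -> 2 -> 3; on reaching 3 the warning above is printed and the
 * field is cleared. A poll with the alarm de-asserted clears it early.
 */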
3151 * s2io_updt_xpak_counter - Function to update the xpak counters
3152 * @dev : pointer to net_device struct
3153 * Description:
3154 * This function is to update the status of the xpak counters value
3155 * Return Value: NONE
3157 static void s2io_updt_xpak_counter(struct net_device *dev)
3159 u16 flag = 0x0;
3160 u16 type = 0x0;
3161 u16 val16 = 0x0;
3162 u64 val64 = 0x0;
3163 u64 addr = 0x0;
3165 struct s2io_nic *sp = dev->priv;
3166 struct stat_block *stat_info = sp->mac_control.stats_info;
3168 /* Check the communication with the MDIO slave */
3169 addr = 0x0000;
3170 val64 = 0x0;
3171 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3172 if ((val64 == 0xFFFF) || (val64 == 0x0000))
3174 DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
3175 "Returned %llx\n", (unsigned long long)val64);
3176 return;
3179 /* Check for the expected value of 0x2040 at PMA address 0x0000 */
3180 if (val64 != 0x2040)
3182 DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
3183 DBG_PRINT(ERR_DBG, "Returned: %llx - Expected: 0x2040\n",
3184 (unsigned long long)val64);
3185 return;
3188 /* Loading the DOM register into the MDIO register */
3189 addr = 0xA100;
3190 s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
3191 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3193 /* Reading the Alarm flags */
3194 addr = 0xA070;
3195 val64 = 0x0;
3196 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3198 flag = CHECKBIT(val64, 0x7);
3199 type = 1;
3200 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
3201 &stat_info->xpak_stat.xpak_regs_stat,
3202 0x0, flag, type);
3204 if (CHECKBIT(val64, 0x6))
3205 stat_info->xpak_stat.alarm_transceiver_temp_low++;
3207 flag = CHECKBIT(val64, 0x3);
3208 type = 2;
3209 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
3210 &stat_info->xpak_stat.xpak_regs_stat,
3211 0x2, flag, type);
3213 if (CHECKBIT(val64, 0x2))
3214 stat_info->xpak_stat.alarm_laser_bias_current_low++;
3216 flag = CHECKBIT(val64, 0x1);
3217 type = 3;
3218 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
3219 &stat_info->xpak_stat.xpak_regs_stat,
3220 0x4, flag, type);
3222 if (CHECKBIT(val64, 0x0))
3223 stat_info->xpak_stat.alarm_laser_output_power_low++;
3225 /* Reading the Warning flags */
3226 addr = 0xA074;
3227 val64 = 0x0;
3228 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3230 if (CHECKBIT(val64, 0x7))
3231 stat_info->xpak_stat.warn_transceiver_temp_high++;
3233 if (CHECKBIT(val64, 0x6))
3234 stat_info->xpak_stat.warn_transceiver_temp_low++;
3236 if (CHECKBIT(val64, 0x3))
3237 stat_info->xpak_stat.warn_laser_bias_current_high++;
3239 if (CHECKBIT(val64, 0x2))
3240 stat_info->xpak_stat.warn_laser_bias_current_low++;
3242 if (CHECKBIT(val64, 0x1))
3243 stat_info->xpak_stat.warn_laser_output_power_high++;
3245 if (CHECKBIT(val64, 0x0))
3246 stat_info->xpak_stat.warn_laser_output_power_low++;
3250 * wait_for_cmd_complete - waits for a command to complete.
3251 * @sp : private member of the device structure, which is a pointer to the
3252 * s2io_nic structure.
3253 * Description: Function that waits for a command written into the RMAC
3254 * ADDR DATA registers to be completed and returns either success or
3255 * error depending on whether the command completed or not.
3256 * Return value:
3257 * SUCCESS on success and FAILURE on failure.
3260 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3261 int bit_state)
3263 int ret = FAILURE, cnt = 0, delay = 1;
3264 u64 val64;
3266 if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3267 return FAILURE;
3269 do {
3270 val64 = readq(addr);
3271 if (bit_state == S2IO_BIT_RESET) {
3272 if (!(val64 & busy_bit)) {
3273 ret = SUCCESS;
3274 break;
3276 } else {
3277 if (val64 & busy_bit) {
3278 ret = SUCCESS;
3279 break;
3283 if (in_interrupt())
3284 mdelay(delay);
3285 else
3286 msleep(delay);
3288 if (++cnt >= 10)
3289 delay = 50;
3290 } while (cnt < 20);
3291 return ret;
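/*
 * Usage sketch (illustrative only; the register and bit names are
 * assumptions following this driver's naming): polling the RMAC
 * address command strobe until the hardware clears it would look like
 *
 *	wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
 *			      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
 *			      S2IO_BIT_RESET);
 */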
3294 * check_pci_device_id - Checks if the device id is supported
3295 * @id : device id
3296 * Description: Function to check if the PCI device id is supported by the driver.
3297 * Return value: Actual device id if supported else PCI_ANY_ID
3299 static u16 check_pci_device_id(u16 id)
3301 switch (id) {
3302 case PCI_DEVICE_ID_HERC_WIN:
3303 case PCI_DEVICE_ID_HERC_UNI:
3304 return XFRAME_II_DEVICE;
3305 case PCI_DEVICE_ID_S2IO_UNI:
3306 case PCI_DEVICE_ID_S2IO_WIN:
3307 return XFRAME_I_DEVICE;
3308 default:
3309 return PCI_ANY_ID;
3314 * s2io_reset - Resets the card.
3315 * @sp : private member of the device structure.
3316 * Description: Function to reset the card. This function then also
3317 * restores the previously saved PCI configuration space registers, as
3318 * the card reset also resets the configuration space.
3319 * Return value:
3320 * void.
3323 static void s2io_reset(struct s2io_nic * sp)
3325 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3326 u64 val64;
3327 u16 subid, pci_cmd;
3328 int i;
3329 u16 val16;
3330 unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
3331 unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
3333 DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
3334 __FUNCTION__, sp->dev->name);
3336 /* Back up the PCI-X CMD reg, don't want to lose MMRBC, OST settings */
3337 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
3339 val64 = SW_RESET_ALL;
3340 writeq(val64, &bar0->sw_reset);
3341 if (strstr(sp->product_name, "CX4")) {
3342 msleep(750);
3344 msleep(250);
3345 for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
3347 /* Restore the PCI state saved during initialization. */
3348 pci_restore_state(sp->pdev);
3349 pci_read_config_word(sp->pdev, 0x2, &val16);
3350 if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3351 break;
3352 msleep(200);
3355 if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
3356 DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__);
3359 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
3361 s2io_init_pci(sp);
3363 /* Set swapper to enable I/O register access */
3364 s2io_set_swapper(sp);
3366 /* Restore the MSIX table entries from local variables */
3367 restore_xmsi_data(sp);
3369 /* Clear certain PCI/PCI-X fields after reset */
3370 if (sp->device_type == XFRAME_II_DEVICE) {
3371 /* Clear "detected parity error" bit */
3372 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
3374 /* Clearing PCIX Ecc status register */
3375 pci_write_config_dword(sp->pdev, 0x68, 0x7C);
3377 /* Clearing PCI_STATUS error reflected here */
3378 writeq(BIT(62), &bar0->txpic_int_reg);
3381 /* Reset device statistics maintained by OS */
3382 memset(&sp->stats, 0, sizeof (struct net_device_stats));
3384 up_cnt = sp->mac_control.stats_info->sw_stat.link_up_cnt;
3385 down_cnt = sp->mac_control.stats_info->sw_stat.link_down_cnt;
3386 up_time = sp->mac_control.stats_info->sw_stat.link_up_time;
3387 down_time = sp->mac_control.stats_info->sw_stat.link_down_time;
3388 reset_cnt = sp->mac_control.stats_info->sw_stat.soft_reset_cnt;
3389 mem_alloc_cnt = sp->mac_control.stats_info->sw_stat.mem_allocated;
3390 mem_free_cnt = sp->mac_control.stats_info->sw_stat.mem_freed;
3391 watchdog_cnt = sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt;
3392 /* save link up/down time/cnt, reset/memory/watchdog cnt */
3393 memset(sp->mac_control.stats_info, 0, sizeof(struct stat_block));
3394 /* restore link up/down time/cnt, reset/memory/watchdog cnt */
3395 sp->mac_control.stats_info->sw_stat.link_up_cnt = up_cnt;
3396 sp->mac_control.stats_info->sw_stat.link_down_cnt = down_cnt;
3397 sp->mac_control.stats_info->sw_stat.link_up_time = up_time;
3398 sp->mac_control.stats_info->sw_stat.link_down_time = down_time;
3399 sp->mac_control.stats_info->sw_stat.soft_reset_cnt = reset_cnt;
3400 sp->mac_control.stats_info->sw_stat.mem_allocated = mem_alloc_cnt;
3401 sp->mac_control.stats_info->sw_stat.mem_freed = mem_free_cnt;
3402 sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt = watchdog_cnt;
3404 /* SXE-002: Configure link and activity LED to turn it off */
3405 subid = sp->pdev->subsystem_device;
3406 if (((subid & 0xFF) >= 0x07) &&
3407 (sp->device_type == XFRAME_I_DEVICE)) {
3408 val64 = readq(&bar0->gpio_control);
3409 val64 |= 0x0000800000000000ULL;
3410 writeq(val64, &bar0->gpio_control);
3411 val64 = 0x0411040400000000ULL;
3412 writeq(val64, (void __iomem *)bar0 + 0x2700);
3416 * Clear spurious ECC interrupts that would have occurred on
3417 * XFRAME II cards after reset.
3419 if (sp->device_type == XFRAME_II_DEVICE) {
3420 val64 = readq(&bar0->pcc_err_reg);
3421 writeq(val64, &bar0->pcc_err_reg);
3424 /* restore the previously assigned mac address */
3425 do_s2io_prog_unicast(sp->dev, (u8 *)&sp->def_mac_addr[0].mac_addr);
3427 sp->device_enabled_once = FALSE;
3431 * s2io_set_swapper - to set the swapper control on the card
3432 * @sp : private member of the device structure,
3433 * pointer to the s2io_nic structure.
3434 * Description: Function to set the swapper control on the card
3435 * correctly depending on the 'endianness' of the system.
3436 * Return value:
3437 * SUCCESS on success and FAILURE on failure.
3440 static int s2io_set_swapper(struct s2io_nic * sp)
3442 struct net_device *dev = sp->dev;
3443 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3444 u64 val64, valt, valr;
3447 * Set proper endian settings and verify the same by reading
3448 * the PIF Feed-back register.
3451 val64 = readq(&bar0->pif_rd_swapper_fb);
3452 if (val64 != 0x0123456789ABCDEFULL) {
3453 int i = 0;
3454 u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
3455 0x8100008181000081ULL, /* FE=1, SE=0 */
3456 0x4200004242000042ULL, /* FE=0, SE=1 */
3457 0}; /* FE=0, SE=0 */
3459 while(i<4) {
3460 writeq(value[i], &bar0->swapper_ctrl);
3461 val64 = readq(&bar0->pif_rd_swapper_fb);
3462 if (val64 == 0x0123456789ABCDEFULL)
3463 break;
3464 i++;
3466 if (i == 4) {
3467 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3468 dev->name);
3469 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3470 (unsigned long long) val64);
3471 return FAILURE;
3473 valr = value[i];
3474 } else {
3475 valr = readq(&bar0->swapper_ctrl);
3478 valt = 0x0123456789ABCDEFULL;
3479 writeq(valt, &bar0->xmsi_address);
3480 val64 = readq(&bar0->xmsi_address);
3482 if (val64 != valt) {
3483 int i = 0;
3484 u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
3485 0x0081810000818100ULL, /* FE=1, SE=0 */
3486 0x0042420000424200ULL, /* FE=0, SE=1 */
3487 0}; /* FE=0, SE=0 */
3489 while(i<4) {
3490 writeq((value[i] | valr), &bar0->swapper_ctrl);
3491 writeq(valt, &bar0->xmsi_address);
3492 val64 = readq(&bar0->xmsi_address);
3493 if(val64 == valt)
3494 break;
3495 i++;
3497 if(i == 4) {
3498 unsigned long long x = val64;
3499 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
3500 DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
3501 return FAILURE;
3504 val64 = readq(&bar0->swapper_ctrl);
3505 val64 &= 0xFFFF000000000000ULL;
3507 #ifdef __BIG_ENDIAN
3509 * The device is by default set to a big endian format, so a
3510 * big endian driver need not set anything.
3512 val64 |= (SWAPPER_CTRL_TXP_FE |
3513 SWAPPER_CTRL_TXP_SE |
3514 SWAPPER_CTRL_TXD_R_FE |
3515 SWAPPER_CTRL_TXD_W_FE |
3516 SWAPPER_CTRL_TXF_R_FE |
3517 SWAPPER_CTRL_RXD_R_FE |
3518 SWAPPER_CTRL_RXD_W_FE |
3519 SWAPPER_CTRL_RXF_W_FE |
3520 SWAPPER_CTRL_XMSI_FE |
3521 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3522 if (sp->config.intr_type == INTA)
3523 val64 |= SWAPPER_CTRL_XMSI_SE;
3524 writeq(val64, &bar0->swapper_ctrl);
3525 #else
3527 * Initially we enable all bits to make it accessible by the
3528 * driver, then we selectively enable only those bits that
3529 * we want to set.
3531 val64 |= (SWAPPER_CTRL_TXP_FE |
3532 SWAPPER_CTRL_TXP_SE |
3533 SWAPPER_CTRL_TXD_R_FE |
3534 SWAPPER_CTRL_TXD_R_SE |
3535 SWAPPER_CTRL_TXD_W_FE |
3536 SWAPPER_CTRL_TXD_W_SE |
3537 SWAPPER_CTRL_TXF_R_FE |
3538 SWAPPER_CTRL_RXD_R_FE |
3539 SWAPPER_CTRL_RXD_R_SE |
3540 SWAPPER_CTRL_RXD_W_FE |
3541 SWAPPER_CTRL_RXD_W_SE |
3542 SWAPPER_CTRL_RXF_W_FE |
3543 SWAPPER_CTRL_XMSI_FE |
3544 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3545 if (sp->config.intr_type == INTA)
3546 val64 |= SWAPPER_CTRL_XMSI_SE;
3547 writeq(val64, &bar0->swapper_ctrl);
3548 #endif
3549 val64 = readq(&bar0->swapper_ctrl);
3552 * Verifying if endian settings are accurate by reading a
3553 * feedback register.
3555 val64 = readq(&bar0->pif_rd_swapper_fb);
3556 if (val64 != 0x0123456789ABCDEFULL) {
3557 /* Endian settings are incorrect, calls for another look. */
3558 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3559 dev->name);
3560 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3561 (unsigned long long) val64);
3562 return FAILURE;
3565 return SUCCESS;
3568 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3570 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3571 u64 val64;
3572 int ret = 0, cnt = 0;
3574 do {
3575 val64 = readq(&bar0->xmsi_access);
3576 if (!(val64 & BIT(15)))
3577 break;
3578 mdelay(1);
3579 cnt++;
3580 } while (cnt < 5);
3581 if (cnt == 5) {
3582 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3583 ret = 1;
3586 return ret;
3589 static void restore_xmsi_data(struct s2io_nic *nic)
3591 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3592 u64 val64;
3593 int i;
3595 for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3596 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3597 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3598 val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6));
3599 writeq(val64, &bar0->xmsi_access);
3600 if (wait_for_msix_trans(nic, i)) {
3601 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3602 continue;
3607 static void store_xmsi_data(struct s2io_nic *nic)
3609 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3610 u64 val64, addr, data;
3611 int i;
3613 /* Store and display */
3614 for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3615 val64 = (BIT(15) | vBIT(i, 26, 6));
3616 writeq(val64, &bar0->xmsi_access);
3617 if (wait_for_msix_trans(nic, i)) {
3618 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3619 continue;
3621 addr = readq(&bar0->xmsi_address);
3622 data = readq(&bar0->xmsi_data);
3623 if (addr && data) {
3624 nic->msix_info[i].addr = addr;
3625 nic->msix_info[i].data = data;
3630 static int s2io_enable_msi_x(struct s2io_nic *nic)
3632 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3633 u64 tx_mat, rx_mat;
3634 u16 msi_control; /* Temp variable */
3635 int ret, i, j, msix_indx = 1;
3637 nic->entries = kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct msix_entry),
3638 GFP_KERNEL);
3639 if (!nic->entries) {
3640 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \
3641 __FUNCTION__);
3642 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3643 return -ENOMEM;
3645 nic->mac_control.stats_info->sw_stat.mem_allocated
3646 += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3648 nic->s2io_entries =
3649 kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct s2io_msix_entry),
3650 GFP_KERNEL);
3651 if (!nic->s2io_entries) {
3652 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3653 __FUNCTION__);
3654 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3655 kfree(nic->entries);
3656 nic->mac_control.stats_info->sw_stat.mem_freed
3657 += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3658 return -ENOMEM;
3660 nic->mac_control.stats_info->sw_stat.mem_allocated
3661 += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3663 for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3664 nic->entries[i].entry = i;
3665 nic->s2io_entries[i].entry = i;
3666 nic->s2io_entries[i].arg = NULL;
3667 nic->s2io_entries[i].in_use = 0;
3670 tx_mat = readq(&bar0->tx_mat0_n[0]);
3671 for (i = 0; i < nic->config.tx_fifo_num; i++, msix_indx++) {
3672 tx_mat |= TX_MAT_SET(i, msix_indx);
3673 nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
3674 nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
3675 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3677 writeq(tx_mat, &bar0->tx_mat0_n[0]);
3679 rx_mat = readq(&bar0->rx_mat);
3680 for (j = 0; j < nic->config.rx_ring_num; j++, msix_indx++) {
3681 rx_mat |= RX_MAT_SET(j, msix_indx);
3682 nic->s2io_entries[msix_indx].arg
3683 = &nic->mac_control.rings[j];
3684 nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3685 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3687 writeq(rx_mat, &bar0->rx_mat);
3689 nic->avail_msix_vectors = 0;
3690 ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
3691 /* We fail init if there is an error or we get fewer vectors than the min required */
3692 if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) {
3693 nic->avail_msix_vectors = ret;
3694 ret = pci_enable_msix(nic->pdev, nic->entries, ret);
3696 if (ret) {
3697 DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
3698 kfree(nic->entries);
3699 nic->mac_control.stats_info->sw_stat.mem_freed
3700 += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3701 kfree(nic->s2io_entries);
3702 nic->mac_control.stats_info->sw_stat.mem_freed
3703 += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3704 nic->entries = NULL;
3705 nic->s2io_entries = NULL;
3706 nic->avail_msix_vectors = 0;
3707 return -ENOMEM;
3709 if (!nic->avail_msix_vectors)
3710 nic->avail_msix_vectors = MAX_REQUESTED_MSI_X;
3713 * To enable MSI-X, MSI also needs to be enabled, due to a bug
3714 * in the herc NIC. (Temp change, needs to be removed later)
3716 pci_read_config_word(nic->pdev, 0x42, &msi_control);
3717 msi_control |= 0x1; /* Enable MSI */
3718 pci_write_config_word(nic->pdev, 0x42, msi_control);
3720 return 0;
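/*
 * Resulting vector layout (as programmed above): entry 0 is left
 * unmapped by this routine, entries 1 .. tx_fifo_num steer the Tx
 * FIFOs and the following rx_ring_num entries steer the Rx rings.
 * s2io_test_msi() below relies on entry 1 being a usable vector.
 */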
3723 /* Handle software interrupt used during MSI(X) test */
3724 static irqreturn_t __devinit s2io_test_intr(int irq, void *dev_id)
3726 struct s2io_nic *sp = dev_id;
3728 sp->msi_detected = 1;
3729 wake_up(&sp->msi_wait);
3731 return IRQ_HANDLED;
3734 /* Test interrupt path by forcing a software IRQ */
3735 static int __devinit s2io_test_msi(struct s2io_nic *sp)
3737 struct pci_dev *pdev = sp->pdev;
3738 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3739 int err;
3740 u64 val64, saved64;
3742 err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
3743 sp->name, sp);
3744 if (err) {
3745 DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
3746 sp->dev->name, pci_name(pdev), pdev->irq);
3747 return err;
3750 init_waitqueue_head(&sp->msi_wait);
3751 sp->msi_detected = 0;
3753 saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
3754 val64 |= SCHED_INT_CTRL_ONE_SHOT;
3755 val64 |= SCHED_INT_CTRL_TIMER_EN;
3756 val64 |= SCHED_INT_CTRL_INT2MSI(1);
3757 writeq(val64, &bar0->scheduled_int_ctrl);
3759 wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);
3761 if (!sp->msi_detected) {
3762 /* MSI(X) test failed, go back to INTx mode */
3763 DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
3764 "using MSI(X) during test\n", sp->dev->name,
3765 pci_name(pdev));
3767 err = -EOPNOTSUPP;
3770 free_irq(sp->entries[1].vector, sp);
3772 writeq(saved64, &bar0->scheduled_int_ctrl);
3774 return err;
3776 /* ********************************************************* *
3777 * Functions defined below concern the OS part of the driver *
3778 * ********************************************************* */
3781 * s2io_open - open entry point of the driver
3782 * @dev : pointer to the device structure.
3783 * Description:
3784 * This function is the open entry point of the driver. It mainly calls a
3785 * function to allocate Rx buffers and inserts them into the buffer
3786 * descriptors and then enables the Rx part of the NIC.
3787 * Return value:
3788 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3789 * file on failure.
3792 static int s2io_open(struct net_device *dev)
3794 struct s2io_nic *sp = dev->priv;
3795 int err = 0;
3798 * Make sure the link is off by default every time the
3799 * NIC is initialized
3801 netif_carrier_off(dev);
3802 sp->last_link_state = 0;
3804 napi_enable(&sp->napi);
3806 if (sp->config.intr_type == MSI_X) {
3807 int ret = s2io_enable_msi_x(sp);
3809 if (!ret) {
3810 u16 msi_control;
3812 ret = s2io_test_msi(sp);
3814 /* rollback MSI-X, will re-enable during add_isr() */
3815 kfree(sp->entries);
3816 sp->mac_control.stats_info->sw_stat.mem_freed +=
3817 (MAX_REQUESTED_MSI_X *
3818 sizeof(struct msix_entry));
3819 kfree(sp->s2io_entries);
3820 sp->mac_control.stats_info->sw_stat.mem_freed +=
3821 (MAX_REQUESTED_MSI_X *
3822 sizeof(struct s2io_msix_entry));
3823 sp->entries = NULL;
3824 sp->s2io_entries = NULL;
3826 pci_read_config_word(sp->pdev, 0x42, &msi_control);
3827 msi_control &= 0xFFFE; /* Disable MSI */
3828 pci_write_config_word(sp->pdev, 0x42, msi_control);
3830 pci_disable_msix(sp->pdev);
3833 if (ret) {
3835 DBG_PRINT(ERR_DBG,
3836 "%s: MSI-X requested but failed to enable\n",
3837 dev->name);
3838 sp->config.intr_type = INTA;
3842 /* NAPI doesn't work well with MSI(X) */
3843 if (sp->config.intr_type != INTA) {
3844 if (sp->config.napi)
3845 sp->config.napi = 0;
3848 /* Initialize H/W and enable interrupts */
3849 err = s2io_card_up(sp);
3850 if (err) {
3851 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3852 dev->name);
3853 goto hw_init_failed;
3856 if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
3857 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3858 s2io_card_down(sp);
3859 err = -ENODEV;
3860 goto hw_init_failed;
3863 netif_start_queue(dev);
3864 return 0;
3866 hw_init_failed:
3867 napi_disable(&sp->napi);
3868 if (sp->config.intr_type == MSI_X) {
3869 if (sp->entries) {
3870 kfree(sp->entries);
3871 sp->mac_control.stats_info->sw_stat.mem_freed
3872 += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3874 if (sp->s2io_entries) {
3875 kfree(sp->s2io_entries);
3876 sp->mac_control.stats_info->sw_stat.mem_freed
3877 += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3880 return err;
3884 * s2io_close - close entry point of the driver
3885 * @dev : device pointer.
3886 * Description:
3887 * This is the stop entry point of the driver. It needs to undo exactly
3888 * whatever was done by the open entry point, thus it's usually referred to
3889 * as the close function. Among other things this function mainly stops the
3890 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3891 * Return value:
3892 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3893 * file on failure.
3896 static int s2io_close(struct net_device *dev)
3898 struct s2io_nic *sp = dev->priv;
3900 netif_stop_queue(dev);
3901 napi_disable(&sp->napi);
3902 /* Reset card, kill tasklet and free Tx and Rx buffers. */
3903 s2io_card_down(sp);
3905 return 0;
3909 * s2io_xmit - Tx entry point of the driver
3910 * @skb : the socket buffer containing the Tx data.
3911 * @dev : device pointer.
3912 * Description :
3913 * This function is the Tx entry point of the driver. S2IO NIC supports
3914 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
3915 * NOTE: when the device cannot queue the packet, only the trans_start
3916 * variable will not be updated.
3917 * Return value:
3918 * 0 on success & 1 on failure.
3921 static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3923 struct s2io_nic *sp = dev->priv;
3924 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
3925 register u64 val64;
3926 struct TxD *txdp;
3927 struct TxFIFO_element __iomem *tx_fifo;
3928 unsigned long flags;
3929 u16 vlan_tag = 0;
3930 int vlan_priority = 0;
3931 struct mac_info *mac_control;
3932 struct config_param *config;
3933 int offload_type;
3934 struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
3936 mac_control = &sp->mac_control;
3937 config = &sp->config;
3939 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
3941 if (unlikely(skb->len <= 0)) {
3942 DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
3943 dev_kfree_skb_any(skb);
3944 return 0;
3947 spin_lock_irqsave(&sp->tx_lock, flags);
3948 if (!is_s2io_card_up(sp)) {
3949 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
3950 dev->name);
3951 spin_unlock_irqrestore(&sp->tx_lock, flags);
3952 dev_kfree_skb(skb);
3953 return 0;
3956 queue = 0;
3957 /* Get Fifo number to Transmit based on vlan priority */
3958 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3959 vlan_tag = vlan_tx_tag_get(skb);
3960 vlan_priority = vlan_tag >> 13;
3961 queue = config->fifo_mapping[vlan_priority];
3964 put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
3965 get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
3966 txdp = (struct TxD *) mac_control->fifos[queue].list_info[put_off].
3967 list_virt_addr;
3969 queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
3970 /* Avoid "put" pointer going beyond "get" pointer */
3971 if (txdp->Host_Control ||
3972 ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
3973 DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
3974 netif_stop_queue(dev);
3975 dev_kfree_skb(skb);
3976 spin_unlock_irqrestore(&sp->tx_lock, flags);
3977 return 0;
3980 offload_type = s2io_offload_type(skb);
3981 if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
3982 txdp->Control_1 |= TXD_TCP_LSO_EN;
3983 txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
3985 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3986 txdp->Control_2 |=
3987 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
3988 TXD_TX_CKO_UDP_EN);
3990 txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
3991 txdp->Control_1 |= TXD_LIST_OWN_XENA;
3992 txdp->Control_2 |= config->tx_intr_type;
3994 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3995 txdp->Control_2 |= TXD_VLAN_ENABLE;
3996 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
3999 frg_len = skb->len - skb->data_len;
4000 if (offload_type == SKB_GSO_UDP) {
4001 int ufo_size;
4003 ufo_size = s2io_udp_mss(skb);
4004 ufo_size &= ~7;
4005 txdp->Control_1 |= TXD_UFO_EN;
4006 txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
4007 txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
4008 #ifdef __BIG_ENDIAN
4009 sp->ufo_in_band_v[put_off] =
4010 (u64)skb_shinfo(skb)->ip6_frag_id;
4011 #else
4012 sp->ufo_in_band_v[put_off] =
4013 (u64)skb_shinfo(skb)->ip6_frag_id << 32;
4014 #endif
4015 txdp->Host_Control = (unsigned long)sp->ufo_in_band_v;
4016 txdp->Buffer_Pointer = pci_map_single(sp->pdev,
4017 sp->ufo_in_band_v,
4018 sizeof(u64), PCI_DMA_TODEVICE);
4019 if ((txdp->Buffer_Pointer == 0) ||
4020 (txdp->Buffer_Pointer == DMA_ERROR_CODE))
4021 goto pci_map_failed;
4022 txdp++;
4025 txdp->Buffer_Pointer = pci_map_single
4026 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
4027 if ((txdp->Buffer_Pointer == 0) ||
4028 (txdp->Buffer_Pointer == DMA_ERROR_CODE))
4029 goto pci_map_failed;
4031 txdp->Host_Control = (unsigned long) skb;
4032 txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
4033 if (offload_type == SKB_GSO_UDP)
4034 txdp->Control_1 |= TXD_UFO_EN;
4036 frg_cnt = skb_shinfo(skb)->nr_frags;
4037 /* For fragmented SKB. */
4038 for (i = 0; i < frg_cnt; i++) {
4039 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4040 /* A '0' length fragment will be ignored */
4041 if (!frag->size)
4042 continue;
4043 txdp++;
4044 txdp->Buffer_Pointer = (u64) pci_map_page
4045 (sp->pdev, frag->page, frag->page_offset,
4046 frag->size, PCI_DMA_TODEVICE);
4047 txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
4048 if (offload_type == SKB_GSO_UDP)
4049 txdp->Control_1 |= TXD_UFO_EN;
4051 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
4053 if (offload_type == SKB_GSO_UDP)
4054 frg_cnt++; /* as Txd0 was used for inband header */
4056 tx_fifo = mac_control->tx_FIFO_start[queue];
4057 val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
4058 writeq(val64, &tx_fifo->TxDL_Pointer);
4060 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
4061 TX_FIFO_LAST_LIST);
4062 if (offload_type)
4063 val64 |= TX_FIFO_SPECIAL_FUNC;
4065 writeq(val64, &tx_fifo->List_Control);
4067 mmiowb();
4069 put_off++;
4070 if (put_off == mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1)
4071 put_off = 0;
4072 mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
4074 /* Avoid "put" pointer going beyond "get" pointer */
4075 if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4076 sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
4077 DBG_PRINT(TX_DBG,
4078 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
4079 put_off, get_off);
4080 netif_stop_queue(dev);
4082 mac_control->stats_info->sw_stat.mem_allocated += skb->truesize;
4083 dev->trans_start = jiffies;
4084 spin_unlock_irqrestore(&sp->tx_lock, flags);
4086 return 0;
4087 pci_map_failed:
4088 stats->pci_map_fail_cnt++;
4089 netif_stop_queue(dev);
4090 stats->mem_freed += skb->truesize;
4091 dev_kfree_skb(skb);
4092 spin_unlock_irqrestore(&sp->tx_lock, flags);
4093 return 0;
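/* Illustrative sketch, not part of the driver: the circular "put"/"get"
 * full check used twice in s2io_xmit() above, factored out for clarity.
 * Returns non-zero when advancing the put pointer would land on the get
 * pointer, i.e. the FIFO has no free TxDs. The helper name is made up.
 */
static inline int s2io_txd_ring_full(u16 put_off, u16 get_off, u16 queue_len)
{
	return ((put_off + 1) == queue_len ? 0 : (put_off + 1)) == get_off;
}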
4096 static void
4097 s2io_alarm_handle(unsigned long data)
4099 struct s2io_nic *sp = (struct s2io_nic *)data;
4100 struct net_device *dev = sp->dev;
4102 s2io_handle_errors(dev);
4103 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
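/* Illustrative sketch, assuming the timer is set up at card-up time (the
 * real init site is elsewhere in this file): arming the alarm timer so
 * s2io_alarm_handle() re-runs every half second via the mod_timer() call
 * above. The helper name is made up.
 */
static inline void s2io_arm_alarm_timer(struct s2io_nic *sp)
{
	setup_timer(&sp->alarm_timer, s2io_alarm_handle, (unsigned long)sp);
	mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
}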
4106 static int s2io_chk_rx_buffers(struct s2io_nic *sp, int rng_n)
4108 int rxb_size, level;
4110 if (!sp->lro) {
4111 rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
4112 level = rx_buffer_level(sp, rxb_size, rng_n);
4114 if ((level == PANIC) && (!TASKLET_IN_USE)) {
4115 int ret;
4116 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
4117 DBG_PRINT(INTR_DBG, "PANIC levels\n");
4118 if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
4119 DBG_PRINT(INFO_DBG, "Out of memory in %s",
4120 __FUNCTION__);
4121 clear_bit(0, (&sp->tasklet_status));
4122 return -1;
4124 clear_bit(0, (&sp->tasklet_status));
4125 } else if (level == LOW)
4126 tasklet_schedule(&sp->task);
4128 } else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
4129 DBG_PRINT(INFO_DBG, "%s:Out of memory", sp->dev->name);
4130 DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
4132 return 0;
4135 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4137 struct ring_info *ring = (struct ring_info *)dev_id;
4138 struct s2io_nic *sp = ring->nic;
4140 if (!is_s2io_card_up(sp))
4141 return IRQ_HANDLED;
4143 rx_intr_handler(ring);
4144 s2io_chk_rx_buffers(sp, ring->ring_no);
4146 return IRQ_HANDLED;
4149 static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4151 struct fifo_info *fifo = (struct fifo_info *)dev_id;
4152 struct s2io_nic *sp = fifo->nic;
4154 if (!is_s2io_card_up(sp))
4155 return IRQ_HANDLED;
4157 tx_intr_handler(fifo);
4158 return IRQ_HANDLED;
4160 static void s2io_txpic_intr_handle(struct s2io_nic *sp)
4162 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4163 u64 val64;
4165 val64 = readq(&bar0->pic_int_status);
4166 if (val64 & PIC_INT_GPIO) {
4167 val64 = readq(&bar0->gpio_int_reg);
4168 if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
4169 (val64 & GPIO_INT_REG_LINK_UP)) {
4171 * This is an unstable state, so clear both up/down
4172 * interrupts and let the adapter re-evaluate the link state.
4174 val64 |= GPIO_INT_REG_LINK_DOWN;
4175 val64 |= GPIO_INT_REG_LINK_UP;
4176 writeq(val64, &bar0->gpio_int_reg);
4177 val64 = readq(&bar0->gpio_int_mask);
4178 val64 &= ~(GPIO_INT_MASK_LINK_UP |
4179 GPIO_INT_MASK_LINK_DOWN);
4180 writeq(val64, &bar0->gpio_int_mask);
4182 else if (val64 & GPIO_INT_REG_LINK_UP) {
4183 val64 = readq(&bar0->adapter_status);
4184 /* Enable Adapter */
4185 val64 = readq(&bar0->adapter_control);
4186 val64 |= ADAPTER_CNTL_EN;
4187 writeq(val64, &bar0->adapter_control);
4188 val64 |= ADAPTER_LED_ON;
4189 writeq(val64, &bar0->adapter_control);
4190 if (!sp->device_enabled_once)
4191 sp->device_enabled_once = 1;
4193 s2io_link(sp, LINK_UP);
4195 * unmask link down interrupt and mask link-up
4196 * intr
4198 val64 = readq(&bar0->gpio_int_mask);
4199 val64 &= ~GPIO_INT_MASK_LINK_DOWN;
4200 val64 |= GPIO_INT_MASK_LINK_UP;
4201 writeq(val64, &bar0->gpio_int_mask);
4203 } else if (val64 & GPIO_INT_REG_LINK_DOWN) {
4204 val64 = readq(&bar0->adapter_status);
4205 s2io_link(sp, LINK_DOWN);
4206 /* Link is down so unmask the link-up interrupt */
4207 val64 = readq(&bar0->gpio_int_mask);
4208 val64 &= ~GPIO_INT_MASK_LINK_UP;
4209 val64 |= GPIO_INT_MASK_LINK_DOWN;
4210 writeq(val64, &bar0->gpio_int_mask);
4212 /* turn off LED */
4213 val64 = readq(&bar0->adapter_control);
4214 val64 &= ~ADAPTER_LED_ON;
4215 writeq(val64, &bar0->adapter_control);
4218 val64 = readq(&bar0->gpio_int_mask);
4222 * do_s2io_chk_alarm_bit - Check for alarm and increment the counter
4223 * @value: alarm bits
4224 * @addr: address value
4225 * @cnt: counter variable
4226 * Description: Check for alarm and increment the counter
4227 * Return Value:
4228 * 1 - if alarm bit set
4229 * 0 - if alarm bit is not set
4231 int do_s2io_chk_alarm_bit(u64 value, void __iomem * addr,
4232 unsigned long long *cnt)
4234 u64 val64;
4235 val64 = readq(addr);
4236 if (val64 & value) {
4237 writeq(val64, addr);
4238 (*cnt)++;
4239 return 1;
4241 return 0;
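/* Illustrative sketch, not part of the driver: the write-1-to-clear
 * acknowledge that do_s2io_chk_alarm_bit() relies on -- writing back the
 * value just read clears exactly the alarm bits that were set, so no
 * read-modify-write cycle is needed. The helper name is made up.
 */
static inline void s2io_ack_alarms(void __iomem *addr)
{
	writeq(readq(addr), addr);
}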
4246 * s2io_handle_errors - Xframe error indication handler
4247 * @nic: device private variable
4248 * Description: Handle alarms such as loss of link, single or
4249 * double ECC errors, critical and serious errors.
4250 * Return Value:
4251 * NONE
4253 static void s2io_handle_errors(void * dev_id)
4255 struct net_device *dev = (struct net_device *) dev_id;
4256 struct s2io_nic *sp = dev->priv;
4257 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4258 u64 temp64 = 0, val64 = 0;
4259 int i = 0;
4261 struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
4262 struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
4264 if (!is_s2io_card_up(sp))
4265 return;
4267 if (pci_channel_offline(sp->pdev))
4268 return;
4270 memset(&sw_stat->ring_full_cnt, 0,
4271 sizeof(sw_stat->ring_full_cnt));
4273 /* Handling the XPAK counters update */
4274 if (stats->xpak_timer_count < 72000) {
4275 /* waiting for an hour */
4276 stats->xpak_timer_count++;
4277 } else {
4278 s2io_updt_xpak_counter(dev);
4279 /* reset the count to zero */
4280 stats->xpak_timer_count = 0;
4283 /* Handling link status change error Intr */
4284 if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
4285 val64 = readq(&bar0->mac_rmac_err_reg);
4286 writeq(val64, &bar0->mac_rmac_err_reg);
4287 if (val64 & RMAC_LINK_STATE_CHANGE_INT)
4288 schedule_work(&sp->set_link_task);
4291 /* In case of a serious error, the device will be reset. */
4292 if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
4293 &sw_stat->serious_err_cnt))
4294 goto reset;
4296 /* Check for data parity error */
4297 if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
4298 &sw_stat->parity_err_cnt))
4299 goto reset;
4301 /* Check for ring full counter */
4302 if (sp->device_type == XFRAME_II_DEVICE) {
4303 val64 = readq(&bar0->ring_bump_counter1);
4304 for (i=0; i<4; i++) {
4305 temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
4306 temp64 >>= 64 - ((i+1)*16);
4307 sw_stat->ring_full_cnt[i] += temp64;
4310 val64 = readq(&bar0->ring_bump_counter2);
4311 for (i=0; i<4; i++) {
4312 temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
4313 temp64 >>= 64 - ((i+1)*16);
4314 sw_stat->ring_full_cnt[i+4] += temp64;
4318 val64 = readq(&bar0->txdma_int_status);
4319 /* check for pfc_err */
4320 if (val64 & TXDMA_PFC_INT) {
4321 if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM|
4322 PFC_MISC_0_ERR | PFC_MISC_1_ERR|
4323 PFC_PCIX_ERR, &bar0->pfc_err_reg,
4324 &sw_stat->pfc_err_cnt))
4325 goto reset;
4326 do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR, &bar0->pfc_err_reg,
4327 &sw_stat->pfc_err_cnt);
4330 /* check for tda_err */
4331 if (val64 & TXDMA_TDA_INT) {
4332 if (do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
4333 TDA_SM1_ERR_ALARM, &bar0->tda_err_reg,
4334 &sw_stat->tda_err_cnt))
4335 goto reset;
4336 do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
4337 &bar0->tda_err_reg, &sw_stat->tda_err_cnt);
4339 /* check for pcc_err */
4340 if (val64 & TXDMA_PCC_INT) {
4341 if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM
4342 | PCC_N_SERR | PCC_6_COF_OV_ERR
4343 | PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR
4344 | PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR
4345 | PCC_TXB_ECC_DB_ERR, &bar0->pcc_err_reg,
4346 &sw_stat->pcc_err_cnt))
4347 goto reset;
4348 do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
4349 &bar0->pcc_err_reg, &sw_stat->pcc_err_cnt);
4352 /* check for tti_err */
4353 if (val64 & TXDMA_TTI_INT) {
4354 if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM, &bar0->tti_err_reg,
4355 &sw_stat->tti_err_cnt))
4356 goto reset;
4357 do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
4358 &bar0->tti_err_reg, &sw_stat->tti_err_cnt);
4361 /* check for lso_err */
4362 if (val64 & TXDMA_LSO_INT) {
4363 if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT
4364 | LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
4365 &bar0->lso_err_reg, &sw_stat->lso_err_cnt))
4366 goto reset;
4367 do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
4368 &bar0->lso_err_reg, &sw_stat->lso_err_cnt);
4371 /* check for tpa_err */
4372 if (val64 & TXDMA_TPA_INT) {
4373 if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM, &bar0->tpa_err_reg,
4374 &sw_stat->tpa_err_cnt))
4375 goto reset;
4376 do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP, &bar0->tpa_err_reg,
4377 &sw_stat->tpa_err_cnt);
4380 /* check for sm_err */
4381 if (val64 & TXDMA_SM_INT) {
4382 if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM, &bar0->sm_err_reg,
4383 &sw_stat->sm_err_cnt))
4384 goto reset;
4387 val64 = readq(&bar0->mac_int_status);
4388 if (val64 & MAC_INT_STATUS_TMAC_INT) {
4389 if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
4390 &bar0->mac_tmac_err_reg,
4391 &sw_stat->mac_tmac_err_cnt))
4392 goto reset;
4393 do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR
4394 | TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
4395 &bar0->mac_tmac_err_reg,
4396 &sw_stat->mac_tmac_err_cnt);
4399 val64 = readq(&bar0->xgxs_int_status);
4400 if (val64 & XGXS_INT_STATUS_TXGXS) {
4401 if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
4402 &bar0->xgxs_txgxs_err_reg,
4403 &sw_stat->xgxs_txgxs_err_cnt))
4404 goto reset;
4405 do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
4406 &bar0->xgxs_txgxs_err_reg,
4407 &sw_stat->xgxs_txgxs_err_cnt);
4410 val64 = readq(&bar0->rxdma_int_status);
4411 if (val64 & RXDMA_INT_RC_INT_M) {
4412 if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR
4413 | RC_PRCn_SM_ERR_ALARM |RC_FTC_SM_ERR_ALARM,
4414 &bar0->rc_err_reg, &sw_stat->rc_err_cnt))
4415 goto reset;
4416 do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR
4417 | RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
4418 &sw_stat->rc_err_cnt);
4419 if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn
4420 | PRC_PCI_AB_F_WR_Rn, &bar0->prc_pcix_err_reg,
4421 &sw_stat->prc_pcix_err_cnt))
4422 goto reset;
4423 do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn | PRC_PCI_DP_WR_Rn
4424 | PRC_PCI_DP_F_WR_Rn, &bar0->prc_pcix_err_reg,
4425 &sw_stat->prc_pcix_err_cnt);
4428 if (val64 & RXDMA_INT_RPA_INT_M) {
4429 if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
4430 &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt))
4431 goto reset;
4432 do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
4433 &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt);
4436 if (val64 & RXDMA_INT_RDA_INT_M) {
4437 if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR
4438 | RDA_FRM_ECC_DB_N_AERR | RDA_SM1_ERR_ALARM
4439 | RDA_SM0_ERR_ALARM | RDA_RXD_ECC_DB_SERR,
4440 &bar0->rda_err_reg, &sw_stat->rda_err_cnt))
4441 goto reset;
4442 do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR | RDA_FRM_ECC_SG_ERR
4443 | RDA_MISC_ERR | RDA_PCIX_ERR,
4444 &bar0->rda_err_reg, &sw_stat->rda_err_cnt);
4447 if (val64 & RXDMA_INT_RTI_INT_M) {
4448 if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM, &bar0->rti_err_reg,
4449 &sw_stat->rti_err_cnt))
4450 goto reset;
4451 do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
4452 &bar0->rti_err_reg, &sw_stat->rti_err_cnt);
4455 val64 = readq(&bar0->mac_int_status);
4456 if (val64 & MAC_INT_STATUS_RMAC_INT) {
4457 if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
4458 &bar0->mac_rmac_err_reg,
4459 &sw_stat->mac_rmac_err_cnt))
4460 goto reset;
4461 do_s2io_chk_alarm_bit(RMAC_UNUSED_INT|RMAC_SINGLE_ECC_ERR|
4462 RMAC_DOUBLE_ECC_ERR, &bar0->mac_rmac_err_reg,
4463 &sw_stat->mac_rmac_err_cnt);
4466 val64 = readq(&bar0->xgxs_int_status);
4467 if (val64 & XGXS_INT_STATUS_RXGXS) {
4468 if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
4469 &bar0->xgxs_rxgxs_err_reg,
4470 &sw_stat->xgxs_rxgxs_err_cnt))
4471 goto reset;
4474 val64 = readq(&bar0->mc_int_status);
4475 if (val64 & MC_INT_STATUS_MC_INT) {
4476 if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR, &bar0->mc_err_reg,
4477 &sw_stat->mc_err_cnt))
4478 goto reset;
4480 /* Handling ECC errors */
4481 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
4482 writeq(val64, &bar0->mc_err_reg);
4483 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
4484 sw_stat->double_ecc_errs++;
4485 if (sp->device_type != XFRAME_II_DEVICE) {
4487 * Reset Xframe I only on a critical error
4489 if (val64 &
4490 (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
4491 MC_ERR_REG_MIRI_ECC_DB_ERR_1))
4492 goto reset;
4494 } else
4495 sw_stat->single_ecc_errs++;
4498 return;
4500 reset:
4501 netif_stop_queue(dev);
4502 schedule_work(&sp->rst_timer_task);
4503 sw_stat->soft_reset_cnt++;
4504 return;
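/* Illustrative sketch, not part of the driver: extracting one 16-bit ring
 * bump count from the packed 64-bit counter registers read above. Field 0
 * sits in the most significant 16 bits, field 3 in the least significant.
 * The helper name is made up.
 */
static inline u16 s2io_ring_bump_field(u64 counter, int field)
{
	return (u16)(counter >> (64 - ((field + 1) * 16)));
}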
4508 * s2io_isr - ISR handler of the device.
4509 * @irq: the irq of the device.
4510 * @dev_id: a void pointer to the dev structure of the NIC.
4511 * Description: This function is the ISR handler of the device. It
4512 * identifies the reason for the interrupt and calls the relevant
4513 * service routines. As a contingency measure, this ISR allocates the
4514 * recv buffers, if their numbers are below the panic value which is
4515 * presently set to 25% of the original number of rcv buffers allocated.
4516 * Return value:
4517 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
4518 * IRQ_NONE: will be returned if interrupt is not from our device
4520 static irqreturn_t s2io_isr(int irq, void *dev_id)
4522 struct net_device *dev = (struct net_device *) dev_id;
4523 struct s2io_nic *sp = dev->priv;
4524 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4525 int i;
4526 u64 reason = 0;
4527 struct mac_info *mac_control;
4528 struct config_param *config;
4530 /* Pretend we handled any IRQs from a disconnected card */
4531 if (pci_channel_offline(sp->pdev))
4532 return IRQ_NONE;
4534 if (!is_s2io_card_up(sp))
4535 return IRQ_NONE;
4537 mac_control = &sp->mac_control;
4538 config = &sp->config;
4541 * Identify the cause for the interrupt and call the appropriate
4542 * interrupt handler. Causes for the interrupt could be:
4543 * 1. Rx of packet.
4544 * 2. Tx complete.
4545 * 3. Link down.
4547 reason = readq(&bar0->general_int_status);
4549 if (unlikely(reason == S2IO_MINUS_ONE) ) {
4550 /* Nothing much can be done. Get out */
4551 return IRQ_HANDLED;
4554 if (reason & (GEN_INTR_RXTRAFFIC |
4555 GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC))
4557 writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4559 if (config->napi) {
4560 if (reason & GEN_INTR_RXTRAFFIC) {
4561 if (likely(netif_rx_schedule_prep(dev,
4562 &sp->napi))) {
4563 __netif_rx_schedule(dev, &sp->napi);
4564 writeq(S2IO_MINUS_ONE,
4565 &bar0->rx_traffic_mask);
4566 } else
4567 writeq(S2IO_MINUS_ONE,
4568 &bar0->rx_traffic_int);
4570 } else {
4572 * rx_traffic_int reg is an R1 register, writing all 1's
4573 * will ensure that the actual interrupt causing bit
4574 * gets cleared and hence a read can be avoided.
4576 if (reason & GEN_INTR_RXTRAFFIC)
4577 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4579 for (i = 0; i < config->rx_ring_num; i++)
4580 rx_intr_handler(&mac_control->rings[i]);
4584 * tx_traffic_int reg is an R1 register, writing all 1's
4585 * will ensure that the actual interrupt causing bit gets
4586 * cleared and hence a read can be avoided.
4588 if (reason & GEN_INTR_TXTRAFFIC)
4589 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4591 for (i = 0; i < config->tx_fifo_num; i++)
4592 tx_intr_handler(&mac_control->fifos[i]);
4594 if (reason & GEN_INTR_TXPIC)
4595 s2io_txpic_intr_handle(sp);
4598 * Reallocate the buffers from the interrupt handler itself.
4600 if (!config->napi) {
4601 for (i = 0; i < config->rx_ring_num; i++)
4602 s2io_chk_rx_buffers(sp, i);
4604 writeq(sp->general_int_mask, &bar0->general_int_mask);
4605 readl(&bar0->general_int_status);
4607 return IRQ_HANDLED;
4610 else if (!reason) {
4611 /* The interrupt was not raised by us */
4612 return IRQ_NONE;
4615 return IRQ_HANDLED;
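/* Illustrative sketch, not part of the driver: the "R1" semantics noted in
 * the comments above -- writing all 1s to a traffic interrupt register
 * clears every asserted cause bit in one shot, so the ISR never has to
 * read those registers back. The helper name is made up.
 */
static inline void s2io_clear_r1_reg(void __iomem *reg)
{
	writeq(S2IO_MINUS_ONE, reg);
}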
4619 * s2io_updt_stats - Triggers a hardware statistics update and polls briefly for completion.
4621 static void s2io_updt_stats(struct s2io_nic *sp)
4623 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4624 u64 val64;
4625 int cnt = 0;
4627 if (is_s2io_card_up(sp)) {
4628 /* Approx 30us on a 133 MHz bus */
4629 val64 = SET_UPDT_CLICKS(10) |
4630 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4631 writeq(val64, &bar0->stat_cfg);
4632 do {
4633 udelay(100);
4634 val64 = readq(&bar0->stat_cfg);
4635 if (!(val64 & BIT(0)))
4636 break;
4637 cnt++;
4638 if (cnt == 5)
4639 break; /* Update failed */
4640 } while(1);
4645 * s2io_get_stats - Updates the device statistics structure.
4646 * @dev : pointer to the device structure.
4647 * Description:
4648 * This function updates the device statistics structure in the s2io_nic
4649 * structure and returns a pointer to the same.
4650 * Return value:
4651 * pointer to the updated net_device_stats structure.
4654 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4656 struct s2io_nic *sp = dev->priv;
4657 struct mac_info *mac_control;
4658 struct config_param *config;
4661 mac_control = &sp->mac_control;
4662 config = &sp->config;
4664 /* Configure Stats for immediate update */
4665 s2io_updt_stats(sp);
4667 sp->stats.tx_packets =
4668 le32_to_cpu(mac_control->stats_info->tmac_frms);
4669 sp->stats.tx_errors =
4670 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4671 sp->stats.rx_errors =
4672 le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
4673 sp->stats.multicast =
4674 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
4675 sp->stats.rx_length_errors =
4676 le64_to_cpu(mac_control->stats_info->rmac_long_frms);
4678 return (&sp->stats);
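/* Illustrative sketch, not part of the driver: the hardware statistics
 * block is DMAed in little-endian format, which is why every counter that
 * s2io_get_stats() reads above goes through le32_to_cpu()/le64_to_cpu().
 * The helper name is made up.
 */
static inline u64 s2io_stat64(const __le64 *counter)
{
	return le64_to_cpu(*counter);
}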
4682 * s2io_set_multicast - entry point for multicast address enable/disable.
4683 * @dev : pointer to the device structure
4684 * Description:
4685 * This function is a driver entry point which gets called by the kernel
4686 * whenever multicast addresses must be enabled/disabled. This also gets
4687 * called to set/reset promiscuous mode. Depending on the device flags, we
4688 * determine if multicast addresses must be enabled or if promiscuous mode
4689 * is to be disabled, etc.
4690 * Return value:
4691 * void.
4694 static void s2io_set_multicast(struct net_device *dev)
4696 int i, j, prev_cnt;
4697 struct dev_mc_list *mclist;
4698 struct s2io_nic *sp = dev->priv;
4699 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4700 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4701 0xfeffffffffffULL;
4702 u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
4703 void __iomem *add;
4705 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4706 /* Enable all Multicast addresses */
4707 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4708 &bar0->rmac_addr_data0_mem);
4709 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4710 &bar0->rmac_addr_data1_mem);
4711 val64 = RMAC_ADDR_CMD_MEM_WE |
4712 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4713 RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
4714 writeq(val64, &bar0->rmac_addr_cmd_mem);
4715 /* Wait till command completes */
4716 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4717 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4718 S2IO_BIT_RESET);
4720 sp->m_cast_flg = 1;
4721 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
4722 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4723 /* Disable all Multicast addresses */
4724 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4725 &bar0->rmac_addr_data0_mem);
4726 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4727 &bar0->rmac_addr_data1_mem);
4728 val64 = RMAC_ADDR_CMD_MEM_WE |
4729 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4730 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4731 writeq(val64, &bar0->rmac_addr_cmd_mem);
4732 /* Wait till command completes */
4733 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4734 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4735 S2IO_BIT_RESET);
4737 sp->m_cast_flg = 0;
4738 sp->all_multi_pos = 0;
4741 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4742 /* Put the NIC into promiscuous mode */
4743 add = &bar0->mac_cfg;
4744 val64 = readq(&bar0->mac_cfg);
4745 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
4747 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4748 writel((u32) val64, add);
4749 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4750 writel((u32) (val64 >> 32), (add + 4));
4752 if (vlan_tag_strip != 1) {
4753 val64 = readq(&bar0->rx_pa_cfg);
4754 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
4755 writeq(val64, &bar0->rx_pa_cfg);
4756 vlan_strip_flag = 0;
4759 val64 = readq(&bar0->mac_cfg);
4760 sp->promisc_flg = 1;
4761 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
4762 dev->name);
4763 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
4764 /* Remove the NIC from promiscuous mode */
4765 add = &bar0->mac_cfg;
4766 val64 = readq(&bar0->mac_cfg);
4767 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
4769 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4770 writel((u32) val64, add);
4771 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4772 writel((u32) (val64 >> 32), (add + 4));
4774 if (vlan_tag_strip != 0) {
4775 val64 = readq(&bar0->rx_pa_cfg);
4776 val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
4777 writeq(val64, &bar0->rx_pa_cfg);
4778 vlan_strip_flag = 1;
4781 val64 = readq(&bar0->mac_cfg);
4782 sp->promisc_flg = 0;
4783 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
4784 dev->name);
4787 /* Update individual M_CAST address list */
4788 if ((!sp->m_cast_flg) && dev->mc_count) {
4789 if (dev->mc_count >
4790 (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
4791 DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
4792 dev->name);
4793 DBG_PRINT(ERR_DBG, "can be added, please enable ");
4794 DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
4795 return;
4798 prev_cnt = sp->mc_addr_count;
4799 sp->mc_addr_count = dev->mc_count;
4801 /* Clear out the previous list of Mc in the H/W. */
4802 for (i = 0; i < prev_cnt; i++) {
4803 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4804 &bar0->rmac_addr_data0_mem);
4805 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4806 &bar0->rmac_addr_data1_mem);
4807 val64 = RMAC_ADDR_CMD_MEM_WE |
4808 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4809 RMAC_ADDR_CMD_MEM_OFFSET
4810 (MAC_MC_ADDR_START_OFFSET + i);
4811 writeq(val64, &bar0->rmac_addr_cmd_mem);
4813 /* Wait till command completes */
4814 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4815 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4816 S2IO_BIT_RESET)) {
4817 DBG_PRINT(ERR_DBG, "%s: Adding ",
4818 dev->name);
4819 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4820 return;
4824 /* Create the new Rx filter list and update the same in H/W. */
4825 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
4826 i++, mclist = mclist->next) {
4827 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
4828 ETH_ALEN);
4829 mac_addr = 0;
4830 for (j = 0; j < ETH_ALEN; j++) {
4831 mac_addr |= mclist->dmi_addr[j];
4832 mac_addr <<= 8;
4834 mac_addr >>= 8;
4835 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4836 &bar0->rmac_addr_data0_mem);
4837 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4838 &bar0->rmac_addr_data1_mem);
4839 val64 = RMAC_ADDR_CMD_MEM_WE |
4840 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4841 RMAC_ADDR_CMD_MEM_OFFSET
4842 (i + MAC_MC_ADDR_START_OFFSET);
4843 writeq(val64, &bar0->rmac_addr_cmd_mem);
4845 /* Wait till command completes */
4846 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4847 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4848 S2IO_BIT_RESET)) {
4849 DBG_PRINT(ERR_DBG, "%s: Adding ",
4850 dev->name);
4851 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4852 return;
4858 /* add unicast MAC address to CAM */
4859 static int do_s2io_add_unicast(struct s2io_nic *sp, u64 addr, int off)
4861 u64 val64;
4862 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4864 writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
4865 &bar0->rmac_addr_data0_mem);
4867 val64 =
4868 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4869 RMAC_ADDR_CMD_MEM_OFFSET(off);
4870 writeq(val64, &bar0->rmac_addr_cmd_mem);
4872 /* Wait till command completes */
4873 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4874 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4875 S2IO_BIT_RESET)) {
4876 DBG_PRINT(INFO_DBG, "add_mac_addr failed\n");
4877 return FAILURE;
4879 return SUCCESS;
4883 * s2io_set_mac_addr - driver entry point
4885 static int s2io_set_mac_addr(struct net_device *dev, void *p)
4887 struct sockaddr *addr = p;
4889 if (!is_valid_ether_addr(addr->sa_data))
4890 return -EINVAL;
4892 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4894 /* store the MAC address in CAM */
4895 return (do_s2io_prog_unicast(dev, dev->dev_addr));
4899 * do_s2io_prog_unicast - Programs the Xframe mac address
4900 * @dev : pointer to the device structure.
4901 * @addr: a uchar pointer to the new mac address which is to be set.
4902 * Description : This procedure will program the Xframe to receive
4903 * frames with the new MAC address
4904 * Return value: SUCCESS on success and an appropriate (-)ve integer
4905 * as defined in errno.h file on failure.
4907 static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
4909 struct s2io_nic *sp = dev->priv;
4910 register u64 mac_addr = 0, perm_addr = 0;
4911 int i;
4914 * Set the new MAC address as the new unicast filter and reflect this
4915 * change on the device address registered with the OS. It will be
4916 * at offset 0.
4918 for (i = 0; i < ETH_ALEN; i++) {
4919 mac_addr <<= 8;
4920 mac_addr |= addr[i];
4921 perm_addr <<= 8;
4922 perm_addr |= sp->def_mac_addr[0].mac_addr[i];
4925 /* check if the dev_addr is different from perm_addr */
4926 if (mac_addr == perm_addr)
4927 return SUCCESS;
4929 /* Update the internal structure with this new mac address */
4930 do_s2io_copy_mac_addr(sp, 0, mac_addr);
4931 return (do_s2io_add_unicast(sp, mac_addr, 0));
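/* Illustrative sketch, not part of the driver: the MSB-first packing of a
 * six-byte MAC address into the low 48 bits of a u64, as done by the loop
 * in do_s2io_prog_unicast() above and in s2io_set_multicast() earlier.
 * The helper name is made up.
 */
static inline u64 s2io_mac_to_u64(const u8 *addr)
{
	u64 val = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		val <<= 8;
		val |= addr[i];
	}
	return val;
}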
4935 * s2io_ethtool_sset - Sets different link parameters.
4936 * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
4937 * @info: pointer to the structure with parameters given by ethtool to set
4938 * link information.
4939 * Description:
4940 * The function sets different link parameters provided by the user onto
4941 * the NIC.
4942 * Return value:
4943 * 0 on success.
4946 static int s2io_ethtool_sset(struct net_device *dev,
4947 struct ethtool_cmd *info)
4949 struct s2io_nic *sp = dev->priv;
4950 if ((info->autoneg == AUTONEG_ENABLE) ||
4951 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
4952 return -EINVAL;
4953 else {
4954 s2io_close(sp->dev);
4955 s2io_open(sp->dev);
4958 return 0;
4962 * s2io_ethtool_gset - Return link specific information.
4963 * @sp : private member of the device structure, pointer to the
4964 * s2io_nic structure.
4965 * @info : pointer to the structure with parameters given by ethtool
4966 * to return link information.
4967 * Description:
4968 * Returns link specific information like speed, duplex etc.. to ethtool.
4969 * Return value :
4970 * return 0 on success.
4973 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
4975 struct s2io_nic *sp = dev->priv;
4976 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
4977 info->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
4978 info->port = PORT_FIBRE;
4980 /* info->transceiver */
4981 info->transceiver = XCVR_EXTERNAL;
4983 if (netif_carrier_ok(sp->dev)) {
4984 info->speed = 10000;
4985 info->duplex = DUPLEX_FULL;
4986 } else {
4987 info->speed = -1;
4988 info->duplex = -1;
4991 info->autoneg = AUTONEG_DISABLE;
4992 return 0;
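/* Illustrative wiring sketch, assuming nothing beyond what is shown above;
 * the driver's real ethtool_ops table is defined elsewhere in this file and
 * the table name here is made up. This is how the two entry points above
 * are typically hooked up.
 */
static const struct ethtool_ops s2io_ethtool_ops_sketch = {
	.get_settings = s2io_ethtool_gset,
	.set_settings = s2io_ethtool_sset,
};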
4996 * s2io_ethtool_gdrvinfo - Returns driver specific information.
4997 * @sp : private member of the device structure, which is a pointer to the
4998 * s2io_nic structure.
4999 * @info : pointer to the structure with parameters given by ethtool to
5000 * return driver information.
5001 * Description:
5002 * Returns driver specific information like name, version etc. to ethtool.
5003 * Return value:
5004 * void
5007 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5008 struct ethtool_drvinfo *info)
5010 struct s2io_nic *sp = dev->priv;
5012 strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
5013 strncpy(info->version, s2io_driver_version, sizeof(info->version));
5014 strncpy(info->fw_version, "", sizeof(info->fw_version));
5015 strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
5016 info->regdump_len = XENA_REG_SPACE;
5017 info->eedump_len = XENA_EEPROM_SPACE;
5021 * s2io_ethtool_gregs - dumps the entire register space of the Xframe into the buffer.
5022 * @sp: private member of the device structure, which is a pointer to the
5023 * s2io_nic structure.
5024 * @regs : pointer to the structure with parameters given by ethtool for
5025 * dumping the registers.
5026 * @reg_space: The input argument into which all the registers are dumped.
5027 * Description:
5028 * Dumps the entire register space of the Xframe NIC into the user-given
5029 * buffer area.
5030 * Return value :
5031 * void.
5034 static void s2io_ethtool_gregs(struct net_device *dev,
5035 struct ethtool_regs *regs, void *space)
5037 int i;
5038 u64 reg;
5039 u8 *reg_space = (u8 *) space;
5040 struct s2io_nic *sp = dev->priv;
5042 regs->len = XENA_REG_SPACE;
5043 regs->version = sp->pdev->subsystem_device;
5045 for (i = 0; i < regs->len; i += 8) {
5046 reg = readq(sp->bar0 + i);
5047 memcpy((reg_space + i), &reg, 8);
5052 * s2io_phy_id - timer function that alternates adapter LED.
5053 * @data : address of the private member of the device structure, which
5054 * is a pointer to the s2io_nic structure, provided as an u32.
5055 * Description: This is actually the timer function that toggles the
5056 * adapter LED bit in the adapter control register on every
5057 * invocation. The timer is set for 1/2 a second, hence the NIC blinks
5058 * once every second.
5060 static void s2io_phy_id(unsigned long data)
5062 struct s2io_nic *sp = (struct s2io_nic *) data;
5063 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5064 u64 val64 = 0;
5065 u16 subid;
5067 subid = sp->pdev->subsystem_device;
5068 if ((sp->device_type == XFRAME_II_DEVICE) ||
5069 ((subid & 0xFF) >= 0x07)) {
5070 val64 = readq(&bar0->gpio_control);
5071 val64 ^= GPIO_CTRL_GPIO_0;
5072 writeq(val64, &bar0->gpio_control);
5073 } else {
5074 val64 = readq(&bar0->adapter_control);
5075 val64 ^= ADAPTER_LED_ON;
5076 writeq(val64, &bar0->adapter_control);
5079 mod_timer(&sp->id_timer, jiffies + HZ / 2);
5083 * s2io_ethtool_idnic - To physically identify the nic on the system.
5084 * @sp : private member of the device structure, which is a pointer to the
5085 * s2io_nic structure.
5086 * @id : pointer to the structure with identification parameters given by
5087 * ethtool.
5088 * Description: Used to physically identify the NIC on the system.
5089 * The Link LED will blink for a time specified by the user for
5090 * identification.
5091 * NOTE: The Link has to be Up to be able to blink the LED. Hence
5092 * identification is possible only if its link is up.
5093 * Return value:
5094 * int , returns 0 on success
5097 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
5099 u64 val64 = 0, last_gpio_ctrl_val;
5100 struct s2io_nic *sp = dev->priv;
5101 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5102 u16 subid;
5104 subid = sp->pdev->subsystem_device;
5105 last_gpio_ctrl_val = readq(&bar0->gpio_control);
5106 if ((sp->device_type == XFRAME_I_DEVICE) &&
5107 ((subid & 0xFF) < 0x07)) {
5108 val64 = readq(&bar0->adapter_control);
5109 if (!(val64 & ADAPTER_CNTL_EN)) {
5110 printk(KERN_ERR
5111 "Adapter Link down, cannot blink LED\n");
5112 return -EFAULT;
5115 if (sp->id_timer.function == NULL) {
5116 init_timer(&sp->id_timer);
5117 sp->id_timer.function = s2io_phy_id;
5118 sp->id_timer.data = (unsigned long) sp;
5120 mod_timer(&sp->id_timer, jiffies);
5121 if (data)
5122 msleep_interruptible(data * 1000);
5123 else
5124 msleep_interruptible(MAX_FLICKER_TIME);
5125 del_timer_sync(&sp->id_timer);
5127 if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
5128 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
5129 last_gpio_ctrl_val = readq(&bar0->gpio_control);
5132 return 0;
5135 static void s2io_ethtool_gringparam(struct net_device *dev,
5136 struct ethtool_ringparam *ering)
5138 struct s2io_nic *sp = dev->priv;
5139 int i, tx_desc_count = 0, rx_desc_count = 0;
5141 if (sp->rxd_mode == RXD_MODE_1)
5142 ering->rx_max_pending = MAX_RX_DESC_1;
5143 else if (sp->rxd_mode == RXD_MODE_3B)
5144 ering->rx_max_pending = MAX_RX_DESC_2;
5146 ering->tx_max_pending = MAX_TX_DESC;
5147 for (i = 0 ; i < sp->config.tx_fifo_num ; i++)
5148 tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5150 DBG_PRINT(INFO_DBG,"\nmax txds : %d\n",sp->config.max_txds);
5151 ering->tx_pending = tx_desc_count;
5152 rx_desc_count = 0;
5153 for (i = 0 ; i < sp->config.rx_ring_num ; i++)
5154 rx_desc_count += sp->config.rx_cfg[i].num_rxd;
5156 ering->rx_pending = rx_desc_count;
5158 ering->rx_mini_max_pending = 0;
5159 ering->rx_mini_pending = 0;
5160 if (sp->rxd_mode == RXD_MODE_1)
5161 ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5162 else if (sp->rxd_mode == RXD_MODE_3B)
5163 ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5164 ering->rx_jumbo_pending = rx_desc_count;
5168 * s2io_ethtool_getpause_data - Pause frame generation and reception.
5169 * @sp : private member of the device structure, which is a pointer to the
5170 * s2io_nic structure.
5171 * @ep : pointer to the structure with pause parameters given by ethtool.
5172 * Description:
5173 * Returns the Pause frame generation and reception capability of the NIC.
5174 * Return value:
5175 * void
5177 static void s2io_ethtool_getpause_data(struct net_device *dev,
5178 struct ethtool_pauseparam *ep)
5180 u64 val64;
5181 struct s2io_nic *sp = dev->priv;
5182 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5184 val64 = readq(&bar0->rmac_pause_cfg);
5185 if (val64 & RMAC_PAUSE_GEN_ENABLE)
5186 ep->tx_pause = TRUE;
5187 if (val64 & RMAC_PAUSE_RX_ENABLE)
5188 ep->rx_pause = TRUE;
5189 ep->autoneg = FALSE;
5193 * s2io_ethtool_setpause_data - set/reset pause frame generation.
5194 * @sp : private member of the device structure, which is a pointer to the
5195 * s2io_nic structure.
5196 * @ep : pointer to the structure with pause parameters given by ethtool.
5197 * Description:
5198 * It can be used to set or reset Pause frame generation or reception
5199 * support of the NIC.
5200 * Return value:
5201 * int, returns 0 on Success
5204 static int s2io_ethtool_setpause_data(struct net_device *dev,
5205 struct ethtool_pauseparam *ep)
5207 u64 val64;
5208 struct s2io_nic *sp = dev->priv;
5209 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5211 val64 = readq(&bar0->rmac_pause_cfg);
5212 if (ep->tx_pause)
5213 val64 |= RMAC_PAUSE_GEN_ENABLE;
5214 else
5215 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5216 if (ep->rx_pause)
5217 val64 |= RMAC_PAUSE_RX_ENABLE;
5218 else
5219 val64 &= ~RMAC_PAUSE_RX_ENABLE;
5220 writeq(val64, &bar0->rmac_pause_cfg);
5221 return 0;
5225 * read_eeprom - reads 4 bytes of data from user given offset.
5226 * @sp : private member of the device structure, which is a pointer to the
5227 * s2io_nic structure.
5228 * @off : offset from which the data is to be read
5229 * @data : It's an output parameter where the data read at the given
5230 * offset is stored.
5231 * Description:
5232 * Will read 4 bytes of data from the user-given offset and return the
5233 * read data.
5234 * NOTE: Will allow reading only the part of the EEPROM visible through
5235 * the I2C bus.
5236 * Return value:
5237 * -1 on failure and 0 on success.
5240 #define S2IO_DEV_ID 5
5241 static int read_eeprom(struct s2io_nic * sp, int off, u64 * data)
5243 int ret = -1;
5244 u32 exit_cnt = 0;
5245 u64 val64;
5246 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5248 if (sp->device_type == XFRAME_I_DEVICE) {
5249 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
5250 I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
5251 I2C_CONTROL_CNTL_START;
5252 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5254 while (exit_cnt < 5) {
5255 val64 = readq(&bar0->i2c_control);
5256 if (I2C_CONTROL_CNTL_END(val64)) {
5257 *data = I2C_CONTROL_GET_DATA(val64);
5258 ret = 0;
5259 break;
5261 msleep(50);
5262 exit_cnt++;
5266 if (sp->device_type == XFRAME_II_DEVICE) {
5267 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5268 SPI_CONTROL_BYTECNT(0x3) |
5269 SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
5270 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5271 val64 |= SPI_CONTROL_REQ;
5272 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5273 while (exit_cnt < 5) {
5274 val64 = readq(&bar0->spi_control);
5275 if (val64 & SPI_CONTROL_NACK) {
5276 ret = 1;
5277 break;
5278 } else if (val64 & SPI_CONTROL_DONE) {
5279 *data = readq(&bar0->spi_data);
5280 *data &= 0xffffff;
5281 ret = 0;
5282 break;
5284 msleep(50);
5285 exit_cnt++;
5288 return ret;
5292 * write_eeprom - actually writes the relevant part of the data value.
5293 * @sp : private member of the device structure, which is a pointer to the
5294 * s2io_nic structure.
5295 * @off : offset at which the data must be written
5296 * @data : The data that is to be written
5297 * @cnt : Number of bytes of the data that are actually to be written into
5298 * the Eeprom. (max of 3)
5299 * Description:
5300 * Actually writes the relevant part of the data value into the Eeprom
5301 * through the I2C bus.
5302 * Return value:
5303 * 0 on success, -1 on failure.
5306 static int write_eeprom(struct s2io_nic * sp, int off, u64 data, int cnt)
5308 int exit_cnt = 0, ret = -1;
5309 u64 val64;
5310 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5312 if (sp->device_type == XFRAME_I_DEVICE) {
5313 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
5314 I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
5315 I2C_CONTROL_CNTL_START;
5316 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5318 while (exit_cnt < 5) {
5319 val64 = readq(&bar0->i2c_control);
5320 if (I2C_CONTROL_CNTL_END(val64)) {
5321 if (!(val64 & I2C_CONTROL_NACK))
5322 ret = 0;
5323 break;
5325 msleep(50);
5326 exit_cnt++;
5330 if (sp->device_type == XFRAME_II_DEVICE) {
5331 int write_cnt = (cnt == 8) ? 0 : cnt;
5332 writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);
5334 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5335 SPI_CONTROL_BYTECNT(write_cnt) |
5336 SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
5337 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5338 val64 |= SPI_CONTROL_REQ;
5339 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5340 while (exit_cnt < 5) {
5341 val64 = readq(&bar0->spi_control);
5342 if (val64 & SPI_CONTROL_NACK) {
5343 ret = 1;
5344 break;
5345 } else if (val64 & SPI_CONTROL_DONE) {
5346 ret = 0;
5347 break;
5349 msleep(50);
5350 exit_cnt++;
5353 return ret;
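/* Minimal usage sketch, not part of the driver; the offset is chosen only
 * for illustration. A 3-byte write followed by a read-back verify, the
 * same pattern the EEPROM self-test below uses. The helper name is made up.
 */
static inline int s2io_eeprom_rw_check(struct s2io_nic *sp)
{
	u64 ret_data = 0;

	if (write_eeprom(sp, 0x4F0, 0x012345, 3))
		return -1;
	if (read_eeprom(sp, 0x4F0, &ret_data))
		return -1;
	return (ret_data == 0x012345) ? 0 : -1;
}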
5355 static void s2io_vpd_read(struct s2io_nic *nic)
5357 u8 *vpd_data;
5358 u8 data;
5359 int i = 0, cnt, fail = 0;
5360 int vpd_addr = 0x80;
5362 if (nic->device_type == XFRAME_II_DEVICE) {
5363 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5364 vpd_addr = 0x80;
5366 else {
5367 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5368 vpd_addr = 0x50;
5370 strcpy(nic->serial_num, "NOT AVAILABLE");
5372 vpd_data = kmalloc(256, GFP_KERNEL);
5373 if (!vpd_data) {
5374 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
5375 return;
5377 nic->mac_control.stats_info->sw_stat.mem_allocated += 256;
5379 for (i = 0; i < 256; i += 4) {
5380 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5381 pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
5382 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
5383 for (cnt = 0; cnt < 5; cnt++) {
5384 msleep(2);
5385 pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5386 if (data == 0x80)
5387 break;
5389 if (cnt >= 5) {
5390 DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5391 fail = 1;
5392 break;
5394 pci_read_config_dword(nic->pdev, (vpd_addr + 4),
5395 (u32 *)&vpd_data[i]);
5398 if (!fail) {
5399 /* read serial number of adapter */
5400 for (cnt = 0; cnt < 256; cnt++) {
5401 if ((vpd_data[cnt] == 'S') &&
5402 (vpd_data[cnt+1] == 'N') &&
5403 (vpd_data[cnt+2] < VPD_STRING_LEN)) {
5404 memset(nic->serial_num, 0, VPD_STRING_LEN);
5405 memcpy(nic->serial_num, &vpd_data[cnt + 3],
5406 vpd_data[cnt+2]);
5407 break;
5412 if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
5413 memset(nic->product_name, 0, vpd_data[1]);
5414 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
5416 kfree(vpd_data);
5417 nic->mac_control.stats_info->sw_stat.mem_freed += 256;
5421 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
5422 * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
5423 * @eeprom : pointer to the user level structure provided by ethtool,
5424 * containing all relevant information.
5425 * @data_buf : user defined value to be written into Eeprom.
5426 * Description: Reads the values stored in the Eeprom at given offset
5427 * for a given length. Stores these values into the input argument data
5428 * buffer 'data_buf' and returns these to the caller (ethtool).
5429 * Return value:
5430 * int 0 on success
5433 static int s2io_ethtool_geeprom(struct net_device *dev,
5434 struct ethtool_eeprom *eeprom, u8 * data_buf)
5436 u32 i, valid;
5437 u64 data;
5438 struct s2io_nic *sp = dev->priv;
5440 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5442 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5443 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5445 for (i = 0; i < eeprom->len; i += 4) {
5446 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5447 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5448 return -EFAULT;
5450 valid = INV(data);
5451 memcpy((data_buf + i), &valid, 4);
5453 return 0;
5457 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5458 * @sp : private member of the device structure, which is a pointer to the
5459 * s2io_nic structure.
5460 * @eeprom : pointer to the user level structure provided by ethtool,
5461 * containing all relevant information.
5462 * @data_buf : user-defined value to be written into Eeprom.
5463 * Description:
5464 * Tries to write the user provided value in the Eeprom, at the offset
5465 * given by the user.
5466 * Return value:
5467 * 0 on success, -EFAULT on failure.
5470 static int s2io_ethtool_seeprom(struct net_device *dev,
5471 struct ethtool_eeprom *eeprom,
5472 u8 * data_buf)
5474 int len = eeprom->len, cnt = 0;
5475 u64 valid = 0, data;
5476 struct s2io_nic *sp = dev->priv;
5478 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5479 DBG_PRINT(ERR_DBG,
5480 "ETHTOOL_WRITE_EEPROM Err: Magic value ");
5481 DBG_PRINT(ERR_DBG, "is wrong, it's not 0x%x\n",
5482 eeprom->magic);
5483 return -EFAULT;
5486 while (len) {
5487 data = (u32) data_buf[cnt] & 0x000000FF;
5488 if (data) {
5489 valid = (u32) (data << 24);
5490 } else
5491 valid = data;
5493 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5494 DBG_PRINT(ERR_DBG,
5495 "ETHTOOL_WRITE_EEPROM Err: Cannot ");
5496 DBG_PRINT(ERR_DBG,
5497 "write into the specified offset\n");
5498 return -EFAULT;
5500 cnt++;
5501 len--;
5504 return 0;
5508 * s2io_register_test - reads and writes into all clock domains.
5509 * @sp : private member of the device structure, which is a pointer to the
5510 * s2io_nic structure.
5511 * @data : variable that returns the result of each of the tests conducted
5512 * by the driver.
5513 * Description:
5514 * Read and write into all clock domains. The NIC has 3 clock domains;
5515 * verify that registers in all three regions are accessible.
5516 * Return value:
5517 * 0 on success.
5520 static int s2io_register_test(struct s2io_nic * sp, uint64_t * data)
5522 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5523 u64 val64 = 0, exp_val;
5524 int fail = 0;
5526 val64 = readq(&bar0->pif_rd_swapper_fb);
5527 if (val64 != 0x123456789abcdefULL) {
5528 fail = 1;
5529 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
5532 val64 = readq(&bar0->rmac_pause_cfg);
5533 if (val64 != 0xc000ffff00000000ULL) {
5534 fail = 1;
5535 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
5538 val64 = readq(&bar0->rx_queue_cfg);
5539 if (sp->device_type == XFRAME_II_DEVICE)
5540 exp_val = 0x0404040404040404ULL;
5541 else
5542 exp_val = 0x0808080808080808ULL;
5543 if (val64 != exp_val) {
5544 fail = 1;
5545 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
5548 val64 = readq(&bar0->xgxs_efifo_cfg);
5549 if (val64 != 0x000000001923141EULL) {
5550 fail = 1;
5551 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
5554 val64 = 0x5A5A5A5A5A5A5A5AULL;
5555 writeq(val64, &bar0->xmsi_data);
5556 val64 = readq(&bar0->xmsi_data);
5557 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5558 fail = 1;
5559 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
5562 val64 = 0xA5A5A5A5A5A5A5A5ULL;
5563 writeq(val64, &bar0->xmsi_data);
5564 val64 = readq(&bar0->xmsi_data);
5565 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5566 fail = 1;
5567 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
5570 *data = fail;
5571 return fail;
5575 * s2io_eeprom_test - to verify that the EEPROM in the Xena can be programmed.
5576 * @sp : private member of the device structure, which is a pointer to the
5577 * s2io_nic structure.
5578 * @data:variable that returns the result of each of the test conducted by
5579 * the driver.
5580 * Description:
5581 * Verify that the EEPROM in the Xena can be programmed using the
5582 * I2C_CONTROL register.
5583 * Return value:
5584 * 0 on success.
5587 static int s2io_eeprom_test(struct s2io_nic * sp, uint64_t * data)
5589 int fail = 0;
5590 u64 ret_data, org_4F0, org_7F0;
5591 u8 saved_4F0 = 0, saved_7F0 = 0;
5592 struct net_device *dev = sp->dev;
5594 /* Test Write Error at offset 0 */
5595 /* Note that SPI interface allows write access to all areas
5596 * of EEPROM. Hence doing all negative testing only for Xframe I.
5598 if (sp->device_type == XFRAME_I_DEVICE)
5599 if (!write_eeprom(sp, 0, 0, 3))
5600 fail = 1;
5602 /* Save current values at offsets 0x4F0 and 0x7F0 */
5603 if (!read_eeprom(sp, 0x4F0, &org_4F0))
5604 saved_4F0 = 1;
5605 if (!read_eeprom(sp, 0x7F0, &org_7F0))
5606 saved_7F0 = 1;
5608 /* Test Write at offset 4f0 */
5609 if (write_eeprom(sp, 0x4F0, 0x012345, 3))
5610 fail = 1;
5611 if (read_eeprom(sp, 0x4F0, &ret_data))
5612 fail = 1;
5614 if (ret_data != 0x012345) {
5615 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
5616 "Data written %llx Data read %llx\n",
5617 dev->name, (unsigned long long)0x12345,
5618 (unsigned long long)ret_data);
5619 fail = 1;
5622 /* Reset the EEPROM data to 0xFFFFFF */
5623 write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
5625 /* Test Write Request Error at offset 0x7c */
5626 if (sp->device_type == XFRAME_I_DEVICE)
5627 if (!write_eeprom(sp, 0x07C, 0, 3))
5628 fail = 1;
5630 /* Test Write Request at offset 0x7f0 */
5631 if (write_eeprom(sp, 0x7F0, 0x012345, 3))
5632 fail = 1;
5633 if (read_eeprom(sp, 0x7F0, &ret_data))
5634 fail = 1;
5636 if (ret_data != 0x012345) {
5637 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
5638 "Data written %llx Data read %llx\n",
5639 dev->name, (unsigned long long)0x12345,
5640 (unsigned long long)ret_data);
5641 fail = 1;
5644 /* Reset the EEPROM data to 0xFFFFFF */
5645 write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
5647 if (sp->device_type == XFRAME_I_DEVICE) {
5648 /* Test Write Error at offset 0x80 */
5649 if (!write_eeprom(sp, 0x080, 0, 3))
5650 fail = 1;
5652 /* Test Write Error at offset 0xfc */
5653 if (!write_eeprom(sp, 0x0FC, 0, 3))
5654 fail = 1;
5656 /* Test Write Error at offset 0x100 */
5657 if (!write_eeprom(sp, 0x100, 0, 3))
5658 fail = 1;
5660 /* Test Write Error at offset 4ec */
5661 if (!write_eeprom(sp, 0x4EC, 0, 3))
5662 fail = 1;
5665 /* Restore values at offsets 0x4F0 and 0x7F0 */
5666 if (saved_4F0)
5667 write_eeprom(sp, 0x4F0, org_4F0, 3);
5668 if (saved_7F0)
5669 write_eeprom(sp, 0x7F0, org_7F0, 3);
5671 *data = fail;
5672 return fail;
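/*
 * The test above deliberately follows a save/modify/verify/restore
 * sequence so that a passing run leaves the EEPROM contents intact.
 * A minimal sketch of the same pattern (offset hypothetical); note
 * that read_eeprom()/write_eeprom() return zero on success:
 *
 *	u64 org, val;
 *	if (!read_eeprom(sp, off, &org)) {
 *		write_eeprom(sp, off, 0x012345, 3);
 *		if (!read_eeprom(sp, off, &val) && val != 0x012345)
 *			fail = 1;
 *		write_eeprom(sp, off, org, 3);
 *	}
 */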
5676 * s2io_bist_test - invokes the MemBist test of the card.
5677 * @sp : private member of the device structure, which is a pointer to the
5678 * s2io_nic structure.
5679 * @data: variable that returns the result of each of the tests conducted by
5680 * the driver.
5681 * Description:
5682 * This invokes the MemBist test of the card. We allow the test
5683 * around 2 seconds to complete. If it is still not complete
5684 * within this period, we consider that the test failed.
5685 * Return value:
5686 * 0 on success and -1 on failure.
5689 static int s2io_bist_test(struct s2io_nic * sp, uint64_t * data)
5691 u8 bist = 0;
5692 int cnt = 0, ret = -1;
5694 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5695 bist |= PCI_BIST_START;
5696 pci_write_config_byte(sp->pdev, PCI_BIST, bist);
5698 while (cnt < 20) {
5699 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5700 if (!(bist & PCI_BIST_START)) {
5701 *data = (bist & PCI_BIST_CODE_MASK);
5702 ret = 0;
5703 break;
5705 msleep(100);
5706 cnt++;
5709 return ret;
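/*
 * The sequence above is the generic PCI BIST handshake: software sets
 * PCI_BIST_START, the device clears it when the test finishes, and the
 * low bits (PCI_BIST_CODE_MASK) carry the completion code, where zero
 * means pass. The 20 x 100 ms polling loop gives the roughly 2 second
 * budget mentioned in the comment above; a timeout leaves ret at -1.
 */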
5713 * s2io_link_test - verifies the link state of the nic
5714 * @sp : private member of the device structure, which is a pointer to the
5715 * s2io_nic structure.
5716 * @data: variable that returns the result of each of the tests conducted by
5717 * the driver.
5718 * Description:
5719 * The function verifies the link state of the NIC and updates the input
5720 * argument 'data' appropriately.
5721 * Return value:
5722 * 0 on success.
5725 static int s2io_link_test(struct s2io_nic * sp, uint64_t * data)
5727 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5728 u64 val64;
5730 val64 = readq(&bar0->adapter_status);
5731 if(!(LINK_IS_UP(val64)))
5732 *data = 1;
5733 else
5734 *data = 0;
5736 return *data;
5740 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
5741 * @sp : private member of the device structure, which is a pointer to the
5742 * s2io_nic structure.
5743 * @data : variable that returns the result of each of the tests
5744 * conducted by the driver.
5745 * Description:
5746 * This is one of the offline test that tests the read and write
5747 * access to the RldRam chip on the NIC.
5748 * Return value:
5749 * 0 on success.
5752 static int s2io_rldram_test(struct s2io_nic * sp, uint64_t * data)
5754 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5755 u64 val64;
5756 int cnt, iteration = 0, test_fail = 0;
5758 val64 = readq(&bar0->adapter_control);
5759 val64 &= ~ADAPTER_ECC_EN;
5760 writeq(val64, &bar0->adapter_control);
5762 val64 = readq(&bar0->mc_rldram_test_ctrl);
5763 val64 |= MC_RLDRAM_TEST_MODE;
5764 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5766 val64 = readq(&bar0->mc_rldram_mrs);
5767 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
5768 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
5770 val64 |= MC_RLDRAM_MRS_ENABLE;
5771 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
5773 while (iteration < 2) {
5774 val64 = 0x55555555aaaa0000ULL;
5775 if (iteration == 1) {
5776 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5778 writeq(val64, &bar0->mc_rldram_test_d0);
5780 val64 = 0xaaaa5a5555550000ULL;
5781 if (iteration == 1) {
5782 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5784 writeq(val64, &bar0->mc_rldram_test_d1);
5786 val64 = 0x55aaaaaaaa5a0000ULL;
5787 if (iteration == 1) {
5788 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5790 writeq(val64, &bar0->mc_rldram_test_d2);
5792 val64 = (u64) (0x0000003ffffe0100ULL);
5793 writeq(val64, &bar0->mc_rldram_test_add);
5795 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
5796 MC_RLDRAM_TEST_GO;
5797 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5799 for (cnt = 0; cnt < 5; cnt++) {
5800 val64 = readq(&bar0->mc_rldram_test_ctrl);
5801 if (val64 & MC_RLDRAM_TEST_DONE)
5802 break;
5803 msleep(200);
5806 if (cnt == 5)
5807 break;
5809 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
5810 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5812 for (cnt = 0; cnt < 5; cnt++) {
5813 val64 = readq(&bar0->mc_rldram_test_ctrl);
5814 if (val64 & MC_RLDRAM_TEST_DONE)
5815 break;
5816 msleep(500);
5819 if (cnt == 5)
5820 break;
5822 val64 = readq(&bar0->mc_rldram_test_ctrl);
5823 if (!(val64 & MC_RLDRAM_TEST_PASS))
5824 test_fail = 1;
5826 iteration++;
5829 *data = test_fail;
5831 /* Bring the adapter out of test mode */
5832 SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
5834 return test_fail;
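/*
 * Each of the two iterations above loads a data pattern into
 * mc_rldram_test_d0..d2, kicks a write pass (TEST_MODE | TEST_WRITE |
 * TEST_GO), then a read/compare pass (TEST_MODE | TEST_GO), polling
 * MC_RLDRAM_TEST_DONE each time; MC_RLDRAM_TEST_PASS reflects the
 * comparison result. The second iteration inverts the upper 48 bits of
 * each pattern so both polarities of the data lines get exercised:
 *
 *	u64 pat = 0x55555555aaaa0000ULL;
 *	if (iteration == 1)
 *		pat ^= 0xFFFFFFFFFFFF0000ULL;
 */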
5838 * s2io_ethtool_test - conducts 5 tests to determine the health of the card.
5839 * @sp : private member of the device structure, which is a pointer to the
5840 * s2io_nic structure.
5841 * @ethtest : pointer to an ethtool command specific structure that will be
5842 * returned to the user.
5843 * @data : variable that returns the result of each of the tests
5844 * conducted by the driver.
5845 * Description:
5846 * This function conducts 5 tests (4 offline and 1 online) to determine
5847 * the health of the card.
5848 * Return value:
5849 * void
5852 static void s2io_ethtool_test(struct net_device *dev,
5853 struct ethtool_test *ethtest,
5854 uint64_t * data)
5856 struct s2io_nic *sp = dev->priv;
5857 int orig_state = netif_running(sp->dev);
5859 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
5860 /* Offline Tests. */
5861 if (orig_state)
5862 s2io_close(sp->dev);
5864 if (s2io_register_test(sp, &data[0]))
5865 ethtest->flags |= ETH_TEST_FL_FAILED;
5867 s2io_reset(sp);
5869 if (s2io_rldram_test(sp, &data[3]))
5870 ethtest->flags |= ETH_TEST_FL_FAILED;
5872 s2io_reset(sp);
5874 if (s2io_eeprom_test(sp, &data[1]))
5875 ethtest->flags |= ETH_TEST_FL_FAILED;
5877 if (s2io_bist_test(sp, &data[4]))
5878 ethtest->flags |= ETH_TEST_FL_FAILED;
5880 if (orig_state)
5881 s2io_open(sp->dev);
5883 data[2] = 0;
5884 } else {
5885 /* Online Tests. */
5886 if (!orig_state) {
5887 DBG_PRINT(ERR_DBG,
5888 "%s: is not up, cannot run test\n",
5889 dev->name);
5890 data[0] = -1;
5891 data[1] = -1;
5892 data[2] = -1;
5893 data[3] = -1;
5894 data[4] = -1;
5897 if (s2io_link_test(sp, &data[2]))
5898 ethtest->flags |= ETH_TEST_FL_FAILED;
5900 data[0] = 0;
5901 data[1] = 0;
5902 data[3] = 0;
5903 data[4] = 0;
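/*
 * Result slot layout as filled in above: data[0] register test,
 * data[1] eeprom test, data[2] link test, data[3] rldram test and
 * data[4] bist test. Userspace reaches this handler through the
 * ethtool self-test interface, e.g. "ethtool -t ethX offline"
 * (illustrative invocation).
 */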
5907 static void s2io_get_ethtool_stats(struct net_device *dev,
5908 struct ethtool_stats *estats,
5909 u64 * tmp_stats)
5911 int i = 0, k;
5912 struct s2io_nic *sp = dev->priv;
5913 struct stat_block *stat_info = sp->mac_control.stats_info;
5915 s2io_updt_stats(sp);
5916 tmp_stats[i++] =
5917 (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32 |
5918 le32_to_cpu(stat_info->tmac_frms);
5919 tmp_stats[i++] =
5920 (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
5921 le32_to_cpu(stat_info->tmac_data_octets);
5922 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
5923 tmp_stats[i++] =
5924 (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
5925 le32_to_cpu(stat_info->tmac_mcst_frms);
5926 tmp_stats[i++] =
5927 (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
5928 le32_to_cpu(stat_info->tmac_bcst_frms);
5929 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
5930 tmp_stats[i++] =
5931 (u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 |
5932 le32_to_cpu(stat_info->tmac_ttl_octets);
5933 tmp_stats[i++] =
5934 (u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 |
5935 le32_to_cpu(stat_info->tmac_ucst_frms);
5936 tmp_stats[i++] =
5937 (u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 |
5938 le32_to_cpu(stat_info->tmac_nucst_frms);
5939 tmp_stats[i++] =
5940 (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
5941 le32_to_cpu(stat_info->tmac_any_err_frms);
5942 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets);
5943 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
5944 tmp_stats[i++] =
5945 (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
5946 le32_to_cpu(stat_info->tmac_vld_ip);
5947 tmp_stats[i++] =
5948 (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
5949 le32_to_cpu(stat_info->tmac_drop_ip);
5950 tmp_stats[i++] =
5951 (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
5952 le32_to_cpu(stat_info->tmac_icmp);
5953 tmp_stats[i++] =
5954 (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
5955 le32_to_cpu(stat_info->tmac_rst_tcp);
5956 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
5957 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
5958 le32_to_cpu(stat_info->tmac_udp);
5959 tmp_stats[i++] =
5960 (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
5961 le32_to_cpu(stat_info->rmac_vld_frms);
5962 tmp_stats[i++] =
5963 (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
5964 le32_to_cpu(stat_info->rmac_data_octets);
5965 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
5966 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
5967 tmp_stats[i++] =
5968 (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
5969 le32_to_cpu(stat_info->rmac_vld_mcst_frms);
5970 tmp_stats[i++] =
5971 (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
5972 le32_to_cpu(stat_info->rmac_vld_bcst_frms);
5973 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
5974 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms);
5975 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
5976 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
5977 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms);
5978 tmp_stats[i++] =
5979 (u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 |
5980 le32_to_cpu(stat_info->rmac_ttl_octets);
5981 tmp_stats[i++] =
5982 (u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow)
5983 << 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms);
5984 tmp_stats[i++] =
5985 (u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow)
5986 << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms);
5987 tmp_stats[i++] =
5988 (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
5989 le32_to_cpu(stat_info->rmac_discarded_frms);
5990 tmp_stats[i++] =
5991 (u64)le32_to_cpu(stat_info->rmac_drop_events_oflow)
5992 << 32 | le32_to_cpu(stat_info->rmac_drop_events);
5993 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets);
5994 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms);
5995 tmp_stats[i++] =
5996 (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
5997 le32_to_cpu(stat_info->rmac_usized_frms);
5998 tmp_stats[i++] =
5999 (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
6000 le32_to_cpu(stat_info->rmac_osized_frms);
6001 tmp_stats[i++] =
6002 (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
6003 le32_to_cpu(stat_info->rmac_frag_frms);
6004 tmp_stats[i++] =
6005 (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
6006 le32_to_cpu(stat_info->rmac_jabber_frms);
6007 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms);
6008 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms);
6009 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms);
6010 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms);
6011 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms);
6012 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms);
6013 tmp_stats[i++] =
6014 (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
6015 le32_to_cpu(stat_info->rmac_ip);
6016 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
6017 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
6018 tmp_stats[i++] =
6019 (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
6020 le32_to_cpu(stat_info->rmac_drop_ip);
6021 tmp_stats[i++] =
6022 (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
6023 le32_to_cpu(stat_info->rmac_icmp);
6024 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
6025 tmp_stats[i++] =
6026 (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
6027 le32_to_cpu(stat_info->rmac_udp);
6028 tmp_stats[i++] =
6029 (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
6030 le32_to_cpu(stat_info->rmac_err_drp_udp);
6031 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym);
6032 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0);
6033 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1);
6034 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2);
6035 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3);
6036 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4);
6037 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5);
6038 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6);
6039 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7);
6040 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0);
6041 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1);
6042 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2);
6043 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3);
6044 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4);
6045 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5);
6046 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6);
6047 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7);
6048 tmp_stats[i++] =
6049 (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
6050 le32_to_cpu(stat_info->rmac_pause_cnt);
6051 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt);
6052 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt);
6053 tmp_stats[i++] =
6054 (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
6055 le32_to_cpu(stat_info->rmac_accepted_ip);
6056 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
6057 tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt);
6058 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt);
6059 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt);
6060 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt);
6061 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt);
6062 tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt);
6063 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt);
6064 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt);
6065 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt);
6066 tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt);
6067 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt);
6068 tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt);
6069 tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt);
6070 tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt);
6071 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt);
6072 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
6073 tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
6074 tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);
6076 /* Enhanced statistics exist only for Hercules */
6077 if(sp->device_type == XFRAME_II_DEVICE) {
6078 tmp_stats[i++] =
6079 le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
6080 tmp_stats[i++] =
6081 le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
6082 tmp_stats[i++] =
6083 le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
6084 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
6085 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
6086 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
6087 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
6088 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
6089 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
6090 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
6091 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
6092 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
6093 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
6094 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
6095 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
6096 tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
6099 tmp_stats[i++] = 0;
6100 tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
6101 tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
6102 tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt;
6103 tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt;
6104 tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt;
6105 tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt;
6106 for (k = 0; k < MAX_RX_RINGS; k++)
6107 tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt[k];
6108 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high;
6109 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low;
6110 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high;
6111 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low;
6112 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high;
6113 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low;
6114 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high;
6115 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low;
6116 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high;
6117 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low;
6118 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high;
6119 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low;
6120 tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
6121 tmp_stats[i++] = stat_info->sw_stat.sending_both;
6122 tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
6123 tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
6124 if (stat_info->sw_stat.num_aggregations) {
6125 u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
6126 int count = 0;
6128 * Since 64-bit divide does not work on all platforms,
6129 * do repeated subtraction.
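/*
 * Note: the kernel's do_div() helper is the portable way to divide a
 * 64-bit value by a 32-bit one; the subtraction loop below is a
 * dependency-free alternative, acceptable here because the quotient
 * (average packets per aggregation) stays small.
 */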
6131 while (tmp >= stat_info->sw_stat.num_aggregations) {
6132 tmp -= stat_info->sw_stat.num_aggregations;
6133 count++;
6135 tmp_stats[i++] = count;
6137 else
6138 tmp_stats[i++] = 0;
6139 tmp_stats[i++] = stat_info->sw_stat.mem_alloc_fail_cnt;
6140 tmp_stats[i++] = stat_info->sw_stat.pci_map_fail_cnt;
6141 tmp_stats[i++] = stat_info->sw_stat.watchdog_timer_cnt;
6142 tmp_stats[i++] = stat_info->sw_stat.mem_allocated;
6143 tmp_stats[i++] = stat_info->sw_stat.mem_freed;
6144 tmp_stats[i++] = stat_info->sw_stat.link_up_cnt;
6145 tmp_stats[i++] = stat_info->sw_stat.link_down_cnt;
6146 tmp_stats[i++] = stat_info->sw_stat.link_up_time;
6147 tmp_stats[i++] = stat_info->sw_stat.link_down_time;
6149 tmp_stats[i++] = stat_info->sw_stat.tx_buf_abort_cnt;
6150 tmp_stats[i++] = stat_info->sw_stat.tx_desc_abort_cnt;
6151 tmp_stats[i++] = stat_info->sw_stat.tx_parity_err_cnt;
6152 tmp_stats[i++] = stat_info->sw_stat.tx_link_loss_cnt;
6153 tmp_stats[i++] = stat_info->sw_stat.tx_list_proc_err_cnt;
6155 tmp_stats[i++] = stat_info->sw_stat.rx_parity_err_cnt;
6156 tmp_stats[i++] = stat_info->sw_stat.rx_abort_cnt;
6157 tmp_stats[i++] = stat_info->sw_stat.rx_parity_abort_cnt;
6158 tmp_stats[i++] = stat_info->sw_stat.rx_rda_fail_cnt;
6159 tmp_stats[i++] = stat_info->sw_stat.rx_unkn_prot_cnt;
6160 tmp_stats[i++] = stat_info->sw_stat.rx_fcs_err_cnt;
6161 tmp_stats[i++] = stat_info->sw_stat.rx_buf_size_err_cnt;
6162 tmp_stats[i++] = stat_info->sw_stat.rx_rxd_corrupt_cnt;
6163 tmp_stats[i++] = stat_info->sw_stat.rx_unkn_err_cnt;
6164 tmp_stats[i++] = stat_info->sw_stat.tda_err_cnt;
6165 tmp_stats[i++] = stat_info->sw_stat.pfc_err_cnt;
6166 tmp_stats[i++] = stat_info->sw_stat.pcc_err_cnt;
6167 tmp_stats[i++] = stat_info->sw_stat.tti_err_cnt;
6168 tmp_stats[i++] = stat_info->sw_stat.tpa_err_cnt;
6169 tmp_stats[i++] = stat_info->sw_stat.sm_err_cnt;
6170 tmp_stats[i++] = stat_info->sw_stat.lso_err_cnt;
6171 tmp_stats[i++] = stat_info->sw_stat.mac_tmac_err_cnt;
6172 tmp_stats[i++] = stat_info->sw_stat.mac_rmac_err_cnt;
6173 tmp_stats[i++] = stat_info->sw_stat.xgxs_txgxs_err_cnt;
6174 tmp_stats[i++] = stat_info->sw_stat.xgxs_rxgxs_err_cnt;
6175 tmp_stats[i++] = stat_info->sw_stat.rc_err_cnt;
6176 tmp_stats[i++] = stat_info->sw_stat.prc_pcix_err_cnt;
6177 tmp_stats[i++] = stat_info->sw_stat.rpa_err_cnt;
6178 tmp_stats[i++] = stat_info->sw_stat.rda_err_cnt;
6179 tmp_stats[i++] = stat_info->sw_stat.rti_err_cnt;
6180 tmp_stats[i++] = stat_info->sw_stat.mc_err_cnt;
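/*
 * Many MAC counters above are maintained by hardware as a 32-bit count
 * plus a 32-bit overflow word; the repeated expression
 * "(u64)le32_to_cpu(oflow) << 32 | le32_to_cpu(lo)" stitches the two
 * halves back into one 64-bit value. A hypothetical helper macro (not
 * used by this driver) would capture the idiom:
 *
 *	#define S2IO_STAT64(hi, lo) \
 *		(((u64)le32_to_cpu(hi) << 32) | le32_to_cpu(lo))
 */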
6183 static int s2io_ethtool_get_regs_len(struct net_device *dev)
6185 return XENA_REG_SPACE;
6189 static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
6191 struct s2io_nic *sp = dev->priv;
6193 return sp->rx_csum;
6196 static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
6198 struct s2io_nic *sp = dev->priv;
6200 if (data)
6201 sp->rx_csum = 1;
6202 else
6203 sp->rx_csum = 0;
6205 return 0;
6208 static int s2io_get_eeprom_len(struct net_device *dev)
6210 return XENA_EEPROM_SPACE;
6213 static int s2io_get_sset_count(struct net_device *dev, int sset)
6215 struct s2io_nic *sp = dev->priv;
6217 switch (sset) {
6218 case ETH_SS_TEST:
6219 return S2IO_TEST_LEN;
6220 case ETH_SS_STATS:
6221 switch(sp->device_type) {
6222 case XFRAME_I_DEVICE:
6223 return XFRAME_I_STAT_LEN;
6224 case XFRAME_II_DEVICE:
6225 return XFRAME_II_STAT_LEN;
6226 default:
6227 return 0;
6229 default:
6230 return -EOPNOTSUPP;
6234 static void s2io_ethtool_get_strings(struct net_device *dev,
6235 u32 stringset, u8 * data)
6237 int stat_size = 0;
6238 struct s2io_nic *sp = dev->priv;
6240 switch (stringset) {
6241 case ETH_SS_TEST:
6242 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6243 break;
6244 case ETH_SS_STATS:
6245 stat_size = sizeof(ethtool_xena_stats_keys);
6246 memcpy(data, &ethtool_xena_stats_keys,stat_size);
6247 if(sp->device_type == XFRAME_II_DEVICE) {
6248 memcpy(data + stat_size,
6249 &ethtool_enhanced_stats_keys,
6250 sizeof(ethtool_enhanced_stats_keys));
6251 stat_size += sizeof(ethtool_enhanced_stats_keys);
6254 memcpy(data + stat_size, &ethtool_driver_stats_keys,
6255 sizeof(ethtool_driver_stats_keys));
6259 static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
6261 if (data)
6262 dev->features |= NETIF_F_IP_CSUM;
6263 else
6264 dev->features &= ~NETIF_F_IP_CSUM;
6266 return 0;
6269 static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
6271 return (dev->features & NETIF_F_TSO) != 0;
6273 static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
6275 if (data)
6276 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
6277 else
6278 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
6280 return 0;
6283 static const struct ethtool_ops netdev_ethtool_ops = {
6284 .get_settings = s2io_ethtool_gset,
6285 .set_settings = s2io_ethtool_sset,
6286 .get_drvinfo = s2io_ethtool_gdrvinfo,
6287 .get_regs_len = s2io_ethtool_get_regs_len,
6288 .get_regs = s2io_ethtool_gregs,
6289 .get_link = ethtool_op_get_link,
6290 .get_eeprom_len = s2io_get_eeprom_len,
6291 .get_eeprom = s2io_ethtool_geeprom,
6292 .set_eeprom = s2io_ethtool_seeprom,
6293 .get_ringparam = s2io_ethtool_gringparam,
6294 .get_pauseparam = s2io_ethtool_getpause_data,
6295 .set_pauseparam = s2io_ethtool_setpause_data,
6296 .get_rx_csum = s2io_ethtool_get_rx_csum,
6297 .set_rx_csum = s2io_ethtool_set_rx_csum,
6298 .set_tx_csum = s2io_ethtool_op_set_tx_csum,
6299 .set_sg = ethtool_op_set_sg,
6300 .get_tso = s2io_ethtool_op_get_tso,
6301 .set_tso = s2io_ethtool_op_set_tso,
6302 .set_ufo = ethtool_op_set_ufo,
6303 .self_test = s2io_ethtool_test,
6304 .get_strings = s2io_ethtool_get_strings,
6305 .phys_id = s2io_ethtool_idnic,
6306 .get_ethtool_stats = s2io_get_ethtool_stats,
6307 .get_sset_count = s2io_get_sset_count,
6311 * s2io_ioctl - Entry point for the Ioctl
6312 * @dev : Device pointer.
6313 * @ifr : An IOCTL specific structure, that can contain a pointer to
6314 * a proprietary structure used to pass information to the driver.
6315 * @cmd : This is used to distinguish between the different commands that
6316 * can be passed to the IOCTL functions.
6317 * Description:
6318 * Currently no special functionality is supported in IOCTL, hence the
6319 * function always returns -EOPNOTSUPP.
6322 static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6324 return -EOPNOTSUPP;
6328 * s2io_change_mtu - entry point to change MTU size for the device.
6329 * @dev : device pointer.
6330 * @new_mtu : the new MTU size for the device.
6331 * Description: A driver entry point to change MTU size for the device.
6332 * Before changing the MTU the device must be stopped.
6333 * Return value:
6334 * 0 on success and an appropriate (-)ve integer as defined in errno.h
6335 * file on failure.
6338 static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6340 struct s2io_nic *sp = dev->priv;
6342 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
6343 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
6344 dev->name);
6345 return -EPERM;
6348 dev->mtu = new_mtu;
6349 if (netif_running(dev)) {
6350 s2io_card_down(sp);
6351 netif_stop_queue(dev);
6352 if (s2io_card_up(sp)) {
6353 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6354 __FUNCTION__);
6356 if (netif_queue_stopped(dev))
6357 netif_wake_queue(dev);
6358 } else { /* Device is down */
6359 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6360 u64 val64 = new_mtu;
6362 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6365 return 0;
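/*
 * When the interface is down, the new MTU only has to reach the MAC's
 * maximum payload length register. Assuming the usual s2io.h semantics
 * of vBIT(val, loc, sz) (place a sz-bit value at bit offset loc of a
 * 64-bit register), the write in the device-down branch above amounts
 * to:
 *
 *	writeq(vBIT(new_mtu, 2, 14), &bar0->rmac_max_pyld_len);
 */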
6369 * s2io_tasklet - Bottom half of the ISR.
6370 * @dev_addr : address of the net_device structure, cast to an unsigned long.
6371 * Description:
6372 * This is the tasklet or the bottom half of the ISR. This is
6373 * an extension of the ISR which is scheduled by the scheduler to be run
6374 * when the load on the CPU is low. All low priority tasks of the ISR can
6375 * be pushed into the tasklet. For now the tasklet is used only to
6376 * replenish the Rx buffers in the Rx buffer descriptors.
6377 * Return value:
6378 * void.
6381 static void s2io_tasklet(unsigned long dev_addr)
6383 struct net_device *dev = (struct net_device *) dev_addr;
6384 struct s2io_nic *sp = dev->priv;
6385 int i, ret;
6386 struct mac_info *mac_control;
6387 struct config_param *config;
6389 mac_control = &sp->mac_control;
6390 config = &sp->config;
6392 if (!TASKLET_IN_USE) {
6393 for (i = 0; i < config->rx_ring_num; i++) {
6394 ret = fill_rx_buffers(sp, i);
6395 if (ret == -ENOMEM) {
6396 DBG_PRINT(INFO_DBG, "%s: Out of ",
6397 dev->name);
6398 DBG_PRINT(INFO_DBG, "memory in tasklet\n");
6399 break;
6400 } else if (ret == -EFILL) {
6401 DBG_PRINT(INFO_DBG,
6402 "%s: Rx Ring %d is full\n",
6403 dev->name, i);
6404 break;
6407 clear_bit(0, (&sp->tasklet_status));
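/*
 * The tasklet is registered in s2io_card_up() via
 * tasklet_init(&sp->task, s2io_tasklet, (unsigned long)dev) and torn
 * down with tasklet_kill() in do_s2io_card_down().
 */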
6412 * s2io_set_link - Set the Link status
6413 * @data: long pointer to device private structure
6414 * Description: Sets the link status for the adapter
6417 static void s2io_set_link(struct work_struct *work)
6419 struct s2io_nic *nic = container_of(work, struct s2io_nic, set_link_task);
6420 struct net_device *dev = nic->dev;
6421 struct XENA_dev_config __iomem *bar0 = nic->bar0;
6422 register u64 val64;
6423 u16 subid;
6425 rtnl_lock();
6427 if (!netif_running(dev))
6428 goto out_unlock;
6430 if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
6431 /* The card is being reset, no point doing anything */
6432 goto out_unlock;
6435 subid = nic->pdev->subsystem_device;
6436 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
6438 * Allow a small delay for the NIC's self-initiated
6439 * cleanup to complete.
6441 msleep(100);
6444 val64 = readq(&bar0->adapter_status);
6445 if (LINK_IS_UP(val64)) {
6446 if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
6447 if (verify_xena_quiescence(nic)) {
6448 val64 = readq(&bar0->adapter_control);
6449 val64 |= ADAPTER_CNTL_EN;
6450 writeq(val64, &bar0->adapter_control);
6451 if (CARDS_WITH_FAULTY_LINK_INDICATORS(
6452 nic->device_type, subid)) {
6453 val64 = readq(&bar0->gpio_control);
6454 val64 |= GPIO_CTRL_GPIO_0;
6455 writeq(val64, &bar0->gpio_control);
6456 val64 = readq(&bar0->gpio_control);
6457 } else {
6458 val64 |= ADAPTER_LED_ON;
6459 writeq(val64, &bar0->adapter_control);
6461 nic->device_enabled_once = TRUE;
6462 } else {
6463 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
6464 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
6465 netif_stop_queue(dev);
6468 val64 = readq(&bar0->adapter_control);
6469 val64 |= ADAPTER_LED_ON;
6470 writeq(val64, &bar0->adapter_control);
6471 s2io_link(nic, LINK_UP);
6472 } else {
6473 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
6474 subid)) {
6475 val64 = readq(&bar0->gpio_control);
6476 val64 &= ~GPIO_CTRL_GPIO_0;
6477 writeq(val64, &bar0->gpio_control);
6478 val64 = readq(&bar0->gpio_control);
6480 /* turn off LED */
6481 val64 = readq(&bar0->adapter_control);
6482 val64 = val64 &(~ADAPTER_LED_ON);
6483 writeq(val64, &bar0->adapter_control);
6484 s2io_link(nic, LINK_DOWN);
6486 clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));
6488 out_unlock:
6489 rtnl_unlock();
6492 static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
6493 struct buffAdd *ba,
6494 struct sk_buff **skb, u64 *temp0, u64 *temp1,
6495 u64 *temp2, int size)
6497 struct net_device *dev = sp->dev;
6498 struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
6500 if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
6501 struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
6502 /* allocate skb */
6503 if (*skb) {
6504 DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
6506 * As Rx frames are not going to be processed,
6507 * use the same mapped address for the RxD
6508 * buffer pointer
6510 rxdp1->Buffer0_ptr = *temp0;
6511 } else {
6512 *skb = dev_alloc_skb(size);
6513 if (!(*skb)) {
6514 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
6515 DBG_PRINT(INFO_DBG, "memory to allocate ");
6516 DBG_PRINT(INFO_DBG, "1 buf mode SKBs\n");
6517 sp->mac_control.stats_info->sw_stat. \
6518 mem_alloc_fail_cnt++;
6519 return -ENOMEM ;
6521 sp->mac_control.stats_info->sw_stat.mem_allocated
6522 += (*skb)->truesize;
6523 /* Store the mapped address in a temp variable
6524 * so it can be reused for the next RxD whose
6525 * Host_Control is NULL
6527 rxdp1->Buffer0_ptr = *temp0 =
6528 pci_map_single( sp->pdev, (*skb)->data,
6529 size - NET_IP_ALIGN,
6530 PCI_DMA_FROMDEVICE);
6531 if( (rxdp1->Buffer0_ptr == 0) ||
6532 (rxdp1->Buffer0_ptr == DMA_ERROR_CODE)) {
6533 goto memalloc_failed;
6535 rxdp->Host_Control = (unsigned long) (*skb);
6537 } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
6538 struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
6539 /* Two buffer Mode */
6540 if (*skb) {
6541 rxdp3->Buffer2_ptr = *temp2;
6542 rxdp3->Buffer0_ptr = *temp0;
6543 rxdp3->Buffer1_ptr = *temp1;
6544 } else {
6545 *skb = dev_alloc_skb(size);
6546 if (!(*skb)) {
6547 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
6548 DBG_PRINT(INFO_DBG, "memory to allocate ");
6549 DBG_PRINT(INFO_DBG, "2 buf mode SKBs\n");
6550 sp->mac_control.stats_info->sw_stat. \
6551 mem_alloc_fail_cnt++;
6552 return -ENOMEM;
6554 sp->mac_control.stats_info->sw_stat.mem_allocated
6555 += (*skb)->truesize;
6556 rxdp3->Buffer2_ptr = *temp2 =
6557 pci_map_single(sp->pdev, (*skb)->data,
6558 dev->mtu + 4,
6559 PCI_DMA_FROMDEVICE);
6560 if( (rxdp3->Buffer2_ptr == 0) ||
6561 (rxdp3->Buffer2_ptr == DMA_ERROR_CODE)) {
6562 goto memalloc_failed;
6564 rxdp3->Buffer0_ptr = *temp0 =
6565 pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
6566 PCI_DMA_FROMDEVICE);
6567 if( (rxdp3->Buffer0_ptr == 0) ||
6568 (rxdp3->Buffer0_ptr == DMA_ERROR_CODE)) {
6569 pci_unmap_single (sp->pdev,
6570 (dma_addr_t)rxdp3->Buffer2_ptr,
6571 dev->mtu + 4, PCI_DMA_FROMDEVICE);
6572 goto memalloc_failed;
6574 rxdp->Host_Control = (unsigned long) (*skb);
6576 /* Buffer-1 is a dummy buffer, not used */
6577 rxdp3->Buffer1_ptr = *temp1 =
6578 pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
6579 PCI_DMA_FROMDEVICE);
6580 if( (rxdp3->Buffer1_ptr == 0) ||
6581 (rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
6582 pci_unmap_single (sp->pdev,
6583 (dma_addr_t)rxdp3->Buffer0_ptr,
6584 BUF0_LEN, PCI_DMA_FROMDEVICE);
6585 pci_unmap_single (sp->pdev,
6586 (dma_addr_t)rxdp3->Buffer2_ptr,
6587 dev->mtu + 4, PCI_DMA_FROMDEVICE);
6588 goto memalloc_failed;
6592 return 0;
6593 memalloc_failed:
6594 stats->pci_map_fail_cnt++;
6595 stats->mem_freed += (*skb)->truesize;
6596 dev_kfree_skb(*skb);
6597 return -ENOMEM;
6600 static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6601 int size)
6603 struct net_device *dev = sp->dev;
6604 if (sp->rxd_mode == RXD_MODE_1) {
6605 rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN);
6606 } else if (sp->rxd_mode == RXD_MODE_3B) {
6607 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6608 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6609 rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
6613 static int rxd_owner_bit_reset(struct s2io_nic *sp)
6615 int i, j, k, blk_cnt = 0, size;
6616 struct mac_info * mac_control = &sp->mac_control;
6617 struct config_param *config = &sp->config;
6618 struct net_device *dev = sp->dev;
6619 struct RxD_t *rxdp = NULL;
6620 struct sk_buff *skb = NULL;
6621 struct buffAdd *ba = NULL;
6622 u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6624 /* Calculate the size based on ring mode */
6625 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6626 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6627 if (sp->rxd_mode == RXD_MODE_1)
6628 size += NET_IP_ALIGN;
6629 else if (sp->rxd_mode == RXD_MODE_3B)
6630 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6632 for (i = 0; i < config->rx_ring_num; i++) {
6633 blk_cnt = config->rx_cfg[i].num_rxd /
6634 (rxd_count[sp->rxd_mode] +1);
6636 for (j = 0; j < blk_cnt; j++) {
6637 for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6638 rxdp = mac_control->rings[i].
6639 rx_blocks[j].rxds[k].virt_addr;
6640 if(sp->rxd_mode == RXD_MODE_3B)
6641 ba = &mac_control->rings[i].ba[j][k];
6642 if (set_rxd_buffer_pointer(sp, rxdp, ba,
6643 &skb,(u64 *)&temp0_64,
6644 (u64 *)&temp1_64,
6645 (u64 *)&temp2_64,
6646 size) == -ENOMEM) {
6647 return -ENOMEM;
6650 set_rxd_buffer_size(sp, rxdp, size);
6651 wmb();
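/* The barrier above orders the buffer-pointer and size updates
 * before the ownership flip below, so the NIC never acquires a
 * descriptor whose contents it could still see as stale.
 */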
6652 /* flip the Ownership bit to Hardware */
6653 rxdp->Control_1 |= RXD_OWN_XENA;
6657 return 0;
6661 static int s2io_add_isr(struct s2io_nic * sp)
6663 int ret = 0;
6664 struct net_device *dev = sp->dev;
6665 int err = 0;
6667 if (sp->config.intr_type == MSI_X)
6668 ret = s2io_enable_msi_x(sp);
6669 if (ret) {
6670 DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
6671 sp->config.intr_type = INTA;
6674 /* Store the values of the MSIX table in the struct s2io_nic structure */
6675 store_xmsi_data(sp);
6677 /* After proper initialization of H/W, register ISR */
6678 if (sp->config.intr_type == MSI_X) {
6679 int i, msix_tx_cnt = 0, msix_rx_cnt = 0;
6681 for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
6682 if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
6683 sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
6684 dev->name, i);
6685 err = request_irq(sp->entries[i].vector,
6686 s2io_msix_fifo_handle, 0, sp->desc[i],
6687 sp->s2io_entries[i].arg);
6688 /* If either data or addr is zero print it */
6689 if(!(sp->msix_info[i].addr &&
6690 sp->msix_info[i].data)) {
6691 DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
6692 "Data:0x%lx\n",sp->desc[i],
6693 (unsigned long long)
6694 sp->msix_info[i].addr,
6695 (unsigned long)
6696 ntohl(sp->msix_info[i].data));
6697 } else {
6698 msix_tx_cnt++;
6700 } else {
6701 sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
6702 dev->name, i);
6703 err = request_irq(sp->entries[i].vector,
6704 s2io_msix_ring_handle, 0, sp->desc[i],
6705 sp->s2io_entries[i].arg);
6706 /* If either data or addr is zero print it */
6707 if(!(sp->msix_info[i].addr &&
6708 sp->msix_info[i].data)) {
6709 DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
6710 "Data:0x%lx\n",sp->desc[i],
6711 (unsigned long long)
6712 sp->msix_info[i].addr,
6713 (unsigned long)
6714 ntohl(sp->msix_info[i].data));
6715 } else {
6716 msix_rx_cnt++;
6719 if (err) {
6720 DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration "
6721 "failed\n", dev->name, i);
6722 DBG_PRINT(ERR_DBG, "Returned: %d\n", err);
6723 return -1;
6725 sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
6727 printk(KERN_INFO "MSI-X-TX %d entries enabled\n", msix_tx_cnt);
6728 printk(KERN_INFO "MSI-X-RX %d entries enabled\n", msix_rx_cnt);
6730 if (sp->config.intr_type == INTA) {
6731 err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
6732 sp->name, dev);
6733 if (err) {
6734 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
6735 dev->name);
6736 return -1;
6739 return 0;
6741 static void s2io_rem_isr(struct s2io_nic * sp)
6743 struct net_device *dev = sp->dev;
6744 struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
6746 if (sp->config.intr_type == MSI_X) {
6747 int i;
6748 u16 msi_control;
6750 for (i=1; (sp->s2io_entries[i].in_use ==
6751 MSIX_REGISTERED_SUCCESS); i++) {
6752 int vector = sp->entries[i].vector;
6753 void *arg = sp->s2io_entries[i].arg;
6755 synchronize_irq(vector);
6756 free_irq(vector, arg);
6759 kfree(sp->entries);
6760 stats->mem_freed +=
6761 (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
6762 kfree(sp->s2io_entries);
6763 stats->mem_freed +=
6764 (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
6765 sp->entries = NULL;
6766 sp->s2io_entries = NULL;
6768 pci_read_config_word(sp->pdev, 0x42, &msi_control);
6769 msi_control &= 0xFFFE; /* Disable MSI */
6770 pci_write_config_word(sp->pdev, 0x42, msi_control);
6772 pci_disable_msix(sp->pdev);
6773 } else {
6774 synchronize_irq(sp->pdev->irq);
6775 free_irq(sp->pdev->irq, dev);
6779 static void do_s2io_card_down(struct s2io_nic * sp, int do_io)
6781 int cnt = 0;
6782 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6783 unsigned long flags;
6784 register u64 val64 = 0;
6786 del_timer_sync(&sp->alarm_timer);
6787 /* If s2io_set_link task is executing, wait till it completes. */
6788 while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state))) {
6789 msleep(50);
6791 clear_bit(__S2IO_STATE_CARD_UP, &sp->state);
6793 /* disable Tx and Rx traffic on the NIC */
6794 if (do_io)
6795 stop_nic(sp);
6797 s2io_rem_isr(sp);
6799 /* Kill tasklet. */
6800 tasklet_kill(&sp->task);
6802 /* Check if the device is Quiescent and then Reset the NIC */
6803 while(do_io) {
6804 /* As per the HW requirement we need to replenish the
6805 * receive buffers to avoid a ring bump. Since there is
6806 * no intention of processing the Rx frames at this point,
6807 * we just set the ownership bit of the RxDs in each Rx
6808 * ring to HW and set the appropriate buffer size
6809 * based on the ring mode
6811 rxd_owner_bit_reset(sp);
6813 val64 = readq(&bar0->adapter_status);
6814 if (verify_xena_quiescence(sp)) {
6815 if(verify_pcc_quiescent(sp, sp->device_enabled_once))
6816 break;
6819 msleep(50);
6820 cnt++;
6821 if (cnt == 10) {
6822 DBG_PRINT(ERR_DBG,
6823 "s2io_close:Device not Quiescent ");
6824 DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
6825 (unsigned long long) val64);
6826 break;
6829 if (do_io)
6830 s2io_reset(sp);
6832 spin_lock_irqsave(&sp->tx_lock, flags);
6833 /* Free all Tx buffers */
6834 free_tx_buffers(sp);
6835 spin_unlock_irqrestore(&sp->tx_lock, flags);
6837 /* Free all Rx buffers */
6838 spin_lock_irqsave(&sp->rx_lock, flags);
6839 free_rx_buffers(sp);
6840 spin_unlock_irqrestore(&sp->rx_lock, flags);
6842 clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
6845 static void s2io_card_down(struct s2io_nic * sp)
6847 do_s2io_card_down(sp, 1);
6850 static int s2io_card_up(struct s2io_nic * sp)
6852 int i, ret = 0;
6853 struct mac_info *mac_control;
6854 struct config_param *config;
6855 struct net_device *dev = (struct net_device *) sp->dev;
6856 u16 interruptible;
6858 /* Initialize the H/W I/O registers */
6859 if (init_nic(sp) != 0) {
6860 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
6861 dev->name);
6862 s2io_reset(sp);
6863 return -ENODEV;
6867 * Initializing the Rx buffers. Buffers are replenished for
6868 * every configured Rx ring before the NIC is started.
6870 mac_control = &sp->mac_control;
6871 config = &sp->config;
6873 for (i = 0; i < config->rx_ring_num; i++) {
6874 if ((ret = fill_rx_buffers(sp, i))) {
6875 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
6876 dev->name);
6877 s2io_reset(sp);
6878 free_rx_buffers(sp);
6879 return -ENOMEM;
6881 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
6882 atomic_read(&sp->rx_bufs_left[i]));
6884 /* Maintain the state prior to the open */
6885 if (sp->promisc_flg)
6886 sp->promisc_flg = 0;
6887 if (sp->m_cast_flg) {
6888 sp->m_cast_flg = 0;
6889 sp->all_multi_pos= 0;
6892 /* Setting its receive mode */
6893 s2io_set_multicast(dev);
6895 if (sp->lro) {
6896 /* Initialize max aggregatable pkts per session based on MTU */
6897 sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
6898 /* Check if we can use (if specified) the user-provided value */
6899 if (lro_max_pkts < sp->lro_max_aggr_per_sess)
6900 sp->lro_max_aggr_per_sess = lro_max_pkts;
6903 /* Enable Rx Traffic and interrupts on the NIC */
6904 if (start_nic(sp)) {
6905 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
6906 s2io_reset(sp);
6907 free_rx_buffers(sp);
6908 return -ENODEV;
6911 /* Add interrupt service routine */
6912 if (s2io_add_isr(sp) != 0) {
6913 if (sp->config.intr_type == MSI_X)
6914 s2io_rem_isr(sp);
6915 s2io_reset(sp);
6916 free_rx_buffers(sp);
6917 return -ENODEV;
6920 S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
6922 /* Enable tasklet for the device */
6923 tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
6925 /* Enable select interrupts */
6926 en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
6927 if (sp->config.intr_type != INTA)
6928 en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS);
6929 else {
6930 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
6931 interruptible |= TX_PIC_INTR;
6932 en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
6935 set_bit(__S2IO_STATE_CARD_UP, &sp->state);
6936 return 0;
6940 * s2io_restart_nic - Resets the NIC.
6941 * @data : long pointer to the device private structure
6942 * Description:
6943 * This function is scheduled to be run by the s2io_tx_watchdog
6944 * function after 0.5 secs to reset the NIC. The idea is to reduce
6945 * the run time of the watch dog routine which is run holding a
6946 * spin lock.
6949 static void s2io_restart_nic(struct work_struct *work)
6951 struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
6952 struct net_device *dev = sp->dev;
6954 rtnl_lock();
6956 if (!netif_running(dev))
6957 goto out_unlock;
6959 s2io_card_down(sp);
6960 if (s2io_card_up(sp)) {
6961 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6962 dev->name);
6964 netif_wake_queue(dev);
6965 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
6966 dev->name);
6967 out_unlock:
6968 rtnl_unlock();
6972 * s2io_tx_watchdog - Watchdog for transmit side.
6973 * @dev : Pointer to net device structure
6974 * Description:
6975 * This function is triggered if the Tx Queue is stopped
6976 * for a pre-defined amount of time when the Interface is still up.
6977 * If the Interface is jammed in such a situation, the hardware is
6978 * reset (by s2io_close) and restarted again (by s2io_open) to
6979 * overcome any problem that might have been caused in the hardware.
6980 * Return value:
6981 * void
6984 static void s2io_tx_watchdog(struct net_device *dev)
6986 struct s2io_nic *sp = dev->priv;
6988 if (netif_carrier_ok(dev)) {
6989 sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt++;
6990 schedule_work(&sp->rst_timer_task);
6991 sp->mac_control.stats_info->sw_stat.soft_reset_cnt++;
6996 * rx_osm_handler - To perform some OS related operations on SKB.
6997 * @sp: private member of the device structure,pointer to s2io_nic structure.
6998 * @skb : the socket buffer pointer.
6999 * @len : length of the packet
7000 * @cksum : FCS checksum of the frame.
7001 * @ring_no : the ring from which this RxD was extracted.
7002 * Description:
7003 * This function is called by the Rx interrupt service routine to perform
7004 * some OS related operations on the SKB before passing it to the upper
7005 * layers. It mainly checks if the checksum is OK, if so adds it to the
7006 * SKB's cksum variable, increments the Rx packet count and passes the SKB
7007 * to the upper layer. If the checksum is wrong, it increments the Rx
7008 * packet error count, frees the SKB and returns error.
7009 * Return value:
7010 * SUCCESS on success and -1 on failure.
7012 static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7014 struct s2io_nic *sp = ring_data->nic;
7015 struct net_device *dev = (struct net_device *) sp->dev;
7016 struct sk_buff *skb = (struct sk_buff *)
7017 ((unsigned long) rxdp->Host_Control);
7018 int ring_no = ring_data->ring_no;
7019 u16 l3_csum, l4_csum;
7020 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
7021 struct lro *lro;
7022 u8 err_mask;
7024 skb->dev = dev;
7026 if (err) {
7027 /* Check for parity error */
7028 if (err & 0x1) {
7029 sp->mac_control.stats_info->sw_stat.parity_err_cnt++;
7031 err_mask = err >> 48;
7032 switch(err_mask) {
7033 case 1:
7034 sp->mac_control.stats_info->sw_stat.
7035 rx_parity_err_cnt++;
7036 break;
7038 case 2:
7039 sp->mac_control.stats_info->sw_stat.
7040 rx_abort_cnt++;
7041 break;
7043 case 3:
7044 sp->mac_control.stats_info->sw_stat.
7045 rx_parity_abort_cnt++;
7046 break;
7048 case 4:
7049 sp->mac_control.stats_info->sw_stat.
7050 rx_rda_fail_cnt++;
7051 break;
7053 case 5:
7054 sp->mac_control.stats_info->sw_stat.
7055 rx_unkn_prot_cnt++;
7056 break;
7058 case 6:
7059 sp->mac_control.stats_info->sw_stat.
7060 rx_fcs_err_cnt++;
7061 break;
7063 case 7:
7064 sp->mac_control.stats_info->sw_stat.
7065 rx_buf_size_err_cnt++;
7066 break;
7068 case 8:
7069 sp->mac_control.stats_info->sw_stat.
7070 rx_rxd_corrupt_cnt++;
7071 break;
7073 case 15:
7074 sp->mac_control.stats_info->sw_stat.
7075 rx_unkn_err_cnt++;
7076 break;
7079 * Drop the packet if bad transfer code. Exception being
7080 * 0x5, which could be due to unsupported IPv6 extension header.
7081 * In this case, we let stack handle the packet.
7082 * Note that in this case, since checksum will be incorrect,
7083 * stack will validate the same.
7085 if (err_mask != 0x5) {
7086 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
7087 dev->name, err_mask);
7088 sp->stats.rx_crc_errors++;
7089 sp->mac_control.stats_info->sw_stat.mem_freed
7090 += skb->truesize;
7091 dev_kfree_skb(skb);
7092 atomic_dec(&sp->rx_bufs_left[ring_no]);
7093 rxdp->Host_Control = 0;
7094 return 0;
7098 /* Updating statistics */
7099 sp->stats.rx_packets++;
7100 rxdp->Host_Control = 0;
7101 if (sp->rxd_mode == RXD_MODE_1) {
7102 int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
7104 sp->stats.rx_bytes += len;
7105 skb_put(skb, len);
7107 } else if (sp->rxd_mode == RXD_MODE_3B) {
7108 int get_block = ring_data->rx_curr_get_info.block_index;
7109 int get_off = ring_data->rx_curr_get_info.offset;
7110 int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
7111 int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
7112 unsigned char *buff = skb_push(skb, buf0_len);
7114 struct buffAdd *ba = &ring_data->ba[get_block][get_off];
7115 sp->stats.rx_bytes += buf0_len + buf2_len;
7116 memcpy(buff, ba->ba_0, buf0_len);
7117 skb_put(skb, buf2_len);
7120 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!sp->lro) ||
7121 (sp->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
7122 (sp->rx_csum)) {
7123 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
7124 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
7125 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
7127 * NIC verifies if the Checksum of the received
7128 * frame is Ok or not and accordingly returns
7129 * a flag in the RxD.
7131 skb->ip_summed = CHECKSUM_UNNECESSARY;
7132 if (sp->lro) {
7133 u32 tcp_len;
7134 u8 *tcp;
7135 int ret = 0;
7137 ret = s2io_club_tcp_session(skb->data, &tcp,
7138 &tcp_len, &lro, rxdp, sp);
7139 switch (ret) {
7140 case 3: /* Begin anew */
7141 lro->parent = skb;
7142 goto aggregate;
7143 case 1: /* Aggregate */
7145 lro_append_pkt(sp, lro,
7146 skb, tcp_len);
7147 goto aggregate;
7149 case 4: /* Flush session */
7151 lro_append_pkt(sp, lro,
7152 skb, tcp_len);
7153 queue_rx_frame(lro->parent);
7154 clear_lro_session(lro);
7155 sp->mac_control.stats_info->
7156 sw_stat.flush_max_pkts++;
7157 goto aggregate;
7159 case 2: /* Flush both */
7160 lro->parent->data_len =
7161 lro->frags_len;
7162 sp->mac_control.stats_info->
7163 sw_stat.sending_both++;
7164 queue_rx_frame(lro->parent);
7165 clear_lro_session(lro);
7166 goto send_up;
7167 case 0: /* sessions exceeded */
7168 case -1: /* non-TCP or not
7169 * L2 aggregatable
7171 case 5: /*
7172 * First pkt in session not
7173 * L3/L4 aggregatable
7175 break;
7176 default:
7177 DBG_PRINT(ERR_DBG,
7178 "%s: Samadhana!!\n",
7179 __FUNCTION__);
7180 BUG();
7183 } else {
7185 * Packet with erroneous checksum, let the
7186 * upper layers deal with it.
7188 skb->ip_summed = CHECKSUM_NONE;
7190 } else {
7191 skb->ip_summed = CHECKSUM_NONE;
7193 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
7194 if (!sp->lro) {
7195 skb->protocol = eth_type_trans(skb, dev);
7196 if ((sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2) &&
7197 vlan_strip_flag)) {
7198 /* Queueing the vlan frame to the upper layer */
7199 if (napi)
7200 vlan_hwaccel_receive_skb(skb, sp->vlgrp,
7201 RXD_GET_VLAN_TAG(rxdp->Control_2));
7202 else
7203 vlan_hwaccel_rx(skb, sp->vlgrp,
7204 RXD_GET_VLAN_TAG(rxdp->Control_2));
7205 } else {
7206 if (napi)
7207 netif_receive_skb(skb);
7208 else
7209 netif_rx(skb);
7211 } else {
7212 send_up:
7213 queue_rx_frame(skb);
7215 dev->last_rx = jiffies;
7216 aggregate:
7217 atomic_dec(&sp->rx_bufs_left[ring_no]);
7218 return SUCCESS;
7222 * s2io_link - stops/starts the Tx queue.
7223 * @sp : private member of the device structure, which is a pointer to the
7224 * s2io_nic structure.
7225 * @link : indicates whether link is UP/DOWN.
7226 * Description:
7227 * This function stops/starts the Tx queue depending on whether the link
7228 * status of the NIC is down or up. This is called by the Alarm
7229 * interrupt handler whenever a link change interrupt comes up.
7230 * Return value:
7231 * void.
7234 static void s2io_link(struct s2io_nic * sp, int link)
7236 struct net_device *dev = (struct net_device *) sp->dev;
7238 if (link != sp->last_link_state) {
7239 if (link == LINK_DOWN) {
7240 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7241 netif_carrier_off(dev);
7242 if(sp->mac_control.stats_info->sw_stat.link_up_cnt)
7243 sp->mac_control.stats_info->sw_stat.link_up_time =
7244 jiffies - sp->start_time;
7245 sp->mac_control.stats_info->sw_stat.link_down_cnt++;
7246 } else {
7247 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
7248 if (sp->mac_control.stats_info->sw_stat.link_down_cnt)
7249 sp->mac_control.stats_info->sw_stat.link_down_time =
7250 jiffies - sp->start_time;
7251 sp->mac_control.stats_info->sw_stat.link_up_cnt++;
7252 netif_carrier_on(dev);
7255 sp->last_link_state = link;
7256 sp->start_time = jiffies;
7260 * s2io_init_pci - Initialization of PCI and PCI-X configuration registers.
7261 * @sp : private member of the device structure, which is a pointer to the
7262 * s2io_nic structure.
7263 * Description:
7264 * This function initializes a few of the PCI and PCI-X configuration registers
7265 * with recommended values.
7266 * Return value:
7267 * void
7270 static void s2io_init_pci(struct s2io_nic * sp)
7272 u16 pci_cmd = 0, pcix_cmd = 0;
7274 /* Enable Data Parity Error Recovery in PCI-X command register. */
7275 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7276 &(pcix_cmd));
7277 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7278 (pcix_cmd | 1));
7279 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7280 &(pcix_cmd));
7282 /* Set the PErr Response bit in PCI command register. */
7283 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7284 pci_write_config_word(sp->pdev, PCI_COMMAND,
7285 (pci_cmd | PCI_COMMAND_PARITY));
7286 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7289 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
7291 if (tx_fifo_num > 8) {
7292 DBG_PRINT(ERR_DBG, "s2io: Requested number of Tx fifos not "
7293 "supported\n");
7294 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Tx fifos\n");
7295 tx_fifo_num = 8;
7297 if (rx_ring_num > 8) {
7298 DBG_PRINT(ERR_DBG, "s2io: Requested number of Rx rings not "
7299 "supported\n");
7300 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Rx rings\n");
7301 rx_ring_num = 8;
7303 if (*dev_intr_type != INTA)
7304 napi = 0;
7306 if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
7307 DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
7308 "Defaulting to INTA\n");
7309 *dev_intr_type = INTA;
7312 if ((*dev_intr_type == MSI_X) &&
7313 ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7314 (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
7315 DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
7316 "Defaulting to INTA\n");
7317 *dev_intr_type = INTA;
7320 if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
7321 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
7322 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 1-buffer mode\n");
7323 rx_ring_mode = 1;
7325 return SUCCESS;
7329 * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
7330 * or Traffic class respectively.
7331 * @nic: device private variable
7332 * Description: The function configures the receive steering to
7333 * desired receive ring.
7334 * Return Value: SUCCESS on success and
7335 * '-1' on failure (invalid codepoint or command timeout).
7337 static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7339 struct XENA_dev_config __iomem *bar0 = nic->bar0;
7340 register u64 val64 = 0;
7342 if (ds_codepoint > 63)
7343 return FAILURE;
7345 val64 = RTS_DS_MEM_DATA(ring);
7346 writeq(val64, &bar0->rts_ds_mem_data);
7348 val64 = RTS_DS_MEM_CTRL_WE |
7349 RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7350 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7352 writeq(val64, &bar0->rts_ds_mem_ctrl);
7354 return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7355 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7356 S2IO_BIT_RESET);
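/*
 * Usage sketch (hypothetical values): steer packets carrying the EF
 * DSCP codepoint (46) to receive ring 1:
 *
 *	if (rts_ds_steer(nic, 46, 1) != SUCCESS)
 *		DBG_PRINT(ERR_DBG, "Failed to steer DS codepoint 46\n");
 */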
7360 * s2io_init_nic - Initialization of the adapter .
7361 * @pdev : structure containing the PCI related information of the device.
7362 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
7363 * Description:
7364 * The function initializes an adapter identified by the pci_dev structure.
7365 * All OS related initialization including memory and device structure and
7366 * initialization of the device private variable is done. Also the swapper
7367 * control register is initialized to enable read and write into the I/O
7368 * registers of the device.
7369 * Return value:
7370 * returns 0 on success and negative on failure.
7373 static int __devinit
7374 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7376 struct s2io_nic *sp;
7377 struct net_device *dev;
7378 int i, j, ret;
7379 int dma_flag = FALSE;
7380 u32 mac_up, mac_down;
7381 u64 val64 = 0, tmp64 = 0;
7382 struct XENA_dev_config __iomem *bar0 = NULL;
7383 u16 subid;
7384 struct mac_info *mac_control;
7385 struct config_param *config;
7386 int mode;
7387 u8 dev_intr_type = intr_type;
7388 DECLARE_MAC_BUF(mac);
7390 if ((ret = s2io_verify_parm(pdev, &dev_intr_type)))
7391 return ret;
7393 if ((ret = pci_enable_device(pdev))) {
7394 DBG_PRINT(ERR_DBG,
7395 "s2io_init_nic: pci_enable_device failed\n");
7396 return ret;
7399 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
7400 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
7401 dma_flag = TRUE;
7402 if (pci_set_consistent_dma_mask
7403 (pdev, DMA_64BIT_MASK)) {
7404 DBG_PRINT(ERR_DBG,
7405 "Unable to obtain 64bit DMA for \
7406 consistent allocations\n");
7407 pci_disable_device(pdev);
7408 return -ENOMEM;
7410 } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
7411 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
7412 } else {
7413 pci_disable_device(pdev);
7414 return -ENOMEM;
7416 if ((ret = pci_request_regions(pdev, s2io_driver_name))) {
7417 DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x \n", __FUNCTION__, ret);
7418 pci_disable_device(pdev);
7419 return -ENODEV;
7422 dev = alloc_etherdev(sizeof(struct s2io_nic));
7423 if (dev == NULL) {
7424 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
7425 pci_disable_device(pdev);
7426 pci_release_regions(pdev);
7427 return -ENODEV;
7430 pci_set_master(pdev);
7431 pci_set_drvdata(pdev, dev);
7432 SET_NETDEV_DEV(dev, &pdev->dev);
7434 /* Private member variable initialized to s2io NIC structure */
7435 sp = dev->priv;
7436 memset(sp, 0, sizeof(struct s2io_nic));
7437 sp->dev = dev;
7438 sp->pdev = pdev;
7439 sp->high_dma_flag = dma_flag;
7440 sp->device_enabled_once = FALSE;
7441 if (rx_ring_mode == 1)
7442 sp->rxd_mode = RXD_MODE_1;
7443 if (rx_ring_mode == 2)
7444 sp->rxd_mode = RXD_MODE_3B;
7446 sp->config.intr_type = dev_intr_type;
7448 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
7449 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
7450 sp->device_type = XFRAME_II_DEVICE;
7451 else
7452 sp->device_type = XFRAME_I_DEVICE;
7454 sp->lro = lro;
7456 /* Initialize some PCI/PCI-X fields of the NIC. */
7457 s2io_init_pci(sp);
7460 * Setting the device configuration parameters.
7461 * Most of these parameters can be specified by the user during
7462 * module insertion as they are module loadable parameters. If
7463 * these parameters are not specified during load time, they
7464 * are initialized with default values.
7466 mac_control = &sp->mac_control;
7467 config = &sp->config;
7469 config->napi = napi;
7471 /* Tx side parameters. */
7472 config->tx_fifo_num = tx_fifo_num;
7473 for (i = 0; i < MAX_TX_FIFOS; i++) {
7474 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
7475 config->tx_cfg[i].fifo_priority = i;
7478 /* mapping the QoS priority to the configured fifos */
7479 for (i = 0; i < MAX_TX_FIFOS; i++)
7480 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
7482 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7483 for (i = 0; i < config->tx_fifo_num; i++) {
7484 config->tx_cfg[i].f_no_snoop =
7485 (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
7486 if (config->tx_cfg[i].fifo_len < 65) {
7487 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7488 break;
7491 /* + 2 because one Txd for skb->data and one Txd for UFO */
7492 config->max_txds = MAX_SKB_FRAGS + 2;
7494 /* Rx side parameters. */
7495 config->rx_ring_num = rx_ring_num;
7496 for (i = 0; i < MAX_RX_RINGS; i++) {
7497 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
7498 (rxd_count[sp->rxd_mode] + 1);
7499 config->rx_cfg[i].ring_priority = i;
7502 for (i = 0; i < rx_ring_num; i++) {
7503 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
7504 config->rx_cfg[i].f_no_snoop =
7505 (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
7508 /* Setting Mac Control parameters */
7509 mac_control->rmac_pause_time = rmac_pause_time;
7510 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
7511 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
7514 /* Initialize Ring buffer parameters. */
7515 for (i = 0; i < config->rx_ring_num; i++)
7516 atomic_set(&sp->rx_bufs_left[i], 0);
7518 /* initialize the shared memory used by the NIC and the host */
7519 if (init_shared_mem(sp)) {
7520 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
7521 dev->name);
7522 ret = -ENOMEM;
7523 goto mem_alloc_failed;
7526 sp->bar0 = ioremap(pci_resource_start(pdev, 0),
7527 pci_resource_len(pdev, 0));
7528 if (!sp->bar0) {
7529 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
7530 dev->name);
7531 ret = -ENOMEM;
7532 goto bar0_remap_failed;
7535 sp->bar1 = ioremap(pci_resource_start(pdev, 2),
7536 pci_resource_len(pdev, 2));
7537 if (!sp->bar1) {
7538 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
7539 dev->name);
7540 ret = -ENOMEM;
7541 goto bar1_remap_failed;
7544 dev->irq = pdev->irq;
7545 dev->base_addr = (unsigned long) sp->bar0;
7547 /* Initializing the BAR1 address as the start of the FIFO pointer. */
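	/* Each TX FIFO gets its own 128 KB (0x20000-byte) window in BAR1. */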
7548 for (j = 0; j < MAX_TX_FIFOS; j++) {
7549 mac_control->tx_FIFO_start[j] = (struct TxFIFO_element __iomem *)
7550 (sp->bar1 + (j * 0x00020000));
7553 /* Driver entry points */
7554 dev->open = &s2io_open;
7555 dev->stop = &s2io_close;
7556 dev->hard_start_xmit = &s2io_xmit;
7557 dev->get_stats = &s2io_get_stats;
7558 dev->set_multicast_list = &s2io_set_multicast;
7559 dev->do_ioctl = &s2io_ioctl;
7560 dev->set_mac_address = &s2io_set_mac_addr;
7561 dev->change_mtu = &s2io_change_mtu;
7562 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
7563 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7564 dev->vlan_rx_register = s2io_vlan_rx_register;
7567 * will use eth_mac_addr() for dev->set_mac_address
7568 * mac address will be set every time dev->open() is called
7570 netif_napi_add(dev, &sp->napi, s2io_poll, 32);
7572 #ifdef CONFIG_NET_POLL_CONTROLLER
7573 dev->poll_controller = s2io_netpoll;
7574 #endif
7576 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
7577 if (sp->high_dma_flag == TRUE)
7578 dev->features |= NETIF_F_HIGHDMA;
7579 dev->features |= NETIF_F_TSO;
7580 dev->features |= NETIF_F_TSO6;
7581 if ((sp->device_type & XFRAME_II_DEVICE) && (ufo)) {
7582 dev->features |= NETIF_F_UFO;
7583 dev->features |= NETIF_F_HW_CSUM;
7586 dev->tx_timeout = &s2io_tx_watchdog;
7587 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
7588 INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
7589 INIT_WORK(&sp->set_link_task, s2io_set_link);
7591 pci_save_state(sp->pdev);
7593 /* Setting swapper control on the NIC, for proper reset operation */
7594 if (s2io_set_swapper(sp)) {
7595 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
7596 dev->name);
7597 ret = -EAGAIN;
7598 goto set_swap_failed;
7601 /* Verify whether the Herc works in the slot it is placed into */
7602 if (sp->device_type & XFRAME_II_DEVICE) {
7603 mode = s2io_verify_pci_mode(sp);
7604 if (mode < 0) {
7605 DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
7606 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
7607 ret = -EBADSLT;
7608 goto set_swap_failed;
7612 /* Not needed for Herc */
7613 if (sp->device_type & XFRAME_I_DEVICE) {
7615 * Fix for all "FFs" MAC address problems observed on
7616 * Alpha platforms
7618 fix_mac_address(sp);
7619 s2io_reset(sp);
7623 * MAC address initialization.
7624 * For now only one mac address will be read and used.
7626 bar0 = sp->bar0;
7627 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
7628 RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
7629 writeq(val64, &bar0->rmac_addr_cmd_mem);
7630 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
7631 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET);
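	/*
	 * The 6-byte MAC address comes back in rmac_addr_data0_mem; split
	 * the 64-bit value into 32-bit halves before unpacking the octets.
	 */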
7632 tmp64 = readq(&bar0->rmac_addr_data0_mem);
7633 mac_down = (u32) tmp64;
7634 mac_up = (u32) (tmp64 >> 32);
7636 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
7637 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
7638 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
7639 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
7640 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
7641 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
7643 /* Set the factory defined MAC address initially */
7644 dev->addr_len = ETH_ALEN;
7645 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
7646 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
7648 /* Store the values of the MSIX table in the s2io_nic structure */
7649 store_xmsi_data(sp);
7650 /* Reset the NIC and bring it to a known state */
7651 s2io_reset(sp);
7654 * Initialize the tasklet status and link state flags
7655 * and the card state parameter
7657 sp->tasklet_status = 0;
7658 sp->state = 0;
7660 /* Initialize spinlocks */
7661 spin_lock_init(&sp->tx_lock);
7663 if (!napi)
7664 spin_lock_init(&sp->put_lock);
7665 spin_lock_init(&sp->rx_lock);
7668 * SXE-002: Configure link and activity LED to init state
7669 * on driver load.
7671 subid = sp->pdev->subsystem_device;
7672 if ((subid & 0xFF) >= 0x07) {
7673 val64 = readq(&bar0->gpio_control);
7674 val64 |= 0x0000800000000000ULL;
7675 writeq(val64, &bar0->gpio_control);
7676 val64 = 0x0411040400000000ULL;
7677 writeq(val64, (void __iomem *) bar0 + 0x2700);
7678 val64 = readq(&bar0->gpio_control);
7681 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
7683 if (register_netdev(dev)) {
7684 DBG_PRINT(ERR_DBG, "Device registration failed\n");
7685 ret = -ENODEV;
7686 goto register_failed;
7688 s2io_vpd_read(sp);
7689 DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2007 Neterion Inc.\n");
7690 DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
7691 sp->product_name, pdev->revision);
7692 DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
7693 s2io_driver_version);
7694 DBG_PRINT(ERR_DBG, "%s: MAC ADDR: %s\n",
7695 dev->name, print_mac(mac, dev->dev_addr));
7696 DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num);
7697 if (sp->device_type & XFRAME_II_DEVICE) {
7698 mode = s2io_print_pci_mode(sp);
7699 if (mode < 0) {
7700 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
7701 ret = -EBADSLT;
7702 unregister_netdev(dev);
7703 goto set_swap_failed;
7706 switch(sp->rxd_mode) {
7707 case RXD_MODE_1:
7708 DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
7709 dev->name);
7710 break;
7711 case RXD_MODE_3B:
7712 DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
7713 dev->name);
7714 break;
7717 if (napi)
7718 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
7719 switch(sp->config.intr_type) {
7720 case INTA:
7721 DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
7722 break;
7723 case MSI_X:
7724 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
7725 break;
7727 if (sp->lro)
7728 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
7729 dev->name);
7730 if (ufo)
7731 DBG_PRINT(ERR_DBG, "%s: UDP Fragmentation Offload(UFO)"
7732 " enabled\n", dev->name);
7733 /* Initialize device name */
7734 sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
7737 * Make the link state off at this point. When the link change
7738 * interrupt arrives, the state will be automatically changed to
7739 * the right state.
7741 netif_carrier_off(dev);
7743 return 0;
7745 register_failed:
7746 set_swap_failed:
7747 iounmap(sp->bar1);
7748 bar1_remap_failed:
7749 iounmap(sp->bar0);
7750 bar0_remap_failed:
7751 mem_alloc_failed:
7752 free_shared_mem(sp);
7753 pci_disable_device(pdev);
7754 pci_release_regions(pdev);
7755 pci_set_drvdata(pdev, NULL);
7756 free_netdev(dev);
7758 return ret;
7762 * s2io_rem_nic - Free the PCI device
7763 * @pdev: structure containing the PCI related information of the device.
7764 * Description: This function is called by the PCI subsystem to release a
7765 * PCI device and free all resources held by the device. This could
7766 * be in response to a Hot plug event or when the driver is to be removed
7767 * from memory.
7770 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
7772 struct net_device *dev =
7773 (struct net_device *) pci_get_drvdata(pdev);
7774 struct s2io_nic *sp;
7776 if (dev == NULL) {
7777 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
7778 return;
7781 flush_scheduled_work();
7783 sp = dev->priv;
7784 unregister_netdev(dev);
7786 free_shared_mem(sp);
7787 iounmap(sp->bar0);
7788 iounmap(sp->bar1);
7789 pci_release_regions(pdev);
7790 pci_set_drvdata(pdev, NULL);
7791 free_netdev(dev);
7792 pci_disable_device(pdev);
7796 * s2io_starter - Entry point for the driver
7797 * Description: This function is the entry point for the driver. It verifies
7798 * the module loadable parameters and initializes PCI configuration space.
7801 int __init s2io_starter(void)
7803 return pci_register_driver(&s2io_driver);
7807 * s2io_closer - Cleanup routine for the driver
7808 * Description: This function is the cleanup routine for the driver. It
7809 * unregisters the driver.
7811 static __exit void s2io_closer(void)
7813 pci_unregister_driver(&s2io_driver);
7814 DBG_PRINT(INIT_DBG, "cleanup done\n");
7817 module_init(s2io_starter);
7818 module_exit(s2io_closer);
7820 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
7821 struct tcphdr **tcp, struct RxD_t *rxdp)
7823 int ip_off;
7824 u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
7826 if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
7827 DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n",
7828 __FUNCTION__);
7829 return -1;
7832 /* TODO:
7833 * By default the VLAN field in the MAC is stripped by the card, if this
7834 * feature is turned off in rx_pa_cfg register, then the ip_off field
7835 * has to be shifted by a further 2 bytes
7837 switch (l2_type) {
7838 case 0: /* DIX type */
7839 case 4: /* DIX type with VLAN */
7840 ip_off = HEADER_ETHERNET_II_802_3_SIZE;
7841 break;
7842 /* LLC, SNAP etc are considered non-mergeable */
7843 default:
7844 return -1;
7847 *ip = (struct iphdr *)((u8 *)buffer + ip_off);
7848 ip_len = (u8)((*ip)->ihl);
7849 ip_len <<= 2;
7850 *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
7852 return 0;
7855 static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
7856 struct tcphdr *tcp)
7858 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7859 if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
7860 (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
7861 return -1;
7862 return 0;
7865 static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
7867 return(ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2));
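/*
 * Worked example: with tot_len = 1500, ihl = 5 (20-byte IP header) and
 * doff = 8 (32-byte TCP header), the L4 payload is 1500 - 20 - 32 = 1448.
 */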
7870 static void initiate_new_session(struct lro *lro, u8 *l2h,
7871 struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len)
7873 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7874 lro->l2h = l2h;
7875 lro->iph = ip;
7876 lro->tcph = tcp;
7877 lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
7878 lro->tcp_ack = ntohl(tcp->ack_seq);
7879 lro->sg_num = 1;
7880 lro->total_len = ntohs(ip->tot_len);
7881 lro->frags_len = 0;
7883 * check if we saw TCP timestamp. Other consistency checks have
7884 * already been done.
7886 if (tcp->doff == 8) {
7887 u32 *ptr;
7888 ptr = (u32 *)(tcp+1);
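		/*
		 * With doff == 8 the 12 option bytes are the canonical
		 * NOP, NOP, TIMESTAMP(10) encoding: the first 32-bit word
		 * carries the padding and option header, the next two carry
		 * tsval and tsecr (layout checked in
		 * verify_l3_l4_lro_capable()).
		 */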
7889 lro->saw_ts = 1;
7890 lro->cur_tsval = *(ptr+1);
7891 lro->cur_tsecr = *(ptr+2);
7893 lro->in_use = 1;
7896 static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
7898 struct iphdr *ip = lro->iph;
7899 struct tcphdr *tcp = lro->tcph;
7900 __sum16 nchk;
7901 struct stat_block *statinfo = sp->mac_control.stats_info;
7902 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7904 /* Update L3 header */
7905 ip->tot_len = htons(lro->total_len);
7906 ip->check = 0;
7907 nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
7908 ip->check = nchk;
7910 /* Update L4 header */
7911 tcp->ack_seq = lro->tcp_ack;
7912 tcp->window = lro->window;
7914 /* Update tsecr field if this session has timestamps enabled */
7915 if (lro->saw_ts) {
7916 u32 *ptr = (u32 *)(tcp + 1);
7917 *(ptr+2) = lro->cur_tsecr;
7920 /* Update counters required for calculation of
7921 * average no. of packets aggregated.
7923 statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num;
7924 statinfo->sw_stat.num_aggregations++;
7927 static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
7928 struct tcphdr *tcp, u32 l4_pyld)
7930 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7931 lro->total_len += l4_pyld;
7932 lro->frags_len += l4_pyld;
7933 lro->tcp_next_seq += l4_pyld;
7934 lro->sg_num++;
7936 /* Update ack seq no. and window advertisement (from this pkt) in LRO object */
7937 lro->tcp_ack = tcp->ack_seq;
7938 lro->window = tcp->window;
7940 if (lro->saw_ts) {
7941 u32 *ptr;
7942 /* Update tsecr and tsval from this packet */
7943 ptr = (u32 *) (tcp + 1);
7944 lro->cur_tsval = *(ptr + 1);
7945 lro->cur_tsecr = *(ptr + 2);
7949 static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
7950 struct tcphdr *tcp, u32 tcp_pyld_len)
7952 u8 *ptr;
7954 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7956 if (!tcp_pyld_len) {
7957 /* Runt frame or a pure ack */
7958 return -1;
7961 if (ip->ihl != 5) /* IP has options */
7962 return -1;
7964 /* If we see CE codepoint in IP header, packet is not mergeable */
7965 if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
7966 return -1;
7968 /* If we see ECE or CWR flags in TCP header, packet is not mergeable */
7969 if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
7970 tcp->ece || tcp->cwr || !tcp->ack) {
7972 * Currently only the ACK control bit is recognized; any
7973 * other control field being set results in
7974 * flushing the LRO session.
7976 return -1;
7980 * Allow only one TCP timestamp option. Don't aggregate if
7981 * any other options are detected.
7983 if (tcp->doff != 5 && tcp->doff != 8)
7984 return -1;
7986 if (tcp->doff == 8) {
7987 ptr = (u8 *)(tcp + 1);
7988 while (*ptr == TCPOPT_NOP)
7989 ptr++;
7990 if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
7991 return -1;
7993 /* Ensure timestamp value increases monotonically */
7994 if (l_lro)
7995 if (l_lro->cur_tsval > *((u32 *)(ptr+2)))
7996 return -1;
7998 /* timestamp echo reply should be non-zero */
7999 if (*((u32 *)(ptr+6)) == 0)
8000 return -1;
8003 return 0;
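/*
 * Return codes of s2io_club_tcp_session(), as consumed by the Rx path:
 *   0 - all LRO sessions in use; send the packet up unaggregated
 *   1 - packet aggregated into an existing session
 *   2 - flush the matching session (out-of-order or unmergeable packet)
 *   3 - a new session was initiated for this flow
 *   4 - aggregated, and the session hit lro_max_aggr_per_sess; flush it
 *   5 - packet is not L3/L4 aggregatable; send it up as-is
 */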
8006 static int
8007 s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro,
8008 struct RxD_t *rxdp, struct s2io_nic *sp)
8010 struct iphdr *ip;
8011 struct tcphdr *tcph;
8012 int ret = 0, i;
8014 if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
8015 rxdp))) {
8016 DBG_PRINT(INFO_DBG,"IP Saddr: %x Daddr: %x\n",
8017 ip->saddr, ip->daddr);
8018 } else {
8019 return ret;
8022 tcph = (struct tcphdr *)*tcp;
8023 *tcp_len = get_l4_pyld_length(ip, tcph);
8024 for (i=0; i<MAX_LRO_SESSIONS; i++) {
8025 struct lro *l_lro = &sp->lro0_n[i];
8026 if (l_lro->in_use) {
8027 if (check_for_socket_match(l_lro, ip, tcph))
8028 continue;
8029 /* Sock pair matched */
8030 *lro = l_lro;
8032 if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
8033 DBG_PRINT(INFO_DBG, "%s:Out of order. expected "
8034 "0x%x, actual 0x%x\n", __FUNCTION__,
8035 (*lro)->tcp_next_seq,
8036 ntohl(tcph->seq));
8038 sp->mac_control.stats_info->
8039 sw_stat.outof_sequence_pkts++;
8040 ret = 2;
8041 break;
8044 if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,*tcp_len))
8045 ret = 1; /* Aggregate */
8046 else
8047 ret = 2; /* Flush both */
8048 break;
8052 if (ret == 0) {
8053 /* Before searching for available LRO objects,
8054 * check if the pkt is L3/L4 aggregatable. If not,
8055 * don't create a new LRO session. Just send this
8056 * packet up.
8058 if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) {
8059 return 5;
8062 for (i=0; i<MAX_LRO_SESSIONS; i++) {
8063 struct lro *l_lro = &sp->lro0_n[i];
8064 if (!(l_lro->in_use)) {
8065 *lro = l_lro;
8066 ret = 3; /* Begin anew */
8067 break;
8072 if (ret == 0) { /* sessions exceeded */
8073 DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n",
8074 __FUNCTION__);
8075 *lro = NULL;
8076 return ret;
8079 switch (ret) {
8080 case 3:
8081 initiate_new_session(*lro, buffer, ip, tcph, *tcp_len);
8082 break;
8083 case 2:
8084 update_L3L4_header(sp, *lro);
8085 break;
8086 case 1:
8087 aggregate_new_rx(*lro, ip, tcph, *tcp_len);
8088 if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
8089 update_L3L4_header(sp, *lro);
8090 ret = 4; /* Flush the LRO */
8092 break;
8093 default:
8094 DBG_PRINT(ERR_DBG,"%s:Dont know, can't say!!\n",
8095 __FUNCTION__);
8096 break;
8099 return ret;
8102 static void clear_lro_session(struct lro *lro)
8104 static u16 lro_struct_size = sizeof(struct lro);
8106 memset(lro, 0, lro_struct_size);
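/*
 * Hand a completed frame to the stack: netif_receive_skb() when running
 * in NAPI (softirq) context, netif_rx() when called from interrupt context.
 */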
8109 static void queue_rx_frame(struct sk_buff *skb)
8111 struct net_device *dev = skb->dev;
8113 skb->protocol = eth_type_trans(skb, dev);
8114 if (napi)
8115 netif_receive_skb(skb);
8116 else
8117 netif_rx(skb);
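/*
 * Append an aggregated segment to the parent skb's frag_list: only the
 * tcp_len payload bytes of the new segment are kept (skb_pull strips the
 * headers), and len/data_len/truesize accounting is updated to match.
 */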
8120 static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
8121 struct sk_buff *skb,
8122 u32 tcp_len)
8124 struct sk_buff *first = lro->parent;
8126 first->len += tcp_len;
8127 first->data_len = lro->frags_len;
8128 skb_pull(skb, (skb->len - tcp_len));
8129 if (skb_shinfo(first)->frag_list)
8130 lro->last_frag->next = skb;
8131 else
8132 skb_shinfo(first)->frag_list = skb;
8133 first->truesize += skb->truesize;
8134 lro->last_frag = skb;
8135 sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
8136 return;
8140 * s2io_io_error_detected - called when PCI error is detected
8141 * @pdev: Pointer to PCI device
8142 * @state: The current pci connection state
8144 * This function is called after a PCI bus error affecting
8145 * this device has been detected.
8147 static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
8148 pci_channel_state_t state)
8150 struct net_device *netdev = pci_get_drvdata(pdev);
8151 struct s2io_nic *sp = netdev->priv;
8153 netif_device_detach(netdev);
8155 if (netif_running(netdev)) {
8156 /* Bring down the card, while avoiding PCI I/O */
8157 do_s2io_card_down(sp, 0);
8159 pci_disable_device(pdev);
8161 return PCI_ERS_RESULT_NEED_RESET;
8165 * s2io_io_slot_reset - called after the pci bus has been reset.
8166 * @pdev: Pointer to PCI device
8168 * Restart the card from scratch, as if from a cold-boot.
8169 * At this point, the card has experienced a hard reset,
8170 * followed by fixups by BIOS, and has its config space
8171 * set up identically to what it was at cold boot.
8173 static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
8175 struct net_device *netdev = pci_get_drvdata(pdev);
8176 struct s2io_nic *sp = netdev->priv;
8178 if (pci_enable_device(pdev)) {
8179 printk(KERN_ERR "s2io: "
8180 "Cannot re-enable PCI device after reset.\n");
8181 return PCI_ERS_RESULT_DISCONNECT;
8184 pci_set_master(pdev);
8185 s2io_reset(sp);
8187 return PCI_ERS_RESULT_RECOVERED;
8191 * s2io_io_resume - called when traffic can start flowing again.
8192 * @pdev: Pointer to PCI device
8194 * This callback is called when the error recovery driver tells
8195 * us that it's OK to resume normal operation.
8197 static void s2io_io_resume(struct pci_dev *pdev)
8199 struct net_device *netdev = pci_get_drvdata(pdev);
8200 struct s2io_nic *sp = netdev->priv;
8202 if (netif_running(netdev)) {
8203 if (s2io_card_up(sp)) {
8204 printk(KERN_ERR "s2io: "
8205 "Can't bring device back up after reset.\n");
8206 return;
8209 if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
8210 s2io_card_down(sp);
8211 printk(KERN_ERR "s2io: "
8212 "Can't resetore mac addr after reset.\n");
8213 return;
8217 netif_device_attach(netdev);
8218 netif_wake_queue(netdev);
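/*
 * Illustrative wiring (a sketch, not necessarily how this file does it):
 * PCI error recovery callbacks like the three above are normally
 * referenced from a pci_error_handlers structure hooked into the
 * pci_driver definition, e.g.:
 *
 *	static struct pci_error_handlers s2io_err_handler = {
 *		.error_detected = s2io_io_error_detected,
 *		.slot_reset = s2io_io_slot_reset,
 *		.resume = s2io_io_resume,
 *	};
 */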