added 2.6.29.6 aldebaran kernel
[nao-ulib.git] / kernel / 2.6.29.6-aldebaran-rt / drivers / net / s2io.c
blob0886054ec261fda05304176e20610d16c682d7e3
1 /************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3 * Copyright(c) 2002-2007 Neterion Inc.
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
13 * Credits:
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
18 * questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
21 * Francois Romieu : For pointing out all code part that were
22 * deprecated and also styling related comments.
23 * Grant Grundler : For helping me get rid of some Architecture
24 * dependent code.
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
27 * The module loadable parameters that are supported by the driver and a brief
28 * explanation of all the variables.
30 * rx_ring_num : This can be used to program the number of receive rings used
31 * in the driver.
32 * rx_ring_sz: This defines the number of receive blocks each ring can have.
33 * This is also an array of size 8.
34 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
35 * values are 1, 2.
36 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
37 * tx_fifo_len: This too is an array of 8. Each element defines the number of
38 * Tx descriptors that can be associated with each corresponding FIFO.
39 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40 * 2(MSI_X). Default value is '2(MSI_X)'
41 * lro_enable: Specifies whether to enable Large Receive Offload (LRO) or not.
42 * Possible values '1' for enable '0' for disable. Default is '0'
43 * lro_max_pkts: This parameter defines maximum number of packets can be
44 * aggregated as a single large packet
45 * napi: This parameter used to enable/disable NAPI (polling Rx)
46 * Possible values '1' for enable and '0' for disable. Default is '1'
47 * ufo: This parameter used to enable/disable UDP Fragmentation Offload(UFO)
48 * Possible values '1' for enable and '0' for disable. Default is '0'
49 * vlan_tag_strip: This can be used to enable or disable vlan stripping.
50 * Possible values '1' for enable , '0' for disable.
51 * Default is '2' - which means disable in promisc mode
52 * and enable in non-promiscuous mode.
53 * multiq: This parameter used to enable/disable MULTIQUEUE support.
54 * Possible values '1' for enable and '0' for disable. Default is '0'
55 ************************************************************************/
57 #include <linux/module.h>
58 #include <linux/types.h>
59 #include <linux/errno.h>
60 #include <linux/ioport.h>
61 #include <linux/pci.h>
62 #include <linux/dma-mapping.h>
63 #include <linux/kernel.h>
64 #include <linux/netdevice.h>
65 #include <linux/etherdevice.h>
66 #include <linux/skbuff.h>
67 #include <linux/init.h>
68 #include <linux/delay.h>
69 #include <linux/stddef.h>
70 #include <linux/ioctl.h>
71 #include <linux/timex.h>
72 #include <linux/ethtool.h>
73 #include <linux/workqueue.h>
74 #include <linux/if_vlan.h>
75 #include <linux/ip.h>
76 #include <linux/tcp.h>
77 #include <net/tcp.h>
79 #include <asm/system.h>
80 #include <asm/uaccess.h>
81 #include <asm/io.h>
82 #include <asm/div64.h>
83 #include <asm/irq.h>
85 /* local include */
86 #include "s2io.h"
87 #include "s2io-regs.h"
89 #define DRV_VERSION "2.0.26.25"
91 /* S2io Driver name & version. */
92 static char s2io_driver_name[] = "Neterion";
93 static char s2io_driver_version[] = DRV_VERSION;
95 static int rxd_size[2] = {32,48};
96 static int rxd_count[2] = {127,85};
98 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
100 int ret;
102 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
103 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
105 return ret;
109 * Cards with following subsystem_id have a link state indication
110 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
111 * macro below identifies these cards given the subsystem_id.
113 #define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
114 (dev_type == XFRAME_I_DEVICE) ? \
115 ((((subid >= 0x600B) && (subid <= 0x600D)) || \
116 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
118 #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
119 ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
/* Reports whether the __S2IO_STATE_CARD_UP bit is set in sp->state,
 * i.e. whether the adapter has been brought up.
 */
static inline int is_s2io_card_up(const struct s2io_nic * sp)
{
	return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
}
126 /* Ethtool related variables and Macros. */
127 static char s2io_gstrings[][ETH_GSTRING_LEN] = {
128 "Register test\t(offline)",
129 "Eeprom test\t(offline)",
130 "Link test\t(online)",
131 "RLDRAM test\t(offline)",
132 "BIST Test\t(offline)"
135 static char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
136 {"tmac_frms"},
137 {"tmac_data_octets"},
138 {"tmac_drop_frms"},
139 {"tmac_mcst_frms"},
140 {"tmac_bcst_frms"},
141 {"tmac_pause_ctrl_frms"},
142 {"tmac_ttl_octets"},
143 {"tmac_ucst_frms"},
144 {"tmac_nucst_frms"},
145 {"tmac_any_err_frms"},
146 {"tmac_ttl_less_fb_octets"},
147 {"tmac_vld_ip_octets"},
148 {"tmac_vld_ip"},
149 {"tmac_drop_ip"},
150 {"tmac_icmp"},
151 {"tmac_rst_tcp"},
152 {"tmac_tcp"},
153 {"tmac_udp"},
154 {"rmac_vld_frms"},
155 {"rmac_data_octets"},
156 {"rmac_fcs_err_frms"},
157 {"rmac_drop_frms"},
158 {"rmac_vld_mcst_frms"},
159 {"rmac_vld_bcst_frms"},
160 {"rmac_in_rng_len_err_frms"},
161 {"rmac_out_rng_len_err_frms"},
162 {"rmac_long_frms"},
163 {"rmac_pause_ctrl_frms"},
164 {"rmac_unsup_ctrl_frms"},
165 {"rmac_ttl_octets"},
166 {"rmac_accepted_ucst_frms"},
167 {"rmac_accepted_nucst_frms"},
168 {"rmac_discarded_frms"},
169 {"rmac_drop_events"},
170 {"rmac_ttl_less_fb_octets"},
171 {"rmac_ttl_frms"},
172 {"rmac_usized_frms"},
173 {"rmac_osized_frms"},
174 {"rmac_frag_frms"},
175 {"rmac_jabber_frms"},
176 {"rmac_ttl_64_frms"},
177 {"rmac_ttl_65_127_frms"},
178 {"rmac_ttl_128_255_frms"},
179 {"rmac_ttl_256_511_frms"},
180 {"rmac_ttl_512_1023_frms"},
181 {"rmac_ttl_1024_1518_frms"},
182 {"rmac_ip"},
183 {"rmac_ip_octets"},
184 {"rmac_hdr_err_ip"},
185 {"rmac_drop_ip"},
186 {"rmac_icmp"},
187 {"rmac_tcp"},
188 {"rmac_udp"},
189 {"rmac_err_drp_udp"},
190 {"rmac_xgmii_err_sym"},
191 {"rmac_frms_q0"},
192 {"rmac_frms_q1"},
193 {"rmac_frms_q2"},
194 {"rmac_frms_q3"},
195 {"rmac_frms_q4"},
196 {"rmac_frms_q5"},
197 {"rmac_frms_q6"},
198 {"rmac_frms_q7"},
199 {"rmac_full_q0"},
200 {"rmac_full_q1"},
201 {"rmac_full_q2"},
202 {"rmac_full_q3"},
203 {"rmac_full_q4"},
204 {"rmac_full_q5"},
205 {"rmac_full_q6"},
206 {"rmac_full_q7"},
207 {"rmac_pause_cnt"},
208 {"rmac_xgmii_data_err_cnt"},
209 {"rmac_xgmii_ctrl_err_cnt"},
210 {"rmac_accepted_ip"},
211 {"rmac_err_tcp"},
212 {"rd_req_cnt"},
213 {"new_rd_req_cnt"},
214 {"new_rd_req_rtry_cnt"},
215 {"rd_rtry_cnt"},
216 {"wr_rtry_rd_ack_cnt"},
217 {"wr_req_cnt"},
218 {"new_wr_req_cnt"},
219 {"new_wr_req_rtry_cnt"},
220 {"wr_rtry_cnt"},
221 {"wr_disc_cnt"},
222 {"rd_rtry_wr_ack_cnt"},
223 {"txp_wr_cnt"},
224 {"txd_rd_cnt"},
225 {"txd_wr_cnt"},
226 {"rxd_rd_cnt"},
227 {"rxd_wr_cnt"},
228 {"txf_rd_cnt"},
229 {"rxf_wr_cnt"}
232 static char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
233 {"rmac_ttl_1519_4095_frms"},
234 {"rmac_ttl_4096_8191_frms"},
235 {"rmac_ttl_8192_max_frms"},
236 {"rmac_ttl_gt_max_frms"},
237 {"rmac_osized_alt_frms"},
238 {"rmac_jabber_alt_frms"},
239 {"rmac_gt_max_alt_frms"},
240 {"rmac_vlan_frms"},
241 {"rmac_len_discard"},
242 {"rmac_fcs_discard"},
243 {"rmac_pf_discard"},
244 {"rmac_da_discard"},
245 {"rmac_red_discard"},
246 {"rmac_rts_discard"},
247 {"rmac_ingm_full_discard"},
248 {"link_fault_cnt"}
251 static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
252 {"\n DRIVER STATISTICS"},
253 {"single_bit_ecc_errs"},
254 {"double_bit_ecc_errs"},
255 {"parity_err_cnt"},
256 {"serious_err_cnt"},
257 {"soft_reset_cnt"},
258 {"fifo_full_cnt"},
259 {"ring_0_full_cnt"},
260 {"ring_1_full_cnt"},
261 {"ring_2_full_cnt"},
262 {"ring_3_full_cnt"},
263 {"ring_4_full_cnt"},
264 {"ring_5_full_cnt"},
265 {"ring_6_full_cnt"},
266 {"ring_7_full_cnt"},
267 {"alarm_transceiver_temp_high"},
268 {"alarm_transceiver_temp_low"},
269 {"alarm_laser_bias_current_high"},
270 {"alarm_laser_bias_current_low"},
271 {"alarm_laser_output_power_high"},
272 {"alarm_laser_output_power_low"},
273 {"warn_transceiver_temp_high"},
274 {"warn_transceiver_temp_low"},
275 {"warn_laser_bias_current_high"},
276 {"warn_laser_bias_current_low"},
277 {"warn_laser_output_power_high"},
278 {"warn_laser_output_power_low"},
279 {"lro_aggregated_pkts"},
280 {"lro_flush_both_count"},
281 {"lro_out_of_sequence_pkts"},
282 {"lro_flush_due_to_max_pkts"},
283 {"lro_avg_aggr_pkts"},
284 {"mem_alloc_fail_cnt"},
285 {"pci_map_fail_cnt"},
286 {"watchdog_timer_cnt"},
287 {"mem_allocated"},
288 {"mem_freed"},
289 {"link_up_cnt"},
290 {"link_down_cnt"},
291 {"link_up_time"},
292 {"link_down_time"},
293 {"tx_tcode_buf_abort_cnt"},
294 {"tx_tcode_desc_abort_cnt"},
295 {"tx_tcode_parity_err_cnt"},
296 {"tx_tcode_link_loss_cnt"},
297 {"tx_tcode_list_proc_err_cnt"},
298 {"rx_tcode_parity_err_cnt"},
299 {"rx_tcode_abort_cnt"},
300 {"rx_tcode_parity_abort_cnt"},
301 {"rx_tcode_rda_fail_cnt"},
302 {"rx_tcode_unkn_prot_cnt"},
303 {"rx_tcode_fcs_err_cnt"},
304 {"rx_tcode_buf_size_err_cnt"},
305 {"rx_tcode_rxd_corrupt_cnt"},
306 {"rx_tcode_unkn_err_cnt"},
307 {"tda_err_cnt"},
308 {"pfc_err_cnt"},
309 {"pcc_err_cnt"},
310 {"tti_err_cnt"},
311 {"tpa_err_cnt"},
312 {"sm_err_cnt"},
313 {"lso_err_cnt"},
314 {"mac_tmac_err_cnt"},
315 {"mac_rmac_err_cnt"},
316 {"xgxs_txgxs_err_cnt"},
317 {"xgxs_rxgxs_err_cnt"},
318 {"rc_err_cnt"},
319 {"prc_pcix_err_cnt"},
320 {"rpa_err_cnt"},
321 {"rda_err_cnt"},
322 {"rti_err_cnt"},
323 {"mc_err_cnt"}
326 #define S2IO_XENA_STAT_LEN ARRAY_SIZE(ethtool_xena_stats_keys)
327 #define S2IO_ENHANCED_STAT_LEN ARRAY_SIZE(ethtool_enhanced_stats_keys)
328 #define S2IO_DRIVER_STAT_LEN ARRAY_SIZE(ethtool_driver_stats_keys)
330 #define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN )
331 #define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN )
333 #define XFRAME_I_STAT_STRINGS_LEN ( XFRAME_I_STAT_LEN * ETH_GSTRING_LEN )
334 #define XFRAME_II_STAT_STRINGS_LEN ( XFRAME_II_STAT_LEN * ETH_GSTRING_LEN )
336 #define S2IO_TEST_LEN ARRAY_SIZE(s2io_gstrings)
337 #define S2IO_STRINGS_LEN S2IO_TEST_LEN * ETH_GSTRING_LEN
/*
 * Initialize and arm a kernel timer in one step.
 * @timer:  struct timer_list to set up (passed by name, not pointer)
 * @handle: callback to run on expiry
 * @arg:    value stored in timer.data, handed to the callback
 * @exp:    expiry delay in jiffies from now
 *
 * Wrapped in do { } while (0) so the macro behaves as a single
 * statement: the original expansion was four statements, which would
 * silently misbehave inside an un-braced `if`/`else` body.
 */
#define S2IO_TIMER_CONF(timer, handle, arg, exp)	\
	do {						\
		init_timer(&timer);			\
		timer.function = handle;		\
		timer.data = (unsigned long) arg;	\
		mod_timer(&timer, (jiffies + exp));	\
	} while (0)
345 /* copy mac addr to def_mac_addr array */
346 static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
348 sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
349 sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
350 sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
351 sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
352 sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
353 sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
356 /* Add the vlan */
/* Install the vlan group pointer for the device.
 * All Tx fifo locks are taken before the swap so no transmit path can
 * observe a half-updated vlgrp; they are released in reverse order of
 * acquisition, restoring the saved irq flags per fifo.
 */
static void s2io_vlan_rx_register(struct net_device *dev,
					struct vlan_group *grp)
{
	int i;
	struct s2io_nic *nic = netdev_priv(dev);
	unsigned long flags[MAX_TX_FIFOS];
	struct mac_info *mac_control = &nic->mac_control;
	struct config_param *config = &nic->config;

	for (i = 0; i < config->tx_fifo_num; i++)
		spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags[i]);

	nic->vlgrp = grp;
	/* unlock in reverse of lock order */
	for (i = config->tx_fifo_num - 1; i >= 0; i--)
		spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock,
				flags[i]);
}
375 /* Unregister the vlan */
/* Remove @vid from the vlan group, if one is registered.
 * Same locking discipline as s2io_vlan_rx_register(): every Tx fifo
 * lock is held across the update, released in reverse order.
 */
static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	int i;
	struct s2io_nic *nic = netdev_priv(dev);
	unsigned long flags[MAX_TX_FIFOS];
	struct mac_info *mac_control = &nic->mac_control;
	struct config_param *config = &nic->config;

	for (i = 0; i < config->tx_fifo_num; i++)
		spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags[i]);

	if (nic->vlgrp)
		vlan_group_set_device(nic->vlgrp, vid, NULL);

	for (i = config->tx_fifo_num - 1; i >= 0; i--)
		spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock,
				flags[i]);
}
396 * Constants to be programmed into the Xena's registers, to configure
397 * the XAUI.
400 #define END_SIGN 0x0
401 static const u64 herc_act_dtx_cfg[] = {
402 /* Set address */
403 0x8000051536750000ULL, 0x80000515367500E0ULL,
404 /* Write data */
405 0x8000051536750004ULL, 0x80000515367500E4ULL,
406 /* Set address */
407 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
408 /* Write data */
409 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
410 /* Set address */
411 0x801205150D440000ULL, 0x801205150D4400E0ULL,
412 /* Write data */
413 0x801205150D440004ULL, 0x801205150D4400E4ULL,
414 /* Set address */
415 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
416 /* Write data */
417 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
418 /* Done */
419 END_SIGN
422 static const u64 xena_dtx_cfg[] = {
423 /* Set address */
424 0x8000051500000000ULL, 0x80000515000000E0ULL,
425 /* Write data */
426 0x80000515D9350004ULL, 0x80000515D93500E4ULL,
427 /* Set address */
428 0x8001051500000000ULL, 0x80010515000000E0ULL,
429 /* Write data */
430 0x80010515001E0004ULL, 0x80010515001E00E4ULL,
431 /* Set address */
432 0x8002051500000000ULL, 0x80020515000000E0ULL,
433 /* Write data */
434 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
435 END_SIGN
439 * Constants for Fixing the MacAddress problem seen mostly on
440 * Alpha machines.
442 static const u64 fix_mac[] = {
443 0x0060000000000000ULL, 0x0060600000000000ULL,
444 0x0040600000000000ULL, 0x0000600000000000ULL,
445 0x0020600000000000ULL, 0x0060600000000000ULL,
446 0x0020600000000000ULL, 0x0060600000000000ULL,
447 0x0020600000000000ULL, 0x0060600000000000ULL,
448 0x0020600000000000ULL, 0x0060600000000000ULL,
449 0x0020600000000000ULL, 0x0060600000000000ULL,
450 0x0020600000000000ULL, 0x0060600000000000ULL,
451 0x0020600000000000ULL, 0x0060600000000000ULL,
452 0x0020600000000000ULL, 0x0060600000000000ULL,
453 0x0020600000000000ULL, 0x0060600000000000ULL,
454 0x0020600000000000ULL, 0x0060600000000000ULL,
455 0x0020600000000000ULL, 0x0000600000000000ULL,
456 0x0040600000000000ULL, 0x0060600000000000ULL,
457 END_SIGN
460 MODULE_LICENSE("GPL");
461 MODULE_VERSION(DRV_VERSION);
464 /* Module Loadable parameters. */
465 S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
466 S2IO_PARM_INT(rx_ring_num, 1);
467 S2IO_PARM_INT(multiq, 0);
468 S2IO_PARM_INT(rx_ring_mode, 1);
469 S2IO_PARM_INT(use_continuous_tx_intrs, 1);
470 S2IO_PARM_INT(rmac_pause_time, 0x100);
471 S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
472 S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
473 S2IO_PARM_INT(shared_splits, 0);
474 S2IO_PARM_INT(tmac_util_period, 5);
475 S2IO_PARM_INT(rmac_util_period, 5);
476 S2IO_PARM_INT(l3l4hdr_size, 128);
477 /* 0 is no steering, 1 is Priority steering, 2 is Default steering */
478 S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
479 /* Frequency of Rx desc syncs expressed as power of 2 */
480 S2IO_PARM_INT(rxsync_frequency, 3);
481 /* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
482 S2IO_PARM_INT(intr_type, 2);
483 /* Large receive offload feature */
484 static unsigned int lro_enable;
485 module_param_named(lro, lro_enable, uint, 0);
487 /* Max pkts to be aggregated by LRO at one time. If not specified,
488 * aggregation happens until we hit max IP pkt size(64K)
490 S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
491 S2IO_PARM_INT(indicate_max_pkts, 0);
493 S2IO_PARM_INT(napi, 1);
494 S2IO_PARM_INT(ufo, 0);
495 S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
497 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
498 {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
499 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
500 {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
501 static unsigned int rts_frm_len[MAX_RX_RINGS] =
502 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
504 module_param_array(tx_fifo_len, uint, NULL, 0);
505 module_param_array(rx_ring_sz, uint, NULL, 0);
506 module_param_array(rts_frm_len, uint, NULL, 0);
509 * S2IO device table.
510 * This table lists all the devices that this driver supports.
512 static struct pci_device_id s2io_tbl[] __devinitdata = {
513 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
514 PCI_ANY_ID, PCI_ANY_ID},
515 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
516 PCI_ANY_ID, PCI_ANY_ID},
517 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
518 PCI_ANY_ID, PCI_ANY_ID},
519 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
520 PCI_ANY_ID, PCI_ANY_ID},
521 {0,}
524 MODULE_DEVICE_TABLE(pci, s2io_tbl);
526 static struct pci_error_handlers s2io_err_handler = {
527 .error_detected = s2io_io_error_detected,
528 .slot_reset = s2io_io_slot_reset,
529 .resume = s2io_io_resume,
532 static struct pci_driver s2io_driver = {
533 .name = "S2IO",
534 .id_table = s2io_tbl,
535 .probe = s2io_init_nic,
536 .remove = __devexit_p(s2io_rem_nic),
537 .err_handler = &s2io_err_handler,
540 /* A simplifier macro used both by init and free shared_mem Fns(). */
541 #define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
543 /* netqueue manipulation helper functions */
544 static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
546 if (!sp->config.multiq) {
547 int i;
549 for (i = 0; i < sp->config.tx_fifo_num; i++)
550 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
552 netif_tx_stop_all_queues(sp->dev);
555 static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
557 if (!sp->config.multiq)
558 sp->mac_control.fifos[fifo_no].queue_state =
559 FIFO_QUEUE_STOP;
561 netif_tx_stop_all_queues(sp->dev);
564 static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
566 if (!sp->config.multiq) {
567 int i;
569 for (i = 0; i < sp->config.tx_fifo_num; i++)
570 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
572 netif_tx_start_all_queues(sp->dev);
575 static inline void s2io_start_tx_queue(struct s2io_nic *sp, int fifo_no)
577 if (!sp->config.multiq)
578 sp->mac_control.fifos[fifo_no].queue_state =
579 FIFO_QUEUE_START;
581 netif_tx_start_all_queues(sp->dev);
584 static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
586 if (!sp->config.multiq) {
587 int i;
589 for (i = 0; i < sp->config.tx_fifo_num; i++)
590 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
592 netif_tx_wake_all_queues(sp->dev);
/* Wake a single fifo's queue after @cnt descriptors were freed by Tx
 * completion.  In multiqueue mode the matching subqueue is woken; in
 * single-queue mode the shared netdev queue is woken only if this fifo
 * was the one marked stopped, and its soft state is flipped back to
 * FIFO_QUEUE_START first.  A zero @cnt means nothing was reclaimed, so
 * no wake is attempted.
 */
static inline void s2io_wake_tx_queue(
	struct fifo_info *fifo, int cnt, u8 multiq)
{
	if (multiq) {
		if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
			netif_wake_subqueue(fifo->dev, fifo->fifo_no);
	} else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
		if (netif_queue_stopped(fifo->dev)) {
			fifo->queue_state = FIFO_QUEUE_START;
			netif_wake_queue(fifo->dev);
		}
	}
}
/**
 * init_shared_mem - Allocation and Initialization of Memory
 * @nic: Device private variable.
 * Description: The function allocates all the memory areas shared
 * between the NIC and the driver. This includes Tx descriptors,
 * Rx descriptors and the statistics block.
 *
 * Return: SUCCESS on success; -EINVAL, -ENOMEM or FAILURE on error.
 * On any failure the caller is expected to invoke free_shared_mem()
 * to release whatever was allocated before the failure.
 */

static int init_shared_mem(struct s2io_nic *nic)
{
	u32 size;
	void *tmp_v_addr, *tmp_v_addr_next;
	dma_addr_t tmp_p_addr, tmp_p_addr_next;
	struct RxD_block *pre_rxd_blk = NULL;
	int i, j, blk_cnt;
	int lst_size, lst_per_page;
	struct net_device *dev = nic->dev;
	unsigned long tmp;
	struct buffAdd *ba;

	struct mac_info *mac_control;
	struct config_param *config;
	unsigned long long mem_allocated = 0;	/* running byte count for sw_stat */

	mac_control = &nic->mac_control;
	config = &nic->config;


	/* Allocation and initialization of TXDLs in FIOFs */
	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		size += config->tx_cfg[i].fifo_len;
	}
	if (size > MAX_AVAILABLE_TXDS) {
		DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
		DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
		return -EINVAL;
	}

	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		size = config->tx_cfg[i].fifo_len;
		/*
		 * Legal values are from 2 to 8192
		 */
		if (size < 2) {
			DBG_PRINT(ERR_DBG, "s2io: Invalid fifo len (%d)", size);
			DBG_PRINT(ERR_DBG, "for fifo %d\n", i);
			DBG_PRINT(ERR_DBG, "s2io: Legal values for fifo len"
				"are 2 to 8192\n");
			return -EINVAL;
		}
	}

	lst_size = (sizeof(struct TxD) * config->max_txds);
	lst_per_page = PAGE_SIZE / lst_size;

	/* One list_info_hold per TxD list, per fifo. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		int fifo_len = config->tx_cfg[i].fifo_len;
		int list_holder_size = fifo_len * sizeof(struct list_info_hold);
		mac_control->fifos[i].list_info = kzalloc(list_holder_size,
							  GFP_KERNEL);
		if (!mac_control->fifos[i].list_info) {
			DBG_PRINT(INFO_DBG,
				  "Malloc failed for list_info\n");
			return -ENOMEM;
		}
		mem_allocated += list_holder_size;
	}
	for (i = 0; i < config->tx_fifo_num; i++) {
		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
						lst_per_page);
		mac_control->fifos[i].tx_curr_put_info.offset = 0;
		mac_control->fifos[i].tx_curr_put_info.fifo_len =
		    config->tx_cfg[i].fifo_len - 1;
		mac_control->fifos[i].tx_curr_get_info.offset = 0;
		mac_control->fifos[i].tx_curr_get_info.fifo_len =
		    config->tx_cfg[i].fifo_len - 1;
		mac_control->fifos[i].fifo_no = i;
		mac_control->fifos[i].nic = nic;
		mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;
		mac_control->fifos[i].dev = dev;

		for (j = 0; j < page_num; j++) {
			int k = 0;
			dma_addr_t tmp_p;
			void *tmp_v;
			tmp_v = pci_alloc_consistent(nic->pdev,
						     PAGE_SIZE, &tmp_p);
			if (!tmp_v) {
				DBG_PRINT(INFO_DBG,
					  "pci_alloc_consistent ");
				DBG_PRINT(INFO_DBG, "failed for TxDL\n");
				return -ENOMEM;
			}
			/* If we got a zero DMA address(can happen on
			 * certain platforms like PPC), reallocate.
			 * Store virtual address of page we don't want,
			 * to be freed later.
			 */
			if (!tmp_p) {
				mac_control->zerodma_virt_addr = tmp_v;
				DBG_PRINT(INIT_DBG,
				"%s: Zero DMA address for TxDL. ", dev->name);
				DBG_PRINT(INIT_DBG,
				"Virtual address %p\n", tmp_v);
				tmp_v = pci_alloc_consistent(nic->pdev,
						     PAGE_SIZE, &tmp_p);
				if (!tmp_v) {
					DBG_PRINT(INFO_DBG,
						  "pci_alloc_consistent ");
					DBG_PRINT(INFO_DBG,
						  "failed for TxDL\n");
					return -ENOMEM;
				}
				/* NOTE(review): only the re-allocated page is
				 * added to mem_allocated; the common path above
				 * never counts its PAGE_SIZE — accounting looks
				 * off by one page per TxDL page. Confirm intent.
				 */
				mem_allocated += PAGE_SIZE;
			}
			/* Carve the page into TxD lists. */
			while (k < lst_per_page) {
				int l = (j * lst_per_page) + k;
				if (l == config->tx_cfg[i].fifo_len)
					break;
				mac_control->fifos[i].list_info[l].list_virt_addr =
				    tmp_v + (k * lst_size);
				mac_control->fifos[i].list_info[l].list_phy_addr =
				    tmp_p + (k * lst_size);
				k++;
			}
		}
	}

	/* Per-fifo scratch array used by the UFO path. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		size = config->tx_cfg[i].fifo_len;
		mac_control->fifos[i].ufo_in_band_v
			= kcalloc(size, sizeof(u64), GFP_KERNEL);
		if (!mac_control->fifos[i].ufo_in_band_v)
			return -ENOMEM;
		mem_allocated += (size * sizeof(u64));
	}

	/* Allocation and initialization of RXDs in Rings */
	size = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		/* Each Rx block holds rxd_count RxDs plus one link RxD. */
		if (config->rx_cfg[i].num_rxd %
		    (rxd_count[nic->rxd_mode] + 1)) {
			DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
			DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
				  i);
			DBG_PRINT(ERR_DBG, "RxDs per Block");
			return FAILURE;
		}
		size += config->rx_cfg[i].num_rxd;
		mac_control->rings[i].block_count =
			config->rx_cfg[i].num_rxd /
			(rxd_count[nic->rxd_mode] + 1 );
		mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
			mac_control->rings[i].block_count;
	}
	if (nic->rxd_mode == RXD_MODE_1)
		size = (size * (sizeof(struct RxD1)));
	else
		size = (size * (sizeof(struct RxD3)));

	for (i = 0; i < config->rx_ring_num; i++) {
		mac_control->rings[i].rx_curr_get_info.block_index = 0;
		mac_control->rings[i].rx_curr_get_info.offset = 0;
		mac_control->rings[i].rx_curr_get_info.ring_len =
		    config->rx_cfg[i].num_rxd - 1;
		mac_control->rings[i].rx_curr_put_info.block_index = 0;
		mac_control->rings[i].rx_curr_put_info.offset = 0;
		mac_control->rings[i].rx_curr_put_info.ring_len =
		    config->rx_cfg[i].num_rxd - 1;
		mac_control->rings[i].nic = nic;
		mac_control->rings[i].ring_no = i;
		mac_control->rings[i].lro = lro_enable;

		blk_cnt = config->rx_cfg[i].num_rxd /
		    (rxd_count[nic->rxd_mode] + 1);
		/* Allocating all the Rx blocks */
		for (j = 0; j < blk_cnt; j++) {
			struct rx_block_info *rx_blocks;
			int l;

			rx_blocks = &mac_control->rings[i].rx_blocks[j];
			size = SIZE_OF_BLOCK; /* size is always page size */
			tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
							  &tmp_p_addr);
			if (tmp_v_addr == NULL) {
				/*
				 * In case of failure, free_shared_mem()
				 * is called, which should free any
				 * memory that was alloced till the
				 * failure happened.
				 */
				rx_blocks->block_virt_addr = tmp_v_addr;
				return -ENOMEM;
			}
			mem_allocated += size;
			memset(tmp_v_addr, 0, size);
			rx_blocks->block_virt_addr = tmp_v_addr;
			rx_blocks->block_dma_addr = tmp_p_addr;
			rx_blocks->rxds = kmalloc(sizeof(struct rxd_info)*
						  rxd_count[nic->rxd_mode],
						  GFP_KERNEL);
			if (!rx_blocks->rxds)
				return -ENOMEM;
			mem_allocated +=
			(sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
			/* Record per-RxD virtual/DMA addresses inside block. */
			for (l=0; l<rxd_count[nic->rxd_mode];l++) {
				rx_blocks->rxds[l].virt_addr =
					rx_blocks->block_virt_addr +
					(rxd_size[nic->rxd_mode] * l);
				rx_blocks->rxds[l].dma_addr =
					rx_blocks->block_dma_addr +
					(rxd_size[nic->rxd_mode] * l);
			}
		}
		/* Interlinking all Rx Blocks (circular: last links to first) */
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr =
				mac_control->rings[i].rx_blocks[j].block_virt_addr;
			tmp_v_addr_next =
				mac_control->rings[i].rx_blocks[(j + 1) %
					      blk_cnt].block_virt_addr;
			tmp_p_addr =
				mac_control->rings[i].rx_blocks[j].block_dma_addr;
			tmp_p_addr_next =
				mac_control->rings[i].rx_blocks[(j + 1) %
					      blk_cnt].block_dma_addr;

			pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
			pre_rxd_blk->reserved_2_pNext_RxD_block =
				(unsigned long) tmp_v_addr_next;
			pre_rxd_blk->pNext_RxD_Blk_physical =
				(u64) tmp_p_addr_next;
		}
	}
	if (nic->rxd_mode == RXD_MODE_3B) {
		/*
		 * Allocation of Storages for buffer addresses in 2BUFF mode
		 * and the buffers as well.
		 */
		for (i = 0; i < config->rx_ring_num; i++) {
			blk_cnt = config->rx_cfg[i].num_rxd /
			    (rxd_count[nic->rxd_mode]+ 1);
			mac_control->rings[i].ba =
				kmalloc((sizeof(struct buffAdd *) * blk_cnt),
					GFP_KERNEL);
			if (!mac_control->rings[i].ba)
				return -ENOMEM;
			mem_allocated +=(sizeof(struct buffAdd *) * blk_cnt);
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;
				mac_control->rings[i].ba[j] =
					kmalloc((sizeof(struct buffAdd) *
						(rxd_count[nic->rxd_mode] + 1)),
						GFP_KERNEL);
				if (!mac_control->rings[i].ba[j])
					return -ENOMEM;
				mem_allocated += (sizeof(struct buffAdd) * \
					(rxd_count[nic->rxd_mode] + 1));
				while (k != rxd_count[nic->rxd_mode]) {
					ba = &mac_control->rings[i].ba[j][k];

					/* Over-allocate by ALIGN_SIZE and round
					 * up so ba_0/ba_1 are aligned pointers
					 * into ba_0_org/ba_1_org. */
					ba->ba_0_org = (void *) kmalloc
					    (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
					if (!ba->ba_0_org)
						return -ENOMEM;
					mem_allocated +=
						(BUF0_LEN + ALIGN_SIZE);
					tmp = (unsigned long)ba->ba_0_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long) ALIGN_SIZE);
					ba->ba_0 = (void *) tmp;

					ba->ba_1_org = (void *) kmalloc
					    (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
					if (!ba->ba_1_org)
						return -ENOMEM;
					mem_allocated
						+= (BUF1_LEN + ALIGN_SIZE);
					tmp = (unsigned long) ba->ba_1_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long) ALIGN_SIZE);
					ba->ba_1 = (void *) tmp;
					k++;
				}
			}
		}
	}

	/* Allocation and initialization of Statistics block */
	size = sizeof(struct stat_block);
	mac_control->stats_mem = pci_alloc_consistent
	    (nic->pdev, size, &mac_control->stats_mem_phy);

	if (!mac_control->stats_mem) {
		/*
		 * In case of failure, free_shared_mem() is called, which
		 * should free any memory that was alloced till the
		 * failure happened.
		 */
		return -ENOMEM;
	}
	mem_allocated += size;
	mac_control->stats_mem_sz = size;

	tmp_v_addr = mac_control->stats_mem;
	mac_control->stats_info = (struct stat_block *) tmp_v_addr;
	memset(tmp_v_addr, 0, size);
	/* NOTE(review): tmp_p_addr here is stale — it still holds the last
	 * Rx block's DMA address from the interlinking loop, not the stats
	 * block's (that is in mac_control->stats_mem_phy). Debug print only,
	 * but the value shown is misleading.
	 */
	DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
		  (unsigned long long) tmp_p_addr);
	mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
	return SUCCESS;
}
/**
 * free_shared_mem - Free the allocated Memory
 * @nic: Device private variable.
 * Description: This function is to free all memory locations allocated by
 * the init_shared_mem() function and return it to the kernel.
 *
 * Safe to call after a partial init_shared_mem() failure: each section
 * bails out (break/return/continue) as soon as it meets a slot that was
 * never populated.
 */

static void free_shared_mem(struct s2io_nic *nic)
{
	int i, j, blk_cnt, size;
	void *tmp_v_addr;
	dma_addr_t tmp_p_addr;
	struct mac_info *mac_control;
	struct config_param *config;
	int lst_size, lst_per_page;
	struct net_device *dev;
	int page_num = 0;

	if (!nic)
		return;

	dev = nic->dev;

	mac_control = &nic->mac_control;
	config = &nic->config;

	lst_size = (sizeof(struct TxD) * config->max_txds);
	lst_per_page = PAGE_SIZE / lst_size;

	/* Tx descriptor list pages, then the per-fifo list_info arrays. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
					    lst_per_page);
		for (j = 0; j < page_num; j++) {
			int mem_blks = (j * lst_per_page);
			if (!mac_control->fifos[i].list_info)
				return;
			/* A NULL virt addr marks the first never-filled page:
			 * nothing beyond it was allocated for this fifo. */
			if (!mac_control->fifos[i].list_info[mem_blks].
			    list_virt_addr)
				break;
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    mac_control->fifos[i].
					    list_info[mem_blks].
					    list_virt_addr,
					    mac_control->fifos[i].
					    list_info[mem_blks].
					    list_phy_addr);
			nic->mac_control.stats_info->sw_stat.mem_freed
						+= PAGE_SIZE;
		}
		/* If we got a zero DMA address during allocation,
		 * free the page now
		 */
		if (mac_control->zerodma_virt_addr) {
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    mac_control->zerodma_virt_addr,
					    (dma_addr_t)0);
			DBG_PRINT(INIT_DBG,
				"%s: Freeing TxDL with zero DMA addr. ",
				dev->name);
			DBG_PRINT(INIT_DBG, "Virtual address %p\n",
				mac_control->zerodma_virt_addr);
			nic->mac_control.stats_info->sw_stat.mem_freed
						+= PAGE_SIZE;
		}
		kfree(mac_control->fifos[i].list_info);
		nic->mac_control.stats_info->sw_stat.mem_freed +=
		(nic->config.tx_cfg[i].fifo_len *sizeof(struct list_info_hold));
	}

	/* Rx blocks (DMA pages) and their rxd_info shadow arrays. */
	size = SIZE_OF_BLOCK;
	for (i = 0; i < config->rx_ring_num; i++) {
		blk_cnt = mac_control->rings[i].block_count;
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr = mac_control->rings[i].rx_blocks[j].
				block_virt_addr;
			tmp_p_addr = mac_control->rings[i].rx_blocks[j].
				block_dma_addr;
			if (tmp_v_addr == NULL)
				break;
			pci_free_consistent(nic->pdev, size,
					    tmp_v_addr, tmp_p_addr);
			nic->mac_control.stats_info->sw_stat.mem_freed += size;
			kfree(mac_control->rings[i].rx_blocks[j].rxds);
			nic->mac_control.stats_info->sw_stat.mem_freed +=
			( sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
		}
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Freeing buffer storage addresses in 2BUFF mode. */
		for (i = 0; i < config->rx_ring_num; i++) {
			blk_cnt = config->rx_cfg[i].num_rxd /
			    (rxd_count[nic->rxd_mode] + 1);
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;
				if (!mac_control->rings[i].ba[j])
					continue;
				while (k != rxd_count[nic->rxd_mode]) {
					struct buffAdd *ba =
						&mac_control->rings[i].ba[j][k];
					/* Free the *_org pointers; ba_0/ba_1
					 * are aligned views into them. */
					kfree(ba->ba_0_org);
					nic->mac_control.stats_info->sw_stat.\
					mem_freed += (BUF0_LEN + ALIGN_SIZE);
					kfree(ba->ba_1_org);
					nic->mac_control.stats_info->sw_stat.\
					mem_freed += (BUF1_LEN + ALIGN_SIZE);
					k++;
				}
				kfree(mac_control->rings[i].ba[j]);
				nic->mac_control.stats_info->sw_stat.mem_freed +=
					(sizeof(struct buffAdd) *
					(rxd_count[nic->rxd_mode] + 1));
			}
			kfree(mac_control->rings[i].ba);
			nic->mac_control.stats_info->sw_stat.mem_freed +=
			(sizeof(struct buffAdd *) * blk_cnt);
		}
	}

	/* UFO scratch arrays. */
	for (i = 0; i < nic->config.tx_fifo_num; i++) {
		if (mac_control->fifos[i].ufo_in_band_v) {
			nic->mac_control.stats_info->sw_stat.mem_freed
				+= (config->tx_cfg[i].fifo_len * sizeof(u64));
			kfree(mac_control->fifos[i].ufo_in_band_v);
		}
	}

	/* Statistics block last — the sw_stat counters above live in it. */
	if (mac_control->stats_mem) {
		nic->mac_control.stats_info->sw_stat.mem_freed +=
			mac_control->stats_mem_sz;
		pci_free_consistent(nic->pdev,
				    mac_control->stats_mem_sz,
				    mac_control->stats_mem,
				    mac_control->stats_mem_phy);
	}
}
1062 * s2io_verify_pci_mode -
1065 static int s2io_verify_pci_mode(struct s2io_nic *nic)
1067 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1068 register u64 val64 = 0;
1069 int mode;
1071 val64 = readq(&bar0->pci_mode);
1072 mode = (u8)GET_PCI_MODE(val64);
1074 if ( val64 & PCI_MODE_UNKNOWN_MODE)
1075 return -1; /* Unknown PCI mode */
1076 return mode;
1079 #define NEC_VENID 0x1033
1080 #define NEC_DEVID 0x0125
1081 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
1083 struct pci_dev *tdev = NULL;
1084 while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
1085 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
1086 if (tdev->bus == s2io_pdev->bus->parent) {
1087 pci_dev_put(tdev);
1088 return 1;
1092 return 0;
/* Bus clock in MHz, indexed by the PCI mode value extracted by
 * GET_PCI_MODE() (see s2io_print_pci_mode(), which does
 * config->bus_speed = bus_speed[mode]). */
static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
1097 * s2io_print_pci_mode -
1099 static int s2io_print_pci_mode(struct s2io_nic *nic)
1101 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1102 register u64 val64 = 0;
1103 int mode;
1104 struct config_param *config = &nic->config;
1106 val64 = readq(&bar0->pci_mode);
1107 mode = (u8)GET_PCI_MODE(val64);
1109 if ( val64 & PCI_MODE_UNKNOWN_MODE)
1110 return -1; /* Unknown PCI mode */
1112 config->bus_speed = bus_speed[mode];
1114 if (s2io_on_nec_bridge(nic->pdev)) {
1115 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
1116 nic->dev->name);
1117 return mode;
1120 if (val64 & PCI_MODE_32_BITS) {
1121 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
1122 } else {
1123 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
1126 switch(mode) {
1127 case PCI_MODE_PCI_33:
1128 DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
1129 break;
1130 case PCI_MODE_PCI_66:
1131 DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
1132 break;
1133 case PCI_MODE_PCIX_M1_66:
1134 DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
1135 break;
1136 case PCI_MODE_PCIX_M1_100:
1137 DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
1138 break;
1139 case PCI_MODE_PCIX_M1_133:
1140 DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
1141 break;
1142 case PCI_MODE_PCIX_M2_66:
1143 DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
1144 break;
1145 case PCI_MODE_PCIX_M2_100:
1146 DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
1147 break;
1148 case PCI_MODE_PCIX_M2_133:
1149 DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
1150 break;
1151 default:
1152 return -1; /* Unsupported bus speed */
1155 return mode;
1159 * init_tti - Initialization transmit traffic interrupt scheme
1160 * @nic: device private variable
1161 * @link: link status (UP/DOWN) used to enable/disable continuous
1162 * transmit interrupts
1163 * Description: The function configures transmit traffic interrupts
1164 * Return Value: SUCCESS on success and
1165 * '-1' on failure
1168 static int init_tti(struct s2io_nic *nic, int link)
1170 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1171 register u64 val64 = 0;
1172 int i;
1173 struct config_param *config;
1175 config = &nic->config;
1177 for (i = 0; i < config->tx_fifo_num; i++) {
1179 * TTI Initialization. Default Tx timer gets us about
1180 * 250 interrupts per sec. Continuous interrupts are enabled
1181 * by default.
1183 if (nic->device_type == XFRAME_II_DEVICE) {
1184 int count = (nic->config.bus_speed * 125)/2;
1185 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1186 } else
1187 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1189 val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1190 TTI_DATA1_MEM_TX_URNG_B(0x10) |
1191 TTI_DATA1_MEM_TX_URNG_C(0x30) |
1192 TTI_DATA1_MEM_TX_TIMER_AC_EN;
1193 if (i == 0)
1194 if (use_continuous_tx_intrs && (link == LINK_UP))
1195 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1196 writeq(val64, &bar0->tti_data1_mem);
1198 if (nic->config.intr_type == MSI_X) {
1199 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1200 TTI_DATA2_MEM_TX_UFC_B(0x100) |
1201 TTI_DATA2_MEM_TX_UFC_C(0x200) |
1202 TTI_DATA2_MEM_TX_UFC_D(0x300);
1203 } else {
1204 if ((nic->config.tx_steering_type ==
1205 TX_DEFAULT_STEERING) &&
1206 (config->tx_fifo_num > 1) &&
1207 (i >= nic->udp_fifo_idx) &&
1208 (i < (nic->udp_fifo_idx +
1209 nic->total_udp_fifos)))
1210 val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
1211 TTI_DATA2_MEM_TX_UFC_B(0x80) |
1212 TTI_DATA2_MEM_TX_UFC_C(0x100) |
1213 TTI_DATA2_MEM_TX_UFC_D(0x120);
1214 else
1215 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1216 TTI_DATA2_MEM_TX_UFC_B(0x20) |
1217 TTI_DATA2_MEM_TX_UFC_C(0x40) |
1218 TTI_DATA2_MEM_TX_UFC_D(0x80);
1221 writeq(val64, &bar0->tti_data2_mem);
1223 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD |
1224 TTI_CMD_MEM_OFFSET(i);
1225 writeq(val64, &bar0->tti_command_mem);
1227 if (wait_for_cmd_complete(&bar0->tti_command_mem,
1228 TTI_CMD_MEM_STROBE_NEW_CMD, S2IO_BIT_RESET) != SUCCESS)
1229 return FAILURE;
1232 return SUCCESS;
/**
 *  init_nic - Initialization of hardware
 *  @nic: device private variable
 *  Description: The function sequentially configures every block
 *  of the H/W from their reset values.
 *  NOTE(review): the ordering of the register writes below is
 *  hardware-mandated (e.g. Herc EOI removal before XGXS reset,
 *  swapper setup before everything else) — do not reorder.
 *  Return Value:  SUCCESS on success and
 *  '-1' on failure (endian settings incorrect).
 */

static int init_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	void __iomem *add;
	u32 time;
	int i, j;
	struct mac_info *mac_control;
	struct config_param *config;
	int dtx_cnt = 0;
	unsigned long long mem_share;
	int mem_size;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* to set the swapper controle on the card */
	if(s2io_set_swapper(nic)) {
		DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
		return -EIO;
	}

	/*
	 * Herc requires EOI to be removed from reset before XGXS, so..
	 */
	if (nic->device_type & XFRAME_II_DEVICE) {
		val64 = 0xA500000000ULL;
		writeq(val64, &bar0->sw_reset);
		msleep(500);
		val64 = readq(&bar0->sw_reset);
	}

	/* Remove XGXS from reset state */
	val64 = 0;
	writeq(val64, &bar0->sw_reset);
	msleep(500);
	val64 = readq(&bar0->sw_reset);

	/* Ensure that it's safe to access registers by checking
	 * RIC_RUNNING bit is reset. Check is valid only for XframeII.
	 */
	if (nic->device_type == XFRAME_II_DEVICE) {
		/* Poll up to 50 times with 10ms sleeps (~500ms total). */
		for (i = 0; i < 50; i++) {
			val64 = readq(&bar0->adapter_status);
			if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
				break;
			msleep(10);
		}
		if (i == 50)
			return -ENODEV;
	}

	/* Enable Receiving broadcasts.
	 * NOTE(review): mac_cfg appears to be key-protected — each 32-bit
	 * half write is preceded by writing RMAC_CFG_KEY; verify against
	 * the Xframe register manual. */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 |= MAC_RMAC_BCAST_ENABLE;
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) val64, add);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64 >> 32), (add + 4));

	/* Read registers in all blocks */
	val64 = readq(&bar0->mac_int_mask);
	val64 = readq(&bar0->mc_int_mask);
	val64 = readq(&bar0->xgxs_int_mask);

	/*  Set MTU */
	val64 = dev->mtu;
	writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);

	/* Program the device transceiver (DTX) configuration sequence;
	 * tables are device-specific and END_SIGN-terminated. */
	if (nic->device_type & XFRAME_II_DEVICE) {
		while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
			SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
					  &bar0->dtx_control, UF);
			if (dtx_cnt & 0x1)
				msleep(1); /* Necessary!! */
			dtx_cnt++;
		}
	} else {
		while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
			SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
					  &bar0->dtx_control, UF);
			val64 = readq(&bar0->dtx_control);
			dtx_cnt++;
		}
	}

	/*  Tx DMA Initialization */
	val64 = 0;
	writeq(val64, &bar0->tx_fifo_partition_0);
	writeq(val64, &bar0->tx_fifo_partition_1);
	writeq(val64, &bar0->tx_fifo_partition_2);
	writeq(val64, &bar0->tx_fifo_partition_3);

	/* Each partition register holds two FIFO descriptors (j toggles the
	 * 32-bit half); flush the accumulated value at FIFOs 1/3/5/7. */
	for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
		val64 |=
		    vBIT(config->tx_cfg[i].fifo_len - 1, ((j * 32) + 19),
			 13) | vBIT(config->tx_cfg[i].fifo_priority,
				    ((j * 32) + 5), 3);

		/* For an odd FIFO count, bump i on the last iteration so the
		 * switch below still hits a flush case. */
		if (i == (config->tx_fifo_num - 1)) {
			if (i % 2 == 0)
				i++;
		}

		switch (i) {
		case 1:
			writeq(val64, &bar0->tx_fifo_partition_0);
			val64 = 0;
			j = 0;
			break;
		case 3:
			writeq(val64, &bar0->tx_fifo_partition_1);
			val64 = 0;
			j = 0;
			break;
		case 5:
			writeq(val64, &bar0->tx_fifo_partition_2);
			val64 = 0;
			j = 0;
			break;
		case 7:
			writeq(val64, &bar0->tx_fifo_partition_3);
			val64 = 0;
			j = 0;
			break;
		default:
			j++;
			break;
		}
	}

	/*
	 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
	 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
	 */
	if ((nic->device_type == XFRAME_I_DEVICE) &&
		(nic->pdev->revision < 4))
		writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);

	val64 = readq(&bar0->tx_fifo_partition_0);
	DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
		  &bar0->tx_fifo_partition_0, (unsigned long long) val64);

	/*
	 * Initialization of Tx_PA_CONFIG register to ignore packet
	 * integrity checking.
	 */
	val64 = readq(&bar0->tx_pa_cfg);
	val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
	    TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
	writeq(val64, &bar0->tx_pa_cfg);

	/* Rx DMA intialization. */
	val64 = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		val64 |=
		    vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
			 3);
	}
	writeq(val64, &bar0->rx_queue_priority);

	/*
	 * Allocating equal share of memory to all the
	 * configured Rings. Ring 0 also absorbs the remainder.
	 */
	val64 = 0;
	if (nic->device_type & XFRAME_II_DEVICE)
		mem_size = 32;
	else
		mem_size = 64;

	for (i = 0; i < config->rx_ring_num; i++) {
		switch (i) {
		case 0:
			mem_share = (mem_size / config->rx_ring_num +
				     mem_size % config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
			continue;
		case 1:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
			continue;
		case 2:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
			continue;
		case 3:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
			continue;
		case 4:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
			continue;
		case 5:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
			continue;
		case 6:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
			continue;
		case 7:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
			continue;
		}
	}
	writeq(val64, &bar0->rx_queue_cfg);

	/*
	 * Filling Tx round robin registers
	 * as per the number of FIFOs for equal scheduling priority
	 */
	switch (config->tx_fifo_num) {
	case 1:
		val64 = 0x0;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 2:
		val64 = 0x0001000100010001ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001000100000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 3:
		val64 = 0x0001020001020001ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0200010200010200ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0102000102000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001020001020001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0200010200000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 4:
		val64 = 0x0001020300010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001020300000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 5:
		val64 = 0x0001020304000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0304000102030400ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0102030400010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0400010203040001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0203040000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 6:
		val64 = 0x0001020304050001ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0203040500010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0405000102030405ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001020304050001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0203040500000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 7:
		val64 = 0x0001020304050600ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0102030405060001ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0203040506000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0304050600010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0405060000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 8:
		val64 = 0x0001020304050607ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001020300000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	}

	/* Enable all configured Tx FIFO partitions */
	val64 = readq(&bar0->tx_fifo_partition_0);
	val64 |= (TX_FIFO_PARTITION_EN);
	writeq(val64, &bar0->tx_fifo_partition_0);

	/* Filling the Rx round robin registers as per the
	 * number of Rings and steering based on QoS with
	 * equal priority.
	 */
	switch (config->rx_ring_num) {
	case 1:
		val64 = 0x0;
		writeq(val64, &bar0->rx_w_round_robin_0);
		writeq(val64, &bar0->rx_w_round_robin_1);
		writeq(val64, &bar0->rx_w_round_robin_2);
		writeq(val64, &bar0->rx_w_round_robin_3);
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080808080808080ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 2:
		val64 = 0x0001000100010001ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		writeq(val64, &bar0->rx_w_round_robin_1);
		writeq(val64, &bar0->rx_w_round_robin_2);
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001000100000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080808040404040ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 3:
		val64 = 0x0001020001020001ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0200010200010200ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0102000102000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001020001020001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0200010200000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080804040402020ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 4:
		val64 = 0x0001020300010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		writeq(val64, &bar0->rx_w_round_robin_1);
		writeq(val64, &bar0->rx_w_round_robin_2);
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001020300000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020201010ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 5:
		val64 = 0x0001020304000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0304000102030400ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0102030400010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0400010203040001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0203040000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020201008ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 6:
		val64 = 0x0001020304050001ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0203040500010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0405000102030405ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001020304050001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0203040500000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020100804ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 7:
		val64 = 0x0001020304050600ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0102030405060001ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0203040506000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0304050600010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0405060000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080402010080402ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 8:
		val64 = 0x0001020304050607ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		writeq(val64, &bar0->rx_w_round_robin_1);
		writeq(val64, &bar0->rx_w_round_robin_2);
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001020300000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8040201008040201ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	}

	/* UDP Fix */
	val64 = 0;
	for (i = 0; i < 8; i++)
		writeq(val64, &bar0->rts_frm_len_n[i]);

	/* Set the default rts frame length for the rings configured */
	val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
	for (i = 0 ; i < config->rx_ring_num ; i++)
		writeq(val64, &bar0->rts_frm_len_n[i]);

	/* Set the frame length for the configured rings
	 * desired by the user
	 */
	for (i = 0; i < config->rx_ring_num; i++) {
		/* If rts_frm_len[i] == 0 then it is assumed that user not
		 * specified frame length steering.
		 * If the user provides the frame length then program
		 * the rts_frm_len register for those values or else
		 * leave it as it is.
		 */
		if (rts_frm_len[i] != 0) {
			writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
				&bar0->rts_frm_len_n[i]);
		}
	}

	/* Disable differentiated services steering logic */
	for (i = 0; i < 64; i++) {
		if (rts_ds_steer(nic, i, 0) == FAILURE) {
			DBG_PRINT(ERR_DBG, "%s: failed rts ds steering",
				dev->name);
			DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i);
			return -ENODEV;
		}
	}

	/* Program statistics memory */
	writeq(mac_control->stats_mem_phy, &bar0->stat_addr);

	if (nic->device_type == XFRAME_II_DEVICE) {
		val64 = STAT_BC(0x320);
		writeq(val64, &bar0->stat_byte_cnt);
	}

	/*
	 * Initializing the sampling rate for the device to calculate the
	 * bandwidth utilization.
	 */
	val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
	    MAC_RX_LINK_UTIL_VAL(rmac_util_period);
	writeq(val64, &bar0->mac_link_util);

	/*
	 * Initializing the Transmit and Receive Traffic Interrupt
	 * Scheme.
	 */

	/* Initialize TTI */
	if (SUCCESS != init_tti(nic, nic->last_link_state))
		return -ENODEV;

	/* RTI Initialization */
	if (nic->device_type == XFRAME_II_DEVICE) {
		/*
		 * Programmed to generate Apprx 500 Intrs per
		 * second
		 */
		int count = (nic->config.bus_speed * 125)/4;
		val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
	} else
		val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
	val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
		 RTI_DATA1_MEM_RX_URNG_B(0x10) |
		 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;

	writeq(val64, &bar0->rti_data1_mem);

	val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
		RTI_DATA2_MEM_RX_UFC_B(0x2) ;
	if (nic->config.intr_type == MSI_X)
	    val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
			RTI_DATA2_MEM_RX_UFC_D(0x40));
	else
	    val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
			RTI_DATA2_MEM_RX_UFC_D(0x80));
	writeq(val64, &bar0->rti_data2_mem);

	for (i = 0; i < config->rx_ring_num; i++) {
		val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
				| RTI_CMD_MEM_OFFSET(i);
		writeq(val64, &bar0->rti_command_mem);

		/*
		 * Once the operation completes, the Strobe bit of the
		 * command register will be reset. We poll for this
		 * particular condition. We wait for a maximum of 500ms
		 * for the operation to complete, if it's not complete
		 * by then we return error.
		 */
		time = 0;
		while (TRUE) {
			val64 = readq(&bar0->rti_command_mem);
			if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
				break;

			if (time > 10) {
				DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
					  dev->name);
				return -ENODEV;
			}
			time++;
			msleep(50);
		}
	}

	/*
	 * Initializing proper values as Pause threshold into all
	 * the 8 Queues on Rx side.
	 */
	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);

	/* Disable RMAC PAD STRIPPING */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64), add);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64 >> 32), (add + 4));
	val64 = readq(&bar0->mac_cfg);

	/* Enable FCS stripping by adapter */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 |= MAC_CFG_RMAC_STRIP_FCS;
	if (nic->device_type == XFRAME_II_DEVICE)
		writeq(val64, &bar0->mac_cfg);
	else {
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64), add);
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64 >> 32), (add + 4));
	}

	/*
	 * Set the time value to be inserted in the pause frame
	 * generated by xena.
	 */
	val64 = readq(&bar0->rmac_pause_cfg);
	val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
	val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
	writeq(val64, &bar0->rmac_pause_cfg);

	/*
	 * Set the Threshold Limit for Generating the pause frame
	 * If the amount of data in any Queue exceeds ratio of
	 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
	 * pause frame is generated
	 */
	val64 = 0;
	for (i = 0; i < 4; i++) {
		val64 |=
		    (((u64) 0xFF00 | nic->mac_control.
		      mc_pause_threshold_q0q3)
		     << (i * 2 * 8));
	}
	writeq(val64, &bar0->mc_pause_thresh_q0q3);

	val64 = 0;
	for (i = 0; i < 4; i++) {
		val64 |=
		    (((u64) 0xFF00 | nic->mac_control.
		      mc_pause_threshold_q4q7)
		     << (i * 2 * 8));
	}
	writeq(val64, &bar0->mc_pause_thresh_q4q7);

	/*
	 * TxDMA will stop Read request if the number of read split has
	 * exceeded the limit pointed by shared_splits
	 */
	val64 = readq(&bar0->pic_control);
	val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
	writeq(val64, &bar0->pic_control);

	if (nic->config.bus_speed == 266) {
		writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
		writeq(0x0, &bar0->read_retry_delay);
		writeq(0x0, &bar0->write_retry_delay);
	}

	/*
	 * Programming the Herc to split every write transaction
	 * that does not start on an ADB to reduce disconnects.
	 */
	if (nic->device_type == XFRAME_II_DEVICE) {
		val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
			MISC_LINK_STABILITY_PRD(3);
		writeq(val64, &bar0->misc_control);
		val64 = readq(&bar0->pic_control2);
		val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
		writeq(val64, &bar0->pic_control2);
	}
	if (strstr(nic->product_name, "CX4")) {
		val64 = TMAC_AVG_IPG(0x17);
		writeq(val64, &bar0->tmac_avg_ipg);
	}

	return SUCCESS;
}
1877 #define LINK_UP_DOWN_INTERRUPT 1
1878 #define MAC_RMAC_ERR_TIMER 2
1880 static int s2io_link_fault_indication(struct s2io_nic *nic)
1882 if (nic->device_type == XFRAME_II_DEVICE)
1883 return LINK_UP_DOWN_INTERRUPT;
1884 else
1885 return MAC_RMAC_ERR_TIMER;
1889 * do_s2io_write_bits - update alarm bits in alarm register
1890 * @value: alarm bits
1891 * @flag: interrupt status
1892 * @addr: address value
1893 * Description: update alarm bits in alarm register
1894 * Return Value:
1895 * NONE.
1897 static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1899 u64 temp64;
1901 temp64 = readq(addr);
1903 if(flag == ENABLE_INTRS)
1904 temp64 &= ~((u64) value);
1905 else
1906 temp64 |= ((u64) value);
1907 writeq(temp64, addr);
/*
 * en_dis_err_alarms - enable or disable the per-block error alarm sources
 * selected by @mask, via do_s2io_write_bits() on each block's error mask
 * register. All top-level interrupts are masked first; the accumulated
 * general mask is stored in nic->general_int_mask (and then forced to 0,
 * see the trailing comment).
 */
static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 gen_int_mask = 0;
	u64 interruptible;

	/* Mask everything at the top level before touching the
	 * per-block alarm masks. */
	writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask);
	if (mask & TX_DMA_INTR) {

		gen_int_mask |= TXDMA_INT_M;

		do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
				TXDMA_PCC_INT | TXDMA_TTI_INT |
				TXDMA_LSO_INT | TXDMA_TPA_INT |
				TXDMA_SM_INT, flag, &bar0->txdma_int_mask);

		do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
				PFC_MISC_0_ERR | PFC_MISC_1_ERR |
				PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
				&bar0->pfc_err_mask);

		do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
				TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
				TDA_PCIX_ERR, flag, &bar0->tda_err_mask);

		do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
				PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
				PCC_N_SERR | PCC_6_COF_OV_ERR |
				PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
				PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
				PCC_TXB_ECC_SG_ERR, flag, &bar0->pcc_err_mask);

		do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
				TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);

		do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
				LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
				LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
				flag, &bar0->lso_err_mask);

		do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
				flag, &bar0->tpa_err_mask);

		do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);

	}

	if (mask & TX_MAC_INTR) {
		gen_int_mask |= TXMAC_INT_M;
		do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
				&bar0->mac_int_mask);
		do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
				TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
				TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
				flag, &bar0->mac_tmac_err_mask);
	}

	if (mask & TX_XGXS_INTR) {
		gen_int_mask |= TXXGXS_INT_M;
		do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
				&bar0->xgxs_int_mask);
		do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
				TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
				flag, &bar0->xgxs_txgxs_err_mask);
	}

	if (mask & RX_DMA_INTR) {
		gen_int_mask |= RXDMA_INT_M;
		do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
				RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
				flag, &bar0->rxdma_int_mask);
		do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
				RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
				RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
				RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
		do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
				PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
				PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
				&bar0->prc_pcix_err_mask);
		do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
				RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
				&bar0->rpa_err_mask);
		do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
				RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
				RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
				RDA_FRM_ECC_SG_ERR | RDA_MISC_ERR|RDA_PCIX_ERR,
				flag, &bar0->rda_err_mask);
		do_s2io_write_bits(RTI_SM_ERR_ALARM |
				RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
				flag, &bar0->rti_err_mask);
	}

	if (mask & RX_MAC_INTR) {
		gen_int_mask |= RXMAC_INT_M;
		do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
				&bar0->mac_int_mask);
		interruptible = RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
				RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
				RMAC_DOUBLE_ECC_ERR;
		/* Only timer-based link fault schemes use the RMAC
		 * link-state-change alarm. */
		if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER)
			interruptible |= RMAC_LINK_STATE_CHANGE_INT;
		do_s2io_write_bits(interruptible,
				flag, &bar0->mac_rmac_err_mask);
	}

	if (mask & RX_XGXS_INTR)
	{
		gen_int_mask |= RXXGXS_INT_M;
		do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
				&bar0->xgxs_int_mask);
		do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
				&bar0->xgxs_rxgxs_err_mask);
	}

	if (mask & MC_INTR) {
		gen_int_mask |= MC_INT_M;
		do_s2io_write_bits(MC_INT_MASK_MC_INT, flag, &bar0->mc_int_mask);
		do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
				MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
				&bar0->mc_err_mask);
	}
	nic->general_int_mask = gen_int_mask;

	/* Remove this line when alarm interrupts are enabled */
	nic->general_int_mask = 0;
}
/**
 *  en_dis_able_nic_intrs - Enable or Disable the interrupts
 *  @nic: device private variable,
 *  @mask: A mask indicating which Intr block must be modified and,
 *  @flag: A flag indicating whether to enable or disable the Intrs.
 *  Description: This function will either disable or enable the interrupts
 *  depending on the flag argument. The mask argument can be used to
 *  enable/disable any Intr block.
 *  Return Value: NONE.
 */

static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 temp64 = 0, intr_mask = 0;

	/* Start from the alarm mask computed by en_dis_err_alarms(). */
	intr_mask = nic->general_int_mask;

	/*  Top level interrupt classification */
	/*  PIC Interrupts */
	if (mask & TX_PIC_INTR) {
		/*  Enable PIC Intrs in the general intr mask register */
		intr_mask |= TXPIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/*
			 * If Hercules adapter enable GPIO otherwise
			 * disable all PCIX, Flash, MDIO, IIC and GPIO
			 * interrupts for now.
			 * TODO
			 */
			if (s2io_link_fault_indication(nic) ==
					LINK_UP_DOWN_INTERRUPT ) {
				do_s2io_write_bits(PIC_INT_GPIO, flag,
						&bar0->pic_int_mask);
				do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
						&bar0->gpio_int_mask);
			} else
				writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable PIC Intrs in the general
			 * intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
		}
	}

	/*  Tx traffic interrupts */
	if (mask & TX_TRAFFIC_INTR) {
		intr_mask |= TXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/*
			 * Enable all the Tx side interrupts
			 * writing 0 Enables all 64 TX interrupt levels
			 */
			writeq(0x0, &bar0->tx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Tx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
		}
	}

	/*  Rx traffic interrupts */
	if (mask & RX_TRAFFIC_INTR) {
		intr_mask |= RXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/* writing 0 Enables all 8 RX interrupt levels */
			writeq(0x0, &bar0->rx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Rx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
		}
	}

	/* Apply the accumulated mask: clearing bits enables the
	 * corresponding top-level sources; disabling masks everything. */
	temp64 = readq(&bar0->general_int_mask);
	if (flag == ENABLE_INTRS)
		temp64 &= ~((u64) intr_mask);
	else
		temp64 = DISABLE_ALL_INTRS;
	writeq(temp64, &bar0->general_int_mask);

	nic->general_int_mask = readq(&bar0->general_int_mask);
}
2127 * verify_pcc_quiescent- Checks for PCC quiescent state
2128 * Return: 1 If PCC is quiescence
2129 * 0 If PCC is not quiescence
2131 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
2133 int ret = 0, herc;
2134 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2135 u64 val64 = readq(&bar0->adapter_status);
2137 herc = (sp->device_type == XFRAME_II_DEVICE);
2139 if (flag == FALSE) {
2140 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2141 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
2142 ret = 1;
2143 } else {
2144 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2145 ret = 1;
2147 } else {
2148 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2149 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
2150 ADAPTER_STATUS_RMAC_PCC_IDLE))
2151 ret = 1;
2152 } else {
2153 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
2154 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2155 ret = 1;
2159 return ret;
/**
 *  verify_xena_quiescence - Checks whether the H/W is ready
 *  Description: Returns whether the H/W is ready to go or not. Depending
 *  on whether adapter enable bit was written or not the comparison
 *  differs and the calling function passes the input argument flag to
 *  indicate this.
 *  Return: 1 If xena is quiescence
 *          0 If Xena is not quiescence
 */
static int verify_xena_quiescence(struct s2io_nic *sp)
{
	int mode;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = readq(&bar0->adapter_status);
	mode = s2io_verify_pci_mode(sp);

	/* Each check below prints a specific diagnostic and bails on the
	 * first subsystem that is not ready, so the order also determines
	 * which message the user sees. */
	if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
		DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
		DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
		DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
		DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
		DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
		DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
		DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
		DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
		return 0;
	}

	/*
	 * In PCI 33 mode, the P_PLL is not used, and therefore,
	 * the P_PLL_LOCK bit in the adapter_status register will
	 * not be asserted.
	 */
	if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
		sp->device_type == XFRAME_II_DEVICE && mode !=
		PCI_MODE_PCI_33) {
		DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
		return 0;
	}
	/* RC_PRC must have every bit of the field set, not just any bit. */
	if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
			ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
		DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
		return 0;
	}
	return 1;
}
2231 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
2232 * @sp: Pointer to device specifc structure
2233 * Description :
2234 * New procedure to clear mac address reading problems on Alpha platforms
2238 static void fix_mac_address(struct s2io_nic * sp)
2240 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2241 u64 val64;
2242 int i = 0;
2244 while (fix_mac[i] != END_SIGN) {
2245 writeq(fix_mac[i++], &bar0->gpio_control);
2246 udelay(10);
2247 val64 = readq(&bar0->gpio_control);
/**
 *  start_nic - Turns the device on
 *  @nic : device private variable.
 *  Description:
 *  This function actually turns the device on. Before this function is
 *  called,all Registers are configured from their reset states
 *  and shared memory is allocated but the NIC is still quiescent. On
 *  calling this function, the device interrupts are cleared and the NIC is
 *  literally switched on by writing into the adapter control register.
 *  Return Value:
 *  SUCCESS on success and -1 on failure.
 */

static int start_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	u16 subid, i;
	struct mac_info *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/*  PRC Initialization and configuration: point each receive ring's
	 *  PRC at its first descriptor block and set its control bits. */
	for (i = 0; i < config->rx_ring_num; i++) {
		writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
		       &bar0->prc_rxd0_n[i]);

		val64 = readq(&bar0->prc_ctrl_n[i]);
		if (nic->rxd_mode == RXD_MODE_1)
			val64 |= PRC_CTRL_RC_ENABLED;
		else
			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
		if (nic->device_type == XFRAME_II_DEVICE)
			val64 |= PRC_CTRL_GROUP_READS;
		/* Replace the whole backoff-interval field with 0x1000. */
		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
		writeq(val64, &bar0->prc_ctrl_n[i]);
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
		writeq(val64, &bar0->rx_pa_cfg);
	}

	/* Honour the vlan_tag_strip module parameter. */
	if (vlan_tag_strip == 0) {
		val64 = readq(&bar0->rx_pa_cfg);
		val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
		writeq(val64, &bar0->rx_pa_cfg);
		nic->vlan_strip_flag = 0;
	}

	/*
	 * Enabling MC-RLDRAM. After enabling the device, we timeout
	 * for around 100ms, which is approximately the time required
	 * for the device to be ready for operation.
	 */
	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
	val64 = readq(&bar0->mc_rldram_mrs);

	msleep(100);	/* Delay by around 100 ms. */

	/* Enabling ECC Protection.
	 * NOTE(review): the bit is CLEARED here although the comment says
	 * "enabling" — ADAPTER_ECC_EN may be an active-low / disable bit;
	 * confirm against the Xframe register specification. */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/*
	 * Verify if the device is ready to be enabled, if so enable
	 * it.
	 */
	val64 = readq(&bar0->adapter_status);
	if (!verify_xena_quiescence(nic)) {
		DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
		DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
			  (unsigned long long) val64);
		return FAILURE;
	}

	/*
	 * With some switches, link might be already up at this point.
	 * Because of this weird behavior, when we enable laser,
	 * we may not get link. We need to handle this. We cannot
	 * figure out which switch is misbehaving. So we are forced to
	 * make a global change.
	 */

	/* Enabling Laser. */
	val64 = readq(&bar0->adapter_control);
	val64 |= ADAPTER_EOI_TX_ON;
	writeq(val64, &bar0->adapter_control);

	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Dont see link state interrupts initally on some switches,
		 * so directly scheduling the link state task here.
		 */
		schedule_work(&nic->set_link_task);
	}
	/* SXE-002: Initialize link and activity LED */
	subid = nic->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (nic->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		/* Magic LED programming value at BAR0 offset 0x2700;
		 * meaning not documented here — see SXE-002 errata. */
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	return SUCCESS;
}
/**
 *  s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
 *  @fifo_data: fifo the descriptor list belongs to
 *  @txdlp: first TxD of the descriptor list
 *  @get_off: offset of the list within the fifo (unused here)
 *  Description: Unmaps every DMA buffer referenced by the descriptor
 *  list, zeroes the list, and returns the skb it carried (NULL if none).
 */
static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
					struct TxD *txdlp, int get_off)
{
	struct s2io_nic *nic = fifo_data->nic;
	struct sk_buff *skb;
	struct TxD *txds;
	u16 j, frg_cnt;

	txds = txdlp;
	/* A leading UFO in-band descriptor holds a u64 marker, not packet
	 * data: unmap it and step to the real first descriptor. */
	if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
		pci_unmap_single(nic->pdev, (dma_addr_t)
			txds->Buffer_Pointer, sizeof(u64),
			PCI_DMA_TODEVICE);
		txds++;
	}

	skb = (struct sk_buff *) ((unsigned long)
			txds->Host_Control);
	if (!skb) {
		/* No skb attached: just scrub the whole list. */
		memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
		return NULL;
	}
	/* Unmap the linear part (len - data_len bytes). */
	pci_unmap_single(nic->pdev, (dma_addr_t)
			 txds->Buffer_Pointer,
			 skb->len - skb->data_len,
			 PCI_DMA_TODEVICE);
	/* Then unmap one descriptor per page fragment. */
	frg_cnt = skb_shinfo(skb)->nr_frags;
	if (frg_cnt) {
		txds++;
		for (j = 0; j < frg_cnt; j++, txds++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			if (!txds->Buffer_Pointer)
				break;
			pci_unmap_page(nic->pdev, (dma_addr_t)
					txds->Buffer_Pointer,
				       frag->size, PCI_DMA_TODEVICE);
		}
	}
	memset(txdlp,0, (sizeof(struct TxD) * fifo_data->max_txds));
	return(skb);
}
/**
 *  free_tx_buffers - Free all queued Tx buffers
 *  @nic : device private variable.
 *  Description:
 *  Free all queued Tx buffers.
 *  Return Value: void
 */

static void free_tx_buffers(struct s2io_nic *nic)
{
	struct net_device *dev = nic->dev;
	struct sk_buff *skb;
	struct TxD *txdp;
	int i, j;
	struct mac_info *mac_control;
	struct config_param *config;
	int cnt = 0;

	mac_control = &nic->mac_control;
	config = &nic->config;

	for (i = 0; i < config->tx_fifo_num; i++) {
		unsigned long flags;

		/* Hold the per-fifo tx lock while walking its list_info. */
		spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags);
		for (j = 0; j < config->tx_cfg[i].fifo_len; j++) {
			txdp = (struct TxD *)
			    mac_control->fifos[i].list_info[j].list_virt_addr;
			skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
			if (skb) {
				nic->mac_control.stats_info->sw_stat.mem_freed
					+= skb->truesize;
				dev_kfree_skb(skb);
				cnt++;
			}
		}
		/* NOTE(review): cnt is never reset between fifos, so this
		 * message reports a running total, not a per-FIFO count. */
		DBG_PRINT(INTR_DBG,
			  "%s:forcibly freeing %d skbs on FIFO%d\n",
			  dev->name, cnt, i);
		mac_control->fifos[i].tx_curr_get_info.offset = 0;
		mac_control->fifos[i].tx_curr_put_info.offset = 0;
		spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock, flags);
	}
}
2459 * stop_nic - To stop the nic
2460 * @nic ; device private variable.
2461 * Description:
2462 * This function does exactly the opposite of what the start_nic()
2463 * function does. This function is called to stop the device.
2464 * Return Value:
2465 * void.
2468 static void stop_nic(struct s2io_nic *nic)
2470 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2471 register u64 val64 = 0;
2472 u16 interruptible;
2473 struct mac_info *mac_control;
2474 struct config_param *config;
2476 mac_control = &nic->mac_control;
2477 config = &nic->config;
2479 /* Disable all interrupts */
2480 en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2481 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2482 interruptible |= TX_PIC_INTR;
2483 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2485 /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2486 val64 = readq(&bar0->adapter_control);
2487 val64 &= ~(ADAPTER_CNTL_EN);
2488 writeq(val64, &bar0->adapter_control);
/**
 *  fill_rx_buffers - Allocates the Rx side skbs
 *  @ring_info: per ring structure
 *  @from_card_up: If this is true, we will map the buffer to get
 *     the dma address for buf0 and buf1 to give it to the card.
 *     Else we will sync the already mapped buffer to give it to the card.
 *  Description:
 *  The function allocates Rx side skbs and puts the physical
 *  address of these buffers into the RxD buffer pointers, so that the NIC
 *  can DMA the received frame into these locations.
 *  The NIC supports 3 receive modes, viz
 *  1. single buffer,
 *  2. three buffer and
 *  3. Five buffer modes.
 *  Each mode defines how many fragments the received frame will be split
 *  up into by the NIC. The frame is split into L3 header, L4 Header,
 *  L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
 *  is split into 3 fragments. As of now only single buffer mode is
 *  supported.
 *   Return Value:
 *  SUCCESS on success or an appropriate -ve value on failure.
 */
static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
				int from_card_up)
{
	struct sk_buff *skb;
	struct RxD_t *rxdp;
	int off, size, block_no, block_no1;
	u32 alloc_tab = 0;
	u32 alloc_cnt;
	u64 tmp;
	struct buffAdd *ba;
	struct RxD_t *first_rxdp = NULL;
	u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
	int rxd_index = 0;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;
	struct swStat *stats = &ring->nic->mac_control.stats_info->sw_stat;

	/* Number of descriptors that still need a buffer. */
	alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;

	block_no1 = ring->rx_curr_get_info.block_index;
	while (alloc_tab < alloc_cnt) {
		block_no = ring->rx_curr_put_info.block_index;

		off = ring->rx_curr_put_info.offset;

		rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;

		rxd_index = off + 1;
		if (block_no)
			rxd_index += (block_no * ring->rxd_count);

		/* Put index catching up with get index while the descriptor
		 * still holds an skb means the ring is full. */
		if ((block_no == block_no1) &&
			(off == ring->rx_curr_get_info.offset) &&
			(rxdp->Host_Control)) {
			DBG_PRINT(INTR_DBG, "%s: Get and Put",
				ring->dev->name);
			DBG_PRINT(INTR_DBG, " info equated\n");
			goto end;
		}
		/* Wrap to the next descriptor block when the current one
		 * is exhausted. */
		if (off && (off == ring->rxd_count)) {
			ring->rx_curr_put_info.block_index++;
			if (ring->rx_curr_put_info.block_index ==
							ring->block_count)
				ring->rx_curr_put_info.block_index = 0;
			block_no = ring->rx_curr_put_info.block_index;
			off = 0;
			ring->rx_curr_put_info.offset = off;
			rxdp = ring->rx_blocks[block_no].block_virt_addr;
			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
				  ring->dev->name, rxdp);
		}

		/* Stop if the adapter still owns this descriptor. */
		if ((rxdp->Control_1 & RXD_OWN_XENA) &&
			((ring->rxd_mode == RXD_MODE_3B) &&
				(rxdp->Control_2 & s2BIT(0)))) {
			ring->rx_curr_put_info.offset = off;
			goto end;
		}
		/* calculate size of skb based on ring mode */
		size = ring->mtu + HEADER_ETHERNET_II_802_3_SIZE +
				HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
		if (ring->rxd_mode == RXD_MODE_1)
			size += NET_IP_ALIGN;
		else
			size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;

		/* allocate skb */
		skb = dev_alloc_skb(size);
		if(!skb) {
			DBG_PRINT(INFO_DBG, "%s: Out of ", ring->dev->name);
			DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n");
			/* Hand any batched descriptors to the NIC before
			 * bailing so they are not lost. */
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			stats->mem_alloc_fail_cnt++;

			return -ENOMEM ;
		}
		stats->mem_allocated += skb->truesize;

		if (ring->rxd_mode == RXD_MODE_1) {
			/* 1 buffer mode - normal operation mode */
			rxdp1 = (struct RxD1*)rxdp;
			memset(rxdp, 0, sizeof(struct RxD1));
			skb_reserve(skb, NET_IP_ALIGN);
			rxdp1->Buffer0_ptr = pci_map_single
			    (ring->pdev, skb->data, size - NET_IP_ALIGN,
				PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(nic->pdev,
						rxdp1->Buffer0_ptr))
				goto pci_map_failed;

			rxdp->Control_2 =
				SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
			rxdp->Host_Control = (unsigned long) (skb);
		} else if (ring->rxd_mode == RXD_MODE_3B) {
			/*
			 * 2 buffer mode -
			 * 2 buffer mode provides 128
			 * byte aligned receive buffers.
			 */

			rxdp3 = (struct RxD3*)rxdp;
			/* save buffer pointers to avoid frequent dma mapping */
			Buffer0_ptr = rxdp3->Buffer0_ptr;
			Buffer1_ptr = rxdp3->Buffer1_ptr;
			memset(rxdp, 0, sizeof(struct RxD3));
			/* restore the buffer pointers for dma sync*/
			rxdp3->Buffer0_ptr = Buffer0_ptr;
			rxdp3->Buffer1_ptr = Buffer1_ptr;

			ba = &ring->ba[block_no][off];
			skb_reserve(skb, BUF0_LEN);
			/* Align skb->data up to the next ALIGN_SIZE+1
			 * boundary (tmp &= ~ALIGN_SIZE clears the low bits). */
			tmp = (u64)(unsigned long) skb->data;
			tmp += ALIGN_SIZE;
			tmp &= ~ALIGN_SIZE;
			skb->data = (void *) (unsigned long)tmp;
			skb_reset_tail_pointer(skb);

			if (from_card_up) {
				rxdp3->Buffer0_ptr =
				   pci_map_single(ring->pdev, ba->ba_0,
					BUF0_LEN, PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(nic->pdev,
						rxdp3->Buffer0_ptr))
					goto pci_map_failed;
			} else
				pci_dma_sync_single_for_device(ring->pdev,
				(dma_addr_t) rxdp3->Buffer0_ptr,
				    BUF0_LEN, PCI_DMA_FROMDEVICE);

			rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
			if (ring->rxd_mode == RXD_MODE_3B) {
				/* Two buffer mode */

				/*
				 * Buffer2 will have L3/L4 header plus
				 * L4 payload
				 */
				rxdp3->Buffer2_ptr = pci_map_single
				(ring->pdev, skb->data, ring->mtu + 4,
						PCI_DMA_FROMDEVICE);

				if (pci_dma_mapping_error(nic->pdev,
							rxdp3->Buffer2_ptr))
					goto pci_map_failed;

				if (from_card_up) {
					rxdp3->Buffer1_ptr =
						pci_map_single(ring->pdev,
						ba->ba_1, BUF1_LEN,
						PCI_DMA_FROMDEVICE);

					if (pci_dma_mapping_error(nic->pdev,
						rxdp3->Buffer1_ptr)) {
						/* Undo the Buffer2 mapping
						 * made just above. */
						pci_unmap_single
							(ring->pdev,
						    (dma_addr_t)(unsigned long)
							skb->data,
							ring->mtu + 4,
							PCI_DMA_FROMDEVICE);
						goto pci_map_failed;
					}
				}
				rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
				rxdp->Control_2 |= SET_BUFFER2_SIZE_3
								(ring->mtu + 4);
			}
			rxdp->Control_2 |= s2BIT(0);
			rxdp->Host_Control = (unsigned long) (skb);
		}
		/* Only every (1 << rxsync_frequency)-th descriptor is held
		 * back in first_rxdp; the rest are given to the NIC now. */
		if (alloc_tab & ((1 << rxsync_frequency) - 1))
			rxdp->Control_1 |= RXD_OWN_XENA;
		off++;
		if (off == (ring->rxd_count + 1))
			off = 0;
		ring->rx_curr_put_info.offset = off;

		rxdp->Control_2 |= SET_RXD_MARKER;
		if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			first_rxdp = rxdp;
		}
		ring->rx_bufs_left += 1;
		alloc_tab++;
	}

end:
	/* Transfer ownership of first descriptor to adapter just before
	 * exiting. Before that, use memory barrier so that ownership
	 * and other fields are seen by adapter correctly.
	 */
	if (first_rxdp) {
		wmb();
		first_rxdp->Control_1 |= RXD_OWN_XENA;
	}

	return SUCCESS;

pci_map_failed:
	stats->pci_map_fail_cnt++;
	stats->mem_freed += skb->truesize;
	dev_kfree_skb_irq(skb);
	return -ENOMEM;
}
/* free_rxd_blk - Unmap and free every skb held by one Rx descriptor block
 * of ring @ring_no, zeroing each descriptor and updating rx_bufs_left. */
static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
{
	struct net_device *dev = sp->dev;
	int j;
	struct sk_buff *skb;
	struct RxD_t *rxdp;
	struct mac_info *mac_control;
	struct buffAdd *ba;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;

	mac_control = &sp->mac_control;
	for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
		rxdp = mac_control->rings[ring_no].
				rx_blocks[blk].rxds[j].virt_addr;
		skb = (struct sk_buff *)
			((unsigned long) rxdp->Host_Control);
		if (!skb) {
			/* Descriptor has no buffer attached. */
			continue;
		}
		if (sp->rxd_mode == RXD_MODE_1) {
			rxdp1 = (struct RxD1*)rxdp;
			/* Single-buffer mode: one mapping sized for the
			 * full MTU plus L2 headers. */
			pci_unmap_single(sp->pdev, (dma_addr_t)
				rxdp1->Buffer0_ptr,
				dev->mtu +
				HEADER_ETHERNET_II_802_3_SIZE
				+ HEADER_802_2_SIZE +
				HEADER_SNAP_SIZE,
				PCI_DMA_FROMDEVICE);
			memset(rxdp, 0, sizeof(struct RxD1));
		} else if(sp->rxd_mode == RXD_MODE_3B) {
			rxdp3 = (struct RxD3*)rxdp;
			ba = &mac_control->rings[ring_no].
				ba[blk][j];
			/* Two-buffer mode: unmap all three buffer slots. */
			pci_unmap_single(sp->pdev, (dma_addr_t)
				rxdp3->Buffer0_ptr,
				BUF0_LEN,
				PCI_DMA_FROMDEVICE);
			pci_unmap_single(sp->pdev, (dma_addr_t)
				rxdp3->Buffer1_ptr,
				BUF1_LEN,
				PCI_DMA_FROMDEVICE);
			pci_unmap_single(sp->pdev, (dma_addr_t)
				rxdp3->Buffer2_ptr,
				dev->mtu + 4,
				PCI_DMA_FROMDEVICE);
			memset(rxdp, 0, sizeof(struct RxD3));
		}
		sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
		dev_kfree_skb(skb);
		mac_control->rings[ring_no].rx_bufs_left -= 1;
	}
}
/**
 *  free_rx_buffers - Frees all Rx buffers
 *  @sp: device private variable.
 *  Description:
 *  This function will free all Rx buffers allocated by host.
 *  Return Value:
 *  NONE.
 */

static void free_rx_buffers(struct s2io_nic *sp)
{
	struct net_device *dev = sp->dev;
	int i, blk = 0, buf_cnt = 0;
	struct mac_info *mac_control;
	struct config_param *config;

	mac_control = &sp->mac_control;
	config = &sp->config;

	for (i = 0; i < config->rx_ring_num; i++) {
		for (blk = 0; blk < rx_ring_sz[i]; blk++)
			free_rxd_blk(sp,i,blk);

		/* Reset ring bookkeeping after its blocks are emptied. */
		mac_control->rings[i].rx_curr_put_info.block_index = 0;
		mac_control->rings[i].rx_curr_get_info.block_index = 0;
		mac_control->rings[i].rx_curr_put_info.offset = 0;
		mac_control->rings[i].rx_curr_get_info.offset = 0;
		mac_control->rings[i].rx_bufs_left = 0;
		/* NOTE(review): buf_cnt is never incremented anywhere, so
		 * this debug line always reports 0 freed buffers. */
		DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
			  dev->name, buf_cnt, i);
	}
}
2810 static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring)
2812 if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
2813 DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name);
2814 DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
2816 return 0;
/**
 * s2io_poll - Rx interrupt handler for NAPI support
 * @napi : pointer to the napi structure.
 * @budget : The number of packets that were budgeted to be processed
 * during  one pass through the 'Poll" function.
 * Description:
 * Comes into picture only if NAPI support has been incorporated. It does
 * the same thing that rx_intr_handler does, but not in a interrupt context
 * also It will process only a given number of packets.
 * Return value:
 * 0 on success and 1 if there are No Rx packets to be processed.
 */

static int s2io_poll_msix(struct napi_struct *napi, int budget)
{
	struct ring_info *ring = container_of(napi, struct ring_info, napi);
	struct net_device *dev = ring->dev;
	struct config_param *config;
	struct mac_info *mac_control;
	int pkts_processed = 0;
	u8 __iomem *addr = NULL;
	u8 val8 = 0;
	struct s2io_nic *nic = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	int budget_org = budget;

	/* NOTE(review): config and mac_control are assigned but not used
	 * in this function. */
	config = &nic->config;
	mac_control = &nic->mac_control;

	if (unlikely(!is_s2io_card_up(nic)))
		return 0;

	pkts_processed = rx_intr_handler(ring, budget);
	s2io_chk_rx_buffers(nic, ring);

	if (pkts_processed < budget_org) {
		/* Budget not exhausted: finish polling and re-arm the
		 * interrupt for this ring only. */
		netif_rx_complete(napi);
		/*Re Enable MSI-Rx Vector*/
		/* Per-ring mask byte lives at xmsi_mask_reg + (7 - ring_no);
		 * 0x3f/0xbf appear to be the unmask patterns for ring 0 vs
		 * the others — confirm against the register spec. */
		addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
		addr += 7 - ring->ring_no;
		val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
		writeb(val8, addr);
		/* Read back after the write (value discarded). */
		val8 = readb(addr);
	}
	return pkts_processed;
}
/* NAPI poll handler for legacy INTA mode: services every Rx ring out of a
 * single napi context, splitting @budget across the rings in order. */
static int s2io_poll_inta(struct napi_struct *napi, int budget)
{
	struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
	struct ring_info *ring;
	struct config_param *config;
	struct mac_info *mac_control;
	int pkts_processed = 0;
	int ring_pkts_processed, i;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	int budget_org = budget;

	config = &nic->config;
	mac_control = &nic->mac_control;

	if (unlikely(!is_s2io_card_up(nic)))
		return 0;

	for (i = 0; i < config->rx_ring_num; i++) {
		ring = &mac_control->rings[i];
		ring_pkts_processed = rx_intr_handler(ring, budget);
		s2io_chk_rx_buffers(nic, ring);
		pkts_processed += ring_pkts_processed;
		/* Shrink the remaining budget by what this ring consumed. */
		budget -= ring_pkts_processed;
		if (budget <= 0)
			break;
	}
	if (pkts_processed < budget_org) {
		/* Work fully drained: leave polling mode and unmask all
		 * Rx traffic interrupts (readl flushes the write). */
		netif_rx_complete(napi);
		/* Re enable the Rx interrupts for the ring */
		writeq(0, &bar0->rx_traffic_mask);
		readl(&bar0->rx_traffic_mask);
	}
	return pkts_processed;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * s2io_netpoll - netpoll event handler entry point
 * @dev : pointer to the device structure.
 * Description:
 * 	This function will be called by upper layer to check for events on the
 * interface in situations where interrupts are disabled. It is used for
 * specific in-kernel networking tasks, such as remote consoles and kernel
 * debugging over the network (example netdump in RedHat).
 */
static void s2io_netpoll(struct net_device *dev)
{
	struct s2io_nic *nic = netdev_priv(dev);
	struct mac_info *mac_control;
	struct config_param *config;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
	int i;

	if (pci_channel_offline(nic->pdev))
		return;

	disable_irq(dev->irq);

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* Acknowledge all pending Rx/Tx traffic interrupts. */
	writeq(val64, &bar0->rx_traffic_int);
	writeq(val64, &bar0->tx_traffic_int);

	/* we need to free up the transmitted skbufs or else netpoll will
	 * run out of skbs and will fail and eventually netpoll application such
	 * as netdump will fail.
	 */
	for (i = 0; i < config->tx_fifo_num; i++)
		tx_intr_handler(&mac_control->fifos[i]);

	/* check for received packet and indicate up to network */
	for (i = 0; i < config->rx_ring_num; i++)
		rx_intr_handler(&mac_control->rings[i], 0);

	for (i = 0; i < config->rx_ring_num; i++) {
		if (fill_rx_buffers(nic, &mac_control->rings[i], 0) ==
				-ENOMEM) {
			DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
			DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
			break;
		}
	}
	enable_irq(dev->irq);
	return;
}
#endif
/**
 *  rx_intr_handler - Rx interrupt handler
 *  @ring_info: per ring structure.
 *  @budget: budget for napi processing.
 *  Description:
 *  If the interrupt is because of a received frame or if the
 *  receive ring contains fresh as yet un-processed frames,this function is
 *  called. It picks out the RxD at which place the last Rx processing had
 *  stopped and sends the skb to the OSM's Rx handler and then increments
 *  the offset.
 *  Return Value:
 *  No. of napi packets processed.
 */
static int rx_intr_handler(struct ring_info *ring_data, int budget)
{
	int get_block, put_block;
	struct rx_curr_get_info get_info, put_info;
	struct RxD_t *rxdp;
	struct sk_buff *skb;
	int pkt_cnt = 0, napi_pkts = 0;
	int i;
	struct RxD1* rxdp1;
	struct RxD3* rxdp3;

	get_info = ring_data->rx_curr_get_info;
	get_block = get_info.block_index;
	memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
	put_block = put_info.block_index;
	rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;

	/* Walk descriptors while the adapter has marked them processed. */
	while (RXD_IS_UP2DT(rxdp)) {
		/*
		 * If your are next to put index then it's
		 * FIFO full condition
		 */
		if ((get_block == put_block) &&
		    (get_info.offset + 1) == put_info.offset) {
			DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
				ring_data->dev->name);
			break;
		}
		skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
		if (skb == NULL) {
			DBG_PRINT(ERR_DBG, "%s: The skb is ",
				  ring_data->dev->name);
			DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
			return 0;
		}
		if (ring_data->rxd_mode == RXD_MODE_1) {
			rxdp1 = (struct RxD1*)rxdp;
			pci_unmap_single(ring_data->pdev, (dma_addr_t)
				rxdp1->Buffer0_ptr,
				ring_data->mtu +
				HEADER_ETHERNET_II_802_3_SIZE +
				HEADER_802_2_SIZE +
				HEADER_SNAP_SIZE,
				PCI_DMA_FROMDEVICE);
		} else if (ring_data->rxd_mode == RXD_MODE_3B) {
			/* Buffer0 (header buffer) is only synced for CPU
			 * access — it stays mapped for reuse by refill. */
			rxdp3 = (struct RxD3*)rxdp;
			pci_dma_sync_single_for_cpu(ring_data->pdev, (dma_addr_t)
				rxdp3->Buffer0_ptr,
				BUF0_LEN, PCI_DMA_FROMDEVICE);
			pci_unmap_single(ring_data->pdev, (dma_addr_t)
				rxdp3->Buffer2_ptr,
				ring_data->mtu + 4,
				PCI_DMA_FROMDEVICE);
		}
		prefetch(skb->data);
		rx_osm_handler(ring_data, rxdp);
		get_info.offset++;
		ring_data->rx_curr_get_info.offset = get_info.offset;
		rxdp = ring_data->rx_blocks[get_block].
				rxds[get_info.offset].virt_addr;
		/* Wrap to the next block at end of the current one. */
		if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
			get_info.offset = 0;
			ring_data->rx_curr_get_info.offset = get_info.offset;
			get_block++;
			if (get_block == ring_data->block_count)
				get_block = 0;
			ring_data->rx_curr_get_info.block_index = get_block;
			rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
		}

		if (ring_data->nic->config.napi) {
			budget--;
			napi_pkts++;
			if (!budget)
				break;
		}
		pkt_cnt++;
		if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
			break;
	}
	if (ring_data->lro) {
		/* Clear all LRO sessions before exiting */
		for (i=0; i<MAX_LRO_SESSIONS; i++) {
			struct lro *lro = &ring_data->lro0_n[i];
			if (lro->in_use) {
				update_L3L4_header(ring_data->nic, lro);
				queue_rx_frame(lro->parent, lro->vlan_tag);
				clear_lro_session(lro);
			}
		}
	}
	return(napi_pkts);
}
/**
 *  tx_intr_handler - Transmit interrupt handler
 *  @nic : device private variable
 *  Description:
 *  If an interrupt was raised to indicate DMA complete of the
 *  Tx packet, this function is called. It identifies the last TxD
 *  whose buffer was freed and frees all skbs whose data have already
 *  DMA'ed into the NICs internal memory.
 *  Return Value:
 *  NONE
 */

static void tx_intr_handler(struct fifo_info *fifo_data)
{
	struct s2io_nic *nic = fifo_data->nic;
	struct tx_curr_get_info get_info, put_info;
	struct sk_buff *skb = NULL;
	struct TxD *txdlp;
	int pkt_cnt = 0;
	unsigned long flags = 0;
	u8 err_mask;

	/* trylock: if another context holds the fifo lock, skip this run
	 * rather than spin in interrupt context. */
	if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
		return;

	get_info = fifo_data->tx_curr_get_info;
	memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
	txdlp = (struct TxD *) fifo_data->list_info[get_info.offset].
	    list_virt_addr;
	/* Reap completed descriptor lists until we hit one still owned by
	 * the NIC, catch up with the put pointer, or find no skb. */
	while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
	       (get_info.offset != put_info.offset) &&
	       (txdlp->Host_Control)) {
		/* Check for TxD errors */
		if (txdlp->Control_1 & TXD_T_CODE) {
			unsigned long long err;
			err = txdlp->Control_1 & TXD_T_CODE;
			if (err & 0x1) {
				nic->mac_control.stats_info->sw_stat.
						parity_err_cnt++;
			}

			/* update t_code statistics */
			err_mask = err >> 48;
			switch(err_mask) {
			case 2:
				nic->mac_control.stats_info->sw_stat.
						tx_buf_abort_cnt++;
				break;

			case 3:
				nic->mac_control.stats_info->sw_stat.
						tx_desc_abort_cnt++;
				break;

			case 7:
				nic->mac_control.stats_info->sw_stat.
						tx_parity_err_cnt++;
				break;

			case 10:
				nic->mac_control.stats_info->sw_stat.
						tx_link_loss_cnt++;
				break;

			case 15:
				nic->mac_control.stats_info->sw_stat.
						tx_list_proc_err_cnt++;
				break;
			/* no default: other t_code values are left
			 * uncounted — presumably intentional, confirm. */
			}
		}

		skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
		if (skb == NULL) {
			spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
			DBG_PRINT(ERR_DBG, "%s: Null skb ",
			__func__);
			DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
			return;
		}
		pkt_cnt++;

		/* Updating the statistics block */
		nic->dev->stats.tx_bytes += skb->len;
		nic->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
		dev_kfree_skb_irq(skb);

		get_info.offset++;
		if (get_info.offset == get_info.fifo_len + 1)
			get_info.offset = 0;
		txdlp = (struct TxD *) fifo_data->list_info
		    [get_info.offset].list_virt_addr;
		fifo_data->tx_curr_get_info.offset =
		    get_info.offset;
	}

	s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);

	spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
}
3162 * s2io_mdio_write - Function to write in to MDIO registers
3163 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3164 * @addr : address value
3165 * @value : data value
3166 * @dev : pointer to net_device structure
3167 * Description:
3168 * This function is used to write values to the MDIO registers
3169 * NONE
static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
{
	u64 val64 = 0x0;
	struct s2io_nic *sp = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* Address transaction: latch MMD register index, device type and
	 * port 0 into mdio_control, then kick the transaction off. */
	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
		| MDIO_MMD_DEV_ADDR(mmd_type)
		| MDIO_MMS_PRT_ADDR(0x0);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* Data transaction: same addressing plus the 16-bit payload and a
	 * WRITE opcode. */
	val64 = 0x0;
	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
		| MDIO_MMD_DEV_ADDR(mmd_type)
		| MDIO_MMS_PRT_ADDR(0x0)
		| MDIO_MDIO_DATA(value)
		| MDIO_OP(MDIO_OP_WRITE_TRANS);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* Trailing READ transaction whose result is discarded — presumably
	 * this completes/settles the write cycle on the device.
	 * NOTE(review): confirm against the Xframe hardware manual. */
	val64 = 0x0;
	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
		| MDIO_MMD_DEV_ADDR(mmd_type)
		| MDIO_MMS_PRT_ADDR(0x0)
		| MDIO_OP(MDIO_OP_READ_TRANS);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);
}
 *  s2io_mdio_read - Function to read from the MDIO registers
3212 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3213 * @addr : address value
3214 * @dev : pointer to net_device structure
3215 * Description:
 *  This function is used to read values from the MDIO registers
3217 * NONE
static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
{
	u64 val64 = 0x0;
	u64 rval64 = 0x0;
	struct s2io_nic *sp = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* Address transaction: latch MMD register index, device type and
	 * port 0, then start the transaction. */
	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
		| MDIO_MMD_DEV_ADDR(mmd_type)
		| MDIO_MMS_PRT_ADDR(0x0);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* Data transaction: issue the READ opcode and give the device
	 * time to respond. */
	val64 = 0x0;
	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
		| MDIO_MMD_DEV_ADDR(mmd_type)
		| MDIO_MMS_PRT_ADDR(0x0)
		| MDIO_OP(MDIO_OP_READ_TRANS);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* The 16-bit read data sits in bits [31:16] of mdio_control;
	 * mask and shift it down before returning. */
	rval64 = readq(&bar0->mdio_control);
	rval64 = rval64 & 0xFFFF0000;
	rval64 = rval64 >> 16;
	return rval64;
}
3253 * s2io_chk_xpak_counter - Function to check the status of the xpak counters
 *  @counter : counter value to be updated
3255 * @flag : flag to indicate the status
3256 * @type : counter type
3257 * Description:
3258 * This function is to check the status of the xpak counters value
3259 * NONE
3262 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
3264 u64 mask = 0x3;
3265 u64 val64;
3266 int i;
3267 for(i = 0; i <index; i++)
3268 mask = mask << 0x2;
3270 if(flag > 0)
3272 *counter = *counter + 1;
3273 val64 = *regs_stat & mask;
3274 val64 = val64 >> (index * 0x2);
3275 val64 = val64 + 1;
3276 if(val64 == 3)
3278 switch(type)
3280 case 1:
3281 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3282 "service. Excessive temperatures may "
3283 "result in premature transceiver "
3284 "failure \n");
3285 break;
3286 case 2:
3287 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3288 "service Excessive bias currents may "
3289 "indicate imminent laser diode "
3290 "failure \n");
3291 break;
3292 case 3:
3293 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3294 "service Excessive laser output "
3295 "power may saturate far-end "
3296 "receiver\n");
3297 break;
3298 default:
3299 DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
3300 "type \n");
3302 val64 = 0x0;
3304 val64 = val64 << (index * 0x2);
3305 *regs_stat = (*regs_stat & (~mask)) | (val64);
3307 } else {
3308 *regs_stat = *regs_stat & (~mask);
3313 * s2io_updt_xpak_counter - Function to update the xpak counters
3314 * @dev : pointer to net_device struct
3315 * Description:
 *  This function is to update the status of the xpak counters value
3317 * NONE
static void s2io_updt_xpak_counter(struct net_device *dev)
{
	u16 flag = 0x0;
	u16 type = 0x0;
	u16 val16 = 0x0;
	u64 val64 = 0x0;
	u64 addr = 0x0;

	struct s2io_nic *sp = netdev_priv(dev);
	struct stat_block *stat_info = sp->mac_control.stats_info;

	/* Check the communication with the MDIO slave: all-ones or
	 * all-zeros means the slave did not respond. */
	addr = 0x0000;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
	if ((val64 == 0xFFFF) || (val64 == 0x0000)) {
		DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
			  "Returned %llx\n", (unsigned long long)val64);
		return;
	}

	/* Check for the expected value of 0x2040 at PMA address 0x0000 */
	if (val64 != 0x2040) {
		DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
		DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
			  (unsigned long long)val64);
		return;
	}

	/* Loading the DOM register to MDIO register (write of 0, then a
	 * read-back whose value is discarded). */
	addr = 0xA100;
	s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	/* Reading the Alarm flags (register 0xA070). */
	addr = 0xA070;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	/* Bit 7: transceiver temperature high — run it through the 2-bit
	 * occurrence counter in xpak_regs_stat (alarm type 1). */
	flag = CHECKBIT(val64, 0x7);
	type = 1;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
			      &stat_info->xpak_stat.xpak_regs_stat,
			      0x0, flag, type);

	if (CHECKBIT(val64, 0x6))
		stat_info->xpak_stat.alarm_transceiver_temp_low++;

	/* Bit 3: laser bias current high (alarm type 2). */
	flag = CHECKBIT(val64, 0x3);
	type = 2;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
			      &stat_info->xpak_stat.xpak_regs_stat,
			      0x2, flag, type);

	if (CHECKBIT(val64, 0x2))
		stat_info->xpak_stat.alarm_laser_bias_current_low++;

	/* Bit 1: laser output power high (alarm type 3). */
	flag = CHECKBIT(val64, 0x1);
	type = 3;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
			      &stat_info->xpak_stat.xpak_regs_stat,
			      0x4, flag, type);

	if (CHECKBIT(val64, 0x0))
		stat_info->xpak_stat.alarm_laser_output_power_low++;

	/* Reading the Warning flags (register 0xA074) — warnings are only
	 * counted, not run through the occurrence counter. */
	addr = 0xA074;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	if (CHECKBIT(val64, 0x7))
		stat_info->xpak_stat.warn_transceiver_temp_high++;

	if (CHECKBIT(val64, 0x6))
		stat_info->xpak_stat.warn_transceiver_temp_low++;

	if (CHECKBIT(val64, 0x3))
		stat_info->xpak_stat.warn_laser_bias_current_high++;

	if (CHECKBIT(val64, 0x2))
		stat_info->xpak_stat.warn_laser_bias_current_low++;

	if (CHECKBIT(val64, 0x1))
		stat_info->xpak_stat.warn_laser_output_power_high++;

	if (CHECKBIT(val64, 0x0))
		stat_info->xpak_stat.warn_laser_output_power_low++;
}
3412 * wait_for_cmd_complete - waits for a command to complete.
3413 * @sp : private member of the device structure, which is a pointer to the
3414 * s2io_nic structure.
3415 * Description: Function that waits for a command to Write into RMAC
3416 * ADDR DATA registers to be completed and returns either success or
3417 * error depending on whether the command was complete or not.
3418 * Return value:
3419 * SUCCESS on success and FAILURE on failure.
3422 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3423 int bit_state)
3425 int ret = FAILURE, cnt = 0, delay = 1;
3426 u64 val64;
3428 if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3429 return FAILURE;
3431 do {
3432 val64 = readq(addr);
3433 if (bit_state == S2IO_BIT_RESET) {
3434 if (!(val64 & busy_bit)) {
3435 ret = SUCCESS;
3436 break;
3438 } else {
3439 if (!(val64 & busy_bit)) {
3440 ret = SUCCESS;
3441 break;
3445 if(in_interrupt())
3446 mdelay(delay);
3447 else
3448 msleep(delay);
3450 if (++cnt >= 10)
3451 delay = 50;
3452 } while (cnt < 20);
3453 return ret;
3456 * check_pci_device_id - Checks if the device id is supported
3457 * @id : device id
3458 * Description: Function to check if the pci device id is supported by driver.
3459 * Return value: Actual device id if supported else PCI_ANY_ID
3461 static u16 check_pci_device_id(u16 id)
3463 switch (id) {
3464 case PCI_DEVICE_ID_HERC_WIN:
3465 case PCI_DEVICE_ID_HERC_UNI:
3466 return XFRAME_II_DEVICE;
3467 case PCI_DEVICE_ID_S2IO_UNI:
3468 case PCI_DEVICE_ID_S2IO_WIN:
3469 return XFRAME_I_DEVICE;
3470 default:
3471 return PCI_ANY_ID;
3476 * s2io_reset - Resets the card.
3477 * @sp : private member of the device structure.
3478 * Description: Function to Reset the card. This function then also
3479 * restores the previously saved PCI configuration space registers as
3480 * the card reset also resets the configuration space.
3481 * Return value:
3482 * void.
static void s2io_reset(struct s2io_nic * sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	u16 subid, pci_cmd;
	int i;
	u16 val16;
	unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
	unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;

	DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
		  __func__, sp->dev->name);

	/* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));

	val64 = SW_RESET_ALL;
	writeq(val64, &bar0->sw_reset);
	/* CX4 boards need extra settle time after the soft reset. */
	if (strstr(sp->product_name, "CX4")) {
		msleep(750);
	}
	msleep(250);
	/* Poll until the device id reads back as a supported part,
	 * i.e. PCI config space has recovered from the reset. */
	for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {

		/* Restore the PCI state saved during initialization. */
		pci_restore_state(sp->pdev);
		pci_read_config_word(sp->pdev, 0x2, &val16);
		if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
			break;
		msleep(200);
	}

	if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
		DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __func__);
	}

	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);

	s2io_init_pci(sp);

	/* Set swapper to enable I/O register access */
	s2io_set_swapper(sp);

	/* restore mac_addr entries */
	do_s2io_restore_unicast_mc(sp);

	/* Restore the MSIX table entries from local variables */
	restore_xmsi_data(sp);

	/* Clear certain PCI/PCI-X fields after reset */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Clear "detected parity error" bit */
		pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);

		/* Clearing PCIX Ecc status register */
		pci_write_config_dword(sp->pdev, 0x68, 0x7C);

		/* Clearing PCI_STATUS error reflected here */
		writeq(s2BIT(62), &bar0->txpic_int_reg);
	}

	/* Reset device statistics maintained by OS */
	memset(&sp->stats, 0, sizeof (struct net_device_stats));

	/* Save the persistent counters before wiping the stats block ... */
	up_cnt = sp->mac_control.stats_info->sw_stat.link_up_cnt;
	down_cnt = sp->mac_control.stats_info->sw_stat.link_down_cnt;
	up_time = sp->mac_control.stats_info->sw_stat.link_up_time;
	down_time = sp->mac_control.stats_info->sw_stat.link_down_time;
	reset_cnt = sp->mac_control.stats_info->sw_stat.soft_reset_cnt;
	mem_alloc_cnt = sp->mac_control.stats_info->sw_stat.mem_allocated;
	mem_free_cnt = sp->mac_control.stats_info->sw_stat.mem_freed;
	watchdog_cnt = sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt;
	memset(sp->mac_control.stats_info, 0, sizeof(struct stat_block));
	/* ... and restore them: link up/down time/cnt, reset/memory/watchdog
	 * counts survive a card reset. */
	sp->mac_control.stats_info->sw_stat.link_up_cnt = up_cnt;
	sp->mac_control.stats_info->sw_stat.link_down_cnt = down_cnt;
	sp->mac_control.stats_info->sw_stat.link_up_time = up_time;
	sp->mac_control.stats_info->sw_stat.link_down_time = down_time;
	sp->mac_control.stats_info->sw_stat.soft_reset_cnt = reset_cnt;
	sp->mac_control.stats_info->sw_stat.mem_allocated = mem_alloc_cnt;
	sp->mac_control.stats_info->sw_stat.mem_freed = mem_free_cnt;
	sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt = watchdog_cnt;

	/* SXE-002: Configure link and activity LED to turn it off */
	subid = sp->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (sp->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	/*
	 * Clear spurious ECC interrupts that would have occured on
	 * XFRAME II cards after reset.
	 */
	if (sp->device_type == XFRAME_II_DEVICE) {
		val64 = readq(&bar0->pcc_err_reg);
		writeq(val64, &bar0->pcc_err_reg);
	}

	sp->device_enabled_once = FALSE;
}
 * s2io_set_swapper - to set the swapper control on the card
3594 * @sp : private member of the device structure,
3595 * pointer to the s2io_nic structure.
3596 * Description: Function to set the swapper control on the card
3597 * correctly depending on the 'endianness' of the system.
3598 * Return value:
3599 * SUCCESS on success and FAILURE on failure.
static int s2io_set_swapper(struct s2io_nic * sp)
{
	struct net_device *dev = sp->dev;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64, valt, valr;

	/*
	 * Set proper endian settings and verify the same by reading
	 * the PIF Feed-back register.
	 */

	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		int i = 0;
		/* Read-path candidates: each sets different FE (full) /
		 * SE (swap) byte-reordering bits in swapper_ctrl. */
		u64 value[] = { 0xC30000C3C30000C3ULL,   /* FE=1, SE=1 */
				0x8100008181000081ULL,  /* FE=1, SE=0 */
				0x4200004242000042ULL,  /* FE=0, SE=1 */
				0};			/* FE=0, SE=0 */

		/* Try each combination until the feedback pattern reads
		 * back correctly. */
		while(i<4) {
			writeq(value[i], &bar0->swapper_ctrl);
			val64 = readq(&bar0->pif_rd_swapper_fb);
			if (val64 == 0x0123456789ABCDEFULL)
				break;
			i++;
		}
		if (i == 4) {
			DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
				dev->name);
			DBG_PRINT(ERR_DBG, "feedback read %llx\n",
				(unsigned long long) val64);
			return FAILURE;
		}
		valr = value[i];
	} else {
		valr = readq(&bar0->swapper_ctrl);
	}

	/* Now verify the write path using the XMSI address register. */
	valt = 0x0123456789ABCDEFULL;
	writeq(valt, &bar0->xmsi_address);
	val64 = readq(&bar0->xmsi_address);

	if(val64 != valt) {
		int i = 0;
		/* Write-path candidates, OR-ed with the read setting that
		 * was found above. */
		u64 value[] = { 0x00C3C30000C3C300ULL,  /* FE=1, SE=1 */
				0x0081810000818100ULL,  /* FE=1, SE=0 */
				0x0042420000424200ULL,  /* FE=0, SE=1 */
				0};			/* FE=0, SE=0 */

		while(i<4) {
			writeq((value[i] | valr), &bar0->swapper_ctrl);
			writeq(valt, &bar0->xmsi_address);
			val64 = readq(&bar0->xmsi_address);
			if(val64 == valt)
				break;
			i++;
		}
		if(i == 4) {
			unsigned long long x = val64;
			DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
			DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
			return FAILURE;
		}
	}
	/* Keep only the top 16 bits (the probed FE/SE settings) and then
	 * OR in the per-path enables below. */
	val64 = readq(&bar0->swapper_ctrl);
	val64 &= 0xFFFF000000000000ULL;

#ifdef __BIG_ENDIAN
	/*
	 * The device by default set to a big endian format, so a
	 * big endian driver need not set anything.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		 SWAPPER_CTRL_TXP_SE |
		 SWAPPER_CTRL_TXD_R_FE |
		 SWAPPER_CTRL_TXD_W_FE |
		 SWAPPER_CTRL_TXF_R_FE |
		 SWAPPER_CTRL_RXD_R_FE |
		 SWAPPER_CTRL_RXD_W_FE |
		 SWAPPER_CTRL_RXF_W_FE |
		 SWAPPER_CTRL_XMSI_FE |
		 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
	if (sp->config.intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#else
	/*
	 * Initially we enable all bits to make it accessible by the
	 * driver, then we selectively enable only those bits that
	 * we want to set.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		 SWAPPER_CTRL_TXP_SE |
		 SWAPPER_CTRL_TXD_R_FE |
		 SWAPPER_CTRL_TXD_R_SE |
		 SWAPPER_CTRL_TXD_W_FE |
		 SWAPPER_CTRL_TXD_W_SE |
		 SWAPPER_CTRL_TXF_R_FE |
		 SWAPPER_CTRL_RXD_R_FE |
		 SWAPPER_CTRL_RXD_R_SE |
		 SWAPPER_CTRL_RXD_W_FE |
		 SWAPPER_CTRL_RXD_W_SE |
		 SWAPPER_CTRL_RXF_W_FE |
		 SWAPPER_CTRL_XMSI_FE |
		 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
	if (sp->config.intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#endif
	val64 = readq(&bar0->swapper_ctrl);

	/*
	 * Verifying if endian settings are accurate by reading a
	 * feedback register.
	 */
	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		/* Endian settings are incorrect, calls for another dekko. */
		DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
			dev->name);
		DBG_PRINT(ERR_DBG, "feedback read %llx\n",
			(unsigned long long) val64);
		return FAILURE;
	}

	return SUCCESS;
}
3730 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3732 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3733 u64 val64;
3734 int ret = 0, cnt = 0;
3736 do {
3737 val64 = readq(&bar0->xmsi_access);
3738 if (!(val64 & s2BIT(15)))
3739 break;
3740 mdelay(1);
3741 cnt++;
3742 } while(cnt < 5);
3743 if (cnt == 5) {
3744 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3745 ret = 1;
3748 return ret;
static void restore_xmsi_data(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 val64;
	int i, msix_index;

	/* Xframe I has no MSI-X; nothing to restore. */
	if (nic->device_type == XFRAME_I_DEVICE)
		return;

	/* Write the saved address/data pairs back into the device's XMSI
	 * table (used after a card reset wipes the table). */
	for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
		/* Entry 0 is the alarm vector; ring vectors occupy table
		 * slots 1, 9, 17, ... (stride of 8). */
		msix_index = (i) ? ((i-1) * 8 + 1): 0;
		writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
		writeq(nic->msix_info[i].data, &bar0->xmsi_data);
		/* bit 7 = write op, bit 15 = start/busy, bits 26-31 = index */
		val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
		writeq(val64, &bar0->xmsi_access);
		if (wait_for_msix_trans(nic, msix_index)) {
			DBG_PRINT(ERR_DBG, "failed in %s\n", __func__);
			continue;
		}
	}
}
static void store_xmsi_data(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 val64, addr, data;
	int i, msix_index;

	/* Xframe I has no MSI-X; nothing to store. */
	if (nic->device_type == XFRAME_I_DEVICE)
		return;

	/* Read each XMSI table entry out of the device and cache the
	 * address/data pair so restore_xmsi_data() can replay it after
	 * a reset. */
	for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
		/* Entry 0 is the alarm vector; ring vectors occupy table
		 * slots 1, 9, 17, ... (stride of 8). */
		msix_index = (i) ? ((i-1) * 8 + 1): 0;
		/* bit 15 = start/busy, bits 26-31 = table index (read op) */
		val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
		writeq(val64, &bar0->xmsi_access);
		if (wait_for_msix_trans(nic, msix_index)) {
			DBG_PRINT(ERR_DBG, "failed in %s\n", __func__);
			continue;
		}
		addr = readq(&bar0->xmsi_address);
		data = readq(&bar0->xmsi_data);
		/* Only cache entries the OS actually programmed. */
		if (addr && data) {
			nic->msix_info[i].addr = addr;
			nic->msix_info[i].data = data;
		}
	}
}
3801 static int s2io_enable_msi_x(struct s2io_nic *nic)
3803 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3804 u64 rx_mat;
3805 u16 msi_control; /* Temp variable */
3806 int ret, i, j, msix_indx = 1;
3808 nic->entries = kmalloc(nic->num_entries * sizeof(struct msix_entry),
3809 GFP_KERNEL);
3810 if (!nic->entries) {
3811 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \
3812 __func__);
3813 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3814 return -ENOMEM;
3816 nic->mac_control.stats_info->sw_stat.mem_allocated
3817 += (nic->num_entries * sizeof(struct msix_entry));
3819 memset(nic->entries, 0, nic->num_entries * sizeof(struct msix_entry));
3821 nic->s2io_entries =
3822 kmalloc(nic->num_entries * sizeof(struct s2io_msix_entry),
3823 GFP_KERNEL);
3824 if (!nic->s2io_entries) {
3825 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3826 __func__);
3827 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3828 kfree(nic->entries);
3829 nic->mac_control.stats_info->sw_stat.mem_freed
3830 += (nic->num_entries * sizeof(struct msix_entry));
3831 return -ENOMEM;
3833 nic->mac_control.stats_info->sw_stat.mem_allocated
3834 += (nic->num_entries * sizeof(struct s2io_msix_entry));
3835 memset(nic->s2io_entries, 0,
3836 nic->num_entries * sizeof(struct s2io_msix_entry));
3838 nic->entries[0].entry = 0;
3839 nic->s2io_entries[0].entry = 0;
3840 nic->s2io_entries[0].in_use = MSIX_FLG;
3841 nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
3842 nic->s2io_entries[0].arg = &nic->mac_control.fifos;
3844 for (i = 1; i < nic->num_entries; i++) {
3845 nic->entries[i].entry = ((i - 1) * 8) + 1;
3846 nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
3847 nic->s2io_entries[i].arg = NULL;
3848 nic->s2io_entries[i].in_use = 0;
3851 rx_mat = readq(&bar0->rx_mat);
3852 for (j = 0; j < nic->config.rx_ring_num; j++) {
3853 rx_mat |= RX_MAT_SET(j, msix_indx);
3854 nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
3855 nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
3856 nic->s2io_entries[j+1].in_use = MSIX_FLG;
3857 msix_indx += 8;
3859 writeq(rx_mat, &bar0->rx_mat);
3860 readq(&bar0->rx_mat);
3862 ret = pci_enable_msix(nic->pdev, nic->entries, nic->num_entries);
3863 /* We fail init if error or we get less vectors than min required */
3864 if (ret) {
3865 DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
3866 kfree(nic->entries);
3867 nic->mac_control.stats_info->sw_stat.mem_freed
3868 += (nic->num_entries * sizeof(struct msix_entry));
3869 kfree(nic->s2io_entries);
3870 nic->mac_control.stats_info->sw_stat.mem_freed
3871 += (nic->num_entries * sizeof(struct s2io_msix_entry));
3872 nic->entries = NULL;
3873 nic->s2io_entries = NULL;
3874 return -ENOMEM;
3878 * To enable MSI-X, MSI also needs to be enabled, due to a bug
3879 * in the herc NIC. (Temp change, needs to be removed later)
3881 pci_read_config_word(nic->pdev, 0x42, &msi_control);
3882 msi_control |= 0x1; /* Enable MSI */
3883 pci_write_config_word(nic->pdev, 0x42, msi_control);
3885 return 0;
3888 /* Handle software interrupt used during MSI(X) test */
3889 static irqreturn_t s2io_test_intr(int irq, void *dev_id)
3891 struct s2io_nic *sp = dev_id;
3893 sp->msi_detected = 1;
3894 wake_up(&sp->msi_wait);
3896 return IRQ_HANDLED;
/* Test interrupt path by forcing a software IRQ */
static int s2io_test_msi(struct s2io_nic *sp)
{
	struct pci_dev *pdev = sp->pdev;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	int err;
	u64 val64, saved64;

	/* Hook the test handler on the first ring vector (entry 1). */
	err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
			sp->name, sp);
	if (err) {
		DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
		       sp->dev->name, pci_name(pdev), pdev->irq);
		return err;
	}

	init_waitqueue_head (&sp->msi_wait);
	sp->msi_detected = 0;

	/* Arm a one-shot scheduled (timer) interrupt routed to MSI 1. */
	saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
	val64 |= SCHED_INT_CTRL_ONE_SHOT;
	val64 |= SCHED_INT_CTRL_TIMER_EN;
	val64 |= SCHED_INT_CTRL_INT2MSI(1);
	writeq(val64, &bar0->scheduled_int_ctrl);

	/* Wait up to 100 ms for s2io_test_intr() to fire. */
	wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);

	if (!sp->msi_detected) {
		/* MSI(X) test failed, go back to INTx mode */
		DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
			"using MSI(X) during test\n", sp->dev->name,
			pci_name(pdev));

		err = -EOPNOTSUPP;
	}

	free_irq(sp->entries[1].vector, sp);

	/* Restore the scheduled-interrupt control register. */
	writeq(saved64, &bar0->scheduled_int_ctrl);

	return err;
}
3942 static void remove_msix_isr(struct s2io_nic *sp)
3944 int i;
3945 u16 msi_control;
3947 for (i = 0; i < sp->num_entries; i++) {
3948 if (sp->s2io_entries[i].in_use ==
3949 MSIX_REGISTERED_SUCCESS) {
3950 int vector = sp->entries[i].vector;
3951 void *arg = sp->s2io_entries[i].arg;
3952 free_irq(vector, arg);
3956 kfree(sp->entries);
3957 kfree(sp->s2io_entries);
3958 sp->entries = NULL;
3959 sp->s2io_entries = NULL;
3961 pci_read_config_word(sp->pdev, 0x42, &msi_control);
3962 msi_control &= 0xFFFE; /* Disable MSI */
3963 pci_write_config_word(sp->pdev, 0x42, msi_control);
3965 pci_disable_msix(sp->pdev);
3968 static void remove_inta_isr(struct s2io_nic *sp)
3970 struct net_device *dev = sp->dev;
3972 free_irq(sp->pdev->irq, dev);
3975 /* ********************************************************* *
3976 * Functions defined below concern the OS part of the driver *
3977 * ********************************************************* */
3980 * s2io_open - open entry point of the driver
3981 * @dev : pointer to the device structure.
3982 * Description:
3983 * This function is the open entry point of the driver. It mainly calls a
3984 * function to allocate Rx buffers and inserts them into the buffer
3985 * descriptors and then enables the Rx part of the NIC.
3986 * Return value:
3987 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3988 * file on failure.
static int s2io_open(struct net_device *dev)
{
	struct s2io_nic *sp = netdev_priv(dev);
	int err = 0;

	/*
	 * Make sure you have link off by default every time
	 * Nic is initialized
	 */
	netif_carrier_off(dev);
	sp->last_link_state = 0;

	/* Initialize H/W and enable interrupts */
	err = s2io_card_up(sp);
	if (err) {
		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
			  dev->name);
		goto hw_init_failed;
	}

	/* Program the primary unicast MAC address into the NIC. */
	if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
		DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
		s2io_card_down(sp);
		err = -ENODEV;
		goto hw_init_failed;
	}
	s2io_start_all_tx_queue(sp);
	return 0;

hw_init_failed:
	/* Presumably a failed s2io_card_up() can leave the MSI-X tables
	 * allocated; free them and account the freed bytes.
	 * NOTE(review): verify against s2io_card_up()'s error paths. */
	if (sp->config.intr_type == MSI_X) {
		if (sp->entries) {
			kfree(sp->entries);
			sp->mac_control.stats_info->sw_stat.mem_freed
				+= (sp->num_entries * sizeof(struct msix_entry));
		}
		if (sp->s2io_entries) {
			kfree(sp->s2io_entries);
			sp->mac_control.stats_info->sw_stat.mem_freed
				+= (sp->num_entries * sizeof(struct s2io_msix_entry));
		}
	}
	return err;
}
4037 * s2io_close -close entry point of the driver
4038 * @dev : device pointer.
4039 * Description:
4040 * This is the stop entry point of the driver. It needs to undo exactly
4041 * whatever was done by the open entry point,thus it's usually referred to
4042 * as the close function.Among other things this function mainly stops the
4043 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
4044 * Return value:
4045 * 0 on success and an appropriate (-)ve integer as defined in errno.h
4046 * file on failure.
4049 static int s2io_close(struct net_device *dev)
4051 struct s2io_nic *sp = netdev_priv(dev);
4052 struct config_param *config = &sp->config;
4053 u64 tmp64;
4054 int offset;
4056 /* Return if the device is already closed *
4057 * Can happen when s2io_card_up failed in change_mtu *
4059 if (!is_s2io_card_up(sp))
4060 return 0;
4062 s2io_stop_all_tx_queue(sp);
4063 /* delete all populated mac entries */
4064 for (offset = 1; offset < config->max_mc_addr; offset++) {
4065 tmp64 = do_s2io_read_unicast_mc(sp, offset);
4066 if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
4067 do_s2io_delete_unicast_mc(sp, tmp64);
4070 s2io_card_down(sp);
4072 return 0;
 * s2io_xmit - Tx entry point of the driver
4077 * @skb : the socket buffer containing the Tx data.
4078 * @dev : device pointer.
4079 * Description :
4080 * This function is the Tx entry point of the driver. S2IO NIC supports
4081 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
 * NOTE: when the device can't queue the pkt, just the trans_start variable will
 * not be updated.
4084 * Return value:
4085 * 0 on success & 1 on failure.
4088 static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
4090 struct s2io_nic *sp = netdev_priv(dev);
4091 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
4092 register u64 val64;
4093 struct TxD *txdp;
4094 struct TxFIFO_element __iomem *tx_fifo;
4095 unsigned long flags = 0;
4096 u16 vlan_tag = 0;
4097 struct fifo_info *fifo = NULL;
4098 struct mac_info *mac_control;
4099 struct config_param *config;
4100 int do_spin_lock = 1;
4101 int offload_type;
4102 int enable_per_list_interrupt = 0;
4103 struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
4105 mac_control = &sp->mac_control;
4106 config = &sp->config;
4108 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
4110 if (unlikely(skb->len <= 0)) {
4111 DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
4112 dev_kfree_skb_any(skb);
4113 return 0;
4116 if (!is_s2io_card_up(sp)) {
4117 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
4118 dev->name);
4119 dev_kfree_skb(skb);
4120 return 0;
4123 queue = 0;
4124 if (sp->vlgrp && vlan_tx_tag_present(skb))
4125 vlan_tag = vlan_tx_tag_get(skb);
4126 if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
4127 if (skb->protocol == htons(ETH_P_IP)) {
4128 struct iphdr *ip;
4129 struct tcphdr *th;
4130 ip = ip_hdr(skb);
4132 if ((ip->frag_off & htons(IP_OFFSET|IP_MF)) == 0) {
4133 th = (struct tcphdr *)(((unsigned char *)ip) +
4134 ip->ihl*4);
4136 if (ip->protocol == IPPROTO_TCP) {
4137 queue_len = sp->total_tcp_fifos;
4138 queue = (ntohs(th->source) +
4139 ntohs(th->dest)) &
4140 sp->fifo_selector[queue_len - 1];
4141 if (queue >= queue_len)
4142 queue = queue_len - 1;
4143 } else if (ip->protocol == IPPROTO_UDP) {
4144 queue_len = sp->total_udp_fifos;
4145 queue = (ntohs(th->source) +
4146 ntohs(th->dest)) &
4147 sp->fifo_selector[queue_len - 1];
4148 if (queue >= queue_len)
4149 queue = queue_len - 1;
4150 queue += sp->udp_fifo_idx;
4151 if (skb->len > 1024)
4152 enable_per_list_interrupt = 1;
4153 do_spin_lock = 0;
4157 } else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
4158 /* get fifo number based on skb->priority value */
4159 queue = config->fifo_mapping
4160 [skb->priority & (MAX_TX_FIFOS - 1)];
4161 fifo = &mac_control->fifos[queue];
4163 spin_lock_irqsave(&fifo->tx_lock, flags);
4165 if (sp->config.multiq) {
4166 if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
4167 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4168 return NETDEV_TX_BUSY;
4170 } else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
4171 if (netif_queue_stopped(dev)) {
4172 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4173 return NETDEV_TX_BUSY;
4177 put_off = (u16) fifo->tx_curr_put_info.offset;
4178 get_off = (u16) fifo->tx_curr_get_info.offset;
4179 txdp = (struct TxD *) fifo->list_info[put_off].list_virt_addr;
4181 queue_len = fifo->tx_curr_put_info.fifo_len + 1;
4182 /* Avoid "put" pointer going beyond "get" pointer */
4183 if (txdp->Host_Control ||
4184 ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4185 DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
4186 s2io_stop_tx_queue(sp, fifo->fifo_no);
4187 dev_kfree_skb(skb);
4188 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4189 return 0;
4192 offload_type = s2io_offload_type(skb);
4193 if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
4194 txdp->Control_1 |= TXD_TCP_LSO_EN;
4195 txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
4197 if (skb->ip_summed == CHECKSUM_PARTIAL) {
4198 txdp->Control_2 |=
4199 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
4200 TXD_TX_CKO_UDP_EN);
4202 txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
4203 txdp->Control_1 |= TXD_LIST_OWN_XENA;
4204 txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
4205 if (enable_per_list_interrupt)
4206 if (put_off & (queue_len >> 5))
4207 txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
4208 if (vlan_tag) {
4209 txdp->Control_2 |= TXD_VLAN_ENABLE;
4210 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
4213 frg_len = skb->len - skb->data_len;
4214 if (offload_type == SKB_GSO_UDP) {
4215 int ufo_size;
4217 ufo_size = s2io_udp_mss(skb);
4218 ufo_size &= ~7;
4219 txdp->Control_1 |= TXD_UFO_EN;
4220 txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
4221 txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
4222 #ifdef __BIG_ENDIAN
4223 /* both variants do cpu_to_be64(be32_to_cpu(...)) */
4224 fifo->ufo_in_band_v[put_off] =
4225 (__force u64)skb_shinfo(skb)->ip6_frag_id;
4226 #else
4227 fifo->ufo_in_band_v[put_off] =
4228 (__force u64)skb_shinfo(skb)->ip6_frag_id << 32;
4229 #endif
4230 txdp->Host_Control = (unsigned long)fifo->ufo_in_band_v;
4231 txdp->Buffer_Pointer = pci_map_single(sp->pdev,
4232 fifo->ufo_in_band_v,
4233 sizeof(u64), PCI_DMA_TODEVICE);
4234 if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
4235 goto pci_map_failed;
4236 txdp++;
4239 txdp->Buffer_Pointer = pci_map_single
4240 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
4241 if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
4242 goto pci_map_failed;
4244 txdp->Host_Control = (unsigned long) skb;
4245 txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
4246 if (offload_type == SKB_GSO_UDP)
4247 txdp->Control_1 |= TXD_UFO_EN;
4249 frg_cnt = skb_shinfo(skb)->nr_frags;
4250 /* For fragmented SKB. */
4251 for (i = 0; i < frg_cnt; i++) {
4252 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4253 /* A '0' length fragment will be ignored */
4254 if (!frag->size)
4255 continue;
4256 txdp++;
4257 txdp->Buffer_Pointer = (u64) pci_map_page
4258 (sp->pdev, frag->page, frag->page_offset,
4259 frag->size, PCI_DMA_TODEVICE);
4260 txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
4261 if (offload_type == SKB_GSO_UDP)
4262 txdp->Control_1 |= TXD_UFO_EN;
4264 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
4266 if (offload_type == SKB_GSO_UDP)
4267 frg_cnt++; /* as Txd0 was used for inband header */
4269 tx_fifo = mac_control->tx_FIFO_start[queue];
4270 val64 = fifo->list_info[put_off].list_phy_addr;
4271 writeq(val64, &tx_fifo->TxDL_Pointer);
4273 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
4274 TX_FIFO_LAST_LIST);
4275 if (offload_type)
4276 val64 |= TX_FIFO_SPECIAL_FUNC;
4278 writeq(val64, &tx_fifo->List_Control);
4280 mmiowb();
4282 put_off++;
4283 if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
4284 put_off = 0;
4285 fifo->tx_curr_put_info.offset = put_off;
4287 /* Avoid "put" pointer going beyond "get" pointer */
4288 if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4289 sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
4290 DBG_PRINT(TX_DBG,
4291 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
4292 put_off, get_off);
4293 s2io_stop_tx_queue(sp, fifo->fifo_no);
4295 mac_control->stats_info->sw_stat.mem_allocated += skb->truesize;
4296 dev->trans_start = jiffies;
4297 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4299 if (sp->config.intr_type == MSI_X)
4300 tx_intr_handler(fifo);
4302 return 0;
4303 pci_map_failed:
4304 stats->pci_map_fail_cnt++;
4305 s2io_stop_tx_queue(sp, fifo->fifo_no);
4306 stats->mem_freed += skb->truesize;
4307 dev_kfree_skb(skb);
4308 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4309 return 0;
static void
s2io_alarm_handle(unsigned long data)
{
	/* Timer callback: periodically polls the adapter for alarm/error
	 * conditions while the interface is up.  @data is the s2io_nic
	 * pointer that was stashed when the timer was set up.
	 */
	struct s2io_nic *sp = (struct s2io_nic *)data;
	struct net_device *dev = sp->dev;

	s2io_handle_errors(dev);
	/* Re-arm so the error poll keeps running every HZ/2 ticks */
	mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
}
static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
{
	/* Per-ring MSI-X Rx interrupt handler; @dev_id is the ring_info
	 * registered for this vector.
	 */
	struct ring_info *ring = (struct ring_info *)dev_id;
	struct s2io_nic *sp = ring->nic;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* Card is down or resetting: claim the irq but do no work */
	if (unlikely(!is_s2io_card_up(sp)))
		return IRQ_HANDLED;

	if (sp->config.napi) {
		u8 __iomem *addr = NULL;
		u8 val8 = 0;

		/* Mask this ring's MSI-X source until the NAPI poll runs.
		 * NOTE(review): xmsi_mask_reg is addressed bytewise here and
		 * ring 0 uses the 0x7f pattern instead of 0xff — presumably
		 * it shares its mask byte with another source; confirm
		 * against the Xframe register specification.
		 */
		addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
		addr += (7 - ring->ring_no);
		val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
		writeb(val8, addr);
		val8 = readb(addr);	/* read back to flush the posted write */
		netif_rx_schedule(&ring->napi);
	} else {
		/* Non-NAPI path: drain the ring and replenish Rx buffers now */
		rx_intr_handler(ring, 0);
		s2io_chk_rx_buffers(sp, ring);
	}
	return IRQ_HANDLED;
}
static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
{
	/* MSI-X Tx interrupt handler shared by all Tx FIFOs; @dev_id points
	 * at the first element of the fifo_info array.
	 */
	int i;
	struct fifo_info *fifos = (struct fifo_info *)dev_id;
	struct s2io_nic *sp = fifos->nic;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	struct config_param *config = &sp->config;
	u64 reason;

	if (unlikely(!is_s2io_card_up(sp)))
		return IRQ_NONE;

	reason = readq(&bar0->general_int_status);
	if (unlikely(reason == S2IO_MINUS_ONE))
		/* Nothing much can be done. Get out */
		return IRQ_HANDLED;

	if (reason & (GEN_INTR_TXPIC | GEN_INTR_TXTRAFFIC)) {
		/* Mask all interrupts while servicing, unmask on the way out */
		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);

		if (reason & GEN_INTR_TXPIC)
			s2io_txpic_intr_handle(sp);

		/* tx_traffic_int is write-1-to-clear */
		if (reason & GEN_INTR_TXTRAFFIC)
			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);

		for (i = 0; i < config->tx_fifo_num; i++)
			tx_intr_handler(&fifos[i]);

		writeq(sp->general_int_mask, &bar0->general_int_mask);
		readl(&bar0->general_int_status);	/* flush */
		return IRQ_HANDLED;
	}
	/* The interrupt was not raised by us */
	return IRQ_NONE;
}
static void s2io_txpic_intr_handle(struct s2io_nic *sp)
{
	/* Handle PIC block interrupts: primarily GPIO link up/down
	 * indications.  Adjusts the GPIO interrupt masks so only the
	 * "opposite" link transition stays unmasked, and drives the
	 * adapter enable/LED bits accordingly.
	 */
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;

	val64 = readq(&bar0->pic_int_status);
	if (val64 & PIC_INT_GPIO) {
		val64 = readq(&bar0->gpio_int_reg);
		if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
		    (val64 & GPIO_INT_REG_LINK_UP)) {
			/*
			 * This is unstable state so clear both up/down
			 * interrupt and adapter to re-evaluate the link state.
			 */
			val64 |= GPIO_INT_REG_LINK_DOWN;
			val64 |= GPIO_INT_REG_LINK_UP;
			writeq(val64, &bar0->gpio_int_reg);
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~(GPIO_INT_MASK_LINK_UP |
				   GPIO_INT_MASK_LINK_DOWN);
			writeq(val64, &bar0->gpio_int_mask);
		}
		else if (val64 & GPIO_INT_REG_LINK_UP) {
			val64 = readq(&bar0->adapter_status);
			/* Enable Adapter */
			val64 = readq(&bar0->adapter_control);
			val64 |= ADAPTER_CNTL_EN;
			writeq(val64, &bar0->adapter_control);
			val64 |= ADAPTER_LED_ON;
			writeq(val64, &bar0->adapter_control);
			if (!sp->device_enabled_once)
				sp->device_enabled_once = 1;

			s2io_link(sp, LINK_UP);
			/*
			 * unmask link down interrupt and mask link-up
			 * intr
			 */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_DOWN;
			val64 |= GPIO_INT_MASK_LINK_UP;
			writeq(val64, &bar0->gpio_int_mask);

		}else if (val64 & GPIO_INT_REG_LINK_DOWN) {
			val64 = readq(&bar0->adapter_status);
			s2io_link(sp, LINK_DOWN);
			/* Link is down so unmask link up interrupt */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_UP;
			val64 |= GPIO_INT_MASK_LINK_DOWN;
			writeq(val64, &bar0->gpio_int_mask);

			/* turn off LED */
			val64 = readq(&bar0->adapter_control);
			val64 = val64 &(~ADAPTER_LED_ON);
			writeq(val64, &bar0->adapter_control);
		}
	}
	/* Dummy read of the mask register (result intentionally unused) */
	val64 = readq(&bar0->gpio_int_mask);
}
 * do_s2io_chk_alarm_bit - Check for alarm and increment the counter
4449 * @value: alarm bits
4450 * @addr: address value
4451 * @cnt: counter variable
4452 * Description: Check for alarm and increment the counter
4453 * Return Value:
4454 * 1 - if alarm bit set
4455 * 0 - if alarm bit is not set
4457 static int do_s2io_chk_alarm_bit(u64 value, void __iomem * addr,
4458 unsigned long long *cnt)
4460 u64 val64;
4461 val64 = readq(addr);
4462 if ( val64 & value ) {
4463 writeq(val64, addr);
4464 (*cnt)++;
4465 return 1;
4467 return 0;
4472 * s2io_handle_errors - Xframe error indication handler
4473 * @nic: device private variable
4474 * Description: Handle alarms such as loss of link, single or
4475 * double ECC errors, critical and serious errors.
4476 * Return Value:
4477 * NONE
static void s2io_handle_errors(void * dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct s2io_nic *sp = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 temp64 = 0,val64=0;
	int i = 0;

	struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
	struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;

	/* Nothing to do while the card is down... */
	if (!is_s2io_card_up(sp))
		return;

	/* ...or when it has dropped off the PCI bus */
	if (pci_channel_offline(sp->pdev))
		return;

	/* ring_full_cnt is re-derived from the hardware bump counters below */
	memset(&sw_stat->ring_full_cnt, 0,
		sizeof(sw_stat->ring_full_cnt));

	/* Handling the XPAK counters update */
	if(stats->xpak_timer_count < 72000) {
		/* waiting for an hour */
		stats->xpak_timer_count++;
	} else {
		s2io_updt_xpak_counter(dev);
		/* reset the count to zero */
		stats->xpak_timer_count = 0;
	}

	/* Handling link status change error Intr */
	if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
		/* read-then-writeback latches and clears the alarm bits */
		val64 = readq(&bar0->mac_rmac_err_reg);
		writeq(val64, &bar0->mac_rmac_err_reg);
		if (val64 & RMAC_LINK_STATE_CHANGE_INT)
			schedule_work(&sp->set_link_task);
	}

	/* In case of a serious error, the device will be Reset. */
	if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
				&sw_stat->serious_err_cnt))
		goto reset;

	/* Check for data parity error */
	if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
				&sw_stat->parity_err_cnt))
		goto reset;

	/* Check for ring full counter */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Each 64-bit bump counter packs four 16-bit ring counts */
		val64 = readq(&bar0->ring_bump_counter1);
		for (i=0; i<4; i++) {
			temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
			temp64 >>= 64 - ((i+1)*16);
			sw_stat->ring_full_cnt[i] += temp64;
		}

		val64 = readq(&bar0->ring_bump_counter2);
		for (i=0; i<4; i++) {
			temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
			temp64 >>= 64 - ((i+1)*16);
			sw_stat->ring_full_cnt[i+4] += temp64;
		}
	}

	/* In each block below: fatal alarms force a reset, while the
	 * recoverable ones (single-bit ECC etc.) are merely counted. */
	val64 = readq(&bar0->txdma_int_status);
	/*check for pfc_err*/
	if (val64 & TXDMA_PFC_INT) {
		if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM|
				PFC_MISC_0_ERR | PFC_MISC_1_ERR|
				PFC_PCIX_ERR, &bar0->pfc_err_reg,
				&sw_stat->pfc_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR, &bar0->pfc_err_reg,
				&sw_stat->pfc_err_cnt);
	}

	/*check for tda_err*/
	if (val64 & TXDMA_TDA_INT) {
		if(do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
				TDA_SM1_ERR_ALARM, &bar0->tda_err_reg,
				&sw_stat->tda_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
				&bar0->tda_err_reg, &sw_stat->tda_err_cnt);
	}
	/*check for pcc_err*/
	if (val64 & TXDMA_PCC_INT) {
		if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM
				| PCC_N_SERR | PCC_6_COF_OV_ERR
				| PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR
				| PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR
				| PCC_TXB_ECC_DB_ERR, &bar0->pcc_err_reg,
				&sw_stat->pcc_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
				&bar0->pcc_err_reg, &sw_stat->pcc_err_cnt);
	}

	/*check for tti_err*/
	if (val64 & TXDMA_TTI_INT) {
		if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM, &bar0->tti_err_reg,
				&sw_stat->tti_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
				&bar0->tti_err_reg, &sw_stat->tti_err_cnt);
	}

	/*check for lso_err*/
	if (val64 & TXDMA_LSO_INT) {
		if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT
				| LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
				&bar0->lso_err_reg, &sw_stat->lso_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
				&bar0->lso_err_reg, &sw_stat->lso_err_cnt);
	}

	/*check for tpa_err*/
	if (val64 & TXDMA_TPA_INT) {
		if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM, &bar0->tpa_err_reg,
				&sw_stat->tpa_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP, &bar0->tpa_err_reg,
				&sw_stat->tpa_err_cnt);
	}

	/*check for sm_err*/
	if (val64 & TXDMA_SM_INT) {
		if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM, &bar0->sm_err_reg,
				&sw_stat->sm_err_cnt))
			goto reset;
	}

	val64 = readq(&bar0->mac_int_status);
	if (val64 & MAC_INT_STATUS_TMAC_INT) {
		if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
				&bar0->mac_tmac_err_reg,
				&sw_stat->mac_tmac_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR
				| TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
				&bar0->mac_tmac_err_reg,
				&sw_stat->mac_tmac_err_cnt);
	}

	val64 = readq(&bar0->xgxs_int_status);
	if (val64 & XGXS_INT_STATUS_TXGXS) {
		if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
				&bar0->xgxs_txgxs_err_reg,
				&sw_stat->xgxs_txgxs_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
				&bar0->xgxs_txgxs_err_reg,
				&sw_stat->xgxs_txgxs_err_cnt);
	}

	val64 = readq(&bar0->rxdma_int_status);
	if (val64 & RXDMA_INT_RC_INT_M) {
		if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR
				| RC_PRCn_SM_ERR_ALARM |RC_FTC_SM_ERR_ALARM,
				&bar0->rc_err_reg, &sw_stat->rc_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR
				| RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
				&sw_stat->rc_err_cnt);
		if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn
				| PRC_PCI_AB_F_WR_Rn, &bar0->prc_pcix_err_reg,
				&sw_stat->prc_pcix_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn | PRC_PCI_DP_WR_Rn
				| PRC_PCI_DP_F_WR_Rn, &bar0->prc_pcix_err_reg,
				&sw_stat->prc_pcix_err_cnt);
	}

	if (val64 & RXDMA_INT_RPA_INT_M) {
		if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
				&bar0->rpa_err_reg, &sw_stat->rpa_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
				&bar0->rpa_err_reg, &sw_stat->rpa_err_cnt);
	}

	if (val64 & RXDMA_INT_RDA_INT_M) {
		if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR
				| RDA_FRM_ECC_DB_N_AERR | RDA_SM1_ERR_ALARM
				| RDA_SM0_ERR_ALARM | RDA_RXD_ECC_DB_SERR,
				&bar0->rda_err_reg, &sw_stat->rda_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR | RDA_FRM_ECC_SG_ERR
				| RDA_MISC_ERR | RDA_PCIX_ERR,
				&bar0->rda_err_reg, &sw_stat->rda_err_cnt);
	}

	if (val64 & RXDMA_INT_RTI_INT_M) {
		if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM, &bar0->rti_err_reg,
				&sw_stat->rti_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
				&bar0->rti_err_reg, &sw_stat->rti_err_cnt);
	}

	val64 = readq(&bar0->mac_int_status);
	if (val64 & MAC_INT_STATUS_RMAC_INT) {
		if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
				&bar0->mac_rmac_err_reg,
				&sw_stat->mac_rmac_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(RMAC_UNUSED_INT|RMAC_SINGLE_ECC_ERR|
				RMAC_DOUBLE_ECC_ERR, &bar0->mac_rmac_err_reg,
				&sw_stat->mac_rmac_err_cnt);
	}

	val64 = readq(&bar0->xgxs_int_status);
	if (val64 & XGXS_INT_STATUS_RXGXS) {
		if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
				&bar0->xgxs_rxgxs_err_reg,
				&sw_stat->xgxs_rxgxs_err_cnt))
			goto reset;
	}

	val64 = readq(&bar0->mc_int_status);
	if(val64 & MC_INT_STATUS_MC_INT) {
		if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR, &bar0->mc_err_reg,
				&sw_stat->mc_err_cnt))
			goto reset;

		/* Handling Ecc errors */
		/* NOTE(review): val64 still holds mc_int_status here, not
		 * mc_err_reg — confirm the ECC bit test below is intended. */
		if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
			writeq(val64, &bar0->mc_err_reg);
			if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
				sw_stat->double_ecc_errs++;
				if (sp->device_type != XFRAME_II_DEVICE) {
					/*
					 * Reset XframeI only if critical error
					 */
					if (val64 &
						(MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
						MC_ERR_REG_MIRI_ECC_DB_ERR_1))
						goto reset;
				}
			} else
				sw_stat->single_ecc_errs++;
		}
	}
	return;

reset:
	/* Unrecoverable alarm: stop Tx and let the reset task recover */
	s2io_stop_all_tx_queue(sp);
	schedule_work(&sp->rst_timer_task);
	sw_stat->soft_reset_cnt++;
	return;
}
4734 * s2io_isr - ISR handler of the device .
4735 * @irq: the irq of the device.
4736 * @dev_id: a void pointer to the dev structure of the NIC.
4737 * Description: This function is the ISR handler of the device. It
4738 * identifies the reason for the interrupt and calls the relevant
 * service routines. As a contingency measure, this ISR allocates the
4740 * recv buffers, if their numbers are below the panic value which is
4741 * presently set to 25% of the original number of rcv buffers allocated.
4742 * Return value:
4743 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
4744 * IRQ_NONE: will be returned if interrupt is not from our device
static irqreturn_t s2io_isr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct s2io_nic *sp = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	int i;
	u64 reason = 0;
	struct mac_info *mac_control;
	struct config_param *config;

	/* Pretend we handled any irq's from a disconnected card */
	if (pci_channel_offline(sp->pdev))
		return IRQ_NONE;

	if (!is_s2io_card_up(sp))
		return IRQ_NONE;

	mac_control = &sp->mac_control;
	config = &sp->config;

	/*
	 * Identify the cause for interrupt and call the appropriate
	 * interrupt handler. Causes for the interrupt could be;
	 * 1. Rx of packet.
	 * 2. Tx complete.
	 * 3. Link down.
	 */
	reason = readq(&bar0->general_int_status);

	if (unlikely(reason == S2IO_MINUS_ONE) ) {
		/* Nothing much can be done. Get out */
		return IRQ_HANDLED;
	}

	if (reason & (GEN_INTR_RXTRAFFIC |
		GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC))
	{
		/* Mask everything while servicing; restored before return */
		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);

		if (config->napi) {
			if (reason & GEN_INTR_RXTRAFFIC) {
				/* Defer Rx work to the NAPI poll routine */
				netif_rx_schedule(&sp->napi);
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
				readl(&bar0->rx_traffic_int);
			}
		} else {
			/*
			 * rx_traffic_int reg is an R1 register, writing all 1's
			 * will ensure that the actual interrupt causing bit
			 * get's cleared and hence a read can be avoided.
			 */
			if (reason & GEN_INTR_RXTRAFFIC)
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);

			for (i = 0; i < config->rx_ring_num; i++)
				rx_intr_handler(&mac_control->rings[i], 0);
		}

		/*
		 * tx_traffic_int reg is an R1 register, writing all 1's
		 * will ensure that the actual interrupt causing bit get's
		 * cleared and hence a read can be avoided.
		 */
		if (reason & GEN_INTR_TXTRAFFIC)
			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);

		for (i = 0; i < config->tx_fifo_num; i++)
			tx_intr_handler(&mac_control->fifos[i]);

		if (reason & GEN_INTR_TXPIC)
			s2io_txpic_intr_handle(sp);

		/*
		 * Reallocate the buffers from the interrupt handler itself.
		 */
		if (!config->napi) {
			for (i = 0; i < config->rx_ring_num; i++)
				s2io_chk_rx_buffers(sp, &mac_control->rings[i]);
		}
		writeq(sp->general_int_mask, &bar0->general_int_mask);
		readl(&bar0->general_int_status);	/* flush posted writes */

		return IRQ_HANDLED;

	}
	else if (!reason) {
		/* The interrupt was not raised by us */
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}
4841 * s2io_updt_stats -
4843 static void s2io_updt_stats(struct s2io_nic *sp)
4845 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4846 u64 val64;
4847 int cnt = 0;
4849 if (is_s2io_card_up(sp)) {
4850 /* Apprx 30us on a 133 MHz bus */
4851 val64 = SET_UPDT_CLICKS(10) |
4852 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4853 writeq(val64, &bar0->stat_cfg);
4854 do {
4855 udelay(100);
4856 val64 = readq(&bar0->stat_cfg);
4857 if (!(val64 & s2BIT(0)))
4858 break;
4859 cnt++;
4860 if (cnt == 5)
4861 break; /* Updt failed */
4862 } while(1);
4867 * s2io_get_stats - Updates the device statistics structure.
4868 * @dev : pointer to the device structure.
4869 * Description:
4870 * This function updates the device statistics structure in the s2io_nic
4871 * structure and returns a pointer to the same.
4872 * Return value:
4873 * pointer to the updated net_device_stats structure.
4876 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4878 struct s2io_nic *sp = netdev_priv(dev);
4879 struct mac_info *mac_control;
4880 struct config_param *config;
4881 int i;
4884 mac_control = &sp->mac_control;
4885 config = &sp->config;
4887 /* Configure Stats for immediate updt */
4888 s2io_updt_stats(sp);
4890 /* Using sp->stats as a staging area, because reset (due to mtu
4891 change, for example) will clear some hardware counters */
4892 dev->stats.tx_packets +=
4893 le32_to_cpu(mac_control->stats_info->tmac_frms) -
4894 sp->stats.tx_packets;
4895 sp->stats.tx_packets =
4896 le32_to_cpu(mac_control->stats_info->tmac_frms);
4897 dev->stats.tx_errors +=
4898 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms) -
4899 sp->stats.tx_errors;
4900 sp->stats.tx_errors =
4901 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4902 dev->stats.rx_errors +=
4903 le64_to_cpu(mac_control->stats_info->rmac_drop_frms) -
4904 sp->stats.rx_errors;
4905 sp->stats.rx_errors =
4906 le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
4907 dev->stats.multicast =
4908 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms) -
4909 sp->stats.multicast;
4910 sp->stats.multicast =
4911 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
4912 dev->stats.rx_length_errors =
4913 le64_to_cpu(mac_control->stats_info->rmac_long_frms) -
4914 sp->stats.rx_length_errors;
4915 sp->stats.rx_length_errors =
4916 le64_to_cpu(mac_control->stats_info->rmac_long_frms);
4918 /* collect per-ring rx_packets and rx_bytes */
4919 dev->stats.rx_packets = dev->stats.rx_bytes = 0;
4920 for (i = 0; i < config->rx_ring_num; i++) {
4921 dev->stats.rx_packets += mac_control->rings[i].rx_packets;
4922 dev->stats.rx_bytes += mac_control->rings[i].rx_bytes;
4925 return (&dev->stats);
4929 * s2io_set_multicast - entry point for multicast address enable/disable.
4930 * @dev : pointer to the device structure
4931 * Description:
4932 * This function is a driver entry point which gets called by the kernel
4933 * whenever multicast addresses must be enabled/disabled. This also gets
 * called to set/reset promiscuous mode. Depending on the device flag, we
4935 * determine, if multicast address must be enabled or if promiscuous mode
4936 * is to be disabled etc.
4937 * Return value:
4938 * void.
static void s2io_set_multicast(struct net_device *dev)
{
	int i, j, prev_cnt;
	struct dev_mc_list *mclist;
	struct s2io_nic *sp = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
	    0xfeffffffffffULL;
	u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
	void __iomem *add;
	struct config_param *config = &sp->config;

	if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
		/* Enable all Multicast addresses: program a catch-all
		 * entry into the last multicast CAM slot. */
		writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
			&bar0->rmac_addr_data0_mem);
		writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
			&bar0->rmac_addr_data1_mem);
		val64 = RMAC_ADDR_CMD_MEM_WE |
			RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
			RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
		writeq(val64, &bar0->rmac_addr_cmd_mem);
		/* Wait till command completes */
		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
			RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
			S2IO_BIT_RESET);

		sp->m_cast_flg = 1;
		sp->all_multi_pos = config->max_mc_addr - 1;
	} else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
		/* Disable all Multicast addresses.
		 * NOTE(review): this branch fires while IFF_ALLMULTI is
		 * still set, so repeated calls toggle the catch-all entry
		 * on and off — verify the condition is intended. */
		writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
			&bar0->rmac_addr_data0_mem);
		writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
			&bar0->rmac_addr_data1_mem);
		val64 = RMAC_ADDR_CMD_MEM_WE |
			RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
			RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
		writeq(val64, &bar0->rmac_addr_cmd_mem);
		/* Wait till command completes */
		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
			RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
			S2IO_BIT_RESET);

		sp->m_cast_flg = 0;
		sp->all_multi_pos = 0;
	}

	if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
		/* Put the NIC into promiscuous mode */
		add = &bar0->mac_cfg;
		val64 = readq(&bar0->mac_cfg);
		val64 |= MAC_CFG_RMAC_PROM_ENABLE;

		/* mac_cfg is key-protected; unlock before each 32-bit half */
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) val64, add);
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64 >> 32), (add + 4));

		if (vlan_tag_strip != 1) {
			val64 = readq(&bar0->rx_pa_cfg);
			val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
			writeq(val64, &bar0->rx_pa_cfg);
			sp->vlan_strip_flag = 0;
		}

		val64 = readq(&bar0->mac_cfg);
		sp->promisc_flg = 1;
		DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
			  dev->name);
	} else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
		/* Remove the NIC from promiscuous mode */
		add = &bar0->mac_cfg;
		val64 = readq(&bar0->mac_cfg);
		val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;

		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) val64, add);
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64 >> 32), (add + 4));

		if (vlan_tag_strip != 0) {
			val64 = readq(&bar0->rx_pa_cfg);
			val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
			writeq(val64, &bar0->rx_pa_cfg);
			sp->vlan_strip_flag = 1;
		}

		val64 = readq(&bar0->mac_cfg);
		sp->promisc_flg = 0;
		DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
			  dev->name);
	}

	/* Update individual M_CAST address list */
	if ((!sp->m_cast_flg) && dev->mc_count) {
		if (dev->mc_count >
		    (config->max_mc_addr - config->max_mac_addr)) {
			DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
				  dev->name);
			DBG_PRINT(ERR_DBG, "can be added, please enable ");
			DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
			return;
		}

		prev_cnt = sp->mc_addr_count;
		sp->mc_addr_count = dev->mc_count;

		/* Clear out the previous list of Mc in the H/W. */
		for (i = 0; i < prev_cnt; i++) {
			writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
				&bar0->rmac_addr_data0_mem);
			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
				&bar0->rmac_addr_data1_mem);
			val64 = RMAC_ADDR_CMD_MEM_WE |
				RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
				RMAC_ADDR_CMD_MEM_OFFSET
				(config->mc_start_offset + i);
			writeq(val64, &bar0->rmac_addr_cmd_mem);

			/* Wait till command completes */
			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
					RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
					S2IO_BIT_RESET)) {
				DBG_PRINT(ERR_DBG, "%s: Adding ",
					  dev->name);
				DBG_PRINT(ERR_DBG, "Multicasts failed\n");
				return;
			}
		}

		/* Create the new Rx filter list and update the same in H/W. */
		for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
		     i++, mclist = mclist->next) {
			memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
			       ETH_ALEN);
			/* Pack the 6 address bytes into a u64, MSB first */
			mac_addr = 0;
			for (j = 0; j < ETH_ALEN; j++) {
				mac_addr |= mclist->dmi_addr[j];
				mac_addr <<= 8;
			}
			mac_addr >>= 8;
			writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
				&bar0->rmac_addr_data0_mem);
			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
				&bar0->rmac_addr_data1_mem);
			val64 = RMAC_ADDR_CMD_MEM_WE |
				RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
				RMAC_ADDR_CMD_MEM_OFFSET
				(i + config->mc_start_offset);
			writeq(val64, &bar0->rmac_addr_cmd_mem);

			/* Wait till command completes */
			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
					RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
					S2IO_BIT_RESET)) {
				DBG_PRINT(ERR_DBG, "%s: Adding ",
					  dev->name);
				DBG_PRINT(ERR_DBG, "Multicasts failed\n");
				return;
			}
		}
	}
}
5106 /* read from CAM unicast & multicast addresses and store it in
5107 * def_mac_addr structure
5109 static void do_s2io_store_unicast_mc(struct s2io_nic *sp)
5111 int offset;
5112 u64 mac_addr = 0x0;
5113 struct config_param *config = &sp->config;
5115 /* store unicast & multicast mac addresses */
5116 for (offset = 0; offset < config->max_mc_addr; offset++) {
5117 mac_addr = do_s2io_read_unicast_mc(sp, offset);
5118 /* if read fails disable the entry */
5119 if (mac_addr == FAILURE)
5120 mac_addr = S2IO_DISABLE_MAC_ENTRY;
5121 do_s2io_copy_mac_addr(sp, offset, mac_addr);
5125 /* restore unicast & multicast MAC to CAM from def_mac_addr structure */
5126 static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
5128 int offset;
5129 struct config_param *config = &sp->config;
5130 /* restore unicast mac address */
5131 for (offset = 0; offset < config->max_mac_addr; offset++)
5132 do_s2io_prog_unicast(sp->dev,
5133 sp->def_mac_addr[offset].mac_addr);
5135 /* restore multicast mac address */
5136 for (offset = config->mc_start_offset;
5137 offset < config->max_mc_addr; offset++)
5138 do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
5141 /* add a multicast MAC address to CAM */
5142 static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
5144 int i;
5145 u64 mac_addr = 0;
5146 struct config_param *config = &sp->config;
5148 for (i = 0; i < ETH_ALEN; i++) {
5149 mac_addr <<= 8;
5150 mac_addr |= addr[i];
5152 if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
5153 return SUCCESS;
5155 /* check if the multicast mac already preset in CAM */
5156 for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
5157 u64 tmp64;
5158 tmp64 = do_s2io_read_unicast_mc(sp, i);
5159 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5160 break;
5162 if (tmp64 == mac_addr)
5163 return SUCCESS;
5165 if (i == config->max_mc_addr) {
5166 DBG_PRINT(ERR_DBG,
5167 "CAM full no space left for multicast MAC\n");
5168 return FAILURE;
5170 /* Update the internal structure with this new mac address */
5171 do_s2io_copy_mac_addr(sp, i, mac_addr);
5173 return (do_s2io_add_mac(sp, mac_addr, i));
5176 /* add MAC address to CAM */
5177 static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
5179 u64 val64;
5180 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5182 writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
5183 &bar0->rmac_addr_data0_mem);
5185 val64 =
5186 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5187 RMAC_ADDR_CMD_MEM_OFFSET(off);
5188 writeq(val64, &bar0->rmac_addr_cmd_mem);
5190 /* Wait till command completes */
5191 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5192 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5193 S2IO_BIT_RESET)) {
5194 DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
5195 return FAILURE;
5197 return SUCCESS;
5199 /* deletes a specified unicast/multicast mac entry from CAM */
5200 static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
5202 int offset;
5203 u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
5204 struct config_param *config = &sp->config;
5206 for (offset = 1;
5207 offset < config->max_mc_addr; offset++) {
5208 tmp64 = do_s2io_read_unicast_mc(sp, offset);
5209 if (tmp64 == addr) {
5210 /* disable the entry by writing 0xffffffffffffULL */
5211 if (do_s2io_add_mac(sp, dis_addr, offset) == FAILURE)
5212 return FAILURE;
5213 /* store the new mac list from CAM */
5214 do_s2io_store_unicast_mc(sp);
5215 return SUCCESS;
5218 DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
5219 (unsigned long long)addr);
5220 return FAILURE;
5223 /* read mac entries from CAM */
5224 static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
5226 u64 tmp64 = 0xffffffffffff0000ULL, val64;
5227 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5229 /* read mac addr */
5230 val64 =
5231 RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5232 RMAC_ADDR_CMD_MEM_OFFSET(offset);
5233 writeq(val64, &bar0->rmac_addr_cmd_mem);
5235 /* Wait till command completes */
5236 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5237 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5238 S2IO_BIT_RESET)) {
5239 DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
5240 return FAILURE;
5242 tmp64 = readq(&bar0->rmac_addr_data0_mem);
5243 return (tmp64 >> 16);
5247 * s2io_set_mac_addr driver entry point
5250 static int s2io_set_mac_addr(struct net_device *dev, void *p)
5252 struct sockaddr *addr = p;
5254 if (!is_valid_ether_addr(addr->sa_data))
5255 return -EINVAL;
5257 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5259 /* store the MAC address in CAM */
5260 return (do_s2io_prog_unicast(dev, dev->dev_addr));
/**
 * do_s2io_prog_unicast - Programs the Xframe mac address
 * @dev : pointer to the device structure.
 * @addr: a uchar pointer to the new mac address which is to be set.
 * Description : This procedure will program the Xframe to receive
 * frames with new Mac Address
 * Return value: SUCCESS on success and an appropriate (-)ve integer
 * as defined in errno.h file on failure.
 */
static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
{
	struct s2io_nic *sp = netdev_priv(dev);
	register u64 mac_addr = 0, perm_addr = 0;
	int i;
	u64 tmp64;
	struct config_param *config = &sp->config;

	/*
	 * Set the new MAC address as the new unicast filter and reflect this
	 * change on the device address registered with the OS. It will be
	 * at offset 0.
	 */
	/* Pack both the requested and the permanent address into u64s
	 * (big-endian byte order) so they can be compared directly. */
	for (i = 0; i < ETH_ALEN; i++) {
		mac_addr <<= 8;
		mac_addr |= addr[i];
		perm_addr <<= 8;
		perm_addr |= sp->def_mac_addr[0].mac_addr[i];
	}

	/* check if the dev_addr is different than perm_addr */
	if (mac_addr == perm_addr)
		return SUCCESS;

	/* check if the mac already preset in CAM; entry 0 is the
	 * permanent address, so the scan starts at 1 */
	for (i = 1; i < config->max_mac_addr; i++) {
		tmp64 = do_s2io_read_unicast_mc(sp, i);
		if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
			break;

		if (tmp64 == mac_addr) {
			DBG_PRINT(INFO_DBG,
			    "MAC addr:0x%llx already present in CAM\n",
			    (unsigned long long)mac_addr);
			return SUCCESS;
		}
	}
	if (i == config->max_mac_addr) {
		DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
		return FAILURE;
	}
	/* Update the internal structure with this new mac address */
	do_s2io_copy_mac_addr(sp, i, mac_addr);
	return (do_s2io_add_mac(sp, mac_addr, i));
}
/**
 * s2io_ethtool_sset - Sets different link parameters.
 * @dev : the network device whose private area holds the s2io_nic structure.
 * @info: pointer to the structure with parameters given by ethtool to set
 * link information.
 * Description:
 * The function sets different link parameters provided by the user onto
 * the NIC.
 * Return value:
 * 0 on success, -EINVAL for unsupported settings.
 */
static int s2io_ethtool_sset(struct net_device *dev,
			     struct ethtool_cmd *info)
{
	struct s2io_nic *sp = netdev_priv(dev);

	/* The hardware only runs at a fixed 10Gb/s full duplex with
	 * autonegotiation off; reject any other request. */
	if ((info->autoneg == AUTONEG_ENABLE) ||
	    (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
		return -EINVAL;
	else {
		/* Settings are acceptable (and already the only mode);
		 * bounce the interface to re-initialise the link. */
		s2io_close(sp->dev);
		s2io_open(sp->dev);
	}

	return 0;
}
5346 * s2io_ethtol_gset - Return link specific information.
5347 * @sp : private member of the device structure, pointer to the
5348 * s2io_nic structure.
5349 * @info : pointer to the structure with parameters given by ethtool
5350 * to return link information.
5351 * Description:
5352 * Returns link specific information like speed, duplex etc.. to ethtool.
5353 * Return value :
5354 * return 0 on success.
5357 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
5359 struct s2io_nic *sp = netdev_priv(dev);
5360 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5361 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5362 info->port = PORT_FIBRE;
5364 /* info->transceiver */
5365 info->transceiver = XCVR_EXTERNAL;
5367 if (netif_carrier_ok(sp->dev)) {
5368 info->speed = 10000;
5369 info->duplex = DUPLEX_FULL;
5370 } else {
5371 info->speed = -1;
5372 info->duplex = -1;
5375 info->autoneg = AUTONEG_DISABLE;
5376 return 0;
5380 * s2io_ethtool_gdrvinfo - Returns driver specific information.
5381 * @sp : private member of the device structure, which is a pointer to the
5382 * s2io_nic structure.
5383 * @info : pointer to the structure with parameters given by ethtool to
5384 * return driver information.
5385 * Description:
5386 * Returns driver specefic information like name, version etc.. to ethtool.
5387 * Return value:
5388 * void
5391 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5392 struct ethtool_drvinfo *info)
5394 struct s2io_nic *sp = netdev_priv(dev);
5396 strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
5397 strncpy(info->version, s2io_driver_version, sizeof(info->version));
5398 strncpy(info->fw_version, "", sizeof(info->fw_version));
5399 strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
5400 info->regdump_len = XENA_REG_SPACE;
5401 info->eedump_len = XENA_EEPROM_SPACE;
/**
 * s2io_ethtool_gregs - dumps the entire register space of Xframe into the buffer.
 * @dev : the network device whose private area holds the s2io_nic structure.
 * @regs : pointer to the structure with parameters given by ethtool for
 * dumping the registers.
 * @space: The input argument into which all the registers are dumped.
 * Description:
 * Dumps the entire register space of xFrame NIC into the user given
 * buffer area.
 * Return value :
 * void .
 */
static void s2io_ethtool_gregs(struct net_device *dev,
			       struct ethtool_regs *regs, void *space)
{
	int i;
	u64 reg;
	u8 *reg_space = (u8 *) space;
	struct s2io_nic *sp = netdev_priv(dev);

	regs->len = XENA_REG_SPACE;
	regs->version = sp->pdev->subsystem_device;

	/* Copy the whole BAR0 register window, 8 bytes at a time */
	for (i = 0; i < regs->len; i += 8) {
		reg = readq(sp->bar0 + i);
		memcpy((reg_space + i), &reg, 8);
	}
}
/**
 * s2io_phy_id - timer function that alternates adapter LED.
 * @data : address of the private member of the device structure, which
 * is a pointer to the s2io_nic structure, provided as an unsigned long.
 * Description: This is actually the timer function that alternates the
 * adapter LED bit of the adapter control bit to set/reset every time on
 * invocation. The timer is set for 1/2 a second, hence the NIC blinks
 * once every second.
 */
static void s2io_phy_id(unsigned long data)
{
	struct s2io_nic *sp = (struct s2io_nic *) data;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = 0;
	u16 subid;

	subid = sp->pdev->subsystem_device;
	/* Xframe II (and Xframe I boards with subsystem id >= 0x07)
	 * toggle a GPIO bit; older boards toggle the LED bit in
	 * adapter_control instead. */
	if ((sp->device_type == XFRAME_II_DEVICE) ||
	    ((subid & 0xFF) >= 0x07)) {
		val64 = readq(&bar0->gpio_control);
		val64 ^= GPIO_CTRL_GPIO_0;
		writeq(val64, &bar0->gpio_control);
	} else {
		val64 = readq(&bar0->adapter_control);
		val64 ^= ADAPTER_LED_ON;
		writeq(val64, &bar0->adapter_control);
	}

	/* Re-arm so the LED toggles again in half a second */
	mod_timer(&sp->id_timer, jiffies + HZ / 2);
}
/**
 * s2io_ethtool_idnic - To physically identify the nic on the system.
 * @dev : the network device whose private area holds the s2io_nic structure.
 * @data : blink duration in seconds (0 selects MAX_FLICKER_TIME).
 * Description: Used to physically identify the NIC on the system.
 * The Link LED will blink for a time specified by the user for
 * identification.
 * NOTE: The Link has to be Up to be able to blink the LED. Hence
 * identification is possible only if it's link is up.
 * Return value:
 * int , returns 0 on success, -EFAULT when the LED cannot be blinked.
 */
static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
{
	u64 val64 = 0, last_gpio_ctrl_val;
	struct s2io_nic *sp = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u16 subid;

	subid = sp->pdev->subsystem_device;
	/* Remember the GPIO state so it can be restored afterwards */
	last_gpio_ctrl_val = readq(&bar0->gpio_control);
	/* Old Xframe I boards drive the LED from adapter_control, which
	 * only works while the adapter is enabled */
	if ((sp->device_type == XFRAME_I_DEVICE) &&
	    ((subid & 0xFF) < 0x07)) {
		val64 = readq(&bar0->adapter_control);
		if (!(val64 & ADAPTER_CNTL_EN)) {
			printk(KERN_ERR
			       "Adapter Link down, cannot blink LED\n");
			return -EFAULT;
		}
	}
	/* Lazily set up the blink timer on first use */
	if (sp->id_timer.function == NULL) {
		init_timer(&sp->id_timer);
		sp->id_timer.function = s2io_phy_id;
		sp->id_timer.data = (unsigned long) sp;
	}
	mod_timer(&sp->id_timer, jiffies);
	/* Sleep while s2io_phy_id toggles the LED in timer context */
	if (data)
		msleep_interruptible(data * HZ);
	else
		msleep_interruptible(MAX_FLICKER_TIME);
	del_timer_sync(&sp->id_timer);

	/* Restore the original GPIO state on boards whose link LED is
	 * driven from gpio_control */
	if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
		writeq(last_gpio_ctrl_val, &bar0->gpio_control);
		last_gpio_ctrl_val = readq(&bar0->gpio_control);
	}

	return 0;
}
/**
 * s2io_ethtool_gringparam - report ring size limits and current configuration
 * @dev : the network device whose private area holds the s2io_nic structure.
 * @ering : ethtool ring parameter structure to be filled in.
 * Description:
 * Reports the maximum supported and the currently configured number of
 * Tx and Rx descriptors to ethtool.
 * Return value:
 * void
 */
static void s2io_ethtool_gringparam(struct net_device *dev,
				    struct ethtool_ringparam *ering)
{
	struct s2io_nic *sp = netdev_priv(dev);
	int i,tx_desc_count=0,rx_desc_count=0;

	/* The Rx descriptor ceiling depends on the buffer mode in use */
	if (sp->rxd_mode == RXD_MODE_1)
		ering->rx_max_pending = MAX_RX_DESC_1;
	else if (sp->rxd_mode == RXD_MODE_3B)
		ering->rx_max_pending = MAX_RX_DESC_2;

	ering->tx_max_pending = MAX_TX_DESC;
	/* Current Tx usage is the sum over all configured FIFOs */
	for (i = 0 ; i < sp->config.tx_fifo_num ; i++)
		tx_desc_count += sp->config.tx_cfg[i].fifo_len;

	DBG_PRINT(INFO_DBG,"\nmax txds : %d\n",sp->config.max_txds);
	ering->tx_pending = tx_desc_count;
	rx_desc_count = 0;
	/* Current Rx usage is the sum over all configured rings */
	for (i = 0 ; i < sp->config.rx_ring_num ; i++)
		rx_desc_count += sp->config.rx_cfg[i].num_rxd;

	ering->rx_pending = rx_desc_count;

	/* No mini ring; jumbo limits mirror the normal Rx limits */
	ering->rx_mini_max_pending = 0;
	ering->rx_mini_pending = 0;
	if(sp->rxd_mode == RXD_MODE_1)
		ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
	else if (sp->rxd_mode == RXD_MODE_3B)
		ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
	ering->rx_jumbo_pending = rx_desc_count;
}
/**
 * s2io_ethtool_getpause_data - Pause frame generation and reception.
 * @dev : the network device whose private area holds the s2io_nic structure.
 * @ep : pointer to the structure with pause parameters given by ethtool.
 * Description:
 * Returns the Pause frame generation and reception capability of the NIC.
 * Return value:
 * void
 */
static void s2io_ethtool_getpause_data(struct net_device *dev,
				       struct ethtool_pauseparam *ep)
{
	u64 val64;
	struct s2io_nic *sp = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	val64 = readq(&bar0->rmac_pause_cfg);
	/* Only sets the flags that are enabled; assumes *ep was
	 * zero-initialised by the ethtool core before this call —
	 * NOTE(review): confirm against the caller. */
	if (val64 & RMAC_PAUSE_GEN_ENABLE)
		ep->tx_pause = TRUE;
	if (val64 & RMAC_PAUSE_RX_ENABLE)
		ep->rx_pause = TRUE;
	ep->autoneg = FALSE;
}
5577 * s2io_ethtool_setpause_data - set/reset pause frame generation.
5578 * @sp : private member of the device structure, which is a pointer to the
5579 * s2io_nic structure.
5580 * @ep : pointer to the structure with pause parameters given by ethtool.
5581 * Description:
5582 * It can be used to set or reset Pause frame generation or reception
5583 * support of the NIC.
5584 * Return value:
5585 * int, returns 0 on Success
5588 static int s2io_ethtool_setpause_data(struct net_device *dev,
5589 struct ethtool_pauseparam *ep)
5591 u64 val64;
5592 struct s2io_nic *sp = netdev_priv(dev);
5593 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5595 val64 = readq(&bar0->rmac_pause_cfg);
5596 if (ep->tx_pause)
5597 val64 |= RMAC_PAUSE_GEN_ENABLE;
5598 else
5599 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5600 if (ep->rx_pause)
5601 val64 |= RMAC_PAUSE_RX_ENABLE;
5602 else
5603 val64 &= ~RMAC_PAUSE_RX_ENABLE;
5604 writeq(val64, &bar0->rmac_pause_cfg);
5605 return 0;
/**
 * read_eeprom - reads 4 bytes of data from user given offset.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @off : offset at which the data must be read
 * @data : Its an output parameter where the data read at the given
 * offset is stored.
 * Description:
 * Will read 4 bytes of data from the user given offset and return the
 * read data.
 * NOTE: Will allow to read only part of the EEPROM visible through the
 * I2C bus.
 * Return value:
 * -1 on failure and 0 on success.
 */
#define S2IO_DEV_ID		5
static int read_eeprom(struct s2io_nic * sp, int off, u64 * data)
{
	int ret = -1;
	u32 exit_cnt = 0;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	if (sp->device_type == XFRAME_I_DEVICE) {
		/* Xframe I: the EEPROM sits behind the I2C controller */
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
			I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
			I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		/* Poll up to 5 times (50ms apart) for command completion */
		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				*data = I2C_CONTROL_GET_DATA(val64);
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Xframe II: the EEPROM is reached via the SPI controller;
		 * the command is armed first, then kicked with SPI_CONTROL_REQ */
		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(0x3) |
			SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				ret = 1;
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				*data = readq(&bar0->spi_data);
				*data &= 0xffffff;	/* only 3 bytes are valid */
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}
/**
 * write_eeprom - actually writes the relevant part of the data value.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @off : offset at which the data must be written
 * @data : The data that is to be written
 * @cnt : Number of bytes of the data that are actually to be written into
 * the Eeprom. (max of 3)
 * Description:
 * Actually writes the relevant part of the data value into the Eeprom
 * through the I2C bus.
 * Return value:
 * 0 on success, -1 on failure.
 */
static int write_eeprom(struct s2io_nic * sp, int off, u64 data, int cnt)
{
	int exit_cnt = 0, ret = -1;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	if (sp->device_type == XFRAME_I_DEVICE) {
		/* Xframe I: write goes through the I2C controller */
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
			I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
			I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		/* Poll up to 5 times (50ms apart) for command completion */
		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				/* A NACK from the device means the write failed */
				if (!(val64 & I2C_CONTROL_NACK))
					ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Xframe II: write via SPI; a count of 8 bytes is encoded
		 * as a byte count field of 0 */
		int write_cnt = (cnt == 8) ? 0 : cnt;
		writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);

		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(write_cnt) |
			SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				ret = 1;
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}
5739 static void s2io_vpd_read(struct s2io_nic *nic)
5741 u8 *vpd_data;
5742 u8 data;
5743 int i=0, cnt, fail = 0;
5744 int vpd_addr = 0x80;
5746 if (nic->device_type == XFRAME_II_DEVICE) {
5747 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5748 vpd_addr = 0x80;
5750 else {
5751 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5752 vpd_addr = 0x50;
5754 strcpy(nic->serial_num, "NOT AVAILABLE");
5756 vpd_data = kmalloc(256, GFP_KERNEL);
5757 if (!vpd_data) {
5758 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
5759 return;
5761 nic->mac_control.stats_info->sw_stat.mem_allocated += 256;
5763 for (i = 0; i < 256; i +=4 ) {
5764 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5765 pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
5766 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
5767 for (cnt = 0; cnt <5; cnt++) {
5768 msleep(2);
5769 pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5770 if (data == 0x80)
5771 break;
5773 if (cnt >= 5) {
5774 DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5775 fail = 1;
5776 break;
5778 pci_read_config_dword(nic->pdev, (vpd_addr + 4),
5779 (u32 *)&vpd_data[i]);
5782 if(!fail) {
5783 /* read serial number of adapter */
5784 for (cnt = 0; cnt < 256; cnt++) {
5785 if ((vpd_data[cnt] == 'S') &&
5786 (vpd_data[cnt+1] == 'N') &&
5787 (vpd_data[cnt+2] < VPD_STRING_LEN)) {
5788 memset(nic->serial_num, 0, VPD_STRING_LEN);
5789 memcpy(nic->serial_num, &vpd_data[cnt + 3],
5790 vpd_data[cnt+2]);
5791 break;
5796 if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
5797 memset(nic->product_name, 0, vpd_data[1]);
5798 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
5800 kfree(vpd_data);
5801 nic->mac_control.stats_info->sw_stat.mem_freed += 256;
/**
 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
 * @dev : the network device whose private area holds the s2io_nic structure.
 * @eeprom : pointer to the user level structure provided by ethtool,
 * containing all relevant information.
 * @data_buf : user buffer that receives the EEPROM contents.
 * Description: Reads the values stored in the Eeprom at given offset
 * for a given length. Stores these values int the input argument data
 * buffer 'data_buf' and returns these to the caller (ethtool.)
 * Return value:
 * int 0 on success, -EFAULT when a word cannot be read.
 */
static int s2io_ethtool_geeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom, u8 * data_buf)
{
	u32 i, valid;
	u64 data;
	struct s2io_nic *sp = netdev_priv(dev);

	eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);

	/* Clamp the request to the visible EEPROM window */
	if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
		eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;

	/* Read 4 bytes per iteration; INV() adjusts the byte order */
	for (i = 0; i < eeprom->len; i += 4) {
		if (read_eeprom(sp, (eeprom->offset + i), &data)) {
			DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
			return -EFAULT;
		}
		valid = INV(data);
		memcpy((data_buf + i), &valid, 4);
	}
	return 0;
}
/**
 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
 * @dev : the network device whose private area holds the s2io_nic structure.
 * @eeprom : pointer to the user level structure provided by ethtool,
 * containing all relevant information.
 * @data_buf : user defined value to be written into Eeprom.
 * Description:
 * Tries to write the user provided value in the Eeprom, at the offset
 * given by the user.
 * Return value:
 * 0 on success, -EFAULT on failure.
 */
static int s2io_ethtool_seeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom,
				u8 * data_buf)
{
	int len = eeprom->len, cnt = 0;
	u64 valid = 0, data;
	struct s2io_nic *sp = netdev_priv(dev);

	/* The magic must match what geeprom reported for this device */
	if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
		DBG_PRINT(ERR_DBG,
			  "ETHTOOL_WRITE_EEPROM Err: Magic value ");
		DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
			  eeprom->magic);
		return -EFAULT;
	}

	/* Write one byte per iteration; a non-zero byte is positioned in
	 * the top byte of the 32-bit word handed to write_eeprom */
	while (len) {
		data = (u32) data_buf[cnt] & 0x000000FF;
		if (data) {
			valid = (u32) (data << 24);
		} else
			valid = data;

		if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
			DBG_PRINT(ERR_DBG,
				  "ETHTOOL_WRITE_EEPROM Err: Cannot ");
			DBG_PRINT(ERR_DBG,
				  "write into the specified offset\n");
			return -EFAULT;
		}
		cnt++;
		len--;
	}

	return 0;
}
/**
 * s2io_register_test - reads and writes into all clock domains.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @data : variable that returns the result of each of the test conducted
 * by the driver.
 * Description:
 * Read and write into all clock domains. The NIC has 3 clock domains,
 * see that registers in all the three regions are accessible.
 * Return value:
 * 0 on success, non-zero (and *data set) on any sub-test failure.
 */
static int s2io_register_test(struct s2io_nic * sp, uint64_t * data)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = 0, exp_val;
	int fail = 0;

	/* Read tests: registers with known fixed reset values */
	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x123456789abcdefULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
	}

	val64 = readq(&bar0->rmac_pause_cfg);
	if (val64 != 0xc000ffff00000000ULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
	}

	/* rx_queue_cfg reset value differs between Xframe I and II */
	val64 = readq(&bar0->rx_queue_cfg);
	if (sp->device_type == XFRAME_II_DEVICE)
		exp_val = 0x0404040404040404ULL;
	else
		exp_val = 0x0808080808080808ULL;
	if (val64 != exp_val) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
	}

	val64 = readq(&bar0->xgxs_efifo_cfg);
	if (val64 != 0x000000001923141EULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
	}

	/* Write/read-back tests with alternating bit patterns */
	val64 = 0x5A5A5A5A5A5A5A5AULL;
	writeq(val64, &bar0->xmsi_data);
	val64 = readq(&bar0->xmsi_data);
	if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
		fail = 1;
		DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
	}

	val64 = 0xA5A5A5A5A5A5A5A5ULL;
	writeq(val64, &bar0->xmsi_data);
	val64 = readq(&bar0->xmsi_data);
	if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
		fail = 1;
		DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
	}

	*data = fail;
	return fail;
}
/**
 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @data:variable that returns the result of each of the test conducted by
 * the driver.
 * Description:
 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
 * register.
 * Return value:
 * 0 on success, non-zero (and *data set) on any sub-test failure.
 */
static int s2io_eeprom_test(struct s2io_nic * sp, uint64_t * data)
{
	int fail = 0;
	u64 ret_data, org_4F0, org_7F0;
	u8 saved_4F0 = 0, saved_7F0 = 0;
	struct net_device *dev = sp->dev;

	/* Test Write Error at offset 0 */
	/* Note that SPI interface allows write access to all areas
	 * of EEPROM. Hence doing all negative testing only for Xframe I.
	 */
	if (sp->device_type == XFRAME_I_DEVICE)
		if (!write_eeprom(sp, 0, 0, 3))
			fail = 1;

	/* Save current values at offsets 0x4F0 and 0x7F0 so they can be
	 * restored after the destructive write tests below */
	if (!read_eeprom(sp, 0x4F0, &org_4F0))
		saved_4F0 = 1;
	if (!read_eeprom(sp, 0x7F0, &org_7F0))
		saved_7F0 = 1;

	/* Test Write at offset 4f0 */
	if (write_eeprom(sp, 0x4F0, 0x012345, 3))
		fail = 1;
	if (read_eeprom(sp, 0x4F0, &ret_data))
		fail = 1;

	if (ret_data != 0x012345) {
		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
			"Data written %llx Data read %llx\n",
			dev->name, (unsigned long long)0x12345,
			(unsigned long long)ret_data);
		fail = 1;
	}

	/* Reset the EEPROM data go FFFF */
	write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);

	/* Test Write Request Error at offset 0x7c */
	if (sp->device_type == XFRAME_I_DEVICE)
		if (!write_eeprom(sp, 0x07C, 0, 3))
			fail = 1;

	/* Test Write Request at offset 0x7f0 */
	if (write_eeprom(sp, 0x7F0, 0x012345, 3))
		fail = 1;
	if (read_eeprom(sp, 0x7F0, &ret_data))
		fail = 1;

	if (ret_data != 0x012345) {
		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
			"Data written %llx Data read %llx\n",
			dev->name, (unsigned long long)0x12345,
			(unsigned long long)ret_data);
		fail = 1;
	}

	/* Reset the EEPROM data go FFFF */
	write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);

	if (sp->device_type == XFRAME_I_DEVICE) {
		/* Negative tests: these offsets must reject writes on
		 * Xframe I; a successful write is a failure */

		/* Test Write Error at offset 0x80 */
		if (!write_eeprom(sp, 0x080, 0, 3))
			fail = 1;

		/* Test Write Error at offset 0xfc */
		if (!write_eeprom(sp, 0x0FC, 0, 3))
			fail = 1;

		/* Test Write Error at offset 0x100 */
		if (!write_eeprom(sp, 0x100, 0, 3))
			fail = 1;

		/* Test Write Error at offset 4ec */
		if (!write_eeprom(sp, 0x4EC, 0, 3))
			fail = 1;
	}

	/* Restore values at offsets 0x4F0 and 0x7F0 */
	if (saved_4F0)
		write_eeprom(sp, 0x4F0, org_4F0, 3);
	if (saved_7F0)
		write_eeprom(sp, 0x7F0, org_7F0, 3);

	*data = fail;
	return fail;
}
6060 * s2io_bist_test - invokes the MemBist test of the card .
6061 * @sp : private member of the device structure, which is a pointer to the
6062 * s2io_nic structure.
6063 * @data:variable that returns the result of each of the test conducted by
6064 * the driver.
6065 * Description:
6066 * This invokes the MemBist test of the card. We give around
6067 * 2 secs time for the Test to complete. If it's still not complete
6068 * within this peiod, we consider that the test failed.
6069 * Return value:
6070 * 0 on success and -1 on failure.
6073 static int s2io_bist_test(struct s2io_nic * sp, uint64_t * data)
6075 u8 bist = 0;
6076 int cnt = 0, ret = -1;
6078 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6079 bist |= PCI_BIST_START;
6080 pci_write_config_word(sp->pdev, PCI_BIST, bist);
6082 while (cnt < 20) {
6083 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6084 if (!(bist & PCI_BIST_START)) {
6085 *data = (bist & PCI_BIST_CODE_MASK);
6086 ret = 0;
6087 break;
6089 msleep(100);
6090 cnt++;
6093 return ret;
6097 * s2io-link_test - verifies the link state of the nic
6098 * @sp ; private member of the device structure, which is a pointer to the
6099 * s2io_nic structure.
6100 * @data: variable that returns the result of each of the test conducted by
6101 * the driver.
6102 * Description:
6103 * The function verifies the link state of the NIC and updates the input
6104 * argument 'data' appropriately.
6105 * Return value:
6106 * 0 on success.
6109 static int s2io_link_test(struct s2io_nic * sp, uint64_t * data)
6111 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6112 u64 val64;
6114 val64 = readq(&bar0->adapter_status);
6115 if(!(LINK_IS_UP(val64)))
6116 *data = 1;
6117 else
6118 *data = 0;
6120 return *data;
/**
 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
 * @sp - private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @data - variable that returns the result of each of the test
 * conducted by the driver.
 * Description:
 * This is one of the offline test that tests the read and write
 * access to the RldRam chip on the NIC.
 * Return value:
 * 0 on success, 1 on a pattern mismatch (also stored in *data).
 */
static int s2io_rldram_test(struct s2io_nic * sp, uint64_t * data)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	int cnt, iteration = 0, test_fail = 0;

	/* Disable ECC while the RLDRAM is driven in test mode */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	val64 = readq(&bar0->mc_rldram_test_ctrl);
	val64 |= MC_RLDRAM_TEST_MODE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

	/* Program the RLDRAM mode registers in two steps */
	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	val64 |= MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	/* Two iterations: the second repeats the test with the upper
	 * 48 bits of each pattern complemented */
	while (iteration < 2) {
		val64 = 0x55555555aaaa0000ULL;
		if (iteration == 1) {
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		}
		writeq(val64, &bar0->mc_rldram_test_d0);

		val64 = 0xaaaa5a5555550000ULL;
		if (iteration == 1) {
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		}
		writeq(val64, &bar0->mc_rldram_test_d1);

		val64 = 0x55aaaaaaaa5a0000ULL;
		if (iteration == 1) {
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		}
		writeq(val64, &bar0->mc_rldram_test_d2);

		val64 = (u64) (0x0000003ffffe0100ULL);
		writeq(val64, &bar0->mc_rldram_test_add);

		/* Start the write pass and poll for completion */
		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
			MC_RLDRAM_TEST_GO;
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)
				break;
			msleep(200);
		}

		if (cnt == 5)
			break;

		/* Start the read-back pass and poll for completion */
		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)
				break;
			msleep(500);
		}

		if (cnt == 5)
			break;

		val64 = readq(&bar0->mc_rldram_test_ctrl);
		if (!(val64 & MC_RLDRAM_TEST_PASS))
			test_fail = 1;

		iteration++;
	}

	*data = test_fail;

	/* Bring the adapter out of test mode */
	SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);

	return test_fail;
}
/**
 * s2io_ethtool_test - conducts 6 tests to determine the health of card.
 * @dev : the network device whose private area holds the s2io_nic structure.
 * @ethtest : pointer to a ethtool command specific structure that will be
 * returned to the user.
 * @data : variable that returns the result of each of the test
 * conducted by the driver.
 * Description:
 * This function conducts 6 tests ( 4 offline and 2 online) to determine
 * the health of the card.
 * Return value:
 * void
 */
static void s2io_ethtool_test(struct net_device *dev,
			      struct ethtool_test *ethtest,
			      uint64_t * data)
{
	struct s2io_nic *sp = netdev_priv(dev);
	int orig_state = netif_running(sp->dev);

	if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
		/* Offline Tests: the interface is taken down for the
		 * duration and restored afterwards. */
		if (orig_state)
			s2io_close(sp->dev);

		if (s2io_register_test(sp, &data[0]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		/* Reset between destructive tests */
		s2io_reset(sp);

		if (s2io_rldram_test(sp, &data[3]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		s2io_reset(sp);

		if (s2io_eeprom_test(sp, &data[1]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		if (s2io_bist_test(sp, &data[4]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		if (orig_state)
			s2io_open(sp->dev);

		/* Link test (slot 2) is not run offline */
		data[2] = 0;
	} else {
		/* Online Tests. */
		if (!orig_state) {
			DBG_PRINT(ERR_DBG,
				  "%s: is not up, cannot run test\n",
				  dev->name);
			data[0] = -1;
			data[1] = -1;
			data[2] = -1;
			data[3] = -1;
			data[4] = -1;
		}

		if (s2io_link_test(sp, &data[2]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		/* Offline-only slots report 0 in the online case */
		data[0] = 0;
		data[1] = 0;
		data[3] = 0;
		data[4] = 0;
	}
}
6291 static void s2io_get_ethtool_stats(struct net_device *dev,
6292 struct ethtool_stats *estats,
6293 u64 * tmp_stats)
6295 int i = 0, k;
6296 struct s2io_nic *sp = netdev_priv(dev);
6297 struct stat_block *stat_info = sp->mac_control.stats_info;
6299 s2io_updt_stats(sp);
6300 tmp_stats[i++] =
6301 (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32 |
6302 le32_to_cpu(stat_info->tmac_frms);
6303 tmp_stats[i++] =
6304 (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
6305 le32_to_cpu(stat_info->tmac_data_octets);
6306 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
6307 tmp_stats[i++] =
6308 (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
6309 le32_to_cpu(stat_info->tmac_mcst_frms);
6310 tmp_stats[i++] =
6311 (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
6312 le32_to_cpu(stat_info->tmac_bcst_frms);
6313 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
6314 tmp_stats[i++] =
6315 (u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 |
6316 le32_to_cpu(stat_info->tmac_ttl_octets);
6317 tmp_stats[i++] =
6318 (u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 |
6319 le32_to_cpu(stat_info->tmac_ucst_frms);
6320 tmp_stats[i++] =
6321 (u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 |
6322 le32_to_cpu(stat_info->tmac_nucst_frms);
6323 tmp_stats[i++] =
6324 (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
6325 le32_to_cpu(stat_info->tmac_any_err_frms);
6326 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets);
6327 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
6328 tmp_stats[i++] =
6329 (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
6330 le32_to_cpu(stat_info->tmac_vld_ip);
6331 tmp_stats[i++] =
6332 (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
6333 le32_to_cpu(stat_info->tmac_drop_ip);
6334 tmp_stats[i++] =
6335 (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
6336 le32_to_cpu(stat_info->tmac_icmp);
6337 tmp_stats[i++] =
6338 (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
6339 le32_to_cpu(stat_info->tmac_rst_tcp);
6340 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
6341 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
6342 le32_to_cpu(stat_info->tmac_udp);
6343 tmp_stats[i++] =
6344 (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
6345 le32_to_cpu(stat_info->rmac_vld_frms);
6346 tmp_stats[i++] =
6347 (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
6348 le32_to_cpu(stat_info->rmac_data_octets);
6349 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
6350 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
6351 tmp_stats[i++] =
6352 (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
6353 le32_to_cpu(stat_info->rmac_vld_mcst_frms);
6354 tmp_stats[i++] =
6355 (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
6356 le32_to_cpu(stat_info->rmac_vld_bcst_frms);
6357 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
6358 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms);
6359 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
6360 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
6361 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms);
6362 tmp_stats[i++] =
6363 (u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 |
6364 le32_to_cpu(stat_info->rmac_ttl_octets);
6365 tmp_stats[i++] =
6366 (u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow)
6367 << 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms);
6368 tmp_stats[i++] =
6369 (u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow)
6370 << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms);
6371 tmp_stats[i++] =
6372 (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
6373 le32_to_cpu(stat_info->rmac_discarded_frms);
6374 tmp_stats[i++] =
6375 (u64)le32_to_cpu(stat_info->rmac_drop_events_oflow)
6376 << 32 | le32_to_cpu(stat_info->rmac_drop_events);
6377 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets);
6378 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms);
6379 tmp_stats[i++] =
6380 (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
6381 le32_to_cpu(stat_info->rmac_usized_frms);
6382 tmp_stats[i++] =
6383 (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
6384 le32_to_cpu(stat_info->rmac_osized_frms);
6385 tmp_stats[i++] =
6386 (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
6387 le32_to_cpu(stat_info->rmac_frag_frms);
6388 tmp_stats[i++] =
6389 (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
6390 le32_to_cpu(stat_info->rmac_jabber_frms);
6391 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms);
6392 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms);
6393 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms);
6394 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms);
6395 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms);
6396 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms);
6397 tmp_stats[i++] =
6398 (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
6399 le32_to_cpu(stat_info->rmac_ip);
6400 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
6401 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
6402 tmp_stats[i++] =
6403 (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
6404 le32_to_cpu(stat_info->rmac_drop_ip);
6405 tmp_stats[i++] =
6406 (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
6407 le32_to_cpu(stat_info->rmac_icmp);
6408 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
6409 tmp_stats[i++] =
6410 (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
6411 le32_to_cpu(stat_info->rmac_udp);
6412 tmp_stats[i++] =
6413 (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
6414 le32_to_cpu(stat_info->rmac_err_drp_udp);
6415 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym);
6416 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0);
6417 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1);
6418 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2);
6419 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3);
6420 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4);
6421 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5);
6422 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6);
6423 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7);
6424 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0);
6425 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1);
6426 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2);
6427 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3);
6428 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4);
6429 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5);
6430 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6);
6431 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7);
6432 tmp_stats[i++] =
6433 (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
6434 le32_to_cpu(stat_info->rmac_pause_cnt);
6435 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt);
6436 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt);
6437 tmp_stats[i++] =
6438 (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
6439 le32_to_cpu(stat_info->rmac_accepted_ip);
6440 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
6441 tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt);
6442 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt);
6443 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt);
6444 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt);
6445 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt);
6446 tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt);
6447 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt);
6448 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt);
6449 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt);
6450 tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt);
6451 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt);
6452 tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt);
6453 tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt);
6454 tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt);
6455 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt);
6456 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
6457 tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
6458 tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);
6460 /* Enhanced statistics exist only for Hercules */
6461 if(sp->device_type == XFRAME_II_DEVICE) {
6462 tmp_stats[i++] =
6463 le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
6464 tmp_stats[i++] =
6465 le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
6466 tmp_stats[i++] =
6467 le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
6468 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
6469 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
6470 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
6471 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
6472 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
6473 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
6474 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
6475 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
6476 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
6477 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
6478 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
6479 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
6480 tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
6483 tmp_stats[i++] = 0;
6484 tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
6485 tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
6486 tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt;
6487 tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt;
6488 tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt;
6489 tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt;
6490 for (k = 0; k < MAX_RX_RINGS; k++)
6491 tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt[k];
6492 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high;
6493 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low;
6494 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high;
6495 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low;
6496 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high;
6497 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low;
6498 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high;
6499 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low;
6500 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high;
6501 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low;
6502 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high;
6503 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low;
6504 tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
6505 tmp_stats[i++] = stat_info->sw_stat.sending_both;
6506 tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
6507 tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
6508 if (stat_info->sw_stat.num_aggregations) {
6509 u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
6510 int count = 0;
6512 * Since 64-bit divide does not work on all platforms,
6513 * do repeated subtraction.
6515 while (tmp >= stat_info->sw_stat.num_aggregations) {
6516 tmp -= stat_info->sw_stat.num_aggregations;
6517 count++;
6519 tmp_stats[i++] = count;
6521 else
6522 tmp_stats[i++] = 0;
6523 tmp_stats[i++] = stat_info->sw_stat.mem_alloc_fail_cnt;
6524 tmp_stats[i++] = stat_info->sw_stat.pci_map_fail_cnt;
6525 tmp_stats[i++] = stat_info->sw_stat.watchdog_timer_cnt;
6526 tmp_stats[i++] = stat_info->sw_stat.mem_allocated;
6527 tmp_stats[i++] = stat_info->sw_stat.mem_freed;
6528 tmp_stats[i++] = stat_info->sw_stat.link_up_cnt;
6529 tmp_stats[i++] = stat_info->sw_stat.link_down_cnt;
6530 tmp_stats[i++] = stat_info->sw_stat.link_up_time;
6531 tmp_stats[i++] = stat_info->sw_stat.link_down_time;
6533 tmp_stats[i++] = stat_info->sw_stat.tx_buf_abort_cnt;
6534 tmp_stats[i++] = stat_info->sw_stat.tx_desc_abort_cnt;
6535 tmp_stats[i++] = stat_info->sw_stat.tx_parity_err_cnt;
6536 tmp_stats[i++] = stat_info->sw_stat.tx_link_loss_cnt;
6537 tmp_stats[i++] = stat_info->sw_stat.tx_list_proc_err_cnt;
6539 tmp_stats[i++] = stat_info->sw_stat.rx_parity_err_cnt;
6540 tmp_stats[i++] = stat_info->sw_stat.rx_abort_cnt;
6541 tmp_stats[i++] = stat_info->sw_stat.rx_parity_abort_cnt;
6542 tmp_stats[i++] = stat_info->sw_stat.rx_rda_fail_cnt;
6543 tmp_stats[i++] = stat_info->sw_stat.rx_unkn_prot_cnt;
6544 tmp_stats[i++] = stat_info->sw_stat.rx_fcs_err_cnt;
6545 tmp_stats[i++] = stat_info->sw_stat.rx_buf_size_err_cnt;
6546 tmp_stats[i++] = stat_info->sw_stat.rx_rxd_corrupt_cnt;
6547 tmp_stats[i++] = stat_info->sw_stat.rx_unkn_err_cnt;
6548 tmp_stats[i++] = stat_info->sw_stat.tda_err_cnt;
6549 tmp_stats[i++] = stat_info->sw_stat.pfc_err_cnt;
6550 tmp_stats[i++] = stat_info->sw_stat.pcc_err_cnt;
6551 tmp_stats[i++] = stat_info->sw_stat.tti_err_cnt;
6552 tmp_stats[i++] = stat_info->sw_stat.tpa_err_cnt;
6553 tmp_stats[i++] = stat_info->sw_stat.sm_err_cnt;
6554 tmp_stats[i++] = stat_info->sw_stat.lso_err_cnt;
6555 tmp_stats[i++] = stat_info->sw_stat.mac_tmac_err_cnt;
6556 tmp_stats[i++] = stat_info->sw_stat.mac_rmac_err_cnt;
6557 tmp_stats[i++] = stat_info->sw_stat.xgxs_txgxs_err_cnt;
6558 tmp_stats[i++] = stat_info->sw_stat.xgxs_rxgxs_err_cnt;
6559 tmp_stats[i++] = stat_info->sw_stat.rc_err_cnt;
6560 tmp_stats[i++] = stat_info->sw_stat.prc_pcix_err_cnt;
6561 tmp_stats[i++] = stat_info->sw_stat.rpa_err_cnt;
6562 tmp_stats[i++] = stat_info->sw_stat.rda_err_cnt;
6563 tmp_stats[i++] = stat_info->sw_stat.rti_err_cnt;
6564 tmp_stats[i++] = stat_info->sw_stat.mc_err_cnt;
6567 static int s2io_ethtool_get_regs_len(struct net_device *dev)
6569 return (XENA_REG_SPACE);
6573 static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
6575 struct s2io_nic *sp = netdev_priv(dev);
6577 return (sp->rx_csum);
6580 static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
6582 struct s2io_nic *sp = netdev_priv(dev);
6584 if (data)
6585 sp->rx_csum = 1;
6586 else
6587 sp->rx_csum = 0;
6589 return 0;
6592 static int s2io_get_eeprom_len(struct net_device *dev)
6594 return (XENA_EEPROM_SPACE);
6597 static int s2io_get_sset_count(struct net_device *dev, int sset)
6599 struct s2io_nic *sp = netdev_priv(dev);
6601 switch (sset) {
6602 case ETH_SS_TEST:
6603 return S2IO_TEST_LEN;
6604 case ETH_SS_STATS:
6605 switch(sp->device_type) {
6606 case XFRAME_I_DEVICE:
6607 return XFRAME_I_STAT_LEN;
6608 case XFRAME_II_DEVICE:
6609 return XFRAME_II_STAT_LEN;
6610 default:
6611 return 0;
6613 default:
6614 return -EOPNOTSUPP;
6618 static void s2io_ethtool_get_strings(struct net_device *dev,
6619 u32 stringset, u8 * data)
6621 int stat_size = 0;
6622 struct s2io_nic *sp = netdev_priv(dev);
6624 switch (stringset) {
6625 case ETH_SS_TEST:
6626 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6627 break;
6628 case ETH_SS_STATS:
6629 stat_size = sizeof(ethtool_xena_stats_keys);
6630 memcpy(data, &ethtool_xena_stats_keys,stat_size);
6631 if(sp->device_type == XFRAME_II_DEVICE) {
6632 memcpy(data + stat_size,
6633 &ethtool_enhanced_stats_keys,
6634 sizeof(ethtool_enhanced_stats_keys));
6635 stat_size += sizeof(ethtool_enhanced_stats_keys);
6638 memcpy(data + stat_size, &ethtool_driver_stats_keys,
6639 sizeof(ethtool_driver_stats_keys));
6643 static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
6645 if (data)
6646 dev->features |= NETIF_F_IP_CSUM;
6647 else
6648 dev->features &= ~NETIF_F_IP_CSUM;
6650 return 0;
6653 static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
6655 return (dev->features & NETIF_F_TSO) != 0;
6657 static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
6659 if (data)
6660 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
6661 else
6662 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
6664 return 0;
/*
 * ethtool callback table for this driver, hooked up to the net_device
 * in the probe path (outside this view).  Callbacks not listed here
 * fall back to the ethtool core defaults.
 */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_settings = s2io_ethtool_gset,
	.set_settings = s2io_ethtool_sset,
	.get_drvinfo = s2io_ethtool_gdrvinfo,
	.get_regs_len = s2io_ethtool_get_regs_len,
	.get_regs = s2io_ethtool_gregs,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = s2io_get_eeprom_len,
	.get_eeprom = s2io_ethtool_geeprom,
	.set_eeprom = s2io_ethtool_seeprom,
	.get_ringparam = s2io_ethtool_gringparam,
	.get_pauseparam = s2io_ethtool_getpause_data,
	.set_pauseparam = s2io_ethtool_setpause_data,
	.get_rx_csum = s2io_ethtool_get_rx_csum,
	.set_rx_csum = s2io_ethtool_set_rx_csum,
	.set_tx_csum = s2io_ethtool_op_set_tx_csum,
	.set_sg = ethtool_op_set_sg,
	.get_tso = s2io_ethtool_op_get_tso,
	.set_tso = s2io_ethtool_op_set_tso,
	.set_ufo = ethtool_op_set_ufo,
	.self_test = s2io_ethtool_test,
	.get_strings = s2io_ethtool_get_strings,
	.phys_id = s2io_ethtool_idnic,
	.get_ethtool_stats = s2io_get_ethtool_stats,
	.get_sset_count = s2io_get_sset_count,
};
6695 * s2io_ioctl - Entry point for the Ioctl
6696 * @dev : Device pointer.
6697 * @ifr : An IOCTL specefic structure, that can contain a pointer to
6698 * a proprietary structure used to pass information to the driver.
6699 * @cmd : This is used to distinguish between the different commands that
6700 * can be passed to the IOCTL functions.
6701 * Description:
6702 * Currently there are no special functionality supported in IOCTL, hence
6703 * function always return EOPNOTSUPPORTED
6706 static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6708 return -EOPNOTSUPP;
6712 * s2io_change_mtu - entry point to change MTU size for the device.
6713 * @dev : device pointer.
6714 * @new_mtu : the new MTU size for the device.
6715 * Description: A driver entry point to change MTU size for the device.
6716 * Before changing the MTU the device must be stopped.
6717 * Return value:
6718 * 0 on success and an appropriate (-)ve integer as defined in errno.h
6719 * file on failure.
6722 static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6724 struct s2io_nic *sp = netdev_priv(dev);
6725 int ret = 0;
6727 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
6728 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
6729 dev->name);
6730 return -EPERM;
6733 dev->mtu = new_mtu;
6734 if (netif_running(dev)) {
6735 s2io_stop_all_tx_queue(sp);
6736 s2io_card_down(sp);
6737 ret = s2io_card_up(sp);
6738 if (ret) {
6739 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6740 __func__);
6741 return ret;
6743 s2io_wake_all_tx_queue(sp);
6744 } else { /* Device is down */
6745 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6746 u64 val64 = new_mtu;
6748 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6751 return ret;
/**
 * s2io_set_link - Set the Link status
 * @work: work queue entry embedded in the device private structure
 * Description: Brings the adapter control/LED/GPIO registers and the
 * software link state in line with the current hardware link status.
 * Runs from the set_link_task workqueue under rtnl_lock; skipped if the
 * device is not running or a reset is already in progress.
 */

static void s2io_set_link(struct work_struct *work)
{
	struct s2io_nic *nic = container_of(work, struct s2io_nic, set_link_task);
	struct net_device *dev = nic->dev;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64;
	u16 subid;

	rtnl_lock();

	if (!netif_running(dev))
		goto out_unlock;

	if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
		/* The card is being reset, no point doing anything */
		goto out_unlock;
	}

	subid = nic->pdev->subsystem_device;
	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Allow a small delay for the NICs self initiated
		 * cleanup to complete.
		 */
		msleep(100);
	}

	val64 = readq(&bar0->adapter_status);
	if (LINK_IS_UP(val64)) {
		/* Enable the adapter once, the first time link comes up */
		if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
			if (verify_xena_quiescence(nic)) {
				val64 = readq(&bar0->adapter_control);
				val64 |= ADAPTER_CNTL_EN;
				writeq(val64, &bar0->adapter_control);
				if (CARDS_WITH_FAULTY_LINK_INDICATORS(
					nic->device_type, subid)) {
					/* Drive the link LED via GPIO on these
					 * subsystems; read-back presumably
					 * flushes the posted write.
					 */
					val64 = readq(&bar0->gpio_control);
					val64 |= GPIO_CTRL_GPIO_0;
					writeq(val64, &bar0->gpio_control);
					val64 = readq(&bar0->gpio_control);
				} else {
					val64 |= ADAPTER_LED_ON;
					writeq(val64, &bar0->adapter_control);
				}
				nic->device_enabled_once = TRUE;
			} else {
				DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
				DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
				s2io_stop_all_tx_queue(nic);
			}
		}
		val64 = readq(&bar0->adapter_control);
		val64 |= ADAPTER_LED_ON;
		writeq(val64, &bar0->adapter_control);
		s2io_link(nic, LINK_UP);
	} else {
		if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
						      subid)) {
			val64 = readq(&bar0->gpio_control);
			val64 &= ~GPIO_CTRL_GPIO_0;
			writeq(val64, &bar0->gpio_control);
			val64 = readq(&bar0->gpio_control);
		}
		/* turn off LED */
		val64 = readq(&bar0->adapter_control);
		val64 = val64 &(~ADAPTER_LED_ON);
		writeq(val64, &bar0->adapter_control);
		s2io_link(nic, LINK_DOWN);
	}
	clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));

out_unlock:
	rtnl_unlock();
}
/*
 * set_rxd_buffer_pointer - re-arm one Rx descriptor with DMA-mapped buffers.
 * @sp: device private structure.
 * @rxdp: the descriptor to re-arm.
 * @ba: pre-allocated buffer addresses for 3-buffer mode (unused in 1-buf mode).
 * @skb: in/out skb pointer shared across descriptors during the reset walk.
 * @temp0, @temp1, @temp2: in/out cached DMA addresses; descriptors whose
 *	Host_Control is NULL reuse the same mapping since no Rx frame will
 *	actually be processed on this path.
 * @size: skb allocation size for the data buffer.
 *
 * Helper for rxd_owner_bit_reset() on the card-down path.  Returns 0 on
 * success or -ENOMEM on allocation/DMA-mapping failure; partially created
 * mappings are unwound before returning.
 */
static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
				struct buffAdd *ba,
				struct sk_buff **skb, u64 *temp0, u64 *temp1,
				u64 *temp2, int size)
{
	struct net_device *dev = sp->dev;
	struct swStat *stats = &sp->mac_control.stats_info->sw_stat;

	if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
		struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
		/* allocate skb */
		if (*skb) {
			DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
			/*
			 * As Rx frame are not going to be processed,
			 * using same mapped address for the Rxd
			 * buffer pointer
			 */
			rxdp1->Buffer0_ptr = *temp0;
		} else {
			*skb = dev_alloc_skb(size);
			if (!(*skb)) {
				DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
				DBG_PRINT(INFO_DBG, "memory to allocate ");
				DBG_PRINT(INFO_DBG, "1 buf mode SKBs\n");
				sp->mac_control.stats_info->sw_stat. \
					mem_alloc_fail_cnt++;
				return -ENOMEM ;
			}
			sp->mac_control.stats_info->sw_stat.mem_allocated
				+= (*skb)->truesize;
			/* storing the mapped addr in a temp variable
			 * such it will be used for next rxd whose
			 * Host Control is NULL
			 */
			rxdp1->Buffer0_ptr = *temp0 =
				pci_map_single( sp->pdev, (*skb)->data,
					size - NET_IP_ALIGN,
					PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(sp->pdev, rxdp1->Buffer0_ptr))
				goto memalloc_failed;
			rxdp->Host_Control = (unsigned long) (*skb);
		}
	} else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
		struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
		/* Two buffer Mode */
		if (*skb) {
			/* Reuse the previously created mappings (see above) */
			rxdp3->Buffer2_ptr = *temp2;
			rxdp3->Buffer0_ptr = *temp0;
			rxdp3->Buffer1_ptr = *temp1;
		} else {
			*skb = dev_alloc_skb(size);
			if (!(*skb)) {
				DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
				DBG_PRINT(INFO_DBG, "memory to allocate ");
				DBG_PRINT(INFO_DBG, "2 buf mode SKBs\n");
				sp->mac_control.stats_info->sw_stat. \
					mem_alloc_fail_cnt++;
				return -ENOMEM;
			}
			sp->mac_control.stats_info->sw_stat.mem_allocated
				+= (*skb)->truesize;
			/* Buffer-2 carries the frame payload (MTU + 4) */
			rxdp3->Buffer2_ptr = *temp2 =
				pci_map_single(sp->pdev, (*skb)->data,
					       dev->mtu + 4,
					       PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(sp->pdev, rxdp3->Buffer2_ptr))
				goto memalloc_failed;
			/* Buffer-0 holds the header area; unwind Buffer-2 on
			 * mapping failure before bailing out.
			 */
			rxdp3->Buffer0_ptr = *temp0 =
				pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
						PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(sp->pdev,
						rxdp3->Buffer0_ptr)) {
				pci_unmap_single (sp->pdev,
					(dma_addr_t)rxdp3->Buffer2_ptr,
					dev->mtu + 4, PCI_DMA_FROMDEVICE);
				goto memalloc_failed;
			}
			rxdp->Host_Control = (unsigned long) (*skb);

			/* Buffer-1 will be dummy buffer not used */
			rxdp3->Buffer1_ptr = *temp1 =
				pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
					       PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(sp->pdev,
						rxdp3->Buffer1_ptr)) {
				/* Unwind both earlier mappings */
				pci_unmap_single (sp->pdev,
					(dma_addr_t)rxdp3->Buffer0_ptr,
					BUF0_LEN, PCI_DMA_FROMDEVICE);
				pci_unmap_single (sp->pdev,
					(dma_addr_t)rxdp3->Buffer2_ptr,
					dev->mtu + 4, PCI_DMA_FROMDEVICE);
				goto memalloc_failed;
			}
		}
	}
	return 0;
memalloc_failed:
	/* Account the failure and release the skb we allocated above */
	stats->pci_map_fail_cnt++;
	stats->mem_freed += (*skb)->truesize;
	dev_kfree_skb(*skb);
	return -ENOMEM;
}
6939 static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6940 int size)
6942 struct net_device *dev = sp->dev;
6943 if (sp->rxd_mode == RXD_MODE_1) {
6944 rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN);
6945 } else if (sp->rxd_mode == RXD_MODE_3B) {
6946 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6947 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6948 rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
/*
 * rxd_owner_bit_reset - hand every Rx descriptor back to the hardware.
 * @sp: device private structure.
 *
 * Walks all Rx rings/blocks/descriptors, re-attaches (possibly shared)
 * DMA buffers, programs the buffer sizes and flips the ownership bit to
 * the NIC.  Used on the card-down path to satisfy the hardware's buffer
 * replenishment requirement; the frames will never be processed.
 * Always returns 0 (an -ENOMEM from the helper just stops the walk).
 */
static int rxd_owner_bit_reset(struct s2io_nic *sp)
{
	int i, j, k, blk_cnt = 0, size;
	struct mac_info * mac_control = &sp->mac_control;
	struct config_param *config = &sp->config;
	struct net_device *dev = sp->dev;
	struct RxD_t *rxdp = NULL;
	struct sk_buff *skb = NULL;
	struct buffAdd *ba = NULL;
	u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;

	/* Calculate the size based on ring mode */
	size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
		HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
	if (sp->rxd_mode == RXD_MODE_1)
		size += NET_IP_ALIGN;
	else if (sp->rxd_mode == RXD_MODE_3B)
		size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;

	for (i = 0; i < config->rx_ring_num; i++) {
		/* +1 accounts for the block's next-pointer descriptor */
		blk_cnt = config->rx_cfg[i].num_rxd /
			(rxd_count[sp->rxd_mode] +1);

		for (j = 0; j < blk_cnt; j++) {
			for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
				rxdp = mac_control->rings[i].
					rx_blocks[j].rxds[k].virt_addr;
				if(sp->rxd_mode == RXD_MODE_3B)
					ba = &mac_control->rings[i].ba[j][k];
				if (set_rxd_buffer_pointer(sp, rxdp, ba,
						&skb,(u64 *)&temp0_64,
						(u64 *)&temp1_64,
						(u64 *)&temp2_64,
						size) == -ENOMEM) {
					return 0;
				}

				set_rxd_buffer_size(sp, rxdp, size);
				/* Descriptor fields must be visible to the
				 * device before ownership is transferred.
				 */
				wmb();
				/* flip the Ownership bit to Hardware */
				rxdp->Control_1 |= RXD_OWN_XENA;
			}
		}
	}
	return 0;
}
/*
 * s2io_add_isr - enable MSI-X (if configured) and register interrupt handlers.
 * @sp: device private structure.
 *
 * If MSI-X enabling or any per-vector request_irq() fails, any vectors
 * registered so far are released and the driver falls back to legacy
 * INTA with a single shared handler.
 * Returns 0 on success, -1 if even the INTA registration fails.
 */
static int s2io_add_isr(struct s2io_nic * sp)
{
	int ret = 0;
	struct net_device *dev = sp->dev;
	int err = 0;

	if (sp->config.intr_type == MSI_X)
		ret = s2io_enable_msi_x(sp);
	if (ret) {
		DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
		sp->config.intr_type = INTA;
	}

	/* Store the values of the MSIX table in the struct s2io_nic structure */
	store_xmsi_data(sp);

	/* After proper initialization of H/W, register ISR */
	if (sp->config.intr_type == MSI_X) {
		int i, msix_rx_cnt = 0;

		for (i = 0; i < sp->num_entries; i++) {
			if (sp->s2io_entries[i].in_use == MSIX_FLG) {
				if (sp->s2io_entries[i].type ==
					MSIX_RING_TYPE) {
					sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
						dev->name, i);
					err = request_irq(sp->entries[i].vector,
						s2io_msix_ring_handle, 0,
						sp->desc[i],
						sp->s2io_entries[i].arg);
				} else if (sp->s2io_entries[i].type ==
					MSIX_ALARM_TYPE) {
					sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
						dev->name, i);
					err = request_irq(sp->entries[i].vector,
						s2io_msix_fifo_handle, 0,
						sp->desc[i],
						sp->s2io_entries[i].arg);
				}
				/* if either data or addr is zero print it. */
				if (!(sp->msix_info[i].addr &&
					sp->msix_info[i].data)) {
					DBG_PRINT(ERR_DBG,
						"%s @Addr:0x%llx Data:0x%llx\n",
						sp->desc[i],
						(unsigned long long)
						sp->msix_info[i].addr,
						(unsigned long long)
						ntohl(sp->msix_info[i].data));
				} else
					msix_rx_cnt++;
				if (err) {
					/* Release already-registered vectors
					 * and fall back to legacy INTA.
					 */
					remove_msix_isr(sp);

					DBG_PRINT(ERR_DBG,
						"%s:MSI-X-%d registration "
						"failed\n", dev->name, i);

					DBG_PRINT(ERR_DBG,
						"%s: Defaulting to INTA\n",
						dev->name);
					sp->config.intr_type = INTA;
					break;
				}
				sp->s2io_entries[i].in_use =
					MSIX_REGISTERED_SUCCESS;
			}
		}
		if (!err) {
			/* --msix_rx_cnt: the alarm vector was counted too */
			printk(KERN_INFO "MSI-X-RX %d entries enabled\n",
				--msix_rx_cnt);
			DBG_PRINT(INFO_DBG, "MSI-X-TX entries enabled"
				" through alarm vector\n");
		}
	}
	if (sp->config.intr_type == INTA) {
		err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
				sp->name, dev);
		if (err) {
			DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
				dev->name);
			return -1;
		}
	}
	return 0;
}
7087 static void s2io_rem_isr(struct s2io_nic * sp)
7089 if (sp->config.intr_type == MSI_X)
7090 remove_msix_isr(sp);
7091 else
7092 remove_inta_isr(sp);
/*
 * do_s2io_card_down - common teardown path for the adapter.
 * @sp: device private structure.
 * @do_io: non-zero when register I/O is still safe (normal close path);
 *	zero when the device may be inaccessible (e.g. error recovery),
 *	in which case the NIC stop/reset register accesses are skipped.
 *
 * Stops the alarm timer, waits out a concurrent link task, disables
 * NAPI and traffic, unregisters the ISR(s), waits for quiescence and
 * finally resets the NIC and frees all Tx/Rx buffers.
 */
static void do_s2io_card_down(struct s2io_nic * sp, int do_io)
{
	int cnt = 0;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	register u64 val64 = 0;
	struct config_param *config;
	config = &sp->config;

	if (!is_s2io_card_up(sp))
		return;

	del_timer_sync(&sp->alarm_timer);
	/* If s2io_set_link task is executing, wait till it completes. */
	while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state))) {
		msleep(50);
	}
	clear_bit(__S2IO_STATE_CARD_UP, &sp->state);

	/* Disable napi */
	if (sp->config.napi) {
		int off = 0;
		if (config->intr_type ==  MSI_X) {
			/* One NAPI context per Rx ring in MSI-X mode */
			for (; off < sp->config.rx_ring_num; off++)
				napi_disable(&sp->mac_control.rings[off].napi);
		}
		else
			napi_disable(&sp->napi);
	}

	/* disable Tx and Rx traffic on the NIC */
	if (do_io)
		stop_nic(sp);

	s2io_rem_isr(sp);

	/* stop the tx queue, indicate link down */
	s2io_link(sp, LINK_DOWN);

	/* Check if the device is Quiescent and then Reset the NIC */
	while(do_io) {
		/* As per the HW requirement we need to replenish the
		 * receive buffer to avoid the ring bump. Since there is
		 * no intention of processing the Rx frame at this pointwe are
		 * just settting the ownership bit of rxd in Each Rx
		 * ring to HW and set the appropriate buffer size
		 * based on the ring mode
		 */
		rxd_owner_bit_reset(sp);

		val64 = readq(&bar0->adapter_status);
		if (verify_xena_quiescence(sp)) {
			if(verify_pcc_quiescent(sp, sp->device_enabled_once))
			break;
		}

		/* Poll roughly every 50 ms, give up after 10 tries */
		msleep(50);
		cnt++;
		if (cnt == 10) {
			DBG_PRINT(ERR_DBG,
				"s2io_close:Device not Quiescent ");
			DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
				  (unsigned long long) val64);
			break;
		}
	}
	if (do_io)
		s2io_reset(sp);

	/* Free all Tx buffers */
	free_tx_buffers(sp);

	/* Free all Rx buffers */
	free_rx_buffers(sp);

	clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
}
/*
 * s2io_card_down - bring the adapter down with full register I/O
 * (the normal close/restart path); thin wrapper over do_s2io_card_down().
 * @sp: device private structure.
 */
static void s2io_card_down(struct s2io_nic * sp)
{
	do_s2io_card_down(sp, 1);
}
/*
 * s2io_card_up - bring the adapter fully up.
 * @sp: device private structure.
 *
 * Initializes the H/W registers, fills all Rx rings, enables NAPI,
 * restores the receive mode, starts the NIC, registers the ISR(s),
 * arms the alarm timer and enables interrupts.  Each failure path
 * unwinds whatever was set up before it.
 * Returns 0 on success, a negative errno on failure.
 */
static int s2io_card_up(struct s2io_nic * sp)
{
	int i, ret = 0;
	struct mac_info *mac_control;
	struct config_param *config;
	struct net_device *dev = (struct net_device *) sp->dev;
	u16 interruptible;

	/* Initialize the H/W I/O registers */
	ret = init_nic(sp);
	if (ret != 0) {
		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
			  dev->name);
		/* -EIO means the device is unreachable; skip the reset */
		if (ret != -EIO)
			s2io_reset(sp);
		return ret;
	}

	/*
	 * Initializing the Rx buffers. For now we are considering only 1
	 * Rx ring and initializing buffers into 30 Rx blocks
	 */
	mac_control = &sp->mac_control;
	config = &sp->config;

	for (i = 0; i < config->rx_ring_num; i++) {
		mac_control->rings[i].mtu = dev->mtu;
		ret = fill_rx_buffers(sp, &mac_control->rings[i], 1);
		if (ret) {
			DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
				  dev->name);
			s2io_reset(sp);
			free_rx_buffers(sp);
			return -ENOMEM;
		}
		DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
			  mac_control->rings[i].rx_bufs_left);
	}

	/* Initialise napi */
	if (config->napi) {
		int i;
		if (config->intr_type ==  MSI_X) {
			/* One NAPI context per Rx ring in MSI-X mode */
			for (i = 0; i < sp->config.rx_ring_num; i++)
				napi_enable(&sp->mac_control.rings[i].napi);
		} else {
			napi_enable(&sp->napi);
		}
	}

	/* Maintain the state prior to the open */
	if (sp->promisc_flg)
		sp->promisc_flg = 0;
	if (sp->m_cast_flg) {
		sp->m_cast_flg = 0;
		sp->all_multi_pos= 0;
	}

	/* Setting its receive mode */
	s2io_set_multicast(dev);

	if (sp->lro) {
		/* Initialize max aggregatable pkts per session based on MTU */
		sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
		/* Check if we can use(if specified) user provided value */
		if (lro_max_pkts < sp->lro_max_aggr_per_sess)
			sp->lro_max_aggr_per_sess = lro_max_pkts;
	}

	/* Enable Rx Traffic and interrupts on the NIC */
	if (start_nic(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
		s2io_reset(sp);
		free_rx_buffers(sp);
		return -ENODEV;
	}

	/* Add interrupt service routine */
	if (s2io_add_isr(sp) != 0) {
		if (sp->config.intr_type == MSI_X)
			s2io_rem_isr(sp);
		s2io_reset(sp);
		free_rx_buffers(sp);
		return -ENODEV;
	}

	/* Fire the alarm handler every half second */
	S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));

	set_bit(__S2IO_STATE_CARD_UP, &sp->state);

	/* Enable select interrupts */
	en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
	if (sp->config.intr_type != INTA) {
		/* MSI-X: Rx traffic interrupts come through per-ring vectors */
		interruptible = TX_TRAFFIC_INTR | TX_PIC_INTR;
		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
	} else {
		interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
		interruptible |= TX_PIC_INTR;
		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
	}

	return 0;
}
/**
 * s2io_restart_nic - Resets the NIC.
 * @work : work queue entry embedded in the device private structure
 * Description:
 * This function is scheduled to be run by the s2io_tx_watchdog
 * function after 0.5 secs to reset the NIC. The idea is to reduce
 * the run time of the watch dog routine which is run holding a
 * spin lock.
 */

static void s2io_restart_nic(struct work_struct *work)
{
	struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
	struct net_device *dev = sp->dev;

	rtnl_lock();

	if (!netif_running(dev))
		goto out_unlock;

	s2io_card_down(sp);
	if (s2io_card_up(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
			  dev->name);
	}
	/* Queues are woken even on bring-up failure (matches upstream) */
	s2io_wake_all_tx_queue(sp);
	DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
		  dev->name);
out_unlock:
	rtnl_unlock();
}
/**
 * s2io_tx_watchdog - Watchdog for transmit side.
 * @dev : Pointer to net device structure
 * Description:
 * This function is triggered if the Tx Queue is stopped
 * for a pre-defined amount of time when the Interface is still up.
 * If the Interface is jammed in such a situation, the hardware is
 * reset (by s2io_close) and restarted again (by s2io_open) to
 * overcome any problem that might have been caused in the hardware.
 * Return value:
 * void
 */
static void s2io_tx_watchdog(struct net_device *dev)
{
	struct s2io_nic *sp = netdev_priv(dev);

	/* Only reset when the link is up; the actual reset is deferred to
	 * s2io_restart_nic via the rst_timer_task work item so this
	 * (lock-holding) path stays short. */
	if (netif_carrier_ok(dev)) {
		sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt++;
		schedule_work(&sp->rst_timer_task);
		sp->mac_control.stats_info->sw_stat.soft_reset_cnt++;
	}
}
/**
 * rx_osm_handler - To perform some OS related operations on SKB.
 * @ring_data: per-ring private data; identifies the ring this RxD belongs to.
 * @rxdp: the completed Rx descriptor (holds the skb in Host_Control).
 * Description:
 * This function is called by the Rx interrupt service routine to perform
 * some OS related operations on the SKB before passing it to the upper
 * layers. It checks the hardware transfer code, updates statistics, sets
 * the checksum state and either hands the frame to the LRO engine or
 * queues it to the upper layer. On a bad transfer code it increments the
 * Rx error counters and frees the SKB.
 * Return value:
 * SUCCESS on the normal path, 0 when the packet was dropped on error.
 */
static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
{
	struct s2io_nic *sp = ring_data->nic;
	struct net_device *dev = (struct net_device *) ring_data->dev;
	/* The skb pointer was stashed in Host_Control when the buffer was
	 * posted to the ring. */
	struct sk_buff *skb = (struct sk_buff *)
		((unsigned long) rxdp->Host_Control);
	int ring_no = ring_data->ring_no;
	u16 l3_csum, l4_csum;
	unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
	struct lro *uninitialized_var(lro);
	u8 err_mask;

	skb->dev = dev;

	if (err) {
		/* Check for parity error */
		if (err & 0x1) {
			sp->mac_control.stats_info->sw_stat.parity_err_cnt++;
		}
		/* Transfer code field: bump the matching per-cause counter. */
		err_mask = err >> 48;
		switch(err_mask) {
		case 1:
			sp->mac_control.stats_info->sw_stat.
				rx_parity_err_cnt++;
			break;

		case 2:
			sp->mac_control.stats_info->sw_stat.
				rx_abort_cnt++;
			break;

		case 3:
			sp->mac_control.stats_info->sw_stat.
				rx_parity_abort_cnt++;
			break;

		case 4:
			sp->mac_control.stats_info->sw_stat.
				rx_rda_fail_cnt++;
			break;

		case 5:
			sp->mac_control.stats_info->sw_stat.
				rx_unkn_prot_cnt++;
			break;

		case 6:
			sp->mac_control.stats_info->sw_stat.
				rx_fcs_err_cnt++;
			break;

		case 7:
			sp->mac_control.stats_info->sw_stat.
				rx_buf_size_err_cnt++;
			break;

		case 8:
			sp->mac_control.stats_info->sw_stat.
				rx_rxd_corrupt_cnt++;
			break;

		case 15:
			sp->mac_control.stats_info->sw_stat.
				rx_unkn_err_cnt++;
			break;
		}
		/*
		 * Drop the packet if bad transfer code. Exception being
		 * 0x5, which could be due to unsupported IPv6 extension header.
		 * In this case, we let stack handle the packet.
		 * Note that in this case, since checksum will be incorrect,
		 * stack will validate the same.
		 */
		if (err_mask != 0x5) {
			DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
				  dev->name, err_mask);
			dev->stats.rx_crc_errors++;
			sp->mac_control.stats_info->sw_stat.mem_freed
				+= skb->truesize;
			dev_kfree_skb(skb);
			ring_data->rx_bufs_left -= 1;
			rxdp->Host_Control = 0;
			return 0;
		}
	}

	/* Updating statistics */
	ring_data->rx_packets++;
	rxdp->Host_Control = 0;
	if (sp->rxd_mode == RXD_MODE_1) {
		/* 1-buffer mode: the whole frame is in buffer 0. */
		int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);

		ring_data->rx_bytes += len;
		skb_put(skb, len);

	} else if (sp->rxd_mode == RXD_MODE_3B) {
		/* 2-buffer mode: header (buffer 0) is copied in front of
		 * the payload (buffer 2) already in the skb. */
		int get_block = ring_data->rx_curr_get_info.block_index;
		int get_off = ring_data->rx_curr_get_info.offset;
		int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
		int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
		unsigned char *buff = skb_push(skb, buf0_len);

		struct buffAdd *ba = &ring_data->ba[get_block][get_off];
		ring_data->rx_bytes += buf0_len + buf2_len;
		memcpy(buff, ba->ba_0, buf0_len);
		skb_put(skb, buf2_len);
	}

	/* Checksum/LRO handling only for non-fragmented TCP/UDP frames when
	 * Rx checksum offload is enabled. */
	if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!ring_data->lro) ||
	    (ring_data->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
	    (sp->rx_csum)) {
		l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
		l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
		if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
			/*
			 * NIC verifies if the Checksum of the received
			 * frame is Ok or not and accordingly returns
			 * a flag in the RxD.
			 */
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			if (ring_data->lro) {
				u32 tcp_len;
				u8 *tcp;
				int ret = 0;

				/* Let the LRO engine classify this frame
				 * against existing sessions. */
				ret = s2io_club_tcp_session(ring_data,
					skb->data, &tcp, &tcp_len, &lro,
					rxdp, sp);
				switch (ret) {
				case 3: /* Begin anew */
					lro->parent = skb;
					goto aggregate;
				case 1: /* Aggregate */
					lro_append_pkt(sp, lro,
						skb, tcp_len);
					goto aggregate;
				case 4: /* Flush session */
					lro_append_pkt(sp, lro,
						skb, tcp_len);
					queue_rx_frame(lro->parent,
						lro->vlan_tag);
					clear_lro_session(lro);
					sp->mac_control.stats_info->
						sw_stat.flush_max_pkts++;
					goto aggregate;
				case 2: /* Flush both */
					lro->parent->data_len =
						lro->frags_len;
					sp->mac_control.stats_info->
						sw_stat.sending_both++;
					queue_rx_frame(lro->parent,
						lro->vlan_tag);
					clear_lro_session(lro);
					goto send_up;
				case 0: /* sessions exceeded */
				case -1: /* non-TCP or not
					  * L2 aggregatable
					  */
				case 5: /*
					 * First pkt in session not
					 * L3/L4 aggregatable
					 */
					break;
				default:
					DBG_PRINT(ERR_DBG,
						"%s: Samadhana!!\n",
						__func__);
					BUG();
				}
			}
		} else {
			/*
			 * Packet with erroneous checksum, let the
			 * upper layers deal with it.
			 */
			skb->ip_summed = CHECKSUM_NONE;
		}
	} else
		skb->ip_summed = CHECKSUM_NONE;

	sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
send_up:
	queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
aggregate:
	sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
	return SUCCESS;
}
/**
 * s2io_link - stops/starts the Tx queue.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @link : indicates whether link is UP/DOWN.
 * Description:
 * This function stops/starts the Tx queue depending on whether the link
 * status of the NIC is down or up. This is called by the Alarm
 * interrupt handler whenever a link change interrupt comes up.
 * Return value:
 * void.
 */
static void s2io_link(struct s2io_nic * sp, int link)
{
	struct net_device *dev = (struct net_device *) sp->dev;

	/* Act only on actual transitions; repeated notifications for the
	 * same state are ignored (only the timestamp below is refreshed). */
	if (link != sp->last_link_state) {
		init_tti(sp, link);
		if (link == LINK_DOWN) {
			DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
			s2io_stop_all_tx_queue(sp);
			netif_carrier_off(dev);
			/* Account the time spent in the previous (up) state. */
			if(sp->mac_control.stats_info->sw_stat.link_up_cnt)
				sp->mac_control.stats_info->sw_stat.link_up_time =
					jiffies - sp->start_time;
			sp->mac_control.stats_info->sw_stat.link_down_cnt++;
		} else {
			DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
			/* Account the time spent in the previous (down) state. */
			if (sp->mac_control.stats_info->sw_stat.link_down_cnt)
				sp->mac_control.stats_info->sw_stat.link_down_time =
					jiffies - sp->start_time;
			sp->mac_control.stats_info->sw_stat.link_up_cnt++;
			netif_carrier_on(dev);
			s2io_wake_all_tx_queue(sp);
		}
	}
	sp->last_link_state = link;
	sp->start_time = jiffies;
}
/**
 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * Description:
 * This function initializes a few of the PCI and PCI-X configuration registers
 * with recommended values.
 * Return value:
 * void
 */
static void s2io_init_pci(struct s2io_nic * sp)
{
	u16 pci_cmd = 0, pcix_cmd = 0;

	/* Enable Data Parity Error Recovery in PCI-X command register. */
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
			     &(pcix_cmd));
	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
			      (pcix_cmd | 1));
	/* NOTE(review): the trailing reads below discard their result —
	 * presumably just a read-back of the updated registers; confirm. */
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
			     &(pcix_cmd));

	/* Set the PErr Response bit in PCI command register. */
	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(sp->pdev, PCI_COMMAND,
			      (pci_cmd | PCI_COMMAND_PARITY));
	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
}
/*
 * s2io_verify_parm - sanity-check the module load parameters.
 * @pdev: PCI device being probed (used to reject MSI-X on Xframe I).
 * @dev_intr_type: in/out interrupt type; downgraded to INTA when invalid
 *	or not supported by this device.
 * @dev_multiq: set when multiqueue operation was requested.
 * Out-of-range parameters are clamped/defaulted with a diagnostic rather
 * than failing the probe; returns SUCCESS.
 */
static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
			    u8 *dev_multiq)
{
	/* Clamp the requested Tx FIFO count into [1, MAX_TX_FIFOS]. */
	if ((tx_fifo_num > MAX_TX_FIFOS) ||
	    (tx_fifo_num < 1)) {
		DBG_PRINT(ERR_DBG, "s2io: Requested number of tx fifos "
			  "(%d) not supported\n", tx_fifo_num);

		if (tx_fifo_num < 1)
			tx_fifo_num = 1;
		else
			tx_fifo_num = MAX_TX_FIFOS;

		DBG_PRINT(ERR_DBG, "s2io: Default to %d ", tx_fifo_num);
		DBG_PRINT(ERR_DBG, "tx fifos\n");
	}

	if (multiq)
		*dev_multiq = multiq;

	/* Steering is meaningless with a single FIFO. */
	if (tx_steering_type && (1 == tx_fifo_num)) {
		if (tx_steering_type != TX_DEFAULT_STEERING)
			DBG_PRINT(ERR_DBG,
				"s2io: Tx steering is not supported with "
				"one fifo. Disabling Tx steering.\n");
		tx_steering_type = NO_STEERING;
	}

	if ((tx_steering_type < NO_STEERING) ||
	    (tx_steering_type > TX_DEFAULT_STEERING)) {
		DBG_PRINT(ERR_DBG, "s2io: Requested transmit steering not "
			  "supported\n");
		DBG_PRINT(ERR_DBG, "s2io: Disabling transmit steering\n");
		tx_steering_type = NO_STEERING;
	}

	if (rx_ring_num > MAX_RX_RINGS) {
		DBG_PRINT(ERR_DBG, "s2io: Requested number of rx rings not "
			  "supported\n");
		DBG_PRINT(ERR_DBG, "s2io: Default to %d rx rings\n",
			  MAX_RX_RINGS);
		rx_ring_num = MAX_RX_RINGS;
	}

	if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
		DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
			  "Defaulting to INTA\n");
		*dev_intr_type = INTA;
	}

	/* Only the Herc (Xframe II) device IDs support MSI-X. */
	if ((*dev_intr_type == MSI_X) &&
	    ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
	     (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
		DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
			  "Defaulting to INTA\n");
		*dev_intr_type = INTA;
	}

	if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
		DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
		DBG_PRINT(ERR_DBG, "s2io: Defaulting to 1-buffer mode\n");
		rx_ring_mode = 1;
	}
	return SUCCESS;
}
/**
 * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
 * or Traffic class respectively.
 * @nic: device private variable
 * @ds_codepoint: DS codepoint to program (0-63); larger values fail.
 * @ring: receive ring that traffic with this codepoint is steered to.
 * Description: The function configures the receive steering to
 * desired receive ring.
 * Return Value: SUCCESS on success and
 * '-1' on failure (endian settings incorrect).
 */
static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;

	if (ds_codepoint > 63)
		return FAILURE;

	/* Write the data register first, then strobe the command that
	 * latches it at the codepoint's offset. */
	val64 = RTS_DS_MEM_DATA(ring);
	writeq(val64, &bar0->rts_ds_mem_data);

	val64 = RTS_DS_MEM_CTRL_WE |
		RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
		RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);

	writeq(val64, &bar0->rts_ds_mem_ctrl);

	/* Poll until the adapter clears the strobe bit. */
	return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
				RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
				S2IO_BIT_RESET);
}
/* Driver entry points handed to the network stack. */
static const struct net_device_ops s2io_netdev_ops = {
	.ndo_open	        = s2io_open,
	.ndo_stop	        = s2io_close,
	.ndo_get_stats	        = s2io_get_stats,
	.ndo_start_xmit    	= s2io_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list = s2io_set_multicast,
	.ndo_do_ioctl	   	= s2io_ioctl,
	.ndo_set_mac_address    = s2io_set_mac_addr,
	.ndo_change_mtu	   	= s2io_change_mtu,
	.ndo_vlan_rx_register   = s2io_vlan_rx_register,
	.ndo_vlan_rx_kill_vid   = s2io_vlan_rx_kill_vid,
	.ndo_tx_timeout	   	= s2io_tx_watchdog,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller    = s2io_netpoll,
#endif
};
/**
 * s2io_init_nic - Initialization of the adapter .
 * @pdev : structure containing the PCI related information of the device.
 * @pre: the matching entry from the driver's pci_device_id table.
 * Description:
 * The function initializes an adapter identified by the pci_dev structure.
 * All OS related initialization including memory and device structure and
 * initlaization of the device private variable is done. Also the swapper
 * control register is initialized to enable read and write into the I/O
 * registers of the device.
 * Return value:
 * returns 0 on success and negative on failure.
 */
static int __devinit
s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
{
	struct s2io_nic *sp;
	struct net_device *dev;
	int i, j, ret;
	int dma_flag = FALSE;
	u32 mac_up, mac_down;
	u64 val64 = 0, tmp64 = 0;
	struct XENA_dev_config __iomem *bar0 = NULL;
	u16 subid;
	struct mac_info *mac_control;
	struct config_param *config;
	int mode;
	u8 dev_intr_type = intr_type;
	u8 dev_multiq = 0;

	/* Validate/clamp module parameters before touching the hardware. */
	ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
	if (ret)
		return ret;

	if ((ret = pci_enable_device(pdev))) {
		DBG_PRINT(ERR_DBG,
			  "s2io_init_nic: pci_enable_device failed\n");
		return ret;
	}

	/* Prefer 64-bit DMA, fall back to 32-bit, otherwise give up. */
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
		dma_flag = TRUE;
		if (pci_set_consistent_dma_mask
		    (pdev, DMA_64BIT_MASK)) {
			DBG_PRINT(ERR_DBG,
				  "Unable to obtain 64bit DMA for \
				  consistent allocations\n");
			pci_disable_device(pdev);
			return -ENOMEM;
		}
	} else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
		DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
	} else {
		pci_disable_device(pdev);
		return -ENOMEM;
	}
	if ((ret = pci_request_regions(pdev, s2io_driver_name))) {
		DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x \n", __func__, ret);
		pci_disable_device(pdev);
		return -ENODEV;
	}
	if (dev_multiq)
		dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
	else
		dev = alloc_etherdev(sizeof(struct s2io_nic));
	if (dev == NULL) {
		DBG_PRINT(ERR_DBG, "Device allocation failed\n");
		pci_disable_device(pdev);
		pci_release_regions(pdev);
		return -ENODEV;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	/* Private member variable initialized to s2io NIC structure */
	sp = netdev_priv(dev);
	memset(sp, 0, sizeof(struct s2io_nic));
	sp->dev = dev;
	sp->pdev = pdev;
	sp->high_dma_flag = dma_flag;
	sp->device_enabled_once = FALSE;
	if (rx_ring_mode == 1)
		sp->rxd_mode = RXD_MODE_1;
	if (rx_ring_mode == 2)
		sp->rxd_mode = RXD_MODE_3B;

	sp->config.intr_type = dev_intr_type;

	/* The Herc device IDs identify an Xframe II adapter. */
	if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
	    (pdev->device == PCI_DEVICE_ID_HERC_UNI))
		sp->device_type = XFRAME_II_DEVICE;
	else
		sp->device_type = XFRAME_I_DEVICE;

	sp->lro = lro_enable;

	/* Initialize some PCI/PCI-X fields of the NIC. */
	s2io_init_pci(sp);

	/*
	 * Setting the device configuration parameters.
	 * Most of these parameters can be specified by the user during
	 * module insertion as they are module loadable parameters. If
	 * these parameters are not specified during load time, they
	 * are initialized with default values.
	 */
	mac_control = &sp->mac_control;
	config = &sp->config;

	config->napi = napi;
	config->tx_steering_type = tx_steering_type;

	/* Tx side parameters. */
	if (config->tx_steering_type == TX_PRIORITY_STEERING)
		config->tx_fifo_num = MAX_TX_FIFOS;
	else
		config->tx_fifo_num = tx_fifo_num;

	/* Initialize the fifos used for tx steering */
	if (config->tx_fifo_num < 5) {
		if (config->tx_fifo_num  == 1)
			sp->total_tcp_fifos = 1;
		else
			sp->total_tcp_fifos = config->tx_fifo_num - 1;
		sp->udp_fifo_idx = config->tx_fifo_num - 1;
		sp->total_udp_fifos = 1;
		sp->other_fifo_idx = sp->total_tcp_fifos - 1;
	} else {
		sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
						FIFO_OTHER_MAX_NUM);
		sp->udp_fifo_idx = sp->total_tcp_fifos;
		sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
		sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM;
	}

	config->multiq = dev_multiq;
	for (i = 0; i < config->tx_fifo_num; i++) {
		config->tx_cfg[i].fifo_len = tx_fifo_len[i];
		config->tx_cfg[i].fifo_priority = i;
	}

	/* mapping the QoS priority to the configured fifos */
	for (i = 0; i < MAX_TX_FIFOS; i++)
		config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];

	/* map the hashing selector table to the configured fifos */
	for (i = 0; i < config->tx_fifo_num; i++)
		sp->fifo_selector[i] = fifo_selector[i];


	config->tx_intr_type = TXD_INT_TYPE_UTILZ;
	for (i = 0; i < config->tx_fifo_num; i++) {
		config->tx_cfg[i].f_no_snoop =
		    (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
		/* Short FIFOs interrupt per list instead of by utilization. */
		if (config->tx_cfg[i].fifo_len < 65) {
			config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
			break;
		}
	}
	/* + 2 because one Txd for skb->data and one Txd for UFO */
	config->max_txds = MAX_SKB_FRAGS + 2;

	/* Rx side parameters. */
	config->rx_ring_num = rx_ring_num;
	for (i = 0; i < config->rx_ring_num; i++) {
		config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
		    (rxd_count[sp->rxd_mode] + 1);
		config->rx_cfg[i].ring_priority = i;
		mac_control->rings[i].rx_bufs_left = 0;
		mac_control->rings[i].rxd_mode = sp->rxd_mode;
		mac_control->rings[i].rxd_count = rxd_count[sp->rxd_mode];
		mac_control->rings[i].pdev = sp->pdev;
		mac_control->rings[i].dev = sp->dev;
	}

	for (i = 0; i < rx_ring_num; i++) {
		config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
		config->rx_cfg[i].f_no_snoop =
		    (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
	}

	/*  Setting Mac Control parameters */
	mac_control->rmac_pause_time = rmac_pause_time;
	mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
	mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;


	/* initialize the shared memory used by the NIC and the host */
	if (init_shared_mem(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
			  dev->name);
		ret = -ENOMEM;
		goto mem_alloc_failed;
	}

	sp->bar0 = pci_ioremap_bar(pdev, 0);
	if (!sp->bar0) {
		DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
			  dev->name);
		ret = -ENOMEM;
		goto bar0_remap_failed;
	}

	sp->bar1 = pci_ioremap_bar(pdev, 2);
	if (!sp->bar1) {
		DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
			  dev->name);
		ret = -ENOMEM;
		goto bar1_remap_failed;
	}

	dev->irq = pdev->irq;
	dev->base_addr = (unsigned long) sp->bar0;

	/* Initializing the BAR1 address as the start of the FIFO pointer. */
	for (j = 0; j < MAX_TX_FIFOS; j++) {
		mac_control->tx_FIFO_start[j] = (struct TxFIFO_element __iomem *)
		    (sp->bar1 + (j * 0x00020000));
	}

	/*  Driver entry points */
	dev->netdev_ops = &s2io_netdev_ops;
	SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
	if (sp->high_dma_flag == TRUE)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= NETIF_F_TSO;
	dev->features |= NETIF_F_TSO6;
	if ((sp->device_type & XFRAME_II_DEVICE) && (ufo))  {
		dev->features |= NETIF_F_UFO;
		dev->features |= NETIF_F_HW_CSUM;
	}
	dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
	INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
	INIT_WORK(&sp->set_link_task, s2io_set_link);

	pci_save_state(sp->pdev);

	/* Setting swapper control on the NIC, for proper reset operation */
	if (s2io_set_swapper(sp)) {
		DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
			  dev->name);
		ret = -EAGAIN;
		goto set_swap_failed;
	}

	/* Verify if the Herc works on the slot its placed into */
	if (sp->device_type & XFRAME_II_DEVICE) {
		mode = s2io_verify_pci_mode(sp);
		if (mode < 0) {
			DBG_PRINT(ERR_DBG, "%s: ", __func__);
			DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
			ret = -EBADSLT;
			goto set_swap_failed;
		}
	}

	/* Probe MSI-X once; on any failure fall back to INTA. */
	if (sp->config.intr_type == MSI_X) {
		sp->num_entries = config->rx_ring_num + 1;
		ret = s2io_enable_msi_x(sp);

		if (!ret) {
			ret = s2io_test_msi(sp);
			/* rollback MSI-X, will re-enable during add_isr() */
			remove_msix_isr(sp);
		}
		if (ret) {

			DBG_PRINT(ERR_DBG,
			  "%s: MSI-X requested but failed to enable\n",
			  dev->name);
			sp->config.intr_type = INTA;
		}
	}

	/* One NAPI context per ring for MSI-X, one shared for INTA. */
	if (config->intr_type == MSI_X) {
		for (i = 0; i < config->rx_ring_num ; i++)
			netif_napi_add(dev, &mac_control->rings[i].napi,
				s2io_poll_msix, 64);
	} else {
		netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
	}

	/* Not needed for Herc */
	if (sp->device_type & XFRAME_I_DEVICE) {
		/*
		 * Fix for all "FFs" MAC address problems observed on
		 * Alpha platforms
		 */
		fix_mac_address(sp);
		s2io_reset(sp);
	}

	/*
	 * MAC address initialization.
	 * For now only one mac address will be read and used.
	 */
	bar0 = sp->bar0;
	val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
	    RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET);
	writeq(val64, &bar0->rmac_addr_cmd_mem);
	wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
		RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET);
	tmp64 = readq(&bar0->rmac_addr_data0_mem);
	mac_down = (u32) tmp64;
	mac_up = (u32) (tmp64 >> 32);

	/* Unpack the 6 MAC bytes from the two 32-bit register halves. */
	sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
	sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
	sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
	sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
	sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
	sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);

	/*  Set the factory defined MAC address initially   */
	dev->addr_len = ETH_ALEN;
	memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
	memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);

	/* initialize number of multicast & unicast MAC entries variables */
	if (sp->device_type == XFRAME_I_DEVICE) {
		config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
		config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
		config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
	} else if (sp->device_type == XFRAME_II_DEVICE) {
		config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
		config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
		config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
	}

	/* store mac addresses from CAM to s2io_nic structure */
	do_s2io_store_unicast_mc(sp);

	/* Configure MSIX vector for number of rings configured plus one */
	if ((sp->device_type == XFRAME_II_DEVICE) &&
		(config->intr_type == MSI_X))
		sp->num_entries = config->rx_ring_num + 1;

	/* Store the values of the MSIX table in the s2io_nic structure */
	store_xmsi_data(sp);
	/* reset Nic and bring it to known state */
	s2io_reset(sp);

	/*
	 * Initialize link state flags
	 * and the card state parameter
	 */
	sp->state = 0;

	/* Initialize spinlocks */
	for (i = 0; i < sp->config.tx_fifo_num; i++)
		spin_lock_init(&mac_control->fifos[i].tx_lock);

	/*
	 * SXE-002: Configure link and activity LED to init state
	 * on driver load.
	 */
	subid = sp->pdev->subsystem_device;
	if ((subid & 0xFF) >= 0x07) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *) bar0 + 0x2700);
		val64 = readq(&bar0->gpio_control);
	}

	sp->rx_csum = 1;	/* Rx chksum verify enabled by default */

	if (register_netdev(dev)) {
		DBG_PRINT(ERR_DBG, "Device registration failed\n");
		ret = -ENODEV;
		goto register_failed;
	}
	s2io_vpd_read(sp);
	DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2007 Neterion Inc.\n");
	DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
		  sp->product_name, pdev->revision);
	DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
		  s2io_driver_version);
	DBG_PRINT(ERR_DBG, "%s: MAC ADDR: %pM\n", dev->name, dev->dev_addr);
	DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num);
	if (sp->device_type & XFRAME_II_DEVICE) {
		mode = s2io_print_pci_mode(sp);
		if (mode < 0) {
			DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
			ret = -EBADSLT;
			unregister_netdev(dev);
			goto set_swap_failed;
		}
	}
	switch(sp->rxd_mode) {
	case RXD_MODE_1:
		DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
			  dev->name);
		break;
	case RXD_MODE_3B:
		DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
			  dev->name);
		break;
	}

	switch (sp->config.napi) {
	case 0:
		DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
		break;
	case 1:
		DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
		break;
	}

	DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
		sp->config.tx_fifo_num);

	DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
		  sp->config.rx_ring_num);

	switch(sp->config.intr_type) {
	case INTA:
		DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
		break;
	case MSI_X:
		DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
		break;
	}
	if (sp->config.multiq) {
		for (i = 0; i < sp->config.tx_fifo_num; i++)
			mac_control->fifos[i].multiq = config->multiq;
		DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
			dev->name);
	} else
		DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
			dev->name);

	switch (sp->config.tx_steering_type) {
	case NO_STEERING:
		DBG_PRINT(ERR_DBG, "%s: No steering enabled for"
			" transmit\n", dev->name);
		break;
	case TX_PRIORITY_STEERING:
		DBG_PRINT(ERR_DBG, "%s: Priority steering enabled for"
			" transmit\n", dev->name);
		break;
	case TX_DEFAULT_STEERING:
		DBG_PRINT(ERR_DBG, "%s: Default steering enabled for"
			" transmit\n", dev->name);
	}

	if (sp->lro)
		DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
			  dev->name);
	if (ufo)
		DBG_PRINT(ERR_DBG, "%s: UDP Fragmentation Offload(UFO)"
			" enabled\n", dev->name);
	/* Initialize device name */
	sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);

	if (vlan_tag_strip)
		sp->vlan_strip_flag = 1;
	else
		sp->vlan_strip_flag = 0;

	/*
	 * Make Link state as off at this point, when the Link change
	 * interrupt comes the state will be automatically changed to
	 * the right state.
	 */
	netif_carrier_off(dev);

	return 0;

	/* Error unwind: later failures fall through the earlier labels so
	 * everything acquired before the failure point is released. */
register_failed:
set_swap_failed:
	iounmap(sp->bar1);
bar1_remap_failed:
	iounmap(sp->bar0);
bar0_remap_failed:
mem_alloc_failed:
	free_shared_mem(sp);
	pci_disable_device(pdev);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);

	return ret;
}
/**
 * s2io_rem_nic - Free the PCI device
 * @pdev: structure containing the PCI related information of the device.
 * Description: This function is called by the Pci subsystem to release a
 * PCI device and free up all resource held up by the device. This could
 * be in response to a Hot plug event or when the driver is to be removed
 * from memory.
 */
static void __devexit s2io_rem_nic(struct pci_dev *pdev)
{
	struct net_device *dev =
	    (struct net_device *) pci_get_drvdata(pdev);
	struct s2io_nic *sp;

	if (dev == NULL) {
		DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
		return;
	}

	/* Make sure no deferred work (reset / link tasks) is still
	 * pending before tearing the device down. */
	flush_scheduled_work();

	sp = netdev_priv(dev);
	unregister_netdev(dev);

	free_shared_mem(sp);
	iounmap(sp->bar0);
	iounmap(sp->bar1);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
	pci_disable_device(pdev);
}
/**
 * s2io_starter - Entry point for the driver
 * Description: This function is the entry point for the driver. It
 * registers the PCI driver; per-device parameter verification happens
 * later in s2io_init_nic() (via s2io_verify_parm) when a device is probed.
 */
static int __init s2io_starter(void)
{
	return pci_register_driver(&s2io_driver);
}
/**
 * s2io_closer - Cleanup routine for the driver
 * Description: This function is the cleanup routine for the driver. It
 * unregisters the driver.
 */
static __exit void s2io_closer(void)
{
	pci_unregister_driver(&s2io_driver);
	DBG_PRINT(INIT_DBG, "cleanup done\n");
}

module_init(s2io_starter);
module_exit(s2io_closer);
/*
 * check_L2_lro_capable - decide whether a frame's L2 encapsulation allows
 * LRO aggregation and, if so, locate its IP and TCP headers.
 * @buffer: start of the received frame data.
 * @ip/@tcp: out-parameters set to the located L3/L4 headers.
 * @rxdp: Rx descriptor (frame protocol / VLAN flags in Control_1).
 * @sp: device private structure (for the VLAN stripping setting).
 * Returns 0 when aggregatable (DIX ethernet, TCP), -1 otherwise.
 */
static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
				struct tcphdr **tcp, struct RxD_t *rxdp,
				struct s2io_nic *sp)
{
	int ip_off;
	/* L2 type as classified by the hardware (bits 37..39 of Control_1). */
	u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;

	if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
		DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n",
			  __func__);
		return -1;
	}

	/* Checking for DIX type or DIX type with VLAN */
	if ((l2_type == 0)
		|| (l2_type == 4)) {
		ip_off = HEADER_ETHERNET_II_802_3_SIZE;
		/*
		 * If vlan stripping is disabled and the frame is VLAN tagged,
		 * shift the offset by the VLAN header size bytes.
		 */
		if ((!sp->vlan_strip_flag) &&
			(rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
			ip_off += HEADER_VLAN_SIZE;
	} else {
		/* LLC, SNAP etc are considered non-mergeable */
		return -1;
	}

	*ip = (struct iphdr *)((u8 *)buffer + ip_off);
	/* ihl is in 32-bit words; convert to bytes to find the TCP header. */
	ip_len = (u8)((*ip)->ihl);
	ip_len <<= 2;
	*tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);

	return 0;
}
8321 static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
8322 struct tcphdr *tcp)
8324 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __func__);
8325 if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
8326 (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
8327 return -1;
8328 return 0;
8331 static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
8333 return(ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2));
/*
 * initiate_new_session - begin tracking a new LRO session on this flow.
 * @lro: the (free) session slot to initialize.
 * @l2h: pointer to the frame's L2 header.
 * @ip/@tcp: the frame's already-located IP and TCP headers.
 * @tcp_pyld_len: TCP payload length of this first packet.
 * @vlan_tag: VLAN tag to re-attach when the session is flushed.
 */
static void initiate_new_session(struct lro *lro, u8 *l2h,
	struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len, u16 vlan_tag)
{
	DBG_PRINT(INFO_DBG,"%s: Been here...\n", __func__);
	lro->l2h = l2h;
	lro->iph = ip;
	lro->tcph = tcp;
	/* Next in-order sequence number expected from this flow. */
	lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
	lro->tcp_ack = tcp->ack_seq;
	lro->sg_num = 1;
	lro->total_len = ntohs(ip->tot_len);
	lro->frags_len = 0;
	lro->vlan_tag = vlan_tag;
	/*
	 * check if we saw TCP timestamp. Other consistency checks have
	 * already been done.
	 */
	if (tcp->doff == 8) {
		__be32 *ptr;
		/* Options start right after the 20-byte header; TSval is the
		 * second and TSecr the third 32-bit word of the (single,
		 * pre-verified) timestamp option. */
		ptr = (__be32 *)(tcp+1);
		lro->saw_ts = 1;
		lro->cur_tsval = ntohl(*(ptr+1));
		lro->cur_tsecr = *(ptr+2);
	}
	lro->in_use = 1;
}
/*
 * update_L3L4_header - patch the aggregated super-packet's IP and TCP
 * headers just before the LRO session is handed to the stack, and update
 * the aggregation statistics.
 */
static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
{
	struct iphdr *ip = lro->iph;
	struct tcphdr *tcp = lro->tcph;
	__sum16 nchk;
	struct stat_block *statinfo = sp->mac_control.stats_info;
	DBG_PRINT(INFO_DBG,"%s: Been here...\n", __func__);

	/* Update L3 header */
	ip->tot_len = htons(lro->total_len);
	ip->check = 0;
	/* Recompute the IP header checksum over the updated header. */
	nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
	ip->check = nchk;

	/* Update L4 header */
	tcp->ack_seq = lro->tcp_ack;
	tcp->window = lro->window;

	/* Update tsecr field if this session has timestamps enabled */
	if (lro->saw_ts) {
		__be32 *ptr = (__be32 *)(tcp + 1);
		*(ptr+2) = lro->cur_tsecr;
	}

	/* Update counters required for calculation of
	 * average no. of packets aggregated.
	 */
	statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num;
	statinfo->sw_stat.num_aggregations++;
}
8394 static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
8395 struct tcphdr *tcp, u32 l4_pyld)
8397 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __func__);
8398 lro->total_len += l4_pyld;
8399 lro->frags_len += l4_pyld;
8400 lro->tcp_next_seq += l4_pyld;
8401 lro->sg_num++;
8403 /* Update ack seq no. and window ad(from this pkt) in LRO object */
8404 lro->tcp_ack = tcp->ack_seq;
8405 lro->window = tcp->window;
8407 if (lro->saw_ts) {
8408 __be32 *ptr;
8409 /* Update tsecr and tsval from this packet */
8410 ptr = (__be32 *)(tcp+1);
8411 lro->cur_tsval = ntohl(*(ptr+1));
8412 lro->cur_tsecr = *(ptr + 2);
/*
 * verify_l3_l4_lro_capable - decide whether an IPv4/TCP segment may be
 * merged by LRO.
 * @l_lro: existing session to validate timestamps against, or NULL when
 *         probing a packet for a brand-new session.
 *
 * Returns 0 when the segment is aggregatable, -1 when it must be sent up
 * unmerged (and any matching session flushed).
 */
static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
				    struct tcphdr *tcp, u32 tcp_pyld_len)
{
	u8 *ptr;

	DBG_PRINT(INFO_DBG,"%s: Been here...\n", __func__);

	if (!tcp_pyld_len) {
		/* Runt frame or a pure ack */
		return -1;
	}

	if (ip->ihl != 5) /* IP has options */
		return -1;

	/* If we see CE codepoint in IP header, packet is not mergeable */
	if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
		return -1;

	/* If we see ECE or CWR flags in TCP header, packet is not mergeable */
	if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
	    tcp->ece || tcp->cwr || !tcp->ack) {
		/*
		 * Currently recognize only the ack control word and
		 * any other control field being set would result in
		 * flushing the LRO session
		 */
		return -1;
	}

	/*
	 * Allow only one TCP timestamp option. Don't aggregate if
	 * any other options are detected.
	 * doff == 5 means no options at all; doff == 8 leaves exactly 12
	 * option bytes - room for NOP padding plus one timestamp option.
	 */
	if (tcp->doff != 5 && tcp->doff != 8)
		return -1;

	if (tcp->doff == 8) {
		ptr = (u8 *)(tcp + 1);
		/* Skip any leading NOP padding before the timestamp option */
		while (*ptr == TCPOPT_NOP)
			ptr++;
		if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
			return -1;

		/* Ensure timestamp value increases monotonically */
		if (l_lro)
			if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
				return -1;

		/* timestamp echo reply should be non-zero */
		if (*((__be32 *)(ptr+6)) == 0)
			return -1;
	}

	return 0;
}
/*
 * s2io_club_tcp_session - classify a received frame against the per-ring
 * LRO session table and perform the resulting aggregation step.
 * @ring_data: ring whose lro0_n[] session table is searched.
 * @buffer:    start of the L2 frame.
 * @tcp:       out - points at the TCP header inside @buffer on success.
 * @tcp_len:   out - TCP payload length of this frame.
 * @lro:       out - matching/new session, or NULL when none available.
 *
 * Return value (consumed by the receive path):
 *   0 - all LRO sessions in use, packet not handled here
 *   1 - aggregated into an existing session
 *   2 - out-of-order or non-mergeable: headers updated, flush session
 *   3 - new session initiated
 *   4 - aggregated and session reached max size: flush it
 *   5 - packet not L3/L4 aggregatable, send it up as-is
 *   non-zero from check_L2_lro_capable() - frame not LRO capable at L2
 */
static int
s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer, u8 **tcp,
		      u32 *tcp_len, struct lro **lro, struct RxD_t *rxdp,
		      struct s2io_nic *sp)
{
	struct iphdr *ip;
	struct tcphdr *tcph;
	int ret = 0, i;
	u16 vlan_tag = 0;

	/* Bail out early unless the frame is plain Ethernet/IPv4/TCP */
	if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
					 rxdp, sp))) {
		DBG_PRINT(INFO_DBG,"IP Saddr: %x Daddr: %x\n",
			  ip->saddr, ip->daddr);
	} else
		return ret;

	vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2);
	tcph = (struct tcphdr *)*tcp;
	*tcp_len = get_l4_pyld_length(ip, tcph);

	/* Look for an in-use session with a matching socket 4-tuple */
	for (i=0; i<MAX_LRO_SESSIONS; i++) {
		struct lro *l_lro = &ring_data->lro0_n[i];
		if (l_lro->in_use) {
			if (check_for_socket_match(l_lro, ip, tcph))
				continue;
			/* Sock pair matched */
			*lro = l_lro;

			if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
				DBG_PRINT(INFO_DBG, "%s:Out of order. expected "
					  "0x%x, actual 0x%x\n", __func__,
					  (*lro)->tcp_next_seq,
					  ntohl(tcph->seq));

				sp->mac_control.stats_info->
				   sw_stat.outof_sequence_pkts++;
				ret = 2;
				break;
			}

			if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,*tcp_len))
				ret = 1; /* Aggregate */
			else
				ret = 2; /* Flush both */
			break;
		}
	}

	if (ret == 0) {
		/*
		 * Before searching for available LRO objects,
		 * check if the pkt is L3/L4 aggregatable. If not
		 * don't create new LRO session. Just send this
		 * packet up.
		 */
		if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) {
			return 5;
		}

		/* Claim the first free session slot for a new flow */
		for (i=0; i<MAX_LRO_SESSIONS; i++) {
			struct lro *l_lro = &ring_data->lro0_n[i];
			if (!(l_lro->in_use)) {
				*lro = l_lro;
				ret = 3; /* Begin anew */
				break;
			}
		}
	}

	if (ret == 0) { /* sessions exceeded */
		DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n",
			  __func__);
		*lro = NULL;
		return ret;
	}

	switch (ret) {
		case 3:
			initiate_new_session(*lro, buffer, ip, tcph, *tcp_len,
					     vlan_tag);
			break;
		case 2:
			update_L3L4_header(sp, *lro);
			break;
		case 1:
			aggregate_new_rx(*lro, ip, tcph, *tcp_len);
			if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
				update_L3L4_header(sp, *lro);
				ret = 4; /* Flush the LRO */
			}
			break;
		default:
			DBG_PRINT(ERR_DBG,"%s:Dont know, can't say!!\n",
				  __func__);
			break;
	}

	return ret;
}
8572 static void clear_lro_session(struct lro *lro)
8574 static u16 lro_struct_size = sizeof(struct lro);
8576 memset(lro, 0, lro_struct_size);
8579 static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
8581 struct net_device *dev = skb->dev;
8582 struct s2io_nic *sp = netdev_priv(dev);
8584 skb->protocol = eth_type_trans(skb, dev);
8585 if (sp->vlgrp && vlan_tag
8586 && (sp->vlan_strip_flag)) {
8587 /* Queueing the vlan frame to the upper layer */
8588 if (sp->config.napi)
8589 vlan_hwaccel_receive_skb(skb, sp->vlgrp, vlan_tag);
8590 else
8591 vlan_hwaccel_rx(skb, sp->vlgrp, vlan_tag);
8592 } else {
8593 if (sp->config.napi)
8594 netif_receive_skb(skb);
8595 else
8596 netif_rx(skb);
8600 static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
8601 struct sk_buff *skb,
8602 u32 tcp_len)
8604 struct sk_buff *first = lro->parent;
8606 first->len += tcp_len;
8607 first->data_len = lro->frags_len;
8608 skb_pull(skb, (skb->len - tcp_len));
8609 if (skb_shinfo(first)->frag_list)
8610 lro->last_frag->next = skb;
8611 else
8612 skb_shinfo(first)->frag_list = skb;
8613 first->truesize += skb->truesize;
8614 lro->last_frag = skb;
8615 sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
8616 return;
8620 * s2io_io_error_detected - called when PCI error is detected
8621 * @pdev: Pointer to PCI device
8622 * @state: The current pci connection state
8624 * This function is called after a PCI bus error affecting
8625 * this device has been detected.
8627 static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
8628 pci_channel_state_t state)
8630 struct net_device *netdev = pci_get_drvdata(pdev);
8631 struct s2io_nic *sp = netdev_priv(netdev);
8633 netif_device_detach(netdev);
8635 if (netif_running(netdev)) {
8636 /* Bring down the card, while avoiding PCI I/O */
8637 do_s2io_card_down(sp, 0);
8639 pci_disable_device(pdev);
8641 return PCI_ERS_RESULT_NEED_RESET;
8645 * s2io_io_slot_reset - called after the pci bus has been reset.
8646 * @pdev: Pointer to PCI device
8648 * Restart the card from scratch, as if from a cold-boot.
8649 * At this point, the card has exprienced a hard reset,
8650 * followed by fixups by BIOS, and has its config space
8651 * set up identically to what it was at cold boot.
8653 static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
8655 struct net_device *netdev = pci_get_drvdata(pdev);
8656 struct s2io_nic *sp = netdev_priv(netdev);
8658 if (pci_enable_device(pdev)) {
8659 printk(KERN_ERR "s2io: "
8660 "Cannot re-enable PCI device after reset.\n");
8661 return PCI_ERS_RESULT_DISCONNECT;
8664 pci_set_master(pdev);
8665 s2io_reset(sp);
8667 return PCI_ERS_RESULT_RECOVERED;
8671 * s2io_io_resume - called when traffic can start flowing again.
8672 * @pdev: Pointer to PCI device
8674 * This callback is called when the error recovery driver tells
8675 * us that its OK to resume normal operation.
8677 static void s2io_io_resume(struct pci_dev *pdev)
8679 struct net_device *netdev = pci_get_drvdata(pdev);
8680 struct s2io_nic *sp = netdev_priv(netdev);
8682 if (netif_running(netdev)) {
8683 if (s2io_card_up(sp)) {
8684 printk(KERN_ERR "s2io: "
8685 "Can't bring device back up after reset.\n");
8686 return;
8689 if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
8690 s2io_card_down(sp);
8691 printk(KERN_ERR "s2io: "
8692 "Can't resetore mac addr after reset.\n");
8693 return;
8697 netif_device_attach(netdev);
8698 netif_tx_wake_all_queues(netdev);