ethtool: allow custom interval for physical identification
[linux-2.6/libata-dev.git] / drivers / net / s2io.c
blob2302d9743744124f95424e6547033a5c3f4207e3
1 /************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3 * Copyright(c) 2002-2010 Exar Corp.
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
13 * Credits:
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
18 * questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
21 * Francois Romieu : For pointing out all code part that were
22 * deprecated and also styling related comments.
23 * Grant Grundler : For helping me get rid of some Architecture
24 * dependent code.
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
27 * The module loadable parameters that are supported by the driver and a brief
28 * explanation of all the variables.
30 * rx_ring_num : This can be used to program the number of receive rings used
31 * in the driver.
32 * rx_ring_sz: This defines the number of receive blocks each ring can have.
33 * This is also an array of size 8.
34 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
35 * values are 1, 2.
36 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
37 * tx_fifo_len: This too is an array of 8. Each element defines the number of
38 * Tx descriptors that can be associated with each corresponding FIFO.
39 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40 * 2(MSI_X). Default value is '2(MSI_X)'
41 * lro_max_pkts: This parameter defines the maximum number of packets that can
42 * be aggregated as a single large packet
43 * napi: This parameter used to enable/disable NAPI (polling Rx)
44 * Possible values '1' for enable and '0' for disable. Default is '1'
45 * ufo: This parameter used to enable/disable UDP Fragmentation Offload(UFO)
46 * Possible values '1' for enable and '0' for disable. Default is '0'
47 * vlan_tag_strip: This can be used to enable or disable vlan stripping.
48 * Possible values '1' for enable , '0' for disable.
49 * Default is '2' - which means disable in promisc mode
50 * and enable in non-promiscuous mode.
51 * multiq: This parameter used to enable/disable MULTIQUEUE support.
52 * Possible values '1' for enable and '0' for disable. Default is '0'
53 ************************************************************************/
55 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
57 #include <linux/module.h>
58 #include <linux/types.h>
59 #include <linux/errno.h>
60 #include <linux/ioport.h>
61 #include <linux/pci.h>
62 #include <linux/dma-mapping.h>
63 #include <linux/kernel.h>
64 #include <linux/netdevice.h>
65 #include <linux/etherdevice.h>
66 #include <linux/mdio.h>
67 #include <linux/skbuff.h>
68 #include <linux/init.h>
69 #include <linux/delay.h>
70 #include <linux/stddef.h>
71 #include <linux/ioctl.h>
72 #include <linux/timex.h>
73 #include <linux/ethtool.h>
74 #include <linux/workqueue.h>
75 #include <linux/if_vlan.h>
76 #include <linux/ip.h>
77 #include <linux/tcp.h>
78 #include <linux/uaccess.h>
79 #include <linux/io.h>
80 #include <linux/slab.h>
81 #include <net/tcp.h>
83 #include <asm/system.h>
84 #include <asm/div64.h>
85 #include <asm/irq.h>
87 /* local include */
88 #include "s2io.h"
89 #include "s2io-regs.h"
91 #define DRV_VERSION "2.0.26.28"
/* S2io Driver name & version. */
static const char s2io_driver_name[] = "Neterion";
static const char s2io_driver_version[] = DRV_VERSION;

/* Per-rxd_mode descriptor geometry, indexed by nic->rxd_mode:
 * rxd_size  — bytes per RxD (32 for 1-buffer mode, 48 for 3-buffer mode),
 * rxd_count — RxDs per block (127 / 85).
 * NOTE(review): mapping of index 0/1 to RXD_MODE_1/RXD_MODE_3B inferred from
 * usage below — confirm against s2io.h.
 */
static const int rxd_size[2] = {32, 48};
static const int rxd_count[2] = {127, 85};
100 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
102 int ret;
104 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
105 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
107 return ret;
/*
 * Cards with the following subsystem_ids have a link state indication
 * problem: 600B, 600C, 600D, 640B, 640C and 640D.
 * The macro below identifies these cards given the subsystem_id.
 * Evaluates to 1 for such Xframe-I cards, 0 otherwise.  Both arguments
 * and the whole expansion are parenthesized so the ternary cannot
 * re-associate with operators at the call site (e.g. a leading '!').
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid)	\
	(((dev_type) == XFRAME_I_DEVICE) ?			\
	 (((((subid) >= 0x600B) && ((subid) <= 0x600D)) ||	\
	   (((subid) >= 0x640B) && ((subid) <= 0x640D))) ? 1 : 0) : 0)
/* Link is up when neither the remote- nor local-fault RMAC status bits
 * are set in the adapter status register value @val64.
 */
#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
				      ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
/* Test the __S2IO_STATE_CARD_UP bit in sp->state: non-zero while the
 * adapter is considered "up" — used to gate hardware access paths.
 */
static inline int is_s2io_card_up(const struct s2io_nic *sp)
{
	return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
}
/* Ethtool related variables and Macros. */

/* Names of the ethtool self-test cases; the (offline)/(online) tag tells
 * ethtool whether the test needs the interface taken down.
 */
static const char s2io_gstrings[][ETH_GSTRING_LEN] = {
	"Register test\t(offline)",
	"Eeprom test\t(offline)",
	"Link test\t(online)",
	"RLDRAM test\t(offline)",
	"BIST Test\t(offline)"
};
/* ethtool stat key names for hardware statistics common to all Xframe
 * devices.  Order must match the order in which values are copied out
 * in the ethtool get_ethtool_stats handler — do not reorder.
 */
static const char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
	{"tmac_frms"},
	{"tmac_data_octets"},
	{"tmac_drop_frms"},
	{"tmac_mcst_frms"},
	{"tmac_bcst_frms"},
	{"tmac_pause_ctrl_frms"},
	{"tmac_ttl_octets"},
	{"tmac_ucst_frms"},
	{"tmac_nucst_frms"},
	{"tmac_any_err_frms"},
	{"tmac_ttl_less_fb_octets"},
	{"tmac_vld_ip_octets"},
	{"tmac_vld_ip"},
	{"tmac_drop_ip"},
	{"tmac_icmp"},
	{"tmac_rst_tcp"},
	{"tmac_tcp"},
	{"tmac_udp"},
	{"rmac_vld_frms"},
	{"rmac_data_octets"},
	{"rmac_fcs_err_frms"},
	{"rmac_drop_frms"},
	{"rmac_vld_mcst_frms"},
	{"rmac_vld_bcst_frms"},
	{"rmac_in_rng_len_err_frms"},
	{"rmac_out_rng_len_err_frms"},
	{"rmac_long_frms"},
	{"rmac_pause_ctrl_frms"},
	{"rmac_unsup_ctrl_frms"},
	{"rmac_ttl_octets"},
	{"rmac_accepted_ucst_frms"},
	{"rmac_accepted_nucst_frms"},
	{"rmac_discarded_frms"},
	{"rmac_drop_events"},
	{"rmac_ttl_less_fb_octets"},
	{"rmac_ttl_frms"},
	{"rmac_usized_frms"},
	{"rmac_osized_frms"},
	{"rmac_frag_frms"},
	{"rmac_jabber_frms"},
	{"rmac_ttl_64_frms"},
	{"rmac_ttl_65_127_frms"},
	{"rmac_ttl_128_255_frms"},
	{"rmac_ttl_256_511_frms"},
	{"rmac_ttl_512_1023_frms"},
	{"rmac_ttl_1024_1518_frms"},
	{"rmac_ip"},
	{"rmac_ip_octets"},
	{"rmac_hdr_err_ip"},
	{"rmac_drop_ip"},
	{"rmac_icmp"},
	{"rmac_tcp"},
	{"rmac_udp"},
	{"rmac_err_drp_udp"},
	{"rmac_xgmii_err_sym"},
	{"rmac_frms_q0"},
	{"rmac_frms_q1"},
	{"rmac_frms_q2"},
	{"rmac_frms_q3"},
	{"rmac_frms_q4"},
	{"rmac_frms_q5"},
	{"rmac_frms_q6"},
	{"rmac_frms_q7"},
	{"rmac_full_q0"},
	{"rmac_full_q1"},
	{"rmac_full_q2"},
	{"rmac_full_q3"},
	{"rmac_full_q4"},
	{"rmac_full_q5"},
	{"rmac_full_q6"},
	{"rmac_full_q7"},
	{"rmac_pause_cnt"},
	{"rmac_xgmii_data_err_cnt"},
	{"rmac_xgmii_ctrl_err_cnt"},
	{"rmac_accepted_ip"},
	{"rmac_err_tcp"},
	{"rd_req_cnt"},
	{"new_rd_req_cnt"},
	{"new_rd_req_rtry_cnt"},
	{"rd_rtry_cnt"},
	{"wr_rtry_rd_ack_cnt"},
	{"wr_req_cnt"},
	{"new_wr_req_cnt"},
	{"new_wr_req_rtry_cnt"},
	{"wr_rtry_cnt"},
	{"wr_disc_cnt"},
	{"rd_rtry_wr_ack_cnt"},
	{"txp_wr_cnt"},
	{"txd_rd_cnt"},
	{"txd_wr_cnt"},
	{"rxd_rd_cnt"},
	{"rxd_wr_cnt"},
	{"txf_rd_cnt"},
	{"rxf_wr_cnt"}
};
/* Additional hardware stat keys available only on Xframe-II (Hercules)
 * devices; appended after the common Xena keys.  Order must match the
 * stats copy-out code — do not reorder.
 */
static const char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
	{"rmac_ttl_1519_4095_frms"},
	{"rmac_ttl_4096_8191_frms"},
	{"rmac_ttl_8192_max_frms"},
	{"rmac_ttl_gt_max_frms"},
	{"rmac_osized_alt_frms"},
	{"rmac_jabber_alt_frms"},
	{"rmac_gt_max_alt_frms"},
	{"rmac_vlan_frms"},
	{"rmac_len_discard"},
	{"rmac_fcs_discard"},
	{"rmac_pf_discard"},
	{"rmac_da_discard"},
	{"rmac_red_discard"},
	{"rmac_rts_discard"},
	{"rmac_ingm_full_discard"},
	{"link_fault_cnt"}
};
/* Software (driver-maintained) stat keys; the first entry is a section
 * banner shown by ethtool -S.  Order must match the stats copy-out code.
 */
static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
	{"\n DRIVER STATISTICS"},
	{"single_bit_ecc_errs"},
	{"double_bit_ecc_errs"},
	{"parity_err_cnt"},
	{"serious_err_cnt"},
	{"soft_reset_cnt"},
	{"fifo_full_cnt"},
	{"ring_0_full_cnt"},
	{"ring_1_full_cnt"},
	{"ring_2_full_cnt"},
	{"ring_3_full_cnt"},
	{"ring_4_full_cnt"},
	{"ring_5_full_cnt"},
	{"ring_6_full_cnt"},
	{"ring_7_full_cnt"},
	{"alarm_transceiver_temp_high"},
	{"alarm_transceiver_temp_low"},
	{"alarm_laser_bias_current_high"},
	{"alarm_laser_bias_current_low"},
	{"alarm_laser_output_power_high"},
	{"alarm_laser_output_power_low"},
	{"warn_transceiver_temp_high"},
	{"warn_transceiver_temp_low"},
	{"warn_laser_bias_current_high"},
	{"warn_laser_bias_current_low"},
	{"warn_laser_output_power_high"},
	{"warn_laser_output_power_low"},
	{"lro_aggregated_pkts"},
	{"lro_flush_both_count"},
	{"lro_out_of_sequence_pkts"},
	{"lro_flush_due_to_max_pkts"},
	{"lro_avg_aggr_pkts"},
	{"mem_alloc_fail_cnt"},
	{"pci_map_fail_cnt"},
	{"watchdog_timer_cnt"},
	{"mem_allocated"},
	{"mem_freed"},
	{"link_up_cnt"},
	{"link_down_cnt"},
	{"link_up_time"},
	{"link_down_time"},
	{"tx_tcode_buf_abort_cnt"},
	{"tx_tcode_desc_abort_cnt"},
	{"tx_tcode_parity_err_cnt"},
	{"tx_tcode_link_loss_cnt"},
	{"tx_tcode_list_proc_err_cnt"},
	{"rx_tcode_parity_err_cnt"},
	{"rx_tcode_abort_cnt"},
	{"rx_tcode_parity_abort_cnt"},
	{"rx_tcode_rda_fail_cnt"},
	{"rx_tcode_unkn_prot_cnt"},
	{"rx_tcode_fcs_err_cnt"},
	{"rx_tcode_buf_size_err_cnt"},
	{"rx_tcode_rxd_corrupt_cnt"},
	{"rx_tcode_unkn_err_cnt"},
	{"tda_err_cnt"},
	{"pfc_err_cnt"},
	{"pcc_err_cnt"},
	{"tti_err_cnt"},
	{"tpa_err_cnt"},
	{"sm_err_cnt"},
	{"lso_err_cnt"},
	{"mac_tmac_err_cnt"},
	{"mac_rmac_err_cnt"},
	{"xgxs_txgxs_err_cnt"},
	{"xgxs_rxgxs_err_cnt"},
	{"rc_err_cnt"},
	{"prc_pcix_err_cnt"},
	{"rpa_err_cnt"},
	{"rda_err_cnt"},
	{"rti_err_cnt"},
	{"mc_err_cnt"}
};
/* Entry counts for the stat/self-test key tables above, and the derived
 * per-device totals (Xframe-II adds the enhanced stats on top of the
 * Xframe-I set).
 */
#define S2IO_XENA_STAT_LEN	ARRAY_SIZE(ethtool_xena_stats_keys)
#define S2IO_ENHANCED_STAT_LEN	ARRAY_SIZE(ethtool_enhanced_stats_keys)
#define S2IO_DRIVER_STAT_LEN	ARRAY_SIZE(ethtool_driver_stats_keys)

#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)

/* Byte sizes of the flattened ethtool string blocks. */
#define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
#define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)

#define S2IO_TEST_LEN	ARRAY_SIZE(s2io_gstrings)
#define S2IO_STRINGS_LEN	(S2IO_TEST_LEN * ETH_GSTRING_LEN)
/*
 * Initialize @timer to invoke @handle(@arg) once, @exp jiffies from now.
 * Wrapped in do { } while (0) so the multi-statement expansion behaves
 * as a single statement and is safe inside un-braced if/else bodies at
 * the call site (call sites supply the trailing semicolon).
 */
#define S2IO_TIMER_CONF(timer, handle, arg, exp)		\
	do {							\
		init_timer(&(timer));				\
		(timer).function = handle;			\
		(timer).data = (unsigned long)(arg);		\
		mod_timer(&(timer), (jiffies + (exp)));		\
	} while (0)
347 /* copy mac addr to def_mac_addr array */
348 static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
350 sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
351 sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
352 sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
353 sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
354 sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
355 sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
/* Add the vlan */
/* net_device vlan_rx_register hook: publish the new vlan group pointer.
 * Every Tx fifo lock is taken (in ascending index order) before
 * nic->vlgrp is written and released in reverse order afterwards, so
 * the Tx path never observes a half-updated group pointer.
 */
static void s2io_vlan_rx_register(struct net_device *dev,
				  struct vlan_group *grp)
{
	int i;
	struct s2io_nic *nic = netdev_priv(dev);
	unsigned long flags[MAX_TX_FIFOS];	/* one saved irq state per fifo */
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;

	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];

		spin_lock_irqsave(&fifo->tx_lock, flags[i]);
	}

	nic->vlgrp = grp;

	/* release in reverse order of acquisition */
	for (i = config->tx_fifo_num - 1; i >= 0; i--) {
		struct fifo_info *fifo = &mac_control->fifos[i];

		spin_unlock_irqrestore(&fifo->tx_lock, flags[i]);
	}
}
/* Unregister the vlan */
/* net_device vlan_rx_kill_vid hook: remove @vid from the current vlan
 * group.  All Tx fifo locks are held across the update (taken in
 * ascending order, released in reverse) so in-flight transmits cannot
 * race the change.
 */
static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	int i;
	struct s2io_nic *nic = netdev_priv(dev);
	unsigned long flags[MAX_TX_FIFOS];	/* one saved irq state per fifo */
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;

	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];

		spin_lock_irqsave(&fifo->tx_lock, flags[i]);
	}

	if (nic->vlgrp)
		vlan_group_set_device(nic->vlgrp, vid, NULL);

	/* release in reverse order of acquisition */
	for (i = config->tx_fifo_num - 1; i >= 0; i--) {
		struct fifo_info *fifo = &mac_control->fifos[i];

		spin_unlock_irqrestore(&fifo->tx_lock, flags[i]);
	}
}
/*
 * Constants to be programmed into the Xena's registers, to configure
 * the XAUI.  Entries come in (set address, write data) pairs for each
 * lane; the list is terminated by END_SIGN.
 */
#define	END_SIGN	0x0
static const u64 herc_act_dtx_cfg[] = {
	/* Set address */
	0x8000051536750000ULL, 0x80000515367500E0ULL,
	/* Write data */
	0x8000051536750004ULL, 0x80000515367500E4ULL,
	/* Set address */
	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
	/* Write data */
	0x80010515003F0004ULL, 0x80010515003F00E4ULL,
	/* Set address */
	0x801205150D440000ULL, 0x801205150D4400E0ULL,
	/* Write data */
	0x801205150D440004ULL, 0x801205150D4400E4ULL,
	/* Set address */
	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	/* Done */
	END_SIGN
};
/* XAUI configuration sequence for Xframe-I (Xena) adapters; same
 * (set address, write data) pair layout as herc_act_dtx_cfg above,
 * END_SIGN terminated.
 */
static const u64 xena_dtx_cfg[] = {
	/* Set address */
	0x8000051500000000ULL, 0x80000515000000E0ULL,
	/* Write data */
	0x80000515D9350004ULL, 0x80000515D93500E4ULL,
	/* Set address */
	0x8001051500000000ULL, 0x80010515000000E0ULL,
	/* Write data */
	0x80010515001E0004ULL, 0x80010515001E00E4ULL,
	/* Set address */
	0x8002051500000000ULL, 0x80020515000000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	END_SIGN
};
/*
 * Constants for Fixing the MacAddress problem seen mostly on
 * Alpha machines.  Register write sequence, END_SIGN terminated.
 */
static const u64 fix_mac[] = {
	0x0060000000000000ULL, 0x0060600000000000ULL,
	0x0040600000000000ULL, 0x0000600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0000600000000000ULL,
	0x0040600000000000ULL, 0x0060600000000000ULL,
	END_SIGN
};
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/* Module Loadable parameters. */
S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
S2IO_PARM_INT(rx_ring_num, 1);
S2IO_PARM_INT(multiq, 0);
S2IO_PARM_INT(rx_ring_mode, 1);
S2IO_PARM_INT(use_continuous_tx_intrs, 1);
S2IO_PARM_INT(rmac_pause_time, 0x100);
S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
S2IO_PARM_INT(shared_splits, 0);
S2IO_PARM_INT(tmac_util_period, 5);
S2IO_PARM_INT(rmac_util_period, 5);
S2IO_PARM_INT(l3l4hdr_size, 128);
/* 0 is no steering, 1 is Priority steering, 2 is Default steering */
S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
/* Frequency of Rx desc syncs expressed as power of 2 */
S2IO_PARM_INT(rxsync_frequency, 3);
/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
S2IO_PARM_INT(intr_type, 2);
/* Large receive offload feature */

/* Max pkts to be aggregated by LRO at one time. If not specified,
 * aggregation happens until we hit max IP pkt size(64K)
 */
S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
S2IO_PARM_INT(indicate_max_pkts, 0);

S2IO_PARM_INT(napi, 1);
S2IO_PARM_INT(ufo, 0);
S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);

/* Per-FIFO Tx descriptor counts; fifo 0 gets a larger default. */
static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
	{DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
/* Per-ring Rx block counts. */
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
	{[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
/* Per-ring frame-length steering values; all zero by default. */
static unsigned int rts_frm_len[MAX_RX_RINGS] =
	{[0 ...(MAX_RX_RINGS - 1)] = 0 };

module_param_array(tx_fifo_len, uint, NULL, 0);
module_param_array(rx_ring_sz, uint, NULL, 0);
module_param_array(rts_frm_len, uint, NULL, 0);
/*
 * S2IO device table.
 * This table lists all the devices that this driver supports:
 * Xframe-I (S2IO) and Xframe-II (Herc), Windows/Unix device ids.
 */
static DEFINE_PCI_DEVICE_TABLE(s2io_tbl) = {
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{0,}
};

MODULE_DEVICE_TABLE(pci, s2io_tbl);
/* PCI error-recovery (AER) callbacks: error detection, slot reset,
 * and post-reset resume.
 */
static struct pci_error_handlers s2io_err_handler = {
	.error_detected = s2io_io_error_detected,
	.slot_reset = s2io_io_slot_reset,
	.resume = s2io_io_resume,
};
/* PCI driver registration record tying the device table to the
 * probe/remove entry points and the error handlers above.
 */
static struct pci_driver s2io_driver = {
	.name = "S2IO",
	.id_table = s2io_tbl,
	.probe = s2io_init_nic,
	.remove = __devexit_p(s2io_rem_nic),
	.err_handler = &s2io_err_handler,
};
/* A simplifier macro used both by init and free shared_mem Fns().
 * Number of whole pages needed to hold @len items at @per_each items
 * per page (ceiling division).  Arguments are fully parenthesized so
 * expression arguments (e.g. a product passed as per_each) cannot be
 * torn apart by operator precedence inside the expansion.
 */
#define TXD_MEM_PAGE_CNT(len, per_each) \
	(((len) + (per_each) - 1) / (per_each))
554 /* netqueue manipulation helper functions */
555 static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
557 if (!sp->config.multiq) {
558 int i;
560 for (i = 0; i < sp->config.tx_fifo_num; i++)
561 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
563 netif_tx_stop_all_queues(sp->dev);
566 static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
568 if (!sp->config.multiq)
569 sp->mac_control.fifos[fifo_no].queue_state =
570 FIFO_QUEUE_STOP;
572 netif_tx_stop_all_queues(sp->dev);
575 static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
577 if (!sp->config.multiq) {
578 int i;
580 for (i = 0; i < sp->config.tx_fifo_num; i++)
581 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
583 netif_tx_start_all_queues(sp->dev);
586 static inline void s2io_start_tx_queue(struct s2io_nic *sp, int fifo_no)
588 if (!sp->config.multiq)
589 sp->mac_control.fifos[fifo_no].queue_state =
590 FIFO_QUEUE_START;
592 netif_tx_start_all_queues(sp->dev);
595 static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
597 if (!sp->config.multiq) {
598 int i;
600 for (i = 0; i < sp->config.tx_fifo_num; i++)
601 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
603 netif_tx_wake_all_queues(sp->dev);
606 static inline void s2io_wake_tx_queue(
607 struct fifo_info *fifo, int cnt, u8 multiq)
610 if (multiq) {
611 if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
612 netif_wake_subqueue(fifo->dev, fifo->fifo_no);
613 } else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
614 if (netif_queue_stopped(fifo->dev)) {
615 fifo->queue_state = FIFO_QUEUE_START;
616 netif_wake_queue(fifo->dev);
/**
 * init_shared_mem - Allocation and Initialization of Memory
 * @nic: Device private variable.
 * Description: The function allocates all the memory areas shared
 * between the NIC and the driver. This includes Tx descriptors,
 * Rx descriptors and the statistics block.
 * Return: SUCCESS, or -EINVAL / -ENOMEM / FAILURE on error.  On any
 * failure the caller is expected to invoke free_shared_mem(), which
 * releases whatever was allocated up to that point.
 */
static int init_shared_mem(struct s2io_nic *nic)
{
	u32 size;
	void *tmp_v_addr, *tmp_v_addr_next;
	dma_addr_t tmp_p_addr, tmp_p_addr_next;
	struct RxD_block *pre_rxd_blk = NULL;
	int i, j, blk_cnt;
	int lst_size, lst_per_page;
	struct net_device *dev = nic->dev;
	unsigned long tmp;
	struct buffAdd *ba;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;
	unsigned long long mem_allocated = 0;	/* running total for sw stats */

	/* Allocation and initialization of TXDLs in FIFOs */
	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		size += tx_cfg->fifo_len;
	}
	if (size > MAX_AVAILABLE_TXDS) {
		DBG_PRINT(ERR_DBG,
			  "Too many TxDs requested: %d, max supported: %d\n",
			  size, MAX_AVAILABLE_TXDS);
		return -EINVAL;
	}

	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		size = tx_cfg->fifo_len;
		/*
		 * Legal values are from 2 to 8192
		 */
		if (size < 2) {
			DBG_PRINT(ERR_DBG, "Fifo %d: Invalid length (%d) - "
				  "Valid lengths are 2 through 8192\n",
				  i, size);
			return -EINVAL;
		}
	}

	lst_size = (sizeof(struct TxD) * config->max_txds);
	lst_per_page = PAGE_SIZE / lst_size;

	/* Per-fifo array of TxDL bookkeeping entries. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
		int fifo_len = tx_cfg->fifo_len;
		int list_holder_size = fifo_len * sizeof(struct list_info_hold);

		fifo->list_info = kzalloc(list_holder_size, GFP_KERNEL);
		if (!fifo->list_info) {
			DBG_PRINT(INFO_DBG, "Malloc failed for list_info\n");
			return -ENOMEM;
		}
		mem_allocated += list_holder_size;
	}
	for (i = 0; i < config->tx_fifo_num; i++) {
		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
						lst_per_page);
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		fifo->tx_curr_put_info.offset = 0;
		fifo->tx_curr_put_info.fifo_len = tx_cfg->fifo_len - 1;
		fifo->tx_curr_get_info.offset = 0;
		fifo->tx_curr_get_info.fifo_len = tx_cfg->fifo_len - 1;
		fifo->fifo_no = i;
		fifo->nic = nic;
		fifo->max_txds = MAX_SKB_FRAGS + 2;
		fifo->dev = dev;

		/* DMA-coherent pages holding the TxD lists themselves. */
		for (j = 0; j < page_num; j++) {
			int k = 0;
			dma_addr_t tmp_p;
			void *tmp_v;
			tmp_v = pci_alloc_consistent(nic->pdev,
						     PAGE_SIZE, &tmp_p);
			if (!tmp_v) {
				DBG_PRINT(INFO_DBG,
					  "pci_alloc_consistent failed for TxDL\n");
				return -ENOMEM;
			}
			/* If we got a zero DMA address(can happen on
			 * certain platforms like PPC), reallocate.
			 * Store virtual address of page we don't want,
			 * to be freed later.
			 */
			if (!tmp_p) {
				mac_control->zerodma_virt_addr = tmp_v;
				DBG_PRINT(INIT_DBG,
					  "%s: Zero DMA address for TxDL. "
					  "Virtual address %p\n",
					  dev->name, tmp_v);
				tmp_v = pci_alloc_consistent(nic->pdev,
							     PAGE_SIZE, &tmp_p);
				if (!tmp_v) {
					DBG_PRINT(INFO_DBG,
						  "pci_alloc_consistent failed for TxDL\n");
					return -ENOMEM;
				}
				mem_allocated += PAGE_SIZE;
			}
			/* Carve the page into per-descriptor-list slots. */
			while (k < lst_per_page) {
				int l = (j * lst_per_page) + k;
				if (l == tx_cfg->fifo_len)
					break;
				fifo->list_info[l].list_virt_addr =
					tmp_v + (k * lst_size);
				fifo->list_info[l].list_phy_addr =
					tmp_p + (k * lst_size);
				k++;
			}
		}
	}

	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		size = tx_cfg->fifo_len;
		fifo->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
		if (!fifo->ufo_in_band_v)
			return -ENOMEM;
		mem_allocated += (size * sizeof(u64));
	}

	/* Allocation and initialization of RXDs in Rings */
	size = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
		struct ring_info *ring = &mac_control->rings[i];

		if (rx_cfg->num_rxd % (rxd_count[nic->rxd_mode] + 1)) {
			DBG_PRINT(ERR_DBG, "%s: Ring%d RxD count is not a "
				  "multiple of RxDs per Block\n",
				  dev->name, i);
			return FAILURE;
		}
		size += rx_cfg->num_rxd;
		ring->block_count = rx_cfg->num_rxd /
			(rxd_count[nic->rxd_mode] + 1);
		/* one descriptor per block is a link, not a packet slot */
		ring->pkt_cnt = rx_cfg->num_rxd - ring->block_count;
	}
	if (nic->rxd_mode == RXD_MODE_1)
		size = (size * (sizeof(struct RxD1)));
	else
		size = (size * (sizeof(struct RxD3)));

	for (i = 0; i < config->rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
		struct ring_info *ring = &mac_control->rings[i];

		ring->rx_curr_get_info.block_index = 0;
		ring->rx_curr_get_info.offset = 0;
		ring->rx_curr_get_info.ring_len = rx_cfg->num_rxd - 1;
		ring->rx_curr_put_info.block_index = 0;
		ring->rx_curr_put_info.offset = 0;
		ring->rx_curr_put_info.ring_len = rx_cfg->num_rxd - 1;
		ring->nic = nic;
		ring->ring_no = i;

		blk_cnt = rx_cfg->num_rxd / (rxd_count[nic->rxd_mode] + 1);
		/* Allocating all the Rx blocks */
		for (j = 0; j < blk_cnt; j++) {
			struct rx_block_info *rx_blocks;
			int l;

			rx_blocks = &ring->rx_blocks[j];
			size = SIZE_OF_BLOCK;	/* size is always page size */
			tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
							  &tmp_p_addr);
			if (tmp_v_addr == NULL) {
				/*
				 * In case of failure, free_shared_mem()
				 * is called, which should free any
				 * memory that was alloced till the
				 * failure happened.
				 */
				rx_blocks->block_virt_addr = tmp_v_addr;
				return -ENOMEM;
			}
			mem_allocated += size;
			memset(tmp_v_addr, 0, size);

			size = sizeof(struct rxd_info) *
				rxd_count[nic->rxd_mode];
			rx_blocks->block_virt_addr = tmp_v_addr;
			rx_blocks->block_dma_addr = tmp_p_addr;
			rx_blocks->rxds = kmalloc(size, GFP_KERNEL);
			if (!rx_blocks->rxds)
				return -ENOMEM;
			mem_allocated += size;
			for (l = 0; l < rxd_count[nic->rxd_mode]; l++) {
				rx_blocks->rxds[l].virt_addr =
					rx_blocks->block_virt_addr +
					(rxd_size[nic->rxd_mode] * l);
				rx_blocks->rxds[l].dma_addr =
					rx_blocks->block_dma_addr +
					(rxd_size[nic->rxd_mode] * l);
			}
		}
		/* Interlinking all Rx Blocks (circularly: last points to first) */
		for (j = 0; j < blk_cnt; j++) {
			int next = (j + 1) % blk_cnt;
			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
			tmp_v_addr_next = ring->rx_blocks[next].block_virt_addr;
			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
			tmp_p_addr_next = ring->rx_blocks[next].block_dma_addr;

			pre_rxd_blk = (struct RxD_block *)tmp_v_addr;
			pre_rxd_blk->reserved_2_pNext_RxD_block =
				(unsigned long)tmp_v_addr_next;
			pre_rxd_blk->pNext_RxD_Blk_physical =
				(u64)tmp_p_addr_next;
		}
	}
	if (nic->rxd_mode == RXD_MODE_3B) {
		/*
		 * Allocation of Storages for buffer addresses in 2BUFF mode
		 * and the buffers as well.
		 */
		for (i = 0; i < config->rx_ring_num; i++) {
			struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
			struct ring_info *ring = &mac_control->rings[i];

			blk_cnt = rx_cfg->num_rxd /
				(rxd_count[nic->rxd_mode] + 1);
			size = sizeof(struct buffAdd *) * blk_cnt;
			ring->ba = kmalloc(size, GFP_KERNEL);
			if (!ring->ba)
				return -ENOMEM;
			mem_allocated += size;
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;

				size = sizeof(struct buffAdd) *
					(rxd_count[nic->rxd_mode] + 1);
				ring->ba[j] = kmalloc(size, GFP_KERNEL);
				if (!ring->ba[j])
					return -ENOMEM;
				mem_allocated += size;
				while (k != rxd_count[nic->rxd_mode]) {
					ba = &ring->ba[j][k];
					/* Buffer 0: over-allocate by
					 * ALIGN_SIZE then round the usable
					 * pointer up to alignment. */
					size = BUF0_LEN + ALIGN_SIZE;
					ba->ba_0_org = kmalloc(size, GFP_KERNEL);
					if (!ba->ba_0_org)
						return -ENOMEM;
					mem_allocated += size;
					tmp = (unsigned long)ba->ba_0_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long)ALIGN_SIZE);
					ba->ba_0 = (void *)tmp;

					/* Buffer 1: same alignment trick. */
					size = BUF1_LEN + ALIGN_SIZE;
					ba->ba_1_org = kmalloc(size, GFP_KERNEL);
					if (!ba->ba_1_org)
						return -ENOMEM;
					mem_allocated += size;
					tmp = (unsigned long)ba->ba_1_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long)ALIGN_SIZE);
					ba->ba_1 = (void *)tmp;
					k++;
				}
			}
		}
	}

	/* Allocation and initialization of Statistics block */
	size = sizeof(struct stat_block);
	mac_control->stats_mem =
		pci_alloc_consistent(nic->pdev, size,
				     &mac_control->stats_mem_phy);

	if (!mac_control->stats_mem) {
		/*
		 * In case of failure, free_shared_mem() is called, which
		 * should free any memory that was alloced till the
		 * failure happened.
		 */
		return -ENOMEM;
	}
	mem_allocated += size;
	mac_control->stats_mem_sz = size;

	tmp_v_addr = mac_control->stats_mem;
	mac_control->stats_info = (struct stat_block *)tmp_v_addr;
	memset(tmp_v_addr, 0, size);
	DBG_PRINT(INIT_DBG, "%s: Ring Mem PHY: 0x%llx\n",
		  dev_name(&nic->pdev->dev), (unsigned long long)tmp_p_addr);
	mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
	return SUCCESS;
}
/**
 * free_shared_mem - Free the allocated Memory
 * @nic: Device private variable.
 * Description: This function is to free all memory locations allocated by
 * the init_shared_mem() function and return it to the kernel.
 * Safe to call after a partial init_shared_mem() failure: it stops at the
 * first unallocated TxDL page / Rx block for each fifo or ring.
 */
static void free_shared_mem(struct s2io_nic *nic)
{
	int i, j, blk_cnt, size;
	void *tmp_v_addr;
	dma_addr_t tmp_p_addr;
	int lst_size, lst_per_page;
	struct net_device *dev;
	int page_num = 0;
	struct config_param *config;
	struct mac_info *mac_control;
	struct stat_block *stats;
	struct swStat *swstats;

	if (!nic)
		return;

	dev = nic->dev;

	config = &nic->config;
	mac_control = &nic->mac_control;
	stats = mac_control->stats_info;
	swstats = &stats->sw_stat;

	lst_size = sizeof(struct TxD) * config->max_txds;
	lst_per_page = PAGE_SIZE / lst_size;

	/* Free the TxDL pages and per-fifo list_info arrays. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		page_num = TXD_MEM_PAGE_CNT(tx_cfg->fifo_len, lst_per_page);
		for (j = 0; j < page_num; j++) {
			int mem_blks = (j * lst_per_page);
			struct list_info_hold *fli;

			if (!fifo->list_info)
				return;	/* nothing else was allocated for this fifo */

			fli = &fifo->list_info[mem_blks];
			if (!fli->list_virt_addr)
				break;	/* allocation stopped here */
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    fli->list_virt_addr,
					    fli->list_phy_addr);
			swstats->mem_freed += PAGE_SIZE;
		}
		/* If we got a zero DMA address during allocation,
		 * free the page now
		 */
		if (mac_control->zerodma_virt_addr) {
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    mac_control->zerodma_virt_addr,
					    (dma_addr_t)0);
			DBG_PRINT(INIT_DBG,
				  "%s: Freeing TxDL with zero DMA address. "
				  "Virtual address %p\n",
				  dev->name, mac_control->zerodma_virt_addr);
			swstats->mem_freed += PAGE_SIZE;
		}
		kfree(fifo->list_info);
		swstats->mem_freed += tx_cfg->fifo_len *
			sizeof(struct list_info_hold);
	}

	/* Free the Rx blocks and their rxd bookkeeping arrays. */
	size = SIZE_OF_BLOCK;
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		blk_cnt = ring->block_count;
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
			if (tmp_v_addr == NULL)
				break;	/* allocation stopped here */
			pci_free_consistent(nic->pdev, size,
					    tmp_v_addr, tmp_p_addr);
			swstats->mem_freed += size;
			kfree(ring->rx_blocks[j].rxds);
			swstats->mem_freed += sizeof(struct rxd_info) *
				rxd_count[nic->rxd_mode];
		}
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Freeing buffer storage addresses in 2BUFF mode. */
		for (i = 0; i < config->rx_ring_num; i++) {
			struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
			struct ring_info *ring = &mac_control->rings[i];

			blk_cnt = rx_cfg->num_rxd /
				(rxd_count[nic->rxd_mode] + 1);
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;
				if (!ring->ba[j])
					continue;
				while (k != rxd_count[nic->rxd_mode]) {
					struct buffAdd *ba = &ring->ba[j][k];
					/* free the original (unaligned) pointers */
					kfree(ba->ba_0_org);
					swstats->mem_freed +=
						BUF0_LEN + ALIGN_SIZE;
					kfree(ba->ba_1_org);
					swstats->mem_freed +=
						BUF1_LEN + ALIGN_SIZE;
					k++;
				}
				kfree(ring->ba[j]);
				swstats->mem_freed += sizeof(struct buffAdd) *
					(rxd_count[nic->rxd_mode] + 1);
			}
			kfree(ring->ba);
			swstats->mem_freed += sizeof(struct buffAdd *) *
				blk_cnt;
		}
	}

	for (i = 0; i < nic->config.tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		if (fifo->ufo_in_band_v) {
			swstats->mem_freed += tx_cfg->fifo_len *
				sizeof(u64);
			kfree(fifo->ufo_in_band_v);
		}
	}

	if (mac_control->stats_mem) {
		swstats->mem_freed += mac_control->stats_mem_sz;
		pci_free_consistent(nic->pdev,
				    mac_control->stats_mem_sz,
				    mac_control->stats_mem,
				    mac_control->stats_mem_phy);
	}
}
1071 * s2io_verify_pci_mode -
1074 static int s2io_verify_pci_mode(struct s2io_nic *nic)
1076 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1077 register u64 val64 = 0;
1078 int mode;
1080 val64 = readq(&bar0->pci_mode);
1081 mode = (u8)GET_PCI_MODE(val64);
1083 if (val64 & PCI_MODE_UNKNOWN_MODE)
1084 return -1; /* Unknown PCI mode */
1085 return mode;
1088 #define NEC_VENID 0x1033
1089 #define NEC_DEVID 0x0125
1090 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
1092 struct pci_dev *tdev = NULL;
1093 while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
1094 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
1095 if (tdev->bus == s2io_pdev->bus->parent) {
1096 pci_dev_put(tdev);
1097 return 1;
1101 return 0;
/* Bus clock (MHz) indexed by the mode code from GET_PCI_MODE().
 * NOTE(review): entry [1] is 133 while s2io_print_pci_mode() labels mode 1
 * as "66MHz PCI bus" — presumably intentional for timer scaling; confirm
 * against the PCI_MODE_* macro values before changing. */
static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
1106 * s2io_print_pci_mode -
1108 static int s2io_print_pci_mode(struct s2io_nic *nic)
1110 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1111 register u64 val64 = 0;
1112 int mode;
1113 struct config_param *config = &nic->config;
1114 const char *pcimode;
1116 val64 = readq(&bar0->pci_mode);
1117 mode = (u8)GET_PCI_MODE(val64);
1119 if (val64 & PCI_MODE_UNKNOWN_MODE)
1120 return -1; /* Unknown PCI mode */
1122 config->bus_speed = bus_speed[mode];
1124 if (s2io_on_nec_bridge(nic->pdev)) {
1125 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
1126 nic->dev->name);
1127 return mode;
1130 switch (mode) {
1131 case PCI_MODE_PCI_33:
1132 pcimode = "33MHz PCI bus";
1133 break;
1134 case PCI_MODE_PCI_66:
1135 pcimode = "66MHz PCI bus";
1136 break;
1137 case PCI_MODE_PCIX_M1_66:
1138 pcimode = "66MHz PCIX(M1) bus";
1139 break;
1140 case PCI_MODE_PCIX_M1_100:
1141 pcimode = "100MHz PCIX(M1) bus";
1142 break;
1143 case PCI_MODE_PCIX_M1_133:
1144 pcimode = "133MHz PCIX(M1) bus";
1145 break;
1146 case PCI_MODE_PCIX_M2_66:
1147 pcimode = "133MHz PCIX(M2) bus";
1148 break;
1149 case PCI_MODE_PCIX_M2_100:
1150 pcimode = "200MHz PCIX(M2) bus";
1151 break;
1152 case PCI_MODE_PCIX_M2_133:
1153 pcimode = "266MHz PCIX(M2) bus";
1154 break;
1155 default:
1156 pcimode = "unsupported bus!";
1157 mode = -1;
1160 DBG_PRINT(ERR_DBG, "%s: Device is on %d bit %s\n",
1161 nic->dev->name, val64 & PCI_MODE_32_BITS ? 32 : 64, pcimode);
1163 return mode;
/**
 * init_tti - Initialization transmit traffic interrupt scheme
 * @nic: device private variable
 * @link: link status (UP/DOWN) used to enable/disable continuous
 * transmit interrupts
 * Description: The function configures transmit traffic interrupts
 * per Tx FIFO by programming the TTI data1/data2 memories and issuing
 * a command-memory strobe for each FIFO offset.
 * Return Value: SUCCESS on success and
 * '-1' on failure
 */

static int init_tti(struct s2io_nic *nic, int link)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int i;
	struct config_param *config = &nic->config;

	for (i = 0; i < config->tx_fifo_num; i++) {
		/*
		 * TTI Initialization. Default Tx timer gets us about
		 * 250 interrupts per sec. Continuous interrupts are enabled
		 * by default.
		 */
		if (nic->device_type == XFRAME_II_DEVICE) {
			/* Scale the timer with the measured bus speed */
			int count = (nic->config.bus_speed * 125)/2;
			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
		} else
			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);

		val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
			TTI_DATA1_MEM_TX_URNG_B(0x10) |
			TTI_DATA1_MEM_TX_URNG_C(0x30) |
			TTI_DATA1_MEM_TX_TIMER_AC_EN;
		/* Continuous interrupts only on FIFO 0 and only while the
		 * link is up (when the module parameter allows it). */
		if (i == 0)
			if (use_continuous_tx_intrs && (link == LINK_UP))
				val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
		writeq(val64, &bar0->tti_data1_mem);

		if (nic->config.intr_type == MSI_X) {
			val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
				TTI_DATA2_MEM_TX_UFC_B(0x100) |
				TTI_DATA2_MEM_TX_UFC_C(0x200) |
				TTI_DATA2_MEM_TX_UFC_D(0x300);
		} else {
			/* UDP FIFOs (default steering, multi-FIFO) get
			 * different utilization-based frame counts. */
			if ((nic->config.tx_steering_type ==
			     TX_DEFAULT_STEERING) &&
			    (config->tx_fifo_num > 1) &&
			    (i >= nic->udp_fifo_idx) &&
			    (i < (nic->udp_fifo_idx +
				  nic->total_udp_fifos)))
				val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
					TTI_DATA2_MEM_TX_UFC_B(0x80) |
					TTI_DATA2_MEM_TX_UFC_C(0x100) |
					TTI_DATA2_MEM_TX_UFC_D(0x120);
			else
				val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
					TTI_DATA2_MEM_TX_UFC_B(0x20) |
					TTI_DATA2_MEM_TX_UFC_C(0x40) |
					TTI_DATA2_MEM_TX_UFC_D(0x80);
		}

		writeq(val64, &bar0->tti_data2_mem);

		/* Commit the data1/data2 values for this FIFO offset and
		 * wait for the hardware to clear the strobe. */
		val64 = TTI_CMD_MEM_WE |
			TTI_CMD_MEM_STROBE_NEW_CMD |
			TTI_CMD_MEM_OFFSET(i);
		writeq(val64, &bar0->tti_command_mem);

		if (wait_for_cmd_complete(&bar0->tti_command_mem,
					  TTI_CMD_MEM_STROBE_NEW_CMD,
					  S2IO_BIT_RESET) != SUCCESS)
			return FAILURE;
	}

	return SUCCESS;
}
/**
 * init_nic - Initialization of hardware
 * @nic: device private variable
 * Description: The function sequentially configures every block
 * of the H/W from their reset values.
 * The programming order below is significant — blocks are brought up
 * in the sequence the hardware expects (swapper, reset/XGXS, MAC,
 * Tx/Rx DMA partitioning, round-robin scheduling, TTI/RTI, pause).
 * Return Value: SUCCESS on success and
 * '-1' on failure (endian settings incorrect).
 */

static int init_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	void __iomem *add;
	u32 time;
	int i, j;
	int dtx_cnt = 0;
	unsigned long long mem_share;
	int mem_size;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;

	/* to set the swapper controle on the card */
	if (s2io_set_swapper(nic)) {
		DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
		return -EIO;
	}

	/*
	 * Herc requires EOI to be removed from reset before XGXS, so..
	 */
	if (nic->device_type & XFRAME_II_DEVICE) {
		val64 = 0xA500000000ULL;
		writeq(val64, &bar0->sw_reset);
		msleep(500);
		val64 = readq(&bar0->sw_reset);
	}

	/* Remove XGXS from reset state */
	val64 = 0;
	writeq(val64, &bar0->sw_reset);
	msleep(500);
	val64 = readq(&bar0->sw_reset);

	/* Ensure that it's safe to access registers by checking
	 * RIC_RUNNING bit is reset. Check is valid only for XframeII.
	 */
	if (nic->device_type == XFRAME_II_DEVICE) {
		for (i = 0; i < 50; i++) {
			val64 = readq(&bar0->adapter_status);
			if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
				break;
			msleep(10);
		}
		if (i == 50)
			return -ENODEV;
	}

	/* Enable Receiving broadcasts.
	 * mac_cfg is key-protected: the RMAC_CFG_KEY write must precede
	 * each 32-bit half written through writel(). */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 |= MAC_RMAC_BCAST_ENABLE;
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32)val64, add);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64 >> 32), (add + 4));

	/* Read registers in all blocks */
	val64 = readq(&bar0->mac_int_mask);
	val64 = readq(&bar0->mc_int_mask);
	val64 = readq(&bar0->xgxs_int_mask);

	/* Set MTU */
	val64 = dev->mtu;
	writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);

	/* Program the device-specific DTX sequences until END_SIGN */
	if (nic->device_type & XFRAME_II_DEVICE) {
		while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
			SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
					  &bar0->dtx_control, UF);
			if (dtx_cnt & 0x1)
				msleep(1); /* Necessary!! */
			dtx_cnt++;
		}
	} else {
		while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
			SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
					  &bar0->dtx_control, UF);
			val64 = readq(&bar0->dtx_control);
			dtx_cnt++;
		}
	}

	/* Tx DMA Initialization */
	val64 = 0;
	writeq(val64, &bar0->tx_fifo_partition_0);
	writeq(val64, &bar0->tx_fifo_partition_1);
	writeq(val64, &bar0->tx_fifo_partition_2);
	writeq(val64, &bar0->tx_fifo_partition_3);

	/* Each partition register holds two FIFO descriptors; j selects
	 * the 32-bit half within the current register. */
	for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		val64 |= vBIT(tx_cfg->fifo_len - 1, ((j * 32) + 19), 13) |
			vBIT(tx_cfg->fifo_priority, ((j * 32) + 5), 3);

		/* On the last FIFO, bump an even index so the switch below
		 * flushes the partially-filled register. */
		if (i == (config->tx_fifo_num - 1)) {
			if (i % 2 == 0)
				i++;
		}

		switch (i) {
		case 1:
			writeq(val64, &bar0->tx_fifo_partition_0);
			val64 = 0;
			j = 0;
			break;
		case 3:
			writeq(val64, &bar0->tx_fifo_partition_1);
			val64 = 0;
			j = 0;
			break;
		case 5:
			writeq(val64, &bar0->tx_fifo_partition_2);
			val64 = 0;
			j = 0;
			break;
		case 7:
			writeq(val64, &bar0->tx_fifo_partition_3);
			val64 = 0;
			j = 0;
			break;
		default:
			j++;
			break;
		}
	}

	/*
	 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
	 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
	 */
	if ((nic->device_type == XFRAME_I_DEVICE) && (nic->pdev->revision < 4))
		writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);

	val64 = readq(&bar0->tx_fifo_partition_0);
	DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
		  &bar0->tx_fifo_partition_0, (unsigned long long)val64);

	/*
	 * Initialization of Tx_PA_CONFIG register to ignore packet
	 * integrity checking.
	 */
	val64 = readq(&bar0->tx_pa_cfg);
	val64 |= TX_PA_CFG_IGNORE_FRM_ERR |
		TX_PA_CFG_IGNORE_SNAP_OUI |
		TX_PA_CFG_IGNORE_LLC_CTRL |
		TX_PA_CFG_IGNORE_L2_ERR;
	writeq(val64, &bar0->tx_pa_cfg);

	/* Rx DMA intialization. */
	val64 = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];

		val64 |= vBIT(rx_cfg->ring_priority, (5 + (i * 8)), 3);
	}
	writeq(val64, &bar0->rx_queue_priority);

	/*
	 * Allocating equal share of memory to all the
	 * configured Rings.  Ring 0 also absorbs the remainder.
	 */
	val64 = 0;
	if (nic->device_type & XFRAME_II_DEVICE)
		mem_size = 32;
	else
		mem_size = 64;

	for (i = 0; i < config->rx_ring_num; i++) {
		switch (i) {
		case 0:
			mem_share = (mem_size / config->rx_ring_num +
				     mem_size % config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
			continue;
		case 1:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
			continue;
		case 2:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
			continue;
		case 3:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
			continue;
		case 4:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
			continue;
		case 5:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
			continue;
		case 6:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
			continue;
		case 7:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
			continue;
		}
	}
	writeq(val64, &bar0->rx_queue_cfg);

	/*
	 * Filling Tx round robin registers
	 * as per the number of FIFOs for equal scheduling priority
	 */
	switch (config->tx_fifo_num) {
	case 1:
		val64 = 0x0;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 2:
		val64 = 0x0001000100010001ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001000100000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 3:
		val64 = 0x0001020001020001ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0200010200010200ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0102000102000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001020001020001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0200010200000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 4:
		val64 = 0x0001020300010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001020300000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 5:
		val64 = 0x0001020304000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0304000102030400ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0102030400010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0400010203040001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0203040000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 6:
		val64 = 0x0001020304050001ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0203040500010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0405000102030405ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001020304050001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0203040500000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 7:
		val64 = 0x0001020304050600ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0102030405060001ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0203040506000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0304050600010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0405060000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 8:
		val64 = 0x0001020304050607ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001020300000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	}

	/* Enable all configured Tx FIFO partitions */
	val64 = readq(&bar0->tx_fifo_partition_0);
	val64 |= (TX_FIFO_PARTITION_EN);
	writeq(val64, &bar0->tx_fifo_partition_0);

	/* Filling the Rx round robin registers as per the
	 * number of Rings and steering based on QoS with
	 * equal priority.
	 */
	switch (config->rx_ring_num) {
	case 1:
		val64 = 0x0;
		writeq(val64, &bar0->rx_w_round_robin_0);
		writeq(val64, &bar0->rx_w_round_robin_1);
		writeq(val64, &bar0->rx_w_round_robin_2);
		writeq(val64, &bar0->rx_w_round_robin_3);
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080808080808080ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 2:
		val64 = 0x0001000100010001ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		writeq(val64, &bar0->rx_w_round_robin_1);
		writeq(val64, &bar0->rx_w_round_robin_2);
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001000100000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080808040404040ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 3:
		val64 = 0x0001020001020001ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0200010200010200ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0102000102000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001020001020001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0200010200000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080804040402020ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 4:
		val64 = 0x0001020300010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		writeq(val64, &bar0->rx_w_round_robin_1);
		writeq(val64, &bar0->rx_w_round_robin_2);
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001020300000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020201010ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 5:
		val64 = 0x0001020304000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0304000102030400ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0102030400010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0400010203040001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0203040000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020201008ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 6:
		val64 = 0x0001020304050001ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0203040500010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0405000102030405ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001020304050001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0203040500000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020100804ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 7:
		val64 = 0x0001020304050600ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0102030405060001ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0203040506000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0304050600010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0405060000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080402010080402ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 8:
		val64 = 0x0001020304050607ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		writeq(val64, &bar0->rx_w_round_robin_1);
		writeq(val64, &bar0->rx_w_round_robin_2);
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001020300000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8040201008040201ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	}

	/* UDP Fix */
	val64 = 0;
	for (i = 0; i < 8; i++)
		writeq(val64, &bar0->rts_frm_len_n[i]);

	/* Set the default rts frame length for the rings configured */
	val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
	for (i = 0 ; i < config->rx_ring_num ; i++)
		writeq(val64, &bar0->rts_frm_len_n[i]);

	/* Set the frame length for the configured rings
	 * desired by the user
	 */
	for (i = 0; i < config->rx_ring_num; i++) {
		/* If rts_frm_len[i] == 0 then it is assumed that user not
		 * specified frame length steering.
		 * If the user provides the frame length then program
		 * the rts_frm_len register for those values or else
		 * leave it as it is.
		 */
		if (rts_frm_len[i] != 0) {
			writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
			       &bar0->rts_frm_len_n[i]);
		}
	}

	/* Disable differentiated services steering logic */
	for (i = 0; i < 64; i++) {
		if (rts_ds_steer(nic, i, 0) == FAILURE) {
			DBG_PRINT(ERR_DBG,
				  "%s: rts_ds_steer failed on codepoint %d\n",
				  dev->name, i);
			return -ENODEV;
		}
	}

	/* Program statistics memory */
	writeq(mac_control->stats_mem_phy, &bar0->stat_addr);

	if (nic->device_type == XFRAME_II_DEVICE) {
		val64 = STAT_BC(0x320);
		writeq(val64, &bar0->stat_byte_cnt);
	}

	/*
	 * Initializing the sampling rate for the device to calculate the
	 * bandwidth utilization.
	 */
	val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
		MAC_RX_LINK_UTIL_VAL(rmac_util_period);
	writeq(val64, &bar0->mac_link_util);

	/*
	 * Initializing the Transmit and Receive Traffic Interrupt
	 * Scheme.
	 */

	/* Initialize TTI */
	if (SUCCESS != init_tti(nic, nic->last_link_state))
		return -ENODEV;

	/* RTI Initialization */
	if (nic->device_type == XFRAME_II_DEVICE) {
		/*
		 * Programmed to generate Apprx 500 Intrs per
		 * second
		 */
		int count = (nic->config.bus_speed * 125)/4;
		val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
	} else
		val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
	val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
		RTI_DATA1_MEM_RX_URNG_B(0x10) |
		RTI_DATA1_MEM_RX_URNG_C(0x30) |
		RTI_DATA1_MEM_RX_TIMER_AC_EN;

	writeq(val64, &bar0->rti_data1_mem);

	val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
		RTI_DATA2_MEM_RX_UFC_B(0x2) ;
	if (nic->config.intr_type == MSI_X)
		val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) |
			  RTI_DATA2_MEM_RX_UFC_D(0x40));
	else
		val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) |
			  RTI_DATA2_MEM_RX_UFC_D(0x80));
	writeq(val64, &bar0->rti_data2_mem);

	for (i = 0; i < config->rx_ring_num; i++) {
		val64 = RTI_CMD_MEM_WE |
			RTI_CMD_MEM_STROBE_NEW_CMD |
			RTI_CMD_MEM_OFFSET(i);
		writeq(val64, &bar0->rti_command_mem);

		/*
		 * Once the operation completes, the Strobe bit of the
		 * command register will be reset. We poll for this
		 * particular condition. We wait for a maximum of 500ms
		 * for the operation to complete, if it's not complete
		 * by then we return error.
		 */
		time = 0;
		while (true) {
			val64 = readq(&bar0->rti_command_mem);
			if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
				break;

			if (time > 10) {
				DBG_PRINT(ERR_DBG, "%s: RTI init failed\n",
					  dev->name);
				return -ENODEV;
			}
			time++;
			msleep(50);
		}
	}

	/*
	 * Initializing proper values as Pause threshold into all
	 * the 8 Queues on Rx side.
	 */
	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);

	/* Disable RMAC PAD STRIPPING */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64), add);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64 >> 32), (add + 4));
	val64 = readq(&bar0->mac_cfg);

	/* Enable FCS stripping by adapter.  Herc can take a plain 64-bit
	 * write; Xena needs the keyed 32-bit halves. */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 |= MAC_CFG_RMAC_STRIP_FCS;
	if (nic->device_type == XFRAME_II_DEVICE)
		writeq(val64, &bar0->mac_cfg);
	else {
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64), add);
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64 >> 32), (add + 4));
	}

	/*
	 * Set the time value to be inserted in the pause frame
	 * generated by xena.
	 */
	val64 = readq(&bar0->rmac_pause_cfg);
	val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
	val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
	writeq(val64, &bar0->rmac_pause_cfg);

	/*
	 * Set the Threshold Limit for Generating the pause frame
	 * If the amount of data in any Queue exceeds ratio of
	 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
	 * pause frame is generated
	 */
	val64 = 0;
	for (i = 0; i < 4; i++) {
		val64 |= (((u64)0xFF00 |
			   nic->mac_control.mc_pause_threshold_q0q3)
			  << (i * 2 * 8));
	}
	writeq(val64, &bar0->mc_pause_thresh_q0q3);

	val64 = 0;
	for (i = 0; i < 4; i++) {
		val64 |= (((u64)0xFF00 |
			   nic->mac_control.mc_pause_threshold_q4q7)
			  << (i * 2 * 8));
	}
	writeq(val64, &bar0->mc_pause_thresh_q4q7);

	/*
	 * TxDMA will stop Read request if the number of read split has
	 * exceeded the limit pointed by shared_splits
	 */
	val64 = readq(&bar0->pic_control);
	val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
	writeq(val64, &bar0->pic_control);

	if (nic->config.bus_speed == 266) {
		writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
		writeq(0x0, &bar0->read_retry_delay);
		writeq(0x0, &bar0->write_retry_delay);
	}

	/*
	 * Programming the Herc to split every write transaction
	 * that does not start on an ADB to reduce disconnects.
	 */
	if (nic->device_type == XFRAME_II_DEVICE) {
		val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
			MISC_LINK_STABILITY_PRD(3);
		writeq(val64, &bar0->misc_control);
		val64 = readq(&bar0->pic_control2);
		val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
		writeq(val64, &bar0->pic_control2);
	}
	if (strstr(nic->product_name, "CX4")) {
		val64 = TMAC_AVG_IPG(0x17);
		writeq(val64, &bar0->tmac_avg_ipg);
	}

	return SUCCESS;
}
1882 #define LINK_UP_DOWN_INTERRUPT 1
1883 #define MAC_RMAC_ERR_TIMER 2
1885 static int s2io_link_fault_indication(struct s2io_nic *nic)
1887 if (nic->device_type == XFRAME_II_DEVICE)
1888 return LINK_UP_DOWN_INTERRUPT;
1889 else
1890 return MAC_RMAC_ERR_TIMER;
1894 * do_s2io_write_bits - update alarm bits in alarm register
1895 * @value: alarm bits
1896 * @flag: interrupt status
1897 * @addr: address value
1898 * Description: update alarm bits in alarm register
1899 * Return Value:
1900 * NONE.
1902 static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1904 u64 temp64;
1906 temp64 = readq(addr);
1908 if (flag == ENABLE_INTRS)
1909 temp64 &= ~((u64)value);
1910 else
1911 temp64 |= ((u64)value);
1912 writeq(temp64, addr);
/*
 * en_dis_err_alarms - enable/disable per-block alarm (error) interrupts
 * @nic: device private variable
 * @mask: bitmask selecting which blocks to touch (TX_DMA_INTR, ...)
 * @flag: ENABLE_INTRS or DISABLE_INTRS, forwarded to do_s2io_write_bits()
 *
 * All general interrupts are masked up front; each selected block's
 * error-mask registers are then programmed and the block's bit is
 * accumulated into nic->general_int_mask.
 */
static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 gen_int_mask = 0;
	u64 interruptible;

	/* Mask everything at the top level while reprogramming */
	writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask);
	if (mask & TX_DMA_INTR) {
		gen_int_mask |= TXDMA_INT_M;

		do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
				   TXDMA_PCC_INT | TXDMA_TTI_INT |
				   TXDMA_LSO_INT | TXDMA_TPA_INT |
				   TXDMA_SM_INT, flag, &bar0->txdma_int_mask);

		do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
				   PFC_MISC_0_ERR | PFC_MISC_1_ERR |
				   PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
				   &bar0->pfc_err_mask);

		do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
				   TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
				   TDA_PCIX_ERR, flag, &bar0->tda_err_mask);

		do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
				   PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
				   PCC_N_SERR | PCC_6_COF_OV_ERR |
				   PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
				   PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
				   PCC_TXB_ECC_SG_ERR,
				   flag, &bar0->pcc_err_mask);

		do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
				   TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);

		do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
				   LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
				   LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
				   flag, &bar0->lso_err_mask);

		do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
				   flag, &bar0->tpa_err_mask);

		do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
	}

	if (mask & TX_MAC_INTR) {
		gen_int_mask |= TXMAC_INT_M;
		do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
				   &bar0->mac_int_mask);
		do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
				   TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
				   TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
				   flag, &bar0->mac_tmac_err_mask);
	}

	if (mask & TX_XGXS_INTR) {
		gen_int_mask |= TXXGXS_INT_M;
		do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
				   &bar0->xgxs_int_mask);
		do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
				   TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
				   flag, &bar0->xgxs_txgxs_err_mask);
	}

	if (mask & RX_DMA_INTR) {
		gen_int_mask |= RXDMA_INT_M;
		do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
				   RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
				   flag, &bar0->rxdma_int_mask);
		do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
				   RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
				   RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
				   RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
		do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
				   PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
				   PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
				   &bar0->prc_pcix_err_mask);
		do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
				   RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
				   &bar0->rpa_err_mask);
		do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
				   RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
				   RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
				   RDA_FRM_ECC_SG_ERR |
				   RDA_MISC_ERR|RDA_PCIX_ERR,
				   flag, &bar0->rda_err_mask);
		do_s2io_write_bits(RTI_SM_ERR_ALARM |
				   RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
				   flag, &bar0->rti_err_mask);
	}

	if (mask & RX_MAC_INTR) {
		gen_int_mask |= RXMAC_INT_M;
		do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
				   &bar0->mac_int_mask);
		interruptible = (RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
				 RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
				 RMAC_DOUBLE_ECC_ERR);
		/* Link-state alarm only when link faults are polled via
		 * the RMAC error timer (non-Herc adapters). */
		if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER)
			interruptible |= RMAC_LINK_STATE_CHANGE_INT;
		do_s2io_write_bits(interruptible,
				   flag, &bar0->mac_rmac_err_mask);
	}

	if (mask & RX_XGXS_INTR) {
		gen_int_mask |= RXXGXS_INT_M;
		do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
				   &bar0->xgxs_int_mask);
		do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
				   &bar0->xgxs_rxgxs_err_mask);
	}

	if (mask & MC_INTR) {
		gen_int_mask |= MC_INT_M;
		do_s2io_write_bits(MC_INT_MASK_MC_INT,
				   flag, &bar0->mc_int_mask);
		do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
				   MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
				   &bar0->mc_err_mask);
	}
	nic->general_int_mask = gen_int_mask;

	/* Remove this line when alarm interrupts are enabled */
	nic->general_int_mask = 0;
}
/**
 * en_dis_able_nic_intrs - Enable or Disable the interrupts
 * @nic: device private variable,
 * @mask: A mask indicating which Intr block must be modified and,
 * @flag: A flag indicating whether to enable or disable the Intrs.
 * Description: This function will either disable or enable the interrupts
 * depending on the flag argument. The mask argument can be used to
 * enable/disable any Intr block.  The accumulated per-block bits are
 * combined with nic->general_int_mask before being applied to the
 * general interrupt mask register.
 * Return Value: NONE.
 */

static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 temp64 = 0, intr_mask = 0;

	intr_mask = nic->general_int_mask;

	/* Top level interrupt classification */
	/* PIC Interrupts */
	if (mask & TX_PIC_INTR) {
		/* Enable PIC Intrs in the general intr mask register */
		intr_mask |= TXPIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/*
			 * If Hercules adapter enable GPIO otherwise
			 * disable all PCIX, Flash, MDIO, IIC and GPIO
			 * interrupts for now.
			 * TODO
			 */
			if (s2io_link_fault_indication(nic) ==
			    LINK_UP_DOWN_INTERRUPT) {
				do_s2io_write_bits(PIC_INT_GPIO, flag,
						   &bar0->pic_int_mask);
				do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
						   &bar0->gpio_int_mask);
			} else
				writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable PIC Intrs in the general
			 * intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
		}
	}

	/* Tx traffic interrupts */
	if (mask & TX_TRAFFIC_INTR) {
		intr_mask |= TXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/*
			 * Enable all the Tx side interrupts
			 * writing 0 Enables all 64 TX interrupt levels
			 */
			writeq(0x0, &bar0->tx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Tx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
		}
	}

	/* Rx traffic interrupts */
	if (mask & RX_TRAFFIC_INTR) {
		intr_mask |= RXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/* writing 0 Enables all 8 RX interrupt levels */
			writeq(0x0, &bar0->rx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Rx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
		}
	}

	/* Clear (enable) the selected bits in the general mask, or mask
	 * everything when disabling. */
	temp64 = readq(&bar0->general_int_mask);
	if (flag == ENABLE_INTRS)
		temp64 &= ~((u64)intr_mask);
	else
		temp64 = DISABLE_ALL_INTRS;
	writeq(temp64, &bar0->general_int_mask);

	nic->general_int_mask = readq(&bar0->general_int_mask);
}
2133 * verify_pcc_quiescent- Checks for PCC quiescent state
2134 * Return: 1 If PCC is quiescence
2135 * 0 If PCC is not quiescence
2137 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
2139 int ret = 0, herc;
2140 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2141 u64 val64 = readq(&bar0->adapter_status);
2143 herc = (sp->device_type == XFRAME_II_DEVICE);
2145 if (flag == false) {
2146 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2147 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
2148 ret = 1;
2149 } else {
2150 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2151 ret = 1;
2153 } else {
2154 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2155 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
2156 ADAPTER_STATUS_RMAC_PCC_IDLE))
2157 ret = 1;
2158 } else {
2159 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
2160 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2161 ret = 1;
2165 return ret;
2168 * verify_xena_quiescence - Checks whether the H/W is ready
2169 * Description: Returns whether the H/W is ready to go or not. Depending
2170 * on whether adapter enable bit was written or not the comparison
2171 * differs and the calling function passes the input argument flag to
2172 * indicate this.
2173 * Return: 1 If xena is quiescence
2174 * 0 If Xena is not quiescence
2177 static int verify_xena_quiescence(struct s2io_nic *sp)
2179 int mode;
2180 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2181 u64 val64 = readq(&bar0->adapter_status);
2182 mode = s2io_verify_pci_mode(sp);
2184 if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
2185 DBG_PRINT(ERR_DBG, "TDMA is not ready!\n");
2186 return 0;
2188 if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
2189 DBG_PRINT(ERR_DBG, "RDMA is not ready!\n");
2190 return 0;
2192 if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
2193 DBG_PRINT(ERR_DBG, "PFC is not ready!\n");
2194 return 0;
2196 if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
2197 DBG_PRINT(ERR_DBG, "TMAC BUF is not empty!\n");
2198 return 0;
2200 if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
2201 DBG_PRINT(ERR_DBG, "PIC is not QUIESCENT!\n");
2202 return 0;
2204 if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
2205 DBG_PRINT(ERR_DBG, "MC_DRAM is not ready!\n");
2206 return 0;
2208 if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
2209 DBG_PRINT(ERR_DBG, "MC_QUEUES is not ready!\n");
2210 return 0;
2212 if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
2213 DBG_PRINT(ERR_DBG, "M_PLL is not locked!\n");
2214 return 0;
2218 * In PCI 33 mode, the P_PLL is not used, and therefore,
2219 * the the P_PLL_LOCK bit in the adapter_status register will
2220 * not be asserted.
2222 if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
2223 sp->device_type == XFRAME_II_DEVICE &&
2224 mode != PCI_MODE_PCI_33) {
2225 DBG_PRINT(ERR_DBG, "P_PLL is not locked!\n");
2226 return 0;
2228 if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
2229 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2230 DBG_PRINT(ERR_DBG, "RC_PRC is not QUIESCENT!\n");
2231 return 0;
2233 return 1;
2237 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
2238 * @sp: Pointer to device specifc structure
2239 * Description :
2240 * New procedure to clear mac address reading problems on Alpha platforms
2244 static void fix_mac_address(struct s2io_nic *sp)
2246 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2247 int i = 0;
2249 while (fix_mac[i] != END_SIGN) {
2250 writeq(fix_mac[i++], &bar0->gpio_control);
2251 udelay(10);
2252 (void) readq(&bar0->gpio_control);
2257 * start_nic - Turns the device on
2258 * @nic : device private variable.
2259 * Description:
2260 * This function actually turns the device on. Before this function is
2261 * called,all Registers are configured from their reset states
2262 * and shared memory is allocated but the NIC is still quiescent. On
2263 * calling this function, the device interrupts are cleared and the NIC is
2264 * literally switched on by writing into the adapter control register.
2265 * Return Value:
2266 * SUCCESS on success and -1 on failure.
2269 static int start_nic(struct s2io_nic *nic)
2271 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2272 struct net_device *dev = nic->dev;
2273 register u64 val64 = 0;
2274 u16 subid, i;
2275 struct config_param *config = &nic->config;
2276 struct mac_info *mac_control = &nic->mac_control;
2278 /* PRC Initialization and configuration */
2279 for (i = 0; i < config->rx_ring_num; i++) {
2280 struct ring_info *ring = &mac_control->rings[i];
2282 writeq((u64)ring->rx_blocks[0].block_dma_addr,
2283 &bar0->prc_rxd0_n[i]);
2285 val64 = readq(&bar0->prc_ctrl_n[i]);
2286 if (nic->rxd_mode == RXD_MODE_1)
2287 val64 |= PRC_CTRL_RC_ENABLED;
2288 else
2289 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
2290 if (nic->device_type == XFRAME_II_DEVICE)
2291 val64 |= PRC_CTRL_GROUP_READS;
2292 val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
2293 val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
2294 writeq(val64, &bar0->prc_ctrl_n[i]);
2297 if (nic->rxd_mode == RXD_MODE_3B) {
2298 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
2299 val64 = readq(&bar0->rx_pa_cfg);
2300 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
2301 writeq(val64, &bar0->rx_pa_cfg);
2304 if (vlan_tag_strip == 0) {
2305 val64 = readq(&bar0->rx_pa_cfg);
2306 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
2307 writeq(val64, &bar0->rx_pa_cfg);
2308 nic->vlan_strip_flag = 0;
2312 * Enabling MC-RLDRAM. After enabling the device, we timeout
2313 * for around 100ms, which is approximately the time required
2314 * for the device to be ready for operation.
2316 val64 = readq(&bar0->mc_rldram_mrs);
2317 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
2318 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
2319 val64 = readq(&bar0->mc_rldram_mrs);
2321 msleep(100); /* Delay by around 100 ms. */
2323 /* Enabling ECC Protection. */
2324 val64 = readq(&bar0->adapter_control);
2325 val64 &= ~ADAPTER_ECC_EN;
2326 writeq(val64, &bar0->adapter_control);
2329 * Verify if the device is ready to be enabled, if so enable
2330 * it.
2332 val64 = readq(&bar0->adapter_status);
2333 if (!verify_xena_quiescence(nic)) {
2334 DBG_PRINT(ERR_DBG, "%s: device is not ready, "
2335 "Adapter status reads: 0x%llx\n",
2336 dev->name, (unsigned long long)val64);
2337 return FAILURE;
2341 * With some switches, link might be already up at this point.
2342 * Because of this weird behavior, when we enable laser,
2343 * we may not get link. We need to handle this. We cannot
2344 * figure out which switch is misbehaving. So we are forced to
2345 * make a global change.
2348 /* Enabling Laser. */
2349 val64 = readq(&bar0->adapter_control);
2350 val64 |= ADAPTER_EOI_TX_ON;
2351 writeq(val64, &bar0->adapter_control);
2353 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
2355 * Dont see link state interrupts initially on some switches,
2356 * so directly scheduling the link state task here.
2358 schedule_work(&nic->set_link_task);
2360 /* SXE-002: Initialize link and activity LED */
2361 subid = nic->pdev->subsystem_device;
2362 if (((subid & 0xFF) >= 0x07) &&
2363 (nic->device_type == XFRAME_I_DEVICE)) {
2364 val64 = readq(&bar0->gpio_control);
2365 val64 |= 0x0000800000000000ULL;
2366 writeq(val64, &bar0->gpio_control);
2367 val64 = 0x0411040400000000ULL;
2368 writeq(val64, (void __iomem *)bar0 + 0x2700);
2371 return SUCCESS;
2374 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2376 static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
2377 struct TxD *txdlp, int get_off)
2379 struct s2io_nic *nic = fifo_data->nic;
2380 struct sk_buff *skb;
2381 struct TxD *txds;
2382 u16 j, frg_cnt;
2384 txds = txdlp;
2385 if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
2386 pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
2387 sizeof(u64), PCI_DMA_TODEVICE);
2388 txds++;
2391 skb = (struct sk_buff *)((unsigned long)txds->Host_Control);
2392 if (!skb) {
2393 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2394 return NULL;
2396 pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
2397 skb_headlen(skb), PCI_DMA_TODEVICE);
2398 frg_cnt = skb_shinfo(skb)->nr_frags;
2399 if (frg_cnt) {
2400 txds++;
2401 for (j = 0; j < frg_cnt; j++, txds++) {
2402 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2403 if (!txds->Buffer_Pointer)
2404 break;
2405 pci_unmap_page(nic->pdev,
2406 (dma_addr_t)txds->Buffer_Pointer,
2407 frag->size, PCI_DMA_TODEVICE);
2410 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2411 return skb;
2415 * free_tx_buffers - Free all queued Tx buffers
2416 * @nic : device private variable.
2417 * Description:
2418 * Free all queued Tx buffers.
2419 * Return Value: void
2422 static void free_tx_buffers(struct s2io_nic *nic)
2424 struct net_device *dev = nic->dev;
2425 struct sk_buff *skb;
2426 struct TxD *txdp;
2427 int i, j;
2428 int cnt = 0;
2429 struct config_param *config = &nic->config;
2430 struct mac_info *mac_control = &nic->mac_control;
2431 struct stat_block *stats = mac_control->stats_info;
2432 struct swStat *swstats = &stats->sw_stat;
2434 for (i = 0; i < config->tx_fifo_num; i++) {
2435 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
2436 struct fifo_info *fifo = &mac_control->fifos[i];
2437 unsigned long flags;
2439 spin_lock_irqsave(&fifo->tx_lock, flags);
2440 for (j = 0; j < tx_cfg->fifo_len; j++) {
2441 txdp = (struct TxD *)fifo->list_info[j].list_virt_addr;
2442 skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2443 if (skb) {
2444 swstats->mem_freed += skb->truesize;
2445 dev_kfree_skb(skb);
2446 cnt++;
2449 DBG_PRINT(INTR_DBG,
2450 "%s: forcibly freeing %d skbs on FIFO%d\n",
2451 dev->name, cnt, i);
2452 fifo->tx_curr_get_info.offset = 0;
2453 fifo->tx_curr_put_info.offset = 0;
2454 spin_unlock_irqrestore(&fifo->tx_lock, flags);
2459 * stop_nic - To stop the nic
2460 * @nic ; device private variable.
2461 * Description:
2462 * This function does exactly the opposite of what the start_nic()
2463 * function does. This function is called to stop the device.
2464 * Return Value:
2465 * void.
2468 static void stop_nic(struct s2io_nic *nic)
2470 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2471 register u64 val64 = 0;
2472 u16 interruptible;
2474 /* Disable all interrupts */
2475 en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2476 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2477 interruptible |= TX_PIC_INTR;
2478 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2480 /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2481 val64 = readq(&bar0->adapter_control);
2482 val64 &= ~(ADAPTER_CNTL_EN);
2483 writeq(val64, &bar0->adapter_control);
2487 * fill_rx_buffers - Allocates the Rx side skbs
2488 * @ring_info: per ring structure
2489 * @from_card_up: If this is true, we will map the buffer to get
2490 * the dma address for buf0 and buf1 to give it to the card.
2491 * Else we will sync the already mapped buffer to give it to the card.
2492 * Description:
2493 * The function allocates Rx side skbs and puts the physical
2494 * address of these buffers into the RxD buffer pointers, so that the NIC
2495 * can DMA the received frame into these locations.
2496 * The NIC supports 3 receive modes, viz
2497 * 1. single buffer,
2498 * 2. three buffer and
2499 * 3. Five buffer modes.
2500 * Each mode defines how many fragments the received frame will be split
2501 * up into by the NIC. The frame is split into L3 header, L4 Header,
2502 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2503 * is split into 3 fragments. As of now only single buffer mode is
2504 * supported.
2505 * Return Value:
2506 * SUCCESS on success or an appropriate -ve value on failure.
2508 static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
2509 int from_card_up)
2511 struct sk_buff *skb;
2512 struct RxD_t *rxdp;
2513 int off, size, block_no, block_no1;
2514 u32 alloc_tab = 0;
2515 u32 alloc_cnt;
2516 u64 tmp;
2517 struct buffAdd *ba;
2518 struct RxD_t *first_rxdp = NULL;
2519 u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
2520 int rxd_index = 0;
2521 struct RxD1 *rxdp1;
2522 struct RxD3 *rxdp3;
2523 struct swStat *swstats = &ring->nic->mac_control.stats_info->sw_stat;
2525 alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;
2527 block_no1 = ring->rx_curr_get_info.block_index;
2528 while (alloc_tab < alloc_cnt) {
2529 block_no = ring->rx_curr_put_info.block_index;
2531 off = ring->rx_curr_put_info.offset;
2533 rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;
2535 rxd_index = off + 1;
2536 if (block_no)
2537 rxd_index += (block_no * ring->rxd_count);
2539 if ((block_no == block_no1) &&
2540 (off == ring->rx_curr_get_info.offset) &&
2541 (rxdp->Host_Control)) {
2542 DBG_PRINT(INTR_DBG, "%s: Get and Put info equated\n",
2543 ring->dev->name);
2544 goto end;
2546 if (off && (off == ring->rxd_count)) {
2547 ring->rx_curr_put_info.block_index++;
2548 if (ring->rx_curr_put_info.block_index ==
2549 ring->block_count)
2550 ring->rx_curr_put_info.block_index = 0;
2551 block_no = ring->rx_curr_put_info.block_index;
2552 off = 0;
2553 ring->rx_curr_put_info.offset = off;
2554 rxdp = ring->rx_blocks[block_no].block_virt_addr;
2555 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2556 ring->dev->name, rxdp);
2560 if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2561 ((ring->rxd_mode == RXD_MODE_3B) &&
2562 (rxdp->Control_2 & s2BIT(0)))) {
2563 ring->rx_curr_put_info.offset = off;
2564 goto end;
2566 /* calculate size of skb based on ring mode */
2567 size = ring->mtu +
2568 HEADER_ETHERNET_II_802_3_SIZE +
2569 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2570 if (ring->rxd_mode == RXD_MODE_1)
2571 size += NET_IP_ALIGN;
2572 else
2573 size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2575 /* allocate skb */
2576 skb = dev_alloc_skb(size);
2577 if (!skb) {
2578 DBG_PRINT(INFO_DBG, "%s: Could not allocate skb\n",
2579 ring->dev->name);
2580 if (first_rxdp) {
2581 wmb();
2582 first_rxdp->Control_1 |= RXD_OWN_XENA;
2584 swstats->mem_alloc_fail_cnt++;
2586 return -ENOMEM ;
2588 swstats->mem_allocated += skb->truesize;
2590 if (ring->rxd_mode == RXD_MODE_1) {
2591 /* 1 buffer mode - normal operation mode */
2592 rxdp1 = (struct RxD1 *)rxdp;
2593 memset(rxdp, 0, sizeof(struct RxD1));
2594 skb_reserve(skb, NET_IP_ALIGN);
2595 rxdp1->Buffer0_ptr =
2596 pci_map_single(ring->pdev, skb->data,
2597 size - NET_IP_ALIGN,
2598 PCI_DMA_FROMDEVICE);
2599 if (pci_dma_mapping_error(nic->pdev,
2600 rxdp1->Buffer0_ptr))
2601 goto pci_map_failed;
2603 rxdp->Control_2 =
2604 SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
2605 rxdp->Host_Control = (unsigned long)skb;
2606 } else if (ring->rxd_mode == RXD_MODE_3B) {
2608 * 2 buffer mode -
2609 * 2 buffer mode provides 128
2610 * byte aligned receive buffers.
2613 rxdp3 = (struct RxD3 *)rxdp;
2614 /* save buffer pointers to avoid frequent dma mapping */
2615 Buffer0_ptr = rxdp3->Buffer0_ptr;
2616 Buffer1_ptr = rxdp3->Buffer1_ptr;
2617 memset(rxdp, 0, sizeof(struct RxD3));
2618 /* restore the buffer pointers for dma sync*/
2619 rxdp3->Buffer0_ptr = Buffer0_ptr;
2620 rxdp3->Buffer1_ptr = Buffer1_ptr;
2622 ba = &ring->ba[block_no][off];
2623 skb_reserve(skb, BUF0_LEN);
2624 tmp = (u64)(unsigned long)skb->data;
2625 tmp += ALIGN_SIZE;
2626 tmp &= ~ALIGN_SIZE;
2627 skb->data = (void *) (unsigned long)tmp;
2628 skb_reset_tail_pointer(skb);
2630 if (from_card_up) {
2631 rxdp3->Buffer0_ptr =
2632 pci_map_single(ring->pdev, ba->ba_0,
2633 BUF0_LEN,
2634 PCI_DMA_FROMDEVICE);
2635 if (pci_dma_mapping_error(nic->pdev,
2636 rxdp3->Buffer0_ptr))
2637 goto pci_map_failed;
2638 } else
2639 pci_dma_sync_single_for_device(ring->pdev,
2640 (dma_addr_t)rxdp3->Buffer0_ptr,
2641 BUF0_LEN,
2642 PCI_DMA_FROMDEVICE);
2644 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2645 if (ring->rxd_mode == RXD_MODE_3B) {
2646 /* Two buffer mode */
2649 * Buffer2 will have L3/L4 header plus
2650 * L4 payload
2652 rxdp3->Buffer2_ptr = pci_map_single(ring->pdev,
2653 skb->data,
2654 ring->mtu + 4,
2655 PCI_DMA_FROMDEVICE);
2657 if (pci_dma_mapping_error(nic->pdev,
2658 rxdp3->Buffer2_ptr))
2659 goto pci_map_failed;
2661 if (from_card_up) {
2662 rxdp3->Buffer1_ptr =
2663 pci_map_single(ring->pdev,
2664 ba->ba_1,
2665 BUF1_LEN,
2666 PCI_DMA_FROMDEVICE);
2668 if (pci_dma_mapping_error(nic->pdev,
2669 rxdp3->Buffer1_ptr)) {
2670 pci_unmap_single(ring->pdev,
2671 (dma_addr_t)(unsigned long)
2672 skb->data,
2673 ring->mtu + 4,
2674 PCI_DMA_FROMDEVICE);
2675 goto pci_map_failed;
2678 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2679 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2680 (ring->mtu + 4);
2682 rxdp->Control_2 |= s2BIT(0);
2683 rxdp->Host_Control = (unsigned long) (skb);
2685 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2686 rxdp->Control_1 |= RXD_OWN_XENA;
2687 off++;
2688 if (off == (ring->rxd_count + 1))
2689 off = 0;
2690 ring->rx_curr_put_info.offset = off;
2692 rxdp->Control_2 |= SET_RXD_MARKER;
2693 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2694 if (first_rxdp) {
2695 wmb();
2696 first_rxdp->Control_1 |= RXD_OWN_XENA;
2698 first_rxdp = rxdp;
2700 ring->rx_bufs_left += 1;
2701 alloc_tab++;
2704 end:
2705 /* Transfer ownership of first descriptor to adapter just before
2706 * exiting. Before that, use memory barrier so that ownership
2707 * and other fields are seen by adapter correctly.
2709 if (first_rxdp) {
2710 wmb();
2711 first_rxdp->Control_1 |= RXD_OWN_XENA;
2714 return SUCCESS;
2716 pci_map_failed:
2717 swstats->pci_map_fail_cnt++;
2718 swstats->mem_freed += skb->truesize;
2719 dev_kfree_skb_irq(skb);
2720 return -ENOMEM;
2723 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2725 struct net_device *dev = sp->dev;
2726 int j;
2727 struct sk_buff *skb;
2728 struct RxD_t *rxdp;
2729 struct RxD1 *rxdp1;
2730 struct RxD3 *rxdp3;
2731 struct mac_info *mac_control = &sp->mac_control;
2732 struct stat_block *stats = mac_control->stats_info;
2733 struct swStat *swstats = &stats->sw_stat;
2735 for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2736 rxdp = mac_control->rings[ring_no].
2737 rx_blocks[blk].rxds[j].virt_addr;
2738 skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
2739 if (!skb)
2740 continue;
2741 if (sp->rxd_mode == RXD_MODE_1) {
2742 rxdp1 = (struct RxD1 *)rxdp;
2743 pci_unmap_single(sp->pdev,
2744 (dma_addr_t)rxdp1->Buffer0_ptr,
2745 dev->mtu +
2746 HEADER_ETHERNET_II_802_3_SIZE +
2747 HEADER_802_2_SIZE + HEADER_SNAP_SIZE,
2748 PCI_DMA_FROMDEVICE);
2749 memset(rxdp, 0, sizeof(struct RxD1));
2750 } else if (sp->rxd_mode == RXD_MODE_3B) {
2751 rxdp3 = (struct RxD3 *)rxdp;
2752 pci_unmap_single(sp->pdev,
2753 (dma_addr_t)rxdp3->Buffer0_ptr,
2754 BUF0_LEN,
2755 PCI_DMA_FROMDEVICE);
2756 pci_unmap_single(sp->pdev,
2757 (dma_addr_t)rxdp3->Buffer1_ptr,
2758 BUF1_LEN,
2759 PCI_DMA_FROMDEVICE);
2760 pci_unmap_single(sp->pdev,
2761 (dma_addr_t)rxdp3->Buffer2_ptr,
2762 dev->mtu + 4,
2763 PCI_DMA_FROMDEVICE);
2764 memset(rxdp, 0, sizeof(struct RxD3));
2766 swstats->mem_freed += skb->truesize;
2767 dev_kfree_skb(skb);
2768 mac_control->rings[ring_no].rx_bufs_left -= 1;
2773 * free_rx_buffers - Frees all Rx buffers
2774 * @sp: device private variable.
2775 * Description:
2776 * This function will free all Rx buffers allocated by host.
2777 * Return Value:
2778 * NONE.
2781 static void free_rx_buffers(struct s2io_nic *sp)
2783 struct net_device *dev = sp->dev;
2784 int i, blk = 0, buf_cnt = 0;
2785 struct config_param *config = &sp->config;
2786 struct mac_info *mac_control = &sp->mac_control;
2788 for (i = 0; i < config->rx_ring_num; i++) {
2789 struct ring_info *ring = &mac_control->rings[i];
2791 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2792 free_rxd_blk(sp, i, blk);
2794 ring->rx_curr_put_info.block_index = 0;
2795 ring->rx_curr_get_info.block_index = 0;
2796 ring->rx_curr_put_info.offset = 0;
2797 ring->rx_curr_get_info.offset = 0;
2798 ring->rx_bufs_left = 0;
2799 DBG_PRINT(INIT_DBG, "%s: Freed 0x%x Rx Buffers on ring%d\n",
2800 dev->name, buf_cnt, i);
2804 static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring)
2806 if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
2807 DBG_PRINT(INFO_DBG, "%s: Out of memory in Rx Intr!!\n",
2808 ring->dev->name);
2810 return 0;
2814 * s2io_poll - Rx interrupt handler for NAPI support
2815 * @napi : pointer to the napi structure.
2816 * @budget : The number of packets that were budgeted to be processed
2817 * during one pass through the 'Poll" function.
2818 * Description:
2819 * Comes into picture only if NAPI support has been incorporated. It does
2820 * the same thing that rx_intr_handler does, but not in a interrupt context
2821 * also It will process only a given number of packets.
2822 * Return value:
2823 * 0 on success and 1 if there are No Rx packets to be processed.
2826 static int s2io_poll_msix(struct napi_struct *napi, int budget)
2828 struct ring_info *ring = container_of(napi, struct ring_info, napi);
2829 struct net_device *dev = ring->dev;
2830 int pkts_processed = 0;
2831 u8 __iomem *addr = NULL;
2832 u8 val8 = 0;
2833 struct s2io_nic *nic = netdev_priv(dev);
2834 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2835 int budget_org = budget;
2837 if (unlikely(!is_s2io_card_up(nic)))
2838 return 0;
2840 pkts_processed = rx_intr_handler(ring, budget);
2841 s2io_chk_rx_buffers(nic, ring);
2843 if (pkts_processed < budget_org) {
2844 napi_complete(napi);
2845 /*Re Enable MSI-Rx Vector*/
2846 addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
2847 addr += 7 - ring->ring_no;
2848 val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
2849 writeb(val8, addr);
2850 val8 = readb(addr);
2852 return pkts_processed;
2855 static int s2io_poll_inta(struct napi_struct *napi, int budget)
2857 struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2858 int pkts_processed = 0;
2859 int ring_pkts_processed, i;
2860 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2861 int budget_org = budget;
2862 struct config_param *config = &nic->config;
2863 struct mac_info *mac_control = &nic->mac_control;
2865 if (unlikely(!is_s2io_card_up(nic)))
2866 return 0;
2868 for (i = 0; i < config->rx_ring_num; i++) {
2869 struct ring_info *ring = &mac_control->rings[i];
2870 ring_pkts_processed = rx_intr_handler(ring, budget);
2871 s2io_chk_rx_buffers(nic, ring);
2872 pkts_processed += ring_pkts_processed;
2873 budget -= ring_pkts_processed;
2874 if (budget <= 0)
2875 break;
2877 if (pkts_processed < budget_org) {
2878 napi_complete(napi);
2879 /* Re enable the Rx interrupts for the ring */
2880 writeq(0, &bar0->rx_traffic_mask);
2881 readl(&bar0->rx_traffic_mask);
2883 return pkts_processed;
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * s2io_netpoll - netpoll event handler entry point
 * @dev : pointer to the device structure.
 * Description:
 * This function will be called by upper layer to check for events on the
 * interface in situations where interrupts are disabled. It is used for
 * specific in-kernel networking tasks, such as remote consoles and kernel
 * debugging over the network (example netdump in RedHat).
 */
static void s2io_netpoll(struct net_device *dev)
{
	struct s2io_nic *nic = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
	int i;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;

	if (pci_channel_offline(nic->pdev))
		return;

	disable_irq(dev->irq);

	/* Acknowledge all pending Rx/Tx traffic interrupts */
	writeq(val64, &bar0->rx_traffic_int);
	writeq(val64, &bar0->tx_traffic_int);

	/* we need to free up the transmitted skbufs or else netpoll will
	 * run out of skbs and will fail and eventually netpoll application such
	 * as netdump will fail.
	 */
	for (i = 0; i < config->tx_fifo_num; i++)
		tx_intr_handler(&mac_control->fifos[i]);

	/* check for received packet and indicate up to network */
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		rx_intr_handler(ring, 0);
	}

	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
			DBG_PRINT(INFO_DBG,
				  "%s: Out of memory in Rx Netpoll!!\n",
				  dev->name);
			break;
		}
	}
	enable_irq(dev->irq);
}
#endif
2942 * rx_intr_handler - Rx interrupt handler
2943 * @ring_info: per ring structure.
2944 * @budget: budget for napi processing.
2945 * Description:
2946 * If the interrupt is because of a received frame or if the
2947 * receive ring contains fresh as yet un-processed frames,this function is
2948 * called. It picks out the RxD at which place the last Rx processing had
2949 * stopped and sends the skb to the OSM's Rx handler and then increments
2950 * the offset.
2951 * Return Value:
2952 * No. of napi packets processed.
2954 static int rx_intr_handler(struct ring_info *ring_data, int budget)
2956 int get_block, put_block;
2957 struct rx_curr_get_info get_info, put_info;
2958 struct RxD_t *rxdp;
2959 struct sk_buff *skb;
2960 int pkt_cnt = 0, napi_pkts = 0;
2961 int i;
2962 struct RxD1 *rxdp1;
2963 struct RxD3 *rxdp3;
2965 get_info = ring_data->rx_curr_get_info;
2966 get_block = get_info.block_index;
2967 memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
2968 put_block = put_info.block_index;
2969 rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
2971 while (RXD_IS_UP2DT(rxdp)) {
2973 * If your are next to put index then it's
2974 * FIFO full condition
2976 if ((get_block == put_block) &&
2977 (get_info.offset + 1) == put_info.offset) {
2978 DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
2979 ring_data->dev->name);
2980 break;
2982 skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
2983 if (skb == NULL) {
2984 DBG_PRINT(ERR_DBG, "%s: NULL skb in Rx Intr\n",
2985 ring_data->dev->name);
2986 return 0;
2988 if (ring_data->rxd_mode == RXD_MODE_1) {
2989 rxdp1 = (struct RxD1 *)rxdp;
2990 pci_unmap_single(ring_data->pdev, (dma_addr_t)
2991 rxdp1->Buffer0_ptr,
2992 ring_data->mtu +
2993 HEADER_ETHERNET_II_802_3_SIZE +
2994 HEADER_802_2_SIZE +
2995 HEADER_SNAP_SIZE,
2996 PCI_DMA_FROMDEVICE);
2997 } else if (ring_data->rxd_mode == RXD_MODE_3B) {
2998 rxdp3 = (struct RxD3 *)rxdp;
2999 pci_dma_sync_single_for_cpu(ring_data->pdev,
3000 (dma_addr_t)rxdp3->Buffer0_ptr,
3001 BUF0_LEN,
3002 PCI_DMA_FROMDEVICE);
3003 pci_unmap_single(ring_data->pdev,
3004 (dma_addr_t)rxdp3->Buffer2_ptr,
3005 ring_data->mtu + 4,
3006 PCI_DMA_FROMDEVICE);
3008 prefetch(skb->data);
3009 rx_osm_handler(ring_data, rxdp);
3010 get_info.offset++;
3011 ring_data->rx_curr_get_info.offset = get_info.offset;
3012 rxdp = ring_data->rx_blocks[get_block].
3013 rxds[get_info.offset].virt_addr;
3014 if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
3015 get_info.offset = 0;
3016 ring_data->rx_curr_get_info.offset = get_info.offset;
3017 get_block++;
3018 if (get_block == ring_data->block_count)
3019 get_block = 0;
3020 ring_data->rx_curr_get_info.block_index = get_block;
3021 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
3024 if (ring_data->nic->config.napi) {
3025 budget--;
3026 napi_pkts++;
3027 if (!budget)
3028 break;
3030 pkt_cnt++;
3031 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
3032 break;
3034 if (ring_data->lro) {
3035 /* Clear all LRO sessions before exiting */
3036 for (i = 0; i < MAX_LRO_SESSIONS; i++) {
3037 struct lro *lro = &ring_data->lro0_n[i];
3038 if (lro->in_use) {
3039 update_L3L4_header(ring_data->nic, lro);
3040 queue_rx_frame(lro->parent, lro->vlan_tag);
3041 clear_lro_session(lro);
3045 return napi_pkts;
3049 * tx_intr_handler - Transmit interrupt handler
3050 * @nic : device private variable
3051 * Description:
3052 * If an interrupt was raised to indicate DMA complete of the
3053 * Tx packet, this function is called. It identifies the last TxD
3054 * whose buffer was freed and frees all skbs whose data have already
3055 * DMA'ed into the NICs internal memory.
3056 * Return Value:
3057 * NONE
3060 static void tx_intr_handler(struct fifo_info *fifo_data)
3062 struct s2io_nic *nic = fifo_data->nic;
3063 struct tx_curr_get_info get_info, put_info;
3064 struct sk_buff *skb = NULL;
3065 struct TxD *txdlp;
3066 int pkt_cnt = 0;
3067 unsigned long flags = 0;
3068 u8 err_mask;
3069 struct stat_block *stats = nic->mac_control.stats_info;
3070 struct swStat *swstats = &stats->sw_stat;
3072 if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
3073 return;
3075 get_info = fifo_data->tx_curr_get_info;
3076 memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
3077 txdlp = (struct TxD *)
3078 fifo_data->list_info[get_info.offset].list_virt_addr;
3079 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
3080 (get_info.offset != put_info.offset) &&
3081 (txdlp->Host_Control)) {
3082 /* Check for TxD errors */
3083 if (txdlp->Control_1 & TXD_T_CODE) {
3084 unsigned long long err;
3085 err = txdlp->Control_1 & TXD_T_CODE;
3086 if (err & 0x1) {
3087 swstats->parity_err_cnt++;
3090 /* update t_code statistics */
3091 err_mask = err >> 48;
3092 switch (err_mask) {
3093 case 2:
3094 swstats->tx_buf_abort_cnt++;
3095 break;
3097 case 3:
3098 swstats->tx_desc_abort_cnt++;
3099 break;
3101 case 7:
3102 swstats->tx_parity_err_cnt++;
3103 break;
3105 case 10:
3106 swstats->tx_link_loss_cnt++;
3107 break;
3109 case 15:
3110 swstats->tx_list_proc_err_cnt++;
3111 break;
3115 skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
3116 if (skb == NULL) {
3117 spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3118 DBG_PRINT(ERR_DBG, "%s: NULL skb in Tx Free Intr\n",
3119 __func__);
3120 return;
3122 pkt_cnt++;
3124 /* Updating the statistics block */
3125 swstats->mem_freed += skb->truesize;
3126 dev_kfree_skb_irq(skb);
3128 get_info.offset++;
3129 if (get_info.offset == get_info.fifo_len + 1)
3130 get_info.offset = 0;
3131 txdlp = (struct TxD *)
3132 fifo_data->list_info[get_info.offset].list_virt_addr;
3133 fifo_data->tx_curr_get_info.offset = get_info.offset;
3136 s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);
3138 spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3142 * s2io_mdio_write - Function to write in to MDIO registers
3143 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3144 * @addr : address value
3145 * @value : data value
3146 * @dev : pointer to net_device structure
3147 * Description:
3148 * This function is used to write values to the MDIO registers
3149 * NONE
3151 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value,
3152 struct net_device *dev)
3154 u64 val64;
3155 struct s2io_nic *sp = netdev_priv(dev);
3156 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3158 /* address transaction */
3159 val64 = MDIO_MMD_INDX_ADDR(addr) |
3160 MDIO_MMD_DEV_ADDR(mmd_type) |
3161 MDIO_MMS_PRT_ADDR(0x0);
3162 writeq(val64, &bar0->mdio_control);
3163 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3164 writeq(val64, &bar0->mdio_control);
3165 udelay(100);
3167 /* Data transaction */
3168 val64 = MDIO_MMD_INDX_ADDR(addr) |
3169 MDIO_MMD_DEV_ADDR(mmd_type) |
3170 MDIO_MMS_PRT_ADDR(0x0) |
3171 MDIO_MDIO_DATA(value) |
3172 MDIO_OP(MDIO_OP_WRITE_TRANS);
3173 writeq(val64, &bar0->mdio_control);
3174 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3175 writeq(val64, &bar0->mdio_control);
3176 udelay(100);
3178 val64 = MDIO_MMD_INDX_ADDR(addr) |
3179 MDIO_MMD_DEV_ADDR(mmd_type) |
3180 MDIO_MMS_PRT_ADDR(0x0) |
3181 MDIO_OP(MDIO_OP_READ_TRANS);
3182 writeq(val64, &bar0->mdio_control);
3183 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3184 writeq(val64, &bar0->mdio_control);
3185 udelay(100);
3189 * s2io_mdio_read - Function to write in to MDIO registers
3190 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3191 * @addr : address value
3192 * @dev : pointer to net_device structure
3193 * Description:
3194 * This function is used to read values to the MDIO registers
3195 * NONE
3197 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3199 u64 val64 = 0x0;
3200 u64 rval64 = 0x0;
3201 struct s2io_nic *sp = netdev_priv(dev);
3202 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3204 /* address transaction */
3205 val64 = val64 | (MDIO_MMD_INDX_ADDR(addr)
3206 | MDIO_MMD_DEV_ADDR(mmd_type)
3207 | MDIO_MMS_PRT_ADDR(0x0));
3208 writeq(val64, &bar0->mdio_control);
3209 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3210 writeq(val64, &bar0->mdio_control);
3211 udelay(100);
3213 /* Data transaction */
3214 val64 = MDIO_MMD_INDX_ADDR(addr) |
3215 MDIO_MMD_DEV_ADDR(mmd_type) |
3216 MDIO_MMS_PRT_ADDR(0x0) |
3217 MDIO_OP(MDIO_OP_READ_TRANS);
3218 writeq(val64, &bar0->mdio_control);
3219 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3220 writeq(val64, &bar0->mdio_control);
3221 udelay(100);
3223 /* Read the value from regs */
3224 rval64 = readq(&bar0->mdio_control);
3225 rval64 = rval64 & 0xFFFF0000;
3226 rval64 = rval64 >> 16;
3227 return rval64;
3231 * s2io_chk_xpak_counter - Function to check the status of the xpak counters
3232 * @counter : counter value to be updated
3233 * @flag : flag to indicate the status
3234 * @type : counter type
3235 * Description:
3236 * This function is to check the status of the xpak counters value
3237 * NONE
3240 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index,
3241 u16 flag, u16 type)
3243 u64 mask = 0x3;
3244 u64 val64;
3245 int i;
3246 for (i = 0; i < index; i++)
3247 mask = mask << 0x2;
3249 if (flag > 0) {
3250 *counter = *counter + 1;
3251 val64 = *regs_stat & mask;
3252 val64 = val64 >> (index * 0x2);
3253 val64 = val64 + 1;
3254 if (val64 == 3) {
3255 switch (type) {
3256 case 1:
3257 DBG_PRINT(ERR_DBG,
3258 "Take Xframe NIC out of service.\n");
3259 DBG_PRINT(ERR_DBG,
3260 "Excessive temperatures may result in premature transceiver failure.\n");
3261 break;
3262 case 2:
3263 DBG_PRINT(ERR_DBG,
3264 "Take Xframe NIC out of service.\n");
3265 DBG_PRINT(ERR_DBG,
3266 "Excessive bias currents may indicate imminent laser diode failure.\n");
3267 break;
3268 case 3:
3269 DBG_PRINT(ERR_DBG,
3270 "Take Xframe NIC out of service.\n");
3271 DBG_PRINT(ERR_DBG,
3272 "Excessive laser output power may saturate far-end receiver.\n");
3273 break;
3274 default:
3275 DBG_PRINT(ERR_DBG,
3276 "Incorrect XPAK Alarm type\n");
3278 val64 = 0x0;
3280 val64 = val64 << (index * 0x2);
3281 *regs_stat = (*regs_stat & (~mask)) | (val64);
3283 } else {
3284 *regs_stat = *regs_stat & (~mask);
3289 * s2io_updt_xpak_counter - Function to update the xpak counters
3290 * @dev : pointer to net_device struct
3291 * Description:
3292 * This function is to upate the status of the xpak counters value
3293 * NONE
3295 static void s2io_updt_xpak_counter(struct net_device *dev)
3297 u16 flag = 0x0;
3298 u16 type = 0x0;
3299 u16 val16 = 0x0;
3300 u64 val64 = 0x0;
3301 u64 addr = 0x0;
3303 struct s2io_nic *sp = netdev_priv(dev);
3304 struct stat_block *stats = sp->mac_control.stats_info;
3305 struct xpakStat *xstats = &stats->xpak_stat;
3307 /* Check the communication with the MDIO slave */
3308 addr = MDIO_CTRL1;
3309 val64 = 0x0;
3310 val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3311 if ((val64 == 0xFFFF) || (val64 == 0x0000)) {
3312 DBG_PRINT(ERR_DBG,
3313 "ERR: MDIO slave access failed - Returned %llx\n",
3314 (unsigned long long)val64);
3315 return;
3318 /* Check for the expected value of control reg 1 */
3319 if (val64 != MDIO_CTRL1_SPEED10G) {
3320 DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - "
3321 "Returned: %llx- Expected: 0x%x\n",
3322 (unsigned long long)val64, MDIO_CTRL1_SPEED10G);
3323 return;
3326 /* Loading the DOM register to MDIO register */
3327 addr = 0xA100;
3328 s2io_mdio_write(MDIO_MMD_PMAPMD, addr, val16, dev);
3329 val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3331 /* Reading the Alarm flags */
3332 addr = 0xA070;
3333 val64 = 0x0;
3334 val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3336 flag = CHECKBIT(val64, 0x7);
3337 type = 1;
3338 s2io_chk_xpak_counter(&xstats->alarm_transceiver_temp_high,
3339 &xstats->xpak_regs_stat,
3340 0x0, flag, type);
3342 if (CHECKBIT(val64, 0x6))
3343 xstats->alarm_transceiver_temp_low++;
3345 flag = CHECKBIT(val64, 0x3);
3346 type = 2;
3347 s2io_chk_xpak_counter(&xstats->alarm_laser_bias_current_high,
3348 &xstats->xpak_regs_stat,
3349 0x2, flag, type);
3351 if (CHECKBIT(val64, 0x2))
3352 xstats->alarm_laser_bias_current_low++;
3354 flag = CHECKBIT(val64, 0x1);
3355 type = 3;
3356 s2io_chk_xpak_counter(&xstats->alarm_laser_output_power_high,
3357 &xstats->xpak_regs_stat,
3358 0x4, flag, type);
3360 if (CHECKBIT(val64, 0x0))
3361 xstats->alarm_laser_output_power_low++;
3363 /* Reading the Warning flags */
3364 addr = 0xA074;
3365 val64 = 0x0;
3366 val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3368 if (CHECKBIT(val64, 0x7))
3369 xstats->warn_transceiver_temp_high++;
3371 if (CHECKBIT(val64, 0x6))
3372 xstats->warn_transceiver_temp_low++;
3374 if (CHECKBIT(val64, 0x3))
3375 xstats->warn_laser_bias_current_high++;
3377 if (CHECKBIT(val64, 0x2))
3378 xstats->warn_laser_bias_current_low++;
3380 if (CHECKBIT(val64, 0x1))
3381 xstats->warn_laser_output_power_high++;
3383 if (CHECKBIT(val64, 0x0))
3384 xstats->warn_laser_output_power_low++;
3388 * wait_for_cmd_complete - waits for a command to complete.
3389 * @sp : private member of the device structure, which is a pointer to the
3390 * s2io_nic structure.
3391 * Description: Function that waits for a command to Write into RMAC
3392 * ADDR DATA registers to be completed and returns either success or
3393 * error depending on whether the command was complete or not.
3394 * Return value:
3395 * SUCCESS on success and FAILURE on failure.
3398 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3399 int bit_state)
3401 int ret = FAILURE, cnt = 0, delay = 1;
3402 u64 val64;
3404 if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3405 return FAILURE;
3407 do {
3408 val64 = readq(addr);
3409 if (bit_state == S2IO_BIT_RESET) {
3410 if (!(val64 & busy_bit)) {
3411 ret = SUCCESS;
3412 break;
3414 } else {
3415 if (val64 & busy_bit) {
3416 ret = SUCCESS;
3417 break;
3421 if (in_interrupt())
3422 mdelay(delay);
3423 else
3424 msleep(delay);
3426 if (++cnt >= 10)
3427 delay = 50;
3428 } while (cnt < 20);
3429 return ret;
3432 * check_pci_device_id - Checks if the device id is supported
3433 * @id : device id
3434 * Description: Function to check if the pci device id is supported by driver.
3435 * Return value: Actual device id if supported else PCI_ANY_ID
3437 static u16 check_pci_device_id(u16 id)
3439 switch (id) {
3440 case PCI_DEVICE_ID_HERC_WIN:
3441 case PCI_DEVICE_ID_HERC_UNI:
3442 return XFRAME_II_DEVICE;
3443 case PCI_DEVICE_ID_S2IO_UNI:
3444 case PCI_DEVICE_ID_S2IO_WIN:
3445 return XFRAME_I_DEVICE;
3446 default:
3447 return PCI_ANY_ID;
/**
 * s2io_reset - Resets the card.
 * @sp : private member of the device structure.
 * Description: Function to Reset the card. This function then also
 * restores the previously saved PCI configuration space registers as
 * the card reset also resets the configuration space.
 * Return value:
 * void.
 */

static void s2io_reset(struct s2io_nic *sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	u16 subid, pci_cmd;
	int i;
	u16 val16;
	unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
	unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
	struct stat_block *stats;
	struct swStat *swstats;

	DBG_PRINT(INIT_DBG, "%s: Resetting XFrame card %s\n",
		  __func__, pci_name(sp->pdev));

	/* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));

	val64 = SW_RESET_ALL;
	writeq(val64, &bar0->sw_reset);
	/* CX4 boards get an extra settle delay before the common 250 ms wait */
	if (strstr(sp->product_name, "CX4"))
		msleep(750);
	msleep(250);
	/* Re-init config space until the device id reads back as a known
	 * part - a successful read means the chip is out of reset. */
	for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {

		/* Restore the PCI state saved during initialization. */
		pci_restore_state(sp->pdev);
		pci_save_state(sp->pdev);
		pci_read_config_word(sp->pdev, 0x2, &val16);	/* device id */
		if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
			break;
		msleep(200);
	}

	if (check_pci_device_id(val16) == (u16)PCI_ANY_ID)
		DBG_PRINT(ERR_DBG, "%s SW_Reset failed!\n", __func__);

	/* Put the saved PCI-X command register back. */
	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);

	s2io_init_pci(sp);

	/* Set swapper to enable I/O register access */
	s2io_set_swapper(sp);

	/* restore mac_addr entries */
	do_s2io_restore_unicast_mc(sp);

	/* Restore the MSIX table entries from local variables */
	restore_xmsi_data(sp);

	/* Clear certain PCI/PCI-X fields after reset */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Clear "detected parity error" bit */
		pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);

		/* Clearing PCIX Ecc status register */
		pci_write_config_dword(sp->pdev, 0x68, 0x7C);

		/* Clearing PCI_STATUS error reflected here */
		writeq(s2BIT(62), &bar0->txpic_int_reg);
	}

	/* Reset device statistics maintained by OS */
	memset(&sp->stats, 0, sizeof(struct net_device_stats));

	stats = sp->mac_control.stats_info;
	swstats = &stats->sw_stat;

	/* save link up/down time/cnt, reset/memory/watchdog cnt */
	up_cnt = swstats->link_up_cnt;
	down_cnt = swstats->link_down_cnt;
	up_time = swstats->link_up_time;
	down_time = swstats->link_down_time;
	reset_cnt = swstats->soft_reset_cnt;
	mem_alloc_cnt = swstats->mem_allocated;
	mem_free_cnt = swstats->mem_freed;
	watchdog_cnt = swstats->watchdog_timer_cnt;

	/* Wipe the whole stat block, then put back the counters that must
	 * survive a reset. */
	memset(stats, 0, sizeof(struct stat_block));

	/* restore link up/down time/cnt, reset/memory/watchdog cnt */
	swstats->link_up_cnt = up_cnt;
	swstats->link_down_cnt = down_cnt;
	swstats->link_up_time = up_time;
	swstats->link_down_time = down_time;
	swstats->soft_reset_cnt = reset_cnt;
	swstats->mem_allocated = mem_alloc_cnt;
	swstats->mem_freed = mem_free_cnt;
	swstats->watchdog_timer_cnt = watchdog_cnt;

	/* SXE-002: Configure link and activity LED to turn it off */
	subid = sp->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (sp->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		/* NOTE(review): magic offset 0x2700 - presumably an
		 * undocumented LED control region; confirm against errata. */
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	/*
	 * Clear spurious ECC interrupts that would have occurred on
	 * XFRAME II cards after reset.
	 */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* write-1-to-clear: echo the latched bits back */
		val64 = readq(&bar0->pcc_err_reg);
		writeq(val64, &bar0->pcc_err_reg);
	}

	sp->device_enabled_once = false;
}
/**
 * s2io_set_swapper - to set the swapper controle on the card
 * @sp : private member of the device structure,
 * pointer to the s2io_nic structure.
 * Description: Function to set the swapper control on the card
 * correctly depending on the 'endianness' of the system.
 * Return value:
 * SUCCESS on success and FAILURE on failure.
 */

static int s2io_set_swapper(struct s2io_nic *sp)
{
	struct net_device *dev = sp->dev;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64, valt, valr;

	/*
	 * Set proper endian settings and verify the same by reading
	 * the PIF Feed-back register.
	 */

	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		/* Read path is mis-swapped: probe the four possible
		 * FE/SE combinations until the feedback pattern matches. */
		int i = 0;
		static const u64 value[] = {
			0xC30000C3C30000C3ULL,	/* FE=1, SE=1 */
			0x8100008181000081ULL,	/* FE=1, SE=0 */
			0x4200004242000042ULL,	/* FE=0, SE=1 */
			0			/* FE=0, SE=0 */
		};

		while (i < 4) {
			writeq(value[i], &bar0->swapper_ctrl);
			val64 = readq(&bar0->pif_rd_swapper_fb);
			if (val64 == 0x0123456789ABCDEFULL)
				break;
			i++;
		}
		if (i == 4) {
			DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, "
				  "feedback read %llx\n",
				  dev->name, (unsigned long long)val64);
			return FAILURE;
		}
		valr = value[i];
	} else {
		/* Read path already correct: keep current swapper setting. */
		valr = readq(&bar0->swapper_ctrl);
	}

	/* Now verify the write path using the xmsi_address scratch area. */
	valt = 0x0123456789ABCDEFULL;
	writeq(valt, &bar0->xmsi_address);
	val64 = readq(&bar0->xmsi_address);

	if (val64 != valt) {
		/* Write path mis-swapped: probe write-side FE/SE bits,
		 * OR-ed with the read-side setting found above. */
		int i = 0;
		static const u64 value[] = {
			0x00C3C30000C3C300ULL,	/* FE=1, SE=1 */
			0x0081810000818100ULL,	/* FE=1, SE=0 */
			0x0042420000424200ULL,	/* FE=0, SE=1 */
			0			/* FE=0, SE=0 */
		};

		while (i < 4) {
			writeq((value[i] | valr), &bar0->swapper_ctrl);
			writeq(valt, &bar0->xmsi_address);
			val64 = readq(&bar0->xmsi_address);
			if (val64 == valt)
				break;
			i++;
		}
		if (i == 4) {
			unsigned long long x = val64;
			DBG_PRINT(ERR_DBG,
				  "Write failed, Xmsi_addr reads:0x%llx\n", x);
			return FAILURE;
		}
	}
	/* Keep only the verified FE/SE bits (top word) and rebuild the
	 * per-path enables below. */
	val64 = readq(&bar0->swapper_ctrl);
	val64 &= 0xFFFF000000000000ULL;

#ifdef __BIG_ENDIAN
	/*
	 * The device by default set to a big endian format, so a
	 * big endian driver need not set anything.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		  SWAPPER_CTRL_TXP_SE |
		  SWAPPER_CTRL_TXD_R_FE |
		  SWAPPER_CTRL_TXD_W_FE |
		  SWAPPER_CTRL_TXF_R_FE |
		  SWAPPER_CTRL_RXD_R_FE |
		  SWAPPER_CTRL_RXD_W_FE |
		  SWAPPER_CTRL_RXF_W_FE |
		  SWAPPER_CTRL_XMSI_FE |
		  SWAPPER_CTRL_STATS_FE |
		  SWAPPER_CTRL_STATS_SE);
	if (sp->config.intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#else
	/*
	 * Initially we enable all bits to make it accessible by the
	 * driver, then we selectively enable only those bits that
	 * we want to set.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		  SWAPPER_CTRL_TXP_SE |
		  SWAPPER_CTRL_TXD_R_FE |
		  SWAPPER_CTRL_TXD_R_SE |
		  SWAPPER_CTRL_TXD_W_FE |
		  SWAPPER_CTRL_TXD_W_SE |
		  SWAPPER_CTRL_TXF_R_FE |
		  SWAPPER_CTRL_RXD_R_FE |
		  SWAPPER_CTRL_RXD_R_SE |
		  SWAPPER_CTRL_RXD_W_FE |
		  SWAPPER_CTRL_RXD_W_SE |
		  SWAPPER_CTRL_RXF_W_FE |
		  SWAPPER_CTRL_XMSI_FE |
		  SWAPPER_CTRL_STATS_FE |
		  SWAPPER_CTRL_STATS_SE);
	if (sp->config.intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#endif
	val64 = readq(&bar0->swapper_ctrl);

	/*
	 * Verifying if endian settings are accurate by reading a
	 * feedback register.
	 */
	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		/* Endian settings are incorrect, calls for another dekko. */
		DBG_PRINT(ERR_DBG,
			  "%s: Endian settings are wrong, feedback read %llx\n",
			  dev->name, (unsigned long long)val64);
		return FAILURE;
	}

	return SUCCESS;
}
3716 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3718 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3719 u64 val64;
3720 int ret = 0, cnt = 0;
3722 do {
3723 val64 = readq(&bar0->xmsi_access);
3724 if (!(val64 & s2BIT(15)))
3725 break;
3726 mdelay(1);
3727 cnt++;
3728 } while (cnt < 5);
3729 if (cnt == 5) {
3730 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3731 ret = 1;
3734 return ret;
3737 static void restore_xmsi_data(struct s2io_nic *nic)
3739 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3740 u64 val64;
3741 int i, msix_index;
3743 if (nic->device_type == XFRAME_I_DEVICE)
3744 return;
3746 for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3747 msix_index = (i) ? ((i-1) * 8 + 1) : 0;
3748 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3749 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3750 val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
3751 writeq(val64, &bar0->xmsi_access);
3752 if (wait_for_msix_trans(nic, msix_index)) {
3753 DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3754 __func__, msix_index);
3755 continue;
3760 static void store_xmsi_data(struct s2io_nic *nic)
3762 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3763 u64 val64, addr, data;
3764 int i, msix_index;
3766 if (nic->device_type == XFRAME_I_DEVICE)
3767 return;
3769 /* Store and display */
3770 for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3771 msix_index = (i) ? ((i-1) * 8 + 1) : 0;
3772 val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
3773 writeq(val64, &bar0->xmsi_access);
3774 if (wait_for_msix_trans(nic, msix_index)) {
3775 DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3776 __func__, msix_index);
3777 continue;
3779 addr = readq(&bar0->xmsi_address);
3780 data = readq(&bar0->xmsi_data);
3781 if (addr && data) {
3782 nic->msix_info[i].addr = addr;
3783 nic->msix_info[i].data = data;
/*
 * Allocate and program the MSI-X vector tables: entry 0 carries the
 * alarm interrupt, entries 1..num_entries-1 carry one Rx ring each.
 * Returns 0 on success or -ENOMEM on allocation / vector-enable failure
 * (all partially allocated state is freed before returning).
 */
static int s2io_enable_msi_x(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 rx_mat;
	u16 msi_control; /* Temp variable */
	int ret, i, j, msix_indx = 1;
	int size;
	struct stat_block *stats = nic->mac_control.stats_info;
	struct swStat *swstats = &stats->sw_stat;

	/* Vector table handed to the PCI core. */
	size = nic->num_entries * sizeof(struct msix_entry);
	nic->entries = kzalloc(size, GFP_KERNEL);
	if (!nic->entries) {
		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
			  __func__);
		swstats->mem_alloc_fail_cnt++;
		return -ENOMEM;
	}
	swstats->mem_allocated += size;

	/* Driver-side bookkeeping parallel to nic->entries. */
	size = nic->num_entries * sizeof(struct s2io_msix_entry);
	nic->s2io_entries = kzalloc(size, GFP_KERNEL);
	if (!nic->s2io_entries) {
		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
			  __func__);
		swstats->mem_alloc_fail_cnt++;
		kfree(nic->entries);
		swstats->mem_freed
			+= (nic->num_entries * sizeof(struct msix_entry));
		return -ENOMEM;
	}
	swstats->mem_allocated += size;

	/* Entry 0: alarm/errors vector, argument is the fifo array. */
	nic->entries[0].entry = 0;
	nic->s2io_entries[0].entry = 0;
	nic->s2io_entries[0].in_use = MSIX_FLG;
	nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
	nic->s2io_entries[0].arg = &nic->mac_control.fifos;

	/* Remaining entries map to hardware table slots 8(i-1)+1. */
	for (i = 1; i < nic->num_entries; i++) {
		nic->entries[i].entry = ((i - 1) * 8) + 1;
		nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
		nic->s2io_entries[i].arg = NULL;
		nic->s2io_entries[i].in_use = 0;
	}

	/* Steer each Rx ring's interrupt to its own vector via rx_mat. */
	rx_mat = readq(&bar0->rx_mat);
	for (j = 0; j < nic->config.rx_ring_num; j++) {
		rx_mat |= RX_MAT_SET(j, msix_indx);
		nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
		nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
		nic->s2io_entries[j+1].in_use = MSIX_FLG;
		msix_indx += 8;
	}
	writeq(rx_mat, &bar0->rx_mat);
	readq(&bar0->rx_mat);	/* flush the write */

	ret = pci_enable_msix(nic->pdev, nic->entries, nic->num_entries);
	/* We fail init if error or we get less vectors than min required */
	if (ret) {
		DBG_PRINT(ERR_DBG, "Enabling MSI-X failed\n");
		kfree(nic->entries);
		swstats->mem_freed += nic->num_entries *
			sizeof(struct msix_entry);
		kfree(nic->s2io_entries);
		swstats->mem_freed += nic->num_entries *
			sizeof(struct s2io_msix_entry);
		nic->entries = NULL;
		nic->s2io_entries = NULL;
		return -ENOMEM;
	}

	/*
	 * To enable MSI-X, MSI also needs to be enabled, due to a bug
	 * in the herc NIC. (Temp change, needs to be removed later)
	 */
	pci_read_config_word(nic->pdev, 0x42, &msi_control);
	msi_control |= 0x1; /* Enable MSI */
	pci_write_config_word(nic->pdev, 0x42, msi_control);

	return 0;
}
3871 /* Handle software interrupt used during MSI(X) test */
3872 static irqreturn_t s2io_test_intr(int irq, void *dev_id)
3874 struct s2io_nic *sp = dev_id;
3876 sp->msi_detected = 1;
3877 wake_up(&sp->msi_wait);
3879 return IRQ_HANDLED;
3882 /* Test interrupt path by forcing a a software IRQ */
3883 static int s2io_test_msi(struct s2io_nic *sp)
3885 struct pci_dev *pdev = sp->pdev;
3886 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3887 int err;
3888 u64 val64, saved64;
3890 err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
3891 sp->name, sp);
3892 if (err) {
3893 DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
3894 sp->dev->name, pci_name(pdev), pdev->irq);
3895 return err;
3898 init_waitqueue_head(&sp->msi_wait);
3899 sp->msi_detected = 0;
3901 saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
3902 val64 |= SCHED_INT_CTRL_ONE_SHOT;
3903 val64 |= SCHED_INT_CTRL_TIMER_EN;
3904 val64 |= SCHED_INT_CTRL_INT2MSI(1);
3905 writeq(val64, &bar0->scheduled_int_ctrl);
3907 wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);
3909 if (!sp->msi_detected) {
3910 /* MSI(X) test failed, go back to INTx mode */
3911 DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
3912 "using MSI(X) during test\n",
3913 sp->dev->name, pci_name(pdev));
3915 err = -EOPNOTSUPP;
3918 free_irq(sp->entries[1].vector, sp);
3920 writeq(saved64, &bar0->scheduled_int_ctrl);
3922 return err;
3925 static void remove_msix_isr(struct s2io_nic *sp)
3927 int i;
3928 u16 msi_control;
3930 for (i = 0; i < sp->num_entries; i++) {
3931 if (sp->s2io_entries[i].in_use == MSIX_REGISTERED_SUCCESS) {
3932 int vector = sp->entries[i].vector;
3933 void *arg = sp->s2io_entries[i].arg;
3934 free_irq(vector, arg);
3938 kfree(sp->entries);
3939 kfree(sp->s2io_entries);
3940 sp->entries = NULL;
3941 sp->s2io_entries = NULL;
3943 pci_read_config_word(sp->pdev, 0x42, &msi_control);
3944 msi_control &= 0xFFFE; /* Disable MSI */
3945 pci_write_config_word(sp->pdev, 0x42, msi_control);
3947 pci_disable_msix(sp->pdev);
3950 static void remove_inta_isr(struct s2io_nic *sp)
3952 struct net_device *dev = sp->dev;
3954 free_irq(sp->pdev->irq, dev);
3957 /* ********************************************************* *
3958 * Functions defined below concern the OS part of the driver *
3959 * ********************************************************* */
3962 * s2io_open - open entry point of the driver
3963 * @dev : pointer to the device structure.
3964 * Description:
3965 * This function is the open entry point of the driver. It mainly calls a
3966 * function to allocate Rx buffers and inserts them into the buffer
3967 * descriptors and then enables the Rx part of the NIC.
3968 * Return value:
3969 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3970 * file on failure.
3973 static int s2io_open(struct net_device *dev)
3975 struct s2io_nic *sp = netdev_priv(dev);
3976 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
3977 int err = 0;
3980 * Make sure you have link off by default every time
3981 * Nic is initialized
3983 netif_carrier_off(dev);
3984 sp->last_link_state = 0;
3986 /* Initialize H/W and enable interrupts */
3987 err = s2io_card_up(sp);
3988 if (err) {
3989 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3990 dev->name);
3991 goto hw_init_failed;
3994 if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
3995 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3996 s2io_card_down(sp);
3997 err = -ENODEV;
3998 goto hw_init_failed;
4000 s2io_start_all_tx_queue(sp);
4001 return 0;
4003 hw_init_failed:
4004 if (sp->config.intr_type == MSI_X) {
4005 if (sp->entries) {
4006 kfree(sp->entries);
4007 swstats->mem_freed += sp->num_entries *
4008 sizeof(struct msix_entry);
4010 if (sp->s2io_entries) {
4011 kfree(sp->s2io_entries);
4012 swstats->mem_freed += sp->num_entries *
4013 sizeof(struct s2io_msix_entry);
4016 return err;
4020 * s2io_close -close entry point of the driver
4021 * @dev : device pointer.
4022 * Description:
4023 * This is the stop entry point of the driver. It needs to undo exactly
4024 * whatever was done by the open entry point,thus it's usually referred to
4025 * as the close function.Among other things this function mainly stops the
4026 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
4027 * Return value:
4028 * 0 on success and an appropriate (-)ve integer as defined in errno.h
4029 * file on failure.
4032 static int s2io_close(struct net_device *dev)
4034 struct s2io_nic *sp = netdev_priv(dev);
4035 struct config_param *config = &sp->config;
4036 u64 tmp64;
4037 int offset;
4039 /* Return if the device is already closed *
4040 * Can happen when s2io_card_up failed in change_mtu *
4042 if (!is_s2io_card_up(sp))
4043 return 0;
4045 s2io_stop_all_tx_queue(sp);
4046 /* delete all populated mac entries */
4047 for (offset = 1; offset < config->max_mc_addr; offset++) {
4048 tmp64 = do_s2io_read_unicast_mc(sp, offset);
4049 if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
4050 do_s2io_delete_unicast_mc(sp, tmp64);
4053 s2io_card_down(sp);
4055 return 0;
/**
 * s2io_xmit - Tx entry point of the driver
 * @skb : the socket buffer containing the Tx data.
 * @dev : device pointer.
 * Description :
 * This function is the Tx entry point of the driver. S2IO NIC supports
 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
 * NOTE: when device can't queue the pkt, just the trans_start variable will
 * not be updated.
 * Return value:
 * NETDEV_TX_OK / NETDEV_TX_BUSY / NETDEV_TX_LOCKED.
 */

static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct s2io_nic *sp = netdev_priv(dev);
	u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
	register u64 val64;
	struct TxD *txdp;
	struct TxFIFO_element __iomem *tx_fifo;
	unsigned long flags = 0;
	u16 vlan_tag = 0;
	struct fifo_info *fifo = NULL;
	int do_spin_lock = 1;
	int offload_type;
	int enable_per_list_interrupt = 0;
	struct config_param *config = &sp->config;
	struct mac_info *mac_control = &sp->mac_control;
	struct stat_block *stats = mac_control->stats_info;
	struct swStat *swstats = &stats->sw_stat;

	DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);

	/* Drop empty buffers outright. */
	if (unlikely(skb->len <= 0)) {
		DBG_PRINT(TX_DBG, "%s: Buffer has no data..\n", dev->name);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Device is being reset; silently drop. */
	if (!is_s2io_card_up(sp)) {
		DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
			  dev->name);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	queue = 0;
	if (vlan_tx_tag_present(skb))
		vlan_tag = vlan_tx_tag_get(skb);
	/* Default steering: hash TCP/UDP port pairs onto the protocol's
	 * FIFO group; non-IP and fragmented traffic stays on FIFO 0. */
	if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *ip;
			struct tcphdr *th;
			ip = ip_hdr(skb);

			if ((ip->frag_off & htons(IP_OFFSET|IP_MF)) == 0) {
				th = (struct tcphdr *)(((unsigned char *)ip) +
						       ip->ihl*4);

				if (ip->protocol == IPPROTO_TCP) {
					queue_len = sp->total_tcp_fifos;
					queue = (ntohs(th->source) +
						 ntohs(th->dest)) &
						sp->fifo_selector[queue_len - 1];
					if (queue >= queue_len)
						queue = queue_len - 1;
				} else if (ip->protocol == IPPROTO_UDP) {
					queue_len = sp->total_udp_fifos;
					queue = (ntohs(th->source) +
						 ntohs(th->dest)) &
						sp->fifo_selector[queue_len - 1];
					if (queue >= queue_len)
						queue = queue_len - 1;
					queue += sp->udp_fifo_idx;
					if (skb->len > 1024)
						enable_per_list_interrupt = 1;
					/* UDP path uses trylock below. */
					do_spin_lock = 0;
				}
			}
		}
	} else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
		/* get fifo number based on skb->priority value */
		queue = config->fifo_mapping
			[skb->priority & (MAX_TX_FIFOS - 1)];
	fifo = &mac_control->fifos[queue];

	if (do_spin_lock)
		spin_lock_irqsave(&fifo->tx_lock, flags);
	else {
		if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
			return NETDEV_TX_LOCKED;
	}

	/* Back-pressure: respect a stopped (sub)queue. */
	if (sp->config.multiq) {
		if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
			spin_unlock_irqrestore(&fifo->tx_lock, flags);
			return NETDEV_TX_BUSY;
		}
	} else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
		if (netif_queue_stopped(dev)) {
			spin_unlock_irqrestore(&fifo->tx_lock, flags);
			return NETDEV_TX_BUSY;
		}
	}

	put_off = (u16)fifo->tx_curr_put_info.offset;
	get_off = (u16)fifo->tx_curr_get_info.offset;
	txdp = (struct TxD *)fifo->list_info[put_off].list_virt_addr;

	queue_len = fifo->tx_curr_put_info.fifo_len + 1;
	/* Avoid "put" pointer going beyond "get" pointer */
	if (txdp->Host_Control ||
	    ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
		DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
		s2io_stop_tx_queue(sp, fifo->fifo_no);
		dev_kfree_skb(skb);
		spin_unlock_irqrestore(&fifo->tx_lock, flags);
		return NETDEV_TX_OK;
	}

	/* Fill offload bits in the first descriptor. */
	offload_type = s2io_offload_type(skb);
	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
		txdp->Control_1 |= TXD_TCP_LSO_EN;
		txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
	}
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txdp->Control_2 |= (TXD_TX_CKO_IPV4_EN |
				    TXD_TX_CKO_TCP_EN |
				    TXD_TX_CKO_UDP_EN);
	}
	txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
	txdp->Control_1 |= TXD_LIST_OWN_XENA;
	txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
	if (enable_per_list_interrupt)
		if (put_off & (queue_len >> 5))
			txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
	if (vlan_tag) {
		txdp->Control_2 |= TXD_VLAN_ENABLE;
		txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
	}

	frg_len = skb_headlen(skb);
	/* UFO: descriptor 0 carries an 8-byte in-band header with the
	 * IPv6 fragment id; payload descriptors start at txdp+1. */
	if (offload_type == SKB_GSO_UDP) {
		int ufo_size;

		ufo_size = s2io_udp_mss(skb);
		ufo_size &= ~7;
		txdp->Control_1 |= TXD_UFO_EN;
		txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
		txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
#ifdef __BIG_ENDIAN
		/* both variants do cpu_to_be64(be32_to_cpu(...)) */
		fifo->ufo_in_band_v[put_off] =
			(__force u64)skb_shinfo(skb)->ip6_frag_id;
#else
		fifo->ufo_in_band_v[put_off] =
			(__force u64)skb_shinfo(skb)->ip6_frag_id << 32;
#endif
		txdp->Host_Control = (unsigned long)fifo->ufo_in_band_v;
		txdp->Buffer_Pointer = pci_map_single(sp->pdev,
						      fifo->ufo_in_band_v,
						      sizeof(u64),
						      PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
			goto pci_map_failed;
		txdp++;
	}

	/* Map the linear part of the skb. */
	txdp->Buffer_Pointer = pci_map_single(sp->pdev, skb->data,
					      frg_len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
		goto pci_map_failed;

	txdp->Host_Control = (unsigned long)skb;
	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
	if (offload_type == SKB_GSO_UDP)
		txdp->Control_1 |= TXD_UFO_EN;

	frg_cnt = skb_shinfo(skb)->nr_frags;
	/* For fragmented SKB. */
	for (i = 0; i < frg_cnt; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		/* A '0' length fragment will be ignored */
		if (!frag->size)
			continue;
		txdp++;
		txdp->Buffer_Pointer = (u64)pci_map_page(sp->pdev, frag->page,
							 frag->page_offset,
							 frag->size,
							 PCI_DMA_TODEVICE);
		txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
		if (offload_type == SKB_GSO_UDP)
			txdp->Control_1 |= TXD_UFO_EN;
	}
	txdp->Control_1 |= TXD_GATHER_CODE_LAST;

	if (offload_type == SKB_GSO_UDP)
		frg_cnt++; /* as Txd0 was used for inband header */

	/* Hand the descriptor list to the FIFO and kick the doorbell. */
	tx_fifo = mac_control->tx_FIFO_start[queue];
	val64 = fifo->list_info[put_off].list_phy_addr;
	writeq(val64, &tx_fifo->TxDL_Pointer);

	val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
		 TX_FIFO_LAST_LIST);
	if (offload_type)
		val64 |= TX_FIFO_SPECIAL_FUNC;

	writeq(val64, &tx_fifo->List_Control);

	mmiowb();

	/* Advance the put pointer (circular). */
	put_off++;
	if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
		put_off = 0;
	fifo->tx_curr_put_info.offset = put_off;

	/* Avoid "put" pointer going beyond "get" pointer */
	if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
		swstats->fifo_full_cnt++;
		DBG_PRINT(TX_DBG,
			  "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
			  put_off, get_off);
		s2io_stop_tx_queue(sp, fifo->fifo_no);
	}
	swstats->mem_allocated += skb->truesize;
	spin_unlock_irqrestore(&fifo->tx_lock, flags);

	/* MSI-X mode reaps completed descriptors inline. */
	if (sp->config.intr_type == MSI_X)
		tx_intr_handler(fifo);

	return NETDEV_TX_OK;

pci_map_failed:
	/* DMA mapping failed: drop the packet and stop the queue. */
	swstats->pci_map_fail_cnt++;
	s2io_stop_tx_queue(sp, fifo->fifo_no);
	swstats->mem_freed += skb->truesize;
	dev_kfree_skb(skb);
	spin_unlock_irqrestore(&fifo->tx_lock, flags);
	return NETDEV_TX_OK;
}
4300 static void
4301 s2io_alarm_handle(unsigned long data)
4303 struct s2io_nic *sp = (struct s2io_nic *)data;
4304 struct net_device *dev = sp->dev;
4306 s2io_handle_errors(dev);
4307 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4310 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4312 struct ring_info *ring = (struct ring_info *)dev_id;
4313 struct s2io_nic *sp = ring->nic;
4314 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4316 if (unlikely(!is_s2io_card_up(sp)))
4317 return IRQ_HANDLED;
4319 if (sp->config.napi) {
4320 u8 __iomem *addr = NULL;
4321 u8 val8 = 0;
4323 addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
4324 addr += (7 - ring->ring_no);
4325 val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
4326 writeb(val8, addr);
4327 val8 = readb(addr);
4328 napi_schedule(&ring->napi);
4329 } else {
4330 rx_intr_handler(ring, 0);
4331 s2io_chk_rx_buffers(sp, ring);
4334 return IRQ_HANDLED;
4337 static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4339 int i;
4340 struct fifo_info *fifos = (struct fifo_info *)dev_id;
4341 struct s2io_nic *sp = fifos->nic;
4342 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4343 struct config_param *config = &sp->config;
4344 u64 reason;
4346 if (unlikely(!is_s2io_card_up(sp)))
4347 return IRQ_NONE;
4349 reason = readq(&bar0->general_int_status);
4350 if (unlikely(reason == S2IO_MINUS_ONE))
4351 /* Nothing much can be done. Get out */
4352 return IRQ_HANDLED;
4354 if (reason & (GEN_INTR_TXPIC | GEN_INTR_TXTRAFFIC)) {
4355 writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4357 if (reason & GEN_INTR_TXPIC)
4358 s2io_txpic_intr_handle(sp);
4360 if (reason & GEN_INTR_TXTRAFFIC)
4361 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4363 for (i = 0; i < config->tx_fifo_num; i++)
4364 tx_intr_handler(&fifos[i]);
4366 writeq(sp->general_int_mask, &bar0->general_int_mask);
4367 readl(&bar0->general_int_status);
4368 return IRQ_HANDLED;
4370 /* The interrupt was not raised by us */
4371 return IRQ_NONE;
4374 static void s2io_txpic_intr_handle(struct s2io_nic *sp)
4376 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4377 u64 val64;
4379 val64 = readq(&bar0->pic_int_status);
4380 if (val64 & PIC_INT_GPIO) {
4381 val64 = readq(&bar0->gpio_int_reg);
4382 if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
4383 (val64 & GPIO_INT_REG_LINK_UP)) {
4385 * This is unstable state so clear both up/down
4386 * interrupt and adapter to re-evaluate the link state.
4388 val64 |= GPIO_INT_REG_LINK_DOWN;
4389 val64 |= GPIO_INT_REG_LINK_UP;
4390 writeq(val64, &bar0->gpio_int_reg);
4391 val64 = readq(&bar0->gpio_int_mask);
4392 val64 &= ~(GPIO_INT_MASK_LINK_UP |
4393 GPIO_INT_MASK_LINK_DOWN);
4394 writeq(val64, &bar0->gpio_int_mask);
4395 } else if (val64 & GPIO_INT_REG_LINK_UP) {
4396 val64 = readq(&bar0->adapter_status);
4397 /* Enable Adapter */
4398 val64 = readq(&bar0->adapter_control);
4399 val64 |= ADAPTER_CNTL_EN;
4400 writeq(val64, &bar0->adapter_control);
4401 val64 |= ADAPTER_LED_ON;
4402 writeq(val64, &bar0->adapter_control);
4403 if (!sp->device_enabled_once)
4404 sp->device_enabled_once = 1;
4406 s2io_link(sp, LINK_UP);
4408 * unmask link down interrupt and mask link-up
4409 * intr
4411 val64 = readq(&bar0->gpio_int_mask);
4412 val64 &= ~GPIO_INT_MASK_LINK_DOWN;
4413 val64 |= GPIO_INT_MASK_LINK_UP;
4414 writeq(val64, &bar0->gpio_int_mask);
4416 } else if (val64 & GPIO_INT_REG_LINK_DOWN) {
4417 val64 = readq(&bar0->adapter_status);
4418 s2io_link(sp, LINK_DOWN);
4419 /* Link is down so unmaks link up interrupt */
4420 val64 = readq(&bar0->gpio_int_mask);
4421 val64 &= ~GPIO_INT_MASK_LINK_UP;
4422 val64 |= GPIO_INT_MASK_LINK_DOWN;
4423 writeq(val64, &bar0->gpio_int_mask);
4425 /* turn off LED */
4426 val64 = readq(&bar0->adapter_control);
4427 val64 = val64 & (~ADAPTER_LED_ON);
4428 writeq(val64, &bar0->adapter_control);
4431 val64 = readq(&bar0->gpio_int_mask);
4435 * do_s2io_chk_alarm_bit - Check for alarm and incrment the counter
4436 * @value: alarm bits
4437 * @addr: address value
4438 * @cnt: counter variable
4439 * Description: Check for alarm and increment the counter
4440 * Return Value:
4441 * 1 - if alarm bit set
4442 * 0 - if alarm bit is not set
4444 static int do_s2io_chk_alarm_bit(u64 value, void __iomem *addr,
4445 unsigned long long *cnt)
4447 u64 val64;
4448 val64 = readq(addr);
4449 if (val64 & value) {
4450 writeq(val64, addr);
4451 (*cnt)++;
4452 return 1;
4454 return 0;
4459 * s2io_handle_errors - Xframe error indication handler
4460 * @nic: device private variable
4461 * Description: Handle alarms such as loss of link, single or
4462 * double ECC errors, critical and serious errors.
4463 * Return Value:
4464 * NONE
4466 static void s2io_handle_errors(void *dev_id)
4468 struct net_device *dev = (struct net_device *)dev_id;
4469 struct s2io_nic *sp = netdev_priv(dev);
4470 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4471 u64 temp64 = 0, val64 = 0;
4472 int i = 0;
4474 struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
4475 struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
4477 if (!is_s2io_card_up(sp))
4478 return;
4480 if (pci_channel_offline(sp->pdev))
4481 return;
4483 memset(&sw_stat->ring_full_cnt, 0,
4484 sizeof(sw_stat->ring_full_cnt));
4486 /* Handling the XPAK counters update */
4487 if (stats->xpak_timer_count < 72000) {
4488 /* waiting for an hour */
4489 stats->xpak_timer_count++;
4490 } else {
4491 s2io_updt_xpak_counter(dev);
4492 /* reset the count to zero */
4493 stats->xpak_timer_count = 0;
4496 /* Handling link status change error Intr */
4497 if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
4498 val64 = readq(&bar0->mac_rmac_err_reg);
4499 writeq(val64, &bar0->mac_rmac_err_reg);
4500 if (val64 & RMAC_LINK_STATE_CHANGE_INT)
4501 schedule_work(&sp->set_link_task);
4504 /* In case of a serious error, the device will be Reset. */
4505 if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
4506 &sw_stat->serious_err_cnt))
4507 goto reset;
4509 /* Check for data parity error */
4510 if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
4511 &sw_stat->parity_err_cnt))
4512 goto reset;
4514 /* Check for ring full counter */
4515 if (sp->device_type == XFRAME_II_DEVICE) {
4516 val64 = readq(&bar0->ring_bump_counter1);
4517 for (i = 0; i < 4; i++) {
4518 temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
4519 temp64 >>= 64 - ((i+1)*16);
4520 sw_stat->ring_full_cnt[i] += temp64;
4523 val64 = readq(&bar0->ring_bump_counter2);
4524 for (i = 0; i < 4; i++) {
4525 temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
4526 temp64 >>= 64 - ((i+1)*16);
4527 sw_stat->ring_full_cnt[i+4] += temp64;
4531 val64 = readq(&bar0->txdma_int_status);
4532 /*check for pfc_err*/
4533 if (val64 & TXDMA_PFC_INT) {
4534 if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
4535 PFC_MISC_0_ERR | PFC_MISC_1_ERR |
4536 PFC_PCIX_ERR,
4537 &bar0->pfc_err_reg,
4538 &sw_stat->pfc_err_cnt))
4539 goto reset;
4540 do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR,
4541 &bar0->pfc_err_reg,
4542 &sw_stat->pfc_err_cnt);
4545 /*check for tda_err*/
4546 if (val64 & TXDMA_TDA_INT) {
4547 if (do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR |
4548 TDA_SM0_ERR_ALARM |
4549 TDA_SM1_ERR_ALARM,
4550 &bar0->tda_err_reg,
4551 &sw_stat->tda_err_cnt))
4552 goto reset;
4553 do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
4554 &bar0->tda_err_reg,
4555 &sw_stat->tda_err_cnt);
4557 /*check for pcc_err*/
4558 if (val64 & TXDMA_PCC_INT) {
4559 if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
4560 PCC_N_SERR | PCC_6_COF_OV_ERR |
4561 PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
4562 PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR |
4563 PCC_TXB_ECC_DB_ERR,
4564 &bar0->pcc_err_reg,
4565 &sw_stat->pcc_err_cnt))
4566 goto reset;
4567 do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
4568 &bar0->pcc_err_reg,
4569 &sw_stat->pcc_err_cnt);
4572 /*check for tti_err*/
4573 if (val64 & TXDMA_TTI_INT) {
4574 if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM,
4575 &bar0->tti_err_reg,
4576 &sw_stat->tti_err_cnt))
4577 goto reset;
4578 do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
4579 &bar0->tti_err_reg,
4580 &sw_stat->tti_err_cnt);
4583 /*check for lso_err*/
4584 if (val64 & TXDMA_LSO_INT) {
4585 if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT |
4586 LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
4587 &bar0->lso_err_reg,
4588 &sw_stat->lso_err_cnt))
4589 goto reset;
4590 do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
4591 &bar0->lso_err_reg,
4592 &sw_stat->lso_err_cnt);
4595 /*check for tpa_err*/
4596 if (val64 & TXDMA_TPA_INT) {
4597 if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM,
4598 &bar0->tpa_err_reg,
4599 &sw_stat->tpa_err_cnt))
4600 goto reset;
4601 do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP,
4602 &bar0->tpa_err_reg,
4603 &sw_stat->tpa_err_cnt);
4606 /*check for sm_err*/
4607 if (val64 & TXDMA_SM_INT) {
4608 if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM,
4609 &bar0->sm_err_reg,
4610 &sw_stat->sm_err_cnt))
4611 goto reset;
4614 val64 = readq(&bar0->mac_int_status);
4615 if (val64 & MAC_INT_STATUS_TMAC_INT) {
4616 if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
4617 &bar0->mac_tmac_err_reg,
4618 &sw_stat->mac_tmac_err_cnt))
4619 goto reset;
4620 do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
4621 TMAC_DESC_ECC_SG_ERR |
4622 TMAC_DESC_ECC_DB_ERR,
4623 &bar0->mac_tmac_err_reg,
4624 &sw_stat->mac_tmac_err_cnt);
4627 val64 = readq(&bar0->xgxs_int_status);
4628 if (val64 & XGXS_INT_STATUS_TXGXS) {
4629 if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
4630 &bar0->xgxs_txgxs_err_reg,
4631 &sw_stat->xgxs_txgxs_err_cnt))
4632 goto reset;
4633 do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
4634 &bar0->xgxs_txgxs_err_reg,
4635 &sw_stat->xgxs_txgxs_err_cnt);
4638 val64 = readq(&bar0->rxdma_int_status);
4639 if (val64 & RXDMA_INT_RC_INT_M) {
4640 if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR |
4641 RC_FTC_ECC_DB_ERR |
4642 RC_PRCn_SM_ERR_ALARM |
4643 RC_FTC_SM_ERR_ALARM,
4644 &bar0->rc_err_reg,
4645 &sw_stat->rc_err_cnt))
4646 goto reset;
4647 do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR |
4648 RC_FTC_ECC_SG_ERR |
4649 RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
4650 &sw_stat->rc_err_cnt);
4651 if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn |
4652 PRC_PCI_AB_WR_Rn |
4653 PRC_PCI_AB_F_WR_Rn,
4654 &bar0->prc_pcix_err_reg,
4655 &sw_stat->prc_pcix_err_cnt))
4656 goto reset;
4657 do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn |
4658 PRC_PCI_DP_WR_Rn |
4659 PRC_PCI_DP_F_WR_Rn,
4660 &bar0->prc_pcix_err_reg,
4661 &sw_stat->prc_pcix_err_cnt);
4664 if (val64 & RXDMA_INT_RPA_INT_M) {
4665 if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
4666 &bar0->rpa_err_reg,
4667 &sw_stat->rpa_err_cnt))
4668 goto reset;
4669 do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
4670 &bar0->rpa_err_reg,
4671 &sw_stat->rpa_err_cnt);
4674 if (val64 & RXDMA_INT_RDA_INT_M) {
4675 if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR |
4676 RDA_FRM_ECC_DB_N_AERR |
4677 RDA_SM1_ERR_ALARM |
4678 RDA_SM0_ERR_ALARM |
4679 RDA_RXD_ECC_DB_SERR,
4680 &bar0->rda_err_reg,
4681 &sw_stat->rda_err_cnt))
4682 goto reset;
4683 do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR |
4684 RDA_FRM_ECC_SG_ERR |
4685 RDA_MISC_ERR |
4686 RDA_PCIX_ERR,
4687 &bar0->rda_err_reg,
4688 &sw_stat->rda_err_cnt);
4691 if (val64 & RXDMA_INT_RTI_INT_M) {
4692 if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM,
4693 &bar0->rti_err_reg,
4694 &sw_stat->rti_err_cnt))
4695 goto reset;
4696 do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
4697 &bar0->rti_err_reg,
4698 &sw_stat->rti_err_cnt);
4701 val64 = readq(&bar0->mac_int_status);
4702 if (val64 & MAC_INT_STATUS_RMAC_INT) {
4703 if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
4704 &bar0->mac_rmac_err_reg,
4705 &sw_stat->mac_rmac_err_cnt))
4706 goto reset;
4707 do_s2io_chk_alarm_bit(RMAC_UNUSED_INT |
4708 RMAC_SINGLE_ECC_ERR |
4709 RMAC_DOUBLE_ECC_ERR,
4710 &bar0->mac_rmac_err_reg,
4711 &sw_stat->mac_rmac_err_cnt);
4714 val64 = readq(&bar0->xgxs_int_status);
4715 if (val64 & XGXS_INT_STATUS_RXGXS) {
4716 if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
4717 &bar0->xgxs_rxgxs_err_reg,
4718 &sw_stat->xgxs_rxgxs_err_cnt))
4719 goto reset;
4722 val64 = readq(&bar0->mc_int_status);
4723 if (val64 & MC_INT_STATUS_MC_INT) {
4724 if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR,
4725 &bar0->mc_err_reg,
4726 &sw_stat->mc_err_cnt))
4727 goto reset;
4729 /* Handling Ecc errors */
4730 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
4731 writeq(val64, &bar0->mc_err_reg);
4732 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
4733 sw_stat->double_ecc_errs++;
4734 if (sp->device_type != XFRAME_II_DEVICE) {
4736 * Reset XframeI only if critical error
4738 if (val64 &
4739 (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
4740 MC_ERR_REG_MIRI_ECC_DB_ERR_1))
4741 goto reset;
4743 } else
4744 sw_stat->single_ecc_errs++;
4747 return;
4749 reset:
4750 s2io_stop_all_tx_queue(sp);
4751 schedule_work(&sp->rst_timer_task);
4752 sw_stat->soft_reset_cnt++;
4756 * s2io_isr - ISR handler of the device .
4757 * @irq: the irq of the device.
4758 * @dev_id: a void pointer to the dev structure of the NIC.
4759 * Description: This function is the ISR handler of the device. It
4760 * identifies the reason for the interrupt and calls the relevant
4761 * service routines. As a contongency measure, this ISR allocates the
4762 * recv buffers, if their numbers are below the panic value which is
4763 * presently set to 25% of the original number of rcv buffers allocated.
4764 * Return value:
4765 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
4766 * IRQ_NONE: will be returned if interrupt is not from our device
4768 static irqreturn_t s2io_isr(int irq, void *dev_id)
4770 struct net_device *dev = (struct net_device *)dev_id;
4771 struct s2io_nic *sp = netdev_priv(dev);
4772 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4773 int i;
4774 u64 reason = 0;
4775 struct mac_info *mac_control;
4776 struct config_param *config;
4778 /* Pretend we handled any irq's from a disconnected card */
4779 if (pci_channel_offline(sp->pdev))
4780 return IRQ_NONE;
4782 if (!is_s2io_card_up(sp))
4783 return IRQ_NONE;
4785 config = &sp->config;
4786 mac_control = &sp->mac_control;
4789 * Identify the cause for interrupt and call the appropriate
4790 * interrupt handler. Causes for the interrupt could be;
4791 * 1. Rx of packet.
4792 * 2. Tx complete.
4793 * 3. Link down.
4795 reason = readq(&bar0->general_int_status);
4797 if (unlikely(reason == S2IO_MINUS_ONE))
4798 return IRQ_HANDLED; /* Nothing much can be done. Get out */
4800 if (reason &
4801 (GEN_INTR_RXTRAFFIC | GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC)) {
4802 writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4804 if (config->napi) {
4805 if (reason & GEN_INTR_RXTRAFFIC) {
4806 napi_schedule(&sp->napi);
4807 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
4808 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4809 readl(&bar0->rx_traffic_int);
4811 } else {
4813 * rx_traffic_int reg is an R1 register, writing all 1's
4814 * will ensure that the actual interrupt causing bit
4815 * get's cleared and hence a read can be avoided.
4817 if (reason & GEN_INTR_RXTRAFFIC)
4818 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4820 for (i = 0; i < config->rx_ring_num; i++) {
4821 struct ring_info *ring = &mac_control->rings[i];
4823 rx_intr_handler(ring, 0);
4828 * tx_traffic_int reg is an R1 register, writing all 1's
4829 * will ensure that the actual interrupt causing bit get's
4830 * cleared and hence a read can be avoided.
4832 if (reason & GEN_INTR_TXTRAFFIC)
4833 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4835 for (i = 0; i < config->tx_fifo_num; i++)
4836 tx_intr_handler(&mac_control->fifos[i]);
4838 if (reason & GEN_INTR_TXPIC)
4839 s2io_txpic_intr_handle(sp);
4842 * Reallocate the buffers from the interrupt handler itself.
4844 if (!config->napi) {
4845 for (i = 0; i < config->rx_ring_num; i++) {
4846 struct ring_info *ring = &mac_control->rings[i];
4848 s2io_chk_rx_buffers(sp, ring);
4851 writeq(sp->general_int_mask, &bar0->general_int_mask);
4852 readl(&bar0->general_int_status);
4854 return IRQ_HANDLED;
4856 } else if (!reason) {
4857 /* The interrupt was not raised by us */
4858 return IRQ_NONE;
4861 return IRQ_HANDLED;
4865 * s2io_updt_stats -
4867 static void s2io_updt_stats(struct s2io_nic *sp)
4869 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4870 u64 val64;
4871 int cnt = 0;
4873 if (is_s2io_card_up(sp)) {
4874 /* Apprx 30us on a 133 MHz bus */
4875 val64 = SET_UPDT_CLICKS(10) |
4876 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4877 writeq(val64, &bar0->stat_cfg);
4878 do {
4879 udelay(100);
4880 val64 = readq(&bar0->stat_cfg);
4881 if (!(val64 & s2BIT(0)))
4882 break;
4883 cnt++;
4884 if (cnt == 5)
4885 break; /* Updt failed */
4886 } while (1);
4891 * s2io_get_stats - Updates the device statistics structure.
4892 * @dev : pointer to the device structure.
4893 * Description:
4894 * This function updates the device statistics structure in the s2io_nic
4895 * structure and returns a pointer to the same.
4896 * Return value:
4897 * pointer to the updated net_device_stats structure.
4899 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4901 struct s2io_nic *sp = netdev_priv(dev);
4902 struct mac_info *mac_control = &sp->mac_control;
4903 struct stat_block *stats = mac_control->stats_info;
4904 u64 delta;
4906 /* Configure Stats for immediate updt */
4907 s2io_updt_stats(sp);
4909 /* A device reset will cause the on-adapter statistics to be zero'ed.
4910 * This can be done while running by changing the MTU. To prevent the
4911 * system from having the stats zero'ed, the driver keeps a copy of the
4912 * last update to the system (which is also zero'ed on reset). This
4913 * enables the driver to accurately know the delta between the last
4914 * update and the current update.
4916 delta = ((u64) le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
4917 le32_to_cpu(stats->rmac_vld_frms)) - sp->stats.rx_packets;
4918 sp->stats.rx_packets += delta;
4919 dev->stats.rx_packets += delta;
4921 delta = ((u64) le32_to_cpu(stats->tmac_frms_oflow) << 32 |
4922 le32_to_cpu(stats->tmac_frms)) - sp->stats.tx_packets;
4923 sp->stats.tx_packets += delta;
4924 dev->stats.tx_packets += delta;
4926 delta = ((u64) le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
4927 le32_to_cpu(stats->rmac_data_octets)) - sp->stats.rx_bytes;
4928 sp->stats.rx_bytes += delta;
4929 dev->stats.rx_bytes += delta;
4931 delta = ((u64) le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
4932 le32_to_cpu(stats->tmac_data_octets)) - sp->stats.tx_bytes;
4933 sp->stats.tx_bytes += delta;
4934 dev->stats.tx_bytes += delta;
4936 delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_errors;
4937 sp->stats.rx_errors += delta;
4938 dev->stats.rx_errors += delta;
4940 delta = ((u64) le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
4941 le32_to_cpu(stats->tmac_any_err_frms)) - sp->stats.tx_errors;
4942 sp->stats.tx_errors += delta;
4943 dev->stats.tx_errors += delta;
4945 delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_dropped;
4946 sp->stats.rx_dropped += delta;
4947 dev->stats.rx_dropped += delta;
4949 delta = le64_to_cpu(stats->tmac_drop_frms) - sp->stats.tx_dropped;
4950 sp->stats.tx_dropped += delta;
4951 dev->stats.tx_dropped += delta;
4953 /* The adapter MAC interprets pause frames as multicast packets, but
4954 * does not pass them up. This erroneously increases the multicast
4955 * packet count and needs to be deducted when the multicast frame count
4956 * is queried.
4958 delta = (u64) le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
4959 le32_to_cpu(stats->rmac_vld_mcst_frms);
4960 delta -= le64_to_cpu(stats->rmac_pause_ctrl_frms);
4961 delta -= sp->stats.multicast;
4962 sp->stats.multicast += delta;
4963 dev->stats.multicast += delta;
4965 delta = ((u64) le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
4966 le32_to_cpu(stats->rmac_usized_frms)) +
4967 le64_to_cpu(stats->rmac_long_frms) - sp->stats.rx_length_errors;
4968 sp->stats.rx_length_errors += delta;
4969 dev->stats.rx_length_errors += delta;
4971 delta = le64_to_cpu(stats->rmac_fcs_err_frms) - sp->stats.rx_crc_errors;
4972 sp->stats.rx_crc_errors += delta;
4973 dev->stats.rx_crc_errors += delta;
4975 return &dev->stats;
4979 * s2io_set_multicast - entry point for multicast address enable/disable.
4980 * @dev : pointer to the device structure
4981 * Description:
4982 * This function is a driver entry point which gets called by the kernel
4983 * whenever multicast addresses must be enabled/disabled. This also gets
4984 * called to set/reset promiscuous mode. Depending on the deivce flag, we
4985 * determine, if multicast address must be enabled or if promiscuous mode
4986 * is to be disabled etc.
4987 * Return value:
4988 * void.
4991 static void s2io_set_multicast(struct net_device *dev)
4993 int i, j, prev_cnt;
4994 struct netdev_hw_addr *ha;
4995 struct s2io_nic *sp = netdev_priv(dev);
4996 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4997 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4998 0xfeffffffffffULL;
4999 u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
5000 void __iomem *add;
5001 struct config_param *config = &sp->config;
5003 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
5004 /* Enable all Multicast addresses */
5005 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
5006 &bar0->rmac_addr_data0_mem);
5007 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
5008 &bar0->rmac_addr_data1_mem);
5009 val64 = RMAC_ADDR_CMD_MEM_WE |
5010 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5011 RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
5012 writeq(val64, &bar0->rmac_addr_cmd_mem);
5013 /* Wait till command completes */
5014 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5015 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5016 S2IO_BIT_RESET);
5018 sp->m_cast_flg = 1;
5019 sp->all_multi_pos = config->max_mc_addr - 1;
5020 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
5021 /* Disable all Multicast addresses */
5022 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
5023 &bar0->rmac_addr_data0_mem);
5024 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
5025 &bar0->rmac_addr_data1_mem);
5026 val64 = RMAC_ADDR_CMD_MEM_WE |
5027 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5028 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
5029 writeq(val64, &bar0->rmac_addr_cmd_mem);
5030 /* Wait till command completes */
5031 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5032 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5033 S2IO_BIT_RESET);
5035 sp->m_cast_flg = 0;
5036 sp->all_multi_pos = 0;
5039 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
5040 /* Put the NIC into promiscuous mode */
5041 add = &bar0->mac_cfg;
5042 val64 = readq(&bar0->mac_cfg);
5043 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
5045 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5046 writel((u32)val64, add);
5047 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5048 writel((u32) (val64 >> 32), (add + 4));
5050 if (vlan_tag_strip != 1) {
5051 val64 = readq(&bar0->rx_pa_cfg);
5052 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
5053 writeq(val64, &bar0->rx_pa_cfg);
5054 sp->vlan_strip_flag = 0;
5057 val64 = readq(&bar0->mac_cfg);
5058 sp->promisc_flg = 1;
5059 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
5060 dev->name);
5061 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
5062 /* Remove the NIC from promiscuous mode */
5063 add = &bar0->mac_cfg;
5064 val64 = readq(&bar0->mac_cfg);
5065 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
5067 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5068 writel((u32)val64, add);
5069 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5070 writel((u32) (val64 >> 32), (add + 4));
5072 if (vlan_tag_strip != 0) {
5073 val64 = readq(&bar0->rx_pa_cfg);
5074 val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
5075 writeq(val64, &bar0->rx_pa_cfg);
5076 sp->vlan_strip_flag = 1;
5079 val64 = readq(&bar0->mac_cfg);
5080 sp->promisc_flg = 0;
5081 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n", dev->name);
5084 /* Update individual M_CAST address list */
5085 if ((!sp->m_cast_flg) && netdev_mc_count(dev)) {
5086 if (netdev_mc_count(dev) >
5087 (config->max_mc_addr - config->max_mac_addr)) {
5088 DBG_PRINT(ERR_DBG,
5089 "%s: No more Rx filters can be added - "
5090 "please enable ALL_MULTI instead\n",
5091 dev->name);
5092 return;
5095 prev_cnt = sp->mc_addr_count;
5096 sp->mc_addr_count = netdev_mc_count(dev);
5098 /* Clear out the previous list of Mc in the H/W. */
5099 for (i = 0; i < prev_cnt; i++) {
5100 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
5101 &bar0->rmac_addr_data0_mem);
5102 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
5103 &bar0->rmac_addr_data1_mem);
5104 val64 = RMAC_ADDR_CMD_MEM_WE |
5105 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5106 RMAC_ADDR_CMD_MEM_OFFSET
5107 (config->mc_start_offset + i);
5108 writeq(val64, &bar0->rmac_addr_cmd_mem);
5110 /* Wait for command completes */
5111 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5112 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5113 S2IO_BIT_RESET)) {
5114 DBG_PRINT(ERR_DBG,
5115 "%s: Adding Multicasts failed\n",
5116 dev->name);
5117 return;
5121 /* Create the new Rx filter list and update the same in H/W. */
5122 i = 0;
5123 netdev_for_each_mc_addr(ha, dev) {
5124 mac_addr = 0;
5125 for (j = 0; j < ETH_ALEN; j++) {
5126 mac_addr |= ha->addr[j];
5127 mac_addr <<= 8;
5129 mac_addr >>= 8;
5130 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
5131 &bar0->rmac_addr_data0_mem);
5132 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
5133 &bar0->rmac_addr_data1_mem);
5134 val64 = RMAC_ADDR_CMD_MEM_WE |
5135 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5136 RMAC_ADDR_CMD_MEM_OFFSET
5137 (i + config->mc_start_offset);
5138 writeq(val64, &bar0->rmac_addr_cmd_mem);
5140 /* Wait for command completes */
5141 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5142 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5143 S2IO_BIT_RESET)) {
5144 DBG_PRINT(ERR_DBG,
5145 "%s: Adding Multicasts failed\n",
5146 dev->name);
5147 return;
5149 i++;
5154 /* read from CAM unicast & multicast addresses and store it in
5155 * def_mac_addr structure
5157 static void do_s2io_store_unicast_mc(struct s2io_nic *sp)
5159 int offset;
5160 u64 mac_addr = 0x0;
5161 struct config_param *config = &sp->config;
5163 /* store unicast & multicast mac addresses */
5164 for (offset = 0; offset < config->max_mc_addr; offset++) {
5165 mac_addr = do_s2io_read_unicast_mc(sp, offset);
5166 /* if read fails disable the entry */
5167 if (mac_addr == FAILURE)
5168 mac_addr = S2IO_DISABLE_MAC_ENTRY;
5169 do_s2io_copy_mac_addr(sp, offset, mac_addr);
5173 /* restore unicast & multicast MAC to CAM from def_mac_addr structure */
5174 static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
5176 int offset;
5177 struct config_param *config = &sp->config;
5178 /* restore unicast mac address */
5179 for (offset = 0; offset < config->max_mac_addr; offset++)
5180 do_s2io_prog_unicast(sp->dev,
5181 sp->def_mac_addr[offset].mac_addr);
5183 /* restore multicast mac address */
5184 for (offset = config->mc_start_offset;
5185 offset < config->max_mc_addr; offset++)
5186 do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
5189 /* add a multicast MAC address to CAM */
5190 static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
5192 int i;
5193 u64 mac_addr = 0;
5194 struct config_param *config = &sp->config;
5196 for (i = 0; i < ETH_ALEN; i++) {
5197 mac_addr <<= 8;
5198 mac_addr |= addr[i];
5200 if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
5201 return SUCCESS;
5203 /* check if the multicast mac already preset in CAM */
5204 for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
5205 u64 tmp64;
5206 tmp64 = do_s2io_read_unicast_mc(sp, i);
5207 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5208 break;
5210 if (tmp64 == mac_addr)
5211 return SUCCESS;
5213 if (i == config->max_mc_addr) {
5214 DBG_PRINT(ERR_DBG,
5215 "CAM full no space left for multicast MAC\n");
5216 return FAILURE;
5218 /* Update the internal structure with this new mac address */
5219 do_s2io_copy_mac_addr(sp, i, mac_addr);
5221 return do_s2io_add_mac(sp, mac_addr, i);
5224 /* add MAC address to CAM */
5225 static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
5227 u64 val64;
5228 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5230 writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
5231 &bar0->rmac_addr_data0_mem);
5233 val64 = RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5234 RMAC_ADDR_CMD_MEM_OFFSET(off);
5235 writeq(val64, &bar0->rmac_addr_cmd_mem);
5237 /* Wait till command completes */
5238 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5239 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5240 S2IO_BIT_RESET)) {
5241 DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
5242 return FAILURE;
5244 return SUCCESS;
5246 /* deletes a specified unicast/multicast mac entry from CAM */
5247 static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
5249 int offset;
5250 u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
5251 struct config_param *config = &sp->config;
5253 for (offset = 1;
5254 offset < config->max_mc_addr; offset++) {
5255 tmp64 = do_s2io_read_unicast_mc(sp, offset);
5256 if (tmp64 == addr) {
5257 /* disable the entry by writing 0xffffffffffffULL */
5258 if (do_s2io_add_mac(sp, dis_addr, offset) == FAILURE)
5259 return FAILURE;
5260 /* store the new mac list from CAM */
5261 do_s2io_store_unicast_mc(sp);
5262 return SUCCESS;
5265 DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
5266 (unsigned long long)addr);
5267 return FAILURE;
5270 /* read mac entries from CAM */
5271 static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
5273 u64 tmp64 = 0xffffffffffff0000ULL, val64;
5274 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5276 /* read mac addr */
5277 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5278 RMAC_ADDR_CMD_MEM_OFFSET(offset);
5279 writeq(val64, &bar0->rmac_addr_cmd_mem);
5281 /* Wait till command completes */
5282 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5283 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5284 S2IO_BIT_RESET)) {
5285 DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
5286 return FAILURE;
5288 tmp64 = readq(&bar0->rmac_addr_data0_mem);
5290 return tmp64 >> 16;
5294 * s2io_set_mac_addr driver entry point
5297 static int s2io_set_mac_addr(struct net_device *dev, void *p)
5299 struct sockaddr *addr = p;
5301 if (!is_valid_ether_addr(addr->sa_data))
5302 return -EINVAL;
5304 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5306 /* store the MAC address in CAM */
5307 return do_s2io_prog_unicast(dev, dev->dev_addr);
5310 * do_s2io_prog_unicast - Programs the Xframe mac address
5311 * @dev : pointer to the device structure.
5312 * @addr: a uchar pointer to the new mac address which is to be set.
5313 * Description : This procedure will program the Xframe to receive
5314 * frames with new Mac Address
5315 * Return value: SUCCESS on success and an appropriate (-)ve integer
5316 * as defined in errno.h file on failure.
5319 static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
5321 struct s2io_nic *sp = netdev_priv(dev);
5322 register u64 mac_addr = 0, perm_addr = 0;
5323 int i;
5324 u64 tmp64;
5325 struct config_param *config = &sp->config;
5328 * Set the new MAC address as the new unicast filter and reflect this
5329 * change on the device address registered with the OS. It will be
5330 * at offset 0.
5332 for (i = 0; i < ETH_ALEN; i++) {
5333 mac_addr <<= 8;
5334 mac_addr |= addr[i];
5335 perm_addr <<= 8;
5336 perm_addr |= sp->def_mac_addr[0].mac_addr[i];
5339 /* check if the dev_addr is different than perm_addr */
5340 if (mac_addr == perm_addr)
5341 return SUCCESS;
5343 /* check if the mac already preset in CAM */
5344 for (i = 1; i < config->max_mac_addr; i++) {
5345 tmp64 = do_s2io_read_unicast_mc(sp, i);
5346 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5347 break;
5349 if (tmp64 == mac_addr) {
5350 DBG_PRINT(INFO_DBG,
5351 "MAC addr:0x%llx already present in CAM\n",
5352 (unsigned long long)mac_addr);
5353 return SUCCESS;
5356 if (i == config->max_mac_addr) {
5357 DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
5358 return FAILURE;
5360 /* Update the internal structure with this new mac address */
5361 do_s2io_copy_mac_addr(sp, i, mac_addr);
5363 return do_s2io_add_mac(sp, mac_addr, i);
5367 * s2io_ethtool_sset - Sets different link parameters.
5368 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
5369 * @info: pointer to the structure with parameters given by ethtool to set
5370 * link information.
5371 * Description:
5372 * The function sets different link parameters provided by the user onto
5373 * the NIC.
5374 * Return value:
5375 * 0 on success.
5378 static int s2io_ethtool_sset(struct net_device *dev,
5379 struct ethtool_cmd *info)
5381 struct s2io_nic *sp = netdev_priv(dev);
5382 if ((info->autoneg == AUTONEG_ENABLE) ||
5383 (info->speed != SPEED_10000) ||
5384 (info->duplex != DUPLEX_FULL))
5385 return -EINVAL;
5386 else {
5387 s2io_close(sp->dev);
5388 s2io_open(sp->dev);
5391 return 0;
5395 * s2io_ethtol_gset - Return link specific information.
5396 * @sp : private member of the device structure, pointer to the
5397 * s2io_nic structure.
5398 * @info : pointer to the structure with parameters given by ethtool
5399 * to return link information.
5400 * Description:
5401 * Returns link specific information like speed, duplex etc.. to ethtool.
5402 * Return value :
5403 * return 0 on success.
5406 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
5408 struct s2io_nic *sp = netdev_priv(dev);
5409 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5410 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5411 info->port = PORT_FIBRE;
5413 /* info->transceiver */
5414 info->transceiver = XCVR_EXTERNAL;
5416 if (netif_carrier_ok(sp->dev)) {
5417 info->speed = 10000;
5418 info->duplex = DUPLEX_FULL;
5419 } else {
5420 info->speed = -1;
5421 info->duplex = -1;
5424 info->autoneg = AUTONEG_DISABLE;
5425 return 0;
5429 * s2io_ethtool_gdrvinfo - Returns driver specific information.
5430 * @sp : private member of the device structure, which is a pointer to the
5431 * s2io_nic structure.
5432 * @info : pointer to the structure with parameters given by ethtool to
5433 * return driver information.
5434 * Description:
5435 * Returns driver specefic information like name, version etc.. to ethtool.
5436 * Return value:
5437 * void
5440 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5441 struct ethtool_drvinfo *info)
5443 struct s2io_nic *sp = netdev_priv(dev);
5445 strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
5446 strncpy(info->version, s2io_driver_version, sizeof(info->version));
5447 strncpy(info->fw_version, "", sizeof(info->fw_version));
5448 strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
5449 info->regdump_len = XENA_REG_SPACE;
5450 info->eedump_len = XENA_EEPROM_SPACE;
5454 * s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer.
5455 * @sp: private member of the device structure, which is a pointer to the
5456 * s2io_nic structure.
5457 * @regs : pointer to the structure with parameters given by ethtool for
5458 * dumping the registers.
5459 * @reg_space: The input argumnet into which all the registers are dumped.
5460 * Description:
5461 * Dumps the entire register space of xFrame NIC into the user given
5462 * buffer area.
5463 * Return value :
5464 * void .
5467 static void s2io_ethtool_gregs(struct net_device *dev,
5468 struct ethtool_regs *regs, void *space)
5470 int i;
5471 u64 reg;
5472 u8 *reg_space = (u8 *)space;
5473 struct s2io_nic *sp = netdev_priv(dev);
5475 regs->len = XENA_REG_SPACE;
5476 regs->version = sp->pdev->subsystem_device;
5478 for (i = 0; i < regs->len; i += 8) {
5479 reg = readq(sp->bar0 + i);
5480 memcpy((reg_space + i), &reg, 8);
5485 * s2io_set_led - control NIC led
5487 static void s2io_set_led(struct s2io_nic *sp, bool on)
5489 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5490 u16 subid = sp->pdev->subsystem_device;
5491 u64 val64;
5493 if ((sp->device_type == XFRAME_II_DEVICE) ||
5494 ((subid & 0xFF) >= 0x07)) {
5495 val64 = readq(&bar0->gpio_control);
5496 if (on)
5497 val64 |= GPIO_CTRL_GPIO_0;
5498 else
5499 val64 &= ~GPIO_CTRL_GPIO_0;
5501 writeq(val64, &bar0->gpio_control);
5502 } else {
5503 val64 = readq(&bar0->adapter_control);
5504 if (on)
5505 val64 |= ADAPTER_LED_ON;
5506 else
5507 val64 &= ~ADAPTER_LED_ON;
5509 writeq(val64, &bar0->adapter_control);
5515 * s2io_ethtool_set_led - To physically identify the nic on the system.
5516 * @dev : network device
5517 * @state: led setting
5519 * Description: Used to physically identify the NIC on the system.
5520 * The Link LED will blink for a time specified by the user for
5521 * identification.
5522 * NOTE: The Link has to be Up to be able to blink the LED. Hence
5523 * identification is possible only if it's link is up.
5526 static int s2io_ethtool_set_led(struct net_device *dev,
5527 enum ethtool_phys_id_state state)
5529 struct s2io_nic *sp = netdev_priv(dev);
5530 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5531 u16 subid = sp->pdev->subsystem_device;
5533 if ((sp->device_type == XFRAME_I_DEVICE) && ((subid & 0xFF) < 0x07)) {
5534 u64 val64 = readq(&bar0->adapter_control);
5535 if (!(val64 & ADAPTER_CNTL_EN)) {
5536 pr_err("Adapter Link down, cannot blink LED\n");
5537 return -EAGAIN;
5541 switch (state) {
5542 case ETHTOOL_ID_ACTIVE:
5543 sp->adapt_ctrl_org = readq(&bar0->gpio_control);
5544 return 1; /* cycle on/off once per second */
5546 case ETHTOOL_ID_ON:
5547 s2io_set_led(sp, true);
5548 break;
5550 case ETHTOOL_ID_OFF:
5551 s2io_set_led(sp, false);
5552 break;
5554 case ETHTOOL_ID_INACTIVE:
5555 if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid))
5556 writeq(sp->adapt_ctrl_org, &bar0->gpio_control);
5559 return 0;
5562 static void s2io_ethtool_gringparam(struct net_device *dev,
5563 struct ethtool_ringparam *ering)
5565 struct s2io_nic *sp = netdev_priv(dev);
5566 int i, tx_desc_count = 0, rx_desc_count = 0;
5568 if (sp->rxd_mode == RXD_MODE_1) {
5569 ering->rx_max_pending = MAX_RX_DESC_1;
5570 ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5571 } else {
5572 ering->rx_max_pending = MAX_RX_DESC_2;
5573 ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5576 ering->rx_mini_max_pending = 0;
5577 ering->tx_max_pending = MAX_TX_DESC;
5579 for (i = 0; i < sp->config.rx_ring_num; i++)
5580 rx_desc_count += sp->config.rx_cfg[i].num_rxd;
5581 ering->rx_pending = rx_desc_count;
5582 ering->rx_jumbo_pending = rx_desc_count;
5583 ering->rx_mini_pending = 0;
5585 for (i = 0; i < sp->config.tx_fifo_num; i++)
5586 tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5587 ering->tx_pending = tx_desc_count;
5588 DBG_PRINT(INFO_DBG, "max txds: %d\n", sp->config.max_txds);
5592 * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
5593 * @sp : private member of the device structure, which is a pointer to the
5594 * s2io_nic structure.
5595 * @ep : pointer to the structure with pause parameters given by ethtool.
5596 * Description:
5597 * Returns the Pause frame generation and reception capability of the NIC.
5598 * Return value:
5599 * void
5601 static void s2io_ethtool_getpause_data(struct net_device *dev,
5602 struct ethtool_pauseparam *ep)
5604 u64 val64;
5605 struct s2io_nic *sp = netdev_priv(dev);
5606 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5608 val64 = readq(&bar0->rmac_pause_cfg);
5609 if (val64 & RMAC_PAUSE_GEN_ENABLE)
5610 ep->tx_pause = true;
5611 if (val64 & RMAC_PAUSE_RX_ENABLE)
5612 ep->rx_pause = true;
5613 ep->autoneg = false;
5617 * s2io_ethtool_setpause_data - set/reset pause frame generation.
5618 * @sp : private member of the device structure, which is a pointer to the
5619 * s2io_nic structure.
5620 * @ep : pointer to the structure with pause parameters given by ethtool.
5621 * Description:
5622 * It can be used to set or reset Pause frame generation or reception
5623 * support of the NIC.
5624 * Return value:
5625 * int, returns 0 on Success
5628 static int s2io_ethtool_setpause_data(struct net_device *dev,
5629 struct ethtool_pauseparam *ep)
5631 u64 val64;
5632 struct s2io_nic *sp = netdev_priv(dev);
5633 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5635 val64 = readq(&bar0->rmac_pause_cfg);
5636 if (ep->tx_pause)
5637 val64 |= RMAC_PAUSE_GEN_ENABLE;
5638 else
5639 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5640 if (ep->rx_pause)
5641 val64 |= RMAC_PAUSE_RX_ENABLE;
5642 else
5643 val64 &= ~RMAC_PAUSE_RX_ENABLE;
5644 writeq(val64, &bar0->rmac_pause_cfg);
5645 return 0;
5649 * read_eeprom - reads 4 bytes of data from user given offset.
5650 * @sp : private member of the device structure, which is a pointer to the
5651 * s2io_nic structure.
5652 * @off : offset at which the data must be written
5653 * @data : Its an output parameter where the data read at the given
5654 * offset is stored.
5655 * Description:
5656 * Will read 4 bytes of data from the user given offset and return the
5657 * read data.
5658 * NOTE: Will allow to read only part of the EEPROM visible through the
5659 * I2C bus.
5660 * Return value:
5661 * -1 on failure and 0 on success.
5664 #define S2IO_DEV_ID 5
5665 static int read_eeprom(struct s2io_nic *sp, int off, u64 *data)
5667 int ret = -1;
5668 u32 exit_cnt = 0;
5669 u64 val64;
5670 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5672 if (sp->device_type == XFRAME_I_DEVICE) {
5673 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
5674 I2C_CONTROL_ADDR(off) |
5675 I2C_CONTROL_BYTE_CNT(0x3) |
5676 I2C_CONTROL_READ |
5677 I2C_CONTROL_CNTL_START;
5678 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5680 while (exit_cnt < 5) {
5681 val64 = readq(&bar0->i2c_control);
5682 if (I2C_CONTROL_CNTL_END(val64)) {
5683 *data = I2C_CONTROL_GET_DATA(val64);
5684 ret = 0;
5685 break;
5687 msleep(50);
5688 exit_cnt++;
5692 if (sp->device_type == XFRAME_II_DEVICE) {
5693 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5694 SPI_CONTROL_BYTECNT(0x3) |
5695 SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
5696 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5697 val64 |= SPI_CONTROL_REQ;
5698 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5699 while (exit_cnt < 5) {
5700 val64 = readq(&bar0->spi_control);
5701 if (val64 & SPI_CONTROL_NACK) {
5702 ret = 1;
5703 break;
5704 } else if (val64 & SPI_CONTROL_DONE) {
5705 *data = readq(&bar0->spi_data);
5706 *data &= 0xffffff;
5707 ret = 0;
5708 break;
5710 msleep(50);
5711 exit_cnt++;
5714 return ret;
5718 * write_eeprom - actually writes the relevant part of the data value.
5719 * @sp : private member of the device structure, which is a pointer to the
5720 * s2io_nic structure.
5721 * @off : offset at which the data must be written
5722 * @data : The data that is to be written
5723 * @cnt : Number of bytes of the data that are actually to be written into
5724 * the Eeprom. (max of 3)
5725 * Description:
5726 * Actually writes the relevant part of the data value into the Eeprom
5727 * through the I2C bus.
5728 * Return value:
5729 * 0 on success, -1 on failure.
5732 static int write_eeprom(struct s2io_nic *sp, int off, u64 data, int cnt)
5734 int exit_cnt = 0, ret = -1;
5735 u64 val64;
5736 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5738 if (sp->device_type == XFRAME_I_DEVICE) {
5739 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
5740 I2C_CONTROL_ADDR(off) |
5741 I2C_CONTROL_BYTE_CNT(cnt) |
5742 I2C_CONTROL_SET_DATA((u32)data) |
5743 I2C_CONTROL_CNTL_START;
5744 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5746 while (exit_cnt < 5) {
5747 val64 = readq(&bar0->i2c_control);
5748 if (I2C_CONTROL_CNTL_END(val64)) {
5749 if (!(val64 & I2C_CONTROL_NACK))
5750 ret = 0;
5751 break;
5753 msleep(50);
5754 exit_cnt++;
5758 if (sp->device_type == XFRAME_II_DEVICE) {
5759 int write_cnt = (cnt == 8) ? 0 : cnt;
5760 writeq(SPI_DATA_WRITE(data, (cnt << 3)), &bar0->spi_data);
5762 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5763 SPI_CONTROL_BYTECNT(write_cnt) |
5764 SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
5765 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5766 val64 |= SPI_CONTROL_REQ;
5767 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5768 while (exit_cnt < 5) {
5769 val64 = readq(&bar0->spi_control);
5770 if (val64 & SPI_CONTROL_NACK) {
5771 ret = 1;
5772 break;
5773 } else if (val64 & SPI_CONTROL_DONE) {
5774 ret = 0;
5775 break;
5777 msleep(50);
5778 exit_cnt++;
5781 return ret;
5783 static void s2io_vpd_read(struct s2io_nic *nic)
5785 u8 *vpd_data;
5786 u8 data;
5787 int i = 0, cnt, len, fail = 0;
5788 int vpd_addr = 0x80;
5789 struct swStat *swstats = &nic->mac_control.stats_info->sw_stat;
5791 if (nic->device_type == XFRAME_II_DEVICE) {
5792 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5793 vpd_addr = 0x80;
5794 } else {
5795 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5796 vpd_addr = 0x50;
5798 strcpy(nic->serial_num, "NOT AVAILABLE");
5800 vpd_data = kmalloc(256, GFP_KERNEL);
5801 if (!vpd_data) {
5802 swstats->mem_alloc_fail_cnt++;
5803 return;
5805 swstats->mem_allocated += 256;
5807 for (i = 0; i < 256; i += 4) {
5808 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5809 pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
5810 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
5811 for (cnt = 0; cnt < 5; cnt++) {
5812 msleep(2);
5813 pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5814 if (data == 0x80)
5815 break;
5817 if (cnt >= 5) {
5818 DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5819 fail = 1;
5820 break;
5822 pci_read_config_dword(nic->pdev, (vpd_addr + 4),
5823 (u32 *)&vpd_data[i]);
5826 if (!fail) {
5827 /* read serial number of adapter */
5828 for (cnt = 0; cnt < 252; cnt++) {
5829 if ((vpd_data[cnt] == 'S') &&
5830 (vpd_data[cnt+1] == 'N')) {
5831 len = vpd_data[cnt+2];
5832 if (len < min(VPD_STRING_LEN, 256-cnt-2)) {
5833 memcpy(nic->serial_num,
5834 &vpd_data[cnt + 3],
5835 len);
5836 memset(nic->serial_num+len,
5838 VPD_STRING_LEN-len);
5839 break;
5845 if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
5846 len = vpd_data[1];
5847 memcpy(nic->product_name, &vpd_data[3], len);
5848 nic->product_name[len] = 0;
5850 kfree(vpd_data);
5851 swstats->mem_freed += 256;
5855 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
5856 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
5857 * @eeprom : pointer to the user level structure provided by ethtool,
5858 * containing all relevant information.
5859 * @data_buf : user defined value to be written into Eeprom.
5860 * Description: Reads the values stored in the Eeprom at given offset
5861 * for a given length. Stores these values int the input argument data
5862 * buffer 'data_buf' and returns these to the caller (ethtool.)
5863 * Return value:
5864 * int 0 on success
5867 static int s2io_ethtool_geeprom(struct net_device *dev,
5868 struct ethtool_eeprom *eeprom, u8 * data_buf)
5870 u32 i, valid;
5871 u64 data;
5872 struct s2io_nic *sp = netdev_priv(dev);
5874 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5876 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5877 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5879 for (i = 0; i < eeprom->len; i += 4) {
5880 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5881 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5882 return -EFAULT;
5884 valid = INV(data);
5885 memcpy((data_buf + i), &valid, 4);
5887 return 0;
5891 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5892 * @sp : private member of the device structure, which is a pointer to the
5893 * s2io_nic structure.
5894 * @eeprom : pointer to the user level structure provided by ethtool,
5895 * containing all relevant information.
5896 * @data_buf ; user defined value to be written into Eeprom.
5897 * Description:
5898 * Tries to write the user provided value in the Eeprom, at the offset
5899 * given by the user.
5900 * Return value:
5901 * 0 on success, -EFAULT on failure.
5904 static int s2io_ethtool_seeprom(struct net_device *dev,
5905 struct ethtool_eeprom *eeprom,
5906 u8 *data_buf)
5908 int len = eeprom->len, cnt = 0;
5909 u64 valid = 0, data;
5910 struct s2io_nic *sp = netdev_priv(dev);
5912 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5913 DBG_PRINT(ERR_DBG,
5914 "ETHTOOL_WRITE_EEPROM Err: "
5915 "Magic value is wrong, it is 0x%x should be 0x%x\n",
5916 (sp->pdev->vendor | (sp->pdev->device << 16)),
5917 eeprom->magic);
5918 return -EFAULT;
5921 while (len) {
5922 data = (u32)data_buf[cnt] & 0x000000FF;
5923 if (data)
5924 valid = (u32)(data << 24);
5925 else
5926 valid = data;
5928 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5929 DBG_PRINT(ERR_DBG,
5930 "ETHTOOL_WRITE_EEPROM Err: "
5931 "Cannot write into the specified offset\n");
5932 return -EFAULT;
5934 cnt++;
5935 len--;
5938 return 0;
5942 * s2io_register_test - reads and writes into all clock domains.
5943 * @sp : private member of the device structure, which is a pointer to the
5944 * s2io_nic structure.
5945 * @data : variable that returns the result of each of the test conducted b
5946 * by the driver.
5947 * Description:
5948 * Read and write into all clock domains. The NIC has 3 clock domains,
5949 * see that registers in all the three regions are accessible.
5950 * Return value:
5951 * 0 on success.
5954 static int s2io_register_test(struct s2io_nic *sp, uint64_t *data)
5956 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5957 u64 val64 = 0, exp_val;
5958 int fail = 0;
5960 val64 = readq(&bar0->pif_rd_swapper_fb);
5961 if (val64 != 0x123456789abcdefULL) {
5962 fail = 1;
5963 DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 1);
5966 val64 = readq(&bar0->rmac_pause_cfg);
5967 if (val64 != 0xc000ffff00000000ULL) {
5968 fail = 1;
5969 DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 2);
5972 val64 = readq(&bar0->rx_queue_cfg);
5973 if (sp->device_type == XFRAME_II_DEVICE)
5974 exp_val = 0x0404040404040404ULL;
5975 else
5976 exp_val = 0x0808080808080808ULL;
5977 if (val64 != exp_val) {
5978 fail = 1;
5979 DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 3);
5982 val64 = readq(&bar0->xgxs_efifo_cfg);
5983 if (val64 != 0x000000001923141EULL) {
5984 fail = 1;
5985 DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 4);
5988 val64 = 0x5A5A5A5A5A5A5A5AULL;
5989 writeq(val64, &bar0->xmsi_data);
5990 val64 = readq(&bar0->xmsi_data);
5991 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5992 fail = 1;
5993 DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 1);
5996 val64 = 0xA5A5A5A5A5A5A5A5ULL;
5997 writeq(val64, &bar0->xmsi_data);
5998 val64 = readq(&bar0->xmsi_data);
5999 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
6000 fail = 1;
6001 DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 2);
6004 *data = fail;
6005 return fail;
6009 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
6010 * @sp : private member of the device structure, which is a pointer to the
6011 * s2io_nic structure.
6012 * @data:variable that returns the result of each of the test conducted by
6013 * the driver.
6014 * Description:
6015 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
6016 * register.
6017 * Return value:
6018 * 0 on success.
6021 static int s2io_eeprom_test(struct s2io_nic *sp, uint64_t *data)
6023 int fail = 0;
6024 u64 ret_data, org_4F0, org_7F0;
6025 u8 saved_4F0 = 0, saved_7F0 = 0;
6026 struct net_device *dev = sp->dev;
6028 /* Test Write Error at offset 0 */
6029 /* Note that SPI interface allows write access to all areas
6030 * of EEPROM. Hence doing all negative testing only for Xframe I.
6032 if (sp->device_type == XFRAME_I_DEVICE)
6033 if (!write_eeprom(sp, 0, 0, 3))
6034 fail = 1;
6036 /* Save current values at offsets 0x4F0 and 0x7F0 */
6037 if (!read_eeprom(sp, 0x4F0, &org_4F0))
6038 saved_4F0 = 1;
6039 if (!read_eeprom(sp, 0x7F0, &org_7F0))
6040 saved_7F0 = 1;
6042 /* Test Write at offset 4f0 */
6043 if (write_eeprom(sp, 0x4F0, 0x012345, 3))
6044 fail = 1;
6045 if (read_eeprom(sp, 0x4F0, &ret_data))
6046 fail = 1;
6048 if (ret_data != 0x012345) {
6049 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
6050 "Data written %llx Data read %llx\n",
6051 dev->name, (unsigned long long)0x12345,
6052 (unsigned long long)ret_data);
6053 fail = 1;
6056 /* Reset the EEPROM data go FFFF */
6057 write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
6059 /* Test Write Request Error at offset 0x7c */
6060 if (sp->device_type == XFRAME_I_DEVICE)
6061 if (!write_eeprom(sp, 0x07C, 0, 3))
6062 fail = 1;
6064 /* Test Write Request at offset 0x7f0 */
6065 if (write_eeprom(sp, 0x7F0, 0x012345, 3))
6066 fail = 1;
6067 if (read_eeprom(sp, 0x7F0, &ret_data))
6068 fail = 1;
6070 if (ret_data != 0x012345) {
6071 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
6072 "Data written %llx Data read %llx\n",
6073 dev->name, (unsigned long long)0x12345,
6074 (unsigned long long)ret_data);
6075 fail = 1;
6078 /* Reset the EEPROM data go FFFF */
6079 write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
6081 if (sp->device_type == XFRAME_I_DEVICE) {
6082 /* Test Write Error at offset 0x80 */
6083 if (!write_eeprom(sp, 0x080, 0, 3))
6084 fail = 1;
6086 /* Test Write Error at offset 0xfc */
6087 if (!write_eeprom(sp, 0x0FC, 0, 3))
6088 fail = 1;
6090 /* Test Write Error at offset 0x100 */
6091 if (!write_eeprom(sp, 0x100, 0, 3))
6092 fail = 1;
6094 /* Test Write Error at offset 4ec */
6095 if (!write_eeprom(sp, 0x4EC, 0, 3))
6096 fail = 1;
6099 /* Restore values at offsets 0x4F0 and 0x7F0 */
6100 if (saved_4F0)
6101 write_eeprom(sp, 0x4F0, org_4F0, 3);
6102 if (saved_7F0)
6103 write_eeprom(sp, 0x7F0, org_7F0, 3);
6105 *data = fail;
6106 return fail;
6110 * s2io_bist_test - invokes the MemBist test of the card .
6111 * @sp : private member of the device structure, which is a pointer to the
6112 * s2io_nic structure.
6113 * @data:variable that returns the result of each of the test conducted by
6114 * the driver.
6115 * Description:
6116 * This invokes the MemBist test of the card. We give around
6117 * 2 secs time for the Test to complete. If it's still not complete
6118 * within this peiod, we consider that the test failed.
6119 * Return value:
6120 * 0 on success and -1 on failure.
6123 static int s2io_bist_test(struct s2io_nic *sp, uint64_t *data)
6125 u8 bist = 0;
6126 int cnt = 0, ret = -1;
6128 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6129 bist |= PCI_BIST_START;
6130 pci_write_config_word(sp->pdev, PCI_BIST, bist);
6132 while (cnt < 20) {
6133 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6134 if (!(bist & PCI_BIST_START)) {
6135 *data = (bist & PCI_BIST_CODE_MASK);
6136 ret = 0;
6137 break;
6139 msleep(100);
6140 cnt++;
6143 return ret;
6147 * s2io-link_test - verifies the link state of the nic
6148 * @sp ; private member of the device structure, which is a pointer to the
6149 * s2io_nic structure.
6150 * @data: variable that returns the result of each of the test conducted by
6151 * the driver.
6152 * Description:
6153 * The function verifies the link state of the NIC and updates the input
6154 * argument 'data' appropriately.
6155 * Return value:
6156 * 0 on success.
6159 static int s2io_link_test(struct s2io_nic *sp, uint64_t *data)
6161 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6162 u64 val64;
6164 val64 = readq(&bar0->adapter_status);
6165 if (!(LINK_IS_UP(val64)))
6166 *data = 1;
6167 else
6168 *data = 0;
6170 return *data;
6174 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
6175 * @sp - private member of the device structure, which is a pointer to the
6176 * s2io_nic structure.
6177 * @data - variable that returns the result of each of the test
6178 * conducted by the driver.
6179 * Description:
6180 * This is one of the offline test that tests the read and write
6181 * access to the RldRam chip on the NIC.
6182 * Return value:
6183 * 0 on success.
6186 static int s2io_rldram_test(struct s2io_nic *sp, uint64_t *data)
6188 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6189 u64 val64;
6190 int cnt, iteration = 0, test_fail = 0;
6192 val64 = readq(&bar0->adapter_control);
6193 val64 &= ~ADAPTER_ECC_EN;
6194 writeq(val64, &bar0->adapter_control);
6196 val64 = readq(&bar0->mc_rldram_test_ctrl);
6197 val64 |= MC_RLDRAM_TEST_MODE;
6198 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6200 val64 = readq(&bar0->mc_rldram_mrs);
6201 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
6202 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
6204 val64 |= MC_RLDRAM_MRS_ENABLE;
6205 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
6207 while (iteration < 2) {
6208 val64 = 0x55555555aaaa0000ULL;
6209 if (iteration == 1)
6210 val64 ^= 0xFFFFFFFFFFFF0000ULL;
6211 writeq(val64, &bar0->mc_rldram_test_d0);
6213 val64 = 0xaaaa5a5555550000ULL;
6214 if (iteration == 1)
6215 val64 ^= 0xFFFFFFFFFFFF0000ULL;
6216 writeq(val64, &bar0->mc_rldram_test_d1);
6218 val64 = 0x55aaaaaaaa5a0000ULL;
6219 if (iteration == 1)
6220 val64 ^= 0xFFFFFFFFFFFF0000ULL;
6221 writeq(val64, &bar0->mc_rldram_test_d2);
6223 val64 = (u64) (0x0000003ffffe0100ULL);
6224 writeq(val64, &bar0->mc_rldram_test_add);
6226 val64 = MC_RLDRAM_TEST_MODE |
6227 MC_RLDRAM_TEST_WRITE |
6228 MC_RLDRAM_TEST_GO;
6229 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6231 for (cnt = 0; cnt < 5; cnt++) {
6232 val64 = readq(&bar0->mc_rldram_test_ctrl);
6233 if (val64 & MC_RLDRAM_TEST_DONE)
6234 break;
6235 msleep(200);
6238 if (cnt == 5)
6239 break;
6241 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
6242 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6244 for (cnt = 0; cnt < 5; cnt++) {
6245 val64 = readq(&bar0->mc_rldram_test_ctrl);
6246 if (val64 & MC_RLDRAM_TEST_DONE)
6247 break;
6248 msleep(500);
6251 if (cnt == 5)
6252 break;
6254 val64 = readq(&bar0->mc_rldram_test_ctrl);
6255 if (!(val64 & MC_RLDRAM_TEST_PASS))
6256 test_fail = 1;
6258 iteration++;
6261 *data = test_fail;
6263 /* Bring the adapter out of test mode */
6264 SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
6266 return test_fail;
6270 * s2io_ethtool_test - conducts 6 tsets to determine the health of card.
6271 * @sp : private member of the device structure, which is a pointer to the
6272 * s2io_nic structure.
6273 * @ethtest : pointer to a ethtool command specific structure that will be
6274 * returned to the user.
6275 * @data : variable that returns the result of each of the test
6276 * conducted by the driver.
6277 * Description:
6278 * This function conducts 6 tests ( 4 offline and 2 online) to determine
6279 * the health of the card.
6280 * Return value:
6281 * void
6284 static void s2io_ethtool_test(struct net_device *dev,
6285 struct ethtool_test *ethtest,
6286 uint64_t *data)
6288 struct s2io_nic *sp = netdev_priv(dev);
6289 int orig_state = netif_running(sp->dev);
6291 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
6292 /* Offline Tests. */
6293 if (orig_state)
6294 s2io_close(sp->dev);
6296 if (s2io_register_test(sp, &data[0]))
6297 ethtest->flags |= ETH_TEST_FL_FAILED;
6299 s2io_reset(sp);
6301 if (s2io_rldram_test(sp, &data[3]))
6302 ethtest->flags |= ETH_TEST_FL_FAILED;
6304 s2io_reset(sp);
6306 if (s2io_eeprom_test(sp, &data[1]))
6307 ethtest->flags |= ETH_TEST_FL_FAILED;
6309 if (s2io_bist_test(sp, &data[4]))
6310 ethtest->flags |= ETH_TEST_FL_FAILED;
6312 if (orig_state)
6313 s2io_open(sp->dev);
6315 data[2] = 0;
6316 } else {
6317 /* Online Tests. */
6318 if (!orig_state) {
6319 DBG_PRINT(ERR_DBG, "%s: is not up, cannot run test\n",
6320 dev->name);
6321 data[0] = -1;
6322 data[1] = -1;
6323 data[2] = -1;
6324 data[3] = -1;
6325 data[4] = -1;
6328 if (s2io_link_test(sp, &data[2]))
6329 ethtest->flags |= ETH_TEST_FL_FAILED;
6331 data[0] = 0;
6332 data[1] = 0;
6333 data[3] = 0;
6334 data[4] = 0;
6338 static void s2io_get_ethtool_stats(struct net_device *dev,
6339 struct ethtool_stats *estats,
6340 u64 *tmp_stats)
6342 int i = 0, k;
6343 struct s2io_nic *sp = netdev_priv(dev);
6344 struct stat_block *stats = sp->mac_control.stats_info;
6345 struct swStat *swstats = &stats->sw_stat;
6346 struct xpakStat *xstats = &stats->xpak_stat;
6348 s2io_updt_stats(sp);
6349 tmp_stats[i++] =
6350 (u64)le32_to_cpu(stats->tmac_frms_oflow) << 32 |
6351 le32_to_cpu(stats->tmac_frms);
6352 tmp_stats[i++] =
6353 (u64)le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
6354 le32_to_cpu(stats->tmac_data_octets);
6355 tmp_stats[i++] = le64_to_cpu(stats->tmac_drop_frms);
6356 tmp_stats[i++] =
6357 (u64)le32_to_cpu(stats->tmac_mcst_frms_oflow) << 32 |
6358 le32_to_cpu(stats->tmac_mcst_frms);
6359 tmp_stats[i++] =
6360 (u64)le32_to_cpu(stats->tmac_bcst_frms_oflow) << 32 |
6361 le32_to_cpu(stats->tmac_bcst_frms);
6362 tmp_stats[i++] = le64_to_cpu(stats->tmac_pause_ctrl_frms);
6363 tmp_stats[i++] =
6364 (u64)le32_to_cpu(stats->tmac_ttl_octets_oflow) << 32 |
6365 le32_to_cpu(stats->tmac_ttl_octets);
6366 tmp_stats[i++] =
6367 (u64)le32_to_cpu(stats->tmac_ucst_frms_oflow) << 32 |
6368 le32_to_cpu(stats->tmac_ucst_frms);
6369 tmp_stats[i++] =
6370 (u64)le32_to_cpu(stats->tmac_nucst_frms_oflow) << 32 |
6371 le32_to_cpu(stats->tmac_nucst_frms);
6372 tmp_stats[i++] =
6373 (u64)le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
6374 le32_to_cpu(stats->tmac_any_err_frms);
6375 tmp_stats[i++] = le64_to_cpu(stats->tmac_ttl_less_fb_octets);
6376 tmp_stats[i++] = le64_to_cpu(stats->tmac_vld_ip_octets);
6377 tmp_stats[i++] =
6378 (u64)le32_to_cpu(stats->tmac_vld_ip_oflow) << 32 |
6379 le32_to_cpu(stats->tmac_vld_ip);
6380 tmp_stats[i++] =
6381 (u64)le32_to_cpu(stats->tmac_drop_ip_oflow) << 32 |
6382 le32_to_cpu(stats->tmac_drop_ip);
6383 tmp_stats[i++] =
6384 (u64)le32_to_cpu(stats->tmac_icmp_oflow) << 32 |
6385 le32_to_cpu(stats->tmac_icmp);
6386 tmp_stats[i++] =
6387 (u64)le32_to_cpu(stats->tmac_rst_tcp_oflow) << 32 |
6388 le32_to_cpu(stats->tmac_rst_tcp);
6389 tmp_stats[i++] = le64_to_cpu(stats->tmac_tcp);
6390 tmp_stats[i++] = (u64)le32_to_cpu(stats->tmac_udp_oflow) << 32 |
6391 le32_to_cpu(stats->tmac_udp);
6392 tmp_stats[i++] =
6393 (u64)le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
6394 le32_to_cpu(stats->rmac_vld_frms);
6395 tmp_stats[i++] =
6396 (u64)le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
6397 le32_to_cpu(stats->rmac_data_octets);
6398 tmp_stats[i++] = le64_to_cpu(stats->rmac_fcs_err_frms);
6399 tmp_stats[i++] = le64_to_cpu(stats->rmac_drop_frms);
6400 tmp_stats[i++] =
6401 (u64)le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
6402 le32_to_cpu(stats->rmac_vld_mcst_frms);
6403 tmp_stats[i++] =
6404 (u64)le32_to_cpu(stats->rmac_vld_bcst_frms_oflow) << 32 |
6405 le32_to_cpu(stats->rmac_vld_bcst_frms);
6406 tmp_stats[i++] = le32_to_cpu(stats->rmac_in_rng_len_err_frms);
6407 tmp_stats[i++] = le32_to_cpu(stats->rmac_out_rng_len_err_frms);
6408 tmp_stats[i++] = le64_to_cpu(stats->rmac_long_frms);
6409 tmp_stats[i++] = le64_to_cpu(stats->rmac_pause_ctrl_frms);
6410 tmp_stats[i++] = le64_to_cpu(stats->rmac_unsup_ctrl_frms);
6411 tmp_stats[i++] =
6412 (u64)le32_to_cpu(stats->rmac_ttl_octets_oflow) << 32 |
6413 le32_to_cpu(stats->rmac_ttl_octets);
6414 tmp_stats[i++] =
6415 (u64)le32_to_cpu(stats->rmac_accepted_ucst_frms_oflow) << 32
6416 | le32_to_cpu(stats->rmac_accepted_ucst_frms);
6417 tmp_stats[i++] =
6418 (u64)le32_to_cpu(stats->rmac_accepted_nucst_frms_oflow)
6419 << 32 | le32_to_cpu(stats->rmac_accepted_nucst_frms);
6420 tmp_stats[i++] =
6421 (u64)le32_to_cpu(stats->rmac_discarded_frms_oflow) << 32 |
6422 le32_to_cpu(stats->rmac_discarded_frms);
6423 tmp_stats[i++] =
6424 (u64)le32_to_cpu(stats->rmac_drop_events_oflow)
6425 << 32 | le32_to_cpu(stats->rmac_drop_events);
6426 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_less_fb_octets);
6427 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_frms);
6428 tmp_stats[i++] =
6429 (u64)le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
6430 le32_to_cpu(stats->rmac_usized_frms);
6431 tmp_stats[i++] =
6432 (u64)le32_to_cpu(stats->rmac_osized_frms_oflow) << 32 |
6433 le32_to_cpu(stats->rmac_osized_frms);
6434 tmp_stats[i++] =
6435 (u64)le32_to_cpu(stats->rmac_frag_frms_oflow) << 32 |
6436 le32_to_cpu(stats->rmac_frag_frms);
6437 tmp_stats[i++] =
6438 (u64)le32_to_cpu(stats->rmac_jabber_frms_oflow) << 32 |
6439 le32_to_cpu(stats->rmac_jabber_frms);
6440 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_64_frms);
6441 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_65_127_frms);
6442 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_128_255_frms);
6443 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_256_511_frms);
6444 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_512_1023_frms);
6445 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_1024_1518_frms);
6446 tmp_stats[i++] =
6447 (u64)le32_to_cpu(stats->rmac_ip_oflow) << 32 |
6448 le32_to_cpu(stats->rmac_ip);
6449 tmp_stats[i++] = le64_to_cpu(stats->rmac_ip_octets);
6450 tmp_stats[i++] = le32_to_cpu(stats->rmac_hdr_err_ip);
6451 tmp_stats[i++] =
6452 (u64)le32_to_cpu(stats->rmac_drop_ip_oflow) << 32 |
6453 le32_to_cpu(stats->rmac_drop_ip);
6454 tmp_stats[i++] =
6455 (u64)le32_to_cpu(stats->rmac_icmp_oflow) << 32 |
6456 le32_to_cpu(stats->rmac_icmp);
6457 tmp_stats[i++] = le64_to_cpu(stats->rmac_tcp);
6458 tmp_stats[i++] =
6459 (u64)le32_to_cpu(stats->rmac_udp_oflow) << 32 |
6460 le32_to_cpu(stats->rmac_udp);
6461 tmp_stats[i++] =
6462 (u64)le32_to_cpu(stats->rmac_err_drp_udp_oflow) << 32 |
6463 le32_to_cpu(stats->rmac_err_drp_udp);
6464 tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_err_sym);
6465 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q0);
6466 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q1);
6467 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q2);
6468 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q3);
6469 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q4);
6470 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q5);
6471 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q6);
6472 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q7);
6473 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q0);
6474 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q1);
6475 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q2);
6476 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q3);
6477 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q4);
6478 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q5);
6479 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q6);
6480 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q7);
6481 tmp_stats[i++] =
6482 (u64)le32_to_cpu(stats->rmac_pause_cnt_oflow) << 32 |
6483 le32_to_cpu(stats->rmac_pause_cnt);
6484 tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_data_err_cnt);
6485 tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_ctrl_err_cnt);
6486 tmp_stats[i++] =
6487 (u64)le32_to_cpu(stats->rmac_accepted_ip_oflow) << 32 |
6488 le32_to_cpu(stats->rmac_accepted_ip);
6489 tmp_stats[i++] = le32_to_cpu(stats->rmac_err_tcp);
6490 tmp_stats[i++] = le32_to_cpu(stats->rd_req_cnt);
6491 tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_cnt);
6492 tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_rtry_cnt);
6493 tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_cnt);
6494 tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_rd_ack_cnt);
6495 tmp_stats[i++] = le32_to_cpu(stats->wr_req_cnt);
6496 tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_cnt);
6497 tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_rtry_cnt);
6498 tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_cnt);
6499 tmp_stats[i++] = le32_to_cpu(stats->wr_disc_cnt);
6500 tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_wr_ack_cnt);
6501 tmp_stats[i++] = le32_to_cpu(stats->txp_wr_cnt);
6502 tmp_stats[i++] = le32_to_cpu(stats->txd_rd_cnt);
6503 tmp_stats[i++] = le32_to_cpu(stats->txd_wr_cnt);
6504 tmp_stats[i++] = le32_to_cpu(stats->rxd_rd_cnt);
6505 tmp_stats[i++] = le32_to_cpu(stats->rxd_wr_cnt);
6506 tmp_stats[i++] = le32_to_cpu(stats->txf_rd_cnt);
6507 tmp_stats[i++] = le32_to_cpu(stats->rxf_wr_cnt);
6509 /* Enhanced statistics exist only for Hercules */
6510 if (sp->device_type == XFRAME_II_DEVICE) {
6511 tmp_stats[i++] =
6512 le64_to_cpu(stats->rmac_ttl_1519_4095_frms);
6513 tmp_stats[i++] =
6514 le64_to_cpu(stats->rmac_ttl_4096_8191_frms);
6515 tmp_stats[i++] =
6516 le64_to_cpu(stats->rmac_ttl_8192_max_frms);
6517 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_gt_max_frms);
6518 tmp_stats[i++] = le64_to_cpu(stats->rmac_osized_alt_frms);
6519 tmp_stats[i++] = le64_to_cpu(stats->rmac_jabber_alt_frms);
6520 tmp_stats[i++] = le64_to_cpu(stats->rmac_gt_max_alt_frms);
6521 tmp_stats[i++] = le64_to_cpu(stats->rmac_vlan_frms);
6522 tmp_stats[i++] = le32_to_cpu(stats->rmac_len_discard);
6523 tmp_stats[i++] = le32_to_cpu(stats->rmac_fcs_discard);
6524 tmp_stats[i++] = le32_to_cpu(stats->rmac_pf_discard);
6525 tmp_stats[i++] = le32_to_cpu(stats->rmac_da_discard);
6526 tmp_stats[i++] = le32_to_cpu(stats->rmac_red_discard);
6527 tmp_stats[i++] = le32_to_cpu(stats->rmac_rts_discard);
6528 tmp_stats[i++] = le32_to_cpu(stats->rmac_ingm_full_discard);
6529 tmp_stats[i++] = le32_to_cpu(stats->link_fault_cnt);
6532 tmp_stats[i++] = 0;
6533 tmp_stats[i++] = swstats->single_ecc_errs;
6534 tmp_stats[i++] = swstats->double_ecc_errs;
6535 tmp_stats[i++] = swstats->parity_err_cnt;
6536 tmp_stats[i++] = swstats->serious_err_cnt;
6537 tmp_stats[i++] = swstats->soft_reset_cnt;
6538 tmp_stats[i++] = swstats->fifo_full_cnt;
6539 for (k = 0; k < MAX_RX_RINGS; k++)
6540 tmp_stats[i++] = swstats->ring_full_cnt[k];
6541 tmp_stats[i++] = xstats->alarm_transceiver_temp_high;
6542 tmp_stats[i++] = xstats->alarm_transceiver_temp_low;
6543 tmp_stats[i++] = xstats->alarm_laser_bias_current_high;
6544 tmp_stats[i++] = xstats->alarm_laser_bias_current_low;
6545 tmp_stats[i++] = xstats->alarm_laser_output_power_high;
6546 tmp_stats[i++] = xstats->alarm_laser_output_power_low;
6547 tmp_stats[i++] = xstats->warn_transceiver_temp_high;
6548 tmp_stats[i++] = xstats->warn_transceiver_temp_low;
6549 tmp_stats[i++] = xstats->warn_laser_bias_current_high;
6550 tmp_stats[i++] = xstats->warn_laser_bias_current_low;
6551 tmp_stats[i++] = xstats->warn_laser_output_power_high;
6552 tmp_stats[i++] = xstats->warn_laser_output_power_low;
6553 tmp_stats[i++] = swstats->clubbed_frms_cnt;
6554 tmp_stats[i++] = swstats->sending_both;
6555 tmp_stats[i++] = swstats->outof_sequence_pkts;
6556 tmp_stats[i++] = swstats->flush_max_pkts;
6557 if (swstats->num_aggregations) {
6558 u64 tmp = swstats->sum_avg_pkts_aggregated;
6559 int count = 0;
6561 * Since 64-bit divide does not work on all platforms,
6562 * do repeated subtraction.
6564 while (tmp >= swstats->num_aggregations) {
6565 tmp -= swstats->num_aggregations;
6566 count++;
6568 tmp_stats[i++] = count;
6569 } else
6570 tmp_stats[i++] = 0;
6571 tmp_stats[i++] = swstats->mem_alloc_fail_cnt;
6572 tmp_stats[i++] = swstats->pci_map_fail_cnt;
6573 tmp_stats[i++] = swstats->watchdog_timer_cnt;
6574 tmp_stats[i++] = swstats->mem_allocated;
6575 tmp_stats[i++] = swstats->mem_freed;
6576 tmp_stats[i++] = swstats->link_up_cnt;
6577 tmp_stats[i++] = swstats->link_down_cnt;
6578 tmp_stats[i++] = swstats->link_up_time;
6579 tmp_stats[i++] = swstats->link_down_time;
6581 tmp_stats[i++] = swstats->tx_buf_abort_cnt;
6582 tmp_stats[i++] = swstats->tx_desc_abort_cnt;
6583 tmp_stats[i++] = swstats->tx_parity_err_cnt;
6584 tmp_stats[i++] = swstats->tx_link_loss_cnt;
6585 tmp_stats[i++] = swstats->tx_list_proc_err_cnt;
6587 tmp_stats[i++] = swstats->rx_parity_err_cnt;
6588 tmp_stats[i++] = swstats->rx_abort_cnt;
6589 tmp_stats[i++] = swstats->rx_parity_abort_cnt;
6590 tmp_stats[i++] = swstats->rx_rda_fail_cnt;
6591 tmp_stats[i++] = swstats->rx_unkn_prot_cnt;
6592 tmp_stats[i++] = swstats->rx_fcs_err_cnt;
6593 tmp_stats[i++] = swstats->rx_buf_size_err_cnt;
6594 tmp_stats[i++] = swstats->rx_rxd_corrupt_cnt;
6595 tmp_stats[i++] = swstats->rx_unkn_err_cnt;
6596 tmp_stats[i++] = swstats->tda_err_cnt;
6597 tmp_stats[i++] = swstats->pfc_err_cnt;
6598 tmp_stats[i++] = swstats->pcc_err_cnt;
6599 tmp_stats[i++] = swstats->tti_err_cnt;
6600 tmp_stats[i++] = swstats->tpa_err_cnt;
6601 tmp_stats[i++] = swstats->sm_err_cnt;
6602 tmp_stats[i++] = swstats->lso_err_cnt;
6603 tmp_stats[i++] = swstats->mac_tmac_err_cnt;
6604 tmp_stats[i++] = swstats->mac_rmac_err_cnt;
6605 tmp_stats[i++] = swstats->xgxs_txgxs_err_cnt;
6606 tmp_stats[i++] = swstats->xgxs_rxgxs_err_cnt;
6607 tmp_stats[i++] = swstats->rc_err_cnt;
6608 tmp_stats[i++] = swstats->prc_pcix_err_cnt;
6609 tmp_stats[i++] = swstats->rpa_err_cnt;
6610 tmp_stats[i++] = swstats->rda_err_cnt;
6611 tmp_stats[i++] = swstats->rti_err_cnt;
6612 tmp_stats[i++] = swstats->mc_err_cnt;
/* Return the size in bytes of the register space dumped via ethtool -d. */
static int s2io_ethtool_get_regs_len(struct net_device *dev)
{
	return XENA_REG_SPACE;
}
/* Report whether Rx checksum offload is currently enabled (ethtool hook). */
static u32 s2io_ethtool_get_rx_csum(struct net_device *dev)
{
	struct s2io_nic *sp = netdev_priv(dev);

	return sp->rx_csum;
}
6628 static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
6630 struct s2io_nic *sp = netdev_priv(dev);
6632 if (data)
6633 sp->rx_csum = 1;
6634 else
6635 sp->rx_csum = 0;
6637 return 0;
/* Return the size in bytes of the on-board EEPROM exposed via ethtool. */
static int s2io_get_eeprom_len(struct net_device *dev)
{
	return XENA_EEPROM_SPACE;
}
6645 static int s2io_get_sset_count(struct net_device *dev, int sset)
6647 struct s2io_nic *sp = netdev_priv(dev);
6649 switch (sset) {
6650 case ETH_SS_TEST:
6651 return S2IO_TEST_LEN;
6652 case ETH_SS_STATS:
6653 switch (sp->device_type) {
6654 case XFRAME_I_DEVICE:
6655 return XFRAME_I_STAT_LEN;
6656 case XFRAME_II_DEVICE:
6657 return XFRAME_II_STAT_LEN;
6658 default:
6659 return 0;
6661 default:
6662 return -EOPNOTSUPP;
6666 static void s2io_ethtool_get_strings(struct net_device *dev,
6667 u32 stringset, u8 *data)
6669 int stat_size = 0;
6670 struct s2io_nic *sp = netdev_priv(dev);
6672 switch (stringset) {
6673 case ETH_SS_TEST:
6674 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6675 break;
6676 case ETH_SS_STATS:
6677 stat_size = sizeof(ethtool_xena_stats_keys);
6678 memcpy(data, &ethtool_xena_stats_keys, stat_size);
6679 if (sp->device_type == XFRAME_II_DEVICE) {
6680 memcpy(data + stat_size,
6681 &ethtool_enhanced_stats_keys,
6682 sizeof(ethtool_enhanced_stats_keys));
6683 stat_size += sizeof(ethtool_enhanced_stats_keys);
6686 memcpy(data + stat_size, &ethtool_driver_stats_keys,
6687 sizeof(ethtool_driver_stats_keys));
6691 static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
6693 if (data)
6694 dev->features |= NETIF_F_IP_CSUM;
6695 else
6696 dev->features &= ~NETIF_F_IP_CSUM;
6698 return 0;
/* Report whether TCP segmentation offload is enabled (1) or not (0). */
static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
{
	return (dev->features & NETIF_F_TSO) != 0;
}
6706 static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
6708 if (data)
6709 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
6710 else
6711 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
6713 return 0;
6716 static int s2io_ethtool_set_flags(struct net_device *dev, u32 data)
6718 struct s2io_nic *sp = netdev_priv(dev);
6719 int rc = 0;
6720 int changed = 0;
6722 if (ethtool_invalid_flags(dev, data, ETH_FLAG_LRO))
6723 return -EINVAL;
6725 if (data & ETH_FLAG_LRO) {
6726 if (!(dev->features & NETIF_F_LRO)) {
6727 dev->features |= NETIF_F_LRO;
6728 changed = 1;
6730 } else if (dev->features & NETIF_F_LRO) {
6731 dev->features &= ~NETIF_F_LRO;
6732 changed = 1;
6735 if (changed && netif_running(dev)) {
6736 s2io_stop_all_tx_queue(sp);
6737 s2io_card_down(sp);
6738 rc = s2io_card_up(sp);
6739 if (rc)
6740 s2io_reset(sp);
6741 else
6742 s2io_start_all_tx_queue(sp);
6745 return rc;
/* ethtool entry points exported by this driver. */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_settings = s2io_ethtool_gset,
	.set_settings = s2io_ethtool_sset,
	.get_drvinfo = s2io_ethtool_gdrvinfo,
	.get_regs_len = s2io_ethtool_get_regs_len,
	.get_regs = s2io_ethtool_gregs,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = s2io_get_eeprom_len,
	.get_eeprom = s2io_ethtool_geeprom,
	.set_eeprom = s2io_ethtool_seeprom,
	.get_ringparam = s2io_ethtool_gringparam,
	.get_pauseparam = s2io_ethtool_getpause_data,
	.set_pauseparam = s2io_ethtool_setpause_data,
	.get_rx_csum = s2io_ethtool_get_rx_csum,
	.set_rx_csum = s2io_ethtool_set_rx_csum,
	.set_tx_csum = s2io_ethtool_op_set_tx_csum,
	.set_flags = s2io_ethtool_set_flags,
	.get_flags = ethtool_op_get_flags,
	.set_sg = ethtool_op_set_sg,
	.get_tso = s2io_ethtool_op_get_tso,
	.set_tso = s2io_ethtool_op_set_tso,
	.set_ufo = ethtool_op_set_ufo,
	.self_test = s2io_ethtool_test,
	.get_strings = s2io_ethtool_get_strings,
	.set_phys_id = s2io_ethtool_set_led,
	.get_ethtool_stats = s2io_get_ethtool_stats,
	.get_sset_count = s2io_get_sset_count,
};
6778 * s2io_ioctl - Entry point for the Ioctl
6779 * @dev : Device pointer.
 * @ifr : An IOCTL-specific structure that can contain a pointer to
6781 * a proprietary structure used to pass information to the driver.
6782 * @cmd : This is used to distinguish between the different commands that
6783 * can be passed to the IOCTL functions.
6784 * Description:
 * Currently no special functionality is supported via IOCTL, hence the
 * function always returns -EOPNOTSUPP.
static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	/* No private ioctls are implemented by this driver. */
	return -EOPNOTSUPP;
}
6795 * s2io_change_mtu - entry point to change MTU size for the device.
6796 * @dev : device pointer.
6797 * @new_mtu : the new MTU size for the device.
6798 * Description: A driver entry point to change MTU size for the device.
6799 * Before changing the MTU the device must be stopped.
6800 * Return value:
6801 * 0 on success and an appropriate (-)ve integer as defined in errno.h
6802 * file on failure.
static int s2io_change_mtu(struct net_device *dev, int new_mtu)
{
	struct s2io_nic *sp = netdev_priv(dev);
	int ret = 0;

	/* Reject MTUs outside the range the hardware supports. */
	if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
		DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n", dev->name);
		return -EPERM;
	}

	dev->mtu = new_mtu;
	if (netif_running(dev)) {
		/* Running interface: bounce the card so the Rx buffer
		 * sizing picks up the new MTU.
		 */
		s2io_stop_all_tx_queue(sp);
		s2io_card_down(sp);
		ret = s2io_card_up(sp);
		if (ret) {
			DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
				  __func__);
			return ret;
		}
		s2io_wake_all_tx_queue(sp);
	} else { /* Device is down */
		/* Program the MAC's max payload length register directly;
		 * the rest is picked up on the next bring-up.
		 */
		struct XENA_dev_config __iomem *bar0 = sp->bar0;
		u64 val64 = new_mtu;

		writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
	}

	return ret;
}
 * s2io_set_link - Set the link status
 * @work: work queue entry embedded in the device's private structure
 * Description: Sets the link status for the adapter
static void s2io_set_link(struct work_struct *work)
{
	struct s2io_nic *nic = container_of(work, struct s2io_nic,
					    set_link_task);
	struct net_device *dev = nic->dev;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64;
	u16 subid;

	rtnl_lock();

	/* Interface went down before the work item ran - nothing to do. */
	if (!netif_running(dev))
		goto out_unlock;

	if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
		/* The card is being reset, no point doing anything */
		goto out_unlock;
	}

	subid = nic->pdev->subsystem_device;
	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Allow a small delay for the NICs self initiated
		 * cleanup to complete.
		 */
		msleep(100);
	}

	val64 = readq(&bar0->adapter_status);
	if (LINK_IS_UP(val64)) {
		/* First link-up: enable the adapter once it is quiescent. */
		if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
			if (verify_xena_quiescence(nic)) {
				val64 = readq(&bar0->adapter_control);
				val64 |= ADAPTER_CNTL_EN;
				writeq(val64, &bar0->adapter_control);
				/* Some cards need the link LED driven via
				 * GPIO instead of the adapter control bit.
				 */
				if (CARDS_WITH_FAULTY_LINK_INDICATORS(
					    nic->device_type, subid)) {
					val64 = readq(&bar0->gpio_control);
					val64 |= GPIO_CTRL_GPIO_0;
					writeq(val64, &bar0->gpio_control);
					val64 = readq(&bar0->gpio_control);
				} else {
					val64 |= ADAPTER_LED_ON;
					writeq(val64, &bar0->adapter_control);
				}
				nic->device_enabled_once = true;
			} else {
				DBG_PRINT(ERR_DBG,
					  "%s: Error: device is not Quiescent\n",
					  dev->name);
				s2io_stop_all_tx_queue(nic);
			}
		}
		val64 = readq(&bar0->adapter_control);
		val64 |= ADAPTER_LED_ON;
		writeq(val64, &bar0->adapter_control);
		s2io_link(nic, LINK_UP);
	} else {
		if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
						      subid)) {
			val64 = readq(&bar0->gpio_control);
			val64 &= ~GPIO_CTRL_GPIO_0;
			writeq(val64, &bar0->gpio_control);
			val64 = readq(&bar0->gpio_control);
		}
		/* turn off LED */
		val64 = readq(&bar0->adapter_control);
		val64 = val64 & (~ADAPTER_LED_ON);
		writeq(val64, &bar0->adapter_control);
		s2io_link(nic, LINK_DOWN);
	}
	clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));

out_unlock:
	rtnl_unlock();
}
/*
 * set_rxd_buffer_pointer - (re)attach DMA buffers to an Rx descriptor
 * @sp: device private structure.
 * @rxdp: the Rx descriptor to populate.
 * @ba: buffer-address bookkeeping for 2-buffer mode (may be NULL in 1-buf mode).
 * @skb: in/out skb; reused across descriptors whose Host_Control is NULL.
 * @temp0, @temp1, @temp2: in/out cached DMA addresses, reused the same way.
 * @size: skb allocation size for the current ring mode.
 *
 * Used while bringing the card down to hand every descriptor a valid
 * buffer (the frames will never be processed, so one skb/mapping set is
 * shared by all descriptors with NULL Host_Control).
 * Returns 0 on success, -ENOMEM on allocation or DMA-mapping failure
 * (already-made mappings for this descriptor are unwound first).
 */
static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
				  struct buffAdd *ba,
				  struct sk_buff **skb, u64 *temp0, u64 *temp1,
				  u64 *temp2, int size)
{
	struct net_device *dev = sp->dev;
	struct swStat *stats = &sp->mac_control.stats_info->sw_stat;

	if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
		struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
		/* allocate skb */
		if (*skb) {
			DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
			/*
			 * As Rx frames are not going to be processed,
			 * reuse the same mapped address for the Rxd
			 * buffer pointer
			 */
			rxdp1->Buffer0_ptr = *temp0;
		} else {
			*skb = dev_alloc_skb(size);
			if (!(*skb)) {
				DBG_PRINT(INFO_DBG,
					  "%s: Out of memory to allocate %s\n",
					  dev->name, "1 buf mode SKBs");
				stats->mem_alloc_fail_cnt++;
				return -ENOMEM ;
			}
			stats->mem_allocated += (*skb)->truesize;
			/* store the mapped addr in a temp variable
			 * so it can be reused for the next rxd whose
			 * Host Control is NULL
			 */
			rxdp1->Buffer0_ptr = *temp0 =
				pci_map_single(sp->pdev, (*skb)->data,
					       size - NET_IP_ALIGN,
					       PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(sp->pdev, rxdp1->Buffer0_ptr))
				goto memalloc_failed;
			rxdp->Host_Control = (unsigned long) (*skb);
		}
	} else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
		struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
		/* Two buffer Mode */
		if (*skb) {
			/* Reuse the mappings cached from a previous call. */
			rxdp3->Buffer2_ptr = *temp2;
			rxdp3->Buffer0_ptr = *temp0;
			rxdp3->Buffer1_ptr = *temp1;
		} else {
			*skb = dev_alloc_skb(size);
			if (!(*skb)) {
				DBG_PRINT(INFO_DBG,
					  "%s: Out of memory to allocate %s\n",
					  dev->name,
					  "2 buf mode SKBs");
				stats->mem_alloc_fail_cnt++;
				return -ENOMEM;
			}
			stats->mem_allocated += (*skb)->truesize;
			/* Buffer 2 carries the payload (MTU + 4). */
			rxdp3->Buffer2_ptr = *temp2 =
				pci_map_single(sp->pdev, (*skb)->data,
					       dev->mtu + 4,
					       PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(sp->pdev, rxdp3->Buffer2_ptr))
				goto memalloc_failed;
			/* Buffer 0 carries the header area. */
			rxdp3->Buffer0_ptr = *temp0 =
				pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
					       PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(sp->pdev,
						  rxdp3->Buffer0_ptr)) {
				/* Unwind the Buffer2 mapping made above. */
				pci_unmap_single(sp->pdev,
						 (dma_addr_t)rxdp3->Buffer2_ptr,
						 dev->mtu + 4,
						 PCI_DMA_FROMDEVICE);
				goto memalloc_failed;
			}
			rxdp->Host_Control = (unsigned long) (*skb);

			/* Buffer-1 will be dummy buffer not used */
			rxdp3->Buffer1_ptr = *temp1 =
				pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
					       PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(sp->pdev,
						  rxdp3->Buffer1_ptr)) {
				/* Unwind both earlier mappings. */
				pci_unmap_single(sp->pdev,
						 (dma_addr_t)rxdp3->Buffer0_ptr,
						 BUF0_LEN, PCI_DMA_FROMDEVICE);
				pci_unmap_single(sp->pdev,
						 (dma_addr_t)rxdp3->Buffer2_ptr,
						 dev->mtu + 4,
						 PCI_DMA_FROMDEVICE);
				goto memalloc_failed;
			}
		}
	}
	return 0;

memalloc_failed:
	/* *skb is guaranteed non-NULL here: we only jump after allocation. */
	stats->pci_map_fail_cnt++;
	stats->mem_freed += (*skb)->truesize;
	dev_kfree_skb(*skb);
	return -ENOMEM;
}
7023 static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
7024 int size)
7026 struct net_device *dev = sp->dev;
7027 if (sp->rxd_mode == RXD_MODE_1) {
7028 rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
7029 } else if (sp->rxd_mode == RXD_MODE_3B) {
7030 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
7031 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
7032 rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu + 4);
/*
 * rxd_owner_bit_reset - hand every Rx descriptor back to the hardware
 * @sp: device private structure.
 *
 * Called during card-down: walks all rings/blocks/descriptors, gives each
 * descriptor a valid (shared) buffer and size, then flips ownership to
 * the NIC so it does not bump the ring while being quiesced.
 * Always returns 0 - even when set_rxd_buffer_pointer() fails with
 * -ENOMEM the walk just stops early (best-effort replenish).
 */
static int rxd_owner_bit_reset(struct s2io_nic *sp)
{
	int i, j, k, blk_cnt = 0, size;
	struct config_param *config = &sp->config;
	struct mac_info *mac_control = &sp->mac_control;
	struct net_device *dev = sp->dev;
	struct RxD_t *rxdp = NULL;
	struct sk_buff *skb = NULL;
	struct buffAdd *ba = NULL;
	u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;

	/* Calculate the size based on ring mode */
	size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
		HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
	if (sp->rxd_mode == RXD_MODE_1)
		size += NET_IP_ALIGN;
	else if (sp->rxd_mode == RXD_MODE_3B)
		size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;

	for (i = 0; i < config->rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
		struct ring_info *ring = &mac_control->rings[i];

		blk_cnt = rx_cfg->num_rxd / (rxd_count[sp->rxd_mode] + 1);

		for (j = 0; j < blk_cnt; j++) {
			for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
				rxdp = ring->rx_blocks[j].rxds[k].virt_addr;
				if (sp->rxd_mode == RXD_MODE_3B)
					ba = &ring->ba[j][k];
				if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb,
							   (u64 *)&temp0_64,
							   (u64 *)&temp1_64,
							   (u64 *)&temp2_64,
							   size) == -ENOMEM) {
					return 0;
				}

				set_rxd_buffer_size(sp, rxdp, size);
				/* Ensure buffer writes land before the
				 * ownership flip below is visible to HW.
				 */
				wmb();
				/* flip the Ownership bit to Hardware */
				rxdp->Control_1 |= RXD_OWN_XENA;
			}
		}
	}
	return 0;
}
/*
 * s2io_add_isr - register the driver's interrupt handler(s)
 * @sp: device private structure.
 *
 * Tries MSI-X first when configured, registering one vector per ring
 * plus an alarm vector; on any registration failure it falls back to a
 * shared legacy (INTA) interrupt.  Returns 0 on success, -1 if even the
 * INTA registration fails.
 */
static int s2io_add_isr(struct s2io_nic *sp)
{
	int ret = 0;
	struct net_device *dev = sp->dev;
	int err = 0;

	if (sp->config.intr_type == MSI_X)
		ret = s2io_enable_msi_x(sp);
	if (ret) {
		/* MSI-X enable failed - degrade to legacy interrupts. */
		DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
		sp->config.intr_type = INTA;
	}

	/*
	 * Store the values of the MSIX table in
	 * the struct s2io_nic structure
	 */
	store_xmsi_data(sp);

	/* After proper initialization of H/W, register ISR */
	if (sp->config.intr_type == MSI_X) {
		int i, msix_rx_cnt = 0;

		for (i = 0; i < sp->num_entries; i++) {
			if (sp->s2io_entries[i].in_use == MSIX_FLG) {
				if (sp->s2io_entries[i].type ==
				    MSIX_RING_TYPE) {
					sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
						dev->name, i);
					err = request_irq(sp->entries[i].vector,
							  s2io_msix_ring_handle,
							  0,
							  sp->desc[i],
							  sp->s2io_entries[i].arg);
				} else if (sp->s2io_entries[i].type ==
					   MSIX_ALARM_TYPE) {
					sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
						dev->name, i);
					err = request_irq(sp->entries[i].vector,
							  s2io_msix_fifo_handle,
							  0,
							  sp->desc[i],
							  sp->s2io_entries[i].arg);
				}
				/* if either data or addr is zero print it. */
				if (!(sp->msix_info[i].addr &&
				      sp->msix_info[i].data)) {
					DBG_PRINT(ERR_DBG,
						  "%s @Addr:0x%llx Data:0x%llx\n",
						  sp->desc[i],
						  (unsigned long long)
						  sp->msix_info[i].addr,
						  (unsigned long long)
						  ntohl(sp->msix_info[i].data));
				} else
					msix_rx_cnt++;
				if (err) {
					/* Undo any vectors registered so far
					 * and fall back to INTA.
					 */
					remove_msix_isr(sp);

					DBG_PRINT(ERR_DBG,
						  "%s:MSI-X-%d registration "
						  "failed\n", dev->name, i);

					DBG_PRINT(ERR_DBG,
						  "%s: Defaulting to INTA\n",
						  dev->name);
					sp->config.intr_type = INTA;
					break;
				}
				sp->s2io_entries[i].in_use =
					MSIX_REGISTERED_SUCCESS;
			}
		}
		if (!err) {
			/* msix_rx_cnt counted the alarm vector too,
			 * hence the pre-decrement in the report.
			 */
			pr_info("MSI-X-RX %d entries enabled\n", --msix_rx_cnt);
			DBG_PRINT(INFO_DBG,
				  "MSI-X-TX entries enabled through alarm vector\n");
		}
	}
	if (sp->config.intr_type == INTA) {
		err = request_irq((int)sp->pdev->irq, s2io_isr, IRQF_SHARED,
				  sp->name, dev);
		if (err) {
			DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
				  dev->name);
			return -1;
		}
	}
	return 0;
}
7177 static void s2io_rem_isr(struct s2io_nic *sp)
7179 if (sp->config.intr_type == MSI_X)
7180 remove_msix_isr(sp);
7181 else
7182 remove_inta_isr(sp);
/*
 * do_s2io_card_down - bring the adapter down
 * @sp: device private structure.
 * @do_io: non-zero to touch the hardware (stop traffic, quiesce, reset);
 *         zero to do software-only teardown (e.g. after PCI error).
 *
 * Stops napi/timers/ISRs, waits for the set_link task, polls for
 * quiescence (up to ~500 ms), resets the NIC and frees all buffers.
 */
static void do_s2io_card_down(struct s2io_nic *sp, int do_io)
{
	int cnt = 0;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	register u64 val64 = 0;
	struct config_param *config;
	config = &sp->config;

	if (!is_s2io_card_up(sp))
		return;

	del_timer_sync(&sp->alarm_timer);
	/* If s2io_set_link task is executing, wait till it completes. */
	while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state)))
		msleep(50);
	clear_bit(__S2IO_STATE_CARD_UP, &sp->state);

	/* Disable napi */
	if (sp->config.napi) {
		int off = 0;
		if (config->intr_type == MSI_X) {
			for (; off < sp->config.rx_ring_num; off++)
				napi_disable(&sp->mac_control.rings[off].napi);
		}
		else
			napi_disable(&sp->napi);
	}

	/* disable Tx and Rx traffic on the NIC */
	if (do_io)
		stop_nic(sp);

	s2io_rem_isr(sp);

	/* stop the tx queue, indicate link down */
	s2io_link(sp, LINK_DOWN);

	/* Check if the device is Quiescent and then Reset the NIC */
	while (do_io) {
		/* As per the HW requirement we need to replenish the
		 * receive buffer to avoid the ring bump. Since there is
		 * no intention of processing the Rx frame at this point we
		 * are just setting the ownership bit of rxd in each Rx
		 * ring to HW and set the appropriate buffer size
		 * based on the ring mode
		 */
		rxd_owner_bit_reset(sp);

		val64 = readq(&bar0->adapter_status);
		if (verify_xena_quiescence(sp)) {
			if (verify_pcc_quiescent(sp, sp->device_enabled_once))
				break;
		}

		msleep(50);
		cnt++;
		if (cnt == 10) {
			/* Give up after ~500 ms; proceed to reset anyway. */
			DBG_PRINT(ERR_DBG, "Device not Quiescent - "
				  "adapter status reads 0x%llx\n",
				  (unsigned long long)val64);
			break;
		}
	}
	if (do_io)
		s2io_reset(sp);

	/* Free all Tx buffers */
	free_tx_buffers(sp);

	/* Free all Rx buffers */
	free_rx_buffers(sp);

	clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
}
/* Bring the adapter down, including hardware quiesce and reset. */
static void s2io_card_down(struct s2io_nic *sp)
{
	do_s2io_card_down(sp, 1);
}
/*
 * s2io_card_up - bring the adapter up
 * @sp: device private structure.
 *
 * Initialises the hardware registers, fills the Rx rings, enables napi,
 * restores the receive mode, starts the NIC, registers the ISR(s) and
 * enables interrupts.  On failure each step unwinds what was done
 * before it.  Returns 0 on success or a negative errno.
 */
static int s2io_card_up(struct s2io_nic *sp)
{
	int i, ret = 0;
	struct config_param *config;
	struct mac_info *mac_control;
	struct net_device *dev = (struct net_device *)sp->dev;
	u16 interruptible;

	/* Initialize the H/W I/O registers */
	ret = init_nic(sp);
	if (ret != 0) {
		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
			  dev->name);
		/* -EIO means the hardware is gone; don't touch it again. */
		if (ret != -EIO)
			s2io_reset(sp);
		return ret;
	}

	/*
	 * Initializing the Rx buffers. For now we are considering only 1
	 * Rx ring and initializing buffers into 30 Rx blocks
	 */
	config = &sp->config;
	mac_control = &sp->mac_control;

	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		ring->mtu = dev->mtu;
		ring->lro = !!(dev->features & NETIF_F_LRO);
		ret = fill_rx_buffers(sp, ring, 1);
		if (ret) {
			DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
				  dev->name);
			s2io_reset(sp);
			free_rx_buffers(sp);
			return -ENOMEM;
		}
		DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
			  ring->rx_bufs_left);
	}

	/* Initialise napi */
	if (config->napi) {
		if (config->intr_type == MSI_X) {
			/* One napi context per Rx ring under MSI-X. */
			for (i = 0; i < sp->config.rx_ring_num; i++)
				napi_enable(&sp->mac_control.rings[i].napi);
		} else {
			napi_enable(&sp->napi);
		}
	}

	/* Maintain the state prior to the open */
	if (sp->promisc_flg)
		sp->promisc_flg = 0;
	if (sp->m_cast_flg) {
		sp->m_cast_flg = 0;
		sp->all_multi_pos = 0;
	}

	/* Setting its receive mode */
	s2io_set_multicast(dev);

	if (dev->features & NETIF_F_LRO) {
		/* Initialize max aggregatable pkts per session based on MTU */
		sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
		/* Check if we can use (if specified) user provided value */
		if (lro_max_pkts < sp->lro_max_aggr_per_sess)
			sp->lro_max_aggr_per_sess = lro_max_pkts;
	}

	/* Enable Rx Traffic and interrupts on the NIC */
	if (start_nic(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
		s2io_reset(sp);
		free_rx_buffers(sp);
		return -ENODEV;
	}

	/* Add interrupt service routine */
	if (s2io_add_isr(sp) != 0) {
		if (sp->config.intr_type == MSI_X)
			s2io_rem_isr(sp);
		s2io_reset(sp);
		free_rx_buffers(sp);
		return -ENODEV;
	}

	/* Arm the periodic alarm handler (every half second). */
	S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));

	set_bit(__S2IO_STATE_CARD_UP, &sp->state);

	/* Enable select interrupts */
	en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
	if (sp->config.intr_type != INTA) {
		interruptible = TX_TRAFFIC_INTR | TX_PIC_INTR;
		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
	} else {
		interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
		interruptible |= TX_PIC_INTR;
		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
	}
	return 0;
}
7372 * s2io_restart_nic - Resets the NIC.
7373 * @data : long pointer to the device private structure
7374 * Description:
7375 * This function is scheduled to be run by the s2io_tx_watchdog
7376 * function after 0.5 secs to reset the NIC. The idea is to reduce
7377 * the run time of the watch dog routine which is run holding a
7378 * spin lock.
static void s2io_restart_nic(struct work_struct *work)
{
	struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
	struct net_device *dev = sp->dev;

	rtnl_lock();

	/* Interface went down before the work item ran - nothing to do. */
	if (!netif_running(dev))
		goto out_unlock;

	s2io_card_down(sp);
	if (s2io_card_up(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", dev->name);
	}
	s2io_wake_all_tx_queue(sp);
	DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n", dev->name);
out_unlock:
	rtnl_unlock();
}
7402 * s2io_tx_watchdog - Watchdog for transmit side.
7403 * @dev : Pointer to net device structure
7404 * Description:
7405 * This function is triggered if the Tx Queue is stopped
7406 * for a pre-defined amount of time when the Interface is still up.
7407 * If the Interface is jammed in such a situation, the hardware is
7408 * reset (by s2io_close) and restarted again (by s2io_open) to
7409 * overcome any problem that might have been caused in the hardware.
7410 * Return value:
7411 * void
7414 static void s2io_tx_watchdog(struct net_device *dev)
7416 struct s2io_nic *sp = netdev_priv(dev);
7417 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7419 if (netif_carrier_ok(dev)) {
7420 swstats->watchdog_timer_cnt++;
7421 schedule_work(&sp->rst_timer_task);
7422 swstats->soft_reset_cnt++;
7427 * rx_osm_handler - To perform some OS related operations on SKB.
7428 * @sp: private member of the device structure,pointer to s2io_nic structure.
7429 * @skb : the socket buffer pointer.
7430 * @len : length of the packet
7431 * @cksum : FCS checksum of the frame.
7432 * @ring_no : the ring from which this RxD was extracted.
7433 * Description:
 * This function is called by the Rx interrupt service routine to perform
7435 * some OS related operations on the SKB before passing it to the upper
7436 * layers. It mainly checks if the checksum is OK, if so adds it to the
7437 * SKBs cksum variable, increments the Rx packet count and passes the SKB
7438 * to the upper layer. If the checksum is wrong, it increments the Rx
7439 * packet error count, frees the SKB and returns error.
7440 * Return value:
7441 * SUCCESS on success and -1 on failure.
static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
{
	struct s2io_nic *sp = ring_data->nic;
	struct net_device *dev = (struct net_device *)ring_data->dev;
	/* The skb pointer was stashed in the RxD when the buffer was posted. */
	struct sk_buff *skb = (struct sk_buff *)
		((unsigned long)rxdp->Host_Control);
	int ring_no = ring_data->ring_no;
	u16 l3_csum, l4_csum;
	unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
	struct lro *uninitialized_var(lro);
	u8 err_mask;
	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;

	skb->dev = dev;

	if (err) {
		/* Check for parity error */
		if (err & 0x1)
			swstats->parity_err_cnt++;

		/* Transfer code lives in bits 48+ of Control_1; bucket it
		 * into the matching per-cause software counter.
		 */
		err_mask = err >> 48;
		switch (err_mask) {
		case 1:
			swstats->rx_parity_err_cnt++;
			break;

		case 2:
			swstats->rx_abort_cnt++;
			break;

		case 3:
			swstats->rx_parity_abort_cnt++;
			break;

		case 4:
			swstats->rx_rda_fail_cnt++;
			break;

		case 5:
			swstats->rx_unkn_prot_cnt++;
			break;

		case 6:
			swstats->rx_fcs_err_cnt++;
			break;

		case 7:
			swstats->rx_buf_size_err_cnt++;
			break;

		case 8:
			swstats->rx_rxd_corrupt_cnt++;
			break;

		case 15:
			swstats->rx_unkn_err_cnt++;
			break;
		}
		/*
		 * Drop the packet if bad transfer code. Exception being
		 * 0x5, which could be due to unsupported IPv6 extension header.
		 * In this case, we let stack handle the packet.
		 * Note that in this case, since checksum will be incorrect,
		 * stack will validate the same.
		 */
		if (err_mask != 0x5) {
			DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
				  dev->name, err_mask);
			dev->stats.rx_crc_errors++;
			swstats->mem_freed
				+= skb->truesize;
			dev_kfree_skb(skb);
			ring_data->rx_bufs_left -= 1;
			rxdp->Host_Control = 0;
			return 0;
		}
	}

	rxdp->Host_Control = 0;
	if (sp->rxd_mode == RXD_MODE_1) {
		/* 1-buffer mode: whole frame is in buffer 0. */
		int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);

		skb_put(skb, len);
	} else if (sp->rxd_mode == RXD_MODE_3B) {
		/* 2-buffer mode: copy the header (buffer 0) in front of the
		 * payload (buffer 2) that is already in the skb.
		 */
		int get_block = ring_data->rx_curr_get_info.block_index;
		int get_off = ring_data->rx_curr_get_info.offset;
		int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
		int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
		unsigned char *buff = skb_push(skb, buf0_len);

		struct buffAdd *ba = &ring_data->ba[get_block][get_off];
		memcpy(buff, ba->ba_0, buf0_len);
		skb_put(skb, buf2_len);
	}

	/* Checksum offload result is only trusted for non-fragmented
	 * TCP/UDP frames when RX checksumming is enabled.
	 */
	if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
	    ((!ring_data->lro) ||
	     (ring_data->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
	    (sp->rx_csum)) {
		l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
		l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
		if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
			/*
			 * NIC verifies if the Checksum of the received
			 * frame is Ok or not and accordingly returns
			 * a flag in the RxD.
			 */
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			if (ring_data->lro) {
				u32 tcp_len = 0;
				u8 *tcp;
				int ret = 0;

				/* Try to fold this segment into an LRO
				 * session; the return code selects the
				 * disposition below.
				 */
				ret = s2io_club_tcp_session(ring_data,
							    skb->data, &tcp,
							    &tcp_len, &lro,
							    rxdp, sp);
				switch (ret) {
				case 3: /* Begin anew */
					lro->parent = skb;
					goto aggregate;
				case 1: /* Aggregate */
					lro_append_pkt(sp, lro, skb, tcp_len);
					goto aggregate;
				case 4: /* Flush session */
					lro_append_pkt(sp, lro, skb, tcp_len);
					queue_rx_frame(lro->parent,
						       lro->vlan_tag);
					clear_lro_session(lro);
					swstats->flush_max_pkts++;
					goto aggregate;
				case 2: /* Flush both */
					lro->parent->data_len = lro->frags_len;
					swstats->sending_both++;
					queue_rx_frame(lro->parent,
						       lro->vlan_tag);
					clear_lro_session(lro);
					goto send_up;
				case 0: /* sessions exceeded */
				case -1: /* non-TCP or not L2 aggregatable */
				case 5: /*
					 * First pkt in session not
					 * L3/L4 aggregatable
					 */
					break;
				default:
					DBG_PRINT(ERR_DBG,
						  "%s: Samadhana!!\n",
						  __func__);
					BUG();
				}
			}
		} else {
			/*
			 * Packet with erroneous checksum, let the
			 * upper layers deal with it.
			 */
			skb_checksum_none_assert(skb);
		}
	} else
		skb_checksum_none_assert(skb);

	swstats->mem_freed += skb->truesize;
send_up:
	skb_record_rx_queue(skb, ring_no);
	queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
aggregate:
	sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
	return SUCCESS;
}
7615 * s2io_link - stops/starts the Tx queue.
7616 * @sp : private member of the device structure, which is a pointer to the
7617 * s2io_nic structure.
* @link : indicates whether link is UP/DOWN.
7619 * Description:
7620 * This function stops/starts the Tx queue depending on whether the link
* status of the NIC is down or up. This is called by the Alarm
7622 * interrupt handler whenever a link change interrupt comes up.
7623 * Return value:
7624 * void.
7627 static void s2io_link(struct s2io_nic *sp, int link)
7629 struct net_device *dev = (struct net_device *)sp->dev;
7630 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7632 if (link != sp->last_link_state) {
7633 init_tti(sp, link);
7634 if (link == LINK_DOWN) {
7635 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7636 s2io_stop_all_tx_queue(sp);
7637 netif_carrier_off(dev);
7638 if (swstats->link_up_cnt)
7639 swstats->link_up_time =
7640 jiffies - sp->start_time;
7641 swstats->link_down_cnt++;
7642 } else {
7643 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
7644 if (swstats->link_down_cnt)
7645 swstats->link_down_time =
7646 jiffies - sp->start_time;
7647 swstats->link_up_cnt++;
7648 netif_carrier_on(dev);
7649 s2io_wake_all_tx_queue(sp);
7652 sp->last_link_state = link;
7653 sp->start_time = jiffies;
7657 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
7658 * @sp : private member of the device structure, which is a pointer to the
7659 * s2io_nic structure.
7660 * Description:
7661 * This function initializes a few of the PCI and PCI-X configuration registers
7662 * with recommended values.
7663 * Return value:
7664 * void
7667 static void s2io_init_pci(struct s2io_nic *sp)
7669 u16 pci_cmd = 0, pcix_cmd = 0;
7671 /* Enable Data Parity Error Recovery in PCI-X command register. */
7672 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7673 &(pcix_cmd));
7674 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7675 (pcix_cmd | 1));
7676 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7677 &(pcix_cmd));
7679 /* Set the PErr Response bit in PCI command register. */
7680 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7681 pci_write_config_word(sp->pdev, PCI_COMMAND,
7682 (pci_cmd | PCI_COMMAND_PARITY));
7683 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
/*
 * Sanity-check the module load parameters, clamping or overriding any
 * out-of-range value to a supported default.  Always returns SUCCESS.
 */
static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
			    u8 *dev_multiq)
{
	int i;

	/* Clamp tx_fifo_num into [1, MAX_TX_FIFOS]. */
	if ((tx_fifo_num > MAX_TX_FIFOS) || (tx_fifo_num < 1)) {
		DBG_PRINT(ERR_DBG, "Requested number of tx fifos "
			  "(%d) not supported\n", tx_fifo_num);

		if (tx_fifo_num < 1)
			tx_fifo_num = 1;
		else
			tx_fifo_num = MAX_TX_FIFOS;

		DBG_PRINT(ERR_DBG, "Default to %d tx fifos\n", tx_fifo_num);
	}

	if (multiq)
		*dev_multiq = multiq;

	/* Tx steering needs more than one fifo to be meaningful. */
	if (tx_steering_type && (1 == tx_fifo_num)) {
		if (tx_steering_type != TX_DEFAULT_STEERING)
			DBG_PRINT(ERR_DBG,
				  "Tx steering is not supported with "
				  "one fifo. Disabling Tx steering.\n");
		tx_steering_type = NO_STEERING;
	}

	if ((tx_steering_type < NO_STEERING) ||
	    (tx_steering_type > TX_DEFAULT_STEERING)) {
		DBG_PRINT(ERR_DBG,
			  "Requested transmit steering not supported\n");
		DBG_PRINT(ERR_DBG, "Disabling transmit steering\n");
		tx_steering_type = NO_STEERING;
	}

	if (rx_ring_num > MAX_RX_RINGS) {
		DBG_PRINT(ERR_DBG,
			  "Requested number of rx rings not supported\n");
		DBG_PRINT(ERR_DBG, "Default to %d rx rings\n",
			  MAX_RX_RINGS);
		rx_ring_num = MAX_RX_RINGS;
	}

	if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
		DBG_PRINT(ERR_DBG, "Wrong intr_type requested. "
			  "Defaulting to INTA\n");
		*dev_intr_type = INTA;
	}

	/* MSI-X is only available on Xframe II (Herc) devices. */
	if ((*dev_intr_type == MSI_X) &&
	    ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
	     (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
		DBG_PRINT(ERR_DBG, "Xframe I does not support MSI_X. "
			  "Defaulting to INTA\n");
		*dev_intr_type = INTA;
	}

	/* Only 1-buffer (1) and 2-buffer (2) receive modes exist. */
	if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
		DBG_PRINT(ERR_DBG, "Requested ring mode not supported\n");
		DBG_PRINT(ERR_DBG, "Defaulting to 1-buffer mode\n");
		rx_ring_mode = 1;
	}

	/* Clamp each per-ring block count to the hardware maximum. */
	for (i = 0; i < MAX_RX_RINGS; i++)
		if (rx_ring_sz[i] > MAX_RX_BLOCKS_PER_RING) {
			DBG_PRINT(ERR_DBG, "Requested rx ring size not "
				  "supported\nDefaulting to %d\n",
				  MAX_RX_BLOCKS_PER_RING);
			rx_ring_sz[i] = MAX_RX_BLOCKS_PER_RING;
		}

	return SUCCESS;
}
7762 * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
7763 * or Traffic class respectively.
7764 * @nic: device private variable
7765 * Description: The function configures the receive steering to
7766 * desired receive ring.
7767 * Return Value: SUCCESS on success and
7768 * '-1' on failure (endian settings incorrect).
7770 static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7772 struct XENA_dev_config __iomem *bar0 = nic->bar0;
7773 register u64 val64 = 0;
7775 if (ds_codepoint > 63)
7776 return FAILURE;
7778 val64 = RTS_DS_MEM_DATA(ring);
7779 writeq(val64, &bar0->rts_ds_mem_data);
7781 val64 = RTS_DS_MEM_CTRL_WE |
7782 RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7783 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7785 writeq(val64, &bar0->rts_ds_mem_ctrl);
7787 return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7788 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7789 S2IO_BIT_RESET);
/* Driver entry points handed to the networking core at probe time. */
static const struct net_device_ops s2io_netdev_ops = {
	.ndo_open = s2io_open,
	.ndo_stop = s2io_close,
	.ndo_get_stats = s2io_get_stats,
	.ndo_start_xmit = s2io_xmit,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_multicast_list = s2io_set_multicast,
	.ndo_do_ioctl = s2io_ioctl,
	.ndo_set_mac_address = s2io_set_mac_addr,
	.ndo_change_mtu = s2io_change_mtu,
	.ndo_vlan_rx_register = s2io_vlan_rx_register,
	.ndo_vlan_rx_kill_vid = s2io_vlan_rx_kill_vid,
	.ndo_tx_timeout = s2io_tx_watchdog,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = s2io_netpoll,
#endif
};
7811 * s2io_init_nic - Initialization of the adapter .
7812 * @pdev : structure containing the PCI related information of the device.
7813 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
7814 * Description:
* The function initializes an adapter identified by the pci_dev structure.
7816 * All OS related initialization including memory and device structure and
* initialization of the device private variable is done. Also the swapper
7818 * control register is initialized to enable read and write into the I/O
7819 * registers of the device.
7820 * Return value:
7821 * returns 0 on success and negative on failure.
static int __devinit
s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
{
	struct s2io_nic *sp;
	struct net_device *dev;
	int i, j, ret;
	int dma_flag = false;
	u32 mac_up, mac_down;
	u64 val64 = 0, tmp64 = 0;
	struct XENA_dev_config __iomem *bar0 = NULL;
	u16 subid;
	struct config_param *config;
	struct mac_info *mac_control;
	int mode;
	u8 dev_intr_type = intr_type;
	u8 dev_multiq = 0;

	/* Validate/clamp module parameters before touching the device. */
	ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
	if (ret)
		return ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		DBG_PRINT(ERR_DBG,
			  "%s: pci_enable_device failed\n", __func__);
		return ret;
	}

	/* Prefer a 64-bit DMA mask, fall back to 32-bit, else bail out. */
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		DBG_PRINT(INIT_DBG, "%s: Using 64bit DMA\n", __func__);
		dma_flag = true;
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
			DBG_PRINT(ERR_DBG,
				  "Unable to obtain 64bit DMA "
				  "for consistent allocations\n");
			pci_disable_device(pdev);
			return -ENOMEM;
		}
	} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		DBG_PRINT(INIT_DBG, "%s: Using 32bit DMA\n", __func__);
	} else {
		pci_disable_device(pdev);
		return -ENOMEM;
	}
	ret = pci_request_regions(pdev, s2io_driver_name);
	if (ret) {
		DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x\n",
			  __func__, ret);
		pci_disable_device(pdev);
		return -ENODEV;
	}
	if (dev_multiq)
		dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
	else
		dev = alloc_etherdev(sizeof(struct s2io_nic));
	if (dev == NULL) {
		DBG_PRINT(ERR_DBG, "Device allocation failed\n");
		pci_disable_device(pdev);
		pci_release_regions(pdev);
		return -ENODEV;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	/* Private member variable initialized to s2io NIC structure */
	sp = netdev_priv(dev);
	sp->dev = dev;
	sp->pdev = pdev;
	sp->high_dma_flag = dma_flag;
	sp->device_enabled_once = false;
	if (rx_ring_mode == 1)
		sp->rxd_mode = RXD_MODE_1;
	if (rx_ring_mode == 2)
		sp->rxd_mode = RXD_MODE_3B;

	sp->config.intr_type = dev_intr_type;

	/* Herc device IDs identify an Xframe II; anything else is Xframe I. */
	if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
	    (pdev->device == PCI_DEVICE_ID_HERC_UNI))
		sp->device_type = XFRAME_II_DEVICE;
	else
		sp->device_type = XFRAME_I_DEVICE;

	/* Initialize some PCI/PCI-X fields of the NIC. */
	s2io_init_pci(sp);

	/*
	 * Setting the device configuration parameters.
	 * Most of these parameters can be specified by the user during
	 * module insertion as they are module loadable parameters. If
	 * these parameters are not specified during load time, they
	 * are initialized with default values.
	 */
	config = &sp->config;
	mac_control = &sp->mac_control;

	config->napi = napi;
	config->tx_steering_type = tx_steering_type;

	/* Tx side parameters. */
	if (config->tx_steering_type == TX_PRIORITY_STEERING)
		config->tx_fifo_num = MAX_TX_FIFOS;
	else
		config->tx_fifo_num = tx_fifo_num;

	/* Initialize the fifos used for tx steering */
	if (config->tx_fifo_num < 5) {
		if (config->tx_fifo_num == 1)
			sp->total_tcp_fifos = 1;
		else
			sp->total_tcp_fifos = config->tx_fifo_num - 1;
		sp->udp_fifo_idx = config->tx_fifo_num - 1;
		sp->total_udp_fifos = 1;
		sp->other_fifo_idx = sp->total_tcp_fifos - 1;
	} else {
		sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
				       FIFO_OTHER_MAX_NUM);
		sp->udp_fifo_idx = sp->total_tcp_fifos;
		sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
		sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM;
	}

	config->multiq = dev_multiq;
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		tx_cfg->fifo_len = tx_fifo_len[i];
		tx_cfg->fifo_priority = i;
	}

	/* mapping the QoS priority to the configured fifos */
	for (i = 0; i < MAX_TX_FIFOS; i++)
		config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];

	/* map the hashing selector table to the configured fifos */
	for (i = 0; i < config->tx_fifo_num; i++)
		sp->fifo_selector[i] = fifo_selector[i];

	/* Any fifo shorter than 65 entries forces per-list Tx interrupts. */
	config->tx_intr_type = TXD_INT_TYPE_UTILZ;
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		tx_cfg->f_no_snoop = (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
		if (tx_cfg->fifo_len < 65) {
			config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
			break;
		}
	}
	/* + 2 because one Txd for skb->data and one Txd for UFO */
	config->max_txds = MAX_SKB_FRAGS + 2;

	/* Rx side parameters. */
	config->rx_ring_num = rx_ring_num;
	for (i = 0; i < config->rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
		struct ring_info *ring = &mac_control->rings[i];

		rx_cfg->num_rxd = rx_ring_sz[i] * (rxd_count[sp->rxd_mode] + 1);
		rx_cfg->ring_priority = i;
		ring->rx_bufs_left = 0;
		ring->rxd_mode = sp->rxd_mode;
		ring->rxd_count = rxd_count[sp->rxd_mode];
		ring->pdev = sp->pdev;
		ring->dev = sp->dev;
	}

	for (i = 0; i < rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];

		rx_cfg->ring_org = RING_ORG_BUFF1;
		rx_cfg->f_no_snoop = (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
	}

	/* Setting Mac Control parameters */
	mac_control->rmac_pause_time = rmac_pause_time;
	mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
	mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;

	/* initialize the shared memory used by the NIC and the host */
	if (init_shared_mem(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", dev->name);
		ret = -ENOMEM;
		goto mem_alloc_failed;
	}

	sp->bar0 = pci_ioremap_bar(pdev, 0);
	if (!sp->bar0) {
		DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
			  dev->name);
		ret = -ENOMEM;
		goto bar0_remap_failed;
	}

	sp->bar1 = pci_ioremap_bar(pdev, 2);
	if (!sp->bar1) {
		DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
			  dev->name);
		ret = -ENOMEM;
		goto bar1_remap_failed;
	}

	dev->irq = pdev->irq;
	dev->base_addr = (unsigned long)sp->bar0;

	/* Initializing the BAR1 address as the start of the FIFO pointer. */
	for (j = 0; j < MAX_TX_FIFOS; j++) {
		mac_control->tx_FIFO_start[j] =
			(struct TxFIFO_element __iomem *)
			(sp->bar1 + (j * 0x00020000));
	}

	/* Driver entry points */
	dev->netdev_ops = &s2io_netdev_ops;
	SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->features |= NETIF_F_LRO;
	dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
	if (sp->high_dma_flag == true)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= NETIF_F_TSO;
	dev->features |= NETIF_F_TSO6;
	/* UFO is only advertised on Xframe II when the module asks for it. */
	if ((sp->device_type & XFRAME_II_DEVICE) && (ufo)) {
		dev->features |= NETIF_F_UFO;
		dev->features |= NETIF_F_HW_CSUM;
	}
	dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
	INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
	INIT_WORK(&sp->set_link_task, s2io_set_link);

	pci_save_state(sp->pdev);

	/* Setting swapper control on the NIC, for proper reset operation */
	if (s2io_set_swapper(sp)) {
		DBG_PRINT(ERR_DBG, "%s: swapper settings are wrong\n",
			  dev->name);
		ret = -EAGAIN;
		goto set_swap_failed;
	}

	/* Verify if the Herc works on the slot its placed into */
	if (sp->device_type & XFRAME_II_DEVICE) {
		mode = s2io_verify_pci_mode(sp);
		if (mode < 0) {
			DBG_PRINT(ERR_DBG, "%s: Unsupported PCI bus mode\n",
				  __func__);
			ret = -EBADSLT;
			goto set_swap_failed;
		}
	}

	/* Probe MSI-X; fall back to INTA if it cannot be brought up. */
	if (sp->config.intr_type == MSI_X) {
		sp->num_entries = config->rx_ring_num + 1;
		ret = s2io_enable_msi_x(sp);

		if (!ret) {
			ret = s2io_test_msi(sp);
			/* rollback MSI-X, will re-enable during add_isr() */
			remove_msix_isr(sp);
		}
		if (ret) {
			DBG_PRINT(ERR_DBG,
				  "MSI-X requested but failed to enable\n");
			sp->config.intr_type = INTA;
		}
	}

	/* One NAPI context per ring for MSI-X, a single one for INTA. */
	if (config->intr_type == MSI_X) {
		for (i = 0; i < config->rx_ring_num ; i++) {
			struct ring_info *ring = &mac_control->rings[i];

			netif_napi_add(dev, &ring->napi, s2io_poll_msix, 64);
		}
	} else {
		netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
	}

	/* Not needed for Herc */
	if (sp->device_type & XFRAME_I_DEVICE) {
		/*
		 * Fix for all "FFs" MAC address problems observed on
		 * Alpha platforms
		 */
		fix_mac_address(sp);
		s2io_reset(sp);
	}

	/*
	 * MAC address initialization.
	 * For now only one mac address will be read and used.
	 */
	bar0 = sp->bar0;
	val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
		RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET);
	writeq(val64, &bar0->rmac_addr_cmd_mem);
	wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
			      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
			      S2IO_BIT_RESET);
	tmp64 = readq(&bar0->rmac_addr_data0_mem);
	mac_down = (u32)tmp64;
	mac_up = (u32) (tmp64 >> 32);

	/* Unpack the 48-bit MAC address from the two register halves. */
	sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
	sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
	sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
	sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
	sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
	sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);

	/* Set the factory defined MAC address initially */
	dev->addr_len = ETH_ALEN;
	memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
	memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);

	/* initialize number of multicast & unicast MAC entries variables */
	if (sp->device_type == XFRAME_I_DEVICE) {
		config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
		config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
		config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
	} else if (sp->device_type == XFRAME_II_DEVICE) {
		config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
		config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
		config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
	}

	/* store mac addresses from CAM to s2io_nic structure */
	do_s2io_store_unicast_mc(sp);

	/* Configure MSIX vector for number of rings configured plus one */
	if ((sp->device_type == XFRAME_II_DEVICE) &&
	    (config->intr_type == MSI_X))
		sp->num_entries = config->rx_ring_num + 1;

	/* Store the values of the MSIX table in the s2io_nic structure */
	store_xmsi_data(sp);
	/* reset Nic and bring it to known state */
	s2io_reset(sp);

	/*
	 * Initialize link state flags
	 * and the card state parameter
	 */
	sp->state = 0;

	/* Initialize spinlocks */
	for (i = 0; i < sp->config.tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];

		spin_lock_init(&fifo->tx_lock);
	}

	/*
	 * SXE-002: Configure link and activity LED to init state
	 * on driver load.
	 */
	subid = sp->pdev->subsystem_device;
	if ((subid & 0xFF) >= 0x07) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
		val64 = readq(&bar0->gpio_control);
	}

	sp->rx_csum = 1;	/* Rx chksum verify enabled by default */

	if (register_netdev(dev)) {
		DBG_PRINT(ERR_DBG, "Device registration failed\n");
		ret = -ENODEV;
		goto register_failed;
	}
	s2io_vpd_read(sp);
	DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2010 Exar Corp.\n");
	DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n", dev->name,
		  sp->product_name, pdev->revision);
	DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
		  s2io_driver_version);
	DBG_PRINT(ERR_DBG, "%s: MAC Address: %pM\n", dev->name, dev->dev_addr);
	DBG_PRINT(ERR_DBG, "Serial number: %s\n", sp->serial_num);
	if (sp->device_type & XFRAME_II_DEVICE) {
		mode = s2io_print_pci_mode(sp);
		if (mode < 0) {
			ret = -EBADSLT;
			unregister_netdev(dev);
			goto set_swap_failed;
		}
	}
	switch (sp->rxd_mode) {
	case RXD_MODE_1:
		DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
			  dev->name);
		break;
	case RXD_MODE_3B:
		DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
			  dev->name);
		break;
	}

	switch (sp->config.napi) {
	case 0:
		DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
		break;
	case 1:
		DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
		break;
	}

	DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
		  sp->config.tx_fifo_num);

	DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
		  sp->config.rx_ring_num);

	switch (sp->config.intr_type) {
	case INTA:
		DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
		break;
	case MSI_X:
		DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
		break;
	}
	if (sp->config.multiq) {
		for (i = 0; i < sp->config.tx_fifo_num; i++) {
			struct fifo_info *fifo = &mac_control->fifos[i];

			fifo->multiq = config->multiq;
		}
		DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
			  dev->name);
	} else
		DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
			  dev->name);

	switch (sp->config.tx_steering_type) {
	case NO_STEERING:
		DBG_PRINT(ERR_DBG, "%s: No steering enabled for transmit\n",
			  dev->name);
		break;
	case TX_PRIORITY_STEERING:
		DBG_PRINT(ERR_DBG,
			  "%s: Priority steering enabled for transmit\n",
			  dev->name);
		break;
	case TX_DEFAULT_STEERING:
		DBG_PRINT(ERR_DBG,
			  "%s: Default steering enabled for transmit\n",
			  dev->name);
	}

	DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
		  dev->name);
	if (ufo)
		DBG_PRINT(ERR_DBG,
			  "%s: UDP Fragmentation Offload(UFO) enabled\n",
			  dev->name);
	/* Initialize device name */
	sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);

	if (vlan_tag_strip)
		sp->vlan_strip_flag = 1;
	else
		sp->vlan_strip_flag = 0;

	/*
	 * Make Link state as off at this point, when the Link change
	 * interrupt comes the state will be automatically changed to
	 * the right state.
	 */
	netif_carrier_off(dev);

	return 0;

	/* Error path: labels unwind in reverse order of acquisition. */
register_failed:
set_swap_failed:
	iounmap(sp->bar1);
bar1_remap_failed:
	iounmap(sp->bar0);
bar0_remap_failed:
mem_alloc_failed:
	free_shared_mem(sp);
	pci_disable_device(pdev);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);

	return ret;
}
8319 * s2io_rem_nic - Free the PCI device
8320 * @pdev: structure containing the PCI related information of the device.
8321 * Description: This function is called by the Pci subsystem to release a
8322 * PCI device and free up all resource held up by the device. This could
8323 * be in response to a Hot plug event or when the driver is to be removed
8324 * from memory.
8327 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
8329 struct net_device *dev = pci_get_drvdata(pdev);
8330 struct s2io_nic *sp;
8332 if (dev == NULL) {
8333 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
8334 return;
8337 sp = netdev_priv(dev);
8339 cancel_work_sync(&sp->rst_timer_task);
8340 cancel_work_sync(&sp->set_link_task);
8342 unregister_netdev(dev);
8344 free_shared_mem(sp);
8345 iounmap(sp->bar0);
8346 iounmap(sp->bar1);
8347 pci_release_regions(pdev);
8348 pci_set_drvdata(pdev, NULL);
8349 free_netdev(dev);
8350 pci_disable_device(pdev);
8354 * s2io_starter - Entry point for the driver
8355 * Description: This function is the entry point for the driver. It verifies
8356 * the module loadable parameters and initializes PCI configuration space.
static int __init s2io_starter(void)
{
	/* Parameter validation is deferred to s2io_verify_parm() at probe. */
	return pci_register_driver(&s2io_driver);
}
* s2io_closer - Cleanup routine for the driver
* Description: This function is the cleanup routine for the driver. It
* unregisters the driver.
static __exit void s2io_closer(void)
{
	/* Tear down the PCI driver registration. */
	pci_unregister_driver(&s2io_driver);
	DBG_PRINT(INIT_DBG, "cleanup done\n");
}
8375 module_init(s2io_starter);
8376 module_exit(s2io_closer);
8378 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
8379 struct tcphdr **tcp, struct RxD_t *rxdp,
8380 struct s2io_nic *sp)
8382 int ip_off;
8383 u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
8385 if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
8386 DBG_PRINT(INIT_DBG,
8387 "%s: Non-TCP frames not supported for LRO\n",
8388 __func__);
8389 return -1;
8392 /* Checking for DIX type or DIX type with VLAN */
8393 if ((l2_type == 0) || (l2_type == 4)) {
8394 ip_off = HEADER_ETHERNET_II_802_3_SIZE;
8396 * If vlan stripping is disabled and the frame is VLAN tagged,
8397 * shift the offset by the VLAN header size bytes.
8399 if ((!sp->vlan_strip_flag) &&
8400 (rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
8401 ip_off += HEADER_VLAN_SIZE;
8402 } else {
8403 /* LLC, SNAP etc are considered non-mergeable */
8404 return -1;
8407 *ip = (struct iphdr *)((u8 *)buffer + ip_off);
8408 ip_len = (u8)((*ip)->ihl);
8409 ip_len <<= 2;
8410 *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
8412 return 0;
8415 static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
8416 struct tcphdr *tcp)
8418 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8419 if ((lro->iph->saddr != ip->saddr) ||
8420 (lro->iph->daddr != ip->daddr) ||
8421 (lro->tcph->source != tcp->source) ||
8422 (lro->tcph->dest != tcp->dest))
8423 return -1;
8424 return 0;
8427 static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
8429 return ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2);
8432 static void initiate_new_session(struct lro *lro, u8 *l2h,
8433 struct iphdr *ip, struct tcphdr *tcp,
8434 u32 tcp_pyld_len, u16 vlan_tag)
8436 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8437 lro->l2h = l2h;
8438 lro->iph = ip;
8439 lro->tcph = tcp;
8440 lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
8441 lro->tcp_ack = tcp->ack_seq;
8442 lro->sg_num = 1;
8443 lro->total_len = ntohs(ip->tot_len);
8444 lro->frags_len = 0;
8445 lro->vlan_tag = vlan_tag;
8447 * Check if we saw TCP timestamp.
8448 * Other consistency checks have already been done.
8450 if (tcp->doff == 8) {
8451 __be32 *ptr;
8452 ptr = (__be32 *)(tcp+1);
8453 lro->saw_ts = 1;
8454 lro->cur_tsval = ntohl(*(ptr+1));
8455 lro->cur_tsecr = *(ptr+2);
8457 lro->in_use = 1;
8460 static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
8462 struct iphdr *ip = lro->iph;
8463 struct tcphdr *tcp = lro->tcph;
8464 __sum16 nchk;
8465 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8467 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8469 /* Update L3 header */
8470 ip->tot_len = htons(lro->total_len);
8471 ip->check = 0;
8472 nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
8473 ip->check = nchk;
8475 /* Update L4 header */
8476 tcp->ack_seq = lro->tcp_ack;
8477 tcp->window = lro->window;
8479 /* Update tsecr field if this session has timestamps enabled */
8480 if (lro->saw_ts) {
8481 __be32 *ptr = (__be32 *)(tcp + 1);
8482 *(ptr+2) = lro->cur_tsecr;
8485 /* Update counters required for calculation of
8486 * average no. of packets aggregated.
8488 swstats->sum_avg_pkts_aggregated += lro->sg_num;
8489 swstats->num_aggregations++;
8492 static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
8493 struct tcphdr *tcp, u32 l4_pyld)
8495 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8496 lro->total_len += l4_pyld;
8497 lro->frags_len += l4_pyld;
8498 lro->tcp_next_seq += l4_pyld;
8499 lro->sg_num++;
8501 /* Update ack seq no. and window ad(from this pkt) in LRO object */
8502 lro->tcp_ack = tcp->ack_seq;
8503 lro->window = tcp->window;
8505 if (lro->saw_ts) {
8506 __be32 *ptr;
8507 /* Update tsecr and tsval from this packet */
8508 ptr = (__be32 *)(tcp+1);
8509 lro->cur_tsval = ntohl(*(ptr+1));
8510 lro->cur_tsecr = *(ptr + 2);
/*
 * Decide whether a TCP segment may be merged into an LRO session:
 * returns 0 when mergeable, -1 when the session must not aggregate it.
 */
static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
				    struct tcphdr *tcp, u32 tcp_pyld_len)
{
	u8 *ptr;

	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);

	if (!tcp_pyld_len) {
		/* Runt frame or a pure ack */
		return -1;
	}

	if (ip->ihl != 5)	/* IP has options */
		return -1;

	/* If we see CE codepoint in IP header, packet is not mergeable */
	if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
		return -1;

	/* If we see ECE or CWR flags in TCP header, packet is not mergeable */
	if (tcp->urg || tcp->psh || tcp->rst ||
	    tcp->syn || tcp->fin ||
	    tcp->ece || tcp->cwr || !tcp->ack) {
		/*
		 * Currently recognize only the ack control word and
		 * any other control field being set would result in
		 * flushing the LRO session
		 */
		return -1;
	}

	/*
	 * Allow only one TCP timestamp option. Don't aggregate if
	 * any other options are detected.
	 */
	if (tcp->doff != 5 && tcp->doff != 8)
		return -1;

	if (tcp->doff == 8) {
		/* Skip leading NOP padding, then require exactly the
		 * timestamp option.
		 */
		ptr = (u8 *)(tcp + 1);
		while (*ptr == TCPOPT_NOP)
			ptr++;
		if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
			return -1;

		/* Ensure timestamp value increases monotonically */
		if (l_lro)
			if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
				return -1;

		/* timestamp echo reply should be non-zero */
		if (*((__be32 *)(ptr+6)) == 0)
			return -1;
	}

	return 0;
}
/*
 * s2io_club_tcp_session - classify a received TCP segment for LRO.
 *
 * Returns (consumed by the Rx path to decide what to do with the skb):
 *   0 - all LRO sessions in use, *lro set to NULL (send packet up)
 *   1 - aggregated into an existing session
 *   2 - flush the matched session (out-of-sequence or non-mergeable pkt)
 *   3 - a new session was initiated for this flow
 *   4 - aggregated and the session hit lro_max_aggr_per_sess; flush it
 *   5 - packet is not L3/L4 aggregatable; send it up as-is
 *   otherwise - non-zero result of check_L2_lro_capable() (L2 reject)
 */
static int s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer,
				 u8 **tcp, u32 *tcp_len, struct lro **lro,
				 struct RxD_t *rxdp, struct s2io_nic *sp)
{
	struct iphdr *ip;
	struct tcphdr *tcph;
	int ret = 0, i;
	u16 vlan_tag = 0;
	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;

	/* Locate the IP/TCP headers; bail out on non-LRO-capable L2 frames. */
	ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
				   rxdp, sp);
	if (ret)
		return ret;

	DBG_PRINT(INFO_DBG, "IP Saddr: %x Daddr: %x\n", ip->saddr, ip->daddr);

	vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2);
	tcph = (struct tcphdr *)*tcp;
	*tcp_len = get_l4_pyld_length(ip, tcph);

	/* First pass: look for an in-use session matching this 4-tuple. */
	for (i = 0; i < MAX_LRO_SESSIONS; i++) {
		struct lro *l_lro = &ring_data->lro0_n[i];
		if (l_lro->in_use) {
			if (check_for_socket_match(l_lro, ip, tcph))
				continue;
			/* Sock pair matched */
			*lro = l_lro;

			if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
				DBG_PRINT(INFO_DBG, "%s: Out of sequence. "
					  "expected 0x%x, actual 0x%x\n",
					  __func__,
					  (*lro)->tcp_next_seq,
					  ntohl(tcph->seq));

				swstats->outof_sequence_pkts++;
				ret = 2;
				break;
			}

			if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,
						      *tcp_len))
				ret = 1; /* Aggregate */
			else
				ret = 2; /* Flush both */
			break;
		}
	}

	if (ret == 0) {
		/* Before searching for available LRO objects,
		 * check if the pkt is L3/L4 aggregatable. If not
		 * don't create new LRO session. Just send this
		 * packet up.
		 */
		if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len))
			return 5;

		/* Second pass: claim a free session slot for this flow. */
		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
			struct lro *l_lro = &ring_data->lro0_n[i];
			if (!(l_lro->in_use)) {
				*lro = l_lro;
				ret = 3; /* Begin anew */
				break;
			}
		}
	}

	if (ret == 0) { /* sessions exceeded */
		DBG_PRINT(INFO_DBG, "%s: All LRO sessions already in use\n",
			  __func__);
		*lro = NULL;
		return ret;
	}

	switch (ret) {
	case 3:
		initiate_new_session(*lro, buffer, ip, tcph, *tcp_len,
				     vlan_tag);
		break;
	case 2:
		update_L3L4_header(sp, *lro);
		break;
	case 1:
		aggregate_new_rx(*lro, ip, tcph, *tcp_len);
		/* Session reached its aggregation limit: finalize headers
		 * and tell the caller to flush it (ret = 4).
		 */
		if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
			update_L3L4_header(sp, *lro);
			ret = 4; /* Flush the LRO */
		}
		break;
	default:
		DBG_PRINT(ERR_DBG, "%s: Don't know, can't say!!\n", __func__);
		break;
	}

	return ret;
}
8670 static void clear_lro_session(struct lro *lro)
8672 static u16 lro_struct_size = sizeof(struct lro);
8674 memset(lro, 0, lro_struct_size);
8677 static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
8679 struct net_device *dev = skb->dev;
8680 struct s2io_nic *sp = netdev_priv(dev);
8682 skb->protocol = eth_type_trans(skb, dev);
8683 if (sp->vlgrp && vlan_tag && (sp->vlan_strip_flag)) {
8684 /* Queueing the vlan frame to the upper layer */
8685 if (sp->config.napi)
8686 vlan_hwaccel_receive_skb(skb, sp->vlgrp, vlan_tag);
8687 else
8688 vlan_hwaccel_rx(skb, sp->vlgrp, vlan_tag);
8689 } else {
8690 if (sp->config.napi)
8691 netif_receive_skb(skb);
8692 else
8693 netif_rx(skb);
/*
 * lro_append_pkt - append a segment's payload to an LRO session's skb.
 *
 * Chains @skb (trimmed down to its TCP payload) onto the frag_list of
 * the session's parent skb and updates the parent's length accounting.
 */
static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
			   struct sk_buff *skb, u32 tcp_len)
{
	struct sk_buff *first = lro->parent;
	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;

	/* Account the new payload on the head skb of the session. */
	first->len += tcp_len;
	first->data_len = lro->frags_len;

	/* Strip everything except the TCP payload from the new skb. */
	skb_pull(skb, (skb->len - tcp_len));

	/* Link after the previous fragment, or start the frag_list. */
	if (skb_shinfo(first)->frag_list)
		lro->last_frag->next = skb;
	else
		skb_shinfo(first)->frag_list = skb;

	first->truesize += skb->truesize;
	lro->last_frag = skb;
	swstats->clubbed_frms_cnt++;
}
8716 * s2io_io_error_detected - called when PCI error is detected
8717 * @pdev: Pointer to PCI device
8718 * @state: The current pci connection state
8720 * This function is called after a PCI bus error affecting
8721 * this device has been detected.
8723 static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
8724 pci_channel_state_t state)
8726 struct net_device *netdev = pci_get_drvdata(pdev);
8727 struct s2io_nic *sp = netdev_priv(netdev);
8729 netif_device_detach(netdev);
8731 if (state == pci_channel_io_perm_failure)
8732 return PCI_ERS_RESULT_DISCONNECT;
8734 if (netif_running(netdev)) {
8735 /* Bring down the card, while avoiding PCI I/O */
8736 do_s2io_card_down(sp, 0);
8738 pci_disable_device(pdev);
8740 return PCI_ERS_RESULT_NEED_RESET;
/**
 * s2io_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct s2io_nic *sp = netdev_priv(netdev);

	/* Re-enable PCI access; give up if the device stays dead. */
	if (pci_enable_device(pdev)) {
		pr_err("Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	s2io_reset(sp);

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * s2io_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that its OK to resume normal operation.
 */
static void s2io_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct s2io_nic *sp = netdev_priv(netdev);

	if (netif_running(netdev)) {
		/* Bring the adapter back up before re-attaching. */
		if (s2io_card_up(sp)) {
			pr_err("Can't bring device back up after reset.\n");
			return;
		}

		/* Restore the station address; take the card back down
		 * if that fails so we don't run with a wrong MAC.
		 */
		if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
			s2io_card_down(sp);
			pr_err("Can't restore mac addr after reset.\n");
			return;
		}
	}

	netif_device_attach(netdev);
	netif_tx_wake_all_queues(netdev);
}