/************************************************************************
 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
 * Copyright(c) 2002-2010 Exar Corp.
 *
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice. This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * Credits:
 * Jeff Garzik		: For pointing out the improper error condition
 *			  check in the s2io_xmit routine and also some
 *			  issues in the Tx watch dog function. Also for
 *			  patiently answering all those innumerable
 *			  questions regarding the 2.6 porting issues.
 * Stephen Hemminger	: Providing proper 2.6 porting mechanism for some
 *			  macros available only in 2.6 Kernel.
 * Francois Romieu	: For pointing out all the code parts that were
 *			  deprecated and also styling related comments.
 * Grant Grundler	: For helping me get rid of some Architecture
 *			  dependent code.
 * Christopher Hellwig	: Some more 2.6 specific issues in the driver.
 *
 * The module loadable parameters that are supported by the driver and a brief
 * explanation of all the variables:
 *
 * rx_ring_num : This can be used to program the number of receive rings used
 *     in the driver.
 * rx_ring_sz: This defines the number of receive blocks each ring can have.
 *     This is also an array of size 8.
 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
 *     values are 1, 2.
 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
 * tx_fifo_len: This too is an array of 8. Each element defines the number of
 *     Tx descriptors that can be associated with each corresponding FIFO.
 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
 *     2(MSI_X). Default value is '2(MSI_X)'.
 * lro_max_pkts: This parameter defines the maximum number of packets that can
 *     be aggregated as a single large packet.
 * napi: This parameter is used to enable/disable NAPI (polling Rx).
 *     Possible values '1' for enable and '0' for disable. Default is '1'.
 * ufo: This parameter is used to enable/disable UDP Fragmentation Offload(UFO).
 *     Possible values '1' for enable and '0' for disable. Default is '0'.
 * vlan_tag_strip: This can be used to enable or disable vlan stripping.
 *     Possible values '1' for enable, '0' for disable.
 *     Default is '2' - which means disable in promisc mode
 *     and enable in non-promiscuous mode.
 * multiq: This parameter is used to enable/disable MULTIQUEUE support.
 *     Possible values '1' for enable and '0' for disable. Default is '0'.
 ************************************************************************/
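/*
 * A minimal usage sketch (illustrative only; the values below are
 * hypothetical examples, not recommendations): the knobs above are
 * ordinary module parameters, so a multiqueue MSI-X configuration
 * could be requested at load time with
 *
 *	modprobe s2io tx_fifo_num=4 rx_ring_num=4 intr_type=2 multiq=1
 */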
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/mdio.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/stddef.h>
#include <linux/ioctl.h>
#include <linux/timex.h>
#include <linux/ethtool.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <net/tcp.h>

#include <asm/system.h>
#include <asm/div64.h>
#include <asm/irq.h>

/* local include */
#include "s2io.h"
#include "s2io-regs.h"
#define DRV_VERSION "2.0.26.27"

/* S2io Driver name & version. */
static char s2io_driver_name[] = "Neterion";
static char s2io_driver_version[] = DRV_VERSION;

static int rxd_size[2] = {32, 48};
static int rxd_count[2] = {127, 85};
static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
{
	int ret;

	ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
	       (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));

	return ret;
}
/*
 * Cards with following subsystem_id have a link state indication
 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
 * macro below identifies these cards given the subsystem_id.
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid)		\
	(dev_type == XFRAME_I_DEVICE) ?					\
	((((subid >= 0x600B) && (subid <= 0x600D)) ||			\
	  ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0

#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
				      ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
static inline int is_s2io_card_up(const struct s2io_nic *sp)
{
	return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
}
/* Ethtool related variables and Macros. */
static const char s2io_gstrings[][ETH_GSTRING_LEN] = {
	"Register test\t(offline)",
	"Eeprom test\t(offline)",
	"Link test\t(online)",
	"RLDRAM test\t(offline)",
	"BIST Test\t(offline)"
};
static const char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
	{"tmac_frms"},
	{"tmac_data_octets"},
	{"tmac_drop_frms"},
	{"tmac_mcst_frms"},
	{"tmac_bcst_frms"},
	{"tmac_pause_ctrl_frms"},
	{"tmac_ttl_octets"},
	{"tmac_ucst_frms"},
	{"tmac_nucst_frms"},
	{"tmac_any_err_frms"},
	{"tmac_ttl_less_fb_octets"},
	{"tmac_vld_ip_octets"},
	{"tmac_vld_ip"},
	{"tmac_drop_ip"},
	{"tmac_icmp"},
	{"tmac_rst_tcp"},
	{"tmac_tcp"},
	{"tmac_udp"},
	{"rmac_vld_frms"},
	{"rmac_data_octets"},
	{"rmac_fcs_err_frms"},
	{"rmac_drop_frms"},
	{"rmac_vld_mcst_frms"},
	{"rmac_vld_bcst_frms"},
	{"rmac_in_rng_len_err_frms"},
	{"rmac_out_rng_len_err_frms"},
	{"rmac_long_frms"},
	{"rmac_pause_ctrl_frms"},
	{"rmac_unsup_ctrl_frms"},
	{"rmac_ttl_octets"},
	{"rmac_accepted_ucst_frms"},
	{"rmac_accepted_nucst_frms"},
	{"rmac_discarded_frms"},
	{"rmac_drop_events"},
	{"rmac_ttl_less_fb_octets"},
	{"rmac_ttl_frms"},
	{"rmac_usized_frms"},
	{"rmac_osized_frms"},
	{"rmac_frag_frms"},
	{"rmac_jabber_frms"},
	{"rmac_ttl_64_frms"},
	{"rmac_ttl_65_127_frms"},
	{"rmac_ttl_128_255_frms"},
	{"rmac_ttl_256_511_frms"},
	{"rmac_ttl_512_1023_frms"},
	{"rmac_ttl_1024_1518_frms"},
	{"rmac_ip"},
	{"rmac_ip_octets"},
	{"rmac_hdr_err_ip"},
	{"rmac_drop_ip"},
	{"rmac_icmp"},
	{"rmac_tcp"},
	{"rmac_udp"},
	{"rmac_err_drp_udp"},
	{"rmac_xgmii_err_sym"},
	{"rmac_frms_q0"},
	{"rmac_frms_q1"},
	{"rmac_frms_q2"},
	{"rmac_frms_q3"},
	{"rmac_frms_q4"},
	{"rmac_frms_q5"},
	{"rmac_frms_q6"},
	{"rmac_frms_q7"},
	{"rmac_full_q0"},
	{"rmac_full_q1"},
	{"rmac_full_q2"},
	{"rmac_full_q3"},
	{"rmac_full_q4"},
	{"rmac_full_q5"},
	{"rmac_full_q6"},
	{"rmac_full_q7"},
	{"rmac_pause_cnt"},
	{"rmac_xgmii_data_err_cnt"},
	{"rmac_xgmii_ctrl_err_cnt"},
	{"rmac_accepted_ip"},
	{"rmac_err_tcp"},
	{"rd_req_cnt"},
	{"new_rd_req_cnt"},
	{"new_rd_req_rtry_cnt"},
	{"rd_rtry_cnt"},
	{"wr_rtry_rd_ack_cnt"},
	{"wr_req_cnt"},
	{"new_wr_req_cnt"},
	{"new_wr_req_rtry_cnt"},
	{"wr_rtry_cnt"},
	{"wr_disc_cnt"},
	{"rd_rtry_wr_ack_cnt"},
	{"txp_wr_cnt"},
	{"txd_rd_cnt"},
	{"txd_wr_cnt"},
	{"rxd_rd_cnt"},
	{"rxd_wr_cnt"},
	{"txf_rd_cnt"},
	{"rxf_wr_cnt"}
};
static const char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
	{"rmac_ttl_1519_4095_frms"},
	{"rmac_ttl_4096_8191_frms"},
	{"rmac_ttl_8192_max_frms"},
	{"rmac_ttl_gt_max_frms"},
	{"rmac_osized_alt_frms"},
	{"rmac_jabber_alt_frms"},
	{"rmac_gt_max_alt_frms"},
	{"rmac_vlan_frms"},
	{"rmac_len_discard"},
	{"rmac_fcs_discard"},
	{"rmac_pf_discard"},
	{"rmac_da_discard"},
	{"rmac_red_discard"},
	{"rmac_rts_discard"},
	{"rmac_ingm_full_discard"},
	{"link_fault_cnt"}
};
static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
	{"\n DRIVER STATISTICS"},
	{"single_bit_ecc_errs"},
	{"double_bit_ecc_errs"},
	{"parity_err_cnt"},
	{"serious_err_cnt"},
	{"soft_reset_cnt"},
	{"fifo_full_cnt"},
	{"ring_0_full_cnt"},
	{"ring_1_full_cnt"},
	{"ring_2_full_cnt"},
	{"ring_3_full_cnt"},
	{"ring_4_full_cnt"},
	{"ring_5_full_cnt"},
	{"ring_6_full_cnt"},
	{"ring_7_full_cnt"},
	{"alarm_transceiver_temp_high"},
	{"alarm_transceiver_temp_low"},
	{"alarm_laser_bias_current_high"},
	{"alarm_laser_bias_current_low"},
	{"alarm_laser_output_power_high"},
	{"alarm_laser_output_power_low"},
	{"warn_transceiver_temp_high"},
	{"warn_transceiver_temp_low"},
	{"warn_laser_bias_current_high"},
	{"warn_laser_bias_current_low"},
	{"warn_laser_output_power_high"},
	{"warn_laser_output_power_low"},
	{"lro_aggregated_pkts"},
	{"lro_flush_both_count"},
	{"lro_out_of_sequence_pkts"},
	{"lro_flush_due_to_max_pkts"},
	{"lro_avg_aggr_pkts"},
	{"mem_alloc_fail_cnt"},
	{"pci_map_fail_cnt"},
	{"watchdog_timer_cnt"},
	{"mem_allocated"},
	{"mem_freed"},
	{"link_up_cnt"},
	{"link_down_cnt"},
	{"link_up_time"},
	{"link_down_time"},
	{"tx_tcode_buf_abort_cnt"},
	{"tx_tcode_desc_abort_cnt"},
	{"tx_tcode_parity_err_cnt"},
	{"tx_tcode_link_loss_cnt"},
	{"tx_tcode_list_proc_err_cnt"},
	{"rx_tcode_parity_err_cnt"},
	{"rx_tcode_abort_cnt"},
	{"rx_tcode_parity_abort_cnt"},
	{"rx_tcode_rda_fail_cnt"},
	{"rx_tcode_unkn_prot_cnt"},
	{"rx_tcode_fcs_err_cnt"},
	{"rx_tcode_buf_size_err_cnt"},
	{"rx_tcode_rxd_corrupt_cnt"},
	{"rx_tcode_unkn_err_cnt"},
	{"tda_err_cnt"},
	{"pfc_err_cnt"},
	{"pcc_err_cnt"},
	{"tti_err_cnt"},
	{"tpa_err_cnt"},
	{"sm_err_cnt"},
	{"lso_err_cnt"},
	{"mac_tmac_err_cnt"},
	{"mac_rmac_err_cnt"},
	{"xgxs_txgxs_err_cnt"},
	{"xgxs_rxgxs_err_cnt"},
	{"rc_err_cnt"},
	{"prc_pcix_err_cnt"},
	{"rpa_err_cnt"},
	{"rda_err_cnt"},
	{"rti_err_cnt"},
	{"mc_err_cnt"}
};
#define S2IO_XENA_STAT_LEN	ARRAY_SIZE(ethtool_xena_stats_keys)
#define S2IO_ENHANCED_STAT_LEN	ARRAY_SIZE(ethtool_enhanced_stats_keys)
#define S2IO_DRIVER_STAT_LEN	ARRAY_SIZE(ethtool_driver_stats_keys)

#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)

#define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
#define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)

#define S2IO_TEST_LEN	ARRAY_SIZE(s2io_gstrings)
#define S2IO_STRINGS_LEN	(S2IO_TEST_LEN * ETH_GSTRING_LEN)
#define S2IO_TIMER_CONF(timer, handle, arg, exp)	\
	init_timer(&timer);				\
	timer.function = handle;			\
	timer.data = (unsigned long)arg;		\
	mod_timer(&timer, (jiffies + exp))		\
/* copy mac addr to def_mac_addr array */
static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
{
	sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
	sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
	sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
	sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
	sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
	sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
}
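/*
 * For example, mac_addr == 0x001122334455ULL unpacks to the on-wire
 * order 00:11:22:33:44:55 - mac_addr[0] comes from bits 47:40 and
 * mac_addr[5] from bits 7:0.
 */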
/* Add the vlan */
static void s2io_vlan_rx_register(struct net_device *dev,
				  struct vlan_group *grp)
{
	int i;
	struct s2io_nic *nic = netdev_priv(dev);
	unsigned long flags[MAX_TX_FIFOS];
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;

	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];

		spin_lock_irqsave(&fifo->tx_lock, flags[i]);
	}

	nic->vlgrp = grp;

	for (i = config->tx_fifo_num - 1; i >= 0; i--) {
		struct fifo_info *fifo = &mac_control->fifos[i];

		spin_unlock_irqrestore(&fifo->tx_lock, flags[i]);
	}
}
/* Unregister the vlan */
static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	int i;
	struct s2io_nic *nic = netdev_priv(dev);
	unsigned long flags[MAX_TX_FIFOS];
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;

	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];

		spin_lock_irqsave(&fifo->tx_lock, flags[i]);
	}

	if (nic->vlgrp)
		vlan_group_set_device(nic->vlgrp, vid, NULL);

	for (i = config->tx_fifo_num - 1; i >= 0; i--) {
		struct fifo_info *fifo = &mac_control->fifos[i];

		spin_unlock_irqrestore(&fifo->tx_lock, flags[i]);
	}
}
/*
 * Constants to be programmed into the Xena's registers, to configure
 * the XAUI.
 */

#define	END_SIGN	0x0
static const u64 herc_act_dtx_cfg[] = {
	/* Set address */
	0x8000051536750000ULL, 0x80000515367500E0ULL,
	/* Write data */
	0x8000051536750004ULL, 0x80000515367500E4ULL,
	/* Set address */
	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
	/* Write data */
	0x80010515003F0004ULL, 0x80010515003F00E4ULL,
	/* Set address */
	0x801205150D440000ULL, 0x801205150D4400E0ULL,
	/* Write data */
	0x801205150D440004ULL, 0x801205150D4400E4ULL,
	/* Set address */
	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	/* Done */
	END_SIGN
};
static const u64 xena_dtx_cfg[] = {
	/* Set address */
	0x8000051500000000ULL, 0x80000515000000E0ULL,
	/* Write data */
	0x80000515D9350004ULL, 0x80000515D93500E4ULL,
	/* Set address */
	0x8001051500000000ULL, 0x80010515000000E0ULL,
	/* Write data */
	0x80010515001E0004ULL, 0x80010515001E00E4ULL,
	/* Set address */
	0x8002051500000000ULL, 0x80020515000000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	END_SIGN
};
/*
 * Constants for Fixing the MacAddress problem seen mostly on
 * Alpha machines.
 */
static const u64 fix_mac[] = {
	0x0060000000000000ULL, 0x0060600000000000ULL,
	0x0040600000000000ULL, 0x0000600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0000600000000000ULL,
	0x0040600000000000ULL, 0x0060600000000000ULL,
	END_SIGN
};
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


/* Module Loadable parameters. */
S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
S2IO_PARM_INT(rx_ring_num, 1);
S2IO_PARM_INT(multiq, 0);
S2IO_PARM_INT(rx_ring_mode, 1);
S2IO_PARM_INT(use_continuous_tx_intrs, 1);
S2IO_PARM_INT(rmac_pause_time, 0x100);
S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
S2IO_PARM_INT(shared_splits, 0);
S2IO_PARM_INT(tmac_util_period, 5);
S2IO_PARM_INT(rmac_util_period, 5);
S2IO_PARM_INT(l3l4hdr_size, 128);
/* 0 is no steering, 1 is Priority steering, 2 is Default steering */
S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
/* Frequency of Rx desc syncs expressed as power of 2 */
S2IO_PARM_INT(rxsync_frequency, 3);
/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
S2IO_PARM_INT(intr_type, 2);
/* Large receive offload feature */

/* Max pkts to be aggregated by LRO at one time. If not specified,
 * aggregation happens until we hit max IP pkt size(64K)
 */
S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
S2IO_PARM_INT(indicate_max_pkts, 0);

S2IO_PARM_INT(napi, 1);
S2IO_PARM_INT(ufo, 0);
S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
	{DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
	{[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
static unsigned int rts_frm_len[MAX_RX_RINGS] =
	{[0 ...(MAX_RX_RINGS - 1)] = 0 };

module_param_array(tx_fifo_len, uint, NULL, 0);
module_param_array(rx_ring_sz, uint, NULL, 0);
module_param_array(rts_frm_len, uint, NULL, 0);
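/*
 * The "[1 ...(MAX_TX_FIFOS - 1)] =" form above is GCC's designated
 * range initializer extension: for example, int v[4] = {9, [1 ... 3] = 7}
 * yields {9, 7, 7, 7}. Every FIFO after the first therefore defaults
 * to DEFAULT_FIFO_1_7_LEN.
 */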
/*
 * S2IO device table.
 * This table lists all the devices that this driver supports.
 */
static DEFINE_PCI_DEVICE_TABLE(s2io_tbl) = {
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{0,}
};

MODULE_DEVICE_TABLE(pci, s2io_tbl);
static struct pci_error_handlers s2io_err_handler = {
	.error_detected = s2io_io_error_detected,
	.slot_reset = s2io_io_slot_reset,
	.resume = s2io_io_resume,
};

static struct pci_driver s2io_driver = {
	.name = "S2IO",
	.id_table = s2io_tbl,
	.probe = s2io_init_nic,
	.remove = __devexit_p(s2io_rem_nic),
	.err_handler = &s2io_err_handler,
};
/* A simplifier macro used both by init and free shared_mem Fns(). */
#define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
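/*
 * This is the usual ceiling-division idiom: for len descriptor lists at
 * per_each lists per page, e.g. len = 100 and per_each = 32 gives
 * (100 + 31) / 32 = 4 pages, one more than the truncating 100 / 32 = 3.
 */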
/* netqueue manipulation helper functions */
static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
{
	if (!sp->config.multiq) {
		int i;

		for (i = 0; i < sp->config.tx_fifo_num; i++)
			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
	}
	netif_tx_stop_all_queues(sp->dev);
}

static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
{
	if (!sp->config.multiq)
		sp->mac_control.fifos[fifo_no].queue_state =
			FIFO_QUEUE_STOP;

	netif_tx_stop_all_queues(sp->dev);
}

static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
{
	if (!sp->config.multiq) {
		int i;

		for (i = 0; i < sp->config.tx_fifo_num; i++)
			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
	}
	netif_tx_start_all_queues(sp->dev);
}

static inline void s2io_start_tx_queue(struct s2io_nic *sp, int fifo_no)
{
	if (!sp->config.multiq)
		sp->mac_control.fifos[fifo_no].queue_state =
			FIFO_QUEUE_START;

	netif_tx_start_all_queues(sp->dev);
}

static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
{
	if (!sp->config.multiq) {
		int i;

		for (i = 0; i < sp->config.tx_fifo_num; i++)
			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
	}
	netif_tx_wake_all_queues(sp->dev);
}

static inline void s2io_wake_tx_queue(
	struct fifo_info *fifo, int cnt, u8 multiq)
{

	if (multiq) {
		if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
			netif_wake_subqueue(fifo->dev, fifo->fifo_no);
	} else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
		if (netif_queue_stopped(fifo->dev)) {
			fifo->queue_state = FIFO_QUEUE_START;
			netif_wake_queue(fifo->dev);
		}
	}
}
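/*
 * In the non-multiqueue case the stack sees only a single Tx queue, so
 * the helpers above mirror each FIFO's intended state in queue_state;
 * s2io_wake_tx_queue() consults that shadow state before waking the
 * shared netdev queue.
 */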
/**
 * init_shared_mem - Allocation and Initialization of Memory
 * @nic: Device private variable.
 * Description: The function allocates all the memory areas shared
 * between the NIC and the driver. This includes Tx descriptors,
 * Rx descriptors and the statistics block.
 */

static int init_shared_mem(struct s2io_nic *nic)
{
	u32 size;
	void *tmp_v_addr, *tmp_v_addr_next;
	dma_addr_t tmp_p_addr, tmp_p_addr_next;
	struct RxD_block *pre_rxd_blk = NULL;
	int i, j, blk_cnt;
	int lst_size, lst_per_page;
	struct net_device *dev = nic->dev;
	unsigned long tmp;
	struct buffAdd *ba;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;
	unsigned long long mem_allocated = 0;

	/* Allocation and initialization of TXDLs in FIFOs */
	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		size += tx_cfg->fifo_len;
	}
	if (size > MAX_AVAILABLE_TXDS) {
		DBG_PRINT(ERR_DBG,
			  "Too many TxDs requested: %d, max supported: %d\n",
			  size, MAX_AVAILABLE_TXDS);
		return -EINVAL;
	}

	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		size = tx_cfg->fifo_len;
		/*
		 * Legal values are from 2 to 8192
		 */
		if (size < 2) {
			DBG_PRINT(ERR_DBG, "Fifo %d: Invalid length (%d) - "
				  "Valid lengths are 2 through 8192\n",
				  i, size);
			return -EINVAL;
		}
	}

	lst_size = (sizeof(struct TxD) * config->max_txds);
	lst_per_page = PAGE_SIZE / lst_size;

	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
		int fifo_len = tx_cfg->fifo_len;
		int list_holder_size = fifo_len * sizeof(struct list_info_hold);

		fifo->list_info = kzalloc(list_holder_size, GFP_KERNEL);
		if (!fifo->list_info) {
			DBG_PRINT(INFO_DBG, "Malloc failed for list_info\n");
			return -ENOMEM;
		}
		mem_allocated += list_holder_size;
	}
	for (i = 0; i < config->tx_fifo_num; i++) {
		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
						lst_per_page);
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		fifo->tx_curr_put_info.offset = 0;
		fifo->tx_curr_put_info.fifo_len = tx_cfg->fifo_len - 1;
		fifo->tx_curr_get_info.offset = 0;
		fifo->tx_curr_get_info.fifo_len = tx_cfg->fifo_len - 1;
		fifo->fifo_no = i;
		fifo->nic = nic;
		fifo->max_txds = MAX_SKB_FRAGS + 2;
		fifo->dev = dev;

		for (j = 0; j < page_num; j++) {
			int k = 0;
			dma_addr_t tmp_p;
			void *tmp_v;
			tmp_v = pci_alloc_consistent(nic->pdev,
						     PAGE_SIZE, &tmp_p);
			if (!tmp_v) {
				DBG_PRINT(INFO_DBG,
					  "pci_alloc_consistent failed for TxDL\n");
				return -ENOMEM;
			}
			/* If we got a zero DMA address(can happen on
			 * certain platforms like PPC), reallocate.
			 * Store virtual address of page we don't want,
			 * to be freed later.
			 */
			if (!tmp_p) {
				mac_control->zerodma_virt_addr = tmp_v;
				DBG_PRINT(INIT_DBG,
					  "%s: Zero DMA address for TxDL. "
					  "Virtual address %p\n",
					  dev->name, tmp_v);
				tmp_v = pci_alloc_consistent(nic->pdev,
							     PAGE_SIZE, &tmp_p);
				if (!tmp_v) {
					DBG_PRINT(INFO_DBG,
						  "pci_alloc_consistent failed for TxDL\n");
					return -ENOMEM;
				}
				mem_allocated += PAGE_SIZE;
			}
			while (k < lst_per_page) {
				int l = (j * lst_per_page) + k;
				if (l == tx_cfg->fifo_len)
					break;
				fifo->list_info[l].list_virt_addr =
					tmp_v + (k * lst_size);
				fifo->list_info[l].list_phy_addr =
					tmp_p + (k * lst_size);
				k++;
			}
		}
	}

	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		size = tx_cfg->fifo_len;
		fifo->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
		if (!fifo->ufo_in_band_v)
			return -ENOMEM;
		mem_allocated += (size * sizeof(u64));
	}

	/* Allocation and initialization of RXDs in Rings */
	size = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
		struct ring_info *ring = &mac_control->rings[i];

		if (rx_cfg->num_rxd % (rxd_count[nic->rxd_mode] + 1)) {
			DBG_PRINT(ERR_DBG, "%s: Ring%d RxD count is not a "
				  "multiple of RxDs per Block\n",
				  dev->name, i);
			return FAILURE;
		}
		size += rx_cfg->num_rxd;
		ring->block_count = rx_cfg->num_rxd /
			(rxd_count[nic->rxd_mode] + 1);
		ring->pkt_cnt = rx_cfg->num_rxd - ring->block_count;
	}
	if (nic->rxd_mode == RXD_MODE_1)
		size = (size * (sizeof(struct RxD1)));
	else
		size = (size * (sizeof(struct RxD3)));

	for (i = 0; i < config->rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
		struct ring_info *ring = &mac_control->rings[i];

		ring->rx_curr_get_info.block_index = 0;
		ring->rx_curr_get_info.offset = 0;
		ring->rx_curr_get_info.ring_len = rx_cfg->num_rxd - 1;
		ring->rx_curr_put_info.block_index = 0;
		ring->rx_curr_put_info.offset = 0;
		ring->rx_curr_put_info.ring_len = rx_cfg->num_rxd - 1;
		ring->nic = nic;
		ring->ring_no = i;

		blk_cnt = rx_cfg->num_rxd / (rxd_count[nic->rxd_mode] + 1);
		/* Allocating all the Rx blocks */
		for (j = 0; j < blk_cnt; j++) {
			struct rx_block_info *rx_blocks;
			int l;

			rx_blocks = &ring->rx_blocks[j];
			size = SIZE_OF_BLOCK;	/* size is always page size */
			tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
							  &tmp_p_addr);
			if (tmp_v_addr == NULL) {
				/*
				 * In case of failure, free_shared_mem()
				 * is called, which should free any
				 * memory that was alloced till the
				 * failure happened.
				 */
				rx_blocks->block_virt_addr = tmp_v_addr;
				return -ENOMEM;
			}
			mem_allocated += size;
			memset(tmp_v_addr, 0, size);

			size = sizeof(struct rxd_info) *
				rxd_count[nic->rxd_mode];
			rx_blocks->block_virt_addr = tmp_v_addr;
			rx_blocks->block_dma_addr = tmp_p_addr;
			rx_blocks->rxds = kmalloc(size, GFP_KERNEL);
			if (!rx_blocks->rxds)
				return -ENOMEM;
			mem_allocated += size;
			for (l = 0; l < rxd_count[nic->rxd_mode]; l++) {
				rx_blocks->rxds[l].virt_addr =
					rx_blocks->block_virt_addr +
					(rxd_size[nic->rxd_mode] * l);
				rx_blocks->rxds[l].dma_addr =
					rx_blocks->block_dma_addr +
					(rxd_size[nic->rxd_mode] * l);
			}
		}
		/* Interlinking all Rx Blocks */
		for (j = 0; j < blk_cnt; j++) {
			int next = (j + 1) % blk_cnt;
			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
			tmp_v_addr_next = ring->rx_blocks[next].block_virt_addr;
			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
			tmp_p_addr_next = ring->rx_blocks[next].block_dma_addr;

			pre_rxd_blk = (struct RxD_block *)tmp_v_addr;
			pre_rxd_blk->reserved_2_pNext_RxD_block =
				(unsigned long)tmp_v_addr_next;
			pre_rxd_blk->pNext_RxD_Blk_physical =
				(u64)tmp_p_addr_next;
		}
	}
	if (nic->rxd_mode == RXD_MODE_3B) {
		/*
		 * Allocation of Storages for buffer addresses in 2BUFF mode
		 * and the buffers as well.
		 */
		for (i = 0; i < config->rx_ring_num; i++) {
			struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
			struct ring_info *ring = &mac_control->rings[i];

			blk_cnt = rx_cfg->num_rxd /
				(rxd_count[nic->rxd_mode] + 1);
			size = sizeof(struct buffAdd *) * blk_cnt;
			ring->ba = kmalloc(size, GFP_KERNEL);
			if (!ring->ba)
				return -ENOMEM;
			mem_allocated += size;
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;

				size = sizeof(struct buffAdd) *
					(rxd_count[nic->rxd_mode] + 1);
				ring->ba[j] = kmalloc(size, GFP_KERNEL);
				if (!ring->ba[j])
					return -ENOMEM;
				mem_allocated += size;
				while (k != rxd_count[nic->rxd_mode]) {
					ba = &ring->ba[j][k];
					size = BUF0_LEN + ALIGN_SIZE;
					ba->ba_0_org = kmalloc(size, GFP_KERNEL);
					if (!ba->ba_0_org)
						return -ENOMEM;
					mem_allocated += size;
					tmp = (unsigned long)ba->ba_0_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long)ALIGN_SIZE);
					ba->ba_0 = (void *)tmp;

					size = BUF1_LEN + ALIGN_SIZE;
					ba->ba_1_org = kmalloc(size, GFP_KERNEL);
					if (!ba->ba_1_org)
						return -ENOMEM;
					mem_allocated += size;
					tmp = (unsigned long)ba->ba_1_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long)ALIGN_SIZE);
					ba->ba_1 = (void *)tmp;
					k++;
				}
			}
		}
	}

	/* Allocation and initialization of Statistics block */
	size = sizeof(struct stat_block);
	mac_control->stats_mem =
		pci_alloc_consistent(nic->pdev, size,
				     &mac_control->stats_mem_phy);

	if (!mac_control->stats_mem) {
		/*
		 * In case of failure, free_shared_mem() is called, which
		 * should free any memory that was alloced till the
		 * failure happened.
		 */
		return -ENOMEM;
	}
	mem_allocated += size;
	mac_control->stats_mem_sz = size;

	tmp_v_addr = mac_control->stats_mem;
	mac_control->stats_info = (struct stat_block *)tmp_v_addr;
	memset(tmp_v_addr, 0, size);
	DBG_PRINT(INIT_DBG, "%s: Ring Mem PHY: 0x%llx\n",
		  dev_name(&nic->pdev->dev), (unsigned long long)tmp_p_addr);
	mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
	return SUCCESS;
}
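/*
 * The ba_0/ba_1 setup above is the classic over-allocate-and-mask
 * alignment trick. Assuming ALIGN_SIZE is a power of two minus one
 * (e.g. 127 for 128-byte alignment): with ba_0_org == 0x1010,
 * tmp = 0x1010 + 127 = 0x108f, and 0x108f & ~127 = 0x1080, the first
 * 128-byte-aligned address inside the padded allocation.
 */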
/**
 * free_shared_mem - Free the allocated Memory
 * @nic: Device private variable.
 * Description: This function is to free all memory locations allocated by
 * the init_shared_mem() function and return it to the kernel.
 */

static void free_shared_mem(struct s2io_nic *nic)
{
	int i, j, blk_cnt, size;
	void *tmp_v_addr;
	dma_addr_t tmp_p_addr;
	int lst_size, lst_per_page;
	struct net_device *dev;
	int page_num = 0;
	struct config_param *config;
	struct mac_info *mac_control;
	struct stat_block *stats;
	struct swStat *swstats;

	if (!nic)
		return;

	dev = nic->dev;

	config = &nic->config;
	mac_control = &nic->mac_control;
	stats = mac_control->stats_info;
	swstats = &stats->sw_stat;

	lst_size = sizeof(struct TxD) * config->max_txds;
	lst_per_page = PAGE_SIZE / lst_size;

	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		page_num = TXD_MEM_PAGE_CNT(tx_cfg->fifo_len, lst_per_page);
		for (j = 0; j < page_num; j++) {
			int mem_blks = (j * lst_per_page);
			struct list_info_hold *fli;

			if (!fifo->list_info)
				return;

			fli = &fifo->list_info[mem_blks];
			if (!fli->list_virt_addr)
				break;
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    fli->list_virt_addr,
					    fli->list_phy_addr);
			swstats->mem_freed += PAGE_SIZE;
		}
		/* If we got a zero DMA address during allocation,
		 * free the page now
		 */
		if (mac_control->zerodma_virt_addr) {
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    mac_control->zerodma_virt_addr,
					    (dma_addr_t)0);
			DBG_PRINT(INIT_DBG,
				  "%s: Freeing TxDL with zero DMA address. "
				  "Virtual address %p\n",
				  dev->name, mac_control->zerodma_virt_addr);
			swstats->mem_freed += PAGE_SIZE;
		}
		kfree(fifo->list_info);
		swstats->mem_freed += tx_cfg->fifo_len *
			sizeof(struct list_info_hold);
	}

	size = SIZE_OF_BLOCK;
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		blk_cnt = ring->block_count;
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
			if (tmp_v_addr == NULL)
				break;
			pci_free_consistent(nic->pdev, size,
					    tmp_v_addr, tmp_p_addr);
			swstats->mem_freed += size;
			kfree(ring->rx_blocks[j].rxds);
			swstats->mem_freed += sizeof(struct rxd_info) *
				rxd_count[nic->rxd_mode];
		}
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Freeing buffer storage addresses in 2BUFF mode. */
		for (i = 0; i < config->rx_ring_num; i++) {
			struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
			struct ring_info *ring = &mac_control->rings[i];

			blk_cnt = rx_cfg->num_rxd /
				(rxd_count[nic->rxd_mode] + 1);
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;
				if (!ring->ba[j])
					continue;
				while (k != rxd_count[nic->rxd_mode]) {
					struct buffAdd *ba = &ring->ba[j][k];
					kfree(ba->ba_0_org);
					swstats->mem_freed +=
						BUF0_LEN + ALIGN_SIZE;
					kfree(ba->ba_1_org);
					swstats->mem_freed +=
						BUF1_LEN + ALIGN_SIZE;
					k++;
				}
				kfree(ring->ba[j]);
				swstats->mem_freed += sizeof(struct buffAdd) *
					(rxd_count[nic->rxd_mode] + 1);
			}
			kfree(ring->ba);
			swstats->mem_freed += sizeof(struct buffAdd *) *
				blk_cnt;
		}
	}

	for (i = 0; i < nic->config.tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		if (fifo->ufo_in_band_v) {
			swstats->mem_freed += tx_cfg->fifo_len *
				sizeof(u64);
			kfree(fifo->ufo_in_band_v);
		}
	}

	if (mac_control->stats_mem) {
		swstats->mem_freed += mac_control->stats_mem_sz;
		pci_free_consistent(nic->pdev,
				    mac_control->stats_mem_sz,
				    mac_control->stats_mem,
				    mac_control->stats_mem_phy);
	}
}
/**
 * s2io_verify_pci_mode -
 */

static int s2io_verify_pci_mode(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int mode;

	val64 = readq(&bar0->pci_mode);
	mode = (u8)GET_PCI_MODE(val64);

	if (val64 & PCI_MODE_UNKNOWN_MODE)
		return -1;	/* Unknown PCI mode */
	return mode;
}
#define NEC_VENID   0x1033
#define NEC_DEVID   0x0125
static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
{
	struct pci_dev *tdev = NULL;
	while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
		if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
			if (tdev->bus == s2io_pdev->bus->parent) {
				pci_dev_put(tdev);
				return 1;
			}
		}
	}
	return 0;
}
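/*
 * Note on reference counting above: pci_get_device() takes a reference
 * on the device it returns and drops the reference on the previous one
 * as it walks the list, so only the early-exit match needs an explicit
 * pci_dev_put().
 */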
static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};

/**
 * s2io_print_pci_mode -
 */
static int s2io_print_pci_mode(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int mode;
	struct config_param *config = &nic->config;
	const char *pcimode;

	val64 = readq(&bar0->pci_mode);
	mode = (u8)GET_PCI_MODE(val64);

	if (val64 & PCI_MODE_UNKNOWN_MODE)
		return -1;	/* Unknown PCI mode */

	config->bus_speed = bus_speed[mode];

	if (s2io_on_nec_bridge(nic->pdev)) {
		DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
			  nic->dev->name);
		return mode;
	}

	switch (mode) {
	case PCI_MODE_PCI_33:
		pcimode = "33MHz PCI bus";
		break;
	case PCI_MODE_PCI_66:
		pcimode = "66MHz PCI bus";
		break;
	case PCI_MODE_PCIX_M1_66:
		pcimode = "66MHz PCIX(M1) bus";
		break;
	case PCI_MODE_PCIX_M1_100:
		pcimode = "100MHz PCIX(M1) bus";
		break;
	case PCI_MODE_PCIX_M1_133:
		pcimode = "133MHz PCIX(M1) bus";
		break;
	case PCI_MODE_PCIX_M2_66:
		pcimode = "133MHz PCIX(M2) bus";
		break;
	case PCI_MODE_PCIX_M2_100:
		pcimode = "200MHz PCIX(M2) bus";
		break;
	case PCI_MODE_PCIX_M2_133:
		pcimode = "266MHz PCIX(M2) bus";
		break;
	default:
		pcimode = "unsupported bus!";
		mode = -1;
	}

	DBG_PRINT(ERR_DBG, "%s: Device is on %d bit %s\n",
		  nic->dev->name, val64 & PCI_MODE_32_BITS ? 32 : 64, pcimode);

	return mode;
}
/**
 *  init_tti - Initialization transmit traffic interrupt scheme
 *  @nic: device private variable
 *  @link: link status (UP/DOWN) used to enable/disable continuous
 *  transmit interrupts
 *  Description: The function configures transmit traffic interrupts
 *  Return Value:  SUCCESS on success and
 *  '-1' on failure
 */

static int init_tti(struct s2io_nic *nic, int link)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int i;
	struct config_param *config = &nic->config;

	for (i = 0; i < config->tx_fifo_num; i++) {
		/*
		 * TTI Initialization. Default Tx timer gets us about
		 * 250 interrupts per sec. Continuous interrupts are enabled
		 * by default.
		 */
		if (nic->device_type == XFRAME_II_DEVICE) {
			int count = (nic->config.bus_speed * 125)/2;
			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
		} else
			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);

		val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
			TTI_DATA1_MEM_TX_URNG_B(0x10) |
			TTI_DATA1_MEM_TX_URNG_C(0x30) |
			TTI_DATA1_MEM_TX_TIMER_AC_EN;
		if (i == 0)
			if (use_continuous_tx_intrs && (link == LINK_UP))
				val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
		writeq(val64, &bar0->tti_data1_mem);

		if (nic->config.intr_type == MSI_X) {
			val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
				TTI_DATA2_MEM_TX_UFC_B(0x100) |
				TTI_DATA2_MEM_TX_UFC_C(0x200) |
				TTI_DATA2_MEM_TX_UFC_D(0x300);
		} else {
			if ((nic->config.tx_steering_type ==
			     TX_DEFAULT_STEERING) &&
			    (config->tx_fifo_num > 1) &&
			    (i >= nic->udp_fifo_idx) &&
			    (i < (nic->udp_fifo_idx +
				  nic->total_udp_fifos)))
				val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
					TTI_DATA2_MEM_TX_UFC_B(0x80) |
					TTI_DATA2_MEM_TX_UFC_C(0x100) |
					TTI_DATA2_MEM_TX_UFC_D(0x120);
			else
				val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
					TTI_DATA2_MEM_TX_UFC_B(0x20) |
					TTI_DATA2_MEM_TX_UFC_C(0x40) |
					TTI_DATA2_MEM_TX_UFC_D(0x80);
		}

		writeq(val64, &bar0->tti_data2_mem);

		val64 = TTI_CMD_MEM_WE |
			TTI_CMD_MEM_STROBE_NEW_CMD |
			TTI_CMD_MEM_OFFSET(i);
		writeq(val64, &bar0->tti_command_mem);

		if (wait_for_cmd_complete(&bar0->tti_command_mem,
					  TTI_CMD_MEM_STROBE_NEW_CMD,
					  S2IO_BIT_RESET) != SUCCESS)
			return FAILURE;
	}

	return SUCCESS;
}
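/*
 * On Xframe II the TTI timer value above scales with the sampled bus
 * speed: e.g. a 266 MHz PCI-X (M2) bus gives
 * count = (266 * 125) / 2 = 16625 for the transmit timer, versus the
 * fixed 0x2078 used on Xframe I.
 */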
/**
 *  init_nic - Initialization of hardware
 *  @nic: device private variable
 *  Description: The function sequentially configures every block
 *  of the H/W from their reset values.
 *  Return Value:  SUCCESS on success and
 *  '-1' on failure (endian settings incorrect).
 */

static int init_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	void __iomem *add;
	u32 time;
	int i, j;
	int dtx_cnt = 0;
	unsigned long long mem_share;
	int mem_size;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;

	/* to set the swapper control on the card */
	if (s2io_set_swapper(nic)) {
		DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
		return -EIO;
	}

	/*
	 * Herc requires EOI to be removed from reset before XGXS, so..
	 */
	if (nic->device_type & XFRAME_II_DEVICE) {
		val64 = 0xA500000000ULL;
		writeq(val64, &bar0->sw_reset);
		msleep(500);
		val64 = readq(&bar0->sw_reset);
	}

	/* Remove XGXS from reset state */
	val64 = 0;
	writeq(val64, &bar0->sw_reset);
	msleep(500);
	val64 = readq(&bar0->sw_reset);

	/* Ensure that it's safe to access registers by checking
	 * RIC_RUNNING bit is reset. Check is valid only for XframeII.
	 */
	if (nic->device_type == XFRAME_II_DEVICE) {
		for (i = 0; i < 50; i++) {
			val64 = readq(&bar0->adapter_status);
			if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
				break;
			msleep(10);
		}
		if (i == 50)
			return -ENODEV;
	}

	/* Enable Receiving broadcasts */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 |= MAC_RMAC_BCAST_ENABLE;
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32)val64, add);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64 >> 32), (add + 4));

	/* Read registers in all blocks */
	val64 = readq(&bar0->mac_int_mask);
	val64 = readq(&bar0->mc_int_mask);
	val64 = readq(&bar0->xgxs_int_mask);

	/* Set MTU */
	val64 = dev->mtu;
	writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);

	if (nic->device_type & XFRAME_II_DEVICE) {
		while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
			SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
					  &bar0->dtx_control, UF);
			if (dtx_cnt & 0x1)
				msleep(1); /* Necessary!! */
			dtx_cnt++;
		}
	} else {
		while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
			SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
					  &bar0->dtx_control, UF);
			val64 = readq(&bar0->dtx_control);
			dtx_cnt++;
		}
	}
	/* Tx DMA Initialization */
	val64 = 0;
	writeq(val64, &bar0->tx_fifo_partition_0);
	writeq(val64, &bar0->tx_fifo_partition_1);
	writeq(val64, &bar0->tx_fifo_partition_2);
	writeq(val64, &bar0->tx_fifo_partition_3);

	for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		val64 |= vBIT(tx_cfg->fifo_len - 1, ((j * 32) + 19), 13) |
			vBIT(tx_cfg->fifo_priority, ((j * 32) + 5), 3);

		if (i == (config->tx_fifo_num - 1)) {
			if (i % 2 == 0)
				i++;
		}

		switch (i) {
		case 1:
			writeq(val64, &bar0->tx_fifo_partition_0);
			val64 = 0;
			j = 0;
			break;
		case 3:
			writeq(val64, &bar0->tx_fifo_partition_1);
			val64 = 0;
			j = 0;
			break;
		case 5:
			writeq(val64, &bar0->tx_fifo_partition_2);
			val64 = 0;
			j = 0;
			break;
		case 7:
			writeq(val64, &bar0->tx_fifo_partition_3);
			val64 = 0;
			j = 0;
			break;
		default:
			j++;
			break;
		}
	}

	/*
	 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
	 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
	 */
	if ((nic->device_type == XFRAME_I_DEVICE) && (nic->pdev->revision < 4))
		writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);

	val64 = readq(&bar0->tx_fifo_partition_0);
	DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
		  &bar0->tx_fifo_partition_0, (unsigned long long)val64);

	/*
	 * Initialization of Tx_PA_CONFIG register to ignore packet
	 * integrity checking.
	 */
	val64 = readq(&bar0->tx_pa_cfg);
	val64 |= TX_PA_CFG_IGNORE_FRM_ERR |
		TX_PA_CFG_IGNORE_SNAP_OUI |
		TX_PA_CFG_IGNORE_LLC_CTRL |
		TX_PA_CFG_IGNORE_L2_ERR;
	writeq(val64, &bar0->tx_pa_cfg);

	/* Rx DMA initialization. */
	val64 = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];

		val64 |= vBIT(rx_cfg->ring_priority, (5 + (i * 8)), 3);
	}
	writeq(val64, &bar0->rx_queue_priority);

	/*
	 * Allocating equal share of memory to all the
	 * configured Rings.
	 */
	val64 = 0;
	if (nic->device_type & XFRAME_II_DEVICE)
		mem_size = 32;
	else
		mem_size = 64;

	for (i = 0; i < config->rx_ring_num; i++) {
		switch (i) {
		case 0:
			mem_share = (mem_size / config->rx_ring_num +
				     mem_size % config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
			continue;
		case 1:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
			continue;
		case 2:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
			continue;
		case 3:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
			continue;
		case 4:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
			continue;
		case 5:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
			continue;
		case 6:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
			continue;
		case 7:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
			continue;
		}
	}
	writeq(val64, &bar0->rx_queue_cfg);
	/*
	 * Filling Tx round robin registers
	 * as per the number of FIFOs for equal scheduling priority
	 */
	switch (config->tx_fifo_num) {
	case 1:
		val64 = 0x0;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 2:
		val64 = 0x0001000100010001ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001000100000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 3:
		val64 = 0x0001020001020001ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0200010200010200ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0102000102000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001020001020001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0200010200000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 4:
		val64 = 0x0001020300010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001020300000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 5:
		val64 = 0x0001020304000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0304000102030400ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0102030400010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0400010203040001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0203040000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 6:
		val64 = 0x0001020304050001ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0203040500010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0405000102030405ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001020304050001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0203040500000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 7:
		val64 = 0x0001020304050600ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0102030405060001ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0203040506000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0304050600010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0405060000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 8:
		val64 = 0x0001020304050607ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001020300000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	}

	/* Enable all configured Tx FIFO partitions */
	val64 = readq(&bar0->tx_fifo_partition_0);
	val64 |= (TX_FIFO_PARTITION_EN);
	writeq(val64, &bar0->tx_fifo_partition_0);

	/* Filling the Rx round robin registers as per the
	 * number of Rings and steering based on QoS with
	 * equal priority.
	 */
	switch (config->rx_ring_num) {
	case 1:
		val64 = 0x0;
		writeq(val64, &bar0->rx_w_round_robin_0);
		writeq(val64, &bar0->rx_w_round_robin_1);
		writeq(val64, &bar0->rx_w_round_robin_2);
		writeq(val64, &bar0->rx_w_round_robin_3);
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080808080808080ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 2:
		val64 = 0x0001000100010001ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		writeq(val64, &bar0->rx_w_round_robin_1);
		writeq(val64, &bar0->rx_w_round_robin_2);
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001000100000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080808040404040ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 3:
		val64 = 0x0001020001020001ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0200010200010200ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0102000102000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001020001020001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0200010200000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080804040402020ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 4:
		val64 = 0x0001020300010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		writeq(val64, &bar0->rx_w_round_robin_1);
		writeq(val64, &bar0->rx_w_round_robin_2);
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001020300000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020201010ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 5:
		val64 = 0x0001020304000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0304000102030400ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0102030400010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0400010203040001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0203040000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020201008ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 6:
		val64 = 0x0001020304050001ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0203040500010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0405000102030405ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001020304050001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0203040500000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020100804ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 7:
		val64 = 0x0001020304050600ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0102030405060001ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0203040506000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0304050600010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0405060000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080402010080402ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 8:
		val64 = 0x0001020304050607ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		writeq(val64, &bar0->rx_w_round_robin_1);
		writeq(val64, &bar0->rx_w_round_robin_2);
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001020300000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8040201008040201ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	}
	/* UDP Fix */
	val64 = 0;
	for (i = 0; i < 8; i++)
		writeq(val64, &bar0->rts_frm_len_n[i]);

	/* Set the default rts frame length for the rings configured */
	val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
	for (i = 0 ; i < config->rx_ring_num ; i++)
		writeq(val64, &bar0->rts_frm_len_n[i]);

	/* Set the frame length for the configured rings
	 * desired by the user
	 */
	for (i = 0; i < config->rx_ring_num; i++) {
		/* If rts_frm_len[i] == 0 then it is assumed that the user
		 * has not specified frame length steering.
		 * If the user provides the frame length then program
		 * the rts_frm_len register for those values or else
		 * leave it as it is.
		 */
		if (rts_frm_len[i] != 0) {
			writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
			       &bar0->rts_frm_len_n[i]);
		}
	}

	/* Disable differentiated services steering logic */
	for (i = 0; i < 64; i++) {
		if (rts_ds_steer(nic, i, 0) == FAILURE) {
			DBG_PRINT(ERR_DBG,
				  "%s: rts_ds_steer failed on codepoint %d\n",
				  dev->name, i);
			return -ENODEV;
		}
	}

	/* Program statistics memory */
	writeq(mac_control->stats_mem_phy, &bar0->stat_addr);

	if (nic->device_type == XFRAME_II_DEVICE) {
		val64 = STAT_BC(0x320);
		writeq(val64, &bar0->stat_byte_cnt);
	}

	/*
	 * Initializing the sampling rate for the device to calculate the
	 * bandwidth utilization.
	 */
	val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
		MAC_RX_LINK_UTIL_VAL(rmac_util_period);
	writeq(val64, &bar0->mac_link_util);

	/*
	 * Initializing the Transmit and Receive Traffic Interrupt
	 * Scheme.
	 */

	/* Initialize TTI */
	if (SUCCESS != init_tti(nic, nic->last_link_state))
		return -ENODEV;

	/* RTI Initialization */
	if (nic->device_type == XFRAME_II_DEVICE) {
		/*
		 * Programmed to generate approx 500 Intrs per
		 * second
		 */
		int count = (nic->config.bus_speed * 125)/4;
		val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
	} else
		val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
	val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
		RTI_DATA1_MEM_RX_URNG_B(0x10) |
		RTI_DATA1_MEM_RX_URNG_C(0x30) |
		RTI_DATA1_MEM_RX_TIMER_AC_EN;

	writeq(val64, &bar0->rti_data1_mem);

	val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
		RTI_DATA2_MEM_RX_UFC_B(0x2);
	if (nic->config.intr_type == MSI_X)
		val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) |
			  RTI_DATA2_MEM_RX_UFC_D(0x40));
	else
		val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) |
			  RTI_DATA2_MEM_RX_UFC_D(0x80));
	writeq(val64, &bar0->rti_data2_mem);

	for (i = 0; i < config->rx_ring_num; i++) {
		val64 = RTI_CMD_MEM_WE |
			RTI_CMD_MEM_STROBE_NEW_CMD |
			RTI_CMD_MEM_OFFSET(i);
		writeq(val64, &bar0->rti_command_mem);

		/*
		 * Once the operation completes, the Strobe bit of the
		 * command register will be reset. We poll for this
		 * particular condition. We wait for a maximum of 500ms
		 * for the operation to complete, if it's not complete
		 * by then we return error.
		 */
		time = 0;
		while (true) {
			val64 = readq(&bar0->rti_command_mem);
			if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
				break;

			if (time > 10) {
				DBG_PRINT(ERR_DBG, "%s: RTI init failed\n",
					  dev->name);
				return -ENODEV;
			}
			time++;
			msleep(50);
		}
	}

	/*
	 * Initializing proper values as Pause threshold into all
	 * the 8 Queues on Rx side.
	 */
	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);

	/* Disable RMAC PAD STRIPPING */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64), add);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64 >> 32), (add + 4));
	val64 = readq(&bar0->mac_cfg);

	/* Enable FCS stripping by adapter */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 |= MAC_CFG_RMAC_STRIP_FCS;
	if (nic->device_type == XFRAME_II_DEVICE)
		writeq(val64, &bar0->mac_cfg);
	else {
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64), add);
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64 >> 32), (add + 4));
	}

	/*
	 * Set the time value to be inserted in the pause frame
	 * generated by xena.
	 */
	val64 = readq(&bar0->rmac_pause_cfg);
	val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
	val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
	writeq(val64, &bar0->rmac_pause_cfg);

	/*
	 * Set the Threshold Limit for Generating the pause frame.
	 * If the amount of data in any Queue exceeds the ratio of
	 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256,
	 * a pause frame is generated.
	 */
	val64 = 0;
	for (i = 0; i < 4; i++) {
		val64 |= (((u64)0xFF00 |
			   nic->mac_control.mc_pause_threshold_q0q3)
			  << (i * 2 * 8));
	}
	writeq(val64, &bar0->mc_pause_thresh_q0q3);

	val64 = 0;
	for (i = 0; i < 4; i++) {
		val64 |= (((u64)0xFF00 |
			   nic->mac_control.mc_pause_threshold_q4q7)
			  << (i * 2 * 8));
	}
	writeq(val64, &bar0->mc_pause_thresh_q4q7);

	/*
	 * TxDMA will stop Read request if the number of read split has
	 * exceeded the limit pointed by shared_splits
	 */
	val64 = readq(&bar0->pic_control);
	val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
	writeq(val64, &bar0->pic_control);

	if (nic->config.bus_speed == 266) {
		writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
		writeq(0x0, &bar0->read_retry_delay);
		writeq(0x0, &bar0->write_retry_delay);
	}

	/*
	 * Programming the Herc to split every write transaction
	 * that does not start on an ADB to reduce disconnects.
	 */
	if (nic->device_type == XFRAME_II_DEVICE) {
		val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
			MISC_LINK_STABILITY_PRD(3);
		writeq(val64, &bar0->misc_control);
		val64 = readq(&bar0->pic_control2);
		val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
		writeq(val64, &bar0->pic_control2);
	}
	if (strstr(nic->product_name, "CX4")) {
		val64 = TMAC_AVG_IPG(0x17);
		writeq(val64, &bar0->tmac_avg_ipg);
	}

	return SUCCESS;
}
#define LINK_UP_DOWN_INTERRUPT		1
#define MAC_RMAC_ERR_TIMER		2

static int s2io_link_fault_indication(struct s2io_nic *nic)
{
	if (nic->device_type == XFRAME_II_DEVICE)
		return LINK_UP_DOWN_INTERRUPT;
	else
		return MAC_RMAC_ERR_TIMER;
}
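/*
 * In other words: Xframe II reports link faults through a dedicated
 * interrupt, while Xframe I relies on the RMAC error timer path to
 * detect link state changes.
 */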
1894  * do_s2io_write_bits - update alarm bits in an alarm mask register
1895  * @value: alarm bits to update
1896  * @flag: ENABLE_INTRS to unmask the bits, anything else to mask them
1897  * @addr: address of the alarm mask register
1898  * Description: update alarm bits in the alarm mask register
1899  * Return Value:
1900  * NONE.
1902 static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1904 u64 temp64;
1906 temp64 = readq(addr);
1908 if (flag == ENABLE_INTRS)
1909 temp64 &= ~((u64)value);
1910 else
1911 temp64 |= ((u64)value);
1912 writeq(temp64, addr);
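/*
 * Illustrative sketch (not part of the driver): how callers use the
 * helper above. ENABLE_INTRS clears the given bits in a mask register
 * (unmasking those alarms); any other flag sets them (masking).
 */
#if 0
	/* Unmask the PFC state-machine alarm ... */
	do_s2io_write_bits(PFC_SM_ERR_ALARM, ENABLE_INTRS,
			   &bar0->pfc_err_mask);
	/* ... and mask it again. */
	do_s2io_write_bits(PFC_SM_ERR_ALARM, DISABLE_INTRS,
			   &bar0->pfc_err_mask);
#endif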
1915 static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
1917 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1918 register u64 gen_int_mask = 0;
1919 u64 interruptible;
1921 writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask);
1922 if (mask & TX_DMA_INTR) {
1923 gen_int_mask |= TXDMA_INT_M;
1925 do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
1926 TXDMA_PCC_INT | TXDMA_TTI_INT |
1927 TXDMA_LSO_INT | TXDMA_TPA_INT |
1928 TXDMA_SM_INT, flag, &bar0->txdma_int_mask);
1930 do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
1931 PFC_MISC_0_ERR | PFC_MISC_1_ERR |
1932 PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
1933 &bar0->pfc_err_mask);
1935 do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
1936 TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
1937 TDA_PCIX_ERR, flag, &bar0->tda_err_mask);
1939 do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
1940 PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
1941 PCC_N_SERR | PCC_6_COF_OV_ERR |
1942 PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
1943 PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
1944 PCC_TXB_ECC_SG_ERR,
1945 flag, &bar0->pcc_err_mask);
1947 do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
1948 TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);
1950 do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
1951 LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
1952 LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
1953 flag, &bar0->lso_err_mask);
1955 do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
1956 flag, &bar0->tpa_err_mask);
1958 do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
1961 if (mask & TX_MAC_INTR) {
1962 gen_int_mask |= TXMAC_INT_M;
1963 do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
1964 &bar0->mac_int_mask);
1965 do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
1966 TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
1967 TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
1968 flag, &bar0->mac_tmac_err_mask);
1971 if (mask & TX_XGXS_INTR) {
1972 gen_int_mask |= TXXGXS_INT_M;
1973 do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
1974 &bar0->xgxs_int_mask);
1975 do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
1976 TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
1977 flag, &bar0->xgxs_txgxs_err_mask);
1980 if (mask & RX_DMA_INTR) {
1981 gen_int_mask |= RXDMA_INT_M;
1982 do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
1983 RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
1984 flag, &bar0->rxdma_int_mask);
1985 do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
1986 RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
1987 RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
1988 RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
1989 do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
1990 PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
1991 PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
1992 &bar0->prc_pcix_err_mask);
1993 do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
1994 RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
1995 &bar0->rpa_err_mask);
1996 do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
1997 RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
1998 RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
1999 RDA_FRM_ECC_SG_ERR |
2000 RDA_MISC_ERR|RDA_PCIX_ERR,
2001 flag, &bar0->rda_err_mask);
2002 do_s2io_write_bits(RTI_SM_ERR_ALARM |
2003 RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
2004 flag, &bar0->rti_err_mask);
2007 if (mask & RX_MAC_INTR) {
2008 gen_int_mask |= RXMAC_INT_M;
2009 do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
2010 &bar0->mac_int_mask);
2011 interruptible = (RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
2012 RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
2013 RMAC_DOUBLE_ECC_ERR);
2014 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER)
2015 interruptible |= RMAC_LINK_STATE_CHANGE_INT;
2016 do_s2io_write_bits(interruptible,
2017 flag, &bar0->mac_rmac_err_mask);
2020 if (mask & RX_XGXS_INTR) {
2021 gen_int_mask |= RXXGXS_INT_M;
2022 do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
2023 &bar0->xgxs_int_mask);
2024 do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
2025 &bar0->xgxs_rxgxs_err_mask);
2028 if (mask & MC_INTR) {
2029 gen_int_mask |= MC_INT_M;
2030 do_s2io_write_bits(MC_INT_MASK_MC_INT,
2031 flag, &bar0->mc_int_mask);
2032 do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
2033 MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
2034 &bar0->mc_err_mask);
2036 nic->general_int_mask = gen_int_mask;
2038 /* Remove this line when alarm interrupts are enabled */
2039 nic->general_int_mask = 0;
2043 * en_dis_able_nic_intrs - Enable or Disable the interrupts
2044  * @nic: device private variable
2045  * @mask: A mask indicating which Intr block must be modified
2046 * @flag: A flag indicating whether to enable or disable the Intrs.
2047 * Description: This function will either disable or enable the interrupts
2048 * depending on the flag argument. The mask argument can be used to
2049 * enable/disable any Intr block.
2050 * Return Value: NONE.
2053 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
2055 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2056 register u64 temp64 = 0, intr_mask = 0;
2058 intr_mask = nic->general_int_mask;
2060 /* Top level interrupt classification */
2061 /* PIC Interrupts */
2062 if (mask & TX_PIC_INTR) {
2063 /* Enable PIC Intrs in the general intr mask register */
2064 intr_mask |= TXPIC_INT_M;
2065 if (flag == ENABLE_INTRS) {
2067  * If it is a Hercules adapter, enable GPIO; otherwise
2068  * disable all PCIX, Flash, MDIO, IIC and GPIO
2069 * interrupts for now.
2070 * TODO
2072 if (s2io_link_fault_indication(nic) ==
2073 LINK_UP_DOWN_INTERRUPT) {
2074 do_s2io_write_bits(PIC_INT_GPIO, flag,
2075 &bar0->pic_int_mask);
2076 do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
2077 &bar0->gpio_int_mask);
2078 } else
2079 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2080 } else if (flag == DISABLE_INTRS) {
2082 * Disable PIC Intrs in the general
2083 * intr mask register
2085 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2089 /* Tx traffic interrupts */
2090 if (mask & TX_TRAFFIC_INTR) {
2091 intr_mask |= TXTRAFFIC_INT_M;
2092 if (flag == ENABLE_INTRS) {
2094 * Enable all the Tx side interrupts
2095 * writing 0 Enables all 64 TX interrupt levels
2097 writeq(0x0, &bar0->tx_traffic_mask);
2098 } else if (flag == DISABLE_INTRS) {
2100 * Disable Tx Traffic Intrs in the general intr mask
2101 * register.
2103 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
2107 /* Rx traffic interrupts */
2108 if (mask & RX_TRAFFIC_INTR) {
2109 intr_mask |= RXTRAFFIC_INT_M;
2110 if (flag == ENABLE_INTRS) {
2111 /* writing 0 Enables all 8 RX interrupt levels */
2112 writeq(0x0, &bar0->rx_traffic_mask);
2113 } else if (flag == DISABLE_INTRS) {
2115 * Disable Rx Traffic Intrs in the general intr mask
2116 * register.
2118 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
2122 temp64 = readq(&bar0->general_int_mask);
2123 if (flag == ENABLE_INTRS)
2124 temp64 &= ~((u64)intr_mask);
2125 else
2126 temp64 = DISABLE_ALL_INTRS;
2127 writeq(temp64, &bar0->general_int_mask);
2129 nic->general_int_mask = readq(&bar0->general_int_mask);
2133  * verify_pcc_quiescent - Checks for PCC quiescent state
2134  * Return: 1 if PCC is quiescent
2135  *         0 if PCC is not quiescent
2137 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
2139 int ret = 0, herc;
2140 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2141 u64 val64 = readq(&bar0->adapter_status);
2143 herc = (sp->device_type == XFRAME_II_DEVICE);
2145 if (flag == false) {
2146 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2147 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
2148 ret = 1;
2149 } else {
2150 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2151 ret = 1;
2153 } else {
2154 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2155 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
2156 ADAPTER_STATUS_RMAC_PCC_IDLE))
2157 ret = 1;
2158 } else {
2159 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
2160 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2161 ret = 1;
2165 return ret;
2168 * verify_xena_quiescence - Checks whether the H/W is ready
2169  * Description: Returns whether the H/W is ready to go or not. Depending
2170  * on whether the adapter enable bit was written or not, the comparison
2171  * differs and the calling function passes the input argument flag to
2172  * indicate this.
2173  * Return: 1 if Xena is quiescent
2174  *         0 if Xena is not quiescent
2177 static int verify_xena_quiescence(struct s2io_nic *sp)
2179 int mode;
2180 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2181 u64 val64 = readq(&bar0->adapter_status);
2182 mode = s2io_verify_pci_mode(sp);
2184 if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
2185 DBG_PRINT(ERR_DBG, "TDMA is not ready!\n");
2186 return 0;
2188 if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
2189 DBG_PRINT(ERR_DBG, "RDMA is not ready!\n");
2190 return 0;
2192 if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
2193 DBG_PRINT(ERR_DBG, "PFC is not ready!\n");
2194 return 0;
2196 if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
2197 DBG_PRINT(ERR_DBG, "TMAC BUF is not empty!\n");
2198 return 0;
2200 if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
2201 DBG_PRINT(ERR_DBG, "PIC is not QUIESCENT!\n");
2202 return 0;
2204 if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
2205 DBG_PRINT(ERR_DBG, "MC_DRAM is not ready!\n");
2206 return 0;
2208 if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
2209 DBG_PRINT(ERR_DBG, "MC_QUEUES is not ready!\n");
2210 return 0;
2212 if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
2213 DBG_PRINT(ERR_DBG, "M_PLL is not locked!\n");
2214 return 0;
2218 * In PCI 33 mode, the P_PLL is not used, and therefore,
2219  * the P_PLL_LOCK bit in the adapter_status register will
2220 * not be asserted.
2222 if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
2223 sp->device_type == XFRAME_II_DEVICE &&
2224 mode != PCI_MODE_PCI_33) {
2225 DBG_PRINT(ERR_DBG, "P_PLL is not locked!\n");
2226 return 0;
2228 if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
2229 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2230 DBG_PRINT(ERR_DBG, "RC_PRC is not QUIESCENT!\n");
2231 return 0;
2233 return 1;
2237  * fix_mac_address - Fix for MAC addr problem on Alpha platforms
2238  * @sp: Pointer to device-specific structure
2239  * Description:
2240  * New procedure to clear MAC address reading problems on Alpha platforms
2244 static void fix_mac_address(struct s2io_nic *sp)
2246 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2247 u64 val64;
2248 int i = 0;
2250 while (fix_mac[i] != END_SIGN) {
2251 writeq(fix_mac[i++], &bar0->gpio_control);
2252 udelay(10);
2253 val64 = readq(&bar0->gpio_control);
2258 * start_nic - Turns the device on
2259 * @nic : device private variable.
2260 * Description:
2261 * This function actually turns the device on. Before this function is
2262  * called, all registers are configured from their reset states
2263 * and shared memory is allocated but the NIC is still quiescent. On
2264 * calling this function, the device interrupts are cleared and the NIC is
2265 * literally switched on by writing into the adapter control register.
2266 * Return Value:
2267 * SUCCESS on success and -1 on failure.
2270 static int start_nic(struct s2io_nic *nic)
2272 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2273 struct net_device *dev = nic->dev;
2274 register u64 val64 = 0;
2275 u16 subid, i;
2276 struct config_param *config = &nic->config;
2277 struct mac_info *mac_control = &nic->mac_control;
2279 /* PRC Initialization and configuration */
2280 for (i = 0; i < config->rx_ring_num; i++) {
2281 struct ring_info *ring = &mac_control->rings[i];
2283 writeq((u64)ring->rx_blocks[0].block_dma_addr,
2284 &bar0->prc_rxd0_n[i]);
2286 val64 = readq(&bar0->prc_ctrl_n[i]);
2287 if (nic->rxd_mode == RXD_MODE_1)
2288 val64 |= PRC_CTRL_RC_ENABLED;
2289 else
2290 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
2291 if (nic->device_type == XFRAME_II_DEVICE)
2292 val64 |= PRC_CTRL_GROUP_READS;
2293 val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
2294 val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
2295 writeq(val64, &bar0->prc_ctrl_n[i]);
2298 if (nic->rxd_mode == RXD_MODE_3B) {
2299 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
2300 val64 = readq(&bar0->rx_pa_cfg);
2301 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
2302 writeq(val64, &bar0->rx_pa_cfg);
2305 if (vlan_tag_strip == 0) {
2306 val64 = readq(&bar0->rx_pa_cfg);
2307 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
2308 writeq(val64, &bar0->rx_pa_cfg);
2309 nic->vlan_strip_flag = 0;
2313 * Enabling MC-RLDRAM. After enabling the device, we timeout
2314 * for around 100ms, which is approximately the time required
2315 * for the device to be ready for operation.
2317 val64 = readq(&bar0->mc_rldram_mrs);
2318 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
2319 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
2320 val64 = readq(&bar0->mc_rldram_mrs);
2322 msleep(100); /* Delay by around 100 ms. */
2324 /* Enabling ECC Protection. */
2325 val64 = readq(&bar0->adapter_control);
2326 val64 &= ~ADAPTER_ECC_EN;
2327 writeq(val64, &bar0->adapter_control);
2330 * Verify if the device is ready to be enabled, if so enable
2331 * it.
2333 val64 = readq(&bar0->adapter_status);
2334 if (!verify_xena_quiescence(nic)) {
2335 DBG_PRINT(ERR_DBG, "%s: device is not ready, "
2336 "Adapter status reads: 0x%llx\n",
2337 dev->name, (unsigned long long)val64);
2338 return FAILURE;
2342  * With some switches, link might already be up at this point.
2343 * Because of this weird behavior, when we enable laser,
2344 * we may not get link. We need to handle this. We cannot
2345 * figure out which switch is misbehaving. So we are forced to
2346 * make a global change.
2349 /* Enabling Laser. */
2350 val64 = readq(&bar0->adapter_control);
2351 val64 |= ADAPTER_EOI_TX_ON;
2352 writeq(val64, &bar0->adapter_control);
2354 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
2356  * Don't see link state interrupts initially on some switches,
2357  * so we directly schedule the link state task here.
2359 schedule_work(&nic->set_link_task);
2361 /* SXE-002: Initialize link and activity LED */
2362 subid = nic->pdev->subsystem_device;
2363 if (((subid & 0xFF) >= 0x07) &&
2364 (nic->device_type == XFRAME_I_DEVICE)) {
2365 val64 = readq(&bar0->gpio_control);
2366 val64 |= 0x0000800000000000ULL;
2367 writeq(val64, &bar0->gpio_control);
2368 val64 = 0x0411040400000000ULL;
2369 writeq(val64, (void __iomem *)bar0 + 0x2700);
2372 return SUCCESS;
2375 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2377 static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
2378 struct TxD *txdlp, int get_off)
2380 struct s2io_nic *nic = fifo_data->nic;
2381 struct sk_buff *skb;
2382 struct TxD *txds;
2383 u16 j, frg_cnt;
2385 txds = txdlp;
2386 if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
2387 pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
2388 sizeof(u64), PCI_DMA_TODEVICE);
2389 txds++;
2392 skb = (struct sk_buff *)((unsigned long)txds->Host_Control);
2393 if (!skb) {
2394 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2395 return NULL;
2397 pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
2398 skb_headlen(skb), PCI_DMA_TODEVICE);
2399 frg_cnt = skb_shinfo(skb)->nr_frags;
2400 if (frg_cnt) {
2401 txds++;
2402 for (j = 0; j < frg_cnt; j++, txds++) {
2403 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2404 if (!txds->Buffer_Pointer)
2405 break;
2406 pci_unmap_page(nic->pdev,
2407 (dma_addr_t)txds->Buffer_Pointer,
2408 frag->size, PCI_DMA_TODEVICE);
2411 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2412 return skb;
2416 * free_tx_buffers - Free all queued Tx buffers
2417 * @nic : device private variable.
2418 * Description:
2419 * Free all queued Tx buffers.
2420 * Return Value: void
2423 static void free_tx_buffers(struct s2io_nic *nic)
2425 struct net_device *dev = nic->dev;
2426 struct sk_buff *skb;
2427 struct TxD *txdp;
2428 int i, j;
2429 int cnt = 0;
2430 struct config_param *config = &nic->config;
2431 struct mac_info *mac_control = &nic->mac_control;
2432 struct stat_block *stats = mac_control->stats_info;
2433 struct swStat *swstats = &stats->sw_stat;
2435 for (i = 0; i < config->tx_fifo_num; i++) {
2436 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
2437 struct fifo_info *fifo = &mac_control->fifos[i];
2438 unsigned long flags;
2440 spin_lock_irqsave(&fifo->tx_lock, flags);
2441 for (j = 0; j < tx_cfg->fifo_len; j++) {
2442 txdp = (struct TxD *)fifo->list_info[j].list_virt_addr;
2443 skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2444 if (skb) {
2445 swstats->mem_freed += skb->truesize;
2446 dev_kfree_skb(skb);
2447 cnt++;
2450 DBG_PRINT(INTR_DBG,
2451 "%s: forcibly freeing %d skbs on FIFO%d\n",
2452 dev->name, cnt, i);
2453 fifo->tx_curr_get_info.offset = 0;
2454 fifo->tx_curr_put_info.offset = 0;
2455 spin_unlock_irqrestore(&fifo->tx_lock, flags);
2460 * stop_nic - To stop the nic
2461  * @nic : device private variable.
2462 * Description:
2463 * This function does exactly the opposite of what the start_nic()
2464 * function does. This function is called to stop the device.
2465 * Return Value:
2466 * void.
2469 static void stop_nic(struct s2io_nic *nic)
2471 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2472 register u64 val64 = 0;
2473 u16 interruptible;
2475 /* Disable all interrupts */
2476 en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2477 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2478 interruptible |= TX_PIC_INTR;
2479 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2481 /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2482 val64 = readq(&bar0->adapter_control);
2483 val64 &= ~(ADAPTER_CNTL_EN);
2484 writeq(val64, &bar0->adapter_control);
2488 * fill_rx_buffers - Allocates the Rx side skbs
2489 * @ring_info: per ring structure
2490 * @from_card_up: If this is true, we will map the buffer to get
2491 * the dma address for buf0 and buf1 to give it to the card.
2492 * Else we will sync the already mapped buffer to give it to the card.
2493 * Description:
2494 * The function allocates Rx side skbs and puts the physical
2495 * address of these buffers into the RxD buffer pointers, so that the NIC
2496 * can DMA the received frame into these locations.
2497  * The NIC supports 3 receive modes, viz.
2498  * 1. single buffer,
2499  * 2. three buffer and
2500  * 3. five buffer modes.
2501  * Each mode defines how many fragments the received frame will be split
2502  * up into by the NIC. The frame is split into L3 header, L4 header and
2503  * L4 payload in three buffer mode, and in five buffer mode the L4 payload
2504  * itself is split into 3 fragments. As of now only single buffer mode is
2505  * supported.
2506 * Return Value:
2507 * SUCCESS on success or an appropriate -ve value on failure.
2509 static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
2510 int from_card_up)
2512 struct sk_buff *skb;
2513 struct RxD_t *rxdp;
2514 int off, size, block_no, block_no1;
2515 u32 alloc_tab = 0;
2516 u32 alloc_cnt;
2517 u64 tmp;
2518 struct buffAdd *ba;
2519 struct RxD_t *first_rxdp = NULL;
2520 u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
2521 int rxd_index = 0;
2522 struct RxD1 *rxdp1;
2523 struct RxD3 *rxdp3;
2524 struct swStat *swstats = &ring->nic->mac_control.stats_info->sw_stat;
2526 alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;
2528 block_no1 = ring->rx_curr_get_info.block_index;
2529 while (alloc_tab < alloc_cnt) {
2530 block_no = ring->rx_curr_put_info.block_index;
2532 off = ring->rx_curr_put_info.offset;
2534 rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;
2536 rxd_index = off + 1;
2537 if (block_no)
2538 rxd_index += (block_no * ring->rxd_count);
2540 if ((block_no == block_no1) &&
2541 (off == ring->rx_curr_get_info.offset) &&
2542 (rxdp->Host_Control)) {
2543 DBG_PRINT(INTR_DBG, "%s: Get and Put info equated\n",
2544 ring->dev->name);
2545 goto end;
2547 if (off && (off == ring->rxd_count)) {
2548 ring->rx_curr_put_info.block_index++;
2549 if (ring->rx_curr_put_info.block_index ==
2550 ring->block_count)
2551 ring->rx_curr_put_info.block_index = 0;
2552 block_no = ring->rx_curr_put_info.block_index;
2553 off = 0;
2554 ring->rx_curr_put_info.offset = off;
2555 rxdp = ring->rx_blocks[block_no].block_virt_addr;
2556 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2557 ring->dev->name, rxdp);
2561 if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2562 ((ring->rxd_mode == RXD_MODE_3B) &&
2563 (rxdp->Control_2 & s2BIT(0)))) {
2564 ring->rx_curr_put_info.offset = off;
2565 goto end;
2567 /* calculate size of skb based on ring mode */
2568 size = ring->mtu +
2569 HEADER_ETHERNET_II_802_3_SIZE +
2570 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2571 if (ring->rxd_mode == RXD_MODE_1)
2572 size += NET_IP_ALIGN;
2573 else
2574 size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;
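/*
 * Illustrative sketch (not part of the driver): rough sizing for 1-buffer
 * mode. The individual header sizes are assumptions for an Ethernet
 * frame with 802.2 LLC and SNAP headers, not values quoted from s2io.h.
 */
#if 0
	/* e.g. an MTU of 1500 would give roughly
	 * 1500 + 14 (Ethernet) + 3 (802.2 LLC) + 5 (SNAP) + NET_IP_ALIGN
	 * bytes for the skb data area. */
	int example_size = 1500 + 14 + 3 + 5 + NET_IP_ALIGN;
#endif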
2576 /* allocate skb */
2577 skb = dev_alloc_skb(size);
2578 if (!skb) {
2579 DBG_PRINT(INFO_DBG, "%s: Could not allocate skb\n",
2580 ring->dev->name);
2581 if (first_rxdp) {
2582 wmb();
2583 first_rxdp->Control_1 |= RXD_OWN_XENA;
2585 swstats->mem_alloc_fail_cnt++;
2587 return -ENOMEM ;
2589 swstats->mem_allocated += skb->truesize;
2591 if (ring->rxd_mode == RXD_MODE_1) {
2592 /* 1 buffer mode - normal operation mode */
2593 rxdp1 = (struct RxD1 *)rxdp;
2594 memset(rxdp, 0, sizeof(struct RxD1));
2595 skb_reserve(skb, NET_IP_ALIGN);
2596 rxdp1->Buffer0_ptr =
2597 pci_map_single(ring->pdev, skb->data,
2598 size - NET_IP_ALIGN,
2599 PCI_DMA_FROMDEVICE);
2600 if (pci_dma_mapping_error(nic->pdev,
2601 rxdp1->Buffer0_ptr))
2602 goto pci_map_failed;
2604 rxdp->Control_2 =
2605 SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
2606 rxdp->Host_Control = (unsigned long)skb;
2607 } else if (ring->rxd_mode == RXD_MODE_3B) {
2609 * 2 buffer mode -
2610 * 2 buffer mode provides 128
2611 * byte aligned receive buffers.
2614 rxdp3 = (struct RxD3 *)rxdp;
2615 /* save buffer pointers to avoid frequent dma mapping */
2616 Buffer0_ptr = rxdp3->Buffer0_ptr;
2617 Buffer1_ptr = rxdp3->Buffer1_ptr;
2618 memset(rxdp, 0, sizeof(struct RxD3));
2619 /* restore the buffer pointers for dma sync*/
2620 rxdp3->Buffer0_ptr = Buffer0_ptr;
2621 rxdp3->Buffer1_ptr = Buffer1_ptr;
2623 ba = &ring->ba[block_no][off];
2624 skb_reserve(skb, BUF0_LEN);
2625 tmp = (u64)(unsigned long)skb->data;
2626 tmp += ALIGN_SIZE;
2627 tmp &= ~ALIGN_SIZE;
2628 skb->data = (void *) (unsigned long)tmp;
2629 skb_reset_tail_pointer(skb);
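/*
 * Illustrative sketch (not part of the driver): the arithmetic above
 * rounds skb->data up to the next 128-byte boundary, assuming ALIGN_SIZE
 * is 0x7F (127); that mask value is an assumption in this sketch.
 */
#if 0
	/* e.g. a data pointer of 0x1005: (0x1005 + 0x7F) & ~0x7FULL
	 * == 0x1080, the next multiple of 128. */
	u64 aligned = (0x1005ULL + 0x7F) & ~0x7FULL;
#endif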
2631 if (from_card_up) {
2632 rxdp3->Buffer0_ptr =
2633 pci_map_single(ring->pdev, ba->ba_0,
2634 BUF0_LEN,
2635 PCI_DMA_FROMDEVICE);
2636 if (pci_dma_mapping_error(nic->pdev,
2637 rxdp3->Buffer0_ptr))
2638 goto pci_map_failed;
2639 } else
2640 pci_dma_sync_single_for_device(ring->pdev,
2641 (dma_addr_t)rxdp3->Buffer0_ptr,
2642 BUF0_LEN,
2643 PCI_DMA_FROMDEVICE);
2645 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2646 if (ring->rxd_mode == RXD_MODE_3B) {
2647 /* Two buffer mode */
2650 * Buffer2 will have L3/L4 header plus
2651 * L4 payload
2653 rxdp3->Buffer2_ptr = pci_map_single(ring->pdev,
2654 skb->data,
2655 ring->mtu + 4,
2656 PCI_DMA_FROMDEVICE);
2658 if (pci_dma_mapping_error(nic->pdev,
2659 rxdp3->Buffer2_ptr))
2660 goto pci_map_failed;
2662 if (from_card_up) {
2663 rxdp3->Buffer1_ptr =
2664 pci_map_single(ring->pdev,
2665 ba->ba_1,
2666 BUF1_LEN,
2667 PCI_DMA_FROMDEVICE);
2669 if (pci_dma_mapping_error(nic->pdev,
2670 rxdp3->Buffer1_ptr)) {
2671 pci_unmap_single(ring->pdev,
2672 (dma_addr_t)(unsigned long)
2673 skb->data,
2674 ring->mtu + 4,
2675 PCI_DMA_FROMDEVICE);
2676 goto pci_map_failed;
2679 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2680 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2681 (ring->mtu + 4);
2683 rxdp->Control_2 |= s2BIT(0);
2684 rxdp->Host_Control = (unsigned long) (skb);
2686 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2687 rxdp->Control_1 |= RXD_OWN_XENA;
2688 off++;
2689 if (off == (ring->rxd_count + 1))
2690 off = 0;
2691 ring->rx_curr_put_info.offset = off;
2693 rxdp->Control_2 |= SET_RXD_MARKER;
2694 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2695 if (first_rxdp) {
2696 wmb();
2697 first_rxdp->Control_1 |= RXD_OWN_XENA;
2699 first_rxdp = rxdp;
2701 ring->rx_bufs_left += 1;
2702 alloc_tab++;
2705 end:
2706 /* Transfer ownership of first descriptor to adapter just before
2707 * exiting. Before that, use memory barrier so that ownership
2708 * and other fields are seen by adapter correctly.
2710 if (first_rxdp) {
2711 wmb();
2712 first_rxdp->Control_1 |= RXD_OWN_XENA;
2715 return SUCCESS;
2717 pci_map_failed:
2718 swstats->pci_map_fail_cnt++;
2719 swstats->mem_freed += skb->truesize;
2720 dev_kfree_skb_irq(skb);
2721 return -ENOMEM;
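/*
 * Illustrative sketch (not part of the driver): the RXD_OWN_XENA handoff
 * in the function above. Descriptors are given to the adapter in batches
 * of (1 << rxsync_frequency); the first RxD of each batch is held back
 * and flipped to RXD_OWN_XENA only after a wmb(), so the adapter never
 * sees a partially initialized batch.
 */
#if 0
	/* e.g. a hypothetical rxsync_frequency of 2 gives a batch of 4:
	 * RxDs 1-3 of the batch get RXD_OWN_XENA immediately, RxD 0 only
	 * once the whole batch (and the barrier) is in place. */
	int batch = 1 << 2;
#endif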
2724 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2726 struct net_device *dev = sp->dev;
2727 int j;
2728 struct sk_buff *skb;
2729 struct RxD_t *rxdp;
2730 struct buffAdd *ba;
2731 struct RxD1 *rxdp1;
2732 struct RxD3 *rxdp3;
2733 struct mac_info *mac_control = &sp->mac_control;
2734 struct stat_block *stats = mac_control->stats_info;
2735 struct swStat *swstats = &stats->sw_stat;
2737 for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2738 rxdp = mac_control->rings[ring_no].
2739 rx_blocks[blk].rxds[j].virt_addr;
2740 skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
2741 if (!skb)
2742 continue;
2743 if (sp->rxd_mode == RXD_MODE_1) {
2744 rxdp1 = (struct RxD1 *)rxdp;
2745 pci_unmap_single(sp->pdev,
2746 (dma_addr_t)rxdp1->Buffer0_ptr,
2747 dev->mtu +
2748 HEADER_ETHERNET_II_802_3_SIZE +
2749 HEADER_802_2_SIZE + HEADER_SNAP_SIZE,
2750 PCI_DMA_FROMDEVICE);
2751 memset(rxdp, 0, sizeof(struct RxD1));
2752 } else if (sp->rxd_mode == RXD_MODE_3B) {
2753 rxdp3 = (struct RxD3 *)rxdp;
2754 ba = &mac_control->rings[ring_no].ba[blk][j];
2755 pci_unmap_single(sp->pdev,
2756 (dma_addr_t)rxdp3->Buffer0_ptr,
2757 BUF0_LEN,
2758 PCI_DMA_FROMDEVICE);
2759 pci_unmap_single(sp->pdev,
2760 (dma_addr_t)rxdp3->Buffer1_ptr,
2761 BUF1_LEN,
2762 PCI_DMA_FROMDEVICE);
2763 pci_unmap_single(sp->pdev,
2764 (dma_addr_t)rxdp3->Buffer2_ptr,
2765 dev->mtu + 4,
2766 PCI_DMA_FROMDEVICE);
2767 memset(rxdp, 0, sizeof(struct RxD3));
2769 swstats->mem_freed += skb->truesize;
2770 dev_kfree_skb(skb);
2771 mac_control->rings[ring_no].rx_bufs_left -= 1;
2776 * free_rx_buffers - Frees all Rx buffers
2777 * @sp: device private variable.
2778 * Description:
2779 * This function will free all Rx buffers allocated by host.
2780 * Return Value:
2781 * NONE.
2784 static void free_rx_buffers(struct s2io_nic *sp)
2786 struct net_device *dev = sp->dev;
2787 int i, blk = 0, buf_cnt = 0;
2788 struct config_param *config = &sp->config;
2789 struct mac_info *mac_control = &sp->mac_control;
2791 for (i = 0; i < config->rx_ring_num; i++) {
2792 struct ring_info *ring = &mac_control->rings[i];
2794 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2795 free_rxd_blk(sp, i, blk);
2797 ring->rx_curr_put_info.block_index = 0;
2798 ring->rx_curr_get_info.block_index = 0;
2799 ring->rx_curr_put_info.offset = 0;
2800 ring->rx_curr_get_info.offset = 0;
2801 ring->rx_bufs_left = 0;
2802 DBG_PRINT(INIT_DBG, "%s: Freed 0x%x Rx Buffers on ring%d\n",
2803 dev->name, buf_cnt, i);
2807 static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring)
2809 if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
2810 DBG_PRINT(INFO_DBG, "%s: Out of memory in Rx Intr!!\n",
2811 ring->dev->name);
2813 return 0;
2817 * s2io_poll - Rx interrupt handler for NAPI support
2818 * @napi : pointer to the napi structure.
2819 * @budget : The number of packets that were budgeted to be processed
2820  * during one pass through the 'Poll' function.
2821  * Description:
2822  * Comes into picture only if NAPI support has been incorporated. It does
2823  * the same thing that rx_intr_handler does, but not in an interrupt
2824  * context, and it will process only the given number of packets.
2825  * Return value:
2826  * The number of packets processed.
2829 static int s2io_poll_msix(struct napi_struct *napi, int budget)
2831 struct ring_info *ring = container_of(napi, struct ring_info, napi);
2832 struct net_device *dev = ring->dev;
2833 int pkts_processed = 0;
2834 u8 __iomem *addr = NULL;
2835 u8 val8 = 0;
2836 struct s2io_nic *nic = netdev_priv(dev);
2837 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2838 int budget_org = budget;
2840 if (unlikely(!is_s2io_card_up(nic)))
2841 return 0;
2843 pkts_processed = rx_intr_handler(ring, budget);
2844 s2io_chk_rx_buffers(nic, ring);
2846 if (pkts_processed < budget_org) {
2847 napi_complete(napi);
2848 /*Re Enable MSI-Rx Vector*/
2849 addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
2850 addr += 7 - ring->ring_no;
2851 val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
2852 writeb(val8, addr);
2853 val8 = readb(addr);
2855 return pkts_processed;
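/*
 * Illustrative sketch (not part of the driver): the re-enable above
 * treats xmsi_mask_reg as eight per-vector mask bytes and indexes them
 * from the top, so ring N rewrites byte (7 - N); the exact mask-bit
 * semantics of the two magic values are taken from the code, not from
 * a datasheet.
 */
#if 0
	/* e.g. ring 2 writes 0xbf to byte (7 - 2) == 5 of the register
	 * (ring 0, which shares its vector with alarms, writes 0x3f),
	 * then reads the byte back to flush the posted write. */
	u8 __iomem *mask = (u8 __iomem *)&bar0->xmsi_mask_reg + (7 - 2);
	writeb(0xbf, mask);
	(void)readb(mask);
#endif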
2858 static int s2io_poll_inta(struct napi_struct *napi, int budget)
2860 struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2861 int pkts_processed = 0;
2862 int ring_pkts_processed, i;
2863 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2864 int budget_org = budget;
2865 struct config_param *config = &nic->config;
2866 struct mac_info *mac_control = &nic->mac_control;
2868 if (unlikely(!is_s2io_card_up(nic)))
2869 return 0;
2871 for (i = 0; i < config->rx_ring_num; i++) {
2872 struct ring_info *ring = &mac_control->rings[i];
2873 ring_pkts_processed = rx_intr_handler(ring, budget);
2874 s2io_chk_rx_buffers(nic, ring);
2875 pkts_processed += ring_pkts_processed;
2876 budget -= ring_pkts_processed;
2877 if (budget <= 0)
2878 break;
2880 if (pkts_processed < budget_org) {
2881 napi_complete(napi);
2882 /* Re enable the Rx interrupts for the ring */
2883 writeq(0, &bar0->rx_traffic_mask);
2884 readl(&bar0->rx_traffic_mask);
2886 return pkts_processed;
2889 #ifdef CONFIG_NET_POLL_CONTROLLER
2891 * s2io_netpoll - netpoll event handler entry point
2892 * @dev : pointer to the device structure.
2893 * Description:
2894 * This function will be called by upper layer to check for events on the
2895 * interface in situations where interrupts are disabled. It is used for
2896 * specific in-kernel networking tasks, such as remote consoles and kernel
2897  * debugging over the network (for example, netdump in Red Hat).
2899 static void s2io_netpoll(struct net_device *dev)
2901 struct s2io_nic *nic = netdev_priv(dev);
2902 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2903 u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2904 int i;
2905 struct config_param *config = &nic->config;
2906 struct mac_info *mac_control = &nic->mac_control;
2908 if (pci_channel_offline(nic->pdev))
2909 return;
2911 disable_irq(dev->irq);
2913 writeq(val64, &bar0->rx_traffic_int);
2914 writeq(val64, &bar0->tx_traffic_int);
2916 /* We need to free the transmitted skbs, or else netpoll will run
2917  * out of skbs and fail, and eventually the netpoll application
2918  * (such as netdump) will fail as well.
2920 for (i = 0; i < config->tx_fifo_num; i++)
2921 tx_intr_handler(&mac_control->fifos[i]);
2923 /* check for received packet and indicate up to network */
2924 for (i = 0; i < config->rx_ring_num; i++) {
2925 struct ring_info *ring = &mac_control->rings[i];
2927 rx_intr_handler(ring, 0);
2930 for (i = 0; i < config->rx_ring_num; i++) {
2931 struct ring_info *ring = &mac_control->rings[i];
2933 if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
2934 DBG_PRINT(INFO_DBG,
2935 "%s: Out of memory in Rx Netpoll!!\n",
2936 dev->name);
2937 break;
2940 enable_irq(dev->irq);
2942 #endif
2945 * rx_intr_handler - Rx interrupt handler
2946 * @ring_info: per ring structure.
2947 * @budget: budget for napi processing.
2948 * Description:
2949  * If the interrupt is because of a received frame, or if the
2950  * receive ring contains fresh, as yet unprocessed frames, this function
2951  * is called. It picks up the RxD at which the last Rx processing
2952  * stopped, sends the skb to the OSM's Rx handler and then increments
2953 * the offset.
2954 * Return Value:
2955 * No. of napi packets processed.
2957 static int rx_intr_handler(struct ring_info *ring_data, int budget)
2959 int get_block, put_block;
2960 struct rx_curr_get_info get_info, put_info;
2961 struct RxD_t *rxdp;
2962 struct sk_buff *skb;
2963 int pkt_cnt = 0, napi_pkts = 0;
2964 int i;
2965 struct RxD1 *rxdp1;
2966 struct RxD3 *rxdp3;
2968 get_info = ring_data->rx_curr_get_info;
2969 get_block = get_info.block_index;
2970 memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
2971 put_block = put_info.block_index;
2972 rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
2974 while (RXD_IS_UP2DT(rxdp)) {
2976  * If we are next to the put index then it's
2977  * a FIFO-full condition
2979 if ((get_block == put_block) &&
2980 (get_info.offset + 1) == put_info.offset) {
2981 DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
2982 ring_data->dev->name);
2983 break;
2985 skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
2986 if (skb == NULL) {
2987 DBG_PRINT(ERR_DBG, "%s: NULL skb in Rx Intr\n",
2988 ring_data->dev->name);
2989 return 0;
2991 if (ring_data->rxd_mode == RXD_MODE_1) {
2992 rxdp1 = (struct RxD1 *)rxdp;
2993 pci_unmap_single(ring_data->pdev, (dma_addr_t)
2994 rxdp1->Buffer0_ptr,
2995 ring_data->mtu +
2996 HEADER_ETHERNET_II_802_3_SIZE +
2997 HEADER_802_2_SIZE +
2998 HEADER_SNAP_SIZE,
2999 PCI_DMA_FROMDEVICE);
3000 } else if (ring_data->rxd_mode == RXD_MODE_3B) {
3001 rxdp3 = (struct RxD3 *)rxdp;
3002 pci_dma_sync_single_for_cpu(ring_data->pdev,
3003 (dma_addr_t)rxdp3->Buffer0_ptr,
3004 BUF0_LEN,
3005 PCI_DMA_FROMDEVICE);
3006 pci_unmap_single(ring_data->pdev,
3007 (dma_addr_t)rxdp3->Buffer2_ptr,
3008 ring_data->mtu + 4,
3009 PCI_DMA_FROMDEVICE);
3011 prefetch(skb->data);
3012 rx_osm_handler(ring_data, rxdp);
3013 get_info.offset++;
3014 ring_data->rx_curr_get_info.offset = get_info.offset;
3015 rxdp = ring_data->rx_blocks[get_block].
3016 rxds[get_info.offset].virt_addr;
3017 if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
3018 get_info.offset = 0;
3019 ring_data->rx_curr_get_info.offset = get_info.offset;
3020 get_block++;
3021 if (get_block == ring_data->block_count)
3022 get_block = 0;
3023 ring_data->rx_curr_get_info.block_index = get_block;
3024 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
3027 if (ring_data->nic->config.napi) {
3028 budget--;
3029 napi_pkts++;
3030 if (!budget)
3031 break;
3033 pkt_cnt++;
3034 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
3035 break;
3037 if (ring_data->lro) {
3038 /* Clear all LRO sessions before exiting */
3039 for (i = 0; i < MAX_LRO_SESSIONS; i++) {
3040 struct lro *lro = &ring_data->lro0_n[i];
3041 if (lro->in_use) {
3042 update_L3L4_header(ring_data->nic, lro);
3043 queue_rx_frame(lro->parent, lro->vlan_tag);
3044 clear_lro_session(lro);
3048 return napi_pkts;
3052 * tx_intr_handler - Transmit interrupt handler
3053 * @nic : device private variable
3054 * Description:
3055  * If an interrupt was raised to indicate DMA completion of a
3056  * Tx packet, this function is called. It identifies the last TxD
3057  * whose buffer was freed and frees all skbs whose data have already
3058  * been DMA'ed into the NIC's internal memory.
3059 * Return Value:
3060 * NONE
3063 static void tx_intr_handler(struct fifo_info *fifo_data)
3065 struct s2io_nic *nic = fifo_data->nic;
3066 struct tx_curr_get_info get_info, put_info;
3067 struct sk_buff *skb = NULL;
3068 struct TxD *txdlp;
3069 int pkt_cnt = 0;
3070 unsigned long flags = 0;
3071 u8 err_mask;
3072 struct stat_block *stats = nic->mac_control.stats_info;
3073 struct swStat *swstats = &stats->sw_stat;
3075 if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
3076 return;
3078 get_info = fifo_data->tx_curr_get_info;
3079 memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
3080 txdlp = (struct TxD *)
3081 fifo_data->list_info[get_info.offset].list_virt_addr;
3082 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
3083 (get_info.offset != put_info.offset) &&
3084 (txdlp->Host_Control)) {
3085 /* Check for TxD errors */
3086 if (txdlp->Control_1 & TXD_T_CODE) {
3087 unsigned long long err;
3088 err = txdlp->Control_1 & TXD_T_CODE;
3089 if (err & 0x1) {
3090 swstats->parity_err_cnt++;
3093 /* update t_code statistics */
3094 err_mask = err >> 48;
3095 switch (err_mask) {
3096 case 2:
3097 swstats->tx_buf_abort_cnt++;
3098 break;
3100 case 3:
3101 swstats->tx_desc_abort_cnt++;
3102 break;
3104 case 7:
3105 swstats->tx_parity_err_cnt++;
3106 break;
3108 case 10:
3109 swstats->tx_link_loss_cnt++;
3110 break;
3112 case 15:
3113 swstats->tx_list_proc_err_cnt++;
3114 break;
3118 skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
3119 if (skb == NULL) {
3120 spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3121 DBG_PRINT(ERR_DBG, "%s: NULL skb in Tx Free Intr\n",
3122 __func__);
3123 return;
3125 pkt_cnt++;
3127 /* Updating the statistics block */
3128 swstats->mem_freed += skb->truesize;
3129 dev_kfree_skb_irq(skb);
3131 get_info.offset++;
3132 if (get_info.offset == get_info.fifo_len + 1)
3133 get_info.offset = 0;
3134 txdlp = (struct TxD *)
3135 fifo_data->list_info[get_info.offset].list_virt_addr;
3136 fifo_data->tx_curr_get_info.offset = get_info.offset;
3139 s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);
3141 spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3145  * s2io_mdio_write - Function to write to the MDIO registers
3146  * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3147  * @addr : address value
3148  * @value : data value
3149  * @dev : pointer to net_device structure
3150  * Description:
3151  * This function is used to write values to the MDIO registers.
3152  * Return value: NONE
3154 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value,
3155 struct net_device *dev)
3157 u64 val64;
3158 struct s2io_nic *sp = netdev_priv(dev);
3159 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3161 /* address transaction */
3162 val64 = MDIO_MMD_INDX_ADDR(addr) |
3163 MDIO_MMD_DEV_ADDR(mmd_type) |
3164 MDIO_MMS_PRT_ADDR(0x0);
3165 writeq(val64, &bar0->mdio_control);
3166 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3167 writeq(val64, &bar0->mdio_control);
3168 udelay(100);
3170 /* Data transaction */
3171 val64 = MDIO_MMD_INDX_ADDR(addr) |
3172 MDIO_MMD_DEV_ADDR(mmd_type) |
3173 MDIO_MMS_PRT_ADDR(0x0) |
3174 MDIO_MDIO_DATA(value) |
3175 MDIO_OP(MDIO_OP_WRITE_TRANS);
3176 writeq(val64, &bar0->mdio_control);
3177 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3178 writeq(val64, &bar0->mdio_control);
3179 udelay(100);
3181 val64 = MDIO_MMD_INDX_ADDR(addr) |
3182 MDIO_MMD_DEV_ADDR(mmd_type) |
3183 MDIO_MMS_PRT_ADDR(0x0) |
3184 MDIO_OP(MDIO_OP_READ_TRANS);
3185 writeq(val64, &bar0->mdio_control);
3186 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3187 writeq(val64, &bar0->mdio_control);
3188 udelay(100);
3192  * s2io_mdio_read - Function to read from the MDIO registers
3193  * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3194  * @addr : address value
3195  * @dev : pointer to net_device structure
3196  * Description:
3197  * This function is used to read values from the MDIO registers.
3198  * Return value: the value read from the register.
3200 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3202 u64 val64 = 0x0;
3203 u64 rval64 = 0x0;
3204 struct s2io_nic *sp = netdev_priv(dev);
3205 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3207 /* address transaction */
3208 val64 = val64 | (MDIO_MMD_INDX_ADDR(addr)
3209 | MDIO_MMD_DEV_ADDR(mmd_type)
3210 | MDIO_MMS_PRT_ADDR(0x0));
3211 writeq(val64, &bar0->mdio_control);
3212 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3213 writeq(val64, &bar0->mdio_control);
3214 udelay(100);
3216 /* Data transaction */
3217 val64 = MDIO_MMD_INDX_ADDR(addr) |
3218 MDIO_MMD_DEV_ADDR(mmd_type) |
3219 MDIO_MMS_PRT_ADDR(0x0) |
3220 MDIO_OP(MDIO_OP_READ_TRANS);
3221 writeq(val64, &bar0->mdio_control);
3222 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3223 writeq(val64, &bar0->mdio_control);
3224 udelay(100);
3226 /* Read the value from regs */
3227 rval64 = readq(&bar0->mdio_control);
3228 rval64 = rval64 & 0xFFFF0000;
3229 rval64 = rval64 >> 16;
3230 return rval64;
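/*
 * Illustrative sketch (not part of the driver): a typical read, as done
 * by s2io_updt_xpak_counter() below when probing the MDIO slave.
 */
#if 0
	u64 ctrl1 = s2io_mdio_read(MDIO_MMD_PMAPMD, MDIO_CTRL1, dev);

	/* An all-ones or all-zeros value usually means the slave did not
	 * respond. */
	if (ctrl1 == 0xFFFF || ctrl1 == 0x0000)
		DBG_PRINT(ERR_DBG, "MDIO slave access failed\n");
#endif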
3234 * s2io_chk_xpak_counter - Function to check the status of the xpak counters
3235 * @counter : counter value to be updated
3236 * @flag : flag to indicate the status
3237 * @type : counter type
3238 * Description:
3239  * This function checks the status of the xpak counters value.
3240  * Return value: NONE
3243 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index,
3244 u16 flag, u16 type)
3246 u64 mask = 0x3;
3247 u64 val64;
3248 int i;
3249 for (i = 0; i < index; i++)
3250 mask = mask << 0x2;
3252 if (flag > 0) {
3253 *counter = *counter + 1;
3254 val64 = *regs_stat & mask;
3255 val64 = val64 >> (index * 0x2);
3256 val64 = val64 + 1;
3257 if (val64 == 3) {
3258 switch (type) {
3259 case 1:
3260 DBG_PRINT(ERR_DBG,
3261 "Take Xframe NIC out of service.\n");
3262 DBG_PRINT(ERR_DBG,
3263 "Excessive temperatures may result in premature transceiver failure.\n");
3264 break;
3265 case 2:
3266 DBG_PRINT(ERR_DBG,
3267 "Take Xframe NIC out of service.\n");
3268 DBG_PRINT(ERR_DBG,
3269 "Excessive bias currents may indicate imminent laser diode failure.\n");
3270 break;
3271 case 3:
3272 DBG_PRINT(ERR_DBG,
3273 "Take Xframe NIC out of service.\n");
3274 DBG_PRINT(ERR_DBG,
3275 "Excessive laser output power may saturate far-end receiver.\n");
3276 break;
3277 default:
3278 DBG_PRINT(ERR_DBG,
3279 "Incorrect XPAK Alarm type\n");
3281 val64 = 0x0;
3283 val64 = val64 << (index * 0x2);
3284 *regs_stat = (*regs_stat & (~mask)) | (val64);
3286 } else {
3287 *regs_stat = *regs_stat & (~mask);
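/*
 * Illustrative sketch (not part of the driver): regs_stat holds a 2-bit
 * saturating counter per alarm, selected by index. Three consecutive
 * flagged polls trigger the warning and reset the field; a clear poll
 * zeroes it immediately.
 */
#if 0
	/* e.g. index 2 selects bits 5:4 (0x3 << 4 == 0x30); the field
	 * walks 00 -> 01 -> 10 on flagged polls, and the third flagged
	 * poll prints the warning and resets it to 00. */
	u64 idx2_mask = 0x3ULL << (2 * 2);
#endif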
3292 * s2io_updt_xpak_counter - Function to update the xpak counters
3293 * @dev : pointer to net_device struct
3294 * Description:
3295  * This function updates the status of the xpak counters value.
3296  * Return value: NONE
3298 static void s2io_updt_xpak_counter(struct net_device *dev)
3300 u16 flag = 0x0;
3301 u16 type = 0x0;
3302 u16 val16 = 0x0;
3303 u64 val64 = 0x0;
3304 u64 addr = 0x0;
3306 struct s2io_nic *sp = netdev_priv(dev);
3307 struct stat_block *stats = sp->mac_control.stats_info;
3308 struct xpakStat *xstats = &stats->xpak_stat;
3310 /* Check the communication with the MDIO slave */
3311 addr = MDIO_CTRL1;
3312 val64 = 0x0;
3313 val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3314 if ((val64 == 0xFFFF) || (val64 == 0x0000)) {
3315 DBG_PRINT(ERR_DBG,
3316 "ERR: MDIO slave access failed - Returned %llx\n",
3317 (unsigned long long)val64);
3318 return;
3321 /* Check for the expected value of control reg 1 */
3322 if (val64 != MDIO_CTRL1_SPEED10G) {
3323 DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - "
3324 "Returned: %llx- Expected: 0x%x\n",
3325 (unsigned long long)val64, MDIO_CTRL1_SPEED10G);
3326 return;
3329 /* Loading the DOM register to MDIO register */
3330 addr = 0xA100;
3331 s2io_mdio_write(MDIO_MMD_PMAPMD, addr, val16, dev);
3332 val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3334 /* Reading the Alarm flags */
3335 addr = 0xA070;
3336 val64 = 0x0;
3337 val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3339 flag = CHECKBIT(val64, 0x7);
3340 type = 1;
3341 s2io_chk_xpak_counter(&xstats->alarm_transceiver_temp_high,
3342 &xstats->xpak_regs_stat,
3343 0x0, flag, type);
3345 if (CHECKBIT(val64, 0x6))
3346 xstats->alarm_transceiver_temp_low++;
3348 flag = CHECKBIT(val64, 0x3);
3349 type = 2;
3350 s2io_chk_xpak_counter(&xstats->alarm_laser_bias_current_high,
3351 &xstats->xpak_regs_stat,
3352 0x2, flag, type);
3354 if (CHECKBIT(val64, 0x2))
3355 xstats->alarm_laser_bias_current_low++;
3357 flag = CHECKBIT(val64, 0x1);
3358 type = 3;
3359 s2io_chk_xpak_counter(&xstats->alarm_laser_output_power_high,
3360 &xstats->xpak_regs_stat,
3361 0x4, flag, type);
3363 if (CHECKBIT(val64, 0x0))
3364 xstats->alarm_laser_output_power_low++;
3366 /* Reading the Warning flags */
3367 addr = 0xA074;
3368 val64 = 0x0;
3369 val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3371 if (CHECKBIT(val64, 0x7))
3372 xstats->warn_transceiver_temp_high++;
3374 if (CHECKBIT(val64, 0x6))
3375 xstats->warn_transceiver_temp_low++;
3377 if (CHECKBIT(val64, 0x3))
3378 xstats->warn_laser_bias_current_high++;
3380 if (CHECKBIT(val64, 0x2))
3381 xstats->warn_laser_bias_current_low++;
3383 if (CHECKBIT(val64, 0x1))
3384 xstats->warn_laser_output_power_high++;
3386 if (CHECKBIT(val64, 0x0))
3387 xstats->warn_laser_output_power_low++;
3391 * wait_for_cmd_complete - waits for a command to complete.
3392 * @sp : private member of the device structure, which is a pointer to the
3393 * s2io_nic structure.
3394  * Description: Function that waits for a command written to the RMAC
3395  * ADDR/DATA registers to complete, and returns either success or
3396  * error depending on whether the command completed or not.
3397 * Return value:
3398 * SUCCESS on success and FAILURE on failure.
3401 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3402 int bit_state)
3404 int ret = FAILURE, cnt = 0, delay = 1;
3405 u64 val64;
3407 if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3408 return FAILURE;
3410 do {
3411 val64 = readq(addr);
3412 if (bit_state == S2IO_BIT_RESET) {
3413 if (!(val64 & busy_bit)) {
3414 ret = SUCCESS;
3415 break;
3417 } else {
3418 if (val64 & busy_bit) {
3419 ret = SUCCESS;
3420 break;
3424 if (in_interrupt())
3425 mdelay(delay);
3426 else
3427 msleep(delay);
3429 if (++cnt >= 10)
3430 delay = 50;
3431 } while (cnt < 20);
3432 return ret;
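/*
 * Illustrative sketch (not part of the driver): a typical caller polls a
 * command register until its busy/strobe bit clears. The register and
 * bit names below follow the driver's conventions but are assumptions
 * in this sketch.
 */
#if 0
	if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
				  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
				  S2IO_BIT_RESET) == FAILURE)
		return FAILURE;
#endif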
3435 * check_pci_device_id - Checks if the device id is supported
3436 * @id : device id
3437  * Description: Function to check if the pci device id is supported by the driver.
3438 * Return value: Actual device id if supported else PCI_ANY_ID
3440 static u16 check_pci_device_id(u16 id)
3442 switch (id) {
3443 case PCI_DEVICE_ID_HERC_WIN:
3444 case PCI_DEVICE_ID_HERC_UNI:
3445 return XFRAME_II_DEVICE;
3446 case PCI_DEVICE_ID_S2IO_UNI:
3447 case PCI_DEVICE_ID_S2IO_WIN:
3448 return XFRAME_I_DEVICE;
3449 default:
3450 return PCI_ANY_ID;
3455 * s2io_reset - Resets the card.
3456 * @sp : private member of the device structure.
3457 * Description: Function to Reset the card. This function then also
3458 * restores the previously saved PCI configuration space registers as
3459 * the card reset also resets the configuration space.
3460 * Return value:
3461 * void.
3464 static void s2io_reset(struct s2io_nic *sp)
3466 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3467 u64 val64;
3468 u16 subid, pci_cmd;
3469 int i;
3470 u16 val16;
3471 unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
3472 unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
3473 struct stat_block *stats;
3474 struct swStat *swstats;
3476 DBG_PRINT(INIT_DBG, "%s: Resetting XFrame card %s\n",
3477 __func__, pci_name(sp->pdev));
3479 /* Back up the PCI-X CMD reg, don't want to lose MMRBC, OST settings */
3480 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
3482 val64 = SW_RESET_ALL;
3483 writeq(val64, &bar0->sw_reset);
3484 if (strstr(sp->product_name, "CX4"))
3485 msleep(750);
3486 msleep(250);
3487 for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
3489 /* Restore the PCI state saved during initialization. */
3490 pci_restore_state(sp->pdev);
3491 pci_save_state(sp->pdev);
3492 pci_read_config_word(sp->pdev, 0x2, &val16);
3493 if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3494 break;
3495 msleep(200);
3498 if (check_pci_device_id(val16) == (u16)PCI_ANY_ID)
3499 DBG_PRINT(ERR_DBG, "%s SW_Reset failed!\n", __func__);
3501 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
3503 s2io_init_pci(sp);
3505 /* Set swapper to enable I/O register access */
3506 s2io_set_swapper(sp);
3508 /* restore mac_addr entries */
3509 do_s2io_restore_unicast_mc(sp);
3511 /* Restore the MSIX table entries from local variables */
3512 restore_xmsi_data(sp);
3514 /* Clear certain PCI/PCI-X fields after reset */
3515 if (sp->device_type == XFRAME_II_DEVICE) {
3516 /* Clear "detected parity error" bit */
3517 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
3519 /* Clearing the PCI-X ECC status register */
3520 pci_write_config_dword(sp->pdev, 0x68, 0x7C);
3522 /* Clearing PCI_STATUS error reflected here */
3523 writeq(s2BIT(62), &bar0->txpic_int_reg);
3526 /* Reset device statistics maintained by OS */
3527 memset(&sp->stats, 0, sizeof(struct net_device_stats));
3529 stats = sp->mac_control.stats_info;
3530 swstats = &stats->sw_stat;
3532 /* save link up/down time/cnt, reset/memory/watchdog cnt */
3533 up_cnt = swstats->link_up_cnt;
3534 down_cnt = swstats->link_down_cnt;
3535 up_time = swstats->link_up_time;
3536 down_time = swstats->link_down_time;
3537 reset_cnt = swstats->soft_reset_cnt;
3538 mem_alloc_cnt = swstats->mem_allocated;
3539 mem_free_cnt = swstats->mem_freed;
3540 watchdog_cnt = swstats->watchdog_timer_cnt;
3542 memset(stats, 0, sizeof(struct stat_block));
3544 /* restore link up/down time/cnt, reset/memory/watchdog cnt */
3545 swstats->link_up_cnt = up_cnt;
3546 swstats->link_down_cnt = down_cnt;
3547 swstats->link_up_time = up_time;
3548 swstats->link_down_time = down_time;
3549 swstats->soft_reset_cnt = reset_cnt;
3550 swstats->mem_allocated = mem_alloc_cnt;
3551 swstats->mem_freed = mem_free_cnt;
3552 swstats->watchdog_timer_cnt = watchdog_cnt;
3554 /* SXE-002: Configure link and activity LED to turn it off */
3555 subid = sp->pdev->subsystem_device;
3556 if (((subid & 0xFF) >= 0x07) &&
3557 (sp->device_type == XFRAME_I_DEVICE)) {
3558 val64 = readq(&bar0->gpio_control);
3559 val64 |= 0x0000800000000000ULL;
3560 writeq(val64, &bar0->gpio_control);
3561 val64 = 0x0411040400000000ULL;
3562 writeq(val64, (void __iomem *)bar0 + 0x2700);
3566  * Clear spurious ECC interrupts that would have occurred on
3567 * XFRAME II cards after reset.
3569 if (sp->device_type == XFRAME_II_DEVICE) {
3570 val64 = readq(&bar0->pcc_err_reg);
3571 writeq(val64, &bar0->pcc_err_reg);
3574 sp->device_enabled_once = false;
3578  * s2io_set_swapper - to set the swapper control on the card
3579 * @sp : private member of the device structure,
3580 * pointer to the s2io_nic structure.
3581 * Description: Function to set the swapper control on the card
3582 * correctly depending on the 'endianness' of the system.
3583 * Return value:
3584 * SUCCESS on success and FAILURE on failure.
3587 static int s2io_set_swapper(struct s2io_nic *sp)
3589 struct net_device *dev = sp->dev;
3590 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3591 u64 val64, valt, valr;
3594 * Set proper endian settings and verify the same by reading
3595 * the PIF Feed-back register.
3598 val64 = readq(&bar0->pif_rd_swapper_fb);
3599 if (val64 != 0x0123456789ABCDEFULL) {
3600 int i = 0;
3601 u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
3602 0x8100008181000081ULL, /* FE=1, SE=0 */
3603 0x4200004242000042ULL, /* FE=0, SE=1 */
3604 0}; /* FE=0, SE=0 */
3606 while (i < 4) {
3607 writeq(value[i], &bar0->swapper_ctrl);
3608 val64 = readq(&bar0->pif_rd_swapper_fb);
3609 if (val64 == 0x0123456789ABCDEFULL)
3610 break;
3611 i++;
3613 if (i == 4) {
3614 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, "
3615 "feedback read %llx\n",
3616 dev->name, (unsigned long long)val64);
3617 return FAILURE;
3619 valr = value[i];
3620 } else {
3621 valr = readq(&bar0->swapper_ctrl);
3624 valt = 0x0123456789ABCDEFULL;
3625 writeq(valt, &bar0->xmsi_address);
3626 val64 = readq(&bar0->xmsi_address);
3628 if (val64 != valt) {
3629 int i = 0;
3630 u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
3631 0x0081810000818100ULL, /* FE=1, SE=0 */
3632 0x0042420000424200ULL, /* FE=0, SE=1 */
3633 0}; /* FE=0, SE=0 */
3635 while (i < 4) {
3636 writeq((value[i] | valr), &bar0->swapper_ctrl);
3637 writeq(valt, &bar0->xmsi_address);
3638 val64 = readq(&bar0->xmsi_address);
3639 if (val64 == valt)
3640 break;
3641 i++;
3643 if (i == 4) {
3644 unsigned long long x = val64;
3645 DBG_PRINT(ERR_DBG,
3646 "Write failed, Xmsi_addr reads:0x%llx\n", x);
3647 return FAILURE;
3650 val64 = readq(&bar0->swapper_ctrl);
3651 val64 &= 0xFFFF000000000000ULL;
3653 #ifdef __BIG_ENDIAN
3655  * The device is set to a big-endian format by default, so a
3656  * big-endian driver need not set anything.
3658 val64 |= (SWAPPER_CTRL_TXP_FE |
3659 SWAPPER_CTRL_TXP_SE |
3660 SWAPPER_CTRL_TXD_R_FE |
3661 SWAPPER_CTRL_TXD_W_FE |
3662 SWAPPER_CTRL_TXF_R_FE |
3663 SWAPPER_CTRL_RXD_R_FE |
3664 SWAPPER_CTRL_RXD_W_FE |
3665 SWAPPER_CTRL_RXF_W_FE |
3666 SWAPPER_CTRL_XMSI_FE |
3667 SWAPPER_CTRL_STATS_FE |
3668 SWAPPER_CTRL_STATS_SE);
3669 if (sp->config.intr_type == INTA)
3670 val64 |= SWAPPER_CTRL_XMSI_SE;
3671 writeq(val64, &bar0->swapper_ctrl);
3672 #else
3674 * Initially we enable all bits to make it accessible by the
3675 * driver, then we selectively enable only those bits that
3676 * we want to set.
3678 val64 |= (SWAPPER_CTRL_TXP_FE |
3679 SWAPPER_CTRL_TXP_SE |
3680 SWAPPER_CTRL_TXD_R_FE |
3681 SWAPPER_CTRL_TXD_R_SE |
3682 SWAPPER_CTRL_TXD_W_FE |
3683 SWAPPER_CTRL_TXD_W_SE |
3684 SWAPPER_CTRL_TXF_R_FE |
3685 SWAPPER_CTRL_RXD_R_FE |
3686 SWAPPER_CTRL_RXD_R_SE |
3687 SWAPPER_CTRL_RXD_W_FE |
3688 SWAPPER_CTRL_RXD_W_SE |
3689 SWAPPER_CTRL_RXF_W_FE |
3690 SWAPPER_CTRL_XMSI_FE |
3691 SWAPPER_CTRL_STATS_FE |
3692 SWAPPER_CTRL_STATS_SE);
3693 if (sp->config.intr_type == INTA)
3694 val64 |= SWAPPER_CTRL_XMSI_SE;
3695 writeq(val64, &bar0->swapper_ctrl);
3696 #endif
3697 val64 = readq(&bar0->swapper_ctrl);
3700 * Verifying if endian settings are accurate by reading a
3701 * feedback register.
3703 val64 = readq(&bar0->pif_rd_swapper_fb);
3704 if (val64 != 0x0123456789ABCDEFULL) {
3705 /* Endian settings are incorrect, which calls for another look. */
3706 DBG_PRINT(ERR_DBG,
3707 "%s: Endian settings are wrong, feedback read %llx\n",
3708 dev->name, (unsigned long long)val64);
3709 return FAILURE;
3712 return SUCCESS;
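/*
 * Illustrative sketch (not part of the driver): why 0x0123456789ABCDEF
 * makes a good probe value - every byte is distinct, so a fully
 * byte-swapped read comes back as 0xEFCDAB8967452301 and any partial
 * swap is just as easy to spot against the expected pattern.
 */
#if 0
	u64 fb = readq(&bar0->pif_rd_swapper_fb);

	if (fb == 0x0123456789ABCDEFULL)
		;	/* swapper already configured correctly */
	else if (fb == 0xEFCDAB8967452301ULL)
		;	/* reads come back fully byte-swapped */
#endif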
3715 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3717 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3718 u64 val64;
3719 int ret = 0, cnt = 0;
3721 do {
3722 val64 = readq(&bar0->xmsi_access);
3723 if (!(val64 & s2BIT(15)))
3724 break;
3725 mdelay(1);
3726 cnt++;
3727 } while (cnt < 5);
3728 if (cnt == 5) {
3729 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3730 ret = 1;
3733 return ret;
3736 static void restore_xmsi_data(struct s2io_nic *nic)
3738 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3739 u64 val64;
3740 int i, msix_index;
3742 if (nic->device_type == XFRAME_I_DEVICE)
3743 return;
3745 for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3746 msix_index = (i) ? ((i-1) * 8 + 1) : 0;
3747 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3748 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3749 val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
3750 writeq(val64, &bar0->xmsi_access);
3751 if (wait_for_msix_trans(nic, msix_index)) {
3752 DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3753 __func__, msix_index);
3754 continue;
3759 static void store_xmsi_data(struct s2io_nic *nic)
3761 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3762 u64 val64, addr, data;
3763 int i, msix_index;
3765 if (nic->device_type == XFRAME_I_DEVICE)
3766 return;
3768 /* Store and display */
3769 for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3770 msix_index = (i) ? ((i-1) * 8 + 1) : 0;
3771 val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
3772 writeq(val64, &bar0->xmsi_access);
3773 if (wait_for_msix_trans(nic, msix_index)) {
3774 DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3775 __func__, msix_index);
3776 continue;
3778 addr = readq(&bar0->xmsi_address);
3779 data = readq(&bar0->xmsi_data);
3780 if (addr && data) {
3781 nic->msix_info[i].addr = addr;
3782 nic->msix_info[i].data = data;
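/*
 * Illustrative sketch (not part of the driver): the mapping from the
 * driver's entry index i to the hardware MSI-X table index used above.
 * Entry 0 is the alarm vector; ring entries are spaced 8 apart from 1.
 */
#if 0
	/* i  : 0  1  2   3   4 ...
	 * idx: 0  1  9  17  25 ...  i.e. (i - 1) * 8 + 1 for i > 0 */
	int idx = i ? (i - 1) * 8 + 1 : 0;
#endif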
3787 static int s2io_enable_msi_x(struct s2io_nic *nic)
3789 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3790 u64 rx_mat;
3791 u16 msi_control; /* Temp variable */
3792 int ret, i, j, msix_indx = 1;
3793 int size;
3794 struct stat_block *stats = nic->mac_control.stats_info;
3795 struct swStat *swstats = &stats->sw_stat;
3797 size = nic->num_entries * sizeof(struct msix_entry);
3798 nic->entries = kzalloc(size, GFP_KERNEL);
3799 if (!nic->entries) {
3800 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3801 __func__);
3802 swstats->mem_alloc_fail_cnt++;
3803 return -ENOMEM;
3805 swstats->mem_allocated += size;
3807 size = nic->num_entries * sizeof(struct s2io_msix_entry);
3808 nic->s2io_entries = kzalloc(size, GFP_KERNEL);
3809 if (!nic->s2io_entries) {
3810 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3811 __func__);
3812 swstats->mem_alloc_fail_cnt++;
3813 kfree(nic->entries);
3814 swstats->mem_freed
3815 += (nic->num_entries * sizeof(struct msix_entry));
3816 return -ENOMEM;
3818 swstats->mem_allocated += size;
3820 nic->entries[0].entry = 0;
3821 nic->s2io_entries[0].entry = 0;
3822 nic->s2io_entries[0].in_use = MSIX_FLG;
3823 nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
3824 nic->s2io_entries[0].arg = &nic->mac_control.fifos;
3826 for (i = 1; i < nic->num_entries; i++) {
3827 nic->entries[i].entry = ((i - 1) * 8) + 1;
3828 nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
3829 nic->s2io_entries[i].arg = NULL;
3830 nic->s2io_entries[i].in_use = 0;
3833 rx_mat = readq(&bar0->rx_mat);
3834 for (j = 0; j < nic->config.rx_ring_num; j++) {
3835 rx_mat |= RX_MAT_SET(j, msix_indx);
3836 nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
3837 nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
3838 nic->s2io_entries[j+1].in_use = MSIX_FLG;
3839 msix_indx += 8;
3841 writeq(rx_mat, &bar0->rx_mat);
3842 readq(&bar0->rx_mat);
3844 ret = pci_enable_msix(nic->pdev, nic->entries, nic->num_entries);
3845 /* Fail init on error or if we get fewer vectors than required */
3846 if (ret) {
3847 DBG_PRINT(ERR_DBG, "Enabling MSI-X failed\n");
3848 kfree(nic->entries);
3849 swstats->mem_freed += nic->num_entries *
3850 sizeof(struct msix_entry);
3851 kfree(nic->s2io_entries);
3852 swstats->mem_freed += nic->num_entries *
3853 sizeof(struct s2io_msix_entry);
3854 nic->entries = NULL;
3855 nic->s2io_entries = NULL;
3856 return -ENOMEM;
3859 /*
3860 * To enable MSI-X, MSI also needs to be enabled due to a bug
3861 * in the Herc NIC. (Temporary change, to be removed later.)
3862 */
3863 pci_read_config_word(nic->pdev, 0x42, &msi_control);
3864 msi_control |= 0x1; /* Enable MSI */
3865 pci_write_config_word(nic->pdev, 0x42, msi_control);
3867 return 0;
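/*
 * A condensed sketch of the allocate-two-tables pattern used in
 * s2io_enable_msi_x() above, showing the rollback ordering when the
 * second allocation fails. The _example name is hypothetical and the
 * sketch omits the driver's software statistics bookkeeping.
 */
static int s2io_alloc_msix_tables_example(struct s2io_nic *nic)
{
	int n = nic->num_entries;

	nic->entries = kcalloc(n, sizeof(struct msix_entry), GFP_KERNEL);
	if (!nic->entries)
		return -ENOMEM;

	nic->s2io_entries = kcalloc(n, sizeof(struct s2io_msix_entry),
				    GFP_KERNEL);
	if (!nic->s2io_entries) {
		/* roll back the first allocation before reporting failure */
		kfree(nic->entries);
		nic->entries = NULL;
		return -ENOMEM;
	}
	return 0;
}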
3870 /* Handle software interrupt used during MSI(X) test */
3871 static irqreturn_t s2io_test_intr(int irq, void *dev_id)
3873 struct s2io_nic *sp = dev_id;
3875 sp->msi_detected = 1;
3876 wake_up(&sp->msi_wait);
3878 return IRQ_HANDLED;
3881 /* Test interrupt path by forcing a software IRQ */
3882 static int s2io_test_msi(struct s2io_nic *sp)
3884 struct pci_dev *pdev = sp->pdev;
3885 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3886 int err;
3887 u64 val64, saved64;
3889 err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
3890 sp->name, sp);
3891 if (err) {
3892 DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
3893 sp->dev->name, pci_name(pdev), pdev->irq);
3894 return err;
3897 init_waitqueue_head(&sp->msi_wait);
3898 sp->msi_detected = 0;
3900 saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
3901 val64 |= SCHED_INT_CTRL_ONE_SHOT;
3902 val64 |= SCHED_INT_CTRL_TIMER_EN;
3903 val64 |= SCHED_INT_CTRL_INT2MSI(1);
3904 writeq(val64, &bar0->scheduled_int_ctrl);
3906 wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);
3908 if (!sp->msi_detected) {
3909 /* MSI(X) test failed, go back to INTx mode */
3910 DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
3911 "using MSI(X) during test\n",
3912 sp->dev->name, pci_name(pdev));
3914 err = -EOPNOTSUPP;
3917 free_irq(sp->entries[1].vector, sp);
3919 writeq(saved64, &bar0->scheduled_int_ctrl);
3921 return err;
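/*
 * A hedged sketch of how the self-test above can drive interrupt-mode
 * selection: if no interrupt arrives, fall back to legacy INTA. This is
 * illustrative only (the driver's real fallback lives in its card-up
 * path), the _example name is hypothetical, and it assumes a forward
 * declaration of remove_msix_isr() is visible.
 */
static void s2io_pick_intr_type_example(struct s2io_nic *sp)
{
	if (sp->config.intr_type == MSI_X && s2io_test_msi(sp)) {
		/* tear down MSI-X state, then use legacy INTA instead */
		remove_msix_isr(sp);
		sp->config.intr_type = INTA;
	}
}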
3924 static void remove_msix_isr(struct s2io_nic *sp)
3926 int i;
3927 u16 msi_control;
3929 for (i = 0; i < sp->num_entries; i++) {
3930 if (sp->s2io_entries[i].in_use == MSIX_REGISTERED_SUCCESS) {
3931 int vector = sp->entries[i].vector;
3932 void *arg = sp->s2io_entries[i].arg;
3933 free_irq(vector, arg);
3937 kfree(sp->entries);
3938 kfree(sp->s2io_entries);
3939 sp->entries = NULL;
3940 sp->s2io_entries = NULL;
3942 pci_read_config_word(sp->pdev, 0x42, &msi_control);
3943 msi_control &= 0xFFFE; /* Disable MSI */
3944 pci_write_config_word(sp->pdev, 0x42, msi_control);
3946 pci_disable_msix(sp->pdev);
3949 static void remove_inta_isr(struct s2io_nic *sp)
3951 struct net_device *dev = sp->dev;
3953 free_irq(sp->pdev->irq, dev);
3956 /* ********************************************************* *
3957 * Functions defined below concern the OS part of the driver *
3958 * ********************************************************* */
3960 /**
3961 * s2io_open - open entry point of the driver
3962 * @dev : pointer to the device structure.
3963 * Description:
3964 * This function is the open entry point of the driver. It mainly calls a
3965 * function to allocate Rx buffers and inserts them into the buffer
3966 * descriptors and then enables the Rx part of the NIC.
3967 * Return value:
3968 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3969 * file on failure.
3970 */
3972 static int s2io_open(struct net_device *dev)
3974 struct s2io_nic *sp = netdev_priv(dev);
3975 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
3976 int err = 0;
3978 /*
3979 * Make sure the link is off by default every time
3980 * the NIC is initialized
3981 */
3982 netif_carrier_off(dev);
3983 sp->last_link_state = 0;
3985 /* Initialize H/W and enable interrupts */
3986 err = s2io_card_up(sp);
3987 if (err) {
3988 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3989 dev->name);
3990 goto hw_init_failed;
3993 if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
3994 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3995 s2io_card_down(sp);
3996 err = -ENODEV;
3997 goto hw_init_failed;
3999 s2io_start_all_tx_queue(sp);
4000 return 0;
4002 hw_init_failed:
4003 if (sp->config.intr_type == MSI_X) {
4004 if (sp->entries) {
4005 kfree(sp->entries);
4006 swstats->mem_freed += sp->num_entries *
4007 sizeof(struct msix_entry);
4009 if (sp->s2io_entries) {
4010 kfree(sp->s2io_entries);
4011 swstats->mem_freed += sp->num_entries *
4012 sizeof(struct s2io_msix_entry);
4015 return err;
4018 /**
4019 * s2io_close - close entry point of the driver
4020 * @dev : device pointer.
4021 * Description:
4022 * This is the stop entry point of the driver. It needs to undo exactly
4023 * whatever was done by the open entry point, thus it is usually referred
4024 * to as the close function. Among other things, this function mainly
4025 * stops the Rx side of the NIC and frees all the Rx buffers in the Rx rings.
4026 * Return value:
4027 * 0 on success and an appropriate (-)ve integer as defined in errno.h
4028 * file on failure.
4029 */
4031 static int s2io_close(struct net_device *dev)
4033 struct s2io_nic *sp = netdev_priv(dev);
4034 struct config_param *config = &sp->config;
4035 u64 tmp64;
4036 int offset;
4038 /* Return if the device is already closed.
4039 * This can happen when s2io_card_up failed in change_mtu.
4040 */
4041 if (!is_s2io_card_up(sp))
4042 return 0;
4044 s2io_stop_all_tx_queue(sp);
4045 /* delete all populated mac entries */
4046 for (offset = 1; offset < config->max_mc_addr; offset++) {
4047 tmp64 = do_s2io_read_unicast_mc(sp, offset);
4048 if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
4049 do_s2io_delete_unicast_mc(sp, tmp64);
4052 s2io_card_down(sp);
4054 return 0;
4057 /**
4058 * s2io_xmit - Tx entry point of the driver
4059 * @skb : the socket buffer containing the Tx data.
4060 * @dev : device pointer.
4061 * Description :
4062 * This function is the Tx entry point of the driver. The S2IO NIC supports
4063 * certain protocol assist features on the Tx side, namely CSO, S/G, LSO.
4064 * NOTE: when the device cannot queue the pkt, just the trans_start variable
4065 * will not be updated.
4066 * Return value:
4067 * 0 on success & 1 on failure.
4068 */
4070 static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
4072 struct s2io_nic *sp = netdev_priv(dev);
4073 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
4074 register u64 val64;
4075 struct TxD *txdp;
4076 struct TxFIFO_element __iomem *tx_fifo;
4077 unsigned long flags = 0;
4078 u16 vlan_tag = 0;
4079 struct fifo_info *fifo = NULL;
4080 int do_spin_lock = 1;
4081 int offload_type;
4082 int enable_per_list_interrupt = 0;
4083 struct config_param *config = &sp->config;
4084 struct mac_info *mac_control = &sp->mac_control;
4085 struct stat_block *stats = mac_control->stats_info;
4086 struct swStat *swstats = &stats->sw_stat;
4088 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
4090 if (unlikely(skb->len <= 0)) {
4091 DBG_PRINT(TX_DBG, "%s: Buffer has no data..\n", dev->name);
4092 dev_kfree_skb_any(skb);
4093 return NETDEV_TX_OK;
4096 if (!is_s2io_card_up(sp)) {
4097 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
4098 dev->name);
4099 dev_kfree_skb(skb);
4100 return NETDEV_TX_OK;
4103 queue = 0;
4104 if (vlan_tx_tag_present(skb))
4105 vlan_tag = vlan_tx_tag_get(skb);
4106 if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
4107 if (skb->protocol == htons(ETH_P_IP)) {
4108 struct iphdr *ip;
4109 struct tcphdr *th;
4110 ip = ip_hdr(skb);
4112 if ((ip->frag_off & htons(IP_OFFSET|IP_MF)) == 0) {
4113 th = (struct tcphdr *)(((unsigned char *)ip) +
4114 ip->ihl*4);
4116 if (ip->protocol == IPPROTO_TCP) {
4117 queue_len = sp->total_tcp_fifos;
4118 queue = (ntohs(th->source) +
4119 ntohs(th->dest)) &
4120 sp->fifo_selector[queue_len - 1];
4121 if (queue >= queue_len)
4122 queue = queue_len - 1;
4123 } else if (ip->protocol == IPPROTO_UDP) {
4124 queue_len = sp->total_udp_fifos;
4125 queue = (ntohs(th->source) +
4126 ntohs(th->dest)) &
4127 sp->fifo_selector[queue_len - 1];
4128 if (queue >= queue_len)
4129 queue = queue_len - 1;
4130 queue += sp->udp_fifo_idx;
4131 if (skb->len > 1024)
4132 enable_per_list_interrupt = 1;
4133 do_spin_lock = 0;
4137 } else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
4138 /* get fifo number based on skb->priority value */
4139 queue = config->fifo_mapping
4140 [skb->priority & (MAX_TX_FIFOS - 1)];
4141 fifo = &mac_control->fifos[queue];
4143 if (do_spin_lock)
4144 spin_lock_irqsave(&fifo->tx_lock, flags);
4145 else {
4146 if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
4147 return NETDEV_TX_LOCKED;
4150 if (sp->config.multiq) {
4151 if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
4152 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4153 return NETDEV_TX_BUSY;
4155 } else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
4156 if (netif_queue_stopped(dev)) {
4157 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4158 return NETDEV_TX_BUSY;
4162 put_off = (u16)fifo->tx_curr_put_info.offset;
4163 get_off = (u16)fifo->tx_curr_get_info.offset;
4164 txdp = (struct TxD *)fifo->list_info[put_off].list_virt_addr;
4166 queue_len = fifo->tx_curr_put_info.fifo_len + 1;
4167 /* Avoid "put" pointer going beyond "get" pointer */
4168 if (txdp->Host_Control ||
4169 ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4170 DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
4171 s2io_stop_tx_queue(sp, fifo->fifo_no);
4172 dev_kfree_skb(skb);
4173 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4174 return NETDEV_TX_OK;
4177 offload_type = s2io_offload_type(skb);
4178 if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
4179 txdp->Control_1 |= TXD_TCP_LSO_EN;
4180 txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
4182 if (skb->ip_summed == CHECKSUM_PARTIAL) {
4183 txdp->Control_2 |= (TXD_TX_CKO_IPV4_EN |
4184 TXD_TX_CKO_TCP_EN |
4185 TXD_TX_CKO_UDP_EN);
4187 txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
4188 txdp->Control_1 |= TXD_LIST_OWN_XENA;
4189 txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
4190 if (enable_per_list_interrupt)
4191 if (put_off & (queue_len >> 5))
4192 txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
4193 if (vlan_tag) {
4194 txdp->Control_2 |= TXD_VLAN_ENABLE;
4195 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
4198 frg_len = skb_headlen(skb);
4199 if (offload_type == SKB_GSO_UDP) {
4200 int ufo_size;
4202 ufo_size = s2io_udp_mss(skb);
4203 ufo_size &= ~7;
4204 txdp->Control_1 |= TXD_UFO_EN;
4205 txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
4206 txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
4207 #ifdef __BIG_ENDIAN
4208 /* both variants do cpu_to_be64(be32_to_cpu(...)) */
4209 fifo->ufo_in_band_v[put_off] =
4210 (__force u64)skb_shinfo(skb)->ip6_frag_id;
4211 #else
4212 fifo->ufo_in_band_v[put_off] =
4213 (__force u64)skb_shinfo(skb)->ip6_frag_id << 32;
4214 #endif
4215 txdp->Host_Control = (unsigned long)fifo->ufo_in_band_v;
4216 txdp->Buffer_Pointer = pci_map_single(sp->pdev,
4217 fifo->ufo_in_band_v,
4218 sizeof(u64),
4219 PCI_DMA_TODEVICE);
4220 if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
4221 goto pci_map_failed;
4222 txdp++;
4225 txdp->Buffer_Pointer = pci_map_single(sp->pdev, skb->data,
4226 frg_len, PCI_DMA_TODEVICE);
4227 if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
4228 goto pci_map_failed;
4230 txdp->Host_Control = (unsigned long)skb;
4231 txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
4232 if (offload_type == SKB_GSO_UDP)
4233 txdp->Control_1 |= TXD_UFO_EN;
4235 frg_cnt = skb_shinfo(skb)->nr_frags;
4236 /* For fragmented SKB. */
4237 for (i = 0; i < frg_cnt; i++) {
4238 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4239 /* A '0' length fragment will be ignored */
4240 if (!frag->size)
4241 continue;
4242 txdp++;
4243 txdp->Buffer_Pointer = (u64)pci_map_page(sp->pdev, frag->page,
4244 frag->page_offset,
4245 frag->size,
4246 PCI_DMA_TODEVICE);
4247 txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
4248 if (offload_type == SKB_GSO_UDP)
4249 txdp->Control_1 |= TXD_UFO_EN;
4251 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
4253 if (offload_type == SKB_GSO_UDP)
4254 frg_cnt++; /* as Txd0 was used for inband header */
4256 tx_fifo = mac_control->tx_FIFO_start[queue];
4257 val64 = fifo->list_info[put_off].list_phy_addr;
4258 writeq(val64, &tx_fifo->TxDL_Pointer);
4260 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
4261 TX_FIFO_LAST_LIST);
4262 if (offload_type)
4263 val64 |= TX_FIFO_SPECIAL_FUNC;
4265 writeq(val64, &tx_fifo->List_Control);
4267 mmiowb();
4269 put_off++;
4270 if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
4271 put_off = 0;
4272 fifo->tx_curr_put_info.offset = put_off;
4274 /* Avoid "put" pointer going beyond "get" pointer */
4275 if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4276 swstats->fifo_full_cnt++;
4277 DBG_PRINT(TX_DBG,
4278 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
4279 put_off, get_off);
4280 s2io_stop_tx_queue(sp, fifo->fifo_no);
4282 swstats->mem_allocated += skb->truesize;
4283 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4285 if (sp->config.intr_type == MSI_X)
4286 tx_intr_handler(fifo);
4288 return NETDEV_TX_OK;
4290 pci_map_failed:
4291 swstats->pci_map_fail_cnt++;
4292 s2io_stop_tx_queue(sp, fifo->fifo_no);
4293 swstats->mem_freed += skb->truesize;
4294 dev_kfree_skb(skb);
4295 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4296 return NETDEV_TX_OK;
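/*
 * A small worked example of the circular "put/get" full-ring test used
 * twice in s2io_xmit() above. The next put slot is (put_off + 1) with
 * wraparound, and the ring is treated as full when that slot equals
 * get_off, i.e. one descriptor is always left unused so that full and
 * empty can be told apart. The helper name is hypothetical.
 */
static inline int s2io_txd_ring_full_example(u16 put_off, u16 get_off,
					     u16 queue_len)
{
	u16 next_put = (put_off + 1 == queue_len) ? 0 : put_off + 1;

	/* e.g. queue_len = 4: put_off = 2, get_off = 3 -> full */
	return next_put == get_off;
}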
4299 static void
4300 s2io_alarm_handle(unsigned long data)
4302 struct s2io_nic *sp = (struct s2io_nic *)data;
4303 struct net_device *dev = sp->dev;
4305 s2io_handle_errors(dev);
4306 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4309 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4311 struct ring_info *ring = (struct ring_info *)dev_id;
4312 struct s2io_nic *sp = ring->nic;
4313 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4315 if (unlikely(!is_s2io_card_up(sp)))
4316 return IRQ_HANDLED;
4318 if (sp->config.napi) {
4319 u8 __iomem *addr = NULL;
4320 u8 val8 = 0;
4322 addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
4323 addr += (7 - ring->ring_no);
4324 val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
4325 writeb(val8, addr);
4326 val8 = readb(addr);
4327 napi_schedule(&ring->napi);
4328 } else {
4329 rx_intr_handler(ring, 0);
4330 s2io_chk_rx_buffers(sp, ring);
4333 return IRQ_HANDLED;
4336 static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4338 int i;
4339 struct fifo_info *fifos = (struct fifo_info *)dev_id;
4340 struct s2io_nic *sp = fifos->nic;
4341 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4342 struct config_param *config = &sp->config;
4343 u64 reason;
4345 if (unlikely(!is_s2io_card_up(sp)))
4346 return IRQ_NONE;
4348 reason = readq(&bar0->general_int_status);
4349 if (unlikely(reason == S2IO_MINUS_ONE))
4350 /* Nothing much can be done. Get out */
4351 return IRQ_HANDLED;
4353 if (reason & (GEN_INTR_TXPIC | GEN_INTR_TXTRAFFIC)) {
4354 writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4356 if (reason & GEN_INTR_TXPIC)
4357 s2io_txpic_intr_handle(sp);
4359 if (reason & GEN_INTR_TXTRAFFIC)
4360 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4362 for (i = 0; i < config->tx_fifo_num; i++)
4363 tx_intr_handler(&fifos[i]);
4365 writeq(sp->general_int_mask, &bar0->general_int_mask);
4366 readl(&bar0->general_int_status);
4367 return IRQ_HANDLED;
4369 /* The interrupt was not raised by us */
4370 return IRQ_NONE;
4373 static void s2io_txpic_intr_handle(struct s2io_nic *sp)
4375 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4376 u64 val64;
4378 val64 = readq(&bar0->pic_int_status);
4379 if (val64 & PIC_INT_GPIO) {
4380 val64 = readq(&bar0->gpio_int_reg);
4381 if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
4382 (val64 & GPIO_INT_REG_LINK_UP)) {
4383 /*
4384 * This is an unstable state, so clear both the up/down
4385 * interrupts and let the adapter re-evaluate the link state.
4386 */
4387 val64 |= GPIO_INT_REG_LINK_DOWN;
4388 val64 |= GPIO_INT_REG_LINK_UP;
4389 writeq(val64, &bar0->gpio_int_reg);
4390 val64 = readq(&bar0->gpio_int_mask);
4391 val64 &= ~(GPIO_INT_MASK_LINK_UP |
4392 GPIO_INT_MASK_LINK_DOWN);
4393 writeq(val64, &bar0->gpio_int_mask);
4394 } else if (val64 & GPIO_INT_REG_LINK_UP) {
4395 val64 = readq(&bar0->adapter_status);
4396 /* Enable Adapter */
4397 val64 = readq(&bar0->adapter_control);
4398 val64 |= ADAPTER_CNTL_EN;
4399 writeq(val64, &bar0->adapter_control);
4400 val64 |= ADAPTER_LED_ON;
4401 writeq(val64, &bar0->adapter_control);
4402 if (!sp->device_enabled_once)
4403 sp->device_enabled_once = 1;
4405 s2io_link(sp, LINK_UP);
4406 /*
4407 * unmask the link-down interrupt and mask the
4408 * link-up interrupt
4409 */
4410 val64 = readq(&bar0->gpio_int_mask);
4411 val64 &= ~GPIO_INT_MASK_LINK_DOWN;
4412 val64 |= GPIO_INT_MASK_LINK_UP;
4413 writeq(val64, &bar0->gpio_int_mask);
4415 } else if (val64 & GPIO_INT_REG_LINK_DOWN) {
4416 val64 = readq(&bar0->adapter_status);
4417 s2io_link(sp, LINK_DOWN);
4418 /* Link is down so unmask the link-up interrupt */
4419 val64 = readq(&bar0->gpio_int_mask);
4420 val64 &= ~GPIO_INT_MASK_LINK_UP;
4421 val64 |= GPIO_INT_MASK_LINK_DOWN;
4422 writeq(val64, &bar0->gpio_int_mask);
4424 /* turn off LED */
4425 val64 = readq(&bar0->adapter_control);
4426 val64 = val64 & (~ADAPTER_LED_ON);
4427 writeq(val64, &bar0->adapter_control);
4430 val64 = readq(&bar0->gpio_int_mask);
4433 /**
4434 * do_s2io_chk_alarm_bit - Check for an alarm and increment the counter
4435 * @value: alarm bits
4436 * @addr: address value
4437 * @cnt: counter variable
4438 * Description: Check for an alarm and increment the counter
4439 * Return Value:
4440 * 1 - if the alarm bit is set
4441 * 0 - if the alarm bit is not set
4442 */
4443 static int do_s2io_chk_alarm_bit(u64 value, void __iomem *addr,
4444 unsigned long long *cnt)
4446 u64 val64;
4447 val64 = readq(addr);
4448 if (val64 & value) {
4449 writeq(val64, addr);
4450 (*cnt)++;
4451 return 1;
4453 return 0;
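/*
 * The helper above relies on the alarm registers being write-1-to-clear
 * (W1C): writing the value just read back to the same address
 * acknowledges exactly the bits that were set. A hypothetical
 * caller-side sketch (the counter here is a local, not one of the
 * driver's sw_stat fields):
 */
static void s2io_ack_serr_example(struct s2io_nic *sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	unsigned long long serious = 0;

	/* count and acknowledge any serious-error alarm bits */
	if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
				  &serious))
		DBG_PRINT(ERR_DBG, "serious error acknowledged\n");
}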
4457 /**
4458 * s2io_handle_errors - Xframe error indication handler
4459 * @nic: device private variable
4460 * Description: Handle alarms such as loss of link, single or
4461 * double ECC errors, critical and serious errors.
4462 * Return Value:
4463 * NONE
4464 */
4465 static void s2io_handle_errors(void *dev_id)
4467 struct net_device *dev = (struct net_device *)dev_id;
4468 struct s2io_nic *sp = netdev_priv(dev);
4469 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4470 u64 temp64 = 0, val64 = 0;
4471 int i = 0;
4473 struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
4474 struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
4476 if (!is_s2io_card_up(sp))
4477 return;
4479 if (pci_channel_offline(sp->pdev))
4480 return;
4482 memset(&sw_stat->ring_full_cnt, 0,
4483 sizeof(sw_stat->ring_full_cnt));
4485 /* Handling the XPAK counters update */
4486 if (stats->xpak_timer_count < 72000) {
4487 /* waiting for an hour */
4488 stats->xpak_timer_count++;
4489 } else {
4490 s2io_updt_xpak_counter(dev);
4491 /* reset the count to zero */
4492 stats->xpak_timer_count = 0;
4495 /* Handling link status change error Intr */
4496 if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
4497 val64 = readq(&bar0->mac_rmac_err_reg);
4498 writeq(val64, &bar0->mac_rmac_err_reg);
4499 if (val64 & RMAC_LINK_STATE_CHANGE_INT)
4500 schedule_work(&sp->set_link_task);
4503 /* In case of a serious error, the device will be reset. */
4504 if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
4505 &sw_stat->serious_err_cnt))
4506 goto reset;
4508 /* Check for data parity error */
4509 if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
4510 &sw_stat->parity_err_cnt))
4511 goto reset;
4513 /* Check for ring full counter */
4514 if (sp->device_type == XFRAME_II_DEVICE) {
4515 val64 = readq(&bar0->ring_bump_counter1);
4516 for (i = 0; i < 4; i++) {
4517 temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
4518 temp64 >>= 64 - ((i+1)*16);
4519 sw_stat->ring_full_cnt[i] += temp64;
4522 val64 = readq(&bar0->ring_bump_counter2);
4523 for (i = 0; i < 4; i++) {
4524 temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
4525 temp64 >>= 64 - ((i+1)*16);
4526 sw_stat->ring_full_cnt[i+4] += temp64;
4530 val64 = readq(&bar0->txdma_int_status);
4531 /* check for pfc_err */
4532 if (val64 & TXDMA_PFC_INT) {
4533 if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
4534 PFC_MISC_0_ERR | PFC_MISC_1_ERR |
4535 PFC_PCIX_ERR,
4536 &bar0->pfc_err_reg,
4537 &sw_stat->pfc_err_cnt))
4538 goto reset;
4539 do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR,
4540 &bar0->pfc_err_reg,
4541 &sw_stat->pfc_err_cnt);
4544 /* check for tda_err */
4545 if (val64 & TXDMA_TDA_INT) {
4546 if (do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR |
4547 TDA_SM0_ERR_ALARM |
4548 TDA_SM1_ERR_ALARM,
4549 &bar0->tda_err_reg,
4550 &sw_stat->tda_err_cnt))
4551 goto reset;
4552 do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
4553 &bar0->tda_err_reg,
4554 &sw_stat->tda_err_cnt);
4556 /* check for pcc_err */
4557 if (val64 & TXDMA_PCC_INT) {
4558 if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
4559 PCC_N_SERR | PCC_6_COF_OV_ERR |
4560 PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
4561 PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR |
4562 PCC_TXB_ECC_DB_ERR,
4563 &bar0->pcc_err_reg,
4564 &sw_stat->pcc_err_cnt))
4565 goto reset;
4566 do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
4567 &bar0->pcc_err_reg,
4568 &sw_stat->pcc_err_cnt);
4571 /* check for tti_err */
4572 if (val64 & TXDMA_TTI_INT) {
4573 if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM,
4574 &bar0->tti_err_reg,
4575 &sw_stat->tti_err_cnt))
4576 goto reset;
4577 do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
4578 &bar0->tti_err_reg,
4579 &sw_stat->tti_err_cnt);
4582 /* check for lso_err */
4583 if (val64 & TXDMA_LSO_INT) {
4584 if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT |
4585 LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
4586 &bar0->lso_err_reg,
4587 &sw_stat->lso_err_cnt))
4588 goto reset;
4589 do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
4590 &bar0->lso_err_reg,
4591 &sw_stat->lso_err_cnt);
4594 /* check for tpa_err */
4595 if (val64 & TXDMA_TPA_INT) {
4596 if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM,
4597 &bar0->tpa_err_reg,
4598 &sw_stat->tpa_err_cnt))
4599 goto reset;
4600 do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP,
4601 &bar0->tpa_err_reg,
4602 &sw_stat->tpa_err_cnt);
4605 /* check for sm_err */
4606 if (val64 & TXDMA_SM_INT) {
4607 if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM,
4608 &bar0->sm_err_reg,
4609 &sw_stat->sm_err_cnt))
4610 goto reset;
4613 val64 = readq(&bar0->mac_int_status);
4614 if (val64 & MAC_INT_STATUS_TMAC_INT) {
4615 if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
4616 &bar0->mac_tmac_err_reg,
4617 &sw_stat->mac_tmac_err_cnt))
4618 goto reset;
4619 do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
4620 TMAC_DESC_ECC_SG_ERR |
4621 TMAC_DESC_ECC_DB_ERR,
4622 &bar0->mac_tmac_err_reg,
4623 &sw_stat->mac_tmac_err_cnt);
4626 val64 = readq(&bar0->xgxs_int_status);
4627 if (val64 & XGXS_INT_STATUS_TXGXS) {
4628 if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
4629 &bar0->xgxs_txgxs_err_reg,
4630 &sw_stat->xgxs_txgxs_err_cnt))
4631 goto reset;
4632 do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
4633 &bar0->xgxs_txgxs_err_reg,
4634 &sw_stat->xgxs_txgxs_err_cnt);
4637 val64 = readq(&bar0->rxdma_int_status);
4638 if (val64 & RXDMA_INT_RC_INT_M) {
4639 if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR |
4640 RC_FTC_ECC_DB_ERR |
4641 RC_PRCn_SM_ERR_ALARM |
4642 RC_FTC_SM_ERR_ALARM,
4643 &bar0->rc_err_reg,
4644 &sw_stat->rc_err_cnt))
4645 goto reset;
4646 do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR |
4647 RC_FTC_ECC_SG_ERR |
4648 RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
4649 &sw_stat->rc_err_cnt);
4650 if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn |
4651 PRC_PCI_AB_WR_Rn |
4652 PRC_PCI_AB_F_WR_Rn,
4653 &bar0->prc_pcix_err_reg,
4654 &sw_stat->prc_pcix_err_cnt))
4655 goto reset;
4656 do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn |
4657 PRC_PCI_DP_WR_Rn |
4658 PRC_PCI_DP_F_WR_Rn,
4659 &bar0->prc_pcix_err_reg,
4660 &sw_stat->prc_pcix_err_cnt);
4663 if (val64 & RXDMA_INT_RPA_INT_M) {
4664 if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
4665 &bar0->rpa_err_reg,
4666 &sw_stat->rpa_err_cnt))
4667 goto reset;
4668 do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
4669 &bar0->rpa_err_reg,
4670 &sw_stat->rpa_err_cnt);
4673 if (val64 & RXDMA_INT_RDA_INT_M) {
4674 if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR |
4675 RDA_FRM_ECC_DB_N_AERR |
4676 RDA_SM1_ERR_ALARM |
4677 RDA_SM0_ERR_ALARM |
4678 RDA_RXD_ECC_DB_SERR,
4679 &bar0->rda_err_reg,
4680 &sw_stat->rda_err_cnt))
4681 goto reset;
4682 do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR |
4683 RDA_FRM_ECC_SG_ERR |
4684 RDA_MISC_ERR |
4685 RDA_PCIX_ERR,
4686 &bar0->rda_err_reg,
4687 &sw_stat->rda_err_cnt);
4690 if (val64 & RXDMA_INT_RTI_INT_M) {
4691 if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM,
4692 &bar0->rti_err_reg,
4693 &sw_stat->rti_err_cnt))
4694 goto reset;
4695 do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
4696 &bar0->rti_err_reg,
4697 &sw_stat->rti_err_cnt);
4700 val64 = readq(&bar0->mac_int_status);
4701 if (val64 & MAC_INT_STATUS_RMAC_INT) {
4702 if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
4703 &bar0->mac_rmac_err_reg,
4704 &sw_stat->mac_rmac_err_cnt))
4705 goto reset;
4706 do_s2io_chk_alarm_bit(RMAC_UNUSED_INT |
4707 RMAC_SINGLE_ECC_ERR |
4708 RMAC_DOUBLE_ECC_ERR,
4709 &bar0->mac_rmac_err_reg,
4710 &sw_stat->mac_rmac_err_cnt);
4713 val64 = readq(&bar0->xgxs_int_status);
4714 if (val64 & XGXS_INT_STATUS_RXGXS) {
4715 if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
4716 &bar0->xgxs_rxgxs_err_reg,
4717 &sw_stat->xgxs_rxgxs_err_cnt))
4718 goto reset;
4721 val64 = readq(&bar0->mc_int_status);
4722 if (val64 & MC_INT_STATUS_MC_INT) {
4723 if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR,
4724 &bar0->mc_err_reg,
4725 &sw_stat->mc_err_cnt))
4726 goto reset;
4728 /* Handling ECC errors */
4729 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
4730 writeq(val64, &bar0->mc_err_reg);
4731 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
4732 sw_stat->double_ecc_errs++;
4733 if (sp->device_type != XFRAME_II_DEVICE) {
4734 /*
4735 * Reset Xframe I only on a critical error
4736 */
4737 if (val64 &
4738 (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
4739 MC_ERR_REG_MIRI_ECC_DB_ERR_1))
4740 goto reset;
4742 } else
4743 sw_stat->single_ecc_errs++;
4746 return;
4748 reset:
4749 s2io_stop_all_tx_queue(sp);
4750 schedule_work(&sp->rst_timer_task);
4751 sw_stat->soft_reset_cnt++;
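/*
 * A worked example of the ring-bump counter extraction above.
 * vBIT(0xFFFF, i * 16, 16) masks the i-th 16-bit field counting from
 * the most significant end of the 64-bit register, and the right shift
 * by 64 - (i + 1) * 16 moves it down to bit 0: for i = 0 the shift is
 * 48, for i = 3 it is 0. The helper name is hypothetical.
 */
static inline u16 s2io_u64_field16_example(u64 val64, int i)
{
	/* i = 0 selects bits 63..48, i = 3 selects bits 15..0 */
	return (u16)(val64 >> (64 - (i + 1) * 16));
}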
4754 /**
4755 * s2io_isr - ISR handler of the device.
4756 * @irq: the irq of the device.
4757 * @dev_id: a void pointer to the dev structure of the NIC.
4758 * Description: This function is the ISR handler of the device. It
4759 * identifies the reason for the interrupt and calls the relevant
4760 * service routines. As a contingency measure, this ISR allocates
4761 * receive buffers if their number is below the panic value, which is
4762 * presently set to 25% of the original number of receive buffers allocated.
4763 * Return value:
4764 * IRQ_HANDLED: will be returned if the IRQ was handled by this routine
4765 * IRQ_NONE: will be returned if the interrupt is not from our device
4766 */
4767 static irqreturn_t s2io_isr(int irq, void *dev_id)
4769 struct net_device *dev = (struct net_device *)dev_id;
4770 struct s2io_nic *sp = netdev_priv(dev);
4771 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4772 int i;
4773 u64 reason = 0;
4774 struct mac_info *mac_control;
4775 struct config_param *config;
4777 /* Pretend we handled any irq's from a disconnected card */
4778 if (pci_channel_offline(sp->pdev))
4779 return IRQ_NONE;
4781 if (!is_s2io_card_up(sp))
4782 return IRQ_NONE;
4784 config = &sp->config;
4785 mac_control = &sp->mac_control;
4787 /*
4788 * Identify the cause of the interrupt and call the appropriate
4789 * interrupt handler. Causes for the interrupt could be:
4790 * 1. Rx of packet.
4791 * 2. Tx complete.
4792 * 3. Link down.
4793 */
4794 reason = readq(&bar0->general_int_status);
4796 if (unlikely(reason == S2IO_MINUS_ONE))
4797 return IRQ_HANDLED; /* Nothing much can be done. Get out */
4799 if (reason &
4800 (GEN_INTR_RXTRAFFIC | GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC)) {
4801 writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4803 if (config->napi) {
4804 if (reason & GEN_INTR_RXTRAFFIC) {
4805 napi_schedule(&sp->napi);
4806 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
4807 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4808 readl(&bar0->rx_traffic_int);
4810 } else {
4811 /*
4812 * rx_traffic_int reg is an R1 register; writing all 1's
4813 * will ensure that the actual interrupt-causing bit
4814 * gets cleared and hence a read can be avoided.
4815 */
4816 if (reason & GEN_INTR_RXTRAFFIC)
4817 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4819 for (i = 0; i < config->rx_ring_num; i++) {
4820 struct ring_info *ring = &mac_control->rings[i];
4822 rx_intr_handler(ring, 0);
4826 /*
4827 * tx_traffic_int reg is an R1 register; writing all 1's
4828 * will ensure that the actual interrupt-causing bit gets
4829 * cleared and hence a read can be avoided.
4830 */
4831 if (reason & GEN_INTR_TXTRAFFIC)
4832 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4834 for (i = 0; i < config->tx_fifo_num; i++)
4835 tx_intr_handler(&mac_control->fifos[i]);
4837 if (reason & GEN_INTR_TXPIC)
4838 s2io_txpic_intr_handle(sp);
4840 /*
4841 * Reallocate the buffers from the interrupt handler itself.
4842 */
4843 if (!config->napi) {
4844 for (i = 0; i < config->rx_ring_num; i++) {
4845 struct ring_info *ring = &mac_control->rings[i];
4847 s2io_chk_rx_buffers(sp, ring);
4850 writeq(sp->general_int_mask, &bar0->general_int_mask);
4851 readl(&bar0->general_int_status);
4853 return IRQ_HANDLED;
4855 } else if (!reason) {
4856 /* The interrupt was not raised by us */
4857 return IRQ_NONE;
4860 return IRQ_HANDLED;
4863 /**
4864 * s2io_updt_stats - Trigger an immediate hardware statistics update
4865 */
4866 static void s2io_updt_stats(struct s2io_nic *sp)
4868 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4869 u64 val64;
4870 int cnt = 0;
4872 if (is_s2io_card_up(sp)) {
4873 /* Approx 30us on a 133 MHz bus */
4874 val64 = SET_UPDT_CLICKS(10) |
4875 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4876 writeq(val64, &bar0->stat_cfg);
4877 do {
4878 udelay(100);
4879 val64 = readq(&bar0->stat_cfg);
4880 if (!(val64 & s2BIT(0)))
4881 break;
4882 cnt++;
4883 if (cnt == 5)
4884 break; /* Updt failed */
4885 } while (1);
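/*
 * s2io_updt_stats() above is an instance of a bounded busy-poll: kick
 * the hardware, then poll a completion bit a fixed number of times with
 * a small delay. A generic sketch of the same pattern; the helper name
 * and parameters are hypothetical.
 */
static int s2io_poll_bit_clear_example(void __iomem *reg, u64 bit,
				       int max_tries, int delay_us)
{
	int i;

	for (i = 0; i < max_tries; i++) {
		if (!(readq(reg) & bit))
			return 0;	/* hardware finished */
		udelay(delay_us);
	}
	return -ETIMEDOUT;		/* update did not complete */
}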
4889 /**
4890 * s2io_get_stats - Updates the device statistics structure.
4891 * @dev : pointer to the device structure.
4892 * Description:
4893 * This function updates the device statistics structure in the s2io_nic
4894 * structure and returns a pointer to the same.
4895 * Return value:
4896 * pointer to the updated net_device_stats structure.
4897 */
4898 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4900 struct s2io_nic *sp = netdev_priv(dev);
4901 struct mac_info *mac_control = &sp->mac_control;
4902 struct stat_block *stats = mac_control->stats_info;
4903 u64 delta;
4905 /* Configure stats for immediate update */
4906 s2io_updt_stats(sp);
4908 /* A device reset will cause the on-adapter statistics to be zero'ed.
4909 * This can be done while running by changing the MTU. To prevent the
4910 * system from having the stats zero'ed, the driver keeps a copy of the
4911 * last update to the system (which is also zero'ed on reset). This
4912 * enables the driver to accurately know the delta between the last
4913 * update and the current update.
4914 */
4915 delta = ((u64) le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
4916 le32_to_cpu(stats->rmac_vld_frms)) - sp->stats.rx_packets;
4917 sp->stats.rx_packets += delta;
4918 dev->stats.rx_packets += delta;
4920 delta = ((u64) le32_to_cpu(stats->tmac_frms_oflow) << 32 |
4921 le32_to_cpu(stats->tmac_frms)) - sp->stats.tx_packets;
4922 sp->stats.tx_packets += delta;
4923 dev->stats.tx_packets += delta;
4925 delta = ((u64) le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
4926 le32_to_cpu(stats->rmac_data_octets)) - sp->stats.rx_bytes;
4927 sp->stats.rx_bytes += delta;
4928 dev->stats.rx_bytes += delta;
4930 delta = ((u64) le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
4931 le32_to_cpu(stats->tmac_data_octets)) - sp->stats.tx_bytes;
4932 sp->stats.tx_bytes += delta;
4933 dev->stats.tx_bytes += delta;
4935 delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_errors;
4936 sp->stats.rx_errors += delta;
4937 dev->stats.rx_errors += delta;
4939 delta = ((u64) le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
4940 le32_to_cpu(stats->tmac_any_err_frms)) - sp->stats.tx_errors;
4941 sp->stats.tx_errors += delta;
4942 dev->stats.tx_errors += delta;
4944 delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_dropped;
4945 sp->stats.rx_dropped += delta;
4946 dev->stats.rx_dropped += delta;
4948 delta = le64_to_cpu(stats->tmac_drop_frms) - sp->stats.tx_dropped;
4949 sp->stats.tx_dropped += delta;
4950 dev->stats.tx_dropped += delta;
4952 /* The adapter MAC interprets pause frames as multicast packets, but
4953 * does not pass them up. This erroneously increases the multicast
4954 * packet count and needs to be deducted when the multicast frame count
4955 * is queried.
4956 */
4957 delta = (u64) le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
4958 le32_to_cpu(stats->rmac_vld_mcst_frms);
4959 delta -= le64_to_cpu(stats->rmac_pause_ctrl_frms);
4960 delta -= sp->stats.multicast;
4961 sp->stats.multicast += delta;
4962 dev->stats.multicast += delta;
4964 delta = ((u64) le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
4965 le32_to_cpu(stats->rmac_usized_frms)) +
4966 le64_to_cpu(stats->rmac_long_frms) - sp->stats.rx_length_errors;
4967 sp->stats.rx_length_errors += delta;
4968 dev->stats.rx_length_errors += delta;
4970 delta = le64_to_cpu(stats->rmac_fcs_err_frms) - sp->stats.rx_crc_errors;
4971 sp->stats.rx_crc_errors += delta;
4972 dev->stats.rx_crc_errors += delta;
4974 return &dev->stats;
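/*
 * A worked sketch of the two idioms used throughout s2io_get_stats()
 * above. The adapter keeps 32-bit counters plus a 32-bit overflow word,
 * combined here into one 64-bit value; the driver then accumulates only
 * the delta since the last read, so a device reset (which zeroes both
 * the adapter counters and the saved copy) does not zero the numbers
 * reported to the OS. The helper name is hypothetical.
 */
static u64 s2io_stat_delta_example(__le32 oflow, __le32 low, u64 *last)
{
	u64 now = (u64)le32_to_cpu(oflow) << 32 | le32_to_cpu(low);
	u64 delta = now - *last;	/* both sides are zeroed on reset */

	*last = now;
	return delta;
}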
4977 /**
4978 * s2io_set_multicast - entry point for multicast address enable/disable.
4979 * @dev : pointer to the device structure
4980 * Description:
4981 * This function is a driver entry point which gets called by the kernel
4982 * whenever multicast addresses must be enabled/disabled. This also gets
4983 * called to set/reset promiscuous mode. Depending on the device flags, we
4984 * determine whether multicast addresses must be enabled or whether
4985 * promiscuous mode is to be disabled, etc.
4986 * Return value:
4987 * void.
4988 */
4990 static void s2io_set_multicast(struct net_device *dev)
4992 int i, j, prev_cnt;
4993 struct netdev_hw_addr *ha;
4994 struct s2io_nic *sp = netdev_priv(dev);
4995 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4996 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4997 0xfeffffffffffULL;
4998 u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
4999 void __iomem *add;
5000 struct config_param *config = &sp->config;
5002 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
5003 /* Enable all Multicast addresses */
5004 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
5005 &bar0->rmac_addr_data0_mem);
5006 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
5007 &bar0->rmac_addr_data1_mem);
5008 val64 = RMAC_ADDR_CMD_MEM_WE |
5009 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5010 RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
5011 writeq(val64, &bar0->rmac_addr_cmd_mem);
5012 /* Wait till command completes */
5013 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5014 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5015 S2IO_BIT_RESET);
5017 sp->m_cast_flg = 1;
5018 sp->all_multi_pos = config->max_mc_addr - 1;
5019 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
5020 /* Disable all Multicast addresses */
5021 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
5022 &bar0->rmac_addr_data0_mem);
5023 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
5024 &bar0->rmac_addr_data1_mem);
5025 val64 = RMAC_ADDR_CMD_MEM_WE |
5026 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5027 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
5028 writeq(val64, &bar0->rmac_addr_cmd_mem);
5029 /* Wait till command completes */
5030 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5031 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5032 S2IO_BIT_RESET);
5034 sp->m_cast_flg = 0;
5035 sp->all_multi_pos = 0;
5038 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
5039 /* Put the NIC into promiscuous mode */
5040 add = &bar0->mac_cfg;
5041 val64 = readq(&bar0->mac_cfg);
5042 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
5044 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5045 writel((u32)val64, add);
5046 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5047 writel((u32) (val64 >> 32), (add + 4));
5049 if (vlan_tag_strip != 1) {
5050 val64 = readq(&bar0->rx_pa_cfg);
5051 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
5052 writeq(val64, &bar0->rx_pa_cfg);
5053 sp->vlan_strip_flag = 0;
5056 val64 = readq(&bar0->mac_cfg);
5057 sp->promisc_flg = 1;
5058 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
5059 dev->name);
5060 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
5061 /* Remove the NIC from promiscuous mode */
5062 add = &bar0->mac_cfg;
5063 val64 = readq(&bar0->mac_cfg);
5064 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
5066 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5067 writel((u32)val64, add);
5068 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5069 writel((u32) (val64 >> 32), (add + 4));
5071 if (vlan_tag_strip != 0) {
5072 val64 = readq(&bar0->rx_pa_cfg);
5073 val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
5074 writeq(val64, &bar0->rx_pa_cfg);
5075 sp->vlan_strip_flag = 1;
5078 val64 = readq(&bar0->mac_cfg);
5079 sp->promisc_flg = 0;
5080 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n", dev->name);
5083 /* Update individual M_CAST address list */
5084 if ((!sp->m_cast_flg) && netdev_mc_count(dev)) {
5085 if (netdev_mc_count(dev) >
5086 (config->max_mc_addr - config->max_mac_addr)) {
5087 DBG_PRINT(ERR_DBG,
5088 "%s: No more Rx filters can be added - "
5089 "please enable ALL_MULTI instead\n",
5090 dev->name);
5091 return;
5094 prev_cnt = sp->mc_addr_count;
5095 sp->mc_addr_count = netdev_mc_count(dev);
5097 /* Clear out the previous list of Mc in the H/W. */
5098 for (i = 0; i < prev_cnt; i++) {
5099 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
5100 &bar0->rmac_addr_data0_mem);
5101 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
5102 &bar0->rmac_addr_data1_mem);
5103 val64 = RMAC_ADDR_CMD_MEM_WE |
5104 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5105 RMAC_ADDR_CMD_MEM_OFFSET
5106 (config->mc_start_offset + i);
5107 writeq(val64, &bar0->rmac_addr_cmd_mem);
5109 /* Wait till command completes */
5110 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5111 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5112 S2IO_BIT_RESET)) {
5113 DBG_PRINT(ERR_DBG,
5114 "%s: Adding Multicasts failed\n",
5115 dev->name);
5116 return;
5120 /* Create the new Rx filter list and update the same in H/W. */
5121 i = 0;
5122 netdev_for_each_mc_addr(ha, dev) {
5123 mac_addr = 0;
5124 for (j = 0; j < ETH_ALEN; j++) {
5125 mac_addr |= ha->addr[j];
5126 mac_addr <<= 8;
5128 mac_addr >>= 8;
5129 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
5130 &bar0->rmac_addr_data0_mem);
5131 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
5132 &bar0->rmac_addr_data1_mem);
5133 val64 = RMAC_ADDR_CMD_MEM_WE |
5134 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5135 RMAC_ADDR_CMD_MEM_OFFSET
5136 (i + config->mc_start_offset);
5137 writeq(val64, &bar0->rmac_addr_cmd_mem);
5139 /* Wait till command completes */
5140 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5141 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5142 S2IO_BIT_RESET)) {
5143 DBG_PRINT(ERR_DBG,
5144 "%s: Adding Multicasts failed\n",
5145 dev->name);
5146 return;
5148 i++;
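/*
 * A worked example of the byte-packing loop used above and in
 * do_s2io_add_mc()/do_s2io_prog_unicast(). Each address byte is shifted
 * in from the left, so 00:11:22:33:44:55 ends up as the u64
 * 0x001122334455, matching the RMAC_ADDR_DATA0_MEM_ADDR layout. The
 * helper name is hypothetical.
 */
static u64 s2io_mac_to_u64_example(const u8 *addr)
{
	u64 mac = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		mac <<= 8;
		mac |= addr[i];
	}
	return mac;
}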
5153 /* read unicast & multicast addresses from the CAM and store them in
5154 * the def_mac_addr structure
5155 */
5156 static void do_s2io_store_unicast_mc(struct s2io_nic *sp)
5158 int offset;
5159 u64 mac_addr = 0x0;
5160 struct config_param *config = &sp->config;
5162 /* store unicast & multicast mac addresses */
5163 for (offset = 0; offset < config->max_mc_addr; offset++) {
5164 mac_addr = do_s2io_read_unicast_mc(sp, offset);
5165 /* if read fails disable the entry */
5166 if (mac_addr == FAILURE)
5167 mac_addr = S2IO_DISABLE_MAC_ENTRY;
5168 do_s2io_copy_mac_addr(sp, offset, mac_addr);
5172 /* restore unicast & multicast MAC to CAM from def_mac_addr structure */
5173 static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
5175 int offset;
5176 struct config_param *config = &sp->config;
5177 /* restore unicast mac address */
5178 for (offset = 0; offset < config->max_mac_addr; offset++)
5179 do_s2io_prog_unicast(sp->dev,
5180 sp->def_mac_addr[offset].mac_addr);
5182 /* restore multicast mac address */
5183 for (offset = config->mc_start_offset;
5184 offset < config->max_mc_addr; offset++)
5185 do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
5188 /* add a multicast MAC address to CAM */
5189 static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
5191 int i;
5192 u64 mac_addr = 0;
5193 struct config_param *config = &sp->config;
5195 for (i = 0; i < ETH_ALEN; i++) {
5196 mac_addr <<= 8;
5197 mac_addr |= addr[i];
5199 if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
5200 return SUCCESS;
5202 /* check if the multicast mac is already present in the CAM */
5203 for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
5204 u64 tmp64;
5205 tmp64 = do_s2io_read_unicast_mc(sp, i);
5206 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5207 break;
5209 if (tmp64 == mac_addr)
5210 return SUCCESS;
5212 if (i == config->max_mc_addr) {
5213 DBG_PRINT(ERR_DBG,
5214 "CAM full no space left for multicast MAC\n");
5215 return FAILURE;
5217 /* Update the internal structure with this new mac address */
5218 do_s2io_copy_mac_addr(sp, i, mac_addr);
5220 return do_s2io_add_mac(sp, mac_addr, i);
5223 /* add MAC address to CAM */
5224 static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
5226 u64 val64;
5227 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5229 writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
5230 &bar0->rmac_addr_data0_mem);
5232 val64 = RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5233 RMAC_ADDR_CMD_MEM_OFFSET(off);
5234 writeq(val64, &bar0->rmac_addr_cmd_mem);
5236 /* Wait till command completes */
5237 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5238 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5239 S2IO_BIT_RESET)) {
5240 DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
5241 return FAILURE;
5243 return SUCCESS;
5245 /* deletes a specified unicast/multicast mac entry from CAM */
5246 static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
5248 int offset;
5249 u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
5250 struct config_param *config = &sp->config;
5252 for (offset = 1;
5253 offset < config->max_mc_addr; offset++) {
5254 tmp64 = do_s2io_read_unicast_mc(sp, offset);
5255 if (tmp64 == addr) {
5256 /* disable the entry by writing 0xffffffffffffULL */
5257 if (do_s2io_add_mac(sp, dis_addr, offset) == FAILURE)
5258 return FAILURE;
5259 /* store the new mac list from CAM */
5260 do_s2io_store_unicast_mc(sp);
5261 return SUCCESS;
5264 DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
5265 (unsigned long long)addr);
5266 return FAILURE;
5269 /* read mac entries from CAM */
5270 static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
5272 u64 tmp64 = 0xffffffffffff0000ULL, val64;
5273 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5275 /* read mac addr */
5276 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5277 RMAC_ADDR_CMD_MEM_OFFSET(offset);
5278 writeq(val64, &bar0->rmac_addr_cmd_mem);
5280 /* Wait till command completes */
5281 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5282 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5283 S2IO_BIT_RESET)) {
5284 DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
5285 return FAILURE;
5287 tmp64 = readq(&bar0->rmac_addr_data0_mem);
5289 return tmp64 >> 16;
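/*
 * rmac_addr_data0_mem returns the 6-byte MAC address in the upper 48
 * bits of the 64-bit register, which is why the function above shifts
 * the raw value right by 16 before returning it. A hypothetical
 * round-trip check built on that convention:
 */
static int s2io_cam_entry_matches_example(struct s2io_nic *sp, int offset,
					  u64 mac)
{
	/* do_s2io_read_unicast_mc() already strips the low 16 bits */
	return do_s2io_read_unicast_mc(sp, offset) == mac;
}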
5292 /**
5293 * s2io_set_mac_addr - driver entry point to change the MAC address
5294 */
5296 static int s2io_set_mac_addr(struct net_device *dev, void *p)
5298 struct sockaddr *addr = p;
5300 if (!is_valid_ether_addr(addr->sa_data))
5301 return -EINVAL;
5303 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5305 /* store the MAC address in CAM */
5306 return do_s2io_prog_unicast(dev, dev->dev_addr);
5308 /**
5309 * do_s2io_prog_unicast - Programs the Xframe mac address
5310 * @dev : pointer to the device structure.
5311 * @addr: a uchar pointer to the new mac address which is to be set.
5312 * Description : This procedure will program the Xframe to receive
5313 * frames with the new MAC address.
5314 * Return value: SUCCESS on success and an appropriate (-)ve integer
5315 * as defined in errno.h file on failure.
5316 */
5318 static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
5320 struct s2io_nic *sp = netdev_priv(dev);
5321 register u64 mac_addr = 0, perm_addr = 0;
5322 int i;
5323 u64 tmp64;
5324 struct config_param *config = &sp->config;
5326 /*
5327 * Set the new MAC address as the new unicast filter and reflect this
5328 * change on the device address registered with the OS. It will be
5329 * at offset 0.
5330 */
5331 for (i = 0; i < ETH_ALEN; i++) {
5332 mac_addr <<= 8;
5333 mac_addr |= addr[i];
5334 perm_addr <<= 8;
5335 perm_addr |= sp->def_mac_addr[0].mac_addr[i];
5338 /* check if the dev_addr is different from perm_addr */
5339 if (mac_addr == perm_addr)
5340 return SUCCESS;
5342 /* check if the mac is already present in the CAM */
5343 for (i = 1; i < config->max_mac_addr; i++) {
5344 tmp64 = do_s2io_read_unicast_mc(sp, i);
5345 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5346 break;
5348 if (tmp64 == mac_addr) {
5349 DBG_PRINT(INFO_DBG,
5350 "MAC addr:0x%llx already present in CAM\n",
5351 (unsigned long long)mac_addr);
5352 return SUCCESS;
5355 if (i == config->max_mac_addr) {
5356 DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
5357 return FAILURE;
5359 /* Update the internal structure with this new mac address */
5360 do_s2io_copy_mac_addr(sp, i, mac_addr);
5362 return do_s2io_add_mac(sp, mac_addr, i);
5365 /**
5366 * s2io_ethtool_sset - Sets different link parameters.
5367 * @sp : private member of the device structure, which is a pointer to the
5368 * s2io_nic structure.
5369 * @info: pointer to the structure with parameters given by ethtool to set
5370 * link information.
5371 * Description:
5372 * The function sets different link parameters provided by the user onto
5373 * the NIC.
5374 * Return value:
5375 * 0 on success.
5376 */
5377 static int s2io_ethtool_sset(struct net_device *dev,
5378 struct ethtool_cmd *info)
5380 struct s2io_nic *sp = netdev_priv(dev);
5381 if ((info->autoneg == AUTONEG_ENABLE) ||
5382 (info->speed != SPEED_10000) ||
5383 (info->duplex != DUPLEX_FULL))
5384 return -EINVAL;
5385 else {
5386 s2io_close(sp->dev);
5387 s2io_open(sp->dev);
5390 return 0;
5393 /**
5394 * s2io_ethtool_gset - Return link-specific information.
5395 * @sp : private member of the device structure, pointer to the
5396 * s2io_nic structure.
5397 * @info : pointer to the structure with parameters given by ethtool
5398 * to return link information.
5399 * Description:
5400 * Returns link-specific information like speed, duplex etc. to ethtool.
5401 * Return value :
5402 * return 0 on success.
5403 */
5405 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
5407 struct s2io_nic *sp = netdev_priv(dev);
5408 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5409 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5410 info->port = PORT_FIBRE;
5412 /* info->transceiver */
5413 info->transceiver = XCVR_EXTERNAL;
5415 if (netif_carrier_ok(sp->dev)) {
5416 info->speed = 10000;
5417 info->duplex = DUPLEX_FULL;
5418 } else {
5419 info->speed = -1;
5420 info->duplex = -1;
5423 info->autoneg = AUTONEG_DISABLE;
5424 return 0;
5427 /**
5428 * s2io_ethtool_gdrvinfo - Returns driver-specific information.
5429 * @sp : private member of the device structure, which is a pointer to the
5430 * s2io_nic structure.
5431 * @info : pointer to the structure with parameters given by ethtool to
5432 * return driver information.
5433 * Description:
5434 * Returns driver-specific information like name, version etc. to ethtool.
5435 * Return value:
5436 * void
5437 */
5439 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5440 struct ethtool_drvinfo *info)
5442 struct s2io_nic *sp = netdev_priv(dev);
5444 strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
5445 strncpy(info->version, s2io_driver_version, sizeof(info->version));
5446 strncpy(info->fw_version, "", sizeof(info->fw_version));
5447 strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
5448 info->regdump_len = XENA_REG_SPACE;
5449 info->eedump_len = XENA_EEPROM_SPACE;
5452 /**
5453 * s2io_ethtool_gregs - dumps the entire register space of the Xframe into the buffer.
5454 * @sp: private member of the device structure, which is a pointer to the
5455 * s2io_nic structure.
5456 * @regs : pointer to the structure with parameters given by ethtool for
5457 * dumping the registers.
5458 * @reg_space: The buffer into which all the registers are dumped.
5459 * Description:
5460 * Dumps the entire register space of the Xframe NIC into the user-given
5461 * buffer area.
5462 * Return value :
5463 * void.
5464 */
5466 static void s2io_ethtool_gregs(struct net_device *dev,
5467 struct ethtool_regs *regs, void *space)
5469 int i;
5470 u64 reg;
5471 u8 *reg_space = (u8 *)space;
5472 struct s2io_nic *sp = netdev_priv(dev);
5474 regs->len = XENA_REG_SPACE;
5475 regs->version = sp->pdev->subsystem_device;
5477 for (i = 0; i < regs->len; i += 8) {
5478 reg = readq(sp->bar0 + i);
5479 memcpy((reg_space + i), &reg, 8);
5483 /**
5484 * s2io_phy_id - timer function that alternates the adapter LED.
5485 * @data : address of the private member of the device structure, which
5486 * is a pointer to the s2io_nic structure, provided as a u32.
5487 * Description: This is actually the timer function that toggles the
5488 * adapter LED bit of the adapter control register on every
5489 * invocation. The timer is set for 1/2 a second, hence the NIC blinks
5490 * once every second.
5491 */
5492 static void s2io_phy_id(unsigned long data)
5494 struct s2io_nic *sp = (struct s2io_nic *)data;
5495 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5496 u64 val64 = 0;
5497 u16 subid;
5499 subid = sp->pdev->subsystem_device;
5500 if ((sp->device_type == XFRAME_II_DEVICE) ||
5501 ((subid & 0xFF) >= 0x07)) {
5502 val64 = readq(&bar0->gpio_control);
5503 val64 ^= GPIO_CTRL_GPIO_0;
5504 writeq(val64, &bar0->gpio_control);
5505 } else {
5506 val64 = readq(&bar0->adapter_control);
5507 val64 ^= ADAPTER_LED_ON;
5508 writeq(val64, &bar0->adapter_control);
5511 mod_timer(&sp->id_timer, jiffies + HZ / 2);
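/*
 * s2io_phy_id() above is a self-rearming timer: each invocation toggles
 * the LED bit and re-queues itself HZ/2 jiffies out, giving two toggles
 * per second, i.e. a 1 Hz blink. A minimal sketch of the same pattern;
 * the _example name is hypothetical and the sketch reuses sp->id_timer
 * purely for illustration.
 */
static void s2io_blink_example(unsigned long data)
{
	struct s2io_nic *sp = (struct s2io_nic *)data;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* toggle the LED bit, then fire again in half a second */
	writeq(readq(&bar0->adapter_control) ^ ADAPTER_LED_ON,
	       &bar0->adapter_control);
	mod_timer(&sp->id_timer, jiffies + HZ / 2);
}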
5514 /**
5515 * s2io_ethtool_idnic - To physically identify the NIC on the system.
5516 * @sp : private member of the device structure, which is a pointer to the
5517 * s2io_nic structure.
5518 * @id : pointer to the structure with identification parameters given by
5519 * ethtool.
5520 * Description: Used to physically identify the NIC on the system.
5521 * The Link LED will blink for a time specified by the user for
5522 * identification.
5523 * NOTE: The Link has to be Up to be able to blink the LED. Hence
5524 * identification is possible only if its link is up.
5525 * Return value:
5526 * int , returns 0 on success
5527 */
5529 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
5531 u64 val64 = 0, last_gpio_ctrl_val;
5532 struct s2io_nic *sp = netdev_priv(dev);
5533 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5534 u16 subid;
5536 subid = sp->pdev->subsystem_device;
5537 last_gpio_ctrl_val = readq(&bar0->gpio_control);
5538 if ((sp->device_type == XFRAME_I_DEVICE) && ((subid & 0xFF) < 0x07)) {
5539 val64 = readq(&bar0->adapter_control);
5540 if (!(val64 & ADAPTER_CNTL_EN)) {
5541 pr_err("Adapter Link down, cannot blink LED\n");
5542 return -EFAULT;
5545 if (sp->id_timer.function == NULL) {
5546 init_timer(&sp->id_timer);
5547 sp->id_timer.function = s2io_phy_id;
5548 sp->id_timer.data = (unsigned long)sp;
5550 mod_timer(&sp->id_timer, jiffies);
5551 if (data)
5552 msleep_interruptible(data * HZ);
5553 else
5554 msleep_interruptible(MAX_FLICKER_TIME);
5555 del_timer_sync(&sp->id_timer);
5557 if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
5558 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
5559 last_gpio_ctrl_val = readq(&bar0->gpio_control);
5562 return 0;
5565 static void s2io_ethtool_gringparam(struct net_device *dev,
5566 struct ethtool_ringparam *ering)
5568 struct s2io_nic *sp = netdev_priv(dev);
5569 int i, tx_desc_count = 0, rx_desc_count = 0;
5571 if (sp->rxd_mode == RXD_MODE_1)
5572 ering->rx_max_pending = MAX_RX_DESC_1;
5573 else if (sp->rxd_mode == RXD_MODE_3B)
5574 ering->rx_max_pending = MAX_RX_DESC_2;
5576 ering->tx_max_pending = MAX_TX_DESC;
5577 for (i = 0 ; i < sp->config.tx_fifo_num ; i++)
5578 tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5580 DBG_PRINT(INFO_DBG, "max txds: %d\n", sp->config.max_txds);
5581 ering->tx_pending = tx_desc_count;
5582 rx_desc_count = 0;
5583 for (i = 0 ; i < sp->config.rx_ring_num ; i++)
5584 rx_desc_count += sp->config.rx_cfg[i].num_rxd;
5586 ering->rx_pending = rx_desc_count;
5588 ering->rx_mini_max_pending = 0;
5589 ering->rx_mini_pending = 0;
5590 if (sp->rxd_mode == RXD_MODE_1)
5591 ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5592 else if (sp->rxd_mode == RXD_MODE_3B)
5593 ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5594 ering->rx_jumbo_pending = rx_desc_count;
5597 /**
5598 * s2io_ethtool_getpause_data - Pause frame generation and reception.
5599 * @sp : private member of the device structure, which is a pointer to the
5600 * s2io_nic structure.
5601 * @ep : pointer to the structure with pause parameters given by ethtool.
5602 * Description:
5603 * Returns the Pause frame generation and reception capability of the NIC.
5604 * Return value:
5605 * void
5606 */
5607 static void s2io_ethtool_getpause_data(struct net_device *dev,
5608 struct ethtool_pauseparam *ep)
5610 u64 val64;
5611 struct s2io_nic *sp = netdev_priv(dev);
5612 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5614 val64 = readq(&bar0->rmac_pause_cfg);
5615 if (val64 & RMAC_PAUSE_GEN_ENABLE)
5616 ep->tx_pause = true;
5617 if (val64 & RMAC_PAUSE_RX_ENABLE)
5618 ep->rx_pause = true;
5619 ep->autoneg = false;
5622 /**
5623 * s2io_ethtool_setpause_data - set/reset pause frame generation.
5624 * @sp : private member of the device structure, which is a pointer to the
5625 * s2io_nic structure.
5626 * @ep : pointer to the structure with pause parameters given by ethtool.
5627 * Description:
5628 * It can be used to set or reset Pause frame generation or reception
5629 * support of the NIC.
5630 * Return value:
5631 * int, returns 0 on Success
5632 */
5634 static int s2io_ethtool_setpause_data(struct net_device *dev,
5635 struct ethtool_pauseparam *ep)
5637 u64 val64;
5638 struct s2io_nic *sp = netdev_priv(dev);
5639 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5641 val64 = readq(&bar0->rmac_pause_cfg);
5642 if (ep->tx_pause)
5643 val64 |= RMAC_PAUSE_GEN_ENABLE;
5644 else
5645 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5646 if (ep->rx_pause)
5647 val64 |= RMAC_PAUSE_RX_ENABLE;
5648 else
5649 val64 &= ~RMAC_PAUSE_RX_ENABLE;
5650 writeq(val64, &bar0->rmac_pause_cfg);
5651 return 0;
5654 /**
5655 * read_eeprom - reads 4 bytes of data from a user-given offset.
5656 * @sp : private member of the device structure, which is a pointer to the
5657 * s2io_nic structure.
5658 * @off : offset from which the data must be read
5659 * @data : It is an output parameter where the data read from the given
5660 * offset is stored.
5661 * Description:
5662 * Will read 4 bytes of data from the user-given offset and return the
5663 * read data.
5664 * NOTE: Only the part of the EEPROM visible through the I2C bus
5665 * can be read.
5666 * Return value:
5667 * -1 on failure and 0 on success.
5668 */
5670 #define S2IO_DEV_ID 5
5671 static int read_eeprom(struct s2io_nic *sp, int off, u64 *data)
5673 int ret = -1;
5674 u32 exit_cnt = 0;
5675 u64 val64;
5676 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5678 if (sp->device_type == XFRAME_I_DEVICE) {
5679 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
5680 I2C_CONTROL_ADDR(off) |
5681 I2C_CONTROL_BYTE_CNT(0x3) |
5682 I2C_CONTROL_READ |
5683 I2C_CONTROL_CNTL_START;
5684 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5686 while (exit_cnt < 5) {
5687 val64 = readq(&bar0->i2c_control);
5688 if (I2C_CONTROL_CNTL_END(val64)) {
5689 *data = I2C_CONTROL_GET_DATA(val64);
5690 ret = 0;
5691 break;
5693 msleep(50);
5694 exit_cnt++;
5698 if (sp->device_type == XFRAME_II_DEVICE) {
5699 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5700 SPI_CONTROL_BYTECNT(0x3) |
5701 SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
5702 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5703 val64 |= SPI_CONTROL_REQ;
5704 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5705 while (exit_cnt < 5) {
5706 val64 = readq(&bar0->spi_control);
5707 if (val64 & SPI_CONTROL_NACK) {
5708 ret = 1;
5709 break;
5710 } else if (val64 & SPI_CONTROL_DONE) {
5711 *data = readq(&bar0->spi_data);
5712 *data &= 0xffffff;
5713 ret = 0;
5714 break;
5716 msleep(50);
5717 exit_cnt++;
5720 return ret;
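/*
 * A minimal sketch of the bounded-poll idiom used by read_eeprom() above:
 * kick off a command, then poll a completion indication a fixed number of
 * times with a sleep in between, instead of spinning indefinitely on
 * hardware that may never answer.  Illustrative only; the done_bit
 * abstraction is an assumption (the I2C path actually uses the
 * I2C_CONTROL_CNTL_END() macro), while the 5-try/50 ms budget matches the
 * loops above.
 */
#if 0	/* example only */
static int poll_for_completion(void __iomem *reg, u64 done_bit)
{
	int tries;

	for (tries = 0; tries < 5; tries++) {
		if (readq(reg) & done_bit)
			return 0;		/* command finished */
		msleep(50);			/* give the EEPROM time */
	}
	return -ETIMEDOUT;			/* retry budget exhausted */
}
#endif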
5724 * write_eeprom - actually writes the relevant part of the data value.
5725 * @sp : private member of the device structure, which is a pointer to the
5726 * s2io_nic structure.
5727 * @off : offset at which the data must be written
5728 * @data : The data that is to be written
5729 * @cnt : Number of bytes of the data that are actually to be written into
5730 * the Eeprom. (max of 3)
5731 * Description:
5732 * Actually writes the relevant part of the data value into the Eeprom
5733 * through the I2C bus.
5734 * Return value:
5735 * 0 on success, -1 on failure.
5738 static int write_eeprom(struct s2io_nic *sp, int off, u64 data, int cnt)
5740 int exit_cnt = 0, ret = -1;
5741 u64 val64;
5742 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5744 if (sp->device_type == XFRAME_I_DEVICE) {
5745 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
5746 I2C_CONTROL_ADDR(off) |
5747 I2C_CONTROL_BYTE_CNT(cnt) |
5748 I2C_CONTROL_SET_DATA((u32)data) |
5749 I2C_CONTROL_CNTL_START;
5750 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5752 while (exit_cnt < 5) {
5753 val64 = readq(&bar0->i2c_control);
5754 if (I2C_CONTROL_CNTL_END(val64)) {
5755 if (!(val64 & I2C_CONTROL_NACK))
5756 ret = 0;
5757 break;
5759 msleep(50);
5760 exit_cnt++;
5764 if (sp->device_type == XFRAME_II_DEVICE) {
5765 int write_cnt = (cnt == 8) ? 0 : cnt;
5766 writeq(SPI_DATA_WRITE(data, (cnt << 3)), &bar0->spi_data);
5768 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5769 SPI_CONTROL_BYTECNT(write_cnt) |
5770 SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
5771 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5772 val64 |= SPI_CONTROL_REQ;
5773 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5774 while (exit_cnt < 5) {
5775 val64 = readq(&bar0->spi_control);
5776 if (val64 & SPI_CONTROL_NACK) {
5777 ret = 1;
5778 break;
5779 } else if (val64 & SPI_CONTROL_DONE) {
5780 ret = 0;
5781 break;
5783 msleep(50);
5784 exit_cnt++;
5787 return ret;
5789 static void s2io_vpd_read(struct s2io_nic *nic)
5791 u8 *vpd_data;
5792 u8 data;
5793 int i = 0, cnt, len, fail = 0;
5794 int vpd_addr = 0x80;
5795 struct swStat *swstats = &nic->mac_control.stats_info->sw_stat;
5797 if (nic->device_type == XFRAME_II_DEVICE) {
5798 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5799 vpd_addr = 0x80;
5800 } else {
5801 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5802 vpd_addr = 0x50;
5804 strcpy(nic->serial_num, "NOT AVAILABLE");
5806 vpd_data = kmalloc(256, GFP_KERNEL);
5807 if (!vpd_data) {
5808 swstats->mem_alloc_fail_cnt++;
5809 return;
5811 swstats->mem_allocated += 256;
5813 for (i = 0; i < 256; i += 4) {
5814 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5815 pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
5816 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
5817 for (cnt = 0; cnt < 5; cnt++) {
5818 msleep(2);
5819 pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5820 if (data == 0x80)
5821 break;
5823 if (cnt >= 5) {
5824 DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5825 fail = 1;
5826 break;
5828 pci_read_config_dword(nic->pdev, (vpd_addr + 4),
5829 (u32 *)&vpd_data[i]);
5832 if (!fail) {
5833 /* read serial number of adapter */
5834 for (cnt = 0; cnt < 252; cnt++) {
5835 if ((vpd_data[cnt] == 'S') &&
5836 (vpd_data[cnt+1] == 'N')) {
5837 len = vpd_data[cnt+2];
5838 if (len < min(VPD_STRING_LEN, 256-cnt-2)) {
5839 memcpy(nic->serial_num,
5840 &vpd_data[cnt + 3],
5841 len);
5842 memset(nic->serial_num+len,
5843 0,
5844 VPD_STRING_LEN-len);
5845 break;
5851 if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
5852 len = vpd_data[1];
5853 memcpy(nic->product_name, &vpd_data[3], len);
5854 nic->product_name[len] = 0;
5856 kfree(vpd_data);
5857 swstats->mem_freed += 256;
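/*
 * Illustrative sketch (not driver code): the scan above searches the raw
 * VPD bytes for the "SN" (serial number) keyword, which is laid out as two
 * keyword characters, a one-byte length, then the payload.  A standalone
 * parser over an already-read buffer could look like this; the bounds
 * checks mirror the VPD_STRING_LEN-style clamping done above.
 */
#if 0	/* example only */
#include <stddef.h>
#include <string.h>

static int vpd_find_serial(const unsigned char *vpd, size_t vpd_len,
			   char *out, size_t out_len)
{
	size_t i, len;

	for (i = 0; i + 2 < vpd_len; i++) {
		if (vpd[i] != 'S' || vpd[i + 1] != 'N')
			continue;
		len = vpd[i + 2];
		if (len >= out_len || i + 3 + len > vpd_len)
			return -1;		/* malformed or too long */
		memcpy(out, &vpd[i + 3], len);
		out[len] = '\0';
		return 0;
	}
	return -1;				/* no SN keyword found */
}
#endif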
5861 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
5862 * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
5863 * @eeprom : pointer to the user level structure provided by ethtool,
5864 * containing all relevant information.
5865 * @data_buf : buffer into which the data read from the Eeprom is stored.
5866 * Description: Reads the values stored in the Eeprom at given offset
5867 * for a given length. Stores these values in the input argument data
5868 * buffer 'data_buf' and returns them to the caller (ethtool).
5869 * Return value:
5870 * int 0 on success
5873 static int s2io_ethtool_geeprom(struct net_device *dev,
5874 struct ethtool_eeprom *eeprom, u8 * data_buf)
5876 u32 i, valid;
5877 u64 data;
5878 struct s2io_nic *sp = netdev_priv(dev);
5880 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5882 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5883 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5885 for (i = 0; i < eeprom->len; i += 4) {
5886 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5887 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5888 return -EFAULT;
5890 valid = INV(data);
5891 memcpy((data_buf + i), &valid, 4);
5893 return 0;
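/*
 * Illustrative sketch (user space): s2io_ethtool_geeprom() above is what
 * answers ETHTOOL_GEEPROM, i.e. `ethtool -e ethX`.  A direct ioctl version
 * could look like the following; interface name, offset and length are
 * caller-supplied assumptions.
 */
#if 0	/* example only */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>

static int dump_eeprom(const char *ifname, unsigned int off, unsigned int len)
{
	struct ethtool_eeprom *ee = calloc(1, sizeof(*ee) + len);
	struct ifreq ifr;
	unsigned int i;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (!ee || fd < 0)
		return -1;
	ee->cmd = ETHTOOL_GEEPROM;
	ee->offset = off;
	ee->len = len;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)ee;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		close(fd);
		free(ee);
		return -1;
	}
	for (i = 0; i < ee->len; i++)
		printf("%02x%c", ee->data[i], (i % 16 == 15) ? '\n' : ' ');
	printf("\n");
	close(fd);
	free(ee);
	return 0;
}
#endif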
5897 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5898 * @sp : private member of the device structure, which is a pointer to the
5899 * s2io_nic structure.
5900 * @eeprom : pointer to the user level structure provided by ethtool,
5901 * containing all relevant information.
5902 * @data_buf : user-defined value to be written into Eeprom.
5903 * Description:
5904 * Tries to write the user provided value in the Eeprom, at the offset
5905 * given by the user.
5906 * Return value:
5907 * 0 on success, -EFAULT on failure.
5910 static int s2io_ethtool_seeprom(struct net_device *dev,
5911 struct ethtool_eeprom *eeprom,
5912 u8 *data_buf)
5914 int len = eeprom->len, cnt = 0;
5915 u64 valid = 0, data;
5916 struct s2io_nic *sp = netdev_priv(dev);
5918 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5919 DBG_PRINT(ERR_DBG,
5920 "ETHTOOL_WRITE_EEPROM Err: "
5921 "Magic value is wrong, it is 0x%x should be 0x%x\n",
5922 (sp->pdev->vendor | (sp->pdev->device << 16)),
5923 eeprom->magic);
5924 return -EFAULT;
5927 while (len) {
5928 data = (u32)data_buf[cnt] & 0x000000FF;
5929 if (data)
5930 valid = (u32)(data << 24);
5931 else
5932 valid = data;
5934 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5935 DBG_PRINT(ERR_DBG,
5936 "ETHTOOL_WRITE_EEPROM Err: "
5937 "Cannot write into the specified offset\n");
5938 return -EFAULT;
5940 cnt++;
5941 len--;
5944 return 0;
5948 * s2io_register_test - reads and writes into all clock domains.
5949 * @sp : private member of the device structure, which is a pointer to the
5950 * s2io_nic structure.
5951 * @data : variable that returns the result of each of the tests conducted
5952 * by the driver.
5953 * Description:
5954 * Read and write into all clock domains. The NIC has 3 clock domains;
5955 * verify that registers in all the three regions are accessible.
5956 * Return value:
5957 * 0 on success.
5960 static int s2io_register_test(struct s2io_nic *sp, uint64_t *data)
5962 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5963 u64 val64 = 0, exp_val;
5964 int fail = 0;
5966 val64 = readq(&bar0->pif_rd_swapper_fb);
5967 if (val64 != 0x123456789abcdefULL) {
5968 fail = 1;
5969 DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 1);
5972 val64 = readq(&bar0->rmac_pause_cfg);
5973 if (val64 != 0xc000ffff00000000ULL) {
5974 fail = 1;
5975 DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 2);
5978 val64 = readq(&bar0->rx_queue_cfg);
5979 if (sp->device_type == XFRAME_II_DEVICE)
5980 exp_val = 0x0404040404040404ULL;
5981 else
5982 exp_val = 0x0808080808080808ULL;
5983 if (val64 != exp_val) {
5984 fail = 1;
5985 DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 3);
5988 val64 = readq(&bar0->xgxs_efifo_cfg);
5989 if (val64 != 0x000000001923141EULL) {
5990 fail = 1;
5991 DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 4);
5994 val64 = 0x5A5A5A5A5A5A5A5AULL;
5995 writeq(val64, &bar0->xmsi_data);
5996 val64 = readq(&bar0->xmsi_data);
5997 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5998 fail = 1;
5999 DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 1);
6002 val64 = 0xA5A5A5A5A5A5A5A5ULL;
6003 writeq(val64, &bar0->xmsi_data);
6004 val64 = readq(&bar0->xmsi_data);
6005 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
6006 fail = 1;
6007 DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 2);
6010 *data = fail;
6011 return fail;
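/*
 * A minimal sketch of the write/read-back check performed above: write an
 * alternating pattern and its complement (0x5A.../0xA5...) to a scratch
 * register and verify each reads back intact, so both polarities of every
 * data line are exercised.  Illustrative only; the scratch register
 * pointer is an assumption.
 */
#if 0	/* example only */
static int reg_rw_test(u64 __iomem *scratch)
{
	static const u64 patterns[] = {
		0x5A5A5A5A5A5A5A5AULL,
		0xA5A5A5A5A5A5A5A5ULL,
	};
	int i;

	for (i = 0; i < 2; i++) {
		writeq(patterns[i], scratch);
		if (readq(scratch) != patterns[i])
			return -EIO;	/* stuck/shorted data line */
	}
	return 0;
}
#endif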
6015 * s2io_eeprom_test - verifies that the EEPROM in the Xena can be programmed.
6016 * @sp : private member of the device structure, which is a pointer to the
6017 * s2io_nic structure.
6018 * @data : variable that returns the result of each of the tests conducted
6019 * by the driver.
6020 * Description:
6021 * Verify that the EEPROM in the Xena can be programmed using the I2C_CONTROL
6022 * register.
6023 * Return value:
6024 * 0 on success.
6027 static int s2io_eeprom_test(struct s2io_nic *sp, uint64_t *data)
6029 int fail = 0;
6030 u64 ret_data, org_4F0, org_7F0;
6031 u8 saved_4F0 = 0, saved_7F0 = 0;
6032 struct net_device *dev = sp->dev;
6034 /* Test Write Error at offset 0 */
6035 /* Note that SPI interface allows write access to all areas
6036 * of EEPROM. Hence doing all negative testing only for Xframe I.
6038 if (sp->device_type == XFRAME_I_DEVICE)
6039 if (!write_eeprom(sp, 0, 0, 3))
6040 fail = 1;
6042 /* Save current values at offsets 0x4F0 and 0x7F0 */
6043 if (!read_eeprom(sp, 0x4F0, &org_4F0))
6044 saved_4F0 = 1;
6045 if (!read_eeprom(sp, 0x7F0, &org_7F0))
6046 saved_7F0 = 1;
6048 /* Test Write at offset 4f0 */
6049 if (write_eeprom(sp, 0x4F0, 0x012345, 3))
6050 fail = 1;
6051 if (read_eeprom(sp, 0x4F0, &ret_data))
6052 fail = 1;
6054 if (ret_data != 0x012345) {
6055 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
6056 "Data written %llx Data read %llx\n",
6057 dev->name, (unsigned long long)0x12345,
6058 (unsigned long long)ret_data);
6059 fail = 1;
6062 /* Reset the EEPROM data to FFFF */
6063 write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
6065 /* Test Write Request Error at offset 0x7c */
6066 if (sp->device_type == XFRAME_I_DEVICE)
6067 if (!write_eeprom(sp, 0x07C, 0, 3))
6068 fail = 1;
6070 /* Test Write Request at offset 0x7f0 */
6071 if (write_eeprom(sp, 0x7F0, 0x012345, 3))
6072 fail = 1;
6073 if (read_eeprom(sp, 0x7F0, &ret_data))
6074 fail = 1;
6076 if (ret_data != 0x012345) {
6077 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
6078 "Data written %llx Data read %llx\n",
6079 dev->name, (unsigned long long)0x12345,
6080 (unsigned long long)ret_data);
6081 fail = 1;
6084 /* Reset the EEPROM data to FFFF */
6085 write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
6087 if (sp->device_type == XFRAME_I_DEVICE) {
6088 /* Test Write Error at offset 0x80 */
6089 if (!write_eeprom(sp, 0x080, 0, 3))
6090 fail = 1;
6092 /* Test Write Error at offset 0xfc */
6093 if (!write_eeprom(sp, 0x0FC, 0, 3))
6094 fail = 1;
6096 /* Test Write Error at offset 0x100 */
6097 if (!write_eeprom(sp, 0x100, 0, 3))
6098 fail = 1;
6100 /* Test Write Error at offset 4ec */
6101 if (!write_eeprom(sp, 0x4EC, 0, 3))
6102 fail = 1;
6105 /* Restore values at offsets 0x4F0 and 0x7F0 */
6106 if (saved_4F0)
6107 write_eeprom(sp, 0x4F0, org_4F0, 3);
6108 if (saved_7F0)
6109 write_eeprom(sp, 0x7F0, org_7F0, 3);
6111 *data = fail;
6112 return fail;
6116 * s2io_bist_test - invokes the MemBist test of the card.
6117 * @sp : private member of the device structure, which is a pointer to the
6118 * s2io_nic structure.
6119 * @data : variable that returns the result of each of the tests conducted
6120 * by the driver.
6121 * Description:
6122 * This invokes the MemBist test of the card. We allow around
6123 * 2 seconds for the test to complete. If it is still not complete
6124 * within this period, we consider that the test failed.
6125 * Return value:
6126 * 0 on success and -1 on failure.
6129 static int s2io_bist_test(struct s2io_nic *sp, uint64_t *data)
6131 u8 bist = 0;
6132 int cnt = 0, ret = -1;
6134 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6135 bist |= PCI_BIST_START;
6136 pci_write_config_byte(sp->pdev, PCI_BIST, bist);
6138 while (cnt < 20) {
6139 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6140 if (!(bist & PCI_BIST_START)) {
6141 *data = (bist & PCI_BIST_CODE_MASK);
6142 ret = 0;
6143 break;
6145 msleep(100);
6146 cnt++;
6149 return ret;
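/*
 * Illustrative sketch (user space): the BIST register polled above lives
 * at offset 0x0F of PCI configuration space and can also be inspected via
 * sysfs, without going through the driver.  Bit 7 is PCI_BIST_CAPABLE,
 * bit 6 PCI_BIST_START and bits 3:0 the completion code; the config-file
 * path is an assumption.
 */
#if 0	/* example only */
#include <stdio.h>

static int read_bist(const char *cfg_path)
{
	/* cfg_path: e.g. /sys/bus/pci/devices/<bdf>/config */
	unsigned char bist;
	FILE *f = fopen(cfg_path, "rb");

	if (!f)
		return -1;
	if (fseek(f, 0x0F, SEEK_SET) != 0 || fread(&bist, 1, 1, f) != 1) {
		fclose(f);
		return -1;
	}
	fclose(f);
	printf("BIST capable=%d running=%d code=0x%x\n",
	       !!(bist & 0x80), !!(bist & 0x40), bist & 0x0F);
	return 0;
}
#endif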
6153 * s2io_link_test - verifies the link state of the NIC
6154 * @sp : private member of the device structure, which is a pointer to the
6155 * s2io_nic structure.
6156 * @data: variable that returns the result of each of the test conducted by
6157 * the driver.
6158 * Description:
6159 * The function verifies the link state of the NIC and updates the input
6160 * argument 'data' appropriately.
6161 * Return value:
6162 * 0 on success.
6165 static int s2io_link_test(struct s2io_nic *sp, uint64_t *data)
6167 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6168 u64 val64;
6170 val64 = readq(&bar0->adapter_status);
6171 if (!(LINK_IS_UP(val64)))
6172 *data = 1;
6173 else
6174 *data = 0;
6176 return *data;
6180 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
6181 * @sp : private member of the device structure, which is a pointer to the
6182 * s2io_nic structure.
6183 * @data : variable that returns the result of each of the tests
6184 * conducted by the driver.
6185 * Description:
6186 * This is one of the offline tests that test the read and write
6187 * access to the RldRam chip on the NIC.
6188 * Return value:
6189 * 0 on success.
6192 static int s2io_rldram_test(struct s2io_nic *sp, uint64_t *data)
6194 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6195 u64 val64;
6196 int cnt, iteration = 0, test_fail = 0;
6198 val64 = readq(&bar0->adapter_control);
6199 val64 &= ~ADAPTER_ECC_EN;
6200 writeq(val64, &bar0->adapter_control);
6202 val64 = readq(&bar0->mc_rldram_test_ctrl);
6203 val64 |= MC_RLDRAM_TEST_MODE;
6204 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6206 val64 = readq(&bar0->mc_rldram_mrs);
6207 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
6208 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
6210 val64 |= MC_RLDRAM_MRS_ENABLE;
6211 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
6213 while (iteration < 2) {
6214 val64 = 0x55555555aaaa0000ULL;
6215 if (iteration == 1)
6216 val64 ^= 0xFFFFFFFFFFFF0000ULL;
6217 writeq(val64, &bar0->mc_rldram_test_d0);
6219 val64 = 0xaaaa5a5555550000ULL;
6220 if (iteration == 1)
6221 val64 ^= 0xFFFFFFFFFFFF0000ULL;
6222 writeq(val64, &bar0->mc_rldram_test_d1);
6224 val64 = 0x55aaaaaaaa5a0000ULL;
6225 if (iteration == 1)
6226 val64 ^= 0xFFFFFFFFFFFF0000ULL;
6227 writeq(val64, &bar0->mc_rldram_test_d2);
6229 val64 = (u64) (0x0000003ffffe0100ULL);
6230 writeq(val64, &bar0->mc_rldram_test_add);
6232 val64 = MC_RLDRAM_TEST_MODE |
6233 MC_RLDRAM_TEST_WRITE |
6234 MC_RLDRAM_TEST_GO;
6235 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6237 for (cnt = 0; cnt < 5; cnt++) {
6238 val64 = readq(&bar0->mc_rldram_test_ctrl);
6239 if (val64 & MC_RLDRAM_TEST_DONE)
6240 break;
6241 msleep(200);
6244 if (cnt == 5)
6245 break;
6247 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
6248 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6250 for (cnt = 0; cnt < 5; cnt++) {
6251 val64 = readq(&bar0->mc_rldram_test_ctrl);
6252 if (val64 & MC_RLDRAM_TEST_DONE)
6253 break;
6254 msleep(500);
6257 if (cnt == 5)
6258 break;
6260 val64 = readq(&bar0->mc_rldram_test_ctrl);
6261 if (!(val64 & MC_RLDRAM_TEST_PASS))
6262 test_fail = 1;
6264 iteration++;
6267 *data = test_fail;
6269 /* Bring the adapter out of test mode */
6270 SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
6272 return test_fail;
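/*
 * A self-contained sketch of the pattern/anti-pattern memory test the
 * RLDRAM check above performs: pass 0 writes a seed pattern, pass 1 writes
 * a partially complemented one, and each pass is verified before moving
 * on.  The buffer here is ordinary RAM, purely for illustration.
 */
#if 0	/* example only */
#include <stdint.h>
#include <stddef.h>

static int pattern_mem_test(uint64_t *buf, size_t nwords)
{
	const uint64_t seed = 0x55555555AAAA0000ULL;
	size_t i;
	int pass;

	for (pass = 0; pass < 2; pass++) {
		uint64_t pat = pass ? seed ^ 0xFFFFFFFFFFFF0000ULL : seed;

		for (i = 0; i < nwords; i++)
			buf[i] = pat;
		for (i = 0; i < nwords; i++)
			if (buf[i] != pat)
				return -1;	/* miscompare at word i */
	}
	return 0;
}
#endif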
6276 * s2io_ethtool_test - conducts 6 tests to determine the health of the card.
6277 * @sp : private member of the device structure, which is a pointer to the
6278 * s2io_nic structure.
6279 * @ethtest : pointer to a ethtool command specific structure that will be
6280 * returned to the user.
6281 * @data : variable that returns the result of each of the test
6282 * conducted by the driver.
6283 * Description:
6284 * This function conducts 6 tests ( 4 offline and 2 online) to determine
6285 * the health of the card.
6286 * Return value:
6287 * void
6290 static void s2io_ethtool_test(struct net_device *dev,
6291 struct ethtool_test *ethtest,
6292 uint64_t *data)
6294 struct s2io_nic *sp = netdev_priv(dev);
6295 int orig_state = netif_running(sp->dev);
6297 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
6298 /* Offline Tests. */
6299 if (orig_state)
6300 s2io_close(sp->dev);
6302 if (s2io_register_test(sp, &data[0]))
6303 ethtest->flags |= ETH_TEST_FL_FAILED;
6305 s2io_reset(sp);
6307 if (s2io_rldram_test(sp, &data[3]))
6308 ethtest->flags |= ETH_TEST_FL_FAILED;
6310 s2io_reset(sp);
6312 if (s2io_eeprom_test(sp, &data[1]))
6313 ethtest->flags |= ETH_TEST_FL_FAILED;
6315 if (s2io_bist_test(sp, &data[4]))
6316 ethtest->flags |= ETH_TEST_FL_FAILED;
6318 if (orig_state)
6319 s2io_open(sp->dev);
6321 data[2] = 0;
6322 } else {
6323 /* Online Tests. */
6324 if (!orig_state) {
6325 DBG_PRINT(ERR_DBG, "%s: is not up, cannot run test\n",
6326 dev->name);
6327 data[0] = -1;
6328 data[1] = -1;
6329 data[2] = -1;
6330 data[3] = -1;
6331 data[4] = -1;
6334 if (s2io_link_test(sp, &data[2]))
6335 ethtest->flags |= ETH_TEST_FL_FAILED;
6337 data[0] = 0;
6338 data[1] = 0;
6339 data[3] = 0;
6340 data[4] = 0;
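/*
 * Illustrative sketch (user space): this self-test is normally driven by
 * `ethtool -t ethX offline`.  A direct ETHTOOL_TEST ioctl could look like
 * the following; the result count of 5 (matching the data[0..4] slots
 * above) and the interface name are assumptions.
 */
#if 0	/* example only */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>

static int run_selftest(const char *ifname, int offline)
{
	unsigned int nres = 5;	/* assumed: matches S2IO_TEST_LEN */
	struct ethtool_test *t = calloc(1, sizeof(*t) + nres * sizeof(__u64));
	struct ifreq ifr;
	unsigned int i;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (!t || fd < 0)
		return -1;
	t->cmd = ETHTOOL_TEST;
	t->flags = offline ? ETH_TEST_FL_OFFLINE : 0;
	t->len = nres;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)t;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		close(fd);
		free(t);
		return -1;
	}
	for (i = 0; i < nres; i++)
		printf("test[%u] = %llu\n", i, (unsigned long long)t->data[i]);
	printf("overall: %s\n",
	       (t->flags & ETH_TEST_FL_FAILED) ? "FAILED" : "PASS");
	close(fd);
	free(t);
	return 0;
}
#endif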
6344 static void s2io_get_ethtool_stats(struct net_device *dev,
6345 struct ethtool_stats *estats,
6346 u64 *tmp_stats)
6348 int i = 0, k;
6349 struct s2io_nic *sp = netdev_priv(dev);
6350 struct stat_block *stats = sp->mac_control.stats_info;
6351 struct swStat *swstats = &stats->sw_stat;
6352 struct xpakStat *xstats = &stats->xpak_stat;
6354 s2io_updt_stats(sp);
6355 tmp_stats[i++] =
6356 (u64)le32_to_cpu(stats->tmac_frms_oflow) << 32 |
6357 le32_to_cpu(stats->tmac_frms);
6358 tmp_stats[i++] =
6359 (u64)le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
6360 le32_to_cpu(stats->tmac_data_octets);
6361 tmp_stats[i++] = le64_to_cpu(stats->tmac_drop_frms);
6362 tmp_stats[i++] =
6363 (u64)le32_to_cpu(stats->tmac_mcst_frms_oflow) << 32 |
6364 le32_to_cpu(stats->tmac_mcst_frms);
6365 tmp_stats[i++] =
6366 (u64)le32_to_cpu(stats->tmac_bcst_frms_oflow) << 32 |
6367 le32_to_cpu(stats->tmac_bcst_frms);
6368 tmp_stats[i++] = le64_to_cpu(stats->tmac_pause_ctrl_frms);
6369 tmp_stats[i++] =
6370 (u64)le32_to_cpu(stats->tmac_ttl_octets_oflow) << 32 |
6371 le32_to_cpu(stats->tmac_ttl_octets);
6372 tmp_stats[i++] =
6373 (u64)le32_to_cpu(stats->tmac_ucst_frms_oflow) << 32 |
6374 le32_to_cpu(stats->tmac_ucst_frms);
6375 tmp_stats[i++] =
6376 (u64)le32_to_cpu(stats->tmac_nucst_frms_oflow) << 32 |
6377 le32_to_cpu(stats->tmac_nucst_frms);
6378 tmp_stats[i++] =
6379 (u64)le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
6380 le32_to_cpu(stats->tmac_any_err_frms);
6381 tmp_stats[i++] = le64_to_cpu(stats->tmac_ttl_less_fb_octets);
6382 tmp_stats[i++] = le64_to_cpu(stats->tmac_vld_ip_octets);
6383 tmp_stats[i++] =
6384 (u64)le32_to_cpu(stats->tmac_vld_ip_oflow) << 32 |
6385 le32_to_cpu(stats->tmac_vld_ip);
6386 tmp_stats[i++] =
6387 (u64)le32_to_cpu(stats->tmac_drop_ip_oflow) << 32 |
6388 le32_to_cpu(stats->tmac_drop_ip);
6389 tmp_stats[i++] =
6390 (u64)le32_to_cpu(stats->tmac_icmp_oflow) << 32 |
6391 le32_to_cpu(stats->tmac_icmp);
6392 tmp_stats[i++] =
6393 (u64)le32_to_cpu(stats->tmac_rst_tcp_oflow) << 32 |
6394 le32_to_cpu(stats->tmac_rst_tcp);
6395 tmp_stats[i++] = le64_to_cpu(stats->tmac_tcp);
6396 tmp_stats[i++] = (u64)le32_to_cpu(stats->tmac_udp_oflow) << 32 |
6397 le32_to_cpu(stats->tmac_udp);
6398 tmp_stats[i++] =
6399 (u64)le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
6400 le32_to_cpu(stats->rmac_vld_frms);
6401 tmp_stats[i++] =
6402 (u64)le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
6403 le32_to_cpu(stats->rmac_data_octets);
6404 tmp_stats[i++] = le64_to_cpu(stats->rmac_fcs_err_frms);
6405 tmp_stats[i++] = le64_to_cpu(stats->rmac_drop_frms);
6406 tmp_stats[i++] =
6407 (u64)le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
6408 le32_to_cpu(stats->rmac_vld_mcst_frms);
6409 tmp_stats[i++] =
6410 (u64)le32_to_cpu(stats->rmac_vld_bcst_frms_oflow) << 32 |
6411 le32_to_cpu(stats->rmac_vld_bcst_frms);
6412 tmp_stats[i++] = le32_to_cpu(stats->rmac_in_rng_len_err_frms);
6413 tmp_stats[i++] = le32_to_cpu(stats->rmac_out_rng_len_err_frms);
6414 tmp_stats[i++] = le64_to_cpu(stats->rmac_long_frms);
6415 tmp_stats[i++] = le64_to_cpu(stats->rmac_pause_ctrl_frms);
6416 tmp_stats[i++] = le64_to_cpu(stats->rmac_unsup_ctrl_frms);
6417 tmp_stats[i++] =
6418 (u64)le32_to_cpu(stats->rmac_ttl_octets_oflow) << 32 |
6419 le32_to_cpu(stats->rmac_ttl_octets);
6420 tmp_stats[i++] =
6421 (u64)le32_to_cpu(stats->rmac_accepted_ucst_frms_oflow) << 32
6422 | le32_to_cpu(stats->rmac_accepted_ucst_frms);
6423 tmp_stats[i++] =
6424 (u64)le32_to_cpu(stats->rmac_accepted_nucst_frms_oflow)
6425 << 32 | le32_to_cpu(stats->rmac_accepted_nucst_frms);
6426 tmp_stats[i++] =
6427 (u64)le32_to_cpu(stats->rmac_discarded_frms_oflow) << 32 |
6428 le32_to_cpu(stats->rmac_discarded_frms);
6429 tmp_stats[i++] =
6430 (u64)le32_to_cpu(stats->rmac_drop_events_oflow)
6431 << 32 | le32_to_cpu(stats->rmac_drop_events);
6432 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_less_fb_octets);
6433 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_frms);
6434 tmp_stats[i++] =
6435 (u64)le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
6436 le32_to_cpu(stats->rmac_usized_frms);
6437 tmp_stats[i++] =
6438 (u64)le32_to_cpu(stats->rmac_osized_frms_oflow) << 32 |
6439 le32_to_cpu(stats->rmac_osized_frms);
6440 tmp_stats[i++] =
6441 (u64)le32_to_cpu(stats->rmac_frag_frms_oflow) << 32 |
6442 le32_to_cpu(stats->rmac_frag_frms);
6443 tmp_stats[i++] =
6444 (u64)le32_to_cpu(stats->rmac_jabber_frms_oflow) << 32 |
6445 le32_to_cpu(stats->rmac_jabber_frms);
6446 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_64_frms);
6447 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_65_127_frms);
6448 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_128_255_frms);
6449 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_256_511_frms);
6450 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_512_1023_frms);
6451 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_1024_1518_frms);
6452 tmp_stats[i++] =
6453 (u64)le32_to_cpu(stats->rmac_ip_oflow) << 32 |
6454 le32_to_cpu(stats->rmac_ip);
6455 tmp_stats[i++] = le64_to_cpu(stats->rmac_ip_octets);
6456 tmp_stats[i++] = le32_to_cpu(stats->rmac_hdr_err_ip);
6457 tmp_stats[i++] =
6458 (u64)le32_to_cpu(stats->rmac_drop_ip_oflow) << 32 |
6459 le32_to_cpu(stats->rmac_drop_ip);
6460 tmp_stats[i++] =
6461 (u64)le32_to_cpu(stats->rmac_icmp_oflow) << 32 |
6462 le32_to_cpu(stats->rmac_icmp);
6463 tmp_stats[i++] = le64_to_cpu(stats->rmac_tcp);
6464 tmp_stats[i++] =
6465 (u64)le32_to_cpu(stats->rmac_udp_oflow) << 32 |
6466 le32_to_cpu(stats->rmac_udp);
6467 tmp_stats[i++] =
6468 (u64)le32_to_cpu(stats->rmac_err_drp_udp_oflow) << 32 |
6469 le32_to_cpu(stats->rmac_err_drp_udp);
6470 tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_err_sym);
6471 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q0);
6472 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q1);
6473 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q2);
6474 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q3);
6475 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q4);
6476 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q5);
6477 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q6);
6478 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q7);
6479 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q0);
6480 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q1);
6481 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q2);
6482 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q3);
6483 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q4);
6484 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q5);
6485 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q6);
6486 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q7);
6487 tmp_stats[i++] =
6488 (u64)le32_to_cpu(stats->rmac_pause_cnt_oflow) << 32 |
6489 le32_to_cpu(stats->rmac_pause_cnt);
6490 tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_data_err_cnt);
6491 tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_ctrl_err_cnt);
6492 tmp_stats[i++] =
6493 (u64)le32_to_cpu(stats->rmac_accepted_ip_oflow) << 32 |
6494 le32_to_cpu(stats->rmac_accepted_ip);
6495 tmp_stats[i++] = le32_to_cpu(stats->rmac_err_tcp);
6496 tmp_stats[i++] = le32_to_cpu(stats->rd_req_cnt);
6497 tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_cnt);
6498 tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_rtry_cnt);
6499 tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_cnt);
6500 tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_rd_ack_cnt);
6501 tmp_stats[i++] = le32_to_cpu(stats->wr_req_cnt);
6502 tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_cnt);
6503 tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_rtry_cnt);
6504 tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_cnt);
6505 tmp_stats[i++] = le32_to_cpu(stats->wr_disc_cnt);
6506 tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_wr_ack_cnt);
6507 tmp_stats[i++] = le32_to_cpu(stats->txp_wr_cnt);
6508 tmp_stats[i++] = le32_to_cpu(stats->txd_rd_cnt);
6509 tmp_stats[i++] = le32_to_cpu(stats->txd_wr_cnt);
6510 tmp_stats[i++] = le32_to_cpu(stats->rxd_rd_cnt);
6511 tmp_stats[i++] = le32_to_cpu(stats->rxd_wr_cnt);
6512 tmp_stats[i++] = le32_to_cpu(stats->txf_rd_cnt);
6513 tmp_stats[i++] = le32_to_cpu(stats->rxf_wr_cnt);
6515 /* Enhanced statistics exist only for Hercules */
6516 if (sp->device_type == XFRAME_II_DEVICE) {
6517 tmp_stats[i++] =
6518 le64_to_cpu(stats->rmac_ttl_1519_4095_frms);
6519 tmp_stats[i++] =
6520 le64_to_cpu(stats->rmac_ttl_4096_8191_frms);
6521 tmp_stats[i++] =
6522 le64_to_cpu(stats->rmac_ttl_8192_max_frms);
6523 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_gt_max_frms);
6524 tmp_stats[i++] = le64_to_cpu(stats->rmac_osized_alt_frms);
6525 tmp_stats[i++] = le64_to_cpu(stats->rmac_jabber_alt_frms);
6526 tmp_stats[i++] = le64_to_cpu(stats->rmac_gt_max_alt_frms);
6527 tmp_stats[i++] = le64_to_cpu(stats->rmac_vlan_frms);
6528 tmp_stats[i++] = le32_to_cpu(stats->rmac_len_discard);
6529 tmp_stats[i++] = le32_to_cpu(stats->rmac_fcs_discard);
6530 tmp_stats[i++] = le32_to_cpu(stats->rmac_pf_discard);
6531 tmp_stats[i++] = le32_to_cpu(stats->rmac_da_discard);
6532 tmp_stats[i++] = le32_to_cpu(stats->rmac_red_discard);
6533 tmp_stats[i++] = le32_to_cpu(stats->rmac_rts_discard);
6534 tmp_stats[i++] = le32_to_cpu(stats->rmac_ingm_full_discard);
6535 tmp_stats[i++] = le32_to_cpu(stats->link_fault_cnt);
6538 tmp_stats[i++] = 0;
6539 tmp_stats[i++] = swstats->single_ecc_errs;
6540 tmp_stats[i++] = swstats->double_ecc_errs;
6541 tmp_stats[i++] = swstats->parity_err_cnt;
6542 tmp_stats[i++] = swstats->serious_err_cnt;
6543 tmp_stats[i++] = swstats->soft_reset_cnt;
6544 tmp_stats[i++] = swstats->fifo_full_cnt;
6545 for (k = 0; k < MAX_RX_RINGS; k++)
6546 tmp_stats[i++] = swstats->ring_full_cnt[k];
6547 tmp_stats[i++] = xstats->alarm_transceiver_temp_high;
6548 tmp_stats[i++] = xstats->alarm_transceiver_temp_low;
6549 tmp_stats[i++] = xstats->alarm_laser_bias_current_high;
6550 tmp_stats[i++] = xstats->alarm_laser_bias_current_low;
6551 tmp_stats[i++] = xstats->alarm_laser_output_power_high;
6552 tmp_stats[i++] = xstats->alarm_laser_output_power_low;
6553 tmp_stats[i++] = xstats->warn_transceiver_temp_high;
6554 tmp_stats[i++] = xstats->warn_transceiver_temp_low;
6555 tmp_stats[i++] = xstats->warn_laser_bias_current_high;
6556 tmp_stats[i++] = xstats->warn_laser_bias_current_low;
6557 tmp_stats[i++] = xstats->warn_laser_output_power_high;
6558 tmp_stats[i++] = xstats->warn_laser_output_power_low;
6559 tmp_stats[i++] = swstats->clubbed_frms_cnt;
6560 tmp_stats[i++] = swstats->sending_both;
6561 tmp_stats[i++] = swstats->outof_sequence_pkts;
6562 tmp_stats[i++] = swstats->flush_max_pkts;
6563 if (swstats->num_aggregations) {
6564 u64 tmp = swstats->sum_avg_pkts_aggregated;
6565 int count = 0;
6567 * Since 64-bit divide does not work on all platforms,
6568 * do repeated subtraction.
6570 while (tmp >= swstats->num_aggregations) {
6571 tmp -= swstats->num_aggregations;
6572 count++;
6574 tmp_stats[i++] = count;
6575 } else
6576 tmp_stats[i++] = 0;
6577 tmp_stats[i++] = swstats->mem_alloc_fail_cnt;
6578 tmp_stats[i++] = swstats->pci_map_fail_cnt;
6579 tmp_stats[i++] = swstats->watchdog_timer_cnt;
6580 tmp_stats[i++] = swstats->mem_allocated;
6581 tmp_stats[i++] = swstats->mem_freed;
6582 tmp_stats[i++] = swstats->link_up_cnt;
6583 tmp_stats[i++] = swstats->link_down_cnt;
6584 tmp_stats[i++] = swstats->link_up_time;
6585 tmp_stats[i++] = swstats->link_down_time;
6587 tmp_stats[i++] = swstats->tx_buf_abort_cnt;
6588 tmp_stats[i++] = swstats->tx_desc_abort_cnt;
6589 tmp_stats[i++] = swstats->tx_parity_err_cnt;
6590 tmp_stats[i++] = swstats->tx_link_loss_cnt;
6591 tmp_stats[i++] = swstats->tx_list_proc_err_cnt;
6593 tmp_stats[i++] = swstats->rx_parity_err_cnt;
6594 tmp_stats[i++] = swstats->rx_abort_cnt;
6595 tmp_stats[i++] = swstats->rx_parity_abort_cnt;
6596 tmp_stats[i++] = swstats->rx_rda_fail_cnt;
6597 tmp_stats[i++] = swstats->rx_unkn_prot_cnt;
6598 tmp_stats[i++] = swstats->rx_fcs_err_cnt;
6599 tmp_stats[i++] = swstats->rx_buf_size_err_cnt;
6600 tmp_stats[i++] = swstats->rx_rxd_corrupt_cnt;
6601 tmp_stats[i++] = swstats->rx_unkn_err_cnt;
6602 tmp_stats[i++] = swstats->tda_err_cnt;
6603 tmp_stats[i++] = swstats->pfc_err_cnt;
6604 tmp_stats[i++] = swstats->pcc_err_cnt;
6605 tmp_stats[i++] = swstats->tti_err_cnt;
6606 tmp_stats[i++] = swstats->tpa_err_cnt;
6607 tmp_stats[i++] = swstats->sm_err_cnt;
6608 tmp_stats[i++] = swstats->lso_err_cnt;
6609 tmp_stats[i++] = swstats->mac_tmac_err_cnt;
6610 tmp_stats[i++] = swstats->mac_rmac_err_cnt;
6611 tmp_stats[i++] = swstats->xgxs_txgxs_err_cnt;
6612 tmp_stats[i++] = swstats->xgxs_rxgxs_err_cnt;
6613 tmp_stats[i++] = swstats->rc_err_cnt;
6614 tmp_stats[i++] = swstats->prc_pcix_err_cnt;
6615 tmp_stats[i++] = swstats->rpa_err_cnt;
6616 tmp_stats[i++] = swstats->rda_err_cnt;
6617 tmp_stats[i++] = swstats->rti_err_cnt;
6618 tmp_stats[i++] = swstats->mc_err_cnt;
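/*
 * A small worked example of the counter assembly used throughout the
 * function above: many Xena counters are a little-endian 32-bit base
 * register plus a 32-bit overflow register, combined into one 64-bit
 * value as (overflow << 32) | base.  Illustrative helper only.
 */
#if 0	/* example only */
static u64 stat64(__le32 overflow, __le32 base)
{
	return ((u64)le32_to_cpu(overflow) << 32) | le32_to_cpu(base);
}
/* e.g.: tmp_stats[i++] = stat64(stats->tmac_frms_oflow, stats->tmac_frms); */
#endif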
6621 static int s2io_ethtool_get_regs_len(struct net_device *dev)
6623 return XENA_REG_SPACE;
6627 static u32 s2io_ethtool_get_rx_csum(struct net_device *dev)
6629 struct s2io_nic *sp = netdev_priv(dev);
6631 return sp->rx_csum;
6634 static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
6636 struct s2io_nic *sp = netdev_priv(dev);
6638 if (data)
6639 sp->rx_csum = 1;
6640 else
6641 sp->rx_csum = 0;
6643 return 0;
6646 static int s2io_get_eeprom_len(struct net_device *dev)
6648 return XENA_EEPROM_SPACE;
6651 static int s2io_get_sset_count(struct net_device *dev, int sset)
6653 struct s2io_nic *sp = netdev_priv(dev);
6655 switch (sset) {
6656 case ETH_SS_TEST:
6657 return S2IO_TEST_LEN;
6658 case ETH_SS_STATS:
6659 switch (sp->device_type) {
6660 case XFRAME_I_DEVICE:
6661 return XFRAME_I_STAT_LEN;
6662 case XFRAME_II_DEVICE:
6663 return XFRAME_II_STAT_LEN;
6664 default:
6665 return 0;
6667 default:
6668 return -EOPNOTSUPP;
6672 static void s2io_ethtool_get_strings(struct net_device *dev,
6673 u32 stringset, u8 *data)
6675 int stat_size = 0;
6676 struct s2io_nic *sp = netdev_priv(dev);
6678 switch (stringset) {
6679 case ETH_SS_TEST:
6680 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6681 break;
6682 case ETH_SS_STATS:
6683 stat_size = sizeof(ethtool_xena_stats_keys);
6684 memcpy(data, &ethtool_xena_stats_keys, stat_size);
6685 if (sp->device_type == XFRAME_II_DEVICE) {
6686 memcpy(data + stat_size,
6687 &ethtool_enhanced_stats_keys,
6688 sizeof(ethtool_enhanced_stats_keys));
6689 stat_size += sizeof(ethtool_enhanced_stats_keys);
6692 memcpy(data + stat_size, &ethtool_driver_stats_keys,
6693 sizeof(ethtool_driver_stats_keys));
6697 static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
6699 if (data)
6700 dev->features |= NETIF_F_IP_CSUM;
6701 else
6702 dev->features &= ~NETIF_F_IP_CSUM;
6704 return 0;
6707 static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
6709 return (dev->features & NETIF_F_TSO) != 0;
6712 static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
6714 if (data)
6715 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
6716 else
6717 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
6719 return 0;
6722 static int s2io_ethtool_set_flags(struct net_device *dev, u32 data)
6724 struct s2io_nic *sp = netdev_priv(dev);
6725 int rc = 0;
6726 int changed = 0;
6728 if (data & ~ETH_FLAG_LRO)
6729 return -EINVAL;
6731 if (data & ETH_FLAG_LRO) {
6732 if (!(dev->features & NETIF_F_LRO)) {
6733 dev->features |= NETIF_F_LRO;
6734 changed = 1;
6736 } else if (dev->features & NETIF_F_LRO) {
6737 dev->features &= ~NETIF_F_LRO;
6738 changed = 1;
6741 if (changed && netif_running(dev)) {
6742 s2io_stop_all_tx_queue(sp);
6743 s2io_card_down(sp);
6744 rc = s2io_card_up(sp);
6745 if (rc)
6746 s2io_reset(sp);
6747 else
6748 s2io_start_all_tx_queue(sp);
6751 return rc;
6754 static const struct ethtool_ops netdev_ethtool_ops = {
6755 .get_settings = s2io_ethtool_gset,
6756 .set_settings = s2io_ethtool_sset,
6757 .get_drvinfo = s2io_ethtool_gdrvinfo,
6758 .get_regs_len = s2io_ethtool_get_regs_len,
6759 .get_regs = s2io_ethtool_gregs,
6760 .get_link = ethtool_op_get_link,
6761 .get_eeprom_len = s2io_get_eeprom_len,
6762 .get_eeprom = s2io_ethtool_geeprom,
6763 .set_eeprom = s2io_ethtool_seeprom,
6764 .get_ringparam = s2io_ethtool_gringparam,
6765 .get_pauseparam = s2io_ethtool_getpause_data,
6766 .set_pauseparam = s2io_ethtool_setpause_data,
6767 .get_rx_csum = s2io_ethtool_get_rx_csum,
6768 .set_rx_csum = s2io_ethtool_set_rx_csum,
6769 .set_tx_csum = s2io_ethtool_op_set_tx_csum,
6770 .set_flags = s2io_ethtool_set_flags,
6771 .get_flags = ethtool_op_get_flags,
6772 .set_sg = ethtool_op_set_sg,
6773 .get_tso = s2io_ethtool_op_get_tso,
6774 .set_tso = s2io_ethtool_op_set_tso,
6775 .set_ufo = ethtool_op_set_ufo,
6776 .self_test = s2io_ethtool_test,
6777 .get_strings = s2io_ethtool_get_strings,
6778 .phys_id = s2io_ethtool_idnic,
6779 .get_ethtool_stats = s2io_get_ethtool_stats,
6780 .get_sset_count = s2io_get_sset_count,
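/*
 * Illustrative note: this ops table only takes effect once it is attached
 * to the net_device during probe (done elsewhere in this file), after
 * which the ethtool core dispatches to the handlers above.  On kernels of
 * this vintage the hookup is a one-liner, sketched here:
 */
#if 0	/* example only */
	SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
	/* equivalent to: dev->ethtool_ops = &netdev_ethtool_ops; */
#endif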
6784 * s2io_ioctl - Entry point for the Ioctl
6785 * @dev : Device pointer.
6786 * @ifr : An IOCTL specific structure that can contain a pointer to
6787 * a proprietary structure used to pass information to the driver.
6788 * @cmd : This is used to distinguish between the different commands that
6789 * can be passed to the IOCTL functions.
6790 * Description:
6791 * Currently there is no special functionality supported in IOCTL, hence
6792 * the function always returns -EOPNOTSUPP.
6795 static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6797 return -EOPNOTSUPP;
6801 * s2io_change_mtu - entry point to change MTU size for the device.
6802 * @dev : device pointer.
6803 * @new_mtu : the new MTU size for the device.
6804 * Description: A driver entry point to change MTU size for the device.
6805 * Before changing the MTU the device must be stopped.
6806 * Return value:
6807 * 0 on success and an appropriate (-)ve integer as defined in errno.h
6808 * file on failure.
6811 static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6813 struct s2io_nic *sp = netdev_priv(dev);
6814 int ret = 0;
6816 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
6817 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n", dev->name);
6818 return -EPERM;
6821 dev->mtu = new_mtu;
6822 if (netif_running(dev)) {
6823 s2io_stop_all_tx_queue(sp);
6824 s2io_card_down(sp);
6825 ret = s2io_card_up(sp);
6826 if (ret) {
6827 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6828 __func__);
6829 return ret;
6831 s2io_wake_all_tx_queue(sp);
6832 } else { /* Device is down */
6833 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6834 u64 val64 = new_mtu;
6836 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6839 return ret;
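/*
 * Illustrative sketch: when the interface is down, the new MTU is written
 * straight into rmac_max_pyld_len with vBIT(val64, 2, 14), i.e. a 14-bit
 * field starting at bit 2 of the 64-bit register (bits numbered from the
 * MSB).  A vBIT-style field-positioning macro works roughly like this;
 * the authoritative definition lives in s2io.h.
 */
#if 0	/* example only */
#define EXAMPLE_vBIT(val, loc, sz)	(((u64)(val)) << (64 - (loc) - (sz)))
/* EXAMPLE_vBIT(1500, 2, 14) places 1500 into bits 2..15 (MSB-numbered) */
#endif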
6843 * s2io_set_link - Set the link status
6844 * @data: long pointer to device private structure
6845 * Description: Sets the link status for the adapter
6848 static void s2io_set_link(struct work_struct *work)
6850 struct s2io_nic *nic = container_of(work, struct s2io_nic,
6851 set_link_task);
6852 struct net_device *dev = nic->dev;
6853 struct XENA_dev_config __iomem *bar0 = nic->bar0;
6854 register u64 val64;
6855 u16 subid;
6857 rtnl_lock();
6859 if (!netif_running(dev))
6860 goto out_unlock;
6862 if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
6863 /* The card is being reset, no point doing anything */
6864 goto out_unlock;
6867 subid = nic->pdev->subsystem_device;
6868 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
6870 * Allow a small delay for the NIC's self-initiated
6871 * cleanup to complete.
6873 msleep(100);
6876 val64 = readq(&bar0->adapter_status);
6877 if (LINK_IS_UP(val64)) {
6878 if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
6879 if (verify_xena_quiescence(nic)) {
6880 val64 = readq(&bar0->adapter_control);
6881 val64 |= ADAPTER_CNTL_EN;
6882 writeq(val64, &bar0->adapter_control);
6883 if (CARDS_WITH_FAULTY_LINK_INDICATORS(
6884 nic->device_type, subid)) {
6885 val64 = readq(&bar0->gpio_control);
6886 val64 |= GPIO_CTRL_GPIO_0;
6887 writeq(val64, &bar0->gpio_control);
6888 val64 = readq(&bar0->gpio_control);
6889 } else {
6890 val64 |= ADAPTER_LED_ON;
6891 writeq(val64, &bar0->adapter_control);
6893 nic->device_enabled_once = true;
6894 } else {
6895 DBG_PRINT(ERR_DBG,
6896 "%s: Error: device is not Quiescent\n",
6897 dev->name);
6898 s2io_stop_all_tx_queue(nic);
6901 val64 = readq(&bar0->adapter_control);
6902 val64 |= ADAPTER_LED_ON;
6903 writeq(val64, &bar0->adapter_control);
6904 s2io_link(nic, LINK_UP);
6905 } else {
6906 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
6907 subid)) {
6908 val64 = readq(&bar0->gpio_control);
6909 val64 &= ~GPIO_CTRL_GPIO_0;
6910 writeq(val64, &bar0->gpio_control);
6911 val64 = readq(&bar0->gpio_control);
6913 /* turn off LED */
6914 val64 = readq(&bar0->adapter_control);
6915 val64 = val64 & (~ADAPTER_LED_ON);
6916 writeq(val64, &bar0->adapter_control);
6917 s2io_link(nic, LINK_DOWN);
6919 clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));
6921 out_unlock:
6922 rtnl_unlock();
6925 static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
6926 struct buffAdd *ba,
6927 struct sk_buff **skb, u64 *temp0, u64 *temp1,
6928 u64 *temp2, int size)
6930 struct net_device *dev = sp->dev;
6931 struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
6933 if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
6934 struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
6935 /* allocate skb */
6936 if (*skb) {
6937 DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
6939 * As Rx frames are not going to be processed,
6940 * use the same mapped address for the RxD
6941 * buffer pointer
6943 rxdp1->Buffer0_ptr = *temp0;
6944 } else {
6945 *skb = dev_alloc_skb(size);
6946 if (!(*skb)) {
6947 DBG_PRINT(INFO_DBG,
6948 "%s: Out of memory to allocate %s\n",
6949 dev->name, "1 buf mode SKBs");
6950 stats->mem_alloc_fail_cnt++;
6951 return -ENOMEM ;
6953 stats->mem_allocated += (*skb)->truesize;
6954 /* storing the mapped addr in a temp variable
6955 * such that it will be used for the next RxD whose
6956 * Host_Control is NULL
6958 rxdp1->Buffer0_ptr = *temp0 =
6959 pci_map_single(sp->pdev, (*skb)->data,
6960 size - NET_IP_ALIGN,
6961 PCI_DMA_FROMDEVICE);
6962 if (pci_dma_mapping_error(sp->pdev, rxdp1->Buffer0_ptr))
6963 goto memalloc_failed;
6964 rxdp->Host_Control = (unsigned long) (*skb);
6966 } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
6967 struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
6968 /* Two buffer Mode */
6969 if (*skb) {
6970 rxdp3->Buffer2_ptr = *temp2;
6971 rxdp3->Buffer0_ptr = *temp0;
6972 rxdp3->Buffer1_ptr = *temp1;
6973 } else {
6974 *skb = dev_alloc_skb(size);
6975 if (!(*skb)) {
6976 DBG_PRINT(INFO_DBG,
6977 "%s: Out of memory to allocate %s\n",
6978 dev->name,
6979 "2 buf mode SKBs");
6980 stats->mem_alloc_fail_cnt++;
6981 return -ENOMEM;
6983 stats->mem_allocated += (*skb)->truesize;
6984 rxdp3->Buffer2_ptr = *temp2 =
6985 pci_map_single(sp->pdev, (*skb)->data,
6986 dev->mtu + 4,
6987 PCI_DMA_FROMDEVICE);
6988 if (pci_dma_mapping_error(sp->pdev, rxdp3->Buffer2_ptr))
6989 goto memalloc_failed;
6990 rxdp3->Buffer0_ptr = *temp0 =
6991 pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
6992 PCI_DMA_FROMDEVICE);
6993 if (pci_dma_mapping_error(sp->pdev,
6994 rxdp3->Buffer0_ptr)) {
6995 pci_unmap_single(sp->pdev,
6996 (dma_addr_t)rxdp3->Buffer2_ptr,
6997 dev->mtu + 4,
6998 PCI_DMA_FROMDEVICE);
6999 goto memalloc_failed;
7001 rxdp->Host_Control = (unsigned long) (*skb);
7003 /* Buffer-1 will be a dummy buffer, not used */
7004 rxdp3->Buffer1_ptr = *temp1 =
7005 pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
7006 PCI_DMA_FROMDEVICE);
7007 if (pci_dma_mapping_error(sp->pdev,
7008 rxdp3->Buffer1_ptr)) {
7009 pci_unmap_single(sp->pdev,
7010 (dma_addr_t)rxdp3->Buffer0_ptr,
7011 BUF0_LEN, PCI_DMA_FROMDEVICE);
7012 pci_unmap_single(sp->pdev,
7013 (dma_addr_t)rxdp3->Buffer2_ptr,
7014 dev->mtu + 4,
7015 PCI_DMA_FROMDEVICE);
7016 goto memalloc_failed;
7020 return 0;
7022 memalloc_failed:
7023 stats->pci_map_fail_cnt++;
7024 stats->mem_freed += (*skb)->truesize;
7025 dev_kfree_skb(*skb);
7026 return -ENOMEM;
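/*
 * A minimal sketch of the map-and-check discipline used above: every
 * pci_map_single() is immediately tested with pci_dma_mapping_error(),
 * and when a later mapping fails, the earlier ones are unmapped so no DMA
 * mapping leaks.  Kernel-style illustration only; buf_a/buf_b/LEN_A/LEN_B
 * are placeholders.
 */
#if 0	/* example only */
	dma_addr_t map_a, map_b;

	map_a = pci_map_single(pdev, buf_a, LEN_A, PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, map_a))
		goto fail;
	map_b = pci_map_single(pdev, buf_b, LEN_B, PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, map_b)) {
		/* roll back the first mapping before bailing out */
		pci_unmap_single(pdev, map_a, LEN_A, PCI_DMA_FROMDEVICE);
		goto fail;
	}
#endif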
7029 static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
7030 int size)
7032 struct net_device *dev = sp->dev;
7033 if (sp->rxd_mode == RXD_MODE_1) {
7034 rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
7035 } else if (sp->rxd_mode == RXD_MODE_3B) {
7036 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
7037 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
7038 rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu + 4);
7042 static int rxd_owner_bit_reset(struct s2io_nic *sp)
7044 int i, j, k, blk_cnt = 0, size;
7045 struct config_param *config = &sp->config;
7046 struct mac_info *mac_control = &sp->mac_control;
7047 struct net_device *dev = sp->dev;
7048 struct RxD_t *rxdp = NULL;
7049 struct sk_buff *skb = NULL;
7050 struct buffAdd *ba = NULL;
7051 u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
7053 /* Calculate the size based on ring mode */
7054 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
7055 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
7056 if (sp->rxd_mode == RXD_MODE_1)
7057 size += NET_IP_ALIGN;
7058 else if (sp->rxd_mode == RXD_MODE_3B)
7059 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
7061 for (i = 0; i < config->rx_ring_num; i++) {
7062 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7063 struct ring_info *ring = &mac_control->rings[i];
7065 blk_cnt = rx_cfg->num_rxd / (rxd_count[sp->rxd_mode] + 1);
7067 for (j = 0; j < blk_cnt; j++) {
7068 for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
7069 rxdp = ring->rx_blocks[j].rxds[k].virt_addr;
7070 if (sp->rxd_mode == RXD_MODE_3B)
7071 ba = &ring->ba[j][k];
7072 if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb,
7073 (u64 *)&temp0_64,
7074 (u64 *)&temp1_64,
7075 (u64 *)&temp2_64,
7076 size) == -ENOMEM) {
7077 return 0;
7080 set_rxd_buffer_size(sp, rxdp, size);
7081 wmb();
7082 /* flip the Ownership bit to Hardware */
7083 rxdp->Control_1 |= RXD_OWN_XENA;
7087 return 0;
7091 static int s2io_add_isr(struct s2io_nic *sp)
7093 int ret = 0;
7094 struct net_device *dev = sp->dev;
7095 int err = 0;
7097 if (sp->config.intr_type == MSI_X)
7098 ret = s2io_enable_msi_x(sp);
7099 if (ret) {
7100 DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
7101 sp->config.intr_type = INTA;
7105 * Store the values of the MSIX table in
7106 * the struct s2io_nic structure
7108 store_xmsi_data(sp);
7110 /* After proper initialization of H/W, register ISR */
7111 if (sp->config.intr_type == MSI_X) {
7112 int i, msix_rx_cnt = 0;
7114 for (i = 0; i < sp->num_entries; i++) {
7115 if (sp->s2io_entries[i].in_use == MSIX_FLG) {
7116 if (sp->s2io_entries[i].type ==
7117 MSIX_RING_TYPE) {
7118 sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
7119 dev->name, i);
7120 err = request_irq(sp->entries[i].vector,
7121 s2io_msix_ring_handle,
7122 0,
7123 sp->desc[i],
7124 sp->s2io_entries[i].arg);
7125 } else if (sp->s2io_entries[i].type ==
7126 MSIX_ALARM_TYPE) {
7127 sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
7128 dev->name, i);
7129 err = request_irq(sp->entries[i].vector,
7130 s2io_msix_fifo_handle,
7131 0,
7132 sp->desc[i],
7133 sp->s2io_entries[i].arg);
7136 /* if either data or addr is zero print it. */
7137 if (!(sp->msix_info[i].addr &&
7138 sp->msix_info[i].data)) {
7139 DBG_PRINT(ERR_DBG,
7140 "%s @Addr:0x%llx Data:0x%llx\n",
7141 sp->desc[i],
7142 (unsigned long long)
7143 sp->msix_info[i].addr,
7144 (unsigned long long)
7145 ntohl(sp->msix_info[i].data));
7146 } else
7147 msix_rx_cnt++;
7148 if (err) {
7149 remove_msix_isr(sp);
7151 DBG_PRINT(ERR_DBG,
7152 "%s:MSI-X-%d registration "
7153 "failed\n", dev->name, i);
7155 DBG_PRINT(ERR_DBG,
7156 "%s: Defaulting to INTA\n",
7157 dev->name);
7158 sp->config.intr_type = INTA;
7159 break;
7161 sp->s2io_entries[i].in_use =
7162 MSIX_REGISTERED_SUCCESS;
7165 if (!err) {
7166 pr_info("MSI-X-RX %d entries enabled\n", --msix_rx_cnt);
7167 DBG_PRINT(INFO_DBG,
7168 "MSI-X-TX entries enabled through alarm vector\n");
7171 if (sp->config.intr_type == INTA) {
7172 err = request_irq((int)sp->pdev->irq, s2io_isr, IRQF_SHARED,
7173 sp->name, dev);
7174 if (err) {
7175 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
7176 dev->name);
7177 return -1;
7180 return 0;
7183 static void s2io_rem_isr(struct s2io_nic *sp)
7185 if (sp->config.intr_type == MSI_X)
7186 remove_msix_isr(sp);
7187 else
7188 remove_inta_isr(sp);
7191 static void do_s2io_card_down(struct s2io_nic *sp, int do_io)
7193 int cnt = 0;
7194 struct XENA_dev_config __iomem *bar0 = sp->bar0;
7195 register u64 val64 = 0;
7196 struct config_param *config;
7197 config = &sp->config;
7199 if (!is_s2io_card_up(sp))
7200 return;
7202 del_timer_sync(&sp->alarm_timer);
7203 /* If s2io_set_link task is executing, wait till it completes. */
7204 while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state)))
7205 msleep(50);
7206 clear_bit(__S2IO_STATE_CARD_UP, &sp->state);
7208 /* Disable napi */
7209 if (sp->config.napi) {
7210 int off = 0;
7211 if (config->intr_type == MSI_X) {
7212 for (; off < sp->config.rx_ring_num; off++)
7213 napi_disable(&sp->mac_control.rings[off].napi);
7215 else
7216 napi_disable(&sp->napi);
7219 /* disable Tx and Rx traffic on the NIC */
7220 if (do_io)
7221 stop_nic(sp);
7223 s2io_rem_isr(sp);
7225 /* stop the tx queue, indicate link down */
7226 s2io_link(sp, LINK_DOWN);
7228 /* Check if the device is Quiescent and then Reset the NIC */
7229 while (do_io) {
7230 /* As per the HW requirement we need to replenish the
7231 * receive buffer to avoid the ring bump. Since there is
7232 * no intention of processing the Rx frame at this point, we are
7233 * just setting the ownership bit of rxd in each Rx
7234 * ring to HW and set the appropriate buffer size
7235 * based on the ring mode
7237 rxd_owner_bit_reset(sp);
7239 val64 = readq(&bar0->adapter_status);
7240 if (verify_xena_quiescence(sp)) {
7241 if (verify_pcc_quiescent(sp, sp->device_enabled_once))
7242 break;
7245 msleep(50);
7246 cnt++;
7247 if (cnt == 10) {
7248 DBG_PRINT(ERR_DBG, "Device not Quiescent - "
7249 "adapter status reads 0x%llx\n",
7250 (unsigned long long)val64);
7251 break;
7254 if (do_io)
7255 s2io_reset(sp);
7257 /* Free all Tx buffers */
7258 free_tx_buffers(sp);
7260 /* Free all Rx buffers */
7261 free_rx_buffers(sp);
7263 clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
7266 static void s2io_card_down(struct s2io_nic *sp)
7268 do_s2io_card_down(sp, 1);
7271 static int s2io_card_up(struct s2io_nic *sp)
7273 int i, ret = 0;
7274 struct config_param *config;
7275 struct mac_info *mac_control;
7276 struct net_device *dev = (struct net_device *)sp->dev;
7277 u16 interruptible;
7279 /* Initialize the H/W I/O registers */
7280 ret = init_nic(sp);
7281 if (ret != 0) {
7282 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
7283 dev->name);
7284 if (ret != -EIO)
7285 s2io_reset(sp);
7286 return ret;
7290 * Initializing the Rx buffers. For now we are considering only 1
7291 * Rx ring and initializing buffers into 30 Rx blocks
7293 config = &sp->config;
7294 mac_control = &sp->mac_control;
7296 for (i = 0; i < config->rx_ring_num; i++) {
7297 struct ring_info *ring = &mac_control->rings[i];
7299 ring->mtu = dev->mtu;
7300 ring->lro = !!(dev->features & NETIF_F_LRO);
7301 ret = fill_rx_buffers(sp, ring, 1);
7302 if (ret) {
7303 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
7304 dev->name);
7305 s2io_reset(sp);
7306 free_rx_buffers(sp);
7307 return -ENOMEM;
7309 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
7310 ring->rx_bufs_left);
7313 /* Initialise napi */
7314 if (config->napi) {
7315 if (config->intr_type == MSI_X) {
7316 for (i = 0; i < sp->config.rx_ring_num; i++)
7317 napi_enable(&sp->mac_control.rings[i].napi);
7318 } else {
7319 napi_enable(&sp->napi);
7323 /* Maintain the state prior to the open */
7324 if (sp->promisc_flg)
7325 sp->promisc_flg = 0;
7326 if (sp->m_cast_flg) {
7327 sp->m_cast_flg = 0;
7328 sp->all_multi_pos = 0;
7331 /* Setting its receive mode */
7332 s2io_set_multicast(dev);
7334 if (dev->features & NETIF_F_LRO) {
7335 /* Initialize max aggregatable pkts per session based on MTU */
7336 sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
7337 /* Check if we can use (if specified) user provided value */
7338 if (lro_max_pkts < sp->lro_max_aggr_per_sess)
7339 sp->lro_max_aggr_per_sess = lro_max_pkts;
7342 /* Enable Rx Traffic and interrupts on the NIC */
7343 if (start_nic(sp)) {
7344 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
7345 s2io_reset(sp);
7346 free_rx_buffers(sp);
7347 return -ENODEV;
7350 /* Add interrupt service routine */
7351 if (s2io_add_isr(sp) != 0) {
7352 if (sp->config.intr_type == MSI_X)
7353 s2io_rem_isr(sp);
7354 s2io_reset(sp);
7355 free_rx_buffers(sp);
7356 return -ENODEV;
7359 S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
7361 set_bit(__S2IO_STATE_CARD_UP, &sp->state);
7363 /* Enable select interrupts */
7364 en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
7365 if (sp->config.intr_type != INTA) {
7366 interruptible = TX_TRAFFIC_INTR | TX_PIC_INTR;
7367 en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7368 } else {
7369 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
7370 interruptible |= TX_PIC_INTR;
7371 en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7374 return 0;
7378 * s2io_restart_nic - Resets the NIC.
7379 * @data : long pointer to the device private structure
7380 * Description:
7381 * This function is scheduled to be run by the s2io_tx_watchdog
7382 * function after 0.5 secs to reset the NIC. The idea is to reduce
7383 * the run time of the watch dog routine which is run holding a
7384 * spin lock.
7387 static void s2io_restart_nic(struct work_struct *work)
7389 struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
7390 struct net_device *dev = sp->dev;
7392 rtnl_lock();
7394 if (!netif_running(dev))
7395 goto out_unlock;
7397 s2io_card_down(sp);
7398 if (s2io_card_up(sp)) {
7399 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", dev->name);
7401 s2io_wake_all_tx_queue(sp);
7402 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n", dev->name);
7403 out_unlock:
7404 rtnl_unlock();
7408 * s2io_tx_watchdog - Watchdog for transmit side.
7409 * @dev : Pointer to net device structure
7410 * Description:
7411 * This function is triggered if the Tx Queue is stopped
7412 * for a pre-defined amount of time when the Interface is still up.
7413 * If the Interface is jammed in such a situation, the hardware is
7414 * reset (by s2io_close) and restarted again (by s2io_open) to
7415 * overcome any problem that might have been caused in the hardware.
7416 * Return value:
7417 * void
7420 static void s2io_tx_watchdog(struct net_device *dev)
7422 struct s2io_nic *sp = netdev_priv(dev);
7423 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7425 if (netif_carrier_ok(dev)) {
7426 swstats->watchdog_timer_cnt++;
7427 schedule_work(&sp->rst_timer_task);
7428 swstats->soft_reset_cnt++;
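/*
 * Illustrative note: this handler fires only because the driver registers
 * it with the stack's Tx watchdog and sets a timeout during device setup
 * (done elsewhere in this file).  Sketch of the wiring; the timeout
 * constant name is an assumption:
 */
#if 0	/* example only */
	/* in the net_device_ops table: */
	.ndo_tx_timeout = s2io_tx_watchdog,
	/* and during probe: */
	dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
#endif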
7433 * rx_osm_handler - To perform some OS related operations on SKB.
7434 * @sp: private member of the device structure,pointer to s2io_nic structure.
7435 * @skb : the socket buffer pointer.
7436 * @len : length of the packet
7437 * @cksum : FCS checksum of the frame.
7438 * @ring_no : the ring from which this RxD was extracted.
7439 * Description:
7440 * This function is called by the Rx interrupt service routine to perform
7441 * some OS related operations on the SKB before passing it to the upper
7442 * layers. It mainly checks if the checksum is OK, if so adds it to the
7443 * SKB's cksum variable, increments the Rx packet count and passes the SKB
7444 * to the upper layer. If the checksum is wrong, it increments the Rx
7445 * packet error count, frees the SKB and returns error.
7446 * Return value:
7447 * SUCCESS on success and -1 on failure.
7449 static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7451 struct s2io_nic *sp = ring_data->nic;
7452 struct net_device *dev = (struct net_device *)ring_data->dev;
7453 struct sk_buff *skb = (struct sk_buff *)
7454 ((unsigned long)rxdp->Host_Control);
7455 int ring_no = ring_data->ring_no;
7456 u16 l3_csum, l4_csum;
7457 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
7458 struct lro *uninitialized_var(lro);
7459 u8 err_mask;
7460 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7462 skb->dev = dev;
7464 if (err) {
7465 /* Check for parity error */
7466 if (err & 0x1)
7467 swstats->parity_err_cnt++;
7469 err_mask = err >> 48;
7470 switch (err_mask) {
7471 case 1:
7472 swstats->rx_parity_err_cnt++;
7473 break;
7475 case 2:
7476 swstats->rx_abort_cnt++;
7477 break;
7479 case 3:
7480 swstats->rx_parity_abort_cnt++;
7481 break;
7483 case 4:
7484 swstats->rx_rda_fail_cnt++;
7485 break;
7487 case 5:
7488 swstats->rx_unkn_prot_cnt++;
7489 break;
7491 case 6:
7492 swstats->rx_fcs_err_cnt++;
7493 break;
7495 case 7:
7496 swstats->rx_buf_size_err_cnt++;
7497 break;
7499 case 8:
7500 swstats->rx_rxd_corrupt_cnt++;
7501 break;
7503 case 15:
7504 swstats->rx_unkn_err_cnt++;
7505 break;
7508 * Drop the packet if bad transfer code. Exception being
7509 * 0x5, which could be due to unsupported IPv6 extension header.
7510 * In this case, we let stack handle the packet.
7511 * Note that in this case, since checksum will be incorrect,
7512 * stack will validate the same.
7514 if (err_mask != 0x5) {
7515 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
7516 dev->name, err_mask);
7517 dev->stats.rx_crc_errors++;
7518 swstats->mem_freed
7519 += skb->truesize;
7520 dev_kfree_skb(skb);
7521 ring_data->rx_bufs_left -= 1;
7522 rxdp->Host_Control = 0;
7523 return 0;
7527 rxdp->Host_Control = 0;
7528 if (sp->rxd_mode == RXD_MODE_1) {
7529 int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
7531 skb_put(skb, len);
7532 } else if (sp->rxd_mode == RXD_MODE_3B) {
7533 int get_block = ring_data->rx_curr_get_info.block_index;
7534 int get_off = ring_data->rx_curr_get_info.offset;
7535 int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
7536 int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
7537 unsigned char *buff = skb_push(skb, buf0_len);
7539 struct buffAdd *ba = &ring_data->ba[get_block][get_off];
7540 memcpy(buff, ba->ba_0, buf0_len);
7541 skb_put(skb, buf2_len);
7544 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
7545 ((!ring_data->lro) ||
7546 (ring_data->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
7547 (sp->rx_csum)) {
7548 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
7549 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
7550 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
7552 * NIC verifies if the Checksum of the received
7553 * frame is Ok or not and accordingly returns
7554 * a flag in the RxD.
7556 skb->ip_summed = CHECKSUM_UNNECESSARY;
7557 if (ring_data->lro) {
7558 u32 tcp_len;
7559 u8 *tcp;
7560 int ret = 0;
7562 ret = s2io_club_tcp_session(ring_data,
7563 skb->data, &tcp,
7564 &tcp_len, &lro,
7565 rxdp, sp);
7566 switch (ret) {
7567 case 3: /* Begin anew */
7568 lro->parent = skb;
7569 goto aggregate;
7570 case 1: /* Aggregate */
7571 lro_append_pkt(sp, lro, skb, tcp_len);
7572 goto aggregate;
7573 case 4: /* Flush session */
7574 lro_append_pkt(sp, lro, skb, tcp_len);
7575 queue_rx_frame(lro->parent,
7576 lro->vlan_tag);
7577 clear_lro_session(lro);
7578 swstats->flush_max_pkts++;
7579 goto aggregate;
7580 case 2: /* Flush both */
7581 lro->parent->data_len = lro->frags_len;
7582 swstats->sending_both++;
7583 queue_rx_frame(lro->parent,
7584 lro->vlan_tag);
7585 clear_lro_session(lro);
7586 goto send_up;
7587 case 0: /* sessions exceeded */
7588 case -1: /* non-TCP or not L2 aggregatable */
7589 case 5: /*
7590 * First pkt in session not
7591 * L3/L4 aggregatable
7592 */
7593 break;
7594 default:
7595 DBG_PRINT(ERR_DBG,
7596 "%s: Samadhana!!\n",
7597 __func__);
7598 BUG();
7599 }
7600 }
7601 } else {
7602 /*
7603 * Packet with erroneous checksum, let the
7604 * upper layers deal with it.
7605 */
7606 skb_checksum_none_assert(skb);
7607 }
7608 } else
7609 skb_checksum_none_assert(skb);
7611 swstats->mem_freed += skb->truesize;
7612 send_up:
7613 skb_record_rx_queue(skb, ring_no);
7614 queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
7615 aggregate:
7616 sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
7617 return SUCCESS;
7618 }
7620 /**
7621 * s2io_link - stops/starts the Tx queue.
7622 * @sp : private member of the device structure, which is a pointer to the
7623 * s2io_nic structure.
7624 * @link : indicates whether link is UP/DOWN.
7625 * Description:
7626 * This function stops/starts the Tx queue depending on whether the link
7627 * status of the NIC is down or up. This is called by the Alarm
7628 * interrupt handler whenever a link change interrupt comes up.
7629 * Return value:
7630 * void.
7631 */
7633 static void s2io_link(struct s2io_nic *sp, int link)
7634 {
7635 struct net_device *dev = (struct net_device *)sp->dev;
7636 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7638 if (link != sp->last_link_state) {
7639 init_tti(sp, link);
7640 if (link == LINK_DOWN) {
7641 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7642 s2io_stop_all_tx_queue(sp);
7643 netif_carrier_off(dev);
7644 if (swstats->link_up_cnt)
7645 swstats->link_up_time =
7646 jiffies - sp->start_time;
7647 swstats->link_down_cnt++;
7648 } else {
7649 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
7650 if (swstats->link_down_cnt)
7651 swstats->link_down_time =
7652 jiffies - sp->start_time;
7653 swstats->link_up_cnt++;
7654 netif_carrier_on(dev);
7655 s2io_wake_all_tx_queue(sp);
7656 }
7657 }
7658 sp->last_link_state = link;
7659 sp->start_time = jiffies;
7660 }
7662 /**
7663 * s2io_init_pci - Initialization of PCI and PCI-X configuration registers.
7664 * @sp : private member of the device structure, which is a pointer to the
7665 * s2io_nic structure.
7666 * Description:
7667 * This function initializes a few of the PCI and PCI-X configuration registers
7668 * with recommended values.
7669 * Return value:
7670 * void
7671 */
7673 static void s2io_init_pci(struct s2io_nic *sp)
7674 {
7675 u16 pci_cmd = 0, pcix_cmd = 0;
7677 /* Enable Data Parity Error Recovery in PCI-X command register. */
7678 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7679 &(pcix_cmd));
7680 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7681 (pcix_cmd | 1));
7682 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7683 &(pcix_cmd));
7685 /* Set the PErr Response bit in PCI command register. */
7686 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7687 pci_write_config_word(sp->pdev, PCI_COMMAND,
7688 (pci_cmd | PCI_COMMAND_PARITY));
7689 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7690 }
7692 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
7693 u8 *dev_multiq)
7694 {
7695 if ((tx_fifo_num > MAX_TX_FIFOS) || (tx_fifo_num < 1)) {
7696 DBG_PRINT(ERR_DBG, "Requested number of tx fifos "
7697 "(%d) not supported\n", tx_fifo_num);
7699 if (tx_fifo_num < 1)
7700 tx_fifo_num = 1;
7701 else
7702 tx_fifo_num = MAX_TX_FIFOS;
7704 DBG_PRINT(ERR_DBG, "Default to %d tx fifos\n", tx_fifo_num);
7705 }
7707 if (multiq)
7708 *dev_multiq = multiq;
7710 if (tx_steering_type && (1 == tx_fifo_num)) {
7711 if (tx_steering_type != TX_DEFAULT_STEERING)
7712 DBG_PRINT(ERR_DBG,
7713 "Tx steering is not supported with "
7714 "one fifo. Disabling Tx steering.\n");
7715 tx_steering_type = NO_STEERING;
7716 }
7718 if ((tx_steering_type < NO_STEERING) ||
7719 (tx_steering_type > TX_DEFAULT_STEERING)) {
7720 DBG_PRINT(ERR_DBG,
7721 "Requested transmit steering not supported\n");
7722 DBG_PRINT(ERR_DBG, "Disabling transmit steering\n");
7723 tx_steering_type = NO_STEERING;
7724 }
7726 if (rx_ring_num > MAX_RX_RINGS) {
7727 DBG_PRINT(ERR_DBG,
7728 "Requested number of rx rings not supported\n");
7729 DBG_PRINT(ERR_DBG, "Default to %d rx rings\n",
7730 MAX_RX_RINGS);
7731 rx_ring_num = MAX_RX_RINGS;
7732 }
7734 if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
7735 DBG_PRINT(ERR_DBG, "Wrong intr_type requested. "
7736 "Defaulting to INTA\n");
7737 *dev_intr_type = INTA;
7738 }
7740 if ((*dev_intr_type == MSI_X) &&
7741 ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7742 (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
7743 DBG_PRINT(ERR_DBG, "Xframe I does not support MSI_X. "
7744 "Defaulting to INTA\n");
7745 *dev_intr_type = INTA;
7746 }
7748 if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
7749 DBG_PRINT(ERR_DBG, "Requested ring mode not supported\n");
7750 DBG_PRINT(ERR_DBG, "Defaulting to 1-buffer mode\n");
7751 rx_ring_mode = 1;
7752 }
7753 return SUCCESS;
7754 }
7756 /**
7757 * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
7758 * or Traffic class respectively.
7759 * @nic: device private variable
7760 * Description: The function configures the receive steering to
7761 * the desired receive ring.
7762 * Return Value: SUCCESS on success and
7763 * '-1' on failure.
7764 */
7765 static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7766 {
7767 struct XENA_dev_config __iomem *bar0 = nic->bar0;
7768 register u64 val64 = 0;
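/*
 * Table programming sequence (as implemented below): write the
 * target ring into the DS steering data register, then issue a
 * strobed write command with the DS codepoint as the table offset,
 * and poll until the command completes.
 */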
7770 if (ds_codepoint > 63)
7771 return FAILURE;
7773 val64 = RTS_DS_MEM_DATA(ring);
7774 writeq(val64, &bar0->rts_ds_mem_data);
7776 val64 = RTS_DS_MEM_CTRL_WE |
7777 RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7778 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7780 writeq(val64, &bar0->rts_ds_mem_ctrl);
7782 return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7783 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7784 S2IO_BIT_RESET);
7785 }
7787 static const struct net_device_ops s2io_netdev_ops = {
7788 .ndo_open = s2io_open,
7789 .ndo_stop = s2io_close,
7790 .ndo_get_stats = s2io_get_stats,
7791 .ndo_start_xmit = s2io_xmit,
7792 .ndo_validate_addr = eth_validate_addr,
7793 .ndo_set_multicast_list = s2io_set_multicast,
7794 .ndo_do_ioctl = s2io_ioctl,
7795 .ndo_set_mac_address = s2io_set_mac_addr,
7796 .ndo_change_mtu = s2io_change_mtu,
7797 .ndo_vlan_rx_register = s2io_vlan_rx_register,
7798 .ndo_vlan_rx_kill_vid = s2io_vlan_rx_kill_vid,
7799 .ndo_tx_timeout = s2io_tx_watchdog,
7800 #ifdef CONFIG_NET_POLL_CONTROLLER
7801 .ndo_poll_controller = s2io_netpoll,
7802 #endif
7803 };
7805 /**
7806 * s2io_init_nic - Initialization of the adapter.
7807 * @pdev : structure containing the PCI related information of the device.
7808 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
7809 * Description:
7810 * The function initializes an adapter identified by the pci_dev structure.
7811 * All OS related initialization including memory and device structure and
7812 * initialization of the device private variable is done. Also the swapper
7813 * control register is initialized to enable read and write into the I/O
7814 * registers of the device.
7815 * Return value:
7816 * returns 0 on success and negative on failure.
7817 */
7819 static int __devinit
7820 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7821 {
7822 struct s2io_nic *sp;
7823 struct net_device *dev;
7824 int i, j, ret;
7825 int dma_flag = false;
7826 u32 mac_up, mac_down;
7827 u64 val64 = 0, tmp64 = 0;
7828 struct XENA_dev_config __iomem *bar0 = NULL;
7829 u16 subid;
7830 struct config_param *config;
7831 struct mac_info *mac_control;
7832 int mode;
7833 u8 dev_intr_type = intr_type;
7834 u8 dev_multiq = 0;
7836 ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
7837 if (ret)
7838 return ret;
7840 ret = pci_enable_device(pdev);
7841 if (ret) {
7842 DBG_PRINT(ERR_DBG,
7843 "%s: pci_enable_device failed\n", __func__);
7844 return ret;
7845 }
7847 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
7848 DBG_PRINT(INIT_DBG, "%s: Using 64bit DMA\n", __func__);
7849 dma_flag = true;
7850 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
7851 DBG_PRINT(ERR_DBG,
7852 "Unable to obtain 64bit DMA "
7853 "for consistent allocations\n");
7854 pci_disable_device(pdev);
7855 return -ENOMEM;
7856 }
7857 } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
7858 DBG_PRINT(INIT_DBG, "%s: Using 32bit DMA\n", __func__);
7859 } else {
7860 pci_disable_device(pdev);
7861 return -ENOMEM;
7862 }
7863 ret = pci_request_regions(pdev, s2io_driver_name);
7864 if (ret) {
7865 DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x\n",
7866 __func__, ret);
7867 pci_disable_device(pdev);
7868 return -ENODEV;
7869 }
7870 if (dev_multiq)
7871 dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
7872 else
7873 dev = alloc_etherdev(sizeof(struct s2io_nic));
7874 if (dev == NULL) {
7875 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
7876 pci_disable_device(pdev);
7877 pci_release_regions(pdev);
7878 return -ENODEV;
7879 }
7881 pci_set_master(pdev);
7882 pci_set_drvdata(pdev, dev);
7883 SET_NETDEV_DEV(dev, &pdev->dev);
7885 /* Private member variable initialized to s2io NIC structure */
7886 sp = netdev_priv(dev);
7887 sp->dev = dev;
7888 sp->pdev = pdev;
7889 sp->high_dma_flag = dma_flag;
7890 sp->device_enabled_once = false;
7891 if (rx_ring_mode == 1)
7892 sp->rxd_mode = RXD_MODE_1;
7893 if (rx_ring_mode == 2)
7894 sp->rxd_mode = RXD_MODE_3B;
7896 sp->config.intr_type = dev_intr_type;
7898 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
7899 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
7900 sp->device_type = XFRAME_II_DEVICE;
7901 else
7902 sp->device_type = XFRAME_I_DEVICE;
7905 /* Initialize some PCI/PCI-X fields of the NIC. */
7906 s2io_init_pci(sp);
7908 /*
7909 * Setting the device configuration parameters.
7910 * Most of these parameters can be specified by the user during
7911 * module insertion as they are module loadable parameters. If
7912 * these parameters are not specified during load time, they
7913 * are initialized with default values.
7914 */
7915 config = &sp->config;
7916 mac_control = &sp->mac_control;
7918 config->napi = napi;
7919 config->tx_steering_type = tx_steering_type;
7921 /* Tx side parameters. */
7922 if (config->tx_steering_type == TX_PRIORITY_STEERING)
7923 config->tx_fifo_num = MAX_TX_FIFOS;
7924 else
7925 config->tx_fifo_num = tx_fifo_num;
7927 /* Initialize the fifos used for tx steering */
7928 if (config->tx_fifo_num < 5) {
7929 if (config->tx_fifo_num == 1)
7930 sp->total_tcp_fifos = 1;
7931 else
7932 sp->total_tcp_fifos = config->tx_fifo_num - 1;
7933 sp->udp_fifo_idx = config->tx_fifo_num - 1;
7934 sp->total_udp_fifos = 1;
7935 sp->other_fifo_idx = sp->total_tcp_fifos - 1;
7936 } else {
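/*
 * With five or more fifos, split them three ways: TCP fifos
 * first, then FIFO_UDP_MAX_NUM fifos for UDP, with the remaining
 * fifo(s) at the end reserved for other traffic.
 */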
7937 sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
7938 FIFO_OTHER_MAX_NUM);
7939 sp->udp_fifo_idx = sp->total_tcp_fifos;
7940 sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
7941 sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM;
7942 }
7944 config->multiq = dev_multiq;
7945 for (i = 0; i < config->tx_fifo_num; i++) {
7946 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7948 tx_cfg->fifo_len = tx_fifo_len[i];
7949 tx_cfg->fifo_priority = i;
7950 }
7952 /* mapping the QoS priority to the configured fifos */
7953 for (i = 0; i < MAX_TX_FIFOS; i++)
7954 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];
7956 /* map the hashing selector table to the configured fifos */
7957 for (i = 0; i < config->tx_fifo_num; i++)
7958 sp->fifo_selector[i] = fifo_selector[i];
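/*
 * Default to utilization-based Tx interrupts; fall back to
 * per-list interrupts when any fifo holds fewer than 65
 * descriptors (a short fifo might otherwise never cross the
 * utilization threshold).
 */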
7961 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7962 for (i = 0; i < config->tx_fifo_num; i++) {
7963 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7965 tx_cfg->f_no_snoop = (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
7966 if (tx_cfg->fifo_len < 65) {
7967 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7968 break;
7969 }
7970 }
7971 /* + 2 because one Txd for skb->data and one Txd for UFO */
7972 config->max_txds = MAX_SKB_FRAGS + 2;
7974 /* Rx side parameters. */
7975 config->rx_ring_num = rx_ring_num;
7976 for (i = 0; i < config->rx_ring_num; i++) {
7977 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7978 struct ring_info *ring = &mac_control->rings[i];
7980 rx_cfg->num_rxd = rx_ring_sz[i] * (rxd_count[sp->rxd_mode] + 1);
7981 rx_cfg->ring_priority = i;
7982 ring->rx_bufs_left = 0;
7983 ring->rxd_mode = sp->rxd_mode;
7984 ring->rxd_count = rxd_count[sp->rxd_mode];
7985 ring->pdev = sp->pdev;
7986 ring->dev = sp->dev;
7987 }
7989 for (i = 0; i < rx_ring_num; i++) {
7990 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7992 rx_cfg->ring_org = RING_ORG_BUFF1;
7993 rx_cfg->f_no_snoop = (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
7994 }
7996 /* Setting Mac Control parameters */
7997 mac_control->rmac_pause_time = rmac_pause_time;
7998 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
7999 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
8002 /* initialize the shared memory used by the NIC and the host */
8003 if (init_shared_mem(sp)) {
8004 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", dev->name);
8005 ret = -ENOMEM;
8006 goto mem_alloc_failed;
8007 }
8009 sp->bar0 = pci_ioremap_bar(pdev, 0);
8010 if (!sp->bar0) {
8011 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
8012 dev->name);
8013 ret = -ENOMEM;
8014 goto bar0_remap_failed;
8015 }
8017 sp->bar1 = pci_ioremap_bar(pdev, 2);
8018 if (!sp->bar1) {
8019 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
8020 dev->name);
8021 ret = -ENOMEM;
8022 goto bar1_remap_failed;
8023 }
8025 dev->irq = pdev->irq;
8026 dev->base_addr = (unsigned long)sp->bar0;
8028 /* Initializing the BAR1 address as the start of the FIFO pointer. */
8029 for (j = 0; j < MAX_TX_FIFOS; j++) {
8030 mac_control->tx_FIFO_start[j] =
8031 (struct TxFIFO_element __iomem *)
8032 (sp->bar1 + (j * 0x00020000));
8033 }
8035 /* Driver entry points */
8036 dev->netdev_ops = &s2io_netdev_ops;
8037 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
8038 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8039 dev->features |= NETIF_F_LRO;
8040 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
8041 if (sp->high_dma_flag == true)
8042 dev->features |= NETIF_F_HIGHDMA;
8043 dev->features |= NETIF_F_TSO;
8044 dev->features |= NETIF_F_TSO6;
8045 if ((sp->device_type & XFRAME_II_DEVICE) && (ufo)) {
8046 dev->features |= NETIF_F_UFO;
8047 dev->features |= NETIF_F_HW_CSUM;
8048 }
8049 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
8050 INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
8051 INIT_WORK(&sp->set_link_task, s2io_set_link);
8053 pci_save_state(sp->pdev);
8055 /* Setting swapper control on the NIC, for proper reset operation */
8056 if (s2io_set_swapper(sp)) {
8057 DBG_PRINT(ERR_DBG, "%s: swapper settings are wrong\n",
8058 dev->name);
8059 ret = -EAGAIN;
8060 goto set_swap_failed;
8061 }
8063 /* Verify if the Herc works in the slot it's placed into */
8064 if (sp->device_type & XFRAME_II_DEVICE) {
8065 mode = s2io_verify_pci_mode(sp);
8066 if (mode < 0) {
8067 DBG_PRINT(ERR_DBG, "%s: Unsupported PCI bus mode\n",
8068 __func__);
8069 ret = -EBADSLT;
8070 goto set_swap_failed;
8071 }
8072 }
8074 if (sp->config.intr_type == MSI_X) {
8075 sp->num_entries = config->rx_ring_num + 1;
8076 ret = s2io_enable_msi_x(sp);
8078 if (!ret) {
8079 ret = s2io_test_msi(sp);
8080 /* rollback MSI-X, will re-enable during add_isr() */
8081 remove_msix_isr(sp);
8082 }
8083 if (ret) {
8085 DBG_PRINT(ERR_DBG,
8086 "MSI-X requested but failed to enable\n");
8087 sp->config.intr_type = INTA;
8088 }
8089 }
8091 if (config->intr_type == MSI_X) {
8092 for (i = 0; i < config->rx_ring_num ; i++) {
8093 struct ring_info *ring = &mac_control->rings[i];
8095 netif_napi_add(dev, &ring->napi, s2io_poll_msix, 64);
8096 }
8097 } else {
8098 netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
8099 }
8101 /* Not needed for Herc */
8102 if (sp->device_type & XFRAME_I_DEVICE) {
8103 /*
8104 * Fix for all "FFs" MAC address problems observed on
8105 * Alpha platforms
8106 */
8107 fix_mac_address(sp);
8108 s2io_reset(sp);
8109 }
8111 /*
8112 * MAC address initialization.
8113 * For now only one mac address will be read and used.
8114 */
8115 bar0 = sp->bar0;
8116 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
8117 RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET);
8118 writeq(val64, &bar0->rmac_addr_cmd_mem);
8119 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
8120 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
8121 S2IO_BIT_RESET);
8122 tmp64 = readq(&bar0->rmac_addr_data0_mem);
8123 mac_down = (u32)tmp64;
8124 mac_up = (u32) (tmp64 >> 32);
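/*
 * Unpack the factory MAC address from the two 32-bit halves of
 * the CAM word; each half carries its address bytes with the
 * most significant byte first.
 */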
8126 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
8127 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
8128 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
8129 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
8130 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
8131 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
8133 /* Set the factory defined MAC address initially */
8134 dev->addr_len = ETH_ALEN;
8135 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
8136 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
8138 /* initialize number of multicast & unicast MAC entries variables */
8139 if (sp->device_type == XFRAME_I_DEVICE) {
8140 config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
8141 config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
8142 config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
8143 } else if (sp->device_type == XFRAME_II_DEVICE) {
8144 config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
8145 config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
8146 config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
8147 }
8149 /* store mac addresses from CAM to s2io_nic structure */
8150 do_s2io_store_unicast_mc(sp);
8152 /* Configure MSIX vector for number of rings configured plus one */
8153 if ((sp->device_type == XFRAME_II_DEVICE) &&
8154 (config->intr_type == MSI_X))
8155 sp->num_entries = config->rx_ring_num + 1;
8157 /* Store the values of the MSIX table in the s2io_nic structure */
8158 store_xmsi_data(sp);
8159 /* reset Nic and bring it to known state */
8160 s2io_reset(sp);
8162 /*
8163 * Initialize link state flags
8164 * and the card state parameter
8165 */
8166 sp->state = 0;
8168 /* Initialize spinlocks */
8169 for (i = 0; i < sp->config.tx_fifo_num; i++) {
8170 struct fifo_info *fifo = &mac_control->fifos[i];
8172 spin_lock_init(&fifo->tx_lock);
8173 }
8175 /*
8176 * SXE-002: Configure link and activity LED to init state
8177 * on driver load.
8178 */
8179 subid = sp->pdev->subsystem_device;
8180 if ((subid & 0xFF) >= 0x07) {
8181 val64 = readq(&bar0->gpio_control);
8182 val64 |= 0x0000800000000000ULL;
8183 writeq(val64, &bar0->gpio_control);
8184 val64 = 0x0411040400000000ULL;
8185 writeq(val64, (void __iomem *)bar0 + 0x2700);
8186 val64 = readq(&bar0->gpio_control);
8187 }
8189 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
8191 if (register_netdev(dev)) {
8192 DBG_PRINT(ERR_DBG, "Device registration failed\n");
8193 ret = -ENODEV;
8194 goto register_failed;
8195 }
8196 s2io_vpd_read(sp);
8197 DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2010 Exar Corp.\n");
8198 DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n", dev->name,
8199 sp->product_name, pdev->revision);
8200 DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
8201 s2io_driver_version);
8202 DBG_PRINT(ERR_DBG, "%s: MAC Address: %pM\n", dev->name, dev->dev_addr);
8203 DBG_PRINT(ERR_DBG, "Serial number: %s\n", sp->serial_num);
8204 if (sp->device_type & XFRAME_II_DEVICE) {
8205 mode = s2io_print_pci_mode(sp);
8206 if (mode < 0) {
8207 ret = -EBADSLT;
8208 unregister_netdev(dev);
8209 goto set_swap_failed;
8210 }
8211 }
8212 switch (sp->rxd_mode) {
8213 case RXD_MODE_1:
8214 DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
8215 dev->name);
8216 break;
8217 case RXD_MODE_3B:
8218 DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
8219 dev->name);
8220 break;
8221 }
8223 switch (sp->config.napi) {
8224 case 0:
8225 DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
8226 break;
8227 case 1:
8228 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
8229 break;
8230 }
8232 DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
8233 sp->config.tx_fifo_num);
8235 DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
8236 sp->config.rx_ring_num);
8238 switch (sp->config.intr_type) {
8239 case INTA:
8240 DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
8241 break;
8242 case MSI_X:
8243 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
8244 break;
8245 }
8246 if (sp->config.multiq) {
8247 for (i = 0; i < sp->config.tx_fifo_num; i++) {
8248 struct fifo_info *fifo = &mac_control->fifos[i];
8250 fifo->multiq = config->multiq;
8251 }
8252 DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
8253 dev->name);
8254 } else
8255 DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
8256 dev->name);
8258 switch (sp->config.tx_steering_type) {
8259 case NO_STEERING:
8260 DBG_PRINT(ERR_DBG, "%s: No steering enabled for transmit\n",
8261 dev->name);
8262 break;
8263 case TX_PRIORITY_STEERING:
8264 DBG_PRINT(ERR_DBG,
8265 "%s: Priority steering enabled for transmit\n",
8266 dev->name);
8267 break;
8268 case TX_DEFAULT_STEERING:
8269 DBG_PRINT(ERR_DBG,
8270 "%s: Default steering enabled for transmit\n",
8271 dev->name);
8272 }
8274 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
8275 dev->name);
8276 if (ufo)
8277 DBG_PRINT(ERR_DBG,
8278 "%s: UDP Fragmentation Offload(UFO) enabled\n",
8279 dev->name);
8280 /* Initialize device name */
8281 sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
8283 if (vlan_tag_strip)
8284 sp->vlan_strip_flag = 1;
8285 else
8286 sp->vlan_strip_flag = 0;
8288 /*
8289 * Make the link state off at this point; when the link change
8290 * interrupt comes, the state will be automatically changed to
8291 * the right state.
8292 */
8293 netif_carrier_off(dev);
8295 return 0;
8297 register_failed:
8298 set_swap_failed:
8299 iounmap(sp->bar1);
8300 bar1_remap_failed:
8301 iounmap(sp->bar0);
8302 bar0_remap_failed:
8303 mem_alloc_failed:
8304 free_shared_mem(sp);
8305 pci_disable_device(pdev);
8306 pci_release_regions(pdev);
8307 pci_set_drvdata(pdev, NULL);
8308 free_netdev(dev);
8310 return ret;
8311 }
8313 /**
8314 * s2io_rem_nic - Free the PCI device
8315 * @pdev: structure containing the PCI related information of the device.
8316 * Description: This function is called by the PCI subsystem to release a
8317 * PCI device and free up all resources held by the device. This could
8318 * be in response to a Hot plug event or when the driver is to be removed
8319 * from memory.
8320 */
8322 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
8323 {
8324 struct net_device *dev =
8325 (struct net_device *)pci_get_drvdata(pdev);
8326 struct s2io_nic *sp;
8328 if (dev == NULL) {
8329 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
8330 return;
8331 }
8333 flush_scheduled_work();
8335 sp = netdev_priv(dev);
8336 unregister_netdev(dev);
8338 free_shared_mem(sp);
8339 iounmap(sp->bar0);
8340 iounmap(sp->bar1);
8341 pci_release_regions(pdev);
8342 pci_set_drvdata(pdev, NULL);
8343 free_netdev(dev);
8344 pci_disable_device(pdev);
8345 }
8347 /**
8348 * s2io_starter - Entry point for the driver
8349 * Description: This function is the entry point for the driver. It verifies
8350 * the module loadable parameters and initializes PCI configuration space.
8351 */
8353 static int __init s2io_starter(void)
8354 {
8355 return pci_register_driver(&s2io_driver);
8356 }
8358 /**
8359 * s2io_closer - Cleanup routine for the driver
8360 * Description: This function is the cleanup routine for the driver. It
8361 * unregisters the driver.
8362 */
8363 static __exit void s2io_closer(void)
8364 {
8365 pci_unregister_driver(&s2io_driver);
8366 DBG_PRINT(INIT_DBG, "cleanup done\n");
8367 }
8369 module_init(s2io_starter);
8370 module_exit(s2io_closer);
8372 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
8373 struct tcphdr **tcp, struct RxD_t *rxdp,
8374 struct s2io_nic *sp)
8375 {
8376 int ip_off;
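/*
 * Bits 39:37 of Control_1 encode the L2 frame type; values 0
 * (DIX) and 4 (DIX with a VLAN tag) are handled below, anything
 * else is treated as non-mergeable.
 */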
8377 u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
8379 if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
8380 DBG_PRINT(INIT_DBG,
8381 "%s: Non-TCP frames not supported for LRO\n",
8382 __func__);
8383 return -1;
8384 }
8386 /* Checking for DIX type or DIX type with VLAN */
8387 if ((l2_type == 0) || (l2_type == 4)) {
8388 ip_off = HEADER_ETHERNET_II_802_3_SIZE;
8389 /*
8390 * If vlan stripping is disabled and the frame is VLAN tagged,
8391 * shift the offset by the VLAN header size bytes.
8392 */
8393 if ((!sp->vlan_strip_flag) &&
8394 (rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
8395 ip_off += HEADER_VLAN_SIZE;
8396 } else {
8397 /* LLC, SNAP etc are considered non-mergeable */
8398 return -1;
8399 }
8401 *ip = (struct iphdr *)((u8 *)buffer + ip_off);
8402 ip_len = (u8)((*ip)->ihl);
8403 ip_len <<= 2;
8404 *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
8406 return 0;
8407 }
8409 static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
8410 struct tcphdr *tcp)
8411 {
8412 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8413 if ((lro->iph->saddr != ip->saddr) ||
8414 (lro->iph->daddr != ip->daddr) ||
8415 (lro->tcph->source != tcp->source) ||
8416 (lro->tcph->dest != tcp->dest))
8417 return -1;
8418 return 0;
8419 }
8421 static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
8422 {
8423 return ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2);
8424 }
8426 static void initiate_new_session(struct lro *lro, u8 *l2h,
8427 struct iphdr *ip, struct tcphdr *tcp,
8428 u32 tcp_pyld_len, u16 vlan_tag)
8429 {
8430 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8431 lro->l2h = l2h;
8432 lro->iph = ip;
8433 lro->tcph = tcp;
8434 lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
8435 lro->tcp_ack = tcp->ack_seq;
8436 lro->sg_num = 1;
8437 lro->total_len = ntohs(ip->tot_len);
8438 lro->frags_len = 0;
8439 lro->vlan_tag = vlan_tag;
8440 /*
8441 * Check if we saw TCP timestamp.
8442 * Other consistency checks have already been done.
8443 */
8444 if (tcp->doff == 8) {
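/*
 * verify_l3_l4_lro_capable() accepted only the canonical
 * NOP/NOP/TIMESTAMP option block, so the second 32-bit word
 * after the TCP header is TSval and the third is TSecr.
 */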
8445 __be32 *ptr;
8446 ptr = (__be32 *)(tcp+1);
8447 lro->saw_ts = 1;
8448 lro->cur_tsval = ntohl(*(ptr+1));
8449 lro->cur_tsecr = *(ptr+2);
8450 }
8451 lro->in_use = 1;
8452 }
8454 static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
8455 {
8456 struct iphdr *ip = lro->iph;
8457 struct tcphdr *tcp = lro->tcph;
8458 __sum16 nchk;
8459 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8461 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8463 /* Update L3 header */
8464 ip->tot_len = htons(lro->total_len);
8465 ip->check = 0;
8466 nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
8467 ip->check = nchk;
8469 /* Update L4 header */
8470 tcp->ack_seq = lro->tcp_ack;
8471 tcp->window = lro->window;
8473 /* Update tsecr field if this session has timestamps enabled */
8474 if (lro->saw_ts) {
8475 __be32 *ptr = (__be32 *)(tcp + 1);
8476 *(ptr+2) = lro->cur_tsecr;
8477 }
8479 /* Update counters required for calculation of
8480 * average no. of packets aggregated.
8481 */
8482 swstats->sum_avg_pkts_aggregated += lro->sg_num;
8483 swstats->num_aggregations++;
8484 }
8486 static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
8487 struct tcphdr *tcp, u32 l4_pyld)
8488 {
8489 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8490 lro->total_len += l4_pyld;
8491 lro->frags_len += l4_pyld;
8492 lro->tcp_next_seq += l4_pyld;
8493 lro->sg_num++;
8495 /* Update ack seq no. and window advertisement (from this pkt) in LRO object */
8496 lro->tcp_ack = tcp->ack_seq;
8497 lro->window = tcp->window;
8499 if (lro->saw_ts) {
8500 __be32 *ptr;
8501 /* Update tsecr and tsval from this packet */
8502 ptr = (__be32 *)(tcp+1);
8503 lro->cur_tsval = ntohl(*(ptr+1));
8504 lro->cur_tsecr = *(ptr + 2);
8505 }
8506 }
8508 static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
8509 struct tcphdr *tcp, u32 tcp_pyld_len)
8510 {
8511 u8 *ptr;
8513 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8515 if (!tcp_pyld_len) {
8516 /* Runt frame or a pure ack */
8517 return -1;
8518 }
8520 if (ip->ihl != 5) /* IP has options */
8521 return -1;
8523 /* If we see CE codepoint in IP header, packet is not mergeable */
8524 if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
8525 return -1;
8527 /* If we see ECE or CWR flags in TCP header, packet is not mergeable */
8528 if (tcp->urg || tcp->psh || tcp->rst ||
8529 tcp->syn || tcp->fin ||
8530 tcp->ece || tcp->cwr || !tcp->ack) {
8531 /*
8532 * Currently recognize only the ack control word and
8533 * any other control field being set would result in
8534 * flushing the LRO session
8535 */
8536 return -1;
8537 }
8539 /*
8540 * Allow only one TCP timestamp option. Don't aggregate if
8541 * any other options are detected.
8542 */
8543 if (tcp->doff != 5 && tcp->doff != 8)
8544 return -1;
8546 if (tcp->doff == 8) {
8547 ptr = (u8 *)(tcp + 1);
8548 while (*ptr == TCPOPT_NOP)
8549 ptr++;
8550 if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
8551 return -1;
8553 /* Ensure timestamp value increases monotonically */
8554 if (l_lro)
8555 if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
8556 return -1;
8558 /* timestamp echo reply should be non-zero */
8559 if (*((__be32 *)(ptr+6)) == 0)
8560 return -1;
8561 }
8563 return 0;
8564 }
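/*
 * Decide how an incoming TCP segment interacts with this ring's LRO
 * sessions. Return codes, as consumed by rx_osm_handler():
 * -1: not aggregatable at L2 (non-TCP or LLC/SNAP framing)
 * 0: all LRO sessions already in use
 * 1: aggregate into the matched session
 * 2: flush the matched session and send this packet up
 * 3: begin a new session with this packet
 * 4: aggregate, then flush (lro_max_aggr_per_sess reached)
 * 5: first packet of a would-be session is not L3/L4 aggregatable
 */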
8566 static int s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer,
8567 u8 **tcp, u32 *tcp_len, struct lro **lro,
8568 struct RxD_t *rxdp, struct s2io_nic *sp)
8569 {
8570 struct iphdr *ip;
8571 struct tcphdr *tcph;
8572 int ret = 0, i;
8573 u16 vlan_tag = 0;
8574 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8576 ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
8577 rxdp, sp);
8578 if (ret)
8579 return ret;
8581 DBG_PRINT(INFO_DBG, "IP Saddr: %x Daddr: %x\n", ip->saddr, ip->daddr);
8583 vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2);
8584 tcph = (struct tcphdr *)*tcp;
8585 *tcp_len = get_l4_pyld_length(ip, tcph);
8586 for (i = 0; i < MAX_LRO_SESSIONS; i++) {
8587 struct lro *l_lro = &ring_data->lro0_n[i];
8588 if (l_lro->in_use) {
8589 if (check_for_socket_match(l_lro, ip, tcph))
8590 continue;
8591 /* Sock pair matched */
8592 *lro = l_lro;
8594 if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
8595 DBG_PRINT(INFO_DBG, "%s: Out of sequence. "
8596 "expected 0x%x, actual 0x%x\n",
8597 __func__,
8598 (*lro)->tcp_next_seq,
8599 ntohl(tcph->seq));
8601 swstats->outof_sequence_pkts++;
8602 ret = 2;
8603 break;
8604 }
8606 if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,
8607 *tcp_len))
8608 ret = 1; /* Aggregate */
8609 else
8610 ret = 2; /* Flush both */
8611 break;
8612 }
8613 }
8615 if (ret == 0) {
8616 /* Before searching for available LRO objects,
8617 * check if the pkt is L3/L4 aggregatable. If not
8618 * don't create new LRO session. Just send this
8619 * packet up.
8620 */
8621 if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len))
8622 return 5;
8624 for (i = 0; i < MAX_LRO_SESSIONS; i++) {
8625 struct lro *l_lro = &ring_data->lro0_n[i];
8626 if (!(l_lro->in_use)) {
8627 *lro = l_lro;
8628 ret = 3; /* Begin anew */
8629 break;
8630 }
8631 }
8632 }
8634 if (ret == 0) { /* sessions exceeded */
8635 DBG_PRINT(INFO_DBG, "%s: All LRO sessions already in use\n",
8636 __func__);
8637 *lro = NULL;
8638 return ret;
8639 }
8641 switch (ret) {
8642 case 3:
8643 initiate_new_session(*lro, buffer, ip, tcph, *tcp_len,
8644 vlan_tag);
8645 break;
8646 case 2:
8647 update_L3L4_header(sp, *lro);
8648 break;
8649 case 1:
8650 aggregate_new_rx(*lro, ip, tcph, *tcp_len);
8651 if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
8652 update_L3L4_header(sp, *lro);
8653 ret = 4; /* Flush the LRO */
8654 }
8655 break;
8656 default:
8657 DBG_PRINT(ERR_DBG, "%s: Don't know, can't say!!\n", __func__);
8658 break;
8659 }
8661 return ret;
8662 }
8664 static void clear_lro_session(struct lro *lro)
8665 {
8666 static u16 lro_struct_size = sizeof(struct lro);
8668 memset(lro, 0, lro_struct_size);
8669 }
8671 static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
8672 {
8673 struct net_device *dev = skb->dev;
8674 struct s2io_nic *sp = netdev_priv(dev);
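/*
 * Hand the frame to the stack: netif_receive_skb() when running
 * in NAPI context, netif_rx() otherwise, using the VLAN
 * accelerated variants when a stripped tag must be passed along.
 */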
8676 skb->protocol = eth_type_trans(skb, dev);
8677 if (sp->vlgrp && vlan_tag && (sp->vlan_strip_flag)) {
8678 /* Queueing the vlan frame to the upper layer */
8679 if (sp->config.napi)
8680 vlan_hwaccel_receive_skb(skb, sp->vlgrp, vlan_tag);
8681 else
8682 vlan_hwaccel_rx(skb, sp->vlgrp, vlan_tag);
8683 } else {
8684 if (sp->config.napi)
8685 netif_receive_skb(skb);
8686 else
8687 netif_rx(skb);
8688 }
8689 }
8691 static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
8692 struct sk_buff *skb, u32 tcp_len)
8693 {
8694 struct sk_buff *first = lro->parent;
8695 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
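/*
 * Trim the new skb down to its TCP payload and chain it onto the
 * parent via the frag_list; only the parent skb is handed to the
 * stack when the session is flushed.
 */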
8697 first->len += tcp_len;
8698 first->data_len = lro->frags_len;
8699 skb_pull(skb, (skb->len - tcp_len));
8700 if (skb_shinfo(first)->frag_list)
8701 lro->last_frag->next = skb;
8702 else
8703 skb_shinfo(first)->frag_list = skb;
8704 first->truesize += skb->truesize;
8705 lro->last_frag = skb;
8706 swstats->clubbed_frms_cnt++;
8707 }
8709 /**
8710 * s2io_io_error_detected - called when PCI error is detected
8711 * @pdev: Pointer to PCI device
8712 * @state: The current pci connection state
8713 *
8714 * This function is called after a PCI bus error affecting
8715 * this device has been detected.
8716 */
8717 static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
8718 pci_channel_state_t state)
8719 {
8720 struct net_device *netdev = pci_get_drvdata(pdev);
8721 struct s2io_nic *sp = netdev_priv(netdev);
8723 netif_device_detach(netdev);
8725 if (state == pci_channel_io_perm_failure)
8726 return PCI_ERS_RESULT_DISCONNECT;
8728 if (netif_running(netdev)) {
8729 /* Bring down the card, while avoiding PCI I/O */
8730 do_s2io_card_down(sp, 0);
8731 }
8732 pci_disable_device(pdev);
8734 return PCI_ERS_RESULT_NEED_RESET;
8735 }
8737 /**
8738 * s2io_io_slot_reset - called after the pci bus has been reset.
8739 * @pdev: Pointer to PCI device
8740 *
8741 * Restart the card from scratch, as if from a cold-boot.
8742 * At this point, the card has experienced a hard reset,
8743 * followed by fixups by BIOS, and has its config space
8744 * set up identically to what it was at cold boot.
8745 */
8746 static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
8747 {
8748 struct net_device *netdev = pci_get_drvdata(pdev);
8749 struct s2io_nic *sp = netdev_priv(netdev);
8751 if (pci_enable_device(pdev)) {
8752 pr_err("Cannot re-enable PCI device after reset.\n");
8753 return PCI_ERS_RESULT_DISCONNECT;
8754 }
8756 pci_set_master(pdev);
8757 s2io_reset(sp);
8759 return PCI_ERS_RESULT_RECOVERED;
8760 }
8762 /**
8763 * s2io_io_resume - called when traffic can start flowing again.
8764 * @pdev: Pointer to PCI device
8765 *
8766 * This callback is called when the error recovery driver tells
8767 * us that it's OK to resume normal operation.
8768 */
8769 static void s2io_io_resume(struct pci_dev *pdev)
8770 {
8771 struct net_device *netdev = pci_get_drvdata(pdev);
8772 struct s2io_nic *sp = netdev_priv(netdev);
8774 if (netif_running(netdev)) {
8775 if (s2io_card_up(sp)) {
8776 pr_err("Can't bring device back up after reset.\n");
8777 return;
8778 }
8780 if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
8781 s2io_card_down(sp);
8782 pr_err("Can't restore mac addr after reset.\n");
8783 return;
8784 }
8785 }
8787 netif_device_attach(netdev);
8788 netif_tx_wake_all_queues(netdev);