S2io: Removed unused feature - bimodal interrupts
linux-2.6/verdex.git: drivers/net/s2io.c
/************************************************************************
 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
 * Copyright(c) 2002-2007 Neterion Inc.
 *
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice. This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * Credits:
 * Jeff Garzik          : For pointing out the improper error condition
 *                        check in the s2io_xmit routine and also some
 *                        issues in the Tx watch dog function. Also for
 *                        patiently answering all those innumerable
 *                        questions regarding the 2.6 porting issues.
 * Stephen Hemminger    : Providing proper 2.6 porting mechanism for some
 *                        macros available only in 2.6 Kernel.
 * Francois Romieu      : For pointing out all the code parts that were
 *                        deprecated and also styling related comments.
 * Grant Grundler       : For helping me get rid of some Architecture
 *                        dependent code.
 * Christopher Hellwig  : Some more 2.6 specific issues in the driver.
 *
 * The module loadable parameters that are supported by the driver and a brief
 * explanation of all the variables:
 *
 * rx_ring_num : This can be used to program the number of receive rings used
 *     in the driver.
 * rx_ring_sz: This defines the number of receive blocks each ring can have.
 *     This is also an array of size 8.
 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
 *     values are 1, 2.
 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
 * tx_fifo_len: This too is an array of 8. Each element defines the number of
 *     Tx descriptors that can be associated with each corresponding FIFO.
 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
 *     2(MSI_X). Default value is '2(MSI_X)'.
 * lro: Specifies whether to enable Large Receive Offload (LRO) or not.
 *     Possible values '1' for enable, '0' for disable. Default is '0'.
 * lro_max_pkts: This parameter defines the maximum number of packets that
 *     can be aggregated as a single large packet.
 * napi: This parameter is used to enable/disable NAPI (polling Rx).
 *     Possible values '1' for enable and '0' for disable. Default is '1'.
 * ufo: This parameter is used to enable/disable UDP Fragmentation Offload(UFO).
 *     Possible values '1' for enable and '0' for disable. Default is '0'.
 * vlan_tag_strip: This can be used to enable or disable vlan stripping.
 *     Possible values: '1' for enable, '0' for disable, and '2' to disable
 *     stripping in promiscuous mode and enable it in non-promiscuous mode.
 *     Default is '2'.
 ************************************************************************/
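
/*
 * Editor's note: the variables above are ordinary module parameters, so
 * they are set at load time with the usual name=value syntax. A purely
 * illustrative invocation (the values are only an example):
 *
 *     modprobe s2io intr_type=0 napi=1 lro=1 lro_max_pkts=32
 */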
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/stddef.h>
#include <linux/ioctl.h>
#include <linux/timex.h>
#include <linux/ethtool.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/tcp.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/div64.h>
#include <asm/irq.h>

/* local include */
#include "s2io.h"
#include "s2io-regs.h"
#define DRV_VERSION "2.0.26.4"

/* S2io Driver name & version. */
static char s2io_driver_name[] = "Neterion";
static char s2io_driver_version[] = DRV_VERSION;

/* RxD size and count per block, indexed by Rx descriptor mode */
static int rxd_size[2] = {32, 48};
static int rxd_count[2] = {127, 85};
static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
{
    int ret;

    ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
           (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));

    return ret;
}
/*
 * Cards with the following subsystem_ids have a link state indication
 * problem: 600B, 600C, 600D, 640B, 640C and 640D.
 * The macro below identifies these cards given the subsystem_id.
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
    (dev_type == XFRAME_I_DEVICE) ? \
        ((((subid >= 0x600B) && (subid <= 0x600D)) || \
          ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
                                      ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
#define PANIC 1
#define LOW   2
static inline int rx_buffer_level(struct s2io_nic *sp, int rxb_size, int ring)
{
    struct mac_info *mac_control;

    mac_control = &sp->mac_control;
    if (rxb_size <= rxd_count[sp->rxd_mode])
        return PANIC;
    else if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16)
        return LOW;
    return 0;
}
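
/*
 * Editor's note: a minimal sketch (not code from this driver) of how the
 * PANIC/LOW watermarks above are meant to be consumed by a refill path;
 * handle_low_buffers() is a hypothetical name for illustration only:
 *
 *     static void handle_low_buffers(struct s2io_nic *sp, int rxb_size,
 *                                    int ring)
 *     {
 *         switch (rx_buffer_level(sp, rxb_size, ring)) {
 *         case PANIC:
 *             ...ring is nearly out of posted RxDs: refill immediately...
 *             break;
 *         case LOW:
 *             ...running low: schedule a deferred refill...
 *             break;
 *         default:
 *             break;  ...enough buffers are posted...
 *         }
 *     }
 */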
static inline int is_s2io_card_up(const struct s2io_nic *sp)
{
    return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
}
/* Ethtool related variables and Macros. */
static char s2io_gstrings[][ETH_GSTRING_LEN] = {
    "Register test\t(offline)",
    "Eeprom test\t(offline)",
    "Link test\t(online)",
    "RLDRAM test\t(offline)",
    "BIST Test\t(offline)"
};
static char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
    {"tmac_frms"},
    {"tmac_data_octets"},
    {"tmac_drop_frms"},
    {"tmac_mcst_frms"},
    {"tmac_bcst_frms"},
    {"tmac_pause_ctrl_frms"},
    {"tmac_ttl_octets"},
    {"tmac_ucst_frms"},
    {"tmac_nucst_frms"},
    {"tmac_any_err_frms"},
    {"tmac_ttl_less_fb_octets"},
    {"tmac_vld_ip_octets"},
    {"tmac_vld_ip"},
    {"tmac_drop_ip"},
    {"tmac_icmp"},
    {"tmac_rst_tcp"},
    {"tmac_tcp"},
    {"tmac_udp"},
    {"rmac_vld_frms"},
    {"rmac_data_octets"},
    {"rmac_fcs_err_frms"},
    {"rmac_drop_frms"},
    {"rmac_vld_mcst_frms"},
    {"rmac_vld_bcst_frms"},
    {"rmac_in_rng_len_err_frms"},
    {"rmac_out_rng_len_err_frms"},
    {"rmac_long_frms"},
    {"rmac_pause_ctrl_frms"},
    {"rmac_unsup_ctrl_frms"},
    {"rmac_ttl_octets"},
    {"rmac_accepted_ucst_frms"},
    {"rmac_accepted_nucst_frms"},
    {"rmac_discarded_frms"},
    {"rmac_drop_events"},
    {"rmac_ttl_less_fb_octets"},
    {"rmac_ttl_frms"},
    {"rmac_usized_frms"},
    {"rmac_osized_frms"},
    {"rmac_frag_frms"},
    {"rmac_jabber_frms"},
    {"rmac_ttl_64_frms"},
    {"rmac_ttl_65_127_frms"},
    {"rmac_ttl_128_255_frms"},
    {"rmac_ttl_256_511_frms"},
    {"rmac_ttl_512_1023_frms"},
    {"rmac_ttl_1024_1518_frms"},
    {"rmac_ip"},
    {"rmac_ip_octets"},
    {"rmac_hdr_err_ip"},
    {"rmac_drop_ip"},
    {"rmac_icmp"},
    {"rmac_tcp"},
    {"rmac_udp"},
    {"rmac_err_drp_udp"},
    {"rmac_xgmii_err_sym"},
    {"rmac_frms_q0"},
    {"rmac_frms_q1"},
    {"rmac_frms_q2"},
    {"rmac_frms_q3"},
    {"rmac_frms_q4"},
    {"rmac_frms_q5"},
    {"rmac_frms_q6"},
    {"rmac_frms_q7"},
    {"rmac_full_q0"},
    {"rmac_full_q1"},
    {"rmac_full_q2"},
    {"rmac_full_q3"},
    {"rmac_full_q4"},
    {"rmac_full_q5"},
    {"rmac_full_q6"},
    {"rmac_full_q7"},
    {"rmac_pause_cnt"},
    {"rmac_xgmii_data_err_cnt"},
    {"rmac_xgmii_ctrl_err_cnt"},
    {"rmac_accepted_ip"},
    {"rmac_err_tcp"},
    {"rd_req_cnt"},
    {"new_rd_req_cnt"},
    {"new_rd_req_rtry_cnt"},
    {"rd_rtry_cnt"},
    {"wr_rtry_rd_ack_cnt"},
    {"wr_req_cnt"},
    {"new_wr_req_cnt"},
    {"new_wr_req_rtry_cnt"},
    {"wr_rtry_cnt"},
    {"wr_disc_cnt"},
    {"rd_rtry_wr_ack_cnt"},
    {"txp_wr_cnt"},
    {"txd_rd_cnt"},
    {"txd_wr_cnt"},
    {"rxd_rd_cnt"},
    {"rxd_wr_cnt"},
    {"txf_rd_cnt"},
    {"rxf_wr_cnt"}
};
static char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
    {"rmac_ttl_1519_4095_frms"},
    {"rmac_ttl_4096_8191_frms"},
    {"rmac_ttl_8192_max_frms"},
    {"rmac_ttl_gt_max_frms"},
    {"rmac_osized_alt_frms"},
    {"rmac_jabber_alt_frms"},
    {"rmac_gt_max_alt_frms"},
    {"rmac_vlan_frms"},
    {"rmac_len_discard"},
    {"rmac_fcs_discard"},
    {"rmac_pf_discard"},
    {"rmac_da_discard"},
    {"rmac_red_discard"},
    {"rmac_rts_discard"},
    {"rmac_ingm_full_discard"},
    {"link_fault_cnt"}
};
static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
    {"\n DRIVER STATISTICS"},
    {"single_bit_ecc_errs"},
    {"double_bit_ecc_errs"},
    {"parity_err_cnt"},
    {"serious_err_cnt"},
    {"soft_reset_cnt"},
    {"fifo_full_cnt"},
    {"ring_0_full_cnt"},
    {"ring_1_full_cnt"},
    {"ring_2_full_cnt"},
    {"ring_3_full_cnt"},
    {"ring_4_full_cnt"},
    {"ring_5_full_cnt"},
    {"ring_6_full_cnt"},
    {"ring_7_full_cnt"},
    {"alarm_transceiver_temp_high"},
    {"alarm_transceiver_temp_low"},
    {"alarm_laser_bias_current_high"},
    {"alarm_laser_bias_current_low"},
    {"alarm_laser_output_power_high"},
    {"alarm_laser_output_power_low"},
    {"warn_transceiver_temp_high"},
    {"warn_transceiver_temp_low"},
    {"warn_laser_bias_current_high"},
    {"warn_laser_bias_current_low"},
    {"warn_laser_output_power_high"},
    {"warn_laser_output_power_low"},
    {"lro_aggregated_pkts"},
    {"lro_flush_both_count"},
    {"lro_out_of_sequence_pkts"},
    {"lro_flush_due_to_max_pkts"},
    {"lro_avg_aggr_pkts"},
    {"mem_alloc_fail_cnt"},
    {"pci_map_fail_cnt"},
    {"watchdog_timer_cnt"},
    {"mem_allocated"},
    {"mem_freed"},
    {"link_up_cnt"},
    {"link_down_cnt"},
    {"link_up_time"},
    {"link_down_time"},
    {"tx_tcode_buf_abort_cnt"},
    {"tx_tcode_desc_abort_cnt"},
    {"tx_tcode_parity_err_cnt"},
    {"tx_tcode_link_loss_cnt"},
    {"tx_tcode_list_proc_err_cnt"},
    {"rx_tcode_parity_err_cnt"},
    {"rx_tcode_abort_cnt"},
    {"rx_tcode_parity_abort_cnt"},
    {"rx_tcode_rda_fail_cnt"},
    {"rx_tcode_unkn_prot_cnt"},
    {"rx_tcode_fcs_err_cnt"},
    {"rx_tcode_buf_size_err_cnt"},
    {"rx_tcode_rxd_corrupt_cnt"},
    {"rx_tcode_unkn_err_cnt"},
    {"tda_err_cnt"},
    {"pfc_err_cnt"},
    {"pcc_err_cnt"},
    {"tti_err_cnt"},
    {"tpa_err_cnt"},
    {"sm_err_cnt"},
    {"lso_err_cnt"},
    {"mac_tmac_err_cnt"},
    {"mac_rmac_err_cnt"},
    {"xgxs_txgxs_err_cnt"},
    {"xgxs_rxgxs_err_cnt"},
    {"rc_err_cnt"},
    {"prc_pcix_err_cnt"},
    {"rpa_err_cnt"},
    {"rda_err_cnt"},
    {"rti_err_cnt"},
    {"mc_err_cnt"}
};
#define S2IO_XENA_STAT_LEN     (sizeof(ethtool_xena_stats_keys) / ETH_GSTRING_LEN)
#define S2IO_ENHANCED_STAT_LEN (sizeof(ethtool_enhanced_stats_keys) / \
                                ETH_GSTRING_LEN)
#define S2IO_DRIVER_STAT_LEN   (sizeof(ethtool_driver_stats_keys) / ETH_GSTRING_LEN)

#define XFRAME_I_STAT_LEN  (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)

#define XFRAME_I_STAT_STRINGS_LEN  (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
#define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)

#define S2IO_TEST_LEN    (sizeof(s2io_gstrings) / ETH_GSTRING_LEN)
#define S2IO_STRINGS_LEN (S2IO_TEST_LEN * ETH_GSTRING_LEN)
#define S2IO_TIMER_CONF(timer, handle, arg, exp)    \
    init_timer(&timer);                             \
    timer.function = handle;                        \
    timer.data = (unsigned long) arg;               \
    mod_timer(&timer, (jiffies + exp))
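
/*
 * Editor's note: as written, S2IO_TIMER_CONF expands to four statements,
 * so using it as the body of an unbraced if/else would guard only the
 * first statement. A safer formulation, were the macro reworked, is the
 * usual do { ... } while (0) wrapper (a sketch, not this driver's code):
 *
 *     #define S2IO_TIMER_CONF(timer, handle, arg, exp) do {  \
 *         init_timer(&timer);                                \
 *         timer.function = handle;                           \
 *         timer.data = (unsigned long)(arg);                 \
 *         mod_timer(&timer, (jiffies + (exp)));              \
 *     } while (0)
 */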
/* Add the vlan */
static void s2io_vlan_rx_register(struct net_device *dev,
                                  struct vlan_group *grp)
{
    struct s2io_nic *nic = dev->priv;
    unsigned long flags;

    spin_lock_irqsave(&nic->tx_lock, flags);
    nic->vlgrp = grp;
    spin_unlock_irqrestore(&nic->tx_lock, flags);
}
/* A flag indicating whether the 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not */
static int vlan_strip_flag;

/*
 * Constants to be programmed into the Xena's registers, to configure
 * the XAUI.
 */

#define END_SIGN 0x0
static const u64 herc_act_dtx_cfg[] = {
    /* Set address */
    0x8000051536750000ULL, 0x80000515367500E0ULL,
    /* Write data */
    0x8000051536750004ULL, 0x80000515367500E4ULL,
    /* Set address */
    0x80010515003F0000ULL, 0x80010515003F00E0ULL,
    /* Write data */
    0x80010515003F0004ULL, 0x80010515003F00E4ULL,
    /* Set address */
    0x801205150D440000ULL, 0x801205150D4400E0ULL,
    /* Write data */
    0x801205150D440004ULL, 0x801205150D4400E4ULL,
    /* Set address */
    0x80020515F2100000ULL, 0x80020515F21000E0ULL,
    /* Write data */
    0x80020515F2100004ULL, 0x80020515F21000E4ULL,
    /* Done */
    END_SIGN
};
static const u64 xena_dtx_cfg[] = {
    /* Set address */
    0x8000051500000000ULL, 0x80000515000000E0ULL,
    /* Write data */
    0x80000515D9350004ULL, 0x80000515D93500E4ULL,
    /* Set address */
    0x8001051500000000ULL, 0x80010515000000E0ULL,
    /* Write data */
    0x80010515001E0004ULL, 0x80010515001E00E4ULL,
    /* Set address */
    0x8002051500000000ULL, 0x80020515000000E0ULL,
    /* Write data */
    0x80020515F2100004ULL, 0x80020515F21000E4ULL,
    END_SIGN
};
/*
 * Constants for Fixing the MacAddress problem seen mostly on
 * Alpha machines.
 */
static const u64 fix_mac[] = {
    0x0060000000000000ULL, 0x0060600000000000ULL,
    0x0040600000000000ULL, 0x0000600000000000ULL,
    0x0020600000000000ULL, 0x0060600000000000ULL,
    0x0020600000000000ULL, 0x0060600000000000ULL,
    0x0020600000000000ULL, 0x0060600000000000ULL,
    0x0020600000000000ULL, 0x0060600000000000ULL,
    0x0020600000000000ULL, 0x0060600000000000ULL,
    0x0020600000000000ULL, 0x0060600000000000ULL,
    0x0020600000000000ULL, 0x0060600000000000ULL,
    0x0020600000000000ULL, 0x0060600000000000ULL,
    0x0020600000000000ULL, 0x0060600000000000ULL,
    0x0020600000000000ULL, 0x0060600000000000ULL,
    0x0020600000000000ULL, 0x0000600000000000ULL,
    0x0040600000000000ULL, 0x0060600000000000ULL,
    END_SIGN
};
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
/* Module Loadable parameters. */
S2IO_PARM_INT(tx_fifo_num, 1);
S2IO_PARM_INT(rx_ring_num, 1);

S2IO_PARM_INT(rx_ring_mode, 1);
S2IO_PARM_INT(use_continuous_tx_intrs, 1);
S2IO_PARM_INT(rmac_pause_time, 0x100);
S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
S2IO_PARM_INT(shared_splits, 0);
S2IO_PARM_INT(tmac_util_period, 5);
S2IO_PARM_INT(rmac_util_period, 5);
S2IO_PARM_INT(l3l4hdr_size, 128);
/* Frequency of Rx desc syncs expressed as power of 2 */
S2IO_PARM_INT(rxsync_frequency, 3);
/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
S2IO_PARM_INT(intr_type, 2);
/* Large receive offload feature */
S2IO_PARM_INT(lro, 0);
/* Max pkts to be aggregated by LRO at one time. If not specified,
 * aggregation happens until we hit max IP pkt size(64K)
 */
S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
S2IO_PARM_INT(indicate_max_pkts, 0);

S2IO_PARM_INT(napi, 1);
S2IO_PARM_INT(ufo, 0);
S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
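
/*
 * Editor's note: S2IO_PARM_INT comes from s2io.h; it declares a static
 * unsigned int with the given default and registers it as a module
 * parameter. Roughly (paraphrased from the header, shown for context and
 * not verbatim):
 *
 *     #define S2IO_PARM_INT(X, def_val)        \
 *         static unsigned int X = def_val;     \
 *         module_param(X, uint, 0)
 */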
static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
    {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
static unsigned int rts_frm_len[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = 0};

module_param_array(tx_fifo_len, uint, NULL, 0);
module_param_array(rx_ring_sz, uint, NULL, 0);
module_param_array(rts_frm_len, uint, NULL, 0);
/*
 * S2IO device table.
 * This table lists all the devices that this driver supports.
 */
static struct pci_device_id s2io_tbl[] __devinitdata = {
    {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
     PCI_ANY_ID, PCI_ANY_ID},
    {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
     PCI_ANY_ID, PCI_ANY_ID},
    {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
     PCI_ANY_ID, PCI_ANY_ID},
    {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
     PCI_ANY_ID, PCI_ANY_ID},
    {0,}
};

MODULE_DEVICE_TABLE(pci, s2io_tbl);
static struct pci_error_handlers s2io_err_handler = {
    .error_detected = s2io_io_error_detected,
    .slot_reset = s2io_io_slot_reset,
    .resume = s2io_io_resume,
};

static struct pci_driver s2io_driver = {
    .name = "S2IO",
    .id_table = s2io_tbl,
    .probe = s2io_init_nic,
    .remove = __devexit_p(s2io_rem_nic),
    .err_handler = &s2io_err_handler,
};
/* A simplifier macro used both by init and free shared_mem Fns(). */
#define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
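
/*
 * Editor's note: this is just ceiling division. For example, if each TxD
 * list is 512 bytes (so lst_per_page = PAGE_SIZE / 512 = 8 lists fit in a
 * 4K page), a FIFO of length 100 needs
 * TXD_MEM_PAGE_CNT(100, 8) = (100 + 8 - 1) / 8 = 13 pages.
 * (Illustrative numbers only.)
 */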
/**
 * init_shared_mem - Allocation and Initialization of Memory
 * @nic: Device private variable.
 * Description: The function allocates all the memory areas shared
 * between the NIC and the driver. This includes Tx descriptors,
 * Rx descriptors and the statistics block.
 */
static int init_shared_mem(struct s2io_nic *nic)
{
    u32 size;
    void *tmp_v_addr, *tmp_v_addr_next;
    dma_addr_t tmp_p_addr, tmp_p_addr_next;
    struct RxD_block *pre_rxd_blk = NULL;
    int i, j, blk_cnt;
    int lst_size, lst_per_page;
    struct net_device *dev = nic->dev;
    unsigned long tmp;
    struct buffAdd *ba;

    struct mac_info *mac_control;
    struct config_param *config;
    unsigned long long mem_allocated = 0;

    mac_control = &nic->mac_control;
    config = &nic->config;

    /* Allocation and initialization of TXDLs in FIFOs */
    size = 0;
    for (i = 0; i < config->tx_fifo_num; i++) {
        size += config->tx_cfg[i].fifo_len;
    }
    if (size > MAX_AVAILABLE_TXDS) {
        DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
        DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
        return -EINVAL;
    }

    lst_size = (sizeof(struct TxD) * config->max_txds);
    lst_per_page = PAGE_SIZE / lst_size;

    for (i = 0; i < config->tx_fifo_num; i++) {
        int fifo_len = config->tx_cfg[i].fifo_len;
        int list_holder_size = fifo_len * sizeof(struct list_info_hold);
        mac_control->fifos[i].list_info = kzalloc(list_holder_size,
                                                  GFP_KERNEL);
        if (!mac_control->fifos[i].list_info) {
            DBG_PRINT(INFO_DBG,
                      "Malloc failed for list_info\n");
            return -ENOMEM;
        }
        mem_allocated += list_holder_size;
    }
    for (i = 0; i < config->tx_fifo_num; i++) {
        int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
                                        lst_per_page);
        mac_control->fifos[i].tx_curr_put_info.offset = 0;
        mac_control->fifos[i].tx_curr_put_info.fifo_len =
            config->tx_cfg[i].fifo_len - 1;
        mac_control->fifos[i].tx_curr_get_info.offset = 0;
        mac_control->fifos[i].tx_curr_get_info.fifo_len =
            config->tx_cfg[i].fifo_len - 1;
        mac_control->fifos[i].fifo_no = i;
        mac_control->fifos[i].nic = nic;
        mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;

        for (j = 0; j < page_num; j++) {
            int k = 0;
            dma_addr_t tmp_p;
            void *tmp_v;
            tmp_v = pci_alloc_consistent(nic->pdev,
                                         PAGE_SIZE, &tmp_p);
            if (!tmp_v) {
                DBG_PRINT(INFO_DBG,
                          "pci_alloc_consistent ");
                DBG_PRINT(INFO_DBG, "failed for TxDL\n");
                return -ENOMEM;
            }
            /* If we got a zero DMA address(can happen on
             * certain platforms like PPC), reallocate.
             * Store virtual address of page we don't want,
             * to be freed later.
             */
            if (!tmp_p) {
                mac_control->zerodma_virt_addr = tmp_v;
                DBG_PRINT(INIT_DBG,
                          "%s: Zero DMA address for TxDL. ", dev->name);
                DBG_PRINT(INIT_DBG,
                          "Virtual address %p\n", tmp_v);
                tmp_v = pci_alloc_consistent(nic->pdev,
                                             PAGE_SIZE, &tmp_p);
                if (!tmp_v) {
                    DBG_PRINT(INFO_DBG,
                              "pci_alloc_consistent ");
                    DBG_PRINT(INFO_DBG, "failed for TxDL\n");
                    return -ENOMEM;
                }
                mem_allocated += PAGE_SIZE;
            }
            while (k < lst_per_page) {
                int l = (j * lst_per_page) + k;
                if (l == config->tx_cfg[i].fifo_len)
                    break;
                mac_control->fifos[i].list_info[l].list_virt_addr =
                    tmp_v + (k * lst_size);
                mac_control->fifos[i].list_info[l].list_phy_addr =
                    tmp_p + (k * lst_size);
                k++;
            }
        }
    }

    nic->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
    if (!nic->ufo_in_band_v)
        return -ENOMEM;
    mem_allocated += (size * sizeof(u64));
    /* Allocation and initialization of RXDs in Rings */
    size = 0;
    for (i = 0; i < config->rx_ring_num; i++) {
        if (config->rx_cfg[i].num_rxd %
            (rxd_count[nic->rxd_mode] + 1)) {
            DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
            DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
                      i);
            DBG_PRINT(ERR_DBG, "RxDs per Block");
            return FAILURE;
        }
        size += config->rx_cfg[i].num_rxd;
        mac_control->rings[i].block_count =
            config->rx_cfg[i].num_rxd /
            (rxd_count[nic->rxd_mode] + 1);
        mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
            mac_control->rings[i].block_count;
    }
    if (nic->rxd_mode == RXD_MODE_1)
        size = (size * (sizeof(struct RxD1)));
    else
        size = (size * (sizeof(struct RxD3)));

    for (i = 0; i < config->rx_ring_num; i++) {
        mac_control->rings[i].rx_curr_get_info.block_index = 0;
        mac_control->rings[i].rx_curr_get_info.offset = 0;
        mac_control->rings[i].rx_curr_get_info.ring_len =
            config->rx_cfg[i].num_rxd - 1;
        mac_control->rings[i].rx_curr_put_info.block_index = 0;
        mac_control->rings[i].rx_curr_put_info.offset = 0;
        mac_control->rings[i].rx_curr_put_info.ring_len =
            config->rx_cfg[i].num_rxd - 1;
        mac_control->rings[i].nic = nic;
        mac_control->rings[i].ring_no = i;

        blk_cnt = config->rx_cfg[i].num_rxd /
            (rxd_count[nic->rxd_mode] + 1);
        /* Allocating all the Rx blocks */
        for (j = 0; j < blk_cnt; j++) {
            struct rx_block_info *rx_blocks;
            int l;

            rx_blocks = &mac_control->rings[i].rx_blocks[j];
            size = SIZE_OF_BLOCK;   /* size is always page size */
            tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
                                              &tmp_p_addr);
            if (tmp_v_addr == NULL) {
                /*
                 * In case of failure, free_shared_mem()
                 * is called, which should free any
                 * memory that was alloced till the
                 * failure happened.
                 */
                rx_blocks->block_virt_addr = tmp_v_addr;
                return -ENOMEM;
            }
            mem_allocated += size;
            memset(tmp_v_addr, 0, size);
            rx_blocks->block_virt_addr = tmp_v_addr;
            rx_blocks->block_dma_addr = tmp_p_addr;
            rx_blocks->rxds = kmalloc(sizeof(struct rxd_info) *
                                      rxd_count[nic->rxd_mode],
                                      GFP_KERNEL);
            if (!rx_blocks->rxds)
                return -ENOMEM;
            mem_allocated +=
                (sizeof(struct rxd_info) * rxd_count[nic->rxd_mode]);
            for (l = 0; l < rxd_count[nic->rxd_mode]; l++) {
                rx_blocks->rxds[l].virt_addr =
                    rx_blocks->block_virt_addr +
                    (rxd_size[nic->rxd_mode] * l);
                rx_blocks->rxds[l].dma_addr =
                    rx_blocks->block_dma_addr +
                    (rxd_size[nic->rxd_mode] * l);
            }
        }
        /* Interlinking all Rx Blocks */
        for (j = 0; j < blk_cnt; j++) {
            tmp_v_addr =
                mac_control->rings[i].rx_blocks[j].block_virt_addr;
            tmp_v_addr_next =
                mac_control->rings[i].rx_blocks[(j + 1) %
                                                blk_cnt].block_virt_addr;
            tmp_p_addr =
                mac_control->rings[i].rx_blocks[j].block_dma_addr;
            tmp_p_addr_next =
                mac_control->rings[i].rx_blocks[(j + 1) %
                                                blk_cnt].block_dma_addr;

            pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
            pre_rxd_blk->reserved_2_pNext_RxD_block =
                (unsigned long) tmp_v_addr_next;
            pre_rxd_blk->pNext_RxD_Blk_physical =
                (u64) tmp_p_addr_next;
        }
    }
    if (nic->rxd_mode == RXD_MODE_3B) {
        /*
         * Allocation of Storages for buffer addresses in 2BUFF mode
         * and the buffers as well.
         */
        for (i = 0; i < config->rx_ring_num; i++) {
            blk_cnt = config->rx_cfg[i].num_rxd /
                (rxd_count[nic->rxd_mode] + 1);
            mac_control->rings[i].ba =
                kmalloc((sizeof(struct buffAdd *) * blk_cnt),
                        GFP_KERNEL);
            if (!mac_control->rings[i].ba)
                return -ENOMEM;
            mem_allocated += (sizeof(struct buffAdd *) * blk_cnt);
            for (j = 0; j < blk_cnt; j++) {
                int k = 0;
                mac_control->rings[i].ba[j] =
                    kmalloc((sizeof(struct buffAdd) *
                             (rxd_count[nic->rxd_mode] + 1)),
                            GFP_KERNEL);
                if (!mac_control->rings[i].ba[j])
                    return -ENOMEM;
                mem_allocated += (sizeof(struct buffAdd) *
                                  (rxd_count[nic->rxd_mode] + 1));
                while (k != rxd_count[nic->rxd_mode]) {
                    ba = &mac_control->rings[i].ba[j][k];

                    ba->ba_0_org = (void *) kmalloc
                        (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
                    if (!ba->ba_0_org)
                        return -ENOMEM;
                    mem_allocated +=
                        (BUF0_LEN + ALIGN_SIZE);
                    tmp = (unsigned long)ba->ba_0_org;
                    tmp += ALIGN_SIZE;
                    tmp &= ~((unsigned long) ALIGN_SIZE);
                    ba->ba_0 = (void *) tmp;

                    ba->ba_1_org = (void *) kmalloc
                        (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
                    if (!ba->ba_1_org)
                        return -ENOMEM;
                    mem_allocated
                        += (BUF1_LEN + ALIGN_SIZE);
                    tmp = (unsigned long) ba->ba_1_org;
                    tmp += ALIGN_SIZE;
                    tmp &= ~((unsigned long) ALIGN_SIZE);
                    ba->ba_1 = (void *) tmp;
                    k++;
                }
            }
        }
    }
    /* Allocation and initialization of Statistics block */
    size = sizeof(struct stat_block);
    mac_control->stats_mem = pci_alloc_consistent
        (nic->pdev, size, &mac_control->stats_mem_phy);

    if (!mac_control->stats_mem) {
        /*
         * In case of failure, free_shared_mem() is called, which
         * should free any memory that was alloced till the
         * failure happened.
         */
        return -ENOMEM;
    }
    mem_allocated += size;
    mac_control->stats_mem_sz = size;

    tmp_v_addr = mac_control->stats_mem;
    mac_control->stats_info = (struct stat_block *) tmp_v_addr;
    memset(tmp_v_addr, 0, size);
    DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
              (unsigned long long) tmp_p_addr);
    mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
    return SUCCESS;
}
/**
 * free_shared_mem - Free the allocated Memory
 * @nic: Device private variable.
 * Description: This function is to free all memory locations allocated by
 * the init_shared_mem() function and return it to the kernel.
 */
static void free_shared_mem(struct s2io_nic *nic)
{
    int i, j, blk_cnt, size;
    u32 ufo_size = 0;
    void *tmp_v_addr;
    dma_addr_t tmp_p_addr;
    struct mac_info *mac_control;
    struct config_param *config;
    int lst_size, lst_per_page;
    struct net_device *dev;
    int page_num = 0;

    if (!nic)
        return;

    dev = nic->dev;

    mac_control = &nic->mac_control;
    config = &nic->config;

    lst_size = (sizeof(struct TxD) * config->max_txds);
    lst_per_page = PAGE_SIZE / lst_size;

    for (i = 0; i < config->tx_fifo_num; i++) {
        ufo_size += config->tx_cfg[i].fifo_len;
        page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
                                    lst_per_page);
        for (j = 0; j < page_num; j++) {
            int mem_blks = (j * lst_per_page);
            if (!mac_control->fifos[i].list_info)
                return;
            if (!mac_control->fifos[i].list_info[mem_blks].
                list_virt_addr)
                break;
            pci_free_consistent(nic->pdev, PAGE_SIZE,
                                mac_control->fifos[i].
                                list_info[mem_blks].
                                list_virt_addr,
                                mac_control->fifos[i].
                                list_info[mem_blks].
                                list_phy_addr);
            nic->mac_control.stats_info->sw_stat.mem_freed
                += PAGE_SIZE;
        }
        /* If we got a zero DMA address during allocation,
         * free the page now
         */
        if (mac_control->zerodma_virt_addr) {
            pci_free_consistent(nic->pdev, PAGE_SIZE,
                                mac_control->zerodma_virt_addr,
                                (dma_addr_t)0);
            DBG_PRINT(INIT_DBG,
                      "%s: Freeing TxDL with zero DMA addr. ",
                      dev->name);
            DBG_PRINT(INIT_DBG, "Virtual address %p\n",
                      mac_control->zerodma_virt_addr);
            nic->mac_control.stats_info->sw_stat.mem_freed
                += PAGE_SIZE;
        }
        kfree(mac_control->fifos[i].list_info);
        nic->mac_control.stats_info->sw_stat.mem_freed +=
            (nic->config.tx_cfg[i].fifo_len * sizeof(struct list_info_hold));
    }

    size = SIZE_OF_BLOCK;
    for (i = 0; i < config->rx_ring_num; i++) {
        blk_cnt = mac_control->rings[i].block_count;
        for (j = 0; j < blk_cnt; j++) {
            tmp_v_addr = mac_control->rings[i].rx_blocks[j].
                block_virt_addr;
            tmp_p_addr = mac_control->rings[i].rx_blocks[j].
                block_dma_addr;
            if (tmp_v_addr == NULL)
                break;
            pci_free_consistent(nic->pdev, size,
                                tmp_v_addr, tmp_p_addr);
            nic->mac_control.stats_info->sw_stat.mem_freed += size;
            kfree(mac_control->rings[i].rx_blocks[j].rxds);
            nic->mac_control.stats_info->sw_stat.mem_freed +=
                (sizeof(struct rxd_info) * rxd_count[nic->rxd_mode]);
        }
    }

    if (nic->rxd_mode == RXD_MODE_3B) {
        /* Freeing buffer storage addresses in 2BUFF mode. */
        for (i = 0; i < config->rx_ring_num; i++) {
            blk_cnt = config->rx_cfg[i].num_rxd /
                (rxd_count[nic->rxd_mode] + 1);
            for (j = 0; j < blk_cnt; j++) {
                int k = 0;
                if (!mac_control->rings[i].ba[j])
                    continue;
                while (k != rxd_count[nic->rxd_mode]) {
                    struct buffAdd *ba =
                        &mac_control->rings[i].ba[j][k];
                    kfree(ba->ba_0_org);
                    nic->mac_control.stats_info->sw_stat.
                        mem_freed += (BUF0_LEN + ALIGN_SIZE);
                    kfree(ba->ba_1_org);
                    nic->mac_control.stats_info->sw_stat.
                        mem_freed += (BUF1_LEN + ALIGN_SIZE);
                    k++;
                }
                kfree(mac_control->rings[i].ba[j]);
                nic->mac_control.stats_info->sw_stat.mem_freed +=
                    (sizeof(struct buffAdd) *
                     (rxd_count[nic->rxd_mode] + 1));
            }
            kfree(mac_control->rings[i].ba);
            nic->mac_control.stats_info->sw_stat.mem_freed +=
                (sizeof(struct buffAdd *) * blk_cnt);
        }
    }

    if (mac_control->stats_mem) {
        pci_free_consistent(nic->pdev,
                            mac_control->stats_mem_sz,
                            mac_control->stats_mem,
                            mac_control->stats_mem_phy);
        nic->mac_control.stats_info->sw_stat.mem_freed +=
            mac_control->stats_mem_sz;
    }
    if (nic->ufo_in_band_v) {
        kfree(nic->ufo_in_band_v);
        nic->mac_control.stats_info->sw_stat.mem_freed
            += (ufo_size * sizeof(u64));
    }
}
/**
 * s2io_verify_pci_mode -
 */

static int s2io_verify_pci_mode(struct s2io_nic *nic)
{
    struct XENA_dev_config __iomem *bar0 = nic->bar0;
    register u64 val64 = 0;
    int mode;

    val64 = readq(&bar0->pci_mode);
    mode = (u8) GET_PCI_MODE(val64);

    if (val64 & PCI_MODE_UNKNOWN_MODE)
        return -1;      /* Unknown PCI mode */
    return mode;
}
#define NEC_VENID   0x1033
#define NEC_DEVID   0x0125
static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
{
    struct pci_dev *tdev = NULL;
    while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
        if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
            if (tdev->bus == s2io_pdev->bus->parent) {
                pci_dev_put(tdev);
                return 1;
            }
        }
    }
    return 0;
}
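
/*
 * Editor's note: pci_get_device() returns a referenced device and, when
 * used in a loop like the one above, drops the reference on the device
 * passed in as the 'from' argument. The explicit pci_dev_put() is
 * therefore needed only on the early-exit path, where the loop is
 * abandoned while tdev is still held.
 */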
static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};

/**
 * s2io_print_pci_mode -
 */
static int s2io_print_pci_mode(struct s2io_nic *nic)
{
    struct XENA_dev_config __iomem *bar0 = nic->bar0;
    register u64 val64 = 0;
    int mode;
    struct config_param *config = &nic->config;

    val64 = readq(&bar0->pci_mode);
    mode = (u8) GET_PCI_MODE(val64);

    if (val64 & PCI_MODE_UNKNOWN_MODE)
        return -1;      /* Unknown PCI mode */

    config->bus_speed = bus_speed[mode];

    if (s2io_on_nec_bridge(nic->pdev)) {
        DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
                  nic->dev->name);
        return mode;
    }

    if (val64 & PCI_MODE_32_BITS) {
        DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
    } else {
        DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
    }

    switch (mode) {
    case PCI_MODE_PCI_33:
        DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
        break;
    case PCI_MODE_PCI_66:
        DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
        break;
    case PCI_MODE_PCIX_M1_66:
        DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
        break;
    case PCI_MODE_PCIX_M1_100:
        DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
        break;
    case PCI_MODE_PCIX_M1_133:
        DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
        break;
    case PCI_MODE_PCIX_M2_66:
        DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
        break;
    case PCI_MODE_PCIX_M2_100:
        DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
        break;
    case PCI_MODE_PCIX_M2_133:
        DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
        break;
    default:
        return -1;      /* Unsupported bus speed */
    }

    return mode;
}
/**
 * init_nic - Initialization of hardware
 * @nic: device private variable
 * Description: The function sequentially configures every block
 * of the H/W from their reset values.
 * Return Value: SUCCESS on success and
 * '-1' on failure (endian settings incorrect).
 */
static int init_nic(struct s2io_nic *nic)
{
    struct XENA_dev_config __iomem *bar0 = nic->bar0;
    struct net_device *dev = nic->dev;
    register u64 val64 = 0;
    void __iomem *add;
    u32 time;
    int i, j;
    struct mac_info *mac_control;
    struct config_param *config;
    int dtx_cnt = 0;
    unsigned long long mem_share;
    int mem_size;

    mac_control = &nic->mac_control;
    config = &nic->config;

    /* to set the swapper control on the card */
    if (s2io_set_swapper(nic)) {
        DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
        return -1;
    }

    /*
     * Herc requires EOI to be removed from reset before XGXS, so..
     */
    if (nic->device_type & XFRAME_II_DEVICE) {
        val64 = 0xA500000000ULL;
        writeq(val64, &bar0->sw_reset);
        msleep(500);
        val64 = readq(&bar0->sw_reset);
    }

    /* Remove XGXS from reset state */
    val64 = 0;
    writeq(val64, &bar0->sw_reset);
    msleep(500);
    val64 = readq(&bar0->sw_reset);

    /* Enable Receiving broadcasts */
    add = &bar0->mac_cfg;
    val64 = readq(&bar0->mac_cfg);
    val64 |= MAC_RMAC_BCAST_ENABLE;
    writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
    writel((u32) val64, add);
    writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
    writel((u32) (val64 >> 32), (add + 4));

    /* Read registers in all blocks */
    val64 = readq(&bar0->mac_int_mask);
    val64 = readq(&bar0->mc_int_mask);
    val64 = readq(&bar0->xgxs_int_mask);

    /* Set MTU */
    val64 = dev->mtu;
    writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);

    if (nic->device_type & XFRAME_II_DEVICE) {
        while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
            SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
                              &bar0->dtx_control, UF);
            if (dtx_cnt & 0x1)
                msleep(1);      /* Necessary!! */
            dtx_cnt++;
        }
    } else {
        while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
            SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
                              &bar0->dtx_control, UF);
            val64 = readq(&bar0->dtx_control);
            dtx_cnt++;
        }
    }

    /* Tx DMA Initialization */
    val64 = 0;
    writeq(val64, &bar0->tx_fifo_partition_0);
    writeq(val64, &bar0->tx_fifo_partition_1);
    writeq(val64, &bar0->tx_fifo_partition_2);
    writeq(val64, &bar0->tx_fifo_partition_3);
    for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
        val64 |=
            vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
                 13) | vBIT(config->tx_cfg[i].fifo_priority,
                            ((i * 32) + 5), 3);

        if (i == (config->tx_fifo_num - 1)) {
            if (i % 2 == 0)
                i++;
        }

        switch (i) {
        case 1:
            writeq(val64, &bar0->tx_fifo_partition_0);
            val64 = 0;
            break;
        case 3:
            writeq(val64, &bar0->tx_fifo_partition_1);
            val64 = 0;
            break;
        case 5:
            writeq(val64, &bar0->tx_fifo_partition_2);
            val64 = 0;
            break;
        case 7:
            writeq(val64, &bar0->tx_fifo_partition_3);
            break;
        }
    }

    /*
     * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
     * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
     */
    if ((nic->device_type == XFRAME_I_DEVICE) &&
        (nic->pdev->revision < 4))
        writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);

    val64 = readq(&bar0->tx_fifo_partition_0);
    DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
              &bar0->tx_fifo_partition_0, (unsigned long long) val64);
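
/*
 * Editor's note: vBIT(val, loc, sz) comes from s2io.h; it places 'val' in
 * an 'sz'-bit field whose most significant bit sits at big-endian bit
 * position 'loc' of the 64-bit register image, i.e. roughly
 * ((u64)(val)) << (64 - (loc) - (sz)). For example,
 * vBIT(fifo_len - 1, 19, 13) would place the 13-bit length field at bits
 * 19-31 of the register. (Paraphrased from the header; an assumption, not
 * a definitive restatement of it.)
 */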
    /*
     * Initialization of Tx_PA_CONFIG register to ignore packet
     * integrity checking.
     */
    val64 = readq(&bar0->tx_pa_cfg);
    val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
        TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
    writeq(val64, &bar0->tx_pa_cfg);

    /* Rx DMA initialization. */
    val64 = 0;
    for (i = 0; i < config->rx_ring_num; i++) {
        val64 |=
            vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
                 3);
    }
    writeq(val64, &bar0->rx_queue_priority);

    /*
     * Allocating equal share of memory to all the
     * configured Rings.
     */
    val64 = 0;
    if (nic->device_type & XFRAME_II_DEVICE)
        mem_size = 32;
    else
        mem_size = 64;

    for (i = 0; i < config->rx_ring_num; i++) {
        switch (i) {
        case 0:
            mem_share = (mem_size / config->rx_ring_num +
                         mem_size % config->rx_ring_num);
            val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
            continue;
        case 1:
            mem_share = (mem_size / config->rx_ring_num);
            val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
            continue;
        case 2:
            mem_share = (mem_size / config->rx_ring_num);
            val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
            continue;
        case 3:
            mem_share = (mem_size / config->rx_ring_num);
            val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
            continue;
        case 4:
            mem_share = (mem_size / config->rx_ring_num);
            val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
            continue;
        case 5:
            mem_share = (mem_size / config->rx_ring_num);
            val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
            continue;
        case 6:
            mem_share = (mem_size / config->rx_ring_num);
            val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
            continue;
        case 7:
            mem_share = (mem_size / config->rx_ring_num);
            val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
            continue;
        }
    }
    writeq(val64, &bar0->rx_queue_cfg);
    /*
     * Filling Tx round robin registers
     * as per the number of FIFOs
     */
    switch (config->tx_fifo_num) {
    case 1:
        val64 = 0x0000000000000000ULL;
        writeq(val64, &bar0->tx_w_round_robin_0);
        writeq(val64, &bar0->tx_w_round_robin_1);
        writeq(val64, &bar0->tx_w_round_robin_2);
        writeq(val64, &bar0->tx_w_round_robin_3);
        writeq(val64, &bar0->tx_w_round_robin_4);
        break;
    case 2:
        val64 = 0x0000010000010000ULL;
        writeq(val64, &bar0->tx_w_round_robin_0);
        val64 = 0x0100000100000100ULL;
        writeq(val64, &bar0->tx_w_round_robin_1);
        val64 = 0x0001000001000001ULL;
        writeq(val64, &bar0->tx_w_round_robin_2);
        val64 = 0x0000010000010000ULL;
        writeq(val64, &bar0->tx_w_round_robin_3);
        val64 = 0x0100000000000000ULL;
        writeq(val64, &bar0->tx_w_round_robin_4);
        break;
    case 3:
        val64 = 0x0001000102000001ULL;
        writeq(val64, &bar0->tx_w_round_robin_0);
        val64 = 0x0001020000010001ULL;
        writeq(val64, &bar0->tx_w_round_robin_1);
        val64 = 0x0200000100010200ULL;
        writeq(val64, &bar0->tx_w_round_robin_2);
        val64 = 0x0001000102000001ULL;
        writeq(val64, &bar0->tx_w_round_robin_3);
        val64 = 0x0001020000000000ULL;
        writeq(val64, &bar0->tx_w_round_robin_4);
        break;
    case 4:
        val64 = 0x0001020300010200ULL;
        writeq(val64, &bar0->tx_w_round_robin_0);
        val64 = 0x0100000102030001ULL;
        writeq(val64, &bar0->tx_w_round_robin_1);
        val64 = 0x0200010000010203ULL;
        writeq(val64, &bar0->tx_w_round_robin_2);
        val64 = 0x0001020001000001ULL;
        writeq(val64, &bar0->tx_w_round_robin_3);
        val64 = 0x0203000100000000ULL;
        writeq(val64, &bar0->tx_w_round_robin_4);
        break;
    case 5:
        val64 = 0x0001000203000102ULL;
        writeq(val64, &bar0->tx_w_round_robin_0);
        val64 = 0x0001020001030004ULL;
        writeq(val64, &bar0->tx_w_round_robin_1);
        val64 = 0x0001000203000102ULL;
        writeq(val64, &bar0->tx_w_round_robin_2);
        val64 = 0x0001020001030004ULL;
        writeq(val64, &bar0->tx_w_round_robin_3);
        val64 = 0x0001000000000000ULL;
        writeq(val64, &bar0->tx_w_round_robin_4);
        break;
    case 6:
        val64 = 0x0001020304000102ULL;
        writeq(val64, &bar0->tx_w_round_robin_0);
        val64 = 0x0304050001020001ULL;
        writeq(val64, &bar0->tx_w_round_robin_1);
        val64 = 0x0203000100000102ULL;
        writeq(val64, &bar0->tx_w_round_robin_2);
        val64 = 0x0304000102030405ULL;
        writeq(val64, &bar0->tx_w_round_robin_3);
        val64 = 0x0001000200000000ULL;
        writeq(val64, &bar0->tx_w_round_robin_4);
        break;
    case 7:
        val64 = 0x0001020001020300ULL;
        writeq(val64, &bar0->tx_w_round_robin_0);
        val64 = 0x0102030400010203ULL;
        writeq(val64, &bar0->tx_w_round_robin_1);
        val64 = 0x0405060001020001ULL;
        writeq(val64, &bar0->tx_w_round_robin_2);
        val64 = 0x0304050000010200ULL;
        writeq(val64, &bar0->tx_w_round_robin_3);
        val64 = 0x0102030000000000ULL;
        writeq(val64, &bar0->tx_w_round_robin_4);
        break;
    case 8:
        val64 = 0x0001020300040105ULL;
        writeq(val64, &bar0->tx_w_round_robin_0);
        val64 = 0x0200030106000204ULL;
        writeq(val64, &bar0->tx_w_round_robin_1);
        val64 = 0x0103000502010007ULL;
        writeq(val64, &bar0->tx_w_round_robin_2);
        val64 = 0x0304010002060500ULL;
        writeq(val64, &bar0->tx_w_round_robin_3);
        val64 = 0x0103020400000000ULL;
        writeq(val64, &bar0->tx_w_round_robin_4);
        break;
    }
    /* Enable all configured Tx FIFO partitions */
    val64 = readq(&bar0->tx_fifo_partition_0);
    val64 |= (TX_FIFO_PARTITION_EN);
    writeq(val64, &bar0->tx_fifo_partition_0);

    /* Filling the Rx round robin registers as per the
     * number of Rings and steering based on QoS.
     */
    switch (config->rx_ring_num) {
    case 1:
        val64 = 0x8080808080808080ULL;
        writeq(val64, &bar0->rts_qos_steering);
        break;
    case 2:
        val64 = 0x0000010000010000ULL;
        writeq(val64, &bar0->rx_w_round_robin_0);
        val64 = 0x0100000100000100ULL;
        writeq(val64, &bar0->rx_w_round_robin_1);
        val64 = 0x0001000001000001ULL;
        writeq(val64, &bar0->rx_w_round_robin_2);
        val64 = 0x0000010000010000ULL;
        writeq(val64, &bar0->rx_w_round_robin_3);
        val64 = 0x0100000000000000ULL;
        writeq(val64, &bar0->rx_w_round_robin_4);

        val64 = 0x8080808040404040ULL;
        writeq(val64, &bar0->rts_qos_steering);
        break;
    case 3:
        val64 = 0x0001000102000001ULL;
        writeq(val64, &bar0->rx_w_round_robin_0);
        val64 = 0x0001020000010001ULL;
        writeq(val64, &bar0->rx_w_round_robin_1);
        val64 = 0x0200000100010200ULL;
        writeq(val64, &bar0->rx_w_round_robin_2);
        val64 = 0x0001000102000001ULL;
        writeq(val64, &bar0->rx_w_round_robin_3);
        val64 = 0x0001020000000000ULL;
        writeq(val64, &bar0->rx_w_round_robin_4);

        val64 = 0x8080804040402020ULL;
        writeq(val64, &bar0->rts_qos_steering);
        break;
    case 4:
        val64 = 0x0001020300010200ULL;
        writeq(val64, &bar0->rx_w_round_robin_0);
        val64 = 0x0100000102030001ULL;
        writeq(val64, &bar0->rx_w_round_robin_1);
        val64 = 0x0200010000010203ULL;
        writeq(val64, &bar0->rx_w_round_robin_2);
        val64 = 0x0001020001000001ULL;
        writeq(val64, &bar0->rx_w_round_robin_3);
        val64 = 0x0203000100000000ULL;
        writeq(val64, &bar0->rx_w_round_robin_4);

        val64 = 0x8080404020201010ULL;
        writeq(val64, &bar0->rts_qos_steering);
        break;
    case 5:
        val64 = 0x0001000203000102ULL;
        writeq(val64, &bar0->rx_w_round_robin_0);
        val64 = 0x0001020001030004ULL;
        writeq(val64, &bar0->rx_w_round_robin_1);
        val64 = 0x0001000203000102ULL;
        writeq(val64, &bar0->rx_w_round_robin_2);
        val64 = 0x0001020001030004ULL;
        writeq(val64, &bar0->rx_w_round_robin_3);
        val64 = 0x0001000000000000ULL;
        writeq(val64, &bar0->rx_w_round_robin_4);

        val64 = 0x8080404020201008ULL;
        writeq(val64, &bar0->rts_qos_steering);
        break;
    case 6:
        val64 = 0x0001020304000102ULL;
        writeq(val64, &bar0->rx_w_round_robin_0);
        val64 = 0x0304050001020001ULL;
        writeq(val64, &bar0->rx_w_round_robin_1);
        val64 = 0x0203000100000102ULL;
        writeq(val64, &bar0->rx_w_round_robin_2);
        val64 = 0x0304000102030405ULL;
        writeq(val64, &bar0->rx_w_round_robin_3);
        val64 = 0x0001000200000000ULL;
        writeq(val64, &bar0->rx_w_round_robin_4);

        val64 = 0x8080404020100804ULL;
        writeq(val64, &bar0->rts_qos_steering);
        break;
    case 7:
        val64 = 0x0001020001020300ULL;
        writeq(val64, &bar0->rx_w_round_robin_0);
        val64 = 0x0102030400010203ULL;
        writeq(val64, &bar0->rx_w_round_robin_1);
        val64 = 0x0405060001020001ULL;
        writeq(val64, &bar0->rx_w_round_robin_2);
        val64 = 0x0304050000010200ULL;
        writeq(val64, &bar0->rx_w_round_robin_3);
        val64 = 0x0102030000000000ULL;
        writeq(val64, &bar0->rx_w_round_robin_4);

        val64 = 0x8080402010080402ULL;
        writeq(val64, &bar0->rts_qos_steering);
        break;
    case 8:
        val64 = 0x0001020300040105ULL;
        writeq(val64, &bar0->rx_w_round_robin_0);
        val64 = 0x0200030106000204ULL;
        writeq(val64, &bar0->rx_w_round_robin_1);
        val64 = 0x0103000502010007ULL;
        writeq(val64, &bar0->rx_w_round_robin_2);
        val64 = 0x0304010002060500ULL;
        writeq(val64, &bar0->rx_w_round_robin_3);
        val64 = 0x0103020400000000ULL;
        writeq(val64, &bar0->rx_w_round_robin_4);

        val64 = 0x8040201008040201ULL;
        writeq(val64, &bar0->rts_qos_steering);
        break;
    }
    /* UDP Fix */
    val64 = 0;
    for (i = 0; i < 8; i++)
        writeq(val64, &bar0->rts_frm_len_n[i]);

    /* Set the default rts frame length for the rings configured */
    val64 = MAC_RTS_FRM_LEN_SET(dev->mtu + 22);
    for (i = 0; i < config->rx_ring_num; i++)
        writeq(val64, &bar0->rts_frm_len_n[i]);

    /* Set the frame length for the configured rings
     * desired by the user
     */
    for (i = 0; i < config->rx_ring_num; i++) {
        /* If rts_frm_len[i] == 0 then it is assumed that the user has
         * not specified frame length steering.
         * If the user provides the frame length then program
         * the rts_frm_len register for those values or else
         * leave it as it is.
         */
        if (rts_frm_len[i] != 0) {
            writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
                   &bar0->rts_frm_len_n[i]);
        }
    }

    /* Disable differentiated services steering logic */
    for (i = 0; i < 64; i++) {
        if (rts_ds_steer(nic, i, 0) == FAILURE) {
            DBG_PRINT(ERR_DBG, "%s: failed rts ds steering",
                      dev->name);
            DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i);
            return FAILURE;
        }
    }
    /* Program statistics memory */
    writeq(mac_control->stats_mem_phy, &bar0->stat_addr);

    if (nic->device_type == XFRAME_II_DEVICE) {
        val64 = STAT_BC(0x320);
        writeq(val64, &bar0->stat_byte_cnt);
    }

    /*
     * Initializing the sampling rate for the device to calculate the
     * bandwidth utilization.
     */
    val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
        MAC_RX_LINK_UTIL_VAL(rmac_util_period);
    writeq(val64, &bar0->mac_link_util);

    /*
     * Initializing the Transmit and Receive Traffic Interrupt
     * Scheme.
     */

    /*
     * TTI Initialization. Default Tx timer gets us about
     * 250 interrupts per sec. Continuous interrupts are enabled
     * by default.
     */
    if (nic->device_type == XFRAME_II_DEVICE) {
        int count = (nic->config.bus_speed * 125) / 2;
        val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
    } else {
        val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
    }
    val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
        TTI_DATA1_MEM_TX_URNG_B(0x10) |
        TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
    if (use_continuous_tx_intrs)
        val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
    writeq(val64, &bar0->tti_data1_mem);

    val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
        TTI_DATA2_MEM_TX_UFC_B(0x20) |
        TTI_DATA2_MEM_TX_UFC_C(0x40) | TTI_DATA2_MEM_TX_UFC_D(0x80);
    writeq(val64, &bar0->tti_data2_mem);

    val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
    writeq(val64, &bar0->tti_command_mem);

    /*
     * Once the operation completes, the Strobe bit of the command
     * register will be reset. We poll for this particular condition.
     * We wait for a maximum of 500ms for the operation to complete,
     * if it's not complete by then we return error.
     */
    time = 0;
    while (TRUE) {
        val64 = readq(&bar0->tti_command_mem);
        if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
            break;
        }
        if (time > 10) {
            DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
                      dev->name);
            return -1;
        }
        msleep(50);
        time++;
    }
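
    /*
     * Editor's note: this "issue a command, poll until the hardware
     * clears the strobe bit, give up after ~500 ms" pattern recurs for
     * the RTI memory just below; elsewhere in this driver the same idea
     * is factored into wait_for_cmd_complete(). A sketch of the shared
     * shape (a hypothetical helper, for illustration only):
     *
     *     static int s2io_poll_strobe(void __iomem *addr, u64 strobe_bit)
     *     {
     *         int time;
     *
     *         for (time = 0; time <= 10; time++) {
     *             if (!(readq(addr) & strobe_bit))
     *                 return SUCCESS;   ...command completed...
     *             msleep(50);           ...11 * 50 ms ~= 500 ms cap...
     *         }
     *         return FAILURE;
     *     }
     */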
    /* RTI Initialization */
    if (nic->device_type == XFRAME_II_DEVICE) {
        /*
         * Programmed to generate approx. 500 Intrs per
         * second
         */
        int count = (nic->config.bus_speed * 125) / 4;
        val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
    } else
        val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
    val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
        RTI_DATA1_MEM_RX_URNG_B(0x10) |
        RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;

    writeq(val64, &bar0->rti_data1_mem);

    val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
        RTI_DATA2_MEM_RX_UFC_B(0x2);
    if (nic->config.intr_type == MSI_X)
        val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) |
                  RTI_DATA2_MEM_RX_UFC_D(0x40));
    else
        val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) |
                  RTI_DATA2_MEM_RX_UFC_D(0x80));
    writeq(val64, &bar0->rti_data2_mem);

    for (i = 0; i < config->rx_ring_num; i++) {
        val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
            | RTI_CMD_MEM_OFFSET(i);
        writeq(val64, &bar0->rti_command_mem);

        /*
         * Once the operation completes, the Strobe bit of the
         * command register will be reset. We poll for this
         * particular condition. We wait for a maximum of 500ms
         * for the operation to complete, if it's not complete
         * by then we return error.
         */
        time = 0;
        while (TRUE) {
            val64 = readq(&bar0->rti_command_mem);
            if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
                break;

            if (time > 10) {
                DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
                          dev->name);
                return -1;
            }
            time++;
            msleep(50);
        }
    }
    /*
     * Initializing proper values as Pause threshold into all
     * the 8 Queues on Rx side.
     */
    writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
    writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);

    /* Disable RMAC PAD STRIPPING */
    add = &bar0->mac_cfg;
    val64 = readq(&bar0->mac_cfg);
    val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
    writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
    writel((u32) (val64), add);
    writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
    writel((u32) (val64 >> 32), (add + 4));
    val64 = readq(&bar0->mac_cfg);

    /* Enable FCS stripping by adapter */
    add = &bar0->mac_cfg;
    val64 = readq(&bar0->mac_cfg);
    val64 |= MAC_CFG_RMAC_STRIP_FCS;
    if (nic->device_type == XFRAME_II_DEVICE)
        writeq(val64, &bar0->mac_cfg);
    else {
        writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
        writel((u32) (val64), add);
        writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
        writel((u32) (val64 >> 32), (add + 4));
    }

    /*
     * Set the time value to be inserted in the pause frame
     * generated by xena.
     */
    val64 = readq(&bar0->rmac_pause_cfg);
    val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
    val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
    writeq(val64, &bar0->rmac_pause_cfg);

    /*
     * Set the Threshold Limit for Generating the pause frame.
     * If the amount of data in any Queue exceeds the ratio
     * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256, a
     * pause frame is generated.
     */
    val64 = 0;
    for (i = 0; i < 4; i++) {
        val64 |=
            (((u64) 0xFF00 | nic->mac_control.
              mc_pause_threshold_q0q3)
             << (i * 2 * 8));
    }
    writeq(val64, &bar0->mc_pause_thresh_q0q3);

    val64 = 0;
    for (i = 0; i < 4; i++) {
        val64 |=
            (((u64) 0xFF00 | nic->mac_control.
              mc_pause_threshold_q4q7)
             << (i * 2 * 8));
    }
    writeq(val64, &bar0->mc_pause_thresh_q4q7);

    /*
     * TxDMA will stop Read request if the number of read splits has
     * exceeded the limit pointed by shared_splits
     */
    val64 = readq(&bar0->pic_control);
    val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
    writeq(val64, &bar0->pic_control);

    if (nic->config.bus_speed == 266) {
        writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
        writeq(0x0, &bar0->read_retry_delay);
        writeq(0x0, &bar0->write_retry_delay);
    }

    /*
     * Programming the Herc to split every write transaction
     * that does not start on an ADB to reduce disconnects.
     */
    if (nic->device_type == XFRAME_II_DEVICE) {
        val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
            MISC_LINK_STABILITY_PRD(3);
        writeq(val64, &bar0->misc_control);
        val64 = readq(&bar0->pic_control2);
        val64 &= ~(BIT(13) | BIT(14) | BIT(15));
        writeq(val64, &bar0->pic_control2);
    }
    if (strstr(nic->product_name, "CX4")) {
        val64 = TMAC_AVG_IPG(0x17);
        writeq(val64, &bar0->tmac_avg_ipg);
    }

    return SUCCESS;
}
#define LINK_UP_DOWN_INTERRUPT  1
#define MAC_RMAC_ERR_TIMER      2

static int s2io_link_fault_indication(struct s2io_nic *nic)
{
    if (nic->config.intr_type != INTA)
        return MAC_RMAC_ERR_TIMER;
    if (nic->device_type == XFRAME_II_DEVICE)
        return LINK_UP_DOWN_INTERRUPT;
    else
        return MAC_RMAC_ERR_TIMER;
}
/**
 * do_s2io_write_bits - update alarm bits in alarm register
 * @value: alarm bits
 * @flag: interrupt status
 * @addr: address value
 * Description: update alarm bits in alarm register
 * Return Value:
 * NONE.
 */
static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
{
    u64 temp64;

    temp64 = readq(addr);

    if (flag == ENABLE_INTRS)
        temp64 &= ~((u64) value);
    else
        temp64 |= ((u64) value);
    writeq(temp64, addr);
}
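
/*
 * Editor's note: the alarm mask registers use 0 = interrupt enabled, so
 * ENABLE_INTRS clears the requested bits and any other flag sets them.
 * An illustrative call:
 *
 *     do_s2io_write_bits(SM_SM_ERR_ALARM, ENABLE_INTRS,
 *                        &bar0->sm_err_mask);
 *         ...unmasks (enables) the state-machine error alarm...
 */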
1752 void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
1754 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1755 register u64 gen_int_mask = 0;
1757 if (mask & TX_DMA_INTR) {
1759 gen_int_mask |= TXDMA_INT_M;
1761 do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
1762 TXDMA_PCC_INT | TXDMA_TTI_INT |
1763 TXDMA_LSO_INT | TXDMA_TPA_INT |
1764 TXDMA_SM_INT, flag, &bar0->txdma_int_mask);
1766 do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
1767 PFC_MISC_0_ERR | PFC_MISC_1_ERR |
1768 PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
1769 &bar0->pfc_err_mask);
1771 do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
1772 TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
1773 TDA_PCIX_ERR, flag, &bar0->tda_err_mask);
1775 do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
1776 PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
1777 PCC_N_SERR | PCC_6_COF_OV_ERR |
1778 PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
1779 PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
1780 PCC_TXB_ECC_SG_ERR, flag, &bar0->pcc_err_mask);
1782 do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
1783 TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);
1785 do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
1786 LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
1787 LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
1788 flag, &bar0->lso_err_mask);
1790 do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
1791 flag, &bar0->tpa_err_mask);
1793 do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
1797 if (mask & TX_MAC_INTR) {
1798 gen_int_mask |= TXMAC_INT_M;
1799 do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
1800 &bar0->mac_int_mask);
1801 do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
1802 TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
1803 TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
1804 flag, &bar0->mac_tmac_err_mask);
1807 if (mask & TX_XGXS_INTR) {
1808 gen_int_mask |= TXXGXS_INT_M;
1809 do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
1810 &bar0->xgxs_int_mask);
1811 do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
1812 TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
1813 flag, &bar0->xgxs_txgxs_err_mask);
1816 if (mask & RX_DMA_INTR) {
1817 gen_int_mask |= RXDMA_INT_M;
1818 do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
1819 RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
1820 flag, &bar0->rxdma_int_mask);
1821 do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
1822 RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
1823 RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
1824 RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
1825 do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
1826 PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
1827 PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
1828 &bar0->prc_pcix_err_mask);
1829 do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
1830 RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
1831 &bar0->rpa_err_mask);
1832 do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
1833 RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
1834 RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
1835 RDA_FRM_ECC_SG_ERR | RDA_MISC_ERR|RDA_PCIX_ERR,
1836 flag, &bar0->rda_err_mask);
1837 do_s2io_write_bits(RTI_SM_ERR_ALARM |
1838 RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
1839 flag, &bar0->rti_err_mask);
1842 if (mask & RX_MAC_INTR) {
1843 gen_int_mask |= RXMAC_INT_M;
1844 do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
1845 &bar0->mac_int_mask);
1846 do_s2io_write_bits(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
1847 RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
1848 RMAC_DOUBLE_ECC_ERR |
1849 RMAC_LINK_STATE_CHANGE_INT,
1850 flag, &bar0->mac_rmac_err_mask);
	if (mask & RX_XGXS_INTR) {
1855 gen_int_mask |= RXXGXS_INT_M;
1856 do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
1857 &bar0->xgxs_int_mask);
1858 do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
1859 &bar0->xgxs_rxgxs_err_mask);
1862 if (mask & MC_INTR) {
1863 gen_int_mask |= MC_INT_M;
1864 do_s2io_write_bits(MC_INT_MASK_MC_INT, flag, &bar0->mc_int_mask);
1865 do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
1866 MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
1867 &bar0->mc_err_mask);
1869 nic->general_int_mask = gen_int_mask;
1871 /* Remove this line when alarm interrupts are enabled */
1872 nic->general_int_mask = 0;
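	/*
	 * Zeroing this here means none of the alarm sources collected
	 * above are unmasked when en_dis_able_nic_intrs() ORs
	 * general_int_mask into the blocks it enables.
	 */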
/**
 * en_dis_able_nic_intrs - Enable or Disable the interrupts
 * @nic: device private variable
 * @mask: A mask indicating which Intr block must be modified
 * @flag: A flag indicating whether to enable or disable the Intrs
 * Description: This function will either disable or enable the interrupts
 * depending on the flag argument. The mask argument can be used to
 * enable/disable any Intr block.
 * Return Value: NONE.
 */
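/*
 * A typical call (see stop_nic() later in this file) ORs together the
 * top-level blocks of interest, e.g.:
 *
 *	en_dis_able_nic_intrs(nic, TX_TRAFFIC_INTR | RX_TRAFFIC_INTR,
 *			      DISABLE_INTRS);
 */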
1885 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1887 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1888 register u64 temp64 = 0, intr_mask = 0;
1890 intr_mask = nic->general_int_mask;
1892 /* Top level interrupt classification */
1893 /* PIC Interrupts */
1894 if (mask & TX_PIC_INTR) {
1895 /* Enable PIC Intrs in the general intr mask register */
1896 intr_mask |= TXPIC_INT_M;
1897 if (flag == ENABLE_INTRS) {
			/*
			 * On a Hercules adapter, enable the GPIO (link
			 * state change) interrupt; otherwise disable all
			 * PCIX, Flash, MDIO, IIC and GPIO interrupts
			 * for now.
			 * TODO
			 */
1904 if (s2io_link_fault_indication(nic) ==
1905 LINK_UP_DOWN_INTERRUPT ) {
1906 do_s2io_write_bits(PIC_INT_GPIO, flag,
1907 &bar0->pic_int_mask);
1908 do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
1909 &bar0->gpio_int_mask);
1910 } else
1911 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1912 } else if (flag == DISABLE_INTRS) {
			/*
			 * Disable PIC Intrs in the general
			 * intr mask register
			 */
1917 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1921 /* Tx traffic interrupts */
1922 if (mask & TX_TRAFFIC_INTR) {
1923 intr_mask |= TXTRAFFIC_INT_M;
1924 if (flag == ENABLE_INTRS) {
			/*
			 * Enable all the Tx side interrupts;
			 * writing 0 enables all 64 TX interrupt levels.
			 */
1929 writeq(0x0, &bar0->tx_traffic_mask);
1930 } else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Tx Traffic Intrs in the general intr mask
			 * register.
			 */
1935 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1939 /* Rx traffic interrupts */
1940 if (mask & RX_TRAFFIC_INTR) {
1941 intr_mask |= RXTRAFFIC_INT_M;
1942 if (flag == ENABLE_INTRS) {
1943 /* writing 0 Enables all 8 RX interrupt levels */
1944 writeq(0x0, &bar0->rx_traffic_mask);
1945 } else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Rx Traffic Intrs in the general intr mask
			 * register.
			 */
1950 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
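	/*
	 * Commit the new top-level mask. A cleared bit in
	 * general_int_mask leaves the corresponding block enabled, so
	 * enabling clears the intr_mask bits while disabling masks
	 * everything.
	 */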
1954 temp64 = readq(&bar0->general_int_mask);
1955 if (flag == ENABLE_INTRS)
1956 temp64 &= ~((u64) intr_mask);
1957 else
1958 temp64 = DISABLE_ALL_INTRS;
1959 writeq(temp64, &bar0->general_int_mask);
1961 nic->general_int_mask = readq(&bar0->general_int_mask);
/**
 * verify_pcc_quiescent - Checks for PCC quiescent state
 * Return: 1 if the PCC is quiescent
 *	   0 if the PCC is not quiescent
 */
1969 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
1971 int ret = 0, herc;
1972 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1973 u64 val64 = readq(&bar0->adapter_status);
1975 herc = (sp->device_type == XFRAME_II_DEVICE);
1977 if (flag == FALSE) {
1978 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
1979 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
1980 ret = 1;
1981 } else {
1982 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
1983 ret = 1;
1985 } else {
1986 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
1987 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1988 ADAPTER_STATUS_RMAC_PCC_IDLE))
1989 ret = 1;
1990 } else {
1991 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1992 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
1993 ret = 1;
1997 return ret;
/**
 * verify_xena_quiescence - Checks whether the H/W is ready
 * Description: Returns whether the H/W is ready to go or not by
 * inspecting the ready and quiescent bits in the adapter status
 * register.
 * Return: 1 if xena is quiescent
 *	   0 if xena is not quiescent
 */
2009 static int verify_xena_quiescence(struct s2io_nic *sp)
2011 int mode;
2012 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2013 u64 val64 = readq(&bar0->adapter_status);
2014 mode = s2io_verify_pci_mode(sp);
2016 if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
2017 DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
2018 return 0;
2020 if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
2021 DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
2022 return 0;
2024 if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
2025 DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
2026 return 0;
2028 if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
2029 DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
2030 return 0;
2032 if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
2033 DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
2034 return 0;
2036 if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
2037 DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
2038 return 0;
2040 if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
2041 DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
2042 return 0;
2044 if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
2045 DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
2046 return 0;
	/*
	 * In PCI 33 mode, the P_PLL is not used and therefore
	 * the P_PLL_LOCK bit in the adapter_status register will
	 * not be asserted.
	 */
2054 if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
2055 sp->device_type == XFRAME_II_DEVICE && mode !=
2056 PCI_MODE_PCI_33) {
2057 DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
2058 return 0;
2060 if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
2061 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2062 DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
2063 return 0;
2065 return 1;
/**
 * fix_mac_address - Fix for MAC addr problem on Alpha platforms
 * @sp: Pointer to device specific structure
 * Description :
 * New procedure to clear MAC address reading problems on Alpha platforms
 */
2076 static void fix_mac_address(struct s2io_nic * sp)
2078 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2079 u64 val64;
2080 int i = 0;
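	/*
	 * fix_mac[] is a table of magic gpio_control values terminated
	 * by END_SIGN; the read-back after each write flushes the
	 * posted PCI write before the next entry is issued.
	 */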
2082 while (fix_mac[i] != END_SIGN) {
2083 writeq(fix_mac[i++], &bar0->gpio_control);
2084 udelay(10);
2085 val64 = readq(&bar0->gpio_control);
/**
 * start_nic - Turns the device on
 * @nic : device private variable.
 * Description:
 * This function actually turns the device on. Before this function is
 * called, all Registers are configured from their reset states
 * and shared memory is allocated but the NIC is still quiescent. On
 * calling this function, the device interrupts are cleared and the NIC is
 * literally switched on by writing into the adapter control register.
 * Return Value:
 * SUCCESS on success and -1 on failure.
 */
2102 static int start_nic(struct s2io_nic *nic)
2104 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2105 struct net_device *dev = nic->dev;
2106 register u64 val64 = 0;
2107 u16 subid, i;
2108 struct mac_info *mac_control;
2109 struct config_param *config;
2111 mac_control = &nic->mac_control;
2112 config = &nic->config;
2114 /* PRC Initialization and configuration */
2115 for (i = 0; i < config->rx_ring_num; i++) {
2116 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
2117 &bar0->prc_rxd0_n[i]);
2119 val64 = readq(&bar0->prc_ctrl_n[i]);
2120 if (nic->rxd_mode == RXD_MODE_1)
2121 val64 |= PRC_CTRL_RC_ENABLED;
2122 else
2123 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
2124 if (nic->device_type == XFRAME_II_DEVICE)
2125 val64 |= PRC_CTRL_GROUP_READS;
2126 val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
2127 val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
2128 writeq(val64, &bar0->prc_ctrl_n[i]);
2131 if (nic->rxd_mode == RXD_MODE_3B) {
2132 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
2133 val64 = readq(&bar0->rx_pa_cfg);
2134 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
2135 writeq(val64, &bar0->rx_pa_cfg);
2138 if (vlan_tag_strip == 0) {
2139 val64 = readq(&bar0->rx_pa_cfg);
2140 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
2141 writeq(val64, &bar0->rx_pa_cfg);
2142 vlan_strip_flag = 0;
	/*
	 * Enabling MC-RLDRAM. After enabling the device, we wait
	 * around 100ms, which is approximately the time required
	 * for the device to be ready for operation.
	 */
2150 val64 = readq(&bar0->mc_rldram_mrs);
2151 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
2152 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
2153 val64 = readq(&bar0->mc_rldram_mrs);
2155 msleep(100); /* Delay by around 100 ms. */
2157 /* Enabling ECC Protection. */
2158 val64 = readq(&bar0->adapter_control);
2159 val64 &= ~ADAPTER_ECC_EN;
2160 writeq(val64, &bar0->adapter_control);
	/*
	 * Verify if the device is ready to be enabled, if so enable
	 * it.
	 */
2166 val64 = readq(&bar0->adapter_status);
2167 if (!verify_xena_quiescence(nic)) {
2168 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
2169 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
2170 (unsigned long long) val64);
2171 return FAILURE;
	/*
	 * With some switches, link might be already up at this point.
	 * Because of this weird behavior, when we enable the laser,
	 * we may not get link. We need to handle this. We cannot
	 * figure out which switch is misbehaving, so we are forced to
	 * make a global change.
	 */
2182 /* Enabling Laser. */
2183 val64 = readq(&bar0->adapter_control);
2184 val64 |= ADAPTER_EOI_TX_ON;
2185 writeq(val64, &bar0->adapter_control);
2187 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * We don't see link state interrupts initially on some
		 * switches, so directly schedule the link state task here.
		 */
2192 schedule_work(&nic->set_link_task);
2194 /* SXE-002: Initialize link and activity LED */
2195 subid = nic->pdev->subsystem_device;
2196 if (((subid & 0xFF) >= 0x07) &&
2197 (nic->device_type == XFRAME_I_DEVICE)) {
2198 val64 = readq(&bar0->gpio_control);
2199 val64 |= 0x0000800000000000ULL;
2200 writeq(val64, &bar0->gpio_control);
2201 val64 = 0x0411040400000000ULL;
2202 writeq(val64, (void __iomem *)bar0 + 0x2700);
2205 return SUCCESS;
/**
 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
 */
static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
					struct TxD *txdlp, int get_off)
2213 struct s2io_nic *nic = fifo_data->nic;
2214 struct sk_buff *skb;
2215 struct TxD *txds;
2216 u16 j, frg_cnt;
2218 txds = txdlp;
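	/*
	 * For UFO frames the first TxD carries the in-band UFO header
	 * buffer rather than skb data, so unmap it and step past it.
	 */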
2219 if (txds->Host_Control == (u64)(long)nic->ufo_in_band_v) {
2220 pci_unmap_single(nic->pdev, (dma_addr_t)
2221 txds->Buffer_Pointer, sizeof(u64),
2222 PCI_DMA_TODEVICE);
2223 txds++;
2226 skb = (struct sk_buff *) ((unsigned long)
2227 txds->Host_Control);
2228 if (!skb) {
2229 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2230 return NULL;
2232 pci_unmap_single(nic->pdev, (dma_addr_t)
2233 txds->Buffer_Pointer,
2234 skb->len - skb->data_len,
2235 PCI_DMA_TODEVICE);
2236 frg_cnt = skb_shinfo(skb)->nr_frags;
2237 if (frg_cnt) {
2238 txds++;
2239 for (j = 0; j < frg_cnt; j++, txds++) {
2240 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2241 if (!txds->Buffer_Pointer)
2242 break;
2243 pci_unmap_page(nic->pdev, (dma_addr_t)
2244 txds->Buffer_Pointer,
2245 frag->size, PCI_DMA_TODEVICE);
	memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
	return skb;
/**
 * free_tx_buffers - Free all queued Tx buffers
 * @nic : device private variable.
 * Description:
 * Free all queued Tx buffers.
 * Return Value: void
 */
2260 static void free_tx_buffers(struct s2io_nic *nic)
2262 struct net_device *dev = nic->dev;
2263 struct sk_buff *skb;
2264 struct TxD *txdp;
2265 int i, j;
2266 struct mac_info *mac_control;
2267 struct config_param *config;
2268 int cnt = 0;
2270 mac_control = &nic->mac_control;
2271 config = &nic->config;
2273 for (i = 0; i < config->tx_fifo_num; i++) {
2274 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
			txdp = (struct TxD *)
				mac_control->fifos[i].list_info[j].list_virt_addr;
2277 skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2278 if (skb) {
2279 nic->mac_control.stats_info->sw_stat.mem_freed
2280 += skb->truesize;
2281 dev_kfree_skb(skb);
2282 cnt++;
2285 DBG_PRINT(INTR_DBG,
2286 "%s:forcibly freeing %d skbs on FIFO%d\n",
2287 dev->name, cnt, i);
2288 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2289 mac_control->fifos[i].tx_curr_put_info.offset = 0;
/**
 * stop_nic - To stop the nic
 * @nic : device private variable.
 * Description:
 * This function does exactly the opposite of what the start_nic()
 * function does. This function is called to stop the device.
 * Return Value:
 * void.
 */
2303 static void stop_nic(struct s2io_nic *nic)
2305 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2306 register u64 val64 = 0;
2307 u16 interruptible;
2308 struct mac_info *mac_control;
2309 struct config_param *config;
2311 mac_control = &nic->mac_control;
2312 config = &nic->config;
2314 /* Disable all interrupts */
2315 en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2316 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2317 interruptible |= TX_PIC_INTR;
2318 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2320 /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2321 val64 = readq(&bar0->adapter_control);
2322 val64 &= ~(ADAPTER_CNTL_EN);
2323 writeq(val64, &bar0->adapter_control);
/**
 * fill_rx_buffers - Allocates the Rx side skbs
 * @nic: device private variable
 * @ring_no: ring number
 * Description:
 * The function allocates Rx side skbs and puts the physical
 * address of these buffers into the RxD buffer pointers, so that the NIC
 * can DMA the received frame into these locations.
 * The NIC supports 3 receive modes, viz
 * 1. single buffer,
 * 2. three buffer and
 * 3. five buffer modes.
 * Each mode defines how many fragments the received frame will be split
 * up into by the NIC. The frame is split into L3 header, L4 header and
 * L4 payload in three buffer mode, and in 5 buffer mode the L4 payload
 * itself is split into 3 fragments. The driver currently implements the
 * single buffer (RXD_MODE_1) and two buffer (RXD_MODE_3B) modes.
 * Return Value:
 * SUCCESS on success or an appropriate -ve value on failure.
 */
2347 static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2349 struct net_device *dev = nic->dev;
2350 struct sk_buff *skb;
2351 struct RxD_t *rxdp;
2352 int off, off1, size, block_no, block_no1;
2353 u32 alloc_tab = 0;
2354 u32 alloc_cnt;
2355 struct mac_info *mac_control;
2356 struct config_param *config;
2357 u64 tmp;
2358 struct buffAdd *ba;
2359 unsigned long flags;
2360 struct RxD_t *first_rxdp = NULL;
2361 u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
2362 struct RxD1 *rxdp1;
2363 struct RxD3 *rxdp3;
2364 struct swStat *stats = &nic->mac_control.stats_info->sw_stat;
2366 mac_control = &nic->mac_control;
2367 config = &nic->config;
2368 alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
2369 atomic_read(&nic->rx_bufs_left[ring_no]);
2371 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index;
2372 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
2373 while (alloc_tab < alloc_cnt) {
2374 block_no = mac_control->rings[ring_no].rx_curr_put_info.
2375 block_index;
2376 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
2378 rxdp = mac_control->rings[ring_no].
2379 rx_blocks[block_no].rxds[off].virt_addr;
2381 if ((block_no == block_no1) && (off == off1) &&
2382 (rxdp->Host_Control)) {
2383 DBG_PRINT(INTR_DBG, "%s: Get and Put",
2384 dev->name);
2385 DBG_PRINT(INTR_DBG, " info equated\n");
2386 goto end;
2388 if (off && (off == rxd_count[nic->rxd_mode])) {
2389 mac_control->rings[ring_no].rx_curr_put_info.
2390 block_index++;
2391 if (mac_control->rings[ring_no].rx_curr_put_info.
2392 block_index == mac_control->rings[ring_no].
2393 block_count)
2394 mac_control->rings[ring_no].rx_curr_put_info.
2395 block_index = 0;
2396 block_no = mac_control->rings[ring_no].
2397 rx_curr_put_info.block_index;
2398 if (off == rxd_count[nic->rxd_mode])
2399 off = 0;
2400 mac_control->rings[ring_no].rx_curr_put_info.
2401 offset = off;
2402 rxdp = mac_control->rings[ring_no].
2403 rx_blocks[block_no].block_virt_addr;
2404 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2405 dev->name, rxdp);
2407 if(!napi) {
2408 spin_lock_irqsave(&nic->put_lock, flags);
2409 mac_control->rings[ring_no].put_pos =
2410 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2411 spin_unlock_irqrestore(&nic->put_lock, flags);
2412 } else {
2413 mac_control->rings[ring_no].put_pos =
2414 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2416 if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2417 ((nic->rxd_mode == RXD_MODE_3B) &&
2418 (rxdp->Control_2 & BIT(0)))) {
2419 mac_control->rings[ring_no].rx_curr_put_info.
2420 offset = off;
2421 goto end;
2423 /* calculate size of skb based on ring mode */
2424 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2425 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2426 if (nic->rxd_mode == RXD_MODE_1)
2427 size += NET_IP_ALIGN;
2428 else
2429 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
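		/*
		 * In 1-buffer mode the skb must hold the MTU plus room for
		 * the Ethernet/802.2/SNAP headers plus NET_IP_ALIGN. In
		 * 2-buffer mode only dev->mtu + 4 bytes of it are mapped
		 * (Buffer2); the link-level header is received into the
		 * separate ba_0/ba_1 areas set up below.
		 */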
2431 /* allocate skb */
2432 skb = dev_alloc_skb(size);
2433 if(!skb) {
2434 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
2435 DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n");
2436 if (first_rxdp) {
2437 wmb();
2438 first_rxdp->Control_1 |= RXD_OWN_XENA;
			nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
			return -ENOMEM;
2444 nic->mac_control.stats_info->sw_stat.mem_allocated
2445 += skb->truesize;
2446 if (nic->rxd_mode == RXD_MODE_1) {
2447 /* 1 buffer mode - normal operation mode */
2448 rxdp1 = (struct RxD1*)rxdp;
2449 memset(rxdp, 0, sizeof(struct RxD1));
2450 skb_reserve(skb, NET_IP_ALIGN);
2451 rxdp1->Buffer0_ptr = pci_map_single
2452 (nic->pdev, skb->data, size - NET_IP_ALIGN,
2453 PCI_DMA_FROMDEVICE);
2454 if( (rxdp1->Buffer0_ptr == 0) ||
2455 (rxdp1->Buffer0_ptr ==
2456 DMA_ERROR_CODE))
2457 goto pci_map_failed;
2459 rxdp->Control_2 =
2460 SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
2462 } else if (nic->rxd_mode == RXD_MODE_3B) {
2464 * 2 buffer mode -
2465 * 2 buffer mode provides 128
2466 * byte aligned receive buffers.
2469 rxdp3 = (struct RxD3*)rxdp;
2470 /* save buffer pointers to avoid frequent dma mapping */
2471 Buffer0_ptr = rxdp3->Buffer0_ptr;
2472 Buffer1_ptr = rxdp3->Buffer1_ptr;
2473 memset(rxdp, 0, sizeof(struct RxD3));
2474 /* restore the buffer pointers for dma sync*/
2475 rxdp3->Buffer0_ptr = Buffer0_ptr;
2476 rxdp3->Buffer1_ptr = Buffer1_ptr;
2478 ba = &mac_control->rings[ring_no].ba[block_no][off];
2479 skb_reserve(skb, BUF0_LEN);
2480 tmp = (u64)(unsigned long) skb->data;
2481 tmp += ALIGN_SIZE;
2482 tmp &= ~ALIGN_SIZE;
2483 skb->data = (void *) (unsigned long)tmp;
2484 skb_reset_tail_pointer(skb);
2486 if (!(rxdp3->Buffer0_ptr))
2487 rxdp3->Buffer0_ptr =
2488 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
2489 PCI_DMA_FROMDEVICE);
2490 else
2491 pci_dma_sync_single_for_device(nic->pdev,
2492 (dma_addr_t) rxdp3->Buffer0_ptr,
2493 BUF0_LEN, PCI_DMA_FROMDEVICE);
2494 if( (rxdp3->Buffer0_ptr == 0) ||
2495 (rxdp3->Buffer0_ptr == DMA_ERROR_CODE))
2496 goto pci_map_failed;
2498 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2499 if (nic->rxd_mode == RXD_MODE_3B) {
2500 /* Two buffer mode */
2503 * Buffer2 will have L3/L4 header plus
2504 * L4 payload
2506 rxdp3->Buffer2_ptr = pci_map_single
2507 (nic->pdev, skb->data, dev->mtu + 4,
2508 PCI_DMA_FROMDEVICE);
2510 if( (rxdp3->Buffer2_ptr == 0) ||
2511 (rxdp3->Buffer2_ptr == DMA_ERROR_CODE))
2512 goto pci_map_failed;
2514 rxdp3->Buffer1_ptr =
2515 pci_map_single(nic->pdev,
2516 ba->ba_1, BUF1_LEN,
2517 PCI_DMA_FROMDEVICE);
2518 if( (rxdp3->Buffer1_ptr == 0) ||
2519 (rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
2520 pci_unmap_single
2521 (nic->pdev,
2522 (dma_addr_t)rxdp3->Buffer2_ptr,
2523 dev->mtu + 4,
2524 PCI_DMA_FROMDEVICE);
2525 goto pci_map_failed;
2527 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2528 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2529 (dev->mtu + 4);
2531 rxdp->Control_2 |= BIT(0);
2533 rxdp->Host_Control = (unsigned long) (skb);
2534 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2535 rxdp->Control_1 |= RXD_OWN_XENA;
2536 off++;
2537 if (off == (rxd_count[nic->rxd_mode] + 1))
2538 off = 0;
2539 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
2541 rxdp->Control_2 |= SET_RXD_MARKER;
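		/*
		 * Ownership is handed to the NIC in batches of
		 * 2^rxsync_frequency descriptors: the first RxD of each
		 * batch is held back and only marked RXD_OWN_XENA after a
		 * wmb(), so the adapter never sees a partially written
		 * batch.
		 */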
2542 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2543 if (first_rxdp) {
2544 wmb();
2545 first_rxdp->Control_1 |= RXD_OWN_XENA;
2547 first_rxdp = rxdp;
2549 atomic_inc(&nic->rx_bufs_left[ring_no]);
2550 alloc_tab++;
2553 end:
2554 /* Transfer ownership of first descriptor to adapter just before
2555 * exiting. Before that, use memory barrier so that ownership
	 * and other fields are seen by adapter correctly.
	 */
2558 if (first_rxdp) {
2559 wmb();
2560 first_rxdp->Control_1 |= RXD_OWN_XENA;
2563 return SUCCESS;
2564 pci_map_failed:
2565 stats->pci_map_fail_cnt++;
2566 stats->mem_freed += skb->truesize;
2567 dev_kfree_skb_irq(skb);
2568 return -ENOMEM;
2571 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2573 struct net_device *dev = sp->dev;
2574 int j;
2575 struct sk_buff *skb;
2576 struct RxD_t *rxdp;
2577 struct mac_info *mac_control;
2578 struct buffAdd *ba;
2579 struct RxD1 *rxdp1;
2580 struct RxD3 *rxdp3;
2582 mac_control = &sp->mac_control;
2583 for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2584 rxdp = mac_control->rings[ring_no].
2585 rx_blocks[blk].rxds[j].virt_addr;
2586 skb = (struct sk_buff *)
2587 ((unsigned long) rxdp->Host_Control);
2588 if (!skb) {
2589 continue;
2591 if (sp->rxd_mode == RXD_MODE_1) {
2592 rxdp1 = (struct RxD1*)rxdp;
2593 pci_unmap_single(sp->pdev, (dma_addr_t)
2594 rxdp1->Buffer0_ptr,
2595 dev->mtu +
2596 HEADER_ETHERNET_II_802_3_SIZE
2597 + HEADER_802_2_SIZE +
2598 HEADER_SNAP_SIZE,
2599 PCI_DMA_FROMDEVICE);
2600 memset(rxdp, 0, sizeof(struct RxD1));
2601 } else if(sp->rxd_mode == RXD_MODE_3B) {
2602 rxdp3 = (struct RxD3*)rxdp;
2603 ba = &mac_control->rings[ring_no].
2604 ba[blk][j];
2605 pci_unmap_single(sp->pdev, (dma_addr_t)
2606 rxdp3->Buffer0_ptr,
2607 BUF0_LEN,
2608 PCI_DMA_FROMDEVICE);
2609 pci_unmap_single(sp->pdev, (dma_addr_t)
2610 rxdp3->Buffer1_ptr,
2611 BUF1_LEN,
2612 PCI_DMA_FROMDEVICE);
2613 pci_unmap_single(sp->pdev, (dma_addr_t)
2614 rxdp3->Buffer2_ptr,
2615 dev->mtu + 4,
2616 PCI_DMA_FROMDEVICE);
2617 memset(rxdp, 0, sizeof(struct RxD3));
2619 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
2620 dev_kfree_skb(skb);
2621 atomic_dec(&sp->rx_bufs_left[ring_no]);
/**
 * free_rx_buffers - Frees all Rx buffers
 * @sp: device private variable.
 * Description:
 * This function will free all Rx buffers allocated by host.
 * Return Value:
 * NONE.
 */
2634 static void free_rx_buffers(struct s2io_nic *sp)
2636 struct net_device *dev = sp->dev;
2637 int i, blk = 0, buf_cnt = 0;
2638 struct mac_info *mac_control;
2639 struct config_param *config;
2641 mac_control = &sp->mac_control;
2642 config = &sp->config;
2644 for (i = 0; i < config->rx_ring_num; i++) {
2645 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2646 free_rxd_blk(sp,i,blk);
2648 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2649 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2650 mac_control->rings[i].rx_curr_put_info.offset = 0;
2651 mac_control->rings[i].rx_curr_get_info.offset = 0;
2652 atomic_set(&sp->rx_bufs_left[i], 0);
2653 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2654 dev->name, buf_cnt, i);
/**
 * s2io_poll - Rx interrupt handler for NAPI support
 * @napi : pointer to the napi structure.
 * @budget : The number of packets that were budgeted to be processed
 * during one pass through the 'poll' function.
 * Description:
 * Comes into the picture only if NAPI support has been incorporated. It
 * does the same thing that rx_intr_handler does, but not in an interrupt
 * context, and it will process only the budgeted number of packets.
 * Return value:
 * The number of packets processed during this poll.
 */
2671 static int s2io_poll(struct napi_struct *napi, int budget)
2673 struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2674 struct net_device *dev = nic->dev;
2675 int pkt_cnt = 0, org_pkts_to_process;
2676 struct mac_info *mac_control;
2677 struct config_param *config;
2678 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2679 int i;
2681 if (!is_s2io_card_up(nic))
2682 return 0;
2684 mac_control = &nic->mac_control;
2685 config = &nic->config;
2687 nic->pkts_to_process = budget;
2688 org_pkts_to_process = nic->pkts_to_process;
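	/*
	 * Acknowledge all pending Rx traffic interrupts up front; the
	 * readl that follows flushes the posted write before the rings
	 * are polled.
	 */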
2690 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
2691 readl(&bar0->rx_traffic_int);
2693 for (i = 0; i < config->rx_ring_num; i++) {
2694 rx_intr_handler(&mac_control->rings[i]);
2695 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2696 if (!nic->pkts_to_process) {
2697 /* Quota for the current iteration has been met */
2698 goto no_rx;
2702 netif_rx_complete(dev, napi);
2704 for (i = 0; i < config->rx_ring_num; i++) {
2705 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2706 DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2707 DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
2708 break;
2711 /* Re enable the Rx interrupts. */
2712 writeq(0x0, &bar0->rx_traffic_mask);
2713 readl(&bar0->rx_traffic_mask);
2714 return pkt_cnt;
2716 no_rx:
2717 for (i = 0; i < config->rx_ring_num; i++) {
2718 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2719 DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2720 DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
2721 break;
2724 return pkt_cnt;
2727 #ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * s2io_netpoll - netpoll event handler entry point
 * @dev : pointer to the device structure.
 * Description:
 * This function will be called by the upper layer to check for events on
 * the interface in situations where interrupts are disabled. It is used
 * for specific in-kernel networking tasks, such as remote consoles and
 * kernel debugging over the network (e.g. netdump in Red Hat).
 */
2737 static void s2io_netpoll(struct net_device *dev)
2739 struct s2io_nic *nic = dev->priv;
2740 struct mac_info *mac_control;
2741 struct config_param *config;
2742 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2743 u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2744 int i;
2746 if (pci_channel_offline(nic->pdev))
2747 return;
2749 disable_irq(dev->irq);
2751 mac_control = &nic->mac_control;
2752 config = &nic->config;
2754 writeq(val64, &bar0->rx_traffic_int);
2755 writeq(val64, &bar0->tx_traffic_int);
	/* We need to free the transmitted skbs, or else netpoll will run
	 * out of skbs and fail, and eventually a netpoll application such
	 * as netdump will fail.
	 */
2761 for (i = 0; i < config->tx_fifo_num; i++)
2762 tx_intr_handler(&mac_control->fifos[i]);
2764 /* check for received packet and indicate up to network */
2765 for (i = 0; i < config->rx_ring_num; i++)
2766 rx_intr_handler(&mac_control->rings[i]);
2768 for (i = 0; i < config->rx_ring_num; i++) {
2769 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2770 DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2771 DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
2772 break;
2775 enable_irq(dev->irq);
2776 return;
2778 #endif
/**
 * rx_intr_handler - Rx interrupt handler
 * @ring_data: per-ring control block of the ring to be serviced.
 * Description:
 * If the interrupt is because of a received frame or if the
 * receive ring contains fresh, as yet unprocessed frames, this function
 * is called. It picks out the RxD at which place the last Rx processing
 * had stopped and sends the skb to the OSM's Rx handler and then
 * increments the offset.
 * Return Value:
 * NONE.
 */
2792 static void rx_intr_handler(struct ring_info *ring_data)
2794 struct s2io_nic *nic = ring_data->nic;
2795 struct net_device *dev = (struct net_device *) nic->dev;
2796 int get_block, put_block, put_offset;
2797 struct rx_curr_get_info get_info, put_info;
2798 struct RxD_t *rxdp;
2799 struct sk_buff *skb;
2800 int pkt_cnt = 0;
2801 int i;
2802 struct RxD1* rxdp1;
2803 struct RxD3* rxdp3;
2805 spin_lock(&nic->rx_lock);
2807 get_info = ring_data->rx_curr_get_info;
2808 get_block = get_info.block_index;
2809 memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
2810 put_block = put_info.block_index;
2811 rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
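	/*
	 * put_pos is also written by fill_rx_buffers(), which in the
	 * non-NAPI case can run concurrently from another context,
	 * hence the put_lock taken below.
	 */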
2812 if (!napi) {
2813 spin_lock(&nic->put_lock);
2814 put_offset = ring_data->put_pos;
2815 spin_unlock(&nic->put_lock);
2816 } else
2817 put_offset = ring_data->put_pos;
2819 while (RXD_IS_UP2DT(rxdp)) {
		/*
		 * If we are next to the put index then it's the
		 * ring full condition.
		 */
2824 if ((get_block == put_block) &&
2825 (get_info.offset + 1) == put_info.offset) {
2826 DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name);
2827 break;
2829 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2830 if (skb == NULL) {
2831 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2832 dev->name);
2833 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2834 spin_unlock(&nic->rx_lock);
2835 return;
2837 if (nic->rxd_mode == RXD_MODE_1) {
2838 rxdp1 = (struct RxD1*)rxdp;
2839 pci_unmap_single(nic->pdev, (dma_addr_t)
2840 rxdp1->Buffer0_ptr,
2841 dev->mtu +
2842 HEADER_ETHERNET_II_802_3_SIZE +
2843 HEADER_802_2_SIZE +
2844 HEADER_SNAP_SIZE,
2845 PCI_DMA_FROMDEVICE);
2846 } else if (nic->rxd_mode == RXD_MODE_3B) {
2847 rxdp3 = (struct RxD3*)rxdp;
2848 pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
2849 rxdp3->Buffer0_ptr,
2850 BUF0_LEN, PCI_DMA_FROMDEVICE);
2851 pci_unmap_single(nic->pdev, (dma_addr_t)
2852 rxdp3->Buffer2_ptr,
2853 dev->mtu + 4,
2854 PCI_DMA_FROMDEVICE);
2856 prefetch(skb->data);
2857 rx_osm_handler(ring_data, rxdp);
2858 get_info.offset++;
2859 ring_data->rx_curr_get_info.offset = get_info.offset;
2860 rxdp = ring_data->rx_blocks[get_block].
2861 rxds[get_info.offset].virt_addr;
2862 if (get_info.offset == rxd_count[nic->rxd_mode]) {
2863 get_info.offset = 0;
2864 ring_data->rx_curr_get_info.offset = get_info.offset;
2865 get_block++;
2866 if (get_block == ring_data->block_count)
2867 get_block = 0;
2868 ring_data->rx_curr_get_info.block_index = get_block;
2869 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2872 nic->pkts_to_process -= 1;
2873 if ((napi) && (!nic->pkts_to_process))
2874 break;
2875 pkt_cnt++;
2876 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2877 break;
2879 if (nic->lro) {
2880 /* Clear all LRO sessions before exiting */
2881 for (i=0; i<MAX_LRO_SESSIONS; i++) {
2882 struct lro *lro = &nic->lro0_n[i];
2883 if (lro->in_use) {
2884 update_L3L4_header(nic, lro);
2885 queue_rx_frame(lro->parent);
2886 clear_lro_session(lro);
2891 spin_unlock(&nic->rx_lock);
/**
 * tx_intr_handler - Transmit interrupt handler
 * @fifo_data : FIFO control block of the FIFO to be serviced.
 * Description:
 * If an interrupt was raised to indicate DMA complete of the
 * Tx packet, this function is called. It identifies the last TxD
 * whose buffer was freed and frees all skbs whose data have already
 * been DMA'ed into the NIC's internal memory.
 * Return Value:
 * NONE
 */
2906 static void tx_intr_handler(struct fifo_info *fifo_data)
2908 struct s2io_nic *nic = fifo_data->nic;
2909 struct net_device *dev = (struct net_device *) nic->dev;
2910 struct tx_curr_get_info get_info, put_info;
2911 struct sk_buff *skb;
2912 struct TxD *txdlp;
2913 u8 err_mask;
2915 get_info = fifo_data->tx_curr_get_info;
2916 memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
2917 txdlp = (struct TxD *) fifo_data->list_info[get_info.offset].
2918 list_virt_addr;
2919 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2920 (get_info.offset != put_info.offset) &&
2921 (txdlp->Host_Control)) {
2922 /* Check for TxD errors */
2923 if (txdlp->Control_1 & TXD_T_CODE) {
2924 unsigned long long err;
2925 err = txdlp->Control_1 & TXD_T_CODE;
2926 if (err & 0x1) {
2927 nic->mac_control.stats_info->sw_stat.
2928 parity_err_cnt++;
2931 /* update t_code statistics */
2932 err_mask = err >> 48;
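			/*
			 * The T_CODE field sits in the top half of
			 * Control_1; shifting down by 48 leaves the small
			 * completion code that the cases below classify.
			 */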
2933 switch(err_mask) {
2934 case 2:
2935 nic->mac_control.stats_info->sw_stat.
2936 tx_buf_abort_cnt++;
2937 break;
2939 case 3:
2940 nic->mac_control.stats_info->sw_stat.
2941 tx_desc_abort_cnt++;
2942 break;
2944 case 7:
2945 nic->mac_control.stats_info->sw_stat.
2946 tx_parity_err_cnt++;
2947 break;
2949 case 10:
2950 nic->mac_control.stats_info->sw_stat.
2951 tx_link_loss_cnt++;
2952 break;
2954 case 15:
2955 nic->mac_control.stats_info->sw_stat.
2956 tx_list_proc_err_cnt++;
2957 break;
2961 skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
2962 if (skb == NULL) {
2963 DBG_PRINT(ERR_DBG, "%s: Null skb ",
2964 __FUNCTION__);
2965 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2966 return;
2969 /* Updating the statistics block */
2970 nic->stats.tx_bytes += skb->len;
2971 nic->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
2972 dev_kfree_skb_irq(skb);
2974 get_info.offset++;
2975 if (get_info.offset == get_info.fifo_len + 1)
2976 get_info.offset = 0;
2977 txdlp = (struct TxD *) fifo_data->list_info
2978 [get_info.offset].list_virt_addr;
2979 fifo_data->tx_curr_get_info.offset =
2980 get_info.offset;
2983 spin_lock(&nic->tx_lock);
2984 if (netif_queue_stopped(dev))
2985 netif_wake_queue(dev);
2986 spin_unlock(&nic->tx_lock);
/**
 * s2io_mdio_write - Function to write in to MDIO registers
 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
 * @addr : address value
 * @value : data value
 * @dev : pointer to net_device structure
 * Description:
 * This function is used to write values to the MDIO registers.
 * Return value: NONE
 */
2999 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
3001 u64 val64 = 0x0;
3002 struct s2io_nic *sp = dev->priv;
3003 struct XENA_dev_config __iomem *bar0 = sp->bar0;
	/* Address transaction */
3006 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3007 | MDIO_MMD_DEV_ADDR(mmd_type)
3008 | MDIO_MMS_PRT_ADDR(0x0);
3009 writeq(val64, &bar0->mdio_control);
3010 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3011 writeq(val64, &bar0->mdio_control);
3012 udelay(100);
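	/*
	 * Each MDIO access is two phases: the frame is first latched
	 * into mdio_control, then MDIO_CTRL_START_TRANS(0xE) kicks the
	 * transaction and the serial bus is given ~100us to finish.
	 */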
	/* Data transaction */
3015 val64 = 0x0;
3016 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3017 | MDIO_MMD_DEV_ADDR(mmd_type)
3018 | MDIO_MMS_PRT_ADDR(0x0)
3019 | MDIO_MDIO_DATA(value)
3020 | MDIO_OP(MDIO_OP_WRITE_TRANS);
3021 writeq(val64, &bar0->mdio_control);
3022 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3023 writeq(val64, &bar0->mdio_control);
3024 udelay(100);
3026 val64 = 0x0;
3027 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3028 | MDIO_MMD_DEV_ADDR(mmd_type)
3029 | MDIO_MMS_PRT_ADDR(0x0)
3030 | MDIO_OP(MDIO_OP_READ_TRANS);
3031 writeq(val64, &bar0->mdio_control);
3032 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3033 writeq(val64, &bar0->mdio_control);
3034 udelay(100);
/**
 * s2io_mdio_read - Function to read from the MDIO registers
 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
 * @addr : address value
 * @dev : pointer to net_device structure
 * Description:
 * This function is used to read values from the MDIO registers.
 * Return value: the 16-bit value read from the addressed register.
 */
3047 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3049 u64 val64 = 0x0;
3050 u64 rval64 = 0x0;
3051 struct s2io_nic *sp = dev->priv;
3052 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3054 /* address transaction */
3055 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3056 | MDIO_MMD_DEV_ADDR(mmd_type)
3057 | MDIO_MMS_PRT_ADDR(0x0);
3058 writeq(val64, &bar0->mdio_control);
3059 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3060 writeq(val64, &bar0->mdio_control);
3061 udelay(100);
3063 /* Data transaction */
3064 val64 = 0x0;
3065 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3066 | MDIO_MMD_DEV_ADDR(mmd_type)
3067 | MDIO_MMS_PRT_ADDR(0x0)
3068 | MDIO_OP(MDIO_OP_READ_TRANS);
3069 writeq(val64, &bar0->mdio_control);
3070 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3071 writeq(val64, &bar0->mdio_control);
3072 udelay(100);
3074 /* Read the value from regs */
3075 rval64 = readq(&bar0->mdio_control);
3076 rval64 = rval64 & 0xFFFF0000;
3077 rval64 = rval64 >> 16;
3078 return rval64;
/**
 * s2io_chk_xpak_counter - Function to check the status of the xpak counters
 * @counter : counter value to be updated
 * @regs_stat : per-counter 2-bit state fields
 * @index : index of the 2-bit field for this counter
 * @flag : flag to indicate the status
 * @type : counter type
 * Description:
 * This function is to check the status of the xpak counters value.
 * Return value: NONE
 */
3090 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
3092 u64 mask = 0x3;
3093 u64 val64;
3094 int i;
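	/*
	 * Each XPAK alarm counter owns a 2-bit saturating field in
	 * *regs_stat; the loop below builds the mask that isolates the
	 * field selected by 'index'.
	 */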
	for (i = 0; i < index; i++)
		mask = mask << 0x2;

	if (flag > 0) {
3100 *counter = *counter + 1;
3101 val64 = *regs_stat & mask;
3102 val64 = val64 >> (index * 0x2);
3103 val64 = val64 + 1;
		if (val64 == 3) {
			switch (type) {
			case 1:
				DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
					  "service. Excessive temperatures may "
					  "result in premature transceiver "
					  "failure\n");
				break;
			case 2:
				DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
					  "service. Excessive bias currents may "
					  "indicate imminent laser diode "
					  "failure\n");
				break;
			case 3:
				DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
					  "service. Excessive laser output "
					  "power may saturate far-end "
					  "receiver\n");
				break;
			default:
				DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
					  "type\n");
			}
3130 val64 = 0x0;
3132 val64 = val64 << (index * 0x2);
3133 *regs_stat = (*regs_stat & (~mask)) | (val64);
3135 } else {
3136 *regs_stat = *regs_stat & (~mask);
/**
 * s2io_updt_xpak_counter - Function to update the xpak counters
 * @dev : pointer to net_device struct
 * Description:
 * This function is to update the status of the xpak counters value.
 * Return value: NONE
 */
3147 static void s2io_updt_xpak_counter(struct net_device *dev)
3149 u16 flag = 0x0;
3150 u16 type = 0x0;
3151 u16 val16 = 0x0;
3152 u64 val64 = 0x0;
3153 u64 addr = 0x0;
3155 struct s2io_nic *sp = dev->priv;
3156 struct stat_block *stat_info = sp->mac_control.stats_info;
3158 /* Check the communication with the MDIO slave */
3159 addr = 0x0000;
3160 val64 = 0x0;
3161 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
	if ((val64 == 0xFFFF) || (val64 == 0x0000)) {
3164 DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
3165 "Returned %llx\n", (unsigned long long)val64);
3166 return;
	/* Check for the expected value of 0x2040 at PMA address 0x0000 */
	if (val64 != 0x2040) {
3172 DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
3173 DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
3174 (unsigned long long)val64);
3175 return;
3178 /* Loading the DOM register to MDIO register */
3179 addr = 0xA100;
3180 s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
3181 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3183 /* Reading the Alarm flags */
3184 addr = 0xA070;
3185 val64 = 0x0;
3186 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3188 flag = CHECKBIT(val64, 0x7);
3189 type = 1;
3190 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
3191 &stat_info->xpak_stat.xpak_regs_stat,
3192 0x0, flag, type);
3194 if(CHECKBIT(val64, 0x6))
3195 stat_info->xpak_stat.alarm_transceiver_temp_low++;
3197 flag = CHECKBIT(val64, 0x3);
3198 type = 2;
3199 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
3200 &stat_info->xpak_stat.xpak_regs_stat,
3201 0x2, flag, type);
3203 if(CHECKBIT(val64, 0x2))
3204 stat_info->xpak_stat.alarm_laser_bias_current_low++;
3206 flag = CHECKBIT(val64, 0x1);
3207 type = 3;
3208 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
3209 &stat_info->xpak_stat.xpak_regs_stat,
3210 0x4, flag, type);
3212 if(CHECKBIT(val64, 0x0))
3213 stat_info->xpak_stat.alarm_laser_output_power_low++;
3215 /* Reading the Warning flags */
3216 addr = 0xA074;
3217 val64 = 0x0;
3218 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3220 if(CHECKBIT(val64, 0x7))
3221 stat_info->xpak_stat.warn_transceiver_temp_high++;
3223 if(CHECKBIT(val64, 0x6))
3224 stat_info->xpak_stat.warn_transceiver_temp_low++;
3226 if(CHECKBIT(val64, 0x3))
3227 stat_info->xpak_stat.warn_laser_bias_current_high++;
3229 if(CHECKBIT(val64, 0x2))
3230 stat_info->xpak_stat.warn_laser_bias_current_low++;
3232 if(CHECKBIT(val64, 0x1))
3233 stat_info->xpak_stat.warn_laser_output_power_high++;
3235 if(CHECKBIT(val64, 0x0))
3236 stat_info->xpak_stat.warn_laser_output_power_low++;
/**
 * wait_for_cmd_complete - waits for a command to complete.
 * @addr : address of the register to poll.
 * @busy_bit : bit which indicates that the command is in progress.
 * @bit_state : S2IO_BIT_RESET to wait for the bit to clear,
 * S2IO_BIT_SET to wait for it to be set.
 * Description: Function that waits for a command to Write into RMAC
 * ADDR DATA registers to be completed and returns either success or
 * error depending on whether the command was complete or not.
 * Return value:
 * SUCCESS on success and FAILURE on failure.
 */
3250 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3251 int bit_state)
3253 int ret = FAILURE, cnt = 0, delay = 1;
3254 u64 val64;
3256 if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3257 return FAILURE;
3259 do {
3260 val64 = readq(addr);
3261 if (bit_state == S2IO_BIT_RESET) {
3262 if (!(val64 & busy_bit)) {
3263 ret = SUCCESS;
3264 break;
		} else {
			/* S2IO_BIT_SET: wait for the busy bit to be set */
			if (val64 & busy_bit) {
3268 ret = SUCCESS;
3269 break;
3273 if(in_interrupt())
3274 mdelay(delay);
3275 else
3276 msleep(delay);
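		/*
		 * Poll with backoff: the first ten iterations wait 1 ms
		 * each, the remaining ten wait 50 ms, so the total wait
		 * is bounded at roughly half a second.
		 */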
3278 if (++cnt >= 10)
3279 delay = 50;
3280 } while (cnt < 20);
3281 return ret;
/**
 * check_pci_device_id - Checks if the device id is supported
 * @id : device id
 * Description: Function to check if the pci device id is supported by the driver.
 * Return value: Actual device id if supported else PCI_ANY_ID
 */
3289 static u16 check_pci_device_id(u16 id)
3291 switch (id) {
3292 case PCI_DEVICE_ID_HERC_WIN:
3293 case PCI_DEVICE_ID_HERC_UNI:
3294 return XFRAME_II_DEVICE;
3295 case PCI_DEVICE_ID_S2IO_UNI:
3296 case PCI_DEVICE_ID_S2IO_WIN:
3297 return XFRAME_I_DEVICE;
3298 default:
3299 return PCI_ANY_ID;
/**
 * s2io_reset - Resets the card.
 * @sp : private member of the device structure.
 * Description: Function to Reset the card. This function then also
 * restores the previously saved PCI configuration space registers as
 * the card reset also resets the configuration space.
 * Return value:
 * void.
 */
3313 static void s2io_reset(struct s2io_nic * sp)
3315 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3316 u64 val64;
3317 u16 subid, pci_cmd;
3318 int i;
3319 u16 val16;
3320 unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
3321 unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
3323 DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
3324 __FUNCTION__, sp->dev->name);
	/* Back up the PCI-X CMD reg, don't want to lose MMRBC, OST settings */
3327 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
3329 val64 = SW_RESET_ALL;
3330 writeq(val64, &bar0->sw_reset);
	if (strstr(sp->product_name, "CX4")) {
		msleep(750);
	}
	msleep(250);
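	/*
	 * Until the adapter comes out of reset its config space reads
	 * back garbage, so keep restoring the saved state and re-reading
	 * the device ID until a recognized ID appears.
	 */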
3335 for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
3337 /* Restore the PCI state saved during initialization. */
3338 pci_restore_state(sp->pdev);
3339 pci_read_config_word(sp->pdev, 0x2, &val16);
3340 if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3341 break;
3342 msleep(200);
3345 if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
3346 DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__);
3349 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
3351 s2io_init_pci(sp);
3353 /* Set swapper to enable I/O register access */
3354 s2io_set_swapper(sp);
3356 /* Restore the MSIX table entries from local variables */
3357 restore_xmsi_data(sp);
3359 /* Clear certain PCI/PCI-X fields after reset */
3360 if (sp->device_type == XFRAME_II_DEVICE) {
3361 /* Clear "detected parity error" bit */
3362 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
3364 /* Clearing PCIX Ecc status register */
3365 pci_write_config_dword(sp->pdev, 0x68, 0x7C);
3367 /* Clearing PCI_STATUS error reflected here */
3368 writeq(BIT(62), &bar0->txpic_int_reg);
3371 /* Reset device statistics maintained by OS */
3372 memset(&sp->stats, 0, sizeof (struct net_device_stats));
3374 up_cnt = sp->mac_control.stats_info->sw_stat.link_up_cnt;
3375 down_cnt = sp->mac_control.stats_info->sw_stat.link_down_cnt;
3376 up_time = sp->mac_control.stats_info->sw_stat.link_up_time;
3377 down_time = sp->mac_control.stats_info->sw_stat.link_down_time;
3378 reset_cnt = sp->mac_control.stats_info->sw_stat.soft_reset_cnt;
3379 mem_alloc_cnt = sp->mac_control.stats_info->sw_stat.mem_allocated;
3380 mem_free_cnt = sp->mac_control.stats_info->sw_stat.mem_freed;
3381 watchdog_cnt = sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt;
3382 /* save link up/down time/cnt, reset/memory/watchdog cnt */
3383 memset(sp->mac_control.stats_info, 0, sizeof(struct stat_block));
3384 /* restore link up/down time/cnt, reset/memory/watchdog cnt */
3385 sp->mac_control.stats_info->sw_stat.link_up_cnt = up_cnt;
3386 sp->mac_control.stats_info->sw_stat.link_down_cnt = down_cnt;
3387 sp->mac_control.stats_info->sw_stat.link_up_time = up_time;
3388 sp->mac_control.stats_info->sw_stat.link_down_time = down_time;
3389 sp->mac_control.stats_info->sw_stat.soft_reset_cnt = reset_cnt;
3390 sp->mac_control.stats_info->sw_stat.mem_allocated = mem_alloc_cnt;
3391 sp->mac_control.stats_info->sw_stat.mem_freed = mem_free_cnt;
3392 sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt = watchdog_cnt;
3394 /* SXE-002: Configure link and activity LED to turn it off */
3395 subid = sp->pdev->subsystem_device;
3396 if (((subid & 0xFF) >= 0x07) &&
3397 (sp->device_type == XFRAME_I_DEVICE)) {
3398 val64 = readq(&bar0->gpio_control);
3399 val64 |= 0x0000800000000000ULL;
3400 writeq(val64, &bar0->gpio_control);
3401 val64 = 0x0411040400000000ULL;
3402 writeq(val64, (void __iomem *)bar0 + 0x2700);
	/*
	 * Clear spurious ECC interrupts that would have occurred on
	 * XFRAME II cards after reset.
	 */
3409 if (sp->device_type == XFRAME_II_DEVICE) {
3410 val64 = readq(&bar0->pcc_err_reg);
3411 writeq(val64, &bar0->pcc_err_reg);
3414 /* restore the previously assigned mac address */
3415 s2io_set_mac_addr(sp->dev, (u8 *)&sp->def_mac_addr[0].mac_addr);
3417 sp->device_enabled_once = FALSE;
/**
 * s2io_set_swapper - to set the swapper control on the card
 * @sp : private member of the device structure,
 * pointer to the s2io_nic structure.
 * Description: Function to set the swapper control on the card
 * correctly depending on the 'endianness' of the system.
 * Return value:
 * SUCCESS on success and FAILURE on failure.
 */
3430 static int s2io_set_swapper(struct s2io_nic * sp)
3432 struct net_device *dev = sp->dev;
3433 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3434 u64 val64, valt, valr;
	/*
	 * Set proper endian settings and verify the same by reading
	 * the PIF Feed-back register.
	 */
3441 val64 = readq(&bar0->pif_rd_swapper_fb);
3442 if (val64 != 0x0123456789ABCDEFULL) {
3443 int i = 0;
3444 u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
3445 0x8100008181000081ULL, /* FE=1, SE=0 */
3446 0x4200004242000042ULL, /* FE=0, SE=1 */
3447 0}; /* FE=0, SE=0 */
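		/*
		 * Try each flip-enable/swap-enable (FE/SE) combination in
		 * turn until the PIF feedback register returns the
		 * expected 0x0123456789ABCDEF signature.
		 */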
3449 while(i<4) {
3450 writeq(value[i], &bar0->swapper_ctrl);
3451 val64 = readq(&bar0->pif_rd_swapper_fb);
3452 if (val64 == 0x0123456789ABCDEFULL)
3453 break;
3454 i++;
3456 if (i == 4) {
3457 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3458 dev->name);
3459 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3460 (unsigned long long) val64);
3461 return FAILURE;
3463 valr = value[i];
3464 } else {
3465 valr = readq(&bar0->swapper_ctrl);
3468 valt = 0x0123456789ABCDEFULL;
3469 writeq(valt, &bar0->xmsi_address);
3470 val64 = readq(&bar0->xmsi_address);
3472 if(val64 != valt) {
3473 int i = 0;
3474 u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
3475 0x0081810000818100ULL, /* FE=1, SE=0 */
3476 0x0042420000424200ULL, /* FE=0, SE=1 */
3477 0}; /* FE=0, SE=0 */
3479 while(i<4) {
3480 writeq((value[i] | valr), &bar0->swapper_ctrl);
3481 writeq(valt, &bar0->xmsi_address);
3482 val64 = readq(&bar0->xmsi_address);
3483 if(val64 == valt)
3484 break;
3485 i++;
3487 if(i == 4) {
3488 unsigned long long x = val64;
3489 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
3490 DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
3491 return FAILURE;
3494 val64 = readq(&bar0->swapper_ctrl);
3495 val64 &= 0xFFFF000000000000ULL;
3497 #ifdef __BIG_ENDIAN
	/*
	 * The device is, by default, set to a big endian format, so a
	 * big endian driver need not set anything.
	 */
3502 val64 |= (SWAPPER_CTRL_TXP_FE |
3503 SWAPPER_CTRL_TXP_SE |
3504 SWAPPER_CTRL_TXD_R_FE |
3505 SWAPPER_CTRL_TXD_W_FE |
3506 SWAPPER_CTRL_TXF_R_FE |
3507 SWAPPER_CTRL_RXD_R_FE |
3508 SWAPPER_CTRL_RXD_W_FE |
3509 SWAPPER_CTRL_RXF_W_FE |
3510 SWAPPER_CTRL_XMSI_FE |
3511 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3512 if (sp->config.intr_type == INTA)
3513 val64 |= SWAPPER_CTRL_XMSI_SE;
3514 writeq(val64, &bar0->swapper_ctrl);
3515 #else
	/*
	 * Initially we enable all bits to make it accessible by the
	 * driver, then we selectively enable only those bits that
	 * we want to set.
	 */
3521 val64 |= (SWAPPER_CTRL_TXP_FE |
3522 SWAPPER_CTRL_TXP_SE |
3523 SWAPPER_CTRL_TXD_R_FE |
3524 SWAPPER_CTRL_TXD_R_SE |
3525 SWAPPER_CTRL_TXD_W_FE |
3526 SWAPPER_CTRL_TXD_W_SE |
3527 SWAPPER_CTRL_TXF_R_FE |
3528 SWAPPER_CTRL_RXD_R_FE |
3529 SWAPPER_CTRL_RXD_R_SE |
3530 SWAPPER_CTRL_RXD_W_FE |
3531 SWAPPER_CTRL_RXD_W_SE |
3532 SWAPPER_CTRL_RXF_W_FE |
3533 SWAPPER_CTRL_XMSI_FE |
3534 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3535 if (sp->config.intr_type == INTA)
3536 val64 |= SWAPPER_CTRL_XMSI_SE;
3537 writeq(val64, &bar0->swapper_ctrl);
3538 #endif
3539 val64 = readq(&bar0->swapper_ctrl);
	/*
	 * Verifying if endian settings are accurate by reading a
	 * feedback register.
	 */
3545 val64 = readq(&bar0->pif_rd_swapper_fb);
3546 if (val64 != 0x0123456789ABCDEFULL) {
3547 /* Endian settings are incorrect, calls for another dekko. */
3548 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3549 dev->name);
3550 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3551 (unsigned long long) val64);
3552 return FAILURE;
3555 return SUCCESS;
3558 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3560 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3561 u64 val64;
3562 int ret = 0, cnt = 0;
3564 do {
3565 val64 = readq(&bar0->xmsi_access);
3566 if (!(val64 & BIT(15)))
3567 break;
3568 mdelay(1);
3569 cnt++;
3570 } while(cnt < 5);
3571 if (cnt == 5) {
3572 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3573 ret = 1;
3576 return ret;
3579 static void restore_xmsi_data(struct s2io_nic *nic)
3581 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3582 u64 val64;
3583 int i;
3585 for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3586 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3587 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3588 val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6));
3589 writeq(val64, &bar0->xmsi_access);
3590 if (wait_for_msix_trans(nic, i)) {
3591 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3592 continue;
3597 static void store_xmsi_data(struct s2io_nic *nic)
3599 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3600 u64 val64, addr, data;
3601 int i;
3603 /* Store and display */
3604 for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3605 val64 = (BIT(15) | vBIT(i, 26, 6));
3606 writeq(val64, &bar0->xmsi_access);
3607 if (wait_for_msix_trans(nic, i)) {
3608 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3609 continue;
3611 addr = readq(&bar0->xmsi_address);
3612 data = readq(&bar0->xmsi_data);
3613 if (addr && data) {
3614 nic->msix_info[i].addr = addr;
3615 nic->msix_info[i].data = data;
3620 static int s2io_enable_msi_x(struct s2io_nic *nic)
3622 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3623 u64 tx_mat, rx_mat;
3624 u16 msi_control; /* Temp variable */
3625 int ret, i, j, msix_indx = 1;
3627 nic->entries = kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct msix_entry),
3628 GFP_KERNEL);
3629 if (!nic->entries) {
		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
			  __FUNCTION__);
3632 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3633 return -ENOMEM;
3635 nic->mac_control.stats_info->sw_stat.mem_allocated
3636 += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3638 nic->s2io_entries =
3639 kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct s2io_msix_entry),
3640 GFP_KERNEL);
3641 if (!nic->s2io_entries) {
3642 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3643 __FUNCTION__);
3644 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3645 kfree(nic->entries);
3646 nic->mac_control.stats_info->sw_stat.mem_freed
3647 += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3648 return -ENOMEM;
3650 nic->mac_control.stats_info->sw_stat.mem_allocated
3651 += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3653 for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
3654 nic->entries[i].entry = i;
3655 nic->s2io_entries[i].entry = i;
3656 nic->s2io_entries[i].arg = NULL;
3657 nic->s2io_entries[i].in_use = 0;
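	/*
	 * Steer each Tx FIFO and Rx ring to its own MSI-X vector via
	 * the tx_mat/rx_mat steering registers. msix_indx starts at 1,
	 * leaving vector 0 out of the traffic path.
	 */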
3660 tx_mat = readq(&bar0->tx_mat0_n[0]);
3661 for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) {
3662 tx_mat |= TX_MAT_SET(i, msix_indx);
3663 nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
3664 nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
3665 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3667 writeq(tx_mat, &bar0->tx_mat0_n[0]);
3669 rx_mat = readq(&bar0->rx_mat);
3670 for (j = 0; j < nic->config.rx_ring_num; j++, msix_indx++) {
3671 rx_mat |= RX_MAT_SET(j, msix_indx);
3672 nic->s2io_entries[msix_indx].arg
3673 = &nic->mac_control.rings[j];
3674 nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3675 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3677 writeq(rx_mat, &bar0->rx_mat);
3679 nic->avail_msix_vectors = 0;
3680 ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
	/* We fail init if error or we get fewer vectors than min required */
3682 if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) {
3683 nic->avail_msix_vectors = ret;
3684 ret = pci_enable_msix(nic->pdev, nic->entries, ret);
3686 if (ret) {
3687 DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
3688 kfree(nic->entries);
3689 nic->mac_control.stats_info->sw_stat.mem_freed
3690 += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3691 kfree(nic->s2io_entries);
3692 nic->mac_control.stats_info->sw_stat.mem_freed
3693 += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3694 nic->entries = NULL;
3695 nic->s2io_entries = NULL;
3696 nic->avail_msix_vectors = 0;
3697 return -ENOMEM;
3699 if (!nic->avail_msix_vectors)
3700 nic->avail_msix_vectors = MAX_REQUESTED_MSI_X;
	/*
	 * To enable MSI-X, MSI also needs to be enabled, due to a bug
	 * in the herc NIC. (Temp change, needs to be removed later)
	 */
3706 pci_read_config_word(nic->pdev, 0x42, &msi_control);
3707 msi_control |= 0x1; /* Enable MSI */
3708 pci_write_config_word(nic->pdev, 0x42, msi_control);
3710 return 0;
3713 /* Handle software interrupt used during MSI(X) test */
3714 static irqreturn_t __devinit s2io_test_intr(int irq, void *dev_id)
3716 struct s2io_nic *sp = dev_id;
3718 sp->msi_detected = 1;
3719 wake_up(&sp->msi_wait);
3721 return IRQ_HANDLED;
/* Test interrupt path by forcing a software IRQ */
3725 static int __devinit s2io_test_msi(struct s2io_nic *sp)
3727 struct pci_dev *pdev = sp->pdev;
3728 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3729 int err;
3730 u64 val64, saved64;
3732 err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
3733 sp->name, sp);
3734 if (err) {
3735 DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
3736 sp->dev->name, pci_name(pdev), pdev->irq);
3737 return err;
3740 init_waitqueue_head (&sp->msi_wait);
3741 sp->msi_detected = 0;
3743 saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
3744 val64 |= SCHED_INT_CTRL_ONE_SHOT;
3745 val64 |= SCHED_INT_CTRL_TIMER_EN;
3746 val64 |= SCHED_INT_CTRL_INT2MSI(1);
3747 writeq(val64, &bar0->scheduled_int_ctrl);
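	/*
	 * Arm the one-shot scheduled timer interrupt and route it to
	 * MSI-X vector 1; if s2io_test_intr() doesn't see it within
	 * HZ/10 the caller falls back to INTA.
	 */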
3749 wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);
3751 if (!sp->msi_detected) {
3752 /* MSI(X) test failed, go back to INTx mode */
3753 		DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
3754 "using MSI(X) during test\n", sp->dev->name,
3755 pci_name(pdev));
3757 err = -EOPNOTSUPP;
3760 free_irq(sp->entries[1].vector, sp);
3762 writeq(saved64, &bar0->scheduled_int_ctrl);
3764 return err;
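/*
 * Flow sketch (editorial, using names from the code above): the MSI-X
 * test is a fire-and-wait pattern:
 *
 *	sp->msi_detected = 0;
 *	arm the one-shot scheduled interrupt, routed to vector 1
 *	    (done above via scheduled_int_ctrl);
 *	wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);
 *
 * s2io_test_intr() sets msi_detected and wakes the waiter, so a clear
 * msi_detected after the timeout means the vector never fired.
 */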
3766 /* ********************************************************* *
3767 * Functions defined below concern the OS part of the driver *
3768 * ********************************************************* */
3771 * s2io_open - open entry point of the driver
3772 * @dev : pointer to the device structure.
3773 * Description:
3774 * This function is the open entry point of the driver. It mainly calls a
3775 * function to allocate Rx buffers and inserts them into the buffer
3776 * descriptors and then enables the Rx part of the NIC.
3777 * Return value:
3778 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3779 * file on failure.
3782 static int s2io_open(struct net_device *dev)
3784 struct s2io_nic *sp = dev->priv;
3785 int err = 0;
3788 * Make sure you have link off by default every time
3789 * Nic is initialized
3791 netif_carrier_off(dev);
3792 sp->last_link_state = 0;
3794 napi_enable(&sp->napi);
3796 if (sp->config.intr_type == MSI_X) {
3797 int ret = s2io_enable_msi_x(sp);
3799 if (!ret) {
3800 u16 msi_control;
3802 ret = s2io_test_msi(sp);
3804 /* rollback MSI-X, will re-enable during add_isr() */
3805 kfree(sp->entries);
3806 sp->mac_control.stats_info->sw_stat.mem_freed +=
3807 (MAX_REQUESTED_MSI_X *
3808 sizeof(struct msix_entry));
3809 kfree(sp->s2io_entries);
3810 sp->mac_control.stats_info->sw_stat.mem_freed +=
3811 (MAX_REQUESTED_MSI_X *
3812 sizeof(struct s2io_msix_entry));
3813 sp->entries = NULL;
3814 sp->s2io_entries = NULL;
3816 pci_read_config_word(sp->pdev, 0x42, &msi_control);
3817 msi_control &= 0xFFFE; /* Disable MSI */
3818 pci_write_config_word(sp->pdev, 0x42, msi_control);
3820 pci_disable_msix(sp->pdev);
3823 if (ret) {
3825 DBG_PRINT(ERR_DBG,
3826 "%s: MSI-X requested but failed to enable\n",
3827 dev->name);
3828 sp->config.intr_type = INTA;
3832 /* NAPI doesn't work well with MSI(X) */
3833 if (sp->config.intr_type != INTA) {
3834 		if (sp->config.napi)
3835 sp->config.napi = 0;
3838 /* Initialize H/W and enable interrupts */
3839 err = s2io_card_up(sp);
3840 if (err) {
3841 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3842 dev->name);
3843 goto hw_init_failed;
3846 if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
3847 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3848 s2io_card_down(sp);
3849 err = -ENODEV;
3850 goto hw_init_failed;
3853 netif_start_queue(dev);
3854 return 0;
3856 hw_init_failed:
3857 napi_disable(&sp->napi);
3858 if (sp->config.intr_type == MSI_X) {
3859 if (sp->entries) {
3860 kfree(sp->entries);
3861 sp->mac_control.stats_info->sw_stat.mem_freed
3862 += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3864 if (sp->s2io_entries) {
3865 kfree(sp->s2io_entries);
3866 sp->mac_control.stats_info->sw_stat.mem_freed
3867 += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3870 return err;
3874  * s2io_close - close entry point of the driver
3875 * @dev : device pointer.
3876 * Description:
3877 * This is the stop entry point of the driver. It needs to undo exactly
3878  * whatever was done by the open entry point, thus it's usually referred to
3879  * as the close function. Among other things this function mainly stops the
3880 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3881 * Return value:
3882 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3883 * file on failure.
3886 static int s2io_close(struct net_device *dev)
3888 struct s2io_nic *sp = dev->priv;
3890 netif_stop_queue(dev);
3891 napi_disable(&sp->napi);
3892 /* Reset card, kill tasklet and free Tx and Rx buffers. */
3893 s2io_card_down(sp);
3895 return 0;
3899  * s2io_xmit - Tx entry point of the driver
3900 * @skb : the socket buffer containing the Tx data.
3901 * @dev : device pointer.
3902 * Description :
3903 * This function is the Tx entry point of the driver. S2IO NIC supports
3904 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
3905  * NOTE: when the device cannot queue the packet, just the trans_start
3906  * variable will not be updated.
3907 * Return value:
3908 * 0 on success & 1 on failure.
3911 static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3913 struct s2io_nic *sp = dev->priv;
3914 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
3915 register u64 val64;
3916 struct TxD *txdp;
3917 struct TxFIFO_element __iomem *tx_fifo;
3918 unsigned long flags;
3919 u16 vlan_tag = 0;
3920 int vlan_priority = 0;
3921 struct mac_info *mac_control;
3922 struct config_param *config;
3923 int offload_type;
3924 struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
3926 mac_control = &sp->mac_control;
3927 config = &sp->config;
3929 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
3931 if (unlikely(skb->len <= 0)) {
3932 DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
3933 dev_kfree_skb_any(skb);
3934 return 0;
3937 spin_lock_irqsave(&sp->tx_lock, flags);
3938 if (!is_s2io_card_up(sp)) {
3939 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
3940 dev->name);
3941 spin_unlock_irqrestore(&sp->tx_lock, flags);
3942 dev_kfree_skb(skb);
3943 return 0;
3946 queue = 0;
3947 /* Get Fifo number to Transmit based on vlan priority */
3948 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3949 vlan_tag = vlan_tx_tag_get(skb);
3950 vlan_priority = vlan_tag >> 13;
3951 queue = config->fifo_mapping[vlan_priority];
3954 put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
3955 get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
3956 txdp = (struct TxD *) mac_control->fifos[queue].list_info[put_off].
3957 list_virt_addr;
3959 queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
3960 /* Avoid "put" pointer going beyond "get" pointer */
3961 if (txdp->Host_Control ||
3962 ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
3963 DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
3964 netif_stop_queue(dev);
3965 dev_kfree_skb(skb);
3966 spin_unlock_irqrestore(&sp->tx_lock, flags);
3967 return 0;
3970 offload_type = s2io_offload_type(skb);
3971 if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
3972 txdp->Control_1 |= TXD_TCP_LSO_EN;
3973 txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
3975 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3976 txdp->Control_2 |=
3977 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
3978 TXD_TX_CKO_UDP_EN);
3980 txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
3981 txdp->Control_1 |= TXD_LIST_OWN_XENA;
3982 txdp->Control_2 |= config->tx_intr_type;
3984 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3985 txdp->Control_2 |= TXD_VLAN_ENABLE;
3986 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
3989 frg_len = skb->len - skb->data_len;
3990 if (offload_type == SKB_GSO_UDP) {
3991 int ufo_size;
3993 ufo_size = s2io_udp_mss(skb);
3994 ufo_size &= ~7;
3995 txdp->Control_1 |= TXD_UFO_EN;
3996 txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
3997 txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
3998 #ifdef __BIG_ENDIAN
3999 sp->ufo_in_band_v[put_off] =
4000 (u64)skb_shinfo(skb)->ip6_frag_id;
4001 #else
4002 sp->ufo_in_band_v[put_off] =
4003 (u64)skb_shinfo(skb)->ip6_frag_id << 32;
4004 #endif
4005 txdp->Host_Control = (unsigned long)sp->ufo_in_band_v;
4006 txdp->Buffer_Pointer = pci_map_single(sp->pdev,
4007 sp->ufo_in_band_v,
4008 sizeof(u64), PCI_DMA_TODEVICE);
4009 		if ((txdp->Buffer_Pointer == 0) ||
4010 (txdp->Buffer_Pointer == DMA_ERROR_CODE))
4011 goto pci_map_failed;
4012 txdp++;
4015 txdp->Buffer_Pointer = pci_map_single
4016 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
4017 	if ((txdp->Buffer_Pointer == 0) ||
4018 (txdp->Buffer_Pointer == DMA_ERROR_CODE))
4019 goto pci_map_failed;
4021 txdp->Host_Control = (unsigned long) skb;
4022 txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
4023 if (offload_type == SKB_GSO_UDP)
4024 txdp->Control_1 |= TXD_UFO_EN;
4026 frg_cnt = skb_shinfo(skb)->nr_frags;
4027 /* For fragmented SKB. */
4028 for (i = 0; i < frg_cnt; i++) {
4029 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4030 /* A '0' length fragment will be ignored */
4031 if (!frag->size)
4032 continue;
4033 txdp++;
4034 txdp->Buffer_Pointer = (u64) pci_map_page
4035 (sp->pdev, frag->page, frag->page_offset,
4036 frag->size, PCI_DMA_TODEVICE);
4037 txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
4038 if (offload_type == SKB_GSO_UDP)
4039 txdp->Control_1 |= TXD_UFO_EN;
4041 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
4043 if (offload_type == SKB_GSO_UDP)
4044 frg_cnt++; /* as Txd0 was used for inband header */
4046 tx_fifo = mac_control->tx_FIFO_start[queue];
4047 val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
4048 writeq(val64, &tx_fifo->TxDL_Pointer);
4050 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
4051 TX_FIFO_LAST_LIST);
4052 if (offload_type)
4053 val64 |= TX_FIFO_SPECIAL_FUNC;
4055 writeq(val64, &tx_fifo->List_Control);
4057 mmiowb();
4059 put_off++;
4060 if (put_off == mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1)
4061 put_off = 0;
4062 mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
4064 /* Avoid "put" pointer going beyond "get" pointer */
4065 if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4066 sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
4067 DBG_PRINT(TX_DBG,
4068 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
4069 put_off, get_off);
4070 netif_stop_queue(dev);
4072 mac_control->stats_info->sw_stat.mem_allocated += skb->truesize;
4073 dev->trans_start = jiffies;
4074 spin_unlock_irqrestore(&sp->tx_lock, flags);
4076 return 0;
4077 pci_map_failed:
4078 stats->pci_map_fail_cnt++;
4079 netif_stop_queue(dev);
4080 stats->mem_freed += skb->truesize;
4081 dev_kfree_skb(skb);
4082 spin_unlock_irqrestore(&sp->tx_lock, flags);
4083 return 0;
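/*
 * Editorial sketch (hypothetical helper, not in the driver): the
 * "put" pointer wrap-around used twice in s2io_xmit() is plain
 * modular arithmetic over the descriptor ring.
 */
static inline u16 s2io_txd_next(u16 off, u16 queue_len)
{
	/* queue_len is fifo_len + 1, i.e. the number of slots in the ring */
	return ((off + 1) == queue_len) ? 0 : off + 1;
}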
4086 static void
4087 s2io_alarm_handle(unsigned long data)
4089 struct s2io_nic *sp = (struct s2io_nic *)data;
4090 struct net_device *dev = sp->dev;
4092 s2io_handle_errors(dev);
4093 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4096 static int s2io_chk_rx_buffers(struct s2io_nic *sp, int rng_n)
4098 int rxb_size, level;
4100 if (!sp->lro) {
4101 rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
4102 level = rx_buffer_level(sp, rxb_size, rng_n);
4104 if ((level == PANIC) && (!TASKLET_IN_USE)) {
4105 int ret;
4106 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
4107 DBG_PRINT(INTR_DBG, "PANIC levels\n");
4108 if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
4109 DBG_PRINT(INFO_DBG, "Out of memory in %s",
4110 __FUNCTION__);
4111 clear_bit(0, (&sp->tasklet_status));
4112 return -1;
4114 clear_bit(0, (&sp->tasklet_status));
4115 } else if (level == LOW)
4116 tasklet_schedule(&sp->task);
4118 } else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
4119 DBG_PRINT(INFO_DBG, "%s:Out of memory", sp->dev->name);
4120 DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
4122 return 0;
4125 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4127 struct ring_info *ring = (struct ring_info *)dev_id;
4128 struct s2io_nic *sp = ring->nic;
4130 if (!is_s2io_card_up(sp))
4131 return IRQ_HANDLED;
4133 rx_intr_handler(ring);
4134 s2io_chk_rx_buffers(sp, ring->ring_no);
4136 return IRQ_HANDLED;
4139 static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4141 struct fifo_info *fifo = (struct fifo_info *)dev_id;
4142 struct s2io_nic *sp = fifo->nic;
4144 if (!is_s2io_card_up(sp))
4145 return IRQ_HANDLED;
4147 tx_intr_handler(fifo);
4148 return IRQ_HANDLED;
4150 static void s2io_txpic_intr_handle(struct s2io_nic *sp)
4152 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4153 u64 val64;
4155 val64 = readq(&bar0->pic_int_status);
4156 if (val64 & PIC_INT_GPIO) {
4157 val64 = readq(&bar0->gpio_int_reg);
4158 if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
4159 (val64 & GPIO_INT_REG_LINK_UP)) {
4161 			 * This is an unstable state, so clear both up/down
4162 			 * interrupts and let the adapter re-evaluate the link state.
4164 val64 |= GPIO_INT_REG_LINK_DOWN;
4165 val64 |= GPIO_INT_REG_LINK_UP;
4166 writeq(val64, &bar0->gpio_int_reg);
4167 val64 = readq(&bar0->gpio_int_mask);
4168 val64 &= ~(GPIO_INT_MASK_LINK_UP |
4169 GPIO_INT_MASK_LINK_DOWN);
4170 writeq(val64, &bar0->gpio_int_mask);
4172 else if (val64 & GPIO_INT_REG_LINK_UP) {
4173 val64 = readq(&bar0->adapter_status);
4174 /* Enable Adapter */
4175 val64 = readq(&bar0->adapter_control);
4176 val64 |= ADAPTER_CNTL_EN;
4177 writeq(val64, &bar0->adapter_control);
4178 val64 |= ADAPTER_LED_ON;
4179 writeq(val64, &bar0->adapter_control);
4180 if (!sp->device_enabled_once)
4181 sp->device_enabled_once = 1;
4183 s2io_link(sp, LINK_UP);
4185 * unmask link down interrupt and mask link-up
4186 * intr
4188 val64 = readq(&bar0->gpio_int_mask);
4189 val64 &= ~GPIO_INT_MASK_LINK_DOWN;
4190 val64 |= GPIO_INT_MASK_LINK_UP;
4191 writeq(val64, &bar0->gpio_int_mask);
4193 		} else if (val64 & GPIO_INT_REG_LINK_DOWN) {
4194 val64 = readq(&bar0->adapter_status);
4195 s2io_link(sp, LINK_DOWN);
4196 			/* Link is down so unmask link up interrupt */
4197 val64 = readq(&bar0->gpio_int_mask);
4198 val64 &= ~GPIO_INT_MASK_LINK_UP;
4199 val64 |= GPIO_INT_MASK_LINK_DOWN;
4200 writeq(val64, &bar0->gpio_int_mask);
4202 /* turn off LED */
4203 val64 = readq(&bar0->adapter_control);
4204 			val64 &= ~ADAPTER_LED_ON;
4205 writeq(val64, &bar0->adapter_control);
4208 val64 = readq(&bar0->gpio_int_mask);
4212  * do_s2io_chk_alarm_bit - Check for alarm and increment the counter
4213 * @value: alarm bits
4214 * @addr: address value
4215 * @cnt: counter variable
4216 * Description: Check for alarm and increment the counter
4217 * Return Value:
4218 * 1 - if alarm bit set
4219 * 0 - if alarm bit is not set
4221 int do_s2io_chk_alarm_bit(u64 value, void __iomem * addr,
4222 unsigned long long *cnt)
4224 u64 val64;
4225 val64 = readq(addr);
4226 	if (val64 & value) {
4227 writeq(val64, addr);
4228 (*cnt)++;
4229 return 1;
4231 return 0;
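/*
 * Usage note: the alarm registers are write-one-to-clear, so writing
 * back the value just read acknowledges exactly the bits that were
 * set. A typical call, as used throughout s2io_handle_errors() below:
 *
 *	if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
 *				  &sw_stat->serious_err_cnt))
 *		goto reset;
 */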
4236 * s2io_handle_errors - Xframe error indication handler
4237 * @nic: device private variable
4238 * Description: Handle alarms such as loss of link, single or
4239 * double ECC errors, critical and serious errors.
4240 * Return Value:
4241 * NONE
4243 static void s2io_handle_errors(void * dev_id)
4245 struct net_device *dev = (struct net_device *) dev_id;
4246 struct s2io_nic *sp = dev->priv;
4247 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4248 	u64 temp64 = 0, val64 = 0;
4249 int i = 0;
4251 struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
4252 struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
4254 if (!is_s2io_card_up(sp))
4255 return;
4257 if (pci_channel_offline(sp->pdev))
4258 return;
4260 memset(&sw_stat->ring_full_cnt, 0,
4261 sizeof(sw_stat->ring_full_cnt));
4263 /* Handling the XPAK counters update */
4264 	if (stats->xpak_timer_count < 72000) {
4265 /* waiting for an hour */
4266 stats->xpak_timer_count++;
4267 } else {
4268 s2io_updt_xpak_counter(dev);
4269 /* reset the count to zero */
4270 stats->xpak_timer_count = 0;
4273 /* Handling link status change error Intr */
4274 if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
4275 val64 = readq(&bar0->mac_rmac_err_reg);
4276 writeq(val64, &bar0->mac_rmac_err_reg);
4277 if (val64 & RMAC_LINK_STATE_CHANGE_INT)
4278 schedule_work(&sp->set_link_task);
4281 /* In case of a serious error, the device will be Reset. */
4282 if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
4283 &sw_stat->serious_err_cnt))
4284 goto reset;
4286 /* Check for data parity error */
4287 if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
4288 &sw_stat->parity_err_cnt))
4289 goto reset;
4291 /* Check for ring full counter */
4292 if (sp->device_type == XFRAME_II_DEVICE) {
4293 val64 = readq(&bar0->ring_bump_counter1);
4294 		for (i = 0; i < 4; i++) {
4295 			temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
4296 temp64 >>= 64 - ((i+1)*16);
4297 sw_stat->ring_full_cnt[i] += temp64;
4300 val64 = readq(&bar0->ring_bump_counter2);
4301 		for (i = 0; i < 4; i++) {
4302 			temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
4303 temp64 >>= 64 - ((i+1)*16);
4304 sw_stat->ring_full_cnt[i+4] += temp64;
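	/*
	 * Extraction note: each 64-bit bump counter register packs four
	 * 16-bit counters numbered from the most significant end. vBIT()
	 * builds the MSB-relative mask for field i, and the shift by
	 * 64 - ((i+1)*16) right-justifies it; equivalently,
	 *
	 *	field_i = (u16)(val64 >> (64 - ((i + 1) * 16)));
	 */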
4308 val64 = readq(&bar0->txdma_int_status);
4309 	/* check for pfc_err */
4310 if (val64 & TXDMA_PFC_INT) {
4311 if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM|
4312 PFC_MISC_0_ERR | PFC_MISC_1_ERR|
4313 PFC_PCIX_ERR, &bar0->pfc_err_reg,
4314 &sw_stat->pfc_err_cnt))
4315 goto reset;
4316 do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR, &bar0->pfc_err_reg,
4317 &sw_stat->pfc_err_cnt);
4320 	/* check for tda_err */
4321 if (val64 & TXDMA_TDA_INT) {
4322 		if (do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
4323 TDA_SM1_ERR_ALARM, &bar0->tda_err_reg,
4324 &sw_stat->tda_err_cnt))
4325 goto reset;
4326 do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
4327 &bar0->tda_err_reg, &sw_stat->tda_err_cnt);
4329 	/* check for pcc_err */
4330 if (val64 & TXDMA_PCC_INT) {
4331 if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM
4332 | PCC_N_SERR | PCC_6_COF_OV_ERR
4333 | PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR
4334 | PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR
4335 | PCC_TXB_ECC_DB_ERR, &bar0->pcc_err_reg,
4336 &sw_stat->pcc_err_cnt))
4337 goto reset;
4338 do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
4339 &bar0->pcc_err_reg, &sw_stat->pcc_err_cnt);
4342 	/* check for tti_err */
4343 if (val64 & TXDMA_TTI_INT) {
4344 if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM, &bar0->tti_err_reg,
4345 &sw_stat->tti_err_cnt))
4346 goto reset;
4347 do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
4348 &bar0->tti_err_reg, &sw_stat->tti_err_cnt);
4351 	/* check for lso_err */
4352 if (val64 & TXDMA_LSO_INT) {
4353 if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT
4354 | LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
4355 &bar0->lso_err_reg, &sw_stat->lso_err_cnt))
4356 goto reset;
4357 do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
4358 &bar0->lso_err_reg, &sw_stat->lso_err_cnt);
4361 	/* check for tpa_err */
4362 if (val64 & TXDMA_TPA_INT) {
4363 if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM, &bar0->tpa_err_reg,
4364 &sw_stat->tpa_err_cnt))
4365 goto reset;
4366 do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP, &bar0->tpa_err_reg,
4367 &sw_stat->tpa_err_cnt);
4370 	/* check for sm_err */
4371 if (val64 & TXDMA_SM_INT) {
4372 if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM, &bar0->sm_err_reg,
4373 &sw_stat->sm_err_cnt))
4374 goto reset;
4377 val64 = readq(&bar0->mac_int_status);
4378 if (val64 & MAC_INT_STATUS_TMAC_INT) {
4379 if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
4380 &bar0->mac_tmac_err_reg,
4381 &sw_stat->mac_tmac_err_cnt))
4382 goto reset;
4383 do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR
4384 | TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
4385 &bar0->mac_tmac_err_reg,
4386 &sw_stat->mac_tmac_err_cnt);
4389 val64 = readq(&bar0->xgxs_int_status);
4390 if (val64 & XGXS_INT_STATUS_TXGXS) {
4391 if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
4392 &bar0->xgxs_txgxs_err_reg,
4393 &sw_stat->xgxs_txgxs_err_cnt))
4394 goto reset;
4395 do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
4396 &bar0->xgxs_txgxs_err_reg,
4397 &sw_stat->xgxs_txgxs_err_cnt);
4400 val64 = readq(&bar0->rxdma_int_status);
4401 if (val64 & RXDMA_INT_RC_INT_M) {
4402 if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR
4403 | RC_PRCn_SM_ERR_ALARM |RC_FTC_SM_ERR_ALARM,
4404 &bar0->rc_err_reg, &sw_stat->rc_err_cnt))
4405 goto reset;
4406 do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR
4407 | RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
4408 &sw_stat->rc_err_cnt);
4409 if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn
4410 | PRC_PCI_AB_F_WR_Rn, &bar0->prc_pcix_err_reg,
4411 &sw_stat->prc_pcix_err_cnt))
4412 goto reset;
4413 do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn | PRC_PCI_DP_WR_Rn
4414 | PRC_PCI_DP_F_WR_Rn, &bar0->prc_pcix_err_reg,
4415 &sw_stat->prc_pcix_err_cnt);
4418 if (val64 & RXDMA_INT_RPA_INT_M) {
4419 if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
4420 &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt))
4421 goto reset;
4422 do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
4423 &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt);
4426 if (val64 & RXDMA_INT_RDA_INT_M) {
4427 if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR
4428 | RDA_FRM_ECC_DB_N_AERR | RDA_SM1_ERR_ALARM
4429 | RDA_SM0_ERR_ALARM | RDA_RXD_ECC_DB_SERR,
4430 &bar0->rda_err_reg, &sw_stat->rda_err_cnt))
4431 goto reset;
4432 do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR | RDA_FRM_ECC_SG_ERR
4433 | RDA_MISC_ERR | RDA_PCIX_ERR,
4434 &bar0->rda_err_reg, &sw_stat->rda_err_cnt);
4437 if (val64 & RXDMA_INT_RTI_INT_M) {
4438 if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM, &bar0->rti_err_reg,
4439 &sw_stat->rti_err_cnt))
4440 goto reset;
4441 do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
4442 &bar0->rti_err_reg, &sw_stat->rti_err_cnt);
4445 val64 = readq(&bar0->mac_int_status);
4446 if (val64 & MAC_INT_STATUS_RMAC_INT) {
4447 if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
4448 &bar0->mac_rmac_err_reg,
4449 &sw_stat->mac_rmac_err_cnt))
4450 goto reset;
4451 do_s2io_chk_alarm_bit(RMAC_UNUSED_INT|RMAC_SINGLE_ECC_ERR|
4452 RMAC_DOUBLE_ECC_ERR, &bar0->mac_rmac_err_reg,
4453 &sw_stat->mac_rmac_err_cnt);
4456 val64 = readq(&bar0->xgxs_int_status);
4457 if (val64 & XGXS_INT_STATUS_RXGXS) {
4458 if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
4459 &bar0->xgxs_rxgxs_err_reg,
4460 &sw_stat->xgxs_rxgxs_err_cnt))
4461 goto reset;
4464 val64 = readq(&bar0->mc_int_status);
4465 	if (val64 & MC_INT_STATUS_MC_INT) {
4466 if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR, &bar0->mc_err_reg,
4467 &sw_stat->mc_err_cnt))
4468 goto reset;
4470 /* Handling Ecc errors */
4471 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
4472 writeq(val64, &bar0->mc_err_reg);
4473 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
4474 sw_stat->double_ecc_errs++;
4475 if (sp->device_type != XFRAME_II_DEVICE) {
4477 * Reset XframeI only if critical error
4479 if (val64 &
4480 (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
4481 MC_ERR_REG_MIRI_ECC_DB_ERR_1))
4482 goto reset;
4484 } else
4485 sw_stat->single_ecc_errs++;
4488 return;
4490 reset:
4491 netif_stop_queue(dev);
4492 schedule_work(&sp->rst_timer_task);
4493 sw_stat->soft_reset_cnt++;
4494 return;
4498  * s2io_isr - ISR handler of the device.
4499 * @irq: the irq of the device.
4500 * @dev_id: a void pointer to the dev structure of the NIC.
4501 * Description: This function is the ISR handler of the device. It
4502 * identifies the reason for the interrupt and calls the relevant
4503  * service routines. As a contingency measure, this ISR allocates the
4504 * recv buffers, if their numbers are below the panic value which is
4505 * presently set to 25% of the original number of rcv buffers allocated.
4506 * Return value:
4507 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
4508 * IRQ_NONE: will be returned if interrupt is not from our device
4510 static irqreturn_t s2io_isr(int irq, void *dev_id)
4512 struct net_device *dev = (struct net_device *) dev_id;
4513 struct s2io_nic *sp = dev->priv;
4514 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4515 int i;
4516 u64 reason = 0;
4517 struct mac_info *mac_control;
4518 struct config_param *config;
4520 /* Pretend we handled any irq's from a disconnected card */
4521 if (pci_channel_offline(sp->pdev))
4522 return IRQ_NONE;
4524 if (!is_s2io_card_up(sp))
4525 return IRQ_NONE;
4527 mac_control = &sp->mac_control;
4528 config = &sp->config;
4531 * Identify the cause for interrupt and call the appropriate
4532 	 * interrupt handler. Causes for the interrupt could be:
4533 * 1. Rx of packet.
4534 * 2. Tx complete.
4535 * 3. Link down.
4537 reason = readq(&bar0->general_int_status);
4539 	if (unlikely(reason == S2IO_MINUS_ONE)) {
4540 /* Nothing much can be done. Get out */
4541 return IRQ_HANDLED;
4544 if (reason & (GEN_INTR_RXTRAFFIC |
4545 GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC))
4547 writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4549 if (config->napi) {
4550 if (reason & GEN_INTR_RXTRAFFIC) {
4551 if (likely(netif_rx_schedule_prep(dev,
4552 &sp->napi))) {
4553 __netif_rx_schedule(dev, &sp->napi);
4554 writeq(S2IO_MINUS_ONE,
4555 &bar0->rx_traffic_mask);
4556 } else
4557 writeq(S2IO_MINUS_ONE,
4558 &bar0->rx_traffic_int);
4560 } else {
4562 * rx_traffic_int reg is an R1 register, writing all 1's
4563 * will ensure that the actual interrupt causing bit
4564 			 * gets cleared and hence a read can be avoided.
4566 if (reason & GEN_INTR_RXTRAFFIC)
4567 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4569 for (i = 0; i < config->rx_ring_num; i++)
4570 rx_intr_handler(&mac_control->rings[i]);
4574 * tx_traffic_int reg is an R1 register, writing all 1's
4575 		 * will ensure that the actual interrupt causing bit gets
4576 * cleared and hence a read can be avoided.
4578 if (reason & GEN_INTR_TXTRAFFIC)
4579 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4581 for (i = 0; i < config->tx_fifo_num; i++)
4582 tx_intr_handler(&mac_control->fifos[i]);
4584 if (reason & GEN_INTR_TXPIC)
4585 s2io_txpic_intr_handle(sp);
4588 * Reallocate the buffers from the interrupt handler itself.
4590 if (!config->napi) {
4591 for (i = 0; i < config->rx_ring_num; i++)
4592 s2io_chk_rx_buffers(sp, i);
4594 writeq(sp->general_int_mask, &bar0->general_int_mask);
4595 readl(&bar0->general_int_status);
4597 return IRQ_HANDLED;
4600 else if (!reason) {
4601 /* The interrupt was not raised by us */
4602 return IRQ_NONE;
4605 return IRQ_HANDLED;
4609  * s2io_updt_stats - triggers a one-shot update of the hardware statistics block.
4611 static void s2io_updt_stats(struct s2io_nic *sp)
4613 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4614 u64 val64;
4615 int cnt = 0;
4617 if (is_s2io_card_up(sp)) {
4618 /* Apprx 30us on a 133 MHz bus */
4619 val64 = SET_UPDT_CLICKS(10) |
4620 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4621 writeq(val64, &bar0->stat_cfg);
4622 do {
4623 udelay(100);
4624 val64 = readq(&bar0->stat_cfg);
4625 if (!(val64 & BIT(0)))
4626 break;
4627 cnt++;
4628 if (cnt == 5)
4629 break; /* Updt failed */
4630 		} while (1);
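/*
 * Polling note: the one-shot strobe self-clears bit 0 of stat_cfg once
 * the statistics block has been DMAed into host memory, so the loop
 * above bounds the wait to roughly 5 * 100us before giving up.
 */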
4635 * s2io_get_stats - Updates the device statistics structure.
4636 * @dev : pointer to the device structure.
4637 * Description:
4638 * This function updates the device statistics structure in the s2io_nic
4639 * structure and returns a pointer to the same.
4640 * Return value:
4641 * pointer to the updated net_device_stats structure.
4644 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4646 struct s2io_nic *sp = dev->priv;
4647 struct mac_info *mac_control;
4648 struct config_param *config;
4651 mac_control = &sp->mac_control;
4652 config = &sp->config;
4654 /* Configure Stats for immediate updt */
4655 s2io_updt_stats(sp);
4657 sp->stats.tx_packets =
4658 le32_to_cpu(mac_control->stats_info->tmac_frms);
4659 sp->stats.tx_errors =
4660 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4661 sp->stats.rx_errors =
4662 le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
4663 sp->stats.multicast =
4664 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
4665 sp->stats.rx_length_errors =
4666 le64_to_cpu(mac_control->stats_info->rmac_long_frms);
4668 return (&sp->stats);
4672 * s2io_set_multicast - entry point for multicast address enable/disable.
4673 * @dev : pointer to the device structure
4674 * Description:
4675 * This function is a driver entry point which gets called by the kernel
4676 * whenever multicast addresses must be enabled/disabled. This also gets
4677  * called to set/reset promiscuous mode. Depending on the device flags, we
4678  * determine whether multicast addresses must be enabled or promiscuous mode
4679  * is to be disabled, etc.
4680 * Return value:
4681 * void.
4684 static void s2io_set_multicast(struct net_device *dev)
4686 int i, j, prev_cnt;
4687 struct dev_mc_list *mclist;
4688 struct s2io_nic *sp = dev->priv;
4689 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4690 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4691 0xfeffffffffffULL;
4692 u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
4693 void __iomem *add;
4695 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4696 /* Enable all Multicast addresses */
4697 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4698 &bar0->rmac_addr_data0_mem);
4699 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4700 &bar0->rmac_addr_data1_mem);
4701 val64 = RMAC_ADDR_CMD_MEM_WE |
4702 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4703 RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
4704 writeq(val64, &bar0->rmac_addr_cmd_mem);
4705 /* Wait till command completes */
4706 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4707 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4708 S2IO_BIT_RESET);
4710 sp->m_cast_flg = 1;
4711 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
4712 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4713 /* Disable all Multicast addresses */
4714 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4715 &bar0->rmac_addr_data0_mem);
4716 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4717 &bar0->rmac_addr_data1_mem);
4718 val64 = RMAC_ADDR_CMD_MEM_WE |
4719 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4720 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4721 writeq(val64, &bar0->rmac_addr_cmd_mem);
4722 /* Wait till command completes */
4723 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4724 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4725 S2IO_BIT_RESET);
4727 sp->m_cast_flg = 0;
4728 sp->all_multi_pos = 0;
4731 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4732 /* Put the NIC into promiscuous mode */
4733 add = &bar0->mac_cfg;
4734 val64 = readq(&bar0->mac_cfg);
4735 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
4737 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4738 writel((u32) val64, add);
4739 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4740 writel((u32) (val64 >> 32), (add + 4));
4742 if (vlan_tag_strip != 1) {
4743 val64 = readq(&bar0->rx_pa_cfg);
4744 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
4745 writeq(val64, &bar0->rx_pa_cfg);
4746 vlan_strip_flag = 0;
4749 val64 = readq(&bar0->mac_cfg);
4750 sp->promisc_flg = 1;
4751 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
4752 dev->name);
4753 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
4754 /* Remove the NIC from promiscuous mode */
4755 add = &bar0->mac_cfg;
4756 val64 = readq(&bar0->mac_cfg);
4757 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
4759 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4760 writel((u32) val64, add);
4761 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4762 writel((u32) (val64 >> 32), (add + 4));
4764 if (vlan_tag_strip != 0) {
4765 val64 = readq(&bar0->rx_pa_cfg);
4766 val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
4767 writeq(val64, &bar0->rx_pa_cfg);
4768 vlan_strip_flag = 1;
4771 val64 = readq(&bar0->mac_cfg);
4772 sp->promisc_flg = 0;
4773 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
4774 dev->name);
4777 /* Update individual M_CAST address list */
4778 if ((!sp->m_cast_flg) && dev->mc_count) {
4779 if (dev->mc_count >
4780 (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
4781 DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
4782 dev->name);
4783 DBG_PRINT(ERR_DBG, "can be added, please enable ");
4784 DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
4785 return;
4788 prev_cnt = sp->mc_addr_count;
4789 sp->mc_addr_count = dev->mc_count;
4791 /* Clear out the previous list of Mc in the H/W. */
4792 for (i = 0; i < prev_cnt; i++) {
4793 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4794 &bar0->rmac_addr_data0_mem);
4795 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4796 &bar0->rmac_addr_data1_mem);
4797 val64 = RMAC_ADDR_CMD_MEM_WE |
4798 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4799 RMAC_ADDR_CMD_MEM_OFFSET
4800 (MAC_MC_ADDR_START_OFFSET + i);
4801 writeq(val64, &bar0->rmac_addr_cmd_mem);
4803 			/* Wait till command completes */
4804 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4805 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4806 S2IO_BIT_RESET)) {
4807 DBG_PRINT(ERR_DBG, "%s: Adding ",
4808 dev->name);
4809 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4810 return;
4814 /* Create the new Rx filter list and update the same in H/W. */
4815 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
4816 i++, mclist = mclist->next) {
4817 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
4818 ETH_ALEN);
4819 mac_addr = 0;
4820 for (j = 0; j < ETH_ALEN; j++) {
4821 mac_addr |= mclist->dmi_addr[j];
4822 mac_addr <<= 8;
4824 mac_addr >>= 8;
4825 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4826 &bar0->rmac_addr_data0_mem);
4827 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4828 &bar0->rmac_addr_data1_mem);
4829 val64 = RMAC_ADDR_CMD_MEM_WE |
4830 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4831 RMAC_ADDR_CMD_MEM_OFFSET
4832 (i + MAC_MC_ADDR_START_OFFSET);
4833 writeq(val64, &bar0->rmac_addr_cmd_mem);
4835 			/* Wait till command completes */
4836 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4837 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4838 S2IO_BIT_RESET)) {
4839 DBG_PRINT(ERR_DBG, "%s: Adding ",
4840 dev->name);
4841 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4842 return;
4849 * s2io_set_mac_addr - Programs the Xframe mac address
4850 * @dev : pointer to the device structure.
4851 * @addr: a uchar pointer to the new mac address which is to be set.
4852 * Description : This procedure will program the Xframe to receive
4853 * frames with new Mac Address
4854 * Return value: SUCCESS on success and an appropriate (-)ve integer
4855 * as defined in errno.h file on failure.
4858 static int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
4860 struct s2io_nic *sp = dev->priv;
4861 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4862 register u64 val64, mac_addr = 0;
4863 int i;
4864 u64 old_mac_addr = 0;
4867 * Set the new MAC address as the new unicast filter and reflect this
4868 * change on the device address registered with the OS. It will be
4869 * at offset 0.
4871 for (i = 0; i < ETH_ALEN; i++) {
4872 mac_addr <<= 8;
4873 mac_addr |= addr[i];
4874 old_mac_addr <<= 8;
4875 old_mac_addr |= sp->def_mac_addr[0].mac_addr[i];
4878 	if (0 == mac_addr)
4879 return SUCCESS;
4881 /* Update the internal structure with this new mac address */
4882 	if (mac_addr != old_mac_addr) {
4883 		memset(sp->def_mac_addr[0].mac_addr, 0, ETH_ALEN);
4884 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_addr);
4885 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_addr >> 8);
4886 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_addr >> 16);
4887 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_addr >> 24);
4888 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_addr >> 32);
4889 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_addr >> 40);
4892 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4893 &bar0->rmac_addr_data0_mem);
4895 val64 =
4896 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4897 RMAC_ADDR_CMD_MEM_OFFSET(0);
4898 writeq(val64, &bar0->rmac_addr_cmd_mem);
4899 /* Wait till command completes */
4900 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4901 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET)) {
4902 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
4903 return FAILURE;
4906 return SUCCESS;
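/*
 * Editorial sketch (hypothetical helper, not in the driver): both
 * s2io_set_mac_addr() above and the multicast path in
 * s2io_set_multicast() fold a 6-byte Ethernet address into the low
 * 48 bits of a u64, most significant byte first.
 */
static inline u64 s2io_mac_to_u64(const u8 *addr)
{
	u64 val = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		val = (val << 8) | addr[i];
	return val;
}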
4910 * s2io_ethtool_sset - Sets different link parameters.
4911  * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
4912 * @info: pointer to the structure with parameters given by ethtool to set
4913 * link information.
4914 * Description:
4915 * The function sets different link parameters provided by the user onto
4916 * the NIC.
4917 * Return value:
4918 * 0 on success.
4921 static int s2io_ethtool_sset(struct net_device *dev,
4922 struct ethtool_cmd *info)
4924 struct s2io_nic *sp = dev->priv;
4925 if ((info->autoneg == AUTONEG_ENABLE) ||
4926 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
4927 return -EINVAL;
4928 else {
4929 s2io_close(sp->dev);
4930 s2io_open(sp->dev);
4933 return 0;
4937  * s2io_ethtool_gset - Return link specific information.
4938 * @sp : private member of the device structure, pointer to the
4939 * s2io_nic structure.
4940 * @info : pointer to the structure with parameters given by ethtool
4941 * to return link information.
4942 * Description:
4943 * Returns link specific information like speed, duplex etc.. to ethtool.
4944 * Return value :
4945 * return 0 on success.
4948 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
4950 struct s2io_nic *sp = dev->priv;
4951 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
4952 	info->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
4953 info->port = PORT_FIBRE;
4954 /* info->transceiver?? TODO */
4956 if (netif_carrier_ok(sp->dev)) {
4957 info->speed = 10000;
4958 info->duplex = DUPLEX_FULL;
4959 } else {
4960 info->speed = -1;
4961 info->duplex = -1;
4964 info->autoneg = AUTONEG_DISABLE;
4965 return 0;
4969 * s2io_ethtool_gdrvinfo - Returns driver specific information.
4970 * @sp : private member of the device structure, which is a pointer to the
4971 * s2io_nic structure.
4972 * @info : pointer to the structure with parameters given by ethtool to
4973 * return driver information.
4974 * Description:
4975  * Returns driver specific information like name, version etc. to ethtool.
4976 * Return value:
4977 * void
4980 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
4981 struct ethtool_drvinfo *info)
4983 struct s2io_nic *sp = dev->priv;
4985 strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
4986 strncpy(info->version, s2io_driver_version, sizeof(info->version));
4987 strncpy(info->fw_version, "", sizeof(info->fw_version));
4988 strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
4989 info->regdump_len = XENA_REG_SPACE;
4990 info->eedump_len = XENA_EEPROM_SPACE;
4994  * s2io_ethtool_gregs - dumps the entire register space of Xframe into the buffer.
4995 * @sp: private member of the device structure, which is a pointer to the
4996 * s2io_nic structure.
4997 * @regs : pointer to the structure with parameters given by ethtool for
4998 * dumping the registers.
4999  * @reg_space: The input argument into which all the registers are dumped.
5000 * Description:
5001 * Dumps the entire register space of xFrame NIC into the user given
5002 * buffer area.
5003 * Return value :
5004 * void .
5007 static void s2io_ethtool_gregs(struct net_device *dev,
5008 struct ethtool_regs *regs, void *space)
5010 int i;
5011 u64 reg;
5012 u8 *reg_space = (u8 *) space;
5013 struct s2io_nic *sp = dev->priv;
5015 regs->len = XENA_REG_SPACE;
5016 regs->version = sp->pdev->subsystem_device;
5018 for (i = 0; i < regs->len; i += 8) {
5019 reg = readq(sp->bar0 + i);
5020 memcpy((reg_space + i), &reg, 8);
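/*
 * Dump note: BAR0 is walked in 64-bit strides; XENA_REG_SPACE is the
 * size of the (u64-aligned) register map, so the 8-byte steps cover it
 * exactly. readq() keeps the PCI byte-order handling in one place and
 * memcpy() stores the host-order value into the user buffer.
 */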
5025 * s2io_phy_id - timer function that alternates adapter LED.
5026 * @data : address of the private member of the device structure, which
5027  * is a pointer to the s2io_nic structure, provided as a u32.
5028 * Description: This is actually the timer function that alternates the
5029 * adapter LED bit of the adapter control bit to set/reset every time on
5030  * invocation. The timer is set for 1/2 a second, hence the NIC blinks
5031 * once every second.
5033 static void s2io_phy_id(unsigned long data)
5035 struct s2io_nic *sp = (struct s2io_nic *) data;
5036 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5037 u64 val64 = 0;
5038 u16 subid;
5040 subid = sp->pdev->subsystem_device;
5041 if ((sp->device_type == XFRAME_II_DEVICE) ||
5042 ((subid & 0xFF) >= 0x07)) {
5043 val64 = readq(&bar0->gpio_control);
5044 val64 ^= GPIO_CTRL_GPIO_0;
5045 writeq(val64, &bar0->gpio_control);
5046 } else {
5047 val64 = readq(&bar0->adapter_control);
5048 val64 ^= ADAPTER_LED_ON;
5049 writeq(val64, &bar0->adapter_control);
5052 mod_timer(&sp->id_timer, jiffies + HZ / 2);
5056 * s2io_ethtool_idnic - To physically identify the nic on the system.
5057 * @sp : private member of the device structure, which is a pointer to the
5058 * s2io_nic structure.
5059 * @id : pointer to the structure with identification parameters given by
5060 * ethtool.
5061 * Description: Used to physically identify the NIC on the system.
5062 * The Link LED will blink for a time specified by the user for
5063 * identification.
5064  * NOTE: The Link has to be Up to be able to blink the LED. Hence
5065  * identification is possible only if its link is up.
5066  * Return value:
5067  * int, returns 0 on success
5070 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
5072 u64 val64 = 0, last_gpio_ctrl_val;
5073 struct s2io_nic *sp = dev->priv;
5074 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5075 u16 subid;
5077 subid = sp->pdev->subsystem_device;
5078 last_gpio_ctrl_val = readq(&bar0->gpio_control);
5079 if ((sp->device_type == XFRAME_I_DEVICE) &&
5080 ((subid & 0xFF) < 0x07)) {
5081 val64 = readq(&bar0->adapter_control);
5082 if (!(val64 & ADAPTER_CNTL_EN)) {
5083 printk(KERN_ERR
5084 "Adapter Link down, cannot blink LED\n");
5085 return -EFAULT;
5088 if (sp->id_timer.function == NULL) {
5089 init_timer(&sp->id_timer);
5090 sp->id_timer.function = s2io_phy_id;
5091 sp->id_timer.data = (unsigned long) sp;
5093 mod_timer(&sp->id_timer, jiffies);
5094 if (data)
5095 msleep_interruptible(data * HZ);
5096 else
5097 msleep_interruptible(MAX_FLICKER_TIME);
5098 del_timer_sync(&sp->id_timer);
5100 if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
5101 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
5102 last_gpio_ctrl_val = readq(&bar0->gpio_control);
5105 return 0;
5108 static void s2io_ethtool_gringparam(struct net_device *dev,
5109 struct ethtool_ringparam *ering)
5111 struct s2io_nic *sp = dev->priv;
5112 	int i, tx_desc_count = 0, rx_desc_count = 0;
5114 if (sp->rxd_mode == RXD_MODE_1)
5115 ering->rx_max_pending = MAX_RX_DESC_1;
5116 else if (sp->rxd_mode == RXD_MODE_3B)
5117 ering->rx_max_pending = MAX_RX_DESC_2;
5119 ering->tx_max_pending = MAX_TX_DESC;
5120 for (i = 0 ; i < sp->config.tx_fifo_num ; i++)
5121 tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5123 	DBG_PRINT(INFO_DBG, "\nmax txds : %d\n", sp->config.max_txds);
5124 ering->tx_pending = tx_desc_count;
5125 rx_desc_count = 0;
5126 for (i = 0 ; i < sp->config.rx_ring_num ; i++)
5127 rx_desc_count += sp->config.rx_cfg[i].num_rxd;
5129 ering->rx_pending = rx_desc_count;
5131 ering->rx_mini_max_pending = 0;
5132 ering->rx_mini_pending = 0;
5133 	if (sp->rxd_mode == RXD_MODE_1)
5134 ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5135 else if (sp->rxd_mode == RXD_MODE_3B)
5136 ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5137 ering->rx_jumbo_pending = rx_desc_count;
5141  * s2io_ethtool_getpause_data - Pause frame generation and reception.
5142 * @sp : private member of the device structure, which is a pointer to the
5143 * s2io_nic structure.
5144 * @ep : pointer to the structure with pause parameters given by ethtool.
5145 * Description:
5146 * Returns the Pause frame generation and reception capability of the NIC.
5147 * Return value:
5148 * void
5150 static void s2io_ethtool_getpause_data(struct net_device *dev,
5151 struct ethtool_pauseparam *ep)
5153 u64 val64;
5154 struct s2io_nic *sp = dev->priv;
5155 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5157 val64 = readq(&bar0->rmac_pause_cfg);
5158 if (val64 & RMAC_PAUSE_GEN_ENABLE)
5159 ep->tx_pause = TRUE;
5160 if (val64 & RMAC_PAUSE_RX_ENABLE)
5161 ep->rx_pause = TRUE;
5162 ep->autoneg = FALSE;
5166 * s2io_ethtool_setpause_data - set/reset pause frame generation.
5167 * @sp : private member of the device structure, which is a pointer to the
5168 * s2io_nic structure.
5169 * @ep : pointer to the structure with pause parameters given by ethtool.
5170 * Description:
5171 * It can be used to set or reset Pause frame generation or reception
5172 * support of the NIC.
5173 * Return value:
5174 * int, returns 0 on Success
5177 static int s2io_ethtool_setpause_data(struct net_device *dev,
5178 struct ethtool_pauseparam *ep)
5180 u64 val64;
5181 struct s2io_nic *sp = dev->priv;
5182 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5184 val64 = readq(&bar0->rmac_pause_cfg);
5185 if (ep->tx_pause)
5186 val64 |= RMAC_PAUSE_GEN_ENABLE;
5187 else
5188 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5189 if (ep->rx_pause)
5190 val64 |= RMAC_PAUSE_RX_ENABLE;
5191 else
5192 val64 &= ~RMAC_PAUSE_RX_ENABLE;
5193 writeq(val64, &bar0->rmac_pause_cfg);
5194 return 0;
5198 * read_eeprom - reads 4 bytes of data from user given offset.
5199 * @sp : private member of the device structure, which is a pointer to the
5200 * s2io_nic structure.
5201  * @off : offset from which the data is to be read
5202  * @data : It's an output parameter where the data read at the given
5203  * offset is stored.
5204 * Description:
5205 * Will read 4 bytes of data from the user given offset and return the
5206 * read data.
5207  * NOTE: Only the part of the EEPROM visible through the I2C bus
5208  * can be read.
5209 * Return value:
5210 * -1 on failure and 0 on success.
5213 #define S2IO_DEV_ID 5
5214 static int read_eeprom(struct s2io_nic * sp, int off, u64 * data)
5216 int ret = -1;
5217 u32 exit_cnt = 0;
5218 u64 val64;
5219 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5221 if (sp->device_type == XFRAME_I_DEVICE) {
5222 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
5223 I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
5224 I2C_CONTROL_CNTL_START;
5225 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5227 while (exit_cnt < 5) {
5228 val64 = readq(&bar0->i2c_control);
5229 if (I2C_CONTROL_CNTL_END(val64)) {
5230 *data = I2C_CONTROL_GET_DATA(val64);
5231 ret = 0;
5232 break;
5234 msleep(50);
5235 exit_cnt++;
5239 if (sp->device_type == XFRAME_II_DEVICE) {
5240 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5241 SPI_CONTROL_BYTECNT(0x3) |
5242 SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
5243 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5244 val64 |= SPI_CONTROL_REQ;
5245 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5246 while (exit_cnt < 5) {
5247 val64 = readq(&bar0->spi_control);
5248 if (val64 & SPI_CONTROL_NACK) {
5249 ret = 1;
5250 break;
5251 } else if (val64 & SPI_CONTROL_DONE) {
5252 *data = readq(&bar0->spi_data);
5253 *data &= 0xffffff;
5254 ret = 0;
5255 break;
5257 msleep(50);
5258 exit_cnt++;
5261 return ret;
5265 * write_eeprom - actually writes the relevant part of the data value.
5266 * @sp : private member of the device structure, which is a pointer to the
5267 * s2io_nic structure.
5268 * @off : offset at which the data must be written
5269 * @data : The data that is to be written
5270 * @cnt : Number of bytes of the data that are actually to be written into
5271 * the Eeprom. (max of 3)
5272 * Description:
5273 * Actually writes the relevant part of the data value into the Eeprom
5274 * through the I2C bus.
5275 * Return value:
5276 * 0 on success, -1 on failure.
5279 static int write_eeprom(struct s2io_nic * sp, int off, u64 data, int cnt)
5281 int exit_cnt = 0, ret = -1;
5282 u64 val64;
5283 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5285 if (sp->device_type == XFRAME_I_DEVICE) {
5286 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
5287 I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
5288 I2C_CONTROL_CNTL_START;
5289 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5291 while (exit_cnt < 5) {
5292 val64 = readq(&bar0->i2c_control);
5293 if (I2C_CONTROL_CNTL_END(val64)) {
5294 if (!(val64 & I2C_CONTROL_NACK))
5295 ret = 0;
5296 break;
5298 msleep(50);
5299 exit_cnt++;
5303 if (sp->device_type == XFRAME_II_DEVICE) {
5304 int write_cnt = (cnt == 8) ? 0 : cnt;
5305 writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);
5307 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5308 SPI_CONTROL_BYTECNT(write_cnt) |
5309 SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
5310 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5311 val64 |= SPI_CONTROL_REQ;
5312 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5313 while (exit_cnt < 5) {
5314 val64 = readq(&bar0->spi_control);
5315 if (val64 & SPI_CONTROL_NACK) {
5316 ret = 1;
5317 break;
5318 } else if (val64 & SPI_CONTROL_DONE) {
5319 ret = 0;
5320 break;
5322 msleep(50);
5323 exit_cnt++;
5326 return ret;
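/*
 * Usage sketch: the offline self-test below pairs these helpers as
 * write-then-read-back (offsets as used in s2io_eeprom_test()):
 *
 *	if (write_eeprom(sp, 0x4F0, 0x012345, 3))
 *		fail = 1;
 *	if (read_eeprom(sp, 0x4F0, &ret_data))
 *		fail = 1;
 */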
5328 static void s2io_vpd_read(struct s2io_nic *nic)
5330 u8 *vpd_data;
5331 u8 data;
5332 	int i = 0, cnt, fail = 0;
5333 int vpd_addr = 0x80;
5335 if (nic->device_type == XFRAME_II_DEVICE) {
5336 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5337 vpd_addr = 0x80;
5339 else {
5340 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5341 vpd_addr = 0x50;
5343 strcpy(nic->serial_num, "NOT AVAILABLE");
5345 vpd_data = kmalloc(256, GFP_KERNEL);
5346 if (!vpd_data) {
5347 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
5348 return;
5350 nic->mac_control.stats_info->sw_stat.mem_allocated += 256;
5352 	for (i = 0; i < 256; i += 4) {
5353 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5354 pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
5355 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
5356 		for (cnt = 0; cnt < 5; cnt++) {
5357 msleep(2);
5358 pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5359 if (data == 0x80)
5360 break;
5362 if (cnt >= 5) {
5363 DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5364 fail = 1;
5365 break;
5367 pci_read_config_dword(nic->pdev, (vpd_addr + 4),
5368 (u32 *)&vpd_data[i]);
5371 	if (!fail) {
5372 /* read serial number of adapter */
5373 for (cnt = 0; cnt < 256; cnt++) {
5374 if ((vpd_data[cnt] == 'S') &&
5375 (vpd_data[cnt+1] == 'N') &&
5376 (vpd_data[cnt+2] < VPD_STRING_LEN)) {
5377 memset(nic->serial_num, 0, VPD_STRING_LEN);
5378 memcpy(nic->serial_num, &vpd_data[cnt + 3],
5379 vpd_data[cnt+2]);
5380 break;
5385 if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
5386 memset(nic->product_name, 0, vpd_data[1]);
5387 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
5389 kfree(vpd_data);
5390 nic->mac_control.stats_info->sw_stat.mem_freed += 256;
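/*
 * VPD note: the scan above follows the standard VPD keyword layout,
 * a two-byte keyword ("SN") followed by a one-byte length and the
 * data itself; the length is checked against VPD_STRING_LEN before
 * the serial number is copied out.
 */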
5394 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
5395  * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
5396 * @eeprom : pointer to the user level structure provided by ethtool,
5397 * containing all relevant information.
5398 * @data_buf : user defined value to be written into Eeprom.
5399 * Description: Reads the values stored in the Eeprom at given offset
5400  * for a given length. Stores these values in the input argument data
5401  * buffer 'data_buf' and returns them to the caller (ethtool).
5402 * Return value:
5403 * int 0 on success
5406 static int s2io_ethtool_geeprom(struct net_device *dev,
5407 struct ethtool_eeprom *eeprom, u8 * data_buf)
5409 u32 i, valid;
5410 u64 data;
5411 struct s2io_nic *sp = dev->priv;
5413 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5415 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5416 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5418 for (i = 0; i < eeprom->len; i += 4) {
5419 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5420 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5421 return -EFAULT;
5423 valid = INV(data);
5424 memcpy((data_buf + i), &valid, 4);
5426 return 0;
5430 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5431 * @sp : private member of the device structure, which is a pointer to the
5432 * s2io_nic structure.
5433 * @eeprom : pointer to the user level structure provided by ethtool,
5434 * containing all relevant information.
5435  * @data_buf : user defined value to be written into Eeprom.
5436 * Description:
5437 * Tries to write the user provided value in the Eeprom, at the offset
5438 * given by the user.
5439 * Return value:
5440 * 0 on success, -EFAULT on failure.
5443 static int s2io_ethtool_seeprom(struct net_device *dev,
5444 struct ethtool_eeprom *eeprom,
5445 u8 * data_buf)
5447 int len = eeprom->len, cnt = 0;
5448 u64 valid = 0, data;
5449 struct s2io_nic *sp = dev->priv;
5451 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5452 DBG_PRINT(ERR_DBG,
5453 "ETHTOOL_WRITE_EEPROM Err: Magic value ");
5454 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
5455 eeprom->magic);
5456 return -EFAULT;
5459 while (len) {
5460 data = (u32) data_buf[cnt] & 0x000000FF;
5461 if (data) {
5462 valid = (u32) (data << 24);
5463 } else
5464 valid = data;
5466 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5467 DBG_PRINT(ERR_DBG,
5468 "ETHTOOL_WRITE_EEPROM Err: Cannot ");
5469 DBG_PRINT(ERR_DBG,
5470 "write into the specified offset\n");
5471 return -EFAULT;
5473 cnt++;
5474 len--;
5477 return 0;
5481 * s2io_register_test - reads and writes into all clock domains.
5482 * @sp : private member of the device structure, which is a pointer to the
5483 * s2io_nic structure.
5484  * @data : variable that returns the result of each of the tests conducted
5485  * by the driver.
5486 * Description:
5487  * Read and write into all clock domains. The NIC has 3 clock domains;
5488  * verify that registers in all three regions are accessible.
5489 * Return value:
5490 * 0 on success.
5493 static int s2io_register_test(struct s2io_nic * sp, uint64_t * data)
5495 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5496 u64 val64 = 0, exp_val;
5497 int fail = 0;
5499 val64 = readq(&bar0->pif_rd_swapper_fb);
5500 if (val64 != 0x123456789abcdefULL) {
5501 fail = 1;
5502 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
5505 val64 = readq(&bar0->rmac_pause_cfg);
5506 if (val64 != 0xc000ffff00000000ULL) {
5507 fail = 1;
5508 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
5511 val64 = readq(&bar0->rx_queue_cfg);
5512 if (sp->device_type == XFRAME_II_DEVICE)
5513 exp_val = 0x0404040404040404ULL;
5514 else
5515 exp_val = 0x0808080808080808ULL;
5516 if (val64 != exp_val) {
5517 fail = 1;
5518 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
5521 val64 = readq(&bar0->xgxs_efifo_cfg);
5522 if (val64 != 0x000000001923141EULL) {
5523 fail = 1;
5524 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
5527 val64 = 0x5A5A5A5A5A5A5A5AULL;
5528 writeq(val64, &bar0->xmsi_data);
5529 val64 = readq(&bar0->xmsi_data);
5530 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5531 fail = 1;
5532 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
5535 val64 = 0xA5A5A5A5A5A5A5A5ULL;
5536 writeq(val64, &bar0->xmsi_data);
5537 val64 = readq(&bar0->xmsi_data);
5538 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5539 fail = 1;
5540 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
5543 *data = fail;
5544 return fail;
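/*
 * Pattern note: 0x5A (01011010) and 0xA5 (10100101) are bitwise
 * complements, so the two write/read-back passes above together drive
 * every data bit of the xmsi_data register both high and low.
 */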
5548  * s2io_eeprom_test - to verify that EEPROM in the xena can be programmed.
5549 * @sp : private member of the device structure, which is a pointer to the
5550 * s2io_nic structure.
5551  * @data: variable that returns the result of each of the tests conducted by
5552 * the driver.
5553 * Description:
5554 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
5555 * register.
5556 * Return value:
5557 * 0 on success.
5560 static int s2io_eeprom_test(struct s2io_nic * sp, uint64_t * data)
5562 int fail = 0;
5563 u64 ret_data, org_4F0, org_7F0;
5564 u8 saved_4F0 = 0, saved_7F0 = 0;
5565 struct net_device *dev = sp->dev;
5567 /* Test Write Error at offset 0 */
5568 	/* Note that the SPI interface allows write access to all areas
5569 	 * of the EEPROM, hence negative testing is done only for Xframe I.
5571 if (sp->device_type == XFRAME_I_DEVICE)
5572 if (!write_eeprom(sp, 0, 0, 3))
5573 fail = 1;
5575 /* Save current values at offsets 0x4F0 and 0x7F0 */
5576 if (!read_eeprom(sp, 0x4F0, &org_4F0))
5577 saved_4F0 = 1;
5578 if (!read_eeprom(sp, 0x7F0, &org_7F0))
5579 saved_7F0 = 1;
5581 /* Test Write at offset 4f0 */
5582 if (write_eeprom(sp, 0x4F0, 0x012345, 3))
5583 fail = 1;
5584 if (read_eeprom(sp, 0x4F0, &ret_data))
5585 fail = 1;
5587 if (ret_data != 0x012345) {
5588 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
5589 "Data written %llx Data read %llx\n",
5590 dev->name, (unsigned long long)0x12345,
5591 (unsigned long long)ret_data);
5592 fail = 1;
5595 	/* Reset the EEPROM data to 0xFFFFFF */
5596 write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
5598 /* Test Write Request Error at offset 0x7c */
5599 if (sp->device_type == XFRAME_I_DEVICE)
5600 if (!write_eeprom(sp, 0x07C, 0, 3))
5601 fail = 1;
5603 /* Test Write Request at offset 0x7f0 */
5604 if (write_eeprom(sp, 0x7F0, 0x012345, 3))
5605 fail = 1;
5606 if (read_eeprom(sp, 0x7F0, &ret_data))
5607 fail = 1;
5609 if (ret_data != 0x012345) {
5610 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
5611 "Data written %llx Data read %llx\n",
5612 dev->name, (unsigned long long)0x12345,
5613 (unsigned long long)ret_data);
5614 fail = 1;
5617 	/* Reset the EEPROM data to 0xFFFFFF */
5618 write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
5620 if (sp->device_type == XFRAME_I_DEVICE) {
5621 /* Test Write Error at offset 0x80 */
5622 if (!write_eeprom(sp, 0x080, 0, 3))
5623 fail = 1;
5625 /* Test Write Error at offset 0xfc */
5626 if (!write_eeprom(sp, 0x0FC, 0, 3))
5627 fail = 1;
5629 /* Test Write Error at offset 0x100 */
5630 if (!write_eeprom(sp, 0x100, 0, 3))
5631 fail = 1;
5633 /* Test Write Error at offset 4ec */
5634 if (!write_eeprom(sp, 0x4EC, 0, 3))
5635 fail = 1;
5638 /* Restore values at offsets 0x4F0 and 0x7F0 */
5639 if (saved_4F0)
5640 write_eeprom(sp, 0x4F0, org_4F0, 3);
5641 if (saved_7F0)
5642 write_eeprom(sp, 0x7F0, org_7F0, 3);
5644 *data = fail;
5645 return fail;
5649 * s2io_bist_test - invokes the MemBist test of the card.
5650 * @sp : private member of the device structure, which is a pointer to the
5651 * s2io_nic structure.
5652 * @data: variable that returns the result of each of the test conducted by
5653 * the driver.
5654 * Description:
5655 * This invokes the MemBist test of the card. We give around
5656 * 2 seconds for the test to complete. If it is still not complete
5657 * within this period, we consider that the test failed.
5658 * Return value:
5659 * 0 on success and -1 on failure.
5662 static int s2io_bist_test(struct s2io_nic * sp, uint64_t * data)
5664 u8 bist = 0;
5665 int cnt = 0, ret = -1;
5667 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5668 bist |= PCI_BIST_START;
5669 pci_write_config_byte(sp->pdev, PCI_BIST, bist); /* PCI_BIST is an 8-bit register */
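	/*
	 * PCI_BIST_START (bit 6 of the BIST register) self-clears when the
	 * test completes; poll it for up to 2 seconds (20 x 100ms). The low
	 * four bits (PCI_BIST_CODE_MASK) then hold the completion code,
	 * zero meaning pass.
	 */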
5671 while (cnt < 20) {
5672 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5673 if (!(bist & PCI_BIST_START)) {
5674 *data = (bist & PCI_BIST_CODE_MASK);
5675 ret = 0;
5676 break;
5678 msleep(100);
5679 cnt++;
5682 return ret;
5686 * s2io_link_test - verifies the link state of the nic
5687 * @sp : private member of the device structure, which is a pointer to the
5688 * s2io_nic structure.
5689 * @data: variable that returns the result of each of the test conducted by
5690 * the driver.
5691 * Description:
5692 * The function verifies the link state of the NIC and updates the input
5693 * argument 'data' appropriately.
5694 * Return value:
5695 * 0 on success.
5698 static int s2io_link_test(struct s2io_nic * sp, uint64_t * data)
5700 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5701 u64 val64;
5703 val64 = readq(&bar0->adapter_status);
5704 if(!(LINK_IS_UP(val64)))
5705 *data = 1;
5706 else
5707 *data = 0;
5709 return *data;
5713 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
5714 * @sp : private member of the device structure, which is a pointer to the
5715 * s2io_nic structure.
5716 * @data : variable that returns the result of each of the test
5717 * conducted by the driver.
5718 * Description:
5719 * This is one of the offline tests that checks read and write
5720 * access to the RldRam chip on the NIC.
5721 * Return value:
5722 * 0 on success.
5725 static int s2io_rldram_test(struct s2io_nic * sp, uint64_t * data)
5727 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5728 u64 val64;
5729 int cnt, iteration = 0, test_fail = 0;
5731 val64 = readq(&bar0->adapter_control);
5732 val64 &= ~ADAPTER_ECC_EN;
5733 writeq(val64, &bar0->adapter_control);
5735 val64 = readq(&bar0->mc_rldram_test_ctrl);
5736 val64 |= MC_RLDRAM_TEST_MODE;
5737 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5739 val64 = readq(&bar0->mc_rldram_mrs);
5740 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
5741 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
5743 val64 |= MC_RLDRAM_MRS_ENABLE;
5744 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
5746 while (iteration < 2) {
5747 val64 = 0x55555555aaaa0000ULL;
5748 if (iteration == 1) {
5749 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5751 writeq(val64, &bar0->mc_rldram_test_d0);
5753 val64 = 0xaaaa5a5555550000ULL;
5754 if (iteration == 1) {
5755 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5757 writeq(val64, &bar0->mc_rldram_test_d1);
5759 val64 = 0x55aaaaaaaa5a0000ULL;
5760 if (iteration == 1) {
5761 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5763 writeq(val64, &bar0->mc_rldram_test_d2);
5765 val64 = (u64) (0x0000003ffffe0100ULL);
5766 writeq(val64, &bar0->mc_rldram_test_add);
5768 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
5769 MC_RLDRAM_TEST_GO;
5770 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5772 for (cnt = 0; cnt < 5; cnt++) {
5773 val64 = readq(&bar0->mc_rldram_test_ctrl);
5774 if (val64 & MC_RLDRAM_TEST_DONE)
5775 break;
5776 msleep(200);
5779 if (cnt == 5)
5780 break;
5782 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
5783 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5785 for (cnt = 0; cnt < 5; cnt++) {
5786 val64 = readq(&bar0->mc_rldram_test_ctrl);
5787 if (val64 & MC_RLDRAM_TEST_DONE)
5788 break;
5789 msleep(500);
5792 if (cnt == 5)
5793 break;
5795 val64 = readq(&bar0->mc_rldram_test_ctrl);
5796 if (!(val64 & MC_RLDRAM_TEST_PASS))
5797 test_fail = 1;
5799 iteration++;
5802 *data = test_fail;
5804 /* Bring the adapter out of test mode */
5805 SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
5807 return test_fail;
5811 * s2io_ethtool_test - conducts five tests to determine the health of the card.
5812 * @sp : private member of the device structure, which is a pointer to the
5813 * s2io_nic structure.
5814 * @ethtest : pointer to a ethtool command specific structure that will be
5815 * returned to the user.
5816 * @data : variable that returns the result of each of the test
5817 * conducted by the driver.
5818 * Description:
5819 * This function conducts five tests (four offline and one online) to
5820 * determine the health of the card.
5821 * Return value:
5822 * void
5825 static void s2io_ethtool_test(struct net_device *dev,
5826 struct ethtool_test *ethtest,
5827 uint64_t * data)
5829 struct s2io_nic *sp = dev->priv;
5830 int orig_state = netif_running(sp->dev);
5832 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
5833 /* Offline Tests. */
5834 if (orig_state)
5835 s2io_close(sp->dev);
5837 if (s2io_register_test(sp, &data[0]))
5838 ethtest->flags |= ETH_TEST_FL_FAILED;
5840 s2io_reset(sp);
5842 if (s2io_rldram_test(sp, &data[3]))
5843 ethtest->flags |= ETH_TEST_FL_FAILED;
5845 s2io_reset(sp);
5847 if (s2io_eeprom_test(sp, &data[1]))
5848 ethtest->flags |= ETH_TEST_FL_FAILED;
5850 if (s2io_bist_test(sp, &data[4]))
5851 ethtest->flags |= ETH_TEST_FL_FAILED;
5853 if (orig_state)
5854 s2io_open(sp->dev);
5856 data[2] = 0;
5857 } else {
5858 /* Online Tests. */
5859 if (!orig_state) {
5860 DBG_PRINT(ERR_DBG,
5861 "%s: is not up, cannot run test\n",
5862 dev->name);
5863 data[0] = -1;
5864 data[1] = -1;
5865 data[2] = -1;
5866 data[3] = -1;
5867 data[4] = -1;
5870 if (s2io_link_test(sp, &data[2]))
5871 ethtest->flags |= ETH_TEST_FL_FAILED;
5873 data[0] = 0;
5874 data[1] = 0;
5875 data[3] = 0;
5876 data[4] = 0;
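
/*
 * Illustrative sketch (not compiled): how userspace could trigger the
 * self-test above through the SIOCETHTOOL ioctl. The interface name and
 * the result count of five (matching data[0]..data[4] above) are
 * assumptions of this example; "ethtool -t ethX offline" performs the
 * same operation from the command line.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int s2io_run_selftest(int sock, const char *ifname)
{
	struct {
		struct ethtool_test hdr;
		__u64 results[5];	/* register/eeprom/link/rldram/bist */
	} test = { .hdr = { .cmd = ETHTOOL_TEST,
			    .flags = ETH_TEST_FL_OFFLINE } };
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&test;

	if (ioctl(sock, SIOCETHTOOL, &ifr) < 0)
		return -1;
	return (test.hdr.flags & ETH_TEST_FL_FAILED) ? 1 : 0;
}
#endif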
5880 static void s2io_get_ethtool_stats(struct net_device *dev,
5881 struct ethtool_stats *estats,
5882 u64 * tmp_stats)
5884 int i = 0, k;
5885 struct s2io_nic *sp = dev->priv;
5886 struct stat_block *stat_info = sp->mac_control.stats_info;
5888 s2io_updt_stats(sp);
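	/*
	 * Most 64-bit hardware counters in the stats block are stored as two
	 * little-endian 32-bit words: an overflow (high) word and a base
	 * (low) word. Each such entry below is rebuilt as (hi << 32) | lo
	 * after byte-swapping.
	 */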
5889 tmp_stats[i++] =
5890 (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32 |
5891 le32_to_cpu(stat_info->tmac_frms);
5892 tmp_stats[i++] =
5893 (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
5894 le32_to_cpu(stat_info->tmac_data_octets);
5895 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
5896 tmp_stats[i++] =
5897 (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
5898 le32_to_cpu(stat_info->tmac_mcst_frms);
5899 tmp_stats[i++] =
5900 (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
5901 le32_to_cpu(stat_info->tmac_bcst_frms);
5902 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
5903 tmp_stats[i++] =
5904 (u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 |
5905 le32_to_cpu(stat_info->tmac_ttl_octets);
5906 tmp_stats[i++] =
5907 (u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 |
5908 le32_to_cpu(stat_info->tmac_ucst_frms);
5909 tmp_stats[i++] =
5910 (u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 |
5911 le32_to_cpu(stat_info->tmac_nucst_frms);
5912 tmp_stats[i++] =
5913 (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
5914 le32_to_cpu(stat_info->tmac_any_err_frms);
5915 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets);
5916 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
5917 tmp_stats[i++] =
5918 (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
5919 le32_to_cpu(stat_info->tmac_vld_ip);
5920 tmp_stats[i++] =
5921 (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
5922 le32_to_cpu(stat_info->tmac_drop_ip);
5923 tmp_stats[i++] =
5924 (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
5925 le32_to_cpu(stat_info->tmac_icmp);
5926 tmp_stats[i++] =
5927 (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
5928 le32_to_cpu(stat_info->tmac_rst_tcp);
5929 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
5930 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
5931 le32_to_cpu(stat_info->tmac_udp);
5932 tmp_stats[i++] =
5933 (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
5934 le32_to_cpu(stat_info->rmac_vld_frms);
5935 tmp_stats[i++] =
5936 (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
5937 le32_to_cpu(stat_info->rmac_data_octets);
5938 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
5939 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
5940 tmp_stats[i++] =
5941 (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
5942 le32_to_cpu(stat_info->rmac_vld_mcst_frms);
5943 tmp_stats[i++] =
5944 (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
5945 le32_to_cpu(stat_info->rmac_vld_bcst_frms);
5946 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
5947 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms);
5948 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
5949 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
5950 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms);
5951 tmp_stats[i++] =
5952 (u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 |
5953 le32_to_cpu(stat_info->rmac_ttl_octets);
5954 tmp_stats[i++] =
5955 (u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow)
5956 << 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms);
5957 tmp_stats[i++] =
5958 (u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow)
5959 << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms);
5960 tmp_stats[i++] =
5961 (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
5962 le32_to_cpu(stat_info->rmac_discarded_frms);
5963 tmp_stats[i++] =
5964 (u64)le32_to_cpu(stat_info->rmac_drop_events_oflow)
5965 << 32 | le32_to_cpu(stat_info->rmac_drop_events);
5966 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets);
5967 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms);
5968 tmp_stats[i++] =
5969 (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
5970 le32_to_cpu(stat_info->rmac_usized_frms);
5971 tmp_stats[i++] =
5972 (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
5973 le32_to_cpu(stat_info->rmac_osized_frms);
5974 tmp_stats[i++] =
5975 (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
5976 le32_to_cpu(stat_info->rmac_frag_frms);
5977 tmp_stats[i++] =
5978 (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
5979 le32_to_cpu(stat_info->rmac_jabber_frms);
5980 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms);
5981 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms);
5982 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms);
5983 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms);
5984 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms);
5985 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms);
5986 tmp_stats[i++] =
5987 (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
5988 le32_to_cpu(stat_info->rmac_ip);
5989 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
5990 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
5991 tmp_stats[i++] =
5992 (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
5993 le32_to_cpu(stat_info->rmac_drop_ip);
5994 tmp_stats[i++] =
5995 (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
5996 le32_to_cpu(stat_info->rmac_icmp);
5997 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
5998 tmp_stats[i++] =
5999 (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
6000 le32_to_cpu(stat_info->rmac_udp);
6001 tmp_stats[i++] =
6002 (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
6003 le32_to_cpu(stat_info->rmac_err_drp_udp);
6004 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym);
6005 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0);
6006 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1);
6007 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2);
6008 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3);
6009 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4);
6010 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5);
6011 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6);
6012 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7);
6013 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0);
6014 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1);
6015 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2);
6016 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3);
6017 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4);
6018 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5);
6019 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6);
6020 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7);
6021 tmp_stats[i++] =
6022 (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
6023 le32_to_cpu(stat_info->rmac_pause_cnt);
6024 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt);
6025 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt);
6026 tmp_stats[i++] =
6027 (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
6028 le32_to_cpu(stat_info->rmac_accepted_ip);
6029 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
6030 tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt);
6031 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt);
6032 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt);
6033 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt);
6034 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt);
6035 tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt);
6036 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt);
6037 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt);
6038 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt);
6039 tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt);
6040 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt);
6041 tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt);
6042 tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt);
6043 tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt);
6044 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt);
6045 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
6046 tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
6047 tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);
6049 /* Enhanced statistics exist only for Hercules */
6050 if(sp->device_type == XFRAME_II_DEVICE) {
6051 tmp_stats[i++] =
6052 le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
6053 tmp_stats[i++] =
6054 le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
6055 tmp_stats[i++] =
6056 le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
6057 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
6058 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
6059 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
6060 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
6061 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
6062 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
6063 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
6064 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
6065 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
6066 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
6067 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
6068 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
6069 tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
6072 tmp_stats[i++] = 0;
6073 tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
6074 tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
6075 tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt;
6076 tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt;
6077 tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt;
6078 tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt;
6079 for (k = 0; k < MAX_RX_RINGS; k++)
6080 tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt[k];
6081 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high;
6082 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low;
6083 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high;
6084 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low;
6085 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high;
6086 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low;
6087 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high;
6088 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low;
6089 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high;
6090 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low;
6091 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high;
6092 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low;
6093 tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
6094 tmp_stats[i++] = stat_info->sw_stat.sending_both;
6095 tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
6096 tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
6097 if (stat_info->sw_stat.num_aggregations) {
6098 u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
6099 int count = 0;
6101 * Since 64-bit divide does not work on all platforms,
6102 * do repeated subtraction.
6104 while (tmp >= stat_info->sw_stat.num_aggregations) {
6105 tmp -= stat_info->sw_stat.num_aggregations;
6106 count++;
6108 tmp_stats[i++] = count;
6110 else
6111 tmp_stats[i++] = 0;
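	/*
	 * A minimal alternative sketch (not compiled): the kernel's do_div()
	 * from <asm/div64.h> performs the same 64-by-32 division portably,
	 * which is what the repeated subtraction above works around. It
	 * assumes the divisor fits in 32 bits.
	 */
#if 0
	if (stat_info->sw_stat.num_aggregations) {
		u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;

		/* do_div() divides tmp in place and returns the remainder */
		do_div(tmp, (u32)stat_info->sw_stat.num_aggregations);
		tmp_stats[i++] = tmp;
	} else
		tmp_stats[i++] = 0;
#endif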
6112 tmp_stats[i++] = stat_info->sw_stat.mem_alloc_fail_cnt;
6113 tmp_stats[i++] = stat_info->sw_stat.pci_map_fail_cnt;
6114 tmp_stats[i++] = stat_info->sw_stat.watchdog_timer_cnt;
6115 tmp_stats[i++] = stat_info->sw_stat.mem_allocated;
6116 tmp_stats[i++] = stat_info->sw_stat.mem_freed;
6117 tmp_stats[i++] = stat_info->sw_stat.link_up_cnt;
6118 tmp_stats[i++] = stat_info->sw_stat.link_down_cnt;
6119 tmp_stats[i++] = stat_info->sw_stat.link_up_time;
6120 tmp_stats[i++] = stat_info->sw_stat.link_down_time;
6122 tmp_stats[i++] = stat_info->sw_stat.tx_buf_abort_cnt;
6123 tmp_stats[i++] = stat_info->sw_stat.tx_desc_abort_cnt;
6124 tmp_stats[i++] = stat_info->sw_stat.tx_parity_err_cnt;
6125 tmp_stats[i++] = stat_info->sw_stat.tx_link_loss_cnt;
6126 tmp_stats[i++] = stat_info->sw_stat.tx_list_proc_err_cnt;
6128 tmp_stats[i++] = stat_info->sw_stat.rx_parity_err_cnt;
6129 tmp_stats[i++] = stat_info->sw_stat.rx_abort_cnt;
6130 tmp_stats[i++] = stat_info->sw_stat.rx_parity_abort_cnt;
6131 tmp_stats[i++] = stat_info->sw_stat.rx_rda_fail_cnt;
6132 tmp_stats[i++] = stat_info->sw_stat.rx_unkn_prot_cnt;
6133 tmp_stats[i++] = stat_info->sw_stat.rx_fcs_err_cnt;
6134 tmp_stats[i++] = stat_info->sw_stat.rx_buf_size_err_cnt;
6135 tmp_stats[i++] = stat_info->sw_stat.rx_rxd_corrupt_cnt;
6136 tmp_stats[i++] = stat_info->sw_stat.rx_unkn_err_cnt;
6137 tmp_stats[i++] = stat_info->sw_stat.tda_err_cnt;
6138 tmp_stats[i++] = stat_info->sw_stat.pfc_err_cnt;
6139 tmp_stats[i++] = stat_info->sw_stat.pcc_err_cnt;
6140 tmp_stats[i++] = stat_info->sw_stat.tti_err_cnt;
6141 tmp_stats[i++] = stat_info->sw_stat.tpa_err_cnt;
6142 tmp_stats[i++] = stat_info->sw_stat.sm_err_cnt;
6143 tmp_stats[i++] = stat_info->sw_stat.lso_err_cnt;
6144 tmp_stats[i++] = stat_info->sw_stat.mac_tmac_err_cnt;
6145 tmp_stats[i++] = stat_info->sw_stat.mac_rmac_err_cnt;
6146 tmp_stats[i++] = stat_info->sw_stat.xgxs_txgxs_err_cnt;
6147 tmp_stats[i++] = stat_info->sw_stat.xgxs_rxgxs_err_cnt;
6148 tmp_stats[i++] = stat_info->sw_stat.rc_err_cnt;
6149 tmp_stats[i++] = stat_info->sw_stat.prc_pcix_err_cnt;
6150 tmp_stats[i++] = stat_info->sw_stat.rpa_err_cnt;
6151 tmp_stats[i++] = stat_info->sw_stat.rda_err_cnt;
6152 tmp_stats[i++] = stat_info->sw_stat.rti_err_cnt;
6153 tmp_stats[i++] = stat_info->sw_stat.mc_err_cnt;
6156 static int s2io_ethtool_get_regs_len(struct net_device *dev)
6158 return (XENA_REG_SPACE);
6162 static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
6164 struct s2io_nic *sp = dev->priv;
6166 return (sp->rx_csum);
6169 static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
6171 struct s2io_nic *sp = dev->priv;
6173 if (data)
6174 sp->rx_csum = 1;
6175 else
6176 sp->rx_csum = 0;
6178 return 0;
6181 static int s2io_get_eeprom_len(struct net_device *dev)
6183 return (XENA_EEPROM_SPACE);
6186 static int s2io_get_sset_count(struct net_device *dev, int sset)
6188 struct s2io_nic *sp = dev->priv;
6190 switch (sset) {
6191 case ETH_SS_TEST:
6192 return S2IO_TEST_LEN;
6193 case ETH_SS_STATS:
6194 switch(sp->device_type) {
6195 case XFRAME_I_DEVICE:
6196 return XFRAME_I_STAT_LEN;
6197 case XFRAME_II_DEVICE:
6198 return XFRAME_II_STAT_LEN;
6199 default:
6200 return 0;
6202 default:
6203 return -EOPNOTSUPP;
6207 static void s2io_ethtool_get_strings(struct net_device *dev,
6208 u32 stringset, u8 * data)
6210 int stat_size = 0;
6211 struct s2io_nic *sp = dev->priv;
6213 switch (stringset) {
6214 case ETH_SS_TEST:
6215 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6216 break;
6217 case ETH_SS_STATS:
6218 stat_size = sizeof(ethtool_xena_stats_keys);
6219 memcpy(data, &ethtool_xena_stats_keys,stat_size);
6220 if(sp->device_type == XFRAME_II_DEVICE) {
6221 memcpy(data + stat_size,
6222 &ethtool_enhanced_stats_keys,
6223 sizeof(ethtool_enhanced_stats_keys));
6224 stat_size += sizeof(ethtool_enhanced_stats_keys);
6227 memcpy(data + stat_size, &ethtool_driver_stats_keys,
6228 sizeof(ethtool_driver_stats_keys));
6232 static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
6234 if (data)
6235 dev->features |= NETIF_F_IP_CSUM;
6236 else
6237 dev->features &= ~NETIF_F_IP_CSUM;
6239 return 0;
6242 static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
6244 return (dev->features & NETIF_F_TSO) != 0;
6246 static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
6248 if (data)
6249 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
6250 else
6251 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
6253 return 0;
6256 static const struct ethtool_ops netdev_ethtool_ops = {
6257 .get_settings = s2io_ethtool_gset,
6258 .set_settings = s2io_ethtool_sset,
6259 .get_drvinfo = s2io_ethtool_gdrvinfo,
6260 .get_regs_len = s2io_ethtool_get_regs_len,
6261 .get_regs = s2io_ethtool_gregs,
6262 .get_link = ethtool_op_get_link,
6263 .get_eeprom_len = s2io_get_eeprom_len,
6264 .get_eeprom = s2io_ethtool_geeprom,
6265 .set_eeprom = s2io_ethtool_seeprom,
6266 .get_ringparam = s2io_ethtool_gringparam,
6267 .get_pauseparam = s2io_ethtool_getpause_data,
6268 .set_pauseparam = s2io_ethtool_setpause_data,
6269 .get_rx_csum = s2io_ethtool_get_rx_csum,
6270 .set_rx_csum = s2io_ethtool_set_rx_csum,
6271 .set_tx_csum = s2io_ethtool_op_set_tx_csum,
6272 .set_sg = ethtool_op_set_sg,
6273 .get_tso = s2io_ethtool_op_get_tso,
6274 .set_tso = s2io_ethtool_op_set_tso,
6275 .set_ufo = ethtool_op_set_ufo,
6276 .self_test = s2io_ethtool_test,
6277 .get_strings = s2io_ethtool_get_strings,
6278 .phys_id = s2io_ethtool_idnic,
6279 .get_ethtool_stats = s2io_get_ethtool_stats,
6280 .get_sset_count = s2io_get_sset_count,
6284 * s2io_ioctl - Entry point for the Ioctl
6285 * @dev : Device pointer.
6286 * @ifr : An IOCTL specific structure, that can contain a pointer to
6287 * a proprietary structure used to pass information to the driver.
6288 * @cmd : This is used to distinguish between the different commands that
6289 * can be passed to the IOCTL functions.
6290 * Description:
6291 * Currently no special functionality is supported in IOCTL, hence
6292 * the function always returns -EOPNOTSUPP.
6295 static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6297 return -EOPNOTSUPP;
6301 * s2io_change_mtu - entry point to change MTU size for the device.
6302 * @dev : device pointer.
6303 * @new_mtu : the new MTU size for the device.
6304 * Description: A driver entry point to change MTU size for the device.
6305 * Before changing the MTU the device must be stopped.
6306 * Return value:
6307 * 0 on success and an appropriate (-)ve integer as defined in errno.h
6308 * file on failure.
6311 static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6313 struct s2io_nic *sp = dev->priv;
6315 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
6316 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
6317 dev->name);
6318 return -EPERM;
6321 dev->mtu = new_mtu;
6322 if (netif_running(dev)) {
6323 s2io_card_down(sp);
6324 netif_stop_queue(dev);
6325 if (s2io_card_up(sp)) {
6326 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6327 __FUNCTION__);
6329 if (netif_queue_stopped(dev))
6330 netif_wake_queue(dev);
6331 } else { /* Device is down */
6332 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6333 u64 val64 = new_mtu;
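		/*
		 * vBIT(val, loc, sz) - defined in s2io.h - places 'val' in a
		 * 'sz'-bit wide field starting at bit 'loc' of the 64-bit
		 * register (bit 0 being the MSB), so this programs the
		 * 14-bit maximum payload length field while the card is down.
		 */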
6335 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6338 return 0;
6342 * s2io_tasklet - Bottom half of the ISR.
6343 * @dev_addr : address of the device structure cast to an unsigned long.
6344 * Description:
6345 * This is the tasklet or the bottom half of the ISR. This is
6346 * an extension of the ISR which is scheduled by the scheduler to be run
6347 * when the load on the CPU is low. All low priority tasks of the ISR can
6348 * be pushed into the tasklet. For now the tasklet is used only to
6349 * replenish the Rx buffers in the Rx buffer descriptors.
6350 * Return value:
6351 * void.
6354 static void s2io_tasklet(unsigned long dev_addr)
6356 struct net_device *dev = (struct net_device *) dev_addr;
6357 struct s2io_nic *sp = dev->priv;
6358 int i, ret;
6359 struct mac_info *mac_control;
6360 struct config_param *config;
6362 mac_control = &sp->mac_control;
6363 config = &sp->config;
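	/*
	 * TASKLET_IN_USE atomically test-and-sets bit 0 of
	 * sp->tasklet_status; if another instance already owns the bit the
	 * tasklet simply returns, and the owner clears it below.
	 */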
6365 if (!TASKLET_IN_USE) {
6366 for (i = 0; i < config->rx_ring_num; i++) {
6367 ret = fill_rx_buffers(sp, i);
6368 if (ret == -ENOMEM) {
6369 DBG_PRINT(INFO_DBG, "%s: Out of ",
6370 dev->name);
6371 DBG_PRINT(INFO_DBG, "memory in tasklet\n");
6372 break;
6373 } else if (ret == -EFILL) {
6374 DBG_PRINT(INFO_DBG,
6375 "%s: Rx Ring %d is full\n",
6376 dev->name, i);
6377 break;
6380 clear_bit(0, (&sp->tasklet_status));
6385 * s2io_set_link - Set the Link status
6386 * @work: work_struct embedded in the s2io_nic private structure
6387 * Description: Sets the link status for the adapter
6390 static void s2io_set_link(struct work_struct *work)
6392 struct s2io_nic *nic = container_of(work, struct s2io_nic, set_link_task);
6393 struct net_device *dev = nic->dev;
6394 struct XENA_dev_config __iomem *bar0 = nic->bar0;
6395 register u64 val64;
6396 u16 subid;
6398 rtnl_lock();
6400 if (!netif_running(dev))
6401 goto out_unlock;
6403 if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
6404 /* The card is being reset, no point doing anything */
6405 goto out_unlock;
6408 subid = nic->pdev->subsystem_device;
6409 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
6411 * Allow a small delay for the NIC's self-initiated
6412 * cleanup to complete.
6414 msleep(100);
6417 val64 = readq(&bar0->adapter_status);
6418 if (LINK_IS_UP(val64)) {
6419 if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
6420 if (verify_xena_quiescence(nic)) {
6421 val64 = readq(&bar0->adapter_control);
6422 val64 |= ADAPTER_CNTL_EN;
6423 writeq(val64, &bar0->adapter_control);
6424 if (CARDS_WITH_FAULTY_LINK_INDICATORS(
6425 nic->device_type, subid)) {
6426 val64 = readq(&bar0->gpio_control);
6427 val64 |= GPIO_CTRL_GPIO_0;
6428 writeq(val64, &bar0->gpio_control);
6429 val64 = readq(&bar0->gpio_control);
6430 } else {
6431 val64 |= ADAPTER_LED_ON;
6432 writeq(val64, &bar0->adapter_control);
6434 nic->device_enabled_once = TRUE;
6435 } else {
6436 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
6437 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
6438 netif_stop_queue(dev);
6441 val64 = readq(&bar0->adapter_control);
6442 val64 |= ADAPTER_LED_ON;
6443 writeq(val64, &bar0->adapter_control);
6444 s2io_link(nic, LINK_UP);
6445 } else {
6446 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
6447 subid)) {
6448 val64 = readq(&bar0->gpio_control);
6449 val64 &= ~GPIO_CTRL_GPIO_0;
6450 writeq(val64, &bar0->gpio_control);
6451 val64 = readq(&bar0->gpio_control);
6453 /* turn off LED */
6454 val64 = readq(&bar0->adapter_control);
6455 val64 = val64 &(~ADAPTER_LED_ON);
6456 writeq(val64, &bar0->adapter_control);
6457 s2io_link(nic, LINK_DOWN);
6459 clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));
6461 out_unlock:
6462 rtnl_unlock();
6465 static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
6466 struct buffAdd *ba,
6467 struct sk_buff **skb, u64 *temp0, u64 *temp1,
6468 u64 *temp2, int size)
6470 struct net_device *dev = sp->dev;
6471 struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
6473 if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
6474 struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
6475 /* allocate skb */
6476 if (*skb) {
6477 DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
6479 * As Rx frames are not going to be processed,
6480 * reuse the same mapped address for the RxD
6481 * buffer pointer
6483 rxdp1->Buffer0_ptr = *temp0;
6484 } else {
6485 *skb = dev_alloc_skb(size);
6486 if (!(*skb)) {
6487 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
6488 DBG_PRINT(INFO_DBG, "memory to allocate ");
6489 DBG_PRINT(INFO_DBG, "1 buf mode SKBs\n");
6490 sp->mac_control.stats_info->sw_stat. \
6491 mem_alloc_fail_cnt++;
6492 return -ENOMEM ;
6494 sp->mac_control.stats_info->sw_stat.mem_allocated
6495 += (*skb)->truesize;
6496 /* store the mapped addr in a temp variable
6497 * so that it can be used for the next RxD whose
6498 * Host_Control is NULL
6500 rxdp1->Buffer0_ptr = *temp0 =
6501 pci_map_single( sp->pdev, (*skb)->data,
6502 size - NET_IP_ALIGN,
6503 PCI_DMA_FROMDEVICE);
6504 if( (rxdp1->Buffer0_ptr == 0) ||
6505 (rxdp1->Buffer0_ptr == DMA_ERROR_CODE)) {
6506 goto memalloc_failed;
6508 rxdp->Host_Control = (unsigned long) (*skb);
6510 } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
6511 struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
6512 /* Two buffer Mode */
6513 if (*skb) {
6514 rxdp3->Buffer2_ptr = *temp2;
6515 rxdp3->Buffer0_ptr = *temp0;
6516 rxdp3->Buffer1_ptr = *temp1;
6517 } else {
6518 *skb = dev_alloc_skb(size);
6519 if (!(*skb)) {
6520 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
6521 DBG_PRINT(INFO_DBG, "memory to allocate ");
6522 DBG_PRINT(INFO_DBG, "2 buf mode SKBs\n");
6523 sp->mac_control.stats_info->sw_stat. \
6524 mem_alloc_fail_cnt++;
6525 return -ENOMEM;
6527 sp->mac_control.stats_info->sw_stat.mem_allocated
6528 += (*skb)->truesize;
6529 rxdp3->Buffer2_ptr = *temp2 =
6530 pci_map_single(sp->pdev, (*skb)->data,
6531 dev->mtu + 4,
6532 PCI_DMA_FROMDEVICE);
6533 if( (rxdp3->Buffer2_ptr == 0) ||
6534 (rxdp3->Buffer2_ptr == DMA_ERROR_CODE)) {
6535 goto memalloc_failed;
6537 rxdp3->Buffer0_ptr = *temp0 =
6538 pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
6539 PCI_DMA_FROMDEVICE);
6540 if( (rxdp3->Buffer0_ptr == 0) ||
6541 (rxdp3->Buffer0_ptr == DMA_ERROR_CODE)) {
6542 pci_unmap_single (sp->pdev,
6543 (dma_addr_t)rxdp3->Buffer2_ptr,
6544 dev->mtu + 4, PCI_DMA_FROMDEVICE);
6545 goto memalloc_failed;
6547 rxdp->Host_Control = (unsigned long) (*skb);
6549 /* Buffer-1 is a dummy buffer, not used by the hardware */
6550 rxdp3->Buffer1_ptr = *temp1 =
6551 pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
6552 PCI_DMA_FROMDEVICE);
6553 if( (rxdp3->Buffer1_ptr == 0) ||
6554 (rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
6555 pci_unmap_single (sp->pdev,
6556 (dma_addr_t)rxdp3->Buffer0_ptr,
6557 BUF0_LEN, PCI_DMA_FROMDEVICE);
6558 pci_unmap_single (sp->pdev,
6559 (dma_addr_t)rxdp3->Buffer2_ptr,
6560 dev->mtu + 4, PCI_DMA_FROMDEVICE);
6561 goto memalloc_failed;
6565 return 0;
6566 memalloc_failed:
6567 stats->pci_map_fail_cnt++;
6568 stats->mem_freed += (*skb)->truesize;
6569 dev_kfree_skb(*skb);
6570 return -ENOMEM;
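
/*
 * Note: the explicit checks against 0/DMA_ERROR_CODE above predate a
 * uniform DMA error API; on later kernels pci_dma_mapping_error() would
 * be the idiomatic way to validate each pci_map_single() result.
 */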
6573 static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6574 int size)
6576 struct net_device *dev = sp->dev;
6577 if (sp->rxd_mode == RXD_MODE_1) {
6578 rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN);
6579 } else if (sp->rxd_mode == RXD_MODE_3B) {
6580 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6581 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6582 rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
6586 static int rxd_owner_bit_reset(struct s2io_nic *sp)
6588 int i, j, k, blk_cnt = 0, size;
6589 struct mac_info * mac_control = &sp->mac_control;
6590 struct config_param *config = &sp->config;
6591 struct net_device *dev = sp->dev;
6592 struct RxD_t *rxdp = NULL;
6593 struct sk_buff *skb = NULL;
6594 struct buffAdd *ba = NULL;
6595 u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6597 /* Calculate the size based on ring mode */
6598 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6599 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6600 if (sp->rxd_mode == RXD_MODE_1)
6601 size += NET_IP_ALIGN;
6602 else if (sp->rxd_mode == RXD_MODE_3B)
6603 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6605 for (i = 0; i < config->rx_ring_num; i++) {
6606 blk_cnt = config->rx_cfg[i].num_rxd /
6607 (rxd_count[sp->rxd_mode] +1);
6609 for (j = 0; j < blk_cnt; j++) {
6610 for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6611 rxdp = mac_control->rings[i].
6612 rx_blocks[j].rxds[k].virt_addr;
6613 if(sp->rxd_mode == RXD_MODE_3B)
6614 ba = &mac_control->rings[i].ba[j][k];
6615 if (set_rxd_buffer_pointer(sp, rxdp, ba,
6616 &skb,(u64 *)&temp0_64,
6617 (u64 *)&temp1_64,
6618 (u64 *)&temp2_64,
6619 size) == -ENOMEM) {
6620 return 0;
6623 set_rxd_buffer_size(sp, rxdp, size);
6624 wmb();
6625 /* flip the Ownership bit to Hardware */
6626 rxdp->Control_1 |= RXD_OWN_XENA;
6630 return 0;
6634 static int s2io_add_isr(struct s2io_nic * sp)
6636 int ret = 0;
6637 struct net_device *dev = sp->dev;
6638 int err = 0;
6640 if (sp->config.intr_type == MSI_X)
6641 ret = s2io_enable_msi_x(sp);
6642 if (ret) {
6643 DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
6644 sp->config.intr_type = INTA;
6647 /* Store the values of the MSIX table in the struct s2io_nic structure */
6648 store_xmsi_data(sp);
6650 /* After proper initialization of H/W, register ISR */
6651 if (sp->config.intr_type == MSI_X) {
6652 int i, msix_tx_cnt=0,msix_rx_cnt=0;
6654 for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
6655 if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
6656 sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
6657 dev->name, i);
6658 err = request_irq(sp->entries[i].vector,
6659 s2io_msix_fifo_handle, 0, sp->desc[i],
6660 sp->s2io_entries[i].arg);
6661 /* If either data or addr is zero print it */
6662 if(!(sp->msix_info[i].addr &&
6663 sp->msix_info[i].data)) {
6664 DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
6665 "Data:0x%lx\n",sp->desc[i],
6666 (unsigned long long)
6667 sp->msix_info[i].addr,
6668 (unsigned long)
6669 ntohl(sp->msix_info[i].data));
6670 } else {
6671 msix_tx_cnt++;
6673 } else {
6674 sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
6675 dev->name, i);
6676 err = request_irq(sp->entries[i].vector,
6677 s2io_msix_ring_handle, 0, sp->desc[i],
6678 sp->s2io_entries[i].arg);
6679 /* If either data or addr is zero print it */
6680 if(!(sp->msix_info[i].addr &&
6681 sp->msix_info[i].data)) {
6682 DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
6683 "Data:0x%lx\n",sp->desc[i],
6684 (unsigned long long)
6685 sp->msix_info[i].addr,
6686 (unsigned long)
6687 ntohl(sp->msix_info[i].data));
6688 } else {
6689 msix_rx_cnt++;
6692 if (err) {
6693 DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration "
6694 "failed\n", dev->name, i);
6695 DBG_PRINT(ERR_DBG, "Returned: %d\n", err);
6696 return -1;
6698 sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
6700 printk("MSI-X-TX %d entries enabled\n",msix_tx_cnt);
6701 printk("MSI-X-RX %d entries enabled\n",msix_rx_cnt);
6703 if (sp->config.intr_type == INTA) {
6704 err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
6705 sp->name, dev);
6706 if (err) {
6707 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
6708 dev->name);
6709 return -1;
6712 return 0;
6714 static void s2io_rem_isr(struct s2io_nic * sp)
6716 struct net_device *dev = sp->dev;
6717 struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
6719 if (sp->config.intr_type == MSI_X) {
6720 int i;
6721 u16 msi_control;
6723 for (i=1; (sp->s2io_entries[i].in_use ==
6724 MSIX_REGISTERED_SUCCESS); i++) {
6725 int vector = sp->entries[i].vector;
6726 void *arg = sp->s2io_entries[i].arg;
6728 synchronize_irq(vector);
6729 free_irq(vector, arg);
6732 kfree(sp->entries);
6733 stats->mem_freed +=
6734 (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
6735 kfree(sp->s2io_entries);
6736 stats->mem_freed +=
6737 (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
6738 sp->entries = NULL;
6739 sp->s2io_entries = NULL;
6741 pci_read_config_word(sp->pdev, 0x42, &msi_control);
6742 msi_control &= 0xFFFE; /* Disable MSI */
6743 pci_write_config_word(sp->pdev, 0x42, msi_control);
6745 pci_disable_msix(sp->pdev);
6746 } else {
6747 synchronize_irq(sp->pdev->irq);
6748 free_irq(sp->pdev->irq, dev);
6752 static void do_s2io_card_down(struct s2io_nic * sp, int do_io)
6754 int cnt = 0;
6755 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6756 unsigned long flags;
6757 register u64 val64 = 0;
6759 del_timer_sync(&sp->alarm_timer);
6760 /* If s2io_set_link task is executing, wait till it completes. */
6761 while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state))) {
6762 msleep(50);
6764 clear_bit(__S2IO_STATE_CARD_UP, &sp->state);
6766 /* disable Tx and Rx traffic on the NIC */
6767 if (do_io)
6768 stop_nic(sp);
6770 s2io_rem_isr(sp);
6772 /* Kill tasklet. */
6773 tasklet_kill(&sp->task);
6775 /* Check if the device is Quiescent and then Reset the NIC */
6776 while(do_io) {
6777 /* As per the HW requirement we need to replenish the
6778 * receive buffer to avoid the ring bump. Since there is
6779 * no intention of processing the Rx frame at this point, we are
6780 * just setting the ownership bit of the RxDs in each Rx
6781 * ring to HW and setting the appropriate buffer size
6782 * based on the ring mode
6784 rxd_owner_bit_reset(sp);
6786 val64 = readq(&bar0->adapter_status);
6787 if (verify_xena_quiescence(sp)) {
6788 if(verify_pcc_quiescent(sp, sp->device_enabled_once))
6789 break;
6792 msleep(50);
6793 cnt++;
6794 if (cnt == 10) {
6795 DBG_PRINT(ERR_DBG,
6796 "s2io_close:Device not Quiescent ");
6797 DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
6798 (unsigned long long) val64);
6799 break;
6802 if (do_io)
6803 s2io_reset(sp);
6805 spin_lock_irqsave(&sp->tx_lock, flags);
6806 /* Free all Tx buffers */
6807 free_tx_buffers(sp);
6808 spin_unlock_irqrestore(&sp->tx_lock, flags);
6810 /* Free all Rx buffers */
6811 spin_lock_irqsave(&sp->rx_lock, flags);
6812 free_rx_buffers(sp);
6813 spin_unlock_irqrestore(&sp->rx_lock, flags);
6815 clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
6818 static void s2io_card_down(struct s2io_nic * sp)
6820 do_s2io_card_down(sp, 1);
6823 static int s2io_card_up(struct s2io_nic * sp)
6825 int i, ret = 0;
6826 struct mac_info *mac_control;
6827 struct config_param *config;
6828 struct net_device *dev = (struct net_device *) sp->dev;
6829 u16 interruptible;
6831 /* Initialize the H/W I/O registers */
6832 if (init_nic(sp) != 0) {
6833 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
6834 dev->name);
6835 s2io_reset(sp);
6836 return -ENODEV;
6840 * Initializing the Rx buffers. Buffers are replenished for
6841 * every configured Rx ring.
6843 mac_control = &sp->mac_control;
6844 config = &sp->config;
6846 for (i = 0; i < config->rx_ring_num; i++) {
6847 if ((ret = fill_rx_buffers(sp, i))) {
6848 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
6849 dev->name);
6850 s2io_reset(sp);
6851 free_rx_buffers(sp);
6852 return -ENOMEM;
6854 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
6855 atomic_read(&sp->rx_bufs_left[i]));
6857 /* Maintain the state prior to the open */
6858 if (sp->promisc_flg)
6859 sp->promisc_flg = 0;
6860 if (sp->m_cast_flg) {
6861 sp->m_cast_flg = 0;
6862 sp->all_multi_pos= 0;
6865 /* Setting its receive mode */
6866 s2io_set_multicast(dev);
6868 if (sp->lro) {
6869 /* Initialize max aggregatable pkts per session based on MTU */
6870 sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
6871 /* Check if we can use (if specified) the user provided value */
6872 if (lro_max_pkts < sp->lro_max_aggr_per_sess)
6873 sp->lro_max_aggr_per_sess = lro_max_pkts;
6876 /* Enable Rx Traffic and interrupts on the NIC */
6877 if (start_nic(sp)) {
6878 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
6879 s2io_reset(sp);
6880 free_rx_buffers(sp);
6881 return -ENODEV;
6884 /* Add interrupt service routine */
6885 if (s2io_add_isr(sp) != 0) {
6886 if (sp->config.intr_type == MSI_X)
6887 s2io_rem_isr(sp);
6888 s2io_reset(sp);
6889 free_rx_buffers(sp);
6890 return -ENODEV;
6893 S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
6895 /* Enable tasklet for the device */
6896 tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
6898 /* Enable select interrupts */
6899 en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
6900 if (sp->config.intr_type != INTA)
6901 en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS);
6902 else {
6903 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
6904 interruptible |= TX_PIC_INTR;
6905 en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
6908 set_bit(__S2IO_STATE_CARD_UP, &sp->state);
6909 return 0;
6913 * s2io_restart_nic - Resets the NIC.
6914 * @work : work_struct embedded in the s2io_nic private structure
6915 * Description:
6916 * This function is scheduled to be run by the s2io_tx_watchdog
6917 * function after 0.5 secs to reset the NIC. The idea is to reduce
6918 * the run time of the watchdog routine which is run holding a
6919 * spin lock.
6922 static void s2io_restart_nic(struct work_struct *work)
6924 struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
6925 struct net_device *dev = sp->dev;
6927 rtnl_lock();
6929 if (!netif_running(dev))
6930 goto out_unlock;
6932 s2io_card_down(sp);
6933 if (s2io_card_up(sp)) {
6934 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6935 dev->name);
6937 netif_wake_queue(dev);
6938 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
6939 dev->name);
6940 out_unlock:
6941 rtnl_unlock();
6945 * s2io_tx_watchdog - Watchdog for transmit side.
6946 * @dev : Pointer to net device structure
6947 * Description:
6948 * This function is triggered if the Tx Queue is stopped
6949 * for a pre-defined amount of time when the Interface is still up.
6950 * If the Interface is jammed in such a situation, the hardware is
6951 * reset (by s2io_close) and restarted again (by s2io_open) to
6952 * overcome any problem that might have been caused in the hardware.
6953 * Return value:
6954 * void
6957 static void s2io_tx_watchdog(struct net_device *dev)
6959 struct s2io_nic *sp = dev->priv;
6961 if (netif_carrier_ok(dev)) {
6962 sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt++;
6963 schedule_work(&sp->rst_timer_task);
6964 sp->mac_control.stats_info->sw_stat.soft_reset_cnt++;
6969 * rx_osm_handler - To perform some OS related operations on SKB.
6970 * @sp: private member of the device structure,pointer to s2io_nic structure.
6971 * @skb : the socket buffer pointer.
6972 * @len : length of the packet
6973 * @cksum : FCS checksum of the frame.
6974 * @ring_no : the ring from which this RxD was extracted.
6975 * Description:
6976 * This function is called by the Rx interrupt service routine to perform
6977 * some OS related operations on the SKB before passing it to the upper
6978 * layers. It mainly checks if the checksum is OK, and if so sets the
6979 * SKB's checksum status, increments the Rx packet count and passes the SKB
6980 * to the upper layer. If the checksum is wrong, it increments the Rx
6981 * packet error count, frees the SKB and returns an error.
6982 * Return value:
6983 * SUCCESS on success and -1 on failure.
6985 static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
6987 struct s2io_nic *sp = ring_data->nic;
6988 struct net_device *dev = (struct net_device *) sp->dev;
6989 struct sk_buff *skb = (struct sk_buff *)
6990 ((unsigned long) rxdp->Host_Control);
6991 int ring_no = ring_data->ring_no;
6992 u16 l3_csum, l4_csum;
6993 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
6994 struct lro *lro;
6995 u8 err_mask;
6997 skb->dev = dev;
6999 if (err) {
7000 /* Check for parity error */
7001 if (err & 0x1) {
7002 sp->mac_control.stats_info->sw_stat.parity_err_cnt++;
7004 err_mask = err >> 48;
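		/*
		 * The 4-bit RxD transfer code sits 48 bits up in Control_1;
		 * each code below is accounted in its own error counter.
		 */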
7005 switch(err_mask) {
7006 case 1:
7007 sp->mac_control.stats_info->sw_stat.
7008 rx_parity_err_cnt++;
7009 break;
7011 case 2:
7012 sp->mac_control.stats_info->sw_stat.
7013 rx_abort_cnt++;
7014 break;
7016 case 3:
7017 sp->mac_control.stats_info->sw_stat.
7018 rx_parity_abort_cnt++;
7019 break;
7021 case 4:
7022 sp->mac_control.stats_info->sw_stat.
7023 rx_rda_fail_cnt++;
7024 break;
7026 case 5:
7027 sp->mac_control.stats_info->sw_stat.
7028 rx_unkn_prot_cnt++;
7029 break;
7031 case 6:
7032 sp->mac_control.stats_info->sw_stat.
7033 rx_fcs_err_cnt++;
7034 break;
7036 case 7:
7037 sp->mac_control.stats_info->sw_stat.
7038 rx_buf_size_err_cnt++;
7039 break;
7041 case 8:
7042 sp->mac_control.stats_info->sw_stat.
7043 rx_rxd_corrupt_cnt++;
7044 break;
7046 case 15:
7047 sp->mac_control.stats_info->sw_stat.
7048 rx_unkn_err_cnt++;
7049 break;
7052 * Drop the packet if the transfer code is bad. The exception is
7053 * 0x5, which could be due to an unsupported IPv6 extension header.
7054 * In this case, we let the stack handle the packet.
7055 * Note that in this case, since the checksum will be incorrect,
7056 * the stack will validate it itself.
7058 if (err_mask != 0x5) {
7059 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
7060 dev->name, err_mask);
7061 sp->stats.rx_crc_errors++;
7062 sp->mac_control.stats_info->sw_stat.mem_freed
7063 += skb->truesize;
7064 dev_kfree_skb(skb);
7065 atomic_dec(&sp->rx_bufs_left[ring_no]);
7066 rxdp->Host_Control = 0;
7067 return 0;
7071 /* Updating statistics */
7072 sp->stats.rx_packets++;
7073 rxdp->Host_Control = 0;
7074 if (sp->rxd_mode == RXD_MODE_1) {
7075 int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
7077 sp->stats.rx_bytes += len;
7078 skb_put(skb, len);
7080 } else if (sp->rxd_mode == RXD_MODE_3B) {
7081 int get_block = ring_data->rx_curr_get_info.block_index;
7082 int get_off = ring_data->rx_curr_get_info.offset;
7083 int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
7084 int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
7085 unsigned char *buff = skb_push(skb, buf0_len);
7087 struct buffAdd *ba = &ring_data->ba[get_block][get_off];
7088 sp->stats.rx_bytes += buf0_len + buf2_len;
7089 memcpy(buff, ba->ba_0, buf0_len);
7090 skb_put(skb, buf2_len);
7093 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!sp->lro) ||
7094 (sp->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
7095 (sp->rx_csum)) {
7096 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
7097 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
7098 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
7100 * NIC verifies if the Checksum of the received
7101 * frame is Ok or not and accordingly returns
7102 * a flag in the RxD.
7104 skb->ip_summed = CHECKSUM_UNNECESSARY;
7105 if (sp->lro) {
7106 u32 tcp_len;
7107 u8 *tcp;
7108 int ret = 0;
7110 ret = s2io_club_tcp_session(skb->data, &tcp,
7111 &tcp_len, &lro, rxdp, sp);
7112 switch (ret) {
7113 case 3: /* Begin anew */
7114 lro->parent = skb;
7115 goto aggregate;
7116 case 1: /* Aggregate */
7118 lro_append_pkt(sp, lro,
7119 skb, tcp_len);
7120 goto aggregate;
7122 case 4: /* Flush session */
7124 lro_append_pkt(sp, lro,
7125 skb, tcp_len);
7126 queue_rx_frame(lro->parent);
7127 clear_lro_session(lro);
7128 sp->mac_control.stats_info->
7129 sw_stat.flush_max_pkts++;
7130 goto aggregate;
7132 case 2: /* Flush both */
7133 lro->parent->data_len =
7134 lro->frags_len;
7135 sp->mac_control.stats_info->
7136 sw_stat.sending_both++;
7137 queue_rx_frame(lro->parent);
7138 clear_lro_session(lro);
7139 goto send_up;
7140 case 0: /* sessions exceeded */
7141 case -1: /* non-TCP or not
7142 * L2 aggregatable
7144 case 5: /*
7145 * First pkt in session not
7146 * L3/L4 aggregatable
7148 break;
7149 default:
7150 DBG_PRINT(ERR_DBG,
7151 "%s: Samadhana!!\n",
7152 __FUNCTION__);
7153 BUG();
7156 } else {
7158 * Packet with erroneous checksum, let the
7159 * upper layers deal with it.
7161 skb->ip_summed = CHECKSUM_NONE;
7163 } else {
7164 skb->ip_summed = CHECKSUM_NONE;
7166 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
7167 if (!sp->lro) {
7168 skb->protocol = eth_type_trans(skb, dev);
7169 if ((sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2) &&
7170 vlan_strip_flag)) {
7171 /* Queueing the vlan frame to the upper layer */
7172 if (napi)
7173 vlan_hwaccel_receive_skb(skb, sp->vlgrp,
7174 RXD_GET_VLAN_TAG(rxdp->Control_2));
7175 else
7176 vlan_hwaccel_rx(skb, sp->vlgrp,
7177 RXD_GET_VLAN_TAG(rxdp->Control_2));
7178 } else {
7179 if (napi)
7180 netif_receive_skb(skb);
7181 else
7182 netif_rx(skb);
7184 } else {
7185 send_up:
7186 queue_rx_frame(skb);
7188 dev->last_rx = jiffies;
7189 aggregate:
7190 atomic_dec(&sp->rx_bufs_left[ring_no]);
7191 return SUCCESS;
7195 * s2io_link - stops/starts the Tx queue.
7196 * @sp : private member of the device structure, which is a pointer to the
7197 * s2io_nic structure.
7198 * @link : indicates whether link is UP/DOWN.
7199 * Description:
7200 * This function stops/starts the Tx queue depending on whether the link
7201 * status of the NIC is down or up. This is called by the Alarm
7202 * interrupt handler whenever a link change interrupt comes up.
7203 * Return value:
7204 * void.
7207 static void s2io_link(struct s2io_nic * sp, int link)
7209 struct net_device *dev = (struct net_device *) sp->dev;
7211 if (link != sp->last_link_state) {
7212 if (link == LINK_DOWN) {
7213 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7214 netif_carrier_off(dev);
7215 if(sp->mac_control.stats_info->sw_stat.link_up_cnt)
7216 sp->mac_control.stats_info->sw_stat.link_up_time =
7217 jiffies - sp->start_time;
7218 sp->mac_control.stats_info->sw_stat.link_down_cnt++;
7219 } else {
7220 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
7221 if (sp->mac_control.stats_info->sw_stat.link_down_cnt)
7222 sp->mac_control.stats_info->sw_stat.link_down_time =
7223 jiffies - sp->start_time;
7224 sp->mac_control.stats_info->sw_stat.link_up_cnt++;
7225 netif_carrier_on(dev);
7228 sp->last_link_state = link;
7229 sp->start_time = jiffies;
7233 * s2io_init_pci - Initialization of PCI and PCI-X configuration registers.
7234 * @sp : private member of the device structure, which is a pointer to the
7235 * s2io_nic structure.
7236 * Description:
7237 * This function initializes a few of the PCI and PCI-X configuration registers
7238 * with recommended values.
7239 * Return value:
7240 * void
7243 static void s2io_init_pci(struct s2io_nic * sp)
7245 u16 pci_cmd = 0, pcix_cmd = 0;
7247 /* Enable Data Parity Error Recovery in PCI-X command register. */
7248 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7249 &(pcix_cmd));
7250 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7251 (pcix_cmd | 1));
7252 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7253 &(pcix_cmd));
7255 /* Set the PErr Response bit in PCI command register. */
7256 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7257 pci_write_config_word(sp->pdev, PCI_COMMAND,
7258 (pci_cmd | PCI_COMMAND_PARITY));
7259 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7262 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
7264 if ( tx_fifo_num > 8) {
7265 DBG_PRINT(ERR_DBG, "s2io: Requested number of Tx fifos not "
7266 "supported\n");
7267 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Tx fifos\n");
7268 tx_fifo_num = 8;
7270 if ( rx_ring_num > 8) {
7271 DBG_PRINT(ERR_DBG, "s2io: Requested number of Rx rings not "
7272 "supported\n");
7273 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Rx rings\n");
7274 rx_ring_num = 8;
7276 if (*dev_intr_type != INTA)
7277 napi = 0;
7279 if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
7280 DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
7281 "Defaulting to INTA\n");
7282 *dev_intr_type = INTA;
7285 if ((*dev_intr_type == MSI_X) &&
7286 ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7287 (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
7288 DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
7289 "Defaulting to INTA\n");
7290 *dev_intr_type = INTA;
7293 if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
7294 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
7295 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 1-buffer mode\n");
7296 rx_ring_mode = 1;
7298 return SUCCESS;
7302 * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
7303 * or Traffic class respectively.
7304 * @nic: device private variable
7305 * Description: The function configures the receive steering to
7306 * desired receive ring.
7307 * Return Value: SUCCESS on success and FAILURE if the DS codepoint
7308 * is out of range or the command does not complete.
7310 static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7312 struct XENA_dev_config __iomem *bar0 = nic->bar0;
7313 register u64 val64 = 0;
7315 if (ds_codepoint > 63)
7316 return FAILURE;
7318 val64 = RTS_DS_MEM_DATA(ring);
7319 writeq(val64, &bar0->rts_ds_mem_data);
7321 val64 = RTS_DS_MEM_CTRL_WE |
7322 RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7323 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7325 writeq(val64, &bar0->rts_ds_mem_ctrl);
7327 return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7328 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7329 S2IO_BIT_RESET);
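
/*
 * Illustrative sketch (not compiled): steering packets marked with DSCP
 * 46 (Expedited Forwarding) to receive ring 1 using the helper above.
 * The codepoint and ring number are arbitrary example values.
 */
#if 0
	if (rts_ds_steer(nic, 46, 1) != SUCCESS)
		DBG_PRINT(ERR_DBG, "Failed to steer DSCP 46 to ring 1\n");
#endif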
7333 * s2io_init_nic - Initialization of the adapter .
7334 * @pdev : structure containing the PCI related information of the device.
7335 * @pre : the matching entry in s2io_tbl, the list of PCI devices supported by the driver.
7336 * Description:
7337 * The function initializes an adapter identified by the pci_dev structure.
7338 * All OS related initialization including memory and device structure and
7339 * initialization of the device private variable is done. Also the swapper
7340 * control register is initialized to enable read and write into the I/O
7341 * registers of the device.
7342 * Return value:
7343 * returns 0 on success and negative on failure.
7346 static int __devinit
7347 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7349 struct s2io_nic *sp;
7350 struct net_device *dev;
7351 int i, j, ret;
7352 int dma_flag = FALSE;
7353 u32 mac_up, mac_down;
7354 u64 val64 = 0, tmp64 = 0;
7355 struct XENA_dev_config __iomem *bar0 = NULL;
7356 u16 subid;
7357 struct mac_info *mac_control;
7358 struct config_param *config;
7359 int mode;
7360 u8 dev_intr_type = intr_type;
7361 DECLARE_MAC_BUF(mac);
7363 if ((ret = s2io_verify_parm(pdev, &dev_intr_type)))
7364 return ret;
7366 if ((ret = pci_enable_device(pdev))) {
7367 DBG_PRINT(ERR_DBG,
7368 "s2io_init_nic: pci_enable_device failed\n");
7369 return ret;
7372 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
7373 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
7374 dma_flag = TRUE;
7375 if (pci_set_consistent_dma_mask
7376 (pdev, DMA_64BIT_MASK)) {
7377 DBG_PRINT(ERR_DBG,
7378 "Unable to obtain 64bit DMA for \
7379 consistent allocations\n");
7380 pci_disable_device(pdev);
7381 return -ENOMEM;
7383 } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
7384 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
7385 } else {
7386 pci_disable_device(pdev);
7387 return -ENOMEM;
7389 if ((ret = pci_request_regions(pdev, s2io_driver_name))) {
7390 DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x \n", __FUNCTION__, ret);
7391 pci_disable_device(pdev);
7392 return -ENODEV;
7395 dev = alloc_etherdev(sizeof(struct s2io_nic));
7396 if (dev == NULL) {
7397 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
7398 pci_disable_device(pdev);
7399 pci_release_regions(pdev);
7400 return -ENODEV;
7403 pci_set_master(pdev);
7404 pci_set_drvdata(pdev, dev);
7405 SET_NETDEV_DEV(dev, &pdev->dev);
7407 /* Private member variable initialized to s2io NIC structure */
7408 sp = dev->priv;
7409 memset(sp, 0, sizeof(struct s2io_nic));
7410 sp->dev = dev;
7411 sp->pdev = pdev;
7412 sp->high_dma_flag = dma_flag;
7413 sp->device_enabled_once = FALSE;
7414 if (rx_ring_mode == 1)
7415 sp->rxd_mode = RXD_MODE_1;
7416 if (rx_ring_mode == 2)
7417 sp->rxd_mode = RXD_MODE_3B;
7419 sp->config.intr_type = dev_intr_type;
7421 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
7422 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
7423 sp->device_type = XFRAME_II_DEVICE;
7424 else
7425 sp->device_type = XFRAME_I_DEVICE;
7427 sp->lro = lro;
7429 /* Initialize some PCI/PCI-X fields of the NIC. */
7430 s2io_init_pci(sp);
7433 * Setting the device configuration parameters.
7434 * Most of these parameters can be specified by the user during
7435 * module insertion as they are module loadable parameters. If
7436 * these parameters are not specified during load time, they
7437 * are initialized with default values.
7439 mac_control = &sp->mac_control;
7440 config = &sp->config;
7442 config->napi = napi;
7444 /* Tx side parameters. */
7445 config->tx_fifo_num = tx_fifo_num;
7446 for (i = 0; i < MAX_TX_FIFOS; i++) {
7447 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
7448 config->tx_cfg[i].fifo_priority = i;
7451 /* mapping the QoS priority to the configured fifos */
7452 for (i = 0; i < MAX_TX_FIFOS; i++)
7453 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
7455 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7456 for (i = 0; i < config->tx_fifo_num; i++) {
7457 config->tx_cfg[i].f_no_snoop =
7458 (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
7459 if (config->tx_cfg[i].fifo_len < 65) {
7460 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7461 break;
7464 /* + 2 because one Txd for skb->data and one Txd for UFO */
7465 config->max_txds = MAX_SKB_FRAGS + 2;
7467 /* Rx side parameters. */
7468 config->rx_ring_num = rx_ring_num;
7469 for (i = 0; i < MAX_RX_RINGS; i++) {
7470 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
7471 (rxd_count[sp->rxd_mode] + 1);
7472 config->rx_cfg[i].ring_priority = i;
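/*
 * On the num_rxd computation above: rx_ring_sz[] is given in units of
 * receive blocks. Each block carries rxd_count[mode] usable RxDs; the
 * "+ 1" appears to account for the additional per-block descriptor
 * slot used to link one receive block to the next.
 */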
7475 for (i = 0; i < rx_ring_num; i++) {
7476 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
7477 config->rx_cfg[i].f_no_snoop =
7478 (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
7481 /* Setting Mac Control parameters */
7482 mac_control->rmac_pause_time = rmac_pause_time;
7483 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
7484 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
7487 /* Initialize Ring buffer parameters. */
7488 for (i = 0; i < config->rx_ring_num; i++)
7489 atomic_set(&sp->rx_bufs_left[i], 0);
7491 /* initialize the shared memory used by the NIC and the host */
7492 if (init_shared_mem(sp)) {
7493 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
7494 dev->name);
7495 ret = -ENOMEM;
7496 goto mem_alloc_failed;
7499 sp->bar0 = ioremap(pci_resource_start(pdev, 0),
7500 pci_resource_len(pdev, 0));
7501 if (!sp->bar0) {
7502 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
7503 dev->name);
7504 ret = -ENOMEM;
7505 goto bar0_remap_failed;
7508 sp->bar1 = ioremap(pci_resource_start(pdev, 2),
7509 pci_resource_len(pdev, 2));
7510 if (!sp->bar1) {
7511 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
7512 dev->name);
7513 ret = -ENOMEM;
7514 goto bar1_remap_failed;
7517 dev->irq = pdev->irq;
7518 dev->base_addr = (unsigned long) sp->bar0;
7520 /* Initialize each Tx FIFO's start pointer to its window within BAR1. */
7521 for (j = 0; j < MAX_TX_FIFOS; j++) {
7522 mac_control->tx_FIFO_start[j] = (struct TxFIFO_element __iomem *)
7523 (sp->bar1 + (j * 0x00020000));
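/*
 * Worked example of the mapping above: each Tx FIFO occupies a 128KB
 * (0x20000 byte) window in BAR1, so FIFO 0 starts at bar1 + 0x0,
 * FIFO 1 at bar1 + 0x20000, FIFO 2 at bar1 + 0x40000, and so on.
 */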
7526 /* Driver entry points */
7527 dev->open = &s2io_open;
7528 dev->stop = &s2io_close;
7529 dev->hard_start_xmit = &s2io_xmit;
7530 dev->get_stats = &s2io_get_stats;
7531 dev->set_multicast_list = &s2io_set_multicast;
7532 dev->do_ioctl = &s2io_ioctl;
7533 dev->change_mtu = &s2io_change_mtu;
7534 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
7535 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7536 dev->vlan_rx_register = s2io_vlan_rx_register;
7539 * will use eth_mac_addr() for dev->set_mac_address;
7540 * the MAC address will be set every time dev->open() is called.
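/*
 * The weight of 32 passed to netif_napi_add() below is the NAPI
 * budget: s2io_poll() will process at most 32 Rx packets per poll.
 */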
7542 netif_napi_add(dev, &sp->napi, s2io_poll, 32);
7544 #ifdef CONFIG_NET_POLL_CONTROLLER
7545 dev->poll_controller = s2io_netpoll;
7546 #endif
7548 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
7549 if (sp->high_dma_flag == TRUE)
7550 dev->features |= NETIF_F_HIGHDMA;
7551 dev->features |= NETIF_F_TSO;
7552 dev->features |= NETIF_F_TSO6;
7553 if ((sp->device_type & XFRAME_II_DEVICE) && (ufo)) {
7554 dev->features |= NETIF_F_UFO;
7555 dev->features |= NETIF_F_HW_CSUM;
7558 dev->tx_timeout = &s2io_tx_watchdog;
7559 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
7560 INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
7561 INIT_WORK(&sp->set_link_task, s2io_set_link);
7563 pci_save_state(sp->pdev);
7565 /* Setting swapper control on the NIC, for proper reset operation */
7566 if (s2io_set_swapper(sp)) {
7567 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
7568 dev->name);
7569 ret = -EAGAIN;
7570 goto set_swap_failed;
7573 /* Verify whether the Herc works in the slot it's placed into */
7574 if (sp->device_type & XFRAME_II_DEVICE) {
7575 mode = s2io_verify_pci_mode(sp);
7576 if (mode < 0) {
7577 DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
7578 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
7579 ret = -EBADSLT;
7580 goto set_swap_failed;
7584 /* Not needed for Herc */
7585 if (sp->device_type & XFRAME_I_DEVICE) {
7587 * Fix for all "FFs" MAC address problems observed on
7588 * Alpha platforms
7590 fix_mac_address(sp);
7591 s2io_reset(sp);
7595 * MAC address initialization.
7596 * For now only one mac address will be read and used.
7598 bar0 = sp->bar0;
7599 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
7600 RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
7601 writeq(val64, &bar0->rmac_addr_cmd_mem);
7602 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
7603 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET);
7604 tmp64 = readq(&bar0->rmac_addr_data0_mem);
7605 mac_down = (u32) tmp64;
7606 mac_up = (u32) (tmp64 >> 32);
7608 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
7609 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
7610 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
7611 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
7612 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
7613 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
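/*
 * Worked example of the unpacking above: if the address registers
 * returned tmp64 = 0x00123456789A0000ULL, then mac_up = 0x00123456
 * and mac_down = 0x789A0000, yielding the MAC address
 * 00:12:34:56:78:9A.
 */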
7615 /* Set the factory defined MAC address initially */
7616 dev->addr_len = ETH_ALEN;
7617 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
7619 /* Store the values of the MSIX table in the s2io_nic structure */
7620 store_xmsi_data(sp);
7621 /* reset Nic and bring it to known state */
7622 s2io_reset(sp);
7625 * Initialize the tasklet status and link state flags
7626 * and the card state parameter
7628 sp->tasklet_status = 0;
7629 sp->state = 0;
7631 /* Initialize spinlocks */
7632 spin_lock_init(&sp->tx_lock);
7634 if (!napi)
7635 spin_lock_init(&sp->put_lock);
7636 spin_lock_init(&sp->rx_lock);
7639 * SXE-002: Configure link and activity LED to init state
7640 * on driver load.
7642 subid = sp->pdev->subsystem_device;
7643 if ((subid & 0xFF) >= 0x07) {
7644 val64 = readq(&bar0->gpio_control);
7645 val64 |= 0x0000800000000000ULL;
7646 writeq(val64, &bar0->gpio_control);
7647 val64 = 0x0411040400000000ULL;
7648 writeq(val64, (void __iomem *) bar0 + 0x2700);
7649 val64 = readq(&bar0->gpio_control);
7652 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
7654 if (register_netdev(dev)) {
7655 DBG_PRINT(ERR_DBG, "Device registration failed\n");
7656 ret = -ENODEV;
7657 goto register_failed;
7659 s2io_vpd_read(sp);
7660 DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2007 Neterion Inc.\n");
7661 DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
7662 sp->product_name, pdev->revision);
7663 DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
7664 s2io_driver_version);
7665 DBG_PRINT(ERR_DBG, "%s: MAC ADDR: %s\n",
7666 dev->name, print_mac(mac, dev->dev_addr));
7667 DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num);
7668 if (sp->device_type & XFRAME_II_DEVICE) {
7669 mode = s2io_print_pci_mode(sp);
7670 if (mode < 0) {
7671 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
7672 ret = -EBADSLT;
7673 unregister_netdev(dev);
7674 goto set_swap_failed;
7677 switch(sp->rxd_mode) {
7678 case RXD_MODE_1:
7679 DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
7680 dev->name);
7681 break;
7682 case RXD_MODE_3B:
7683 DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
7684 dev->name);
7685 break;
7688 if (napi)
7689 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
7690 switch(sp->config.intr_type) {
7691 case INTA:
7692 DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
7693 break;
7694 case MSI_X:
7695 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
7696 break;
7698 if (sp->lro)
7699 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
7700 dev->name);
7701 if (ufo)
7702 DBG_PRINT(ERR_DBG, "%s: UDP Fragmentation Offload(UFO)"
7703 " enabled\n", dev->name);
7704 /* Initialize device name */
7705 sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
7708 * Mark the link state as off at this point; when the link change
7709 * interrupt comes, the state will be automatically changed to
7710 * the right state.
7712 netif_carrier_off(dev);
7714 return 0;
7716 register_failed:
7717 set_swap_failed:
7718 iounmap(sp->bar1);
7719 bar1_remap_failed:
7720 iounmap(sp->bar0);
7721 bar0_remap_failed:
7722 mem_alloc_failed:
7723 free_shared_mem(sp);
7724 pci_disable_device(pdev);
7725 pci_release_regions(pdev);
7726 pci_set_drvdata(pdev, NULL);
7727 free_netdev(dev);
7729 return ret;
7733 * s2io_rem_nic - Free the PCI device
7734 * @pdev: structure containing the PCI related information of the device.
7735 * Description: This function is called by the PCI subsystem to release a
7736 * PCI device and free up all resources held by the device. This could
7737 * be in response to a Hot plug event or when the driver is to be removed
7738 * from memory.
7741 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
7743 struct net_device *dev =
7744 (struct net_device *) pci_get_drvdata(pdev);
7745 struct s2io_nic *sp;
7747 if (dev == NULL) {
7748 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
7749 return;
7752 flush_scheduled_work();
7754 sp = dev->priv;
7755 unregister_netdev(dev);
7757 free_shared_mem(sp);
7758 iounmap(sp->bar0);
7759 iounmap(sp->bar1);
7760 pci_release_regions(pdev);
7761 pci_set_drvdata(pdev, NULL);
7762 free_netdev(dev);
7763 pci_disable_device(pdev);
7767 * s2io_starter - Entry point for the driver
7768 * Description: This function is the entry point for the driver. It
7769 * registers the driver with the PCI subsystem.
7772 int __init s2io_starter(void)
7774 return pci_register_driver(&s2io_driver);
7778 * s2io_closer - Cleanup routine for the driver
7779 * Description: This function is the cleanup routine for the driver. It
7780 * unregisters the driver.
7782 static __exit void s2io_closer(void)
7784 pci_unregister_driver(&s2io_driver);
7785 DBG_PRINT(INIT_DBG, "cleanup done\n");
7788 module_init(s2io_starter);
7789 module_exit(s2io_closer);
7791 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
7792 struct tcphdr **tcp, struct RxD_t *rxdp)
7794 int ip_off;
7795 u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
7797 if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
7798 DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n",
7799 __FUNCTION__);
7800 return -1;
7803 /* TODO:
7804 * By default the VLAN field in the MAC header is stripped by the card;
7805 * if this feature is turned off in the rx_pa_cfg register, then the
7806 * ip_off field has to be shifted by a further 2 bytes.
7808 switch (l2_type) {
7809 case 0: /* DIX type */
7810 case 4: /* DIX type with VLAN */
7811 ip_off = HEADER_ETHERNET_II_802_3_SIZE;
7812 break;
7813 /* LLC, SNAP etc are considered non-mergeable */
7814 default:
7815 return -1;
7818 *ip = (struct iphdr *)((u8 *)buffer + ip_off);
7819 ip_len = (u8)((*ip)->ihl);
7820 ip_len <<= 2;
7821 *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
7823 return 0;
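/*
 * Worked example of the offsets above: for a plain DIX frame ip_off is
 * HEADER_ETHERNET_II_802_3_SIZE (the 14-byte Ethernet header), and
 * with a minimal IP header (ihl == 5, i.e. 20 bytes) the TCP header
 * starts at buffer + 14 + 20 = buffer + 34.
 */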
7826 static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
7827 struct tcphdr *tcp)
7829 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7830 if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
7831 (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
7832 return -1;
7833 return 0;
7836 static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
7838 return(ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2));
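/*
 * Example of the arithmetic above: a 1500-byte IP datagram with a bare
 * IP header (ihl == 5, i.e. 20 bytes) and a TCP header carrying the
 * timestamp option (doff == 8, i.e. 32 bytes) has a TCP payload of
 * 1500 - 20 - 32 = 1448 bytes.
 */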
7841 static void initiate_new_session(struct lro *lro, u8 *l2h,
7842 struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len)
7844 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7845 lro->l2h = l2h;
7846 lro->iph = ip;
7847 lro->tcph = tcp;
7848 lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
7849 lro->tcp_ack = ntohl(tcp->ack_seq);
7850 lro->sg_num = 1;
7851 lro->total_len = ntohs(ip->tot_len);
7852 lro->frags_len = 0;
7854 * Check if we saw a TCP timestamp. Other consistency checks have
7855 * already been done.
7857 if (tcp->doff == 8) {
7858 u32 *ptr;
7859 ptr = (u32 *)(tcp+1);
7860 lro->saw_ts = 1;
7861 lro->cur_tsval = *(ptr+1);
7862 lro->cur_tsecr = *(ptr+2);
7864 lro->in_use = 1;
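/*
 * Option layout assumed above (and enforced by
 * verify_l3_l4_lro_capable() below): doff == 8 means 12 bytes of TCP
 * options, laid out as NOP, NOP, timestamp kind and length, then the
 * 4-byte tsval and 4-byte tsecr. With ptr at the start of the options,
 * *(ptr+1) is therefore tsval and *(ptr+2) is tsecr.
 */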
7867 static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
7869 struct iphdr *ip = lro->iph;
7870 struct tcphdr *tcp = lro->tcph;
7871 __sum16 nchk;
7872 struct stat_block *statinfo = sp->mac_control.stats_info;
7873 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7875 /* Update L3 header */
7876 ip->tot_len = htons(lro->total_len);
7877 ip->check = 0;
7878 nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
7879 ip->check = nchk;
7881 /* Update L4 header */
7882 tcp->ack_seq = lro->tcp_ack;
7883 tcp->window = lro->window;
7885 /* Update tsecr field if this session has timestamps enabled */
7886 if (lro->saw_ts) {
7887 u32 *ptr = (u32 *)(tcp + 1);
7888 *(ptr+2) = lro->cur_tsecr;
7891 /* Update counters required for calculation of
7892 * average no. of packets aggregated.
7894 statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num;
7895 statinfo->sw_stat.num_aggregations++;
7898 static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
7899 struct tcphdr *tcp, u32 l4_pyld)
7901 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7902 lro->total_len += l4_pyld;
7903 lro->frags_len += l4_pyld;
7904 lro->tcp_next_seq += l4_pyld;
7905 lro->sg_num++;
7907 /* Update ack seq no. and window advertisement (from this pkt) in LRO object */
7908 lro->tcp_ack = tcp->ack_seq;
7909 lro->window = tcp->window;
7911 if (lro->saw_ts) {
7912 u32 *ptr;
7913 /* Update tsecr and tsval from this packet */
7914 ptr = (u32 *) (tcp + 1);
7915 lro->cur_tsval = *(ptr + 1);
7916 lro->cur_tsecr = *(ptr + 2);
7920 static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
7921 struct tcphdr *tcp, u32 tcp_pyld_len)
7923 u8 *ptr;
7925 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7927 if (!tcp_pyld_len) {
7928 /* Runt frame or a pure ack */
7929 return -1;
7932 if (ip->ihl != 5) /* IP has options */
7933 return -1;
7935 /* If we see CE codepoint in IP header, packet is not mergeable */
7936 if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
7937 return -1;
7939 /* If we see ECE or CWR flags in TCP header, packet is not mergeable */
7940 if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
7941 tcp->ece || tcp->cwr || !tcp->ack) {
7943 * Currently we recognize only the ACK control flag; any
7944 * other control field being set would result in
7945 * flushing the LRO session
7947 return -1;
7951 * Allow only one TCP timestamp option. Don't aggregate if
7952 * any other options are detected.
7954 if (tcp->doff != 5 && tcp->doff != 8)
7955 return -1;
7957 if (tcp->doff == 8) {
7958 ptr = (u8 *)(tcp + 1);
7959 while (*ptr == TCPOPT_NOP)
7960 ptr++;
7961 if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
7962 return -1;
7964 /* Ensure timestamp value increases monotonically */
7965 if (l_lro)
7966 if (l_lro->cur_tsval > *((u32 *)(ptr+2)))
7967 return -1;
7969 /* timestamp echo reply should be non-zero */
7970 if (*((u32 *)(ptr+6)) == 0)
7971 return -1;
7974 return 0;
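/*
 * Return-code contract of s2io_club_tcp_session() below, as implied by
 * the cases in its switch: 0 - no free LRO session (*lro is NULL) and
 * the packet could not be aggregated; 1 - packet aggregated into an
 * existing session; 2 - session must be flushed (out-of-order segment
 * or a packet that fails the L3/L4 checks); 3 - a new session was
 * initiated for this packet; 4 - the session reached
 * lro_max_aggr_per_sess and should be flushed; 5 - packet is not
 * L3/L4 LRO-capable and should be sent up unaggregated.
 */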
7977 static int
7978 s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro,
7979 struct RxD_t *rxdp, struct s2io_nic *sp)
7981 struct iphdr *ip;
7982 struct tcphdr *tcph;
7983 int ret = 0, i;
7985 if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
7986 rxdp))) {
7987 DBG_PRINT(INFO_DBG,"IP Saddr: %x Daddr: %x\n",
7988 ip->saddr, ip->daddr);
7989 } else {
7990 return ret;
7993 tcph = (struct tcphdr *)*tcp;
7994 *tcp_len = get_l4_pyld_length(ip, tcph);
7995 for (i=0; i<MAX_LRO_SESSIONS; i++) {
7996 struct lro *l_lro = &sp->lro0_n[i];
7997 if (l_lro->in_use) {
7998 if (check_for_socket_match(l_lro, ip, tcph))
7999 continue;
8000 /* Sock pair matched */
8001 *lro = l_lro;
8003 if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
8004 DBG_PRINT(INFO_DBG, "%s: Out of order. Expected "
8005 "0x%x, actual 0x%x\n", __FUNCTION__,
8006 (*lro)->tcp_next_seq,
8007 ntohl(tcph->seq));
8009 sp->mac_control.stats_info->
8010 sw_stat.outof_sequence_pkts++;
8011 ret = 2;
8012 break;
8015 if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,*tcp_len))
8016 ret = 1; /* Aggregate */
8017 else
8018 ret = 2; /* Flush both */
8019 break;
8023 if (ret == 0) {
8024 /* Before searching for available LRO objects,
8025 * check if the pkt is L3/L4 aggregatable. If not,
8026 * don't create a new LRO session. Just send this
8027 * packet up.
8029 if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) {
8030 return 5;
8033 for (i=0; i<MAX_LRO_SESSIONS; i++) {
8034 struct lro *l_lro = &sp->lro0_n[i];
8035 if (!(l_lro->in_use)) {
8036 *lro = l_lro;
8037 ret = 3; /* Begin anew */
8038 break;
8043 if (ret == 0) { /* sessions exceeded */
8044 DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n",
8045 __FUNCTION__);
8046 *lro = NULL;
8047 return ret;
8050 switch (ret) {
8051 case 3:
8052 initiate_new_session(*lro, buffer, ip, tcph, *tcp_len);
8053 break;
8054 case 2:
8055 update_L3L4_header(sp, *lro);
8056 break;
8057 case 1:
8058 aggregate_new_rx(*lro, ip, tcph, *tcp_len);
8059 if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
8060 update_L3L4_header(sp, *lro);
8061 ret = 4; /* Flush the LRO */
8063 break;
8064 default:
8065 DBG_PRINT(ERR_DBG, "%s: Don't know, can't say!!\n",
8066 __FUNCTION__);
8067 break;
8070 return ret;
8073 static void clear_lro_session(struct lro *lro)
8077 memset(lro, 0, sizeof(struct lro));
8080 static void queue_rx_frame(struct sk_buff *skb)
8082 struct net_device *dev = skb->dev;
8084 skb->protocol = eth_type_trans(skb, dev);
8085 if (napi)
8086 netif_receive_skb(skb);
8087 else
8088 netif_rx(skb);
8091 static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
8092 struct sk_buff *skb,
8093 u32 tcp_len)
8095 struct sk_buff *first = lro->parent;
8097 first->len += tcp_len;
8098 first->data_len = lro->frags_len;
8099 skb_pull(skb, (skb->len - tcp_len));
8100 if (skb_shinfo(first)->frag_list)
8101 lro->last_frag->next = skb;
8102 else
8103 skb_shinfo(first)->frag_list = skb;
8104 first->truesize += skb->truesize;
8105 lro->last_frag = skb;
8106 sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
8107 return;
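/*
 * The chaining above is the usual frag_list pattern: the first skb of
 * the session acts as parent, each later segment is skb_pull()ed down
 * to its TCP payload and linked onto the parent's frag_list (with
 * lro->last_frag avoiding a walk of the list), while len, data_len and
 * truesize on the parent are kept consistent with the appended bytes.
 */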
8111 * s2io_io_error_detected - called when PCI error is detected
8112 * @pdev: Pointer to PCI device
8113 * @state: The current pci connection state
8115 * This function is called after a PCI bus error affecting
8116 * this device has been detected.
8118 static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
8119 pci_channel_state_t state)
8121 struct net_device *netdev = pci_get_drvdata(pdev);
8122 struct s2io_nic *sp = netdev->priv;
8124 netif_device_detach(netdev);
8126 if (netif_running(netdev)) {
8127 /* Bring down the card, while avoiding PCI I/O */
8128 do_s2io_card_down(sp, 0);
8130 pci_disable_device(pdev);
8132 return PCI_ERS_RESULT_NEED_RESET;
8136 * s2io_io_slot_reset - called after the pci bus has been reset.
8137 * @pdev: Pointer to PCI device
8139 * Restart the card from scratch, as if from a cold-boot.
8140 * At this point, the card has experienced a hard reset,
8141 * followed by fixups by BIOS, and has its config space
8142 * set up identically to what it was at cold boot.
8144 static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
8146 struct net_device *netdev = pci_get_drvdata(pdev);
8147 struct s2io_nic *sp = netdev->priv;
8149 if (pci_enable_device(pdev)) {
8150 printk(KERN_ERR "s2io: "
8151 "Cannot re-enable PCI device after reset.\n");
8152 return PCI_ERS_RESULT_DISCONNECT;
8155 pci_set_master(pdev);
8156 s2io_reset(sp);
8158 return PCI_ERS_RESULT_RECOVERED;
8162 * s2io_io_resume - called when traffic can start flowing again.
8163 * @pdev: Pointer to PCI device
8165 * This callback is called when the error recovery driver tells
8166 * us that it's OK to resume normal operation.
8168 static void s2io_io_resume(struct pci_dev *pdev)
8170 struct net_device *netdev = pci_get_drvdata(pdev);
8171 struct s2io_nic *sp = netdev->priv;
8173 if (netif_running(netdev)) {
8174 if (s2io_card_up(sp)) {
8175 printk(KERN_ERR "s2io: "
8176 "Can't bring device back up after reset.\n");
8177 return;
8180 if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
8181 s2io_card_down(sp);
8182 printk(KERN_ERR "s2io: "
8183 "Can't resetore mac addr after reset.\n");
8184 return;
8188 netif_device_attach(netdev);
8189 netif_wake_queue(netdev);
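/*
 * The three handlers above implement the standard PCI error recovery
 * sequence: error_detected() quiesces the device and requests a slot
 * reset, slot_reset() re-enables and resets the card, and resume()
 * brings the interface back up. A sketch of how they would typically
 * be wired up (the variable name here is illustrative; the fields are
 * those of struct pci_error_handlers):
 *
 *	static struct pci_error_handlers s2io_err_handler = {
 *		.error_detected = s2io_io_error_detected,
 *		.slot_reset = s2io_io_slot_reset,
 *		.resume = s2io_io_resume,
 *	};
 */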