Staging: add sxg network driver
drivers/staging/sxg/sxg.c
1 /**************************************************************************
3 * Copyright (C) 2000-2008 Alacritech, Inc. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above
12 * copyright notice, this list of conditions and the following
13 * disclaimer in the documentation and/or other materials provided
14 * with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY ALACRITECH, INC. ``AS IS'' AND ANY
17 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ALACRITECH, INC. OR
20 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
23 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
24 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
25 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
26 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
29 * The views and conclusions contained in the software and documentation
30 * are those of the authors and should not be interpreted as representing
31 * official policies, either expressed or implied, of Alacritech, Inc.
33 **************************************************************************/
36 * FILENAME: sxg.c
38 * The SXG driver for Alacritech's 10Gbe products.
40 * NOTE: This is the standard, non-accelerated version of Alacritech's
41 * IS-NIC driver.
44 #include <linux/kernel.h>
45 #include <linux/string.h>
46 #include <linux/errno.h>
47 #include <linux/module.h>
48 #include <linux/moduleparam.h>
49 #include <linux/ioport.h>
50 #include <linux/slab.h>
51 #include <linux/interrupt.h>
52 #include <linux/timer.h>
53 #include <linux/pci.h>
54 #include <linux/spinlock.h>
55 #include <linux/init.h>
56 #include <linux/netdevice.h>
57 #include <linux/etherdevice.h>
58 #include <linux/ethtool.h>
59 #include <linux/skbuff.h>
60 #include <linux/delay.h>
61 #include <linux/types.h>
62 #include <linux/dma-mapping.h>
63 #include <linux/mii.h>
65 #define SLIC_DUMP_ENABLED 0
66 #define SLIC_GET_STATS_ENABLED 0
67 #define LINUX_FREES_ADAPTER_RESOURCES 1
68 #define SXG_OFFLOAD_IP_CHECKSUM 0
69 #define SXG_POWER_MANAGEMENT_ENABLED 0
70 #define VPCI 0
71 #define DBG 1
72 #define ATK_DEBUG 1
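/*
 * Compile-time feature switches for this staging driver.  Most of them
 * gate optional or not-yet-ported code paths (dump support, get_stats,
 * IP checksum offload, power management).
 */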
74 #include "sxg_os.h"
75 #include "sxghw.h"
76 #include "sxghif.h"
77 #include "sxg.h"
78 #include "sxgdbg.h"
80 #include "sxgphycode.h"
81 #include "saharadbgdownload.h"
83 static int sxg_allocate_buffer_memory(p_adapter_t adapter, u32 Size, SXG_BUFFER_TYPE BufferType);
84 static void sxg_allocate_rcvblock_complete(p_adapter_t adapter, void * RcvBlock, dma_addr_t PhysicalAddress, u32 Length);
85 static void sxg_allocate_sgl_buffer_complete(p_adapter_t adapter, PSXG_SCATTER_GATHER SxgSgl, dma_addr_t PhysicalAddress, u32 Length);
87 static void sxg_mcast_init_crc32(void);
89 static int sxg_entry_open(p_net_device dev);
90 static int sxg_entry_halt(p_net_device dev);
91 static int sxg_ioctl(p_net_device dev, struct ifreq *rq, int cmd);
92 static int sxg_send_packets(struct sk_buff *skb, p_net_device dev);
93 static int sxg_transmit_packet(p_adapter_t adapter, struct sk_buff *skb);
94 static void sxg_dumb_sgl(PSCATTER_GATHER_LIST pSgl, PSXG_SCATTER_GATHER SxgSgl);
96 static void sxg_handle_interrupt(p_adapter_t adapter);
97 static int sxg_process_isr(p_adapter_t adapter, u32 MessageId);
98 static u32 sxg_process_event_queue(p_adapter_t adapter, u32 RssId);
99 static void sxg_complete_slow_send(p_adapter_t adapter);
100 static struct sk_buff *sxg_slow_receive(p_adapter_t adapter, PSXG_EVENT Event);
101 static void sxg_process_rcv_error(p_adapter_t adapter, u32 ErrorStatus);
102 static bool sxg_mac_filter(p_adapter_t adapter,
103 p_ether_header EtherHdr, ushort length);
105 #if SLIC_GET_STATS_ENABLED
106 static struct net_device_stats *sxg_get_stats(p_net_device dev);
107 #endif
109 static int sxg_mac_set_address(p_net_device dev, void * ptr);
111 static void sxg_adapter_set_hwaddr(p_adapter_t adapter);
113 static void sxg_unmap_mmio_space(p_adapter_t adapter);
114 static void sxg_mcast_set_mask(p_adapter_t adapter);
116 static int sxg_initialize_adapter(p_adapter_t adapter);
117 static void sxg_stock_rcv_buffers(p_adapter_t adapter);
118 static void sxg_complete_descriptor_blocks(p_adapter_t adapter, unsigned char Index);
119 static int sxg_initialize_link(p_adapter_t adapter);
120 static int sxg_phy_init(p_adapter_t adapter);
121 static void sxg_link_event(p_adapter_t adapter);
122 static SXG_LINK_STATE sxg_get_link_state(p_adapter_t adapter);
123 static void sxg_link_state(p_adapter_t adapter, SXG_LINK_STATE LinkState);
124 static int sxg_write_mdio_reg(p_adapter_t adapter,
125 u32 DevAddr, u32 RegAddr, u32 Value);
126 static int sxg_read_mdio_reg(p_adapter_t adapter,
127 u32 DevAddr, u32 RegAddr, u32 * pValue);
128 static void sxg_mcast_set_list(p_net_device dev);
132 #define XXXTODO 0
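/*
 * XXXTODO gates code carried over from the original driver that has not
 * been ported to Linux yet (RSS scheduling, VLAN extraction, the
 * set_mac_address/set_multicast_list entry points, etc.).
 */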
134 static unsigned int sxg_first_init = 1;
135 static char *sxg_banner =
136 "Alacritech SLIC Technology(tm) Server and Storage 10Gbe Accelerator (Non-Accelerated)\n";
138 static int sxg_debug = 1;
139 static int debug = -1;
140 static p_net_device head_netdevice = NULL;
142 static sxgbase_driver_t sxg_global = {
 143         .dynamic_intagg = 1,
};
145 static int intagg_delay = 100;
146 static u32 dynamic_intagg = 0;
148 #define DRV_NAME "sxg"
149 #define DRV_VERSION "1.0.1"
150 #define DRV_AUTHOR "Alacritech, Inc. Engineering"
 151 #define DRV_DESCRIPTION "Alacritech SLIC Technology(tm) Non-Accelerated 10Gbe Driver"
152 #define DRV_COPYRIGHT "Copyright 2000-2008 Alacritech, Inc. All rights reserved."
154 MODULE_AUTHOR(DRV_AUTHOR);
155 MODULE_DESCRIPTION(DRV_DESCRIPTION);
156 MODULE_LICENSE("GPL");
158 module_param(dynamic_intagg, int, 0);
159 MODULE_PARM_DESC(dynamic_intagg, "Dynamic Interrupt Aggregation Setting");
160 module_param(intagg_delay, int, 0);
161 MODULE_PARM_DESC(intagg_delay, "uSec Interrupt Aggregation Delay");
163 static struct pci_device_id sxg_pci_tbl[] __devinitdata = {
164 {PCI_DEVICE(SXG_VENDOR_ID, SXG_DEVICE_ID)},
 165         {0,}
};
167 MODULE_DEVICE_TABLE(pci, sxg_pci_tbl);
169 /***********************************************************************
170 ************************************************************************
171 ************************************************************************
172 ************************************************************************
173 ************************************************************************/
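/*
 * Register access helpers.  sxg_reg32_write() does a single MMIO write
 * and, when 'flush' is set, issues a memory barrier so the write is not
 * reordered against whatever follows.  sxg_reg64_write() splits a 64-bit
 * value into two 32-bit writes - the high word goes to the per-queue
 * 'Upper' ucode register and the low word to the target register - and
 * holds Bit64RegLock across the pair so another CPU cannot change the
 * Upper half in between.
 */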
175 static inline void sxg_reg32_write(void __iomem *reg, u32 value, bool flush)
177 writel(value, reg);
178 if (flush)
179 mb();
182 static inline void sxg_reg64_write(p_adapter_t adapter, void __iomem *reg,
183 u64 value, u32 cpu)
185 u32 value_high = (u32) (value >> 32);
186 u32 value_low = (u32) (value & 0x00000000FFFFFFFF);
187 unsigned long flags;
189 spin_lock_irqsave(&adapter->Bit64RegLock, flags);
190 writel(value_high, (void __iomem *)(&adapter->UcodeRegs[cpu].Upper));
191 writel(value_low, reg);
192 spin_unlock_irqrestore(&adapter->Bit64RegLock, flags);
195 static void sxg_init_driver(void)
197 if (sxg_first_init) {
198 DBG_ERROR("sxg: %s sxg_first_init set jiffies[%lx]\n",
199 __FUNCTION__, jiffies);
200 sxg_first_init = 0;
201 spin_lock_init(&sxg_global.driver_lock);
205 static void sxg_dbg_macaddrs(p_adapter_t adapter)
207 DBG_ERROR(" (%s) curr %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
208 adapter->netdev->name, adapter->currmacaddr[0],
209 adapter->currmacaddr[1], adapter->currmacaddr[2],
210 adapter->currmacaddr[3], adapter->currmacaddr[4],
211 adapter->currmacaddr[5]);
212 DBG_ERROR(" (%s) mac %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
213 adapter->netdev->name, adapter->macaddr[0],
214 adapter->macaddr[1], adapter->macaddr[2],
215 adapter->macaddr[3], adapter->macaddr[4],
216 adapter->macaddr[5]);
217 return;
220 // SXG Globals
221 static SXG_DRIVER SxgDriver;
223 #ifdef ATKDBG
224 static sxg_trace_buffer_t LSxgTraceBuffer;
225 #endif /* ATKDBG */
226 static sxg_trace_buffer_t *SxgTraceBuffer = NULL;
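/*
 * Trace support: when ATKDBG is defined, SxgTraceBuffer is pointed at
 * the static LSxgTraceBuffer in sxg_entry_probe() and the SXG_TRACE()
 * calls throughout the driver record their events into it.
 */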
229 * sxg_download_microcode
231 * Download Microcode to Sahara adapter
233 * Arguments -
234 * adapter - A pointer to our adapter structure
235 * UcodeSel - microcode file selection
237 * Return
 238  *	bool
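 *
 * The load sequence below is: reset the card, write each 96-bit
 * instruction as three 32-bit words (with the extra DataLow rewrite to
 * work around the Sahara download bug), read every instruction back to
 * check parity and contents, set the GO bit, poll CardUp for the 0xCAFE
 * handshake, and finally write LoadSync so the card can reuse that
 * memory.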
240 static bool sxg_download_microcode(p_adapter_t adapter, SXG_UCODE_SEL UcodeSel)
242 PSXG_HW_REGS HwRegs = adapter->HwRegs;
243 u32 Section;
244 u32 ThisSectionSize;
245 u32 * Instruction = NULL;
246 u32 BaseAddress, AddressOffset, Address;
247 // u32 Failure;
248 u32 ValueRead;
249 u32 i;
250 u32 numSections = 0;
251 u32 sectionSize[16];
252 u32 sectionStart[16];
254 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DnldUcod",
255 adapter, 0, 0, 0);
256 DBG_ERROR("sxg: %s ENTER\n", __FUNCTION__);
258 switch (UcodeSel) {
259 case SXG_UCODE_SAHARA: // Sahara operational ucode
260 numSections = SNumSections;
261 for (i = 0; i < numSections; i++) {
262 sectionSize[i] = SSectionSize[i];
263 sectionStart[i] = SSectionStart[i];
265 break;
266 default:
267 printk(KERN_ERR KBUILD_MODNAME
268 ": Woah, big error with the microcode!\n");
269 break;
272 DBG_ERROR("sxg: RESET THE CARD\n");
273 // First, reset the card
274 WRITE_REG(HwRegs->Reset, 0xDEAD, FLUSH);
276 // Download each section of the microcode as specified in
277 // its download file. The *download.c file is generated using
278 // the saharaobjtoc facility which converts the metastep .obj
 279 	// file to a .c file which contains a two dimensional array.
280 for (Section = 0; Section < numSections; Section++) {
281 DBG_ERROR("sxg: SECTION # %d\n", Section);
282 switch (UcodeSel) {
283 case SXG_UCODE_SAHARA:
284 Instruction = (u32 *) & SaharaUCode[Section][0];
285 break;
286 default:
287 ASSERT(0);
288 break;
290 BaseAddress = sectionStart[Section];
 291 		ThisSectionSize = sectionSize[Section] / 12;	// Size in instructions (12 bytes each)
292 for (AddressOffset = 0; AddressOffset < ThisSectionSize;
293 AddressOffset++) {
294 Address = BaseAddress + AddressOffset;
295 ASSERT((Address & ~MICROCODE_ADDRESS_MASK) == 0);
296 // Write instruction bits 31 - 0
297 WRITE_REG(HwRegs->UcodeDataLow, *Instruction, FLUSH);
298 // Write instruction bits 63-32
299 WRITE_REG(HwRegs->UcodeDataMiddle, *(Instruction + 1),
300 FLUSH);
301 // Write instruction bits 95-64
302 WRITE_REG(HwRegs->UcodeDataHigh, *(Instruction + 2),
303 FLUSH);
304 // Write instruction address with the WRITE bit set
305 WRITE_REG(HwRegs->UcodeAddr,
306 (Address | MICROCODE_ADDRESS_WRITE), FLUSH);
307 // Sahara bug in the ucode download logic - the write to DataLow
308 // for the next instruction could get corrupted. To avoid this,
309 // write to DataLow again for this instruction (which may get
310 // corrupted, but it doesn't matter), then increment the address
311 // and write the data for the next instruction to DataLow. That
312 // write should succeed.
313 WRITE_REG(HwRegs->UcodeDataLow, *Instruction, TRUE);
 314 			// Advance 3 u32s to start of next instruction
315 Instruction += 3;
318 // Now repeat the entire operation reading the instruction back and
319 // checking for parity errors
320 for (Section = 0; Section < numSections; Section++) {
321 DBG_ERROR("sxg: check SECTION # %d\n", Section);
322 switch (UcodeSel) {
323 case SXG_UCODE_SAHARA:
324 Instruction = (u32 *) & SaharaUCode[Section][0];
325 break;
326 default:
327 ASSERT(0);
328 break;
330 BaseAddress = sectionStart[Section];
 331 		ThisSectionSize = sectionSize[Section] / 12;	// Size in instructions (12 bytes each)
332 for (AddressOffset = 0; AddressOffset < ThisSectionSize;
333 AddressOffset++) {
334 Address = BaseAddress + AddressOffset;
335 // Write the address with the READ bit set
336 WRITE_REG(HwRegs->UcodeAddr,
337 (Address | MICROCODE_ADDRESS_READ), FLUSH);
338 // Read it back and check parity bit.
339 READ_REG(HwRegs->UcodeAddr, ValueRead);
340 if (ValueRead & MICROCODE_ADDRESS_PARITY) {
341 DBG_ERROR("sxg: %s PARITY ERROR\n",
342 __FUNCTION__);
344 return (FALSE); // Parity error
346 ASSERT((ValueRead & MICROCODE_ADDRESS_MASK) == Address);
347 // Read the instruction back and compare
348 READ_REG(HwRegs->UcodeDataLow, ValueRead);
349 if (ValueRead != *Instruction) {
350 DBG_ERROR("sxg: %s MISCOMPARE LOW\n",
351 __FUNCTION__);
352 return (FALSE); // Miscompare
354 READ_REG(HwRegs->UcodeDataMiddle, ValueRead);
355 if (ValueRead != *(Instruction + 1)) {
356 DBG_ERROR("sxg: %s MISCOMPARE MIDDLE\n",
357 __FUNCTION__);
358 return (FALSE); // Miscompare
360 READ_REG(HwRegs->UcodeDataHigh, ValueRead);
361 if (ValueRead != *(Instruction + 2)) {
362 DBG_ERROR("sxg: %s MISCOMPARE HIGH\n",
363 __FUNCTION__);
364 return (FALSE); // Miscompare
 366 			// Advance 3 u32s to start of next instruction
367 Instruction += 3;
371 // Everything OK, Go.
372 WRITE_REG(HwRegs->UcodeAddr, MICROCODE_ADDRESS_GO, FLUSH);
374 // Poll the CardUp register to wait for microcode to initialize
 375 	// Give up after 10,000 attempts (500ms).
376 for (i = 0; i < 10000; i++) {
377 udelay(50);
378 READ_REG(adapter->UcodeRegs[0].CardUp, ValueRead);
379 if (ValueRead == 0xCAFE) {
380 DBG_ERROR("sxg: %s BOO YA 0xCAFE\n", __FUNCTION__);
381 break;
384 if (i == 10000) {
385 DBG_ERROR("sxg: %s TIMEOUT\n", __FUNCTION__);
387 return (FALSE); // Timeout
389 // Now write the LoadSync register. This is used to
390 // synchronize with the card so it can scribble on the memory
391 // that contained 0xCAFE from the "CardUp" step above
392 if (UcodeSel == SXG_UCODE_SAHARA) {
393 WRITE_REG(adapter->UcodeRegs[0].LoadSync, 0, FLUSH);
396 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDnldUcd",
397 adapter, 0, 0, 0);
398 DBG_ERROR("sxg: %s EXIT\n", __FUNCTION__);
400 return (TRUE);
404 * sxg_allocate_resources - Allocate memory and locks
406 * Arguments -
407 * adapter - A pointer to our adapter structure
409 * Return
410 * int
412 static int sxg_allocate_resources(p_adapter_t adapter)
414 int status;
415 u32 i;
416 u32 RssIds, IsrCount;
417 // PSXG_XMT_RING XmtRing;
418 // PSXG_RCV_RING RcvRing;
420 DBG_ERROR("%s ENTER\n", __FUNCTION__);
422 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocRes",
423 adapter, 0, 0, 0);
425 // Windows tells us how many CPUs it plans to use for
426 // RSS
427 RssIds = SXG_RSS_CPU_COUNT(adapter);
428 IsrCount = adapter->MsiEnabled ? RssIds : 1;
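	// One ISR slot per RSS queue when MSI is enabled; otherwise a single
	// shared ISR location is used for the line-based interrupt.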
430 DBG_ERROR("%s Setup the spinlocks\n", __FUNCTION__);
432 // Allocate spinlocks and initialize listheads first.
433 spin_lock_init(&adapter->RcvQLock);
434 spin_lock_init(&adapter->SglQLock);
435 spin_lock_init(&adapter->XmtZeroLock);
436 spin_lock_init(&adapter->Bit64RegLock);
437 spin_lock_init(&adapter->AdapterLock);
439 DBG_ERROR("%s Setup the lists\n", __FUNCTION__);
441 InitializeListHead(&adapter->FreeRcvBuffers);
442 InitializeListHead(&adapter->FreeRcvBlocks);
443 InitializeListHead(&adapter->AllRcvBlocks);
444 InitializeListHead(&adapter->FreeSglBuffers);
445 InitializeListHead(&adapter->AllSglBuffers);
 447 	// Mark these basic allocations done.  This flag essentially
448 // tells the SxgFreeResources routine that it can grab spinlocks
449 // and reference listheads.
450 adapter->BasicAllocations = TRUE;
451 // Main allocation loop. Start with the maximum supported by
452 // the microcode and back off if memory allocation
453 // fails. If we hit a minimum, fail.
455 for (;;) {
456 DBG_ERROR("%s Allocate XmtRings size[%x]\n", __FUNCTION__,
457 (sizeof(SXG_XMT_RING) * 1));
459 // Start with big items first - receive and transmit rings. At the moment
460 // I'm going to keep the ring size fixed and adjust the number of
 461 		// TCBs if we fail.  Later we might consider reducing the ring size as well.
462 adapter->XmtRings = pci_alloc_consistent(adapter->pcidev,
 463 							 sizeof(SXG_XMT_RING) * 1,
465 &adapter->PXmtRings);
466 DBG_ERROR("%s XmtRings[%p]\n", __FUNCTION__, adapter->XmtRings);
468 if (!adapter->XmtRings) {
469 goto per_tcb_allocation_failed;
471 memset(adapter->XmtRings, 0, sizeof(SXG_XMT_RING) * 1);
473 DBG_ERROR("%s Allocate RcvRings size[%x]\n", __FUNCTION__,
474 (sizeof(SXG_RCV_RING) * 1));
475 adapter->RcvRings =
476 pci_alloc_consistent(adapter->pcidev,
477 sizeof(SXG_RCV_RING) * 1,
478 &adapter->PRcvRings);
479 DBG_ERROR("%s RcvRings[%p]\n", __FUNCTION__, adapter->RcvRings);
480 if (!adapter->RcvRings) {
481 goto per_tcb_allocation_failed;
483 memset(adapter->RcvRings, 0, sizeof(SXG_RCV_RING) * 1);
484 break;
486 per_tcb_allocation_failed:
487 // an allocation failed. Free any successful allocations.
488 if (adapter->XmtRings) {
489 pci_free_consistent(adapter->pcidev,
 490 					    sizeof(SXG_XMT_RING) * 1, /* must match the allocation size above */
491 adapter->XmtRings,
492 adapter->PXmtRings);
493 adapter->XmtRings = NULL;
495 if (adapter->RcvRings) {
496 pci_free_consistent(adapter->pcidev,
 497 					    sizeof(SXG_RCV_RING) * 1, /* must match the allocation size above */
498 adapter->RcvRings,
499 adapter->PRcvRings);
500 adapter->RcvRings = NULL;
502 // Loop around and try again....
505 DBG_ERROR("%s Initialize RCV ZERO and XMT ZERO rings\n", __FUNCTION__);
506 // Initialize rcv zero and xmt zero rings
507 SXG_INITIALIZE_RING(adapter->RcvRingZeroInfo, SXG_RCV_RING_SIZE);
508 SXG_INITIALIZE_RING(adapter->XmtRingZeroInfo, SXG_XMT_RING_SIZE);
510 // Sanity check receive data structure format
511 ASSERT((adapter->ReceiveBufferSize == SXG_RCV_DATA_BUFFER_SIZE) ||
512 (adapter->ReceiveBufferSize == SXG_RCV_JUMBO_BUFFER_SIZE));
513 ASSERT(sizeof(SXG_RCV_DESCRIPTOR_BLOCK) ==
514 SXG_RCV_DESCRIPTOR_BLOCK_SIZE);
516 // Allocate receive data buffers. We allocate a block of buffers and
517 // a corresponding descriptor block at once. See sxghw.h:SXG_RCV_BLOCK
518 for (i = 0; i < SXG_INITIAL_RCV_DATA_BUFFERS;
519 i += SXG_RCV_DESCRIPTORS_PER_BLOCK) {
520 sxg_allocate_buffer_memory(adapter,
521 SXG_RCV_BLOCK_SIZE(adapter->
522 ReceiveBufferSize),
523 SXG_BUFFER_TYPE_RCV);
525 // NBL resource allocation can fail in the 'AllocateComplete' routine, which
526 // doesn't return status. Make sure we got the number of buffers we requested
527 if (adapter->FreeRcvBufferCount < SXG_INITIAL_RCV_DATA_BUFFERS) {
528 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF6",
 529 			  adapter, adapter->FreeRcvBufferCount, SXG_MAX_ENTRIES, 0);
531 return (STATUS_RESOURCES);
534 DBG_ERROR("%s Allocate EventRings size[%x]\n", __FUNCTION__,
535 (sizeof(SXG_EVENT_RING) * RssIds));
537 // Allocate event queues.
538 adapter->EventRings = pci_alloc_consistent(adapter->pcidev,
539 sizeof(SXG_EVENT_RING) *
540 RssIds,
541 &adapter->PEventRings);
543 if (!adapter->EventRings) {
544 // Caller will call SxgFreeAdapter to clean up above allocations
545 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF8",
546 adapter, SXG_MAX_ENTRIES, 0, 0);
547 status = STATUS_RESOURCES;
548 goto per_tcb_allocation_failed;
550 memset(adapter->EventRings, 0, sizeof(SXG_EVENT_RING) * RssIds);
552 DBG_ERROR("%s Allocate ISR size[%x]\n", __FUNCTION__, IsrCount);
553 // Allocate ISR
554 adapter->Isr = pci_alloc_consistent(adapter->pcidev,
 555 					    sizeof(u32) * IsrCount, &adapter->PIsr);
556 if (!adapter->Isr) {
557 // Caller will call SxgFreeAdapter to clean up above allocations
558 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF9",
559 adapter, SXG_MAX_ENTRIES, 0, 0);
560 status = STATUS_RESOURCES;
561 goto per_tcb_allocation_failed;
563 memset(adapter->Isr, 0, sizeof(u32) * IsrCount);
565 DBG_ERROR("%s Allocate shared XMT ring zero index location size[%x]\n",
566 __FUNCTION__, sizeof(u32));
568 // Allocate shared XMT ring zero index location
569 adapter->XmtRingZeroIndex = pci_alloc_consistent(adapter->pcidev,
570 sizeof(u32),
571 &adapter->
572 PXmtRingZeroIndex);
573 if (!adapter->XmtRingZeroIndex) {
574 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF10",
575 adapter, SXG_MAX_ENTRIES, 0, 0);
576 status = STATUS_RESOURCES;
577 goto per_tcb_allocation_failed;
579 memset(adapter->XmtRingZeroIndex, 0, sizeof(u32));
581 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlcResS",
582 adapter, SXG_MAX_ENTRIES, 0, 0);
584 DBG_ERROR("%s EXIT\n", __FUNCTION__);
585 return (STATUS_SUCCESS);
589 * sxg_config_pci -
591 * Set up PCI Configuration space
593 * Arguments -
594 * pcidev - A pointer to our adapter structure
597 static void sxg_config_pci(struct pci_dev *pcidev)
599 u16 pci_command;
600 u16 new_command;
602 pci_read_config_word(pcidev, PCI_COMMAND, &pci_command);
603 DBG_ERROR("sxg: %s PCI command[%4.4x]\n", __FUNCTION__, pci_command);
604 // Set the command register
605 new_command = pci_command | (PCI_COMMAND_MEMORY | // Memory Space Enable
606 PCI_COMMAND_MASTER | // Bus master enable
607 PCI_COMMAND_INVALIDATE | // Memory write and invalidate
608 PCI_COMMAND_PARITY | // Parity error response
609 PCI_COMMAND_SERR | // System ERR
610 PCI_COMMAND_FAST_BACK); // Fast back-to-back
611 if (pci_command != new_command) {
612 DBG_ERROR("%s -- Updating PCI COMMAND register %4.4x->%4.4x.\n",
613 __FUNCTION__, pci_command, new_command);
614 pci_write_config_word(pcidev, PCI_COMMAND, new_command);
618 static int sxg_entry_probe(struct pci_dev *pcidev,
619 const struct pci_device_id *pci_tbl_entry)
621 static int did_version = 0;
622 int err;
623 struct net_device *netdev;
624 p_adapter_t adapter;
625 void __iomem *memmapped_ioaddr;
626 u32 status = 0;
627 ulong mmio_start = 0;
628 ulong mmio_len = 0;
630 DBG_ERROR("sxg: %s 2.6 VERSION ENTER jiffies[%lx] cpu %d\n",
631 __FUNCTION__, jiffies, smp_processor_id());
633 // Initialize trace buffer
634 #ifdef ATKDBG
635 SxgTraceBuffer = &LSxgTraceBuffer;
636 SXG_TRACE_INIT(SxgTraceBuffer, TRACE_NOISY);
637 #endif
639 sxg_global.dynamic_intagg = dynamic_intagg;
641 err = pci_enable_device(pcidev);
643 DBG_ERROR("Call pci_enable_device(%p) status[%x]\n", pcidev, err);
644 if (err) {
645 return err;
648 if (sxg_debug > 0 && did_version++ == 0) {
649 printk(KERN_INFO "%s\n", sxg_banner);
650 printk(KERN_INFO "%s\n", DRV_VERSION);
653 if (!(err = pci_set_dma_mask(pcidev, DMA_64BIT_MASK))) {
654 DBG_ERROR("pci_set_dma_mask(DMA_64BIT_MASK) successful\n");
655 } else {
656 if ((err = pci_set_dma_mask(pcidev, DMA_32BIT_MASK))) {
657 DBG_ERROR
658 ("No usable DMA configuration, aborting err[%x]\n",
659 err);
660 return err;
662 DBG_ERROR("pci_set_dma_mask(DMA_32BIT_MASK) successful\n");
665 DBG_ERROR("Call pci_request_regions\n");
667 err = pci_request_regions(pcidev, DRV_NAME);
668 if (err) {
669 DBG_ERROR("pci_request_regions FAILED err[%x]\n", err);
670 return err;
673 DBG_ERROR("call pci_set_master\n");
674 pci_set_master(pcidev);
676 DBG_ERROR("call alloc_etherdev\n");
677 netdev = alloc_etherdev(sizeof(adapter_t));
678 if (!netdev) {
679 err = -ENOMEM;
680 goto err_out_exit_sxg_probe;
682 DBG_ERROR("alloc_etherdev for slic netdev[%p]\n", netdev);
684 SET_NETDEV_DEV(netdev, &pcidev->dev);
686 pci_set_drvdata(pcidev, netdev);
687 adapter = netdev_priv(netdev);
688 adapter->netdev = netdev;
689 adapter->pcidev = pcidev;
691 mmio_start = pci_resource_start(pcidev, 0);
692 mmio_len = pci_resource_len(pcidev, 0);
694 DBG_ERROR("sxg: call ioremap(mmio_start[%lx], mmio_len[%lx])\n",
695 mmio_start, mmio_len);
697 memmapped_ioaddr = ioremap(mmio_start, mmio_len);
698 DBG_ERROR("sxg: %s MEMMAPPED_IOADDR [%p]\n", __FUNCTION__, memmapped_ioaddr);
699 if (!memmapped_ioaddr) {
700 DBG_ERROR("%s cannot remap MMIO region %lx @ %lx\n",
701 __FUNCTION__, mmio_len, mmio_start);
702 goto err_out_free_mmio_region;
705 DBG_ERROR("sxg: %s found Alacritech SXG PCI, MMIO at %p, start[%lx] len[%lx], IRQ %d.\n",
706 __func__, memmapped_ioaddr, mmio_start, mmio_len, pcidev->irq);
708 adapter->HwRegs = (void *) memmapped_ioaddr;
709 adapter->base_addr = memmapped_ioaddr;
711 mmio_start = pci_resource_start(pcidev, 2);
712 mmio_len = pci_resource_len(pcidev, 2);
714 DBG_ERROR("sxg: call ioremap(mmio_start[%lx], mmio_len[%lx])\n",
715 mmio_start, mmio_len);
717 memmapped_ioaddr = ioremap(mmio_start, mmio_len);
718 DBG_ERROR("sxg: %s MEMMAPPED_IOADDR [%p]\n", __func__, memmapped_ioaddr);
719 if (!memmapped_ioaddr) {
720 DBG_ERROR("%s cannot remap MMIO region %lx @ %lx\n",
721 __FUNCTION__, mmio_len, mmio_start);
722 goto err_out_free_mmio_region;
725 DBG_ERROR("sxg: %s found Alacritech SXG PCI, MMIO at %p, "
726 "start[%lx] len[%lx], IRQ %d.\n", __func__,
727 memmapped_ioaddr, mmio_start, mmio_len, pcidev->irq);
729 adapter->UcodeRegs = (void *)memmapped_ioaddr;
731 adapter->State = SXG_STATE_INITIALIZING;
732 // Maintain a list of all adapters anchored by
733 // the global SxgDriver structure.
734 adapter->Next = SxgDriver.Adapters;
735 SxgDriver.Adapters = adapter;
736 adapter->AdapterID = ++SxgDriver.AdapterID;
738 // Initialize CRC table used to determine multicast hash
739 sxg_mcast_init_crc32();
741 adapter->JumboEnabled = FALSE;
742 adapter->RssEnabled = FALSE;
743 if (adapter->JumboEnabled) {
744 adapter->FrameSize = JUMBOMAXFRAME;
745 adapter->ReceiveBufferSize = SXG_RCV_JUMBO_BUFFER_SIZE;
746 } else {
747 adapter->FrameSize = ETHERMAXFRAME;
748 adapter->ReceiveBufferSize = SXG_RCV_DATA_BUFFER_SIZE;
751 // status = SXG_READ_EEPROM(adapter);
752 // if (!status) {
753 // goto sxg_init_bad;
754 // }
756 DBG_ERROR("sxg: %s ENTER sxg_config_pci\n", __FUNCTION__);
757 sxg_config_pci(pcidev);
758 DBG_ERROR("sxg: %s EXIT sxg_config_pci\n", __FUNCTION__);
760 DBG_ERROR("sxg: %s ENTER sxg_init_driver\n", __FUNCTION__);
761 sxg_init_driver();
762 DBG_ERROR("sxg: %s EXIT sxg_init_driver\n", __FUNCTION__);
764 adapter->vendid = pci_tbl_entry->vendor;
765 adapter->devid = pci_tbl_entry->device;
766 adapter->subsysid = pci_tbl_entry->subdevice;
767 adapter->busnumber = pcidev->bus->number;
768 adapter->slotnumber = ((pcidev->devfn >> 3) & 0x1F);
769 adapter->functionnumber = (pcidev->devfn & 0x7);
770 adapter->memorylength = pci_resource_len(pcidev, 0);
771 adapter->irq = pcidev->irq;
772 adapter->next_netdevice = head_netdevice;
773 head_netdevice = netdev;
774 // adapter->chipid = chip_idx;
775 adapter->port = 0; //adapter->functionnumber;
776 adapter->cardindex = adapter->port;
778 // Allocate memory and other resources
779 DBG_ERROR("sxg: %s ENTER sxg_allocate_resources\n", __FUNCTION__);
780 status = sxg_allocate_resources(adapter);
781 DBG_ERROR("sxg: %s EXIT sxg_allocate_resources status %x\n",
782 __FUNCTION__, status);
783 if (status != STATUS_SUCCESS) {
784 goto err_out_unmap;
787 DBG_ERROR("sxg: %s ENTER sxg_download_microcode\n", __FUNCTION__);
788 if (sxg_download_microcode(adapter, SXG_UCODE_SAHARA)) {
789 DBG_ERROR("sxg: %s ENTER sxg_adapter_set_hwaddr\n",
790 __FUNCTION__);
791 sxg_adapter_set_hwaddr(adapter);
792 } else {
793 adapter->state = ADAPT_FAIL;
794 adapter->linkstate = LINK_DOWN;
795 DBG_ERROR("sxg_download_microcode FAILED status[%x]\n", status);
798 netdev->base_addr = (unsigned long)adapter->base_addr;
799 netdev->irq = adapter->irq;
800 netdev->open = sxg_entry_open;
801 netdev->stop = sxg_entry_halt;
802 netdev->hard_start_xmit = sxg_send_packets;
803 netdev->do_ioctl = sxg_ioctl;
804 #if XXXTODO
805 netdev->set_mac_address = sxg_mac_set_address;
806 #if SLIC_GET_STATS_ENABLED
807 netdev->get_stats = sxg_get_stats;
808 #endif
809 netdev->set_multicast_list = sxg_mcast_set_list;
810 #endif
812 strcpy(netdev->name, "eth%d");
813 // strcpy(netdev->name, pci_name(pcidev));
814 if ((err = register_netdev(netdev))) {
815 DBG_ERROR("Cannot register net device, aborting. %s\n",
816 netdev->name);
817 goto err_out_unmap;
820 DBG_ERROR
821 ("sxg: %s addr 0x%lx, irq %d, MAC addr %02X:%02X:%02X:%02X:%02X:%02X\n",
822 netdev->name, netdev->base_addr, pcidev->irq, netdev->dev_addr[0],
823 netdev->dev_addr[1], netdev->dev_addr[2], netdev->dev_addr[3],
824 netdev->dev_addr[4], netdev->dev_addr[5]);
826 //sxg_init_bad:
827 ASSERT(status == FALSE);
828 // sxg_free_adapter(adapter);
830 DBG_ERROR("sxg: %s EXIT status[%x] jiffies[%lx] cpu %d\n", __FUNCTION__,
831 status, jiffies, smp_processor_id());
832 return status;
834 err_out_unmap:
835 iounmap((void *)memmapped_ioaddr);
837 err_out_free_mmio_region:
838 release_mem_region(mmio_start, mmio_len);
840 err_out_exit_sxg_probe:
842 DBG_ERROR("%s EXIT jiffies[%lx] cpu %d\n", __FUNCTION__, jiffies,
843 smp_processor_id());
845 return -ENODEV;
849 /***********************************************************************
850 * LINE BASE Interrupt routines..
851 ***********************************************************************/
854 * sxg_disable_interrupt
856 * DisableInterrupt Handler
858 * Arguments:
860 * adapter: Our adapter structure
862 * Return Value:
863 * None.
865 static void sxg_disable_interrupt(p_adapter_t adapter)
867 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DisIntr",
868 adapter, adapter->InterruptsEnabled, 0, 0);
869 // For now, RSS is disabled with line based interrupts
870 ASSERT(adapter->RssEnabled == FALSE);
871 ASSERT(adapter->MsiEnabled == FALSE);
873 // Turn off interrupts by writing to the icr register.
875 WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_DISABLE), TRUE);
877 adapter->InterruptsEnabled = 0;
879 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDisIntr",
880 adapter, adapter->InterruptsEnabled, 0, 0);
885 * sxg_enable_interrupt
887 * EnableInterrupt Handler
889 * Arguments:
891 * adapter: Our adapter structure
893 * Return Value:
894 * None.
896 static void sxg_enable_interrupt(p_adapter_t adapter)
898 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "EnIntr",
899 adapter, adapter->InterruptsEnabled, 0, 0);
900 // For now, RSS is disabled with line based interrupts
901 ASSERT(adapter->RssEnabled == FALSE);
902 ASSERT(adapter->MsiEnabled == FALSE);
904 // Turn on interrupts by writing to the icr register.
906 WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_ENABLE), TRUE);
908 adapter->InterruptsEnabled = 1;
910 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XEnIntr",
911 adapter, 0, 0, 0);
916 * sxg_isr - Process an line-based interrupt
918 * Arguments:
919 * Context - Our adapter structure
920 * QueueDefault - Output parameter to queue to default CPU
921 * TargetCpus - Output bitmap to schedule DPC's
923 * Return Value:
924 * TRUE if our interrupt
926 static irqreturn_t sxg_isr(int irq, void *dev_id)
928 p_net_device dev = (p_net_device) dev_id;
929 p_adapter_t adapter = (p_adapter_t) netdev_priv(dev);
930 // u32 CpuMask = 0, i;
932 adapter->Stats.NumInts++;
933 if (adapter->Isr[0] == 0) {
934 // The SLIC driver used to experience a number of spurious interrupts
935 // due to the delay associated with the masking of the interrupt
936 // (we'd bounce back in here). If we see that again with Sahara,
937 // add a READ_REG of the Icr register after the WRITE_REG below.
938 adapter->Stats.FalseInts++;
939 return IRQ_NONE;
942 // Move the Isr contents and clear the value in
943 // shared memory, and mask interrupts
945 adapter->IsrCopy[0] = adapter->Isr[0];
946 adapter->Isr[0] = 0;
947 WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_MASK), TRUE);
948 // ASSERT(adapter->IsrDpcsPending == 0);
949 #if XXXTODO // RSS Stuff
950 // If RSS is enabled and the ISR specifies
951 // SXG_ISR_EVENT, then schedule DPC's
952 // based on event queues.
953 if (adapter->RssEnabled && (adapter->IsrCopy[0] & SXG_ISR_EVENT)) {
954 for (i = 0;
955 i < adapter->RssSystemInfo->ProcessorInfo.RssCpuCount;
956 i++) {
957 PSXG_EVENT_RING EventRing = &adapter->EventRings[i];
958 PSXG_EVENT Event =
959 &EventRing->Ring[adapter->NextEvent[i]];
960 unsigned char Cpu = adapter->RssSystemInfo->RssIdToCpu[i];
961 if (Event->Status & EVENT_STATUS_VALID) {
962 adapter->IsrDpcsPending++;
963 CpuMask |= (1 << Cpu);
967 // Now, either schedule the CPUs specified by the CpuMask,
968 // or queue default
969 if (CpuMask) {
970 *QueueDefault = FALSE;
971 } else {
972 adapter->IsrDpcsPending = 1;
973 *QueueDefault = TRUE;
975 *TargetCpus = CpuMask;
976 #endif
978 // There are no DPCs in Linux, so call the handler now
980 sxg_handle_interrupt(adapter);
982 return IRQ_HANDLED;
985 static void sxg_handle_interrupt(p_adapter_t adapter)
987 // unsigned char RssId = 0;
988 u32 NewIsr;
990 if (adapter->Stats.RcvNoBuffer < 5) {
991 DBG_ERROR("Enter sxg_handle_interrupt ISR[%x]\n",
992 adapter->IsrCopy[0]);
994 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "HndlIntr",
995 adapter, adapter->IsrCopy[0], 0, 0);
996 // For now, RSS is disabled with line based interrupts
997 ASSERT(adapter->RssEnabled == FALSE);
998 ASSERT(adapter->MsiEnabled == FALSE);
999 ASSERT(adapter->IsrCopy[0]);
1000 /////////////////////////////
1002 // Always process the event queue.
1003 sxg_process_event_queue(adapter,
1004 (adapter->RssEnabled ? /*RssId */ 0 : 0));
1006 #if XXXTODO // RSS stuff
1007 if (--adapter->IsrDpcsPending) {
1008 // We're done.
1009 ASSERT(adapter->RssEnabled);
1010 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DPCsPend",
1011 adapter, 0, 0, 0);
1012 return;
1014 #endif
1016 // Last (or only) DPC processes the ISR and clears the interrupt.
1018 NewIsr = sxg_process_isr(adapter, 0);
1020 // Reenable interrupts
1022 adapter->IsrCopy[0] = 0;
1023 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "ClearIsr",
1024 adapter, NewIsr, 0, 0);
1026 if (adapter->Stats.RcvNoBuffer < 5) {
1027 DBG_ERROR
1028 ("Exit sxg_handle_interrupt2 after enabling interrupt\n");
1031 WRITE_REG(adapter->UcodeRegs[0].Isr, NewIsr, TRUE);
1033 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XHndlInt",
1034 adapter, 0, 0, 0);
1039 * sxg_process_isr - Process an interrupt. Called from the line-based and
1040 * message based interrupt DPC routines
1042 * Arguments:
1043 * adapter - Our adapter structure
1044 * Queue - The ISR that needs processing
1046 * Return Value:
1047 * None
1049 static int sxg_process_isr(p_adapter_t adapter, u32 MessageId)
1051 u32 Isr = adapter->IsrCopy[MessageId];
1052 u32 NewIsr = 0;
1054 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "ProcIsr",
1055 adapter, Isr, 0, 0);
1057 // Error
1058 if (Isr & SXG_ISR_ERR) {
1059 if (Isr & SXG_ISR_PDQF) {
1060 adapter->Stats.PdqFull++;
1061 DBG_ERROR("%s: SXG_ISR_ERR PDQF!!\n", __FUNCTION__);
1063 // No host buffer
1064 if (Isr & SXG_ISR_RMISS) {
1065 // There is a bunch of code in the SLIC driver which
1066 // attempts to process more receive events per DPC
1067 // if we start to fall behind. We'll probably
1068 // need to do something similar here, but hold
1069 // off for now. I don't want to make the code more
1070 // complicated than strictly needed.
1071 adapter->Stats.RcvNoBuffer++;
1072 if (adapter->Stats.RcvNoBuffer < 5) {
1073 DBG_ERROR("%s: SXG_ISR_ERR RMISS!!\n",
1074 __FUNCTION__);
1077 // Card crash
1078 if (Isr & SXG_ISR_DEAD) {
1079 // Set aside the crash info and set the adapter state to RESET
1080 adapter->CrashCpu =
1081 (unsigned char) ((Isr & SXG_ISR_CPU) >> SXG_ISR_CPU_SHIFT);
1082 adapter->CrashLocation = (ushort) (Isr & SXG_ISR_CRASH);
1083 adapter->Dead = TRUE;
1084 DBG_ERROR("%s: ISR_DEAD %x, CPU: %d\n", __FUNCTION__,
1085 adapter->CrashLocation, adapter->CrashCpu);
1087 // Event ring full
1088 if (Isr & SXG_ISR_ERFULL) {
1089 // Same issue as RMISS, really. This means the
1090 // host is falling behind the card. Need to increase
1091 // event ring size, process more events per interrupt,
1092 // and/or reduce/remove interrupt aggregation.
1093 adapter->Stats.EventRingFull++;
1094 DBG_ERROR("%s: SXG_ISR_ERR EVENT RING FULL!!\n",
1095 __FUNCTION__);
1097 // Transmit drop - no DRAM buffers or XMT error
1098 if (Isr & SXG_ISR_XDROP) {
1099 adapter->Stats.XmtDrops++;
1100 adapter->Stats.XmtErrors++;
1101 DBG_ERROR("%s: SXG_ISR_ERR XDROP!!\n", __FUNCTION__);
1104 // Slowpath send completions
1105 if (Isr & SXG_ISR_SPSEND) {
1106 sxg_complete_slow_send(adapter);
1108 // Dump
1109 if (Isr & SXG_ISR_UPC) {
1110 ASSERT(adapter->DumpCmdRunning); // Maybe change when debug is added..
1111 adapter->DumpCmdRunning = FALSE;
1113 // Link event
1114 if (Isr & SXG_ISR_LINK) {
1115 sxg_link_event(adapter);
1117 // Debug - breakpoint hit
1118 if (Isr & SXG_ISR_BREAK) {
1119 // At the moment AGDB isn't written to support interactive
1120 // debug sessions. When it is, this interrupt will be used
1121 // to signal AGDB that it has hit a breakpoint. For now, ASSERT.
1122 ASSERT(0);
1124 // Heartbeat response
1125 if (Isr & SXG_ISR_PING) {
1126 adapter->PingOutstanding = FALSE;
1128 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XProcIsr",
1129 adapter, Isr, NewIsr, 0);
1131 return (NewIsr);
1136 * sxg_process_event_queue - Process our event queue
1138 * Arguments:
1139 * - adapter - Adapter structure
1140 * - RssId - The event queue requiring processing
1142 * Return Value:
1143 * None.
1145 static u32 sxg_process_event_queue(p_adapter_t adapter, u32 RssId)
1147 PSXG_EVENT_RING EventRing = &adapter->EventRings[RssId];
1148 PSXG_EVENT Event = &EventRing->Ring[adapter->NextEvent[RssId]];
1149 u32 EventsProcessed = 0, Batches = 0;
1150 u32 num_skbs = 0;
1151 struct sk_buff *skb;
1152 #ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
1153 struct sk_buff *prev_skb = NULL;
1154 struct sk_buff *IndicationList[SXG_RCV_ARRAYSIZE];
1155 u32 Index;
1156 PSXG_RCV_DATA_BUFFER_HDR RcvDataBufferHdr;
1157 #endif
1158 u32 ReturnStatus = 0;
1160 ASSERT((adapter->State == SXG_STATE_RUNNING) ||
1161 (adapter->State == SXG_STATE_PAUSING) ||
1162 (adapter->State == SXG_STATE_PAUSED) ||
1163 (adapter->State == SXG_STATE_HALTING));
1164 // We may still have unprocessed events on the queue if
1165 // the card crashed. Don't process them.
1166 if (adapter->Dead) {
1167 return (0);
1169 // In theory there should only be a single processor that
1170 // accesses this queue, and only at interrupt-DPC time. So
1171 // we shouldn't need a lock for any of this.
1172 while (Event->Status & EVENT_STATUS_VALID) {
1173 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "Event",
1174 Event, Event->Code, Event->Status,
1175 adapter->NextEvent);
1176 switch (Event->Code) {
1177 case EVENT_CODE_BUFFERS:
1178 ASSERT(!(Event->CommandIndex & 0xFF00)); // SXG_RING_INFO Head & Tail == unsigned char
1180 sxg_complete_descriptor_blocks(adapter,
1181 Event->CommandIndex);
1183 break;
1184 case EVENT_CODE_SLOWRCV:
1185 --adapter->RcvBuffersOnCard;
1186 if ((skb = sxg_slow_receive(adapter, Event))) {
1187 u32 rx_bytes;
1188 #ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
1189 // Add it to our indication list
1190 SXG_ADD_RCV_PACKET(adapter, skb, prev_skb,
1191 IndicationList, num_skbs);
1192 // In Linux, we just pass up each skb to the protocol above at this point,
1193 // there is no capability of an indication list.
1194 #else
1195 // CHECK skb_pull(skb, INIC_RCVBUF_HEADSIZE);
1196 rx_bytes = Event->Length; // (rcvbuf->length & IRHDDR_FLEN_MSK);
1197 skb_put(skb, rx_bytes);
1198 adapter->stats.rx_packets++;
1199 adapter->stats.rx_bytes += rx_bytes;
1200 #if SXG_OFFLOAD_IP_CHECKSUM
1201 skb->ip_summed = CHECKSUM_UNNECESSARY;
1202 #endif
1203 skb->dev = adapter->netdev;
1204 skb->protocol = eth_type_trans(skb, skb->dev);
1205 netif_rx(skb);
1206 #endif
1208 break;
1209 default:
1210 DBG_ERROR("%s: ERROR Invalid EventCode %d\n",
1211 __FUNCTION__, Event->Code);
1212 // ASSERT(0);
1214 // See if we need to restock card receive buffers.
1215 // There are two things to note here:
1216 // First - This test is not SMP safe. The
1217 // adapter->BuffersOnCard field is protected via atomic interlocked calls, but
1218 // we do not protect it with respect to these tests. The only way to do that
1219 // is with a lock, and I don't want to grab a lock every time we adjust the
1220 // BuffersOnCard count. Instead, we allow the buffer replenishment to be off
 1221 		// once in a while.  The worst that can happen is that the card is given one
 1222 		// descriptor block more or less than the arbitrary value we've chosen.
1223 // No big deal
1224 // In short DO NOT ADD A LOCK HERE, OR WHERE RcvBuffersOnCard is adjusted.
1225 // Second - We expect this test to rarely evaluate to true. We attempt to
1226 // refill descriptor blocks as they are returned to us
 1227 		// (sxg_complete_descriptor_blocks), so the only time this should evaluate
1228 // to true is when sxg_complete_descriptor_blocks failed to allocate
1229 // receive buffers.
1230 if (adapter->RcvBuffersOnCard < SXG_RCV_DATA_BUFFERS) {
1231 sxg_stock_rcv_buffers(adapter);
1233 // It's more efficient to just set this to zero.
1234 // But clearing the top bit saves potential debug info...
1235 Event->Status &= ~EVENT_STATUS_VALID;
 1236 		// Advance to the next event
1237 SXG_ADVANCE_INDEX(adapter->NextEvent[RssId], EVENT_RING_SIZE);
1238 Event = &EventRing->Ring[adapter->NextEvent[RssId]];
1239 EventsProcessed++;
1240 if (EventsProcessed == EVENT_RING_BATCH) {
1241 // Release a batch of events back to the card
1242 WRITE_REG(adapter->UcodeRegs[RssId].EventRelease,
1243 EVENT_RING_BATCH, FALSE);
1244 EventsProcessed = 0;
1245 // If we've processed our batch limit, break out of the
1246 // loop and return SXG_ISR_EVENT to arrange for us to
1247 // be called again
1248 if (Batches++ == EVENT_BATCH_LIMIT) {
1249 SXG_TRACE(TRACE_SXG, SxgTraceBuffer,
1250 TRACE_NOISY, "EvtLimit", Batches,
1251 adapter->NextEvent, 0, 0);
1252 ReturnStatus = SXG_ISR_EVENT;
1253 break;
1257 #ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
1259 // Indicate any received dumb-nic frames
1261 SXG_INDICATE_PACKETS(adapter, IndicationList, num_skbs);
1262 #endif
1264 // Release events back to the card.
1266 if (EventsProcessed) {
1267 WRITE_REG(adapter->UcodeRegs[RssId].EventRelease,
1268 EventsProcessed, FALSE);
1270 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XPrcEvnt",
1271 Batches, EventsProcessed, adapter->NextEvent, num_skbs);
1273 return (ReturnStatus);
1277 * sxg_complete_slow_send - Complete slowpath or dumb-nic sends
1279 * Arguments -
1280 * adapter - A pointer to our adapter structure
1282 * Return
1283 * None
1285 static void sxg_complete_slow_send(p_adapter_t adapter)
1287 PSXG_XMT_RING XmtRing = &adapter->XmtRings[0];
1288 PSXG_RING_INFO XmtRingInfo = &adapter->XmtRingZeroInfo;
1289 u32 * ContextType;
1290 PSXG_CMD XmtCmd;
1292 // NOTE - This lock is dropped and regrabbed in this loop.
1293 // This means two different processors can both be running
1294 // through this loop. Be *very* careful.
1295 spin_lock(&adapter->XmtZeroLock);
1296 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnds",
1297 adapter, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
1299 while (XmtRingInfo->Tail != *adapter->XmtRingZeroIndex) {
1300 // Locate the current Cmd (ring descriptor entry), and
1301 // associated SGL, and advance the tail
1302 SXG_RETURN_CMD(XmtRing, XmtRingInfo, XmtCmd, ContextType);
1303 ASSERT(ContextType);
1304 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnd",
1305 XmtRingInfo->Head, XmtRingInfo->Tail, XmtCmd, 0);
1306 // Clear the SGL field.
1307 XmtCmd->Sgl = 0;
1309 switch (*ContextType) {
1310 case SXG_SGL_DUMB:
1312 struct sk_buff *skb;
1313 // Dumb-nic send. Command context is the dumb-nic SGL
1314 skb = (struct sk_buff *)ContextType;
1315 // Complete the send
1316 SXG_TRACE(TRACE_SXG, SxgTraceBuffer,
1317 TRACE_IMPORTANT, "DmSndCmp", skb, 0,
1318 0, 0);
1319 ASSERT(adapter->Stats.XmtQLen);
1320 adapter->Stats.XmtQLen--; // within XmtZeroLock
1321 adapter->Stats.XmtOk++;
1322 // Now drop the lock and complete the send back to
1323 // Microsoft. We need to drop the lock because
1324 // Microsoft can come back with a chimney send, which
 1325 				// results in a double trip in SxgTcpOutput
1326 spin_unlock(&adapter->XmtZeroLock);
1327 SXG_COMPLETE_DUMB_SEND(adapter, skb);
1328 // and reacquire..
1329 spin_lock(&adapter->XmtZeroLock);
1331 break;
1332 default:
1333 ASSERT(0);
1336 spin_unlock(&adapter->XmtZeroLock);
1337 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnd",
1338 adapter, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
1342 * sxg_slow_receive
1344 * Arguments -
1345 * adapter - A pointer to our adapter structure
1346 * Event - Receive event
1348 * Return
1349 * skb
1351 static struct sk_buff *sxg_slow_receive(p_adapter_t adapter, PSXG_EVENT Event)
1353 PSXG_RCV_DATA_BUFFER_HDR RcvDataBufferHdr;
1354 struct sk_buff *Packet;
1356 RcvDataBufferHdr = (PSXG_RCV_DATA_BUFFER_HDR) Event->HostHandle;
1357 ASSERT(RcvDataBufferHdr);
1358 ASSERT(RcvDataBufferHdr->State == SXG_BUFFER_ONCARD);
1359 ASSERT(SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr) ==
1360 RcvDataBufferHdr->VirtualAddress);
1361 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "SlowRcv", Event,
1362 RcvDataBufferHdr, RcvDataBufferHdr->State,
1363 RcvDataBufferHdr->VirtualAddress);
1364 // Drop rcv frames in non-running state
1365 switch (adapter->State) {
1366 case SXG_STATE_RUNNING:
1367 break;
1368 case SXG_STATE_PAUSING:
1369 case SXG_STATE_PAUSED:
1370 case SXG_STATE_HALTING:
1371 goto drop;
1372 default:
1373 ASSERT(0);
1374 goto drop;
1377 // Change buffer state to UPSTREAM
1378 RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM;
1379 if (Event->Status & EVENT_STATUS_RCVERR) {
1380 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "RcvError",
1381 Event, Event->Status, Event->HostHandle, 0);
1382 // XXXTODO - Remove this print later
1383 DBG_ERROR("SXG: Receive error %x\n",
1384 *(u32 *)
1385 SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr));
1386 sxg_process_rcv_error(adapter,
1387 *(u32 *)
1388 SXG_RECEIVE_DATA_LOCATION
1389 (RcvDataBufferHdr));
1390 goto drop;
1392 #if XXXTODO // VLAN stuff
1393 // If there's a VLAN tag, extract it and validate it
1394 if (((p_ether_header) (SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr)))->
1395 EtherType == ETHERTYPE_VLAN) {
1396 if (SxgExtractVlanHeader(adapter, RcvDataBufferHdr, Event) !=
1397 STATUS_SUCCESS) {
1398 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY,
1399 "BadVlan", Event,
1400 SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr),
1401 Event->Length, 0);
1402 goto drop;
1405 #endif
1407 // Dumb-nic frame. See if it passes our mac filter and update stats
1409 if (!sxg_mac_filter(adapter,
1410 (p_ether_header)
1411 SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr),
1412 Event->Length)) {
1413 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "RcvFiltr",
1414 Event, SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr),
1415 Event->Length, 0);
1416 goto drop;
1419 Packet = RcvDataBufferHdr->SxgDumbRcvPacket;
1421 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumbRcv",
1422 RcvDataBufferHdr, Packet, Event->Length, 0);
1424 // Lastly adjust the receive packet length.
1426 SXG_ADJUST_RCV_PACKET(Packet, RcvDataBufferHdr, Event);
1428 return (Packet);
1430 drop:
1431 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DropRcv",
1432 RcvDataBufferHdr, Event->Length, 0, 0);
1433 adapter->Stats.RcvDiscards++;
1434 spin_lock(&adapter->RcvQLock);
1435 SXG_FREE_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
1436 spin_unlock(&adapter->RcvQLock);
1437 return (NULL);
1441 * sxg_process_rcv_error - process receive error and update
1442 * stats
1444 * Arguments:
1445 * adapter - Adapter structure
1446 * ErrorStatus - 4-byte receive error status
1448 * Return Value:
1449 * None
1451 static void sxg_process_rcv_error(p_adapter_t adapter, u32 ErrorStatus)
1453 u32 Error;
1455 adapter->Stats.RcvErrors++;
1457 if (ErrorStatus & SXG_RCV_STATUS_TRANSPORT_ERROR) {
1458 Error = ErrorStatus & SXG_RCV_STATUS_TRANSPORT_MASK;
1459 switch (Error) {
1460 case SXG_RCV_STATUS_TRANSPORT_CSUM:
1461 adapter->Stats.TransportCsum++;
1462 break;
1463 case SXG_RCV_STATUS_TRANSPORT_UFLOW:
1464 adapter->Stats.TransportUflow++;
1465 break;
1466 case SXG_RCV_STATUS_TRANSPORT_HDRLEN:
1467 adapter->Stats.TransportHdrLen++;
1468 break;
1471 if (ErrorStatus & SXG_RCV_STATUS_NETWORK_ERROR) {
1472 Error = ErrorStatus & SXG_RCV_STATUS_NETWORK_MASK;
1473 switch (Error) {
1474 case SXG_RCV_STATUS_NETWORK_CSUM:
1475 adapter->Stats.NetworkCsum++;
1476 break;
1477 case SXG_RCV_STATUS_NETWORK_UFLOW:
1478 adapter->Stats.NetworkUflow++;
1479 break;
1480 case SXG_RCV_STATUS_NETWORK_HDRLEN:
1481 adapter->Stats.NetworkHdrLen++;
1482 break;
1485 if (ErrorStatus & SXG_RCV_STATUS_PARITY) {
1486 adapter->Stats.Parity++;
1488 if (ErrorStatus & SXG_RCV_STATUS_LINK_ERROR) {
1489 Error = ErrorStatus & SXG_RCV_STATUS_LINK_MASK;
1490 switch (Error) {
1491 case SXG_RCV_STATUS_LINK_PARITY:
1492 adapter->Stats.LinkParity++;
1493 break;
1494 case SXG_RCV_STATUS_LINK_EARLY:
1495 adapter->Stats.LinkEarly++;
1496 break;
1497 case SXG_RCV_STATUS_LINK_BUFOFLOW:
1498 adapter->Stats.LinkBufOflow++;
1499 break;
1500 case SXG_RCV_STATUS_LINK_CODE:
1501 adapter->Stats.LinkCode++;
1502 break;
1503 case SXG_RCV_STATUS_LINK_DRIBBLE:
1504 adapter->Stats.LinkDribble++;
1505 break;
1506 case SXG_RCV_STATUS_LINK_CRC:
1507 adapter->Stats.LinkCrc++;
1508 break;
1509 case SXG_RCV_STATUS_LINK_OFLOW:
1510 adapter->Stats.LinkOflow++;
1511 break;
1512 case SXG_RCV_STATUS_LINK_UFLOW:
1513 adapter->Stats.LinkUflow++;
1514 break;
1520 * sxg_mac_filter
1522 * Arguments:
1523 * adapter - Adapter structure
1524 * pether - Ethernet header
1525 * length - Frame length
1527 * Return Value:
1528 * TRUE if the frame is to be allowed
1530 static bool sxg_mac_filter(p_adapter_t adapter, p_ether_header EtherHdr, ushort length)
1532 bool EqualAddr;
1534 if (SXG_MULTICAST_PACKET(EtherHdr)) {
1535 if (SXG_BROADCAST_PACKET(EtherHdr)) {
1536 // broadcast
1537 if (adapter->MacFilter & MAC_BCAST) {
1538 adapter->Stats.DumbRcvBcastPkts++;
1539 adapter->Stats.DumbRcvBcastBytes += length;
1540 adapter->Stats.DumbRcvPkts++;
1541 adapter->Stats.DumbRcvBytes += length;
1542 return (TRUE);
1544 } else {
1545 // multicast
1546 if (adapter->MacFilter & MAC_ALLMCAST) {
1547 adapter->Stats.DumbRcvMcastPkts++;
1548 adapter->Stats.DumbRcvMcastBytes += length;
1549 adapter->Stats.DumbRcvPkts++;
1550 adapter->Stats.DumbRcvBytes += length;
1551 return (TRUE);
1553 if (adapter->MacFilter & MAC_MCAST) {
1554 PSXG_MULTICAST_ADDRESS MulticastAddrs =
1555 adapter->MulticastAddrs;
1556 while (MulticastAddrs) {
1557 ETHER_EQ_ADDR(MulticastAddrs->Address,
1558 EtherHdr->ether_dhost,
1559 EqualAddr);
1560 if (EqualAddr) {
1561 adapter->Stats.
1562 DumbRcvMcastPkts++;
1563 adapter->Stats.
1564 DumbRcvMcastBytes += length;
1565 adapter->Stats.DumbRcvPkts++;
1566 adapter->Stats.DumbRcvBytes +=
1567 length;
1568 return (TRUE);
1570 MulticastAddrs = MulticastAddrs->Next;
1574 } else if (adapter->MacFilter & MAC_DIRECTED) {
1575 // Not broadcast or multicast. Must be directed at us or
1576 // the card is in promiscuous mode. Either way, consider it
1577 // ours if MAC_DIRECTED is set
1578 adapter->Stats.DumbRcvUcastPkts++;
1579 adapter->Stats.DumbRcvUcastBytes += length;
1580 adapter->Stats.DumbRcvPkts++;
1581 adapter->Stats.DumbRcvBytes += length;
1582 return (TRUE);
1584 if (adapter->MacFilter & MAC_PROMISC) {
1585 // Whatever it is, keep it.
1586 adapter->Stats.DumbRcvPkts++;
1587 adapter->Stats.DumbRcvBytes += length;
1588 return (TRUE);
1590 adapter->Stats.RcvDiscards++;
1591 return (FALSE);
1594 static int sxg_register_interrupt(p_adapter_t adapter)
1596 if (!adapter->intrregistered) {
1597 int retval;
1599 DBG_ERROR
1600 ("sxg: %s AllocAdaptRsrcs adapter[%p] dev->irq[%x] %x\n",
1601 __FUNCTION__, adapter, adapter->netdev->irq, NR_IRQS);
1603 spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);
1605 retval = request_irq(adapter->netdev->irq,
1606 &sxg_isr,
1607 IRQF_SHARED,
1608 adapter->netdev->name, adapter->netdev);
1610 spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
1612 if (retval) {
1613 DBG_ERROR("sxg: request_irq (%s) FAILED [%x]\n",
1614 adapter->netdev->name, retval);
1615 return (retval);
1617 adapter->intrregistered = 1;
1618 adapter->IntRegistered = TRUE;
1619 // Disable RSS with line-based interrupts
1620 adapter->MsiEnabled = FALSE;
1621 adapter->RssEnabled = FALSE;
1622 DBG_ERROR("sxg: %s AllocAdaptRsrcs adapter[%p] dev->irq[%x]\n",
1623 __FUNCTION__, adapter, adapter->netdev->irq);
1625 return (STATUS_SUCCESS);
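/*
 * Note: sxg_deregister_interrupt() currently only clears the software
 * statistics; it does not call free_irq() - releasing the interrupt and
 * the rest of the cleanup is still stubbed out behind XXXTODO.
 */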
1628 static void sxg_deregister_interrupt(p_adapter_t adapter)
1630 DBG_ERROR("sxg: %s ENTER adapter[%p]\n", __FUNCTION__, adapter);
1631 #if XXXTODO
1632 slic_init_cleanup(adapter);
1633 #endif
1634 memset(&adapter->stats, 0, sizeof(struct net_device_stats));
1635 adapter->error_interrupts = 0;
1636 adapter->rcv_interrupts = 0;
1637 adapter->xmit_interrupts = 0;
1638 adapter->linkevent_interrupts = 0;
1639 adapter->upr_interrupts = 0;
1640 adapter->num_isrs = 0;
1641 adapter->xmit_completes = 0;
1642 adapter->rcv_broadcasts = 0;
1643 adapter->rcv_multicasts = 0;
1644 adapter->rcv_unicasts = 0;
1645 DBG_ERROR("sxg: %s EXIT\n", __FUNCTION__);
1649 * sxg_if_init
1651 * Perform initialization of our slic interface.
1654 static int sxg_if_init(p_adapter_t adapter)
1656 p_net_device dev = adapter->netdev;
1657 int status = 0;
1659 DBG_ERROR("sxg: %s (%s) ENTER states[%d:%d:%d] flags[%x]\n",
1660 __FUNCTION__, adapter->netdev->name,
1661 adapter->queues_initialized, adapter->state,
1662 adapter->linkstate, dev->flags);
1664 /* adapter should be down at this point */
1665 if (adapter->state != ADAPT_DOWN) {
1666 DBG_ERROR("sxg_if_init adapter->state != ADAPT_DOWN\n");
1667 return (-EIO);
1669 ASSERT(adapter->linkstate == LINK_DOWN);
1671 adapter->devflags_prev = dev->flags;
1672 adapter->macopts = MAC_DIRECTED;
1673 if (dev->flags) {
1674 DBG_ERROR("sxg: %s (%s) Set MAC options: ", __FUNCTION__,
1675 adapter->netdev->name);
1676 if (dev->flags & IFF_BROADCAST) {
1677 adapter->macopts |= MAC_BCAST;
1678 DBG_ERROR("BCAST ");
1680 if (dev->flags & IFF_PROMISC) {
1681 adapter->macopts |= MAC_PROMISC;
1682 DBG_ERROR("PROMISC ");
1684 if (dev->flags & IFF_ALLMULTI) {
1685 adapter->macopts |= MAC_ALLMCAST;
1686 DBG_ERROR("ALL_MCAST ");
1688 if (dev->flags & IFF_MULTICAST) {
1689 adapter->macopts |= MAC_MCAST;
1690 DBG_ERROR("MCAST ");
1692 DBG_ERROR("\n");
1694 status = sxg_register_interrupt(adapter);
1695 if (status != STATUS_SUCCESS) {
1696 DBG_ERROR("sxg_if_init: sxg_register_interrupt FAILED %x\n",
1697 status);
1698 sxg_deregister_interrupt(adapter);
1699 return (status);
1702 adapter->state = ADAPT_UP;
1705 * clear any pending events, then enable interrupts
1707 DBG_ERROR("sxg: %s ENABLE interrupts(slic)\n", __FUNCTION__);
1709 return (STATUS_SUCCESS);
1712 static int sxg_entry_open(p_net_device dev)
1714 p_adapter_t adapter = (p_adapter_t) netdev_priv(dev);
1715 int status;
1717 ASSERT(adapter);
1718 DBG_ERROR("sxg: %s adapter->activated[%d]\n", __FUNCTION__,
1719 adapter->activated);
1720 DBG_ERROR
1721 ("sxg: %s (%s): [jiffies[%lx] cpu %d] dev[%p] adapt[%p] port[%d]\n",
1722 __FUNCTION__, adapter->netdev->name, jiffies, smp_processor_id(),
1723 adapter->netdev, adapter, adapter->port);
1725 netif_stop_queue(adapter->netdev);
1727 spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
1728 if (!adapter->activated) {
1729 sxg_global.num_sxg_ports_active++;
1730 adapter->activated = 1;
1733 // Initialize the adapter
1734 DBG_ERROR("sxg: %s ENTER sxg_initialize_adapter\n", __FUNCTION__);
1735 status = sxg_initialize_adapter(adapter);
1736 DBG_ERROR("sxg: %s EXIT sxg_initialize_adapter status[%x]\n",
1737 __FUNCTION__, status);
1739 if (status == STATUS_SUCCESS) {
1740 DBG_ERROR("sxg: %s ENTER sxg_if_init\n", __FUNCTION__);
1741 status = sxg_if_init(adapter);
1742 DBG_ERROR("sxg: %s EXIT sxg_if_init status[%x]\n", __FUNCTION__,
1743 status);
1746 if (status != STATUS_SUCCESS) {
1747 if (adapter->activated) {
1748 sxg_global.num_sxg_ports_active--;
1749 adapter->activated = 0;
1751 spin_unlock_irqrestore(&sxg_global.driver_lock,
1752 sxg_global.flags);
1753 return (status);
1755 DBG_ERROR("sxg: %s ENABLE ALL INTERRUPTS\n", __FUNCTION__);
1757 // Enable interrupts
1758 SXG_ENABLE_ALL_INTERRUPTS(adapter);
1760 DBG_ERROR("sxg: %s EXIT\n", __FUNCTION__);
1762 spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);
1763 return STATUS_SUCCESS;
1766 static void __devexit sxg_entry_remove(struct pci_dev *pcidev)
1768 p_net_device dev = pci_get_drvdata(pcidev);
1769 u32 mmio_start = 0;
1770 unsigned int mmio_len = 0;
1771 p_adapter_t adapter = (p_adapter_t) netdev_priv(dev);
1773 ASSERT(adapter);
1774 DBG_ERROR("sxg: %s ENTER dev[%p] adapter[%p]\n", __FUNCTION__, dev,
1775 adapter);
1776 sxg_deregister_interrupt(adapter);
1777 sxg_unmap_mmio_space(adapter);
1778 DBG_ERROR("sxg: %s unregister_netdev\n", __FUNCTION__);
1779 unregister_netdev(dev);
1781 mmio_start = pci_resource_start(pcidev, 0);
1782 mmio_len = pci_resource_len(pcidev, 0);
1784 DBG_ERROR("sxg: %s rel_region(0) start[%x] len[%x]\n", __FUNCTION__,
1785 mmio_start, mmio_len);
1786 release_mem_region(mmio_start, mmio_len);
1788 DBG_ERROR("sxg: %s iounmap dev->base_addr[%x]\n", __FUNCTION__,
1789 (unsigned int) dev->base_addr);
1790 iounmap((char *)dev->base_addr);
1792 DBG_ERROR("sxg: %s deallocate device\n", __FUNCTION__);
1793 kfree(dev);
1794 DBG_ERROR("sxg: %s EXIT\n", __FUNCTION__);
1797 static int sxg_entry_halt(p_net_device dev)
1799 p_adapter_t adapter = (p_adapter_t) netdev_priv(dev);
1801 spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
1802 DBG_ERROR("sxg: %s (%s) ENTER\n", __FUNCTION__, dev->name);
1804 netif_stop_queue(adapter->netdev);
1805 adapter->state = ADAPT_DOWN;
1806 adapter->linkstate = LINK_DOWN;
1807 adapter->devflags_prev = 0;
1808 DBG_ERROR("sxg: %s (%s) set adapter[%p] state to ADAPT_DOWN(%d)\n",
1809 __FUNCTION__, dev->name, adapter, adapter->state);
1811 DBG_ERROR("sxg: %s (%s) EXIT\n", __FUNCTION__, dev->name);
1812 DBG_ERROR("sxg: %s EXIT\n", __FUNCTION__);
1813 spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);
1814 return (STATUS_SUCCESS);
1817 static int sxg_ioctl(p_net_device dev, struct ifreq *rq, int cmd)
1819 ASSERT(rq);
1820 // DBG_ERROR("sxg: %s cmd[%x] rq[%p] dev[%p]\n", __FUNCTION__, cmd, rq, dev);
1821 switch (cmd) {
1822 case SIOCSLICSETINTAGG:
1824 // p_adapter_t adapter = (p_adapter_t) netdev_priv(dev);
1825 u32 data[7];
1826 u32 intagg;
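			// Descriptive note (not new behavior): the 28 bytes copied
			// below are sizeof(data), i.e. seven u32 parameters supplied
			// by user space; only data[0], the interrupt aggregation
			// value, is currently used.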
1828 if (copy_from_user(data, rq->ifr_data, 28)) {
1829 DBG_ERROR
1830 ("copy_from_user FAILED getting initial params\n");
1831 return -EFAULT;
1833 intagg = data[0];
1834 printk(KERN_EMERG
1835 "%s: set interrupt aggregation to %d\n",
1836 __FUNCTION__, intagg);
1837 return 0;
1840 default:
1841 // DBG_ERROR("sxg: %s UNSUPPORTED[%x]\n", __FUNCTION__, cmd);
1842 return -EOPNOTSUPP;
1844 return 0;
1847 #define NORMAL_ETHFRAME 0
1851 * sxg_send_packets - Send a skb packet
1853 * Arguments:
1854 * skb - The packet to send
1855 * dev - Our linux net device that refs our adapter
1857 * Return:
 1858  *	0 regardless of outcome. XXXTODO refer to e1000 driver
1860 static int sxg_send_packets(struct sk_buff *skb, p_net_device dev)
1862 p_adapter_t adapter = (p_adapter_t) netdev_priv(dev);
1863 u32 status = STATUS_SUCCESS;
1865 DBG_ERROR("sxg: %s ENTER sxg_send_packets skb[%p]\n", __FUNCTION__,
1866 skb);
1867 // Check the adapter state
1868 switch (adapter->State) {
1869 case SXG_STATE_INITIALIZING:
1870 case SXG_STATE_HALTED:
1871 case SXG_STATE_SHUTDOWN:
1872 ASSERT(0); // unexpected
1873 // fall through
1874 case SXG_STATE_RESETTING:
1875 case SXG_STATE_SLEEP:
1876 case SXG_STATE_BOOTDIAG:
1877 case SXG_STATE_DIAG:
1878 case SXG_STATE_HALTING:
1879 status = STATUS_FAILURE;
1880 break;
1881 case SXG_STATE_RUNNING:
1882 if (adapter->LinkState != SXG_LINK_UP) {
1883 status = STATUS_FAILURE;
1885 break;
1886 default:
1887 ASSERT(0);
1888 status = STATUS_FAILURE;
1890 if (status != STATUS_SUCCESS) {
1891 goto xmit_fail;
1893 // send a packet
1894 status = sxg_transmit_packet(adapter, skb);
1895 if (status == STATUS_SUCCESS) {
1896 goto xmit_done;
1899 xmit_fail:
 1900 	// reject & complete all the packets if they can't be sent
1901 if (status != STATUS_SUCCESS) {
1902 #if XXXTODO
1903 // sxg_send_packets_fail(adapter, skb, status);
1904 #else
1905 SXG_DROP_DUMB_SEND(adapter, skb);
1906 adapter->stats.tx_dropped++;
1907 #endif
1909 DBG_ERROR("sxg: %s EXIT sxg_send_packets status[%x]\n", __FUNCTION__,
1910 status);
1912 xmit_done:
1913 return 0;
1917 * sxg_transmit_packet
1919 * This function transmits a single packet.
1921 * Arguments -
1922 * adapter - Pointer to our adapter structure
1923 * skb - The packet to be sent
1925 * Return -
1926 * STATUS of send
1928 static int sxg_transmit_packet(p_adapter_t adapter, struct sk_buff *skb)
1930 PSCATTER_GATHER_LIST pSgl;
1931 PSXG_SCATTER_GATHER SxgSgl;
1932 void * SglBuffer;
1933 u32 SglBufferLength;
1935 // The vast majority of work is done in the shared
1936 // sxg_dumb_sgl routine.
1937 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSend",
1938 adapter, skb, 0, 0);
1940 // Allocate a SGL buffer
1941 SXG_GET_SGL_BUFFER(adapter, SxgSgl);
1942 if (!SxgSgl) {
1943 adapter->Stats.NoSglBuf++;
1944 adapter->Stats.XmtErrors++;
1945 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "SndPktF1",
1946 adapter, skb, 0, 0);
1947 return (STATUS_RESOURCES);
1949 ASSERT(SxgSgl->adapter == adapter);
1950 SglBuffer = SXG_SGL_BUFFER(SxgSgl);
1951 SglBufferLength = SXG_SGL_BUF_SIZE;
1952 SxgSgl->VlanTag.VlanTci = 0;
1953 SxgSgl->VlanTag.VlanTpid = 0;
1954 SxgSgl->Type = SXG_SGL_DUMB;
1955 SxgSgl->DumbPacket = skb;
1956 pSgl = NULL;
1958 // Call the common sxg_dumb_sgl routine to complete the send.
1959 sxg_dumb_sgl(pSgl, SxgSgl);
 1960 	// Return success; sxg_dumb_sgl (or something later) will complete it.
1961 return (STATUS_SUCCESS);
1965 * sxg_dumb_sgl
1967 * Arguments:
1968 * pSgl -
1969 * SxgSgl - SXG_SCATTER_GATHER
1971 * Return Value:
1972 * None.
1974 static void sxg_dumb_sgl(PSCATTER_GATHER_LIST pSgl, PSXG_SCATTER_GATHER SxgSgl)
1976 p_adapter_t adapter = SxgSgl->adapter;
1977 struct sk_buff *skb = SxgSgl->DumbPacket;
1978 // For now, all dumb-nic sends go on RSS queue zero
1979 PSXG_XMT_RING XmtRing = &adapter->XmtRings[0];
1980 PSXG_RING_INFO XmtRingInfo = &adapter->XmtRingZeroInfo;
1981 PSXG_CMD XmtCmd = NULL;
1982 // u32 Index = 0;
1983 u32 DataLength = skb->len;
1984 // unsigned int BufLen;
1985 // u32 SglOffset;
1986 u64 phys_addr;
1988 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSgl",
1989 pSgl, SxgSgl, 0, 0);
1991 // Set aside a pointer to the sgl
1992 SxgSgl->pSgl = pSgl;
1994 // Sanity check that our SGL format is as we expect.
1995 ASSERT(sizeof(SXG_X64_SGE) == sizeof(SCATTER_GATHER_ELEMENT));
1996 // Shouldn't be a vlan tag on this frame
1997 ASSERT(SxgSgl->VlanTag.VlanTci == 0);
1998 ASSERT(SxgSgl->VlanTag.VlanTpid == 0);
2000 // From here below we work with the SGL placed in our
2001 // buffer.
2003 SxgSgl->Sgl.NumberOfElements = 1;
2005 // Grab the spinlock and acquire a command
2006 spin_lock(&adapter->XmtZeroLock);
2007 SXG_GET_CMD(XmtRing, XmtRingInfo, XmtCmd, SxgSgl);
2008 if (XmtCmd == NULL) {
2009 // Call sxg_complete_slow_send to see if we can
2010 // free up any XmtRingZero entries and then try again
2011 spin_unlock(&adapter->XmtZeroLock);
2012 sxg_complete_slow_send(adapter);
2013 spin_lock(&adapter->XmtZeroLock);
2014 SXG_GET_CMD(XmtRing, XmtRingInfo, XmtCmd, SxgSgl);
2015 if (XmtCmd == NULL) {
2016 adapter->Stats.XmtZeroFull++;
2017 goto abortcmd;
2020 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbCmd",
2021 XmtCmd, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
2022 // Update stats
2023 adapter->Stats.DumbXmtPkts++;
2024 adapter->Stats.DumbXmtBytes += DataLength;
2025 #if XXXTODO // Stats stuff
2026 if (SXG_MULTICAST_PACKET(EtherHdr)) {
2027 if (SXG_BROADCAST_PACKET(EtherHdr)) {
2028 adapter->Stats.DumbXmtBcastPkts++;
2029 adapter->Stats.DumbXmtBcastBytes += DataLength;
2030 } else {
2031 adapter->Stats.DumbXmtMcastPkts++;
2032 adapter->Stats.DumbXmtMcastBytes += DataLength;
2034 } else {
2035 adapter->Stats.DumbXmtUcastPkts++;
2036 adapter->Stats.DumbXmtUcastBytes += DataLength;
2038 #endif
2039 // Fill in the command
2040 // Copy out the first SGE to the command and adjust for offset
2041 phys_addr = pci_map_single(adapter->pcidev, skb->data, skb->len, PCI_DMA_TODEVICE);
2042 XmtCmd->Buffer.FirstSgeAddress = SXG_GET_ADDR_HIGH(phys_addr);
2043 XmtCmd->Buffer.FirstSgeAddress = XmtCmd->Buffer.FirstSgeAddress << 32;
2044 XmtCmd->Buffer.FirstSgeAddress =
2045 XmtCmd->Buffer.FirstSgeAddress | SXG_GET_ADDR_LOW(phys_addr);
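	// Descriptive note: the three assignments above simply assemble the
	// full 64-bit DMA address for the command, i.e.
	// (high 32 bits of phys_addr << 32) | low 32 bits of phys_addr.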
2046 // XmtCmd->Buffer.FirstSgeAddress = SxgSgl->Sgl.Elements[Index].Address;
2047 // XmtCmd->Buffer.FirstSgeAddress.LowPart += MdlOffset;
2048 XmtCmd->Buffer.FirstSgeLength = DataLength;
2049 // Set a pointer to the remaining SGL entries
2050 // XmtCmd->Sgl = SxgSgl->PhysicalAddress;
2051 // Advance the physical address of the SxgSgl structure to
2052 // the second SGE
2053 // SglOffset = (u32)((u32 *)(&SxgSgl->Sgl.Elements[Index+1]) -
2054 // (u32 *)SxgSgl);
2055 // XmtCmd->Sgl.LowPart += SglOffset;
2056 XmtCmd->Buffer.SgeOffset = 0;
2057 // Note - TotalLength might be overwritten with MSS below..
2058 XmtCmd->Buffer.TotalLength = DataLength;
2059 XmtCmd->SgEntries = 1; //(ushort)(SxgSgl->Sgl.NumberOfElements - Index);
2060 XmtCmd->Flags = 0;
 2062 	// Advance the transmit cmd descriptor by 1.
2063 // NOTE - See comments in SxgTcpOutput where we write
2064 // to the XmtCmd register regarding CPU ID values and/or
2065 // multiple commands.
2068 WRITE_REG(adapter->UcodeRegs[0].XmtCmd, 1, TRUE);
2071 adapter->Stats.XmtQLen++; // Stats within lock
2072 spin_unlock(&adapter->XmtZeroLock);
2073 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDumSgl2",
2074 XmtCmd, pSgl, SxgSgl, 0);
2075 return;
2077 abortcmd:
2078 // NOTE - Only jump to this label AFTER grabbing the
2079 // XmtZeroLock, and DO NOT DROP IT between the
2080 // command allocation and the following abort.
2081 if (XmtCmd) {
2082 SXG_ABORT_CMD(XmtRingInfo);
2084 spin_unlock(&adapter->XmtZeroLock);
2086 // failsgl:
2087 // Jump to this label if failure occurs before the
2088 // XmtZeroLock is grabbed
2089 adapter->Stats.XmtErrors++;
2090 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumSGFal",
2091 pSgl, SxgSgl, XmtRingInfo->Head, XmtRingInfo->Tail);
2093 SXG_COMPLETE_DUMB_SEND(adapter, SxgSgl->DumbPacket); // SxgSgl->DumbPacket is the skb
2096 /***************************************************************
2097 * Link management functions
2098 ***************************************************************/
2101 * sxg_initialize_link - Initialize the link stuff
2103 * Arguments -
2104 * adapter - A pointer to our adapter structure
2106 * Return
2107 * status
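 *
 * Rough sequence (a summary of the code below, not extra steps): reset the
 * PHY/XGXS, transmit, receive and MAC blocks; program the link and MAC
 * addresses; configure the MAC and the AMIIM clock; power the PHY and
 * XAUI/XGXS/Serdes logic back up and verify PHY, SERDES and XAUI status;
 * download the PHY microcode; enable the LASI link alarm; then enable
 * receive/transmit and mark the link down until a link event arrives.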
2109 static int sxg_initialize_link(p_adapter_t adapter)
2111 PSXG_HW_REGS HwRegs = adapter->HwRegs;
2112 u32 Value;
2113 u32 ConfigData;
2114 u32 MaxFrame;
2115 int status;
2117 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "InitLink",
2118 adapter, 0, 0, 0);
2120 // Reset PHY and XGXS module
2121 WRITE_REG(HwRegs->LinkStatus, LS_SERDES_POWER_DOWN, TRUE);
2123 // Reset transmit configuration register
2124 WRITE_REG(HwRegs->XmtConfig, XMT_CONFIG_RESET, TRUE);
2126 // Reset receive configuration register
2127 WRITE_REG(HwRegs->RcvConfig, RCV_CONFIG_RESET, TRUE);
2129 // Reset all MAC modules
2130 WRITE_REG(HwRegs->MacConfig0, AXGMAC_CFG0_SUB_RESET, TRUE);
2132 // Link address 0
2133 // XXXTODO - This assumes the MAC address (0a:0b:0c:0d:0e:0f)
 2134 	// is stored with the first byte (0a) in byte 0
2135 // of the Mac address. Possibly reverse?
2136 Value = *(u32 *) adapter->MacAddr;
2137 WRITE_REG(HwRegs->LinkAddress0Low, Value, TRUE);
2138 // also write the MAC address to the MAC. Endian is reversed.
2139 WRITE_REG(HwRegs->MacAddressLow, ntohl(Value), TRUE);
2140 Value = (*(u16 *) & adapter->MacAddr[4] & 0x0000FFFF);
2141 WRITE_REG(HwRegs->LinkAddress0High, Value | LINK_ADDRESS_ENABLE, TRUE);
2142 // endian swap for the MAC (put high bytes in bits [31:16], swapped)
2143 Value = ntohl(Value);
2144 WRITE_REG(HwRegs->MacAddressHigh, Value, TRUE);
2145 // Link address 1
2146 WRITE_REG(HwRegs->LinkAddress1Low, 0, TRUE);
2147 WRITE_REG(HwRegs->LinkAddress1High, 0, TRUE);
2148 // Link address 2
2149 WRITE_REG(HwRegs->LinkAddress2Low, 0, TRUE);
2150 WRITE_REG(HwRegs->LinkAddress2High, 0, TRUE);
2151 // Link address 3
2152 WRITE_REG(HwRegs->LinkAddress3Low, 0, TRUE);
2153 WRITE_REG(HwRegs->LinkAddress3High, 0, TRUE);
2155 // Enable MAC modules
2156 WRITE_REG(HwRegs->MacConfig0, 0, TRUE);
2158 // Configure MAC
2159 WRITE_REG(HwRegs->MacConfig1, (AXGMAC_CFG1_XMT_PAUSE | // Allow sending of pause
2160 AXGMAC_CFG1_XMT_EN | // Enable XMT
2161 AXGMAC_CFG1_RCV_PAUSE | // Enable detection of pause
2162 AXGMAC_CFG1_RCV_EN | // Enable receive
2163 AXGMAC_CFG1_SHORT_ASSERT | // short frame detection
2164 AXGMAC_CFG1_CHECK_LEN | // Verify frame length
2165 AXGMAC_CFG1_GEN_FCS | // Generate FCS
2166 AXGMAC_CFG1_PAD_64), // Pad frames to 64 bytes
2167 TRUE);
2169 // Set AXGMAC max frame length if jumbo. Not needed for standard MTU
2170 if (adapter->JumboEnabled) {
2171 WRITE_REG(HwRegs->MacMaxFrameLen, AXGMAC_MAXFRAME_JUMBO, TRUE);
2173 // AMIIM Configuration Register -
2174 // The value placed in the AXGMAC_AMIIM_CFG_HALF_CLOCK portion
2175 // (bottom bits) of this register is used to determine the
2176 // MDC frequency as specified in the A-XGMAC Design Document.
2177 // This value must not be zero. The following value (62 or 0x3E)
2178 // is based on our MAC transmit clock frequency (MTCLK) of 312.5 MHz.
2179 // Given a maximum MDIO clock frequency of 2.5 MHz (see the PHY spec),
2180 // we get: 312.5/(2*(X+1)) < 2.5 ==> X = 62.
2181 // This value happens to be the default value for this register,
2182 // so we really don't have to do this.
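	// As a quick worked check of the inequality above (descriptive only,
	// not new behavior): with X = 0x3E = 62, MDC = 312.5 MHz / (2 * (62 + 1))
	// = 312.5 / 126 ~= 2.48 MHz, which is just under the 2.5 MHz MDIO limit.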
2183 WRITE_REG(HwRegs->MacAmiimConfig, 0x0000003E, TRUE);
2185 // Power up and enable PHY and XAUI/XGXS/Serdes logic
2186 WRITE_REG(HwRegs->LinkStatus,
2187 (LS_PHY_CLR_RESET |
2188 LS_XGXS_ENABLE |
2189 LS_XGXS_CTL | LS_PHY_CLK_EN | LS_ATTN_ALARM), TRUE);
2190 DBG_ERROR("After Power Up and enable PHY in sxg_initialize_link\n");
2192 // Per information given by Aeluros, wait 100 ms after removing reset.
2193 // It's not enough to wait for the self-clearing reset bit in reg 0 to clear.
2194 mdelay(100);
2196 // Verify the PHY has come up by checking that the Reset bit has cleared.
2197 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, // PHY PMA/PMD module
2198 PHY_PMA_CONTROL1, // PMA/PMD control register
2199 &Value);
2200 if (status != STATUS_SUCCESS)
2201 return (STATUS_FAILURE);
2202 if (Value & PMA_CONTROL1_RESET) // reset complete if bit is 0
2203 return (STATUS_FAILURE);
2205 // The SERDES should be initialized by now - confirm
2206 READ_REG(HwRegs->LinkStatus, Value);
2207 if (Value & LS_SERDES_DOWN) // verify SERDES is initialized
2208 return (STATUS_FAILURE);
2210 // The XAUI link should also be up - confirm
2211 if (!(Value & LS_XAUI_LINK_UP)) // verify XAUI link is up
2212 return (STATUS_FAILURE);
2214 // Initialize the PHY
2215 status = sxg_phy_init(adapter);
2216 if (status != STATUS_SUCCESS)
2217 return (STATUS_FAILURE);
2219 // Enable the Link Alarm
2220 status = sxg_write_mdio_reg(adapter, MIIM_DEV_PHY_PMA, // PHY PMA/PMD module
2221 LASI_CONTROL, // LASI control register
2222 LASI_CTL_LS_ALARM_ENABLE); // enable link alarm bit
2223 if (status != STATUS_SUCCESS)
2224 return (STATUS_FAILURE);
2226 // XXXTODO - temporary - verify bit is set
2227 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, // PHY PMA/PMD module
2228 LASI_CONTROL, // LASI control register
2229 &Value);
2230 if (status != STATUS_SUCCESS)
2231 return (STATUS_FAILURE);
2232 if (!(Value & LASI_CTL_LS_ALARM_ENABLE)) {
2233 DBG_ERROR("Error! LASI Control Alarm Enable bit not set!\n");
2235 // Enable receive
2236 MaxFrame = adapter->JumboEnabled ? JUMBOMAXFRAME : ETHERMAXFRAME;
2237 ConfigData = (RCV_CONFIG_ENABLE |
2238 RCV_CONFIG_ENPARSE |
2239 RCV_CONFIG_RCVBAD |
2240 RCV_CONFIG_RCVPAUSE |
2241 RCV_CONFIG_TZIPV6 |
2242 RCV_CONFIG_TZIPV4 |
2243 RCV_CONFIG_HASH_16 |
2244 RCV_CONFIG_SOCKET | RCV_CONFIG_BUFSIZE(MaxFrame));
2245 WRITE_REG(HwRegs->RcvConfig, ConfigData, TRUE);
2247 WRITE_REG(HwRegs->XmtConfig, XMT_CONFIG_ENABLE, TRUE);
2249 // Mark the link as down. We'll get a link event when it comes up.
2250 sxg_link_state(adapter, SXG_LINK_DOWN);
2252 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XInitLnk",
2253 adapter, 0, 0, 0);
2254 return (STATUS_SUCCESS);
2258 * sxg_phy_init - Initialize the PHY
2260 * Arguments -
2261 * adapter - A pointer to our adapter structure
2263 * Return
2264 * status
2266 static int sxg_phy_init(p_adapter_t adapter)
2268 u32 Value;
2269 PPHY_UCODE p;
2270 int status;
2272 DBG_ERROR("ENTER %s\n", __FUNCTION__);
2274 // Read a register to identify the PHY type
2275 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, // PHY PMA/PMD module
2276 0xC205, // PHY ID register (?)
2277 &Value); // XXXTODO - add def
2278 if (status != STATUS_SUCCESS)
2279 return (STATUS_FAILURE);
2281 if (Value == 0x0012) { // 0x0012 == AEL2005C PHY(?) - XXXTODO - add def
2282 DBG_ERROR
2283 ("AEL2005C PHY detected. Downloading PHY microcode.\n");
2285 // Initialize AEL2005C PHY and download PHY microcode
2286 for (p = PhyUcode; p->Addr != 0xFFFF; p++) {
2287 if (p->Addr == 0) {
2288 // if address == 0, data == sleep time in ms
2289 mdelay(p->Data);
2290 } else {
2291 // write the given data to the specified address
2292 status = sxg_write_mdio_reg(adapter, MIIM_DEV_PHY_PMA, // PHY PMA/PMD module
2293 p->Addr, // PHY address
2294 p->Data); // PHY data
2295 if (status != STATUS_SUCCESS)
2296 return (STATUS_FAILURE);
2300 DBG_ERROR("EXIT %s\n", __FUNCTION__);
2302 return (STATUS_SUCCESS);
2306 * sxg_link_event - Process a link event notification from the card
2308 * Arguments -
2309 * adapter - A pointer to our adapter structure
2311 * Return
2312 * None
2314 static void sxg_link_event(p_adapter_t adapter)
2316 PSXG_HW_REGS HwRegs = adapter->HwRegs;
2317 SXG_LINK_STATE LinkState;
2318 int status;
2319 u32 Value;
2321 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "LinkEvnt",
2322 adapter, 0, 0, 0);
2323 DBG_ERROR("ENTER %s\n", __FUNCTION__);
2325 // Check the Link Status register. We should have a Link Alarm.
2326 READ_REG(HwRegs->LinkStatus, Value);
2327 if (Value & LS_LINK_ALARM) {
2328 // We got a Link Status alarm. First, pause to let the
2329 // link state settle (it can bounce a number of times)
2330 mdelay(10);
2332 // Now clear the alarm by reading the LASI status register.
2333 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, // PHY PMA/PMD module
2334 LASI_STATUS, // LASI status register
2335 &Value);
2336 if (status != STATUS_SUCCESS) {
2337 DBG_ERROR("Error reading LASI Status MDIO register!\n");
2338 sxg_link_state(adapter, SXG_LINK_DOWN);
2339 // ASSERT(0);
2341 ASSERT(Value & LASI_STATUS_LS_ALARM);
2343 // Now get and set the link state
2344 LinkState = sxg_get_link_state(adapter);
2345 sxg_link_state(adapter, LinkState);
2346 DBG_ERROR("SXG: Link Alarm occurred. Link is %s\n",
2347 ((LinkState == SXG_LINK_UP) ? "UP" : "DOWN"));
2348 } else {
2349 // XXXTODO - Assuming Link Attention is only being generated for the
2350 // Link Alarm pin (and not for a XAUI Link Status change), then it's
2351 // impossible to get here. Yet we've gotten here twice (under extreme
2352 // conditions - bouncing the link up and down many times a second).
2353 // Needs further investigation.
2354 DBG_ERROR("SXG: sxg_link_event: Can't get here!\n");
2355 DBG_ERROR("SXG: Link Status == 0x%08X.\n", Value);
2356 // ASSERT(0);
2358 DBG_ERROR("EXIT %s\n", __FUNCTION__);
2363 * sxg_get_link_state - Determine if the link is up or down
2365 * Arguments -
2366 * adapter - A pointer to our adapter structure
2368 * Return
2369 * Link State
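 *
 * The three bits checked below are PMA/PMD receive signal detect, PCS
 * 10GBASE-R block lock, and XS lane alignment; if any MDIO read fails or
 * any of the bits is clear, the link is reported down.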
2371 static SXG_LINK_STATE sxg_get_link_state(p_adapter_t adapter)
2373 int status;
2374 u32 Value;
2376 DBG_ERROR("ENTER %s\n", __FUNCTION__);
2378 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "GetLink",
2379 adapter, 0, 0, 0);
2381 // Per the Xenpak spec (and the IEEE 10Gb spec?), the link is up if
2382 // the following 3 bits (from 3 different MDIO registers) are all true.
2383 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, // PHY PMA/PMD module
2384 PHY_PMA_RCV_DET, // PMA/PMD Receive Signal Detect register
2385 &Value);
2386 if (status != STATUS_SUCCESS)
2387 goto bad;
2389 // If PMA/PMD receive signal detect is 0, then the link is down
2390 if (!(Value & PMA_RCV_DETECT))
2391 return (SXG_LINK_DOWN);
2393 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PCS, // PHY PCS module
2394 PHY_PCS_10G_STATUS1, // PCS 10GBASE-R Status 1 register
2395 &Value);
2396 if (status != STATUS_SUCCESS)
2397 goto bad;
2399 // If PCS is not locked to receive blocks, then the link is down
2400 if (!(Value & PCS_10B_BLOCK_LOCK))
2401 return (SXG_LINK_DOWN);
2403 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_XS, // PHY XS module
2404 PHY_XS_LANE_STATUS, // XS Lane Status register
2405 &Value);
2406 if (status != STATUS_SUCCESS)
2407 goto bad;
2409 // If XS transmit lanes are not aligned, then the link is down
2410 if (!(Value & XS_LANE_ALIGN))
2411 return (SXG_LINK_DOWN);
2413 // All 3 bits are true, so the link is up
2414 DBG_ERROR("EXIT %s\n", __FUNCTION__);
2416 return (SXG_LINK_UP);
2418 bad:
2419 // An error occurred reading an MDIO register. This shouldn't happen.
2420 DBG_ERROR("Error reading an MDIO register!\n");
2421 ASSERT(0);
2422 return (SXG_LINK_DOWN);
2425 static void sxg_indicate_link_state(p_adapter_t adapter, SXG_LINK_STATE LinkState)
2427 if (adapter->LinkState == SXG_LINK_UP) {
2428 DBG_ERROR("%s: LINK now UP, call netif_start_queue\n",
2429 __FUNCTION__);
2430 netif_start_queue(adapter->netdev);
2431 } else {
2432 DBG_ERROR("%s: LINK now DOWN, call netif_stop_queue\n",
2433 __FUNCTION__);
2434 netif_stop_queue(adapter->netdev);
2439 * sxg_link_state - Set the link state and if necessary, indicate.
 2440  *	This routine is the central point of processing for all link state changes.
2441 * Nothing else in the driver should alter the link state or perform
2442 * link state indications
2444 * Arguments -
2445 * adapter - A pointer to our adapter structure
2446 * LinkState - The link state
2448 * Return
2449 * None
2451 static void sxg_link_state(p_adapter_t adapter, SXG_LINK_STATE LinkState)
2453 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "LnkINDCT",
2454 adapter, LinkState, adapter->LinkState, adapter->State);
2456 DBG_ERROR("ENTER %s\n", __FUNCTION__);
2458 // Hold the adapter lock during this routine. Maybe move
2459 // the lock to the caller.
2460 spin_lock(&adapter->AdapterLock);
2461 if (LinkState == adapter->LinkState) {
2462 // Nothing changed..
2463 spin_unlock(&adapter->AdapterLock);
2464 DBG_ERROR("EXIT #0 %s\n", __FUNCTION__);
2465 return;
2467 // Save the adapter state
2468 adapter->LinkState = LinkState;
2470 // Drop the lock and indicate link state
2471 spin_unlock(&adapter->AdapterLock);
2472 DBG_ERROR("EXIT #1 %s\n", __FUNCTION__);
2474 sxg_indicate_link_state(adapter, LinkState);
2478 * sxg_write_mdio_reg - Write to a register on the MDIO bus
2480 * Arguments -
2481 * adapter - A pointer to our adapter structure
2482 * DevAddr - MDIO device number being addressed
2483 * RegAddr - register address for the specified MDIO device
2484 * Value - value to write to the MDIO register
2486 * Return
2487 * status
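 *
 * The 10G MIIM access below is a two-step sequence (a summary of the code,
 * not additional behavior): an address operation latches DevAddr/RegAddr,
 * then a separate write operation supplies the data; after each step the
 * AMIIM indicator register is polled, in 100us ticks up to SXG_LINK_TIMEOUT,
 * until the BUSY bit clears.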
2489 static int sxg_write_mdio_reg(p_adapter_t adapter,
2490 u32 DevAddr, u32 RegAddr, u32 Value)
2492 PSXG_HW_REGS HwRegs = adapter->HwRegs;
2493 u32 AddrOp; // Address operation (written to MIIM field reg)
2494 u32 WriteOp; // Write operation (written to MIIM field reg)
2495 u32 Cmd; // Command (written to MIIM command reg)
2496 u32 ValueRead;
2497 u32 Timeout;
2499 // DBG_ERROR("ENTER %s\n", __FUNCTION__);
2501 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "WrtMDIO",
2502 adapter, 0, 0, 0);
2504 // Ensure values don't exceed field width
2505 DevAddr &= 0x001F; // 5-bit field
2506 RegAddr &= 0xFFFF; // 16-bit field
2507 Value &= 0xFFFF; // 16-bit field
2509 // Set MIIM field register bits for an MIIM address operation
2510 AddrOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
2511 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
2512 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
2513 (MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT) | RegAddr;
2515 // Set MIIM field register bits for an MIIM write operation
2516 WriteOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
2517 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
2518 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
2519 (MIIM_OP_WRITE << AXGMAC_AMIIM_FIELD_OP_SHIFT) | Value;
2521 // Set MIIM command register bits to execute an MIIM command
2522 Cmd = AXGMAC_AMIIM_CMD_START | AXGMAC_AMIIM_CMD_10G_OPERATION;
2524 // Reset the command register command bit (in case it's not 0)
2525 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
2527 // MIIM write to set the address of the specified MDIO register
2528 WRITE_REG(HwRegs->MacAmiimField, AddrOp, TRUE);
 2530 	// Write to MIIM Command Register to execute the address operation
2531 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
2533 // Poll AMIIM Indicator register to wait for completion
2534 Timeout = SXG_LINK_TIMEOUT;
2535 do {
2536 udelay(100); // Timeout in 100us units
2537 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
2538 if (--Timeout == 0) {
2539 return (STATUS_FAILURE);
2541 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
2543 // Reset the command register command bit
2544 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
2546 // MIIM write to set up an MDIO write operation
2547 WRITE_REG(HwRegs->MacAmiimField, WriteOp, TRUE);
2549 // Write to MIIM Command Register to execute the write operation
2550 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
2552 // Poll AMIIM Indicator register to wait for completion
2553 Timeout = SXG_LINK_TIMEOUT;
2554 do {
2555 udelay(100); // Timeout in 100us units
2556 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
2557 if (--Timeout == 0) {
2558 return (STATUS_FAILURE);
2560 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
2562 // DBG_ERROR("EXIT %s\n", __FUNCTION__);
2564 return (STATUS_SUCCESS);
2568 * sxg_read_mdio_reg - Read a register on the MDIO bus
2570 * Arguments -
2571 * adapter - A pointer to our adapter structure
2572 * DevAddr - MDIO device number being addressed
2573 * RegAddr - register address for the specified MDIO device
2574 * pValue - pointer to where to put data read from the MDIO register
2576 * Return
2577 * status
2579 static int sxg_read_mdio_reg(p_adapter_t adapter,
2580 u32 DevAddr, u32 RegAddr, u32 * pValue)
2582 PSXG_HW_REGS HwRegs = adapter->HwRegs;
2583 u32 AddrOp; // Address operation (written to MIIM field reg)
2584 u32 ReadOp; // Read operation (written to MIIM field reg)
2585 u32 Cmd; // Command (written to MIIM command reg)
2586 u32 ValueRead;
2587 u32 Timeout;
2589 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "WrtMDIO",
2590 adapter, 0, 0, 0);
2591 // DBG_ERROR("ENTER %s\n", __FUNCTION__);
2593 // Ensure values don't exceed field width
2594 DevAddr &= 0x001F; // 5-bit field
2595 RegAddr &= 0xFFFF; // 16-bit field
2597 // Set MIIM field register bits for an MIIM address operation
2598 AddrOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
2599 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
2600 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
2601 (MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT) | RegAddr;
2603 // Set MIIM field register bits for an MIIM read operation
2604 ReadOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
2605 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
2606 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
2607 (MIIM_OP_READ << AXGMAC_AMIIM_FIELD_OP_SHIFT);
2609 // Set MIIM command register bits to execute an MIIM command
2610 Cmd = AXGMAC_AMIIM_CMD_START | AXGMAC_AMIIM_CMD_10G_OPERATION;
2612 // Reset the command register command bit (in case it's not 0)
2613 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
2615 // MIIM write to set the address of the specified MDIO register
2616 WRITE_REG(HwRegs->MacAmiimField, AddrOp, TRUE);
 2618 	// Write to MIIM Command Register to execute the address operation
2619 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
2621 // Poll AMIIM Indicator register to wait for completion
2622 Timeout = SXG_LINK_TIMEOUT;
2623 do {
2624 udelay(100); // Timeout in 100us units
2625 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
2626 if (--Timeout == 0) {
2627 return (STATUS_FAILURE);
2629 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
2631 // Reset the command register command bit
2632 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
2634 // MIIM write to set up an MDIO register read operation
2635 WRITE_REG(HwRegs->MacAmiimField, ReadOp, TRUE);
2637 // Write to MIIM Command Register to execute the read operation
2638 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
2640 // Poll AMIIM Indicator register to wait for completion
2641 Timeout = SXG_LINK_TIMEOUT;
2642 do {
2643 udelay(100); // Timeout in 100us units
2644 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
2645 if (--Timeout == 0) {
2646 return (STATUS_FAILURE);
2648 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
2650 // Read the MDIO register data back from the field register
2651 READ_REG(HwRegs->MacAmiimField, *pValue);
2652 *pValue &= 0xFFFF; // data is in the lower 16 bits
2654 // DBG_ERROR("EXIT %s\n", __FUNCTION__);
2656 return (STATUS_SUCCESS);
2660 * Allocate a mcast_address structure to hold the multicast address.
2661 * Link it in.
2663 static int sxg_mcast_add_list(p_adapter_t adapter, char *address)
2665 p_mcast_address_t mcaddr, mlist;
2666 bool equaladdr;
2668 /* Check to see if it already exists */
2669 mlist = adapter->mcastaddrs;
2670 while (mlist) {
2671 ETHER_EQ_ADDR(mlist->address, address, equaladdr);
2672 if (equaladdr) {
2673 return (STATUS_SUCCESS);
2675 mlist = mlist->next;
2678 /* Doesn't already exist. Allocate a structure to hold it */
2679 mcaddr = kmalloc(sizeof(mcast_address_t), GFP_ATOMIC);
2680 if (mcaddr == NULL)
2681 return 1;
2683 memcpy(mcaddr->address, address, 6);
2685 mcaddr->next = adapter->mcastaddrs;
2686 adapter->mcastaddrs = mcaddr;
2688 return (STATUS_SUCCESS);
2692 * Functions to obtain the CRC corresponding to the destination mac address.
2693 * This is a standard ethernet CRC in that it is a 32-bit, reflected CRC using
2694 * the polynomial:
 2695  *	x^32 + x^26 + x^23 + x^22 + x^16 + x^12 + x^11 + x^10 + x^8 + x^7 + x^5 + x^4 + x^2 + x + 1.
2697 * After the CRC for the 6 bytes is generated (but before the value is complemented),
2698 * we must then transpose the value and return bits 30-23.
2701 static u32 sxg_crc_table[256]; /* Table of CRC's for all possible byte values */
2702 static u32 sxg_crc_init; /* Is table initialized */
 2705  * Construct the CRC32 table
2707 static void sxg_mcast_init_crc32(void)
 2709 	u32 c;			/* CRC shift reg */
2710 u32 e = 0; /* Poly X-or pattern */
2711 int i; /* counter */
2712 int k; /* byte being shifted into crc */
2714 static int p[] = { 0, 1, 2, 4, 5, 7, 8, 10, 11, 12, 16, 22, 23, 26 };
2716 for (i = 0; i < sizeof(p) / sizeof(int); i++) {
2717 e |= 1L << (31 - p[i]);
2720 for (i = 1; i < 256; i++) {
2721 c = i;
2722 for (k = 8; k; k--) {
2723 c = c & 1 ? (c >> 1) ^ e : c >> 1;
2725 sxg_crc_table[i] = c;
 2730  * Return the MAC hash as described above.
2732 static unsigned char sxg_mcast_get_mac_hash(char *macaddr)
2734 u32 crc;
2735 char *p;
2736 int i;
2737 unsigned char machash = 0;
2739 if (!sxg_crc_init) {
2740 sxg_mcast_init_crc32();
2741 sxg_crc_init = 1;
2744 crc = 0xFFFFFFFF; /* Preload shift register, per crc-32 spec */
2745 for (i = 0, p = macaddr; i < 6; ++p, ++i) {
2746 crc = (crc >> 8) ^ sxg_crc_table[(crc ^ *p) & 0xFF];
2749 /* Return bits 1-8, transposed */
2750 for (i = 1; i < 9; i++) {
2751 machash |= (((crc >> i) & 1) << (8 - i));
2754 return (machash);
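/*
 * Equivalence note (descriptive only, not a behavior change): the loop above
 * copies CRC bits 1-8 into machash bits 7-0.  That is the same as
 * bit-reversing ("transposing") the 32-bit CRC and keeping bits 30-23, which
 * is how the block comment above this group of functions describes the hash.
 * A hedged sketch of the equivalent one-liner, if the kernel's bitrev32()
 * helper were used instead:
 *
 *	machash = (unsigned char)(bitrev32(crc) >> 23);
 */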
2757 static void sxg_mcast_set_bit(p_adapter_t adapter, char *address)
2759 unsigned char crcpoly;
 2761 	/* Get the CRC hash of the mac address */
2762 crcpoly = sxg_mcast_get_mac_hash(address);
2764 /* We only have space on the SLIC for 64 entries. Lop
2765 * off the top two bits. (2^6 = 64)
2767 crcpoly &= 0x3F;
2769 /* OR in the new bit into our 64 bit mask. */
2770 adapter->MulticastMask |= (u64) 1 << crcpoly;
2773 static void sxg_mcast_set_list(p_net_device dev)
2775 #if XXXTODO
2776 p_adapter_t adapter = (p_adapter_t) netdev_priv(dev);
2777 int status = STATUS_SUCCESS;
2778 int i;
2779 char *addresses;
2780 struct dev_mc_list *mc_list = dev->mc_list;
2781 int mc_count = dev->mc_count;
2783 ASSERT(adapter);
2785 for (i = 1; i <= mc_count; i++) {
2786 addresses = (char *) & mc_list->dmi_addr;
2787 if (mc_list->dmi_addrlen == 6) {
2788 status = sxg_mcast_add_list(adapter, addresses);
2789 if (status != STATUS_SUCCESS) {
2790 break;
2792 } else {
2793 status = -EINVAL;
2794 break;
2796 sxg_mcast_set_bit(adapter, addresses);
2797 mc_list = mc_list->next;
2800 DBG_ERROR("%s a->devflags_prev[%x] dev->flags[%x] status[%x]\n",
2801 __FUNCTION__, adapter->devflags_prev, dev->flags, status);
2802 if (adapter->devflags_prev != dev->flags) {
2803 adapter->macopts = MAC_DIRECTED;
2804 if (dev->flags) {
2805 if (dev->flags & IFF_BROADCAST) {
2806 adapter->macopts |= MAC_BCAST;
2808 if (dev->flags & IFF_PROMISC) {
2809 adapter->macopts |= MAC_PROMISC;
2811 if (dev->flags & IFF_ALLMULTI) {
2812 adapter->macopts |= MAC_ALLMCAST;
2814 if (dev->flags & IFF_MULTICAST) {
2815 adapter->macopts |= MAC_MCAST;
2818 adapter->devflags_prev = dev->flags;
2819 DBG_ERROR("%s call sxg_config_set adapter->macopts[%x]\n",
2820 __FUNCTION__, adapter->macopts);
2821 sxg_config_set(adapter, TRUE);
2822 } else {
2823 if (status == STATUS_SUCCESS) {
2824 sxg_mcast_set_mask(adapter);
2827 #endif
2828 return;
2831 static void sxg_mcast_set_mask(p_adapter_t adapter)
2833 PSXG_UCODE_REGS sxg_regs = adapter->UcodeRegs;
2835 DBG_ERROR("%s ENTER (%s) macopts[%x] mask[%llx]\n", __FUNCTION__,
2836 adapter->netdev->name, (unsigned int) adapter->MacFilter,
2837 adapter->MulticastMask);
2839 if (adapter->MacFilter & (MAC_ALLMCAST | MAC_PROMISC)) {
2840 /* Turn on all multicast addresses. We have to do this for promiscuous
2841 * mode as well as ALLMCAST mode. It saves the Microcode from having
2842 * to keep state about the MAC configuration.
2844 // DBG_ERROR("sxg: %s macopts = MAC_ALLMCAST | MAC_PROMISC\n SLUT MODE!!!\n",__FUNCTION__);
2845 WRITE_REG(sxg_regs->McastLow, 0xFFFFFFFF, FLUSH);
2846 WRITE_REG(sxg_regs->McastHigh, 0xFFFFFFFF, FLUSH);
2847 // DBG_ERROR("%s (%s) WRITE to slic_regs slic_mcastlow&high 0xFFFFFFFF\n",__FUNCTION__, adapter->netdev->name);
2849 } else {
 2850 		/* Commit our multicast mask to the SLIC by writing to the multicast
2851 * address mask registers
2853 DBG_ERROR("%s (%s) WRITE mcastlow[%lx] mcasthigh[%lx]\n",
2854 __FUNCTION__, adapter->netdev->name,
2855 ((ulong) (adapter->MulticastMask & 0xFFFFFFFF)),
2856 ((ulong)
2857 ((adapter->MulticastMask >> 32) & 0xFFFFFFFF)));
2859 WRITE_REG(sxg_regs->McastLow,
2860 (u32) (adapter->MulticastMask & 0xFFFFFFFF),
2861 FLUSH);
2862 WRITE_REG(sxg_regs->McastHigh,
2863 (u32) ((adapter->
2864 MulticastMask >> 32) & 0xFFFFFFFF),
2865 FLUSH);
2869 static void sxg_unmap_mmio_space(p_adapter_t adapter)
2871 #if LINUX_FREES_ADAPTER_RESOURCES
2872 // if (adapter->Regs) {
2873 // iounmap(adapter->Regs);
2874 // }
2875 // adapter->slic_regs = NULL;
2876 #endif
2879 #if XXXTODO
2881 * SxgFreeResources - Free everything allocated in SxgAllocateResources
2883 * Arguments -
2884 * adapter - A pointer to our adapter structure
2886 * Return
2887 * none
2889 void SxgFreeResources(p_adapter_t adapter)
2891 u32 RssIds, IsrCount;
2892 PTCP_OBJECT TcpObject;
2893 u32 i;
2894 BOOLEAN TimerCancelled;
2896 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "FreeRes",
2897 adapter, adapter->MaxTcbs, 0, 0);
2899 RssIds = SXG_RSS_CPU_COUNT(adapter);
2900 IsrCount = adapter->MsiEnabled ? RssIds : 1;
2902 if (adapter->BasicAllocations == FALSE) {
2903 // No allocations have been made, including spinlocks,
2904 // or listhead initializations. Return.
2905 return;
2908 if (!(IsListEmpty(&adapter->AllRcvBlocks))) {
2909 SxgFreeRcvBlocks(adapter);
2911 if (!(IsListEmpty(&adapter->AllSglBuffers))) {
2912 SxgFreeSglBuffers(adapter);
2914 // Free event queues.
2915 if (adapter->EventRings) {
2916 pci_free_consistent(adapter->pcidev,
2917 sizeof(SXG_EVENT_RING) * RssIds,
2918 adapter->EventRings, adapter->PEventRings);
2920 if (adapter->Isr) {
2921 pci_free_consistent(adapter->pcidev,
2922 sizeof(u32) * IsrCount,
2923 adapter->Isr, adapter->PIsr);
2925 if (adapter->XmtRingZeroIndex) {
2926 pci_free_consistent(adapter->pcidev,
2927 sizeof(u32),
2928 adapter->XmtRingZeroIndex,
2929 adapter->PXmtRingZeroIndex);
2931 if (adapter->IndirectionTable) {
2932 pci_free_consistent(adapter->pcidev,
2933 SXG_MAX_RSS_TABLE_SIZE,
2934 adapter->IndirectionTable,
2935 adapter->PIndirectionTable);
2938 SXG_FREE_PACKET_POOL(adapter->PacketPoolHandle);
2939 SXG_FREE_BUFFER_POOL(adapter->BufferPoolHandle);
2941 // Unmap register spaces
2942 SxgUnmapResources(adapter);
2944 // Deregister DMA
2945 if (adapter->DmaHandle) {
2946 SXG_DEREGISTER_DMA(adapter->DmaHandle);
2948 // Deregister interrupt
2949 SxgDeregisterInterrupt(adapter);
2951 // Possibly free system info (5.2 only)
2952 SXG_RELEASE_SYSTEM_INFO(adapter);
2954 SxgDiagFreeResources(adapter);
2956 SxgFreeMCastAddrs(adapter);
2958 if (SXG_TIMER_ALLOCATED(adapter->ResetTimer)) {
2959 SXG_CANCEL_TIMER(adapter->ResetTimer, TimerCancelled);
2960 SXG_FREE_TIMER(adapter->ResetTimer);
2962 if (SXG_TIMER_ALLOCATED(adapter->RssTimer)) {
2963 SXG_CANCEL_TIMER(adapter->RssTimer, TimerCancelled);
2964 SXG_FREE_TIMER(adapter->RssTimer);
2966 if (SXG_TIMER_ALLOCATED(adapter->OffloadTimer)) {
2967 SXG_CANCEL_TIMER(adapter->OffloadTimer, TimerCancelled);
2968 SXG_FREE_TIMER(adapter->OffloadTimer);
2971 adapter->BasicAllocations = FALSE;
2973 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFreeRes",
2974 adapter, adapter->MaxTcbs, 0, 0);
2976 #endif
2979 * sxg_allocate_complete -
2981 * This routine is called when a memory allocation has completed.
2983 * Arguments -
2984 * p_adapter_t - Our adapter structure
2985 * VirtualAddress - Memory virtual address
2986 * PhysicalAddress - Memory physical address
2987 * Length - Length of memory allocated (or 0)
2988 * Context - The type of buffer allocated
2990 * Return
2991 * None.
2993 static void sxg_allocate_complete(p_adapter_t adapter,
2994 void *VirtualAddress,
2995 dma_addr_t PhysicalAddress,
2996 u32 Length, SXG_BUFFER_TYPE Context)
2998 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocCmp",
2999 adapter, VirtualAddress, Length, Context);
3000 ASSERT(adapter->AllocationsPending);
3001 --adapter->AllocationsPending;
3003 switch (Context) {
3005 case SXG_BUFFER_TYPE_RCV:
3006 sxg_allocate_rcvblock_complete(adapter,
3007 VirtualAddress,
3008 PhysicalAddress, Length);
3009 break;
3010 case SXG_BUFFER_TYPE_SGL:
3011 sxg_allocate_sgl_buffer_complete(adapter,
3012 (PSXG_SCATTER_GATHER)
3013 VirtualAddress,
3014 PhysicalAddress, Length);
3015 break;
3017 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlocCmp",
3018 adapter, VirtualAddress, Length, Context);
3022 * sxg_allocate_buffer_memory - Shared memory allocation routine used for
3023 * synchronous and asynchronous buffer allocations
3025 * Arguments -
3026 * adapter - A pointer to our adapter structure
3027 * Size - block size to allocate
3028 * BufferType - Type of buffer to allocate
3030 * Return
3031 * int
3033 static int sxg_allocate_buffer_memory(p_adapter_t adapter,
3034 u32 Size, SXG_BUFFER_TYPE BufferType)
3036 int status;
3037 void * Buffer;
3038 dma_addr_t pBuffer;
3040 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocMem",
3041 adapter, Size, BufferType, 0);
3042 // Grab the adapter lock and check the state.
3043 // If we're in anything other than INITIALIZING or
3044 // RUNNING state, fail. This is to prevent
3045 // allocations in an improper driver state
3046 spin_lock(&adapter->AdapterLock);
3048 // Increment the AllocationsPending count while holding
3049 // the lock. Pause processing relies on this
3050 ++adapter->AllocationsPending;
3051 spin_unlock(&adapter->AdapterLock);
3053 // At initialization time allocate resources synchronously.
3054 Buffer = pci_alloc_consistent(adapter->pcidev, Size, &pBuffer);
3055 if (Buffer == NULL) {
3056 spin_lock(&adapter->AdapterLock);
3057 // Decrement the AllocationsPending count while holding
3058 // the lock. Pause processing relies on this
3059 --adapter->AllocationsPending;
3060 spin_unlock(&adapter->AdapterLock);
3061 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlcMemF1",
3062 adapter, Size, BufferType, 0);
3063 return (STATUS_RESOURCES);
3065 sxg_allocate_complete(adapter, Buffer, pBuffer, Size, BufferType);
3066 status = STATUS_SUCCESS;
3068 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlocMem",
3069 adapter, Size, BufferType, status);
3070 return (status);
3074 * sxg_allocate_rcvblock_complete - Complete a receive descriptor block allocation
3076 * Arguments -
3077 * adapter - A pointer to our adapter structure
3078 * RcvBlock - receive block virtual address
3079 * PhysicalAddress - Physical address
3080 * Length - Memory length
3082 * Return
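 *
 * The single allocation handed to this routine is carved up in place: the
 * receive data buffers sit at the start, and the receive block header,
 * descriptor block and descriptor block header live at offsets computed by
 * the SXG_RCV_*_OFFSET(BufferSize) macros used below.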
3085 static void sxg_allocate_rcvblock_complete(p_adapter_t adapter,
3086 void * RcvBlock,
3087 dma_addr_t PhysicalAddress, u32 Length)
3089 u32 i;
3090 u32 BufferSize = adapter->ReceiveBufferSize;
3091 u64 Paddr;
3092 PSXG_RCV_BLOCK_HDR RcvBlockHdr;
3093 unsigned char *RcvDataBuffer;
3094 PSXG_RCV_DATA_BUFFER_HDR RcvDataBufferHdr;
3095 PSXG_RCV_DESCRIPTOR_BLOCK RcvDescriptorBlock;
3096 PSXG_RCV_DESCRIPTOR_BLOCK_HDR RcvDescriptorBlockHdr;
3098 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlRcvBlk",
3099 adapter, RcvBlock, Length, 0);
3100 if (RcvBlock == NULL) {
3101 goto fail;
3103 memset(RcvBlock, 0, Length);
3104 ASSERT((BufferSize == SXG_RCV_DATA_BUFFER_SIZE) ||
3105 (BufferSize == SXG_RCV_JUMBO_BUFFER_SIZE));
3106 ASSERT(Length == SXG_RCV_BLOCK_SIZE(BufferSize));
3107 // First, initialize the contained pool of receive data
 3108 	// buffers.  This initialization requires NBL/NB/MDL allocations.
3109 // If any of them fail, free the block and return without
3110 // queueing the shared memory
3111 RcvDataBuffer = RcvBlock;
3112 #if 0
3113 for (i = 0, Paddr = *PhysicalAddress;
3114 i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
3115 i++, Paddr.LowPart += BufferSize, RcvDataBuffer += BufferSize)
3116 #endif
3117 for (i = 0, Paddr = PhysicalAddress;
3118 i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
3119 i++, Paddr += BufferSize, RcvDataBuffer += BufferSize) {
3121 RcvDataBufferHdr =
3122 (PSXG_RCV_DATA_BUFFER_HDR) (RcvDataBuffer +
3123 SXG_RCV_DATA_BUFFER_HDR_OFFSET
3124 (BufferSize));
3125 RcvDataBufferHdr->VirtualAddress = RcvDataBuffer;
3126 RcvDataBufferHdr->PhysicalAddress = Paddr;
3127 RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM; // For FREE macro assertion
3128 RcvDataBufferHdr->Size =
3129 SXG_RCV_BUFFER_DATA_SIZE(BufferSize);
3131 SXG_ALLOCATE_RCV_PACKET(adapter, RcvDataBufferHdr);
3132 if (RcvDataBufferHdr->SxgDumbRcvPacket == NULL)
3133 goto fail;
3137 // Place this entire block of memory on the AllRcvBlocks queue so it can be
 3138 	// freed later
3139 RcvBlockHdr =
3140 (PSXG_RCV_BLOCK_HDR) ((unsigned char *)RcvBlock +
3141 SXG_RCV_BLOCK_HDR_OFFSET(BufferSize));
3142 RcvBlockHdr->VirtualAddress = RcvBlock;
3143 RcvBlockHdr->PhysicalAddress = PhysicalAddress;
3144 spin_lock(&adapter->RcvQLock);
3145 adapter->AllRcvBlockCount++;
3146 InsertTailList(&adapter->AllRcvBlocks, &RcvBlockHdr->AllList);
3147 spin_unlock(&adapter->RcvQLock);
3149 // Now free the contained receive data buffers that we initialized above
3150 RcvDataBuffer = RcvBlock;
3151 for (i = 0, Paddr = PhysicalAddress;
3152 i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
3153 i++, Paddr += BufferSize, RcvDataBuffer += BufferSize) {
3154 RcvDataBufferHdr = (PSXG_RCV_DATA_BUFFER_HDR) (RcvDataBuffer +
3155 SXG_RCV_DATA_BUFFER_HDR_OFFSET
3156 (BufferSize));
3157 spin_lock(&adapter->RcvQLock);
3158 SXG_FREE_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
3159 spin_unlock(&adapter->RcvQLock);
3162 // Locate the descriptor block and put it on a separate free queue
3163 RcvDescriptorBlock = (PSXG_RCV_DESCRIPTOR_BLOCK) ((unsigned char *)RcvBlock +
3164 SXG_RCV_DESCRIPTOR_BLOCK_OFFSET
3165 (BufferSize));
3166 RcvDescriptorBlockHdr =
3167 (PSXG_RCV_DESCRIPTOR_BLOCK_HDR) ((unsigned char *)RcvBlock +
3168 SXG_RCV_DESCRIPTOR_BLOCK_HDR_OFFSET
3169 (BufferSize));
3170 RcvDescriptorBlockHdr->VirtualAddress = RcvDescriptorBlock;
3171 RcvDescriptorBlockHdr->PhysicalAddress = Paddr;
3172 spin_lock(&adapter->RcvQLock);
3173 SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter, RcvDescriptorBlockHdr);
3174 spin_unlock(&adapter->RcvQLock);
3175 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlRBlk",
3176 adapter, RcvBlock, Length, 0);
3177 return;
3178 fail:
3179 // Free any allocated resources
3180 if (RcvBlock) {
3181 RcvDataBuffer = RcvBlock;
3182 for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
3183 i++, RcvDataBuffer += BufferSize) {
3184 RcvDataBufferHdr =
3185 (PSXG_RCV_DATA_BUFFER_HDR) (RcvDataBuffer +
3186 SXG_RCV_DATA_BUFFER_HDR_OFFSET
3187 (BufferSize));
3188 SXG_FREE_RCV_PACKET(RcvDataBufferHdr);
3190 pci_free_consistent(adapter->pcidev,
3191 Length, RcvBlock, PhysicalAddress);
3193 DBG_ERROR("%s: OUT OF RESOURCES\n", __FUNCTION__);
3194 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "RcvAFail",
3195 adapter, adapter->FreeRcvBufferCount,
3196 adapter->FreeRcvBlockCount, adapter->AllRcvBlockCount);
3197 adapter->Stats.NoMem++;
3201 * sxg_allocate_sgl_buffer_complete - Complete a SGL buffer allocation
3203 * Arguments -
3204 * adapter - A pointer to our adapter structure
3205 * SxgSgl - SXG_SCATTER_GATHER buffer
3206 * PhysicalAddress - Physical address
3207 * Length - Memory length
3209 * Return
3212 static void sxg_allocate_sgl_buffer_complete(p_adapter_t adapter,
3213 PSXG_SCATTER_GATHER SxgSgl,
3214 dma_addr_t PhysicalAddress, u32 Length)
3216 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlSglCmp",
3217 adapter, SxgSgl, Length, 0);
3218 spin_lock(&adapter->SglQLock);
3219 adapter->AllSglBufferCount++;
3220 memset(SxgSgl, 0, sizeof(SXG_SCATTER_GATHER));
3221 SxgSgl->PhysicalAddress = PhysicalAddress; /* *PhysicalAddress; */
3222 SxgSgl->adapter = adapter; // Initialize backpointer once
3223 InsertTailList(&adapter->AllSglBuffers, &SxgSgl->AllList);
3224 spin_unlock(&adapter->SglQLock);
3225 SxgSgl->State = SXG_BUFFER_BUSY;
3226 SXG_FREE_SGL_BUFFER(adapter, SxgSgl, NULL);
3227 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlSgl",
3228 adapter, SxgSgl, Length, 0);
3231 static unsigned char temp_mac_address[6] = { 0x00, 0xab, 0xcd, 0xef, 0x12, 0x69 };
3233 static void sxg_adapter_set_hwaddr(p_adapter_t adapter)
3235 // DBG_ERROR ("%s ENTER card->config_set[%x] port[%d] physport[%d] funct#[%d]\n", __FUNCTION__,
3236 // card->config_set, adapter->port, adapter->physport, adapter->functionnumber);
3238 // sxg_dbg_macaddrs(adapter);
3240 memcpy(adapter->macaddr, temp_mac_address, sizeof(SXG_CONFIG_MAC));
3241 // DBG_ERROR ("%s AFTER copying from config.macinfo into currmacaddr\n", __FUNCTION__);
3242 // sxg_dbg_macaddrs(adapter);
3243 if (!(adapter->currmacaddr[0] ||
3244 adapter->currmacaddr[1] ||
3245 adapter->currmacaddr[2] ||
3246 adapter->currmacaddr[3] ||
3247 adapter->currmacaddr[4] || adapter->currmacaddr[5])) {
3248 memcpy(adapter->currmacaddr, adapter->macaddr, 6);
3250 if (adapter->netdev) {
3251 memcpy(adapter->netdev->dev_addr, adapter->currmacaddr, 6);
3253 // DBG_ERROR ("%s EXIT port %d\n", __FUNCTION__, adapter->port);
3254 sxg_dbg_macaddrs(adapter);
3258 static int sxg_mac_set_address(p_net_device dev, void * ptr)
3260 #if XXXTODO
3261 p_adapter_t adapter = (p_adapter_t) netdev_priv(dev);
3262 struct sockaddr *addr = ptr;
3264 DBG_ERROR("%s ENTER (%s)\n", __FUNCTION__, adapter->netdev->name);
3266 if (netif_running(dev)) {
3267 return -EBUSY;
3269 if (!adapter) {
3270 return -EBUSY;
3272 DBG_ERROR("sxg: %s (%s) curr %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
3273 __FUNCTION__, adapter->netdev->name, adapter->currmacaddr[0],
3274 adapter->currmacaddr[1], adapter->currmacaddr[2],
3275 adapter->currmacaddr[3], adapter->currmacaddr[4],
3276 adapter->currmacaddr[5]);
3277 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3278 memcpy(adapter->currmacaddr, addr->sa_data, dev->addr_len);
3279 DBG_ERROR("sxg: %s (%s) new %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
3280 __FUNCTION__, adapter->netdev->name, adapter->currmacaddr[0],
3281 adapter->currmacaddr[1], adapter->currmacaddr[2],
3282 adapter->currmacaddr[3], adapter->currmacaddr[4],
3283 adapter->currmacaddr[5]);
3285 sxg_config_set(adapter, TRUE);
3286 #endif
3287 return 0;
3290 /*****************************************************************************/
3291 /************* SXG DRIVER FUNCTIONS (below) ********************************/
3292 /*****************************************************************************/
3295 * sxg_initialize_adapter - Initialize adapter
3297 * Arguments -
3298 * adapter - A pointer to our adapter structure
3300 * Return
3301 * int
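 *
 * In outline (a summary of the code below): disable interrupts, set the MTU,
 * program the event ring, the per-ISR interrupt status pointers and the
 * transmit/receive ring bases, stock the card with receive buffers, enable
 * receive checksum offload, and finally bring up the link via
 * sxg_initialize_link().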
3303 static int sxg_initialize_adapter(p_adapter_t adapter)
3305 u32 RssIds, IsrCount;
3306 u32 i;
3307 int status;
3309 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "InitAdpt",
3310 adapter, 0, 0, 0);
3312 RssIds = 1; // XXXTODO SXG_RSS_CPU_COUNT(adapter);
3313 IsrCount = adapter->MsiEnabled ? RssIds : 1;
3315 // Sanity check SXG_UCODE_REGS structure definition to
3316 // make sure the length is correct
3317 ASSERT(sizeof(SXG_UCODE_REGS) == SXG_REGISTER_SIZE_PER_CPU);
3319 // Disable interrupts
3320 SXG_DISABLE_ALL_INTERRUPTS(adapter);
3322 // Set MTU
3323 ASSERT((adapter->FrameSize == ETHERMAXFRAME) ||
3324 (adapter->FrameSize == JUMBOMAXFRAME));
3325 WRITE_REG(adapter->UcodeRegs[0].LinkMtu, adapter->FrameSize, TRUE);
3327 // Set event ring base address and size
3328 WRITE_REG64(adapter,
3329 adapter->UcodeRegs[0].EventBase, adapter->PEventRings, 0);
3330 WRITE_REG(adapter->UcodeRegs[0].EventSize, EVENT_RING_SIZE, TRUE);
3332 // Per-ISR initialization
3333 for (i = 0; i < IsrCount; i++) {
3334 u64 Addr;
3335 // Set interrupt status pointer
3336 Addr = adapter->PIsr + (i * sizeof(u32));
3337 WRITE_REG64(adapter, adapter->UcodeRegs[i].Isp, Addr, i);
3340 // XMT ring zero index
3341 WRITE_REG64(adapter,
3342 adapter->UcodeRegs[0].SPSendIndex,
3343 adapter->PXmtRingZeroIndex, 0);
3345 // Per-RSS initialization
3346 for (i = 0; i < RssIds; i++) {
3347 // Release all event ring entries to the Microcode
3348 WRITE_REG(adapter->UcodeRegs[i].EventRelease, EVENT_RING_SIZE,
3349 TRUE);
3352 // Transmit ring base and size
3353 WRITE_REG64(adapter,
3354 adapter->UcodeRegs[0].XmtBase, adapter->PXmtRings, 0);
3355 WRITE_REG(adapter->UcodeRegs[0].XmtSize, SXG_XMT_RING_SIZE, TRUE);
3357 // Receive ring base and size
3358 WRITE_REG64(adapter,
3359 adapter->UcodeRegs[0].RcvBase, adapter->PRcvRings, 0);
3360 WRITE_REG(adapter->UcodeRegs[0].RcvSize, SXG_RCV_RING_SIZE, TRUE);
3362 // Populate the card with receive buffers
3363 sxg_stock_rcv_buffers(adapter);
3365 // Initialize checksum offload capabilities. At the moment
3366 // we always enable IP and TCP receive checksums on the card.
3367 // Depending on the checksum configuration specified by the
3368 // user, we can choose to report or ignore the checksum
3369 // information provided by the card.
3370 WRITE_REG(adapter->UcodeRegs[0].ReceiveChecksum,
3371 SXG_RCV_TCP_CSUM_ENABLED | SXG_RCV_IP_CSUM_ENABLED, TRUE);
3373 // Initialize the MAC, XAUI
3374 DBG_ERROR("sxg: %s ENTER sxg_initialize_link\n", __FUNCTION__);
3375 status = sxg_initialize_link(adapter);
3376 DBG_ERROR("sxg: %s EXIT sxg_initialize_link status[%x]\n", __FUNCTION__,
3377 status);
3378 if (status != STATUS_SUCCESS) {
3379 return (status);
3381 // Initialize Dead to FALSE.
3382 // SlicCheckForHang or SlicDumpThread will take it from here.
3383 adapter->Dead = FALSE;
3384 adapter->PingOutstanding = FALSE;
3386 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XInit",
3387 adapter, 0, 0, 0);
3388 return (STATUS_SUCCESS);
3392 * sxg_fill_descriptor_block - Populate a descriptor block and give it to
3393 * the card. The caller should hold the RcvQLock
3395 * Arguments -
3396 * adapter - A pointer to our adapter structure
3397 * RcvDescriptorBlockHdr - Descriptor block to fill
3399 * Return
3400 * status
3402 static int sxg_fill_descriptor_block(p_adapter_t adapter,
3403 PSXG_RCV_DESCRIPTOR_BLOCK_HDR RcvDescriptorBlockHdr)
3405 u32 i;
3406 PSXG_RING_INFO RcvRingInfo = &adapter->RcvRingZeroInfo;
3407 PSXG_RCV_DATA_BUFFER_HDR RcvDataBufferHdr;
3408 PSXG_RCV_DESCRIPTOR_BLOCK RcvDescriptorBlock;
3409 PSXG_CMD RingDescriptorCmd;
3410 PSXG_RCV_RING RingZero = &adapter->RcvRings[0];
3412 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "FilBlk",
3413 adapter, adapter->RcvBuffersOnCard,
3414 adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
3416 ASSERT(RcvDescriptorBlockHdr);
3418 // If we don't have the resources to fill the descriptor block,
3419 // return failure
3420 if ((adapter->FreeRcvBufferCount < SXG_RCV_DESCRIPTORS_PER_BLOCK) ||
3421 SXG_RING_FULL(RcvRingInfo)) {
3422 adapter->Stats.NoMem++;
3423 return (STATUS_FAILURE);
3425 // Get a ring descriptor command
3426 SXG_GET_CMD(RingZero,
3427 RcvRingInfo, RingDescriptorCmd, RcvDescriptorBlockHdr);
3428 ASSERT(RingDescriptorCmd);
3429 RcvDescriptorBlockHdr->State = SXG_BUFFER_ONCARD;
3430 RcvDescriptorBlock =
3431 (PSXG_RCV_DESCRIPTOR_BLOCK) RcvDescriptorBlockHdr->VirtualAddress;
3433 // Fill in the descriptor block
3434 for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK; i++) {
3435 SXG_GET_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
3436 ASSERT(RcvDataBufferHdr);
3437 SXG_REINIATIALIZE_PACKET(RcvDataBufferHdr->SxgDumbRcvPacket);
3438 RcvDataBufferHdr->State = SXG_BUFFER_ONCARD;
3439 RcvDescriptorBlock->Descriptors[i].VirtualAddress = (void *)RcvDataBufferHdr;
3440 RcvDescriptorBlock->Descriptors[i].PhysicalAddress =
3441 RcvDataBufferHdr->PhysicalAddress;
3443 // Add the descriptor block to receive descriptor ring 0
3444 RingDescriptorCmd->Sgl = RcvDescriptorBlockHdr->PhysicalAddress;
3446 // RcvBuffersOnCard is not protected via the receive lock (see
 3447 	// sxg_process_event_queue).  We don't want to grab a lock every time a
3448 // buffer is returned to us, so we use atomic interlocked functions
3449 // instead.
3450 adapter->RcvBuffersOnCard += SXG_RCV_DESCRIPTORS_PER_BLOCK;
3452 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DscBlk",
3453 RcvDescriptorBlockHdr,
3454 RingDescriptorCmd, RcvRingInfo->Head, RcvRingInfo->Tail);
3456 WRITE_REG(adapter->UcodeRegs[0].RcvCmd, 1, true);
3457 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFilBlk",
3458 adapter, adapter->RcvBuffersOnCard,
3459 adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
3460 return (STATUS_SUCCESS);
3464 * sxg_stock_rcv_buffers - Stock the card with receive buffers
3466 * Arguments -
3467 * adapter - A pointer to our adapter structure
3469 * Return
3470 * None
3472 static void sxg_stock_rcv_buffers(p_adapter_t adapter)
3474 PSXG_RCV_DESCRIPTOR_BLOCK_HDR RcvDescriptorBlockHdr;
3476 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "StockBuf",
3477 adapter, adapter->RcvBuffersOnCard,
3478 adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
3479 // First, see if we've got less than our minimum threshold of
3480 // receive buffers, there isn't an allocation in progress, and
 3481 	// we haven't exceeded our maximum.  If so, get another block of buffers.
3482 // None of this needs to be SMP safe. It's round numbers.
3483 if ((adapter->FreeRcvBufferCount < SXG_MIN_RCV_DATA_BUFFERS) &&
3484 (adapter->AllRcvBlockCount < SXG_MAX_RCV_BLOCKS) &&
3485 (adapter->AllocationsPending == 0)) {
3486 sxg_allocate_buffer_memory(adapter,
3487 SXG_RCV_BLOCK_SIZE(adapter->
3488 ReceiveBufferSize),
3489 SXG_BUFFER_TYPE_RCV);
3491 // Now grab the RcvQLock lock and proceed
3492 spin_lock(&adapter->RcvQLock);
3493 while (adapter->RcvBuffersOnCard < SXG_RCV_DATA_BUFFERS) {
3494 PLIST_ENTRY _ple;
3496 // Get a descriptor block
3497 RcvDescriptorBlockHdr = NULL;
3498 if (adapter->FreeRcvBlockCount) {
3499 _ple = RemoveHeadList(&adapter->FreeRcvBlocks);
3500 RcvDescriptorBlockHdr = container_of(_ple, SXG_RCV_DESCRIPTOR_BLOCK_HDR, FreeList);
3501 adapter->FreeRcvBlockCount--;
3502 RcvDescriptorBlockHdr->State = SXG_BUFFER_BUSY;
3505 if (RcvDescriptorBlockHdr == NULL) {
3506 // Bail out..
3507 adapter->Stats.NoMem++;
3508 break;
3510 // Fill in the descriptor block and give it to the card
3511 if (sxg_fill_descriptor_block(adapter, RcvDescriptorBlockHdr) ==
3512 STATUS_FAILURE) {
3513 // Free the descriptor block
3514 SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter,
3515 RcvDescriptorBlockHdr);
3516 break;
3519 spin_unlock(&adapter->RcvQLock);
3520 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFilBlks",
3521 adapter, adapter->RcvBuffersOnCard,
3522 adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
3526 * sxg_complete_descriptor_blocks - Return descriptor blocks that have been
3527 * completed by the microcode
3529 * Arguments -
3530 * adapter - A pointer to our adapter structure
3531 * Index - Where the microcode is up to
3533 * Return
3534 * None
3536 static void sxg_complete_descriptor_blocks(p_adapter_t adapter, unsigned char Index)
3538 PSXG_RCV_RING RingZero = &adapter->RcvRings[0];
3539 PSXG_RING_INFO RcvRingInfo = &adapter->RcvRingZeroInfo;
3540 PSXG_RCV_DESCRIPTOR_BLOCK_HDR RcvDescriptorBlockHdr;
3541 PSXG_CMD RingDescriptorCmd;
3543 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpRBlks",
3544 adapter, Index, RcvRingInfo->Head, RcvRingInfo->Tail);
3546 // Now grab the RcvQLock lock and proceed
3547 spin_lock(&adapter->RcvQLock);
3548 ASSERT(Index != RcvRingInfo->Tail);
3549 while (RcvRingInfo->Tail != Index) {
3551 // Locate the current Cmd (ring descriptor entry), and
3552 // associated receive descriptor block, and advance
3553 // the tail
3555 SXG_RETURN_CMD(RingZero,
3556 RcvRingInfo,
3557 RingDescriptorCmd, RcvDescriptorBlockHdr);
3558 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpRBlk",
3559 RcvRingInfo->Head, RcvRingInfo->Tail,
3560 RingDescriptorCmd, RcvDescriptorBlockHdr);
3562 // Clear the SGL field
3563 RingDescriptorCmd->Sgl = 0;
3564 // Attempt to refill it and hand it right back to the
3565 // card. If we fail to refill it, free the descriptor block
3566 // header. The card will be restocked later via the
3567 // RcvBuffersOnCard test
3568 if (sxg_fill_descriptor_block(adapter, RcvDescriptorBlockHdr) ==
3569 STATUS_FAILURE) {
3570 SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter,
3571 RcvDescriptorBlockHdr);
3574 spin_unlock(&adapter->RcvQLock);
3575 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XCRBlks",
3576 adapter, Index, RcvRingInfo->Head, RcvRingInfo->Tail);
3580 static struct pci_driver sxg_driver = {
3581 .name = DRV_NAME,
3582 .id_table = sxg_pci_tbl,
3583 .probe = sxg_entry_probe,
3584 .remove = sxg_entry_remove,
3585 #if SXG_POWER_MANAGEMENT_ENABLED
3586 .suspend = sxgpm_suspend,
3587 .resume = sxgpm_resume,
3588 #endif
3589 /* .shutdown = slic_shutdown, MOOK_INVESTIGATE */
3592 static int __init sxg_module_init(void)
3594 sxg_init_driver();
3596 if (debug >= 0)
3597 sxg_debug = debug;
3599 return pci_register_driver(&sxg_driver);
3602 static void __exit sxg_module_cleanup(void)
3604 pci_unregister_driver(&sxg_driver);
3607 module_init(sxg_module_init);
3608 module_exit(sxg_module_cleanup);