Staging: sxg: Add Rev B support in the Sahara SXG driver
[linux-2.6-xlnx.git] / drivers / staging / sxg / sxghif.h
blob e190d6add29c094d74b49cc371c7f36b796c4757
1 /*******************************************************************
2 * Copyright © 1997-2007 Alacritech, Inc. All rights reserved
4 * $Id: sxghif.h,v 1.5 2008/07/24 19:18:22 chris Exp $
6 * sxghif.h:
8 * This file contains structures and definitions for the
9 * Alacritech Sahara host interface
10 ******************************************************************/
12 #define DBG 1
14 /* UCODE Registers */
/*
 * Microcode mailbox registers. Each 32-bit register corresponds to a
 * command "code" (0-15); code 0 is extended via ExCode, and every
 * ExCode slot is followed by 15 pad words so consecutive ExCodes sit
 * 0x40 bytes apart.
 * NOTE(review): this copy appears to be a lossy extraction — original
 * line numbers are embedded in each line and the struct's closing "};"
 * was lost; the member code below is untouched.
 */
15 struct sxg_ucode_regs {
16 /* Address 0 - 0x3F = Command codes 0-15 for TCB 0. Excode 0 */
17 u32 Icr; /* Code = 0 (extended), ExCode = 0 - Int control */
18 u32 RsvdReg1; /* Code = 1 - TOE -NA */
19 u32 RsvdReg2; /* Code = 2 - TOE -NA */
20 u32 RsvdReg3; /* Code = 3 - TOE -NA */
21 u32 RsvdReg4; /* Code = 4 - TOE -NA */
22 u32 RsvdReg5; /* Code = 5 - TOE -NA */
23 u32 CardUp; /* Code = 6 - Microcode initialized when 1 */
24 u32 RsvdReg7; /* Code = 7 - TOE -NA */
25 u32 ConfigStat; /* Code = 8 - Configuration data load status */
26 u32 RsvdReg9; /* Code = 9 - TOE -NA */
27 u32 CodeNotUsed[6]; /* Codes 10-15 not used. ExCode = 0 */
28 /* This brings us to ExCode 1 at address 0x40 = Interrupt status pointer */
29 u32 Isp; /* Code = 0 (extended), ExCode = 1 */
30 u32 PadEx1[15]; /* Codes 1-15 not used with extended codes */
31 /* ExCode 2 = Interrupt Status Register */
32 u32 Isr; /* Code = 0 (extended), ExCode = 2 */
33 u32 PadEx2[15];
34 /* ExCode 3 = Event base register. Location of event rings */
35 u32 EventBase; /* Code = 0 (extended), ExCode = 3 */
36 u32 PadEx3[15];
37 /* ExCode 4 = Event ring size */
38 u32 EventSize; /* Code = 0 (extended), ExCode = 4 */
39 u32 PadEx4[15];
40 /* ExCode 5 = TCB Buffers base address */
41 u32 TcbBase; /* Code = 0 (extended), ExCode = 5 */
42 u32 PadEx5[15];
43 /* ExCode 6 = TCB Composite Buffers base address */
44 u32 TcbCompBase; /* Code = 0 (extended), ExCode = 6 */
45 u32 PadEx6[15];
46 /* ExCode 7 = Transmit ring base address */
47 u32 XmtBase; /* Code = 0 (extended), ExCode = 7 */
48 u32 PadEx7[15];
49 /* ExCode 8 = Transmit ring size */
50 u32 XmtSize; /* Code = 0 (extended), ExCode = 8 */
51 u32 PadEx8[15];
52 /* ExCode 9 = Receive ring base address */
53 u32 RcvBase; /* Code = 0 (extended), ExCode = 9 */
54 u32 PadEx9[15];
55 /* ExCode 10 = Receive ring size */
56 u32 RcvSize; /* Code = 0 (extended), ExCode = 10 */
57 u32 PadEx10[15];
58 /* ExCode 11 = Read EEPROM/Flash Config */
59 u32 Config; /* Code = 0 (extended), ExCode = 11 */
60 u32 PadEx11[15];
61 /* ExCode 12 = Multicast bits 31:0 */
62 u32 McastLow; /* Code = 0 (extended), ExCode = 12 */
63 u32 PadEx12[15];
64 /* ExCode 13 = Multicast bits 63:32 */
65 u32 McastHigh; /* Code = 0 (extended), ExCode = 13 */
66 u32 PadEx13[15];
67 /* ExCode 14 = Ping */
68 u32 Ping; /* Code = 0 (extended), ExCode = 14 */
69 u32 PadEx14[15];
70 /* ExCode 15 = Link MTU */
71 u32 LinkMtu; /* Code = 0 (extended), ExCode = 15 */
72 u32 PadEx15[15];
73 /* ExCode 16 = Download synchronization */
74 u32 LoadSync; /* Code = 0 (extended), ExCode = 16 */
75 u32 PadEx16[15];
76 /* ExCode 17 = Upper DRAM address bits on 32-bit systems */
77 u32 Upper; /* Code = 0 (extended), ExCode = 17 */
78 u32 PadEx17[15];
79 /* ExCode 18 = Slowpath Send Index Address */
80 u32 SPSendIndex; /* Code = 0 (extended), ExCode = 18 */
81 u32 PadEx18[15];
82 /* ExCode 19 = Get ucode statistics */
83 u32 GetUcodeStats; /* Code = 0 (extended), ExCode = 19 */
84 u32 PadEx19[15];
85 /* ExCode 20 = Aggregation - See sxgmisc.c:SxgSetInterruptAggregation */
86 u32 Aggregation; /* Code = 0 (extended), ExCode = 20 */
87 u32 PadEx20[15];
88 /* ExCode 21 = Receive MDL push timer */
89 u32 PushTicks; /* Code = 0 (extended), ExCode = 21 */
90 u32 PadEx21[15];
91 /* ExCode 22 = ACK Frequency */
92 u32 AckFrequency; /* Code = 0 (extended), ExCode = 22 */
93 u32 PadEx22[15];
94 /* ExCode 23 = TOE NA */
95 u32 RsvdReg23;
96 u32 PadEx23[15];
97 /* ExCode 24 = TOE NA */
98 u32 RsvdReg24;
99 u32 PadEx24[15];
100 /* ExCode 25 = TOE NA */
101 u32 RsvdReg25; /* Code = 0 (extended), ExCode = 25 */
102 u32 PadEx25[15];
103 /* ExCode 26 = Receive checksum requirements */
104 u32 ReceiveChecksum; /* Code = 0 (extended), ExCode = 26 */
105 u32 PadEx26[15];
106 /* ExCode 27 = RSS Requirements */
107 u32 Rss; /* Code = 0 (extended), ExCode = 27 */
108 u32 PadEx27[15];
109 /* ExCode 28 = RSS Table */
110 u32 RssTable; /* Code = 0 (extended), ExCode = 28 */
111 u32 PadEx28[15];
112 /* ExCode 29 = Event ring release entries */
113 u32 EventRelease; /* Code = 0 (extended), ExCode = 29 */
114 u32 PadEx29[15];
115 /* ExCode 30 = Number of receive bufferlist commands on ring 0 */
116 u32 RcvCmd; /* Code = 0 (extended), ExCode = 30 */
117 u32 PadEx30[15];
118 /* ExCode 31 = slowpath transmit command - Data[31:0] = 1 */
119 u32 XmtCmd; /* Code = 0 (extended), ExCode = 31 */
120 u32 PadEx31[15];
121 /* ExCode 32 = Dump command */
122 u32 DumpCmd; /* Code = 0 (extended), ExCode = 32 */
123 u32 PadEx32[15];
124 /* ExCode 33 = Debug command */
125 u32 DebugCmd; /* Code = 0 (extended), ExCode = 33 */
126 u32 PadEx33[15];
/*
 128 * There are 128 possible extended commands - each accounts for 16
 129 * words (including the non-relevant base command codes 1-15).
 130 * Pad for the remainder of these here to bring us to the next CPU
 131 * base. As extended codes are added, reduce the first array value in
 132 * the following field
 */
134 u32 PadToNextCpu[94][16]; /* 94 = 128 - 34 (34 = Excodes 0 - 33)*/
137 /* Interrupt control register (0) values */
138 #define SXG_ICR_DISABLE 0x00000000
139 #define SXG_ICR_ENABLE 0x00000001
140 #define SXG_ICR_MASK 0x00000002
141 #define SXG_ICR_MSGID_MASK 0xFFFF0000
142 #define SXG_ICR_MSGID_SHIFT 16
143 #define SXG_ICR(_MessageId, _Data) \
144 ((((_MessageId) << SXG_ICR_MSGID_SHIFT) & \
145 SXG_ICR_MSGID_MASK) | (_Data))
147 #define SXG_MIN_AGG_DEFAULT 0x0010 /* Minimum aggregation default */
148 #define SXG_MAX_AGG_DEFAULT 0x0040 /* Maximum aggregation default */
149 #define SXG_MAX_AGG_SHIFT 16 /* Maximum in top 16 bits of register */
150 /* Disable interrupt aggregation on xmt */
151 #define SXG_AGG_XMT_DISABLE 0x80000000
153 /* The Microcode supports up to 16 RSS queues (RevB) */
154 #define SXG_MAX_RSS 16
155 #define SXG_MAX_RSS_REVA 8
157 #define SXG_MAX_RSS_TABLE_SIZE 256 /* 256-byte max */
159 #define SXG_RSS_REVA_TCP6 0x00000001 /* RSS TCP over IPv6 */
160 #define SXG_RSS_REVA_TCP4 0x00000002 /* RSS TCP over IPv4 */
161 #define SXG_RSS_IP 0x00000001 /* RSS TCP over IPv6 */
162 #define SXG_RSS_TCP 0x00000002 /* RSS TCP over IPv4 */
163 #define SXG_RSS_LEGACY 0x00000004 /* Line-base interrupts */
164 #define SXG_RSS_TABLE_SIZE 0x0000FF00 /* Table size mask */
166 #define SXG_RSS_TABLE_SHIFT 8
167 #define SXG_RSS_BASE_CPU 0x00FF0000 /* Base CPU (not used) */
168 #define SXG_RSS_BASE_SHIFT 16
170 #define SXG_RCV_IP_CSUM_ENABLED 0x00000001 /* ExCode 26 (ReceiveChecksum) */
171 #define SXG_RCV_TCP_CSUM_ENABLED 0x00000002 /* ExCode 26 (ReceiveChecksum) */
173 #define SXG_XMT_CPUID_SHIFT 16
/*
 176 * Status returned by ucode in the ConfigStat reg (see above) after
 177 * attempting to load configuration data from the EEPROM/Flash.
 */
179 #define SXG_CFG_TIMEOUT 1 /* init value - timeout if unchanged */
180 #define SXG_CFG_LOAD_EEPROM 2 /* config data loaded from EEPROM */
181 #define SXG_CFG_LOAD_FLASH 3 /* config data loaded from flash */
182 #define SXG_CFG_LOAD_INVALID 4 /* no valid config data found */
183 #define SXG_CFG_LOAD_ERROR 5 /* hardware error */
185 #define SXG_CHECK_FOR_HANG_TIME 5
/*
 188 * TCB registers - This is really the same register memory area as UCODE_REGS
 189 * above, but defined differently. Bits 17:06 of the address define the TCB,
 190 * which means each TCB area occupies 0x40 (64) bytes, or 16 u32S. What really
 191 * is happening is that these registers occupy the "PadEx[15]" areas in the
 192 * struct sxg_ucode_regs definition above
 */
/*
 * Per-TCB view of the register memory described by struct sxg_ucode_regs:
 * address bits 17:06 select the TCB, giving each TCB one 0x40-byte
 * (16 x u32) window overlaying the PadEx areas above.
 * NOTE(review): the struct's closing "};" was lost in extraction.
 */
194 struct sxg_tcb_regs {
195 u32 ExCode; /* Extended codes - see SXG_UCODE_REGS */
196 u32 Xmt; /* Code = 1 - # of Xmt descriptors added to ring */
197 u32 Rcv; /* Code = 2 - # of Rcv descriptors added to ring */
198 u32 Rsvd1; /* Code = 3 - TOE NA */
199 u32 Rsvd2; /* Code = 4 - TOE NA */
200 u32 Rsvd3; /* Code = 5 - TOE NA */
201 u32 Invalid1; /* Code = 6 - Reserved for "CardUp" see above */
202 u32 Rsvd4; /* Code = 7 - TOE NA */
203 u32 Invalid2; /* Code = 8 - Reserved for "ConfigStat" see above */
204 u32 Rsvd5; /* Code = 9 - TOE NA */
205 u32 Pad[6]; /* Codes 10-15 - Not used. */
208 /***************************************************************************
209 * ISR Format
210 * 31 0
211 * _______________________________________
212 * | | | | | | | | |
213 * |____|____|____|____|____|____|____|____|
214 * ^^^^ ^^^^ ^^^^ ^^^^ \ /
215 * ERR --|||| |||| |||| |||| -----------------
216 * EVENT ---||| |||| |||| |||| |
217 * ----|| |||| |||| |||| |-- Crash Address
218 * UPC -----| |||| |||| ||||
219 * LEVENT -------|||| |||| ||||
220 * PDQF --------||| |||| ||||
221 * RMISS ---------|| |||| ||||
222 * BREAK ----------| |||| ||||
223 * HBEATOK ------------|||| ||||
224 * NOHBEAT -------------||| ||||
225 * ERFULL --------------|| ||||
226 * XDROP ---------------| ||||
227 * -----------------||||
228 * -----------------||||--\
229 * ||---|-CpuId of crash
230 * |----/
231 ***************************************************************************/
232 #define SXG_ISR_ERR 0x80000000 /* Error */
233 #define SXG_ISR_EVENT 0x40000000 /* Event ring event */
234 #define SXG_ISR_NONE1 0x20000000 /* Not used */
235 #define SXG_ISR_UPC 0x10000000 /* Dump/debug command complete*/
236 #define SXG_ISR_LINK 0x08000000 /* Link event */
237 #define SXG_ISR_PDQF 0x04000000 /* Processed data queue full */
238 #define SXG_ISR_RMISS 0x02000000 /* Drop - no host buf */
239 #define SXG_ISR_BREAK 0x01000000 /* Breakpoint hit */
240 #define SXG_ISR_PING 0x00800000 /* Heartbeat response */
241 #define SXG_ISR_DEAD 0x00400000 /* Card crash */
242 #define SXG_ISR_ERFULL 0x00200000 /* Event ring full */
243 #define SXG_ISR_XDROP 0x00100000 /* XMT Drop - no DRAM bufs or XMT err */
244 #define SXG_ISR_SPSEND 0x00080000 /* Slow send complete */
245 #define SXG_ISR_CPU 0x00070000 /* Dead CPU mask */
246 #define SXG_ISR_CPU_SHIFT 16 /* Dead CPU shift */
247 #define SXG_ISR_CRASH 0x0000FFFF /* Crash address mask */
249 /***************************************************************************
250 * Event Ring entry
252 * 31 15 0
253 * .___________________.___________________.
254 * |<------------ Pad 0 ------------>|
255 * |_________|_________|_________|_________|0 0x00
256 * |<------------ Pad 1 ------------>|
257 * |_________|_________|_________|_________|4 0x04
258 * |<------------ Pad 2 ------------>|
259 * |_________|_________|_________|_________|8 0x08
260 * |<----------- Event Word 0 ------------>|
261 * |_________|_________|_________|_________|12 0x0c
262 * |<----------- Event Word 1 ------------>|
263 * |_________|_________|_________|_________|16 0x10
264 * |<------------- Toeplitz ------------>|
265 * |_________|_________|_________|_________|20 0x14
266 * |<----- Length ---->|<------ TCB Id --->|
267 * |_________|_________|_________|_________|24 0x18
268 * |<----- Status ---->|Evnt Code|Flsh Code|
269 * |_________|_________|_________|_________|28 0x1c
270 * ^ ^^^^ ^^^^
271 * |- VALID |||| ||||- RBUFC
272 * |||| |||-- SLOWR
273 * |||| ||--- UNUSED
274 * |||| |---- FASTC
275 * ||||------ FASTR
276 * |||-------
277 * ||--------
278 * |---------
280 * Slowpath status:
281 * _______________________________________
282 * |<----- Status ---->|Evnt Code|Flsh Code|
283 * |_________|Cmd Index|_________|_________|28 0x1c
284 * ^^^ ^^^^
285 * ||| ||||- ISTCPIP6
286 * ||| |||-- IPONLY
287 * ||| ||--- RCVERR
288 * ||| |---- IPCBAD
289 * |||------ TCPCBAD
290 * ||------- ISTCPIP
291 * |-------- SCERR
293 ************************************************************************/
/*
 * Event ring entry written by the card (layout per the diagram above);
 * packed so the struct matches the 32-byte hardware format exactly.
 * NOTE(review): the closing braces of the two unions and the final
 * "};" were lost when this file was extracted; the member code below
 * is untouched.
 */
294 #pragma pack(push, 1)
295 struct sxg_event {
296 u32 Pad[1]; /* not used */
297 u32 SndUna; /* SndUna value */
298 u32 Resid; /* receive MDL resid */
299 union {
300 void * HostHandle; /* Receive host handle */
301 u32 Rsvd1; /* TOE NA */
302 struct {
303 u32 NotUsed;
304 u32 Rsvd2; /* TOE NA */
305 } Flush;
307 u32 Toeplitz; /* RSS Toeplitz hash */
308 union {
309 ushort Rsvd3; /* TOE NA */
310 ushort HdrOffset; /* Slowpath */
312 ushort Length;
313 unsigned char Rsvd4; /* TOE NA */
314 unsigned char Code; /* Event code */
315 unsigned char CommandIndex; /* New ring index */
316 unsigned char Status; /* Event status */
318 #pragma pack(pop)
320 /* Event code definitions */
321 #define EVENT_CODE_BUFFERS 0x01 /* Receive buffer list command (ring 0) */
322 #define EVENT_CODE_SLOWRCV 0x02 /* Slowpath receive */
323 #define EVENT_CODE_UNUSED 0x04 /* Was slowpath commands complete */
325 /* Status values */
326 #define EVENT_STATUS_VALID 0x80 /* Entry valid */
328 /* Slowpath status */
329 #define EVENT_STATUS_ERROR 0x40 /* Completed with error. Index in next byte */
330 #define EVENT_STATUS_TCPIP4 0x20 /* TCPIPv4 frame */
331 #define EVENT_STATUS_TCPBAD 0x10 /* Bad TCP checksum */
332 #define EVENT_STATUS_IPBAD 0x08 /* Bad IP checksum */
333 #define EVENT_STATUS_RCVERR 0x04 /* Slowpath receive error */
334 #define EVENT_STATUS_IPONLY 0x02 /* IP frame */
335 #define EVENT_STATUS_TCPIP6 0x01 /* TCPIPv6 frame */
336 #define EVENT_STATUS_TCPIP 0x21 /* Combination of v4 and v6 */
/*
 339 * Event ring
 340 * Size must be power of 2, between 128 and 16k
 */
342 #define EVENT_RING_SIZE 4096
343 #define EVENT_RING_BATCH 16 /* Hand entries back 16 at a time. */
344 /* Stop processing events after 4096 (256 * 16) */
345 #define EVENT_BATCH_LIMIT 256
/* Ring of EVENT_RING_SIZE sxg_event entries, filled in by the ucode. */
347 struct sxg_event_ring {
348 struct sxg_event Ring[EVENT_RING_SIZE];
/* NOTE(review): closing "};" lost in extraction. */
351 /* TCB Buffers */
352 /* Maximum number of TCBS supported by hardware/microcode */
353 #define SXG_MAX_TCB 4096
354 /* Minimum TCBs before we fail initialization */
355 #define SXG_MIN_TCB 512
357 * TCB Hash
358 * The bucket is determined by bits 11:4 of the toeplitz if we support 4k
359 * offloaded connections, 10:4 if we support 2k and so on.
361 #define SXG_TCB_BUCKET_SHIFT 4
362 #define SXG_TCB_PER_BUCKET 16
363 #define SXG_TCB_BUCKET_MASK 0xFF0 /* Bucket portion of TCB ID */
364 #define SXG_TCB_ELEMENT_MASK 0x00F /* Element within bucket */
365 #define SXG_TCB_BUCKETS 256 /* 256 * 16 = 4k */
367 #define SXG_TCB_BUFFER_SIZE 512 /* ASSERT format is correct */
369 #define SXG_TCB_RCVQ_SIZE 736
371 #define SXG_TCB_COMPOSITE_BUFFER_SIZE 1024
/*
 * Return the address of the TCP header within the composite frame
 * buffer of (_TcpObject), choosing between the VLAN/non-VLAN and
 * IPv6/IPv4 layout variants of the frame.
 */
#define SXG_LOCATE_TCP_FRAME_HDR(_TcpObject, _IPv6)			\
	((_TcpObject)->VlanId						\
	 ? ((_IPv6) /* VLAN tagged frame */				\
	    ? &(_TcpObject)->CompBuffer->Frame.HasVlan.TcpIp6.SxgTcp	\
	    : &(_TcpObject)->CompBuffer->Frame.HasVlan.TcpIp.SxgTcp)	\
	 : ((_IPv6) /* untagged frame */				\
	    ? &(_TcpObject)->CompBuffer->Frame.NoVlan.TcpIp6.SxgTcp	\
	    : &(_TcpObject)->CompBuffer->Frame.NoVlan.TcpIp.SxgTcp))
/*
 * Return the address of the IPv4 header within the composite frame
 * buffer of (_TcpObject), accounting for an optional VLAN tag.
 * Fixed: the expansion is now fully parenthesized — the original
 * expanded to a bare ?: expression, which misbinds when the macro is
 * used inside a larger expression (e.g. as an operand of == or ?:).
 */
#define SXG_LOCATE_IP_FRAME_HDR(_TcpObject)			\
	((_TcpObject)->VlanId ?					\
	 &(_TcpObject)->CompBuffer->Frame.HasVlan.TcpIp.Ip :	\
	 &(_TcpObject)->CompBuffer->Frame.NoVlan.TcpIp.Ip)
/*
 * Return the address of the IPv6 header within the composite frame
 * buffer of (_TcpObject), accounting for an optional VLAN tag.
 * Fixed: the macro parameter was declared as "TcpObject" while the
 * body referenced "_TcpObject", so the argument was never substituted
 * and the macro only compiled where a variable named _TcpObject
 * happened to be in scope. The expansion is also now fully
 * parenthesized, matching SXG_LOCATE_IP_FRAME_HDR.
 */
#define SXG_LOCATE_IP6_FRAME_HDR(_TcpObject)			\
	((_TcpObject)->VlanId ?					\
	 &(_TcpObject)->CompBuffer->Frame.HasVlan.TcpIp6.Ip :	\
	 &(_TcpObject)->CompBuffer->Frame.NoVlan.TcpIp6.Ip)
392 #if DBG
394 * Horrible kludge to distinguish dumb-nic, slowpath, and
395 * fastpath traffic. Decrement the HopLimit by one
396 * for slowpath, two for fastpath. This assumes the limit is measurably
397 * greater than two, which I think is reasonable.
398 * Obviously this is DBG only. Maybe remove later, or #if 0 so we
399 * can set it when needed
/*
 * DBG-only traffic marker (see comment above): rewrite the IPv6
 * HopLimit so captures can distinguish slowpath (TTL-1) from
 * fastpath (TTL-2) sends. No-op for non-IPv6 objects.
 * NOTE(review): the macro's closing braces (original lines 411-413)
 * were lost when this file was extracted; code left untouched.
 */
401 #define SXG_DBG_HOP_LIMIT(_TcpObject, _FastPath) { \
402 PIPV6_HDR _Ip6FrameHdr; \
403 if ((_TcpObject)->IPv6) { \
404 _Ip6FrameHdr = SXG_LOCATE_IP6_FRAME_HDR((_TcpObject)); \
405 if (_FastPath) { \
406 _Ip6FrameHdr->HopLimit = \
407 (_TcpObject)->Cached.TtlOrHopLimit - 2; \
408 } else { \
409 _Ip6FrameHdr->HopLimit = \
410 (_TcpObject)->Cached.TtlOrHopLimit - 1; \
414 #else
415 /* Do nothing with free build */
416 #define SXG_DBG_HOP_LIMIT(_TcpObject, _FastPath)
417 #endif
419 /* Receive and transmit rings */
420 #define SXG_MAX_RING_SIZE 256
421 #define SXG_XMT_RING_SIZE 128 /* Start with 128 */
422 #define SXG_RCV_RING_SIZE 128 /* Start with 128 */
423 #define SXG_MAX_ENTRIES 4096
424 #define SXG_JUMBO_RCV_RING_SIZE 32
426 /* Structure and macros to manage a ring */
/*
 * Host-side bookkeeping for a command ring: producer index (Head),
 * consumer index (Tail), ring size, and a shadow array mapping each
 * ring slot to its driver context pointer.
 * NOTE(review): the struct's closing "};" was lost in extraction.
 */
427 struct sxg_ring_info {
428 /* Where we add entries - Note unsigned char:RING_SIZE */
429 unsigned char Head;
430 unsigned char Tail; /* Where we pull off completed entries */
431 ushort Size; /* Ring size - must be a power of 2; indices wrap via & (Size - 1) */
432 void * Context[SXG_MAX_RING_SIZE]; /* Shadow ring */
/*
 * Ring-management macros. Rings are power-of-two sized and indices
 * wrap by masking with (Size - 1); Head is the producer index, Tail
 * the consumer (see struct sxg_ring_info).
 * NOTE(review): the closing "}" lines of the brace-delimited macros
 * below were lost when this file was extracted; code left untouched.
 */
435 #define SXG_INITIALIZE_RING(_ring, _size) { \
436 (_ring).Head = 0; \
437 (_ring).Tail = 0; \
438 (_ring).Size = (_size); \
441 #define SXG_ADVANCE_INDEX(_index, _size) \
442 ((_index) = ((_index) + 1) & ((_size) - 1))
443 #define SXG_PREVIOUS_INDEX(_index, _size) \
444 (((_index) - 1) &((_size) - 1))
445 #define SXG_RING_EMPTY(_ring) ((_ring)->Head == (_ring)->Tail)
446 #define SXG_RING_FULL(_ring) \
447 ((((_ring)->Head + 1) & ((_ring)->Size - 1)) == (_ring)->Tail)
448 #define SXG_RING_ADVANCE_HEAD(_ring) \
449 SXG_ADVANCE_INDEX((_ring)->Head, ((_ring)->Size))
450 #define SXG_RING_RETREAT_HEAD(_ring) ((_ring)->Head = \
451 SXG_PREVIOUS_INDEX((_ring)->Head, (_ring)->Size))
452 #define SXG_RING_ADVANCE_TAIL(_ring) { \
453 ASSERT((_ring)->Tail != (_ring)->Head); \
454 SXG_ADVANCE_INDEX((_ring)->Tail, ((_ring)->Size)); \
/*
 457 * Set cmd to the next available ring entry, set the shadow context
 458 * entry and advance the ring.
 459 * The appropriate lock must be held when calling this macro
 */
461 #define SXG_GET_CMD(_ring, _ringinfo, _cmd, _context) { \
462 if(SXG_RING_FULL(_ringinfo)) { \
463 (_cmd) = NULL; \
464 } else { \
465 (_cmd) = &(_ring)->Descriptors[(_ringinfo)->Head]; \
466 (_ringinfo)->Context[(_ringinfo)->Head] = (void *)(_context);\
467 SXG_RING_ADVANCE_HEAD(_ringinfo); \
/*
 472 * Abort the previously allocated command by retreating the head.
 473 * NOTE - The appropriate lock MUST NOT BE DROPPED between the SXG_GET_CMD
 474 * and SXG_ABORT_CMD calls.
 */
476 #define SXG_ABORT_CMD(_ringinfo) { \
477 ASSERT(!(SXG_RING_EMPTY(_ringinfo))); \
478 SXG_RING_RETREAT_HEAD(_ringinfo); \
479 (_ringinfo)->Context[(_ringinfo)->Head] = NULL; \
/*
 483 * For the given ring, return a pointer to the tail cmd and context,
 484 * clear the context and advance the tail
 */
486 #define SXG_RETURN_CMD(_ring, _ringinfo, _cmd, _context) { \
487 (_cmd) = &(_ring)->Descriptors[(_ringinfo)->Tail]; \
488 (_context) = (_ringinfo)->Context[(_ringinfo)->Tail]; \
489 (_ringinfo)->Context[(_ringinfo)->Tail] = NULL; \
490 SXG_RING_ADVANCE_TAIL(_ringinfo); \
494 * For a given ring find out how much the first pointer is ahead of
495 * the second pointer. "ahead" recognises the fact that the ring can wrap
497 static inline int sxg_ring_get_forward_diff (struct sxg_ring_info *ringinfo,
498 int a, int b) {
499 if ((a < 0 || a > ringinfo->Size ) || (b < 0 || b > ringinfo->Size))
500 return -1;
501 if (a > b) /* _a is lagging _b and _b has not wrapped around */
502 return (a - b);
503 else
504 return ((ringinfo->Size - (b - a)));
507 /***************************************************************
508 * Host Command Buffer - commands to INIC via the Cmd Rings
510 * 31 15 0
511 * .___________________.___________________.
512 * |<-------------- Sgl Low -------------->|
513 * |_________|_________|_________|_________|0 0x00
514 * |<-------------- Sgl High ------------->|
515 * |_________|_________|_________|_________|4 0x04
516 * |<------------- Sge 0 Low ----------->|
517 * |_________|_________|_________|_________|8 0x08
518 * |<------------- Sge 0 High ----------->|
519 * |_________|_________|_________|_________|12 0x0c
520 * |<------------ Sge 0 Length ---------->|
521 * |_________|_________|_________|_________|16 0x10
522 * |<----------- Window Update ----------->|
523 * |<-------- SP 1st SGE offset ---------->|
524 * |_________|_________|_________|_________|20 0x14
525 * |<----------- Total Length ------------>|
526 * |_________|_________|_________|_________|24 0x18
527 * |<----- LCnt ------>|<----- Flags ----->|
528 * |_________|_________|_________|_________|28 0x1c
529 ****************************************************************/
/*
 * Host command descriptor written to the transmit/receive command
 * rings (layout per the diagram above); packed to match the 32-byte
 * hardware format.
 * NOTE(review): several closing braces of the nested union/struct
 * members and the final "};" were lost when this file was extracted;
 * the member code below is untouched.
 */
530 #pragma pack(push, 1)
531 struct sxg_cmd {
532 dma64_addr_t Sgl; /* Physical address of SGL */
533 union {
534 struct {
535 dma64_addr_t FirstSgeAddress; /* Address of first SGE */
536 u32 FirstSgeLength; /* Length of first SGE */
537 union {
538 u32 Rsvd1; /* TOE NA */
539 u32 SgeOffset; /* Slowpath - 2nd SGE offset */
540 /* MDL completion - clobbers update */
541 u32 Resid;
543 union {
544 u32 TotalLength; /* Total transfer length */
545 u32 Mss; /* LSO MSS */
547 } Buffer;
549 union {
550 struct {
551 unsigned char Flags:4; /* slowpath flags */
552 unsigned char IpHl:4; /* Ip header length (>>2) */
553 unsigned char MacLen; /* Mac header len */
554 } CsumFlags;
555 struct {
556 ushort Flags:4; /* slowpath flags */
557 ushort TcpHdrOff:7; /* TCP */
558 ushort MacLen:5; /* Mac header len */
559 } LsoFlags;
560 ushort Flags; /* flags */
562 union {
563 ushort SgEntries; /* SG entry count including first sge */
564 struct {
565 unsigned char Status; /* Copied from event status */
566 unsigned char NotUsed;
567 } Status;
570 #pragma pack(pop)
/*
 * Packed VLAN tag as carried in the composite frame buffers.
 * NOTE(review): the on-wire 802.1Q tag is TPID followed by TCI, but
 * here VlanTci precedes VlanTpid — presumably this matches the
 * card's/driver's internal (possibly byte-swapped) layout; confirm
 * against the composite-buffer definitions in sxg.h before changing.
 * The struct's closing "};" was lost in extraction.
 */
572 #pragma pack(push, 1)
573 struct vlan_hdr {
574 ushort VlanTci;
575 ushort VlanTpid;
577 #pragma pack(pop)
579 /********************************************************************
580 * Slowpath Flags:
583 * LSS Flags:
584 * .---
585 * /.--- TCP Large segment send
586 * //.---
587 * ///.---
588 * 3 1 1 ////
589 * 1 5 0 ||||
590 * .___________________.____________vvvv.
591 * | |MAC | TCP | |
592 * | LCnt |hlen|hdroff|Flgs|
593 * |___________________|||||||||||||____|
596 * Checksum Flags
598 * .---
599 * /.---
600 * //.--- Checksum TCP
601 * ///.--- Checksum IP
602 * 3 1 //// No bits - normal send
603 * 1 5 7 ||||
604 * .___________________._______________vvvv.
605 * | | Offload | IP | |
606 * | LCnt |MAC hlen |Hlen|Flgs|
607 * |___________________|____|____|____|____|
609 *****************************************************************/
610 /* Slowpath CMD flags */
611 #define SXG_SLOWCMD_CSUM_IP 0x01 /* Checksum IP */
612 #define SXG_SLOWCMD_CSUM_TCP 0x02 /* Checksum TCP */
613 #define SXG_SLOWCMD_LSO 0x04 /* Large segment send */
/* Transmit command ring - SXG_XMT_RING_SIZE sxg_cmd descriptors.
 * NOTE(review): closing "};" of both ring structs lost in extraction. */
615 struct sxg_xmt_ring {
616 struct sxg_cmd Descriptors[SXG_XMT_RING_SIZE];
/* Receive (bufferlist) command ring - SXG_RCV_RING_SIZE descriptors. */
619 struct sxg_rcv_ring {
620 struct sxg_cmd Descriptors[SXG_RCV_RING_SIZE];
624 * Share memory buffer types - Used to identify asynchronous
625 * shared memory allocation
/* Tag identifying which pool an asynchronously allocated shared-memory
 * buffer belongs to (see comment above).
 * NOTE(review): the enum's closing "};" was lost in extraction. */
627 enum sxg_buffer_type {
628 SXG_BUFFER_TYPE_RCV, /* Receive buffer */
629 SXG_BUFFER_TYPE_SGL /* SGL buffer */
632 /* State for SXG buffers */
633 #define SXG_BUFFER_FREE 0x01
634 #define SXG_BUFFER_BUSY 0x02
635 #define SXG_BUFFER_ONCARD 0x04
636 #define SXG_BUFFER_UPSTREAM 0x08
639 * Receive data buffers
641 * Receive data buffers are given to the Sahara card 128 at a time.
642 * This is accomplished by filling in a "receive descriptor block"
643 * with 128 "receive descriptors". Each descriptor consists of
644 * a physical address, which the card uses as the address to
645 * DMA data into, and a virtual address, which is given back
646 * to the host in the "HostHandle" portion of an event.
647 * The receive descriptor data structure is defined below
648 * as sxg_rcv_data_descriptor, and the corresponding block
649 * is defined as sxg_rcv_descriptor_block.
651 * This receive descriptor block is given to the card by filling
652 * in the Sgl field of a sxg_cmd entry from pAdapt->RcvRings[0]
653 * with the physical address of the receive descriptor block.
655 * Both the receive buffers and the receive descriptor blocks
656 * require additional data structures to maintain them
657 * on a free queue and contain other information associated with them.
658 * Those data structures are defined as the sxg_rcv_data_buffer_hdr
659 * and sxg_rcv_descriptor_block_hdr respectively.
661 * Since both the receive buffers and the receive descriptor block
662 * must be accessible by the card, both must be allocated out of
663 * shared memory. To ensure that we always have a descriptor
664 * block available for every 128 buffers, we allocate all of
665 * these resources together in a single block. This entire
666 * block is managed by a struct sxg_rcv_block_hdr, who's sole purpose
667 * is to maintain address information so that the entire block
668 * can be free later.
670 * Further complicating matters is the fact that the receive
671 * buffers must be variable in length in order to accomodate
672 * jumbo frame configurations. We configure the buffer
673 * length so that the buffer and it's corresponding struct
674 * sxg_rcv_data_buffer_hdr structure add up to an even
675 * boundary. Then we place the remaining data structures after 128
676 * of them as shown in the following diagram:
678 * _________________________________________
679 * | |
680 * | Variable length receive buffer #1 |
681 * |_________________________________________|
682 * | |
683 * | sxg_rcv_data_buffer_hdr #1 |
684 * |_________________________________________| <== Even 2k or 10k boundary
685 * | |
686 * | ... repeat 2-128 .. |
687 * |_________________________________________|
688 * | |
689 * | struct sxg_rcv_descriptor_block |
690 * | Contains sxg_rcv_data_descriptor * 128 |
691 * |_________________________________________|
692 * | |
693 * | struct sxg_rcv_descriptor_block_hdr |
694 * |_________________________________________|
695 * | |
696 * | struct sxg_rcv_block_hdr |
697 * |_________________________________________|
699 * Memory consumption:
700 * Non-jumbo:
701 * Buffers and sxg_rcv_data_buffer_hdr = 2k * 128 = 256k
702 * + struct sxg_rcv_descriptor_block = 2k
703 * + struct sxg_rcv_descriptor_block_hdr = ~32
704 * + struct sxg_rcv_block_hdr = ~32
705 * => Total = ~258k/block
707 * Jumbo:
708 * Buffers and sxg_rcv_data_buffer_hdr = 10k * 128 = 1280k
709 * + struct sxg_rcv_descriptor_block = 2k
710 * + struct sxg_rcv_descriptor_block_hdr = ~32
711 * + struct sxg_rcv_block_hdr = ~32
712 * => Total = ~1282k/block
715 #define SXG_RCV_DATA_BUFFERS 8192 /* Amount to give to the card */
716 #define SXG_INITIAL_RCV_DATA_BUFFERS 16384 /* Initial pool of buffers */
717 /* Minimum amount and when to get more */
718 #define SXG_MIN_RCV_DATA_BUFFERS 4096
719 #define SXG_MAX_RCV_BLOCKS 256 /* = 32k receive buffers */
720 /* Amount to give to the card in case of jumbo frames */
721 #define SXG_JUMBO_RCV_DATA_BUFFERS 2048
722 /* Initial pool of buffers in case of jumbo buffers */
723 #define SXG_INITIAL_JUMBO_RCV_DATA_BUFFERS 4096
724 #define SXG_MIN_JUMBO_RCV_DATA_BUFFERS 1024
726 /* Receive buffer header */
/*
 * Header tracking one receive data buffer (see the receive-block
 * layout description above); lives at the tail of each buffer.
 * NOTE(review): the struct's closing "};" was lost in extraction.
 */
727 struct sxg_rcv_data_buffer_hdr {
728 dma64_addr_t PhysicalAddress; /* Buffer physical address */
/*
 730 * Note - DO NOT USE the VirtualAddress field to locate data.
 731 * Use the sxg.h:SXG_RECEIVE_DATA_LOCATION macro instead.
 */
733 struct list_entry FreeList; /* Free queue of buffers */
734 unsigned char State; /* See SXG_BUFFER state above */
735 struct sk_buff * skb; /* Double mapped (nbl and pkt)*/
739 * SxgSlowReceive uses the PACKET (skb) contained
740 * in the struct sxg_rcv_data_buffer_hdr when indicating dumb-nic data
742 #define SxgDumbRcvPacket skb
744 /* Space for struct sxg_rcv_data_buffer_hdr */
745 #define SXG_RCV_DATA_HDR_SIZE sizeof(struct sxg_rcv_data_buffer_hdr)
746 /* Non jumbo = 2k including HDR */
747 #define SXG_RCV_DATA_BUFFER_SIZE 2048
748 /* jumbo = 10k including HDR */
749 #define SXG_RCV_JUMBO_BUFFER_SIZE 10240
751 /* Receive data descriptor */
/*
 * One entry of a receive descriptor block: the host handle (skb) the
 * card echoes back in the event's HostHandle, plus the DMA address the
 * card writes packet data to.
 * NOTE(review): the union's closing "};" and the struct's final "};"
 * were lost in extraction.
 */
752 struct sxg_rcv_data_descriptor {
753 union {
754 struct sk_buff *VirtualAddress; /* Host handle */
755 u64 ForceTo8Bytes; /*Force x86 to 8-byte boundary*/
757 dma64_addr_t PhysicalAddress;
760 /* Receive descriptor block */
761 #define SXG_RCV_DESCRIPTORS_PER_BLOCK 128
762 #define SXG_RCV_DESCRIPTOR_BLOCK_SIZE 2048 /* For sanity check */
/* Block of 128 receive descriptors handed to the card in one Sgl.
 * NOTE(review): closing "};" lost in extraction. */
764 struct sxg_rcv_descriptor_block {
765 struct sxg_rcv_data_descriptor Descriptors[SXG_RCV_DESCRIPTORS_PER_BLOCK];
768 /* Receive descriptor block header */
/* Bookkeeping header for one receive descriptor block.
 * NOTE(review): closing "};" lost in extraction. */
769 struct sxg_rcv_descriptor_block_hdr {
770 void *VirtualAddress; /* start of 2k buffer */
771 dma64_addr_t PhysicalAddress;/* and it's physical address */
772 struct list_entry FreeList;/* free queue of descriptor blocks */
773 unsigned char State; /* see sxg_buffer state above */
776 /* Receive block header */
/* Header for an entire receive block allocation; exists only so the
 * whole block (buffers + descriptor block + headers) can be freed.
 * NOTE(review): closing "};" lost in extraction. */
777 struct sxg_rcv_block_hdr {
778 void *VirtualAddress; /* Start of virtual memory */
779 dma64_addr_t PhysicalAddress;/* ..and it's physical address*/
780 struct list_entry AllList; /* Queue of all SXG_RCV_BLOCKS*/
783 /* Macros to determine data structure offsets into receive block */
/* Total bytes for one receive block: 128 buffers + descriptor block
 * + both trailing headers (see layout diagram above). */
784 #define SXG_RCV_BLOCK_SIZE(_Buffersize) \
785 (((_Buffersize) * SXG_RCV_DESCRIPTORS_PER_BLOCK) + \
786 (sizeof(struct sxg_rcv_descriptor_block)) + \
787 (sizeof(struct sxg_rcv_descriptor_block_hdr)) + \
788 (sizeof(struct sxg_rcv_block_hdr)))
/* Usable data bytes in one buffer (header occupies the tail). */
789 #define SXG_RCV_BUFFER_DATA_SIZE(_Buffersize) \
790 ((_Buffersize) - SXG_RCV_DATA_HDR_SIZE)
/* Offset of the sxg_rcv_data_buffer_hdr within one buffer. */
791 #define SXG_RCV_DATA_BUFFER_HDR_OFFSET(_Buffersize) \
792 ((_Buffersize) - SXG_RCV_DATA_HDR_SIZE)
/* Offset of the descriptor block: right after the 128 buffers. */
793 #define SXG_RCV_DESCRIPTOR_BLOCK_OFFSET(_Buffersize) \
794 ((_Buffersize) * SXG_RCV_DESCRIPTORS_PER_BLOCK)
/* Offset of the descriptor block header. */
795 #define SXG_RCV_DESCRIPTOR_BLOCK_HDR_OFFSET(_Buffersize) \
796 (((_Buffersize) * SXG_RCV_DESCRIPTORS_PER_BLOCK) + \
797 (sizeof(struct sxg_rcv_descriptor_block)))
/* Offset of the trailing sxg_rcv_block_hdr. */
798 #define SXG_RCV_BLOCK_HDR_OFFSET(_Buffersize) \
799 (((_Buffersize) * SXG_RCV_DESCRIPTORS_PER_BLOCK) + \
800 (sizeof(struct sxg_rcv_descriptor_block)) + \
801 (sizeof(struct sxg_rcv_descriptor_block_hdr)))
803 /* Scatter gather list buffer */
804 #define SXG_INITIAL_SGL_BUFFERS 8192 /* Initial pool of SGL buffers */
805 #define SXG_MIN_SGL_BUFFERS 2048 /* Minimum amount and when to get more*/
806 /* Maximum to allocate (note ADAPT:ushort) */
807 #define SXG_MAX_SGL_BUFFERS 16384
810 * SXG_SGL_POOL_PROPERTIES - This structure is used to define a pool of SGL
811 * buffers. These buffers are allocated out of shared memory and used to
812 * contain a physical scatter gather list structure that is shared
813 * with the card.
815 * We split our SGL buffers into multiple pools based on size. The motivation
816 * is that some applications perform very large I/Os (1MB for example), so
817 * we need to be able to allocate an SGL to accommodate such a request.
818 * But such an SGL would require 256 24-byte SG entries - ~6k.
819 * Given that the vast majority of I/Os are much smaller than 1M, allocating
820 * a single pool of SGL buffers would be a horribly inefficient use of
821 * memory.
823 * The following structure includes two fields relating to its size.
824 * The NBSize field specifies the largest NET_BUFFER that can be handled
825 * by the particular pool. The SGEntries field defines the size, in
826 * entries, of the SGL for that pool. The SGEntries is determined by
827 * dividing the NBSize by the expected page size (4k), and then padding
828 * it by some appropriate amount as insurance (20% or so..??).
/*
 * Size/allocation parameters for one SGL buffer pool (see the pool
 * rationale in the comment above).
 * NOTE(review): the struct's closing "};" was lost in extraction.
 */
830 struct sxg_sgl_pool_properties {
831 u32 NBSize; /* Largest NET_BUFFER size for this pool */
832 ushort SGEntries; /* Number of entries in SGL */
833 ushort InitialBuffers; /* Number to allocate at initialization time */
834 ushort MinBuffers; /* When to get more */
835 ushort MaxBuffers; /* When to stop */
836 ushort PerCpuThreshold;/* See sxgh.h:SXG_RESOURCES */
840 * At the moment I'm going to statically initialize 4 pools:
841 * 100k buffer pool: The vast majority of the expected buffers are expected
842 * to be less than or equal to 100k. At 30 entries per and
843 * 8k initial buffers amounts to ~4MB of memory
844 * NOTE - This used to be 64K with 20 entries, but during
845 * WHQL NDIS 6.0 Testing (2c_mini6stress) MS does their
846 * best to send absurd NBL's with ridiculous SGLs, we
847 * have received 400byte sends contained in SGL's that
848 * have 28 entries
849 * 1M buffer pool: Buffers between 100k and 1M. Allocate 256 initial
850 * buffers with 300 entries each => ~2MB of memory
851 * 5M buffer pool: Not expected often, if at all. 32 initial buffers
852 * at 1500 entries each => ~1MB of memory
853 * 10M buffer pool: Not expected at all, except under pathological conditions.
854 * Allocate one at initialization time.
855 * Note - 10M is the current limit of what we can realistically
856 * support due to the sahara SGL bug described in the
857 * SAHARA SGL WORKAROUND below. We will likely adjust the
858 * number of pools and/or pool properties over time.
860 #define SXG_NUM_SGL_POOLS 4
/*
 * Static pool table; columns match struct sxg_sgl_pool_properties:
 *   { NBSize, SGEntries, InitialBuffers, MinBuffers, MaxBuffers,
 *     PerCpuThreshold }
 * NOTE(review): 5252880 in the third row is not exactly 5 MiB
 * (5 * 1024 * 1024 = 5242880) -- looks like a typo; verify the
 * intended value before changing it.
 */
861 #define INITIALIZE_SGL_POOL_PROPERTIES \
862 struct sxg_sgl_pool_properties SxgSglPoolProperties[SXG_NUM_SGL_POOLS] =\
864 { 102400, 30, 8192, 2048, 16384, 256}, \
865 { 1048576, 300, 256, 128, 1024, 16}, \
866 { 5252880, 1500, 32, 16, 512, 0}, \
867 {10485760, 2700, 2, 4, 32, 0}, \
870 extern struct sxg_sgl_pool_properties SxgSglPoolProperties[];
/* Largest supported NET_BUFFER size: the NBSize of the last (largest) pool */
872 #define SXG_MAX_SGL_BUFFER_SIZE \
873 SxgSglPoolProperties[SXG_NUM_SGL_POOLS - 1].NBSize
876 * SAHARA SGL WORKAROUND!!
877 * The current Sahara card uses a 16-bit counter when advancing
878 * SGL address locations. This means that if an SGL crosses
879 * a 64k boundary, the hardware will actually skip back to
880 * the start of the previous 64k boundary, with obviously
881 * undesirable results.
883 * We currently workaround this issue by allocating SGL buffers
884 * in 64k blocks and skipping over buffers that straddle the boundary.
/*
 * True when a buffer of @len bytes starting at physical address
 * @phys_addr crosses (or ends exactly on) a 64k boundary.  Such SGL
 * buffers must be skipped because of the Sahara 16-bit SGL address
 * counter bug described above.  Arguments are fully parenthesized so
 * expression arguments expand with the intended precedence.
 */
#define SXG_INVALID_SGL(phys_addr, len)					\
	((((phys_addr) >> 16) != (((phys_addr) + (len)) >> 16)))
/*
 * Allocate SGLs in 64k blocks so we can skip over buffers that would
 * straddle a 64k boundary (see SAHARA SGL WORKAROUND above).  We
 * allocate 64k worth of SGL buffers -- including the
 * struct sxg_sgl_block_hdr -- plus one extra SGL's worth of padding.
 * The expansion is parenthesized so the macro composes safely inside
 * larger arithmetic expressions.
 */
#define SXG_SGL_BLOCK_SIZE			65536
#define SXG_SGL_ALLOCATION_SIZE(_Pool)		\
	(SXG_SGL_BLOCK_SIZE + SXG_SGL_SIZE(_Pool))
/*
 * Header at the front of each 64k SGL allocation block.
 * NOTE(review): List appears to link the block's
 * struct sxg_scatter_gather buffers per pool -- verify against the
 * allocator code that uses it.
 */
898 struct sxg_sgl_block_hdr {
899 ushort Pool; /* Associated SGL pool */
900 /* struct sxg_scatter_gather blocks */
901 struct list_entry List;
902 dma64_addr_t PhysicalAddress;/* physical address */
906 * The following definition denotes the maximum block of memory that the
907 * card can DMA to. It is specified in the call to NdisMRegisterScatterGatherDma.
908 * For now, use the same value as used in the Slic/Oasis driver, which
909 * is 128M. That should cover any expected MDL that I can think of.
911 #define SXG_MAX_PHYS_MAP (1024 * 1024 * 128)
913 /* Self-identifying structure type (first field of struct sxg_scatter_gather) */
914 enum SXG_SGL_TYPE {
915 SXG_SGL_DUMB, /* Dumb NIC SGL */
916 SXG_SGL_SLOW, /* Slowpath protocol header - see below */
917 SXG_SGL_CHIMNEY /* Chimney offload SGL */
921 * The ucode expects an NDIS SGL structure that
922 * is formatted for an x64 system. When running
923 * on an x64 system, we can simply hand the NDIS SGL
924 * to the card directly. For x86 systems we must reconstruct
925 * the SGL. The following structure defines an x64
926 * formatted SGL entry
/* One x64-format SGL entry (8 + 4 + 4 + 8 = 24 bytes), mirroring the
 * wdm.h SCATTER_GATHER_ELEMENT layout the card expects (see comment
 * above). */
928 struct sxg_x64_sge {
929 dma64_addr_t Address; /* same as wdm.h */
930 u32 Length; /* same as wdm.h */
931 u32 CompilerPad; /* The compiler pads to 8-bytes */
932 u64 Reserved; /* u32 * in wdm.h. Force to 8 bytes */
936 * Our SGL structure - Essentially the same as
937 * wdm.h:SCATTER_GATHER_LIST. Note the variable number of
938 * elements based on the pool specified above
/*
 * x64-format SGL handed to the card; allocated with room for the
 * pool's SGEntries elements (see SXG_SGL_SIZE).
 * NOTE(review): Elements[1] is the old variable-length trailing-array
 * idiom; a C99 flexible array member would be the modern equivalent.
 */
940 struct sxg_x64_sgl {
941 u32 NumberOfElements;
942 u32 *Reserved;
943 struct sxg_x64_sge Elements[1]; /* Variable */
/*
 * Per-request scatter/gather tracking structure.  Ends with the
 * variable-length x64-format SGL (Sgl) handed to the card; the total
 * allocation size is SXG_SGL_SIZE(Pool).
 */
946 struct sxg_scatter_gather {
947 enum SXG_SGL_TYPE Type; /* FIRST! Dumb-nic or offload */
948 ushort Pool; /* Associated SGL pool */
949 ushort Entries; /* SGL total entries */
950 void * adapter; /* Back pointer to adapter */
951 /* Free struct sxg_scatter_gather blocks */
952 struct list_entry FreeList;
953 /* All struct sxg_scatter_gather blocks */
954 struct list_entry AllList;
955 dma64_addr_t PhysicalAddress;/* physical address */
956 unsigned char State; /* See SXG_BUFFER state above */
957 unsigned char CmdIndex; /* Command ring index */
958 struct sk_buff *DumbPacket; /* Associated Packet */
959 /* For asynchronous completions */
960 u32 Direction;
961 u32 CurOffset; /* Current SGL offset */
962 u32 SglRef; /* SGL reference count */
963 struct vlan_hdr VlanTag; /* VLAN tag to be inserted into SGL */
964 struct sxg_x64_sgl *pSgl; /* SGL Addr. Possibly &Sgl */
965 struct sxg_x64_sgl Sgl; /* SGL handed to card */
969 * Note - the "- 1" is because struct sxg_scatter_gather=>struct sxg_x64_sgl
970 * already includes 1 SGE.
972 #define SXG_SGL_SIZE(_Pool) \
973 (sizeof(struct sxg_scatter_gather) + \
974 ((SxgSglPoolProperties[_Pool].SGEntries - 1) * \
975 sizeof(struct sxg_x64_sge)))
977 /* Force NDIS to give us it's own buffer so we can reformat to our own */
978 #define SXG_SGL_BUFFER(_SxgSgl) NULL
979 #define SXG_SGL_BUFFER_LENGTH(_SxgSgl) 0
980 #define SXG_SGL_BUF_SIZE 0
983 #if defined(CONFIG_X86_64)
984 #define SXG_SGL_BUFFER(_SxgSgl) (&_SxgSgl->Sgl)
985 #define SXG_SGL_BUFFER_LENGTH(_SxgSgl) ((_SxgSgl)->Entries * \
986 sizeof(struct sxg_x64_sge))
987 #define SXG_SGL_BUF_SIZE sizeof(struct sxg_x64_sgl)
988 #elif defined(CONFIG_X86)
989 // Force NDIS to give us it's own buffer so we can reformat to our own
990 #define SXG_SGL_BUFFER(_SxgSgl) NULL
991 #define SXG_SGL_BUFFER_LENGTH(_SxgSgl) 0
992 #define SXG_SGL_BUF_SIZE 0
993 #else
994 #error staging: sxg: driver is for X86 only!
995 #endif
997 /* Microcode statistics: drop/overflow counters kept by the ucode */
998 struct sxg_ucode_stats {
999 u32 RPDQOflow; /* PDQ overflow (unframed ie dq & drop 1st) */
1000 u32 XDrops; /* Xmt drops due to no xmt buffer */
1001 u32 ERDrops; /* Rcv drops due to ER full */
1002 u32 NBDrops; /* Rcv drops due to out of host buffers */
1003 u32 PQDrops; /* Rcv drops due to PDQ full */
1004 /* Rcv drops due to bad frame: no link addr match, frlen > max */
1005 u32 BFDrops;
1006 u32 UPDrops; /* Rcv drops due to UPFq full */
1007 u32 XNoBufs; /* Xmt drop due to no DRAM Xmit buffer or PxyBuf */
1011 * Macros for handling the Offload engine values
1013 /* Number of positions to shift Network Header Length before passing to card */
1014 #define SXG_NW_HDR_LEN_SHIFT 2
/* NOTE(review): >> 2 presumably converts a byte length into 4-byte
 * words for the card -- confirm against the microcode interface. */