#include "ecore_common.h"

#define OOO_CID_USTRORM_PROD_DIFF (0x4000)
u8_t
lm_is_rx_completion(lm_device_t *pdev, u8_t chain_idx)
{
    u8_t            result    = FALSE;
    lm_rcq_chain_t* rcq_chain = &LM_RCQ(pdev, chain_idx);

    DbgBreakIf(!(pdev && rcq_chain));

    // The hw_con_idx_ptr of the rcq_chain points directly to the Rx index in
    // the USTORM part of the non-default status block.
    if (rcq_chain->hw_con_idx_ptr &&
        (mm_le16_to_cpu(*rcq_chain->hw_con_idx_ptr) !=
         lm_bd_chain_cons_idx(&rcq_chain->bd_chain)))
    {
        result = TRUE;
    }

    DbgMessage(pdev, INFORMi, "lm_is_rx_completion: result is:%s\n", result ? "TRUE" : "FALSE");

    return result;
}
/*******************************************************************************
 * set both rcq, rx bd and rx sge (if valid) prods
 ******************************************************************************/
static void FORCEINLINE
lm_rx_set_prods( lm_device_t   *pdev,
                 u16_t const    iro_prod_offset,
                 lm_bd_chain_t *rcq_chain_bd,
                 lm_bd_chain_t *rx_chain_bd,
                 lm_bd_chain_t *rx_chain_sge,
                 const u32_t    chain_idx )
{
    lm_rx_chain_t* rxq_chain         = &LM_RXQ(pdev, chain_idx);
    u32_t          val32             = 0;
    u64_t          val64             = 0;
    u16_t          val16_lo          = lm_bd_chain_prod_idx(rcq_chain_bd);
    u16_t          val16_hi          = lm_bd_chain_prod_idx(rx_chain_bd);
    u32_t const    ustorm_bar_offset = (IS_CHANNEL_VFDEV(pdev)) ? VF_BAR0_USDM_QUEUES_OFFSET : BAR_USTRORM_INTMEM;

    if(OOO_CID(pdev) == chain_idx)
    {
        DbgBreakIfFastPath( NULL != rx_chain_sge );
        DbgBreakIfFastPath(IS_CHANNEL_VFDEV(pdev));

        LM_INTMEM_WRITE16(PFDEV(pdev),
                          TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(FUNC_ID(pdev)),
                          rxq_chain->common.bd_prod_without_next,
                          BAR_TSTRORM_INTMEM);

        // Ugly FW workaround: the OOO FW wants the producers shifted by a
        // constant diff.
        val16_lo += OOO_CID_USTRORM_PROD_DIFF;
        val16_hi += OOO_CID_USTRORM_PROD_DIFF;
    }

    val32 = ((u32_t)(val16_hi << 16) | val16_lo);

    // Notify the fw of the prod of the RCQ. No need to do that for the Rx bd chain.
    if( rx_chain_sge )
    {
        val64 = (((u64_t)lm_bd_chain_prod_idx(rx_chain_sge)) << 32) | val32;

        LM_INTMEM_WRITE64(PFDEV(pdev),
                          iro_prod_offset,
                          val64,
                          ustorm_bar_offset);
    }
    else
    {
        LM_INTMEM_WRITE32(PFDEV(pdev),
                          iro_prod_offset,
                          val32,
                          ustorm_bar_offset);
    }
}
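/* Producer-update layout as inferred from the packing code above (a sketch,
 * not taken from FW documentation):
 *
 *   bits  0..15 : RCQ producer   (val16_lo)
 *   bits 16..31 : Rx BD producer (val16_hi)
 *   bits 32..47 : SGE producer   (only when rx_chain_sge is valid)
 *
 * A single 32/64-bit intmem write therefore publishes all producers at once. */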
/*******************************************************************************
 * rx_chain_bd is always valid; rx_chain_sge is valid only when LAH is enabled
 * on this queue.
 * All if() checks are done on rx_chain_bd, since it is always valid and the
 * sge chain must stay consistent with it (verified whenever sge is valid).
 * All bd_xxx operations are performed on both chains.
 ******************************************************************************/
u32_t
lm_post_buffers(
    lm_device_t *pdev,
    u32_t        chain_idx,
    lm_packet_t *packet,   /* optional. */
    u8_t const   is_tpa)   /* when TRUE, post to the TPA chain instead of the regular rx chain */
{
    lm_rx_chain_common_t* rxq_chain_common  = NULL;
    lm_bd_chain_t*        rx_chain_bd       = NULL;
    lm_rx_chain_t*        rxq_chain         = NULL;
    lm_tpa_chain_t*       tpa_chain         = NULL;
    lm_bd_chain_t*        bd_chain_to_check = NULL;
    lm_rcq_chain_t*       rcq_chain         = &LM_RCQ(pdev, chain_idx);
    lm_bd_chain_t*        rx_chain_sge      = NULL;
    u32_t                 pkt_queued        = 0;
    struct eth_rx_bd*     cur_bd            = NULL;
    struct eth_rx_sge*    cur_sge           = NULL;
    u32_t                 prod_bseq         = 0;
    u32_t                 rcq_prod_bseq     = 0;
    u16_t                 current_prod      = 0;
    u16_t                 active_entry      = 0;

    DbgMessage(pdev, INFORMl2, "### lm_post_buffers\n");

    if(FALSE == is_tpa)
    {
        rxq_chain_common = &LM_RXQ_COMMON(pdev, chain_idx);
        rx_chain_bd      = &LM_RXQ_CHAIN_BD(pdev, chain_idx);
        rx_chain_sge     = LM_RXQ_SGE_PTR_IF_VALID(pdev, chain_idx);
        rxq_chain        = &LM_RXQ(pdev, chain_idx);

        /* the assumption is that the number of cqes is less than or equal to the
           corresponding rx bds, therefore if there are no cqes left, break */
        bd_chain_to_check = &rcq_chain->bd_chain;
    }
    else
    {
        rxq_chain_common = &LM_TPA_COMMON(pdev, chain_idx);
        rx_chain_bd      = &LM_TPA_CHAIN_BD(pdev, chain_idx);
        tpa_chain        = &LM_TPA(pdev, chain_idx);
        // In TPA we don't add to the RCQ when posting buffers.
        bd_chain_to_check = rx_chain_bd;
    }

    // Verify the BD chains are consistent.
    DbgBreakIfFastPath( rx_chain_sge && !lm_bd_chains_are_consistent( rx_chain_sge, rx_chain_bd ) );

    /* Make sure we have a bd left for posting a receive buffer. */
    if(packet)
    {
        // Insert the given packet.
        DbgBreakIfFastPath(SIG(packet) != L2PACKET_RX_SIG);

        if(lm_bd_chain_is_empty(bd_chain_to_check))
        {
            s_list_push_tail(&rxq_chain_common->free_descq, &packet->link);
            packet = NULL;
        }
    }
    else if(!lm_bd_chain_is_empty(bd_chain_to_check))
    {
        packet = (lm_packet_t *) s_list_pop_head(&rxq_chain_common->free_descq);
    }

    prod_bseq = rxq_chain_common->prod_bseq;

    // In TPA we won't increment rcq_prod_bseq.
    rcq_prod_bseq = rcq_chain->prod_bseq;

    while(packet)
    {
        current_prod = lm_bd_chain_prod_idx(rx_chain_bd);
        cur_bd       = lm_bd_chain_produce_bd(rx_chain_bd);
        rxq_chain_common->bd_prod_without_next++;
        cur_sge      = rx_chain_sge ? lm_bd_chain_produce_bd(rx_chain_sge) : NULL;

        prod_bseq += packet->l2pkt_rx_info->mem_size;

        if(FALSE == is_tpa)
        {
            // Take care of the RCQ related prod stuff.

            // Update the prod of the RCQ only AFTER the Rx bd!
            rcq_prod_bseq += packet->l2pkt_rx_info->mem_size;

            /* These were actually produced before by fw, but we only produce them
               now to make sure they're synced with the rx-chain */
            lm_bd_chain_bd_produced(&rcq_chain->bd_chain);
        }

        packet->u1.rx.next_bd_idx = lm_bd_chain_prod_idx(rx_chain_bd);

#if defined(L2_RX_BUF_SIG)
        /* make sure signatures exist before and after the buffer */
        DbgBreakIfFastPath(SIG(packet->u1.rx.mem_virt - pdev->params.rcv_buffer_offset) != L2PACKET_RX_SIG);
        DbgBreakIfFastPath(END_SIG(packet->u1.rx.mem_virt, MAX_L2_CLI_BUFFER_SIZE(pdev, chain_idx)) != L2PACKET_RX_SIG);
#endif /* L2_RX_BUF_SIG */

        cur_bd->addr_lo = mm_cpu_to_le32(packet->u1.rx.mem_phys[0].as_u32.low);
        cur_bd->addr_hi = mm_cpu_to_le32(packet->u1.rx.mem_phys[0].as_u32.high);

        if(cur_sge)
        {
            cur_sge->addr_lo = mm_cpu_to_le32(packet->u1.rx.mem_phys[1].as_u32.low);
            cur_sge->addr_hi = mm_cpu_to_le32(packet->u1.rx.mem_phys[1].as_u32.high);
        }

        pkt_queued++;

        if(FALSE == is_tpa)
        {
            s_list_push_tail(&rxq_chain->active_descq, &packet->link);
        }
        else
        {
            // The active descriptor must sit in the matching entry.
            active_entry = LM_TPA_BD_ENTRY_TO_ACTIVE_ENTRY(pdev, chain_idx, current_prod);

            LM_TPA_ACTIVE_ENTRY_BOUNDARIES_VERIFY(pdev, chain_idx, active_entry);
            tpa_chain->sge_chain.active_descq_array[active_entry] = packet;
        }

        if(lm_bd_chain_is_empty(bd_chain_to_check))
        {
            break;
        }

        /* Make sure we have a bd left for posting a receive buffer. */
        packet = (lm_packet_t *) s_list_pop_head(&rxq_chain_common->free_descq);
    }

    rxq_chain_common->prod_bseq = prod_bseq;

    if(FALSE == is_tpa)
    {
        // Update the prod of the RCQ only AFTER the Rx bd!
        // This code seems unnecessary, maybe it should be deleted.
        // In TPA we won't increment rcq_prod_bseq.
        rcq_chain->prod_bseq = rcq_prod_bseq;
    }

    // Notify the fw of the prod.
    if(FALSE == is_tpa)
    {
        lm_rx_set_prods(pdev, rcq_chain->iro_prod_offset, &rcq_chain->bd_chain, rx_chain_bd, rx_chain_sge, chain_idx);
    }
    else
    {
        lm_rx_set_prods(pdev, rcq_chain->iro_prod_offset, &rcq_chain->bd_chain, &LM_RXQ_CHAIN_BD(pdev, chain_idx), &LM_TPA_CHAIN_BD(pdev, chain_idx), chain_idx);
    }

    DbgMessage(pdev, INFORMl2, "lm_post_buffers - bd con: %d bd prod: %d \n",
               lm_bd_chain_cons_idx(rx_chain_bd), lm_bd_chain_prod_idx(rx_chain_bd));
    DbgMessage(pdev, INFORMl2, "lm_post_buffers - cq con: %d cq prod: %d \n",
               lm_bd_chain_cons_idx(&rcq_chain->bd_chain), lm_bd_chain_prod_idx(&rcq_chain->bd_chain));

    return pkt_queued;
} /* lm_post_buffers */
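/* Posting flow in brief (summary of lm_post_buffers above): take the given
 * packet, or pop one from free_descq; produce an rx BD (plus an SGE when LAH
 * is enabled); advance the bseq byte counters; and finally publish the new
 * producers to the FW via lm_rx_set_prods(). For TPA chains the RCQ is left
 * untouched, since buffers are tracked in active_descq_array instead. */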
/**
 * Updates tpa_chain->last_max_cons_sge if there is a new max.
 * The basic assumption is that the BD prod is always (cyclically) ahead of the
 * BD cons, so the unsigned subtraction tells us which index is closer to the
 * BD prod.
 *
 * @return STATIC void
 */
STATIC void
lm_tpa_sge_update_last_max( IN lm_device_t* pdev,
                            IN const u32_t  chain_idx,
                            IN const u16_t  new_index )
{
    lm_tpa_sge_chain_t* sge_tpa_chain      = &LM_SGE_TPA_CHAIN(pdev, chain_idx);
    u16_t const         prod_idx           = lm_bd_chain_prod_idx(&LM_TPA_CHAIN_BD(pdev, chain_idx));
    u16_t const         prod_minus_new_sge = prod_idx - new_index;
    u16_t const         prod_minus_saved   = prod_idx - sge_tpa_chain->last_max_con;

    if(prod_minus_new_sge < prod_minus_saved)
    {
        sge_tpa_chain->last_max_con = new_index;
    }
}
/*
   A cyclic comparison would have been a nicer solution, but it limits the BD
   ring size to 2^15 instead of 2^16. That limitation would have to be enforced
   when allocating the TPA BD chain:

   DbgBreakIf(LM_TPA_CHAIN_BD_NUM_ELEM(_pdev, chain_idx) < (2^15));
   if (CYCLIC_GT_16(sge_index, sge_tpa_chain->last_max_con))
   {
       sge_tpa_chain->last_max_con = sge_index;
   }
*/
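/* Worked example of the wraparound-safe comparison above (arbitrary values):
 * with prod_idx = 0x0010, last_max_con = 0xFFF0 and new_index = 0x0008:
 *
 *   prod_minus_new_sge = (u16_t)(0x0010 - 0x0008) = 0x0008
 *   prod_minus_saved   = (u16_t)(0x0010 - 0xFFF0) = 0x0020
 *
 * 0x0008 < 0x0020, so new_index is closer to the BD prod and becomes the new
 * last_max_con, even though it is numerically smaller. */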
/**
 * The TPA sge consumer is incremented in 64-bit (BIT_VEC64_ELEM_SZ)
 * resolution; page-end entries are accounted for accordingly.
 *
 * @return STATIC void
 */
STATIC void
lm_tpa_incr_sge_cons( IN lm_device_t* pdev,
                      IN const u32_t  chain_idx,
                      IN const u16_t  mask_entry_idx)
{
    lm_tpa_sge_chain_t* sge_tpa_chain = &LM_SGE_TPA_CHAIN(pdev, chain_idx);
    lm_bd_chain_t*      bd_chain      = &LM_TPA_CHAIN_BD(pdev, chain_idx);
    u16_t               bd_entry      = 0;
    u16_t               active_entry  = 0;
    u16_t               i             = 0;

    bd_chain->cons_idx += BIT_VEC64_ELEM_SZ;

    DbgBreakIf(LM_TPA_MASK_LEN(pdev, chain_idx) <= mask_entry_idx);
    sge_tpa_chain->mask_array[mask_entry_idx] = BIT_VEC64_ELEM_ONE_MASK;

    // Make sure bds_per_page is a power of 2 that is higher than 64.
    DbgBreakIf(0 != (lm_bd_chain_bds_per_page(bd_chain) & BIT_VEC64_ELEM_MASK));
    DbgBreakIf(BIT_VEC64_ELEM_SZ >= lm_bd_chain_bds_per_page(bd_chain));

    if((lm_bd_chain_cons_idx(bd_chain) & lm_bd_chain_bds_per_page_mask(bd_chain)) == 0)
    {
        // Just closed a page, so the page-end entries must be accounted for.
        lm_bd_chain_bds_consumed(bd_chain, (BIT_VEC64_ELEM_SZ - lm_bd_chain_bds_skip_eop(bd_chain)));

        /* clear page-end entries */
        for(i = 1; i <= lm_bd_chain_bds_skip_eop(bd_chain); i++ )
        {
            bd_entry     = lm_bd_chain_cons_idx(bd_chain) - i;
            active_entry = LM_TPA_BD_ENTRY_TO_ACTIVE_ENTRY(pdev, chain_idx, bd_entry);
            LM_TPA_MASK_CLEAR_ACTIVE_BIT(pdev, chain_idx, active_entry);
        }
    }
    else
    {
        lm_bd_chain_bds_consumed(bd_chain, BIT_VEC64_ELEM_SZ);
    }
}
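/* Note on the mask_array semantics (as used above and in lm_tpa_stop below):
 * each 64-bit word covers BIT_VEC64_ELEM_SZ active-descriptor entries; a set
 * bit marks an entry still outstanding in HW. lm_tpa_stop() clears a bit when
 * the FW returns that SGE, and the consumer advances only over words that
 * have gone completely clear, re-arming each consumed word to
 * BIT_VEC64_ELEM_ONE_MASK here. */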
/**
 * Handle the TPA stop code.
 *
 * @param rcvd_list - global receive list.
 *
 * @return STATIC u32_t pkt_cnt - the number of packets. The count is also an
 *         input parameter; packets are added to the global list.
 */
STATIC u32_t
lm_tpa_stop( IN lm_device_t*                     pdev,
             INOUT s_list_t*                     rcvd_list,
             IN const struct eth_end_agg_rx_cqe* cqe,
             IN const u32_t                      chain_idx,
             IN u32_t                            pkt_cnt,
             IN const u8_t                       queue_index)
{
    lm_tpa_chain_t*     tpa_chain        = &LM_TPA(pdev, chain_idx);
    lm_tpa_sge_chain_t* sge_tpa_chain    = &LM_SGE_TPA_CHAIN(pdev, chain_idx);
    lm_bd_chain_t*      bd_chain         = &LM_TPA_CHAIN_BD(pdev, chain_idx);
    lm_packet_t*        pkt              = tpa_chain->start_coales_bd[queue_index].packet; // Reads the TPA start coalesce array (PD_R)
    u32_t               sge_size         = mm_le16_to_cpu(cqe->pkt_len) - pkt->l2pkt_rx_info->size;
    u32_t const         sge_num_elem     = DIV_ROUND_UP_BITS(sge_size, LM_TPA_PAGE_BITS);
    u32_t               fw_sge_index     = 0;
    u16_t               active_entry     = 0;
    u16_t               first_max_set    = 0;
    u16_t               last_max_set     = 0;
    u16_t               i                = 0;
    u8_t                b_force_first_enter = FALSE;
    u16_t               loop_cnt_dbg     = 0;
    const u32_t         lm_tpa_page_size = LM_TPA_PAGE_SIZE;

    // The total packet size given in the end aggregation must be larger than the
    // size given in the start aggregation. The only case in which both sizes are
    // equal is when the stop aggregation doesn't contain data.
    DbgBreakIf( mm_le16_to_cpu(cqe->pkt_len) < pkt->l2pkt_rx_info->size );

    DbgBreakIf( TRUE != tpa_chain->start_coales_bd[queue_index].is_entry_used );
    tpa_chain->start_coales_bd[queue_index].is_entry_used = FALSE;

    // Indicate to the upper layer that this is a TPA packet.
    SET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_START_RSC_TPA);
    // Update the TPA-only fields from the CQE.
    pkt->l2pkt_rx_info->total_packet_size = mm_le16_to_cpu(cqe->pkt_len);
    pkt->l2pkt_rx_info->coal_seg_cnt      = mm_le16_to_cpu(cqe->num_of_coalesced_segs);
    pkt->l2pkt_rx_info->dup_ack_cnt       = cqe->pure_ack_count;
    pkt->l2pkt_rx_info->ts_delta          = mm_le32_to_cpu(cqe->timestamp_delta);

    /* make sure packet size is larger than header size */
    DbgBreakIfFastPath(pkt->l2pkt_rx_info->total_packet_size < MIN_ETHERNET_PACKET_SIZE);

    // Add this packet descriptor to the global receive list (rcvd_list is later indicated to the miniport).
    s_list_push_tail(rcvd_list, &pkt->link);
    pkt_cnt++;

    ASSERT_STATIC(LM_TPA_MAX_AGG_SIZE == ARRSIZE(cqe->sgl_or_raw_data.sgl));
    DbgBreakIf(ARRSIZE(cqe->sgl_or_raw_data.sgl) < sge_num_elem);

    // If the TPA stop doesn't contain any new BDs.
    if(0 == sge_num_elem)
    {
        // The total packet size given in the end aggregation must equal the size
        // given in the start aggregation if the stop aggregation doesn't contain data.
        DbgBreakIf( mm_le16_to_cpu(cqe->pkt_len) != pkt->l2pkt_rx_info->size );

        return pkt_cnt;
    }

    for(fw_sge_index = 0; fw_sge_index < sge_num_elem; fw_sge_index++)
    {
        DbgBreakIf(ARRSIZE(cqe->sgl_or_raw_data.sgl) <= fw_sge_index);
        active_entry = LM_TPA_BD_ENTRY_TO_ACTIVE_ENTRY(pdev, chain_idx, mm_le16_to_cpu(cqe->sgl_or_raw_data.sgl[fw_sge_index]));

        LM_TPA_ACTIVE_ENTRY_BOUNDARIES_VERIFY(pdev, chain_idx, active_entry);
        pkt = tpa_chain->sge_chain.active_descq_array[active_entry];
        LM_TPA_MASK_CLEAR_ACTIVE_BIT(pdev, chain_idx, active_entry);

        /************ start TPA debug code ******************************/
        tpa_chain->dbg_params.pck_ret_from_chip++;
        /************ end TPA debug code ********************************/

        DbgBreakIf((fw_sge_index != (sge_num_elem - 1)) && (sge_size < LM_TPA_PAGE_SIZE));
        pkt->l2pkt_rx_info->size = min(sge_size, lm_tpa_page_size);
        s_list_push_tail(rcvd_list, &(pkt->link));
        pkt_cnt++;

        sge_size -= LM_TPA_PAGE_SIZE;
    }

#if defined(_NTDDK_)
    //PreFast 28182: PreFast reviewed and suppressed, this situation shouldn't occur.
#pragma warning (push)
#pragma warning( disable:6385 )
#endif
    /* Here we assume that the last SGE index is the biggest */
    lm_tpa_sge_update_last_max(pdev,
                               chain_idx,
                               mm_le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_num_elem-1]));
#if defined(_NTDDK_)
#pragma warning (pop)
#endif

    // Find the first consumer that is a candidate to free, and the last.
    first_max_set = LM_TPA_BD_ENTRY_TO_MASK_ENTRY(pdev, chain_idx, lm_bd_chain_cons_idx(bd_chain));
    last_max_set  = LM_TPA_BD_ENTRY_TO_MASK_ENTRY(pdev, chain_idx, sge_tpa_chain->last_max_con);

    DbgBreakIf(0 != (lm_bd_chain_cons_idx(bd_chain) & BIT_VEC64_ELEM_MASK));
    /* If the ring is full, enter anyway */
    if((last_max_set == first_max_set) && (lm_bd_chain_is_full(bd_chain)))
    {
        b_force_first_enter = TRUE;
    }

    /* Now update the cons */
    for (i = first_max_set; ((i != last_max_set) || (TRUE == b_force_first_enter)); i = LM_TPA_MASK_NEXT_ELEM(pdev, chain_idx, i))
    {
        DbgBreakIf(LM_TPA_MASK_LEN(pdev, chain_idx) <= i);
        if (sge_tpa_chain->mask_array[i])
        {
            break;
        }

        b_force_first_enter = FALSE;

        lm_tpa_incr_sge_cons(pdev,
                             chain_idx,
                             i);
        loop_cnt_dbg++;
        DbgBreakIf(LM_TPA_MASK_LEN(pdev, chain_idx) < loop_cnt_dbg);
    }

    return pkt_cnt;
}
/**
 * Handle the TPA start code.
 *
 * @return STATIC void
 */
STATIC void
lm_tpa_start( IN lm_device_t*    pdev,
              INOUT lm_packet_t* pkt,
              IN const u32_t     chain_idx,
              IN const u8_t      queue_index)
{
    lm_tpa_chain_t* tpa_chain = &LM_TPA(pdev, chain_idx);

    DbgBreakIf( FALSE != tpa_chain->start_coales_bd[queue_index].is_entry_used );

    tpa_chain->start_coales_bd[queue_index].is_entry_used = TRUE;
    tpa_chain->start_coales_bd[queue_index].packet        = pkt;
}
/**
 * Set the TPA start known flags.
 * This is only an optimization, to avoid if's whose outcome is already known
 * in TPA.
 *
 * @return STATIC void
 */
STATIC void
lm_tpa_start_flags_handle( IN lm_device_t*                       pdev,
                           IN const struct eth_fast_path_rx_cqe* cqe,
                           INOUT lm_packet_t*                    pkt,
                           IN const u16_t                        parse_flags)
{
    // TPA is always (and only) above IPV4 or IPV6.
    DbgBreakIf(!((GET_FLAGS_WITH_OFFSET(parse_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL,
                                        PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT) == PRS_FLAG_OVERETH_IPV4) ||
                 (GET_FLAGS_WITH_OFFSET(parse_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL,
                                        PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT) == PRS_FLAG_OVERETH_IPV6)));

    if(PRS_FLAG_OVERETH_IPV4 == GET_FLAGS_WITH_OFFSET(parse_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL,
                                                      PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT))
    {
        SET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_IS_IPV4_DATAGRAM);

        DbgBreakIf(GET_FLAGS(cqe->status_flags, ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG));
        // In IPV4 there is always a checksum.
        // The TPA ip cksum is always valid.
        DbgBreakIf(GET_FLAGS(cqe->type_error_flags, ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG));

        SET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_IP_CKSUM_IS_GOOD);
    }
    else
    {
        SET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_IS_IPV6_DATAGRAM);
        // In IPV6 there is no IP checksum.
        DbgBreakIf(0 == GET_FLAGS(cqe->status_flags, ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG));
    }

    // If there was fragmentation, the packet is delivered on a regular BD (the TPA aggregation is stopped).
    DbgBreakIf( GET_FLAGS(parse_flags, PARSING_FLAGS_FRAGMENTATION_STATUS));

    /* check if TCP segment */
    // TPA is always above TCP.
    DbgBreakIf(PRS_FLAG_OVERIP_TCP != GET_FLAGS_WITH_OFFSET(parse_flags, PARSING_FLAGS_OVER_IP_PROTOCOL,
                                                            PARSING_FLAGS_OVER_IP_PROTOCOL_SHIFT));

    SET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_IS_TCP_SEGMENT);

    // TCP was checked above. The TCP checksum must be done by FW in TPA.
    DbgBreakIf(GET_FLAGS(cqe->status_flags, ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG));
    // The TCP checksum must be valid in a successful TPA aggregation.
    DbgBreakIf(GET_FLAGS(cqe->type_error_flags, ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG));

    /* In TPA the tcp cksum is always validated */
    /* valid tcp/udp cksum */
#define SHIFT_IS_GOOD   1
#define SHIFT_IS_BAD    2
    ASSERT_STATIC(LM_RX_FLAG_UDP_CKSUM_IS_GOOD == LM_RX_FLAG_IS_UDP_DATAGRAM << SHIFT_IS_GOOD);
    ASSERT_STATIC(LM_RX_FLAG_UDP_CKSUM_IS_BAD  == LM_RX_FLAG_IS_UDP_DATAGRAM << SHIFT_IS_BAD);
    ASSERT_STATIC(LM_RX_FLAG_TCP_CKSUM_IS_GOOD == LM_RX_FLAG_IS_TCP_SEGMENT  << SHIFT_IS_GOOD);
    ASSERT_STATIC(LM_RX_FLAG_TCP_CKSUM_IS_BAD  == LM_RX_FLAG_IS_TCP_SEGMENT  << SHIFT_IS_BAD);

    SET_FLAGS(pkt->l2pkt_rx_info->flags, ( GET_FLAGS(pkt->l2pkt_rx_info->flags, (LM_RX_FLAG_IS_TCP_SEGMENT)) << SHIFT_IS_GOOD ) );
}
/**
 * Set the regular (non-TPA) rx flags from the CQE.
 * This is only an optimization.
 *
 * @return STATIC void
 */
STATIC void
lm_regular_flags_handle( IN lm_device_t*                       pdev,
                         IN const struct eth_fast_path_rx_cqe* cqe,
                         INOUT lm_packet_t*                    pkt,
                         IN const u16_t                        parse_flags)
{
    /* check if IP datagram (either IPv4 or IPv6) */
    if(((GET_FLAGS(parse_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >>
         PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT) == PRS_FLAG_OVERETH_IPV4) ||
       ((GET_FLAGS(parse_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >>
         PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT) == PRS_FLAG_OVERETH_IPV6))
    {
        pkt->l2pkt_rx_info->flags |=
            (GET_FLAGS(parse_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >>
             PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT) == PRS_FLAG_OVERETH_IPV4 ?
            LM_RX_FLAG_IS_IPV4_DATAGRAM :
            LM_RX_FLAG_IS_IPV6_DATAGRAM;

        if(!GET_FLAGS(cqe->status_flags, ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG))
        {
            /* ip cksum validated */
            if (GET_FLAGS(cqe->type_error_flags, ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG))
            {
                /* invalid ip cksum */
                SET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_IP_CKSUM_IS_BAD);

                LM_COMMON_DRV_STATS_ATOMIC_INC_ETH(pdev, rx_ip_cs_error_count);
            }
            else
            {
                /* valid ip cksum */
                SET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_IP_CKSUM_IS_GOOD);
            }
        }
    }

    // TCP or UDP segment.
    if(!GET_FLAGS(parse_flags, PARSING_FLAGS_FRAGMENTATION_STATUS))
    {
        /* check if TCP segment */
        if((GET_FLAGS(parse_flags, PARSING_FLAGS_OVER_IP_PROTOCOL) >>
            PARSING_FLAGS_OVER_IP_PROTOCOL_SHIFT) == PRS_FLAG_OVERIP_TCP)
        {
            SET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_IS_TCP_SEGMENT);
            DbgMessage(pdev, INFORM, "--- TCP Packet --- \n");
        }
        /* check if UDP segment */
        else if((GET_FLAGS(parse_flags, PARSING_FLAGS_OVER_IP_PROTOCOL) >>
                 PARSING_FLAGS_OVER_IP_PROTOCOL_SHIFT) == PRS_FLAG_OVERIP_UDP)
        {
            SET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_IS_UDP_DATAGRAM);
            DbgMessage(pdev, INFORM, "--- UDP Packet --- \n");
        }
    }

    if( GET_FLAGS(pkt->l2pkt_rx_info->flags, (LM_RX_FLAG_IS_TCP_SEGMENT | LM_RX_FLAG_IS_UDP_DATAGRAM)) &&
        !GET_FLAGS(cqe->status_flags, ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG))
    {
        ASSERT_STATIC(LM_RX_FLAG_UDP_CKSUM_IS_GOOD == LM_RX_FLAG_IS_UDP_DATAGRAM << SHIFT_IS_GOOD);
        ASSERT_STATIC(LM_RX_FLAG_UDP_CKSUM_IS_BAD  == LM_RX_FLAG_IS_UDP_DATAGRAM << SHIFT_IS_BAD);
        ASSERT_STATIC(LM_RX_FLAG_TCP_CKSUM_IS_GOOD == LM_RX_FLAG_IS_TCP_SEGMENT  << SHIFT_IS_GOOD);
        ASSERT_STATIC(LM_RX_FLAG_TCP_CKSUM_IS_BAD  == LM_RX_FLAG_IS_TCP_SEGMENT  << SHIFT_IS_BAD);

        DbgMessage(pdev, INFORM, " Checksum validated.\n");

        /* tcp/udp cksum validated */
        if (GET_FLAGS(cqe->type_error_flags, ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
        {
            /* invalid tcp/udp cksum */
            SET_FLAGS(pkt->l2pkt_rx_info->flags, ( GET_FLAGS(pkt->l2pkt_rx_info->flags, (LM_RX_FLAG_IS_TCP_SEGMENT | LM_RX_FLAG_IS_UDP_DATAGRAM)) << SHIFT_IS_BAD ) );

            LM_COMMON_DRV_STATS_ATOMIC_INC_ETH(pdev, rx_tcp_cs_error_count);
            DbgMessage(pdev, INFORM, " BAD checksum.\n");
        }
        else if (GET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_IP_CKSUM_IS_BAD))
        {
            /* invalid tcp/udp cksum due to invalid ip cksum */
            SET_FLAGS(pkt->l2pkt_rx_info->flags, ( GET_FLAGS(pkt->l2pkt_rx_info->flags, (LM_RX_FLAG_IS_TCP_SEGMENT | LM_RX_FLAG_IS_UDP_DATAGRAM)) << SHIFT_IS_BAD ) );
            DbgMessage(pdev, INFORM, " BAD IP checksum\n");
        }
        else
        {
            /* valid tcp/udp cksum */
            SET_FLAGS(pkt->l2pkt_rx_info->flags, ( GET_FLAGS(pkt->l2pkt_rx_info->flags, (LM_RX_FLAG_IS_TCP_SEGMENT | LM_RX_FLAG_IS_UDP_DATAGRAM)) << SHIFT_IS_GOOD ) );
            DbgMessage(pdev, INFORM, " GOOD checksum.\n");
        }
    }
    else
    {
        DbgMessage(pdev, INFORM, " Checksum NOT validated.\n");
        /* Packets with invalid TCP options are reported with L4_XSUM_NO_VALIDATION due
           to a HW limitation. In this case we assume that their checksum is OK. */
        if(GET_FLAGS(pkt->l2pkt_rx_info->flags, (LM_RX_FLAG_IS_TCP_SEGMENT | LM_RX_FLAG_IS_UDP_DATAGRAM)) &&
           GET_FLAGS(cqe->status_flags, ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) &&
           GET_FLAGS(cqe->pars_flags.flags, PARSING_FLAGS_TCP_OPTIONS_EXIST))
        {
            DbgMessage(pdev, INFORM, " TCP Options exist - forcing return value.\n");
            if(GET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_IP_CKSUM_IS_BAD))
            {
                DbgMessage(pdev, INFORM, " IP checksum invalid - reporting BAD checksum.\n");
                SET_FLAGS(pkt->l2pkt_rx_info->flags, ( GET_FLAGS(pkt->l2pkt_rx_info->flags, (LM_RX_FLAG_IS_TCP_SEGMENT | LM_RX_FLAG_IS_UDP_DATAGRAM)) << SHIFT_IS_BAD ) );
            }
            else
            {
                DbgMessage(pdev, INFORM, " IP checksum ok - reporting GOOD checksum.\n");
                SET_FLAGS(pkt->l2pkt_rx_info->flags, ( GET_FLAGS(pkt->l2pkt_rx_info->flags, (LM_RX_FLAG_IS_TCP_SEGMENT | LM_RX_FLAG_IS_UDP_DATAGRAM)) << SHIFT_IS_GOOD ) );
            }
        }
    }
}
STATIC void
lm_recv_set_pkt_len( IN lm_device_t*    pdev,
                     INOUT lm_packet_t* pkt,
                     IN const u16_t     pkt_len,
                     IN const u32_t     chain_idx)
{
    // Changed, as we don't have the fhdr infrastructure.
    pkt->l2pkt_rx_info->size = pkt_len; //- 4; /* CRC32 */

    DbgMessage(pdev, VERBOSEl2, "pkt_size: %d\n", pkt->l2pkt_rx_info->size);
}
static u32_t
calc_cksum(u16_t *hdr, u32_t len_in_bytes, u32_t sum)
{
    // len_in_bytes - the length in bytes of the header
    // sum          - the initial checksum
    while (len_in_bytes > 1)
    {
        sum += NTOH16(*hdr);
        hdr++;
        len_in_bytes -= 2;
    }

    /* add the left-over byte, if any */
    if (len_in_bytes)
    {
        sum += ((NTOH16(*hdr)) & 0xFF00);
    }

    return sum;
}
static u8_t
validate_cksum(u32_t sum)
{
    // Returns TRUE iff the checksum (already included in the summed data) is valid.

    // fold the 32-bit sum to 16 bits
    while (sum >> 16)
    {
        sum = (sum & 0xffff) + (sum >> 16);
    }

    return ((u16_t)(sum) == 0xffff);
}
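/* Usage sketch for the two helpers above ('ip_hdr' is a hypothetical buffer):
 * a sum computed over a region that includes its own checksum field folds to
 * 0xffff when the data is intact. */
#if 0
if (validate_cksum(calc_cksum((u16_t *)ip_hdr, 20 /* plain IPv4 header */, 0)))
{
    /* header checksum is good */
}
#endif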
static u16_t
get_ip_hdr_len(u8_t *hdr)
{
    // Returns the IP header length in bytes.
    u16_t ip_hdr_len = 40; // IPv6 header length; we won't support IPv6 with extension headers for now.

    if ((hdr[0] & 0xf0) == 0x40)
    {
        // IPv4: the lower 4 bits of the first byte of the IP header
        // contain the header length in units of dwords (32 bits).
        ip_hdr_len = ((hdr[0] & 0xf) << 2);
    }

    return ip_hdr_len;
}
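/* Example: for a plain IPv4 header the first byte is 0x45 (version 4, IHL 5),
 * so get_ip_hdr_len() returns 5 << 2 = 20 bytes; any IPv6 header yields the
 * fixed 40 bytes, since extension headers are not handled here. */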
static void
encap_pkt_parsing(struct _lm_device_t *pdev,
                  lm_packet_t         *pkt)
{
    u16_t tmp, inner_ip_hdr_len, tcp_length;
    u32_t psuedo_cksum;
    u8_t* hdr;

    // Encapsulated packet:
    // outer mac | outer ip | gre | inner mac | inner ip | tcp
    // The minimum encapsulated packet size is:
    // two mac headers + gre header size + tcp header size + two ipv4 headers.
    if (pkt->l2pkt_rx_info->total_packet_size < (2*ETHERNET_PACKET_HEADER_SIZE + 2*20 + ETHERNET_GRE_SIZE + 20))
    {
        return;
    }

    // Set hdr to the outer ip header.
    hdr = pkt->l2pkt_rx_info->mem_virt + pdev->params.rcv_buffer_offset + ETHERNET_PACKET_HEADER_SIZE;
    if (pkt->l2pkt_rx_info->flags & LM_RX_FLAG_VALID_VLAN_TAG)
    {
        hdr += ETHERNET_VLAN_TAG_SIZE;
    }

    // In case this is not a standard ETH packet (e.g. management, or in general
    // not ipv4/ipv6), it is for sure not gre, so we can stop here.
    // If the outer header is ipv4, the protocol is the ninth octet;
    // if the outer header is ipv6, the next header is the sixth octet.
    if (!(((pkt->l2pkt_rx_info->flags & LM_RX_FLAG_IS_IPV4_DATAGRAM) && (hdr[9] == 0x2f)) ||
          ((pkt->l2pkt_rx_info->flags & LM_RX_FLAG_IS_IPV6_DATAGRAM) && (hdr[6] == 0x2f))))
    {
        // This is not an encapsulated packet, no gre tunneling.
        // On ipv6 we don't support extension headers.
        return;
    }

    // Get the length of the outer ip header and set hdr to the gre header.
    hdr += get_ip_hdr_len(hdr);

    /* GRE header:
       | Bits 0-4 | 5-7   | 8-12  | 13-15   | 16-31         |
       | C|0|K|S  | Recur | Flags | Version | Protocol Type |
       | Checksum (optional)      | Reserved                |
       | Key (optional)                                     |
       | Sequence Number (optional)                         | */

    // The checksum present bit must be 0, the key present bit must be 1, the
    // sequence number present bit must be 0, and the protocol type must equal
    // 0x6558 (for encapsulating ethernet packets in GRE).
    if (((hdr[0] & 0xb0) != 0x20) || (hdr[2] != 0x65) || (hdr[3] != 0x58))
    {
        return;
    }

    // Set hdr to the inner mac header.
    hdr += ETHERNET_GRE_SIZE;

    // The first two octets of the tag are the Tag Protocol Identifier (TPID) value of 0x8100.
    // It is located in the same place as the EtherType/Length field in untagged frames.
    if ((hdr[12] == 0x81) && (hdr[13] == 0x00))
    {
        hdr += ETHERNET_VLAN_TAG_SIZE;
    }

    // Set hdr to the inner ip header.
    hdr += ETHERNET_PACKET_HEADER_SIZE;

    // Get the length of the inner ip header.
    inner_ip_hdr_len = get_ip_hdr_len(hdr);

    if ((hdr[0] & 0xf0) == 0x40)
    {
        // The inner ip header is ipv4.
        // If the ip header checksum of the outer header is ok, validate the ip
        // checksum of the inner header.
        if (pkt->l2pkt_rx_info->flags & LM_RX_FLAG_IP_CKSUM_IS_GOOD)
        {
            // validate the checksum
            if (!validate_cksum(calc_cksum((u16_t *)hdr, inner_ip_hdr_len, 0)))
            {
                SET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_IP_CKSUM_IS_BAD);
                RESET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_IP_CKSUM_IS_GOOD);
            }
        }

        // Check that the protocol field is tcp.
        if (hdr[9] != 0x06)
        {
            return;
        }

        // Create the pseudo header:
        /* | Bit offset | 0-7   | 8-15     | 16-31      |
           | 0          | Source address                |
           | 32         | Destination address           |
           | 64         | Zeros | Protocol | TCP length | */

        // Add one byte of zeros + the protocol to the sum,
        // and add the source and destination addresses.
        psuedo_cksum = calc_cksum((u16_t *)&hdr[12], 8, 0x06);
        // Calculate the tcp length.
        mm_memcpy(&tmp, &hdr[2], sizeof(u16_t));
        tcp_length = NTOH16(tmp) - inner_ip_hdr_len;
        // The TCP length field is the length of the TCP header and data (measured in octets).
        psuedo_cksum += tcp_length;
    }
    else if ((hdr[0] & 0xf0) == 0x60)
    {
        // The inner ip header is ipv6.
        // Check that the next header field is tcp.
        if (hdr[6] != 0x06)
        {
            return;
        }

        // Create the pseudo header:
        /* | Bit offset | 0-7 | 8-15 | 16-23 | 24-31       |
           | 0          | Source address                   |
           | 128        | Destination address              |
           | 256        | TCP length                       |
           | 288        | Zeros              | Next header | */

        // Add three bytes of zeros + the protocol to the sum,
        // and add the source and destination addresses.
        psuedo_cksum = calc_cksum((u16_t *)&hdr[8], 32, 0x06);
        // Calculate the tcp length.
        // In the ip header: the size of the payload in octets, including any extension headers.
        mm_memcpy(&tmp, &hdr[4], sizeof(u16_t));
        // Subtract the length of the extension headers.
        tcp_length = NTOH16(tmp) - (inner_ip_hdr_len - 40);
        psuedo_cksum += tcp_length;
    }
    else
    {
        // Neither ipv4 nor ipv6, nothing to validate.
        return;
    }

    // Set hdr to the tcp header.
    hdr += inner_ip_hdr_len;

    SET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_IS_TCP_SEGMENT);
    // Calculate the checksum of the rest of the packet and validate it.
    if (validate_cksum(calc_cksum((u16_t *)hdr, tcp_length, psuedo_cksum)))
    {
        SET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_TCP_CKSUM_IS_GOOD);
        RESET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_TCP_CKSUM_IS_BAD);
    }
    else
    {
        SET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_TCP_CKSUM_IS_BAD);
        RESET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_TCP_CKSUM_IS_GOOD);
    }
}
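/* Offset walk performed by encap_pkt_parsing() above, assuming no VLAN tags:
 * outer MAC (ETHERNET_PACKET_HEADER_SIZE) -> outer IP (get_ip_hdr_len) ->
 * GRE (ETHERNET_GRE_SIZE) -> inner MAC -> inner IP -> TCP. The TCP checksum
 * is seeded with the pseudo-header sum built from the inner IP addresses,
 * protocol 6 and the TCP length, as in RFC 793/2460. */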
/*******************************************************************************
 * Here the RCQ chain is the chain coordinated with the status block; that is,
 * the index in the status block describes the RCQ and NOT the rx_bd chain, as
 * was the case in Teton. We run on the delta between the new consumer index
 * of the RCQ, which we get from the sb, and the old consumer index of the RCQ.
 * For both slow and fast path, the consumer of the RCQ is always incremented.
 *
 * The assumption which we must stick to all the way is: the RCQ and the Rx bd
 * chain have the same size at all times! Otherwise, so help us Alan Bertkey!
 ******************************************************************************/
u32_t
lm_get_packets_rcvd( struct _lm_device_t  *pdev,
                     u32_t const           chain_idx,
                     s_list_t             *rcvd_list,
                     struct _sp_cqes_info *sp_cqes)
{
    lm_rx_chain_t*    rxq_chain    = &LM_RXQ(pdev, chain_idx); // get a hold of the matching Rx bd chain according to index
    lm_rcq_chain_t*   rcq_chain    = &LM_RCQ(pdev, chain_idx); // get a hold of the matching RCQ chain according to index
    lm_bd_chain_t*    rx_chain_bd  = &LM_RXQ_CHAIN_BD(pdev, chain_idx);
    lm_bd_chain_t*    rx_chain_sge = LM_RXQ_SGE_PTR_IF_VALID(pdev, chain_idx);
    lm_tpa_chain_t*   tpa_chain    = &LM_TPA(pdev, chain_idx);
    union eth_rx_cqe* cqe          = NULL;
    lm_packet_t*      pkt          = NULL;
    u32_t             pkt_cnt      = 0;
    u16_t             rx_old_idx   = 0;
    u16_t             cq_new_idx   = 0;
    u16_t             cq_old_idx   = 0;
    enum eth_rx_cqe_type cqe_type  = MAX_ETH_RX_CQE_TYPE;

    DbgMessage(pdev, INFORMl2, "lm_get_packets_rcvd inside!\n");

    /* make sure to zeroize the sp_cqes... */
    mm_mem_zero( sp_cqes, sizeof(struct _sp_cqes_info) );

    /* Get the new consumer idx. The bd's between rcq_new_idx and rcq_old_idx
     * are bd's containing receive packets.
     */
    cq_new_idx = mm_le16_to_cpu(*(rcq_chain->hw_con_idx_ptr));

    /* The consumer index of the RCQ only may stop at the end of a page boundary.
     * In this case, we need to advance it to the next one.
     * Here we do not increase the cons_bd as well! This is because we're dealing
     * with the new cons index and not with the actual old one for which, as we
     * progress, we need to maintain the bd_cons as well.
     */
    if((cq_new_idx & lm_bd_chain_usable_bds_per_page(&rcq_chain->bd_chain)) == lm_bd_chain_usable_bds_per_page(&rcq_chain->bd_chain))
    {
        cq_new_idx += lm_bd_chain_bds_skip_eop(&rcq_chain->bd_chain);
    }

    DbgBreakIfFastPath( rx_chain_sge && !lm_bd_chains_are_consistent( rx_chain_sge, rx_chain_bd ) );

    rx_old_idx = lm_bd_chain_cons_idx(rx_chain_bd);
    cq_old_idx = lm_bd_chain_cons_idx(&rcq_chain->bd_chain);

    // There is no change in the RCQ consumer index, so exit!
    if (cq_old_idx == cq_new_idx)
    {
        DbgMessage(pdev, INFORMl2rx, "there is no change in the RCQ consumer index so exit!\n");
        return pkt_cnt;
    }

    while(cq_old_idx != cq_new_idx)
    {
        DbgBreakIfFastPath(S16_SUB(cq_new_idx, cq_old_idx) <= 0);
        // Get hold of the cqe, and find out what its type corresponds to.
        cqe = (union eth_rx_cqe *)lm_bd_chain_consume_bd(&rcq_chain->bd_chain);
        DbgBreakIfFastPath(cqe == NULL);

        // Update the cons of the RCQ and the bd_prod pointer of the RCQ as well!
        // This holds for both slow and fast path!
        cq_old_idx = lm_bd_chain_cons_idx(&rcq_chain->bd_chain);

        cqe_type = GET_FLAGS_WITH_OFFSET(cqe->ramrod_cqe.ramrod_type, COMMON_RAMROD_ETH_RX_CQE_TYPE, COMMON_RAMROD_ETH_RX_CQE_TYPE_SHIFT);
        DbgBreakIf(MAX_ETH_RX_CQE_TYPE <= cqe_type);

        // If the cqe is a ramrod, handle the ramrod and recycle the cqe.
        // TODO: replace this with the #defines: 1- eth ramrod, 2- toe init ofld ramrod
        switch(cqe_type)
        {
        case RX_ETH_CQE_TYPE_ETH_RAMROD:
        {
            /* 13/08/08 NirV: bugbug, temp workaround for dpc watch dog bug,
             * ignore toe completions on L2 ring - initiate offload */
            if (cqe->ramrod_cqe.conn_type != TOE_CONNECTION_TYPE)
            {
                if (ERR_IF(sp_cqes->idx >= MAX_NUM_SPE))
                {
                    DbgBreakMsgFastPath("too many spe completed\n");
                    /* we shouldn't get here - there is something very wrong if we did... in this case we will risk
                     * completing the ramrods - even though we're holding a lock!!! */
                    DbgBreakIfAll(sp_cqes->idx >= MAX_NUM_SPE);
                }

                mm_memcpy((void*)(&(sp_cqes->sp_cqe[sp_cqes->idx++])), (const void*)cqe, sizeof(*cqe));
            }

            // Update the prod of the RCQ - by this, we recycled the CQE.
            lm_bd_chain_bd_produced(&rcq_chain->bd_chain);

            // In case of a ramrod, pop out the Rx bd and push it to the free descriptors list.
            pkt = (lm_packet_t *) s_list_pop_head(&rxq_chain->active_descq);

            DbgBreakIfFastPath(pkt == NULL);

            s_list_push_tail( &LM_RXQ(pdev, chain_idx).free_descq,
                              &pkt->link );

            break;
        }
        case RX_ETH_CQE_TYPE_ETH_FASTPATH:
        case RX_ETH_CQE_TYPE_ETH_START_AGG: // Fall-through case
        {   // Enter here in case the cqe is a fast path type (data).
            u16_t parse_flags = 0;

            DbgMessage(pdev, INFORMl2rx, "lm_get_packets_rcvd- it is fast path, func=%d\n", FUNC_ID(pdev));

            DbgBreakIf( (RX_ETH_CQE_TYPE_ETH_START_AGG == cqe_type) &&
                        (lm_tpa_state_disable == tpa_chain->state));

            pkt = (lm_packet_t *) s_list_pop_head(&rxq_chain->active_descq);
            parse_flags = mm_le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags);

            DbgBreakIfFastPath( NULL == pkt );

            DbgBreakIfFastPath(SIG(pkt) != L2PACKET_RX_SIG);

#if defined(L2_RX_BUF_SIG)
            /* make sure signatures exist before and after the buffer */
            DbgBreakIfFastPath(SIG(pkt->u1.rx.mem_virt - pdev->params.rcv_buffer_offset) != L2PACKET_RX_SIG);
            DbgBreakIfFastPath(END_SIG(pkt->u1.rx.mem_virt, MAX_L2_CLI_BUFFER_SIZE(pdev, chain_idx)) != L2PACKET_RX_SIG);
#endif /* L2_RX_BUF_SIG */

            lm_bd_chain_bds_consumed(rx_chain_bd, 1);
            if( rx_chain_sge )
            {
                lm_bd_chain_bds_consumed(rx_chain_sge, 1);
            }
#if defined(_NTDDK_)
            //PreFast 28182: PreFast reviewed and suppressed, this situation shouldn't occur.
#pragma warning (push)
#pragma warning( disable:28182 )
#endif
            /* Advance the rx_old_idx to the start bd_idx of the next packet. */
            rx_old_idx = pkt->u1.rx.next_bd_idx;
            //cq_old_idx = pkt->u1.rx.next_bd_idx;

            CLEAR_FLAGS( pkt->l2pkt_rx_info->flags );

            if(RX_ETH_CQE_TYPE_ETH_START_AGG == cqe_type)
            {
                lm_recv_set_pkt_len(pdev, pkt, mm_le16_to_cpu(cqe->fast_path_cqe.len_on_bd), chain_idx);
                // total_packet_size is only known in stop_TPA.

                DbgBreakIf(0 != cqe->fast_path_cqe.pkt_len_or_gro_seg_len);

                lm_tpa_start(pdev,
                             pkt,
                             chain_idx,
                             cqe->fast_path_cqe.queue_index);

                lm_tpa_start_flags_handle(pdev,
                                          &(cqe->fast_path_cqe),
                                          pkt,
                                          parse_flags);
            }
            else
            {
                lm_recv_set_pkt_len(pdev, pkt, mm_le16_to_cpu(cqe->fast_path_cqe.pkt_len_or_gro_seg_len), chain_idx);

                // In regular mode pkt->l2pkt_rx_info->size == pkt->l2pkt_rx_info->total_packet_size.
                // We need total_packet_size for Dynamic HC in order not to ask there whether we are in RSC or regular flow.
                pkt->l2pkt_rx_info->total_packet_size = pkt->l2pkt_rx_info->size;

                /* make sure the packet size is larger than the header size and smaller than the max packet size of the specific L2 client */
                DbgBreakIfFastPath((pkt->l2pkt_rx_info->total_packet_size < MIN_ETHERNET_PACKET_SIZE) || (pkt->l2pkt_rx_info->total_packet_size > MAX_CLI_PACKET_SIZE(pdev, chain_idx)));

                // ShayH: packet->size isn't used anymore by Windows; we directly put the data in l2pkt_rx_info->size and l2pkt_rx_info->total_packet_size.
                // Need to ask if other UM clients use/need packet->size.
                pkt->size = pkt->l2pkt_rx_info->size;

                if(OOO_CID(pdev) == chain_idx)
                {
                    DbgBreakIfFastPath( ETH_FP_CQE_RAW != (GET_FLAGS( cqe->fast_path_cqe.type_error_flags, ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL) >>
                                                           ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL_SHIFT));

                    // TODO_OOO - check with flag
                    ASSERT_STATIC( sizeof(pkt->u1.rx.sgl_or_raw_data.raw_data) == sizeof(cqe->fast_path_cqe.sgl_or_raw_data.raw_data) );
                    mm_memcpy( pkt->u1.rx.sgl_or_raw_data.raw_data, cqe->fast_path_cqe.sgl_or_raw_data.raw_data, sizeof(pkt->u1.rx.sgl_or_raw_data.raw_data) );
                }
                else
                {
                    DbgBreakIfFastPath( ETH_FP_CQE_REGULAR != (GET_FLAGS( cqe->fast_path_cqe.type_error_flags, ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL) >>
                                                               ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL_SHIFT) );

                    lm_regular_flags_handle(pdev,
                                            &(cqe->fast_path_cqe),
                                            pkt,
                                            parse_flags);

                    if (GET_FLAGS(pdev->params.ofld_cap_to_ndis, LM_OFFLOAD_ENCAP_PACKET))
                    {
                        // SW rx checksum for gre encapsulated packets.
                        encap_pkt_parsing(pdev, pkt);
                    }
                }

                s_list_push_tail(rcvd_list, &pkt->link);
                pkt_cnt++;
            }

            if (GET_FLAGS(cqe->fast_path_cqe.status_flags, ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
            {
                SET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_VALID_HASH_VALUE);
                *pkt->u1.rx.hash_val_ptr = mm_le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
            }

            if(GET_FLAGS(parse_flags, PARSING_FLAGS_INNER_VLAN_EXIST))
            {
                u16_t vlan_tag = mm_le16_to_cpu(cqe->fast_path_cqe.vlan_tag);

                DbgMessage(pdev, INFORMl2, "vlan frame received: %x\n", vlan_tag);
                /* The fw always sets ETH_FAST_PATH_RX_CQE_VLAN_TAG_FLG and passes the vlan
                   tag when a packet with a vlan arrives, but it removes the vlan from the
                   packet only when it is configured to remove vlans using
                   params.vlan_removal_enable */
                if ((!pdev->params.keep_vlan_tag) &&
                    ( OOO_CID(pdev) != chain_idx))
                {
                    SET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_VALID_VLAN_TAG);
                    pkt->l2pkt_rx_info->vlan_tag = vlan_tag;
                    DbgMessage(pdev, INFORMl2rx, "vlan removed from frame: %x\n", vlan_tag);
                }
            }
#if defined(_NTDDK_)
#pragma warning (pop)
#endif

            if(GET_FLAGS(parse_flags, PARSING_FLAGS_FRAGMENTATION_STATUS))
            {
                LM_COMMON_DRV_STATS_ATOMIC_INC_ETH(pdev, rx_ipv4_frag_count);
            }
            if(GET_FLAGS(parse_flags, PARSING_FLAGS_LLC_SNAP))
            {
                LM_COMMON_DRV_STATS_ATOMIC_INC_ETH(pdev, rx_llc_snap_count);
            }
            if(GET_FLAGS(parse_flags, PARSING_FLAGS_IP_OPTIONS) &&
               GET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_IS_IPV6_DATAGRAM))
            {
                LM_COMMON_DRV_STATS_ATOMIC_INC_ETH(pdev, rx_ipv6_ext_count);
            }

            /* We used to assert that if we got the PHY_DECODE_ERROR it was always a result of DROP_MAC_ERR; since we don't
             * configure DROP_MAC_ERR anymore, we don't expect this flag to ever be on. */
            DbgBreakIfFastPath( GET_FLAGS(cqe->fast_path_cqe.type_error_flags, ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG) );

            DbgBreakIfFastPath(cqe->fast_path_cqe.type_error_flags &
                               ~(ETH_FAST_PATH_RX_CQE_TYPE |
                                 ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG |
                                 ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
                                 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG |
                                 ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL));

            break;
        }
        case RX_ETH_CQE_TYPE_ETH_STOP_AGG:
        {
            DbgBreakIf( lm_tpa_state_disable == tpa_chain->state );

            pkt_cnt = lm_tpa_stop(pdev,
                                  rcvd_list,
                                  &(cqe->end_agg_cqe),
                                  chain_idx,
                                  pkt_cnt,
                                  cqe->end_agg_cqe.queue_index);

            // Update the prod of the RCQ - by this, we recycled the CQE.
            lm_bd_chain_bd_produced(&rcq_chain->bd_chain);

            break;
        }
        case MAX_ETH_RX_CQE_TYPE:
        default:
        {
            DbgBreakMsg("CQE type not supported");
            break;
        }
        }
    }

    // TODO: Move the index update to a more suitable place.
    rx_chain_bd->cons_idx = rx_old_idx;
    if( rx_chain_sge )
    {
        rx_chain_sge->cons_idx = rx_old_idx;
    }

    // Notify the fw of the prod.
    lm_rx_set_prods(pdev, rcq_chain->iro_prod_offset, &rcq_chain->bd_chain, rx_chain_bd, rx_chain_sge, chain_idx);

    DbgMessage(pdev, INFORMl2rx, "lm_get_packets_rcvd- bd con: %d bd prod: %d \n",
               lm_bd_chain_cons_idx(rx_chain_bd), lm_bd_chain_prod_idx(rx_chain_bd));
    DbgMessage(pdev, INFORMl2rx, "lm_get_packets_rcvd- cq con: %d cq prod: %d \n",
               lm_bd_chain_cons_idx(&rcq_chain->bd_chain), lm_bd_chain_prod_idx(&rcq_chain->bd_chain));

    return pkt_cnt;
} /* lm_get_packets_rcvd */
lm_status_t
lm_complete_ramrods(
    struct _lm_device_t  *pdev,
    struct _sp_cqes_info *sp_cqes)
{
    u32_t idx;

    for (idx = 0; idx < sp_cqes->idx; idx++)
    {
        lm_eth_init_command_comp(pdev, &(sp_cqes->sp_cqe[idx].ramrod_cqe));
    }

    return LM_STATUS_SUCCESS;
}
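/* Typical calling sequence (hypothetical caller; names and locking are
 * assumptions, not taken from this file): the receive DPC drains the RCQ,
 * completes any ramrod CQEs outside the fast-path lock, and then replenishes
 * the rx ring. */
#if 0
pkt_cnt = lm_get_packets_rcvd(pdev, chain_idx, &rcvd_list, &sp_cqes);
lm_complete_ramrods(pdev, &sp_cqes);
lm_post_buffers(pdev, chain_idx, NULL /* pop from free_descq */, FALSE /* regular chain */);
#endif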
/* Called by the UM whenever packets are returned by the client.
   The rxq lock is taken by the caller. */
void
lm_return_packet_bytes( struct _lm_device_t *pdev,
                        u32_t const          qidx,
                        u32_t const          returned_bytes)
{
    lm_rx_chain_t *rxq = &LM_RXQ(pdev, qidx);

    rxq->ret_bytes += returned_bytes;

    /* aggregate updates over PCI */

    /* HC_RET_BYTES_TH = min(l2_hc_threshold0 / 2, 16KB) */
#define HC_RET_BYTES_TH(pdev) (((pdev)->params.hc_threshold0[SM_RX_ID] < 32768) ? ((pdev)->params.hc_threshold0[SM_RX_ID] >> 1) : 16384)

    /* TODO: Future: Add #updatesTH = 20 */

    /* time to update fw ? */
    if(S32_SUB(rxq->ret_bytes, rxq->ret_bytes_last_fw_update + HC_RET_BYTES_TH(pdev)) >= 0)
    {
        /* The test below is to disable dynamic HC for the iSCSI chains */
        if (qidx < LM_MAX_RSS_CHAINS(pdev) && IS_PFDEV(pdev)) /* should be fine; if not, you can go for the less robust case of != LM_CLI_RX_CHAIN_IDX(pdev, LM_CLI_IDX_ISCSI) */
        {
            /* There are HC_USTORM_SB_NUM_INDICES (4) index values for each SB to set, and we're using the corresponding U indexes from the microcode consts */
            LM_INTMEM_WRITE32(PFDEV(pdev), rxq->hc_sb_info.iro_dhc_offset, rxq->ret_bytes, BAR_CSTRORM_INTMEM);
            rxq->ret_bytes_last_fw_update = rxq->ret_bytes;
        } else if (IS_VFDEV(pdev)) {
            VF_REG_WR(pdev, VF_BAR0_CSDM_QUEUES_OFFSET + rxq->hc_sb_info.iro_dhc_offset, rxq->ret_bytes);
            rxq->ret_bytes_last_fw_update = rxq->ret_bytes;
        }
    }
}