/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/* Copyright © 2003-2011 Emulex. All rights reserved. */

/*
 * Source file containing the Receive Path handling
 * functions
 */

#include <oce_impl.h>
void oce_rx_pool_free(char *arg);
static void oce_rqb_dtor(oce_rq_bdesc_t *rqbd);
static int oce_rqb_ctor(oce_rq_bdesc_t *rqbd, struct oce_rq *rq,
    size_t size, int flags);

static inline mblk_t *oce_rx(struct oce_dev *dev, struct oce_rq *rq,
    struct oce_nic_rx_cqe *cqe);
static inline mblk_t *oce_rx_bcopy(struct oce_dev *dev,
    struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
static int oce_rq_charge(struct oce_rq *rq, uint32_t nbufs, boolean_t repost);
static void oce_rx_insert_tag(mblk_t *mp, uint16_t vtag);
static void oce_set_rx_oflags(mblk_t *mp, struct oce_nic_rx_cqe *cqe);
static inline void oce_rx_drop_pkt(struct oce_rq *rq,
    struct oce_nic_rx_cqe *cqe);
static oce_rq_bdesc_t *oce_rqb_alloc(struct oce_rq *rq);
static void oce_rqb_free(struct oce_rq *rq, oce_rq_bdesc_t *rqbd);
static void oce_rq_post_buffer(struct oce_rq *rq, int nbufs);
#pragma inline(oce_rx)
#pragma inline(oce_rx_bcopy)
#pragma inline(oce_rq_charge)
#pragma inline(oce_rx_insert_tag)
#pragma inline(oce_set_rx_oflags)
#pragma inline(oce_rx_drop_pkt)
#pragma inline(oce_rqb_alloc)
#pragma inline(oce_rqb_free)
#pragma inline(oce_rq_post_buffer)
static ddi_dma_attr_t oce_rx_buf_attr = {
        DMA_ATTR_V0,            /* version number */
        0x0000000000000000ull,  /* low address */
        0xFFFFFFFFFFFFFFFFull,  /* high address */
        0x00000000FFFFFFFFull,  /* dma counter max */
        OCE_DMA_ALIGNMENT,      /* alignment */
        0x000007FF,             /* burst sizes */
        0x00000001,             /* minimum transfer size */
        0x00000000FFFFFFFFull,  /* maximum transfer size */
        0xFFFFFFFFFFFFFFFFull,  /* maximum segment size */
        1,                      /* scatter/gather list length */
        0x00000001,             /* granularity */
        DDI_DMA_FLAGERR|DDI_DMA_RELAXED_ORDERING        /* DMA flags */
};
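/*
 * Note: the attributes above request a single DMA cookie (scatter/gather
 * length of 1), so every receive buffer is one physically contiguous
 * fragment.  oce_rqb_cache_create() below sizes each buffer as
 * cfg.frag_size plus OCE_RQE_BUF_HEADROOM of reserved headroom.
 */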
/*
 * function to create a DMA buffer pool for RQ
 *
 * rq - pointer to the RQ to attach the buffer pool to
 * buf_size - size of each buffer
 *
 * return DDI_SUCCESS => success, DDI_FAILURE otherwise
 */
int
oce_rqb_cache_create(struct oce_rq *rq, size_t buf_size)
{
        oce_rq_bdesc_t *rqbd;
        int size;
        int cnt;
        int ret;

        _NOTE(ARGUNUSED(buf_size));
        rqbd = rq->rq_bdesc_array;
        size = rq->cfg.frag_size + OCE_RQE_BUF_HEADROOM;
        for (cnt = 0; cnt < rq->cfg.nbufs; cnt++, rqbd++) {
                rq->rqb_freelist[cnt] = rqbd;
                ret = oce_rqb_ctor(rqbd, rq,
                    size, (DDI_DMA_RDWR|DDI_DMA_STREAMING));
                if (ret != DDI_SUCCESS) {
                        goto rqb_fail;
                }
        }
        rq->rqb_free = rq->cfg.nbufs;
        rq->rqb_next_free = 0;
        return (DDI_SUCCESS);

rqb_fail:
        oce_rqb_cache_destroy(rq);
        return (DDI_FAILURE);
} /* oce_rqb_cache_create */
/*
 * function to destroy the RQ DMA buffer cache
 *
 * rq - pointer to rq structure
 *
 * return none
 */
void
oce_rqb_cache_destroy(struct oce_rq *rq)
{
        oce_rq_bdesc_t *rqbd = NULL;
        int cnt;

        rqbd = rq->rq_bdesc_array;
        for (cnt = 0; cnt < rq->cfg.nbufs; cnt++, rqbd++) {
                oce_rqb_dtor(rqbd);
        }
} /* oce_rqb_cache_destroy */
/*
 * RQ buffer destructor function
 *
 * rqbd - pointer to rq buffer descriptor
 *
 * return none
 */
static void
oce_rqb_dtor(oce_rq_bdesc_t *rqbd)
{
        if ((rqbd == NULL) || (rqbd->rq == NULL)) {
                return;
        }
        if (rqbd->mp != NULL) {
                rqbd->fr_rtn.free_arg = NULL;
                freemsg(rqbd->mp);
                rqbd->mp = NULL;
        }
        oce_free_dma_buffer(rqbd->rq->parent, rqbd->rqb);
} /* oce_rqb_dtor */
/*
 * RQ buffer constructor function
 *
 * rqbd - pointer to rq buffer descriptor
 * rq - pointer to RQ structure
 * size - size of the buffer
 * flags - KM_SLEEP OR KM_NOSLEEP
 *
 * return DDI_SUCCESS => success, DDI_FAILURE otherwise
 */
static int
oce_rqb_ctor(oce_rq_bdesc_t *rqbd, struct oce_rq *rq, size_t size, int flags)
{
        oce_dma_buf_t *dbuf;
        struct oce_dev *dev = rq->parent;

        dbuf = oce_alloc_dma_buffer(dev, size, &oce_rx_buf_attr, flags);
        if (dbuf == NULL) {
                return (DDI_FAILURE);
        }

        /* Set the call back function parameters */
        rqbd->fr_rtn.free_func = (void (*)())oce_rx_pool_free;
        rqbd->fr_rtn.free_arg = (caddr_t)(void *)rqbd;
        rqbd->mp = desballoc((uchar_t *)(dbuf->base),
            dbuf->size, 0, &rqbd->fr_rtn);
        if (rqbd->mp == NULL) {
                oce_free_dma_buffer(dev, dbuf);
                return (DDI_FAILURE);
        }
        rqbd->rqb = dbuf;
        rqbd->rq = rq;
        rqbd->frag_addr.dw.addr_lo = ADDR_LO(dbuf->addr + OCE_RQE_BUF_HEADROOM);
        rqbd->frag_addr.dw.addr_hi = ADDR_HI(dbuf->addr + OCE_RQE_BUF_HEADROOM);
        rqbd->mp->b_rptr = (uchar_t *)rqbd->rqb->base + OCE_RQE_BUF_HEADROOM;

        return (DDI_SUCCESS);
} /* oce_rqb_ctor */
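/*
 * Note: each buffer gets its mblk up front via desballoc(), with
 * oce_rx_pool_free() registered as the free routine so the descriptor is
 * recycled back to the pool when the stack frees the message.  b_rptr is
 * advanced past OCE_RQE_BUF_HEADROOM, which leaves room in front of the
 * frame for oce_rx_insert_tag() to prepend a VLAN tag without another
 * allocation.
 */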
/*
 * RQ buffer allocator function
 *
 * rq - pointer to RQ structure
 *
 * return pointer to RQ buffer descriptor
 */
static inline oce_rq_bdesc_t *
oce_rqb_alloc(struct oce_rq *rq)
{
        oce_rq_bdesc_t *rqbd;
        uint32_t free_index;

        free_index = rq->rqb_next_free;
        rqbd = rq->rqb_freelist[free_index];
        rq->rqb_freelist[free_index] = NULL;
        rq->rqb_next_free = GET_Q_NEXT(free_index, 1, rq->cfg.nbufs);
        return (rqbd);
} /* oce_rqb_alloc */
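/*
 * Note: oce_rqb_alloc() above takes descriptors from rq->rqb_freelist at
 * rqb_next_free with no lock, while oce_rqb_free() below returns them at
 * rqb_rc_head under rc_lock and credits rq->rqb_free atomically; buffers
 * can come back from the STREAMS free routine (oce_rx_pool_free) as well
 * as from the Rx path itself.
 */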
/*
 * function to free the RQ buffer
 *
 * rq - pointer to RQ structure
 * rqbd - pointer to receive buffer descriptor
 *
 * return none
 */
static void
oce_rqb_free(struct oce_rq *rq, oce_rq_bdesc_t *rqbd)
{
        uint32_t free_index;

        mutex_enter(&rq->rc_lock);
        free_index = rq->rqb_rc_head;
        rq->rqb_freelist[free_index] = rqbd;
        rq->rqb_rc_head = GET_Q_NEXT(free_index, 1, rq->cfg.nbufs);
        mutex_exit(&rq->rc_lock);
        atomic_add_32(&rq->rqb_free, 1);
} /* oce_rqb_free */
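/*
 * function to ring the doorbell for buffers newly posted to the RQ;
 * postings larger than OCE_MAX_RQ_POSTS are split into multiple doorbell
 * writes, and rq->buf_avail is credited as they are posted
 *
 * rq - pointer to RQ structure
 * nbufs - number of buffers posted
 *
 * return none
 */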
static void oce_rq_post_buffer(struct oce_rq *rq, int nbufs)
{
        pd_rxulp_db_t rxdb_reg;
        int count;
        struct oce_dev *dev = rq->parent;

        rxdb_reg.dw0 = 0;
        rxdb_reg.bits.qid = rq->rq_id & DB_RQ_ID_MASK;

        for (count = nbufs/OCE_MAX_RQ_POSTS; count > 0; count--) {
                rxdb_reg.bits.num_posted = OCE_MAX_RQ_POSTS;
                OCE_DB_WRITE32(dev, PD_RXULP_DB, rxdb_reg.dw0);
                rq->buf_avail += OCE_MAX_RQ_POSTS;
                nbufs -= OCE_MAX_RQ_POSTS;
        }
        if (nbufs > 0) {
                rxdb_reg.bits.num_posted = nbufs;
                OCE_DB_WRITE32(dev, PD_RXULP_DB, rxdb_reg.dw0);
                rq->buf_avail += nbufs;
        }
} /* oce_rq_post_buffer */
/*
 * function to charge a given rq with buffers from a pool's free list
 *
 * rq - pointer to the RQ to charge
 * nbufs - number of buffers to be charged
 * repost - whether the buffers are reposted from the shadow ring
 *
 * return number of rqe's charged.
 */
static int
oce_rq_charge(struct oce_rq *rq, uint32_t nbufs, boolean_t repost)
{
        struct oce_nic_rqe *rqe;
        oce_rq_bdesc_t *rqbd;
        oce_rq_bdesc_t **shadow_rq;
        int cnt;
        uint32_t cur_index;
        oce_ring_buffer_t *ring;

        shadow_rq = rq->shadow_ring;
        ring = rq->ring;
        cur_index = ring->cidx;

        for (cnt = 0; cnt < nbufs; cnt++) {
                if (!repost) {
                        rqbd = oce_rqb_alloc(rq);
                } else {
                        /* just repost the buffers from shadow ring */
                        rqbd = shadow_rq[cur_index];
                        cur_index = GET_Q_NEXT(cur_index, 1, ring->num_items);
                }
                rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring,
                    struct oce_nic_rqe);
                rqe->u0.s.frag_pa_lo = rqbd->frag_addr.dw.addr_lo;
                rqe->u0.s.frag_pa_hi = rqbd->frag_addr.dw.addr_hi;
                shadow_rq[rq->ring->pidx] = rqbd;
                DW_SWAP(u32ptr(rqe), sizeof (struct oce_nic_rqe));
                RING_PUT(rq->ring, 1);
        }

        return (cnt);
} /* oce_rq_charge */
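/*
 * Note: the shadow ring mirrors the hardware RQ; the entry at the producer
 * index records which buffer descriptor backs each RQE, so the completion
 * path (oce_rx/oce_rx_bcopy) and the teardown paths can find the buffers
 * again by consumer index.
 */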
/*
 * function to release the posted buffers
 *
 * rq - pointer to the RQ to discharge
 *
 * return none
 */
void
oce_rq_discharge(struct oce_rq *rq)
{
        oce_rq_bdesc_t *rqbd;
        oce_rq_bdesc_t **shadow_rq;

        shadow_rq = rq->shadow_ring;
        /* Free the posted buffers since RQ is destroyed already */
        while ((int32_t)rq->buf_avail > 0) {
                rqbd = shadow_rq[rq->ring->cidx];
                oce_rqb_free(rq, rqbd);
                rq->buf_avail--;
                RING_GET(rq->ring, 1);
        }
} /* oce_rq_discharge */
/*
 * function to process a single packet
 *
 * dev - software handle to the device
 * rq - pointer to the RQ on which the packet arrived
 * cqe - pointer to the Completion Q entry
 *
 * return mblk pointer => success, NULL => error
 */
static inline mblk_t *
oce_rx(struct oce_dev *dev, struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
{
        mblk_t *mp = NULL;
        mblk_t *mblk_head = NULL;
        mblk_t **mblk_tail = NULL;
        int pkt_len;
        int frag_size;
        int32_t frag_cnt = 0;
        int32_t i;
        uint32_t cur_index;
        oce_rq_bdesc_t *rqbd;
        oce_ring_buffer_t *ring;

        frag_cnt = cqe->u0.s.num_fragments & 0x7;
        mblk_tail = &mblk_head;

        ring = rq->ring;
        cur_index = ring->cidx;

        /* Get the relevant Queue pointers */
        pkt_len = cqe->u0.s.pkt_size;
        for (i = 0; i < frag_cnt; i++) {
                rqbd = rq->shadow_ring[cur_index];
                if (rqbd->mp == NULL) {
                        rqbd->mp = desballoc((uchar_t *)rqbd->rqb->base,
                            rqbd->rqb->size, 0, &rqbd->fr_rtn);
                        if (rqbd->mp == NULL) {
                                return (NULL);
                        }
                        rqbd->mp->b_rptr =
                            (uchar_t *)rqbd->rqb->base + OCE_RQE_BUF_HEADROOM;
                }
                mp = rqbd->mp;
                frag_size = (pkt_len > rq->cfg.frag_size) ?
                    rq->cfg.frag_size : pkt_len;
                mp->b_wptr = mp->b_rptr + frag_size;
                pkt_len -= frag_size;
                mp->b_next = mp->b_cont = NULL;
                /* Chain the message mblks */
                *mblk_tail = mp;
                mblk_tail = &mp->b_cont;
                (void) DBUF_SYNC(rqbd->rqb, DDI_DMA_SYNC_FORCPU);
                cur_index = GET_Q_NEXT(cur_index, 1, ring->num_items);
        }

        if (mblk_head == NULL) {
                oce_log(dev, CE_WARN, MOD_RX, "%s", "oce_rx:no frags?");
                return (NULL);
        }

        /* replace the buffer with new ones */
        (void) oce_rq_charge(rq, frag_cnt, B_FALSE);
        atomic_add_32(&rq->pending, frag_cnt);
        return (mblk_head);
} /* oce_rx */
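/*
 * function to process a single packet by copying the received fragments
 * into one freshly allocated mblk, so the DMA buffers can be reposted
 * immediately
 *
 * dev - software handle to the device (unused)
 * rq - pointer to the RQ on which the packet arrived
 * cqe - pointer to the Completion Q entry
 *
 * return mblk pointer => success, NULL => error
 */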
static inline mblk_t *
oce_rx_bcopy(struct oce_dev *dev, struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
{
        mblk_t *mp = NULL;
        int pkt_len;
        int alloc_len;
        int32_t frag_cnt = 0;
        int frag_size;
        uint32_t cnt;
        uint32_t cur_index;
        unsigned char *rptr;
        oce_rq_bdesc_t *rqbd;
        oce_ring_buffer_t *ring;
        oce_rq_bdesc_t **shadow_rq;

        _NOTE(ARGUNUSED(dev));

        shadow_rq = rq->shadow_ring;
        pkt_len = cqe->u0.s.pkt_size;
        alloc_len = pkt_len + OCE_RQE_BUF_HEADROOM;
        frag_cnt = cqe->u0.s.num_fragments & 0x7;

        mp = allocb(alloc_len, BPRI_HI);
        if (mp == NULL) {
                return (NULL);
        }

        mp->b_rptr += OCE_RQE_BUF_HEADROOM;
        rptr = mp->b_rptr;
        mp->b_wptr = mp->b_rptr + pkt_len;

        ring = rq->ring;
        cur_index = ring->cidx;
        for (cnt = 0; cnt < frag_cnt; cnt++) {
                rqbd = shadow_rq[cur_index];
                frag_size = (pkt_len > rq->cfg.frag_size) ?
                    rq->cfg.frag_size : pkt_len;
                (void) DBUF_SYNC(rqbd->rqb, DDI_DMA_SYNC_FORCPU);
                bcopy(rqbd->rqb->base + OCE_RQE_BUF_HEADROOM, rptr, frag_size);
                rptr += frag_size;
                pkt_len -= frag_size;
                cur_index = GET_Q_NEXT(cur_index, 1, ring->num_items);
        }
        (void) oce_rq_charge(rq, frag_cnt, B_TRUE);
        return (mp);
} /* oce_rx_bcopy */
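/*
 * function to set the hardware checksum offload flags on an rx mblk,
 * based on the IP and L4 checksum-pass bits in the completion entry
 *
 * mp - mblk to tag
 * cqe - pointer to the Completion Q entry
 *
 * return none
 */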
static void
oce_set_rx_oflags(mblk_t *mp, struct oce_nic_rx_cqe *cqe)
{
        uint32_t csum_flags = 0;

        /* set flags */
        if (cqe->u0.s.ip_cksum_pass) {
                csum_flags |= HCK_IPV4_HDRCKSUM_OK;
        }

        if (cqe->u0.s.l4_cksum_pass) {
                csum_flags |= (HCK_FULLCKSUM | HCK_FULLCKSUM_OK);
        }

        (void) mac_hcksum_set(mp, 0, 0, 0, 0, csum_flags);
} /* oce_set_rx_oflags */
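/*
 * function to insert a VLAN tag into a received frame; the Ethernet
 * addresses are shifted back into the buffer headroom and the tag is
 * written in between
 *
 * mp - mblk containing the frame
 * vtag - VLAN tag from the completion entry
 *
 * return none
 */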
static void
oce_rx_insert_tag(mblk_t *mp, uint16_t vtag)
{
        struct ether_vlan_header *ehp;

        (void) memmove(mp->b_rptr - VTAG_SIZE,
            mp->b_rptr, 2 * ETHERADDRL);
        mp->b_rptr -= VTAG_SIZE;
        ehp = (struct ether_vlan_header *)voidptr(mp->b_rptr);
        ehp->ether_tpid = htons(ETHERTYPE_VLAN);
        ehp->ether_tci = LE_16(vtag);
} /* oce_rx_insert_tag */
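/*
 * function to drop a packet: its fragment buffers are returned straight
 * to the free pool and the RQ consumer index is advanced past them
 *
 * rq - pointer to the RQ
 * cqe - pointer to the Completion Q entry being dropped
 *
 * return none
 */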
static inline void
oce_rx_drop_pkt(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
{
        int frag_cnt;
        oce_rq_bdesc_t *rqbd;
        oce_rq_bdesc_t **shadow_rq;

        shadow_rq = rq->shadow_ring;
        for (frag_cnt = 0; frag_cnt < cqe->u0.s.num_fragments; frag_cnt++) {
                rqbd = shadow_rq[rq->ring->cidx];
                oce_rqb_free(rq, rqbd);
                RING_GET(rq->ring, 1);
        }
} /* oce_rx_drop_pkt */
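/*
 * Note: in the drain loop below, small packets (under dev->rx_bcopy_limit)
 * or completions that cannot reserve enough free buffers are handled by
 * oce_rx_bcopy(), which copies into a new mblk; everything else is handed
 * up zero-copy through oce_rx(), with the consumed buffers replaced via
 * oce_rq_charge().
 */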
/*
 * function to process a Receive queue
 *
 * arg - pointer to the RQ to drain
 *
 * return number of cqes processed
 */
uint16_t
oce_drain_rq_cq(void *arg)
{
        struct oce_nic_rx_cqe *cqe;
        struct oce_rq *rq;
        struct oce_cq *cq;
        struct oce_dev *dev;
        mblk_t *mp = NULL;
        mblk_t *mblk_head = NULL;
        mblk_t **mblk_tail = NULL;
        int32_t frag_cnt;
        uint16_t num_cqe = 0;

        rq = (struct oce_rq *)arg;
        dev = rq->parent;
        cq = rq->cq;
        mblk_tail = &mblk_head;

        cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);

        (void) DBUF_SYNC(cq->ring->dbuf, DDI_DMA_SYNC_FORKERNEL);
        /* dequeue till you reach an invalid cqe */
        while (RQ_CQE_VALID(cqe)) {
                DW_SWAP(u32ptr(cqe), sizeof (struct oce_nic_rx_cqe));
                frag_cnt = cqe->u0.s.num_fragments & 0x7;
                /* if insufficient buffers to charge then do copy */
                if ((cqe->u0.s.pkt_size < dev->rx_bcopy_limit) ||
                    (oce_atomic_reserve(&rq->rqb_free, frag_cnt) < 0)) {
                        mp = oce_rx_bcopy(dev, rq, cqe);
                } else {
                        mp = oce_rx(dev, rq, cqe);
                        if (mp == NULL) {
                                atomic_add_32(&rq->rqb_free, frag_cnt);
                                mp = oce_rx_bcopy(dev, rq, cqe);
                        }
                }
                if (mp != NULL) {
                        if (dev->function_mode & FLEX10_MODE) {
                                if (cqe->u0.s.vlan_tag_present &&
                                    cqe->u0.s.qnq) {
                                        oce_rx_insert_tag(mp,
                                            cqe->u0.s.vlan_tag);
                                }
                        } else if (cqe->u0.s.vlan_tag_present) {
                                oce_rx_insert_tag(mp, cqe->u0.s.vlan_tag);
                        }
                        oce_set_rx_oflags(mp, cqe);

                        *mblk_tail = mp;
                        mblk_tail = &mp->b_next;
                } else {
                        (void) oce_rq_charge(rq, frag_cnt, B_TRUE);
                }

                RING_GET(rq->ring, frag_cnt);
                rq->buf_avail -= frag_cnt;

                oce_rq_post_buffer(rq, frag_cnt);
                RQ_CQE_INVALIDATE(cqe);
                RING_GET(cq->ring, 1);
                cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
                    struct oce_nic_rx_cqe);
                num_cqe++;
                /* process max ring size */
                if (num_cqe > dev->rx_pkt_per_intr) {
                        break;
                }
        } /* for all valid CQEs */

        if (mblk_head != NULL) {
                mac_rx(dev->mac_handle, NULL, mblk_head);
        }
        oce_arm_cq(dev, cq->cq_id, num_cqe, B_TRUE);
        return (num_cqe);
} /* oce_drain_rq_cq */
/*
 * function to free the mblk data buffer back to the RQ pool
 *
 * arg - pointer to the receive buffer descriptor
 *
 * return none
 */
void
oce_rx_pool_free(char *arg)
{
        oce_rq_bdesc_t *rqbd;
        struct oce_rq *rq;

        /* During destroy, arg will be NULL */
        if (arg == NULL) {
                return;
        }

        /* retrieve the pointers from arg */
        rqbd = (oce_rq_bdesc_t *)(void *)arg;
        rq = rqbd->rq;
        rqbd->mp = desballoc((uchar_t *)rqbd->rqb->base,
            rqbd->rqb->size, 0, &rqbd->fr_rtn);
        if (rqbd->mp != NULL) {
                rqbd->mp->b_rptr =
                    (uchar_t *)rqbd->rqb->base + OCE_RQE_BUF_HEADROOM;
        }

        oce_rqb_free(rq, rqbd);
        (void) atomic_add_32(&rq->pending, -1);
} /* oce_rx_pool_free */
/*
 * function to stop the RX
 *
 * rq - pointer to RQ structure
 *
 * return none
 */
void
oce_clean_rq(struct oce_rq *rq)
{
        uint16_t num_cqe = 0;
        struct oce_cq *cq;
        struct oce_dev *dev;
        struct oce_nic_rx_cqe *cqe;
        int32_t ti = 0;

        dev = rq->parent;
        cq = rq->cq;
        cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
        /* dequeue till you reach an invalid cqe */
        for (ti = 0; ti < DEFAULT_DRAIN_TIME; ti++) {
                while (RQ_CQE_VALID(cqe)) {
                        DW_SWAP(u32ptr(cqe), sizeof (struct oce_nic_rx_cqe));
                        oce_rx_drop_pkt(rq, cqe);
                        atomic_add_32(&rq->buf_avail,
                            -(cqe->u0.s.num_fragments & 0x7));
                        oce_arm_cq(dev, cq->cq_id, 1, B_TRUE);
                        RQ_CQE_INVALIDATE(cqe);
                        RING_GET(cq->ring, 1);
                        cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
                            struct oce_nic_rx_cqe);
                        num_cqe++;
                }
        }
} /* oce_clean_rq */
/*
 * function to start the RX
 *
 * rq - pointer to RQ structure
 *
 * return none
 */
void
oce_start_rq(struct oce_rq *rq)
{
        int to_charge = 0;
        struct oce_dev *dev = rq->parent;

        to_charge = rq->cfg.q_len - rq->buf_avail;
        to_charge = min(to_charge, rq->rqb_free);
        atomic_add_32(&rq->rqb_free, -to_charge);
        (void) oce_rq_charge(rq, to_charge, B_FALSE);
        /* ok to do it here since Rx has not even started */
        oce_rq_post_buffer(rq, to_charge);
        oce_arm_cq(dev, rq->cq->cq_id, 0, B_TRUE);
} /* oce_start_rq */
/* Checks for pending rx buffers with Stack */
int
oce_rx_pending(struct oce_dev *dev, struct oce_rq *rq, int32_t timeout)
{
        int ti;

        _NOTE(ARGUNUSED(dev));

        for (ti = 0; ti < timeout; ti++) {
                if (rq->pending > 0) {
                        OCE_MSDELAY(10);
                        continue;
                } else {
                        rq->pending = 0;
                        break;
                }
        }
        return (rq->pending);
} /* oce_rx_pending */