2 * RPC Transport layer(for HNDRTE bus driver)
3 * Broadcom 802.11abg Networking Device Driver
5 * Copyright (C) 2012, Broadcom Corporation
8 * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom Corporation;
9 * the contents of this file may not be disclosed to third parties, copied
10 * or duplicated in any form, in whole or in part, without the prior
11 * written permission of Broadcom Corporation.
13 * $Id: bcm_rpc_tp_rte.c 314318 2012-02-10 20:15:12Z $
22 #include <bcmendian.h>
26 #include <bcm_rpc_tp.h>
/*
 * Debug message level for the dongle-side RPC transport, plus the
 * per-category trace macros gated on it.
 *
 * NOTE(review): this file is a line-wrapped numbered listing with original
 * lines missing; code below is kept byte-identical. RPC_TP_DBG/RPC_TP_AGG/
 * RPC_TP_DEAGG are defined twice -- the second, empty definitions presumably
 * live under an #else branch of a debug #ifdef whose directive lines are not
 * visible in this extract. Confirm against the original file before editing.
 */
29 static uint8 tp_level_bmac
= RPC_TP_MSG_DNGL_ERR_VAL
; /* RPC_TP_MSG_DNGL_ERR_VAL; */
30 #define RPC_TP_ERR(args) do {if (tp_level_bmac & RPC_TP_MSG_DNGL_ERR_VAL) printf args;} while (0)
32 #define RPC_TP_DBG(args) do {if (tp_level_bmac & RPC_TP_MSG_DNGL_DBG_VAL) printf args;} while (0)
33 #define RPC_TP_AGG(args) do {if (tp_level_bmac & RPC_TP_MSG_DNGL_AGG_VAL) printf args;} while (0)
34 #define RPC_TP_DEAGG(args) do {if (tp_level_bmac & RPC_TP_MSG_DNGL_DEA_VAL) printf args;} while (0)
36 #define RPC_TP_DBG(args)
37 #define RPC_TP_AGG(args)
38 #define RPC_TP_DEAGG(args)
41 /* CLIENT dongle drvier RPC Transport implementation
42 * HOST dongle driver uses DBUS, so it's in bcm_rpc_tp_dbus.c.
43 * This can be moved to bcm_rpc_th_dngl.c
/*
 * Per-instance state of the dongle-side RPC transport: tx-completion hook,
 * host-direction flow-control queue and watermarks, buffer accounting,
 * aggregation (agg) working state and counters, and de-aggregation (deagg)
 * counters.
 *
 * NOTE(review): this extract is missing original lines (e.g. fields such as
 * osh/ctx/rx_pkt/rx_context referenced elsewhere in this file, and the
 * struct's closing brace) -- the declaration as shown is incomplete; code
 * kept verbatim.
 */
46 struct rpc_transport_info
{
50 rpc_tx_complete_fn_t tx_complete
;
52 bool tx_flowctl
; /* Global RX (WL->RPC->BUS->Host) flowcontrol state */
53 struct spktq
*tx_flowctlq
; /* Queue to store pkts when in global RX flowcontrol */
54 uint8 tx_q_flowctl_hiwm
; /* Queue high watermask */
55 uint8 tx_q_flowctl_lowm
; /* Queue low watermask */
56 uint tx_q_flowctl_highwm_cnt
; /* hit high watermark counter */
57 uint8 tx_q_flowctl_segcnt
; /* Queue counter all segments(no. of LBUF) */
63 int buf_cnt_inuse
; /* outstanding buf(alloc, not freed) */
64 uint tx_cnt
; /* send successfully */
65 uint txerr_cnt
; /* send failed */
69 uint tx_flowctl_cnt
; /* tx flow control transition times */
70 bool tx_flowcontrolled
; /* tx flow control active */
72 rpc_txflowctl_cb_t txflowctl_cb
; /* rpc tx flow control to control wlc_dpc() */
75 mbool tp_dngl_aggregation
; /* aggregate into transport buffers */
76 rpc_buf_t
*tp_dngl_agg_p
; /* current aggregate chain header */
77 rpc_buf_t
*tp_dngl_agg_ptail
; /* current aggregate chain tail */
78 uint tp_dngl_agg_sframes
; /* current aggregate packet subframes */
79 uint8 tp_dngl_agg_sframes_limit
; /* agg sframe limit */
80 uint tp_dngl_agg_bytes
; /* current aggregate packet total length */
81 uint16 tp_dngl_agg_bytes_max
; /* agg byte max */
82 uint tp_dngl_agg_txpending
; /* TBD, for agg watermark flow control */
83 uint tp_dngl_agg_cnt_chain
; /* total aggregated pkt */
84 uint tp_dngl_agg_cnt_sf
; /* total aggregated subframes */
85 uint tp_dngl_agg_cnt_bytes
; /* total aggregated bytes */
86 uint tp_dngl_agg_cnt_noagg
; /* no. pkts not aggregated */
87 uint tp_dngl_agg_cnt_pass
; /* no. pkt bypass agg */
89 uint tp_dngl_agg_lazy
; /* lazy to release agg on tp_dngl_aggregation clear */
91 uint tp_dngl_deagg_cnt_chain
; /* multifrag pkt */
92 uint tp_dngl_deagg_cnt_sf
; /* no. of frag inside multifrag */
93 uint tp_dngl_deagg_cnt_clone
; /* no. of clone */
94 uint tp_dngl_deagg_cnt_bytes
; /* total deagg bytes */
95 uint tp_dngl_deagg_cnt_badfmt
; /* bad format */
96 uint tp_dngl_deagg_cnt_badsflen
; /* bad sf len */
97 uint tp_dngl_deagg_cnt_pass
; /* passthrough, single frag */
98 int has_2nd_bulk_in_ep
; /* nonzero when a second bulk-IN endpoint is available (see BCMUSBDEV_EP_FOR_RPCRETURN) */
/*
 * Transport tunables: flow-control queue size and watermarks, lazy-agg
 * thresholds, and USB padding workaround constants (bad total length, bulk
 * and control endpoint max packet sizes, ZLP avoidance pad).
 *
 * NOTE(review): several original lines are missing from this extract --
 * including comment terminators and the #endif matching the #ifndef below;
 * directives kept verbatim.
 */
101 #define BCM_RPC_TP_Q_MAX 1024 /* Rx flow control queue size - Set it big and we don't
102 * expect it to get full. If the memory gets low, we
103 * just stop processing wlc_dpc
105 #ifndef BCM_RPC_TP_FLOWCTL_QWM_HIGH
106 #define BCM_RPC_TP_FLOWCTL_QWM_HIGH 24 /* high watermark for tp queue */
108 #define BCM_RPC_TP_FLOWCTL_QWM_LOW 4 /* low watermark for tp queue */
110 /* no. of aggregated subframes per second to activate/deactivate lazy agg(delay release)
111 * For medium traffic, the sf/s is > 5k+
113 #define BCM_RPC_TP_AGG_LAZY_WM_HI 50 /* activate lazy agg if higher than this */
114 #define BCM_RPC_TP_AGG_LAZY_WM_LO 20 /* deactivate lazy agg if lower than this */
116 /* this WAR is similar to the preaggregated one in wlc_high_stubs.c
117 * #define USB_TOTAL_LEN_BAD 516
118 * #define USB_TOTAL_LEN_BAD_PAD 8
120 #define BCM_RPC_TP_DNGL_TOTLEN_BAD 516
121 #define BCM_RPC_TP_DNGL_TOTLEN_BAD_PAD 8
123 #define BCM_RPC_TP_DNGL_BULKEP_MPS 512
124 #define BCM_RPC_TP_DNGL_CTRLEP_MPS 64
125 #define BCM_RPC_TP_DNGL_ZLP_PAD 4 /* pad bytes */
/* Forward declarations for the file-local (static) helpers below:
 * encapsulation, low-level send, flow-control enqueue, aggregation state
 * machine (init/agg/append/release/flush), and buffer padding.
 */
127 static void bcm_rpc_tp_tx_encap(rpc_tp_info_t
* rpcb
, rpc_buf_t
*b
);
128 static int bcm_rpc_tp_buf_send_internal(rpc_tp_info_t
* rpc_th
, rpc_buf_t
*b
, uint32 ep_idx
);
129 static void bcm_rpc_tp_buf_send_enq(rpc_tp_info_t
* rpc_th
, rpc_buf_t
*b
);
131 static void bcm_rpc_tp_dngl_agg_initstate(rpc_tp_info_t
* rpcb
);
132 static int bcm_rpc_tp_dngl_agg(rpc_tp_info_t
*rpcb
, rpc_buf_t
*b
);
133 static void bcm_rpc_tp_dngl_agg_append(rpc_tp_info_t
* rpcb
, rpc_buf_t
*b
);
134 static int bcm_rpc_tp_dngl_agg_release(rpc_tp_info_t
* rpcb
);
135 static void bcm_rpc_tp_dngl_agg_flush(rpc_tp_info_t
* rpcb
);
136 static void bcm_rpc_tp_buf_pad(rpc_tp_info_t
* rpcb
, rpc_buf_t
*bb
, uint padbytes
);
/*
 * Attach-time constructor: allocates and zeroes the transport state,
 * initializes flow control (queue + watermarks) and aggregation limits,
 * and records whether a second bulk-IN endpoint exists.
 *
 * NOTE(review): this extract is missing original lines (return type,
 * braces, the early-return on malloc failure, and the final return of
 * rpc_th) -- presumably it returns the allocated rpc_tp_info_t* or NULL;
 * confirm against the original file. Code kept verbatim.
 */
139 BCMATTACHFN(bcm_rpc_tp_attach
)(osl_t
* osh
, void *bus
)
141 rpc_tp_info_t
*rpc_th
;
142 hndrte_dev_t
*ctx
= (hndrte_dev_t
*)bus
;
144 rpc_th
= (rpc_tp_info_t
*)MALLOC(osh
, sizeof(rpc_tp_info_t
));
145 if (rpc_th
== NULL
) {
146 RPC_TP_ERR(("%s: rpc_tp_info_t malloc failed\n", __FUNCTION__
));
150 memset(rpc_th
, 0, sizeof(rpc_tp_info_t
));
155 /* Init for flow control */
156 rpc_th
->tx_flowctl
= FALSE
;
157 rpc_th
->tx_q_flowctl_segcnt
= 0;
158 rpc_th
->tx_flowctlq
= (struct spktq
*)MALLOC(osh
, sizeof(struct spktq
));
159 if (rpc_th
->tx_flowctlq
== NULL
) {
160 RPC_TP_ERR(("%s: txflowctlq malloc failed\n", __FUNCTION__
));
161 MFREE(rpc_th
->osh
, rpc_th
, sizeof(rpc_tp_info_t
));
164 pktqinit(rpc_th
->tx_flowctlq
, BCM_RPC_TP_Q_MAX
);
165 rpc_th
->tx_q_flowctl_hiwm
= BCM_RPC_TP_FLOWCTL_QWM_HIGH
;
166 rpc_th
->tx_q_flowctl_lowm
= BCM_RPC_TP_FLOWCTL_QWM_LOW
;
/* aggregation defaults: lazy off, limits from build-time maxima */
168 rpc_th
->tp_dngl_agg_lazy
= 0;
169 rpc_th
->tp_dngl_agg_sframes_limit
= BCM_RPC_TP_DNGL_AGG_MAX_SFRAME
;
170 rpc_th
->tp_dngl_agg_bytes_max
= BCM_RPC_TP_DNGL_AGG_MAX_BYTE
;
171 #ifdef BCMUSBDEV_EP_FOR_RPCRETURN
172 rpc_th
->has_2nd_bulk_in_ep
= 1;
173 #endif /* BCMUSBDEV_EP_FOR_RPCRETURN */
/*
 * Detach-time destructor: frees the flow-control queue (if allocated) and
 * the transport state itself.
 *
 * NOTE(review): braces and surrounding lines are missing from this extract;
 * code kept verbatim.
 */
178 BCMATTACHFN(bcm_rpc_tp_detach
)(rpc_tp_info_t
* rpc_th
)
181 if (rpc_th
->tx_flowctlq
)
182 MFREE(rpc_th
->osh
, rpc_th
->tx_flowctlq
, sizeof(struct spktq
));
184 MFREE(rpc_th
->osh
, rpc_th
, sizeof(rpc_tp_info_t
));
/*
 * Periodic watchdog: (1) releases any stale aggregation, (2) measures the
 * per-interval subframe rate (delta of tp_dngl_agg_cnt_sf) and toggles
 * lazy aggregation using the LAZY_WM_HI/LO hysteresis thresholds.
 *
 * NOTE(review): declarations of `delta` and the (presumably static) `old`
 * counter, plus the function's return type and braces, are missing from
 * this extract; code kept verbatim.
 */
188 bcm_rpc_tp_watchdog(rpc_tp_info_t
*rpcb
)
193 /* (1) close agg periodically to avoid stale aggregation */
194 bcm_rpc_tp_dngl_agg_release(rpcb
);
197 delta
= rpcb
->tp_dngl_agg_cnt_sf
- old
;
198 old
= rpcb
->tp_dngl_agg_cnt_sf
;
200 RPC_TP_DBG(("agg delta %d tp flowcontrol queue pending (qlen %d subframe %d)\n", delta
,
201 pktq_len(rpcb
->tx_flowctlq
), rpcb
->tx_q_flowctl_segcnt
));
/* hysteresis: once lazy, stay lazy until rate drops below the low mark */
203 if (rpcb
->tp_dngl_agg_lazy
)
204 rpcb
->tp_dngl_agg_lazy
= (delta
< BCM_RPC_TP_AGG_LAZY_WM_LO
) ? 0 : 1;
206 rpcb
->tp_dngl_agg_lazy
= (delta
> BCM_RPC_TP_AGG_LAZY_WM_HI
) ? 1 : 0;
/*
 * Receive path from the dongle bus: de-aggregates a host->dongle TP frame
 * (a chain of lbufs, each subframe prefixed by a 32-bit little-endian TP
 * length word) into individual RPC frames and hands each one to the
 * registered rx callback (rpc_th->rx_pkt). Takes ownership of the incoming
 * chain and accounts each segment in buf_cnt_inuse; subframes that span a
 * buffer boundary are split via osl_pktclone. Bad subframe lengths and
 * malformed chains are counted and tossed.
 *
 * NOTE(review): this extract is a lossy listing -- many original lines
 * (declarations of p/orig_p/new_p/totlen/seg/dbg_agg, the outer while loop,
 * several else/brace lines, returns) are missing, so the control flow shown
 * here is incomplete. Code kept verbatim; do not edit without the original.
 */
210 bcm_rpc_tp_rx_from_dnglbus(rpc_tp_info_t
*rpc_th
, struct lbuf
*lb
)
213 void *rpc_p
, *rpc_prev
;
214 uint pktlen
, tp_len
, iter
= 0;
217 uint dbg_data
[16], i
; /* must fit host agg limit BCM_RPC_TP_HOST_AGG_MAX_SFRAME+1 */
/* no rx handler registered: count the drop and bail out */
223 if (rpc_th
->rx_pkt
== NULL
) {
224 RPC_TP_ERR(("%s: no rpc rx fn, dropping\n", __FUNCTION__
));
225 rpc_th
->rxdrop_cnt
++;
229 orig_p
= PKTFRMNATIVE(rpc_th
->osh
, lb
);
234 /* take ownership of the dnglbus packet chain
235 * since it will be freed by bcm_rpc_tp_buf_free()
237 rpc_th
->buf_cnt_inuse
+= pktsegcnt(rpc_th
->osh
, orig_p
);
239 dbg_data
[0] = pktsegcnt(rpc_th
->osh
, orig_p
);
241 pktlen
= PKTLEN(osh
, orig_p
);
245 /* while we have more data in the TP frame's packet chain,
246 * create a packet chain(could be cloned) for the next RPC frame
247 * then give it away to high layer for process(buffer not freed)
252 /* read TP_HDR(len of rpc frame) and pull the data pointer past the length word */
253 if (pktlen
>= BCM_RPC_TP_ENCAP_LEN
) {
254 ASSERT(((uint
)PKTDATA(osh
, p
) & 0x3) == 0); /* ensure aligned word read */
255 tp_len
= ltoh32(*(uint32
*)PKTDATA(osh
, p
));
256 PKTPULL(osh
, p
, BCM_RPC_TP_ENCAP_LEN
);
257 pktlen
-= BCM_RPC_TP_ENCAP_LEN
;
259 /* error case: less data than the encapsulation size
260 * treat as an empty tp buffer, at end of current buffer
265 rpc_th
->tp_dngl_deagg_cnt_badsflen
++; /* bad sf len */
268 /* if TP header finished a buffer(rpc header in next chained buffer), open next */
270 void *next_p
= PKTNEXT(osh
, p
);
271 PKTSETNEXT(osh
, p
, NULL
);
272 rpc_th
->buf_cnt_inuse
--;
273 PKTFREE(osh
, p
, FALSE
);
276 pktlen
= PKTLEN(osh
, p
);
279 dbg_data
[iter
] = tp_len
;
281 if (tp_len
< pktlen
|| dbg_agg
) {
283 RPC_TP_DEAGG(("DEAGG: [%d] p %p data %p pktlen %d tp_len %d\n",
284 iter
, p
, PKTDATA(osh
, p
), pktlen
, tp_len
));
285 rpc_th
->tp_dngl_deagg_cnt_sf
++;
286 rpc_th
->tp_dngl_deagg_cnt_bytes
+= tp_len
;
289 /* empty TP buffer (special case: use tp_len to pad for some USB pktsize bugs) */
291 rpc_th
->tp_dngl_deagg_cnt_pass
++;
293 } else if (tp_len
> 10000 ) { /* something is wrong */
294 /* print out msgs according to value of p -- in case it is NULL */
296 RPC_TP_ERR(("DEAGG: iter %d, p(%p data %p pktlen %d)\n",
297 iter
, p
, PKTDATA(osh
, p
), PKTLEN(osh
, p
)));
299 RPC_TP_ERR(("DEAGG: iter %d, p is NULL", iter
));
303 /* ========= For this TP subframe, find the end, build a chain, sendup ========= */
305 /* RPC frame packet chain starts with this packet */
310 /* find the last frag in this rpc chain */
311 while ((tp_len
>= pktlen
) && p
) {
313 RPC_TP_DEAGG(("DEAGG: tp_len %d consumes p(%p pktlen %d)\n", tp_len
,
320 pktlen
= PKTLEN(osh
, p
);
/* malformed chain diagnostics: dump what we know, then toss the partial rpc chain */
324 totlen
= pkttotlen(osh
, rpc_p
);
325 seg
= pktsegcnt(rpc_th
->osh
, rpc_p
);
327 RPC_TP_ERR(("DEAGG, toss[%d], orig_p %p segcnt %d",
328 iter
, orig_p
, dbg_data
[0]));
329 RPC_TP_ERR(("DEAGG,rpc_p %p totlen %d pktl %d tp_len %d\n",
330 rpc_p
, totlen
, pktlen
, tp_len
));
331 for (i
= 1; i
<= iter
; i
++)
332 RPC_TP_ERR(("tplen[%d] = %d ", i
, dbg_data
[i
]));
336 RPC_TP_ERR(("this seg len %d\n", PKTLEN(osh
, p
)));
340 rpc_th
->buf_cnt_inuse
-= seg
;
341 PKTFREE(osh
, rpc_p
, FALSE
);
342 rpc_th
->tp_dngl_deagg_cnt_badfmt
++;
344 /* big hammer to recover USB
345 * extern void dngl_reboot(void); dngl_reboot();
354 /* fix up the last frag */
356 /* if the whole RPC buffer chain ended at the end of the prev TP buffer,
357 * end the RPC buffer chain. we are done
360 RPC_TP_DEAGG(("DEAGG: END rpc chain p %p len %d\n\n", rpc_prev
,
363 PKTSETNEXT(osh
, rpc_prev
, NULL
);
365 rpc_th
->tp_dngl_deagg_cnt_chain
++;
366 RPC_TP_DEAGG(("this frag %d totlen %d\n", pktlen
,
367 pkttotlen(osh
, orig_p
)));
371 /* if pktlen has more bytes than tp_len, another tp frame must follow
372 * create a clone of the sub-range of the current TP buffer covered
373 * by the RPC buffer, attach to the end of the RPC buffer chain
374 * (cut off the original chain link)
375 * continue chain looping(p != NULL)
380 RPC_TP_DEAGG(("DEAGG: cloning %d bytes out of p(%p data %p) len %d\n",
381 tp_len
, p
, PKTDATA(osh
, p
), pktlen
));
383 new_p
= osl_pktclone(osh
, p
, 0, tp_len
);
384 rpc_th
->buf_cnt_inuse
++;
385 rpc_th
->tp_dngl_deagg_cnt_clone
++;
387 RPC_TP_DEAGG(("DEAGG: after clone, newp(%p data %p pktlen %d)\n",
388 new_p
, PKTDATA(osh
, new_p
), PKTLEN(osh
, new_p
)));
391 RPC_TP_DEAGG(("DEAGG: chaining: %p->%p(clone)\n", rpc_prev
,
393 PKTSETNEXT(osh
, rpc_prev
, new_p
);
395 RPC_TP_DEAGG(("DEAGG: clone %p is a complete rpc pkt\n", new_p
));
/* advance past this subframe in the shared buffer */
399 PKTPULL(osh
, p
, tp_len
);
401 RPC_TP_DEAGG(("DEAGG: remainder packet p %p data %p pktlen %d\n",
402 p
, PKTDATA(osh
, p
), PKTLEN(osh
, p
)));
/* hand the completed RPC frame chain to the upper layer (it owns/frees it) */
406 (rpc_th
->rx_pkt
)(rpc_th
->rx_context
, rpc_p
);
/*
 * Registers the upper layer's callbacks with the transport: tx-complete
 * handler + context and rx-packet handler + context. The rpc_osh parameter
 * is not used in the visible body.
 *
 * NOTE(review): return type and braces missing from this extract; code
 * kept verbatim.
 */
414 bcm_rpc_tp_register_cb(rpc_tp_info_t
* rpc_th
,
415 rpc_tx_complete_fn_t txcmplt
, void* tx_context
,
416 rpc_rx_fn_t rxpkt
, void* rx_context
, rpc_osl_t
*rpc_osh
)
418 rpc_th
->tx_complete
= txcmplt
;
419 rpc_th
->tx_context
= tx_context
;
420 rpc_th
->rx_pkt
= rxpkt
;
421 rpc_th
->rx_context
= rx_context
;
/*
 * Clears the registered callbacks/contexts (inverse of
 * bcm_rpc_tp_register_cb).
 *
 * NOTE(review): original line 429 is missing from this extract -- based on
 * register_cb it presumably cleared rx_pkt as well; confirm against the
 * original file. Code kept verbatim.
 */
425 bcm_rpc_tp_deregister_cb(rpc_tp_info_t
* rpcb
)
427 rpcb
->tx_complete
= NULL
;
428 rpcb
->tx_context
= NULL
;
430 rpcb
->rx_context
= NULL
;
434 /* This is called by dngl_txstop as txflowcontrol (stopping tx from dongle to host) of bcmwl,
435 * but is called rxflowcontrol in wl driver (pausing rx of wl driver). This is for low driver only.
/*
 * Applies or releases dongle->host tx flow control. On release, drains the
 * flow-control queue (re-checking tx_flowctl each iteration in case we get
 * throttled again mid-drain) and, once the queued segment count falls below
 * the low watermark, un-throttles the wl driver via txflowctl_cb(..., OFF).
 *
 * NOTE(review): return type, braces, early return on no-change, and the
 * declaration of `b` are missing from this extract; code kept verbatim.
 * The `prio` parameter is unused in the visible body.
 */
438 bcm_rpc_tp_txflowctl(rpc_tp_info_t
*rpc_th
, bool state
, int prio
)
444 if (rpc_th
->tx_flowctl
== state
)
447 RPC_TP_AGG(("tp_txflowctl %d\n", state
));
449 rpc_th
->tx_flowctl
= state
;
450 rpc_th
->tx_flowctl_cnt
++;
451 rpc_th
->tx_flowcontrolled
= state
;
453 /* when get out of flowcontrol, send all queued packets in a loop
454 * but need to check tx_flowctl every iteration and stop if we got flowcontrolled again
456 while (!rpc_th
->tx_flowctl
&& !pktq_empty(rpc_th
->tx_flowctlq
)) {
458 b
= pktdeq(rpc_th
->tx_flowctlq
);
459 if (b
== NULL
) break;
461 rpc_th
->tx_q_flowctl_segcnt
-= pktsegcnt(rpc_th
->osh
, b
);
463 bcm_rpc_tp_buf_send_internal(rpc_th
, b
, USBDEV_BULK_IN_EP1
);
466 /* bcm_rpc_tp_agg_set(rpc_th, BCM_RPC_TP_DNGL_AGG_FLOWCTL, state); */
468 /* if lowm is reached, release wldriver
469 * TODO, count more(average 3?) if agg is ON
471 if (rpc_th
->tx_q_flowctl_segcnt
< rpc_th
->tx_q_flowctl_lowm
) {
472 RPC_TP_AGG(("bcm_rpc_tp_txflowctl, wm hit low!\n"));
473 rpc_th
->txflowctl_cb(rpc_th
->txflowctl_ctx
, OFF
);
/*
 * Transport down: discards any partially built aggregation chain.
 * NOTE(review): return type/braces missing in this extract; kept verbatim.
 */
480 bcm_rpc_tp_down(rpc_tp_info_t
* rpc_th
)
482 bcm_rpc_tp_dngl_agg_flush(rpc_th
);
/*
 * Prepends the TP encapsulation header: records the current RPC frame
 * length, then pushes BCM_RPC_TP_ENCAP_LEN bytes of headroom for the
 * length word.
 *
 * NOTE(review): the lines declaring rpc_len/tp_lenp and the store of the
 * little-endian length through tp_lenp are missing from this extract;
 * code kept verbatim.
 */
486 bcm_rpc_tp_tx_encap(rpc_tp_info_t
* rpcb
, rpc_buf_t
*b
)
491 rpc_len
= PKTLEN(rpcb
->osh
, b
);
492 tp_lenp
= (uint32
*)PKTPUSH(rpcb
->osh
, b
, BCM_RPC_TP_ENCAP_LEN
);
/*
 * Sends an RPC call-return buffer to the host: adds TP encapsulation, pads
 * when the total length is an exact multiple of the control-endpoint max
 * packet size (to avoid a zero-length-packet stall), converts to a native
 * lbuf, and transmits via the second bulk-IN endpoint when available,
 * otherwise via the control endpoint. On success the packet's segments are
 * removed from buf_cnt_inuse (ownership passes to the USB driver).
 *
 * NOTE(review): return type, braces, declarations of lb/pktlen/err, the
 * else keyword between the two xmit paths, and error-path cleanup lines
 * are missing from this extract; code kept verbatim.
 */
497 bcm_rpc_tp_send_callreturn(rpc_tp_info_t
* rpc_th
, rpc_buf_t
*b
)
501 hndrte_dev_t
*chained
= rpc_th
->ctx
->chained
;
505 /* Add the TP encapsulation */
506 bcm_rpc_tp_tx_encap(rpc_th
, b
);
508 /* Pad if pkt size is a multiple of MPS */
509 pktlen
= bcm_rpc_buf_totlen_get(rpc_th
, b
);
510 if (pktlen
% BCM_RPC_TP_DNGL_CTRLEP_MPS
== 0) {
511 RPC_TP_AGG(("%s, tp pkt is multiple of %d bytes, padding %d bytes\n",
512 __FUNCTION__
, BCM_RPC_TP_DNGL_CTRLEP_MPS
, BCM_RPC_TP_DNGL_ZLP_PAD
));
514 bcm_rpc_tp_buf_pad(rpc_th
, b
, BCM_RPC_TP_DNGL_ZLP_PAD
);
517 lb
= PKTTONATIVE(rpc_th
->osh
, b
);
519 if (rpc_th
->has_2nd_bulk_in_ep
) {
520 err
= chained
->funcs
->xmit2(rpc_th
->ctx
, chained
, lb
, USBDEV_BULK_IN_EP2
);
522 err
= chained
->funcs
->xmit_ctl(rpc_th
->ctx
, chained
, lb
);
524 /* send through control endpoint */
526 RPC_TP_ERR(("%s: xmit failed; free pkt %p\n", __FUNCTION__
, lb
));
532 /* give pkt ownership to usb driver, decrement the counter */
533 rpc_th
->buf_cnt_inuse
-= pktsegcnt(rpc_th
->osh
, b
);
/*
 * Enqueues a buffer on the flow-control queue while tx flow control is
 * active, updates the segment count, and -- when the high watermark is
 * crossed -- throttles the wl driver via txflowctl_cb(..., ON).
 *
 * NOTE(review): braces and some intervening lines are missing from this
 * extract; code kept verbatim.
 */
541 bcm_rpc_tp_buf_send_enq(rpc_tp_info_t
* rpc_th
, rpc_buf_t
*b
)
543 pktenq(rpc_th
->tx_flowctlq
, (void*)b
);
544 rpc_th
->tx_q_flowctl_segcnt
+= pktsegcnt(rpc_th
->osh
, b
);
546 /* if hiwm is reached, throttle wldriver
547 * TODO, count more(average 3?) if agg is ON
549 if (rpc_th
->tx_q_flowctl_segcnt
> rpc_th
->tx_q_flowctl_hiwm
) {
550 rpc_th
->tx_q_flowctl_highwm_cnt
++;
552 RPC_TP_ERR(("bcm_rpc_tp_buf_send_enq, wm hit high!\n"));
554 rpc_th
->txflowctl_cb(rpc_th
->txflowctl_ctx
, ON
);
557 /* If tx_flowctlq gets full, set a bigger BCM_RPC_TP_Q_MAX */
558 ASSERT(!pktq_full(rpc_th
->tx_flowctlq
));
/*
 * Public send entry point: adds TP encapsulation, then either hands the
 * buffer to the aggregation path (when tp_dngl_aggregation is set),
 * enqueues it under flow control, or transmits it immediately on bulk-IN
 * endpoint 1.
 *
 * NOTE(review): return type, braces, the declaration/return of err, and
 * the else lines joining these branches are missing from this extract;
 * code kept verbatim.
 */
562 bcm_rpc_tp_buf_send(rpc_tp_info_t
* rpc_th
, rpc_buf_t
*b
)
566 /* Add the TP encapsulation */
567 bcm_rpc_tp_tx_encap(rpc_th
, b
);
569 /* if agg successful, done; otherwise, send it */
570 if (rpc_th
->tp_dngl_aggregation
) {
571 err
= bcm_rpc_tp_dngl_agg(rpc_th
, b
);
574 rpc_th
->tp_dngl_agg_cnt_pass
++;
576 if (rpc_th
->tx_flowctl
) {
577 bcm_rpc_tp_buf_send_enq(rpc_th
, b
);
580 err
= bcm_rpc_tp_buf_send_internal(rpc_th
, b
, USBDEV_BULK_IN_EP1
);
/*
 * Pads a TP-encapsulated buffer by `padbytes`: reads the TP length word at
 * the head of the buffer, asserts it matches the packet length, and writes
 * back the (updated) length word and packet length.
 *
 * NOTE(review): the lines that actually add padbytes to tp_len/pktlen
 * (original lines 593-595) are missing from this extract, so the visible
 * body appears to rewrite unchanged values -- consult the original file
 * before modifying. Code kept verbatim.
 */
587 bcm_rpc_tp_buf_pad(rpc_tp_info_t
* rpcb
, rpc_buf_t
*bb
, uint padbytes
)
589 uint32
*tp_lenp
= (uint32
*)bcm_rpc_buf_data(rpcb
, bb
);
590 uint32 tp_len
= ltoh32(*tp_lenp
);
591 uint32 pktlen
= bcm_rpc_buf_len_get(rpcb
, bb
);
592 ASSERT(tp_len
+ BCM_RPC_TP_ENCAP_LEN
== pktlen
);
596 *tp_lenp
= htol32(tp_len
);
597 bcm_rpc_buf_len_set(rpcb
, bb
, pktlen
);
/*
 * Low-level transmit on the data (bulk) endpoint: applies the USB length
 * workarounds -- pad when the total length equals the known-bad 516 bytes,
 * or when it is an exact multiple of the bulk max packet size (ZLP
 * avoidance) -- converts to a native lbuf, and calls the chained device's
 * xmit. On success, segment ownership passes to the USB driver and
 * buf_cnt_inuse is decremented.
 *
 * NOTE(review): return type, braces, declarations of err/pktlen, the error
 * path body, and the final return are missing from this extract; the
 * tx_ep_index parameter is unused in the visible body. Code kept verbatim.
 */
601 bcm_rpc_tp_buf_send_internal(rpc_tp_info_t
* rpcb
, rpc_buf_t
*b
, uint32 tx_ep_index
)
604 struct lbuf
*lb
= (struct lbuf
*)b
;
605 hndrte_dev_t
*chained
= rpcb
->ctx
->chained
;
611 pktlen
= bcm_rpc_buf_totlen_get(rpcb
, b
);
613 if (pktlen
== BCM_RPC_TP_DNGL_TOTLEN_BAD
) {
614 RPC_TP_AGG(("%s, pkt is %d bytes, padding %d bytes\n", __FUNCTION__
,
615 BCM_RPC_TP_DNGL_TOTLEN_BAD
, BCM_RPC_TP_DNGL_TOTLEN_BAD_PAD
));
617 bcm_rpc_tp_buf_pad(rpcb
, b
, BCM_RPC_TP_DNGL_TOTLEN_BAD_PAD
);
619 } else if (pktlen
% BCM_RPC_TP_DNGL_BULKEP_MPS
== 0) {
620 RPC_TP_AGG(("%s, tp pkt is multiple of %d bytes, padding %d bytes\n",
622 BCM_RPC_TP_DNGL_BULKEP_MPS
, BCM_RPC_TP_DNGL_ZLP_PAD
));
624 bcm_rpc_tp_buf_pad(rpcb
, b
, BCM_RPC_TP_DNGL_ZLP_PAD
);
628 lb
= PKTTONATIVE(rpcb
->osh
, b
);
629 /* send through data endpoint */
630 if ((err
= chained
->funcs
->xmit(rpcb
->ctx
, chained
, lb
)) != 0) {
631 RPC_TP_ERR(("%s: xmit failed; free pkt %p\n", __FUNCTION__
, lb
));
637 /* give pkt ownership to usb driver, decrement the counter */
638 rpcb
->buf_cnt_inuse
-= pktsegcnt(rpcb
->osh
, b
);
/*
 * Diagnostic dump: prints buffer accounting, flow-control state and
 * watermarks, aggregation limits/state, and the aggregation and
 * de-aggregation counters (including derived bytes-per-chain averages).
 * Pure read-only reporting; no state is modified.
 *
 * NOTE(review): return type/braces missing in this extract; kept verbatim.
 */
645 bcm_rpc_tp_dump(rpc_tp_info_t
*rpcb
)
647 printf("\nRPC_TP_RTE:\n");
648 printf("bufalloc %d(buf_cnt_inuse %d) tx %d(txerr %d) rx %d(rxdrop %d)\n",
649 rpcb
->bufalloc
, rpcb
->buf_cnt_inuse
, rpcb
->tx_cnt
, rpcb
->txerr_cnt
,
650 rpcb
->rx_cnt
, rpcb
->rxdrop_cnt
);
652 printf("tx_flowctrl_cnt %d tx_flowctrl_status %d hwm %d lwm %d hit_hiwm #%d segcnt %d\n",
653 rpcb
->tx_flowctl_cnt
, rpcb
->tx_flowcontrolled
,
654 rpcb
->tx_q_flowctl_hiwm
, rpcb
->tx_q_flowctl_lowm
, rpcb
->tx_q_flowctl_highwm_cnt
,
655 rpcb
->tx_q_flowctl_segcnt
);
657 printf("tp_dngl_agg sf_limit %d bytes_limit %d aggregation 0x%x lazy %d\n",
658 rpcb
->tp_dngl_agg_sframes_limit
, rpcb
->tp_dngl_agg_bytes_max
,
659 rpcb
->tp_dngl_aggregation
, rpcb
->tp_dngl_agg_lazy
);
660 printf("agg counter: chain %u, sf %u, bytes %u byte-per-chain %u, bypass %u noagg %u\n",
661 rpcb
->tp_dngl_agg_cnt_chain
, rpcb
->tp_dngl_agg_cnt_sf
,
662 rpcb
->tp_dngl_agg_cnt_bytes
,
663 (rpcb
->tp_dngl_agg_cnt_chain
== 0) ?
664 0 : CEIL(rpcb
->tp_dngl_agg_cnt_bytes
, (rpcb
->tp_dngl_agg_cnt_chain
)),
665 rpcb
->tp_dngl_agg_cnt_pass
, rpcb
->tp_dngl_agg_cnt_noagg
);
669 printf("tp_dngl_deagg chain %u sf %u bytes %u clone %u badsflen %u badfmt %u\n",
670 rpcb
->tp_dngl_deagg_cnt_chain
, rpcb
->tp_dngl_deagg_cnt_sf
,
671 rpcb
->tp_dngl_deagg_cnt_bytes
, rpcb
->tp_dngl_deagg_cnt_clone
,
672 rpcb
->tp_dngl_deagg_cnt_badsflen
, rpcb
->tp_dngl_deagg_cnt_badfmt
);
673 printf("tp_dngl_deagg byte-per-chain %u passthrough %u\n",
674 (rpcb
->tp_dngl_deagg_cnt_chain
== 0) ?
675 0 : rpcb
->tp_dngl_deagg_cnt_bytes
/rpcb
->tp_dngl_deagg_cnt_chain
,
676 rpcb
->tp_dngl_deagg_cnt_pass
);
679 /* Buffer manipulation, LEN + RPC_header + body */
/* Returns the fixed TP encapsulation header length.
 * NOTE(review): return type/braces missing in this extract; kept verbatim. */
681 bcm_rpc_buf_tp_header_len(rpc_tp_info_t
* rpc_th
)
683 return BCM_RPC_TP_ENCAP_LEN
;
/*
 * Allocates an RPC transport buffer: reserves room for the caller's `len`
 * plus the TP header, over-allocates by the worst-case USB padding, then
 * trims the packet back to tp_len and pulls past the TP header so the
 * caller's data pointer starts at the RPC payload.
 *
 * NOTE(review): return type, braces, the declaration of b/padlen, the
 * matching #ifdef BCMUSBDEV for the #endif below, the PKTGET NULL check,
 * and the final return are missing from this extract; code kept verbatim.
 */
687 bcm_rpc_tp_buf_alloc(rpc_tp_info_t
* rpc_th
, int len
)
690 size_t tp_len
= len
+ BCM_RPC_TP_ENCAP_LEN
;
694 padlen
= MAX(BCM_RPC_TP_DNGL_TOTLEN_BAD_PAD
, BCM_RPC_TP_DNGL_ZLP_PAD
);
695 padlen
= ROUNDUP(padlen
, sizeof(int));
698 #endif /* BCMUSBDEV */
700 /* get larger packet with padding which might be required due to USB ZLP */
701 b
= (rpc_buf_t
*)PKTGET(rpc_th
->osh
, tp_len
+ padlen
, FALSE
);
705 rpc_th
->buf_cnt_inuse
++;
706 /* set the len back to tp_len */
707 bcm_rpc_buf_len_set(rpc_th
, b
, tp_len
);
708 PKTPULL(rpc_th
->osh
, b
, BCM_RPC_TP_ENCAP_LEN
);
/*
 * Frees an RPC transport buffer (whole chain) and removes its segments
 * from the in-use accounting.
 * NOTE(review): braces and some lines missing in this extract; kept verbatim.
 */
715 bcm_rpc_tp_buf_free(rpc_tp_info_t
* rpc_th
, rpc_buf_t
*b
)
719 rpc_th
->buf_cnt_inuse
-= pktsegcnt(rpc_th
->osh
, b
);
720 PKTFREE(rpc_th
->osh
, b
, FALSE
);
/* Returns the length of the first segment of buffer b (PKTLEN wrapper). */
724 bcm_rpc_buf_len_get(rpc_tp_info_t
* rpc_th
, rpc_buf_t
* b
)
726 return PKTLEN(rpc_th
->osh
, b
);
/*
 * Sums PKTLEN over the PKTNEXT chain to get the buffer's total length.
 * NOTE(review): the declaration/initialization of totlen and the final
 * return are missing from this extract; code kept verbatim.
 */
730 bcm_rpc_buf_totlen_get(rpc_tp_info_t
* rpc_th
, rpc_buf_t
* b
)
733 for (; b
; b
= (rpc_buf_t
*) PKTNEXT(rpc_th
->osh
, b
)) {
734 totlen
+= PKTLEN(rpc_th
->osh
, b
);
/* Sets the first segment's length (PKTSETLEN wrapper). */
740 bcm_rpc_buf_len_set(rpc_tp_info_t
* rpc_th
, rpc_buf_t
* b
, uint len
)
742 PKTSETLEN(rpc_th
->osh
, b
, len
);
/* Returns the data pointer of buffer b (PKTDATA wrapper). */
747 bcm_rpc_buf_data(rpc_tp_info_t
* rpc_th
, rpc_buf_t
* b
)
749 return PKTDATA(rpc_th
->osh
, b
);
/* Grows the buffer head by `bytes` and returns the new data pointer
 * (PKTPUSH wrapper). */
753 bcm_rpc_buf_push(rpc_tp_info_t
* rpc_th
, rpc_buf_t
* b
, uint bytes
)
755 return PKTPUSH(rpc_th
->osh
, b
, bytes
);
/* Advances the buffer head by `bytes` and returns the new data pointer
 * (PKTPULL wrapper). */
759 bcm_rpc_buf_pull(rpc_tp_info_t
* rpc_th
, rpc_buf_t
* b
, uint bytes
)
761 return PKTPULL(rpc_th
->osh
, b
, bytes
);
/* Returns the next buffer in the PKTLINK list (note: link, not the
 * PKTNEXT fragment chain). rpcb is unused in the visible body. */
765 bcm_rpc_buf_next_get(rpc_tp_info_t
* rpcb
, rpc_buf_t
* b
)
767 return (rpc_buf_t
*)PKTLINK(b
);
/* Links nextb after b via PKTSETLINK (companion to bcm_rpc_buf_next_get). */
771 bcm_rpc_buf_next_set(rpc_tp_info_t
* rpcb
, rpc_buf_t
* b
, rpc_buf_t
*nextb
)
773 PKTSETLINK(b
, nextb
);
/* Adjusts the outstanding-buffer accounting by `adjust` (may be negative);
 * used when buffer ownership moves across layers outside alloc/free. */
777 bcm_rpc_tp_buf_cnt_adjust(rpc_tp_info_t
* rpcb
, int adjust
)
779 rpcb
->buf_cnt_inuse
+= adjust
;
/* Registers the tx flow-control callback and its context (invoked with
 * ON/OFF by the watermark logic in this file). */
783 bcm_rpc_tp_txflowctlcb_init(rpc_tp_info_t
*rpc_th
, void *ctx
, rpc_txflowctl_cb_t cb
)
785 rpc_th
->txflowctl_cb
= cb
;
786 rpc_th
->txflowctl_ctx
= ctx
;
/* Clears the tx flow-control callback registration. */
790 bcm_rpc_tp_txflowctlcb_deinit(rpc_tp_info_t
*rpc_th
)
792 rpc_th
->txflowctl_cb
= NULL
;
793 rpc_th
->txflowctl_ctx
= NULL
;
/* Sets the flow-control queue high/low watermarks. */
797 bcm_rpc_tp_txq_wm_set(rpc_tp_info_t
*rpc_th
, uint8 hiwm
, uint8 lowm
)
799 rpc_th
->tx_q_flowctl_hiwm
= hiwm
;
800 rpc_th
->tx_q_flowctl_lowm
= lowm
;
/* Reads back the flow-control queue high/low watermarks via out-params. */
804 bcm_rpc_tp_txq_wm_get(rpc_tp_info_t
*rpc_th
, uint8
*hiwm
, uint8
*lowm
)
806 *hiwm
= rpc_th
->tx_q_flowctl_hiwm
;
807 *lowm
= rpc_th
->tx_q_flowctl_lowm
;
/* Sets the aggregation limits: max subframes per chain and max bytes. */
811 bcm_rpc_tp_agg_limit_set(rpc_tp_info_t
*rpc_th
, uint8 sf
, uint16 bytes
)
813 rpc_th
->tp_dngl_agg_sframes_limit
= sf
;
814 rpc_th
->tp_dngl_agg_bytes_max
= bytes
;
/* Reads back the aggregation limits via out-params. */
818 bcm_rpc_tp_agg_limit_get(rpc_tp_info_t
*rpc_th
, uint8
*sf
, uint16
*bytes
)
820 *sf
= rpc_th
->tp_dngl_agg_sframes_limit
;
821 *bytes
= rpc_th
->tp_dngl_agg_bytes_max
;
824 /* TP aggregation: set, init, agg, append, close, flush */
/* Resets the aggregation working state: empty chain (head/tail NULL),
 * zero subframes/bytes, zero pending. */
826 bcm_rpc_tp_dngl_agg_initstate(rpc_tp_info_t
* rpcb
)
828 rpcb
->tp_dngl_agg_p
= NULL
;
829 rpcb
->tp_dngl_agg_ptail
= NULL
;
830 rpcb
->tp_dngl_agg_sframes
= 0;
831 rpcb
->tp_dngl_agg_bytes
= 0;
832 rpcb
->tp_dngl_agg_txpending
= 0;
/*
 * Aggregation decision for one TP-encapsulated buffer: if adding it would
 * exceed the byte or subframe limits, the current chain is released first;
 * the buffer is then appended, and if it alone already meets/exceeds the
 * byte limit the chain is released immediately.
 *
 * NOTE(review): return type, braces, declarations of pktlen/totlen/err/
 * new_err, and the error-combination/return lines are missing from this
 * extract; code kept verbatim.
 */
836 bcm_rpc_tp_dngl_agg(rpc_tp_info_t
*rpcb
, rpc_buf_t
*b
)
842 ASSERT(rpcb
->tp_dngl_aggregation
);
844 pktlen
= bcm_rpc_buf_len_get(rpcb
, b
);
846 totlen
= pktlen
+ rpcb
->tp_dngl_agg_bytes
;
848 if ((totlen
> rpcb
->tp_dngl_agg_bytes_max
) ||
849 (rpcb
->tp_dngl_agg_sframes
+ 1 > rpcb
->tp_dngl_agg_sframes_limit
)) {
851 RPC_TP_AGG(("bcm_rpc_tp_dngl_agg: terminte TP agg for tpbyte %d or txframe %d\n",
852 rpcb
->tp_dngl_agg_bytes_max
, rpcb
->tp_dngl_agg_sframes_limit
));
854 /* release current agg, continue with new agg */
855 err
= bcm_rpc_tp_dngl_agg_release(rpcb
);
860 bcm_rpc_tp_dngl_agg_append(rpcb
, b
);
862 /* if the new frag is also already over the agg limit, release it */
863 if (pktlen
>= rpcb
->tp_dngl_agg_bytes_max
) {
865 new_err
= bcm_rpc_tp_dngl_agg_release(rpcb
);
874 * tp_dngl_agg_p points to the header lbuf, tp_dngl_agg_ptail points to the tail lbuf
876 * The TP agg format typically will be below
877 * | TP header(len) | subframe1 rpc_header | subframe1 data |
878 * | TP header(len) | subframe2 rpc_header | subframe2 data |
880 * | TP header(len) | subframeN rpc_header | subframeN data |
/*
 * Appends buffer b as the next subframe of the current aggregation chain
 * (starting a new chain when empty), then updates the subframe and byte
 * totals.
 *
 * NOTE(review): braces and the else line joining the two branches are
 * missing from this extract; code kept verbatim.
 */
884 bcm_rpc_tp_dngl_agg_append(rpc_tp_info_t
* rpcb
, rpc_buf_t
*b
)
886 uint tp_len
= bcm_rpc_buf_len_get(rpcb
, b
);
888 if (rpcb
->tp_dngl_agg_p
== NULL
) {
890 rpcb
->tp_dngl_agg_p
= rpcb
->tp_dngl_agg_ptail
= b
;
893 /* chain the pkts at the end of current one */
894 ASSERT(rpcb
->tp_dngl_agg_ptail
!= NULL
);
896 PKTSETNEXT(rpcb
->osh
, rpcb
->tp_dngl_agg_ptail
, b
);
897 rpcb
->tp_dngl_agg_ptail
= b
;
900 rpcb
->tp_dngl_agg_sframes
++;
901 rpcb
->tp_dngl_agg_bytes
+= tp_len
;
903 RPC_TP_AGG(("%s, tp_len %d tot %d, sframe %d\n", __FUNCTION__
, tp_len
,
904 rpcb
->tp_dngl_agg_bytes
, rpcb
->tp_dngl_agg_sframes
));
/*
 * Closes and transmits the current aggregation chain: updates the agg
 * counters, resets the working state, then either enqueues the chain
 * (under flow control) or sends it on bulk-IN endpoint 1. Returns early
 * when no aggregation has been formed.
 *
 * NOTE(review): return type, braces, declarations of b/err, the early
 * return, the else joining the enq/send paths, and the final return are
 * missing from this extract; code kept verbatim.
 */
908 bcm_rpc_tp_dngl_agg_release(rpc_tp_info_t
* rpcb
)
913 if (rpcb
->tp_dngl_agg_p
== NULL
) { /* no aggregation formed */
917 RPC_TP_AGG(("%s, send %d, sframe %d\n", __FUNCTION__
,
918 rpcb
->tp_dngl_agg_bytes
, rpcb
->tp_dngl_agg_sframes
));
920 b
= rpcb
->tp_dngl_agg_p
;
921 rpcb
->tp_dngl_agg_cnt_chain
++;
922 rpcb
->tp_dngl_agg_cnt_sf
+= rpcb
->tp_dngl_agg_sframes
;
923 rpcb
->tp_dngl_agg_cnt_bytes
+= rpcb
->tp_dngl_agg_bytes
;
924 if (rpcb
->tp_dngl_agg_sframes
== 1)
925 rpcb
->tp_dngl_agg_cnt_noagg
++;
927 bcm_rpc_tp_dngl_agg_initstate(rpcb
);
929 rpcb
->tp_dngl_agg_txpending
++;
931 if (rpcb
->tx_flowctl
) {
932 bcm_rpc_tp_buf_send_enq(rpcb
, b
);
935 err
= bcm_rpc_tp_buf_send_internal(rpcb
, b
, USBDEV_BULK_IN_EP1
);
939 RPC_TP_ERR(("bcm_rpc_tp_dngl_agg_release: send err!!!\n"));
/* Discards (frees) any partially built aggregation chain and resets the
 * aggregation working state. Used on transport down. */
947 bcm_rpc_tp_dngl_agg_flush(rpc_tp_info_t
* rpcb
)
949 /* toss the chained buffer */
950 if (rpcb
->tp_dngl_agg_p
)
951 bcm_rpc_tp_buf_free(rpcb
, rpcb
->tp_dngl_agg_p
);
953 bcm_rpc_tp_dngl_agg_initstate(rpcb
);
/*
 * Enables/disables aggregation for a given reason bit (mbool): on set, the
 * reason is OR-ed in; on clear, the reason is removed and -- once no
 * reasons remain -- the pending aggregation chain is released. The lazy
 * mode read into `i` suggests release may be deferred when lazy agg is on.
 *
 * NOTE(review): return type, braces, the leading `if (set)` test, the
 * declaration of i, and the lazy-release branch lines are missing from
 * this extract; code kept verbatim.
 */
957 bcm_rpc_tp_agg_set(rpc_tp_info_t
*rpcb
, uint32 reason
, bool set
)
962 RPC_TP_AGG(("%s: agg start\n", __FUNCTION__
));
964 mboolset(rpcb
->tp_dngl_aggregation
, reason
);
966 } else if (rpcb
->tp_dngl_aggregation
) {
968 RPC_TP_AGG(("%s: agg end\n", __FUNCTION__
));
974 i
= rpcb
->tp_dngl_agg_lazy
;
977 mboolclr(rpcb
->tp_dngl_aggregation
, reason
);
978 if (!rpcb
->tp_dngl_aggregation
)
979 bcm_rpc_tp_dngl_agg_release(rpcb
);
984 bcm_rpc_tp_msglevel_set(rpc_tp_info_t
*rpc_th
, uint8 msglevel
, bool high_low
)
986 ASSERT(high_low
== FALSE
);
987 tp_level_bmac
= msglevel
;