2 * RPC Transport layer(for host dbus driver)
3 * Broadcom 802.11abg Networking Device Driver
5 * Copyright (C) 2012, Broadcom Corporation. All Rights Reserved.
7 * Permission to use, copy, modify, and/or distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
14 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
16 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
17 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 * $Id: bcm_rpc_tp_dbus.c 330245 2012-04-30 09:38:26Z $
22 #if (!defined(WLC_HIGH) && !defined(WLC_LOW))
28 #include <bcmendian.h>
33 #include <bcm_rpc_tp.h>
37 #include <linux_osl.h>
/* Runtime message-level mask for this transport; default enables error prints only. */
static uint8 tp_level_host = RPC_TP_MSG_HOST_ERR_VAL;

/* Conditional trace macros keyed off tp_level_host.
 * NOTE(review): RPC_TP_DBG/AGG/DEAGG are defined twice below; the extract has
 * elided the surrounding #ifdef BCMDBG / #else / #endif — confirm against the
 * full source before editing.
 */
#define RPC_TP_ERR(args) do {if (tp_level_host & RPC_TP_MSG_HOST_ERR_VAL) printf args;} while (0)
#define RPC_TP_DBG(args) do {if (tp_level_host & RPC_TP_MSG_HOST_DBG_VAL) printf args;} while (0)
#define RPC_TP_AGG(args) do {if (tp_level_host & RPC_TP_MSG_HOST_AGG_VAL) printf args;} while (0)
#define RPC_TP_DEAGG(args) do {if (tp_level_host & RPC_TP_MSG_HOST_DEA_VAL) printf args;} while (0)
/* non-debug (no-op) variants -- presumably the #else branch of an elided #ifdef */
#define RPC_TP_DBG(args)
#define RPC_TP_AGG(args)
#define RPC_TP_DEAGG(args)

/* Total rx buffer pool size; the "#define RPCNUMBUF" line itself is elided in this extract. */
(BCM_RPC_TP_DBUS_NTXQ_PKT + BCM_RPC_TP_DBUS_NRXQ_CTRL + BCM_RPC_TP_DBUS_NRXQ_PKT * 2)
/* Rx flow-control watermarks (compared against buf_cnt_inuse in pktget/pktfree).
 * NOTE(review): HI and LO expand to the same value, so there is no hysteresis
 * between asserting and releasing rx flow control -- verify this is intentional.
 */
#define RPCRX_WM_HI (RPCNUMBUF - (BCM_RPC_TP_DBUS_NTXQ + BCM_RPC_TP_DBUS_NRXQ))
#define RPCRX_WM_LO (RPCNUMBUF - (BCM_RPC_TP_DBUS_NTXQ + BCM_RPC_TP_DBUS_NRXQ))

/* Timeouts (ms) used by RPC_OSL_WAIT in bcm_rpc_tp_buf_send_internal. */
#define RPC_BUS_SEND_WAIT_TIMEOUT_MSEC 500
#define RPC_BUS_SEND_WAIT_EXT_TIMEOUT_MSEC 750

/* USB ZLP avoidance: pad packets whose rounded length is a multiple of 512. */
#define BCM_RPC_TP_HOST_TOTALLEN_ZLP 512
#define BCM_RPC_TP_HOST_TOTALLEN_ZLP_PAD 8

/* Per-OS lock wrappers -- NDIS vs Linux variants; the selecting #ifdef is elided here.
 * NOTE(review): the Linux variants end in ';' inside the macro body, which breaks
 * use in if/else without braces -- flag for cleanup in the full source.
 */
#define RPC_TP_LOCK(ri) NdisAcquireSpinLock(&(ri)->lock)
#define RPC_TP_UNLOCK(ri) NdisReleaseSpinLock(&(ri)->lock)
#define RPC_TP_LOCK(ri) spin_lock_irqsave(&(ri)->lock, (ri)->flags);
#define RPC_TP_UNLOCK(ri) spin_unlock_irqrestore(&(ri)->lock, (ri)->flags);
/* RPC TRANSPORT API */
/* Forward declarations for the file-local (static) helpers defined below. */
static void bcm_rpc_tp_tx_encap(rpc_tp_info_t * rpcb, rpc_buf_t *b);
static int bcm_rpc_tp_buf_send_internal(rpc_tp_info_t * rpcb, rpc_buf_t *b);
static rpc_buf_t *bcm_rpc_tp_pktget(rpc_tp_info_t * rpcb, int len, bool send);
static void bcm_rpc_tp_pktfree(rpc_tp_info_t * rpcb, rpc_buf_t *b, bool send);
/* TX-side aggregation state machine: init, append, close (release), flush. */
static void bcm_rpc_tp_tx_agg_initstate(rpc_tp_info_t * rpcb);
static int bcm_rpc_tp_tx_agg(rpc_tp_info_t *rpcb, rpc_buf_t *b);
static void bcm_rpc_tp_tx_agg_append(rpc_tp_info_t * rpcb, rpc_buf_t *b);
static int bcm_rpc_tp_tx_agg_release(rpc_tp_info_t * rpcb);
static void bcm_rpc_tp_tx_agg_flush(rpc_tp_info_t * rpcb);
/* RX dispatch into the upper RPC layer. */
static void bcm_rpc_tp_rx(rpc_tp_info_t *rpcb, void *p);
/* Transport instance state. NOTE(review): many fields referenced elsewhere in this
 * file (osh, sh, bus, lock, flags, tx_context, rx_pkt, rx_context, rpc_osh,
 * bufalloc, buf_cnt_max, rx_cnt, rxdrop_cnt, rx_rtn_pkt) are elided from this
 * extract of the struct -- consult the full source for the complete layout.
 */
struct rpc_transport_info {
	rpc_tx_complete_fn_t tx_complete;	/* upper-layer tx-done callback (may be NULL) */
	int buf_cnt_inuse;	/* outstanding buf(alloc, not freed) */
	uint tx_cnt;	/* send successfully */
	uint txerr_cnt;	/* send failed */
	uint bus_mtu;	/* Max size of bus packet */
	uint bus_txdepth;	/* Max TX that can be posted */
	uint bus_txpending;	/* How many posted */
	bool tx_flowctl;	/* tx flow control active */
	bool tx_flowctl_override;	/* out of band tx flow control */
	uint tx_flowctl_cnt;	/* tx flow control transition times */
	bool rxflowctrl;	/* rx flow control active */
	uint32 rpctp_dbus_hist[BCM_RPC_TP_DBUS_NTXQ];	/* histogram for dbus pending pkt */
	/* --- TX aggregation state --- */
	mbool tp_tx_aggregation;	/* aggregate into transport buffers */
	rpc_buf_t *tp_tx_agg_p;	/* current aggregate chain header */
	rpc_buf_t *tp_tx_agg_ptail;	/* current aggregate chain tail */
	uint tp_tx_agg_sframes;	/* current aggregate packet subframes */
	uint8 tp_tx_agg_sframes_limit;	/* agg sframe limit */
	uint tp_tx_agg_bytes;	/* current aggregate packet total length */
	uint16 tp_tx_agg_bytes_max;	/* agg byte max */
	uint tp_tx_agg_cnt_chain;	/* total aggregated pkt */
	uint tp_tx_agg_cnt_sf;	/* total aggregated subframes */
	uint tp_tx_agg_cnt_bytes;	/* total aggregated bytes */
	uint tp_tx_agg_cnt_noagg;	/* no. pkts not aggregated */
	uint tp_tx_agg_cnt_pass;	/* no. pkt bypass agg */
	/* --- RX de-aggregation counters --- */
	uint tp_host_deagg_cnt_chain;	/* multifrag pkt */
	uint tp_host_deagg_cnt_sf;	/* total no. of frag inside multifrag */
	uint tp_host_deagg_cnt_bytes;	/* total deagg bytes */
	uint tp_host_deagg_cnt_badfmt;	/* bad format */
	uint tp_host_deagg_cnt_badsflen;	/* bad sf len */
	uint tp_host_deagg_cnt_pass;	/* passthrough, single frag */
	int has_2nd_bulk_in_ep;	/* device exposes a second bulk-IN endpoint (see recv_rtn) */

/* External firmware-download descriptor passed through to dbus_attach(). */
extern dbus_extdl_t dbus_extdl;
/* TP aggregation: set, init, agg, append, close, flush */

/* Enable or disable TX aggregation for one 'reason' bit in the tp_tx_aggregation
 * mbool. When the last reason is cleared, any pending aggregate is released
 * (sent) immediately. NOTE(review): the return-type line, opening brace and the
 * leading "if (set)" branch header are elided in this extract.
 */
bcm_rpc_tp_agg_set(rpc_tp_info_t *rpcb, uint32 reason, bool set)
		RPC_TP_AGG(("%s: agg start 0x%x\n", __FUNCTION__, reason));
		mboolset(rpcb->tp_tx_aggregation, reason);
	} else if (rpcb->tp_tx_aggregation) {
		RPC_TP_AGG(("%s: agg end 0x%x\n", __FUNCTION__, reason));
		mboolclr(rpcb->tp_tx_aggregation, reason);
		/* last reason gone: flush the partially built aggregate out to the bus */
		if (!rpcb->tp_tx_aggregation)
			bcm_rpc_tp_tx_agg_release(rpcb);
/* Set the per-aggregate limits: max subframes and max total bytes. */
bcm_rpc_tp_agg_limit_set(rpc_tp_info_t *rpc_th, uint8 sf, uint16 bytes)
	rpc_th->tp_tx_agg_sframes_limit = sf;
	rpc_th->tp_tx_agg_bytes_max = bytes;

/* Read back the current aggregation limits into *sf / *bytes. */
bcm_rpc_tp_agg_limit_get(rpc_tp_info_t *rpc_th, uint8 *sf, uint16 *bytes)
	*sf = rpc_th->tp_tx_agg_sframes_limit;
	*bytes = rpc_th->tp_tx_agg_bytes_max;
/* Reset the in-progress aggregate to empty (no chain, zero subframes/bytes).
 * Called at attach, after each release, and after a flush.
 */
bcm_rpc_tp_tx_agg_initstate(rpc_tp_info_t * rpcb)
	rpcb->tp_tx_agg_p = NULL;
	rpcb->tp_tx_agg_ptail = NULL;
	rpcb->tp_tx_agg_sframes = 0;
	rpcb->tp_tx_agg_bytes = 0;
/* Add buffer b to the current TX aggregate. If appending b would exceed the
 * byte or subframe limit, the pending aggregate is released (sent) first; if b
 * alone already reaches the byte limit it is released immediately after being
 * appended. NOTE(review): local declarations (pktlen, totlen, err, new_err),
 * braces and the return statement are elided in this extract.
 */
bcm_rpc_tp_tx_agg(rpc_tp_info_t *rpcb, rpc_buf_t *b)
	ASSERT(rpcb->tp_tx_aggregation);
	pktlen = pkttotlen(rpcb->osh, b);
	totlen = pktlen + rpcb->tp_tx_agg_bytes;
	/* would b push the aggregate over either limit? send what we have first */
	if ((totlen > rpcb->tp_tx_agg_bytes_max) ||
	    (rpcb->tp_tx_agg_sframes + 1 > rpcb->tp_tx_agg_sframes_limit)) {
		RPC_TP_AGG(("%s: terminte TP agg for txbyte %d or txframe %d\n", __FUNCTION__,
			rpcb->tp_tx_agg_bytes_max, rpcb->tp_tx_agg_sframes_limit));
		/* release current agg, continue with new agg */
		err = bcm_rpc_tp_tx_agg_release(rpcb);
	bcm_rpc_tp_tx_agg_append(rpcb, b);
	/* if the new frag is also already over the agg limit, release it */
	if (pktlen >= rpcb->tp_tx_agg_bytes_max) {
		new_err = bcm_rpc_tp_tx_agg_release(rpcb);
/*
 * tp_tx_agg_p points to the header lbuf, tp_tx_agg_ptail points to the tail lbuf
 *
 * The TP agg format typically will be below
 *   | TP header(len) | subframe1 rpc_header | subframe1 data |
 *   | TP header(len) | subframe2 rpc_header | subframe2 data |
 *   ...
 *   | TP header(len) | subframeN rpc_header | subframeN data |
 */
/* Append b to the aggregate chain and update the subframe/byte accounting.
 * If b is itself a multi-fragment chain, the tail pointer is advanced to b's
 * last fragment. NOTE(review): else branches and closing braces are elided in
 * this extract; the two "toc" comments below read as typos for "tof"/"to" --
 * they mark the set-tail-to-last-fragment step.
 */
bcm_rpc_tp_tx_agg_append(rpc_tp_info_t * rpcb, rpc_buf_t *b)
	tp_len = pkttotlen(rpcb->osh, b);
	if (rpcb->tp_tx_agg_p == NULL) {
		/* first buffer of a new aggregate: set tail to last fragment */
		if (PKTNEXT(rpcb->osh, b)) {
			rpcb->tp_tx_agg_p = b;
			rpcb->tp_tx_agg_ptail = pktlast(rpcb->osh, b);
		/* single-fragment case: head and tail are both b */
		rpcb->tp_tx_agg_p = rpcb->tp_tx_agg_ptail = b;
		/* chain the pkts at the end of current one */
		ASSERT(rpcb->tp_tx_agg_ptail != NULL);
		PKTSETNEXT(rpcb->osh, rpcb->tp_tx_agg_ptail, b);
		/* set tail to last fragment */
		if (PKTNEXT(rpcb->osh, b)) {
			rpcb->tp_tx_agg_ptail = pktlast(rpcb->osh, b);
		rpcb->tp_tx_agg_ptail = b;
	rpcb->tp_tx_agg_sframes++;
	rpcb->tp_tx_agg_bytes += tp_len;
	RPC_TP_AGG(("%s: tp_len %d tot %d, sframe %d\n", __FUNCTION__, tp_len,
		rpcb->tp_tx_agg_bytes, rpcb->tp_tx_agg_sframes));
/* Close the current aggregate and hand it to the bus via
 * bcm_rpc_tp_buf_send_internal(), then reset the aggregation state.
 * Returns the send status (declarations/return are elided in this extract).
 */
bcm_rpc_tp_tx_agg_release(rpc_tp_info_t * rpcb)
	/* no aggregation formed */
	if (rpcb->tp_tx_agg_p == NULL)
	RPC_TP_AGG(("%s: send %d, sframe %d\n", __FUNCTION__,
		rpcb->tp_tx_agg_bytes, rpcb->tp_tx_agg_sframes));
	b = rpcb->tp_tx_agg_p;
	/* lifetime statistics for the dump routine */
	rpcb->tp_tx_agg_cnt_chain++;
	rpcb->tp_tx_agg_cnt_sf += rpcb->tp_tx_agg_sframes;
	rpcb->tp_tx_agg_cnt_bytes += rpcb->tp_tx_agg_bytes;
	/* an aggregate of exactly one subframe counts as "no aggregation happened" */
	if (rpcb->tp_tx_agg_sframes == 1)
		rpcb->tp_tx_agg_cnt_noagg++;
	err = bcm_rpc_tp_buf_send_internal(rpcb, b);
	bcm_rpc_tp_tx_agg_initstate(rpcb);
/* Discard (free, not send) any partially built aggregate and reset state.
 * Used on the teardown path (bcm_rpc_tp_down).
 */
bcm_rpc_tp_tx_agg_flush(rpc_tp_info_t * rpcb)
	/* toss the chained buffer */
	if (rpcb->tp_tx_agg_p)
		bcm_rpc_tp_buf_free(rpcb, rpcb->tp_tx_agg_p);
	bcm_rpc_tp_tx_agg_initstate(rpcb);
/* DBUS tx-completion callback. Notifies the upper layer (if registered), frees
 * the tx packet, decrements the pending count, and wakes a sender blocked in
 * bcm_rpc_tp_buf_send_internal when the queue drops just below its depth.
 * NOTE(review): locking around bus_txpending and the error branch guarding the
 * final printf are elided in this extract.
 */
static void BCMFASTPATH
rpc_dbus_send_complete(void *handle, void *pktinfo, int status)
	rpc_tp_info_t *rpcb = (rpc_tp_info_t *)handle;
	/* tx_complete is for (BCM_RPC_NOCOPY || BCM_RPC_TXNOCOPPY) */
	if (rpcb->tx_complete)
		(rpcb->tx_complete)(rpcb->tx_context, pktinfo, status);
	bcm_rpc_tp_pktfree(rpcb, pktinfo, TRUE);
	rpcb->bus_txpending--;
	/* just transitioned from "full": wake the flow-controlled sender */
	if (rpcb->tx_flowctl && rpcb->bus_txpending == (rpcb->bus_txdepth - 1)) {
		RPC_OSL_WAKE(rpcb->rpc_osh);
	printf("%s: tx failed=%d\n", __FUNCTION__, status);
/* DBUS rx callback for whole packets: strip the TP encapsulation header and
 * pass the packet up to the RPC layer.
 */
static void BCMFASTPATH
rpc_dbus_recv_pkt(void *handle, void *pkt)
	rpc_tp_info_t *rpcb = handle;
	if ((rpcb == NULL) || (pkt == NULL))
	bcm_rpc_buf_pull(rpcb, pkt, BCM_RPC_TP_ENCAP_LEN);
	bcm_rpc_tp_rx(rpcb, pkt);
/* DBUS rx callback for a flat buffer that may contain several TP-encapsulated
 * subframes (host-side de-aggregation). Walks the buffer, validates each
 * subframe length, copies each subframe into a fresh packet and dispatches it
 * via bcm_rpc_tp_rx(), updating the deagg counters. NOTE(review): local
 * declarations (frag, rpc_len, pkt, agglen), early returns and several closing
 * braces are elided in this extract.
 */
static void BCMFASTPATH
rpc_dbus_recv_buf(void *handle, uint8 *buf, int len)
	rpc_tp_info_t *rpcb = handle;
	if ((rpcb == NULL) || (buf == NULL))
	/* remember subframe count so we can tell below whether this was a multifrag */
	frag = rpcb->tp_host_deagg_cnt_sf;
	/* TP pkt should have more than encapsulation header */
	if (len <= BCM_RPC_TP_ENCAP_LEN) {
		RPC_TP_ERR(("%s: wrong len %d\n", __FUNCTION__, len));
	while (len > BCM_RPC_TP_ENCAP_LEN) {
		/* TP header is a little-endian 32-bit subframe length */
		rpc_len = ltoh32_ua(buf);
		if (rpc_len > (uint32)(len - BCM_RPC_TP_ENCAP_LEN)) {
			rpcb->tp_host_deagg_cnt_badsflen++;
		/* RPC_BUFFER_RX: allocate */
#if defined(BCM_RPC_ROC)
		if ((pkt = PKTGET(rpcb->osh, rpc_len, FALSE)) == NULL) {
		if ((pkt = bcm_rpc_tp_pktget(rpcb, rpc_len, FALSE)) == NULL) {
			printf("%s: bcm_rpc_tp_pktget failed (len %d)\n", __FUNCTION__, len);
		/* RPC_BUFFER_RX: BYTE_COPY from dbus buffer */
		bcopy(buf + BCM_RPC_TP_ENCAP_LEN, bcm_rpc_buf_data(rpcb, pkt), rpc_len);
		bcm_rpc_tp_rx(rpcb, pkt);
		/* advance past header + payload to the next subframe */
		len -= (BCM_RPC_TP_ENCAP_LEN + rpc_len);
		buf += (BCM_RPC_TP_ENCAP_LEN + rpc_len);
		if (len > BCM_RPC_TP_ENCAP_LEN) { /* more frag */
			rpcb->tp_host_deagg_cnt_sf++;
			RPC_TP_DEAGG(("%s: deagg %d(remaining %d) bytes\n", __FUNCTION__,
	printf("%s: deagg, remaining len %d is not 0\n", __FUNCTION__, len);
	rpcb->tp_host_deagg_cnt_pass++;
	if (frag < rpcb->tp_host_deagg_cnt_sf) { /* aggregated frames */
		rpcb->tp_host_deagg_cnt_sf++; /* last one was not counted */
		rpcb->tp_host_deagg_cnt_chain++;
	rpcb->tp_host_deagg_cnt_bytes += agglen;
/* Post a receive for an RPC "return" message. Allocates a packet, parks it in
 * rx_rtn_pkt, and issues either a control-endpoint read or (when the device has
 * a second bulk-IN endpoint and BCMUSBDEV_EP_FOR_RPCRETURN is defined) a bulk
 * read. On failure the parked packet is freed and the dbus error is mapped to a
 * BCME_* code. NOTE(review): locking, early returns and several braces are
 * elided in this extract.
 */
bcm_rpc_tp_recv_rtn(rpc_tp_info_t *rpcb)
	if ((pkt = bcm_rpc_tp_pktget(rpcb, PKTBUFSZ, FALSE)) == NULL) {
		return BCME_NORESOURCE;
	/* only one return-path receive may be outstanding at a time */
	if (rpcb->rx_rtn_pkt != NULL) {
		bcm_rpc_tp_pktfree(rpcb, pkt, FALSE);
	rpcb->rx_rtn_pkt = pkt;
#ifndef BCMUSBDEV_EP_FOR_RPCRETURN
	status = dbus_recv_ctl(rpcb->bus, bcm_rpc_buf_data(rpcb, rpcb->rx_rtn_pkt), PKTBUFSZ);
	if (rpcb->has_2nd_bulk_in_ep) {
		status = dbus_recv_bulk(rpcb->bus, USBDEV_BULK_IN_EP2);
	status = dbus_recv_ctl(rpcb->bus, bcm_rpc_buf_data(rpcb, rpcb->rx_rtn_pkt),
#endif /* BCMUSBDEV_EP_FOR_RPCRETURN */
	/* May have been cleared by complete routine */
	pkt = rpcb->rx_rtn_pkt;
	rpcb->rx_rtn_pkt = NULL;
	bcm_rpc_tp_pktfree(rpcb, pkt, FALSE);
	/* map dbus errors onto the BCME_* namespace the caller expects */
	if (status == DBUS_ERR_RXFAIL)
		status = BCME_RXFAIL;
	else if (status == DBUS_ERR_NODEVICE)
		status = BCME_NODEVICE;
/* DBUS tx flow-control callback -- body elided in this extract. */
rpc_dbus_flowctrl_tx(void *handle, bool on)

/* DBUS error callback -- body elided in this extract. */
rpc_dbus_errhandler(void *handle, int err)

/* DBUS control-transfer completion: claims the parked rx_rtn_pkt, strips the
 * TP header and delivers it via the registered rx_pkt callback; drops the
 * packet with an error trace when no callback is registered.
 */
rpc_dbus_ctl_complete(void *handle, int type, int status)
	rpc_tp_info_t *rpcb = (rpc_tp_info_t *)handle;
	pkt = rpcb->rx_rtn_pkt;
	rpcb->rx_rtn_pkt = NULL;
	bcm_rpc_buf_pull(rpcb, pkt, BCM_RPC_TP_ENCAP_LEN);
	(rpcb->rx_pkt)(rpcb->rx_context, pkt);
	RPC_TP_ERR(("%s: no rpc rx ctl, dropping 0x%x\n", __FUNCTION__, status));
	bcm_rpc_tp_pktfree(rpcb, pkt, TRUE);

/* DBUS state-change callback: currently only traces the DOWN transition.
 * The in-source FIX comment indicates recovery handling is still open.
 */
rpc_dbus_state_change(void *handle, int state)
	rpc_tp_info_t *rpcb = handle;
	/* FIX: DBUS is down, need to do something? */
	if (state == DBUS_STATE_DOWN) {
		RPC_TP_ERR(("%s: DBUS is down\n", __FUNCTION__));
/* DBUS packet-allocation callback: thin wrapper over bcm_rpc_tp_pktget. */
rpc_dbus_pktget(void *handle, uint len, bool send)
	rpc_tp_info_t *rpcb = handle;
	if ((p = bcm_rpc_tp_pktget(rpcb, len, send)) == NULL) {

/* DBUS packet-free callback: thin wrapper over bcm_rpc_tp_pktfree. */
rpc_dbus_pktfree(void *handle, void *p, bool send)
	rpc_tp_info_t *rpcb = handle;
	if ((rpcb == NULL) || (p == NULL))
	bcm_rpc_tp_pktfree(rpcb, p, send);
/* Callback table handed to dbus_attach(). NOTE(review): several entries
 * (recv_pkt/recv_buf/pktget/pktfree/errhandler) are elided from this extract;
 * the visible entries follow the rpc_dbus_* handlers defined above.
 */
static dbus_callbacks_t rpc_dbus_cbs = {
	rpc_dbus_send_complete,
	rpc_dbus_flowctrl_tx,
	rpc_dbus_ctl_complete,
	rpc_dbus_state_change,
/* Construct the transport: allocate and zero the rpc_tp_info_t, init the
 * aggregation state and per-OS lock, attach DBUS with our callback table,
 * cache device attributes (MTU, second bulk-IN endpoint), set default agg
 * limits, and bring DBUS up. On any failure the partially built object is torn
 * down via bcm_rpc_tp_detach. NOTE(review): the two signatures below are the
 * two arms of an elided #ifdef (with/without shared_info_t), and error-branch
 * braces plus the dbus_up call itself are elided in this extract.
 */
bcm_rpc_tp_attach(osl_t * osh, void *bus)
bcm_rpc_tp_attach(osl_t * osh, shared_info_t *shared, void *bus)
	dbus_pub_t *dbus = NULL;
	dbus_attrib_t attrib;
	dbus_config_t config;
	rpcb = (rpc_tp_info_t *)MALLOC(osh, sizeof(rpc_tp_info_t));
	printf("%s: rpc_tp_info_t malloc failed\n", __FUNCTION__);
	memset(rpcb, 0, sizeof(rpc_tp_info_t));
	bcm_rpc_tp_tx_agg_initstate(rpcb);
	/* per-OS lock init (NDIS vs Linux; selecting #ifdef elided) */
	NdisAllocateSpinLock(&rpcb->lock);
	spin_lock_init(&rpcb->lock);
	/* FIX: Need to determine rx size and pass it here */
	dbus = (struct dbus_pub *)dbus_attach(osh, DBUS_RX_BUFFER_SIZE_RPC, BCM_RPC_TP_DBUS_NRXQ,
		BCM_RPC_TP_DBUS_NTXQ, rpcb /* info */, &rpc_dbus_cbs, &dbus_extdl, shared);
	printf("%s: dbus_attach failed\n", __FUNCTION__);
	rpcb->bus = (struct dbus_pub *)dbus;
	dbus_get_attrib(dbus, &attrib);
	rpcb->has_2nd_bulk_in_ep = attrib.has_2nd_bulk_in_ep;
	rpcb->bus_mtu = attrib.mtu;
	rpcb->bus_txdepth = BCM_RPC_TP_DBUS_NTXQ;
	config.rxctl_deferrespok = TRUE;
	dbus_set_config(dbus, &config);
	/* default aggregation limits; tunable later via bcm_rpc_tp_agg_limit_set */
	rpcb->tp_tx_agg_sframes_limit = BCM_RPC_TP_HOST_AGG_MAX_SFRAME;
	rpcb->tp_tx_agg_bytes_max = BCM_RPC_TP_HOST_AGG_MAX_BYTE;
	/* Bring DBUS up right away so RPC can start receiving */
	RPC_TP_ERR(("%s: dbus_up failed\n", __FUNCTION__));
	/* failure path: undo everything built so far */
	bcm_rpc_tp_detach(rpcb);
/* Tear down the transport: release the per-OS lock, detach DBUS if attached,
 * and free the rpc_tp_info_t itself.
 */
bcm_rpc_tp_detach(rpc_tp_info_t * rpcb)
	NdisFreeSpinLock(&rpcb->lock);
	if (rpcb->bus != NULL)
		dbus_detach(rpcb->bus);
	MFREE(rpcb->osh, rpcb, sizeof(rpc_tp_info_t));
/* Periodic maintenance hook: forces any stale partial aggregate out to the bus
 * and traces the subframe delta since the last tick ('old' is a static/local
 * elided in this extract).
 */
bcm_rpc_tp_watchdog(rpc_tp_info_t *rpcb)
	/* close agg periodically to avoid stale aggregation(include rpc_agg change) */
	bcm_rpc_tp_tx_agg_release(rpcb);
	RPC_TP_AGG(("agg delta %d\n", (rpcb->tp_tx_agg_cnt_sf - old)));
	old = rpcb->tp_tx_agg_cnt_sf;
/* Deliver a received packet to the registered rx callback. With no callback
 * registered the packet is dropped and freed -- via PKTFREE for the no-copy
 * build variants, via bcm_rpc_tp_pktfree otherwise (the #else/#endif of that
 * choice is elided in this extract).
 */
bcm_rpc_tp_rx(rpc_tp_info_t *rpcb, void *p)
	if (rpcb->rx_pkt == NULL) {
		printf("%s: no rpc rx fn, dropping\n", __FUNCTION__);
		/* RPC_BUFFER_RX: free if no callback */
#if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY)
		PKTFREE(rpcb->osh, p, FALSE);
		bcm_rpc_tp_pktfree(rpcb, p, FALSE);
	/* RPC_BUFFER_RX: free inside */
	(rpcb->rx_pkt)(rpcb->rx_context, p);
/* Register the upper-layer callbacks and contexts used by this transport:
 * tx completion, rx delivery, and the RPC OSL handle used for wait/wake.
 */
bcm_rpc_tp_register_cb(rpc_tp_info_t * rpcb,
	rpc_tx_complete_fn_t txcmplt, void* tx_context,
	rpc_rx_fn_t rxpkt, void* rx_context,
	rpcb->tx_complete = txcmplt;
	rpcb->tx_context = tx_context;
	rpcb->rx_pkt = rxpkt;
	rpcb->rx_context = rx_context;
	rpcb->rpc_osh = rpc_osh;

/* Clear all registered callbacks/contexts.
 * NOTE(review): rx_pkt is not visibly cleared here (the line is elided in this
 * extract) -- confirm against the full source.
 */
bcm_rpc_tp_deregister_cb(rpc_tp_info_t * rpcb)
	rpcb->tx_complete = NULL;
	rpcb->tx_context = NULL;
	rpcb->rx_context = NULL;
	rpcb->rpc_osh = NULL;
/* Prepend the TP encapsulation header: a little-endian 32-bit total payload
 * length pushed in front of the buffer data.
 */
bcm_rpc_tp_tx_encap(rpc_tp_info_t * rpcb, rpc_buf_t *b)
	rpc_len = pkttotlen(rpcb->osh, b);
	tp_lenp = (uint32 *)bcm_rpc_buf_push(rpcb, b, BCM_RPC_TP_ENCAP_LEN);
	*tp_lenp = htol32(rpc_len);
/* Public send entry point: encapsulate b, then either feed it into the TX
 * aggregation path (when any aggregation reason is set) or send it straight to
 * the bus, counting the bypass.
 */
bcm_rpc_tp_buf_send(rpc_tp_info_t * rpcb, rpc_buf_t *b)
	/* Add the TP encapsulation */
	bcm_rpc_tp_tx_encap(rpcb, b);
	/* if aggregation enabled use the agg path, otherwise send immediately */
	if (rpcb->tp_tx_aggregation) {
		err = bcm_rpc_tp_tx_agg(rpcb, b);
		rpcb->tp_tx_agg_cnt_pass++;
		err = bcm_rpc_tp_buf_send_internal(rpcb, b);
/* Low-level send: accounts the pending-tx histogram, applies tx flow control
 * (blocking the caller via RPC_OSL_WAIT while the dbus queue is full or an
 * override is set), pads USB packets whose rounded length is a multiple of 512
 * bytes so the device never sees an ambiguous zero-length-packet boundary, and
 * finally submits via dbus_send_pkt. NOTE(review): locking, several condition
 * lines (e.g. the first half of the non-agg ZLP 'if'), braces and the return
 * are elided in this extract.
 */
static int BCMFASTPATH
bcm_rpc_tp_buf_send_internal(rpc_tp_info_t * rpcb, rpc_buf_t *b)
	bool tx_flow_control;
	int timeout = RPC_BUS_SEND_WAIT_TIMEOUT_MSEC;
	bool timedout = FALSE;
	UNUSED_PARAMETER(pktlen);
	/* histogram of queue depth observed at submit time */
	rpcb->rpctp_dbus_hist[rpcb->bus_txpending]++;
	/* Increment before sending to avoid race condition */
	rpcb->bus_txpending++;
	tx_flow_control = (rpcb->bus_txpending >= rpcb->bus_txdepth);
	if (rpcb->tx_flowctl != tx_flow_control) {
		rpcb->tx_flowctl = tx_flow_control;
		RPC_TP_DBG(("%s, tx_flowctl change to %d pending %d\n", __FUNCTION__,
			rpcb->tx_flowctl, rpcb->bus_txpending));
		/* count only off->on transitions */
		rpcb->tx_flowctl_cnt += rpcb->tx_flowctl ? 1 : 0;
	if (rpcb->tx_flowctl_override) {
		timeout = RPC_BUS_SEND_WAIT_EXT_TIMEOUT_MSEC;
	/* block until the completion callback wakes us or the timeout fires */
	if (rpcb->tx_flowctl || rpcb->tx_flowctl_override) {
		err = RPC_OSL_WAIT(rpcb->rpc_osh, timeout, &timedout);
		printf("%s: RPC_OSL_WAIT error %d timeout %d(ms)\n", __FUNCTION__, err,
			RPC_BUS_SEND_WAIT_TIMEOUT_MSEC);
		/* wait failed: undo the optimistic increment */
		rpcb->bus_txpending--;
#if defined(BCMUSB) || defined(USBAP)
	if (rpcb->tp_tx_agg_bytes != 0) {
		/* aggregated chain: b must be the current aggregate head */
		ASSERT(rpcb->tp_tx_agg_p == b);
		ASSERT(rpcb->tp_tx_agg_ptail != NULL);
		/* Make sure pktlen is not multiple of 512 bytes even after possible dbus padding */
		if ((ROUNDUP(rpcb->tp_tx_agg_bytes, sizeof(uint32)) % BCM_RPC_TP_HOST_TOTALLEN_ZLP)
			uint32 *tp_lenp = (uint32 *)bcm_rpc_buf_data(rpcb, rpcb->tp_tx_agg_ptail);
			uint32 tp_len = ltoh32(*tp_lenp);
			pktlen = bcm_rpc_buf_len_get(rpcb, rpcb->tp_tx_agg_ptail);
			ASSERT(tp_len + BCM_RPC_TP_ENCAP_LEN == pktlen);
			RPC_TP_DBG(("%s, agg pkt is multiple of 512 bytes\n", __FUNCTION__));
			/* grow the tail subframe by the pad so the total is not 512-aligned */
			tp_len += BCM_RPC_TP_HOST_TOTALLEN_ZLP_PAD;
			pktlen += BCM_RPC_TP_HOST_TOTALLEN_ZLP_PAD;
			*tp_lenp = htol32(tp_len);
			bcm_rpc_buf_len_set(rpcb, rpcb->tp_tx_agg_ptail, pktlen);
	} else { /* not aggregated */
		pktlen = bcm_rpc_buf_len_get(rpcb, b);
		/* Make sure pktlen is not multiple of 512 bytes even after possible dbus padding */
		((ROUNDUP(pktlen, sizeof(uint32)) % BCM_RPC_TP_HOST_TOTALLEN_ZLP) == 0)) {
			uint32 *tp_lenp = (uint32 *)bcm_rpc_buf_data(rpcb, b);
			uint32 tp_len = ltoh32(*tp_lenp);
			ASSERT(tp_len + BCM_RPC_TP_ENCAP_LEN == pktlen);
			RPC_TP_DBG(("%s, nonagg pkt is multiple of 512 bytes\n", __FUNCTION__));
			tp_len += BCM_RPC_TP_HOST_TOTALLEN_ZLP_PAD;
			pktlen += BCM_RPC_TP_HOST_TOTALLEN_ZLP_PAD;
			*tp_lenp = htol32(tp_len);
			bcm_rpc_buf_len_set(rpcb, b, pktlen);
#endif /* defined(BCMUSB) || defined(USBAP) */
#ifdef EHCI_FASTPATH_TX
	/* With optimization, submit code is lockless, use RPC_TP_LOCK */
	err = dbus_send_pkt(rpcb->bus, b, b);
	err = dbus_send_pkt(rpcb->bus, b, b);
#endif /* EHCI_FASTPATH_TX */
	printf("%s: dbus_send_pkt failed\n", __FUNCTION__);
	/* submit failed: undo the optimistic increment */
	rpcb->bus_txpending--;
/* Buffer manipulation */

/* Size of the TP encapsulation header prepended to every frame. */
bcm_rpc_buf_tp_header_len(rpc_tp_info_t * rpc_th)
	return BCM_RPC_TP_ENCAP_LEN;

/* external pkt allocation, with extra BCM_RPC_TP_ENCAP_LEN */
/* Allocates len + header room, then pulls past the headroom so callers see a
 * buffer positioned at the payload; the headroom is reclaimed later by
 * bcm_rpc_buf_push in the encap path.
 */
bcm_rpc_tp_buf_alloc(rpc_tp_info_t * rpcb, int len)
	size_t tp_len = len + BCM_RPC_TP_ENCAP_LEN + BCM_RPC_BUS_HDR_LEN;
	b = bcm_rpc_tp_pktget(rpcb, tp_len, TRUE);
	PKTPULL(rpcb->osh, b, BCM_RPC_TP_ENCAP_LEN + BCM_RPC_BUS_HDR_LEN);

/* Public free: counterpart of bcm_rpc_tp_buf_alloc (send-side pool). */
bcm_rpc_tp_buf_free(rpc_tp_info_t * rpcb, rpc_buf_t *b)
	bcm_rpc_tp_pktfree(rpcb, b, TRUE);
/* internal pkt allocation, no BCM_RPC_TP_ENCAP_LEN */
/* Allocate a packet from the appropriate pool. The visible fragments cover
 * several elided build variants: shared-lbuf pools (tx vs rx free lists),
 * PKTGET, and raw dev_alloc_skb with CTF-flag clearing. On success the in-use
 * count is bumped and rx flow control is asserted once buf_cnt_inuse reaches
 * RPCRX_WM_HI; on failure a diagnostic is printed. NOTE(review): the #ifdef
 * ladder, locking and the success/failure branch structure are elided here.
 */
bcm_rpc_tp_pktget(rpc_tp_info_t * rpcb, int len, bool send)
	lb = shared_lb_get(rpcb->sh, &rpcb->sh->txfree);
	lb = shared_lb_get(rpcb->sh, &rpcb->sh->rxfree);
	skb = PKTGET(rpcb->osh, len, FALSE);
	if ((skb = dev_alloc_skb(len))) {
#endif /* defined(CTFPOOL) */
	/* Clear the ctf buf flag to allow full dma map */
	PKTCLRCTF(rpcb->osh, skb);
	CTFMAPPTR(rpcb->osh, skb) = NULL;
	/* high watermark reached: push back on the bus rx path */
	if (!rpcb->rxflowctrl && (rpcb->buf_cnt_inuse >= RPCRX_WM_HI)) {
		rpcb->rxflowctrl = TRUE;
		RPC_TP_ERR(("%s, rxflowctrl change to %d\n", __FUNCTION__,
	dbus_flowctrl_rx(rpcb->bus, TRUE);
	rpcb->buf_cnt_inuse++;
	if (rpcb->buf_cnt_inuse > (int)rpcb->buf_cnt_max)
		rpcb->buf_cnt_max = rpcb->buf_cnt_inuse;
	printf("%s: buf alloc failed buf_cnt_inuse %d rxflowctrl:%d\n",
		__FUNCTION__, rpcb->buf_cnt_inuse, rpcb->rxflowctrl);
/* Free a packet (possibly a chain) back to its pool, decrement the in-use
 * count by the number of fragments freed, and release rx flow control once
 * buf_cnt_inuse drops below RPCRX_WM_LO. The fragments below span several
 * elided build variants: shared-lbuf put, PKTFREE, and direct sk_buff freeing
 * (dev_kfree_skb_any when a destructor is present, since plain kfree_skb is
 * unsafe on hard IRQ). NOTE(review): the chain-walk setup ('next', free_cnt),
 * #ifdef ladder and locking are elided in this extract.
 */
static void BCMFASTPATH
bcm_rpc_tp_pktfree(rpc_tp_info_t * rpcb, rpc_buf_t *b, bool send)
	struct lbuf *lb = (struct lbuf *)b;
	ASSERT(lb->p == NULL);
	shared_lb_put(rpcb->sh, lb->l, lb);
	struct sk_buff *skb = (struct sk_buff *)b, *next;
	while (next != NULL) {
		PKTFREE(rpcb->osh, skb, FALSE);
		if (skb->destructor) {
			/* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if destructor exists */
			dev_kfree_skb_any(skb);
		/* can free immediately (even in_irq()) if destructor does not exist */
#endif /* defined(CTFPOOL) */
	rpcb->buf_cnt_inuse -= free_cnt;
	/* low watermark: re-open the bus rx path */
	if (rpcb->rxflowctrl && (rpcb->buf_cnt_inuse < RPCRX_WM_LO)) {
		rpcb->rxflowctrl = FALSE;
		RPC_TP_ERR(("%s, rxflowctrl change to %d\n", __FUNCTION__, rpcb->rxflowctrl));
		dbus_flowctrl_rx(rpcb->bus, FALSE);
/* Quiesce the transport: discard any partial aggregate, then take DBUS down. */
bcm_rpc_tp_down(rpc_tp_info_t *rpcb)
	bcm_rpc_tp_tx_agg_flush(rpcb);
	dbus_down(rpcb->bus);
/* Thin accessor wrappers mapping rpc_buf_t operations onto the PKT* OSL macros.
 * Note the asymmetry: data/len/push/pull operate on the fragment chain
 * (PKTNEXT), while next_get/next_set use PKTLINK, i.e. the inter-packet link.
 */
bcm_rpc_buf_len_get(rpc_tp_info_t * rpcb, rpc_buf_t * b)
	return PKTLEN(rpcb->osh, b);

bcm_rpc_buf_len_set(rpc_tp_info_t * rpcb, rpc_buf_t * b, uint len)
	PKTSETLEN(rpcb->osh, b, len);

bcm_rpc_buf_data(rpc_tp_info_t * rpcb, rpc_buf_t * b)
	return PKTDATA(rpcb->osh, b);

bcm_rpc_buf_push(rpc_tp_info_t * rpcb, rpc_buf_t * b, uint bytes)
	return PKTPUSH(rpcb->osh, b, bytes);

unsigned char* BCMFASTPATH
bcm_rpc_buf_pull(rpc_tp_info_t * rpcb, rpc_buf_t * b, uint bytes)
	return PKTPULL(rpcb->osh, b, bytes);

bcm_rpc_buf_next_get(rpc_tp_info_t * rpcb, rpc_buf_t * b)
	return (rpc_buf_t *)PKTLINK(b);

bcm_rpc_buf_next_set(rpc_tp_info_t * rpcb, rpc_buf_t * b, rpc_buf_t *nextb)
	PKTSETLINK(b, nextb);
#if defined(WLC_HIGH) && defined(BCMDBG)
/* Debug dump: formats buffer accounting, flow-control state, host agg/deagg
 * statistics (guarding the per-chain averages against divide-by-zero) and the
 * dbus pending-packet histogram into bcmstrbuf b, then appends the dbus-level
 * histogram. NOTE(review): the matching RPC_TP_LOCK and the loop/if closing
 * braces are elided in this extract.
 */
bcm_rpc_tp_dump(rpc_tp_info_t *rpcb, struct bcmstrbuf *b)
	bcm_bprintf(b, "\nRPC_TP_DBUS:\n");
	bcm_bprintf(b, "bufalloc %d(buf_inuse %d, max %d) tx %d(txerr %d) rx %d(rxdrop %d)\n",
		rpcb->bufalloc, rpcb->buf_cnt_inuse, rpcb->buf_cnt_max, rpcb->tx_cnt,
		rpcb->txerr_cnt, rpcb->rx_cnt, rpcb->rxdrop_cnt);
	bcm_bprintf(b, "mtu %d depth %d pending %d tx_flowctrl_cnt %d, rxflowctl %d\n",
		rpcb->bus_mtu, rpcb->bus_txdepth, rpcb->bus_txpending, rpcb->tx_flowctl_cnt,
	bcm_bprintf(b, "tp_host_deagg chain %d subframes %d bytes %d badsflen %d passthrough %d\n",
		rpcb->tp_host_deagg_cnt_chain, rpcb->tp_host_deagg_cnt_sf,
		rpcb->tp_host_deagg_cnt_bytes,
		rpcb->tp_host_deagg_cnt_badsflen, rpcb->tp_host_deagg_cnt_pass);
	bcm_bprintf(b, "tp_host_deagg sf/chain %d bytes/chain %d \n",
		(rpcb->tp_host_deagg_cnt_chain == 0) ?
		0 : rpcb->tp_host_deagg_cnt_sf/rpcb->tp_host_deagg_cnt_chain,
		(rpcb->tp_host_deagg_cnt_chain == 0) ?
		0 : rpcb->tp_host_deagg_cnt_bytes/rpcb->tp_host_deagg_cnt_chain);
	bcm_bprintf(b, "\n");
	bcm_bprintf(b, "tp_host_agg sf_limit %d bytes_limit %d\n",
		rpcb->tp_tx_agg_sframes_limit, rpcb->tp_tx_agg_bytes_max);
	bcm_bprintf(b, "tp_host_agg: chain %d, sf %d, bytes %d, non-agg-frame %d bypass %d\n",
		rpcb->tp_tx_agg_cnt_chain, rpcb->tp_tx_agg_cnt_sf, rpcb->tp_tx_agg_cnt_bytes,
		rpcb->tp_tx_agg_cnt_noagg, rpcb->tp_tx_agg_cnt_pass);
	bcm_bprintf(b, "tp_host_agg: sf/chain %d, bytes/chain %d\n",
		(rpcb->tp_tx_agg_cnt_chain == 0) ?
		0 : rpcb->tp_tx_agg_cnt_sf/rpcb->tp_tx_agg_cnt_chain,
		(rpcb->tp_tx_agg_cnt_chain == 0) ?
		0 : rpcb->tp_tx_agg_cnt_bytes/rpcb->tp_tx_agg_cnt_chain);
	bcm_bprintf(b, "\nRPC TP histogram\n");
	for (i = 0; i < BCM_RPC_TP_DBUS_NTXQ; i++) {
		if (rpcb->rpctp_dbus_hist[i]) {
			bcm_bprintf(b, "%d: %d ", i, rpcb->rpctp_dbus_hist[i]);
	bcm_bprintf(b, "\n");
	bcm_bprintf(b, "\n");
	RPC_TP_UNLOCK(rpcb);
	dbus_hist_dump(rpcb->bus, b);
/* Power-management / lifecycle pass-throughs to the dbus layer. */
bcm_rpc_tp_sleep(rpc_tp_info_t *rpcb)
	dbus_pnp_sleep(rpcb->bus);

bcm_rpc_tp_resume(rpc_tp_info_t *rpcb, int *fw_reload)
	return dbus_pnp_resume(rpcb->bus, fw_reload);

bcm_rpc_tp_shutdown(rpc_tp_info_t *rpcb)
	return dbus_shutdown(rpcb->bus);

/* Surprise removal: tell dbus the device disappeared without a clean detach. */
bcm_rpc_tp_surp_remove(rpc_tp_info_t * rpcb)
	dbus_pnp_disconnect(rpcb->bus);

/* Query whether tx flow control is currently asserted. */
bcm_rpc_tp_tx_flowctl_get(rpc_tp_info_t *rpc_th)
	return rpc_th->tx_flowctl;

bcm_rpc_tp_get_device_speed(rpc_tp_info_t *rpc_th)
	return dbus_get_device_speed(rpc_th->bus);

/* Set the host-side trace level. Only the host (high_low == TRUE) direction is
 * supported here, enforced by the ASSERT.
 */
bcm_rpc_tp_msglevel_set(rpc_tp_info_t *rpc_th, uint8 msglevel, bool high_low)
	ASSERT(high_low == TRUE);
	tp_level_host = msglevel;
/* Report the dongle USB vendor/product IDs via the dbus attributes. Outputs
 * are only written when both the transport and its bus handle are non-NULL;
 * callers must not rely on *dnglvid/*dnglpid otherwise.
 */
bcm_rpc_tp_get_vidpid(rpc_tp_info_t *rpc_th, uint16 *dnglvid, uint16 *dnglpid)
	dbus_attrib_t attrib;
	if (rpc_th && rpc_th->bus) {
		dbus_get_attrib(rpc_th->bus, &attrib);
		*dnglvid = (uint16) attrib.vid;
		*dnglpid = (uint16) attrib.pid;
1163 bcm_rpc_tp_get_devinfo(rpc_tp_info_t
*rpc_th
)
1165 if (rpc_th
&& rpc_th
->bus
) {
1166 return dbus_get_devinfo(rpc_th
->bus
);