2 * RPC layer. It links to bus layer with transport layer(bus dependent)
3 * Broadcom 802.11abg Networking Device Driver
5 * Copyright (C) 2010, Broadcom Corporation
8 * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom Corporation;
9 * the contents of this file may not be disclosed to third parties, copied
10 * or duplicated in any form, in whole or in part, without the prior
11 * written permission of Broadcom Corporation.
13 * $Id: bcm_rpc.c,v 1.80.2.30 2010-12-24 23:35:24 Exp $
19 #include <bcmendian.h>
23 #include <bcm_rpc_tp.h>
28 #if (!defined(WLC_HIGH) && !defined(WLC_LOW))
31 #if defined(WLC_HIGH) && defined(WLC_LOW)
35 /* RPC may use OS APIs directly to avoid overloading osl.h
36 * HIGH_ONLY supports NDIS, LINUX, and MACOSX so far. can be ported to other OS if needed
39 #if !defined(NDIS) && !defined(linux) && !defined(MACOSX)
40 #error "RPC only supports NDIS, LINUX, MACOSX in HIGH driver"
44 #if !defined(_HNDRTE_)
45 #error "RPC only supports HNDRTE in LOW driver"
49 /* use local flag BCMDBG_RPC so that it can be turned on without global BCMDBG */
57 /* #define BCMDBG_RPC */
59 static uint32 rpc_msg_level
= RPC_ERROR_VAL
;
60 /* Print error messages even for non-debug drivers
61 * NOTE: RPC_PKTLOG_VAL can be added in bcm_rpc_pktlog_init()
64 /* osl_msg_level is a bitvector with defs in wlioctl.h */
65 #define RPC_ERR(args) do {if (rpc_msg_level & RPC_ERROR_VAL) printf args;} while (0)
68 #define RPC_TRACE(args) do {if (rpc_msg_level & RPC_TRACE_VAL) printf args;} while (0)
69 #define RPC_PKTTRACE_ON() (rpc_msg_level & RPC_PKTTRACE_VAL)
72 #define RPC_TRACE(args) do {if (rpc_msg_level & RPC_TRACE_VAL) printf args;} while (0)
73 #define RPC_PKTTRACE_ON() (FALSE)
74 #define prhex(a, b, c) do { } while (0) /* prhex is not defined under */
75 #define RPC_PKTLOG_ON() (FALSE)
77 #define RPC_TRACE(args)
78 #define RPC_PKTTRACE_ON() (FALSE)
79 #define RPC_PKTLOG_ON() (FALSE)
80 #define prhex(a, b, c) do { } while (0) /* prhex is not defined under */
81 #endif /* BCMDBG_ERR */
82 #endif /* BCMDBG_RPC */
85 #define RPC_PKTLOG_ON() (rpc_msg_level & RPC_PKTLOG_VAL)
87 #define RPC_PKTLOG_ON() (FALSE)
88 #endif /* BCMDBG_RPC */
90 #ifndef BCM_RPC_REORDER_LIMIT
92 #define BCM_RPC_REORDER_LIMIT 40 /* limit to toss hole to avoid overflow reorder queue */
94 #define BCM_RPC_REORDER_LIMIT 30
96 #endif /* BCM_RPC_REORDER_LIMIT */
98 /* OS specific files for locks */
99 #define RPC_INIT_WAIT_TIMEOUT_MSEC 2000
100 #ifndef RPC_RETURN_WAIT_TIMEOUT_MSEC
101 #if defined(NDIS) && !defined(SDIO_BMAC)
102 #define RPC_RETURN_WAIT_TIMEOUT_MSEC 800 /* NDIS OIDs timeout in 1 second.
103 * This timeout needs to be smaller than that
105 #elif defined(linux) || defined(SDIO_BMAC)
106 #define RPC_RETURN_WAIT_TIMEOUT_MSEC 3200
107 #elif defined(MACOSX)
108 #define RPC_RETURN_WAIT_TIMEOUT_MSEC 800 /* guess at a reasonable turnaround time */
110 #endif /* RPC_RETURN_WAIT_TIMEOUT_MSEC */
112 /* RPC Frame formats */
113 /* |--------------||-------------|
114 * RPC Header RPC Payload
117 * |-------|--------|----------------|
119 * Type Session Transaction ID
125 * Data and Return RPC payload is RPC all dependent
127 * Management frame formats:
128 * |--------|--------|--------|--------|
130 * Header Action Version Reason
132 * Version is included only for following actions:
139 * Reason sent only by BMAC for following actions:
144 typedef uint32 rpc_header_t
;
146 #define RPC_HDR_LEN sizeof(rpc_header_t)
147 #define RPC_ACN_LEN sizeof(uint32)
148 #define RPC_VER_LEN sizeof(EPI_VERSION_NUM)
149 #define RPC_RC_LEN sizeof(uint32)
150 #define RPC_CHIPID_LEN sizeof(uint32)
152 #define RPC_HDR_TYPE(_rpch) (((_rpch) >> 24) & 0xff)
153 #define RPC_HDR_SESSION(_rpch) (((_rpch) >> 16) & 0xff)
154 #define RPC_HDR_XACTION(_rpch) ((_rpch) & 0xffff) /* When the type is data or return */
156 #define NAME_ENTRY(x) #x
158 /* RPC Header defines -- attached to every RPC call */
160 RPC_TYPE_UNKNOWN
, /* Unknown header type */
161 RPC_TYPE_DATA
, /* RPC call that go straight through */
162 RPC_TYPE_RTN
, /* RPC calls that are synchronous */
163 RPC_TYPE_MGN
, /* RPC state management */
173 /* Management actions */
177 RPC_CONNECT
, /* Master (high) to slave (low). Slave to copy current
178 * session id and transaction id (mostly 0)
180 RPC_CONNECT_ACK
, /* Ack from LOW_RPC */
181 RPC_DOWN
, /* Down the other-end. The actual action is
184 RPC_CONNECT_NACK
, /* Nack from LOW_RPC. This indicates potentially that
185 * dongle could already be running
187 RPC_RESET
/* Resync using other end's session id (mostly HIGH->LOW)
188 * Also, reset the oe_trans, and trans to 0
204 #define HDR_STATE_MISMATCH 0x1
205 #define HDR_SESSION_MISMATCH 0x2
206 #define HDR_XACTION_MISMATCH 0x4
209 #define RPC_PKTLOG_DATASIZE 4
213 uint32 data
[RPC_PKTLOG_DATASIZE
]; /* First few bytes of the payload only */
215 #endif /* BCMDBG_RPC */
218 static void bcm_rpc_dump_state(uint32 arg
, uint argc
, char *argv
[]);
220 static void bcm_rpc_fatal_dump(void *arg
);
224 static void _bcm_rpc_dump_pktlog(rpc_info_t
*rpci
);
226 static void bcm_rpc_dump_pktlog_high(rpc_info_t
*rpci
);
228 static void bcm_rpc_dump_pktlog_low(uint32 arg
, uint argc
, char *argv
[]);
230 #endif /* BCMDBG_RPC */
233 /* This lock is needed to handle the Receive Re-order queue that guarantees
234 * in-order receive as it was observed that in NDIS at least, USB subsystem does
238 #define RPC_RO_LOCK(ri) NdisAcquireSpinLock(&(ri)->reorder_lock)
239 #define RPC_RO_UNLOCK(ri) NdisReleaseSpinLock(&(ri)->reorder_lock)
241 #define RPC_RO_LOCK(ri) spin_lock_irqsave(&(ri)->reorder_lock, (ri)->reorder_flags);
242 #define RPC_RO_UNLOCK(ri) spin_unlock_irqrestore(&(ri)->reorder_lock, (ri)->reorder_flags);
243 #elif defined(MACOSX)
244 #define RPC_RO_LOCK(ri) do { } while (0)
245 #define RPC_RO_UNLOCK(ri) do { } while (0)
248 #define RPC_RO_LOCK(ri) do { } while (0)
249 #define RPC_RO_UNLOCK(ri) do { } while (0)
250 #endif /* WLC_HIGH */
253 void *pdev
; /* Per-port driver handle for rx callback */
254 struct rpc_transport_info
*rpc_th
; /* transport layer handle */
257 rpc_dispatch_cb_t dispatchcb
; /* callback when data is received */
258 void *ctx
; /* Callback context */
260 rpc_down_cb_t dncb
; /* callback when RPC goes down */
261 void *dnctx
; /* Callback context */
263 rpc_resync_cb_t resync_cb
; /* callback when host reenabled and dongle
264 * was not rebooted. Uses dnctx
266 rpc_txdone_cb_t txdone_cb
; /* when non-null, called when a tx has completed. */
267 uint8 rpc_tp_hdr_len
; /* header len for rpc and tp layer */
269 uint8 session
; /* 255 sessions enough ? */
270 uint16 trans
; /* More than 255 can't be pending */
271 uint16 oe_trans
; /* OtherEnd tran id, dongle->host */
272 uint16 rtn_trans
; /* BMAC: callreturn Id dongle->host */
273 uint16 oe_rtn_trans
; /* HIGH: received BMAC callreturn id */
275 rpc_buf_t
*rtn_rpcbuf
; /* RPC ID for return transaction */
278 uint reset
; /* # of resets */
279 uint cnt_xidooo
; /* transactionID out of order */
280 uint cnt_rx_drop_hole
; /* number of rcp calls dropped due to reorder overflow */
281 uint cnt_reorder_overflow
; /* number of time the reorder queue overflowed,
292 struct rpc_pktlog
*send_log
;
293 uint16 send_log_idx
; /* Point to the next slot to fill-in */
294 uint16 send_log_num
; /* Number of entries */
296 struct rpc_pktlog
*recv_log
;
297 uint16 recv_log_idx
; /* Point to the next slot to fill-in */
298 uint16 recv_log_num
; /* Number of entries */
299 #endif /* BCMDBG_RPC */
303 NDIS_SPIN_LOCK reorder_lock
; /* TO RAISE the IRQ */
304 bool reorder_lock_alloced
;
305 bool down_oe_pending
;
308 spinlock_t reorder_lock
;
311 #endif /* WLC_HIGH */
312 /* Protect against rx reordering */
313 rpc_buf_t
*reorder_pktq
;
315 uint reorder_depth_max
;
320 static void bcm_rpc_tx_complete(void *ctx
, rpc_buf_t
*buf
, int status
);
321 static void bcm_rpc_buf_recv(void *context
, rpc_buf_t
*);
322 static void bcm_rpc_process_reorder_queue(rpc_info_t
*rpci
);
323 static bool bcm_rpc_buf_recv_inorder(rpc_info_t
*rpci
, rpc_buf_t
*rpc_buf
, mbool hdr_invalid
);
326 static rpc_buf_t
*bcm_rpc_buf_recv_high(struct rpc_info
*rpci
, rpc_type_t type
, rpc_acn_t acn
,
328 static int bcm_rpc_resume_oe(struct rpc_info
*rpci
);
330 static int bcm_rpc_hello(rpc_info_t
*rpci
);
333 static rpc_buf_t
*bcm_rpc_buf_recv_low(struct rpc_info
*rpci
, rpc_header_t header
,
334 rpc_acn_t acn
, rpc_buf_t
*rpc_buf
);
335 #endif /* WLC_HIGH */
336 static int bcm_rpc_up(rpc_info_t
*rpci
);
337 static uint16
bcm_rpc_reorder_next_xid(struct rpc_info
*rpci
);
341 static void bcm_rpc_pktlog_init(rpc_info_t
*rpci
);
342 static void bcm_rpc_pktlog_deinit(rpc_info_t
*rpci
);
343 static struct rpc_pktlog
*bcm_rpc_prep_entry(struct rpc_info
* rpci
, rpc_buf_t
*b
,
344 struct rpc_pktlog
*cur
, bool tx
);
345 static void bcm_rpc_add_entry_tx(struct rpc_info
* rpci
, struct rpc_pktlog
*cur
);
346 static void bcm_rpc_add_entry_rx(struct rpc_info
* rpci
, struct rpc_pktlog
*cur
);
347 #endif /* BCMDBG_RPC */
350 /* Header and component retrieval functions */
/* Read the 32-bit RPC header word from the start of rpc_buf's data and
 * convert it from little-endian (wire order) to host byte order.
 * NOTE(review): the original function braces are missing from this extracted
 * view (source lines 353/356); code below is kept byte-identical.
 */
351 static INLINE rpc_header_t
352 bcm_rpc_header(struct rpc_info
*rpci
, rpc_buf_t
*rpc_buf
)
354 rpc_header_t
*rpch
= (rpc_header_t
*)bcm_rpc_buf_data(rpci
->rpc_th
, rpc_buf
);
355 return ltoh32(*rpch
);
/* Read the management action code from the current data pointer of rpc_buf
 * (little-endian on the wire) and return it as an rpc_acn_t.
 * NOTE(review): function braces are missing from this extracted view;
 * code below is kept byte-identical.
 */
358 static INLINE rpc_acn_t
359 bcm_rpc_mgn_acn(struct rpc_info
*rpci
, rpc_buf_t
*rpc_buf
)
361 rpc_header_t
*rpch
= (rpc_header_t
*)bcm_rpc_buf_data(rpci
->rpc_th
, rpc_buf
);
363 return (rpc_acn_t
)ltoh32(*rpch
);
/* Read a 32-bit version word at the current data pointer of rpc_buf.
 * The caller is expected to have advanced the buffer past the RPC header
 * (and action word) first — TODO confirm against callers.
 * NOTE(review): the return-type line and braces are missing from this
 * extracted view; code below is kept byte-identical.
 */
367 bcm_rpc_mgn_ver(struct rpc_info
*rpci
, rpc_buf_t
*rpc_buf
)
369 rpc_header_t
*rpch
= (rpc_header_t
*)bcm_rpc_buf_data(rpci
->rpc_th
, rpc_buf
);
371 return ltoh32(*rpch
);
/* Read the management reason/return code at the current data pointer of
 * rpc_buf (little-endian on the wire) and return it as an rpc_rc_t.
 * NOTE(review): function braces are missing from this extracted view;
 * code below is kept byte-identical.
 */
374 static INLINE rpc_rc_t
375 bcm_rpc_mgn_reason(struct rpc_info
*rpci
, rpc_buf_t
*rpc_buf
)
377 rpc_header_t
*rpch
= (rpc_header_t
*)bcm_rpc_buf_data(rpci
->rpc_th
, rpc_buf
);
378 return (rpc_rc_t
)ltoh32(*rpch
);
/* Read the 32-bit chip id word at the current data pointer of rpc_buf
 * (little-endian on the wire).
 * NOTE(review): the return-type line and braces are missing from this
 * extracted view; code below is kept byte-identical.
 */
383 bcm_rpc_mgn_chipid(struct rpc_info
*rpci
, rpc_buf_t
*rpc_buf
)
385 rpc_header_t
*rpch
= (rpc_header_t
*)bcm_rpc_buf_data(rpci
->rpc_th
, rpc_buf
);
387 return ltoh32(*rpch
);
389 #endif /* WLC_HIGH */
/* Validate the transaction id carried in 'header'. Extracts the type and
 * the transaction id (returned through *xaction). For frames that are not
 * management and not return-type, the id must equal the expected other-end
 * id rpci->oe_trans; otherwise HDR_XACTION_MISMATCH is returned and an
 * error is logged.
 * NOTE(review): several original lines (return type, braces, the success
 * return path, and some #if conditionals) are missing from this extracted
 * view; code below is kept byte-identical.
 */
392 bcm_rpc_hdr_xaction_validate(struct rpc_info
*rpci
, rpc_header_t header
, uint32
*xaction
,
397 type
= RPC_HDR_TYPE(header
);
398 *xaction
= RPC_HDR_XACTION(header
);
399 /* High driver does not check the return transaction to be in order */
400 if (type
!= RPC_TYPE_MGN
&&
402 type
!= RPC_TYPE_RTN
&&
404 *xaction
!= rpci
->oe_trans
) {
407 RPC_ERR(("Transaction mismatch: expected:0x%x got:0x%x type: %d\n",
408 rpci
->oe_trans
, *xaction
, type
));
411 return HDR_XACTION_MISMATCH
;
/* Validate the session byte in 'header'. Management frames are exempt from
 * the check; for all other types the session must match rpci->session or
 * HDR_SESSION_MISMATCH is returned.
 * NOTE(review): return type, braces and the success return path are missing
 * from this extracted view; code below is kept byte-identical.
 */
418 bcm_rpc_hdr_session_validate(struct rpc_info
*rpci
, rpc_header_t header
)
421 if (RPC_HDR_TYPE(header
) == RPC_TYPE_MGN
)
425 if (rpci
->session
!= RPC_HDR_SESSION(header
))
426 return HDR_SESSION_MISMATCH
;
/* Validate the header type against the current RPC connection state:
 * reject unknown / out-of-range types, allow everything while ASLEEP
 * (transition time), and allow only management frames before the link
 * reaches ESTABLISHED.
 * NOTE(review): return type, braces, the ASLEEP early-return body and the
 * RPC_ERR argument line are missing from this extracted view; code below
 * is kept byte-identical.
 */
431 bcm_rpc_hdr_state_validate(struct rpc_info
*rpci
, rpc_header_t header
)
433 uint type
= RPC_HDR_TYPE(header
);
435 if ((type
== RPC_TYPE_UNKNOWN
) || (type
> RPC_TYPE_MGN
))
436 return HDR_STATE_MISMATCH
;
438 /* Everything allowed during this transition time */
439 if (rpci
->state
== ASLEEP
)
442 /* Only management frames allowed before ESTABLISHED state */
443 if ((rpci
->state
!= ESTABLISHED
) && (type
!= RPC_TYPE_MGN
)) {
444 RPC_ERR(("bcm_rpc_header_validate: State mismatch: state:%d type:%d\n",
446 return HDR_STATE_MISMATCH
;
/* Run all three header validators (state, transaction, session) on the
 * header word of rpc_buf and accumulate their HDR_*_MISMATCH results into
 * an mbool bitmask via mboolset(). The transaction id is returned through
 * *xaction.
 * NOTE(review): return type, the trailing 'verbose' parameter line, braces,
 * the declaration of 'ret' and the final return are missing from this
 * extracted view; code below is kept byte-identical.
 */
453 bcm_rpc_hdr_validate(struct rpc_info
*rpci
, rpc_buf_t
*rpc_buf
, uint32
*xaction
,
456 /* First the state against the type */
458 rpc_header_t header
= bcm_rpc_header(rpci
, rpc_buf
);
460 mboolset(ret
, bcm_rpc_hdr_state_validate(rpci
, header
));
461 mboolset(ret
, bcm_rpc_hdr_xaction_validate(rpci
, header
, xaction
, verbose
));
462 mboolset(ret
, bcm_rpc_hdr_session_validate(rpci
, header
));
468 BCMATTACHFN(bcm_rpc_attach
)(void *pdev
, osl_t
*osh
, struct rpc_transport_info
*rpc_th
,
471 struct rpc_info
*rpci
;
474 UNUSED_PARAMETER(devid
);
475 #endif /* WLC_HIGH */
477 if ((rpci
= (struct rpc_info
*)MALLOC(osh
, sizeof(struct rpc_info
))) == NULL
)
480 bzero(rpci
, sizeof(struct rpc_info
));
484 rpci
->rpc_th
= rpc_th
;
485 rpci
->session
= 0x69;
487 /* initialize lock and queue */
488 rpci
->rpc_osh
= rpc_osl_attach(osh
);
490 if (rpci
->rpc_osh
== NULL
) {
491 RPC_ERR(("bcm_rpc_attach: osl attach failed\n"));
495 bcm_rpc_tp_register_cb(rpc_th
, bcm_rpc_tx_complete
, rpci
,
496 bcm_rpc_buf_recv
, rpci
, rpci
->rpc_osh
);
498 rpci
->version
= EPI_VERSION_NUM
;
500 rpci
->rpc_tp_hdr_len
= RPC_HDR_LEN
+ bcm_rpc_buf_tp_header_len(rpci
->rpc_th
);
502 #if defined(WLC_HIGH) && defined(NDIS)
506 if (bcm_rpc_up(rpci
)) {
507 RPC_ERR(("bcm_rpc_attach: rpc_up failed\n"));
512 *devid
= (uint16
)rpci
->chipid
;
517 bcm_rpc_detach(rpci
);
522 bcm_rpc_reorder_next_xid(struct rpc_info
*rpci
)
526 uint16 cur_xid
= rpci
->oe_trans
;
528 uint16 min_delta
= 0xffff;
531 ASSERT(rpci
->rpc_th
);
532 for (buf
= rpci
->reorder_pktq
;
534 buf
= bcm_rpc_buf_next_get(rpci
->rpc_th
, buf
)) {
535 header
= bcm_rpc_header(rpci
, buf
);
536 xid
= RPC_HDR_XACTION(header
);
537 delta
= xid
- cur_xid
;
539 if (delta
< min_delta
) {
549 BCMATTACHFN(bcm_rpc_detach
)(struct rpc_info
*rpci
)
556 if (rpci
->reorder_pktq
) {
558 ASSERT(rpci
->rpc_th
);
559 while ((node
= rpci
->reorder_pktq
)) {
560 rpci
->reorder_pktq
= bcm_rpc_buf_next_get(rpci
->rpc_th
,
562 bcm_rpc_buf_next_set(rpci
->rpc_th
, node
, NULL
);
563 #if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY) || defined(BCM_RPC_ROC)
564 PKTFREE(rpci
->osh
, node
, FALSE
);
566 bcm_rpc_tp_buf_free(rpci
->rpc_th
, node
);
567 #endif /* BCM_RPC_NOCOPY || BCM_RPC_RXNOCOPY || BCM_RPC_ROC */
569 ASSERT(rpci
->reorder_pktq
== NULL
);
570 rpci
->reorder_depth
= 0;
571 rpci
->reorder_depth_max
= 0;
576 if (rpci
->reorder_lock_alloced
)
577 NdisFreeSpinLock(&rpcb
->lock
);
579 #endif /* WLC_HIGH */
581 /* rpc is going away, cut off registered cbs from rpc_tp layer */
582 bcm_rpc_tp_deregister_cb(rpci
->rpc_th
);
585 bcm_rpc_tp_txflowctlcb_deinit(rpci
->rpc_th
);
589 rpc_osl_detach(rpci
->rpc_osh
);
591 MFREE(rpci
->osh
, rpci
, sizeof(struct rpc_info
));
/* Allocate an RPC buffer with room for 'datalen' bytes of payload plus the
 * RPC header. The buffer comes from the transport layer; the data pointer is
 * then advanced past the header area with bcm_rpc_buf_pull() so the caller
 * writes payload only.
 * NOTE(review): return type, braces, the rpc_buf declaration, the allocation
 * failure check and the final return are missing from this extracted view;
 * code below is kept byte-identical.
 */
596 bcm_rpc_buf_alloc(struct rpc_info
*rpci
, int datalen
)
599 int len
= datalen
+ RPC_HDR_LEN
;
601 ASSERT(rpci
->rpc_th
);
602 rpc_buf
= bcm_rpc_tp_buf_alloc(rpci
->rpc_th
, len
);
607 /* Reserve space for RPC Header */
608 bcm_rpc_buf_pull(rpci
->rpc_th
, rpc_buf
, RPC_HDR_LEN
);
/* Return the combined RPC + transport header length cached at attach time
 * in rpci->rpc_tp_hdr_len.
 * NOTE(review): return type and braces are missing from this extracted view;
 * code below is kept byte-identical.
 */
614 bcm_rpc_buf_header_len(struct rpc_info
*rpci
)
616 return rpci
->rpc_tp_hdr_len
;
/* Free an RPC buffer by handing it back to the transport layer.
 * NOTE(review): return type and braces are missing from this extracted view;
 * code below is kept byte-identical.
 */
620 bcm_rpc_buf_free(struct rpc_info
*rpci
, rpc_buf_t
*rpc_buf
)
622 bcm_rpc_tp_buf_free(rpci
->rpc_th
, rpc_buf
);
/* Register the client callbacks on the RPC instance: 'cb'/'ctx' for
 * dispatching received data, 'dncb'/'dnctx' for RPC-down notification,
 * 'resync_cb' for host-reenable resync, and 'txdone_cb' for tx completion.
 * NOTE(review): the assignments of ctx, dncb and dnctx are on lines missing
 * from this extracted view (only dispatchcb, resync_cb and txdone_cb
 * assignments are visible); code below is kept byte-identical.
 */
626 bcm_rpc_rxcb_init(struct rpc_info
*rpci
, void *ctx
, rpc_dispatch_cb_t cb
,
627 void *dnctx
, rpc_down_cb_t dncb
, rpc_resync_cb_t resync_cb
, rpc_txdone_cb_t txdone_cb
)
629 rpci
->dispatchcb
= cb
;
633 rpci
->resync_cb
= resync_cb
;
634 rpci
->txdone_cb
= txdone_cb
;
/* Unregister client callbacks: clears the dispatch and resync callbacks
 * (the corresponding context/down-callback clears are presumably on lines
 * missing from this extracted view — TODO confirm).
 * Code below is kept byte-identical.
 */
638 bcm_rpc_rxcb_deinit(struct rpc_info
*rpci
)
643 rpci
->dispatchcb
= NULL
;
647 rpci
->resync_cb
= NULL
;
/* Accessor returning the transport-layer handle of this RPC instance —
 * presumably 'return rpci->rpc_th;': the function body is on lines missing
 * from this extracted view (TODO confirm). Signature kept byte-identical.
 */
650 struct rpc_transport_info
*
651 bcm_rpc_tp_get(struct rpc_info
*rpci
)
/* Transport encapsulation for tx: compute the total packet length of the
 * (possibly chained) rpc_buf, push BCM_RPC_TP_ENCAP_LEN bytes of headroom,
 * and store the length there in little-endian order for the other end.
 * NOTE(review): return type, braces and the declarations of rpc_len/tp_lenp
 * are missing from this extracted view; code below is kept byte-identical.
 */
658 bcm_rpc_tp_tx_encap(struct rpc_info
*rpci
, rpc_buf_t
*rpc_buf
)
663 rpc_len
= pkttotlen(rpci
->osh
, rpc_buf
);
664 tp_lenp
= (uint32
*)bcm_rpc_buf_push(rpci
->rpc_th
, rpc_buf
, BCM_RPC_TP_ENCAP_LEN
);
665 *tp_lenp
= htol32(rpc_len
);
/* Build the RPC header word(s) for an outgoing frame of the given type.
 * For management frames the action code is written at header+1, and for
 * CONNECT/RESET/HELLO the protocol version is additionally written at
 * header+2. Return-type frames carry rpci->rtn_trans as the transaction id;
 * the session id is placed in bits 16..23.
 * NOTE(review): the declaration/initialization of 'v' (type and data-frame
 * transaction id), the final header store, braces and several conditional
 * lines are missing from this extracted view; code below is kept
 * byte-identical.
 */
670 rpc_header_prep(struct rpc_info
*rpci
, rpc_header_t
*header
, uint type
, uint action
)
677 /* Mgmt action follows the header */
678 if (type
== RPC_TYPE_MGN
) {
679 *(header
+ 1) = htol32(action
);
681 if (action
== RPC_CONNECT
|| action
== RPC_RESET
|| action
== RPC_HELLO
)
682 *(header
+ 2) = htol32(rpci
->version
);
686 else if (type
== RPC_TYPE_RTN
)
687 v
|= (rpci
->rtn_trans
);
692 v
|= (rpci
->session
<< 16);
696 RPC_TRACE(("rpc_header_prep: type:0x%x action: %d trans:0x%x\n",
697 type
, action
, rpci
->trans
));
700 #if defined(WLC_HIGH) && defined(NDIS)
703 bcm_rpc_hello(struct rpc_info
*rpci
)
705 int ret
= -1, count
= 10;
707 rpc_header_t
*header
;
709 RPC_OSL_LOCK(rpci
->rpc_osh
);
710 rpci
->state
= WAIT_HELLO
;
711 rpci
->wait_init
= TRUE
;
712 RPC_OSL_UNLOCK(rpci
->rpc_osh
);
714 while (ret
&& (count
-- > 0)) {
716 /* Allocate a frame, prep it, send and wait */
717 rpc_buf
= bcm_rpc_tp_buf_alloc(rpci
->rpc_th
, RPC_HDR_LEN
+ RPC_ACN_LEN
+ RPC_VER_LEN
723 header
= (rpc_header_t
*)bcm_rpc_buf_data(rpci
->rpc_th
, rpc_buf
);
725 rpc_header_prep(rpci
, header
, RPC_TYPE_MGN
, RPC_HELLO
);
727 if (bcm_rpc_tp_buf_send(rpci
->rpc_th
, rpc_buf
)) {
728 RPC_ERR(("%s: bcm_rpc_tp_buf_send() call failed\n", __FUNCTION__
));
732 RPC_ERR(("%s: waiting to receive hello\n", __FUNCTION__
));
734 RPC_OSL_WAIT(rpci
->rpc_osh
, RPC_INIT_WAIT_TIMEOUT_MSEC
, NULL
);
736 RPC_TRACE(("%s: wait done, ret = %d\n", __FUNCTION__
, ret
));
738 /* See if we timed out or actually initialized */
739 RPC_OSL_LOCK(rpci
->rpc_osh
);
740 if (rpci
->state
== HELLO_RECEIVED
)
742 RPC_OSL_UNLOCK(rpci
->rpc_osh
);
746 /* See if we timed out or actually initialized */
747 RPC_OSL_LOCK(rpci
->rpc_osh
);
748 rpci
->wait_init
= FALSE
;
749 RPC_OSL_UNLOCK(rpci
->rpc_osh
);
754 #endif /* WLC_HIGH && NDIS */
759 bcm_rpc_up(struct rpc_info
*rpci
)
762 rpc_header_t
*header
;
765 /* Allocate a frame, prep it, send and wait */
766 rpc_buf
= bcm_rpc_tp_buf_alloc(rpci
->rpc_th
, RPC_HDR_LEN
+ RPC_ACN_LEN
+ RPC_VER_LEN
772 header
= (rpc_header_t
*)bcm_rpc_buf_data(rpci
->rpc_th
, rpc_buf
);
774 rpc_header_prep(rpci
, header
, RPC_TYPE_MGN
, RPC_CONNECT
);
776 RPC_OSL_LOCK(rpci
->rpc_osh
);
777 rpci
->state
= WAIT_INITIALIZING
;
778 rpci
->wait_init
= TRUE
;
781 if (!rpci
->reorder_lock_alloced
) {
782 NdisAllocateSpinLock(&rpci
->reorder_lock
);
783 rpci
->reorder_lock_alloced
= TRUE
;
786 spin_lock_init(&rpci
->reorder_lock
);
789 RPC_OSL_UNLOCK(rpci
->rpc_osh
);
791 if (bcm_rpc_tp_buf_send(rpci
->rpc_th
, rpc_buf
)) {
792 RPC_ERR(("%s: bcm_rpc_tp_buf_send() call failed\n", __FUNCTION__
));
796 /* Wait for state to change to established. The receive thread knows what to do */
797 RPC_ERR(("%s: waiting to be connected\n", __FUNCTION__
));
799 ret
= RPC_OSL_WAIT(rpci
->rpc_osh
, RPC_INIT_WAIT_TIMEOUT_MSEC
, NULL
);
801 RPC_TRACE(("%s: wait done, ret = %d\n", __FUNCTION__
, ret
));
804 rpci
->wait_init
= FALSE
;
808 /* See if we timed out or actually initialized */
809 RPC_OSL_LOCK(rpci
->rpc_osh
);
810 if (rpci
->state
== ESTABLISHED
)
814 rpci
->wait_init
= FALSE
;
815 RPC_OSL_UNLOCK(rpci
->rpc_osh
);
818 bcm_rpc_pktlog_init(rpci
);
/* Return TRUE when the RPC connection state is ASLEEP.
 * NOTE(review): return type and braces are missing from this extracted view;
 * code below is kept byte-identical.
 */
825 bcm_rpc_is_asleep(struct rpc_info
*rpci
)
827 return (rpci
->state
== ASLEEP
);
/* Put the RPC link to sleep: bail out early when suspend is not enabled,
 * otherwise ask the transport layer to sleep and mark the state ASLEEP so
 * later traffic is ignored.
 * NOTE(review): return type, braces, the early-return body and the final
 * return are missing from this extracted view; code below is kept
 * byte-identical.
 */
831 bcm_rpc_sleep(struct rpc_info
*rpci
)
833 if (!rpci
->suspend_enable
)
835 bcm_rpc_tp_sleep(rpci
->rpc_th
);
836 rpci
->state
= ASLEEP
;
837 /* Ignore anything coming after this */
/* Shut down the RPC link: forward the shutdown to the transport layer and
 * mark the connection DISCONNECTED. The transport result ('ret') is
 * presumably returned to the caller on a line missing from this extracted
 * view — TODO confirm. Code below is kept byte-identical.
 */
848 bcm_rpc_shutdown(struct rpc_info
*rpci
)
853 ret
= bcm_rpc_tp_shutdown(rpci
->rpc_th
);
854 rpci
->state
= DISCONNECTED
;
861 bcm_rpc_resume(struct rpc_info
*rpci
, int *fw_reload
)
863 if (!rpci
->suspend_enable
)
866 bcm_rpc_tp_resume(rpci
->rpc_th
, fw_reload
);
875 rpci
->state
= ESTABLISHED
;
877 if (bcm_rpc_resume_oe(rpci
) == 0) {
882 RPC_TRACE(("bcm_rpc_resume done, state %d\n", rpci
->state
));
883 return (rpci
->state
== ESTABLISHED
);
887 bcm_rpc_resume_oe(struct rpc_info
*rpci
)
890 rpc_header_t
*header
;
893 /* Allocate a frame, prep it, send and wait */
894 rpc_buf
= bcm_rpc_tp_buf_alloc(rpci
->rpc_th
, RPC_HDR_LEN
+ RPC_ACN_LEN
+ RPC_VER_LEN
);
899 header
= (rpc_header_t
*)bcm_rpc_buf_data(rpci
->rpc_th
, rpc_buf
);
901 rpc_header_prep(rpci
, header
, RPC_TYPE_MGN
, RPC_RESET
);
903 RPC_OSL_LOCK(rpci
->rpc_osh
);
904 rpci
->state
= WAIT_RESUME
;
905 rpci
->wait_init
= TRUE
;
906 RPC_OSL_UNLOCK(rpci
->rpc_osh
);
908 /* Don't care for the return value */
909 if (bcm_rpc_tp_buf_send(rpci
->rpc_th
, rpc_buf
)) {
910 RPC_ERR(("%s: bcm_rpc_tp_buf_send() call failed\n", __FUNCTION__
));
914 /* Wait for state to change to established. The receive thread knows what to do */
915 RPC_ERR(("%s: waiting to be resumed\n", __FUNCTION__
));
917 ret
= RPC_OSL_WAIT(rpci
->rpc_osh
, RPC_INIT_WAIT_TIMEOUT_MSEC
, NULL
);
919 RPC_TRACE(("%s: wait done, ret = %d\n", __FUNCTION__
, ret
));
922 rpci
->wait_init
= FALSE
;
926 /* See if we timed out or actually initialized */
927 RPC_OSL_LOCK(rpci
->rpc_osh
);
928 if (rpci
->state
== ESTABLISHED
)
932 rpci
->wait_init
= FALSE
;
933 RPC_OSL_UNLOCK(rpci
->rpc_osh
);
939 bcm_rpc_up(struct rpc_info
*rpci
)
941 rpci
->state
= WAIT_INITIALIZING
;
944 bcm_rpc_pktlog_init(rpci
);
945 hndrte_cons_addcmd("rpcpktdump", bcm_rpc_dump_pktlog_low
, (uint32
)rpci
);
947 hndrte_cons_addcmd("rpcdump", bcm_rpc_dump_state
, (uint32
)rpci
);
952 bcm_rpc_connect_resp(struct rpc_info
*rpci
, rpc_acn_t acn
, uint32 reason
)
955 rpc_header_t
*header
;
957 /* Allocate a frame, prep it, send and wait */
958 rpc_buf
= bcm_rpc_tp_buf_alloc(rpci
->rpc_th
, RPC_HDR_LEN
+ RPC_ACN_LEN
+
959 RPC_RC_LEN
+ RPC_VER_LEN
+ RPC_CHIPID_LEN
);
961 RPC_ERR(("%s: bcm_rpc_tp_buf_alloc() failed\n", __FUNCTION__
));
965 header
= (rpc_header_t
*)bcm_rpc_buf_data(rpci
->rpc_th
, rpc_buf
);
967 rpc_header_prep(rpci
, header
, RPC_TYPE_MGN
, acn
);
969 *(header
+ 2) = ltoh32(rpci
->version
);
970 *(header
+ 3) = ltoh32(reason
);
972 *(header
+ 4) = ltoh32(BCMCHIPID
);
973 #endif /* BCMCHIPID */
974 if (bcm_rpc_tp_buf_send(rpci
->rpc_th
, rpc_buf
)) {
975 RPC_ERR(("%s: bcm_rpc_tp_buf_send() call failed\n", __FUNCTION__
));
981 #endif /* WLC_HIGH */
/* Periodic watchdog: maintains static 'uptime'/'count' counters, logs the
 * uptime in minutes once per minute, and forwards the tick to the transport
 * watchdog. The two similar uptime-logging branches are presumably selected
 * by preprocessor conditionals on lines missing from this extracted view
 * (the comment indicates the low driver calls this every 10 ms) — TODO
 * confirm. Code below is kept byte-identical.
 */
984 bcm_rpc_watchdog(struct rpc_info
*rpci
)
986 static uint32 uptime
= 0;
988 /* rpc watchdog is called every 10 msec in the low driver */
989 static uint32 count
= 0;
991 if (count
% 100 == 0) {
994 if (uptime
% 60 == 0)
995 RPC_TRACE(("rpc uptime %d minutes\n", (uptime
/ 60)));
999 if (uptime
% 60 == 0) {
1000 RPC_TRACE(("rpc uptime %d minutes\n", (uptime
/ 60)));
1004 bcm_rpc_tp_watchdog(rpci
->rpc_th
);
1008 bcm_rpc_down(struct rpc_info
*rpci
)
1010 RPC_ERR(("%s\n", __FUNCTION__
));
1013 bcm_rpc_pktlog_deinit(rpci
);
1016 RPC_OSL_LOCK(rpci
->rpc_osh
);
1017 if (rpci
->state
!= DISCONNECTED
&& rpci
->state
!= ASLEEP
) {
1019 bcm_rpc_fatal_dump(rpci
);
1021 bcm_rpc_dump_state((uint32
)rpci
, 0, NULL
);
1023 rpci
->state
= DISCONNECTED
;
1024 RPC_OSL_UNLOCK(rpci
->rpc_osh
);
1026 (rpci
->dncb
)(rpci
->dnctx
);
1027 bcm_rpc_tp_down(rpci
->rpc_th
);
1030 RPC_OSL_UNLOCK(rpci
->rpc_osh
);
1033 #if defined(USBAP) && (defined(WLC_HIGH) && !defined(WLC_LOW))
1034 /* For USBAP external image, reboot system upon RPC error instead of just turning RPC down */
1035 #include <siutils.h>
1037 bcm_rpc_err_down(struct rpc_info
*rpci
)
1039 si_t
*sih
= si_kattach(SI_OSH
);
1041 RPC_ERR(("%s: rebooting system due to RPC error.\n", __FUNCTION__
));
1042 si_watchdog(sih
, 1);
1045 #define bcm_rpc_err_down bcm_rpc_down
/* Transport tx-completion callback (registered in bcm_rpc_attach via
 * bcm_rpc_tp_register_cb). If the client registered a txdone callback the
 * buffer is handed to it — the client must strip the rpc/tp header itself —
 * otherwise the buffer is freed back to the transport layer.
 * 'status' is only logged here.
 * NOTE(review): return type, braces and the else line between the two
 * branches are missing from this extracted view; code below is kept
 * byte-identical.
 */
1049 bcm_rpc_tx_complete(void *ctx
, rpc_buf_t
*buf
, int status
)
1051 struct rpc_info
*rpci
= (struct rpc_info
*)ctx
;
1053 RPC_TRACE(("%s: status 0x%x\n", __FUNCTION__
, status
));
1055 ASSERT(rpci
&& rpci
->rpc_th
);
1058 if (rpci
->txdone_cb
) {
1059 /* !!must pull off the rpc/tp header after dbus is done for wl driver */
1060 rpci
->txdone_cb(rpci
->ctx
, buf
);
1062 bcm_rpc_tp_buf_free(rpci
->rpc_th
, buf
);
1067 bcm_rpc_call(struct rpc_info
*rpci
, rpc_buf_t
*b
)
1069 rpc_header_t
*header
;
1072 struct rpc_pktlog cur
;
1075 RPC_TRACE(("%s:\n", __FUNCTION__
));
1077 RPC_OSL_LOCK(rpci
->rpc_osh
);
1078 if (rpci
->state
!= ESTABLISHED
) {
1080 RPC_OSL_UNLOCK(rpci
->rpc_osh
);
1082 header
= (rpc_header_t
*)bcm_rpc_buf_push(rpci
->rpc_th
, b
, RPC_HDR_LEN
);
1083 rpc_header_prep(rpci
, header
, RPC_TYPE_DATA
, 0);
1084 bcm_rpc_tp_tx_encap(rpci
, b
);
1086 if (rpci
->txdone_cb
) {
1087 /* !!must pull off the rpc/tp header after dbus is done for wl driver */
1088 rpci
->txdone_cb(rpci
->ctx
, b
);
1092 bcm_rpc_buf_free(rpci
, b
);
1096 RPC_OSL_UNLOCK(rpci
->rpc_osh
);
1099 /* Prepare the current log entry but add only if the TX was successful */
1100 /* This is done here before DATA pointer gets modified */
1101 if (RPC_PKTLOG_ON())
1102 bcm_rpc_prep_entry(rpci
, b
, &cur
, TRUE
);
1105 header
= (rpc_header_t
*)bcm_rpc_buf_push(rpci
->rpc_th
, b
, RPC_HDR_LEN
);
1107 rpc_header_prep(rpci
, header
, RPC_TYPE_DATA
, 0);
1110 if (RPC_PKTTRACE_ON()) {
1112 prhex("RPC Call ", bcm_rpc_buf_data(rpci
->rpc_th
, b
),
1113 bcm_rpc_buf_len_get(rpci
->rpc_th
, b
));
1116 #endif /* BCMDBG_RPC */
1118 if (bcm_rpc_tp_buf_send(rpci
->rpc_th
, b
)) {
1119 RPC_ERR(("%s: bcm_rpc_tp_buf_send() call failed\n", __FUNCTION__
));
1121 if (rpci
->txdone_cb
) {
1122 rpci
->txdone_cb(rpci
->ctx
, b
);
1124 bcm_rpc_tp_buf_free(rpci
->rpc_th
, b
);
1126 bcm_rpc_err_down(rpci
);
1130 RPC_OSL_LOCK(rpci
->rpc_osh
);
1132 RPC_OSL_UNLOCK(rpci
->rpc_osh
);
1134 #ifdef BCMDBG_RPC /* Since successful add the entry */
1135 if (RPC_PKTLOG_ON()) {
1136 bcm_rpc_add_entry_tx(rpci
, &cur
);
1145 bcm_rpc_call_with_return(struct rpc_info
*rpci
, rpc_buf_t
*b
)
1147 rpc_header_t
*header
;
1148 rpc_buf_t
*retb
= NULL
;
1151 struct rpc_pktlog cur
;
1153 bool timedout
= FALSE
;
1154 uint32 start_wait_time
;
1156 RPC_TRACE(("%s:\n", __FUNCTION__
));
1158 RPC_OSL_LOCK(rpci
->rpc_osh
);
1159 if (rpci
->state
!= ESTABLISHED
) {
1160 RPC_OSL_UNLOCK(rpci
->rpc_osh
);
1161 RPC_ERR(("%s: RPC call before ESTABLISHED state\n", __FUNCTION__
));
1162 bcm_rpc_buf_free(rpci
, b
);
1165 RPC_OSL_UNLOCK(rpci
->rpc_osh
);
1168 /* Prepare the current log entry but add only if the TX was successful */
1169 /* This is done here before DATA pointer gets modified */
1170 if (RPC_PKTLOG_ON())
1171 bcm_rpc_prep_entry(rpci
, b
, &cur
, TRUE
);
1174 header
= (rpc_header_t
*)bcm_rpc_buf_push(rpci
->rpc_th
, b
, RPC_HDR_LEN
);
1176 rpc_header_prep(rpci
, header
, RPC_TYPE_RTN
, 0);
1178 RPC_OSL_LOCK(rpci
->rpc_osh
);
1180 ASSERT(rpci
->rtn_rpcbuf
== NULL
);
1181 rpci
->wait_return
= TRUE
;
1182 RPC_OSL_UNLOCK(rpci
->rpc_osh
);
1184 /* Prep the return packet BEFORE sending the buffer and also within spinlock
1187 ret
= bcm_rpc_tp_recv_rtn(rpci
->rpc_th
);
1188 if ((ret
== BCME_RXFAIL
) || (ret
== BCME_NODEVICE
)) {
1189 RPC_ERR(("%s: bcm_rpc_tp_recv_rtn() failed\n", __FUNCTION__
));
1191 RPC_OSL_LOCK(rpci
->rpc_osh
);
1192 rpci
->wait_return
= FALSE
;
1193 RPC_OSL_UNLOCK(rpci
->rpc_osh
);
1194 bcm_rpc_err_down(rpci
);
1200 if (RPC_PKTTRACE_ON()) {
1202 prhex("RPC Call With Return Buf", bcm_rpc_buf_data(rpci
->rpc_th
, b
),
1203 bcm_rpc_buf_len_get(rpci
->rpc_th
, b
));
1206 #endif /* BCMDBG_RPC */
1208 if (bcm_rpc_tp_buf_send(rpci
->rpc_th
, b
)) {
1209 RPC_ERR(("%s: bcm_rpc_bus_buf_send() failed\n", __FUNCTION__
));
1211 RPC_OSL_LOCK(rpci
->rpc_osh
);
1212 rpci
->wait_return
= FALSE
;
1213 RPC_OSL_UNLOCK(rpci
->rpc_osh
);
1214 bcm_rpc_err_down(rpci
);
1218 start_wait_time
= OSL_SYSUPTIME();
1219 ret
= RPC_OSL_WAIT(rpci
->rpc_osh
, RPC_RETURN_WAIT_TIMEOUT_MSEC
, &timedout
);
1221 /* When RPC_OSL_WAIT returns because of signal pending. wait for the signal to
1224 RPC_OSL_LOCK(rpci
->rpc_osh
);
1225 while ((ret
< 0) && ((OSL_SYSUPTIME() - start_wait_time
) <= RPC_RETURN_WAIT_TIMEOUT_MSEC
)) {
1226 RPC_OSL_UNLOCK(rpci
->rpc_osh
);
1227 ret
= RPC_OSL_WAIT(rpci
->rpc_osh
, RPC_RETURN_WAIT_TIMEOUT_MSEC
, &timedout
);
1228 RPC_OSL_LOCK(rpci
->rpc_osh
);
1231 if (ret
|| timedout
) {
1232 RPC_ERR(("%s: RPC call trans 0x%x return wait err %d timedout %d limit %d(ms)\n",
1233 __FUNCTION__
, (rpci
->trans
- 1), ret
, timedout
,
1234 RPC_RETURN_WAIT_TIMEOUT_MSEC
));
1235 rpci
->wait_return
= FALSE
;
1236 RPC_OSL_UNLOCK(rpci
->rpc_osh
);
1238 bcm_rpc_dump_pktlog_high(rpci
);
1240 bcm_rpc_err_down(rpci
);
1244 /* See if we timed out or actually initialized */
1245 ASSERT(rpci
->rtn_rpcbuf
!= NULL
); /* Make sure we've got the response */
1246 retb
= rpci
->rtn_rpcbuf
;
1247 rpci
->rtn_rpcbuf
= NULL
;
1248 rpci
->wait_return
= FALSE
; /* Could have woken up by timeout */
1249 RPC_OSL_UNLOCK(rpci
->rpc_osh
);
1251 #ifdef BCMDBG_RPC /* Since successful add the entry */
1252 if (RPC_PKTLOG_ON())
1253 bcm_rpc_add_entry_tx(rpci
, &cur
);
1258 #endif /* WLC_HIGH */
/* Send a synchronous call-return frame (LOW/BMAC side): push the RPC header
 * onto 'b', stamp it as RPC_TYPE_RTN, optionally hex-dump it when packet
 * tracing is on, and send it via the transport's call-return path. On send
 * failure the RPC link is taken down with bcm_rpc_err_down(); otherwise
 * failure handling is the sender's responsibility.
 * NOTE(review): return type, braces, the #ifdef opening the trace section
 * and the final return are missing from this extracted view; code below is
 * kept byte-identical.
 */
1262 bcm_rpc_call_return(struct rpc_info
*rpci
, rpc_buf_t
*b
)
1264 rpc_header_t
*header
;
1266 RPC_TRACE(("%s\n", __FUNCTION__
));
1268 header
= (rpc_header_t
*)bcm_rpc_buf_push(rpci
->rpc_th
, b
, RPC_HDR_LEN
);
1270 rpc_header_prep(rpci
, header
, RPC_TYPE_RTN
, 0);
1273 if (RPC_PKTTRACE_ON()) {
1275 prhex("RPC Call Return Buf", bcm_rpc_buf_data(rpci
->rpc_th
, b
),
1276 bcm_rpc_buf_len_get(rpci
->rpc_th
, b
));
1279 #endif /* BCMDBG_RPC */
1281 /* If the TX fails, it's sender's responsibility */
1282 if (bcm_rpc_tp_send_callreturn(rpci
->rpc_th
, b
)) {
1283 RPC_ERR(("%s: bcm_rpc_tp_buf_send() call failed\n", __FUNCTION__
));
1284 bcm_rpc_err_down(rpci
);
1291 #endif /* WLC_LOW */
1293 /* This is expected to be called at DPC of the bus driver ? */
1295 bcm_rpc_buf_recv(void *context
, rpc_buf_t
*rpc_buf
)
1298 struct rpc_info
*rpci
= (struct rpc_info
*)context
;
1299 mbool hdr_invalid
= 0;
1300 ASSERT(rpci
&& rpci
->rpc_th
);
1302 RPC_TRACE(("%s:\n", __FUNCTION__
));
1306 /* Only if the header itself checks out , and only xaction does not */
1307 hdr_invalid
= bcm_rpc_hdr_validate(rpci
, rpc_buf
, &xaction
, TRUE
);
1309 if (mboolisset(hdr_invalid
, HDR_XACTION_MISMATCH
) &&
1310 !mboolisset(hdr_invalid
, ~HDR_XACTION_MISMATCH
)) {
1311 rpc_buf_t
*node
= rpci
->reorder_pktq
;
1313 rpci
->reorder_depth
++;
1314 if (rpci
->reorder_depth
> rpci
->reorder_depth_max
)
1315 rpci
->reorder_depth_max
= rpci
->reorder_depth
;
1317 /* Catch roll-over or retries */
1318 rpci
->reorder_pktq
= rpc_buf
;
1321 bcm_rpc_buf_next_set(rpci
->rpc_th
, rpc_buf
, node
);
1323 /* if we have held too many packets, move past the hole */
1324 if (rpci
->reorder_depth
> BCM_RPC_REORDER_LIMIT
) {
1325 uint16 next_xid
= bcm_rpc_reorder_next_xid(rpci
);
1327 RPC_ERR(("%s: reorder queue depth %d, skipping ID 0x%x to 0x%x\n",
1328 __FUNCTION__
, rpci
->reorder_depth
,
1329 rpci
->oe_trans
, next_xid
));
1330 rpci
->cnt_reorder_overflow
++;
1331 rpci
->cnt_rx_drop_hole
+= (uint
)(next_xid
- rpci
->oe_trans
);
1332 rpci
->oe_trans
= next_xid
;
1333 bcm_rpc_process_reorder_queue(rpci
);
1339 /* Bail out if failed */
1340 if (!bcm_rpc_buf_recv_inorder(rpci
, rpc_buf
, hdr_invalid
))
1343 /* see if we can make progress on the reorder backlog */
1344 bcm_rpc_process_reorder_queue(rpci
);
1347 RPC_RO_UNLOCK(rpci
);
1351 bcm_rpc_process_reorder_queue(rpc_info_t
*rpci
)
1354 mbool hdr_invalid
= 0;
1356 while (rpci
->reorder_pktq
) {
1358 rpc_buf_t
*buf
= rpci
->reorder_pktq
;
1359 rpc_buf_t
*prev
= rpci
->reorder_pktq
;
1360 while (buf
!= NULL
) {
1361 rpc_buf_t
*next
= bcm_rpc_buf_next_get(rpci
->rpc_th
, buf
);
1362 hdr_invalid
= bcm_rpc_hdr_validate(rpci
, buf
, &xaction
, FALSE
);
1364 if (!mboolisset(hdr_invalid
, HDR_XACTION_MISMATCH
)) {
1365 bcm_rpc_buf_next_set(rpci
->rpc_th
, buf
, NULL
);
1367 if (buf
== rpci
->reorder_pktq
)
1368 rpci
->reorder_pktq
= next
;
1370 bcm_rpc_buf_next_set(rpci
->rpc_th
, prev
, next
);
1371 rpci
->reorder_depth
--;
1373 /* Bail out if failed */
1374 if (!bcm_rpc_buf_recv_inorder(rpci
, buf
, hdr_invalid
))
1385 /* bail if not found */
1394 bcm_rpc_buf_recv_inorder(rpc_info_t
*rpci
, rpc_buf_t
*rpc_buf
, mbool hdr_invalid
)
1396 rpc_header_t header
;
1397 rpc_acn_t acn
= RPC_NULL
;
1399 ASSERT(rpci
&& rpci
->rpc_th
);
1401 RPC_TRACE(("%s: got rpc_buf %p len %d data %p\n", __FUNCTION__
,
1402 rpc_buf
, bcm_rpc_buf_len_get(rpci
->rpc_th
, rpc_buf
),
1403 bcm_rpc_buf_data(rpci
->rpc_th
, rpc_buf
)));
1406 if (RPC_PKTTRACE_ON()) {
1408 prhex("RPC Rx Buf", bcm_rpc_buf_data(rpci
->rpc_th
, rpc_buf
),
1409 bcm_rpc_buf_len_get(rpci
->rpc_th
, rpc_buf
));
1412 #endif /* BCMDBG_RPC */
1414 header
= bcm_rpc_header(rpci
, rpc_buf
);
1416 RPC_OSL_LOCK(rpci
->rpc_osh
);
1419 RPC_ERR(("%s: bcm_rpc_hdr_validate failed on 0x%08x 0x%x\n", __FUNCTION__
,
1420 header
, hdr_invalid
));
1421 #if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY) || defined(BCM_RPC_ROC)
1422 if (RPC_HDR_TYPE(header
) != RPC_TYPE_RTN
) {
1424 PKTFRMNATIVE(rpci
->osh
, rpc_buf
);
1426 PKTFREE(rpci
->osh
, rpc_buf
, FALSE
);
1429 bcm_rpc_tp_buf_free(rpci
->rpc_th
, rpc_buf
);
1430 #endif /* defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY) || defined(BCM_RPC_ROC) */
1431 RPC_OSL_UNLOCK(rpci
->rpc_osh
);
1435 RPC_TRACE(("%s state:0x%x type:0x%x session:0x%x xacn:0x%x\n", __FUNCTION__
, rpci
->state
,
1436 RPC_HDR_TYPE(header
), RPC_HDR_SESSION(header
), RPC_HDR_XACTION(header
)));
1438 if (bcm_rpc_buf_len_get(rpci
->rpc_th
, rpc_buf
) > RPC_HDR_LEN
)
1439 bcm_rpc_buf_pull(rpci
->rpc_th
, rpc_buf
, RPC_HDR_LEN
);
1441 /* if the head packet ends with rpc_hdr, free and advance to next packet in chain */
1444 ASSERT(bcm_rpc_buf_len_get(rpci
->rpc_th
, rpc_buf
) == RPC_HDR_LEN
);
1445 next_p
= (rpc_buf_t
*)PKTNEXT(rpci
->osh
, rpc_buf
);
1447 RPC_TRACE(("%s: following pkt chain to pkt %p len %d\n", __FUNCTION__
,
1448 next_p
, bcm_rpc_buf_len_get(rpci
->rpc_th
, next_p
)));
1450 PKTSETNEXT(rpci
->osh
, rpc_buf
, NULL
);
1451 bcm_rpc_tp_buf_free(rpci
->rpc_th
, rpc_buf
);
1453 if (rpc_buf
== NULL
) {
1454 RPC_OSL_UNLOCK(rpci
->rpc_osh
);
1459 switch (RPC_HDR_TYPE(header
)) {
1461 acn
= bcm_rpc_mgn_acn(rpci
, rpc_buf
);
1462 bcm_rpc_buf_pull(rpci
->rpc_th
, rpc_buf
, RPC_ACN_LEN
);
1463 RPC_TRACE(("Mgn: %x\n", acn
));
1467 rpci
->oe_rtn_trans
= RPC_HDR_XACTION(header
) + 1;
1471 rpci
->oe_trans
= RPC_HDR_XACTION(header
) + 1;
1478 rpc_buf
= bcm_rpc_buf_recv_high(rpci
, RPC_HDR_TYPE(header
), acn
, rpc_buf
);
1480 rpc_buf
= bcm_rpc_buf_recv_low(rpci
, header
, acn
, rpc_buf
);
1482 RPC_OSL_UNLOCK(rpci
->rpc_osh
);
1485 bcm_rpc_tp_buf_free(rpci
->rpc_th
, rpc_buf
);
1491 bcm_rpc_buf_recv_mgn_high(struct rpc_info
*rpci
, rpc_acn_t acn
, rpc_buf_t
*rpc_buf
)
1493 rpc_rc_t reason
= RPC_RC_ACK
;
1496 RPC_TRACE(("%s: Recvd:%x Version: 0x%x\nState: %x Session:%d\n", __FUNCTION__
,
1497 acn
, rpci
->version
, rpci
->state
, rpci
->session
));
1500 if (acn
== RPC_CONNECT_ACK
|| acn
== RPC_CONNECT_NACK
) {
1502 if (acn
== RPC_HELLO
|| acn
== RPC_CONNECT_ACK
|| acn
== RPC_CONNECT_NACK
) {
1504 version
= bcm_rpc_mgn_ver(rpci
, rpc_buf
);
1505 bcm_rpc_buf_pull(rpci
->rpc_th
, rpc_buf
, RPC_VER_LEN
);
1507 reason
= bcm_rpc_mgn_reason(rpci
, rpc_buf
);
1508 bcm_rpc_buf_pull(rpci
->rpc_th
, rpc_buf
, RPC_RC_LEN
);
1510 RPC_ERR(("%s: Reason: %x Dongle Version: 0x%x\n", __FUNCTION__
,
1517 /* If the original thread has not given up,
1518 * then change the state and wake it up
1520 if (rpci
->state
== WAIT_HELLO
) {
1521 rpci
->state
= HELLO_RECEIVED
;
1523 RPC_ERR(("%s: Hello Received!\n", __FUNCTION__
));
1524 if (rpci
->wait_init
)
1525 RPC_OSL_WAKE(rpci
->rpc_osh
);
1529 case RPC_CONNECT_ACK
:
1530 /* If the original thread has not given up,
1531 * then change the state and wake it up
1533 if (rpci
->state
!= UNINITED
) {
1534 rpci
->state
= ESTABLISHED
;
1535 rpci
->chipid
= bcm_rpc_mgn_chipid(rpci
, rpc_buf
);
1536 bcm_rpc_buf_pull(rpci
->rpc_th
, rpc_buf
, RPC_CHIPID_LEN
);
1538 RPC_ERR(("%s: Connected!\n", __FUNCTION__
));
1539 if (rpci
->wait_init
)
1540 RPC_OSL_WAKE(rpci
->rpc_osh
);
1542 ASSERT(reason
!= RPC_RC_VER_MISMATCH
);
1545 case RPC_CONNECT_NACK
:
1546 /* Connect failed. Just bail out by waking the thread */
1547 RPC_ERR(("%s: Connect failed !!!\n", __FUNCTION__
));
1548 if (rpci
->wait_init
)
1549 RPC_OSL_WAKE(rpci
->rpc_osh
);
1553 RPC_OSL_UNLOCK(rpci
->rpc_osh
);
1555 if ((KeGetCurrentIrql() == DISPATCH_LEVEL
) &&
1556 (bcm_rpc_tp_tx_flowctl_get(rpci
->rpc_th
))) {
1557 RPC_TRACE(("%s: unsafe to down rpc, delay\n", __FUNCTION__
));
1558 rpci
->down_pending
= TRUE
;
1565 RPC_OSL_LOCK(rpci
->rpc_osh
);
1575 bcm_rpc_buf_recv_high(struct rpc_info
*rpci
, rpc_type_t type
, rpc_acn_t acn
, rpc_buf_t
*rpc_buf
)
1577 RPC_TRACE(("%s: acn %d\n", __FUNCTION__
, acn
));
1581 if (rpci
->wait_return
) {
1582 rpci
->rtn_rpcbuf
= rpc_buf
;
1583 /* This buffer will be freed in bcm_rpc_tp_recv_rtn() */
1585 RPC_OSL_WAKE(rpci
->rpc_osh
);
1586 } else if (rpci
->state
!= DISCONNECTED
)
1587 RPC_ERR(("%s: Received return buffer but no one waiting\n", __FUNCTION__
));
1591 #if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY) || defined(BCM_RPC_ROC)
1592 bcm_rpc_buf_recv_mgn_high(rpci
, acn
, rpc_buf
);
1594 PKTFRMNATIVE(rpci
->osh
, rpc_buf
);
1596 PKTFREE(rpci
->osh
, rpc_buf
, FALSE
);
1599 bcm_rpc_buf_recv_mgn_high(rpci
, acn
, rpc_buf
);
1600 #endif /* defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY) || defined(BCM_RPC_ROC) */
1604 ASSERT(rpci
->state
== ESTABLISHED
);
1606 /* Prepare the current log entry but add only if the TX was successful */
1607 /* This is done here before DATA pointer gets modified */
1608 if (RPC_PKTLOG_ON()) {
1609 struct rpc_pktlog cur
;
1610 bcm_rpc_prep_entry(rpci
, rpc_buf
, &cur
, FALSE
);
1611 bcm_rpc_add_entry_rx(rpci
, &cur
);
1613 #endif /* BCMDBG_RPC */
1614 if (rpci
->dispatchcb
) {
1616 #if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY) || defined(BCM_RPC_ROC)
1617 PKTTONATIVE(rpci
->osh
, rpc_buf
);
1618 #endif /* BCM_RPC_NOCOPY || BCM_RPC_RXNOCOPY || BCM_RPC_ROC */
1620 (rpci
->dispatchcb
)(rpci
->ctx
, rpc_buf
);
1621 /* The dispatch routine will free the buffer */
1624 RPC_ERR(("%s: no rpcq callback, drop the pkt\n", __FUNCTION__
));
1636 bcm_rpc_buf_recv_mgn_low(struct rpc_info
*rpci
, uint8 session
, rpc_acn_t acn
, rpc_buf_t
*rpc_buf
)
1641 RPC_TRACE(("%s: Recvd:%x Version: 0x%x\nState: %x Session:%d\n", __FUNCTION__
,
1643 rpci
->version
, rpci
->state
, rpci
->session
));
1645 if (acn
== RPC_HELLO
) {
1646 bcm_rpc_connect_resp(rpci
, RPC_HELLO
, RPC_RC_HELLO
);
1647 } else if (acn
== RPC_CONNECT
|| acn
== RPC_RESET
) {
1648 version
= bcm_rpc_mgn_ver(rpci
, rpc_buf
);
1650 RPC_ERR(("%s: Host Version: 0x%x\n", __FUNCTION__
, version
));
1652 ASSERT(rpci
->state
!= UNINITED
);
1654 if (version
!= rpci
->version
) {
1655 RPC_ERR(("RPC Establish failed due to version mismatch\n"));
1656 RPC_ERR(("Expected: 0x%x Got: 0x%x\n", rpci
->version
, version
));
1657 RPC_ERR(("Connect failed !!!\n"));
1659 rpci
->state
= WAIT_INITIALIZING
;
1660 bcm_rpc_connect_resp(rpci
, RPC_CONNECT_NACK
, RPC_RC_VER_MISMATCH
);
1664 /* When receiving CONNECT/RESET from HIGH, just
1665 * resync to the HIGH's session and reset the transactions
1667 if ((acn
== RPC_CONNECT
) && (rpci
->state
== ESTABLISHED
))
1668 reason
= RPC_RC_RECONNECT
;
1670 rpci
->session
= session
;
1672 if (bcm_rpc_connect_resp(rpci
, RPC_CONNECT_ACK
, reason
)) {
1673 /* call the resync callback if already established */
1674 if ((acn
== RPC_CONNECT
) && (rpci
->state
== ESTABLISHED
) &&
1675 (rpci
->resync_cb
)) {
1676 (rpci
->resync_cb
)(rpci
->dnctx
);
1678 rpci
->state
= ESTABLISHED
;
1680 RPC_ERR(("%s: RPC Establish failed !!!\n", __FUNCTION__
));
1683 RPC_ERR(("Connected Session:%x!\n", rpci
->session
));
1686 rpci
->rtn_trans
= 0;
1687 } else if (acn
== RPC_DOWN
) {
1693 bcm_rpc_buf_recv_low(struct rpc_info
*rpci
, rpc_header_t header
,
1694 rpc_acn_t acn
, rpc_buf_t
*rpc_buf
)
1696 switch (RPC_HDR_TYPE(header
)) {
1698 bcm_rpc_buf_recv_mgn_low(rpci
, RPC_HDR_SESSION(header
), acn
, rpc_buf
);
1703 ASSERT(rpci
->state
== ESTABLISHED
);
1705 /* Prepare the current log entry but add only if the TX was successful */
1706 /* This is done here before DATA pointer gets modified */
1707 if (RPC_PKTLOG_ON()) {
1708 struct rpc_pktlog cur
;
1709 bcm_rpc_prep_entry(rpci
, rpc_buf
, &cur
, FALSE
);
1710 bcm_rpc_add_entry_rx(rpci
, &cur
);
1712 #endif /* BCMDBG_RPC */
1714 if (rpci
->dispatchcb
) {
1715 (rpci
->dispatchcb
)(rpci
->ctx
, rpc_buf
);
1718 RPC_ERR(("%s: no rpcq callback, drop the pkt\n", __FUNCTION__
));
1729 #endif /* WLC_HIGH */
1733 bcm_rpc_pktlog_init(rpc_info_t
*rpci
)
1735 rpc_msg_level
|= RPC_PKTLOG_VAL
;
1737 if (RPC_PKTLOG_ON()) {
1738 if ((rpci
->send_log
= MALLOC(rpci
->osh
,
1739 sizeof(struct rpc_pktlog
) * RPC_PKTLOG_SIZE
)) == NULL
)
1741 bzero(rpci
->send_log
, sizeof(struct rpc_pktlog
) * RPC_PKTLOG_SIZE
);
1742 if ((rpci
->recv_log
= MALLOC(rpci
->osh
,
1743 sizeof(struct rpc_pktlog
) * RPC_PKTLOG_SIZE
)) == NULL
)
1745 bzero(rpci
->recv_log
, sizeof(struct rpc_pktlog
) * RPC_PKTLOG_SIZE
);
1748 RPC_ERR(("pktlog is on\n"));
1750 bcm_rpc_pktlog_deinit(rpci
);
1754 bcm_rpc_pktlog_deinit(rpc_info_t
*rpci
)
1756 if (rpci
->send_log
) {
1757 MFREE(rpci
->osh
, rpci
->send_log
, sizeof(struct rpc_pktlog
) * RPC_PKTLOG_SIZE
);
1758 rpci
->send_log
= NULL
;
1760 if (rpci
->recv_log
) {
1761 MFREE(rpci
->osh
, rpci
->recv_log
, sizeof(struct rpc_pktlog
) * RPC_PKTLOG_SIZE
);
1762 rpci
->recv_log
= NULL
;
1764 rpc_msg_level
&= ~RPC_PKTLOG_VAL
; /* Turn off logging on failure */
1767 static struct rpc_pktlog
*
1768 bcm_rpc_prep_entry(struct rpc_info
* rpci
, rpc_buf_t
*b
, struct rpc_pktlog
*cur
, bool tx
)
1770 bzero(cur
, sizeof(struct rpc_pktlog
));
1772 cur
->trans
= rpci
->trans
;
1774 /* this function is called after match, so the oe_trans is already advanced */
1775 cur
->trans
= rpci
->oe_trans
- 1;
1777 cur
->len
= bcm_rpc_buf_len_get(rpci
->rpc_th
, b
);
1778 bcopy(bcm_rpc_buf_data(rpci
->rpc_th
, b
), cur
->data
, RPC_PKTLOG_DATASIZE
);
1783 bcm_rpc_add_entry_tx(struct rpc_info
* rpci
, struct rpc_pktlog
*cur
)
1785 RPC_OSL_LOCK(rpci
->rpc_osh
);
1786 bcopy(cur
, &rpci
->send_log
[rpci
->send_log_idx
], sizeof(struct rpc_pktlog
));
1787 rpci
->send_log_idx
= (rpci
->send_log_idx
+ 1) % RPC_PKTLOG_SIZE
;
1789 if (rpci
->send_log_num
< RPC_PKTLOG_SIZE
)
1790 rpci
->send_log_num
++;
1792 RPC_OSL_UNLOCK(rpci
->rpc_osh
);
1796 bcm_rpc_add_entry_rx(struct rpc_info
* rpci
, struct rpc_pktlog
*cur
)
1798 bcopy(cur
, &rpci
->recv_log
[rpci
->recv_log_idx
], sizeof(struct rpc_pktlog
));
1799 rpci
->recv_log_idx
= (rpci
->recv_log_idx
+ 1) % RPC_PKTLOG_SIZE
;
1801 if (rpci
->recv_log_num
< RPC_PKTLOG_SIZE
)
1802 rpci
->recv_log_num
++;
1804 #endif /* BCMDBG_RPC */
1808 bcm_rpc_dump(rpc_info_t
*rpci
, struct bcmstrbuf
*b
)
1812 bcm_bprintf(b
, "\nHOST rpc dump:\n");
1813 RPC_OSL_LOCK(rpci
->rpc_osh
);
1814 bcm_bprintf(b
, "Version: 0x%x State: %x\n", rpci
->version
, rpci
->state
);
1815 bcm_bprintf(b
, "session %d trans 0x%x oe_trans 0x%x rtn_trans 0x%x oe_rtn_trans 0x%x\n",
1816 rpci
->session
, rpci
->trans
, rpci
->oe_trans
,
1817 rpci
->rtn_trans
, rpci
->oe_rtn_trans
);
1818 bcm_bprintf(b
, "xactionID out of order %d\n", rpci
->cnt_xidooo
);
1819 bcm_bprintf(b
, "reorder queue depth %u first ID 0x%x, max depth %u, tossthreshold %u\n",
1820 rpci
->reorder_depth
, bcm_rpc_reorder_next_xid(rpci
), rpci
->reorder_depth_max
,
1821 BCM_RPC_REORDER_LIMIT
);
1823 RPC_OSL_UNLOCK(rpci
->rpc_osh
);
1824 return bcm_rpc_tp_dump(rpci
->rpc_th
, b
);
1831 bcm_rpc_pktlog_get(struct rpc_info
*rpci
, uint32
*buf
, uint buf_size
, bool send
)
1838 /* Clear the whole buffer */
1839 bzero(buf
, buf_size
);
1840 RPC_OSL_LOCK(rpci
->rpc_osh
);
1842 ret
= rpci
->send_log_num
;
1843 if (ret
< RPC_PKTLOG_SIZE
)
1846 start
= (rpci
->send_log_idx
+ 1) % RPC_PKTLOG_SIZE
;
1848 ret
= rpci
->recv_log_num
;
1849 if (ret
< RPC_PKTLOG_SIZE
)
1852 start
= (rpci
->recv_log_idx
+ 1) % RPC_PKTLOG_SIZE
;
1855 /* Return only first byte */
1856 if (buf_size
< (uint
) (ret
* RPC_PKTLOG_RD_LEN
)) {
1857 RPC_OSL_UNLOCK(rpci
->rpc_osh
);
1858 RPC_ERR(("%s buf too short\n", __FUNCTION__
));
1859 return BCME_BUFTOOSHORT
;
1863 RPC_OSL_UNLOCK(rpci
->rpc_osh
);
1864 RPC_ERR(("%s no record\n", __FUNCTION__
));
1869 for (i
= 0; tot
> 0; tot
--, i
++) {
1871 buf
[i
*RPC_PKTLOG_RD_LEN
] = rpci
->send_log
[start
].data
[0];
1872 buf
[i
*RPC_PKTLOG_RD_LEN
+1] = rpci
->send_log
[start
].trans
;
1873 buf
[i
*RPC_PKTLOG_RD_LEN
+2] = rpci
->send_log
[start
].len
;
1876 buf
[i
*RPC_PKTLOG_RD_LEN
] = rpci
->recv_log
[start
].data
[0];
1877 buf
[i
*RPC_PKTLOG_RD_LEN
+1] = rpci
->recv_log
[start
].trans
;
1878 buf
[i
*RPC_PKTLOG_RD_LEN
+2] = rpci
->recv_log
[start
].len
;
1881 start
= (start
% RPC_PKTLOG_SIZE
);
1883 RPC_OSL_UNLOCK(rpci
->rpc_osh
);
1885 #endif /* BCMDBG_RPC */
1888 #endif /* WLC_HIGH */
1894 _bcm_rpc_dump_pktlog(rpc_info_t
*rpci
)
1899 RPC_OSL_LOCK(rpci
->rpc_osh
);
1900 ret
= rpci
->send_log_num
;
1904 if (ret
< RPC_PKTLOG_SIZE
)
1907 start
= (rpci
->send_log_idx
+ 1) % RPC_PKTLOG_SIZE
;
1909 printf("send %d\n", ret
);
1910 for (i
= 0; ret
> 0; ret
--, i
++) {
1911 printf("[%d] trans 0x%x len %d data 0x%x\n", i
,
1912 rpci
->send_log
[start
].trans
,
1913 rpci
->send_log
[start
].len
,
1914 rpci
->send_log
[start
].data
[0]);
1916 start
= (start
% RPC_PKTLOG_SIZE
);
1919 ret
= rpci
->recv_log_num
;
1923 if (ret
< RPC_PKTLOG_SIZE
)
1926 start
= (rpci
->recv_log_idx
+ 1) % RPC_PKTLOG_SIZE
;
1928 printf("recv %d\n", ret
);
1929 for (i
= 0; ret
> 0; ret
--, i
++) {
1930 printf("[%d] trans 0x%x len %d data 0x%x\n", i
,
1931 rpci
->recv_log
[start
].trans
,
1932 rpci
->recv_log
[start
].len
,
1933 rpci
->recv_log
[start
].data
[0]);
1935 start
= (start
% RPC_PKTLOG_SIZE
);
1939 RPC_OSL_UNLOCK(rpci
->rpc_osh
);
1944 bcm_rpc_dump_pktlog_high(rpc_info_t
*rpci
)
1946 printf("HOST rpc pktlog dump:\n");
1947 _bcm_rpc_dump_pktlog(rpci
);
1953 bcm_rpc_dump_pktlog_low(uint32 arg
, uint argc
, char *argv
[])
1957 rpci
= (rpc_info_t
*)(uintptr
)arg
;
1959 printf("DONGLE rpc pktlog dump:\n");
1960 _bcm_rpc_dump_pktlog(rpci
);
1962 #endif /* WLC_HIGH */
1963 #endif /* BCMDBG_RPC */
1967 bcm_rpc_dump_state(uint32 arg
, uint argc
, char *argv
[])
1970 bcm_rpc_fatal_dump(void *arg
)
1973 rpc_info_t
*rpci
= (rpc_info_t
*)(uintptr
)arg
;
1974 printf("DONGLE rpc dump:\n");
1975 printf("Version: 0x%x State: %x\n", rpci
->version
, rpci
->state
);
1976 printf("session %d trans 0x%x oe_trans 0x%x rtn_trans 0x%x\n",
1977 rpci
->session
, rpci
->trans
, rpci
->oe_trans
,
1979 printf("xactionID out of order %u reorder ovfl %u dropped hole %u\n",
1980 rpci
->cnt_xidooo
, rpci
->cnt_reorder_overflow
, rpci
->cnt_rx_drop_hole
);
1981 printf("reorder queue depth %u first ID 0x%x reorder_q_depth_max %d, tossthreshold %u\n",
1982 rpci
->reorder_depth
, bcm_rpc_reorder_next_xid(rpci
), rpci
->reorder_depth_max
,
1983 BCM_RPC_REORDER_LIMIT
);
1986 bcm_rpc_tp_dump(rpci
->rpc_th
);
1991 bcm_rpc_msglevel_set(struct rpc_info
*rpci
, uint16 msglevel
, bool high
)
1994 ASSERT(high
== TRUE
);
1995 /* high 8 bits are for rpc, low 8 bits are for tp */
1996 rpc_msg_level
= msglevel
>> 8;
1997 bcm_rpc_tp_msglevel_set(rpci
->rpc_th
, (uint8
)(msglevel
& 0xff), TRUE
);
2000 ASSERT(high
== FALSE
);
2001 /* high 8 bits are for rpc, low 8 bits are for tp */
2002 rpc_msg_level
= msglevel
>> 8;
2003 bcm_rpc_tp_msglevel_set(rpci
->rpc_th
, (uint8
)(msglevel
& 0xff), FALSE
);
2009 bcm_rpc_dngl_suspend_enable_set(rpc_info_t
*rpc
, uint32 val
)
2011 rpc
->suspend_enable
= val
;
2015 bcm_rpc_dngl_suspend_enable_get(rpc_info_t
*rpc
, uint32
*pval
)
2017 *pval
= rpc
->suspend_enable
;