GUI: Fix Tomato RAF theme for all builds. Compilation typo.
[tomato.git] / release / src-rt-6.x.4708 / shared / bcm_rpc.c
blob cd9944b9bb9ef674c100f584a45f547e6edc7ad7
1 /*
2 * RPC layer. It links to the bus layer via a transport layer (bus dependent)
3 * Broadcom 802.11abg Networking Device Driver
5 * Copyright (C) 2012, Broadcom Corporation
6 * All Rights Reserved.
7 *
8 * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom Corporation;
9 * the contents of this file may not be disclosed to third parties, copied
10 * or duplicated in any form, in whole or in part, without the prior
11 * written permission of Broadcom Corporation.
13 * $Id: bcm_rpc.c 358609 2012-09-24 21:59:30Z $
16 #include <epivers.h>
17 #include <typedefs.h>
18 #include <bcmdefs.h>
19 #include <bcmendian.h>
20 #include <osl.h>
21 #include <bcmutils.h>
23 #include <bcm_rpc_tp.h>
24 #include <bcm_rpc.h>
25 #include <rpc_osl.h>
26 #include <bcmdevs.h>
28 #if (!defined(WLC_HIGH) && !defined(WLC_LOW))
29 #error "SPLIT"
30 #endif
31 #if defined(WLC_HIGH) && defined(WLC_LOW)
32 #error "SPLIT"
33 #endif
35 /* RPC may use OS APIs directly to avoid overloading osl.h
36 * HIGH_ONLY supports NDIS and LINUX so far; it can be ported to other OSes if needed
38 #ifdef WLC_HIGH
39 #if !defined(NDIS) && !defined(linux)
40 #error "RPC only supports NDIS and LINUX in HIGH driver"
41 #endif
42 #endif /* WLC_HIGH */
43 #ifdef WLC_LOW
44 #error "RPC only supports HNDRTE in LOW driver"
45 #endif /* WLC_LOW */
47 /* use local flag BCMDBG_RPC so that it can be turned on without global BCMDBG */
48 #ifdef BCMDBG
49 #ifndef BCMDBG_RPC
50 #define BCMDBG_RPC
51 #endif
52 #endif /* BCMDBG */
54 /* #define BCMDBG_RPC */
56 static uint32 rpc_msg_level = RPC_ERROR_VAL;
57 /* Print error messages even for non-debug drivers
58 * NOTE: RPC_PKTLOG_VAL can be added in bcm_rpc_pktlog_init()
61 /* osl_msg_level is a bitvector with defs in wlioctl.h */
62 #define RPC_ERR(args) do {if (rpc_msg_level & RPC_ERROR_VAL) printf args;} while (0)
64 #ifdef BCMDBG_RPC
65 #define RPC_TRACE(args) do {if (rpc_msg_level & RPC_TRACE_VAL) printf args;} while (0)
66 #define RPC_PKTTRACE_ON() (rpc_msg_level & RPC_PKTTRACE_VAL)
67 #else
68 #ifdef BCMDBG_ERR
69 #define RPC_TRACE(args) do {if (rpc_msg_level & RPC_TRACE_VAL) printf args;} while (0)
70 #define RPC_PKTTRACE_ON() (FALSE)
71 #define prhex(a, b, c) do { } while (0) /* prhex is not defined under */
72 #define RPC_PKTLOG_ON() (FALSE)
73 #else
74 #define RPC_TRACE(args)
75 #define RPC_PKTTRACE_ON() (FALSE)
76 #define RPC_PKTLOG_ON() (FALSE)
77 #define prhex(a, b, c) do { } while (0) /* prhex is not defined under */
78 #endif /* BCMDBG_ERR */
79 #endif /* BCMDBG_RPC */
81 #ifdef BCMDBG_RPC
82 #define RPC_PKTLOG_ON() (rpc_msg_level & RPC_PKTLOG_VAL)
83 #else
84 #define RPC_PKTLOG_ON() (FALSE)
85 #endif /* BCMDBG_RPC */
87 #ifndef BCM_RPC_REORDER_LIMIT
88 #ifdef WLC_HIGH
89 #define BCM_RPC_REORDER_LIMIT 40 /* limit before tossing the hole to avoid overflowing the reorder queue */
90 #else
91 #define BCM_RPC_REORDER_LIMIT 30
92 #endif
93 #endif /* BCM_RPC_REORDER_LIMIT */
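/* Summary (for clarity): BCM_RPC_REORDER_LIMIT bounds how many out-of-order
 * frames bcm_rpc_buf_recv() will hold on reorder_pktq while waiting for a
 * missing transaction ID. Once reorder_depth exceeds the limit, the receive
 * path gives up on the hole: it advances oe_trans to the closest queued ID
 * (bcm_rpc_reorder_next_xid()), counts the skipped IDs in cnt_rx_drop_hole,
 * and drains whatever can now be processed from the queue.
 */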
95 /* OS specific files for locks */
96 #define RPC_INIT_WAIT_TIMEOUT_MSEC 2000
97 #ifndef RPC_RETURN_WAIT_TIMEOUT_MSEC
98 #if defined(NDIS) && !defined(SDIO_BMAC)
99 #define RPC_RETURN_WAIT_TIMEOUT_MSEC 800 /* NDIS OIDs timeout in 1 second.
100 * This timeout needs to be smaller than that
102 #else
103 #define RPC_RETURN_WAIT_TIMEOUT_MSEC 3200
104 #endif
105 #endif /* RPC_RETURN_WAIT_TIMEOUT_MSEC */
107 /* RPC Frame formats */
108 /* |--------------||-------------|
109 * RPC Header RPC Payload
111 * 1) RPC Header:
112 * |-------|--------|----------------|
113 * 31 23 15 0
114 * Type Session Transaction ID
115 * = 0 Data
116 * = 1 Return
117 * = 2 Mgn
119 * 2) payload
120 * Data and Return RPC payload is RPC call dependent
122 * Management frame formats:
123 * |--------|--------|--------|--------|
124 * Byte 0 1 2 3
125 * Header Action Version Reason
127 * Version is included only for the following actions:
128 * -- CONNECT
129 * -- RESET
130 * -- DOWN
131 * -- CONNECT_ACK
132 * -- CONNECT_NACK
134 * Reason is sent only by BMAC for the following actions:
135 * -- CONNECT_ACK
136 * -- CONNECT_NACK
139 typedef uint32 rpc_header_t;
141 #define RPC_HDR_LEN sizeof(rpc_header_t)
142 #define RPC_ACN_LEN sizeof(uint32)
143 #define RPC_VER_LEN sizeof(EPI_VERSION_NUM)
144 #define RPC_RC_LEN sizeof(uint32)
145 #define RPC_CHIPID_LEN sizeof(uint32)
147 #define RPC_HDR_TYPE(_rpch) (((_rpch) >> 24) & 0xff)
148 #define RPC_HDR_SESSION(_rpch) (((_rpch) >> 16) & 0xff)
149 #define RPC_HDR_XACTION(_rpch) ((_rpch) & 0xffff) /* When the type is data or return */
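/* Worked example (illustrative): with the default session 0x69 set in
 * bcm_rpc_attach() and transaction ID 5, rpc_header_prep() builds a data
 * header of (RPC_TYPE_DATA << 24) | (0x69 << 16) | 0x0005 == 0x01690005,
 * which is stored little-endian on the wire. After ltoh32() the receive
 * path decodes it as:
 *   RPC_HDR_TYPE(0x01690005)    == RPC_TYPE_DATA (1)
 *   RPC_HDR_SESSION(0x01690005) == 0x69
 *   RPC_HDR_XACTION(0x01690005) == 0x0005
 */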
151 #define NAME_ENTRY(x) #x
153 /* RPC Header defines -- attached to every RPC call */
154 typedef enum {
155 RPC_TYPE_UNKNOWN, /* Unknown header type */
156 RPC_TYPE_DATA, /* RPC calls that go straight through */
157 RPC_TYPE_RTN, /* RPC calls that are synchronous */
158 RPC_TYPE_MGN, /* RPC state management */
159 } rpc_type_t;
161 typedef enum {
162 RPC_RC_ACK = 0,
163 RPC_RC_HELLO,
164 RPC_RC_RECONNECT,
165 RPC_RC_VER_MISMATCH
166 } rpc_rc_t;
168 /* Management actions */
169 typedef enum {
170 RPC_NULL = 0,
171 RPC_HELLO,
172 RPC_CONNECT, /* Master (high) to slave (low). Slave to copy current
173 * session id and transaction id (mostly 0)
175 RPC_CONNECT_ACK, /* Ack from LOW_RPC */
176 RPC_DOWN, /* Down the other-end. The actual action is
177 * end specific.
179 RPC_CONNECT_NACK, /* Nack from LOW_RPC. This indicates potentially that
180 * dongle could already be running
182 RPC_RESET /* Resync using other end's session id (mostly HIGH->LOW)
183 * Also, reset the oe_trans, and trans to 0
185 } rpc_acn_t;
187 /* RPC States */
188 typedef enum {
189 UNINITED = 0,
190 WAIT_HELLO,
191 HELLO_RECEIVED,
192 WAIT_INITIALIZING,
193 ESTABLISHED,
194 DISCONNECTED,
195 ASLEEP,
196 WAIT_RESUME
197 } rpc_state_t;
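/* Typical flow (see the handlers below): both ends start in WAIT_INITIALIZING
 * via bcm_rpc_up(); the HIGH driver sends RPC_CONNECT and moves to ESTABLISHED
 * when the LOW driver answers with RPC_CONNECT_ACK (a NACK or timeout makes
 * bcm_rpc_up() fail). bcm_rpc_sleep()/bcm_rpc_resume() pass through ASLEEP and
 * WAIT_RESUME around suspend, bcm_rpc_down() ends in DISCONNECTED, and
 * WAIT_HELLO/HELLO_RECEIVED are used only by the NDIS hello handshake.
 */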
199 #define HDR_STATE_MISMATCH 0x1
200 #define HDR_SESSION_MISMATCH 0x2
201 #define HDR_XACTION_MISMATCH 0x4
203 #ifdef BCMDBG_RPC
204 #define RPC_PKTLOG_DATASIZE 4
205 struct rpc_pktlog {
206 uint16 trans;
207 int len;
208 uint32 data[RPC_PKTLOG_DATASIZE]; /* First few bytes of the payload only */
210 #endif /* BCMDBG_RPC */
212 #ifdef WLC_LOW
213 static void bcm_rpc_dump_state(uint32 arg, uint argc, char *argv[]);
214 #else
215 static void bcm_rpc_fatal_dump(void *arg);
216 #endif /* WLC_LOW */
218 #ifdef BCMDBG_RPC
219 static void _bcm_rpc_dump_pktlog(rpc_info_t *rpci);
220 #ifdef WLC_HIGH
221 static void bcm_rpc_dump_pktlog_high(rpc_info_t *rpci);
222 #else
223 static void bcm_rpc_dump_pktlog_low(uint32 arg, uint argc, char *argv[]);
224 #endif
225 #endif /* BCMDBG_RPC */
227 #ifdef WLC_HIGH
228 /* This lock is needed to handle the receive re-order queue that guarantees
229 * in-order receive, since it was observed that (in NDIS at least) the USB
230 * subsystem does not guarantee it
232 #ifdef NDIS
233 #define RPC_RO_LOCK(ri) NdisAcquireSpinLock(&(ri)->reorder_lock)
234 #define RPC_RO_UNLOCK(ri) NdisReleaseSpinLock(&(ri)->reorder_lock)
235 #else
236 #define RPC_RO_LOCK(ri) spin_lock_irqsave(&(ri)->reorder_lock, (ri)->reorder_flags);
237 #define RPC_RO_UNLOCK(ri) spin_unlock_irqrestore(&(ri)->reorder_lock, (ri)->reorder_flags);
238 #endif /* NDIS */
239 #else
240 #define RPC_RO_LOCK(ri) do { } while (0)
241 #define RPC_RO_UNLOCK(ri) do { } while (0)
242 #endif /* WLC_HIGH */
244 struct rpc_info {
245 void *pdev; /* Per-port driver handle for rx callback */
246 struct rpc_transport_info *rpc_th; /* transport layer handle */
247 osl_t *osh;
249 rpc_dispatch_cb_t dispatchcb; /* callback when data is received */
250 void *ctx; /* Callback context */
252 rpc_down_cb_t dncb; /* callback when RPC goes down */
253 void *dnctx; /* Callback context */
255 rpc_resync_cb_t resync_cb; /* callback when host reenabled and dongle
256 * was not rebooted. Uses dnctx
258 rpc_txdone_cb_t txdone_cb; /* when non-null, called when a tx has completed. */
259 uint8 rpc_tp_hdr_len; /* header len for rpc and tp layer */
261 uint8 session; /* 255 sessions enough ? */
262 uint16 trans; /* More than 255 can't be pending */
263 uint16 oe_trans; /* OtherEnd tran id, dongle->host */
264 uint16 rtn_trans; /* BMAC: callreturn Id dongle->host */
265 uint16 oe_rtn_trans; /* HIGH: received BMAC callreturn id */
267 rpc_buf_t *rtn_rpcbuf; /* RPC ID for return transaction */
269 rpc_state_t state;
270 uint reset; /* # of resets */
271 uint cnt_xidooo; /* transactionID out of order */
272 uint cnt_rx_drop_hole; /* number of rpc calls dropped due to reorder overflow */
273 uint cnt_reorder_overflow; /* number of time the reorder queue overflowed,
274 * causing drops
276 uint32 version;
278 bool wait_init;
279 bool wait_return;
281 rpc_osl_t *rpc_osh;
283 #ifdef BCMDBG_RPC
284 struct rpc_pktlog *send_log;
285 uint16 send_log_idx; /* Point to the next slot to fill-in */
286 uint16 send_log_num; /* Number of entries */
288 struct rpc_pktlog *recv_log;
289 uint16 recv_log_idx; /* Point to the next slot to fill-in */
290 uint16 recv_log_num; /* Number of entries */
291 #endif /* BCMDBG_RPC */
293 #ifdef WLC_HIGH
294 #if defined(NDIS)
295 NDIS_SPIN_LOCK reorder_lock; /* TO RAISE the IRQ */
296 bool reorder_lock_alloced;
297 bool down_oe_pending;
298 bool down_pending;
299 #elif defined(linux)
300 spinlock_t reorder_lock;
301 ulong reorder_flags;
302 #endif /* NDIS */
303 #endif /* WLC_HIGH */
304 /* Protect against rx reordering */
305 rpc_buf_t *reorder_pktq;
306 uint reorder_depth;
307 uint reorder_depth_max;
308 uint chipid;
309 uint suspend_enable;
312 static void bcm_rpc_tx_complete(void *ctx, rpc_buf_t *buf, int status);
313 static void bcm_rpc_buf_recv(void *context, rpc_buf_t *);
314 static void bcm_rpc_process_reorder_queue(rpc_info_t *rpci);
315 static bool bcm_rpc_buf_recv_inorder(rpc_info_t *rpci, rpc_buf_t *rpc_buf, mbool hdr_invalid);
317 #ifdef WLC_HIGH
318 static rpc_buf_t *bcm_rpc_buf_recv_high(struct rpc_info *rpci, rpc_type_t type, rpc_acn_t acn,
319 rpc_buf_t *rpc_buf);
320 static int bcm_rpc_resume_oe(struct rpc_info *rpci);
321 #ifdef NDIS
322 static int bcm_rpc_hello(rpc_info_t *rpci);
323 #endif
324 #else
325 static rpc_buf_t *bcm_rpc_buf_recv_low(struct rpc_info *rpci, rpc_header_t header,
326 rpc_acn_t acn, rpc_buf_t *rpc_buf);
327 #endif /* WLC_HIGH */
328 static int bcm_rpc_up(rpc_info_t *rpci);
329 static uint16 bcm_rpc_reorder_next_xid(struct rpc_info *rpci);
332 #ifdef BCMDBG_RPC
333 static void bcm_rpc_pktlog_init(rpc_info_t *rpci);
334 static void bcm_rpc_pktlog_deinit(rpc_info_t *rpci);
335 static struct rpc_pktlog *bcm_rpc_prep_entry(struct rpc_info * rpci, rpc_buf_t *b,
336 struct rpc_pktlog *cur, bool tx);
337 static void bcm_rpc_add_entry_tx(struct rpc_info * rpci, struct rpc_pktlog *cur);
338 static void bcm_rpc_add_entry_rx(struct rpc_info * rpci, struct rpc_pktlog *cur);
339 #endif /* BCMDBG_RPC */
342 /* Header and component retrieval functions */
343 static INLINE rpc_header_t
344 bcm_rpc_header(struct rpc_info *rpci, rpc_buf_t *rpc_buf)
346 rpc_header_t *rpch = (rpc_header_t *)bcm_rpc_buf_data(rpci->rpc_th, rpc_buf);
347 return ltoh32(*rpch);
350 static INLINE rpc_acn_t
351 bcm_rpc_mgn_acn(struct rpc_info *rpci, rpc_buf_t *rpc_buf)
353 rpc_header_t *rpch = (rpc_header_t *)bcm_rpc_buf_data(rpci->rpc_th, rpc_buf);
355 return (rpc_acn_t)ltoh32(*rpch);
358 static INLINE uint32
359 bcm_rpc_mgn_ver(struct rpc_info *rpci, rpc_buf_t *rpc_buf)
361 rpc_header_t *rpch = (rpc_header_t *)bcm_rpc_buf_data(rpci->rpc_th, rpc_buf);
363 return ltoh32(*rpch);
366 static INLINE rpc_rc_t
367 bcm_rpc_mgn_reason(struct rpc_info *rpci, rpc_buf_t *rpc_buf)
369 rpc_header_t *rpch = (rpc_header_t *)bcm_rpc_buf_data(rpci->rpc_th, rpc_buf);
370 return (rpc_rc_t)ltoh32(*rpch);
373 #ifdef WLC_HIGH
374 static uint32
375 bcm_rpc_mgn_chipid(struct rpc_info *rpci, rpc_buf_t *rpc_buf)
377 rpc_header_t *rpch = (rpc_header_t *)bcm_rpc_buf_data(rpci->rpc_th, rpc_buf);
379 return ltoh32(*rpch);
381 #endif /* WLC_HIGH */
383 static INLINE uint
384 bcm_rpc_hdr_xaction_validate(struct rpc_info *rpci, rpc_header_t header, uint32 *xaction,
385 bool verbose)
387 uint type;
389 type = RPC_HDR_TYPE(header);
390 *xaction = RPC_HDR_XACTION(header);
392 /* High driver does not check the return transaction to be in order */
393 if (type != RPC_TYPE_MGN &&
394 #ifdef WLC_HIGH
395 type != RPC_TYPE_RTN &&
396 #endif
397 *xaction != rpci->oe_trans) {
398 #ifdef WLC_HIGH
399 if (verbose) {
400 RPC_ERR(("Transaction mismatch: expected:0x%x got:0x%x type: %d\n",
401 rpci->oe_trans, *xaction, type));
403 #endif
404 return HDR_XACTION_MISMATCH;
407 return 0;
410 static INLINE uint
411 bcm_rpc_hdr_session_validate(struct rpc_info *rpci, rpc_header_t header)
413 #ifdef WLC_LOW
414 if (RPC_HDR_TYPE(header) == RPC_TYPE_MGN)
415 return 0;
416 #endif
418 if (rpci->session != RPC_HDR_SESSION(header))
419 return HDR_SESSION_MISMATCH;
420 return 0;
423 static INLINE uint
424 bcm_rpc_hdr_state_validate(struct rpc_info *rpci, rpc_header_t header)
426 uint type = RPC_HDR_TYPE(header);
428 if ((type == RPC_TYPE_UNKNOWN) || (type > RPC_TYPE_MGN))
429 return HDR_STATE_MISMATCH;
431 /* Everything allowed during this transition time */
432 if (rpci->state == ASLEEP)
433 return 0;
435 /* Only management frames are allowed before the ESTABLISHED state */
436 if ((rpci->state != ESTABLISHED) && (type != RPC_TYPE_MGN)) {
437 RPC_ERR(("bcm_rpc_header_validate: State mismatch: state:%d type:%d\n",
438 rpci->state, type));
439 return HDR_STATE_MISMATCH;
442 return 0;
445 static INLINE mbool
446 bcm_rpc_hdr_validate(struct rpc_info *rpci, rpc_buf_t *rpc_buf, uint32 *xaction,
447 bool verbose)
449 /* First check the state against the type */
450 mbool ret = 0;
451 rpc_header_t header = bcm_rpc_header(rpci, rpc_buf);
453 mboolset(ret, bcm_rpc_hdr_state_validate(rpci, header));
454 mboolset(ret, bcm_rpc_hdr_xaction_validate(rpci, header, xaction, verbose));
455 mboolset(ret, bcm_rpc_hdr_session_validate(rpci, header));
457 return ret;
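/* The returned mbool is a bitmask of the HDR_*_MISMATCH flags above.
 * bcm_rpc_buf_recv() treats a pure HDR_XACTION_MISMATCH (header otherwise
 * valid) as an out-of-order frame and parks it on the reorder queue; any other
 * mismatch bit causes the frame to be dropped in bcm_rpc_buf_recv_inorder().
 */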
460 struct rpc_info *
461 BCMATTACHFN(bcm_rpc_attach)(void *pdev, osl_t *osh, struct rpc_transport_info *rpc_th,
462 uint16 *devid)
464 struct rpc_info *rpci;
466 #ifndef WLC_HIGH
467 UNUSED_PARAMETER(devid);
468 #endif /* WLC_HIGH */
470 if ((rpci = (struct rpc_info *)MALLOC(osh, sizeof(struct rpc_info))) == NULL)
471 return NULL;
473 bzero(rpci, sizeof(struct rpc_info));
475 rpci->osh = osh;
476 rpci->pdev = pdev;
477 rpci->rpc_th = rpc_th;
478 rpci->session = 0x69;
480 /* initialize lock and queue */
481 rpci->rpc_osh = rpc_osl_attach(osh);
483 if (rpci->rpc_osh == NULL) {
484 RPC_ERR(("bcm_rpc_attach: osl attach failed\n"));
485 goto fail;
488 bcm_rpc_tp_register_cb(rpc_th, bcm_rpc_tx_complete, rpci,
489 bcm_rpc_buf_recv, rpci, rpci->rpc_osh);
491 rpci->version = EPI_VERSION_NUM;
493 rpci->rpc_tp_hdr_len = RPC_HDR_LEN + bcm_rpc_buf_tp_header_len(rpci->rpc_th);
495 #if defined(WLC_HIGH) && defined(NDIS)
496 bcm_rpc_hello(rpci);
497 #endif
499 if (bcm_rpc_up(rpci)) {
500 RPC_ERR(("bcm_rpc_attach: rpc_up failed\n"));
501 goto fail;
504 #ifdef WLC_HIGH
505 *devid = (uint16)rpci->chipid;
506 #endif
508 return rpci;
509 fail:
510 bcm_rpc_detach(rpci);
511 return NULL;
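/* Find the queued transaction ID closest ahead of oe_trans. The subtraction
 * below is done in uint16 arithmetic, so IDs that have wrapped past 0xffff
 * still compare correctly: the smallest unsigned delta wins and becomes the
 * ID to resume at.
 */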
514 static uint16
515 bcm_rpc_reorder_next_xid(struct rpc_info *rpci)
517 rpc_buf_t * buf;
518 rpc_header_t header;
519 uint16 cur_xid = rpci->oe_trans;
520 uint16 min_xid = 0;
521 uint16 min_delta = 0xffff;
522 uint16 xid, delta;
524 ASSERT(rpci->rpc_th);
525 for (buf = rpci->reorder_pktq;
526 buf != NULL;
527 buf = bcm_rpc_buf_next_get(rpci->rpc_th, buf)) {
528 header = bcm_rpc_header(rpci, buf);
529 xid = RPC_HDR_XACTION(header);
530 delta = xid - cur_xid;
532 if (delta < min_delta) {
533 min_delta = delta;
534 min_xid = xid;
538 return min_xid;
541 void
542 BCMATTACHFN(bcm_rpc_detach)(struct rpc_info *rpci)
544 if (!rpci)
545 return;
547 bcm_rpc_down(rpci);
549 if (rpci->reorder_pktq) {
550 rpc_buf_t * node;
551 ASSERT(rpci->rpc_th);
552 while ((node = rpci->reorder_pktq)) {
553 rpci->reorder_pktq = bcm_rpc_buf_next_get(rpci->rpc_th,
554 node);
555 bcm_rpc_buf_next_set(rpci->rpc_th, node, NULL);
556 #if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY) || defined(BCM_RPC_ROC)
557 PKTFREE(rpci->osh, node, FALSE);
558 #else
559 bcm_rpc_tp_buf_free(rpci->rpc_th, node);
560 #endif /* BCM_RPC_NOCOPY || BCM_RPC_RXNOCOPY || BCM_RPC_ROC */
562 ASSERT(rpci->reorder_pktq == NULL);
563 rpci->reorder_depth = 0;
564 rpci->reorder_depth_max = 0;
567 #ifdef WLC_HIGH
568 #if defined(NDIS)
569 if (rpci->reorder_lock_alloced)
570 NdisFreeSpinLock(&rpci->reorder_lock);
571 #endif
572 #endif /* WLC_HIGH */
574 /* rpc is going away, cut off registered cbs from rpc_tp layer */
575 bcm_rpc_tp_deregister_cb(rpci->rpc_th);
577 #ifdef WLC_LOW
578 bcm_rpc_tp_txflowctlcb_deinit(rpci->rpc_th);
579 #endif
581 if (rpci->rpc_osh)
582 rpc_osl_detach(rpci->rpc_osh);
584 MFREE(rpci->osh, rpci, sizeof(struct rpc_info));
585 rpci = NULL;
588 rpc_buf_t *
589 bcm_rpc_buf_alloc(struct rpc_info *rpci, int datalen)
591 rpc_buf_t *rpc_buf;
592 int len = datalen + RPC_HDR_LEN;
594 ASSERT(rpci->rpc_th);
595 rpc_buf = bcm_rpc_tp_buf_alloc(rpci->rpc_th, len);
597 if (rpc_buf == NULL)
598 return NULL;
600 /* Reserve space for RPC Header */
601 bcm_rpc_buf_pull(rpci->rpc_th, rpc_buf, RPC_HDR_LEN);
603 return rpc_buf;
606 uint
607 bcm_rpc_buf_header_len(struct rpc_info *rpci)
609 return rpci->rpc_tp_hdr_len;
612 void
613 bcm_rpc_buf_free(struct rpc_info *rpci, rpc_buf_t *rpc_buf)
615 bcm_rpc_tp_buf_free(rpci->rpc_th, rpc_buf);
618 void
619 bcm_rpc_rxcb_init(struct rpc_info *rpci, void *ctx, rpc_dispatch_cb_t cb,
620 void *dnctx, rpc_down_cb_t dncb, rpc_resync_cb_t resync_cb, rpc_txdone_cb_t txdone_cb)
622 rpci->dispatchcb = cb;
623 rpci->ctx = ctx;
624 rpci->dnctx = dnctx;
625 rpci->dncb = dncb;
626 rpci->resync_cb = resync_cb;
627 rpci->txdone_cb = txdone_cb;
630 void
631 bcm_rpc_rxcb_deinit(struct rpc_info *rpci)
633 if (!rpci)
634 return;
636 rpci->dispatchcb = NULL;
637 rpci->ctx = NULL;
638 rpci->dnctx = NULL;
639 rpci->dncb = NULL;
640 rpci->resync_cb = NULL;
643 struct rpc_transport_info *
644 bcm_rpc_tp_get(struct rpc_info *rpci)
646 return rpci->rpc_th;
649 /* get original os handle */
650 osl_t*
651 bcm_rpc_osh_get(struct rpc_info *rpci)
653 return rpci->osh;
657 #ifdef BCM_RPC_TOC
658 static void
659 bcm_rpc_tp_tx_encap(struct rpc_info *rpci, rpc_buf_t *rpc_buf)
661 uint32 *tp_lenp;
662 uint32 rpc_len;
664 rpc_len = pkttotlen(rpci->osh, rpc_buf);
665 tp_lenp = (uint32*)bcm_rpc_buf_push(rpci->rpc_th, rpc_buf, BCM_RPC_TP_ENCAP_LEN);
666 *tp_lenp = htol32(rpc_len);
669 #endif
670 static void
671 rpc_header_prep(struct rpc_info *rpci, rpc_header_t *header, uint type, uint action)
673 uint32 v;
675 v = 0;
676 v |= (type << 24);
678 /* Mgmt action follows the header */
679 if (type == RPC_TYPE_MGN) {
680 *(header + 1) = htol32(action);
681 #ifdef WLC_HIGH
682 if (action == RPC_CONNECT || action == RPC_RESET || action == RPC_HELLO)
683 *(header + 2) = htol32(rpci->version);
684 #endif
686 #ifdef WLC_LOW
687 else if (type == RPC_TYPE_RTN)
688 v |= (rpci->rtn_trans);
689 #endif
690 else
691 v |= (rpci->trans);
693 v |= (rpci->session << 16);
695 *header = htol32(v);
697 RPC_TRACE(("rpc_header_prep: type:0x%x action: %d trans:0x%x\n",
698 type, action, rpci->trans));
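/* Framing sketch (mirrors bcm_rpc_call() below): callers reserve RPC_HDR_LEN
 * up front via bcm_rpc_buf_alloc(), then push the header back on and fill it
 * in just before transmit, e.g.
 *   header = (rpc_header_t *)bcm_rpc_buf_push(rpci->rpc_th, b, RPC_HDR_LEN);
 *   rpc_header_prep(rpci, header, RPC_TYPE_DATA, 0);
 *   bcm_rpc_tp_buf_send(rpci->rpc_th, b);
 * with rpci->trans incremented only after a successful send.
 */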
701 #if defined(WLC_HIGH) && defined(NDIS)
703 static int
704 bcm_rpc_hello(struct rpc_info *rpci)
706 int ret = -1, count = 10;
707 rpc_buf_t *rpc_buf;
708 rpc_header_t *header;
710 RPC_OSL_LOCK(rpci->rpc_osh);
711 rpci->state = WAIT_HELLO;
712 rpci->wait_init = TRUE;
713 RPC_OSL_UNLOCK(rpci->rpc_osh);
715 while (ret && (count-- > 0)) {
717 /* Allocate a frame, prep it, send and wait */
718 rpc_buf = bcm_rpc_tp_buf_alloc(rpci->rpc_th, RPC_HDR_LEN + RPC_ACN_LEN + RPC_VER_LEN
719 + RPC_CHIPID_LEN);
721 if (!rpc_buf)
722 break;
724 header = (rpc_header_t *)bcm_rpc_buf_data(rpci->rpc_th, rpc_buf);
726 rpc_header_prep(rpci, header, RPC_TYPE_MGN, RPC_HELLO);
728 if (bcm_rpc_tp_buf_send(rpci->rpc_th, rpc_buf)) {
729 RPC_ERR(("%s: bcm_rpc_tp_buf_send() call failed\n", __FUNCTION__));
730 return -1;
733 RPC_ERR(("%s: waiting to receive hello\n", __FUNCTION__));
735 RPC_OSL_WAIT(rpci->rpc_osh, RPC_INIT_WAIT_TIMEOUT_MSEC, NULL);
737 RPC_TRACE(("%s: wait done, ret = %d\n", __FUNCTION__, ret));
739 /* See if we timed out or actually initialized */
740 RPC_OSL_LOCK(rpci->rpc_osh);
741 if (rpci->state == HELLO_RECEIVED)
742 ret = 0;
743 RPC_OSL_UNLOCK(rpci->rpc_osh);
747 /* See if we timed out or actually initialized */
748 RPC_OSL_LOCK(rpci->rpc_osh);
749 rpci->wait_init = FALSE;
750 RPC_OSL_UNLOCK(rpci->rpc_osh);
752 return ret;
755 #endif /* WLC_HIGH && NDIS */
757 #ifdef WLC_HIGH
758 static int
759 bcm_rpc_up(struct rpc_info *rpci)
761 rpc_buf_t *rpc_buf;
762 rpc_header_t *header;
763 int ret;
765 /* Allocate a frame, prep it, send and wait */
766 rpc_buf = bcm_rpc_tp_buf_alloc(rpci->rpc_th, RPC_HDR_LEN + RPC_ACN_LEN + RPC_VER_LEN
767 + RPC_CHIPID_LEN);
769 if (!rpc_buf)
770 return -1;
772 header = (rpc_header_t *)bcm_rpc_buf_data(rpci->rpc_th, rpc_buf);
774 rpc_header_prep(rpci, header, RPC_TYPE_MGN, RPC_CONNECT);
776 RPC_OSL_LOCK(rpci->rpc_osh);
777 rpci->state = WAIT_INITIALIZING;
778 rpci->wait_init = TRUE;
780 #if defined(NDIS)
781 if (!rpci->reorder_lock_alloced) {
782 NdisAllocateSpinLock(&rpci->reorder_lock);
783 rpci->reorder_lock_alloced = TRUE;
785 #elif defined(linux)
786 spin_lock_init(&rpci->reorder_lock);
787 #endif
789 RPC_OSL_UNLOCK(rpci->rpc_osh);
791 if (bcm_rpc_tp_buf_send(rpci->rpc_th, rpc_buf)) {
792 RPC_ERR(("%s: bcm_rpc_tp_buf_send() call failed\n", __FUNCTION__));
793 return -1;
796 /* Wait for state to change to established. The receive thread knows what to do */
797 RPC_ERR(("%s: waiting to be connected\n", __FUNCTION__));
799 ret = RPC_OSL_WAIT(rpci->rpc_osh, RPC_INIT_WAIT_TIMEOUT_MSEC, NULL);
801 RPC_TRACE(("%s: wait done, ret = %d\n", __FUNCTION__, ret));
803 if (ret < 0) {
804 rpci->wait_init = FALSE;
805 return ret;
808 /* See if we timed out or actually initialized */
809 RPC_OSL_LOCK(rpci->rpc_osh);
810 if (rpci->state == ESTABLISHED)
811 ret = 0;
812 else
813 ret = -1;
814 rpci->wait_init = FALSE;
815 RPC_OSL_UNLOCK(rpci->rpc_osh);
817 #ifdef BCMDBG_RPC
818 bcm_rpc_pktlog_init(rpci);
819 #endif
821 return ret;
825 bcm_rpc_is_asleep(struct rpc_info *rpci)
827 return (rpci->state == ASLEEP);
830 bool
831 bcm_rpc_sleep(struct rpc_info *rpci)
833 if (!rpci->suspend_enable)
834 return TRUE;
835 bcm_rpc_tp_sleep(rpci->rpc_th);
836 rpci->state = ASLEEP;
837 /* Ignore anything coming after this */
838 #ifdef NDIS
839 bcm_rpc_down(rpci);
840 #else
841 rpci->session++;
842 #endif
843 return TRUE;
846 #ifdef NDIS
848 bcm_rpc_shutdown(struct rpc_info *rpci)
850 int ret = -1;
852 if (rpci) {
853 ret = bcm_rpc_tp_shutdown(rpci->rpc_th);
854 rpci->state = DISCONNECTED;
856 return ret;
858 #endif /* NDIS */
860 bool
861 bcm_rpc_resume(struct rpc_info *rpci, int *fw_reload)
863 if (!rpci->suspend_enable)
864 return TRUE;
866 bcm_rpc_tp_resume(rpci->rpc_th, fw_reload);
867 #ifdef NDIS
868 if (fw_reload) {
869 rpci->trans = 0;
870 rpci->oe_trans = 0;
871 bcm_rpc_hello(rpci);
872 bcm_rpc_up(rpci);
874 else
875 rpci->state = ESTABLISHED;
876 #else
877 if (bcm_rpc_resume_oe(rpci) == 0) {
878 rpci->trans = 0;
879 rpci->oe_trans = 0;
881 #endif
882 RPC_TRACE(("bcm_rpc_resume done, state %d\n", rpci->state));
883 return (rpci->state == ESTABLISHED);
886 static int
887 bcm_rpc_resume_oe(struct rpc_info *rpci)
889 rpc_buf_t *rpc_buf;
890 rpc_header_t *header;
891 int ret;
893 /* Allocate a frame, prep it, send and wait */
894 rpc_buf = bcm_rpc_tp_buf_alloc(rpci->rpc_th, RPC_HDR_LEN + RPC_ACN_LEN + RPC_VER_LEN);
896 if (!rpc_buf)
897 return -1;
899 header = (rpc_header_t *)bcm_rpc_buf_data(rpci->rpc_th, rpc_buf);
901 rpc_header_prep(rpci, header, RPC_TYPE_MGN, RPC_RESET);
903 RPC_OSL_LOCK(rpci->rpc_osh);
904 rpci->state = WAIT_RESUME;
905 rpci->wait_init = TRUE;
906 RPC_OSL_UNLOCK(rpci->rpc_osh);
908 /* Don't care for the return value */
909 if (bcm_rpc_tp_buf_send(rpci->rpc_th, rpc_buf)) {
910 RPC_ERR(("%s: bcm_rpc_tp_buf_send() call failed\n", __FUNCTION__));
911 return -1;
914 /* Wait for state to change to established. The receive thread knows what to do */
915 RPC_ERR(("%s: waiting to be resumed\n", __FUNCTION__));
917 ret = RPC_OSL_WAIT(rpci->rpc_osh, RPC_INIT_WAIT_TIMEOUT_MSEC, NULL);
919 RPC_TRACE(("%s: wait done, ret = %d\n", __FUNCTION__, ret));
921 if (ret < 0) {
922 rpci->wait_init = FALSE;
923 return ret;
926 /* See if we timed out or actually initialized */
927 RPC_OSL_LOCK(rpci->rpc_osh);
928 if (rpci->state == ESTABLISHED)
929 ret = 0;
930 else
931 ret = -1;
932 rpci->wait_init = FALSE;
933 RPC_OSL_UNLOCK(rpci->rpc_osh);
935 return ret;
937 #else
938 static int
939 bcm_rpc_up(struct rpc_info *rpci)
941 rpci->state = WAIT_INITIALIZING;
943 #ifdef BCMDBG_RPC
944 bcm_rpc_pktlog_init(rpci);
945 hndrte_cons_addcmd("rpcpktdump", bcm_rpc_dump_pktlog_low, (uint32)rpci);
946 #endif
947 hndrte_cons_addcmd("rpcdump", bcm_rpc_dump_state, (uint32)rpci);
948 return 0;
951 static int
952 bcm_rpc_connect_resp(struct rpc_info *rpci, rpc_acn_t acn, uint32 reason)
954 rpc_buf_t *rpc_buf;
955 rpc_header_t *header;
957 /* Allocate a frame, prep it, send and wait */
958 rpc_buf = bcm_rpc_tp_buf_alloc(rpci->rpc_th, RPC_HDR_LEN + RPC_ACN_LEN +
959 RPC_RC_LEN + RPC_VER_LEN + RPC_CHIPID_LEN);
960 if (!rpc_buf) {
961 RPC_ERR(("%s: bcm_rpc_tp_buf_alloc() failed\n", __FUNCTION__));
962 return FALSE;
965 header = (rpc_header_t *)bcm_rpc_buf_data(rpci->rpc_th, rpc_buf);
967 rpc_header_prep(rpci, header, RPC_TYPE_MGN, acn);
969 *(header + 2) = ltoh32(rpci->version);
970 *(header + 3) = ltoh32(reason);
971 #ifdef BCMCHIPID
972 *(header + 4) = ltoh32(BCMCHIPID);
973 #endif /* BCMCHIPID */
974 if (bcm_rpc_tp_buf_send(rpci->rpc_th, rpc_buf)) {
975 RPC_ERR(("%s: bcm_rpc_tp_buf_send() call failed\n", __FUNCTION__));
976 return FALSE;
979 return TRUE;
981 #endif /* WLC_HIGH */
983 void
984 bcm_rpc_watchdog(struct rpc_info *rpci)
986 static uint32 uptime = 0;
988 #ifdef WLC_LOW
989 /* rpc watchdog is called every 5 msec in the low driver */
990 static uint32 count = 0;
991 count++;
992 if (count % 200 == 0) {
993 count = 0;
994 uptime++;
995 if (uptime % 60 == 0)
996 RPC_ERR(("rpc uptime %d minutes\n", (uptime / 60)));
998 #else
999 uptime++;
1000 if (uptime % 60 == 0) {
1001 RPC_ERR(("rpc uptime %d minutes\n", (uptime / 60)));
1003 #endif
1004 bcm_rpc_tp_watchdog(rpci->rpc_th);
1007 void
1008 bcm_rpc_down(struct rpc_info *rpci)
1010 RPC_ERR(("%s\n", __FUNCTION__));
1012 #ifdef BCMDBG_RPC
1013 bcm_rpc_pktlog_deinit(rpci);
1014 #endif
1016 RPC_OSL_LOCK(rpci->rpc_osh);
1017 if (rpci->state != DISCONNECTED && rpci->state != ASLEEP) {
1018 RPC_OSL_UNLOCK(rpci->rpc_osh);
1019 #ifdef WLC_HIGH
1020 bcm_rpc_fatal_dump(rpci);
1021 #else
1022 bcm_rpc_dump_state((uint32)rpci, 0, NULL);
1023 #endif
1024 RPC_OSL_LOCK(rpci->rpc_osh);
1025 rpci->state = DISCONNECTED;
1026 RPC_OSL_UNLOCK(rpci->rpc_osh);
1027 if (rpci->dncb)
1028 (rpci->dncb)(rpci->dnctx);
1029 bcm_rpc_tp_down(rpci->rpc_th);
1030 return;
1032 RPC_OSL_UNLOCK(rpci->rpc_osh);
1035 #if defined(USBAP) && (defined(WLC_HIGH) && !defined(WLC_LOW))
1036 /* For USBAP external image, reboot system upon RPC error instead of just turning RPC down */
1037 #include <siutils.h>
1038 void
1039 bcm_rpc_err_down(struct rpc_info *rpci)
1041 si_t *sih = si_kattach(SI_OSH);
1043 RPC_ERR(("%s: rebooting system due to RPC error.\n", __FUNCTION__));
1044 si_watchdog(sih, 1);
1046 #else
1047 #define bcm_rpc_err_down bcm_rpc_down
1048 #endif
1050 static void
1051 bcm_rpc_tx_complete(void *ctx, rpc_buf_t *buf, int status)
1053 struct rpc_info *rpci = (struct rpc_info *)ctx;
1055 RPC_TRACE(("%s: status 0x%x\n", __FUNCTION__, status));
1057 ASSERT(rpci && rpci->rpc_th);
1059 if (buf) {
1060 if (rpci->txdone_cb) {
1061 /* !!must pull off the rpc/tp header after dbus is done for wl driver */
1062 rpci->txdone_cb(rpci->ctx, buf);
1063 } else
1064 bcm_rpc_tp_buf_free(rpci->rpc_th, buf);
1069 bcm_rpc_call(struct rpc_info *rpci, rpc_buf_t *b)
1071 rpc_header_t *header;
1072 int err = 0;
1073 #ifdef BCMDBG_RPC
1074 struct rpc_pktlog cur;
1075 #endif
1077 RPC_TRACE(("%s:\n", __FUNCTION__));
1079 RPC_OSL_LOCK(rpci->rpc_osh);
1080 if (rpci->state != ESTABLISHED) {
1081 err = -1;
1082 RPC_OSL_UNLOCK(rpci->rpc_osh);
1083 #ifdef BCM_RPC_TOC
1085 header = (rpc_header_t *)bcm_rpc_buf_push(rpci->rpc_th, b, RPC_HDR_LEN);
1086 rpc_header_prep(rpci, header, RPC_TYPE_DATA, 0);
1087 bcm_rpc_tp_tx_encap(rpci, b);
1088 if (rpci->txdone_cb) {
1089 /* !!must pull off the rpc/tp header after dbus is done for wl driver */
1090 rpci->txdone_cb(rpci->ctx, b);
1091 } else
1093 #endif
1094 bcm_rpc_buf_free(rpci, b);
1096 goto done;
1098 RPC_OSL_UNLOCK(rpci->rpc_osh);
1100 #ifdef BCMDBG_RPC
1101 /* Prepare the current log entry but add only if the TX was successful */
1102 /* This is done here before DATA pointer gets modified */
1103 if (RPC_PKTLOG_ON())
1104 bcm_rpc_prep_entry(rpci, b, &cur, TRUE);
1105 #endif
1107 header = (rpc_header_t *)bcm_rpc_buf_push(rpci->rpc_th, b, RPC_HDR_LEN);
1109 rpc_header_prep(rpci, header, RPC_TYPE_DATA, 0);
1111 #ifdef BCMDBG_RPC
1112 if (RPC_PKTTRACE_ON()) {
1113 #ifdef BCMDBG
1114 prhex("RPC Call ", bcm_rpc_buf_data(rpci->rpc_th, b),
1115 bcm_rpc_buf_len_get(rpci->rpc_th, b));
1116 #endif
1118 #endif /* BCMDBG_RPC */
1120 if (bcm_rpc_tp_buf_send(rpci->rpc_th, b)) {
1121 RPC_ERR(("%s: bcm_rpc_tp_buf_send() call failed\n", __FUNCTION__));
1123 if (rpci->txdone_cb) {
1124 rpci->txdone_cb(rpci->ctx, b);
1125 } else
1126 bcm_rpc_tp_buf_free(rpci->rpc_th, b);
1128 bcm_rpc_err_down(rpci);
1129 return -1;
1132 RPC_OSL_LOCK(rpci->rpc_osh);
1133 rpci->trans++;
1134 RPC_OSL_UNLOCK(rpci->rpc_osh);
1136 #ifdef BCMDBG_RPC /* Since successful add the entry */
1137 if (RPC_PKTLOG_ON()) {
1138 bcm_rpc_add_entry_tx(rpci, &cur);
1140 #endif
1141 done:
1142 return err;
1145 #ifdef WLC_HIGH
1146 rpc_buf_t *
1147 bcm_rpc_call_with_return(struct rpc_info *rpci, rpc_buf_t *b)
1149 rpc_header_t *header;
1150 rpc_buf_t *retb = NULL;
1151 int ret;
1152 #ifdef BCMDBG_RPC
1153 struct rpc_pktlog cur;
1154 #endif
1155 bool timedout = FALSE;
1156 uint32 start_wait_time;
1158 RPC_TRACE(("%s:\n", __FUNCTION__));
1160 RPC_OSL_LOCK(rpci->rpc_osh);
1161 if (rpci->state != ESTABLISHED) {
1162 RPC_OSL_UNLOCK(rpci->rpc_osh);
1163 RPC_ERR(("%s: RPC call before ESTABLISHED state\n", __FUNCTION__));
1164 bcm_rpc_buf_free(rpci, b);
1165 return NULL;
1167 RPC_OSL_UNLOCK(rpci->rpc_osh);
1169 #ifdef BCMDBG_RPC
1170 /* Prepare the current log entry but add only if the TX was successful */
1171 /* This is done here before DATA pointer gets modified */
1172 if (RPC_PKTLOG_ON())
1173 bcm_rpc_prep_entry(rpci, b, &cur, TRUE);
1174 #endif
1176 header = (rpc_header_t *)bcm_rpc_buf_push(rpci->rpc_th, b, RPC_HDR_LEN);
1178 rpc_header_prep(rpci, header, RPC_TYPE_RTN, 0);
1180 RPC_OSL_LOCK(rpci->rpc_osh);
1181 rpci->trans++;
1182 ASSERT(rpci->rtn_rpcbuf == NULL);
1183 rpci->wait_return = TRUE;
1184 RPC_OSL_UNLOCK(rpci->rpc_osh);
1186 /* Prep the return packet BEFORE sending the buffer, and do it within the
1187 * spinlock at raised IRQL
1189 if ((ret = bcm_rpc_tp_recv_rtn(rpci->rpc_th)) != BCME_OK) {
1190 RPC_ERR(("%s: bcm_rpc_tp_recv_rtn() failed\n", __FUNCTION__));
1192 RPC_OSL_LOCK(rpci->rpc_osh);
1193 rpci->wait_return = FALSE;
1194 RPC_OSL_UNLOCK(rpci->rpc_osh);
1195 if ((ret != BCME_NORESOURCE) && (ret != BCME_BUSY))
1196 bcm_rpc_err_down(rpci);
1197 return NULL;
1201 #ifdef BCMDBG_RPC
1202 if (RPC_PKTTRACE_ON()) {
1203 #ifdef BCMDBG
1204 prhex("RPC Call With Return Buf", bcm_rpc_buf_data(rpci->rpc_th, b),
1205 bcm_rpc_buf_len_get(rpci->rpc_th, b));
1206 #endif
1208 #endif /* BCMDBG_RPC */
1210 if (bcm_rpc_tp_buf_send(rpci->rpc_th, b)) {
1211 RPC_ERR(("%s: bcm_rpc_bus_buf_send() failed\n", __FUNCTION__));
1213 RPC_OSL_LOCK(rpci->rpc_osh);
1214 rpci->wait_return = FALSE;
1215 RPC_OSL_UNLOCK(rpci->rpc_osh);
1216 bcm_rpc_err_down(rpci);
1217 return NULL;
1220 bcm_rpc_tp_agg_set(rpci->rpc_th, BCM_RPC_TP_HOST_AGG_AMPDU, FALSE);
1222 start_wait_time = OSL_SYSUPTIME();
1223 ret = RPC_OSL_WAIT(rpci->rpc_osh, RPC_RETURN_WAIT_TIMEOUT_MSEC, &timedout);
1225 /* When RPC_OSL_WAIT returns because of a pending signal, wait for the signal
1226 * to be processed
1228 RPC_OSL_LOCK(rpci->rpc_osh);
1229 while ((ret < 0) && ((OSL_SYSUPTIME() - start_wait_time) <= RPC_RETURN_WAIT_TIMEOUT_MSEC)) {
1230 RPC_OSL_UNLOCK(rpci->rpc_osh);
1231 ret = RPC_OSL_WAIT(rpci->rpc_osh, RPC_RETURN_WAIT_TIMEOUT_MSEC, &timedout);
1232 RPC_OSL_LOCK(rpci->rpc_osh);
1235 if (ret || timedout) {
1236 RPC_ERR(("%s: RPC call trans 0x%x return wait err %d timedout %d limit %d(ms)\n",
1237 __FUNCTION__, (rpci->trans - 1), ret, timedout,
1238 RPC_RETURN_WAIT_TIMEOUT_MSEC));
1239 rpci->wait_return = FALSE;
1240 RPC_OSL_UNLOCK(rpci->rpc_osh);
1241 #ifdef BCMDBG_RPC
1242 printf("Failed trans 0x%x len %d data 0x%x\n",
1243 cur.trans,
1244 cur.len,
1245 cur.data[0]);
1246 bcm_rpc_dump_pktlog_high(rpci);
1247 #endif
1248 bcm_rpc_err_down(rpci);
1249 return NULL;
1252 /* See if we timed out or actually initialized */
1253 ASSERT(rpci->rtn_rpcbuf != NULL); /* Make sure we've got the response */
1254 retb = rpci->rtn_rpcbuf;
1255 rpci->rtn_rpcbuf = NULL;
1256 rpci->wait_return = FALSE; /* Could have woken up by timeout */
1257 RPC_OSL_UNLOCK(rpci->rpc_osh);
1259 #ifdef BCMDBG_RPC /* Since successful add the entry */
1260 if (RPC_PKTLOG_ON())
1261 bcm_rpc_add_entry_tx(rpci, &cur);
1262 #endif
1264 return retb;
1266 #endif /* WLC_HIGH */
1268 #ifdef WLC_LOW
1270 bcm_rpc_call_return(struct rpc_info *rpci, rpc_buf_t *b)
1272 rpc_header_t *header;
1274 RPC_TRACE(("%s\n", __FUNCTION__));
1276 header = (rpc_header_t *)bcm_rpc_buf_push(rpci->rpc_th, b, RPC_HDR_LEN);
1278 rpc_header_prep(rpci, header, RPC_TYPE_RTN, 0);
1280 #ifdef BCMDBG_RPC
1281 if (RPC_PKTTRACE_ON()) {
1282 #ifdef BCMDBG
1283 prhex("RPC Call Return Buf", bcm_rpc_buf_data(rpci->rpc_th, b),
1284 bcm_rpc_buf_len_get(rpci->rpc_th, b));
1285 #endif
1287 #endif /* BCMDBG_RPC */
1289 /* If the TX fails, it is the sender's responsibility */
1290 if (bcm_rpc_tp_send_callreturn(rpci->rpc_th, b)) {
1291 RPC_ERR(("%s: bcm_rpc_tp_buf_send() call failed\n", __FUNCTION__));
1292 bcm_rpc_err_down(rpci);
1293 return -1;
1296 rpci->rtn_trans++;
1297 return 0;
1299 #endif /* WLC_LOW */
1301 /* This is expected to be called at the DPC level of the bus driver? */
1302 static void
1303 bcm_rpc_buf_recv(void *context, rpc_buf_t *rpc_buf)
1305 uint xaction;
1306 struct rpc_info *rpci = (struct rpc_info *)context;
1307 mbool hdr_invalid = 0;
1308 ASSERT(rpci && rpci->rpc_th);
1310 RPC_TRACE(("%s:\n", __FUNCTION__));
1312 RPC_RO_LOCK(rpci);
1314 /* Only if the header itself checks out, and only the xaction does not */
1315 hdr_invalid = bcm_rpc_hdr_validate(rpci, rpc_buf, &xaction, TRUE);
1317 if (mboolisset(hdr_invalid, HDR_XACTION_MISMATCH) &&
1318 !mboolisset(hdr_invalid, ~HDR_XACTION_MISMATCH)) {
1319 rpc_buf_t *node = rpci->reorder_pktq;
1320 rpci->cnt_xidooo++;
1321 rpci->reorder_depth++;
1322 if (rpci->reorder_depth > rpci->reorder_depth_max)
1323 rpci->reorder_depth_max = rpci->reorder_depth;
1325 /* Catch roll-over or retries */
1326 rpci->reorder_pktq = rpc_buf;
1328 if (node != NULL)
1329 bcm_rpc_buf_next_set(rpci->rpc_th, rpc_buf, node);
1331 /* if we have held too many packets, move past the hole */
1332 if (rpci->reorder_depth > BCM_RPC_REORDER_LIMIT) {
1333 uint16 next_xid = bcm_rpc_reorder_next_xid(rpci);
1335 RPC_ERR(("%s: reorder queue depth %d, skipping ID 0x%x to 0x%x\n",
1336 __FUNCTION__, rpci->reorder_depth,
1337 rpci->oe_trans, next_xid));
1338 rpci->cnt_reorder_overflow++;
1339 rpci->cnt_rx_drop_hole += (uint)(next_xid - rpci->oe_trans);
1340 rpci->oe_trans = next_xid;
1341 bcm_rpc_process_reorder_queue(rpci);
1344 goto done;
1347 /* Bail out if failed */
1348 if (!bcm_rpc_buf_recv_inorder(rpci, rpc_buf, hdr_invalid))
1349 goto done;
1351 /* see if we can make progress on the reorder backlog */
1352 bcm_rpc_process_reorder_queue(rpci);
1354 done:
1355 RPC_RO_UNLOCK(rpci);
1358 static void
1359 bcm_rpc_process_reorder_queue(rpc_info_t *rpci)
1361 uint32 xaction;
1362 mbool hdr_invalid = 0;
1364 while (rpci->reorder_pktq) {
1365 bool found = FALSE;
1366 rpc_buf_t *buf = rpci->reorder_pktq;
1367 rpc_buf_t *prev = rpci->reorder_pktq;
1368 while (buf != NULL) {
1369 rpc_buf_t *next = bcm_rpc_buf_next_get(rpci->rpc_th, buf);
1370 hdr_invalid = bcm_rpc_hdr_validate(rpci, buf, &xaction, FALSE);
1372 if (!mboolisset(hdr_invalid, HDR_XACTION_MISMATCH)) {
1373 bcm_rpc_buf_next_set(rpci->rpc_th, buf, NULL);
1375 if (buf == rpci->reorder_pktq)
1376 rpci->reorder_pktq = next;
1377 else
1378 bcm_rpc_buf_next_set(rpci->rpc_th, prev, next);
1379 rpci->reorder_depth--;
1381 /* Bail out if failed */
1382 if (!bcm_rpc_buf_recv_inorder(rpci, buf, hdr_invalid))
1383 return;
1385 buf = NULL;
1386 found = TRUE;
1387 } else {
1388 prev = buf;
1389 buf = next;
1393 /* bail if not found */
1394 if (!found)
1395 break;
1398 return;
1401 static bool
1402 bcm_rpc_buf_recv_inorder(rpc_info_t *rpci, rpc_buf_t *rpc_buf, mbool hdr_invalid)
1404 rpc_header_t header;
1405 rpc_acn_t acn = RPC_NULL;
1407 ASSERT(rpci && rpci->rpc_th);
1409 RPC_TRACE(("%s: got rpc_buf %p len %d data %p\n", __FUNCTION__,
1410 rpc_buf, bcm_rpc_buf_len_get(rpci->rpc_th, rpc_buf),
1411 bcm_rpc_buf_data(rpci->rpc_th, rpc_buf)));
1413 #ifdef BCMDBG_RPC
1414 if (RPC_PKTTRACE_ON()) {
1415 #ifdef BCMDBG
1416 prhex("RPC Rx Buf", bcm_rpc_buf_data(rpci->rpc_th, rpc_buf),
1417 bcm_rpc_buf_len_get(rpci->rpc_th, rpc_buf));
1418 #endif
1420 #endif /* BCMDBG_RPC */
1422 header = bcm_rpc_header(rpci, rpc_buf);
1424 RPC_OSL_LOCK(rpci->rpc_osh);
1426 if (hdr_invalid) {
1427 RPC_ERR(("%s: bcm_rpc_hdr_validate failed on 0x%08x 0x%x\n", __FUNCTION__,
1428 header, hdr_invalid));
1429 #if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY) || defined(BCM_RPC_ROC)
1430 if (RPC_HDR_TYPE(header) != RPC_TYPE_RTN) {
1431 #if defined(USBAP)
1432 PKTFRMNATIVE(rpci->osh, rpc_buf);
1433 #endif
1434 PKTFREE(rpci->osh, rpc_buf, FALSE);
1436 #else
1437 bcm_rpc_tp_buf_free(rpci->rpc_th, rpc_buf);
1438 #endif /* defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY) || defined(BCM_RPC_ROC) */
1439 RPC_OSL_UNLOCK(rpci->rpc_osh);
1440 return FALSE;
1443 RPC_TRACE(("%s state:0x%x type:0x%x session:0x%x xacn:0x%x\n", __FUNCTION__, rpci->state,
1444 RPC_HDR_TYPE(header), RPC_HDR_SESSION(header), RPC_HDR_XACTION(header)));
1446 if (bcm_rpc_buf_len_get(rpci->rpc_th, rpc_buf) > RPC_HDR_LEN)
1447 bcm_rpc_buf_pull(rpci->rpc_th, rpc_buf, RPC_HDR_LEN);
1448 else {
1449 /* if the head packet ends with rpc_hdr, free and advance to next packet in chain */
1450 rpc_buf_t *next_p;
1452 ASSERT(bcm_rpc_buf_len_get(rpci->rpc_th, rpc_buf) == RPC_HDR_LEN);
1453 next_p = (rpc_buf_t*)PKTNEXT(rpci->osh, rpc_buf);
1455 RPC_TRACE(("%s: following pkt chain to pkt %p len %d\n", __FUNCTION__,
1456 next_p, bcm_rpc_buf_len_get(rpci->rpc_th, next_p)));
1458 PKTSETNEXT(rpci->osh, rpc_buf, NULL);
1459 bcm_rpc_tp_buf_free(rpci->rpc_th, rpc_buf);
1460 rpc_buf = next_p;
1461 if (rpc_buf == NULL) {
1462 RPC_OSL_UNLOCK(rpci->rpc_osh);
1463 return FALSE;
1467 switch (RPC_HDR_TYPE(header)) {
1468 case RPC_TYPE_MGN:
1469 acn = bcm_rpc_mgn_acn(rpci, rpc_buf);
1470 bcm_rpc_buf_pull(rpci->rpc_th, rpc_buf, RPC_ACN_LEN);
1471 RPC_TRACE(("Mgn: %x\n", acn));
1472 break;
1473 case RPC_TYPE_RTN:
1474 #ifdef WLC_HIGH
1475 rpci->oe_rtn_trans = RPC_HDR_XACTION(header) + 1;
1476 break;
1477 #endif
1478 case RPC_TYPE_DATA:
1479 rpci->oe_trans = RPC_HDR_XACTION(header) + 1;
1480 break;
1481 default:
1482 ASSERT(0);
1485 #ifdef WLC_HIGH
1486 rpc_buf = bcm_rpc_buf_recv_high(rpci, RPC_HDR_TYPE(header), acn, rpc_buf);
1487 #else
1488 rpc_buf = bcm_rpc_buf_recv_low(rpci, header, acn, rpc_buf);
1489 #endif
1490 RPC_OSL_UNLOCK(rpci->rpc_osh);
1492 if (rpc_buf)
1493 bcm_rpc_tp_buf_free(rpci->rpc_th, rpc_buf);
1494 return TRUE;
1497 #ifdef WLC_HIGH
1498 static void
1499 bcm_rpc_buf_recv_mgn_high(struct rpc_info *rpci, rpc_acn_t acn, rpc_buf_t *rpc_buf)
1501 rpc_rc_t reason = RPC_RC_ACK;
1502 uint32 version = 0;
1504 RPC_ERR(("%s: Recvd:%x Version: 0x%x\nState: %x Session:%d\n", __FUNCTION__,
1505 acn, rpci->version, rpci->state, rpci->session));
1507 #ifndef NDIS
1508 if (acn == RPC_CONNECT_ACK || acn == RPC_CONNECT_NACK) {
1509 #else
1510 if (acn == RPC_HELLO || acn == RPC_CONNECT_ACK || acn == RPC_CONNECT_NACK) {
1511 #endif
1512 version = bcm_rpc_mgn_ver(rpci, rpc_buf);
1513 bcm_rpc_buf_pull(rpci->rpc_th, rpc_buf, RPC_VER_LEN);
1515 reason = bcm_rpc_mgn_reason(rpci, rpc_buf);
1516 bcm_rpc_buf_pull(rpci->rpc_th, rpc_buf, RPC_RC_LEN);
1518 RPC_ERR(("%s: Reason: %x Dongle Version: 0x%x\n", __FUNCTION__,
1519 reason, version));
1522 switch (acn) {
1523 #ifdef NDIS
1524 case RPC_HELLO:
1525 /* If the original thread has not given up,
1526 * then change the state and wake it up
1528 if (rpci->state == WAIT_HELLO) {
1529 rpci->state = HELLO_RECEIVED;
1531 RPC_ERR(("%s: Hello Received!\n", __FUNCTION__));
1532 if (rpci->wait_init)
1533 RPC_OSL_WAKE(rpci->rpc_osh);
1535 break;
1536 #endif
1537 case RPC_CONNECT_ACK:
1538 /* If the original thread has not given up,
1539 * then change the state and wake it up
1541 if (rpci->state != UNINITED) {
1542 rpci->state = ESTABLISHED;
1543 rpci->chipid = bcm_rpc_mgn_chipid(rpci, rpc_buf);
1544 bcm_rpc_buf_pull(rpci->rpc_th, rpc_buf, RPC_CHIPID_LEN);
1546 RPC_ERR(("%s: Connected!\n", __FUNCTION__));
1547 if (rpci->wait_init)
1548 RPC_OSL_WAKE(rpci->rpc_osh);
1550 ASSERT(reason != RPC_RC_VER_MISMATCH);
1551 break;
1553 case RPC_CONNECT_NACK:
1554 /* Connect failed. Just bail out by waking the thread */
1555 RPC_ERR(("%s: Connect failed !!!\n", __FUNCTION__));
1556 if (rpci->wait_init)
1557 RPC_OSL_WAKE(rpci->rpc_osh);
1558 break;
1560 case RPC_DOWN:
1561 RPC_OSL_UNLOCK(rpci->rpc_osh);
1562 bcm_rpc_down(rpci);
1564 RPC_OSL_LOCK(rpci->rpc_osh);
1565 break;
1567 default:
1568 ASSERT(0);
1569 break;
1573 static rpc_buf_t *
1574 bcm_rpc_buf_recv_high(struct rpc_info *rpci, rpc_type_t type, rpc_acn_t acn, rpc_buf_t *rpc_buf)
1576 RPC_TRACE(("%s: acn %d\n", __FUNCTION__, acn));
1578 switch (type) {
1579 case RPC_TYPE_RTN:
1580 if (rpci->wait_return) {
1581 rpci->rtn_rpcbuf = rpc_buf;
1582 /* This buffer will be freed in bcm_rpc_tp_recv_rtn() */
1583 rpc_buf = NULL;
1584 RPC_OSL_WAKE(rpci->rpc_osh);
1585 } else if (rpci->state != DISCONNECTED)
1586 RPC_ERR(("%s: Received return buffer but no one waiting\n", __FUNCTION__));
1587 break;
1589 case RPC_TYPE_MGN:
1590 #if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY) || defined(BCM_RPC_ROC)
1591 bcm_rpc_buf_recv_mgn_high(rpci, acn, rpc_buf);
1592 #if defined(USBAP)
1593 PKTFRMNATIVE(rpci->osh, rpc_buf);
1594 #endif
1595 PKTFREE(rpci->osh, rpc_buf, FALSE);
1596 rpc_buf = NULL;
1597 #else
1598 bcm_rpc_buf_recv_mgn_high(rpci, acn, rpc_buf);
1599 #endif /* defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY) || defined(BCM_RPC_ROC) */
1600 break;
1602 case RPC_TYPE_DATA:
1603 ASSERT(rpci->state == ESTABLISHED);
1604 #ifdef BCMDBG_RPC
1605 /* Prepare and add the current log entry for the received frame */
1606 /* This is done here before the DATA pointer gets modified */
1607 if (RPC_PKTLOG_ON()) {
1608 struct rpc_pktlog cur;
1609 bcm_rpc_prep_entry(rpci, rpc_buf, &cur, FALSE);
1610 bcm_rpc_add_entry_rx(rpci, &cur);
1612 #endif /* BCMDBG_RPC */
1613 if (rpci->dispatchcb) {
1614 #if !defined(USBAP)
1615 #if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY) || defined(BCM_RPC_ROC)
1616 PKTTONATIVE(rpci->osh, rpc_buf);
1617 #endif /* BCM_RPC_NOCOPY || BCM_RPC_RXNOCOPY || BCM_RPC_ROC */
1618 #endif /* !USBAP */
1619 (rpci->dispatchcb)(rpci->ctx, rpc_buf);
1620 /* The dispatch routine will free the buffer */
1621 rpc_buf = NULL;
1622 } else {
1623 RPC_ERR(("%s: no rpcq callback, drop the pkt\n", __FUNCTION__));
1625 break;
1627 default:
1628 ASSERT(0);
1631 return (rpc_buf);
1633 #else
1634 static void
1635 bcm_rpc_buf_recv_mgn_low(struct rpc_info *rpci, uint8 session, rpc_acn_t acn, rpc_buf_t *rpc_buf)
1637 uint32 reason = 0;
1638 uint32 version = 0;
1640 RPC_TRACE(("%s: Recvd:%x Version: 0x%x\nState: %x Session:%d\n", __FUNCTION__,
1641 acn,
1642 rpci->version, rpci->state, rpci->session));
1644 if (acn == RPC_HELLO) {
1645 bcm_rpc_connect_resp(rpci, RPC_HELLO, RPC_RC_HELLO);
1646 } else if (acn == RPC_CONNECT || acn == RPC_RESET) {
1647 version = bcm_rpc_mgn_ver(rpci, rpc_buf);
1649 RPC_ERR(("%s: Host Version: 0x%x\n", __FUNCTION__, version));
1651 ASSERT(rpci->state != UNINITED);
1653 if (version != rpci->version) {
1654 RPC_ERR(("RPC Establish failed due to version mismatch\n"));
1655 RPC_ERR(("Expected: 0x%x Got: 0x%x\n", rpci->version, version));
1656 RPC_ERR(("Connect failed !!!\n"));
1658 rpci->state = WAIT_INITIALIZING;
1659 bcm_rpc_connect_resp(rpci, RPC_CONNECT_NACK, RPC_RC_VER_MISMATCH);
1660 return;
1663 /* When receiving CONNECT/RESET from HIGH, just
1664 * resync to the HIGH's session and reset the transactions
1666 if ((acn == RPC_CONNECT) && (rpci->state == ESTABLISHED))
1667 reason = RPC_RC_RECONNECT;
1669 rpci->session = session;
1671 if (bcm_rpc_connect_resp(rpci, RPC_CONNECT_ACK, reason)) {
1672 /* call the resync callback if already established */
1673 if ((acn == RPC_CONNECT) && (rpci->state == ESTABLISHED) &&
1674 (rpci->resync_cb)) {
1675 (rpci->resync_cb)(rpci->dnctx);
1677 rpci->state = ESTABLISHED;
1678 } else {
1679 RPC_ERR(("%s: RPC Establish failed !!!\n", __FUNCTION__));
1682 RPC_ERR(("Connected Session:%x!\n", rpci->session));
1683 rpci->oe_trans = 0;
1684 rpci->trans = 0;
1685 rpci->rtn_trans = 0;
1686 } else if (acn == RPC_DOWN) {
1687 bcm_rpc_down(rpci);
1691 static rpc_buf_t *
1692 bcm_rpc_buf_recv_low(struct rpc_info *rpci, rpc_header_t header,
1693 rpc_acn_t acn, rpc_buf_t *rpc_buf)
1695 switch (RPC_HDR_TYPE(header)) {
1696 case RPC_TYPE_MGN:
1697 bcm_rpc_buf_recv_mgn_low(rpci, RPC_HDR_SESSION(header), acn, rpc_buf);
1698 break;
1700 case RPC_TYPE_RTN:
1701 case RPC_TYPE_DATA:
1702 ASSERT(rpci->state == ESTABLISHED);
1703 #ifdef BCMDBG_RPC
1704 /* Prepare and add the current log entry for the received frame */
1705 /* This is done here before the DATA pointer gets modified */
1706 if (RPC_PKTLOG_ON()) {
1707 struct rpc_pktlog cur;
1708 bcm_rpc_prep_entry(rpci, rpc_buf, &cur, FALSE);
1709 bcm_rpc_add_entry_rx(rpci, &cur);
1711 #endif /* BCMDBG_RPC */
1713 if (rpci->dispatchcb) {
1714 (rpci->dispatchcb)(rpci->ctx, rpc_buf);
1715 rpc_buf = NULL;
1716 } else {
1717 RPC_ERR(("%s: no rpcq callback, drop the pkt\n", __FUNCTION__));
1718 ASSERT(0);
1720 break;
1722 default:
1723 ASSERT(0);
1726 return (rpc_buf);
1728 #endif /* WLC_HIGH */
1730 #ifdef BCMDBG_RPC
1731 static void
1732 bcm_rpc_pktlog_init(rpc_info_t *rpci)
1734 rpc_msg_level |= RPC_PKTLOG_VAL;
1736 if (RPC_PKTLOG_ON()) {
1737 if ((rpci->send_log = MALLOC(rpci->osh,
1738 sizeof(struct rpc_pktlog) * RPC_PKTLOG_SIZE)) == NULL)
1739 goto err;
1740 bzero(rpci->send_log, sizeof(struct rpc_pktlog) * RPC_PKTLOG_SIZE);
1741 if ((rpci->recv_log = MALLOC(rpci->osh,
1742 sizeof(struct rpc_pktlog) * RPC_PKTLOG_SIZE)) == NULL)
1743 goto err;
1744 bzero(rpci->recv_log, sizeof(struct rpc_pktlog) * RPC_PKTLOG_SIZE);
1745 return;
1747 RPC_ERR(("pktlog is on\n"));
1748 err:
1749 bcm_rpc_pktlog_deinit(rpci);
1752 static void
1753 bcm_rpc_pktlog_deinit(rpc_info_t *rpci)
1755 if (rpci->send_log) {
1756 MFREE(rpci->osh, rpci->send_log, sizeof(struct rpc_pktlog) * RPC_PKTLOG_SIZE);
1757 rpci->send_log = NULL;
1759 if (rpci->recv_log) {
1760 MFREE(rpci->osh, rpci->recv_log, sizeof(struct rpc_pktlog) * RPC_PKTLOG_SIZE);
1761 rpci->recv_log = NULL;
1763 rpc_msg_level &= ~RPC_PKTLOG_VAL; /* Turn off logging on failure */
1766 static struct rpc_pktlog *
1767 bcm_rpc_prep_entry(struct rpc_info * rpci, rpc_buf_t *b, struct rpc_pktlog *cur, bool tx)
1769 bzero(cur, sizeof(struct rpc_pktlog));
1770 if (tx) {
1771 cur->trans = rpci->trans;
1772 } else {
1773 /* this function is called after match, so the oe_trans is already advanced */
1774 cur->trans = rpci->oe_trans - 1;
1776 cur->len = bcm_rpc_buf_len_get(rpci->rpc_th, b);
1777 bcopy(bcm_rpc_buf_data(rpci->rpc_th, b), cur->data, RPC_PKTLOG_DATASIZE);
1778 return cur;
1781 static void
1782 bcm_rpc_add_entry_tx(struct rpc_info * rpci, struct rpc_pktlog *cur)
1784 RPC_OSL_LOCK(rpci->rpc_osh);
1785 bcopy(cur, &rpci->send_log[rpci->send_log_idx], sizeof(struct rpc_pktlog));
1786 rpci->send_log_idx = (rpci->send_log_idx + 1) % RPC_PKTLOG_SIZE;
1788 if (rpci->send_log_num < RPC_PKTLOG_SIZE)
1789 rpci->send_log_num++;
1791 RPC_OSL_UNLOCK(rpci->rpc_osh);
1794 static void
1795 bcm_rpc_add_entry_rx(struct rpc_info * rpci, struct rpc_pktlog *cur)
1797 bcopy(cur, &rpci->recv_log[rpci->recv_log_idx], sizeof(struct rpc_pktlog));
1798 rpci->recv_log_idx = (rpci->recv_log_idx + 1) % RPC_PKTLOG_SIZE;
1800 if (rpci->recv_log_num < RPC_PKTLOG_SIZE)
1801 rpci->recv_log_num++;
1803 #endif /* BCMDBG_RPC */
1805 #ifdef WLC_HIGH
1807 bcm_rpc_dump(rpc_info_t *rpci, struct bcmstrbuf *b)
1809 #ifdef BCMDBG
1811 bcm_bprintf(b, "\nHOST rpc dump:\n");
1812 RPC_OSL_LOCK(rpci->rpc_osh);
1813 bcm_bprintf(b, "Version: 0x%x State: %x\n", rpci->version, rpci->state);
1814 bcm_bprintf(b, "session %d trans 0x%x oe_trans 0x%x rtn_trans 0x%x oe_rtn_trans 0x%x\n",
1815 rpci->session, rpci->trans, rpci->oe_trans,
1816 rpci->rtn_trans, rpci->oe_rtn_trans);
1817 bcm_bprintf(b, "xactionID out of order %d\n", rpci->cnt_xidooo);
1818 bcm_bprintf(b, "reorder queue depth %u first ID 0x%x, max depth %u, tossthreshold %u\n",
1819 rpci->reorder_depth, bcm_rpc_reorder_next_xid(rpci), rpci->reorder_depth_max,
1820 BCM_RPC_REORDER_LIMIT);
1822 RPC_OSL_UNLOCK(rpci->rpc_osh);
1823 return bcm_rpc_tp_dump(rpci->rpc_th, b);
1824 #else
1825 return 0;
1826 #endif /* BCMDBG */
1830 bcm_rpc_pktlog_get(struct rpc_info *rpci, uint32 *buf, uint buf_size, bool send)
1832 int ret = -1;
1834 #ifdef BCMDBG_RPC
1835 int start, i, tot;
1837 /* Clear the whole buffer */
1838 bzero(buf, buf_size);
1839 RPC_OSL_LOCK(rpci->rpc_osh);
1840 if (send) {
1841 ret = rpci->send_log_num;
1842 if (ret < RPC_PKTLOG_SIZE)
1843 start = 0;
1844 else
1845 start = (rpci->send_log_idx + 1) % RPC_PKTLOG_SIZE;
1846 } else {
1847 ret = rpci->recv_log_num;
1848 if (ret < RPC_PKTLOG_SIZE)
1849 start = 0;
1850 else
1851 start = (rpci->recv_log_idx + 1) % RPC_PKTLOG_SIZE;
1854 /* Return only first byte */
1855 if (buf_size < (uint) (ret * RPC_PKTLOG_RD_LEN)) {
1856 RPC_OSL_UNLOCK(rpci->rpc_osh);
1857 RPC_ERR(("%s buf too short\n", __FUNCTION__));
1858 return BCME_BUFTOOSHORT;
1861 if (ret == 0) {
1862 RPC_OSL_UNLOCK(rpci->rpc_osh);
1863 RPC_ERR(("%s no record\n", __FUNCTION__));
1864 return ret;
1867 tot = ret;
1868 for (i = 0; tot > 0; tot--, i++) {
1869 if (send) {
1870 buf[i*RPC_PKTLOG_RD_LEN] = rpci->send_log[start].data[0];
1871 buf[i*RPC_PKTLOG_RD_LEN+1] = rpci->send_log[start].trans;
1872 buf[i*RPC_PKTLOG_RD_LEN+2] = rpci->send_log[start].len;
1873 start++;
1874 } else {
1875 buf[i*RPC_PKTLOG_RD_LEN] = rpci->recv_log[start].data[0];
1876 buf[i*RPC_PKTLOG_RD_LEN+1] = rpci->recv_log[start].trans;
1877 buf[i*RPC_PKTLOG_RD_LEN+2] = rpci->recv_log[start].len;
1878 start++;
1880 start = (start % RPC_PKTLOG_SIZE);
1882 RPC_OSL_UNLOCK(rpci->rpc_osh);
1884 #endif /* BCMDBG_RPC */
1885 return ret;
1887 #endif /* WLC_HIGH */
1890 #ifdef BCMDBG_RPC
1892 static void
1893 _bcm_rpc_dump_pktlog(rpc_info_t *rpci)
1895 int ret = -1;
1896 int start, i;
1898 RPC_OSL_LOCK(rpci->rpc_osh);
1899 ret = rpci->send_log_num;
1900 if (ret == 0)
1901 goto done;
1903 if (ret < RPC_PKTLOG_SIZE)
1904 start = 0;
1905 else
1906 start = (rpci->send_log_idx + 1) % RPC_PKTLOG_SIZE;
1908 printf("send %d\n", ret);
1909 for (i = 0; ret > 0; ret--, i++) {
1910 printf("[%d] trans 0x%x len %d data 0x%x\n", i,
1911 rpci->send_log[start].trans,
1912 rpci->send_log[start].len,
1913 rpci->send_log[start].data[0]);
1914 start++;
1915 start = (start % RPC_PKTLOG_SIZE);
1918 ret = rpci->recv_log_num;
1919 if (ret == 0)
1920 goto done;
1922 if (ret < RPC_PKTLOG_SIZE)
1923 start = 0;
1924 else
1925 start = (rpci->recv_log_idx + 1) % RPC_PKTLOG_SIZE;
1927 printf("recv %d\n", ret);
1928 for (i = 0; ret > 0; ret--, i++) {
1929 printf("[%d] trans 0x%x len %d data 0x%x\n", i,
1930 rpci->recv_log[start].trans,
1931 rpci->recv_log[start].len,
1932 rpci->recv_log[start].data[0]);
1933 start++;
1934 start = (start % RPC_PKTLOG_SIZE);
1937 done:
1938 RPC_OSL_UNLOCK(rpci->rpc_osh);
1941 #ifdef WLC_HIGH
1942 static void
1943 bcm_rpc_dump_pktlog_high(rpc_info_t *rpci)
1945 printf("HOST rpc pktlog dump:\n");
1946 _bcm_rpc_dump_pktlog(rpci);
1949 #else
1951 static void
1952 bcm_rpc_dump_pktlog_low(uint32 arg, uint argc, char *argv[])
1954 rpc_info_t *rpci;
1956 rpci = (rpc_info_t *)(uintptr)arg;
1958 printf("DONGLE rpc pktlog dump:\n");
1959 _bcm_rpc_dump_pktlog(rpci);
1961 #endif /* WLC_HIGH */
1962 #endif /* BCMDBG_RPC */
1964 #ifdef WLC_LOW
1965 static void
1966 bcm_rpc_dump_state(uint32 arg, uint argc, char *argv[])
1967 #else
1968 static void
1969 bcm_rpc_fatal_dump(void *arg)
1970 #endif
1972 #ifdef BCMDBG_RPC
1973 #ifndef WLC_LOW
1974 struct bcmstrbuf b;
1975 char *buf, *t, *p;
1976 uint size = 1024*1024;
1977 #endif /* WLC_LOW */
1978 #endif /* BCMDBG_RPC */
1979 rpc_info_t *rpci = (rpc_info_t *)(uintptr)arg;
1980 printf("DONGLE rpc dump:\n");
1981 printf("Version: 0x%x State: %x\n", rpci->version, rpci->state);
1982 printf("session %d trans 0x%x oe_trans 0x%x rtn_trans 0x%x\n",
1983 rpci->session, rpci->trans, rpci->oe_trans,
1984 rpci->rtn_trans);
1985 printf("xactionID out of order %u reorder ovfl %u dropped hole %u\n",
1986 rpci->cnt_xidooo, rpci->cnt_reorder_overflow, rpci->cnt_rx_drop_hole);
1987 printf("reorder queue depth %u first ID 0x%x reorder_q_depth_max %d, tossthreshold %u\n",
1988 rpci->reorder_depth, bcm_rpc_reorder_next_xid(rpci), rpci->reorder_depth_max,
1989 BCM_RPC_REORDER_LIMIT);
1991 #ifdef BCMDBG_RPC
1992 #ifdef WLC_LOW
1993 bcm_rpc_tp_dump(rpci->rpc_th);
1994 #else
1995 buf = (char *)MALLOC(rpci->osh, size);
1997 if (buf != NULL) {
1998 bzero(buf, size);
1999 bcm_binit(&b, buf, size);
2000 bcm_rpc_tp_dump(rpci->rpc_th, &b);
2001 p = buf;
2002 while (p != NULL) {
2003 while ((((uintptr)p) < (((uintptr)buf) + size)) && (*p == '\0'))
2004 p++;
2005 if (((uintptr)p) >= (((uintptr)buf) + size))
2006 break;
2007 if ((t = strchr(p, '\n')) != NULL) {
2008 *t++ = '\0';
2009 printf("%s\n", p);
2012 p = t;
2014 MFREE(rpci->osh, buf, size);
2016 #endif /* WLC_LOW */
2017 #endif /* BCMDBG_RPC */
2020 void
2021 bcm_rpc_msglevel_set(struct rpc_info *rpci, uint16 msglevel, bool high)
2023 #ifdef WLC_HIGH
2024 ASSERT(high == TRUE);
2025 /* high 8 bits are for rpc, low 8 bits are for tp */
2026 rpc_msg_level = msglevel >> 8;
2027 bcm_rpc_tp_msglevel_set(rpci->rpc_th, (uint8)(msglevel & 0xff), TRUE);
2028 return;
2029 #else
2030 ASSERT(high == FALSE);
2031 /* high 8 bits are for rpc, low 8 bits are for tp */
2032 rpc_msg_level = msglevel >> 8;
2033 bcm_rpc_tp_msglevel_set(rpci->rpc_th, (uint8)(msglevel & 0xff), FALSE);
2034 return;
2035 #endif
2038 void
2039 bcm_rpc_dngl_suspend_enable_set(rpc_info_t *rpc, uint32 val)
2041 rpc->suspend_enable = val;
2044 void
2045 bcm_rpc_dngl_suspend_enable_get(rpc_info_t *rpc, uint32 *pval)
2047 *pval = rpc->suspend_enable;