1 /*
 2  * RPC layer. It links to the bus layer through a bus-dependent transport layer
3 * Broadcom 802.11abg Networking Device Driver
5 * Copyright (C) 2010, Broadcom Corporation
6 * All Rights Reserved.
7 *
8 * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom Corporation;
9 * the contents of this file may not be disclosed to third parties, copied
10 * or duplicated in any form, in whole or in part, without the prior
11 * written permission of Broadcom Corporation.
13 * $Id: bcm_rpc.c,v 1.80.2.30 2010-12-24 23:35:24 Exp $
16 #include <epivers.h>
17 #include <typedefs.h>
18 #include <bcmdefs.h>
19 #include <bcmendian.h>
20 #include <osl.h>
21 #include <bcmutils.h>
23 #include <bcm_rpc_tp.h>
24 #include <bcm_rpc.h>
25 #include <rpc_osl.h>
26 #include <bcmdevs.h>
28 #if (!defined(WLC_HIGH) && !defined(WLC_LOW))
29 #error "SPLIT"
30 #endif
31 #if defined(WLC_HIGH) && defined(WLC_LOW)
32 #error "SPLIT"
33 #endif
  35 /* RPC may use OS APIs directly to avoid overloading osl.h
  36  * HIGH_ONLY supports NDIS, LINUX, and MACOSX so far. Can be ported to other OSes if needed
38 #ifdef WLC_HIGH
39 #if !defined(NDIS) && !defined(linux) && !defined(MACOSX)
40 #error "RPC only supports NDIS, LINUX, MACOSX in HIGH driver"
41 #endif
42 #endif /* WLC_HIGH */
43 #ifdef WLC_LOW
44 #if !defined(_HNDRTE_)
45 #error "RPC only supports HNDRTE in LOW driver"
46 #endif
47 #endif /* WLC_LOW */
49 /* use local flag BCMDBG_RPC so that it can be turned on without global BCMDBG */
50 #ifdef BCMDBG
51 #ifndef BCMDBG_RPC
52 #define BCMDBG_RPC
53 #endif
54 #endif /* BCMDBG */
56 #define BCMDBG_RPC
57 /* #define BCMDBG_RPC */
59 static uint32 rpc_msg_level = RPC_ERROR_VAL;
60 /* Print error messages even for non-debug drivers
61 * NOTE: RPC_PKTLOG_VAL can be added in bcm_rpc_pktlog_init()
  64 /* rpc_msg_level is a bit vector with defs in wlioctl.h */
65 #define RPC_ERR(args) do {if (rpc_msg_level & RPC_ERROR_VAL) printf args;} while (0)
67 #ifdef BCMDBG_RPC
68 #define RPC_TRACE(args) do {if (rpc_msg_level & RPC_TRACE_VAL) printf args;} while (0)
69 #define RPC_PKTTRACE_ON() (rpc_msg_level & RPC_PKTTRACE_VAL)
70 #else
71 #ifdef BCMDBG_ERR
72 #define RPC_TRACE(args) do {if (rpc_msg_level & RPC_TRACE_VAL) printf args;} while (0)
73 #define RPC_PKTTRACE_ON() (FALSE)
  74 #define prhex(a, b, c)  do { } while (0)  /* prhex is not defined without BCMDBG */
75 #define RPC_PKTLOG_ON() (FALSE)
76 #else
77 #define RPC_TRACE(args)
78 #define RPC_PKTTRACE_ON() (FALSE)
79 #define RPC_PKTLOG_ON() (FALSE)
  80 #define prhex(a, b, c)  do { } while (0)  /* prhex is not defined without BCMDBG */
81 #endif /* BCMDBG_ERR */
82 #endif /* BCMDBG_RPC */
84 #ifdef BCMDBG_RPC
85 #define RPC_PKTLOG_ON() (rpc_msg_level & RPC_PKTLOG_VAL)
86 #else
87 #define RPC_PKTLOG_ON() (FALSE)
88 #endif /* BCMDBG_RPC */
90 #ifndef BCM_RPC_REORDER_LIMIT
91 #ifdef WLC_HIGH
  92 #define BCM_RPC_REORDER_LIMIT 40	/* threshold for skipping past a hole to avoid reorder queue overflow */
93 #else
94 #define BCM_RPC_REORDER_LIMIT 30
95 #endif
96 #endif /* BCM_RPC_REORDER_LIMIT */
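/* Illustrative sketch (added for clarity, not part of the original source): how the
 * reorder limit interacts with 16-bit transaction IDs. Out-of-order frames are parked
 * on reorder_pktq; once more than BCM_RPC_REORDER_LIMIT frames are held, the code gives
 * up on the missing ID and advances oe_trans to the closest queued ID. Distances are
 * computed in uint16 arithmetic, so wrap-around is handled naturally:
 *
 *     uint16 cur_xid = 0xfff0;          expected next ID
 *     uint16 xid     = 0x0005;          ID seen on a queued frame
 *     uint16 delta   = xid - cur_xid;   0x0015, i.e. 21 ahead despite the wrap
 *
 * bcm_rpc_reorder_next_xid() below picks the queued frame with the smallest such delta.
 */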
98 /* OS specific files for locks */
99 #define RPC_INIT_WAIT_TIMEOUT_MSEC 2000
100 #ifndef RPC_RETURN_WAIT_TIMEOUT_MSEC
101 #if defined(NDIS) && !defined(SDIO_BMAC)
102 #define RPC_RETURN_WAIT_TIMEOUT_MSEC 800 /* NDIS OIDs timeout in 1 second.
103 * This timeout needs to be smaller than that
105 #elif defined(linux) || defined(SDIO_BMAC)
106 #define RPC_RETURN_WAIT_TIMEOUT_MSEC 3200
107 #elif defined(MACOSX)
108 #define RPC_RETURN_WAIT_TIMEOUT_MSEC 800 /* guess at a reasonable turnaround time */
109 #endif /* NDIS */
110 #endif /* RPC_RETURN_WAIT_TIMEOUT_MSEC */
112 /* RPC Frame formats */
113 /* |--------------||-------------|
114 * RPC Header RPC Payload
116 * 1) RPC Header:
117 * |-------|--------|----------------|
118 * 31 23 15 0
119 * Type Session Transaction ID
120 * = 0 Data
121 * = 1 Return
122 * = 2 Mgn
124 * 2) payload
 125  *    Data and Return RPC payloads are RPC call dependent
127 * Management frame formats:
128 * |--------|--------|--------|--------|
129 * Byte 0 1 2 3
130 * Header Action Version Reason
132 * Version is included only for following actions:
133 * -- CONNECT
134 * -- RESET
135 * -- DOWN
136 * -- CONNECT_ACK
137 * -- CONNECT_NACK
139 * Reason sent only by BMAC for following actions:
140 * -- CONNECT_ACK
 141  *  -- CONNECT_NACK
144 typedef uint32 rpc_header_t;
146 #define RPC_HDR_LEN sizeof(rpc_header_t)
147 #define RPC_ACN_LEN sizeof(uint32)
148 #define RPC_VER_LEN sizeof(EPI_VERSION_NUM)
149 #define RPC_RC_LEN sizeof(uint32)
150 #define RPC_CHIPID_LEN sizeof(uint32)
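/* Illustrative sketch (added for clarity, not part of the original source): a
 * management frame as built by bcm_rpc_connect_resp() is a sequence of 32-bit
 * little-endian words matching the lengths defined above:
 *
 *     word 0  RPC header (type RPC_TYPE_MGN, session, transaction ID)
 *     word 1  action     e.g. RPC_CONNECT_ACK
 *     word 2  version    EPI_VERSION_NUM of the sender
 *     word 3  reason     e.g. RPC_RC_ACK or RPC_RC_RECONNECT
 *     word 4  chip ID    only when BCMCHIPID is defined
 *
 * Version and reason are carried only for the actions listed in the format
 * description above.
 */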
152 #define RPC_HDR_TYPE(_rpch) (((_rpch) >> 24) & 0xff)
153 #define RPC_HDR_SESSION(_rpch) (((_rpch) >> 16) & 0xff)
154 #define RPC_HDR_XACTION(_rpch) ((_rpch) & 0xffff) /* When the type is data or return */
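/* Illustrative sketch (added for clarity, not part of the original source): decomposing
 * a header word with the macros above. For a DATA call on session 0x69 with transaction
 * ID 0x0007 the header word is 0x01690007, and:
 *
 *     RPC_HDR_TYPE(0x01690007)    == 0x01    (RPC_TYPE_DATA)
 *     RPC_HDR_SESSION(0x01690007) == 0x69
 *     RPC_HDR_XACTION(0x01690007) == 0x0007
 *
 * rpc_header_prep() below composes the same word in the opposite direction:
 * (type << 24) | (session << 16) | transaction_id.
 */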
156 #define NAME_ENTRY(x) #x
158 /* RPC Header defines -- attached to every RPC call */
159 typedef enum {
160 RPC_TYPE_UNKNOWN, /* Unknown header type */
 161 	RPC_TYPE_DATA,		/* RPC calls that go straight through */
 162 	RPC_TYPE_RTN,		/* RPC calls that are synchronous */
163 RPC_TYPE_MGN, /* RPC state management */
164 } rpc_type_t;
166 typedef enum {
167 RPC_RC_ACK = 0,
168 RPC_RC_HELLO,
169 RPC_RC_RECONNECT,
170 RPC_RC_VER_MISMATCH
171 } rpc_rc_t;
173 /* Management actions */
174 typedef enum {
175 RPC_NULL = 0,
176 RPC_HELLO,
177 RPC_CONNECT, /* Master (high) to slave (low). Slave to copy current
178 * session id and transaction id (mostly 0)
180 RPC_CONNECT_ACK, /* Ack from LOW_RPC */
181 RPC_DOWN, /* Down the other-end. The actual action is
182 * end specific.
184 RPC_CONNECT_NACK, /* Nack from LOW_RPC. This indicates potentially that
185 * dongle could already be running
187 RPC_RESET /* Resync using other end's session id (mostly HIGH->LOW)
188 * Also, reset the oe_trans, and trans to 0
190 } rpc_acn_t;
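/* Connection handshake as implemented later in this file (summary added for clarity,
 * not part of the original source):
 *
 *     HIGH                               LOW
 *     RPC_HELLO        ---->             (NDIS builds only, bcm_rpc_hello)
 *                      <----  RPC_HELLO echoed with reason RPC_RC_HELLO
 *     RPC_CONNECT      ---->             (bcm_rpc_up)
 *                      <----  RPC_CONNECT_ACK or RPC_CONNECT_NACK
 *     RPC_RESET        ---->             (bcm_rpc_resume_oe, after suspend/resume)
 *                      <----  RPC_CONNECT_ACK
 *
 * RPC_DOWN is handled by either end and tears the link down via bcm_rpc_down().
 */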
192 /* RPC States */
193 typedef enum {
194 UNINITED = 0,
195 WAIT_HELLO,
196 HELLO_RECEIVED,
197 WAIT_INITIALIZING,
198 ESTABLISHED,
199 DISCONNECTED,
200 ASLEEP,
201 WAIT_RESUME
202 } rpc_state_t;
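/* Typical state progression (summary added for clarity, not part of the original
 * source): UNINITED -> WAIT_HELLO -> HELLO_RECEIVED (NDIS HIGH only) ->
 * WAIT_INITIALIZING -> ESTABLISHED. Suspend/resume on the HIGH side goes
 * ESTABLISHED -> ASLEEP -> WAIT_RESUME -> ESTABLISHED. RPC_DOWN or a fatal error
 * leaves the state DISCONNECTED.
 */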
204 #define HDR_STATE_MISMATCH 0x1
205 #define HDR_SESSION_MISMATCH 0x2
206 #define HDR_XACTION_MISMATCH 0x4
208 #ifdef BCMDBG_RPC
209 #define RPC_PKTLOG_DATASIZE 4
210 struct rpc_pktlog {
211 uint16 trans;
212 int len;
213 uint32 data[RPC_PKTLOG_DATASIZE]; /* First few bytes of the payload only */
215 #endif /* BCMDBG_RPC */
217 #ifdef WLC_LOW
218 static void bcm_rpc_dump_state(uint32 arg, uint argc, char *argv[]);
219 #else
220 static void bcm_rpc_fatal_dump(void *arg);
221 #endif /* WLC_LOW */
223 #ifdef BCMDBG_RPC
224 static void _bcm_rpc_dump_pktlog(rpc_info_t *rpci);
225 #ifdef WLC_HIGH
226 static void bcm_rpc_dump_pktlog_high(rpc_info_t *rpci);
227 #else
228 static void bcm_rpc_dump_pktlog_low(uint32 arg, uint argc, char *argv[]);
229 #endif
230 #endif /* BCMDBG_RPC */
232 #ifdef WLC_HIGH
 233 /* This lock protects the receive reorder queue, which guarantees in-order
 234  * receive; it was observed that, at least on NDIS, the USB subsystem does not
 235  * guarantee ordering
237 #ifdef NDIS
238 #define RPC_RO_LOCK(ri) NdisAcquireSpinLock(&(ri)->reorder_lock)
239 #define RPC_RO_UNLOCK(ri) NdisReleaseSpinLock(&(ri)->reorder_lock)
240 #elif defined(linux)
241 #define RPC_RO_LOCK(ri) spin_lock_irqsave(&(ri)->reorder_lock, (ri)->reorder_flags);
242 #define RPC_RO_UNLOCK(ri) spin_unlock_irqrestore(&(ri)->reorder_lock, (ri)->reorder_flags);
243 #elif defined(MACOSX)
244 #define RPC_RO_LOCK(ri) do { } while (0)
245 #define RPC_RO_UNLOCK(ri) do { } while (0)
246 #endif /* NDIS */
247 #else
248 #define RPC_RO_LOCK(ri) do { } while (0)
249 #define RPC_RO_UNLOCK(ri) do { } while (0)
250 #endif /* WLC_HIGH */
252 struct rpc_info {
253 void *pdev; /* Per-port driver handle for rx callback */
254 struct rpc_transport_info *rpc_th; /* transport layer handle */
255 osl_t *osh;
257 rpc_dispatch_cb_t dispatchcb; /* callback when data is received */
258 void *ctx; /* Callback context */
260 rpc_down_cb_t dncb; /* callback when RPC goes down */
261 void *dnctx; /* Callback context */
263 rpc_resync_cb_t resync_cb; /* callback when host reenabled and dongle
264 * was not rebooted. Uses dnctx
266 rpc_txdone_cb_t txdone_cb; /* when non-null, called when a tx has completed. */
267 uint8 rpc_tp_hdr_len; /* header len for rpc and tp layer */
269 uint8 session; /* 255 sessions enough ? */
270 uint16 trans; /* More than 255 can't be pending */
271 uint16 oe_trans; /* OtherEnd tran id, dongle->host */
272 uint16 rtn_trans; /* BMAC: callreturn Id dongle->host */
273 uint16 oe_rtn_trans; /* HIGH: received BMAC callreturn id */
275 rpc_buf_t *rtn_rpcbuf; /* RPC ID for return transaction */
277 rpc_state_t state;
278 uint reset; /* # of resets */
279 uint cnt_xidooo; /* transactionID out of order */
 280 	uint cnt_rx_drop_hole;		/* number of rpc calls dropped due to reorder overflow */
281 uint cnt_reorder_overflow; /* number of time the reorder queue overflowed,
282 * causing drops
284 uint32 version;
286 bool wait_init;
287 bool wait_return;
289 rpc_osl_t *rpc_osh;
291 #ifdef BCMDBG_RPC
292 struct rpc_pktlog *send_log;
293 uint16 send_log_idx; /* Point to the next slot to fill-in */
294 uint16 send_log_num; /* Number of entries */
296 struct rpc_pktlog *recv_log;
297 uint16 recv_log_idx; /* Point to the next slot to fill-in */
298 uint16 recv_log_num; /* Number of entries */
299 #endif /* BCMDBG_RPC */
301 #ifdef WLC_HIGH
302 #if defined(NDIS)
303 NDIS_SPIN_LOCK reorder_lock; /* TO RAISE the IRQ */
304 bool reorder_lock_alloced;
305 bool down_oe_pending;
306 bool down_pending;
307 #elif defined(linux)
308 spinlock_t reorder_lock;
309 ulong reorder_flags;
310 #endif /* NDIS */
311 #endif /* WLC_HIGH */
312 /* Protect against rx reordering */
313 rpc_buf_t *reorder_pktq;
314 uint reorder_depth;
315 uint reorder_depth_max;
316 uint chipid;
317 uint suspend_enable;
320 static void bcm_rpc_tx_complete(void *ctx, rpc_buf_t *buf, int status);
321 static void bcm_rpc_buf_recv(void *context, rpc_buf_t *);
322 static void bcm_rpc_process_reorder_queue(rpc_info_t *rpci);
323 static bool bcm_rpc_buf_recv_inorder(rpc_info_t *rpci, rpc_buf_t *rpc_buf, mbool hdr_invalid);
325 #ifdef WLC_HIGH
326 static rpc_buf_t *bcm_rpc_buf_recv_high(struct rpc_info *rpci, rpc_type_t type, rpc_acn_t acn,
327 rpc_buf_t *rpc_buf);
328 static int bcm_rpc_resume_oe(struct rpc_info *rpci);
329 #ifdef NDIS
330 static int bcm_rpc_hello(rpc_info_t *rpci);
331 #endif
332 #else
333 static rpc_buf_t *bcm_rpc_buf_recv_low(struct rpc_info *rpci, rpc_header_t header,
334 rpc_acn_t acn, rpc_buf_t *rpc_buf);
335 #endif /* WLC_HIGH */
336 static int bcm_rpc_up(rpc_info_t *rpci);
337 static uint16 bcm_rpc_reorder_next_xid(struct rpc_info *rpci);
340 #ifdef BCMDBG_RPC
341 static void bcm_rpc_pktlog_init(rpc_info_t *rpci);
342 static void bcm_rpc_pktlog_deinit(rpc_info_t *rpci);
343 static struct rpc_pktlog *bcm_rpc_prep_entry(struct rpc_info * rpci, rpc_buf_t *b,
344 struct rpc_pktlog *cur, bool tx);
345 static void bcm_rpc_add_entry_tx(struct rpc_info * rpci, struct rpc_pktlog *cur);
346 static void bcm_rpc_add_entry_rx(struct rpc_info * rpci, struct rpc_pktlog *cur);
347 #endif /* BCMDBG_RPC */
 350 /* Header and component retrieval functions */
351 static INLINE rpc_header_t
352 bcm_rpc_header(struct rpc_info *rpci, rpc_buf_t *rpc_buf)
354 rpc_header_t *rpch = (rpc_header_t *)bcm_rpc_buf_data(rpci->rpc_th, rpc_buf);
355 return ltoh32(*rpch);
358 static INLINE rpc_acn_t
359 bcm_rpc_mgn_acn(struct rpc_info *rpci, rpc_buf_t *rpc_buf)
361 rpc_header_t *rpch = (rpc_header_t *)bcm_rpc_buf_data(rpci->rpc_th, rpc_buf);
363 return (rpc_acn_t)ltoh32(*rpch);
366 static INLINE uint32
367 bcm_rpc_mgn_ver(struct rpc_info *rpci, rpc_buf_t *rpc_buf)
369 rpc_header_t *rpch = (rpc_header_t *)bcm_rpc_buf_data(rpci->rpc_th, rpc_buf);
371 return ltoh32(*rpch);
374 static INLINE rpc_rc_t
375 bcm_rpc_mgn_reason(struct rpc_info *rpci, rpc_buf_t *rpc_buf)
377 rpc_header_t *rpch = (rpc_header_t *)bcm_rpc_buf_data(rpci->rpc_th, rpc_buf);
378 return (rpc_rc_t)ltoh32(*rpch);
381 #ifdef WLC_HIGH
382 static uint32
383 bcm_rpc_mgn_chipid(struct rpc_info *rpci, rpc_buf_t *rpc_buf)
385 rpc_header_t *rpch = (rpc_header_t *)bcm_rpc_buf_data(rpci->rpc_th, rpc_buf);
387 return ltoh32(*rpch);
389 #endif /* WLC_HIGH */
391 static INLINE uint
392 bcm_rpc_hdr_xaction_validate(struct rpc_info *rpci, rpc_header_t header, uint32 *xaction,
393 bool verbose)
395 uint type;
397 type = RPC_HDR_TYPE(header);
398 *xaction = RPC_HDR_XACTION(header);
399 /* High driver does not check the return transaction to be in order */
400 if (type != RPC_TYPE_MGN &&
401 #ifdef WLC_HIGH
402 type != RPC_TYPE_RTN &&
403 #endif
404 *xaction != rpci->oe_trans) {
405 #ifdef WLC_HIGH
406 if (verbose) {
407 RPC_ERR(("Transaction mismatch: expected:0x%x got:0x%x type: %d\n",
408 rpci->oe_trans, *xaction, type));
410 #endif
411 return HDR_XACTION_MISMATCH;
414 return 0;
417 static INLINE uint
418 bcm_rpc_hdr_session_validate(struct rpc_info *rpci, rpc_header_t header)
420 #ifdef WLC_LOW
421 if (RPC_HDR_TYPE(header) == RPC_TYPE_MGN)
422 return 0;
423 #endif
425 if (rpci->session != RPC_HDR_SESSION(header))
426 return HDR_SESSION_MISMATCH;
427 return 0;
430 static INLINE uint
431 bcm_rpc_hdr_state_validate(struct rpc_info *rpci, rpc_header_t header)
433 uint type = RPC_HDR_TYPE(header);
435 if ((type == RPC_TYPE_UNKNOWN) || (type > RPC_TYPE_MGN))
436 return HDR_STATE_MISMATCH;
438 /* Everything allowed during this transition time */
439 if (rpci->state == ASLEEP)
440 return 0;
 442 	/* Only management frames allowed before ESTABLISHED state */
443 if ((rpci->state != ESTABLISHED) && (type != RPC_TYPE_MGN)) {
444 RPC_ERR(("bcm_rpc_header_validate: State mismatch: state:%d type:%d\n",
445 rpci->state, type));
446 return HDR_STATE_MISMATCH;
449 return 0;
452 static INLINE mbool
453 bcm_rpc_hdr_validate(struct rpc_info *rpci, rpc_buf_t *rpc_buf, uint32 *xaction,
454 bool verbose)
456 /* First the state against the type */
457 mbool ret = 0;
458 rpc_header_t header = bcm_rpc_header(rpci, rpc_buf);
460 mboolset(ret, bcm_rpc_hdr_state_validate(rpci, header));
461 mboolset(ret, bcm_rpc_hdr_xaction_validate(rpci, header, xaction, verbose));
462 mboolset(ret, bcm_rpc_hdr_session_validate(rpci, header));
464 return ret;
467 struct rpc_info *
468 BCMATTACHFN(bcm_rpc_attach)(void *pdev, osl_t *osh, struct rpc_transport_info *rpc_th,
469 uint16 *devid)
471 struct rpc_info *rpci;
473 #ifndef WLC_HIGH
474 UNUSED_PARAMETER(devid);
475 #endif /* WLC_HIGH */
477 if ((rpci = (struct rpc_info *)MALLOC(osh, sizeof(struct rpc_info))) == NULL)
478 return NULL;
480 bzero(rpci, sizeof(struct rpc_info));
482 rpci->osh = osh;
483 rpci->pdev = pdev;
484 rpci->rpc_th = rpc_th;
485 rpci->session = 0x69;
487 /* initialize lock and queue */
488 rpci->rpc_osh = rpc_osl_attach(osh);
490 if (rpci->rpc_osh == NULL) {
491 RPC_ERR(("bcm_rpc_attach: osl attach failed\n"));
492 goto fail;
495 bcm_rpc_tp_register_cb(rpc_th, bcm_rpc_tx_complete, rpci,
496 bcm_rpc_buf_recv, rpci, rpci->rpc_osh);
498 rpci->version = EPI_VERSION_NUM;
500 rpci->rpc_tp_hdr_len = RPC_HDR_LEN + bcm_rpc_buf_tp_header_len(rpci->rpc_th);
502 #if defined(WLC_HIGH) && defined(NDIS)
503 bcm_rpc_hello(rpci);
504 #endif
506 if (bcm_rpc_up(rpci)) {
507 RPC_ERR(("bcm_rpc_attach: rpc_up failed\n"));
508 goto fail;
511 #ifdef WLC_HIGH
512 *devid = (uint16)rpci->chipid;
513 #endif
515 return rpci;
516 fail:
517 bcm_rpc_detach(rpci);
518 return NULL;
521 static uint16
522 bcm_rpc_reorder_next_xid(struct rpc_info *rpci)
524 rpc_buf_t * buf;
525 rpc_header_t header;
526 uint16 cur_xid = rpci->oe_trans;
527 uint16 min_xid = 0;
528 uint16 min_delta = 0xffff;
529 uint16 xid, delta;
531 ASSERT(rpci->rpc_th);
532 for (buf = rpci->reorder_pktq;
533 buf != NULL;
534 buf = bcm_rpc_buf_next_get(rpci->rpc_th, buf)) {
535 header = bcm_rpc_header(rpci, buf);
536 xid = RPC_HDR_XACTION(header);
537 delta = xid - cur_xid;
539 if (delta < min_delta) {
540 min_delta = delta;
541 min_xid = xid;
545 return min_xid;
548 void
549 BCMATTACHFN(bcm_rpc_detach)(struct rpc_info *rpci)
551 if (!rpci)
552 return;
554 bcm_rpc_down(rpci);
556 if (rpci->reorder_pktq) {
557 rpc_buf_t * node;
558 ASSERT(rpci->rpc_th);
559 while ((node = rpci->reorder_pktq)) {
560 rpci->reorder_pktq = bcm_rpc_buf_next_get(rpci->rpc_th,
561 node);
562 bcm_rpc_buf_next_set(rpci->rpc_th, node, NULL);
563 #if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY) || defined(BCM_RPC_ROC)
564 PKTFREE(rpci->osh, node, FALSE);
565 #else
566 bcm_rpc_tp_buf_free(rpci->rpc_th, node);
567 #endif /* BCM_RPC_NOCOPY || BCM_RPC_RXNOCOPY || BCM_RPC_ROC */
569 ASSERT(rpci->reorder_pktq == NULL);
570 rpci->reorder_depth = 0;
571 rpci->reorder_depth_max = 0;
574 #ifdef WLC_HIGH
575 #if defined(NDIS)
576 if (rpci->reorder_lock_alloced)
 577 		NdisFreeSpinLock(&rpci->reorder_lock);
578 #endif
579 #endif /* WLC_HIGH */
581 /* rpc is going away, cut off registered cbs from rpc_tp layer */
582 bcm_rpc_tp_deregister_cb(rpci->rpc_th);
584 #ifdef WLC_LOW
585 bcm_rpc_tp_txflowctlcb_deinit(rpci->rpc_th);
586 #endif
588 if (rpci->rpc_osh)
589 rpc_osl_detach(rpci->rpc_osh);
591 MFREE(rpci->osh, rpci, sizeof(struct rpc_info));
592 rpci = NULL;
595 rpc_buf_t *
596 bcm_rpc_buf_alloc(struct rpc_info *rpci, int datalen)
598 rpc_buf_t *rpc_buf;
599 int len = datalen + RPC_HDR_LEN;
601 ASSERT(rpci->rpc_th);
602 rpc_buf = bcm_rpc_tp_buf_alloc(rpci->rpc_th, len);
604 if (rpc_buf == NULL)
605 return NULL;
607 /* Reserve space for RPC Header */
608 bcm_rpc_buf_pull(rpci->rpc_th, rpc_buf, RPC_HDR_LEN);
610 return rpc_buf;
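/* Illustrative usage sketch (not from the original source; payload_len and the payload
 * contents are hypothetical): a caller allocates a buffer, writes its call-specific
 * payload, then hands it to bcm_rpc_call(), which pushes the RPC header back on in
 * front of the payload and owns the buffer from that point on:
 *
 *     rpc_buf_t *b = bcm_rpc_buf_alloc(rpci, payload_len);
 *     if (b != NULL) {
 *         uint8 *payload = (uint8 *)bcm_rpc_buf_data(rpci->rpc_th, b);
 *         ... fill payload_len bytes ...
 *         (void)bcm_rpc_call(rpci, b);
 *     }
 */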
613 uint
614 bcm_rpc_buf_header_len(struct rpc_info *rpci)
616 return rpci->rpc_tp_hdr_len;
619 void
620 bcm_rpc_buf_free(struct rpc_info *rpci, rpc_buf_t *rpc_buf)
622 bcm_rpc_tp_buf_free(rpci->rpc_th, rpc_buf);
625 void
626 bcm_rpc_rxcb_init(struct rpc_info *rpci, void *ctx, rpc_dispatch_cb_t cb,
627 void *dnctx, rpc_down_cb_t dncb, rpc_resync_cb_t resync_cb, rpc_txdone_cb_t txdone_cb)
629 rpci->dispatchcb = cb;
630 rpci->ctx = ctx;
631 rpci->dnctx = dnctx;
632 rpci->dncb = dncb;
633 rpci->resync_cb = resync_cb;
634 rpci->txdone_cb = txdone_cb;
637 void
638 bcm_rpc_rxcb_deinit(struct rpc_info *rpci)
640 if (!rpci)
641 return;
643 rpci->dispatchcb = NULL;
644 rpci->ctx = NULL;
645 rpci->dnctx = NULL;
646 rpci->dncb = NULL;
647 rpci->resync_cb = NULL;
650 struct rpc_transport_info *
651 bcm_rpc_tp_get(struct rpc_info *rpci)
653 return rpci->rpc_th;
656 #ifdef BCM_RPC_TOC
657 static void
658 bcm_rpc_tp_tx_encap(struct rpc_info *rpci, rpc_buf_t *rpc_buf)
660 uint32 *tp_lenp;
661 uint32 rpc_len;
663 rpc_len = pkttotlen(rpci->osh, rpc_buf);
664 tp_lenp = (uint32*)bcm_rpc_buf_push(rpci->rpc_th, rpc_buf, BCM_RPC_TP_ENCAP_LEN);
665 *tp_lenp = htol32(rpc_len);
668 #endif
669 static void
670 rpc_header_prep(struct rpc_info *rpci, rpc_header_t *header, uint type, uint action)
672 uint32 v;
674 v = 0;
675 v |= (type << 24);
677 /* Mgmt action follows the header */
678 if (type == RPC_TYPE_MGN) {
679 *(header + 1) = htol32(action);
680 #ifdef WLC_HIGH
681 if (action == RPC_CONNECT || action == RPC_RESET || action == RPC_HELLO)
682 *(header + 2) = htol32(rpci->version);
683 #endif
685 #ifdef WLC_LOW
686 else if (type == RPC_TYPE_RTN)
687 v |= (rpci->rtn_trans);
688 #endif
689 else
690 v |= (rpci->trans);
692 v |= (rpci->session << 16);
694 *header = htol32(v);
696 RPC_TRACE(("rpc_header_prep: type:0x%x action: %d trans:0x%x\n",
697 type, action, rpci->trans));
700 #if defined(WLC_HIGH) && defined(NDIS)
702 static int
703 bcm_rpc_hello(struct rpc_info *rpci)
705 int ret = -1, count = 10;
706 rpc_buf_t *rpc_buf;
707 rpc_header_t *header;
709 RPC_OSL_LOCK(rpci->rpc_osh);
710 rpci->state = WAIT_HELLO;
711 rpci->wait_init = TRUE;
712 RPC_OSL_UNLOCK(rpci->rpc_osh);
714 while (ret && (count-- > 0)) {
716 /* Allocate a frame, prep it, send and wait */
717 rpc_buf = bcm_rpc_tp_buf_alloc(rpci->rpc_th, RPC_HDR_LEN + RPC_ACN_LEN + RPC_VER_LEN
718 + RPC_CHIPID_LEN);
720 if (!rpc_buf)
721 break;
723 header = (rpc_header_t *)bcm_rpc_buf_data(rpci->rpc_th, rpc_buf);
725 rpc_header_prep(rpci, header, RPC_TYPE_MGN, RPC_HELLO);
727 if (bcm_rpc_tp_buf_send(rpci->rpc_th, rpc_buf)) {
728 RPC_ERR(("%s: bcm_rpc_tp_buf_send() call failed\n", __FUNCTION__));
729 return -1;
732 RPC_ERR(("%s: waiting to receive hello\n", __FUNCTION__));
734 RPC_OSL_WAIT(rpci->rpc_osh, RPC_INIT_WAIT_TIMEOUT_MSEC, NULL);
736 RPC_TRACE(("%s: wait done, ret = %d\n", __FUNCTION__, ret));
738 /* See if we timed out or actually initialized */
739 RPC_OSL_LOCK(rpci->rpc_osh);
740 if (rpci->state == HELLO_RECEIVED)
741 ret = 0;
742 RPC_OSL_UNLOCK(rpci->rpc_osh);
746 /* See if we timed out or actually initialized */
747 RPC_OSL_LOCK(rpci->rpc_osh);
748 rpci->wait_init = FALSE;
749 RPC_OSL_UNLOCK(rpci->rpc_osh);
751 return ret;
754 #endif /* WLC_HIGH && NDIS */
756 #ifdef WLC_HIGH
758 static int
759 bcm_rpc_up(struct rpc_info *rpci)
761 rpc_buf_t *rpc_buf;
762 rpc_header_t *header;
763 int ret;
765 /* Allocate a frame, prep it, send and wait */
766 rpc_buf = bcm_rpc_tp_buf_alloc(rpci->rpc_th, RPC_HDR_LEN + RPC_ACN_LEN + RPC_VER_LEN
767 + RPC_CHIPID_LEN);
769 if (!rpc_buf)
770 return -1;
772 header = (rpc_header_t *)bcm_rpc_buf_data(rpci->rpc_th, rpc_buf);
774 rpc_header_prep(rpci, header, RPC_TYPE_MGN, RPC_CONNECT);
776 RPC_OSL_LOCK(rpci->rpc_osh);
777 rpci->state = WAIT_INITIALIZING;
778 rpci->wait_init = TRUE;
780 #if defined(NDIS)
781 if (!rpci->reorder_lock_alloced) {
782 NdisAllocateSpinLock(&rpci->reorder_lock);
783 rpci->reorder_lock_alloced = TRUE;
785 #elif defined(linux)
786 spin_lock_init(&rpci->reorder_lock);
787 #endif
789 RPC_OSL_UNLOCK(rpci->rpc_osh);
791 if (bcm_rpc_tp_buf_send(rpci->rpc_th, rpc_buf)) {
792 RPC_ERR(("%s: bcm_rpc_tp_buf_send() call failed\n", __FUNCTION__));
793 return -1;
796 /* Wait for state to change to established. The receive thread knows what to do */
797 RPC_ERR(("%s: waiting to be connected\n", __FUNCTION__));
799 ret = RPC_OSL_WAIT(rpci->rpc_osh, RPC_INIT_WAIT_TIMEOUT_MSEC, NULL);
801 RPC_TRACE(("%s: wait done, ret = %d\n", __FUNCTION__, ret));
803 if (ret < 0) {
804 rpci->wait_init = FALSE;
805 return ret;
808 /* See if we timed out or actually initialized */
809 RPC_OSL_LOCK(rpci->rpc_osh);
810 if (rpci->state == ESTABLISHED)
811 ret = 0;
812 else
813 ret = -1;
814 rpci->wait_init = FALSE;
815 RPC_OSL_UNLOCK(rpci->rpc_osh);
817 #ifdef BCMDBG_RPC
818 bcm_rpc_pktlog_init(rpci);
819 #endif
821 return ret;
825 bcm_rpc_is_asleep(struct rpc_info *rpci)
827 return (rpci->state == ASLEEP);
830 bool
831 bcm_rpc_sleep(struct rpc_info *rpci)
833 if (!rpci->suspend_enable)
834 return TRUE;
835 bcm_rpc_tp_sleep(rpci->rpc_th);
836 rpci->state = ASLEEP;
837 /* Ignore anything coming after this */
838 #ifdef NDIS
839 bcm_rpc_down(rpci);
840 #else
841 rpci->session++;
842 #endif
843 return TRUE;
846 #ifdef NDIS
848 bcm_rpc_shutdown(struct rpc_info *rpci)
850 int ret = -1;
852 if (rpci) {
853 ret = bcm_rpc_tp_shutdown(rpci->rpc_th);
854 rpci->state = DISCONNECTED;
856 return ret;
858 #endif /* NDIS */
860 bool
861 bcm_rpc_resume(struct rpc_info *rpci, int *fw_reload)
863 if (!rpci->suspend_enable)
864 return TRUE;
866 bcm_rpc_tp_resume(rpci->rpc_th, fw_reload);
867 #ifdef NDIS
868 if (fw_reload) {
869 rpci->trans = 0;
870 rpci->oe_trans = 0;
871 bcm_rpc_hello(rpci);
872 bcm_rpc_up(rpci);
874 else
875 rpci->state = ESTABLISHED;
876 #else
877 if (bcm_rpc_resume_oe(rpci) == 0) {
878 rpci->trans = 0;
879 rpci->oe_trans = 0;
881 #endif
882 RPC_TRACE(("bcm_rpc_resume done, state %d\n", rpci->state));
883 return (rpci->state == ESTABLISHED);
886 static int
887 bcm_rpc_resume_oe(struct rpc_info *rpci)
889 rpc_buf_t *rpc_buf;
890 rpc_header_t *header;
891 int ret;
893 /* Allocate a frame, prep it, send and wait */
894 rpc_buf = bcm_rpc_tp_buf_alloc(rpci->rpc_th, RPC_HDR_LEN + RPC_ACN_LEN + RPC_VER_LEN);
896 if (!rpc_buf)
897 return -1;
899 header = (rpc_header_t *)bcm_rpc_buf_data(rpci->rpc_th, rpc_buf);
901 rpc_header_prep(rpci, header, RPC_TYPE_MGN, RPC_RESET);
903 RPC_OSL_LOCK(rpci->rpc_osh);
904 rpci->state = WAIT_RESUME;
905 rpci->wait_init = TRUE;
906 RPC_OSL_UNLOCK(rpci->rpc_osh);
908 /* Don't care for the return value */
909 if (bcm_rpc_tp_buf_send(rpci->rpc_th, rpc_buf)) {
910 RPC_ERR(("%s: bcm_rpc_tp_buf_send() call failed\n", __FUNCTION__));
911 return -1;
914 /* Wait for state to change to established. The receive thread knows what to do */
915 RPC_ERR(("%s: waiting to be resumed\n", __FUNCTION__));
917 ret = RPC_OSL_WAIT(rpci->rpc_osh, RPC_INIT_WAIT_TIMEOUT_MSEC, NULL);
919 RPC_TRACE(("%s: wait done, ret = %d\n", __FUNCTION__, ret));
921 if (ret < 0) {
922 rpci->wait_init = FALSE;
923 return ret;
926 /* See if we timed out or actually initialized */
927 RPC_OSL_LOCK(rpci->rpc_osh);
928 if (rpci->state == ESTABLISHED)
929 ret = 0;
930 else
931 ret = -1;
932 rpci->wait_init = FALSE;
933 RPC_OSL_UNLOCK(rpci->rpc_osh);
935 return ret;
937 #else
938 static int
939 bcm_rpc_up(struct rpc_info *rpci)
941 rpci->state = WAIT_INITIALIZING;
943 #ifdef BCMDBG_RPC
944 bcm_rpc_pktlog_init(rpci);
945 hndrte_cons_addcmd("rpcpktdump", bcm_rpc_dump_pktlog_low, (uint32)rpci);
946 #endif
947 hndrte_cons_addcmd("rpcdump", bcm_rpc_dump_state, (uint32)rpci);
948 return 0;
951 static int
952 bcm_rpc_connect_resp(struct rpc_info *rpci, rpc_acn_t acn, uint32 reason)
954 rpc_buf_t *rpc_buf;
955 rpc_header_t *header;
957 /* Allocate a frame, prep it, send and wait */
958 rpc_buf = bcm_rpc_tp_buf_alloc(rpci->rpc_th, RPC_HDR_LEN + RPC_ACN_LEN +
959 RPC_RC_LEN + RPC_VER_LEN + RPC_CHIPID_LEN);
960 if (!rpc_buf) {
961 RPC_ERR(("%s: bcm_rpc_tp_buf_alloc() failed\n", __FUNCTION__));
962 return FALSE;
965 header = (rpc_header_t *)bcm_rpc_buf_data(rpci->rpc_th, rpc_buf);
967 rpc_header_prep(rpci, header, RPC_TYPE_MGN, acn);
969 *(header + 2) = ltoh32(rpci->version);
970 *(header + 3) = ltoh32(reason);
971 #ifdef BCMCHIPID
972 *(header + 4) = ltoh32(BCMCHIPID);
973 #endif /* BCMCHIPID */
974 if (bcm_rpc_tp_buf_send(rpci->rpc_th, rpc_buf)) {
975 RPC_ERR(("%s: bcm_rpc_tp_buf_send() call failed\n", __FUNCTION__));
976 return FALSE;
979 return TRUE;
981 #endif /* WLC_HIGH */
983 void
984 bcm_rpc_watchdog(struct rpc_info *rpci)
986 static uint32 uptime = 0;
987 #ifdef WLC_LOW
988 /* rpc watchdog is called every 10 msec in the low driver */
989 static uint32 count = 0;
990 count++;
991 if (count % 100 == 0) {
992 count = 0;
993 uptime++;
994 if (uptime % 60 == 0)
995 RPC_TRACE(("rpc uptime %d minutes\n", (uptime / 60)));
997 #else
998 uptime++;
999 if (uptime % 60 == 0) {
1000 RPC_TRACE(("rpc uptime %d minutes\n", (uptime / 60)));
1002 #endif
1004 bcm_rpc_tp_watchdog(rpci->rpc_th);
1007 void
1008 bcm_rpc_down(struct rpc_info *rpci)
1010 RPC_ERR(("%s\n", __FUNCTION__));
1012 #ifdef BCMDBG_RPC
1013 bcm_rpc_pktlog_deinit(rpci);
1014 #endif
1016 RPC_OSL_LOCK(rpci->rpc_osh);
1017 if (rpci->state != DISCONNECTED && rpci->state != ASLEEP) {
1018 #ifdef WLC_HIGH
1019 bcm_rpc_fatal_dump(rpci);
1020 #else
1021 bcm_rpc_dump_state((uint32)rpci, 0, NULL);
1022 #endif
1023 rpci->state = DISCONNECTED;
1024 RPC_OSL_UNLOCK(rpci->rpc_osh);
1025 if (rpci->dncb)
1026 (rpci->dncb)(rpci->dnctx);
1027 bcm_rpc_tp_down(rpci->rpc_th);
1028 return;
1030 RPC_OSL_UNLOCK(rpci->rpc_osh);
1033 #if defined(USBAP) && (defined(WLC_HIGH) && !defined(WLC_LOW))
1034 /* For USBAP external image, reboot system upon RPC error instead of just turning RPC down */
1035 #include <siutils.h>
1036 void
1037 bcm_rpc_err_down(struct rpc_info *rpci)
1039 si_t *sih = si_kattach(SI_OSH);
1041 RPC_ERR(("%s: rebooting system due to RPC error.\n", __FUNCTION__));
1042 si_watchdog(sih, 1);
1044 #else
1045 #define bcm_rpc_err_down bcm_rpc_down
1046 #endif
1048 static void
1049 bcm_rpc_tx_complete(void *ctx, rpc_buf_t *buf, int status)
1051 struct rpc_info *rpci = (struct rpc_info *)ctx;
1053 RPC_TRACE(("%s: status 0x%x\n", __FUNCTION__, status));
1055 ASSERT(rpci && rpci->rpc_th);
1057 if (buf) {
1058 if (rpci->txdone_cb) {
1059 			/* the wl driver must pull off the rpc/tp header once dbus is done with the buffer */
1060 rpci->txdone_cb(rpci->ctx, buf);
1061 } else
1062 bcm_rpc_tp_buf_free(rpci->rpc_th, buf);
1067 bcm_rpc_call(struct rpc_info *rpci, rpc_buf_t *b)
1069 rpc_header_t *header;
1070 int err = 0;
1071 #ifdef BCMDBG_RPC
1072 struct rpc_pktlog cur;
1073 #endif
1075 RPC_TRACE(("%s:\n", __FUNCTION__));
1077 RPC_OSL_LOCK(rpci->rpc_osh);
1078 if (rpci->state != ESTABLISHED) {
1079 err = -1;
1080 RPC_OSL_UNLOCK(rpci->rpc_osh);
1081 #ifdef BCM_RPC_TOC
1082 header = (rpc_header_t *)bcm_rpc_buf_push(rpci->rpc_th, b, RPC_HDR_LEN);
1083 rpc_header_prep(rpci, header, RPC_TYPE_DATA, 0);
1084 bcm_rpc_tp_tx_encap(rpci, b);
1086 if (rpci->txdone_cb) {
1087 		 	/* the wl driver must pull off the rpc/tp header once dbus is done with the buffer */
1088 rpci->txdone_cb(rpci->ctx, b);
1089 } else
1091 #endif
1092 bcm_rpc_buf_free(rpci, b);
1094 goto done;
1096 RPC_OSL_UNLOCK(rpci->rpc_osh);
1098 #ifdef BCMDBG_RPC
1099 /* Prepare the current log entry but add only if the TX was successful */
1100 /* This is done here before DATA pointer gets modified */
1101 if (RPC_PKTLOG_ON())
1102 bcm_rpc_prep_entry(rpci, b, &cur, TRUE);
1103 #endif
1105 header = (rpc_header_t *)bcm_rpc_buf_push(rpci->rpc_th, b, RPC_HDR_LEN);
1107 rpc_header_prep(rpci, header, RPC_TYPE_DATA, 0);
1109 #ifdef BCMDBG_RPC
1110 if (RPC_PKTTRACE_ON()) {
1111 #ifdef BCMDBG
1112 prhex("RPC Call ", bcm_rpc_buf_data(rpci->rpc_th, b),
1113 bcm_rpc_buf_len_get(rpci->rpc_th, b));
1114 #endif
1116 #endif /* BCMDBG_RPC */
1118 if (bcm_rpc_tp_buf_send(rpci->rpc_th, b)) {
1119 RPC_ERR(("%s: bcm_rpc_tp_buf_send() call failed\n", __FUNCTION__));
1121 if (rpci->txdone_cb) {
1122 rpci->txdone_cb(rpci->ctx, b);
1123 } else
1124 bcm_rpc_tp_buf_free(rpci->rpc_th, b);
1126 bcm_rpc_err_down(rpci);
1127 return -1;
1130 RPC_OSL_LOCK(rpci->rpc_osh);
1131 rpci->trans++;
1132 RPC_OSL_UNLOCK(rpci->rpc_osh);
1134 #ifdef BCMDBG_RPC /* Since successful add the entry */
1135 if (RPC_PKTLOG_ON()) {
1136 bcm_rpc_add_entry_tx(rpci, &cur);
1138 #endif
1139 done:
1140 return err;
1143 #ifdef WLC_HIGH
1144 rpc_buf_t *
1145 bcm_rpc_call_with_return(struct rpc_info *rpci, rpc_buf_t *b)
1147 rpc_header_t *header;
1148 rpc_buf_t *retb = NULL;
1149 int ret;
1150 #ifdef BCMDBG_RPC
1151 struct rpc_pktlog cur;
1152 #endif
1153 bool timedout = FALSE;
1154 uint32 start_wait_time;
1156 RPC_TRACE(("%s:\n", __FUNCTION__));
1158 RPC_OSL_LOCK(rpci->rpc_osh);
1159 if (rpci->state != ESTABLISHED) {
1160 RPC_OSL_UNLOCK(rpci->rpc_osh);
1161 RPC_ERR(("%s: RPC call before ESTABLISHED state\n", __FUNCTION__));
1162 bcm_rpc_buf_free(rpci, b);
1163 return NULL;
1165 RPC_OSL_UNLOCK(rpci->rpc_osh);
1167 #ifdef BCMDBG_RPC
1168 /* Prepare the current log entry but add only if the TX was successful */
1169 /* This is done here before DATA pointer gets modified */
1170 if (RPC_PKTLOG_ON())
1171 bcm_rpc_prep_entry(rpci, b, &cur, TRUE);
1172 #endif
1174 header = (rpc_header_t *)bcm_rpc_buf_push(rpci->rpc_th, b, RPC_HDR_LEN);
1176 rpc_header_prep(rpci, header, RPC_TYPE_RTN, 0);
1178 RPC_OSL_LOCK(rpci->rpc_osh);
1179 rpci->trans++;
1180 ASSERT(rpci->rtn_rpcbuf == NULL);
1181 rpci->wait_return = TRUE;
1182 RPC_OSL_UNLOCK(rpci->rpc_osh);
1184 	/* Prep the return packet BEFORE sending the buffer, and do so while holding
1185 	 * the spinlock at raised IRQL
1187 ret = bcm_rpc_tp_recv_rtn(rpci->rpc_th);
1188 if ((ret == BCME_RXFAIL) || (ret == BCME_NODEVICE)) {
1189 RPC_ERR(("%s: bcm_rpc_tp_recv_rtn() failed\n", __FUNCTION__));
1191 RPC_OSL_LOCK(rpci->rpc_osh);
1192 rpci->wait_return = FALSE;
1193 RPC_OSL_UNLOCK(rpci->rpc_osh);
1194 bcm_rpc_err_down(rpci);
1195 return NULL;
1199 #ifdef BCMDBG_RPC
1200 if (RPC_PKTTRACE_ON()) {
1201 #ifdef BCMDBG
1202 prhex("RPC Call With Return Buf", bcm_rpc_buf_data(rpci->rpc_th, b),
1203 bcm_rpc_buf_len_get(rpci->rpc_th, b));
1204 #endif
1206 #endif /* BCMDBG_RPC */
1208 if (bcm_rpc_tp_buf_send(rpci->rpc_th, b)) {
1209 RPC_ERR(("%s: bcm_rpc_bus_buf_send() failed\n", __FUNCTION__));
1211 RPC_OSL_LOCK(rpci->rpc_osh);
1212 rpci->wait_return = FALSE;
1213 RPC_OSL_UNLOCK(rpci->rpc_osh);
1214 bcm_rpc_err_down(rpci);
1215 return NULL;
1218 start_wait_time = OSL_SYSUPTIME();
1219 ret = RPC_OSL_WAIT(rpci->rpc_osh, RPC_RETURN_WAIT_TIMEOUT_MSEC, &timedout);
1221 	/* If RPC_OSL_WAIT returns early because a signal is pending, keep waiting for
1222 	 * the signal to be processed until the full timeout has elapsed
1224 RPC_OSL_LOCK(rpci->rpc_osh);
1225 while ((ret < 0) && ((OSL_SYSUPTIME() - start_wait_time) <= RPC_RETURN_WAIT_TIMEOUT_MSEC)) {
1226 RPC_OSL_UNLOCK(rpci->rpc_osh);
1227 ret = RPC_OSL_WAIT(rpci->rpc_osh, RPC_RETURN_WAIT_TIMEOUT_MSEC, &timedout);
1228 RPC_OSL_LOCK(rpci->rpc_osh);
1231 if (ret || timedout) {
1232 RPC_ERR(("%s: RPC call trans 0x%x return wait err %d timedout %d limit %d(ms)\n",
1233 __FUNCTION__, (rpci->trans - 1), ret, timedout,
1234 RPC_RETURN_WAIT_TIMEOUT_MSEC));
1235 rpci->wait_return = FALSE;
1236 RPC_OSL_UNLOCK(rpci->rpc_osh);
1237 #ifdef BCMDBG_RPC
1238 bcm_rpc_dump_pktlog_high(rpci);
1239 #endif
1240 bcm_rpc_err_down(rpci);
1241 return NULL;
1244 /* See if we timed out or actually initialized */
1245 ASSERT(rpci->rtn_rpcbuf != NULL); /* Make sure we've got the response */
1246 retb = rpci->rtn_rpcbuf;
1247 rpci->rtn_rpcbuf = NULL;
1248 rpci->wait_return = FALSE; /* Could have woken up by timeout */
1249 RPC_OSL_UNLOCK(rpci->rpc_osh);
1251 #ifdef BCMDBG_RPC /* Since successful add the entry */
1252 if (RPC_PKTLOG_ON())
1253 bcm_rpc_add_entry_tx(rpci, &cur);
1254 #endif
1256 return retb;
1258 #endif /* WLC_HIGH */
1260 #ifdef WLC_LOW
1262 bcm_rpc_call_return(struct rpc_info *rpci, rpc_buf_t *b)
1264 rpc_header_t *header;
1266 RPC_TRACE(("%s\n", __FUNCTION__));
1268 header = (rpc_header_t *)bcm_rpc_buf_push(rpci->rpc_th, b, RPC_HDR_LEN);
1270 rpc_header_prep(rpci, header, RPC_TYPE_RTN, 0);
1272 #ifdef BCMDBG_RPC
1273 if (RPC_PKTTRACE_ON()) {
1274 #ifdef BCMDBG
1275 prhex("RPC Call Return Buf", bcm_rpc_buf_data(rpci->rpc_th, b),
1276 bcm_rpc_buf_len_get(rpci->rpc_th, b));
1277 #endif
1279 #endif /* BCMDBG_RPC */
1281 	/* If the TX fails, it is the sender's responsibility */
1282 if (bcm_rpc_tp_send_callreturn(rpci->rpc_th, b)) {
1283 RPC_ERR(("%s: bcm_rpc_tp_buf_send() call failed\n", __FUNCTION__));
1284 bcm_rpc_err_down(rpci);
1285 return -1;
1288 rpci->rtn_trans++;
1289 return 0;
1291 #endif /* WLC_LOW */
1293 /* This is expected to be called at DPC of the bus driver ? */
1294 static void
1295 bcm_rpc_buf_recv(void *context, rpc_buf_t *rpc_buf)
1297 uint xaction;
1298 struct rpc_info *rpci = (struct rpc_info *)context;
1299 mbool hdr_invalid = 0;
1300 ASSERT(rpci && rpci->rpc_th);
1302 RPC_TRACE(("%s:\n", __FUNCTION__));
1304 RPC_RO_LOCK(rpci);
1306 	/* Park for reorder only if the header itself checks out and only the xaction mismatches */
1307 hdr_invalid = bcm_rpc_hdr_validate(rpci, rpc_buf, &xaction, TRUE);
1309 if (mboolisset(hdr_invalid, HDR_XACTION_MISMATCH) &&
1310 !mboolisset(hdr_invalid, ~HDR_XACTION_MISMATCH)) {
1311 rpc_buf_t *node = rpci->reorder_pktq;
1312 rpci->cnt_xidooo++;
1313 rpci->reorder_depth++;
1314 if (rpci->reorder_depth > rpci->reorder_depth_max)
1315 rpci->reorder_depth_max = rpci->reorder_depth;
1317 /* Catch roll-over or retries */
1318 rpci->reorder_pktq = rpc_buf;
1320 if (node != NULL)
1321 bcm_rpc_buf_next_set(rpci->rpc_th, rpc_buf, node);
1323 /* if we have held too many packets, move past the hole */
1324 if (rpci->reorder_depth > BCM_RPC_REORDER_LIMIT) {
1325 uint16 next_xid = bcm_rpc_reorder_next_xid(rpci);
1327 RPC_ERR(("%s: reorder queue depth %d, skipping ID 0x%x to 0x%x\n",
1328 __FUNCTION__, rpci->reorder_depth,
1329 rpci->oe_trans, next_xid));
1330 rpci->cnt_reorder_overflow++;
1331 rpci->cnt_rx_drop_hole += (uint)(next_xid - rpci->oe_trans);
1332 rpci->oe_trans = next_xid;
1333 bcm_rpc_process_reorder_queue(rpci);
1336 goto done;
1339 /* Bail out if failed */
1340 if (!bcm_rpc_buf_recv_inorder(rpci, rpc_buf, hdr_invalid))
1341 goto done;
1343 /* see if we can make progress on the reorder backlog */
1344 bcm_rpc_process_reorder_queue(rpci);
1346 done:
1347 RPC_RO_UNLOCK(rpci);
1350 static void
1351 bcm_rpc_process_reorder_queue(rpc_info_t *rpci)
1353 uint32 xaction;
1354 mbool hdr_invalid = 0;
1356 while (rpci->reorder_pktq) {
1357 bool found = FALSE;
1358 rpc_buf_t *buf = rpci->reorder_pktq;
1359 rpc_buf_t *prev = rpci->reorder_pktq;
1360 while (buf != NULL) {
1361 rpc_buf_t *next = bcm_rpc_buf_next_get(rpci->rpc_th, buf);
1362 hdr_invalid = bcm_rpc_hdr_validate(rpci, buf, &xaction, FALSE);
1364 if (!mboolisset(hdr_invalid, HDR_XACTION_MISMATCH)) {
1365 bcm_rpc_buf_next_set(rpci->rpc_th, buf, NULL);
1367 if (buf == rpci->reorder_pktq)
1368 rpci->reorder_pktq = next;
1369 else
1370 bcm_rpc_buf_next_set(rpci->rpc_th, prev, next);
1371 rpci->reorder_depth--;
1373 /* Bail out if failed */
1374 if (!bcm_rpc_buf_recv_inorder(rpci, buf, hdr_invalid))
1375 return;
1377 buf = NULL;
1378 found = TRUE;
1379 } else {
1380 prev = buf;
1381 buf = next;
1385 /* bail if not found */
1386 if (!found)
1387 break;
1390 return;
1393 static bool
1394 bcm_rpc_buf_recv_inorder(rpc_info_t *rpci, rpc_buf_t *rpc_buf, mbool hdr_invalid)
1396 rpc_header_t header;
1397 rpc_acn_t acn = RPC_NULL;
1399 ASSERT(rpci && rpci->rpc_th);
1401 RPC_TRACE(("%s: got rpc_buf %p len %d data %p\n", __FUNCTION__,
1402 rpc_buf, bcm_rpc_buf_len_get(rpci->rpc_th, rpc_buf),
1403 bcm_rpc_buf_data(rpci->rpc_th, rpc_buf)));
1405 #ifdef BCMDBG_RPC
1406 if (RPC_PKTTRACE_ON()) {
1407 #ifdef BCMDBG
1408 prhex("RPC Rx Buf", bcm_rpc_buf_data(rpci->rpc_th, rpc_buf),
1409 bcm_rpc_buf_len_get(rpci->rpc_th, rpc_buf));
1410 #endif
1412 #endif /* BCMDBG_RPC */
1414 header = bcm_rpc_header(rpci, rpc_buf);
1416 RPC_OSL_LOCK(rpci->rpc_osh);
1418 if (hdr_invalid) {
1419 RPC_ERR(("%s: bcm_rpc_hdr_validate failed on 0x%08x 0x%x\n", __FUNCTION__,
1420 header, hdr_invalid));
1421 #if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY) || defined(BCM_RPC_ROC)
1422 if (RPC_HDR_TYPE(header) != RPC_TYPE_RTN) {
1423 #if defined(USBAP)
1424 PKTFRMNATIVE(rpci->osh, rpc_buf);
1425 #endif
1426 PKTFREE(rpci->osh, rpc_buf, FALSE);
1428 #else
1429 bcm_rpc_tp_buf_free(rpci->rpc_th, rpc_buf);
1430 #endif /* defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY) || defined(BCM_RPC_ROC) */
1431 RPC_OSL_UNLOCK(rpci->rpc_osh);
1432 return FALSE;
1435 RPC_TRACE(("%s state:0x%x type:0x%x session:0x%x xacn:0x%x\n", __FUNCTION__, rpci->state,
1436 RPC_HDR_TYPE(header), RPC_HDR_SESSION(header), RPC_HDR_XACTION(header)));
1438 if (bcm_rpc_buf_len_get(rpci->rpc_th, rpc_buf) > RPC_HDR_LEN)
1439 bcm_rpc_buf_pull(rpci->rpc_th, rpc_buf, RPC_HDR_LEN);
1440 else {
1441 /* if the head packet ends with rpc_hdr, free and advance to next packet in chain */
1442 rpc_buf_t *next_p;
1444 ASSERT(bcm_rpc_buf_len_get(rpci->rpc_th, rpc_buf) == RPC_HDR_LEN);
1445 next_p = (rpc_buf_t*)PKTNEXT(rpci->osh, rpc_buf);
1447 RPC_TRACE(("%s: following pkt chain to pkt %p len %d\n", __FUNCTION__,
1448 next_p, bcm_rpc_buf_len_get(rpci->rpc_th, next_p)));
1450 PKTSETNEXT(rpci->osh, rpc_buf, NULL);
1451 bcm_rpc_tp_buf_free(rpci->rpc_th, rpc_buf);
1452 rpc_buf = next_p;
1453 if (rpc_buf == NULL) {
1454 RPC_OSL_UNLOCK(rpci->rpc_osh);
1455 return FALSE;
1459 switch (RPC_HDR_TYPE(header)) {
1460 case RPC_TYPE_MGN:
1461 acn = bcm_rpc_mgn_acn(rpci, rpc_buf);
1462 bcm_rpc_buf_pull(rpci->rpc_th, rpc_buf, RPC_ACN_LEN);
1463 RPC_TRACE(("Mgn: %x\n", acn));
1464 break;
1465 case RPC_TYPE_RTN:
1466 #ifdef WLC_HIGH
1467 rpci->oe_rtn_trans = RPC_HDR_XACTION(header) + 1;
1468 break;
1469 #endif
1470 case RPC_TYPE_DATA:
1471 rpci->oe_trans = RPC_HDR_XACTION(header) + 1;
1472 break;
1473 default:
1474 ASSERT(0);
1477 #ifdef WLC_HIGH
1478 rpc_buf = bcm_rpc_buf_recv_high(rpci, RPC_HDR_TYPE(header), acn, rpc_buf);
1479 #else
1480 rpc_buf = bcm_rpc_buf_recv_low(rpci, header, acn, rpc_buf);
1481 #endif
1482 RPC_OSL_UNLOCK(rpci->rpc_osh);
1484 if (rpc_buf)
1485 bcm_rpc_tp_buf_free(rpci->rpc_th, rpc_buf);
1486 return TRUE;
1489 #ifdef WLC_HIGH
1490 static void
1491 bcm_rpc_buf_recv_mgn_high(struct rpc_info *rpci, rpc_acn_t acn, rpc_buf_t *rpc_buf)
1493 rpc_rc_t reason = RPC_RC_ACK;
1494 uint32 version = 0;
1496 RPC_TRACE(("%s: Recvd:%x Version: 0x%x\nState: %x Session:%d\n", __FUNCTION__,
1497 acn, rpci->version, rpci->state, rpci->session));
1499 #ifndef NDIS
1500 if (acn == RPC_CONNECT_ACK || acn == RPC_CONNECT_NACK) {
1501 #else
1502 if (acn == RPC_HELLO || acn == RPC_CONNECT_ACK || acn == RPC_CONNECT_NACK) {
1503 #endif
1504 version = bcm_rpc_mgn_ver(rpci, rpc_buf);
1505 bcm_rpc_buf_pull(rpci->rpc_th, rpc_buf, RPC_VER_LEN);
1507 reason = bcm_rpc_mgn_reason(rpci, rpc_buf);
1508 bcm_rpc_buf_pull(rpci->rpc_th, rpc_buf, RPC_RC_LEN);
1510 RPC_ERR(("%s: Reason: %x Dongle Version: 0x%x\n", __FUNCTION__,
1511 reason, version));
1514 switch (acn) {
1515 #ifdef NDIS
1516 case RPC_HELLO:
1517 /* If the original thread has not given up,
1518 * then change the state and wake it up
1520 if (rpci->state == WAIT_HELLO) {
1521 rpci->state = HELLO_RECEIVED;
1523 RPC_ERR(("%s: Hello Received!\n", __FUNCTION__));
1524 if (rpci->wait_init)
1525 RPC_OSL_WAKE(rpci->rpc_osh);
1527 break;
1528 #endif
1529 case RPC_CONNECT_ACK:
1530 /* If the original thread has not given up,
1531 * then change the state and wake it up
1533 if (rpci->state != UNINITED) {
1534 rpci->state = ESTABLISHED;
1535 rpci->chipid = bcm_rpc_mgn_chipid(rpci, rpc_buf);
1536 bcm_rpc_buf_pull(rpci->rpc_th, rpc_buf, RPC_CHIPID_LEN);
1538 RPC_ERR(("%s: Connected!\n", __FUNCTION__));
1539 if (rpci->wait_init)
1540 RPC_OSL_WAKE(rpci->rpc_osh);
1542 ASSERT(reason != RPC_RC_VER_MISMATCH);
1543 break;
1545 case RPC_CONNECT_NACK:
1546 /* Connect failed. Just bail out by waking the thread */
1547 RPC_ERR(("%s: Connect failed !!!\n", __FUNCTION__));
1548 if (rpci->wait_init)
1549 RPC_OSL_WAKE(rpci->rpc_osh);
1550 break;
1552 case RPC_DOWN:
1553 RPC_OSL_UNLOCK(rpci->rpc_osh);
1554 #ifdef NDIS
1555 if ((KeGetCurrentIrql() == DISPATCH_LEVEL) &&
1556 (bcm_rpc_tp_tx_flowctl_get(rpci->rpc_th))) {
1557 RPC_TRACE(("%s: unsafe to down rpc, delay\n", __FUNCTION__));
1558 rpci->down_pending = TRUE;
1559 } else
1560 #endif
1562 bcm_rpc_down(rpci);
1565 RPC_OSL_LOCK(rpci->rpc_osh);
1566 break;
1568 default:
1569 ASSERT(0);
1570 break;
1574 static rpc_buf_t *
1575 bcm_rpc_buf_recv_high(struct rpc_info *rpci, rpc_type_t type, rpc_acn_t acn, rpc_buf_t *rpc_buf)
1577 RPC_TRACE(("%s: acn %d\n", __FUNCTION__, acn));
1579 switch (type) {
1580 case RPC_TYPE_RTN:
1581 if (rpci->wait_return) {
1582 rpci->rtn_rpcbuf = rpc_buf;
1583 /* This buffer will be freed in bcm_rpc_tp_recv_rtn() */
1584 rpc_buf = NULL;
1585 RPC_OSL_WAKE(rpci->rpc_osh);
1586 } else if (rpci->state != DISCONNECTED)
1587 RPC_ERR(("%s: Received return buffer but no one waiting\n", __FUNCTION__));
1588 break;
1590 case RPC_TYPE_MGN:
1591 #if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY) || defined(BCM_RPC_ROC)
1592 bcm_rpc_buf_recv_mgn_high(rpci, acn, rpc_buf);
1593 #if defined(USBAP)
1594 PKTFRMNATIVE(rpci->osh, rpc_buf);
1595 #endif
1596 PKTFREE(rpci->osh, rpc_buf, FALSE);
1597 rpc_buf = NULL;
1598 #else
1599 bcm_rpc_buf_recv_mgn_high(rpci, acn, rpc_buf);
1600 #endif /* defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY) || defined(BCM_RPC_ROC) */
1601 break;
1603 case RPC_TYPE_DATA:
1604 ASSERT(rpci->state == ESTABLISHED);
1605 #ifdef BCMDBG_RPC
1606 /* Prepare the current log entry but add only if the TX was successful */
1607 /* This is done here before DATA pointer gets modified */
1608 if (RPC_PKTLOG_ON()) {
1609 struct rpc_pktlog cur;
1610 bcm_rpc_prep_entry(rpci, rpc_buf, &cur, FALSE);
1611 bcm_rpc_add_entry_rx(rpci, &cur);
1613 #endif /* BCMDBG_RPC */
1614 if (rpci->dispatchcb) {
1615 #if !defined(USBAP)
1616 #if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY) || defined(BCM_RPC_ROC)
1617 PKTTONATIVE(rpci->osh, rpc_buf);
1618 #endif /* BCM_RPC_NOCOPY || BCM_RPC_RXNOCOPY || BCM_RPC_ROC */
1619 #endif /* !USBAP */
1620 (rpci->dispatchcb)(rpci->ctx, rpc_buf);
1621 /* The dispatch routine will free the buffer */
1622 rpc_buf = NULL;
1623 } else {
1624 RPC_ERR(("%s: no rpcq callback, drop the pkt\n", __FUNCTION__));
1626 break;
1628 default:
1629 ASSERT(0);
1632 return (rpc_buf);
1634 #else
1635 static void
1636 bcm_rpc_buf_recv_mgn_low(struct rpc_info *rpci, uint8 session, rpc_acn_t acn, rpc_buf_t *rpc_buf)
1638 uint32 reason = 0;
1639 uint32 version = 0;
1641 RPC_TRACE(("%s: Recvd:%x Version: 0x%x\nState: %x Session:%d\n", __FUNCTION__,
1642 acn,
1643 rpci->version, rpci->state, rpci->session));
1645 if (acn == RPC_HELLO) {
1646 bcm_rpc_connect_resp(rpci, RPC_HELLO, RPC_RC_HELLO);
1647 } else if (acn == RPC_CONNECT || acn == RPC_RESET) {
1648 version = bcm_rpc_mgn_ver(rpci, rpc_buf);
1650 RPC_ERR(("%s: Host Version: 0x%x\n", __FUNCTION__, version));
1652 ASSERT(rpci->state != UNINITED);
1654 if (version != rpci->version) {
1655 RPC_ERR(("RPC Establish failed due to version mismatch\n"));
1656 RPC_ERR(("Expected: 0x%x Got: 0x%x\n", rpci->version, version));
1657 RPC_ERR(("Connect failed !!!\n"));
1659 rpci->state = WAIT_INITIALIZING;
1660 bcm_rpc_connect_resp(rpci, RPC_CONNECT_NACK, RPC_RC_VER_MISMATCH);
1661 return;
1664 /* When receiving CONNECT/RESET from HIGH, just
1665 * resync to the HIGH's session and reset the transactions
1667 if ((acn == RPC_CONNECT) && (rpci->state == ESTABLISHED))
1668 reason = RPC_RC_RECONNECT;
1670 rpci->session = session;
1672 if (bcm_rpc_connect_resp(rpci, RPC_CONNECT_ACK, reason)) {
1673 /* call the resync callback if already established */
1674 if ((acn == RPC_CONNECT) && (rpci->state == ESTABLISHED) &&
1675 (rpci->resync_cb)) {
1676 (rpci->resync_cb)(rpci->dnctx);
1678 rpci->state = ESTABLISHED;
1679 } else {
1680 RPC_ERR(("%s: RPC Establish failed !!!\n", __FUNCTION__));
1683 RPC_ERR(("Connected Session:%x!\n", rpci->session));
1684 rpci->oe_trans = 0;
1685 rpci->trans = 0;
1686 rpci->rtn_trans = 0;
1687 } else if (acn == RPC_DOWN) {
1688 bcm_rpc_down(rpci);
1692 static rpc_buf_t *
1693 bcm_rpc_buf_recv_low(struct rpc_info *rpci, rpc_header_t header,
1694 rpc_acn_t acn, rpc_buf_t *rpc_buf)
1696 switch (RPC_HDR_TYPE(header)) {
1697 case RPC_TYPE_MGN:
1698 bcm_rpc_buf_recv_mgn_low(rpci, RPC_HDR_SESSION(header), acn, rpc_buf);
1699 break;
1701 case RPC_TYPE_RTN:
1702 case RPC_TYPE_DATA:
1703 ASSERT(rpci->state == ESTABLISHED);
1704 #ifdef BCMDBG_RPC
1705 /* Prepare the current log entry but add only if the TX was successful */
1706 /* This is done here before DATA pointer gets modified */
1707 if (RPC_PKTLOG_ON()) {
1708 struct rpc_pktlog cur;
1709 bcm_rpc_prep_entry(rpci, rpc_buf, &cur, FALSE);
1710 bcm_rpc_add_entry_rx(rpci, &cur);
1712 #endif /* BCMDBG_RPC */
1714 if (rpci->dispatchcb) {
1715 (rpci->dispatchcb)(rpci->ctx, rpc_buf);
1716 rpc_buf = NULL;
1717 } else {
1718 RPC_ERR(("%s: no rpcq callback, drop the pkt\n", __FUNCTION__));
1719 ASSERT(0);
1721 break;
1723 default:
1724 ASSERT(0);
1727 return (rpc_buf);
1729 #endif /* WLC_HIGH */
1731 #ifdef BCMDBG_RPC
1732 static void
1733 bcm_rpc_pktlog_init(rpc_info_t *rpci)
1735 rpc_msg_level |= RPC_PKTLOG_VAL;
1737 if (RPC_PKTLOG_ON()) {
1738 if ((rpci->send_log = MALLOC(rpci->osh,
1739 sizeof(struct rpc_pktlog) * RPC_PKTLOG_SIZE)) == NULL)
1740 goto err;
1741 bzero(rpci->send_log, sizeof(struct rpc_pktlog) * RPC_PKTLOG_SIZE);
1742 if ((rpci->recv_log = MALLOC(rpci->osh,
1743 sizeof(struct rpc_pktlog) * RPC_PKTLOG_SIZE)) == NULL)
1744 goto err;
1745 bzero(rpci->recv_log, sizeof(struct rpc_pktlog) * RPC_PKTLOG_SIZE);
1746 return;
1748 RPC_ERR(("pktlog is on\n"));
1749 err:
1750 bcm_rpc_pktlog_deinit(rpci);
1753 static void
1754 bcm_rpc_pktlog_deinit(rpc_info_t *rpci)
1756 if (rpci->send_log) {
1757 MFREE(rpci->osh, rpci->send_log, sizeof(struct rpc_pktlog) * RPC_PKTLOG_SIZE);
1758 rpci->send_log = NULL;
1760 if (rpci->recv_log) {
1761 MFREE(rpci->osh, rpci->recv_log, sizeof(struct rpc_pktlog) * RPC_PKTLOG_SIZE);
1762 rpci->recv_log = NULL;
1764 rpc_msg_level &= ~RPC_PKTLOG_VAL; /* Turn off logging on failure */
1767 static struct rpc_pktlog *
1768 bcm_rpc_prep_entry(struct rpc_info * rpci, rpc_buf_t *b, struct rpc_pktlog *cur, bool tx)
1770 bzero(cur, sizeof(struct rpc_pktlog));
1771 if (tx) {
1772 cur->trans = rpci->trans;
1773 } else {
1774 /* this function is called after match, so the oe_trans is already advanced */
1775 cur->trans = rpci->oe_trans - 1;
1777 cur->len = bcm_rpc_buf_len_get(rpci->rpc_th, b);
1778 bcopy(bcm_rpc_buf_data(rpci->rpc_th, b), cur->data, RPC_PKTLOG_DATASIZE);
1779 return cur;
1782 static void
1783 bcm_rpc_add_entry_tx(struct rpc_info * rpci, struct rpc_pktlog *cur)
1785 RPC_OSL_LOCK(rpci->rpc_osh);
1786 bcopy(cur, &rpci->send_log[rpci->send_log_idx], sizeof(struct rpc_pktlog));
1787 rpci->send_log_idx = (rpci->send_log_idx + 1) % RPC_PKTLOG_SIZE;
1789 if (rpci->send_log_num < RPC_PKTLOG_SIZE)
1790 rpci->send_log_num++;
1792 RPC_OSL_UNLOCK(rpci->rpc_osh);
1795 static void
1796 bcm_rpc_add_entry_rx(struct rpc_info * rpci, struct rpc_pktlog *cur)
1798 bcopy(cur, &rpci->recv_log[rpci->recv_log_idx], sizeof(struct rpc_pktlog));
1799 rpci->recv_log_idx = (rpci->recv_log_idx + 1) % RPC_PKTLOG_SIZE;
1801 if (rpci->recv_log_num < RPC_PKTLOG_SIZE)
1802 rpci->recv_log_num++;
1804 #endif /* BCMDBG_RPC */
1806 #ifdef WLC_HIGH
1808 bcm_rpc_dump(rpc_info_t *rpci, struct bcmstrbuf *b)
1810 #ifdef BCMDBG
1812 bcm_bprintf(b, "\nHOST rpc dump:\n");
1813 RPC_OSL_LOCK(rpci->rpc_osh);
1814 bcm_bprintf(b, "Version: 0x%x State: %x\n", rpci->version, rpci->state);
1815 bcm_bprintf(b, "session %d trans 0x%x oe_trans 0x%x rtn_trans 0x%x oe_rtn_trans 0x%x\n",
1816 rpci->session, rpci->trans, rpci->oe_trans,
1817 rpci->rtn_trans, rpci->oe_rtn_trans);
1818 bcm_bprintf(b, "xactionID out of order %d\n", rpci->cnt_xidooo);
1819 bcm_bprintf(b, "reorder queue depth %u first ID 0x%x, max depth %u, tossthreshold %u\n",
1820 rpci->reorder_depth, bcm_rpc_reorder_next_xid(rpci), rpci->reorder_depth_max,
1821 BCM_RPC_REORDER_LIMIT);
1823 RPC_OSL_UNLOCK(rpci->rpc_osh);
1824 return bcm_rpc_tp_dump(rpci->rpc_th, b);
1825 #else
1826 return 0;
1827 #endif /* BCMDBG */
1831 bcm_rpc_pktlog_get(struct rpc_info *rpci, uint32 *buf, uint buf_size, bool send)
1833 int ret = -1;
1835 #ifdef BCMDBG_RPC
1836 int start, i, tot;
1838 /* Clear the whole buffer */
1839 bzero(buf, buf_size);
1840 RPC_OSL_LOCK(rpci->rpc_osh);
1841 if (send) {
1842 ret = rpci->send_log_num;
1843 if (ret < RPC_PKTLOG_SIZE)
1844 start = 0;
1845 else
1846 start = (rpci->send_log_idx + 1) % RPC_PKTLOG_SIZE;
1847 } else {
1848 ret = rpci->recv_log_num;
1849 if (ret < RPC_PKTLOG_SIZE)
1850 start = 0;
1851 else
1852 start = (rpci->recv_log_idx + 1) % RPC_PKTLOG_SIZE;
1855 /* Return only first byte */
1856 if (buf_size < (uint) (ret * RPC_PKTLOG_RD_LEN)) {
1857 RPC_OSL_UNLOCK(rpci->rpc_osh);
1858 RPC_ERR(("%s buf too short\n", __FUNCTION__));
1859 return BCME_BUFTOOSHORT;
1862 if (ret == 0) {
1863 RPC_OSL_UNLOCK(rpci->rpc_osh);
1864 RPC_ERR(("%s no record\n", __FUNCTION__));
1865 return ret;
1868 tot = ret;
1869 for (i = 0; tot > 0; tot--, i++) {
1870 if (send) {
1871 buf[i*RPC_PKTLOG_RD_LEN] = rpci->send_log[start].data[0];
1872 buf[i*RPC_PKTLOG_RD_LEN+1] = rpci->send_log[start].trans;
1873 buf[i*RPC_PKTLOG_RD_LEN+2] = rpci->send_log[start].len;
1874 start++;
1875 } else {
1876 buf[i*RPC_PKTLOG_RD_LEN] = rpci->recv_log[start].data[0];
1877 buf[i*RPC_PKTLOG_RD_LEN+1] = rpci->recv_log[start].trans;
1878 buf[i*RPC_PKTLOG_RD_LEN+2] = rpci->recv_log[start].len;
1879 start++;
1881 start = (start % RPC_PKTLOG_SIZE);
1883 RPC_OSL_UNLOCK(rpci->rpc_osh);
1885 #endif /* BCMDBG_RPC */
1886 return ret;
1888 #endif /* WLC_HIGH */
1891 #ifdef BCMDBG_RPC
1893 static void
1894 _bcm_rpc_dump_pktlog(rpc_info_t *rpci)
1896 int ret = -1;
1897 int start, i;
1899 RPC_OSL_LOCK(rpci->rpc_osh);
1900 ret = rpci->send_log_num;
1901 if (ret == 0)
1902 goto done;
1904 if (ret < RPC_PKTLOG_SIZE)
1905 start = 0;
1906 else
1907 start = (rpci->send_log_idx + 1) % RPC_PKTLOG_SIZE;
1909 printf("send %d\n", ret);
1910 for (i = 0; ret > 0; ret--, i++) {
1911 printf("[%d] trans 0x%x len %d data 0x%x\n", i,
1912 rpci->send_log[start].trans,
1913 rpci->send_log[start].len,
1914 rpci->send_log[start].data[0]);
1915 start++;
1916 start = (start % RPC_PKTLOG_SIZE);
1919 ret = rpci->recv_log_num;
1920 if (ret == 0)
1921 goto done;
1923 if (ret < RPC_PKTLOG_SIZE)
1924 start = 0;
1925 else
1926 start = (rpci->recv_log_idx + 1) % RPC_PKTLOG_SIZE;
1928 printf("recv %d\n", ret);
1929 for (i = 0; ret > 0; ret--, i++) {
1930 printf("[%d] trans 0x%x len %d data 0x%x\n", i,
1931 rpci->recv_log[start].trans,
1932 rpci->recv_log[start].len,
1933 rpci->recv_log[start].data[0]);
1934 start++;
1935 start = (start % RPC_PKTLOG_SIZE);
1938 done:
1939 RPC_OSL_UNLOCK(rpci->rpc_osh);
1942 #ifdef WLC_HIGH
1943 static void
1944 bcm_rpc_dump_pktlog_high(rpc_info_t *rpci)
1946 printf("HOST rpc pktlog dump:\n");
1947 _bcm_rpc_dump_pktlog(rpci);
1950 #else
1952 static void
1953 bcm_rpc_dump_pktlog_low(uint32 arg, uint argc, char *argv[])
1955 rpc_info_t *rpci;
1957 rpci = (rpc_info_t *)(uintptr)arg;
1959 printf("DONGLE rpc pktlog dump:\n");
1960 _bcm_rpc_dump_pktlog(rpci);
1962 #endif /* WLC_HIGH */
1963 #endif /* BCMDBG_RPC */
1965 #ifdef WLC_LOW
1966 static void
1967 bcm_rpc_dump_state(uint32 arg, uint argc, char *argv[])
1968 #else
1969 static void
1970 bcm_rpc_fatal_dump(void *arg)
1971 #endif
1973 rpc_info_t *rpci = (rpc_info_t *)(uintptr)arg;
1974 printf("DONGLE rpc dump:\n");
1975 printf("Version: 0x%x State: %x\n", rpci->version, rpci->state);
1976 printf("session %d trans 0x%x oe_trans 0x%x rtn_trans 0x%x\n",
1977 rpci->session, rpci->trans, rpci->oe_trans,
1978 rpci->rtn_trans);
1979 printf("xactionID out of order %u reorder ovfl %u dropped hole %u\n",
1980 rpci->cnt_xidooo, rpci->cnt_reorder_overflow, rpci->cnt_rx_drop_hole);
1981 printf("reorder queue depth %u first ID 0x%x reorder_q_depth_max %d, tossthreshold %u\n",
1982 rpci->reorder_depth, bcm_rpc_reorder_next_xid(rpci), rpci->reorder_depth_max,
1983 BCM_RPC_REORDER_LIMIT);
1985 #ifdef WLC_LOW
1986 bcm_rpc_tp_dump(rpci->rpc_th);
1987 #endif
1990 void
1991 bcm_rpc_msglevel_set(struct rpc_info *rpci, uint16 msglevel, bool high)
1993 #ifdef WLC_HIGH
1994 ASSERT(high == TRUE);
1995 /* high 8 bits are for rpc, low 8 bits are for tp */
1996 rpc_msg_level = msglevel >> 8;
1997 bcm_rpc_tp_msglevel_set(rpci->rpc_th, (uint8)(msglevel & 0xff), TRUE);
1998 return;
1999 #else
2000 ASSERT(high == FALSE);
2001 /* high 8 bits are for rpc, low 8 bits are for tp */
2002 rpc_msg_level = msglevel >> 8;
2003 bcm_rpc_tp_msglevel_set(rpci->rpc_th, (uint8)(msglevel & 0xff), FALSE);
2004 return;
2005 #endif
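/* Worked example (added for clarity, values hypothetical): a msglevel of 0x0102 sets
 * rpc_msg_level to 0x01 for this layer and passes 0x02 down to the transport layer
 * via bcm_rpc_tp_msglevel_set().
 */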
2008 void
2009 bcm_rpc_dngl_suspend_enable_set(rpc_info_t *rpc, uint32 val)
2011 rpc->suspend_enable = val;
2014 void
2015 bcm_rpc_dngl_suspend_enable_get(rpc_info_t *rpc, uint32 *pval)
2017 *pval = rpc->suspend_enable;