4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
6 * IO dispatcher for a shared memory channel driver.
8 * Copyright (C) 2005-2006 Texas Instruments, Inc.
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
21 * There is an important invariant condition which must be maintained per
22 * channel outside of bridge_chnl_get_ioc() and IO_Dispatch(), violation of
23 * which may cause timeouts and/or failure of the sync_wait_on_event
26 #include <linux/types.h>
29 #include <dspbridge/host_os.h>
30 #include <linux/workqueue.h>
32 /* ----------------------------------- DSP/BIOS Bridge */
33 #include <dspbridge/dbdefs.h>
36 #include <dspbridge/dbc.h>
39 #include <dspbridge/ntfy.h>
40 #include <dspbridge/sync.h>
42 /* Hardware Abstraction Layer */
47 #include <dspbridge/dspdeh.h>
48 #include <dspbridge/dspio.h>
49 #include <dspbridge/dspioctl.h>
50 #include <dspbridge/wdt.h>
52 #include <tiomap_io.h>
53 #include <_tiomap_pwr.h>
55 /* Platform Manager */
56 #include <dspbridge/cod.h>
57 #include <dspbridge/node.h>
58 #include <dspbridge/dev.h>
61 #include <dspbridge/rms_sh.h>
62 #include <dspbridge/mgr.h>
63 #include <dspbridge/drv.h>
65 #include "module_list.h"
68 #include <dspbridge/io_sm.h>
71 /* Defines, Data Structures, Typedefs */
72 #define OUTPUTNOTREADY 0xffff
73 #define NOTENABLED 0xffff /* Channel(s) not enabled */
75 #define EXTEND "_EXT_END"
77 #define SWAP_WORD(x) (x)
78 #define UL_PAGE_ALIGN_SIZE 0x10000 /* Page Align Size */
80 #define MAX_PM_REQS 32
82 #define MMU_FAULT_HEAD1 0xa5a5a5a5
83 #define MMU_FAULT_HEAD2 0x96969696
85 #define MAX_MMU_DBGBUFF 10240
87 /* IO Manager: only one created per board */
89 /* These four fields must be the first fields in a io_mgr_ struct */
90 /* Bridge device context */
91 struct bridge_dev_context
*hbridge_context
;
92 /* Function interface to Bridge driver */
93 struct bridge_drv_interface
*intf_fxns
;
94 struct dev_object
*hdev_obj
; /* Device this board represents */
96 /* These fields initialized in bridge_io_create() */
97 struct chnl_mgr
*hchnl_mgr
;
98 struct shm
*shared_mem
; /* Shared Memory control */
99 u8
*input
; /* Address of input channel */
100 u8
*output
; /* Address of output channel */
101 struct msg_mgr
*hmsg_mgr
; /* Message manager */
102 /* Msg control for from DSP messages */
103 struct msg_ctrl
*msg_input_ctrl
;
104 /* Msg control for to DSP messages */
105 struct msg_ctrl
*msg_output_ctrl
;
106 u8
*msg_input
; /* Address of input messages */
107 u8
*msg_output
; /* Address of output messages */
108 u32 usm_buf_size
; /* Size of a shared memory I/O channel */
109 bool shared_irq
; /* Is this IRQ shared? */
110 u32 word_size
; /* Size in bytes of DSP word */
111 u16 intr_val
; /* Interrupt value */
112 /* Private extnd proc info; mmu setup */
113 struct mgr_processorextinfo ext_proc_info
;
114 struct cmm_object
*hcmm_mgr
; /* Shared Mem Mngr */
115 struct work_struct io_workq
; /* workqueue */
116 #if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
117 u32 ul_trace_buffer_begin
; /* Trace message start address */
118 u32 ul_trace_buffer_end
; /* Trace message end address */
119 u32 ul_trace_buffer_current
; /* Trace message current address */
120 u32 ul_gpp_read_pointer
; /* GPP Read pointer to Trace buffer */
126 u32 dpc_req
; /* Number of requested DPC's. */
127 u32 dpc_sched
; /* Number of executed DPC's. */
128 struct tasklet_struct dpc_tasklet
;
133 /* Function Prototypes */
134 static void io_dispatch_pm(struct io_mgr
*pio_mgr
);
135 static void notify_chnl_complete(struct chnl_object
*pchnl
,
136 struct chnl_irp
*chnl_packet_obj
);
137 static void input_chnl(struct io_mgr
*pio_mgr
, struct chnl_object
*pchnl
,
139 static void output_chnl(struct io_mgr
*pio_mgr
, struct chnl_object
*pchnl
,
141 static void input_msg(struct io_mgr
*pio_mgr
, struct msg_mgr
*hmsg_mgr
);
142 static void output_msg(struct io_mgr
*pio_mgr
, struct msg_mgr
*hmsg_mgr
);
143 static u32
find_ready_output(struct chnl_mgr
*chnl_mgr_obj
,
144 struct chnl_object
*pchnl
, u32 mask
);
146 /* Bus Addr (cached kernel) */
147 static int register_shm_segs(struct io_mgr
*hio_mgr
,
148 struct cod_manager
*cod_man
,
151 static inline void set_chnl_free(struct shm
*sm
, u32 chnl
)
153 sm
->host_free_mask
&= ~(1 << chnl
);
156 static inline void set_chnl_busy(struct shm
*sm
, u32 chnl
)
158 sm
->host_free_mask
|= 1 << chnl
;
163 * ======== bridge_io_create ========
164 * Create an IO manager object.
166 int bridge_io_create(struct io_mgr
**io_man
,
167 struct dev_object
*hdev_obj
,
168 const struct io_attrs
*mgr_attrts
)
171 struct io_mgr
*pio_mgr
= NULL
;
172 struct shm
*shared_mem
= NULL
;
173 struct bridge_dev_context
*hbridge_context
= NULL
;
174 struct cfg_devnode
*dev_node_obj
;
175 struct chnl_mgr
*hchnl_mgr
;
178 /* Check requirements */
179 if (!io_man
|| !mgr_attrts
|| mgr_attrts
->word_size
== 0) {
183 dev_get_chnl_mgr(hdev_obj
, &hchnl_mgr
);
184 if (!hchnl_mgr
|| hchnl_mgr
->hio_mgr
) {
189 * Message manager will be created when a file is loaded, since
190 * size of message buffer in shared memory is configurable in
193 dev_get_bridge_context(hdev_obj
, &hbridge_context
);
194 if (!hbridge_context
) {
198 dev_get_dev_type(hdev_obj
, &dev_type
);
200 * DSP shared memory area will get set properly when
201 * a program is loaded. They are unknown until a COFF file is
202 * loaded. I chose the value -1 because it was less likely to be
203 * a valid address than 0.
205 shared_mem
= (struct shm
*)-1;
207 /* Allocate IO manager object */
208 pio_mgr
= kzalloc(sizeof(struct io_mgr
), GFP_KERNEL
);
209 if (pio_mgr
== NULL
) {
214 /* Initialize chnl_mgr object */
215 #if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
216 pio_mgr
->pmsg
= NULL
;
218 pio_mgr
->hchnl_mgr
= hchnl_mgr
;
219 pio_mgr
->word_size
= mgr_attrts
->word_size
;
220 pio_mgr
->shared_mem
= shared_mem
;
222 if (dev_type
== DSP_UNIT
) {
223 /* Create an IO DPC */
224 tasklet_init(&pio_mgr
->dpc_tasklet
, io_dpc
, (u32
) pio_mgr
);
226 /* Initialize DPC counters */
227 pio_mgr
->dpc_req
= 0;
228 pio_mgr
->dpc_sched
= 0;
230 spin_lock_init(&pio_mgr
->dpc_lock
);
232 status
= dev_get_dev_node(hdev_obj
, &dev_node_obj
);
236 pio_mgr
->hbridge_context
= hbridge_context
;
237 pio_mgr
->shared_irq
= mgr_attrts
->irq_shared
;
246 bridge_io_destroy(pio_mgr
);
250 /* Return IO manager object to caller... */
251 hchnl_mgr
->hio_mgr
= pio_mgr
;
258 * ======== bridge_io_destroy ========
260 * Disable interrupts, destroy the IO manager.
262 int bridge_io_destroy(struct io_mgr
*hio_mgr
)
266 /* Free IO DPC object */
267 tasklet_kill(&hio_mgr
->dpc_tasklet
);
269 #if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
270 kfree(hio_mgr
->pmsg
);
273 /* Free this IO manager object */
283 * ======== bridge_io_on_loaded ========
285 * Called when a new program is loaded to get shared memory buffer
286 * parameters from COFF file. ulSharedBufferBase and ulSharedBufferLimit
287 * are in DSP address units.
289 int bridge_io_on_loaded(struct io_mgr
*hio_mgr
)
291 struct cod_manager
*cod_man
;
292 struct chnl_mgr
*hchnl_mgr
;
293 struct msg_mgr
*hmsg_mgr
;
295 u32 ul_shm_base_offset
;
297 u32 ul_shm_length
= -1;
298 u32 ul_mem_length
= -1;
301 u32 ul_msg_length
= -1;
312 /* DSP MMU setup table */
313 struct bridge_ioctl_extproc ae_proc
[BRDIOCTL_NUMOFMMUTLB
];
314 struct cfg_hostres
*host_res
;
315 struct bridge_dev_context
*pbridge_context
;
319 u32 ul_seg1_size
= 0;
325 u32 page_size
[] = { HW_PAGE_SIZE16MB
, HW_PAGE_SIZE1MB
,
326 HW_PAGE_SIZE64KB
, HW_PAGE_SIZE4KB
329 status
= dev_get_bridge_context(hio_mgr
->hdev_obj
, &pbridge_context
);
330 if (!pbridge_context
) {
335 host_res
= pbridge_context
->resources
;
340 status
= dev_get_cod_mgr(hio_mgr
->hdev_obj
, &cod_man
);
345 hchnl_mgr
= hio_mgr
->hchnl_mgr
;
346 /* The message manager is destroyed when the board is stopped. */
347 dev_get_msg_mgr(hio_mgr
->hdev_obj
, &hio_mgr
->hmsg_mgr
);
348 hmsg_mgr
= hio_mgr
->hmsg_mgr
;
349 if (!hchnl_mgr
|| !hmsg_mgr
) {
353 if (hio_mgr
->shared_mem
)
354 hio_mgr
->shared_mem
= NULL
;
356 /* Get start and length of channel part of shared memory */
357 status
= cod_get_sym_value(cod_man
, CHNL_SHARED_BUFFER_BASE_SYM
,
363 status
= cod_get_sym_value(cod_man
, CHNL_SHARED_BUFFER_LIMIT_SYM
,
369 if (ul_shm_limit
<= ul_shm_base
) {
373 /* Get total length in bytes */
374 ul_shm_length
= (ul_shm_limit
- ul_shm_base
+ 1) * hio_mgr
->word_size
;
375 /* Calculate size of a PROCCOPY shared memory region */
376 dev_dbg(bridge
, "%s: (proc)proccopy shmmem size: 0x%x bytes\n",
377 __func__
, (ul_shm_length
- sizeof(struct shm
)));
379 /* Get start and length of message part of shared memory */
380 status
= cod_get_sym_value(cod_man
, MSG_SHARED_BUFFER_BASE_SYM
,
383 status
= cod_get_sym_value(cod_man
, MSG_SHARED_BUFFER_LIMIT_SYM
,
386 if (ul_msg_limit
<= ul_msg_base
) {
390 * Length (bytes) of messaging part of shared
394 (ul_msg_limit
- ul_msg_base
+
395 1) * hio_mgr
->word_size
;
397 * Total length (bytes) of shared memory:
400 ul_mem_length
= ul_shm_length
+ ul_msg_length
;
409 #if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
411 cod_get_sym_value(cod_man
, DSP_TRACESEC_END
, &shm0_end
);
413 status
= cod_get_sym_value(cod_man
, SHM0_SHARED_END_SYM
,
421 cod_get_sym_value(cod_man
, DYNEXTBASE
, &ul_dyn_ext_base
);
426 status
= cod_get_sym_value(cod_man
, EXTEND
, &ul_ext_end
);
431 /* Get memory reserved in host resources */
432 (void)mgr_enum_processor_info(0, (struct dsp_processorinfo
*)
433 &hio_mgr
->ext_proc_info
,
435 mgr_processorextinfo
),
438 /* The first MMU TLB entry(TLB_0) in DCD is ShmBase. */
440 ul_gpp_pa
= host_res
->dw_mem_phys
[1];
441 ul_gpp_va
= host_res
->dw_mem_base
[1];
442 /* This is the virtual uncached ioremapped address!!! */
443 /* Why can't we directly take the DSPVA from the symbols? */
444 ul_dsp_va
= hio_mgr
->ext_proc_info
.ty_tlb
[0].ul_dsp_virt
;
445 ul_seg_size
= (shm0_end
- ul_dsp_va
) * hio_mgr
->word_size
;
447 (ul_ext_end
- ul_dyn_ext_base
) * hio_mgr
->word_size
;
449 ul_seg1_size
= (ul_seg1_size
+ 0xFFF) & (~0xFFFUL
);
451 ul_seg_size
= (ul_seg_size
+ 0xFFFF) & (~0xFFFFUL
);
452 ul_pad_size
= UL_PAGE_ALIGN_SIZE
- ((ul_gpp_pa
+ ul_seg1_size
) %
454 if (ul_pad_size
== UL_PAGE_ALIGN_SIZE
)
457 dev_dbg(bridge
, "%s: ul_gpp_pa %x, ul_gpp_va %x, ul_dsp_va %x, "
458 "shm0_end %x, ul_dyn_ext_base %x, ul_ext_end %x, "
459 "ul_seg_size %x ul_seg1_size %x \n", __func__
,
460 ul_gpp_pa
, ul_gpp_va
, ul_dsp_va
, shm0_end
,
461 ul_dyn_ext_base
, ul_ext_end
, ul_seg_size
, ul_seg1_size
);
463 if ((ul_seg_size
+ ul_seg1_size
+ ul_pad_size
) >
464 host_res
->dw_mem_length
[1]) {
465 pr_err("%s: shm Error, reserved 0x%x required 0x%x\n",
466 __func__
, host_res
->dw_mem_length
[1],
467 ul_seg_size
+ ul_seg1_size
+ ul_pad_size
);
475 va_curr
= ul_dyn_ext_base
* hio_mgr
->word_size
;
476 gpp_va_curr
= ul_gpp_va
;
477 num_bytes
= ul_seg1_size
;
480 * Try to fit into TLB entries. If not possible, push them to page
481 * tables. It is quite possible that if sections are not on
482 * bigger page boundary, we may end up making several small pages.
483 * So, push them onto page tables, if that is the case.
485 map_attrs
= 0x00000000;
486 map_attrs
= DSP_MAPLITTLEENDIAN
;
487 map_attrs
|= DSP_MAPPHYSICALADDR
;
488 map_attrs
|= DSP_MAPELEMSIZE32
;
489 map_attrs
|= DSP_MAPDONOTLOCK
;
493 * To find the max. page size with which both PA & VA are
496 all_bits
= pa_curr
| va_curr
;
497 dev_dbg(bridge
, "all_bits %x, pa_curr %x, va_curr %x, "
498 "num_bytes %x\n", all_bits
, pa_curr
, va_curr
,
500 for (i
= 0; i
< 4; i
++) {
501 if ((num_bytes
>= page_size
[i
]) && ((all_bits
&
506 pfn_brd_mem_map(hio_mgr
->hbridge_context
,
508 page_size
[i
], map_attrs
,
512 pa_curr
+= page_size
[i
];
513 va_curr
+= page_size
[i
];
514 gpp_va_curr
+= page_size
[i
];
515 num_bytes
-= page_size
[i
];
517 * Don't try smaller sizes. Hopefully we have
518 * reached an address aligned to a bigger page
525 pa_curr
+= ul_pad_size
;
526 va_curr
+= ul_pad_size
;
527 gpp_va_curr
+= ul_pad_size
;
529 /* Configure the TLB entries for the next cacheable segment */
530 num_bytes
= ul_seg_size
;
531 va_curr
= ul_dsp_va
* hio_mgr
->word_size
;
534 * To find the max. page size with which both PA & VA are
537 all_bits
= pa_curr
| va_curr
;
538 dev_dbg(bridge
, "all_bits for Seg1 %x, pa_curr %x, "
539 "va_curr %x, num_bytes %x\n", all_bits
, pa_curr
,
541 for (i
= 0; i
< 4; i
++) {
542 if (!(num_bytes
>= page_size
[i
]) ||
543 !((all_bits
& (page_size
[i
] - 1)) == 0))
545 if (ndx
< MAX_LOCK_TLB_ENTRIES
) {
547 * This is the physical address written to
550 ae_proc
[ndx
].ul_gpp_pa
= pa_curr
;
552 * This is the virtual uncached ioremapped
555 ae_proc
[ndx
].ul_gpp_va
= gpp_va_curr
;
556 ae_proc
[ndx
].ul_dsp_va
=
557 va_curr
/ hio_mgr
->word_size
;
558 ae_proc
[ndx
].ul_size
= page_size
[i
];
559 ae_proc
[ndx
].endianism
= HW_LITTLE_ENDIAN
;
560 ae_proc
[ndx
].elem_size
= HW_ELEM_SIZE16BIT
;
561 ae_proc
[ndx
].mixed_mode
= HW_MMU_CPUES
;
562 dev_dbg(bridge
, "shm MMU TLB entry PA %x"
563 " VA %x DSP_VA %x Size %x\n",
564 ae_proc
[ndx
].ul_gpp_pa
,
565 ae_proc
[ndx
].ul_gpp_va
,
566 ae_proc
[ndx
].ul_dsp_va
*
567 hio_mgr
->word_size
, page_size
[i
]);
572 pfn_brd_mem_map(hio_mgr
->hbridge_context
,
574 page_size
[i
], map_attrs
,
577 "shm MMU PTE entry PA %x"
578 " VA %x DSP_VA %x Size %x\n",
579 ae_proc
[ndx
].ul_gpp_pa
,
580 ae_proc
[ndx
].ul_gpp_va
,
581 ae_proc
[ndx
].ul_dsp_va
*
582 hio_mgr
->word_size
, page_size
[i
]);
586 pa_curr
+= page_size
[i
];
587 va_curr
+= page_size
[i
];
588 gpp_va_curr
+= page_size
[i
];
589 num_bytes
-= page_size
[i
];
591 * Don't try smaller sizes. Hopefully we have reached
592 * an address aligned to a bigger page size.
599 * Copy remaining entries from CDB. All entries are 1 MB and
600 * should not conflict with shm entries on MPU or DSP side.
602 for (i
= 3; i
< 7 && ndx
< BRDIOCTL_NUMOFMMUTLB
; i
++) {
603 if (hio_mgr
->ext_proc_info
.ty_tlb
[i
].ul_gpp_phys
== 0)
606 if ((hio_mgr
->ext_proc_info
.ty_tlb
[i
].ul_gpp_phys
>
608 && hio_mgr
->ext_proc_info
.ty_tlb
[i
].ul_gpp_phys
<=
609 ul_gpp_pa
+ ul_seg_size
)
610 || (hio_mgr
->ext_proc_info
.ty_tlb
[i
].ul_dsp_virt
>
611 ul_dsp_va
- 0x100000 / hio_mgr
->word_size
612 && hio_mgr
->ext_proc_info
.ty_tlb
[i
].ul_dsp_virt
<=
613 ul_dsp_va
+ ul_seg_size
/ hio_mgr
->word_size
)) {
615 "CDB MMU entry %d conflicts with "
616 "shm.\n\tCDB: GppPa %x, DspVa %x.\n\tSHM: "
617 "GppPa %x, DspVa %x, Bytes %x.\n", i
,
618 hio_mgr
->ext_proc_info
.ty_tlb
[i
].ul_gpp_phys
,
619 hio_mgr
->ext_proc_info
.ty_tlb
[i
].ul_dsp_virt
,
620 ul_gpp_pa
, ul_dsp_va
, ul_seg_size
);
623 if (ndx
< MAX_LOCK_TLB_ENTRIES
) {
624 ae_proc
[ndx
].ul_dsp_va
=
625 hio_mgr
->ext_proc_info
.ty_tlb
[i
].
627 ae_proc
[ndx
].ul_gpp_pa
=
628 hio_mgr
->ext_proc_info
.ty_tlb
[i
].
630 ae_proc
[ndx
].ul_gpp_va
= 0;
632 ae_proc
[ndx
].ul_size
= 0x100000;
633 dev_dbg(bridge
, "shm MMU entry PA %x "
634 "DSP_VA 0x%x\n", ae_proc
[ndx
].ul_gpp_pa
,
635 ae_proc
[ndx
].ul_dsp_va
);
638 status
= hio_mgr
->intf_fxns
->pfn_brd_mem_map
639 (hio_mgr
->hbridge_context
,
640 hio_mgr
->ext_proc_info
.ty_tlb
[i
].
642 hio_mgr
->ext_proc_info
.ty_tlb
[i
].
643 ul_dsp_virt
, 0x100000, map_attrs
,
651 map_attrs
= 0x00000000;
652 map_attrs
= DSP_MAPLITTLEENDIAN
;
653 map_attrs
|= DSP_MAPPHYSICALADDR
;
654 map_attrs
|= DSP_MAPELEMSIZE32
;
655 map_attrs
|= DSP_MAPDONOTLOCK
;
657 /* Map the L4 peripherals */
659 while (l4_peripheral_table
[i
].phys_addr
) {
660 status
= hio_mgr
->intf_fxns
->pfn_brd_mem_map
661 (hio_mgr
->hbridge_context
, l4_peripheral_table
[i
].phys_addr
,
662 l4_peripheral_table
[i
].dsp_virt_addr
, HW_PAGE_SIZE4KB
,
669 for (i
= ndx
; i
< BRDIOCTL_NUMOFMMUTLB
; i
++) {
670 ae_proc
[i
].ul_dsp_va
= 0;
671 ae_proc
[i
].ul_gpp_pa
= 0;
672 ae_proc
[i
].ul_gpp_va
= 0;
673 ae_proc
[i
].ul_size
= 0;
676 * Set the shm physical address entry (grayed out in CDB file)
677 * to the virtual uncached ioremapped address of shm reserved
680 hio_mgr
->ext_proc_info
.ty_tlb
[0].ul_gpp_phys
=
681 (ul_gpp_va
+ ul_seg1_size
+ ul_pad_size
);
684 * Need shm Phys addr. IO supports only one DSP for now:
687 if (!hio_mgr
->ext_proc_info
.ty_tlb
[0].ul_gpp_phys
|| num_procs
!= 1) {
691 if (ae_proc
[0].ul_dsp_va
> ul_shm_base
) {
695 /* ul_shm_base may not be at ul_dsp_va address */
696 ul_shm_base_offset
= (ul_shm_base
- ae_proc
[0].ul_dsp_va
) *
699 * bridge_dev_ctrl() will set dev context dsp-mmu info. In
700 * bridge_brd_start() the MMU will be re-programed with MMU
701 * DSPVa-GPPPa pair info while DSP is in a known
706 hio_mgr
->intf_fxns
->pfn_dev_cntrl(hio_mgr
->hbridge_context
,
707 BRDIOCTL_SETMMUCONFIG
,
711 ul_shm_base
= hio_mgr
->ext_proc_info
.ty_tlb
[0].ul_gpp_phys
;
712 ul_shm_base
+= ul_shm_base_offset
;
713 ul_shm_base
= (u32
) MEM_LINEAR_ADDRESS((void *)ul_shm_base
,
715 if (ul_shm_base
== 0) {
721 register_shm_segs(hio_mgr
, cod_man
, ae_proc
[0].ul_gpp_pa
);
724 hio_mgr
->shared_mem
= (struct shm
*)ul_shm_base
;
725 hio_mgr
->input
= (u8
*) hio_mgr
->shared_mem
+ sizeof(struct shm
);
726 hio_mgr
->output
= hio_mgr
->input
+ (ul_shm_length
-
727 sizeof(struct shm
)) / 2;
728 hio_mgr
->usm_buf_size
= hio_mgr
->output
- hio_mgr
->input
;
730 /* Set up Shared memory addresses for messaging. */
731 hio_mgr
->msg_input_ctrl
= (struct msg_ctrl
*)((u8
*) hio_mgr
->shared_mem
734 (u8
*) hio_mgr
->msg_input_ctrl
+ sizeof(struct msg_ctrl
);
735 hio_mgr
->msg_output_ctrl
=
736 (struct msg_ctrl
*)((u8
*) hio_mgr
->msg_input_ctrl
+
738 hio_mgr
->msg_output
=
739 (u8
*) hio_mgr
->msg_output_ctrl
+ sizeof(struct msg_ctrl
);
741 ((u8
*) hio_mgr
->msg_output_ctrl
- hio_mgr
->msg_input
)
742 / sizeof(struct msg_dspmsg
);
743 dev_dbg(bridge
, "IO MGR shm details: shared_mem %p, input %p, "
744 "output %p, msg_input_ctrl %p, msg_input %p, "
745 "msg_output_ctrl %p, msg_output %p\n",
746 (u8
*) hio_mgr
->shared_mem
, hio_mgr
->input
,
747 hio_mgr
->output
, (u8
*) hio_mgr
->msg_input_ctrl
,
748 hio_mgr
->msg_input
, (u8
*) hio_mgr
->msg_output_ctrl
,
749 hio_mgr
->msg_output
);
750 dev_dbg(bridge
, "(proc) Mas msgs in shared memory: 0x%x\n",
752 memset((void *)hio_mgr
->shared_mem
, 0, sizeof(struct shm
));
754 #if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
755 /* Get the start address of trace buffer */
756 status
= cod_get_sym_value(cod_man
, SYS_PUTCBEG
,
757 &hio_mgr
->ul_trace_buffer_begin
);
763 hio_mgr
->ul_gpp_read_pointer
= hio_mgr
->ul_trace_buffer_begin
=
764 (ul_gpp_va
+ ul_seg1_size
+ ul_pad_size
) +
765 (hio_mgr
->ul_trace_buffer_begin
- ul_dsp_va
);
766 /* Get the end address of trace buffer */
767 status
= cod_get_sym_value(cod_man
, SYS_PUTCEND
,
768 &hio_mgr
->ul_trace_buffer_end
);
773 hio_mgr
->ul_trace_buffer_end
=
774 (ul_gpp_va
+ ul_seg1_size
+ ul_pad_size
) +
775 (hio_mgr
->ul_trace_buffer_end
- ul_dsp_va
);
776 /* Get the current address of DSP write pointer */
777 status
= cod_get_sym_value(cod_man
, BRIDGE_SYS_PUTC_CURRENT
,
778 &hio_mgr
->ul_trace_buffer_current
);
783 hio_mgr
->ul_trace_buffer_current
=
784 (ul_gpp_va
+ ul_seg1_size
+ ul_pad_size
) +
785 (hio_mgr
->ul_trace_buffer_current
- ul_dsp_va
);
786 /* Calculate the size of trace buffer */
787 kfree(hio_mgr
->pmsg
);
788 hio_mgr
->pmsg
= kmalloc(((hio_mgr
->ul_trace_buffer_end
-
789 hio_mgr
->ul_trace_buffer_begin
) *
790 hio_mgr
->word_size
) + 2, GFP_KERNEL
);
794 hio_mgr
->ul_dsp_va
= ul_dsp_va
;
795 hio_mgr
->ul_gpp_va
= (ul_gpp_va
+ ul_seg1_size
+ ul_pad_size
);
803 * ======== io_buf_size ========
804 * Size of shared memory I/O channel.
806 u32
io_buf_size(struct io_mgr
*hio_mgr
)
809 return hio_mgr
->usm_buf_size
;
815 * ======== io_cancel_chnl ========
816 * Cancel IO on a given PCPY channel.
818 void io_cancel_chnl(struct io_mgr
*hio_mgr
, u32 chnl
)
820 struct io_mgr
*pio_mgr
= (struct io_mgr
*)hio_mgr
;
825 sm
= hio_mgr
->shared_mem
;
827 /* Inform DSP that we have no more buffers on this channel */
828 set_chnl_free(sm
, chnl
);
830 sm_interrupt_dsp(pio_mgr
->hbridge_context
, MBX_PCPY_CLASS
);
837 * ======== io_dispatch_pm ========
838 * Performs I/O dispatch on PM related messages from DSP
840 static void io_dispatch_pm(struct io_mgr
*pio_mgr
)
845 /* Perform Power message processing here */
846 parg
[0] = pio_mgr
->intr_val
;
848 /* Send the command to the Bridge clk/pwr manager to handle */
849 if (parg
[0] == MBX_PM_HIBERNATE_EN
) {
850 dev_dbg(bridge
, "PM: Hibernate command\n");
851 status
= pio_mgr
->intf_fxns
->
852 pfn_dev_cntrl(pio_mgr
->hbridge_context
,
853 BRDIOCTL_PWR_HIBERNATE
, parg
);
855 pr_err("%s: hibernate cmd failed 0x%x\n",
857 } else if (parg
[0] == MBX_PM_OPP_REQ
) {
858 parg
[1] = pio_mgr
->shared_mem
->opp_request
.rqst_opp_pt
;
859 dev_dbg(bridge
, "PM: Requested OPP = 0x%x\n", parg
[1]);
860 status
= pio_mgr
->intf_fxns
->
861 pfn_dev_cntrl(pio_mgr
->hbridge_context
,
862 BRDIOCTL_CONSTRAINT_REQUEST
, parg
);
864 dev_dbg(bridge
, "PM: Failed to set constraint "
865 "= 0x%x\n", parg
[1]);
867 dev_dbg(bridge
, "PM: clk control value of msg = 0x%x\n",
869 status
= pio_mgr
->intf_fxns
->
870 pfn_dev_cntrl(pio_mgr
->hbridge_context
,
871 BRDIOCTL_CLK_CTRL
, parg
);
873 dev_dbg(bridge
, "PM: Failed to ctrl the DSP clk"
879 * ======== io_dpc ========
880 * Deferred procedure call for shared memory channel driver ISR. Carries
881 * out the dispatch of I/O as a non-preemptible event.It can only be
882 * pre-empted by an ISR.
884 void io_dpc(unsigned long ref_data
)
886 struct io_mgr
*pio_mgr
= (struct io_mgr
*)ref_data
;
887 struct chnl_mgr
*chnl_mgr_obj
;
888 struct msg_mgr
*msg_mgr_obj
;
889 struct deh_mgr
*hdeh_mgr
;
895 chnl_mgr_obj
= pio_mgr
->hchnl_mgr
;
896 dev_get_msg_mgr(pio_mgr
->hdev_obj
, &msg_mgr_obj
);
897 dev_get_deh_mgr(pio_mgr
->hdev_obj
, &hdeh_mgr
);
901 requested
= pio_mgr
->dpc_req
;
902 serviced
= pio_mgr
->dpc_sched
;
904 if (serviced
== requested
)
907 /* Process pending DPC's */
909 /* Check value of interrupt reg to ensure it's a valid error */
910 if ((pio_mgr
->intr_val
> DEH_BASE
) &&
911 (pio_mgr
->intr_val
< DEH_LIMIT
)) {
912 /* Notify DSP/BIOS exception */
914 #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
915 print_dsp_debug_trace(pio_mgr
);
917 bridge_deh_notify(hdeh_mgr
, DSP_SYSERROR
,
921 /* Proc-copy chanel dispatch */
922 input_chnl(pio_mgr
, NULL
, IO_SERVICE
);
923 output_chnl(pio_mgr
, NULL
, IO_SERVICE
);
927 /* Perform I/O dispatch on message queues */
928 input_msg(pio_mgr
, msg_mgr_obj
);
929 output_msg(pio_mgr
, msg_mgr_obj
);
933 #ifdef CONFIG_TIDSPBRIDGE_DEBUG
934 if (pio_mgr
->intr_val
& MBX_DBG_SYSPRINTF
) {
935 /* Notify DSP Trace message */
936 print_dsp_debug_trace(pio_mgr
);
940 } while (serviced
!= requested
);
941 pio_mgr
->dpc_sched
= requested
;
947 * ======== io_mbox_msg ========
948 * Main interrupt handler for the shared memory IO manager.
949 * Calls the Bridge's CHNL_ISR to determine if this interrupt is ours, then
950 * schedules a DPC to dispatch I/O.
952 int io_mbox_msg(struct notifier_block
*self
, unsigned long len
, void *msg
)
954 struct io_mgr
*pio_mgr
;
955 struct dev_object
*dev_obj
;
958 dev_obj
= dev_get_first();
959 dev_get_io_mgr(dev_obj
, &pio_mgr
);
964 pio_mgr
->intr_val
= (u16
)((u32
)msg
);
965 if (pio_mgr
->intr_val
& MBX_PM_CLASS
)
966 io_dispatch_pm(pio_mgr
);
968 if (pio_mgr
->intr_val
== MBX_DEH_RESET
) {
969 pio_mgr
->intr_val
= 0;
971 spin_lock_irqsave(&pio_mgr
->dpc_lock
, flags
);
973 spin_unlock_irqrestore(&pio_mgr
->dpc_lock
, flags
);
974 tasklet_schedule(&pio_mgr
->dpc_tasklet
);
980 * ======== io_request_chnl ========
982 * Request channel I/O from the DSP. Sets flags in shared memory, then
983 * interrupts the DSP.
985 void io_request_chnl(struct io_mgr
*io_manager
, struct chnl_object
*pchnl
,
986 u8 io_mode
, u16
*mbx_val
)
988 struct chnl_mgr
*chnl_mgr_obj
;
991 if (!pchnl
|| !mbx_val
)
993 chnl_mgr_obj
= io_manager
->hchnl_mgr
;
994 sm
= io_manager
->shared_mem
;
995 if (io_mode
== IO_INPUT
) {
997 * Assertion fires if CHNL_AddIOReq() called on a stream
998 * which was cancelled, or attached to a dead board.
1000 DBC_ASSERT((pchnl
->dw_state
== CHNL_STATEREADY
) ||
1001 (pchnl
->dw_state
== CHNL_STATEEOS
));
1002 /* Indicate to the DSP we have a buffer available for input */
1003 set_chnl_busy(sm
, pchnl
->chnl_id
);
1004 *mbx_val
= MBX_PCPY_CLASS
;
1005 } else if (io_mode
== IO_OUTPUT
) {
1007 * This assertion fails if CHNL_AddIOReq() was called on a
1008 * stream which was cancelled, or attached to a dead board.
1010 DBC_ASSERT((pchnl
->dw_state
& ~CHNL_STATEEOS
) ==
1013 * Record the fact that we have a buffer available for
1016 chnl_mgr_obj
->dw_output_mask
|= (1 << pchnl
->chnl_id
);
1018 DBC_ASSERT(io_mode
); /* Shouldn't get here. */
1025 * ======== iosm_schedule ========
1026 * Schedule DPC for IO.
1028 void iosm_schedule(struct io_mgr
*io_manager
)
1030 unsigned long flags
;
1035 /* Increment count of DPC's pending. */
1036 spin_lock_irqsave(&io_manager
->dpc_lock
, flags
);
1037 io_manager
->dpc_req
++;
1038 spin_unlock_irqrestore(&io_manager
->dpc_lock
, flags
);
1041 tasklet_schedule(&io_manager
->dpc_tasklet
);
1045 * ======== find_ready_output ========
1046 * Search for a host output channel which is ready to send. If this is
1047 * called as a result of servicing the DPC, then implement a round
1048 * robin search; otherwise, this was called by a client thread (via
1049 * IO_Dispatch()), so just start searching from the current channel id.
1051 static u32
find_ready_output(struct chnl_mgr
*chnl_mgr_obj
,
1052 struct chnl_object
*pchnl
, u32 mask
)
1054 u32 ret
= OUTPUTNOTREADY
;
1059 NULL
? pchnl
->chnl_id
: (chnl_mgr_obj
->dw_last_output
+ 1));
1060 id
= ((id
== CHNL_MAXCHANNELS
) ? 0 : id
);
1061 if (id
>= CHNL_MAXCHANNELS
)
1070 chnl_mgr_obj
->dw_last_output
= id
;
1074 id
= ((id
== CHNL_MAXCHANNELS
) ? 0 : id
);
1076 } while (id
!= start_id
);
1083 * ======== input_chnl ========
1084 * Dispatch a buffer on an input channel.
1086 static void input_chnl(struct io_mgr
*pio_mgr
, struct chnl_object
*pchnl
,
1089 struct chnl_mgr
*chnl_mgr_obj
;
1093 struct chnl_irp
*chnl_packet_obj
= NULL
;
1095 bool clear_chnl
= false;
1096 bool notify_client
= false;
1098 sm
= pio_mgr
->shared_mem
;
1099 chnl_mgr_obj
= pio_mgr
->hchnl_mgr
;
1101 /* Attempt to perform input */
1102 if (!sm
->input_full
)
1105 bytes
= sm
->input_size
* chnl_mgr_obj
->word_size
;
1106 chnl_id
= sm
->input_id
;
1108 if (chnl_id
>= CHNL_MAXCHANNELS
) {
1109 /* Shouldn't be here: would indicate corrupted shm. */
1110 DBC_ASSERT(chnl_id
);
1113 pchnl
= chnl_mgr_obj
->ap_channel
[chnl_id
];
1114 if ((pchnl
!= NULL
) && CHNL_IS_INPUT(pchnl
->chnl_mode
)) {
1115 if ((pchnl
->dw_state
& ~CHNL_STATEEOS
) == CHNL_STATEREADY
) {
1116 if (!pchnl
->pio_requests
)
1118 /* Get the I/O request, and attempt a transfer */
1119 chnl_packet_obj
= (struct chnl_irp
*)
1120 lst_get_head(pchnl
->pio_requests
);
1121 if (chnl_packet_obj
) {
1123 if (pchnl
->cio_reqs
< 0)
1126 * Ensure we don't overflow the client's
1129 bytes
= min(bytes
, chnl_packet_obj
->byte_size
);
1130 memcpy(chnl_packet_obj
->host_sys_buf
,
1131 pio_mgr
->input
, bytes
);
1132 pchnl
->bytes_moved
+= bytes
;
1133 chnl_packet_obj
->byte_size
= bytes
;
1134 chnl_packet_obj
->dw_arg
= dw_arg
;
1135 chnl_packet_obj
->status
= CHNL_IOCSTATCOMPLETE
;
1139 * This assertion fails if the DSP
1140 * sends EOS more than once on this
1143 if (pchnl
->dw_state
& CHNL_STATEEOS
)
1146 * Zero bytes indicates EOS. Update
1147 * IOC status for this chirp, and also
1148 * the channel state.
1150 chnl_packet_obj
->status
|=
1152 pchnl
->dw_state
|= CHNL_STATEEOS
;
1154 * Notify that end of stream has
1157 ntfy_notify(pchnl
->ntfy_obj
,
1160 /* Tell DSP if no more I/O buffers available */
1161 if (!pchnl
->pio_requests
)
1163 if (LST_IS_EMPTY(pchnl
->pio_requests
)) {
1164 set_chnl_free(sm
, pchnl
->chnl_id
);
1167 notify_client
= true;
1170 * Input full for this channel, but we have no
1171 * buffers available. The channel must be
1172 * "idling". Clear out the physical input
1178 /* Input channel cancelled: clear input channel */
1182 /* DPC fired after host closed channel: clear input channel */
1186 /* Indicate to the DSP we have read the input */
1188 sm_interrupt_dsp(pio_mgr
->hbridge_context
, MBX_PCPY_CLASS
);
1190 if (notify_client
) {
1191 /* Notify client with IO completion record */
1192 notify_chnl_complete(pchnl
, chnl_packet_obj
);
1199 * ======== input_msg ========
1200 * Copies messages from shared memory to the message queues.
1202 static void input_msg(struct io_mgr
*pio_mgr
, struct msg_mgr
*hmsg_mgr
)
1207 struct msg_queue
*msg_queue_obj
;
1208 struct msg_frame
*pmsg
;
1209 struct msg_dspmsg msg
;
1210 struct msg_ctrl
*msg_ctr_obj
;
1214 msg_ctr_obj
= pio_mgr
->msg_input_ctrl
;
1215 /* Get the number of input messages to be read */
1216 input_empty
= msg_ctr_obj
->buf_empty
;
1217 num_msgs
= msg_ctr_obj
->size
;
1221 msg_input
= pio_mgr
->msg_input
;
1222 for (i
= 0; i
< num_msgs
; i
++) {
1223 /* Read the next message */
1224 addr
= (u32
) &(((struct msg_dspmsg
*)msg_input
)->msg
.dw_cmd
);
1226 read_ext32_bit_dsp_data(pio_mgr
->hbridge_context
, addr
);
1227 addr
= (u32
) &(((struct msg_dspmsg
*)msg_input
)->msg
.dw_arg1
);
1229 read_ext32_bit_dsp_data(pio_mgr
->hbridge_context
, addr
);
1230 addr
= (u32
) &(((struct msg_dspmsg
*)msg_input
)->msg
.dw_arg2
);
1232 read_ext32_bit_dsp_data(pio_mgr
->hbridge_context
, addr
);
1233 addr
= (u32
) &(((struct msg_dspmsg
*)msg_input
)->msgq_id
);
1235 read_ext32_bit_dsp_data(pio_mgr
->hbridge_context
, addr
);
1236 msg_input
+= sizeof(struct msg_dspmsg
);
1237 if (!hmsg_mgr
->queue_list
)
1240 /* Determine which queue to put the message in */
1242 (struct msg_queue
*)lst_first(hmsg_mgr
->queue_list
);
1243 dev_dbg(bridge
, "input msg: dw_cmd=0x%x dw_arg1=0x%x "
1244 "dw_arg2=0x%x msgq_id=0x%x \n", msg
.msg
.dw_cmd
,
1245 msg
.msg
.dw_arg1
, msg
.msg
.dw_arg2
, msg
.msgq_id
);
1247 * Interrupt may occur before shared memory and message
1248 * input locations have been set up. If all nodes were
1249 * cleaned up, hmsg_mgr->max_msgs should be 0.
1251 while (msg_queue_obj
!= NULL
) {
1252 if (msg
.msgq_id
== msg_queue_obj
->msgq_id
) {
1254 if (msg
.msg
.dw_cmd
== RMS_EXITACK
) {
1256 * Call the node exit notification.
1257 * The exit message does not get
1260 (*hmsg_mgr
->on_exit
) ((void *)
1265 * Not an exit acknowledgement, queue
1268 if (!msg_queue_obj
->msg_free_list
)
1270 pmsg
= (struct msg_frame
*)lst_get_head
1271 (msg_queue_obj
->msg_free_list
);
1272 if (msg_queue_obj
->msg_used_list
1274 pmsg
->msg_data
= msg
;
1276 (msg_queue_obj
->msg_used_list
,
1277 (struct list_head
*)pmsg
);
1279 (msg_queue_obj
->ntfy_obj
,
1280 DSP_NODEMESSAGEREADY
);
1282 (msg_queue_obj
->sync_event
);
1285 * No free frame to copy the
1288 pr_err("%s: no free msg frames,"
1289 " discarding msg\n",
1296 if (!hmsg_mgr
->queue_list
|| !msg_queue_obj
)
1299 (struct msg_queue
*)lst_next(hmsg_mgr
->queue_list
,
1300 (struct list_head
*)
1304 /* Set the post SWI flag */
1306 /* Tell the DSP we've read the messages */
1307 msg_ctr_obj
->buf_empty
= true;
1308 msg_ctr_obj
->post_swi
= true;
1309 sm_interrupt_dsp(pio_mgr
->hbridge_context
, MBX_PCPY_CLASS
);
1316 * ======== notify_chnl_complete ========
1318 * Signal the channel event, notifying the client that I/O has completed.
1320 static void notify_chnl_complete(struct chnl_object
*pchnl
,
1321 struct chnl_irp
*chnl_packet_obj
)
1325 if (!pchnl
|| !pchnl
->sync_event
||
1326 !pchnl
->pio_completions
|| !chnl_packet_obj
)
1330 * Note: we signal the channel event only if the queue of IO
1331 * completions is empty. If it is not empty, the event is sure to be
1332 * signalled by the only IO completion list consumer:
1333 * bridge_chnl_get_ioc().
1335 signal_event
= LST_IS_EMPTY(pchnl
->pio_completions
);
1336 /* Enqueue the IO completion info for the client */
1337 lst_put_tail(pchnl
->pio_completions
,
1338 (struct list_head
*)chnl_packet_obj
);
1341 if (pchnl
->cio_cs
> pchnl
->chnl_packets
)
1343 /* Signal the channel event (if not already set) that IO is complete */
1345 sync_set_event(pchnl
->sync_event
);
1347 /* Notify that IO is complete */
1348 ntfy_notify(pchnl
->ntfy_obj
, DSP_STREAMIOCOMPLETION
);
1354 * ======== output_chnl ========
1356 * Dispatch a buffer on an output channel.
1358 static void output_chnl(struct io_mgr
*pio_mgr
, struct chnl_object
*pchnl
,
1361 struct chnl_mgr
*chnl_mgr_obj
;
1364 struct chnl_irp
*chnl_packet_obj
;
1367 chnl_mgr_obj
= pio_mgr
->hchnl_mgr
;
1368 sm
= pio_mgr
->shared_mem
;
1369 /* Attempt to perform output */
1370 if (sm
->output_full
)
1373 if (pchnl
&& !((pchnl
->dw_state
& ~CHNL_STATEEOS
) == CHNL_STATEREADY
))
1376 /* Look to see if both a PC and DSP output channel are ready */
1377 dw_dsp_f_mask
= sm
->dsp_free_mask
;
1379 find_ready_output(chnl_mgr_obj
, pchnl
,
1380 (chnl_mgr_obj
->dw_output_mask
& dw_dsp_f_mask
));
1381 if (chnl_id
== OUTPUTNOTREADY
)
1384 pchnl
= chnl_mgr_obj
->ap_channel
[chnl_id
];
1385 if (!pchnl
|| !pchnl
->pio_requests
) {
1386 /* Shouldn't get here */
1389 /* Get the I/O request, and attempt a transfer */
1390 chnl_packet_obj
= (struct chnl_irp
*)lst_get_head(pchnl
->pio_requests
);
1391 if (!chnl_packet_obj
)
1395 if (pchnl
->cio_reqs
< 0 || !pchnl
->pio_requests
)
1398 /* Record fact that no more I/O buffers available */
1399 if (LST_IS_EMPTY(pchnl
->pio_requests
))
1400 chnl_mgr_obj
->dw_output_mask
&= ~(1 << chnl_id
);
1402 /* Transfer buffer to DSP side */
1403 chnl_packet_obj
->byte_size
= min(pio_mgr
->usm_buf_size
,
1404 chnl_packet_obj
->byte_size
);
1405 memcpy(pio_mgr
->output
, chnl_packet_obj
->host_sys_buf
,
1406 chnl_packet_obj
->byte_size
);
1407 pchnl
->bytes_moved
+= chnl_packet_obj
->byte_size
;
1408 /* Write all 32 bits of arg */
1409 sm
->arg
= chnl_packet_obj
->dw_arg
;
1410 #if _CHNL_WORDSIZE == 2
1411 /* Access can be different SM access word size (e.g. 16/32 bit words) */
1412 sm
->output_id
= (u16
) chnl_id
;
1413 sm
->output_size
= (u16
) (chnl_packet_obj
->byte_size
+
1414 chnl_mgr_obj
->word_size
- 1) /
1415 (u16
) chnl_mgr_obj
->word_size
;
1417 sm
->output_id
= chnl_id
;
1418 sm
->output_size
= (chnl_packet_obj
->byte_size
+
1419 chnl_mgr_obj
->word_size
- 1) / chnl_mgr_obj
->word_size
;
1421 sm
->output_full
= 1;
1422 /* Indicate to the DSP we have written the output */
1423 sm_interrupt_dsp(pio_mgr
->hbridge_context
, MBX_PCPY_CLASS
);
1424 /* Notify client with IO completion record (keep EOS) */
1425 chnl_packet_obj
->status
&= CHNL_IOCSTATEOS
;
1426 notify_chnl_complete(pchnl
, chnl_packet_obj
);
1427 /* Notify if stream is done. */
1428 if (chnl_packet_obj
->status
& CHNL_IOCSTATEOS
)
1429 ntfy_notify(pchnl
->ntfy_obj
, DSP_STREAMDONE
);
1436 * ======== output_msg ========
1437 * Copies messages from the message queues to the shared memory.
1439 static void output_msg(struct io_mgr
*pio_mgr
, struct msg_mgr
*hmsg_mgr
)
1444 struct msg_frame
*pmsg
;
1445 struct msg_ctrl
*msg_ctr_obj
;
1450 msg_ctr_obj
= pio_mgr
->msg_output_ctrl
;
1452 /* Check if output has been cleared */
1453 output_empty
= msg_ctr_obj
->buf_empty
;
1455 num_msgs
= (hmsg_mgr
->msgs_pending
> hmsg_mgr
->max_msgs
) ?
1456 hmsg_mgr
->max_msgs
: hmsg_mgr
->msgs_pending
;
1457 msg_output
= pio_mgr
->msg_output
;
1458 /* Copy num_msgs messages into shared memory */
1459 for (i
= 0; i
< num_msgs
; i
++) {
1460 if (!hmsg_mgr
->msg_used_list
) {
1464 pmsg
= (struct msg_frame
*)
1465 lst_get_head(hmsg_mgr
->msg_used_list
);
1468 val
= (pmsg
->msg_data
).msgq_id
;
1469 addr
= (u32
) &(((struct msg_dspmsg
*)
1470 msg_output
)->msgq_id
);
1471 write_ext32_bit_dsp_data(
1472 pio_mgr
->hbridge_context
, addr
, val
);
1473 val
= (pmsg
->msg_data
).msg
.dw_cmd
;
1474 addr
= (u32
) &((((struct msg_dspmsg
*)
1475 msg_output
)->msg
).dw_cmd
);
1476 write_ext32_bit_dsp_data(
1477 pio_mgr
->hbridge_context
, addr
, val
);
1478 val
= (pmsg
->msg_data
).msg
.dw_arg1
;
1479 addr
= (u32
) &((((struct msg_dspmsg
*)
1480 msg_output
)->msg
).dw_arg1
);
1481 write_ext32_bit_dsp_data(
1482 pio_mgr
->hbridge_context
, addr
, val
);
1483 val
= (pmsg
->msg_data
).msg
.dw_arg2
;
1484 addr
= (u32
) &((((struct msg_dspmsg
*)
1485 msg_output
)->msg
).dw_arg2
);
1486 write_ext32_bit_dsp_data(
1487 pio_mgr
->hbridge_context
, addr
, val
);
1488 msg_output
+= sizeof(struct msg_dspmsg
);
1489 if (!hmsg_mgr
->msg_free_list
)
1491 lst_put_tail(hmsg_mgr
->msg_free_list
,
1492 (struct list_head
*)pmsg
);
1493 sync_set_event(hmsg_mgr
->sync_event
);
1498 hmsg_mgr
->msgs_pending
-= num_msgs
;
1499 #if _CHNL_WORDSIZE == 2
1501 * Access can be different SM access word size
1502 * (e.g. 16/32 bit words)
1504 msg_ctr_obj
->size
= (u16
) num_msgs
;
1506 msg_ctr_obj
->size
= num_msgs
;
1508 msg_ctr_obj
->buf_empty
= false;
1509 /* Set the post SWI flag */
1510 msg_ctr_obj
->post_swi
= true;
1511 /* Tell the DSP we have written the output. */
1512 sm_interrupt_dsp(pio_mgr
->hbridge_context
,
1521 * ======== register_shm_segs ========
1523 * Registers GPP SM segment with CMM.
1525 static int register_shm_segs(struct io_mgr
*hio_mgr
,
1526 struct cod_manager
*cod_man
,
1530 u32 ul_shm0_base
= 0;
1532 u32 ul_shm0_rsrvd_start
= 0;
1533 u32 ul_rsrvd_size
= 0;
1536 u32 ul_shm_seg_id0
= 0;
1537 u32 dw_offset
, dw_gpp_base_va
, ul_dsp_size
;
1540 * Read address and size info for first SM region.
1541 * Get start of 1st SM Heap region.
1544 cod_get_sym_value(cod_man
, SHM0_SHARED_BASE_SYM
, &ul_shm0_base
);
1545 if (ul_shm0_base
== 0) {
1549 /* Get end of 1st SM Heap region */
1551 /* Get start and length of message part of shared memory */
1552 status
= cod_get_sym_value(cod_man
, SHM0_SHARED_END_SYM
,
1554 if (shm0_end
== 0) {
1559 /* Start of Gpp reserved region */
1561 /* Get start and length of message part of shared memory */
1563 cod_get_sym_value(cod_man
, SHM0_SHARED_RESERVED_BASE_SYM
,
1564 &ul_shm0_rsrvd_start
);
1565 if (ul_shm0_rsrvd_start
== 0) {
1570 /* Register with CMM */
1572 status
= dev_get_cmm_mgr(hio_mgr
->hdev_obj
, &hio_mgr
->hcmm_mgr
);
1574 status
= cmm_un_register_gppsm_seg(hio_mgr
->hcmm_mgr
,
1578 /* Register new SM region(s) */
1579 if (!status
&& (shm0_end
- ul_shm0_base
) > 0) {
1580 /* Calc size (bytes) of SM the GPP can alloc from */
1582 (shm0_end
- ul_shm0_rsrvd_start
+ 1) * hio_mgr
->word_size
;
1583 if (ul_rsrvd_size
<= 0) {
1587 /* Calc size of SM DSP can alloc from */
1589 (ul_shm0_rsrvd_start
- ul_shm0_base
) * hio_mgr
->word_size
;
1590 if (ul_dsp_size
<= 0) {
1594 /* First TLB entry reserved for Bridge SM use. */
1595 ul_gpp_phys
= hio_mgr
->ext_proc_info
.ty_tlb
[0].ul_gpp_phys
;
1596 /* Get size in bytes */
1598 hio_mgr
->ext_proc_info
.ty_tlb
[0].ul_dsp_virt
*
1601 * Calc byte offset used to convert GPP phys <-> DSP byte
1604 if (dw_gpp_base_pa
> ul_dsp_virt
)
1605 dw_offset
= dw_gpp_base_pa
- ul_dsp_virt
;
1607 dw_offset
= ul_dsp_virt
- dw_gpp_base_pa
;
1609 if (ul_shm0_rsrvd_start
* hio_mgr
->word_size
< ul_dsp_virt
) {
1614 * Calc Gpp phys base of SM region.
1615 * This is actually uncached kernel virtual address.
1618 ul_gpp_phys
+ ul_shm0_rsrvd_start
* hio_mgr
->word_size
-
1621 * Calc Gpp phys base of SM region.
1622 * This is the physical address.
1625 dw_gpp_base_pa
+ ul_shm0_rsrvd_start
* hio_mgr
->word_size
-
1627 /* Register SM Segment 0. */
1629 cmm_register_gppsm_seg(hio_mgr
->hcmm_mgr
, dw_gpp_base_pa
,
1630 ul_rsrvd_size
, dw_offset
,
1632 ul_dsp_virt
) ? CMM_ADDTODSPPA
:
1634 (u32
) (ul_shm0_base
*
1635 hio_mgr
->word_size
),
1636 ul_dsp_size
, &ul_shm_seg_id0
,
1638 /* First SM region is seg_id = 1 */
1639 if (ul_shm_seg_id0
!= 1)
1646 /* ZCPY IO routines. */
1648 * ======== IO_SHMcontrol ========
1649 * Sets the requested shm setting.
1651 int io_sh_msetting(struct io_mgr
*hio_mgr
, u8 desc
, void *pargs
)
1653 #ifdef CONFIG_TIDSPBRIDGE_DVFS
1655 struct dspbridge_platform_data
*pdata
=
1656 omap_dspbridge_dev
->dev
.platform_data
;
1660 /* Update the shared memory with requested OPP information */
1662 hio_mgr
->shared_mem
->opp_table_struct
.curr_opp_pt
=
1669 * Update the shared memory with the voltage, frequency,
1670 * min and max frequency values for an OPP.
1672 for (i
= 0; i
<= dsp_max_opps
; i
++) {
1673 hio_mgr
->shared_mem
->opp_table_struct
.opp_point
[i
].
1674 voltage
= vdd1_dsp_freq
[i
][0];
1675 dev_dbg(bridge
, "OPP-shm: voltage: %d\n",
1676 vdd1_dsp_freq
[i
][0]);
1677 hio_mgr
->shared_mem
->opp_table_struct
.
1678 opp_point
[i
].frequency
= vdd1_dsp_freq
[i
][1];
1679 dev_dbg(bridge
, "OPP-shm: frequency: %d\n",
1680 vdd1_dsp_freq
[i
][1]);
1681 hio_mgr
->shared_mem
->opp_table_struct
.opp_point
[i
].
1682 min_freq
= vdd1_dsp_freq
[i
][2];
1683 dev_dbg(bridge
, "OPP-shm: min freq: %d\n",
1684 vdd1_dsp_freq
[i
][2]);
1685 hio_mgr
->shared_mem
->opp_table_struct
.opp_point
[i
].
1686 max_freq
= vdd1_dsp_freq
[i
][3];
1687 dev_dbg(bridge
, "OPP-shm: max freq: %d\n",
1688 vdd1_dsp_freq
[i
][3]);
1690 hio_mgr
->shared_mem
->opp_table_struct
.num_opp_pts
=
1692 dev_dbg(bridge
, "OPP-shm: max OPP number: %d\n", dsp_max_opps
);
1693 /* Update the current OPP number */
1694 if (pdata
->dsp_get_opp
)
1695 i
= (*pdata
->dsp_get_opp
) ();
1696 hio_mgr
->shared_mem
->opp_table_struct
.curr_opp_pt
= i
;
1697 dev_dbg(bridge
, "OPP-shm: value programmed = %d\n", i
);
1700 /* Get the OPP that DSP has requested */
1701 *(u32
*) pargs
= hio_mgr
->shared_mem
->opp_request
.rqst_opp_pt
;
1711 * ======== bridge_io_get_proc_load ========
1712 * Gets the Processor's Load information
1714 int bridge_io_get_proc_load(struct io_mgr
*hio_mgr
,
1715 struct dsp_procloadstat
*proc_lstat
)
1717 proc_lstat
->curr_load
=
1718 hio_mgr
->shared_mem
->load_mon_info
.curr_dsp_load
;
1719 proc_lstat
->predicted_load
=
1720 hio_mgr
->shared_mem
->load_mon_info
.pred_dsp_load
;
1721 proc_lstat
->curr_dsp_freq
=
1722 hio_mgr
->shared_mem
->load_mon_info
.curr_dsp_freq
;
1723 proc_lstat
->predicted_freq
=
1724 hio_mgr
->shared_mem
->load_mon_info
.pred_dsp_freq
;
1726 dev_dbg(bridge
, "Curr Load = %d, Pred Load = %d, Curr Freq = %d, "
1727 "Pred Freq = %d\n", proc_lstat
->curr_load
,
1728 proc_lstat
->predicted_load
, proc_lstat
->curr_dsp_freq
,
1729 proc_lstat
->predicted_freq
);
1733 void io_sm_init(void)
#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
/*
 *  ======== print_dsp_debug_trace ========
 *      Drains and prints all new DSP trace messages: repeatedly reads the
 *      DSP's current write pointer, copies any unread bytes (handling
 *      circular-buffer wraparound) into hio_mgr->pmsg, NUL-terminates the
 *      chunk and prints it via pr_info, advancing the GPP read pointer.
 *  Note: reconstructed from a fragmentary source dump -- the enclosing
 *  while (true) loop frame was inferred; verify against upstream io_sm.c.
 */
void print_dsp_debug_trace(struct io_mgr *hio_mgr)
{
	u32 ul_new_message_length = 0, ul_gpp_cur_pointer;

	while (true) {
		/* Get the DSP current pointer */
		ul_gpp_cur_pointer =
		    *(u32 *) (hio_mgr->ul_trace_buffer_current);
		/* Translate the DSP virtual address to a GPP address */
		ul_gpp_cur_pointer =
		    hio_mgr->ul_gpp_va + (ul_gpp_cur_pointer -
					  hio_mgr->ul_dsp_va);

		/* No new debug messages available yet */
		if (ul_gpp_cur_pointer == hio_mgr->ul_gpp_read_pointer) {
			break;
		} else if (ul_gpp_cur_pointer > hio_mgr->ul_gpp_read_pointer) {
			/* Continuous data */
			ul_new_message_length =
			    ul_gpp_cur_pointer - hio_mgr->ul_gpp_read_pointer;

			memcpy(hio_mgr->pmsg,
			       (char *)hio_mgr->ul_gpp_read_pointer,
			       ul_new_message_length);
			hio_mgr->pmsg[ul_new_message_length] = '\0';
			/*
			 * Advance the GPP trace pointer to DSP current
			 * pointer.
			 */
			hio_mgr->ul_gpp_read_pointer += ul_new_message_length;
			/* Print the trace messages */
			pr_info("DSPTrace: %s\n", hio_mgr->pmsg);
		} else if (ul_gpp_cur_pointer < hio_mgr->ul_gpp_read_pointer) {
			/* Handle trace buffer wraparound */
			memcpy(hio_mgr->pmsg,
			       (char *)hio_mgr->ul_gpp_read_pointer,
			       hio_mgr->ul_trace_buffer_end -
			       hio_mgr->ul_gpp_read_pointer);
			ul_new_message_length =
			    ul_gpp_cur_pointer -
			    hio_mgr->ul_trace_buffer_begin;
			memcpy(&hio_mgr->pmsg[hio_mgr->ul_trace_buffer_end -
					      hio_mgr->ul_gpp_read_pointer],
			       (char *)hio_mgr->ul_trace_buffer_begin,
			       ul_new_message_length);
			hio_mgr->pmsg[hio_mgr->ul_trace_buffer_end -
				      hio_mgr->ul_gpp_read_pointer +
				      ul_new_message_length] = '\0';
			/*
			 * Advance the GPP trace pointer to DSP current
			 * pointer.
			 */
			hio_mgr->ul_gpp_read_pointer =
			    hio_mgr->ul_trace_buffer_begin +
			    ul_new_message_length;
			/* Print the trace messages */
			pr_info("DSPTrace: %s\n", hio_mgr->pmsg);
		}
	}
}
#endif
1799 #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
1801 * ======== print_dsp_trace_buffer ========
1802 * Prints the trace buffer returned from the DSP (if DBG_Trace is enabled).
1804 * hdeh_mgr: Handle to DEH manager object
1805 * number of extra carriage returns to generate.
1808 * -ENOMEM: Unable to allocate memory.
1810 * hdeh_mgr muse be valid. Checked in bridge_deh_notify.
1812 int print_dsp_trace_buffer(struct bridge_dev_context
*hbridge_context
)
1815 struct cod_manager
*cod_mgr
;
1819 u32 ul_num_bytes
= 0;
1820 u32 ul_num_words
= 0;
1821 u32 ul_word_size
= 2;
1828 struct bridge_dev_context
*pbridge_context
= hbridge_context
;
1829 struct bridge_drv_interface
*intf_fxns
;
1830 struct dev_object
*dev_obj
= (struct dev_object
*)
1831 pbridge_context
->hdev_obj
;
1833 status
= dev_get_cod_mgr(dev_obj
, &cod_mgr
);
1836 /* Look for SYS_PUTCBEG/SYS_PUTCEND */
1838 cod_get_sym_value(cod_mgr
, COD_TRACEBEG
, &ul_trace_begin
);
1844 cod_get_sym_value(cod_mgr
, COD_TRACEEND
, &ul_trace_end
);
1847 /* trace_cur_pos will hold the address of a DSP pointer */
1848 status
= cod_get_sym_value(cod_mgr
, COD_TRACECURPOS
,
1854 ul_num_bytes
= (ul_trace_end
- ul_trace_begin
);
1856 ul_num_words
= ul_num_bytes
* ul_word_size
;
1857 status
= dev_get_intf_fxns(dev_obj
, &intf_fxns
);
1862 psz_buf
= kzalloc(ul_num_bytes
+ 2, GFP_ATOMIC
);
1863 if (psz_buf
!= NULL
) {
1864 /* Read trace buffer data */
1865 status
= (*intf_fxns
->pfn_brd_read
)(pbridge_context
,
1866 (u8
*)psz_buf
, (u32
)ul_trace_begin
,
1872 /* Pack and do newline conversion */
1873 pr_debug("PrintDspTraceBuffer: "
1874 "before pack and unpack.\n");
1875 pr_debug("%s: DSP Trace Buffer Begin:\n"
1876 "=======================\n%s\n",
1879 /* Read the value at the DSP address in trace_cur_pos. */
1880 status
= (*intf_fxns
->pfn_brd_read
)(pbridge_context
,
1881 (u8
*)&trace_cur_pos
, (u32
)trace_cur_pos
,
1885 /* Pack and do newline conversion */
1886 pr_info("DSP Trace Buffer Begin:\n"
1887 "=======================\n%s\n",
1891 /* convert to offset */
1892 trace_cur_pos
= trace_cur_pos
- ul_trace_begin
;
1896 * The buffer is not full, find the end of the
1897 * data -- buf_end will be >= pszBuf after
1900 buf_end
= &psz_buf
[ul_num_bytes
+1];
1901 /* DSP print position */
1902 trace_end
= &psz_buf
[trace_cur_pos
];
1905 * Search buffer for a new_line and replace it
1906 * with '\0', then print as string.
1907 * Continue until end of buffer is reached.
1909 str_beg
= trace_end
;
1910 ul_num_bytes
= buf_end
- str_beg
;
1912 while (str_beg
< buf_end
) {
1913 new_line
= strnchr(str_beg
, ul_num_bytes
,
1915 if (new_line
&& new_line
< buf_end
) {
1917 pr_debug("%s\n", str_beg
);
1918 str_beg
= ++new_line
;
1919 ul_num_bytes
= buf_end
- str_beg
;
1922 * Assume buffer empty if it contains
1925 if (*str_beg
!= '\0') {
1926 str_beg
[ul_num_bytes
] = 0;
1927 pr_debug("%s\n", str_beg
);
1934 * Search buffer for a nNewLine and replace it
1935 * with '\0', then print as string.
1936 * Continue until buffer is exhausted.
1939 ul_num_bytes
= trace_end
- str_beg
;
1941 while (str_beg
< trace_end
) {
1942 new_line
= strnchr(str_beg
, ul_num_bytes
, '\n');
1943 if (new_line
!= NULL
&& new_line
< trace_end
) {
1945 pr_debug("%s\n", str_beg
);
1946 str_beg
= ++new_line
;
1947 ul_num_bytes
= trace_end
- str_beg
;
1950 * Assume buffer empty if it contains
1953 if (*str_beg
!= '\0') {
1954 str_beg
[ul_num_bytes
] = 0;
1955 pr_debug("%s\n", str_beg
);
1957 str_beg
= trace_end
;
1962 pr_info("\n=======================\n"
1963 "DSP Trace Buffer End:\n");
1970 dev_dbg(bridge
, "%s Failed, status 0x%x\n", __func__
, status
);
1975 * dump_dsp_stack() - This function dumps the data on the DSP stack.
1976 * @bridge_context: Bridge driver's device context pointer.
1979 int dump_dsp_stack(struct bridge_dev_context
*bridge_context
)
1982 struct cod_manager
*code_mgr
;
1983 struct node_mgr
*node_mgr
;
1989 } mmu_fault_dbg_info
;
1999 const char *dsp_regs
[] = {"EFR", "IERR", "ITSR", "NTSR",
2000 "IRP", "NRP", "AMR", "SSR",
2001 "ILC", "RILC", "IER", "CSR"};
2002 const char *exec_ctxt
[] = {"Task", "SWI", "HWI", "Unknown"};
2003 struct bridge_drv_interface
*intf_fxns
;
2004 struct dev_object
*dev_object
= bridge_context
->hdev_obj
;
2006 status
= dev_get_cod_mgr(dev_object
, &code_mgr
);
2008 pr_debug("%s: Failed on dev_get_cod_mgr.\n", __func__
);
2013 status
= dev_get_node_manager(dev_object
, &node_mgr
);
2015 pr_debug("%s: Failed on dev_get_node_manager.\n",
2022 /* Look for SYS_PUTCBEG/SYS_PUTCEND: */
2024 cod_get_sym_value(code_mgr
, COD_TRACEBEG
, &trace_begin
);
2025 pr_debug("%s: trace_begin Value 0x%x\n",
2026 __func__
, trace_begin
);
2028 pr_debug("%s: Failed on cod_get_sym_value.\n",
2032 status
= dev_get_intf_fxns(dev_object
, &intf_fxns
);
2034 * Check for the "magic number" in the trace buffer. If it has
2035 * yet to appear then poll the trace buffer to wait for it. Its
2036 * appearance signals that the DSP has finished dumping its state.
2038 mmu_fault_dbg_info
.head
[0] = 0;
2039 mmu_fault_dbg_info
.head
[1] = 0;
2042 while ((mmu_fault_dbg_info
.head
[0] != MMU_FAULT_HEAD1
||
2043 mmu_fault_dbg_info
.head
[1] != MMU_FAULT_HEAD2
) &&
2044 poll_cnt
< POLL_MAX
) {
2046 /* Read DSP dump size from the DSP trace buffer... */
2047 status
= (*intf_fxns
->pfn_brd_read
)(bridge_context
,
2048 (u8
*)&mmu_fault_dbg_info
, (u32
)trace_begin
,
2049 sizeof(mmu_fault_dbg_info
), 0);
2057 if (mmu_fault_dbg_info
.head
[0] != MMU_FAULT_HEAD1
&&
2058 mmu_fault_dbg_info
.head
[1] != MMU_FAULT_HEAD2
) {
2060 pr_err("%s:No DSP MMU-Fault information available.\n",
2066 total_size
= mmu_fault_dbg_info
.size
;
2067 /* Limit the size in case DSP went crazy */
2068 if (total_size
> MAX_MMU_DBGBUFF
)
2069 total_size
= MAX_MMU_DBGBUFF
;
2071 buffer
= kzalloc(total_size
, GFP_ATOMIC
);
2074 pr_debug("%s: Failed to "
2075 "allocate stack dump buffer.\n", __func__
);
2079 buffer_beg
= buffer
;
2080 buffer_end
= buffer
+ total_size
/ 4;
2082 /* Read bytes from the DSP trace buffer... */
2083 status
= (*intf_fxns
->pfn_brd_read
)(bridge_context
,
2084 (u8
*)buffer
, (u32
)trace_begin
,
2087 pr_debug("%s: Failed to Read Trace Buffer.\n",
2092 pr_err("\nAproximate Crash Position:\n"
2093 "--------------------------\n");
2095 exc_type
= buffer
[3];
2097 i
= buffer
[79]; /* IRP */
2099 i
= buffer
[80]; /* NRP */
2102 cod_get_sym_value(code_mgr
, DYNEXTBASE
, &dyn_ext_base
);
2108 if ((i
> dyn_ext_base
) && (node_find_addr(node_mgr
, i
,
2109 0x1000, &offset_output
, name
) == 0))
2110 pr_err("0x%-8x [\"%s\" + 0x%x]\n", i
, name
,
2113 pr_err("0x%-8x [Unable to match to a symbol.]\n", i
);
2117 pr_err("\nExecution Info:\n"
2118 "---------------\n");
2120 if (*buffer
< ARRAY_SIZE(exec_ctxt
)) {
2121 pr_err("Execution context \t%s\n",
2122 exec_ctxt
[*buffer
++]);
2124 pr_err("Execution context corrupt\n");
2128 pr_err("Task Handle\t\t0x%x\n", *buffer
++);
2129 pr_err("Stack Pointer\t\t0x%x\n", *buffer
++);
2130 pr_err("Stack Top\t\t0x%x\n", *buffer
++);
2131 pr_err("Stack Bottom\t\t0x%x\n", *buffer
++);
2132 pr_err("Stack Size\t\t0x%x\n", *buffer
++);
2133 pr_err("Stack Size In Use\t0x%x\n", *buffer
++);
2135 pr_err("\nCPU Registers\n"
2136 "---------------\n");
2138 for (i
= 0; i
< 32; i
++) {
2139 if (i
== 4 || i
== 6 || i
== 8)
2140 pr_err("A%d 0x%-8x [Function Argument %d]\n",
2143 pr_err("A15 0x%-8x [Frame Pointer]\n",
2146 pr_err("A%d 0x%x\n", i
, *buffer
++);
2149 pr_err("\nB0 0x%x\n", *buffer
++);
2150 pr_err("B1 0x%x\n", *buffer
++);
2151 pr_err("B2 0x%x\n", *buffer
++);
2153 if ((*buffer
> dyn_ext_base
) && (node_find_addr(node_mgr
,
2154 *buffer
, 0x1000, &offset_output
, name
) == 0))
2156 pr_err("B3 0x%-8x [Function Return Pointer:"
2157 " \"%s\" + 0x%x]\n", *buffer
, name
,
2158 *buffer
- offset_output
);
2160 pr_err("B3 0x%-8x [Function Return Pointer:"
2161 "Unable to match to a symbol.]\n", *buffer
);
2165 for (i
= 4; i
< 32; i
++) {
2166 if (i
== 4 || i
== 6 || i
== 8)
2167 pr_err("B%d 0x%-8x [Function Argument %d]\n",
2170 pr_err("B14 0x%-8x [Data Page Pointer]\n",
2173 pr_err("B%d 0x%x\n", i
, *buffer
++);
2178 for (i
= 0; i
< ARRAY_SIZE(dsp_regs
); i
++)
2179 pr_err("%s 0x%x\n", dsp_regs
[i
], *buffer
++);
2184 for (i
= 0; buffer
< buffer_end
; i
++, buffer
++) {
2185 if ((*buffer
> dyn_ext_base
) && (
2186 node_find_addr(node_mgr
, *buffer
, 0x600,
2187 &offset_output
, name
) == 0))
2188 pr_err("[%d] 0x%-8x [\"%s\" + 0x%x]\n",
2190 *buffer
- offset_output
);
2192 pr_err("[%d] 0x%x\n", i
, *buffer
);
2201 * dump_dl_modules() - This functions dumps the _DLModules loaded in DSP side
2202 * @bridge_context: Bridge driver's device context pointer.
2205 void dump_dl_modules(struct bridge_dev_context
*bridge_context
)
2207 struct cod_manager
*code_mgr
;
2208 struct bridge_drv_interface
*intf_fxns
;
2209 struct bridge_dev_context
*bridge_ctxt
= bridge_context
;
2210 struct dev_object
*dev_object
= bridge_ctxt
->hdev_obj
;
2211 struct modules_header modules_hdr
;
2212 struct dll_module
*module_struct
= NULL
;
2213 u32 module_dsp_addr
;
2215 u32 module_struct_size
= 0;
2220 status
= dev_get_intf_fxns(dev_object
, &intf_fxns
);
2222 pr_debug("%s: Failed on dev_get_intf_fxns.\n", __func__
);
2226 status
= dev_get_cod_mgr(dev_object
, &code_mgr
);
2228 pr_debug("%s: Failed on dev_get_cod_mgr.\n", __func__
);
2233 /* Lookup the address of the modules_header structure */
2234 status
= cod_get_sym_value(code_mgr
, "_DLModules", &module_dsp_addr
);
2236 pr_debug("%s: Failed on cod_get_sym_value for _DLModules.\n",
2241 pr_debug("%s: _DLModules at 0x%x\n", __func__
, module_dsp_addr
);
2243 /* Copy the modules_header structure from DSP memory. */
2244 status
= (*intf_fxns
->pfn_brd_read
)(bridge_context
, (u8
*) &modules_hdr
,
2245 (u32
) module_dsp_addr
, sizeof(modules_hdr
), 0);
2248 pr_debug("%s: Failed failed to read modules header.\n",
2253 module_dsp_addr
= modules_hdr
.first_module
;
2254 module_size
= modules_hdr
.first_module_size
;
2256 pr_debug("%s: dll_module_header 0x%x %d\n", __func__
, module_dsp_addr
,
2259 pr_err("\nDynamically Loaded Modules:\n"
2260 "---------------------------\n");
2262 /* For each dll_module structure in the list... */
2263 while (module_size
) {
2265 * Allocate/re-allocate memory to hold the dll_module
2266 * structure. The memory is re-allocated only if the existing
2267 * allocation is too small.
2269 if (module_size
> module_struct_size
) {
2270 kfree(module_struct
);
2271 module_struct
= kzalloc(module_size
+128, GFP_ATOMIC
);
2272 module_struct_size
= module_size
+128;
2273 pr_debug("%s: allocated module struct %p %d\n",
2274 __func__
, module_struct
, module_struct_size
);
2278 /* Copy the dll_module structure from DSP memory */
2279 status
= (*intf_fxns
->pfn_brd_read
)(bridge_context
,
2280 (u8
*)module_struct
, module_dsp_addr
, module_size
, 0);
2284 "%s: Failed to read dll_module stuct for 0x%x.\n",
2285 __func__
, module_dsp_addr
);
2289 /* Update info regarding the _next_ module in the list. */
2290 module_dsp_addr
= module_struct
->next_module
;
2291 module_size
= module_struct
->next_module_size
;
2293 pr_debug("%s: next module 0x%x %d, this module num sects %d\n",
2294 __func__
, module_dsp_addr
, module_size
,
2295 module_struct
->num_sects
);
2298 * The section name strings start immedialty following
2299 * the array of dll_sect structures.
2301 sect_str
= (char *) &module_struct
->
2302 sects
[module_struct
->num_sects
];
2303 pr_err("%s\n", sect_str
);
2306 * Advance to the first section name string.
2307 * Each string follows the one before.
2309 sect_str
+= strlen(sect_str
) + 1;
2311 /* Access each dll_sect structure and its name string. */
2313 sect_ndx
< module_struct
->num_sects
; sect_ndx
++) {
2314 pr_err(" Section: 0x%x ",
2315 module_struct
->sects
[sect_ndx
].sect_load_adr
);
2317 if (((u32
) sect_str
- (u32
) module_struct
) <
2318 module_struct_size
) {
2319 pr_err("%s\n", sect_str
);
2320 /* Each string follows the one before. */
2321 sect_str
+= strlen(sect_str
)+1;
2323 pr_err("<string error>\n");
2324 pr_debug("%s: section name sting address "
2325 "is invalid %p\n", __func__
, sect_str
);
2330 kfree(module_struct
);