/*
 * staging: ti dspbridge: prefix configs with TIDSPBRIDGE
 * drivers/staging/tidspbridge/core/io_sm.c
 * blob e33abb42fc234c159c27e12fc3d5024544a1c79
 */
/*
 * io_sm.c
 *
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * IO dispatcher for a shared memory channel driver.
 *
 * Copyright (C) 2005-2006 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Channel Invariant:
 * There is an important invariant condition which must be maintained per
 * channel outside of bridge_chnl_get_ioc() and IO_Dispatch(), violation of
 * which may cause timeouts and/or failure of the sync_wait_on_event
 * function.
 */
/* Host OS */
#include <dspbridge/host_os.h>
#include <linux/workqueue.h>

/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/std.h>
#include <dspbridge/dbdefs.h>

/* Trace & Debug */
#include <dspbridge/dbc.h>

/* Services Layer */
#include <dspbridge/cfg.h>
#include <dspbridge/ntfy.h>
#include <dspbridge/sync.h>

/* Hardware Abstraction Layer */
#include <hw_defs.h>
#include <hw_mmu.h>

/* Bridge Driver */
#include <dspbridge/dspdeh.h>
#include <dspbridge/dspio.h>
#include <dspbridge/dspioctl.h>
#include <dspbridge/wdt.h>
#include <_tiomap.h>
#include <tiomap_io.h>
#include <_tiomap_pwr.h>

/* Platform Manager */
#include <dspbridge/cod.h>
#include <dspbridge/node.h>
#include <dspbridge/dev.h>

/* Others */
#include <dspbridge/rms_sh.h>
#include <dspbridge/mgr.h>
#include <dspbridge/drv.h>
#include "_cmm.h"
#include "module_list.h"

/* This */
#include <dspbridge/io_sm.h>
#include "_msg_sm.h"
/* Defines, Data Structures, Typedefs */
#define OUTPUTNOTREADY	0xffff
#define NOTENABLED	0xffff	/* Channel(s) not enabled */

#define EXTEND	"_EXT_END"	/* COFF symbol: end of extended memory */

#define SWAP_WORD(x)	(x)	/* No byte swap needed on this platform */
#define UL_PAGE_ALIGN_SIZE	0x10000	/* Page Align Size */

#define MAX_PM_REQS	32

#define MMU_FAULT_HEAD1	0xa5a5a5a5
#define MMU_FAULT_HEAD2	0x96969696
#define POLL_MAX	1000
#define MAX_MMU_DBGBUFF	10240
88 /* IO Manager: only one created per board */
89 struct io_mgr {
90 /* These four fields must be the first fields in a io_mgr_ struct */
91 /* Bridge device context */
92 struct bridge_dev_context *hbridge_context;
93 /* Function interface to Bridge driver */
94 struct bridge_drv_interface *intf_fxns;
95 struct dev_object *hdev_obj; /* Device this board represents */
97 /* These fields initialized in bridge_io_create() */
98 struct chnl_mgr *hchnl_mgr;
99 struct shm *shared_mem; /* Shared Memory control */
100 u8 *input; /* Address of input channel */
101 u8 *output; /* Address of output channel */
102 struct msg_mgr *hmsg_mgr; /* Message manager */
103 /* Msg control for from DSP messages */
104 struct msg_ctrl *msg_input_ctrl;
105 /* Msg control for to DSP messages */
106 struct msg_ctrl *msg_output_ctrl;
107 u8 *msg_input; /* Address of input messages */
108 u8 *msg_output; /* Address of output messages */
109 u32 usm_buf_size; /* Size of a shared memory I/O channel */
110 bool shared_irq; /* Is this IRQ shared? */
111 u32 word_size; /* Size in bytes of DSP word */
112 u16 intr_val; /* Interrupt value */
113 /* Private extnd proc info; mmu setup */
114 struct mgr_processorextinfo ext_proc_info;
115 struct cmm_object *hcmm_mgr; /* Shared Mem Mngr */
116 struct work_struct io_workq; /* workqueue */
117 #ifndef DSP_TRACEBUF_DISABLED
118 u32 ul_trace_buffer_begin; /* Trace message start address */
119 u32 ul_trace_buffer_end; /* Trace message end address */
120 u32 ul_trace_buffer_current; /* Trace message current address */
121 u32 ul_gpp_read_pointer; /* GPP Read pointer to Trace buffer */
122 u8 *pmsg;
123 u32 ul_gpp_va;
124 u32 ul_dsp_va;
125 #endif
126 /* IO Dpc */
127 u32 dpc_req; /* Number of requested DPC's. */
128 u32 dpc_sched; /* Number of executed DPC's. */
129 struct tasklet_struct dpc_tasklet;
130 spinlock_t dpc_lock;
134 /* Function Prototypes */
135 static void io_dispatch_chnl(IN struct io_mgr *pio_mgr,
136 IN OUT struct chnl_object *pchnl, u8 iMode);
137 static void io_dispatch_msg(IN struct io_mgr *pio_mgr,
138 struct msg_mgr *hmsg_mgr);
139 static void io_dispatch_pm(struct io_mgr *pio_mgr);
140 static void notify_chnl_complete(struct chnl_object *pchnl,
141 struct chnl_irp *chnl_packet_obj);
142 static void input_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
143 u8 iMode);
144 static void output_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
145 u8 iMode);
146 static void input_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr);
147 static void output_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr);
148 static u32 find_ready_output(struct chnl_mgr *chnl_mgr_obj,
149 struct chnl_object *pchnl, u32 dwMask);
150 static u32 read_data(struct bridge_dev_context *hDevContext, void *dest,
151 void *pSrc, u32 usize);
152 static u32 write_data(struct bridge_dev_context *hDevContext, void *dest,
153 void *pSrc, u32 usize);
155 /* Bus Addr (cached kernel) */
156 static int register_shm_segs(struct io_mgr *hio_mgr,
157 struct cod_manager *cod_man,
158 u32 dw_gpp_base_pa);
161 * ======== bridge_io_create ========
162 * Create an IO manager object.
164 int bridge_io_create(OUT struct io_mgr **phIOMgr,
165 struct dev_object *hdev_obj,
166 IN CONST struct io_attrs *pMgrAttrs)
168 int status = 0;
169 struct io_mgr *pio_mgr = NULL;
170 struct shm *shared_mem = NULL;
171 struct bridge_dev_context *hbridge_context = NULL;
172 struct cfg_devnode *dev_node_obj;
173 struct chnl_mgr *hchnl_mgr;
174 u8 dev_type;
176 /* Check requirements */
177 if (!phIOMgr || !pMgrAttrs || pMgrAttrs->word_size == 0) {
178 status = -EFAULT;
179 goto func_end;
181 dev_get_chnl_mgr(hdev_obj, &hchnl_mgr);
182 if (!hchnl_mgr || hchnl_mgr->hio_mgr) {
183 status = -EFAULT;
184 goto func_end;
187 * Message manager will be created when a file is loaded, since
188 * size of message buffer in shared memory is configurable in
189 * the base image.
191 dev_get_bridge_context(hdev_obj, &hbridge_context);
192 if (!hbridge_context) {
193 status = -EFAULT;
194 goto func_end;
196 dev_get_dev_type(hdev_obj, &dev_type);
198 * DSP shared memory area will get set properly when
199 * a program is loaded. They are unknown until a COFF file is
200 * loaded. I chose the value -1 because it was less likely to be
201 * a valid address than 0.
203 shared_mem = (struct shm *)-1;
205 /* Allocate IO manager object */
206 pio_mgr = kzalloc(sizeof(struct io_mgr), GFP_KERNEL);
207 if (pio_mgr == NULL) {
208 status = -ENOMEM;
209 goto func_end;
212 /* Initialize chnl_mgr object */
213 #ifndef DSP_TRACEBUF_DISABLED
214 pio_mgr->pmsg = NULL;
215 #endif
216 pio_mgr->hchnl_mgr = hchnl_mgr;
217 pio_mgr->word_size = pMgrAttrs->word_size;
218 pio_mgr->shared_mem = shared_mem;
220 if (dev_type == DSP_UNIT) {
221 /* Create an IO DPC */
222 tasklet_init(&pio_mgr->dpc_tasklet, io_dpc, (u32) pio_mgr);
224 /* Initialize DPC counters */
225 pio_mgr->dpc_req = 0;
226 pio_mgr->dpc_sched = 0;
228 spin_lock_init(&pio_mgr->dpc_lock);
230 if (DSP_SUCCEEDED(status))
231 status = dev_get_dev_node(hdev_obj, &dev_node_obj);
234 if (DSP_SUCCEEDED(status)) {
235 pio_mgr->hbridge_context = hbridge_context;
236 pio_mgr->shared_irq = pMgrAttrs->irq_shared;
237 if (dsp_wdt_init())
238 status = -EPERM;
239 } else {
240 status = -EIO;
242 func_end:
243 if (DSP_FAILED(status)) {
244 /* Cleanup */
245 bridge_io_destroy(pio_mgr);
246 if (phIOMgr)
247 *phIOMgr = NULL;
248 } else {
249 /* Return IO manager object to caller... */
250 hchnl_mgr->hio_mgr = pio_mgr;
251 *phIOMgr = pio_mgr;
253 return status;
257 * ======== bridge_io_destroy ========
258 * Purpose:
259 * Disable interrupts, destroy the IO manager.
261 int bridge_io_destroy(struct io_mgr *hio_mgr)
263 int status = 0;
264 if (hio_mgr) {
265 /* Free IO DPC object */
266 tasklet_kill(&hio_mgr->dpc_tasklet);
268 #ifndef DSP_TRACEBUF_DISABLED
269 kfree(hio_mgr->pmsg);
270 #endif
271 dsp_wdt_exit();
272 /* Free this IO manager object */
273 kfree(hio_mgr);
274 } else {
275 status = -EFAULT;
278 return status;
282 * ======== bridge_io_on_loaded ========
283 * Purpose:
284 * Called when a new program is loaded to get shared memory buffer
285 * parameters from COFF file. ulSharedBufferBase and ulSharedBufferLimit
286 * are in DSP address units.
288 int bridge_io_on_loaded(struct io_mgr *hio_mgr)
290 struct cod_manager *cod_man;
291 struct chnl_mgr *hchnl_mgr;
292 struct msg_mgr *hmsg_mgr;
293 u32 ul_shm_base;
294 u32 ul_shm_base_offset;
295 u32 ul_shm_limit;
296 u32 ul_shm_length = -1;
297 u32 ul_mem_length = -1;
298 u32 ul_msg_base;
299 u32 ul_msg_limit;
300 u32 ul_msg_length = -1;
301 u32 ul_ext_end;
302 u32 ul_gpp_pa = 0;
303 u32 ul_gpp_va = 0;
304 u32 ul_dsp_va = 0;
305 u32 ul_seg_size = 0;
306 u32 ul_pad_size = 0;
307 u32 i;
308 int status = 0;
309 u8 num_procs = 0;
310 s32 ndx = 0;
311 /* DSP MMU setup table */
312 struct bridge_ioctl_extproc ae_proc[BRDIOCTL_NUMOFMMUTLB];
313 struct cfg_hostres *host_res;
314 struct bridge_dev_context *pbridge_context;
315 u32 map_attrs;
316 u32 shm0_end;
317 u32 ul_dyn_ext_base;
318 u32 ul_seg1_size = 0;
319 u32 pa_curr = 0;
320 u32 va_curr = 0;
321 u32 gpp_va_curr = 0;
322 u32 num_bytes = 0;
323 u32 all_bits = 0;
324 u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB,
325 HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB
328 status = dev_get_bridge_context(hio_mgr->hdev_obj, &pbridge_context);
329 if (!pbridge_context) {
330 status = -EFAULT;
331 goto func_end;
334 host_res = pbridge_context->resources;
335 if (!host_res) {
336 status = -EFAULT;
337 goto func_end;
339 status = dev_get_cod_mgr(hio_mgr->hdev_obj, &cod_man);
340 if (!cod_man) {
341 status = -EFAULT;
342 goto func_end;
344 hchnl_mgr = hio_mgr->hchnl_mgr;
345 /* The message manager is destroyed when the board is stopped. */
346 dev_get_msg_mgr(hio_mgr->hdev_obj, &hio_mgr->hmsg_mgr);
347 hmsg_mgr = hio_mgr->hmsg_mgr;
348 if (!hchnl_mgr || !hmsg_mgr) {
349 status = -EFAULT;
350 goto func_end;
352 if (hio_mgr->shared_mem)
353 hio_mgr->shared_mem = NULL;
355 /* Get start and length of channel part of shared memory */
356 status = cod_get_sym_value(cod_man, CHNL_SHARED_BUFFER_BASE_SYM,
357 &ul_shm_base);
358 if (DSP_FAILED(status)) {
359 status = -EFAULT;
360 goto func_end;
362 status = cod_get_sym_value(cod_man, CHNL_SHARED_BUFFER_LIMIT_SYM,
363 &ul_shm_limit);
364 if (DSP_FAILED(status)) {
365 status = -EFAULT;
366 goto func_end;
368 if (ul_shm_limit <= ul_shm_base) {
369 status = -EINVAL;
370 goto func_end;
372 /* Get total length in bytes */
373 ul_shm_length = (ul_shm_limit - ul_shm_base + 1) * hio_mgr->word_size;
374 /* Calculate size of a PROCCOPY shared memory region */
375 dev_dbg(bridge, "%s: (proc)proccopy shmmem size: 0x%x bytes\n",
376 __func__, (ul_shm_length - sizeof(struct shm)));
378 if (DSP_SUCCEEDED(status)) {
379 /* Get start and length of message part of shared memory */
380 status = cod_get_sym_value(cod_man, MSG_SHARED_BUFFER_BASE_SYM,
381 &ul_msg_base);
383 if (DSP_SUCCEEDED(status)) {
384 status = cod_get_sym_value(cod_man, MSG_SHARED_BUFFER_LIMIT_SYM,
385 &ul_msg_limit);
386 if (DSP_SUCCEEDED(status)) {
387 if (ul_msg_limit <= ul_msg_base) {
388 status = -EINVAL;
389 } else {
391 * Length (bytes) of messaging part of shared
392 * memory.
394 ul_msg_length =
395 (ul_msg_limit - ul_msg_base +
396 1) * hio_mgr->word_size;
398 * Total length (bytes) of shared memory:
399 * chnl + msg.
401 ul_mem_length = ul_shm_length + ul_msg_length;
403 } else {
404 status = -EFAULT;
406 } else {
407 status = -EFAULT;
409 if (DSP_SUCCEEDED(status)) {
410 #ifndef DSP_TRACEBUF_DISABLED
411 status =
412 cod_get_sym_value(cod_man, DSP_TRACESEC_END, &shm0_end);
413 #else
414 status = cod_get_sym_value(cod_man, SHM0_SHARED_END_SYM,
415 &shm0_end);
416 #endif
417 if (DSP_FAILED(status))
418 status = -EFAULT;
420 if (DSP_SUCCEEDED(status)) {
421 status =
422 cod_get_sym_value(cod_man, DYNEXTBASE, &ul_dyn_ext_base);
423 if (DSP_FAILED(status))
424 status = -EFAULT;
426 if (DSP_SUCCEEDED(status)) {
427 status = cod_get_sym_value(cod_man, EXTEND, &ul_ext_end);
428 if (DSP_FAILED(status))
429 status = -EFAULT;
431 if (DSP_SUCCEEDED(status)) {
432 /* Get memory reserved in host resources */
433 (void)mgr_enum_processor_info(0, (struct dsp_processorinfo *)
434 &hio_mgr->ext_proc_info,
435 sizeof(struct
436 mgr_processorextinfo),
437 &num_procs);
439 /* The first MMU TLB entry(TLB_0) in DCD is ShmBase. */
440 ndx = 0;
441 ul_gpp_pa = host_res->dw_mem_phys[1];
442 ul_gpp_va = host_res->dw_mem_base[1];
443 /* This is the virtual uncached ioremapped address!!! */
444 /* Why can't we directly take the DSPVA from the symbols? */
445 ul_dsp_va = hio_mgr->ext_proc_info.ty_tlb[0].ul_dsp_virt;
446 ul_seg_size = (shm0_end - ul_dsp_va) * hio_mgr->word_size;
447 ul_seg1_size =
448 (ul_ext_end - ul_dyn_ext_base) * hio_mgr->word_size;
449 /* 4K align */
450 ul_seg1_size = (ul_seg1_size + 0xFFF) & (~0xFFFUL);
451 /* 64K align */
452 ul_seg_size = (ul_seg_size + 0xFFFF) & (~0xFFFFUL);
453 ul_pad_size = UL_PAGE_ALIGN_SIZE - ((ul_gpp_pa + ul_seg1_size) %
454 UL_PAGE_ALIGN_SIZE);
455 if (ul_pad_size == UL_PAGE_ALIGN_SIZE)
456 ul_pad_size = 0x0;
458 dev_dbg(bridge, "%s: ul_gpp_pa %x, ul_gpp_va %x, ul_dsp_va %x, "
459 "shm0_end %x, ul_dyn_ext_base %x, ul_ext_end %x, "
460 "ul_seg_size %x ul_seg1_size %x \n", __func__,
461 ul_gpp_pa, ul_gpp_va, ul_dsp_va, shm0_end,
462 ul_dyn_ext_base, ul_ext_end, ul_seg_size, ul_seg1_size);
464 if ((ul_seg_size + ul_seg1_size + ul_pad_size) >
465 host_res->dw_mem_length[1]) {
466 pr_err("%s: shm Error, reserved 0x%x required 0x%x\n",
467 __func__, host_res->dw_mem_length[1],
468 ul_seg_size + ul_seg1_size + ul_pad_size);
469 status = -ENOMEM;
472 if (DSP_FAILED(status))
473 goto func_end;
475 pa_curr = ul_gpp_pa;
476 va_curr = ul_dyn_ext_base * hio_mgr->word_size;
477 gpp_va_curr = ul_gpp_va;
478 num_bytes = ul_seg1_size;
481 * Try to fit into TLB entries. If not possible, push them to page
482 * tables. It is quite possible that if sections are not on
483 * bigger page boundary, we may end up making several small pages.
484 * So, push them onto page tables, if that is the case.
486 map_attrs = 0x00000000;
487 map_attrs = DSP_MAPLITTLEENDIAN;
488 map_attrs |= DSP_MAPPHYSICALADDR;
489 map_attrs |= DSP_MAPELEMSIZE32;
490 map_attrs |= DSP_MAPDONOTLOCK;
492 while (num_bytes) {
494 * To find the max. page size with which both PA & VA are
495 * aligned.
497 all_bits = pa_curr | va_curr;
498 dev_dbg(bridge, "all_bits %x, pa_curr %x, va_curr %x, "
499 "num_bytes %x\n", all_bits, pa_curr, va_curr,
500 num_bytes);
501 for (i = 0; i < 4; i++) {
502 if ((num_bytes >= page_size[i]) && ((all_bits &
503 (page_size[i] -
504 1)) == 0)) {
505 status =
506 hio_mgr->intf_fxns->
507 pfn_brd_mem_map(hio_mgr->hbridge_context,
508 pa_curr, va_curr,
509 page_size[i], map_attrs,
510 NULL);
511 if (DSP_FAILED(status))
512 goto func_end;
513 pa_curr += page_size[i];
514 va_curr += page_size[i];
515 gpp_va_curr += page_size[i];
516 num_bytes -= page_size[i];
518 * Don't try smaller sizes. Hopefully we have
519 * reached an address aligned to a bigger page
520 * size.
522 break;
526 pa_curr += ul_pad_size;
527 va_curr += ul_pad_size;
528 gpp_va_curr += ul_pad_size;
530 /* Configure the TLB entries for the next cacheable segment */
531 num_bytes = ul_seg_size;
532 va_curr = ul_dsp_va * hio_mgr->word_size;
533 while (num_bytes) {
535 * To find the max. page size with which both PA & VA are
536 * aligned.
538 all_bits = pa_curr | va_curr;
539 dev_dbg(bridge, "all_bits for Seg1 %x, pa_curr %x, "
540 "va_curr %x, num_bytes %x\n", all_bits, pa_curr,
541 va_curr, num_bytes);
542 for (i = 0; i < 4; i++) {
543 if (!(num_bytes >= page_size[i]) ||
544 !((all_bits & (page_size[i] - 1)) == 0))
545 continue;
546 if (ndx < MAX_LOCK_TLB_ENTRIES) {
548 * This is the physical address written to
549 * DSP MMU.
551 ae_proc[ndx].ul_gpp_pa = pa_curr;
553 * This is the virtual uncached ioremapped
554 * address!!!
556 ae_proc[ndx].ul_gpp_va = gpp_va_curr;
557 ae_proc[ndx].ul_dsp_va =
558 va_curr / hio_mgr->word_size;
559 ae_proc[ndx].ul_size = page_size[i];
560 ae_proc[ndx].endianism = HW_LITTLE_ENDIAN;
561 ae_proc[ndx].elem_size = HW_ELEM_SIZE16BIT;
562 ae_proc[ndx].mixed_mode = HW_MMU_CPUES;
563 dev_dbg(bridge, "shm MMU TLB entry PA %x"
564 " VA %x DSP_VA %x Size %x\n",
565 ae_proc[ndx].ul_gpp_pa,
566 ae_proc[ndx].ul_gpp_va,
567 ae_proc[ndx].ul_dsp_va *
568 hio_mgr->word_size, page_size[i]);
569 ndx++;
570 } else {
571 status =
572 hio_mgr->intf_fxns->
573 pfn_brd_mem_map(hio_mgr->hbridge_context,
574 pa_curr, va_curr,
575 page_size[i], map_attrs,
576 NULL);
577 dev_dbg(bridge,
578 "shm MMU PTE entry PA %x"
579 " VA %x DSP_VA %x Size %x\n",
580 ae_proc[ndx].ul_gpp_pa,
581 ae_proc[ndx].ul_gpp_va,
582 ae_proc[ndx].ul_dsp_va *
583 hio_mgr->word_size, page_size[i]);
584 if (DSP_FAILED(status))
585 goto func_end;
587 pa_curr += page_size[i];
588 va_curr += page_size[i];
589 gpp_va_curr += page_size[i];
590 num_bytes -= page_size[i];
592 * Don't try smaller sizes. Hopefully we have reached
593 * an address aligned to a bigger page size.
595 break;
600 * Copy remaining entries from CDB. All entries are 1 MB and
601 * should not conflict with shm entries on MPU or DSP side.
603 for (i = 3; i < 7 && ndx < BRDIOCTL_NUMOFMMUTLB; i++) {
604 if (hio_mgr->ext_proc_info.ty_tlb[i].ul_gpp_phys == 0)
605 continue;
607 if ((hio_mgr->ext_proc_info.ty_tlb[i].ul_gpp_phys >
608 ul_gpp_pa - 0x100000
609 && hio_mgr->ext_proc_info.ty_tlb[i].ul_gpp_phys <=
610 ul_gpp_pa + ul_seg_size)
611 || (hio_mgr->ext_proc_info.ty_tlb[i].ul_dsp_virt >
612 ul_dsp_va - 0x100000 / hio_mgr->word_size
613 && hio_mgr->ext_proc_info.ty_tlb[i].ul_dsp_virt <=
614 ul_dsp_va + ul_seg_size / hio_mgr->word_size)) {
615 dev_dbg(bridge,
616 "CDB MMU entry %d conflicts with "
617 "shm.\n\tCDB: GppPa %x, DspVa %x.\n\tSHM: "
618 "GppPa %x, DspVa %x, Bytes %x.\n", i,
619 hio_mgr->ext_proc_info.ty_tlb[i].ul_gpp_phys,
620 hio_mgr->ext_proc_info.ty_tlb[i].ul_dsp_virt,
621 ul_gpp_pa, ul_dsp_va, ul_seg_size);
622 status = -EPERM;
623 } else {
624 if (ndx < MAX_LOCK_TLB_ENTRIES) {
625 ae_proc[ndx].ul_dsp_va =
626 hio_mgr->ext_proc_info.ty_tlb[i].
627 ul_dsp_virt;
628 ae_proc[ndx].ul_gpp_pa =
629 hio_mgr->ext_proc_info.ty_tlb[i].
630 ul_gpp_phys;
631 ae_proc[ndx].ul_gpp_va = 0;
632 /* 1 MB */
633 ae_proc[ndx].ul_size = 0x100000;
634 dev_dbg(bridge, "shm MMU entry PA %x "
635 "DSP_VA 0x%x\n", ae_proc[ndx].ul_gpp_pa,
636 ae_proc[ndx].ul_dsp_va);
637 ndx++;
638 } else {
639 status = hio_mgr->intf_fxns->pfn_brd_mem_map
640 (hio_mgr->hbridge_context,
641 hio_mgr->ext_proc_info.ty_tlb[i].
642 ul_gpp_phys,
643 hio_mgr->ext_proc_info.ty_tlb[i].
644 ul_dsp_virt, 0x100000, map_attrs,
645 NULL);
648 if (DSP_FAILED(status))
649 goto func_end;
652 map_attrs = 0x00000000;
653 map_attrs = DSP_MAPLITTLEENDIAN;
654 map_attrs |= DSP_MAPPHYSICALADDR;
655 map_attrs |= DSP_MAPELEMSIZE32;
656 map_attrs |= DSP_MAPDONOTLOCK;
658 /* Map the L4 peripherals */
659 i = 0;
660 while (l4_peripheral_table[i].phys_addr) {
661 status = hio_mgr->intf_fxns->pfn_brd_mem_map
662 (hio_mgr->hbridge_context, l4_peripheral_table[i].phys_addr,
663 l4_peripheral_table[i].dsp_virt_addr, HW_PAGE_SIZE4KB,
664 map_attrs, NULL);
665 if (DSP_FAILED(status))
666 goto func_end;
667 i++;
670 for (i = ndx; i < BRDIOCTL_NUMOFMMUTLB; i++) {
671 ae_proc[i].ul_dsp_va = 0;
672 ae_proc[i].ul_gpp_pa = 0;
673 ae_proc[i].ul_gpp_va = 0;
674 ae_proc[i].ul_size = 0;
677 * Set the shm physical address entry (grayed out in CDB file)
678 * to the virtual uncached ioremapped address of shm reserved
679 * on MPU.
681 hio_mgr->ext_proc_info.ty_tlb[0].ul_gpp_phys =
682 (ul_gpp_va + ul_seg1_size + ul_pad_size);
685 * Need shm Phys addr. IO supports only one DSP for now:
686 * num_procs = 1.
688 if (!hio_mgr->ext_proc_info.ty_tlb[0].ul_gpp_phys || num_procs != 1) {
689 status = -EFAULT;
690 goto func_end;
691 } else {
692 if (ae_proc[0].ul_dsp_va > ul_shm_base) {
693 status = -EPERM;
694 goto func_end;
696 /* ul_shm_base may not be at ul_dsp_va address */
697 ul_shm_base_offset = (ul_shm_base - ae_proc[0].ul_dsp_va) *
698 hio_mgr->word_size;
700 * bridge_dev_ctrl() will set dev context dsp-mmu info. In
701 * bridge_brd_start() the MMU will be re-programed with MMU
702 * DSPVa-GPPPa pair info while DSP is in a known
703 * (reset) state.
706 status =
707 hio_mgr->intf_fxns->pfn_dev_cntrl(hio_mgr->hbridge_context,
708 BRDIOCTL_SETMMUCONFIG,
709 ae_proc);
710 if (DSP_FAILED(status))
711 goto func_end;
712 ul_shm_base = hio_mgr->ext_proc_info.ty_tlb[0].ul_gpp_phys;
713 ul_shm_base += ul_shm_base_offset;
714 ul_shm_base = (u32) MEM_LINEAR_ADDRESS((void *)ul_shm_base,
715 ul_mem_length);
716 if (ul_shm_base == 0) {
717 status = -EFAULT;
718 goto func_end;
720 /* Register SM */
721 status =
722 register_shm_segs(hio_mgr, cod_man, ae_proc[0].ul_gpp_pa);
725 hio_mgr->shared_mem = (struct shm *)ul_shm_base;
726 hio_mgr->input = (u8 *) hio_mgr->shared_mem + sizeof(struct shm);
727 hio_mgr->output = hio_mgr->input + (ul_shm_length -
728 sizeof(struct shm)) / 2;
729 hio_mgr->usm_buf_size = hio_mgr->output - hio_mgr->input;
731 /* Set up Shared memory addresses for messaging. */
732 hio_mgr->msg_input_ctrl = (struct msg_ctrl *)((u8 *) hio_mgr->shared_mem
733 + ul_shm_length);
734 hio_mgr->msg_input =
735 (u8 *) hio_mgr->msg_input_ctrl + sizeof(struct msg_ctrl);
736 hio_mgr->msg_output_ctrl =
737 (struct msg_ctrl *)((u8 *) hio_mgr->msg_input_ctrl +
738 ul_msg_length / 2);
739 hio_mgr->msg_output =
740 (u8 *) hio_mgr->msg_output_ctrl + sizeof(struct msg_ctrl);
741 hmsg_mgr->max_msgs =
742 ((u8 *) hio_mgr->msg_output_ctrl - hio_mgr->msg_input)
743 / sizeof(struct msg_dspmsg);
744 dev_dbg(bridge, "IO MGR shm details: shared_mem %p, input %p, "
745 "output %p, msg_input_ctrl %p, msg_input %p, "
746 "msg_output_ctrl %p, msg_output %p\n",
747 (u8 *) hio_mgr->shared_mem, hio_mgr->input,
748 hio_mgr->output, (u8 *) hio_mgr->msg_input_ctrl,
749 hio_mgr->msg_input, (u8 *) hio_mgr->msg_output_ctrl,
750 hio_mgr->msg_output);
751 dev_dbg(bridge, "(proc) Mas msgs in shared memory: 0x%x\n",
752 hmsg_mgr->max_msgs);
753 memset((void *)hio_mgr->shared_mem, 0, sizeof(struct shm));
755 #ifndef DSP_TRACEBUF_DISABLED
756 /* Get the start address of trace buffer */
757 status = cod_get_sym_value(cod_man, SYS_PUTCBEG,
758 &hio_mgr->ul_trace_buffer_begin);
759 if (DSP_FAILED(status)) {
760 status = -EFAULT;
761 goto func_end;
764 hio_mgr->ul_gpp_read_pointer = hio_mgr->ul_trace_buffer_begin =
765 (ul_gpp_va + ul_seg1_size + ul_pad_size) +
766 (hio_mgr->ul_trace_buffer_begin - ul_dsp_va);
767 /* Get the end address of trace buffer */
768 status = cod_get_sym_value(cod_man, SYS_PUTCEND,
769 &hio_mgr->ul_trace_buffer_end);
770 if (DSP_FAILED(status)) {
771 status = -EFAULT;
772 goto func_end;
774 hio_mgr->ul_trace_buffer_end =
775 (ul_gpp_va + ul_seg1_size + ul_pad_size) +
776 (hio_mgr->ul_trace_buffer_end - ul_dsp_va);
777 /* Get the current address of DSP write pointer */
778 status = cod_get_sym_value(cod_man, BRIDGE_SYS_PUTC_CURRENT,
779 &hio_mgr->ul_trace_buffer_current);
780 if (DSP_FAILED(status)) {
781 status = -EFAULT;
782 goto func_end;
784 hio_mgr->ul_trace_buffer_current =
785 (ul_gpp_va + ul_seg1_size + ul_pad_size) +
786 (hio_mgr->ul_trace_buffer_current - ul_dsp_va);
787 /* Calculate the size of trace buffer */
788 kfree(hio_mgr->pmsg);
789 hio_mgr->pmsg = kmalloc(((hio_mgr->ul_trace_buffer_end -
790 hio_mgr->ul_trace_buffer_begin) *
791 hio_mgr->word_size) + 2, GFP_KERNEL);
792 if (!hio_mgr->pmsg)
793 status = -ENOMEM;
795 hio_mgr->ul_dsp_va = ul_dsp_va;
796 hio_mgr->ul_gpp_va = (ul_gpp_va + ul_seg1_size + ul_pad_size);
798 #endif
799 func_end:
800 return status;
804 * ======== io_buf_size ========
805 * Size of shared memory I/O channel.
807 u32 io_buf_size(struct io_mgr *hio_mgr)
809 if (hio_mgr)
810 return hio_mgr->usm_buf_size;
811 else
812 return 0;
816 * ======== io_cancel_chnl ========
817 * Cancel IO on a given PCPY channel.
819 void io_cancel_chnl(struct io_mgr *hio_mgr, u32 ulChnl)
821 struct io_mgr *pio_mgr = (struct io_mgr *)hio_mgr;
822 struct shm *sm;
824 if (!hio_mgr)
825 goto func_end;
826 sm = hio_mgr->shared_mem;
828 /* Inform DSP that we have no more buffers on this channel */
829 IO_AND_VALUE(pio_mgr->hbridge_context, struct shm, sm, host_free_mask,
830 (~(1 << ulChnl)));
832 sm_interrupt_dsp(pio_mgr->hbridge_context, MBX_PCPY_CLASS);
833 func_end:
834 return;
838 * ======== io_dispatch_chnl ========
839 * Proc-copy chanl dispatch.
841 static void io_dispatch_chnl(IN struct io_mgr *pio_mgr,
842 IN OUT struct chnl_object *pchnl, u8 iMode)
844 if (!pio_mgr)
845 goto func_end;
847 /* See if there is any data available for transfer */
848 if (iMode != IO_SERVICE)
849 goto func_end;
851 /* Any channel will do for this mode */
852 input_chnl(pio_mgr, pchnl, iMode);
853 output_chnl(pio_mgr, pchnl, iMode);
854 func_end:
855 return;
859 * ======== io_dispatch_msg ========
860 * Performs I/O dispatch on message queues.
862 static void io_dispatch_msg(IN struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr)
864 if (!pio_mgr)
865 goto func_end;
867 /* We are performing both input and output processing. */
868 input_msg(pio_mgr, hmsg_mgr);
869 output_msg(pio_mgr, hmsg_mgr);
870 func_end:
871 return;
875 * ======== io_dispatch_pm ========
876 * Performs I/O dispatch on PM related messages from DSP
878 static void io_dispatch_pm(struct io_mgr *pio_mgr)
880 int status;
881 u32 parg[2];
883 /* Perform Power message processing here */
884 parg[0] = pio_mgr->intr_val;
886 /* Send the command to the Bridge clk/pwr manager to handle */
887 if (parg[0] == MBX_PM_HIBERNATE_EN) {
888 dev_dbg(bridge, "PM: Hibernate command\n");
889 status = pio_mgr->intf_fxns->
890 pfn_dev_cntrl(pio_mgr->hbridge_context,
891 BRDIOCTL_PWR_HIBERNATE, parg);
892 if (DSP_FAILED(status))
893 pr_err("%s: hibernate cmd failed 0x%x\n",
894 __func__, status);
895 } else if (parg[0] == MBX_PM_OPP_REQ) {
896 parg[1] = pio_mgr->shared_mem->opp_request.rqst_opp_pt;
897 dev_dbg(bridge, "PM: Requested OPP = 0x%x\n", parg[1]);
898 status = pio_mgr->intf_fxns->
899 pfn_dev_cntrl(pio_mgr->hbridge_context,
900 BRDIOCTL_CONSTRAINT_REQUEST, parg);
901 if (DSP_FAILED(status))
902 dev_dbg(bridge, "PM: Failed to set constraint "
903 "= 0x%x \n", parg[1]);
904 } else {
905 dev_dbg(bridge, "PM: clk control value of msg = 0x%x\n",
906 parg[0]);
907 status = pio_mgr->intf_fxns->
908 pfn_dev_cntrl(pio_mgr->hbridge_context,
909 BRDIOCTL_CLK_CTRL, parg);
910 if (DSP_FAILED(status))
911 dev_dbg(bridge, "PM: Failed to ctrl the DSP clk"
912 "= 0x%x\n", *parg);
917 * ======== io_dpc ========
918 * Deferred procedure call for shared memory channel driver ISR. Carries
919 * out the dispatch of I/O as a non-preemptible event.It can only be
920 * pre-empted by an ISR.
922 void io_dpc(IN OUT unsigned long pRefData)
924 struct io_mgr *pio_mgr = (struct io_mgr *)pRefData;
925 struct chnl_mgr *chnl_mgr_obj;
926 struct msg_mgr *msg_mgr_obj;
927 struct deh_mgr *hdeh_mgr;
928 u32 requested;
929 u32 serviced;
931 if (!pio_mgr)
932 goto func_end;
933 chnl_mgr_obj = pio_mgr->hchnl_mgr;
934 dev_get_msg_mgr(pio_mgr->hdev_obj, &msg_mgr_obj);
935 dev_get_deh_mgr(pio_mgr->hdev_obj, &hdeh_mgr);
936 if (!chnl_mgr_obj)
937 goto func_end;
939 requested = pio_mgr->dpc_req;
940 serviced = pio_mgr->dpc_sched;
942 if (serviced == requested)
943 goto func_end;
945 /* Process pending DPC's */
946 do {
947 /* Check value of interrupt reg to ensure it's a valid error */
948 if ((pio_mgr->intr_val > DEH_BASE) &&
949 (pio_mgr->intr_val < DEH_LIMIT)) {
950 /* Notify DSP/BIOS exception */
951 if (hdeh_mgr) {
952 #ifndef DSP_TRACE_BUF_DISABLED
953 print_dsp_debug_trace(pio_mgr);
954 #endif
955 bridge_deh_notify(hdeh_mgr, DSP_SYSERROR,
956 pio_mgr->intr_val);
959 io_dispatch_chnl(pio_mgr, NULL, IO_SERVICE);
960 #ifdef CHNL_MESSAGES
961 if (msg_mgr_obj)
962 io_dispatch_msg(pio_mgr, msg_mgr_obj);
963 #endif
964 #ifndef DSP_TRACEBUF_DISABLED
965 if (pio_mgr->intr_val & MBX_DBG_SYSPRINTF) {
966 /* Notify DSP Trace message */
967 print_dsp_debug_trace(pio_mgr);
969 #endif
970 serviced++;
971 } while (serviced != requested);
972 pio_mgr->dpc_sched = requested;
973 func_end:
974 return;
978 * ======== io_mbox_msg ========
979 * Main interrupt handler for the shared memory IO manager.
980 * Calls the Bridge's CHNL_ISR to determine if this interrupt is ours, then
981 * schedules a DPC to dispatch I/O.
983 void io_mbox_msg(u32 msg)
985 struct io_mgr *pio_mgr;
986 struct dev_object *dev_obj;
987 unsigned long flags;
989 dev_obj = dev_get_first();
990 dev_get_io_mgr(dev_obj, &pio_mgr);
992 if (!pio_mgr)
993 return;
995 pio_mgr->intr_val = (u16)msg;
996 if (pio_mgr->intr_val & MBX_PM_CLASS)
997 io_dispatch_pm(pio_mgr);
999 if (pio_mgr->intr_val == MBX_DEH_RESET) {
1000 pio_mgr->intr_val = 0;
1001 } else {
1002 spin_lock_irqsave(&pio_mgr->dpc_lock, flags);
1003 pio_mgr->dpc_req++;
1004 spin_unlock_irqrestore(&pio_mgr->dpc_lock, flags);
1005 tasklet_schedule(&pio_mgr->dpc_tasklet);
1007 return;
1011 * ======== io_request_chnl ========
1012 * Purpose:
1013 * Request chanenel I/O from the DSP. Sets flags in shared memory, then
1014 * interrupts the DSP.
1016 void io_request_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
1017 u8 iMode, OUT u16 *pwMbVal)
1019 struct chnl_mgr *chnl_mgr_obj;
1020 struct shm *sm;
1022 if (!pchnl || !pwMbVal)
1023 goto func_end;
1024 chnl_mgr_obj = pio_mgr->hchnl_mgr;
1025 sm = pio_mgr->shared_mem;
1026 if (iMode == IO_INPUT) {
1028 * Assertion fires if CHNL_AddIOReq() called on a stream
1029 * which was cancelled, or attached to a dead board.
1031 DBC_ASSERT((pchnl->dw_state == CHNL_STATEREADY) ||
1032 (pchnl->dw_state == CHNL_STATEEOS));
1033 /* Indicate to the DSP we have a buffer available for input */
1034 IO_OR_VALUE(pio_mgr->hbridge_context, struct shm, sm,
1035 host_free_mask, (1 << pchnl->chnl_id));
1036 *pwMbVal = MBX_PCPY_CLASS;
1037 } else if (iMode == IO_OUTPUT) {
1039 * This assertion fails if CHNL_AddIOReq() was called on a
1040 * stream which was cancelled, or attached to a dead board.
1042 DBC_ASSERT((pchnl->dw_state & ~CHNL_STATEEOS) ==
1043 CHNL_STATEREADY);
1045 * Record the fact that we have a buffer available for
1046 * output.
1048 chnl_mgr_obj->dw_output_mask |= (1 << pchnl->chnl_id);
1049 } else {
1050 DBC_ASSERT(iMode); /* Shouldn't get here. */
1052 func_end:
1053 return;
1057 * ======== iosm_schedule ========
1058 * Schedule DPC for IO.
1060 void iosm_schedule(struct io_mgr *pio_mgr)
1062 unsigned long flags;
1064 if (!pio_mgr)
1065 return;
1067 /* Increment count of DPC's pending. */
1068 spin_lock_irqsave(&pio_mgr->dpc_lock, flags);
1069 pio_mgr->dpc_req++;
1070 spin_unlock_irqrestore(&pio_mgr->dpc_lock, flags);
1072 /* Schedule DPC */
1073 tasklet_schedule(&pio_mgr->dpc_tasklet);
1077 * ======== find_ready_output ========
1078 * Search for a host output channel which is ready to send. If this is
1079 * called as a result of servicing the DPC, then implement a round
1080 * robin search; otherwise, this was called by a client thread (via
1081 * IO_Dispatch()), so just start searching from the current channel id.
1083 static u32 find_ready_output(struct chnl_mgr *chnl_mgr_obj,
1084 struct chnl_object *pchnl, u32 dwMask)
1086 u32 ret = OUTPUTNOTREADY;
1087 u32 id, start_id;
1088 u32 shift;
1090 id = (pchnl !=
1091 NULL ? pchnl->chnl_id : (chnl_mgr_obj->dw_last_output + 1));
1092 id = ((id == CHNL_MAXCHANNELS) ? 0 : id);
1093 if (id >= CHNL_MAXCHANNELS)
1094 goto func_end;
1095 if (dwMask) {
1096 shift = (1 << id);
1097 start_id = id;
1098 do {
1099 if (dwMask & shift) {
1100 ret = id;
1101 if (pchnl == NULL)
1102 chnl_mgr_obj->dw_last_output = id;
1103 break;
1105 id = id + 1;
1106 id = ((id == CHNL_MAXCHANNELS) ? 0 : id);
1107 shift = (1 << id);
1108 } while (id != start_id);
1110 func_end:
1111 return ret;
1115 * ======== input_chnl ========
1116 * Dispatch a buffer on an input channel.
1118 static void input_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
1119 u8 iMode)
1121 struct chnl_mgr *chnl_mgr_obj;
1122 struct shm *sm;
1123 u32 chnl_id;
1124 u32 bytes;
1125 struct chnl_irp *chnl_packet_obj = NULL;
1126 u32 dw_arg;
1127 bool clear_chnl = false;
1128 bool notify_client = false;
1130 sm = pio_mgr->shared_mem;
1131 chnl_mgr_obj = pio_mgr->hchnl_mgr;
1133 /* Attempt to perform input */
1134 if (!IO_GET_VALUE(pio_mgr->hbridge_context, struct shm, sm, input_full))
1135 goto func_end;
1137 bytes =
1138 IO_GET_VALUE(pio_mgr->hbridge_context, struct shm, sm,
1139 input_size) * chnl_mgr_obj->word_size;
1140 chnl_id = IO_GET_VALUE(pio_mgr->hbridge_context, struct shm,
1141 sm, input_id);
1142 dw_arg = IO_GET_LONG(pio_mgr->hbridge_context, struct shm, sm, arg);
1143 if (chnl_id >= CHNL_MAXCHANNELS) {
1144 /* Shouldn't be here: would indicate corrupted shm. */
1145 DBC_ASSERT(chnl_id);
1146 goto func_end;
1148 pchnl = chnl_mgr_obj->ap_channel[chnl_id];
1149 if ((pchnl != NULL) && CHNL_IS_INPUT(pchnl->chnl_mode)) {
1150 if ((pchnl->dw_state & ~CHNL_STATEEOS) == CHNL_STATEREADY) {
1151 if (!pchnl->pio_requests)
1152 goto func_end;
1153 /* Get the I/O request, and attempt a transfer */
1154 chnl_packet_obj = (struct chnl_irp *)
1155 lst_get_head(pchnl->pio_requests);
1156 if (chnl_packet_obj) {
1157 pchnl->cio_reqs--;
1158 if (pchnl->cio_reqs < 0)
1159 goto func_end;
1161 * Ensure we don't overflow the client's
1162 * buffer.
1164 bytes = min(bytes, chnl_packet_obj->byte_size);
1165 /* Transfer buffer from DSP side */
1166 bytes = read_data(pio_mgr->hbridge_context,
1167 chnl_packet_obj->host_sys_buf,
1168 pio_mgr->input, bytes);
1169 pchnl->bytes_moved += bytes;
1170 chnl_packet_obj->byte_size = bytes;
1171 chnl_packet_obj->dw_arg = dw_arg;
1172 chnl_packet_obj->status = CHNL_IOCSTATCOMPLETE;
1174 if (bytes == 0) {
1176 * This assertion fails if the DSP
1177 * sends EOS more than once on this
1178 * channel.
1180 if (pchnl->dw_state & CHNL_STATEEOS)
1181 goto func_end;
1183 * Zero bytes indicates EOS. Update
1184 * IOC status for this chirp, and also
1185 * the channel state.
1187 chnl_packet_obj->status |=
1188 CHNL_IOCSTATEOS;
1189 pchnl->dw_state |= CHNL_STATEEOS;
1191 * Notify that end of stream has
1192 * occurred.
1194 ntfy_notify(pchnl->ntfy_obj,
1195 DSP_STREAMDONE);
1197 /* Tell DSP if no more I/O buffers available */
1198 if (!pchnl->pio_requests)
1199 goto func_end;
1200 if (LST_IS_EMPTY(pchnl->pio_requests)) {
1201 IO_AND_VALUE(pio_mgr->hbridge_context,
1202 struct shm, sm,
1203 host_free_mask,
1204 ~(1 << pchnl->chnl_id));
1206 clear_chnl = true;
1207 notify_client = true;
1208 } else {
1210 * Input full for this channel, but we have no
1211 * buffers available. The channel must be
1212 * "idling". Clear out the physical input
1213 * channel.
1215 clear_chnl = true;
1217 } else {
1218 /* Input channel cancelled: clear input channel */
1219 clear_chnl = true;
1221 } else {
1222 /* DPC fired after host closed channel: clear input channel */
1223 clear_chnl = true;
1225 if (clear_chnl) {
1226 /* Indicate to the DSP we have read the input */
1227 IO_SET_VALUE(pio_mgr->hbridge_context, struct shm, sm,
1228 input_full, 0);
1229 sm_interrupt_dsp(pio_mgr->hbridge_context, MBX_PCPY_CLASS);
1231 if (notify_client) {
1232 /* Notify client with IO completion record */
1233 notify_chnl_complete(pchnl, chnl_packet_obj);
1235 func_end:
1236 return;
1240 * ======== input_msg ========
1241 * Copies messages from shared memory to the message queues.
1243 static void input_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr)
1245 u32 num_msgs;
1246 u32 i;
1247 u8 *msg_input;
1248 struct msg_queue *msg_queue_obj;
1249 struct msg_frame *pmsg;
1250 struct msg_dspmsg msg;
1251 struct msg_ctrl *msg_ctr_obj;
1252 u32 input_empty;
1253 u32 addr;
1255 msg_ctr_obj = pio_mgr->msg_input_ctrl;
1256 /* Get the number of input messages to be read */
1257 input_empty =
1258 IO_GET_VALUE(pio_mgr->hbridge_context, struct msg_ctrl, msg_ctr_obj,
1259 buf_empty);
1260 num_msgs =
1261 IO_GET_VALUE(pio_mgr->hbridge_context, struct msg_ctrl, msg_ctr_obj,
1262 size);
1263 if (input_empty)
1264 goto func_end;
1266 msg_input = pio_mgr->msg_input;
1267 for (i = 0; i < num_msgs; i++) {
1268 /* Read the next message */
1269 addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.dw_cmd);
1270 msg.msg.dw_cmd =
1271 read_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr);
1272 addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.dw_arg1);
1273 msg.msg.dw_arg1 =
1274 read_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr);
1275 addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.dw_arg2);
1276 msg.msg.dw_arg2 =
1277 read_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr);
1278 addr = (u32) &(((struct msg_dspmsg *)msg_input)->msgq_id);
1279 msg.msgq_id =
1280 read_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr);
1281 msg_input += sizeof(struct msg_dspmsg);
1282 if (!hmsg_mgr->queue_list)
1283 goto func_end;
1285 /* Determine which queue to put the message in */
1286 msg_queue_obj =
1287 (struct msg_queue *)lst_first(hmsg_mgr->queue_list);
1288 dev_dbg(bridge, "input msg: dw_cmd=0x%x dw_arg1=0x%x "
1289 "dw_arg2=0x%x msgq_id=0x%x \n", msg.msg.dw_cmd,
1290 msg.msg.dw_arg1, msg.msg.dw_arg2, msg.msgq_id);
1292 * Interrupt may occur before shared memory and message
1293 * input locations have been set up. If all nodes were
1294 * cleaned up, hmsg_mgr->max_msgs should be 0.
1296 while (msg_queue_obj != NULL) {
1297 if (msg.msgq_id == msg_queue_obj->msgq_id) {
1298 /* Found it */
1299 if (msg.msg.dw_cmd == RMS_EXITACK) {
1301 * Call the node exit notification.
1302 * The exit message does not get
1303 * queued.
1305 (*hmsg_mgr->on_exit) ((void *)
1306 msg_queue_obj->arg,
1307 msg.msg.dw_arg1);
1308 } else {
1310 * Not an exit acknowledgement, queue
1311 * the message.
1313 if (!msg_queue_obj->msg_free_list)
1314 goto func_end;
1315 pmsg = (struct msg_frame *)lst_get_head
1316 (msg_queue_obj->msg_free_list);
1317 if (msg_queue_obj->msg_used_list
1318 && pmsg) {
1319 pmsg->msg_data = msg;
1320 lst_put_tail
1321 (msg_queue_obj->msg_used_list,
1322 (struct list_head *)pmsg);
1323 ntfy_notify
1324 (msg_queue_obj->ntfy_obj,
1325 DSP_NODEMESSAGEREADY);
1326 sync_set_event
1327 (msg_queue_obj->sync_event);
1328 } else {
1330 * No free frame to copy the
1331 * message into.
1333 pr_err("%s: no free msg frames,"
1334 " discarding msg\n",
1335 __func__);
1338 break;
1341 if (!hmsg_mgr->queue_list || !msg_queue_obj)
1342 goto func_end;
1343 msg_queue_obj =
1344 (struct msg_queue *)lst_next(hmsg_mgr->queue_list,
1345 (struct list_head *)
1346 msg_queue_obj);
1349 /* Set the post SWI flag */
1350 if (num_msgs > 0) {
1351 /* Tell the DSP we've read the messages */
1352 IO_SET_VALUE(pio_mgr->hbridge_context, struct msg_ctrl,
1353 msg_ctr_obj, buf_empty, true);
1354 IO_SET_VALUE(pio_mgr->hbridge_context, struct msg_ctrl,
1355 msg_ctr_obj, post_swi, true);
1356 sm_interrupt_dsp(pio_mgr->hbridge_context, MBX_PCPY_CLASS);
1358 func_end:
1359 return;
1363 * ======== notify_chnl_complete ========
1364 * Purpose:
1365 * Signal the channel event, notifying the client that I/O has completed.
1367 static void notify_chnl_complete(struct chnl_object *pchnl,
1368 struct chnl_irp *chnl_packet_obj)
1370 bool signal_event;
1372 if (!pchnl || !pchnl->sync_event ||
1373 !pchnl->pio_completions || !chnl_packet_obj)
1374 goto func_end;
1377 * Note: we signal the channel event only if the queue of IO
1378 * completions is empty. If it is not empty, the event is sure to be
1379 * signalled by the only IO completion list consumer:
1380 * bridge_chnl_get_ioc().
1382 signal_event = LST_IS_EMPTY(pchnl->pio_completions);
1383 /* Enqueue the IO completion info for the client */
1384 lst_put_tail(pchnl->pio_completions,
1385 (struct list_head *)chnl_packet_obj);
1386 pchnl->cio_cs++;
1388 if (pchnl->cio_cs > pchnl->chnl_packets)
1389 goto func_end;
1390 /* Signal the channel event (if not already set) that IO is complete */
1391 if (signal_event)
1392 sync_set_event(pchnl->sync_event);
1394 /* Notify that IO is complete */
1395 ntfy_notify(pchnl->ntfy_obj, DSP_STREAMIOCOMPLETION);
1396 func_end:
1397 return;
1401 * ======== output_chnl ========
1402 * Purpose:
1403 * Dispatch a buffer on an output channel.
1405 static void output_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
1406 u8 iMode)
1408 struct chnl_mgr *chnl_mgr_obj;
1409 struct shm *sm;
1410 u32 chnl_id;
1411 struct chnl_irp *chnl_packet_obj;
1412 u32 dw_dsp_f_mask;
1414 chnl_mgr_obj = pio_mgr->hchnl_mgr;
1415 sm = pio_mgr->shared_mem;
1416 /* Attempt to perform output */
1417 if (IO_GET_VALUE(pio_mgr->hbridge_context, struct shm, sm, output_full))
1418 goto func_end;
1420 if (pchnl && !((pchnl->dw_state & ~CHNL_STATEEOS) == CHNL_STATEREADY))
1421 goto func_end;
1423 /* Look to see if both a PC and DSP output channel are ready */
1424 dw_dsp_f_mask = IO_GET_VALUE(pio_mgr->hbridge_context, struct shm, sm,
1425 dsp_free_mask);
1426 chnl_id =
1427 find_ready_output(chnl_mgr_obj, pchnl,
1428 (chnl_mgr_obj->dw_output_mask & dw_dsp_f_mask));
1429 if (chnl_id == OUTPUTNOTREADY)
1430 goto func_end;
1432 pchnl = chnl_mgr_obj->ap_channel[chnl_id];
1433 if (!pchnl || !pchnl->pio_requests) {
1434 /* Shouldn't get here */
1435 goto func_end;
1437 /* Get the I/O request, and attempt a transfer */
1438 chnl_packet_obj = (struct chnl_irp *)lst_get_head(pchnl->pio_requests);
1439 if (!chnl_packet_obj)
1440 goto func_end;
1442 pchnl->cio_reqs--;
1443 if (pchnl->cio_reqs < 0 || !pchnl->pio_requests)
1444 goto func_end;
1446 /* Record fact that no more I/O buffers available */
1447 if (LST_IS_EMPTY(pchnl->pio_requests))
1448 chnl_mgr_obj->dw_output_mask &= ~(1 << chnl_id);
1450 /* Transfer buffer to DSP side */
1451 chnl_packet_obj->byte_size =
1452 write_data(pio_mgr->hbridge_context, pio_mgr->output,
1453 chnl_packet_obj->host_sys_buf, min(pio_mgr->usm_buf_size,
1454 chnl_packet_obj->byte_size));
1455 pchnl->bytes_moved += chnl_packet_obj->byte_size;
1456 /* Write all 32 bits of arg */
1457 IO_SET_LONG(pio_mgr->hbridge_context, struct shm, sm, arg,
1458 chnl_packet_obj->dw_arg);
1459 #if _CHNL_WORDSIZE == 2
1460 IO_SET_VALUE(pio_mgr->hbridge_context, struct shm, sm, output_id,
1461 (u16) chnl_id);
1462 IO_SET_VALUE(pio_mgr->hbridge_context, struct shm, sm, output_size,
1463 (u16) (chnl_packet_obj->byte_size +
1464 (chnl_mgr_obj->word_size -
1465 1)) / (u16) chnl_mgr_obj->word_size);
1466 #else
1467 IO_SET_VALUE(pio_mgr->hbridge_context, struct shm, sm, output_id,
1468 chnl_id);
1469 IO_SET_VALUE(pio_mgr->hbridge_context, struct shm, sm, output_size,
1470 (chnl_packet_obj->byte_size +
1471 (chnl_mgr_obj->word_size - 1)) / chnl_mgr_obj->word_size);
1472 #endif
1473 IO_SET_VALUE(pio_mgr->hbridge_context, struct shm, sm, output_full, 1);
1474 /* Indicate to the DSP we have written the output */
1475 sm_interrupt_dsp(pio_mgr->hbridge_context, MBX_PCPY_CLASS);
1476 /* Notify client with IO completion record (keep EOS) */
1477 chnl_packet_obj->status &= CHNL_IOCSTATEOS;
1478 notify_chnl_complete(pchnl, chnl_packet_obj);
1479 /* Notify if stream is done. */
1480 if (chnl_packet_obj->status & CHNL_IOCSTATEOS)
1481 ntfy_notify(pchnl->ntfy_obj, DSP_STREAMDONE);
1483 func_end:
1484 return;
1488 * ======== output_msg ========
1489 * Copies messages from the message queues to the shared memory.
1491 static void output_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr)
1493 u32 num_msgs = 0;
1494 u32 i;
1495 u8 *msg_output;
1496 struct msg_frame *pmsg;
1497 struct msg_ctrl *msg_ctr_obj;
1498 u32 output_empty;
1499 u32 val;
1500 u32 addr;
1502 msg_ctr_obj = pio_mgr->msg_output_ctrl;
1504 /* Check if output has been cleared */
1505 output_empty =
1506 IO_GET_VALUE(pio_mgr->hbridge_context, struct msg_ctrl, msg_ctr_obj,
1507 buf_empty);
1508 if (output_empty) {
1509 num_msgs = (hmsg_mgr->msgs_pending > hmsg_mgr->max_msgs) ?
1510 hmsg_mgr->max_msgs : hmsg_mgr->msgs_pending;
1511 msg_output = pio_mgr->msg_output;
1512 /* Copy num_msgs messages into shared memory */
1513 for (i = 0; i < num_msgs; i++) {
1514 if (!hmsg_mgr->msg_used_list) {
1515 pmsg = NULL;
1516 goto func_end;
1517 } else {
1518 pmsg = (struct msg_frame *)
1519 lst_get_head(hmsg_mgr->msg_used_list);
1521 if (pmsg != NULL) {
1522 val = (pmsg->msg_data).msgq_id;
1523 addr = (u32) &(((struct msg_dspmsg *)
1524 msg_output)->msgq_id);
1525 write_ext32_bit_dsp_data(
1526 pio_mgr->hbridge_context, addr, val);
1527 val = (pmsg->msg_data).msg.dw_cmd;
1528 addr = (u32) &((((struct msg_dspmsg *)
1529 msg_output)->msg).dw_cmd);
1530 write_ext32_bit_dsp_data(
1531 pio_mgr->hbridge_context, addr, val);
1532 val = (pmsg->msg_data).msg.dw_arg1;
1533 addr = (u32) &((((struct msg_dspmsg *)
1534 msg_output)->msg).dw_arg1);
1535 write_ext32_bit_dsp_data(
1536 pio_mgr->hbridge_context, addr, val);
1537 val = (pmsg->msg_data).msg.dw_arg2;
1538 addr = (u32) &((((struct msg_dspmsg *)
1539 msg_output)->msg).dw_arg2);
1540 write_ext32_bit_dsp_data(
1541 pio_mgr->hbridge_context, addr, val);
1542 msg_output += sizeof(struct msg_dspmsg);
1543 if (!hmsg_mgr->msg_free_list)
1544 goto func_end;
1545 lst_put_tail(hmsg_mgr->msg_free_list,
1546 (struct list_head *)pmsg);
1547 sync_set_event(hmsg_mgr->sync_event);
1551 if (num_msgs > 0) {
1552 hmsg_mgr->msgs_pending -= num_msgs;
1553 #if _CHNL_WORDSIZE == 2
1554 IO_SET_VALUE(pio_mgr->hbridge_context, struct msg_ctrl,
1555 msg_ctr_obj, size, (u16) num_msgs);
1556 #else
1557 IO_SET_VALUE(pio_mgr->hbridge_context, struct msg_ctrl,
1558 msg_ctr_obj, size, num_msgs);
1559 #endif
1560 IO_SET_VALUE(pio_mgr->hbridge_context, struct msg_ctrl,
1561 msg_ctr_obj, buf_empty, false);
1562 /* Set the post SWI flag */
1563 IO_SET_VALUE(pio_mgr->hbridge_context, struct msg_ctrl,
1564 msg_ctr_obj, post_swi, true);
1565 /* Tell the DSP we have written the output. */
1566 sm_interrupt_dsp(pio_mgr->hbridge_context,
1567 MBX_PCPY_CLASS);
1570 func_end:
1571 return;
1575 * ======== register_shm_segs ========
1576 * purpose:
1577 * Registers GPP SM segment with CMM.
1579 static int register_shm_segs(struct io_mgr *hio_mgr,
1580 struct cod_manager *cod_man,
1581 u32 dw_gpp_base_pa)
1583 int status = 0;
1584 u32 ul_shm0_base = 0;
1585 u32 shm0_end = 0;
1586 u32 ul_shm0_rsrvd_start = 0;
1587 u32 ul_rsrvd_size = 0;
1588 u32 ul_gpp_phys;
1589 u32 ul_dsp_virt;
1590 u32 ul_shm_seg_id0 = 0;
1591 u32 dw_offset, dw_gpp_base_va, ul_dsp_size;
1594 * Read address and size info for first SM region.
1595 * Get start of 1st SM Heap region.
1597 status =
1598 cod_get_sym_value(cod_man, SHM0_SHARED_BASE_SYM, &ul_shm0_base);
1599 if (ul_shm0_base == 0) {
1600 status = -EPERM;
1601 goto func_end;
1603 /* Get end of 1st SM Heap region */
1604 if (DSP_SUCCEEDED(status)) {
1605 /* Get start and length of message part of shared memory */
1606 status = cod_get_sym_value(cod_man, SHM0_SHARED_END_SYM,
1607 &shm0_end);
1608 if (shm0_end == 0) {
1609 status = -EPERM;
1610 goto func_end;
1613 /* Start of Gpp reserved region */
1614 if (DSP_SUCCEEDED(status)) {
1615 /* Get start and length of message part of shared memory */
1616 status =
1617 cod_get_sym_value(cod_man, SHM0_SHARED_RESERVED_BASE_SYM,
1618 &ul_shm0_rsrvd_start);
1619 if (ul_shm0_rsrvd_start == 0) {
1620 status = -EPERM;
1621 goto func_end;
1624 /* Register with CMM */
1625 if (DSP_SUCCEEDED(status)) {
1626 status = dev_get_cmm_mgr(hio_mgr->hdev_obj, &hio_mgr->hcmm_mgr);
1627 if (DSP_SUCCEEDED(status)) {
1628 status = cmm_un_register_gppsm_seg(hio_mgr->hcmm_mgr,
1629 CMM_ALLSEGMENTS);
1632 /* Register new SM region(s) */
1633 if (DSP_SUCCEEDED(status) && (shm0_end - ul_shm0_base) > 0) {
1634 /* Calc size (bytes) of SM the GPP can alloc from */
1635 ul_rsrvd_size =
1636 (shm0_end - ul_shm0_rsrvd_start + 1) * hio_mgr->word_size;
1637 if (ul_rsrvd_size <= 0) {
1638 status = -EPERM;
1639 goto func_end;
1641 /* Calc size of SM DSP can alloc from */
1642 ul_dsp_size =
1643 (ul_shm0_rsrvd_start - ul_shm0_base) * hio_mgr->word_size;
1644 if (ul_dsp_size <= 0) {
1645 status = -EPERM;
1646 goto func_end;
1648 /* First TLB entry reserved for Bridge SM use. */
1649 ul_gpp_phys = hio_mgr->ext_proc_info.ty_tlb[0].ul_gpp_phys;
1650 /* Get size in bytes */
1651 ul_dsp_virt =
1652 hio_mgr->ext_proc_info.ty_tlb[0].ul_dsp_virt *
1653 hio_mgr->word_size;
1655 * Calc byte offset used to convert GPP phys <-> DSP byte
1656 * address.
1658 if (dw_gpp_base_pa > ul_dsp_virt)
1659 dw_offset = dw_gpp_base_pa - ul_dsp_virt;
1660 else
1661 dw_offset = ul_dsp_virt - dw_gpp_base_pa;
1663 if (ul_shm0_rsrvd_start * hio_mgr->word_size < ul_dsp_virt) {
1664 status = -EPERM;
1665 goto func_end;
1668 * Calc Gpp phys base of SM region.
1669 * This is actually uncached kernel virtual address.
1671 dw_gpp_base_va =
1672 ul_gpp_phys + ul_shm0_rsrvd_start * hio_mgr->word_size -
1673 ul_dsp_virt;
1675 * Calc Gpp phys base of SM region.
1676 * This is the physical address.
1678 dw_gpp_base_pa =
1679 dw_gpp_base_pa + ul_shm0_rsrvd_start * hio_mgr->word_size -
1680 ul_dsp_virt;
1681 /* Register SM Segment 0. */
1682 status =
1683 cmm_register_gppsm_seg(hio_mgr->hcmm_mgr, dw_gpp_base_pa,
1684 ul_rsrvd_size, dw_offset,
1685 (dw_gpp_base_pa >
1686 ul_dsp_virt) ? CMM_ADDTODSPPA :
1687 CMM_SUBFROMDSPPA,
1688 (u32) (ul_shm0_base *
1689 hio_mgr->word_size),
1690 ul_dsp_size, &ul_shm_seg_id0,
1691 dw_gpp_base_va);
1692 /* First SM region is seg_id = 1 */
1693 if (ul_shm_seg_id0 != 1)
1694 status = -EPERM;
1696 func_end:
1697 return status;
1701 * ======== read_data ========
1702 * Copies buffers from the shared memory to the host buffer.
1704 static u32 read_data(struct bridge_dev_context *hDevContext, void *dest,
1705 void *pSrc, u32 usize)
1707 memcpy(dest, pSrc, usize);
1708 return usize;
1712 * ======== write_data ========
1713 * Copies buffers from the host side buffer to the shared memory.
1715 static u32 write_data(struct bridge_dev_context *hDevContext, void *dest,
1716 void *pSrc, u32 usize)
1718 memcpy(dest, pSrc, usize);
1719 return usize;
1722 /* ZCPY IO routines. */
1723 void io_intr_dsp2(IN struct io_mgr *pio_mgr, IN u16 mb_val)
1725 sm_interrupt_dsp(pio_mgr->hbridge_context, mb_val);
1729 * ======== IO_SHMcontrol ========
1730 * Sets the requested shm setting.
1732 int io_sh_msetting(struct io_mgr *hio_mgr, u8 desc, void *pargs)
1734 #ifdef CONFIG_TIDSPBRIDGE_DVFS
1735 u32 i;
1736 struct dspbridge_platform_data *pdata =
1737 omap_dspbridge_dev->dev.platform_data;
1739 switch (desc) {
1740 case SHM_CURROPP:
1741 /* Update the shared memory with requested OPP information */
1742 if (pargs != NULL)
1743 hio_mgr->shared_mem->opp_table_struct.curr_opp_pt =
1744 *(u32 *) pargs;
1745 else
1746 return -EPERM;
1747 break;
1748 case SHM_OPPINFO:
1750 * Update the shared memory with the voltage, frequency,
1751 * min and max frequency values for an OPP.
1753 for (i = 0; i <= dsp_max_opps; i++) {
1754 hio_mgr->shared_mem->opp_table_struct.opp_point[i].
1755 voltage = vdd1_dsp_freq[i][0];
1756 dev_dbg(bridge, "OPP-shm: voltage: %d\n",
1757 vdd1_dsp_freq[i][0]);
1758 hio_mgr->shared_mem->opp_table_struct.
1759 opp_point[i].frequency = vdd1_dsp_freq[i][1];
1760 dev_dbg(bridge, "OPP-shm: frequency: %d\n",
1761 vdd1_dsp_freq[i][1]);
1762 hio_mgr->shared_mem->opp_table_struct.opp_point[i].
1763 min_freq = vdd1_dsp_freq[i][2];
1764 dev_dbg(bridge, "OPP-shm: min freq: %d\n",
1765 vdd1_dsp_freq[i][2]);
1766 hio_mgr->shared_mem->opp_table_struct.opp_point[i].
1767 max_freq = vdd1_dsp_freq[i][3];
1768 dev_dbg(bridge, "OPP-shm: max freq: %d\n",
1769 vdd1_dsp_freq[i][3]);
1771 hio_mgr->shared_mem->opp_table_struct.num_opp_pts =
1772 dsp_max_opps;
1773 dev_dbg(bridge, "OPP-shm: max OPP number: %d\n", dsp_max_opps);
1774 /* Update the current OPP number */
1775 if (pdata->dsp_get_opp)
1776 i = (*pdata->dsp_get_opp) ();
1777 hio_mgr->shared_mem->opp_table_struct.curr_opp_pt = i;
1778 dev_dbg(bridge, "OPP-shm: value programmed = %d\n", i);
1779 break;
1780 case SHM_GETOPP:
1781 /* Get the OPP that DSP has requested */
1782 *(u32 *) pargs = hio_mgr->shared_mem->opp_request.rqst_opp_pt;
1783 break;
1784 default:
1785 break;
1787 #endif
1788 return 0;
1792 * ======== bridge_io_get_proc_load ========
1793 * Gets the Processor's Load information
1795 int bridge_io_get_proc_load(IN struct io_mgr *hio_mgr,
1796 OUT struct dsp_procloadstat *pProcStat)
1798 pProcStat->curr_load = hio_mgr->shared_mem->load_mon_info.curr_dsp_load;
1799 pProcStat->predicted_load =
1800 hio_mgr->shared_mem->load_mon_info.pred_dsp_load;
1801 pProcStat->curr_dsp_freq =
1802 hio_mgr->shared_mem->load_mon_info.curr_dsp_freq;
1803 pProcStat->predicted_freq =
1804 hio_mgr->shared_mem->load_mon_info.pred_dsp_freq;
1806 dev_dbg(bridge, "Curr Load = %d, Pred Load = %d, Curr Freq = %d, "
1807 "Pred Freq = %d\n", pProcStat->curr_load,
1808 pProcStat->predicted_load, pProcStat->curr_dsp_freq,
1809 pProcStat->predicted_freq);
1810 return 0;
1813 #ifndef DSP_TRACEBUF_DISABLED
1814 void print_dsp_debug_trace(struct io_mgr *hio_mgr)
1816 u32 ul_new_message_length = 0, ul_gpp_cur_pointer;
1818 while (true) {
1819 /* Get the DSP current pointer */
1820 ul_gpp_cur_pointer =
1821 *(u32 *) (hio_mgr->ul_trace_buffer_current);
1822 ul_gpp_cur_pointer =
1823 hio_mgr->ul_gpp_va + (ul_gpp_cur_pointer -
1824 hio_mgr->ul_dsp_va);
1826 /* No new debug messages available yet */
1827 if (ul_gpp_cur_pointer == hio_mgr->ul_gpp_read_pointer) {
1828 break;
1829 } else if (ul_gpp_cur_pointer > hio_mgr->ul_gpp_read_pointer) {
1830 /* Continuous data */
1831 ul_new_message_length =
1832 ul_gpp_cur_pointer - hio_mgr->ul_gpp_read_pointer;
1834 memcpy(hio_mgr->pmsg,
1835 (char *)hio_mgr->ul_gpp_read_pointer,
1836 ul_new_message_length);
1837 hio_mgr->pmsg[ul_new_message_length] = '\0';
1839 * Advance the GPP trace pointer to DSP current
1840 * pointer.
1842 hio_mgr->ul_gpp_read_pointer += ul_new_message_length;
1843 /* Print the trace messages */
1844 pr_info("DSPTrace: %s\n", hio_mgr->pmsg);
1845 } else if (ul_gpp_cur_pointer < hio_mgr->ul_gpp_read_pointer) {
1846 /* Handle trace buffer wraparound */
1847 memcpy(hio_mgr->pmsg,
1848 (char *)hio_mgr->ul_gpp_read_pointer,
1849 hio_mgr->ul_trace_buffer_end -
1850 hio_mgr->ul_gpp_read_pointer);
1851 ul_new_message_length =
1852 ul_gpp_cur_pointer - hio_mgr->ul_trace_buffer_begin;
1853 memcpy(&hio_mgr->pmsg[hio_mgr->ul_trace_buffer_end -
1854 hio_mgr->ul_gpp_read_pointer],
1855 (char *)hio_mgr->ul_trace_buffer_begin,
1856 ul_new_message_length);
1857 hio_mgr->pmsg[hio_mgr->ul_trace_buffer_end -
1858 hio_mgr->ul_gpp_read_pointer +
1859 ul_new_message_length] = '\0';
1861 * Advance the GPP trace pointer to DSP current
1862 * pointer.
1864 hio_mgr->ul_gpp_read_pointer =
1865 hio_mgr->ul_trace_buffer_begin +
1866 ul_new_message_length;
1867 /* Print the trace messages */
1868 pr_info("DSPTrace: %s\n", hio_mgr->pmsg);
1872 #endif
1875 * ======== print_dsp_trace_buffer ========
1876 * Prints the trace buffer returned from the DSP (if DBG_Trace is enabled).
1877 * Parameters:
1878 * hdeh_mgr: Handle to DEH manager object
1879 * number of extra carriage returns to generate.
1880 * Returns:
1881 * 0: Success.
1882 * -ENOMEM: Unable to allocate memory.
1883 * Requires:
1884 * hdeh_mgr muse be valid. Checked in bridge_deh_notify.
1886 int print_dsp_trace_buffer(struct bridge_dev_context *hbridge_context)
1888 int status = 0;
1889 struct cod_manager *cod_mgr;
1890 u32 ul_trace_end;
1891 u32 ul_trace_begin;
1892 u32 trace_cur_pos;
1893 u32 ul_num_bytes = 0;
1894 u32 ul_num_words = 0;
1895 u32 ul_word_size = 2;
1896 char *psz_buf;
1897 char *str_beg;
1898 char *trace_end;
1899 char *buf_end;
1900 char *new_line;
1902 struct bridge_dev_context *pbridge_context = hbridge_context;
1903 struct bridge_drv_interface *intf_fxns;
1904 struct dev_object *dev_obj = (struct dev_object *)
1905 pbridge_context->hdev_obj;
1907 status = dev_get_cod_mgr(dev_obj, &cod_mgr);
1909 if (cod_mgr) {
1910 /* Look for SYS_PUTCBEG/SYS_PUTCEND */
1911 status =
1912 cod_get_sym_value(cod_mgr, COD_TRACEBEG, &ul_trace_begin);
1913 } else {
1914 status = -EFAULT;
1916 if (DSP_SUCCEEDED(status))
1917 status =
1918 cod_get_sym_value(cod_mgr, COD_TRACEEND, &ul_trace_end);
1920 if (DSP_SUCCEEDED(status))
1921 /* trace_cur_pos will hold the address of a DSP pointer */
1922 status = cod_get_sym_value(cod_mgr, COD_TRACECURPOS,
1923 &trace_cur_pos);
1925 if (DSP_FAILED(status))
1926 goto func_end;
1928 ul_num_bytes = (ul_trace_end - ul_trace_begin);
1930 ul_num_words = ul_num_bytes * ul_word_size;
1931 status = dev_get_intf_fxns(dev_obj, &intf_fxns);
1933 if (DSP_FAILED(status))
1934 goto func_end;
1936 psz_buf = kzalloc(ul_num_bytes + 2, GFP_ATOMIC);
1937 if (psz_buf != NULL) {
1938 /* Read trace buffer data */
1939 status = (*intf_fxns->pfn_brd_read)(pbridge_context,
1940 (u8 *)psz_buf, (u32)ul_trace_begin,
1941 ul_num_bytes, 0);
1943 if (DSP_FAILED(status))
1944 goto func_end;
1946 /* Pack and do newline conversion */
1947 pr_debug("PrintDspTraceBuffer: "
1948 "before pack and unpack.\n");
1949 pr_debug("%s: DSP Trace Buffer Begin:\n"
1950 "=======================\n%s\n",
1951 __func__, psz_buf);
1953 /* Read the value at the DSP address in trace_cur_pos. */
1954 status = (*intf_fxns->pfn_brd_read)(pbridge_context,
1955 (u8 *)&trace_cur_pos, (u32)trace_cur_pos,
1956 4, 0);
1957 if (DSP_FAILED(status))
1958 goto func_end;
1959 /* Pack and do newline conversion */
1960 pr_info("DSP Trace Buffer Begin:\n"
1961 "=======================\n%s\n",
1962 psz_buf);
1965 /* convert to offset */
1966 trace_cur_pos = trace_cur_pos - ul_trace_begin;
1968 if (ul_num_bytes) {
1970 * The buffer is not full, find the end of the
1971 * data -- buf_end will be >= pszBuf after
1972 * while.
1974 buf_end = &psz_buf[ul_num_bytes+1];
1975 /* DSP print position */
1976 trace_end = &psz_buf[trace_cur_pos];
1979 * Search buffer for a new_line and replace it
1980 * with '\0', then print as string.
1981 * Continue until end of buffer is reached.
1983 str_beg = trace_end;
1984 ul_num_bytes = buf_end - str_beg;
1986 while (str_beg < buf_end) {
1987 new_line = strnchr(str_beg, ul_num_bytes,
1988 '\n');
1989 if (new_line && new_line < buf_end) {
1990 *new_line = 0;
1991 pr_debug("%s\n", str_beg);
1992 str_beg = ++new_line;
1993 ul_num_bytes = buf_end - str_beg;
1994 } else {
1996 * Assume buffer empty if it contains
1997 * a zero
1999 if (*str_beg != '\0') {
2000 str_beg[ul_num_bytes] = 0;
2001 pr_debug("%s\n", str_beg);
2003 str_beg = buf_end;
2004 ul_num_bytes = 0;
2008 * Search buffer for a nNewLine and replace it
2009 * with '\0', then print as string.
2010 * Continue until buffer is exhausted.
2012 str_beg = psz_buf;
2013 ul_num_bytes = trace_end - str_beg;
2015 while (str_beg < trace_end) {
2016 new_line = strnchr(str_beg, ul_num_bytes, '\n');
2017 if (new_line != NULL && new_line < trace_end) {
2018 *new_line = 0;
2019 pr_debug("%s\n", str_beg);
2020 str_beg = ++new_line;
2021 ul_num_bytes = trace_end - str_beg;
2022 } else {
2024 * Assume buffer empty if it contains
2025 * a zero
2027 if (*str_beg != '\0') {
2028 str_beg[ul_num_bytes] = 0;
2029 pr_debug("%s\n", str_beg);
2031 str_beg = trace_end;
2032 ul_num_bytes = 0;
2036 pr_info("\n=======================\n"
2037 "DSP Trace Buffer End:\n");
2038 kfree(psz_buf);
2039 } else {
2040 status = -ENOMEM;
2042 func_end:
2043 if (DSP_FAILED(status))
2044 dev_dbg(bridge, "%s Failed, status 0x%x\n", __func__, status);
2045 return status;
/* No per-module setup is required for the shared-memory IO layer. */
void io_sm_init(void)
{
	/* Do nothing */
}
2053 * dump_dsp_stack() - This function dumps the data on the DSP stack.
2054 * @bridge_context: Bridge driver's device context pointer.
2057 int dump_dsp_stack(struct bridge_dev_context *bridge_context)
2059 int status = 0;
2060 struct cod_manager *code_mgr;
2061 struct node_mgr *node_mgr;
2062 u32 trace_begin;
2063 char name[256];
2064 struct {
2065 u32 head[2];
2066 u32 size;
2067 } mmu_fault_dbg_info;
2068 u32 *buffer;
2069 u32 *buffer_beg;
2070 u32 *buffer_end;
2071 u32 exc_type;
2072 u32 dyn_ext_base;
2073 u32 i;
2074 u32 offset_output;
2075 u32 total_size;
2076 u32 poll_cnt;
2077 const char *dsp_regs[] = {"EFR", "IERR", "ITSR", "NTSR",
2078 "IRP", "NRP", "AMR", "SSR",
2079 "ILC", "RILC", "IER", "CSR"};
2080 const char *exec_ctxt[] = {"Task", "SWI", "HWI", "Unknown"};
2081 struct bridge_drv_interface *intf_fxns;
2082 struct dev_object *dev_object = bridge_context->hdev_obj;
2084 status = dev_get_cod_mgr(dev_object, &code_mgr);
2085 if (!code_mgr) {
2086 pr_debug("%s: Failed on dev_get_cod_mgr.\n", __func__);
2087 status = -EFAULT;
2090 if (DSP_SUCCEEDED(status)) {
2091 status = dev_get_node_manager(dev_object, &node_mgr);
2092 if (!node_mgr) {
2093 pr_debug("%s: Failed on dev_get_node_manager.\n",
2094 __func__);
2095 status = -EFAULT;
2099 if (DSP_SUCCEEDED(status)) {
2100 /* Look for SYS_PUTCBEG/SYS_PUTCEND: */
2101 status =
2102 cod_get_sym_value(code_mgr, COD_TRACEBEG, &trace_begin);
2103 pr_debug("%s: trace_begin Value 0x%x\n",
2104 __func__, trace_begin);
2105 if (DSP_FAILED(status))
2106 pr_debug("%s: Failed on cod_get_sym_value.\n",
2107 __func__);
2109 if (DSP_SUCCEEDED(status))
2110 status = dev_get_intf_fxns(dev_object, &intf_fxns);
2112 * Check for the "magic number" in the trace buffer. If it has
2113 * yet to appear then poll the trace buffer to wait for it. Its
2114 * appearance signals that the DSP has finished dumping its state.
2116 mmu_fault_dbg_info.head[0] = 0;
2117 mmu_fault_dbg_info.head[1] = 0;
2118 if (DSP_SUCCEEDED(status)) {
2119 poll_cnt = 0;
2120 while ((mmu_fault_dbg_info.head[0] != MMU_FAULT_HEAD1 ||
2121 mmu_fault_dbg_info.head[1] != MMU_FAULT_HEAD2) &&
2122 poll_cnt < POLL_MAX) {
2124 /* Read DSP dump size from the DSP trace buffer... */
2125 status = (*intf_fxns->pfn_brd_read)(bridge_context,
2126 (u8 *)&mmu_fault_dbg_info, (u32)trace_begin,
2127 sizeof(mmu_fault_dbg_info), 0);
2129 if (DSP_FAILED(status))
2130 break;
2132 poll_cnt++;
2135 if (mmu_fault_dbg_info.head[0] != MMU_FAULT_HEAD1 &&
2136 mmu_fault_dbg_info.head[1] != MMU_FAULT_HEAD2) {
2137 status = -ETIME;
2138 pr_err("%s:No DSP MMU-Fault information available.\n",
2139 __func__);
2143 if (DSP_SUCCEEDED(status)) {
2144 total_size = mmu_fault_dbg_info.size;
2145 /* Limit the size in case DSP went crazy */
2146 if (total_size > MAX_MMU_DBGBUFF)
2147 total_size = MAX_MMU_DBGBUFF;
2149 buffer = kzalloc(total_size, GFP_ATOMIC);
2150 if (!buffer) {
2151 status = -ENOMEM;
2152 pr_debug("%s: Failed to "
2153 "allocate stack dump buffer.\n", __func__);
2154 goto func_end;
2157 buffer_beg = buffer;
2158 buffer_end = buffer + total_size / 4;
2160 /* Read bytes from the DSP trace buffer... */
2161 status = (*intf_fxns->pfn_brd_read)(bridge_context,
2162 (u8 *)buffer, (u32)trace_begin,
2163 total_size, 0);
2164 if (DSP_FAILED(status)) {
2165 pr_debug("%s: Failed to Read Trace Buffer.\n",
2166 __func__);
2167 goto func_end;
2170 pr_err("\nAproximate Crash Position:\n"
2171 "--------------------------\n");
2173 exc_type = buffer[3];
2174 if (!exc_type)
2175 i = buffer[79]; /* IRP */
2176 else
2177 i = buffer[80]; /* NRP */
2179 status =
2180 cod_get_sym_value(code_mgr, DYNEXTBASE, &dyn_ext_base);
2181 if (DSP_FAILED(status)) {
2182 status = -EFAULT;
2183 goto func_end;
2186 if ((i > dyn_ext_base) && (node_find_addr(node_mgr, i,
2187 0x1000, &offset_output, name) == 0))
2188 pr_err("0x%-8x [\"%s\" + 0x%x]\n", i, name,
2189 i - offset_output);
2190 else
2191 pr_err("0x%-8x [Unable to match to a symbol.]\n", i);
2193 buffer += 4;
2195 pr_err("\nExecution Info:\n"
2196 "---------------\n");
2198 if (*buffer < ARRAY_SIZE(exec_ctxt)) {
2199 pr_err("Execution context \t%s\n",
2200 exec_ctxt[*buffer++]);
2201 } else {
2202 pr_err("Execution context corrupt\n");
2203 kfree(buffer_beg);
2204 return -EFAULT;
2206 pr_err("Task Handle\t\t0x%x\n", *buffer++);
2207 pr_err("Stack Pointer\t\t0x%x\n", *buffer++);
2208 pr_err("Stack Top\t\t0x%x\n", *buffer++);
2209 pr_err("Stack Bottom\t\t0x%x\n", *buffer++);
2210 pr_err("Stack Size\t\t0x%x\n", *buffer++);
2211 pr_err("Stack Size In Use\t0x%x\n", *buffer++);
2213 pr_err("\nCPU Registers\n"
2214 "---------------\n");
2216 for (i = 0; i < 32; i++) {
2217 if (i == 4 || i == 6 || i == 8)
2218 pr_err("A%d 0x%-8x [Function Argument %d]\n",
2219 i, *buffer++, i-3);
2220 else if (i == 15)
2221 pr_err("A15 0x%-8x [Frame Pointer]\n",
2222 *buffer++);
2223 else
2224 pr_err("A%d 0x%x\n", i, *buffer++);
2227 pr_err("\nB0 0x%x\n", *buffer++);
2228 pr_err("B1 0x%x\n", *buffer++);
2229 pr_err("B2 0x%x\n", *buffer++);
2231 if ((*buffer > dyn_ext_base) && (node_find_addr(node_mgr,
2232 *buffer, 0x1000, &offset_output, name) == 0))
2234 pr_err("B3 0x%-8x [Function Return Pointer:"
2235 " \"%s\" + 0x%x]\n", *buffer, name,
2236 *buffer - offset_output);
2237 else
2238 pr_err("B3 0x%-8x [Function Return Pointer:"
2239 "Unable to match to a symbol.]\n", *buffer);
2241 buffer++;
2243 for (i = 4; i < 32; i++) {
2244 if (i == 4 || i == 6 || i == 8)
2245 pr_err("B%d 0x%-8x [Function Argument %d]\n",
2246 i, *buffer++, i-2);
2247 else if (i == 14)
2248 pr_err("B14 0x%-8x [Data Page Pointer]\n",
2249 *buffer++);
2250 else
2251 pr_err("B%d 0x%x\n", i, *buffer++);
2254 pr_err("\n");
2256 for (i = 0; i < ARRAY_SIZE(dsp_regs); i++)
2257 pr_err("%s 0x%x\n", dsp_regs[i], *buffer++);
2259 pr_err("\nStack:\n"
2260 "------\n");
2262 for (i = 0; buffer < buffer_end; i++, buffer++) {
2263 if ((*buffer > dyn_ext_base) && (
2264 node_find_addr(node_mgr, *buffer , 0x600,
2265 &offset_output, name) == 0))
2266 pr_err("[%d] 0x%-8x [\"%s\" + 0x%x]\n",
2267 i, *buffer, name,
2268 *buffer - offset_output);
2269 else
2270 pr_err("[%d] 0x%x\n", i, *buffer);
2272 kfree(buffer_beg);
2274 func_end:
2275 return status;
2279 * dump_dl_modules() - This functions dumps the _DLModules loaded in DSP side
2280 * @bridge_context: Bridge driver's device context pointer.
2283 void dump_dl_modules(struct bridge_dev_context *bridge_context)
2285 struct cod_manager *code_mgr;
2286 struct bridge_drv_interface *intf_fxns;
2287 struct bridge_dev_context *bridge_ctxt = bridge_context;
2288 struct dev_object *dev_object = bridge_ctxt->hdev_obj;
2289 struct modules_header modules_hdr;
2290 struct dll_module *module_struct = NULL;
2291 u32 module_dsp_addr;
2292 u32 module_size;
2293 u32 module_struct_size = 0;
2294 u32 sect_ndx;
2295 char *sect_str ;
2296 int status = 0;
2298 status = dev_get_intf_fxns(dev_object, &intf_fxns);
2299 if (DSP_FAILED(status)) {
2300 pr_debug("%s: Failed on dev_get_intf_fxns.\n", __func__);
2301 goto func_end;
2304 status = dev_get_cod_mgr(dev_object, &code_mgr);
2305 if (!code_mgr) {
2306 pr_debug("%s: Failed on dev_get_cod_mgr.\n", __func__);
2307 status = -EFAULT;
2308 goto func_end;
2311 /* Lookup the address of the modules_header structure */
2312 status = cod_get_sym_value(code_mgr, "_DLModules", &module_dsp_addr);
2313 if (DSP_FAILED(status)) {
2314 pr_debug("%s: Failed on cod_get_sym_value for _DLModules.\n",
2315 __func__);
2316 goto func_end;
2319 pr_debug("%s: _DLModules at 0x%x\n", __func__, module_dsp_addr);
2321 /* Copy the modules_header structure from DSP memory. */
2322 status = (*intf_fxns->pfn_brd_read)(bridge_context, (u8 *) &modules_hdr,
2323 (u32) module_dsp_addr, sizeof(modules_hdr), 0);
2325 if (DSP_FAILED(status)) {
2326 pr_debug("%s: Failed failed to read modules header.\n",
2327 __func__);
2328 goto func_end;
2331 module_dsp_addr = modules_hdr.first_module;
2332 module_size = modules_hdr.first_module_size;
2334 pr_debug("%s: dll_module_header 0x%x %d\n", __func__, module_dsp_addr,
2335 module_size);
2337 pr_err("\nDynamically Loaded Modules:\n"
2338 "---------------------------\n");
2340 /* For each dll_module structure in the list... */
2341 while (module_size) {
2343 * Allocate/re-allocate memory to hold the dll_module
2344 * structure. The memory is re-allocated only if the existing
2345 * allocation is too small.
2347 if (module_size > module_struct_size) {
2348 kfree(module_struct);
2349 module_struct = kzalloc(module_size+128, GFP_ATOMIC);
2350 module_struct_size = module_size+128;
2351 pr_debug("%s: allocated module struct %p %d\n",
2352 __func__, module_struct, module_struct_size);
2353 if (!module_struct)
2354 goto func_end;
2356 /* Copy the dll_module structure from DSP memory */
2357 status = (*intf_fxns->pfn_brd_read)(bridge_context,
2358 (u8 *)module_struct, module_dsp_addr, module_size, 0);
2360 if (DSP_FAILED(status)) {
2361 pr_debug(
2362 "%s: Failed to read dll_module stuct for 0x%x.\n",
2363 __func__, module_dsp_addr);
2364 break;
2367 /* Update info regarding the _next_ module in the list. */
2368 module_dsp_addr = module_struct->next_module;
2369 module_size = module_struct->next_module_size;
2371 pr_debug("%s: next module 0x%x %d, this module num sects %d\n",
2372 __func__, module_dsp_addr, module_size,
2373 module_struct->num_sects);
2376 * The section name strings start immedialty following
2377 * the array of dll_sect structures.
2379 sect_str = (char *) &module_struct->
2380 sects[module_struct->num_sects];
2381 pr_err("%s\n", sect_str);
2384 * Advance to the first section name string.
2385 * Each string follows the one before.
2387 sect_str += strlen(sect_str) + 1;
2389 /* Access each dll_sect structure and its name string. */
2390 for (sect_ndx = 0;
2391 sect_ndx < module_struct->num_sects; sect_ndx++) {
2392 pr_err(" Section: 0x%x ",
2393 module_struct->sects[sect_ndx].sect_load_adr);
2395 if (((u32) sect_str - (u32) module_struct) <
2396 module_struct_size) {
2397 pr_err("%s\n", sect_str);
2398 /* Each string follows the one before. */
2399 sect_str += strlen(sect_str)+1;
2400 } else {
2401 pr_err("<string error>\n");
2402 pr_debug("%s: section name sting address "
2403 "is invalid %p\n", __func__, sect_str);
2407 func_end:
2408 kfree(module_struct);