ASoC: Intel: Skylake: Check device type to get endpoint configuration
sound/soc/intel/skylake/skl-topology.c
/*
 *  skl-topology.c - Implements Platform component ALSA controls/widget
 *  handlers.
 *
 *  Copyright (C) 2014-2015 Intel Corp
 *  Author: Jeeja KP <jeeja.kp@intel.com>
 *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/firmware.h>
#include <sound/soc.h>
#include <sound/soc-topology.h>
#include <uapi/sound/snd_sst_tokens.h>
#include "skl-sst-dsp.h"
#include "skl-sst-ipc.h"
#include "skl-topology.h"
#include "skl.h"
#include "skl-tplg-interface.h"
#include "../common/sst-dsp.h"
#include "../common/sst-dsp-priv.h"

#define SKL_CH_FIXUP_MASK	(1 << 0)
#define SKL_RATE_FIXUP_MASK	(1 << 1)
#define SKL_FMT_FIXUP_MASK	(1 << 2)
#define SKL_IN_DIR_BIT_MASK	BIT(0)
#define SKL_PIN_COUNT_MASK	GENMASK(7, 4)
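
/*
 * SKL_IN_DIR_BIT_MASK and SKL_PIN_COUNT_MASK decode the
 * SKL_TKN_U32_DIR_PIN_COUNT topology token: bit 0 carries the direction
 * and bits 7:4 carry the pin count (see skl_tplg_get_token() below).
 */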

void skl_tplg_d0i3_get(struct skl *skl, enum d0i3_capability caps)
{
	struct skl_d0i3_data *d0i3 = &skl->skl_sst->d0i3;

	switch (caps) {
	case SKL_D0I3_NONE:
		d0i3->non_d0i3++;
		break;

	case SKL_D0I3_STREAMING:
		d0i3->streaming++;
		break;

	case SKL_D0I3_NON_STREAMING:
		d0i3->non_streaming++;
		break;
	}
}

void skl_tplg_d0i3_put(struct skl *skl, enum d0i3_capability caps)
{
	struct skl_d0i3_data *d0i3 = &skl->skl_sst->d0i3;

	switch (caps) {
	case SKL_D0I3_NONE:
		d0i3->non_d0i3--;
		break;

	case SKL_D0I3_STREAMING:
		d0i3->streaming--;
		break;

	case SKL_D0I3_NON_STREAMING:
		d0i3->non_streaming--;
		break;
	}
}

/*
 * SKL DSP driver modelling uses only a few DAPM widgets; the rest are
 * ignored. This helper checks if the SKL driver handles this widget type.
 */
static int is_skl_dsp_widget_type(struct snd_soc_dapm_widget *w)
{
	switch (w->id) {
	case snd_soc_dapm_dai_link:
	case snd_soc_dapm_dai_in:
	case snd_soc_dapm_aif_in:
	case snd_soc_dapm_aif_out:
	case snd_soc_dapm_dai_out:
	case snd_soc_dapm_switch:
		return false;
	default:
		return true;
	}
}

/*
 * Each pipeline needs memory to be allocated. Check if we have free memory
 * from the available pool.
 */
static bool skl_is_pipe_mem_avail(struct skl *skl,
				struct skl_module_cfg *mconfig)
{
	struct skl_sst *ctx = skl->skl_sst;

	if (skl->resource.mem + mconfig->pipe->memory_pages >
				skl->resource.max_mem) {
		dev_err(ctx->dev,
				"%s: module_id %d instance %d\n", __func__,
				mconfig->id.module_id,
				mconfig->id.instance_id);
		dev_err(ctx->dev,
				"exceeds ppl memory available %d mem %d\n",
				skl->resource.max_mem, skl->resource.mem);
		return false;
	} else {
		return true;
	}
}

/*
 * Add the mem to the mem pool. This is freed when the pipe is deleted.
 * Note: the DSP does the actual memory management; we only track usage
 * against the complete pool.
 */
static void skl_tplg_alloc_pipe_mem(struct skl *skl,
				struct skl_module_cfg *mconfig)
{
	skl->resource.mem += mconfig->pipe->memory_pages;
}

/*
 * A pipeline needs DSP CPU resources for computation, quantified in MCPS
 * (Million Clocks Per Second) required for the module/pipe.
 *
 * Each pipeline needs MCPS to be allocated. Check if we have MCPS for this
 * pipe.
 */
static bool skl_is_pipe_mcps_avail(struct skl *skl,
				struct skl_module_cfg *mconfig)
{
	struct skl_sst *ctx = skl->skl_sst;

	if (skl->resource.mcps + mconfig->mcps > skl->resource.max_mcps) {
		dev_err(ctx->dev,
			"%s: module_id %d instance %d\n", __func__,
			mconfig->id.module_id, mconfig->id.instance_id);
		dev_err(ctx->dev,
			"exceeds ppl mcps available %d > mem %d\n",
			skl->resource.max_mcps, skl->resource.mcps);
		return false;
	} else {
		return true;
	}
}

static void skl_tplg_alloc_pipe_mcps(struct skl *skl,
				struct skl_module_cfg *mconfig)
{
	skl->resource.mcps += mconfig->mcps;
}

/*
 * Free the mcps when tearing down
 */
static void
skl_tplg_free_pipe_mcps(struct skl *skl, struct skl_module_cfg *mconfig)
{
	skl->resource.mcps -= mconfig->mcps;
}

/*
 * Free the memory when tearing down
 */
static void
skl_tplg_free_pipe_mem(struct skl *skl, struct skl_module_cfg *mconfig)
{
	skl->resource.mem -= mconfig->pipe->memory_pages;
}

static void skl_dump_mconfig(struct skl_sst *ctx,
					struct skl_module_cfg *mcfg)
{
	dev_dbg(ctx->dev, "Dumping config\n");
	dev_dbg(ctx->dev, "Input Format:\n");
	dev_dbg(ctx->dev, "channels = %d\n", mcfg->in_fmt[0].channels);
	dev_dbg(ctx->dev, "s_freq = %d\n", mcfg->in_fmt[0].s_freq);
	dev_dbg(ctx->dev, "ch_cfg = %d\n", mcfg->in_fmt[0].ch_cfg);
	dev_dbg(ctx->dev, "valid bit depth = %d\n",
					mcfg->in_fmt[0].valid_bit_depth);
	dev_dbg(ctx->dev, "Output Format:\n");
	dev_dbg(ctx->dev, "channels = %d\n", mcfg->out_fmt[0].channels);
	dev_dbg(ctx->dev, "s_freq = %d\n", mcfg->out_fmt[0].s_freq);
	dev_dbg(ctx->dev, "valid bit depth = %d\n",
					mcfg->out_fmt[0].valid_bit_depth);
	dev_dbg(ctx->dev, "ch_cfg = %d\n", mcfg->out_fmt[0].ch_cfg);
}

static void skl_tplg_update_chmap(struct skl_module_fmt *fmt, int chs)
{
	int slot_map = 0xFFFFFFFF;
	int start_slot = 0;
	int i;

	for (i = 0; i < chs; i++) {
		/*
		 * For 2 channels with starting slot as 0, slot map will
		 * look like 0xFFFFFF10.
		 */
		slot_map &= (~(0xF << (4 * i)) | (start_slot << (4 * i)));
		start_slot++;
	}
	fmt->ch_map = slot_map;
}

static void skl_tplg_update_params(struct skl_module_fmt *fmt,
			struct skl_pipe_params *params, int fixup)
{
	if (fixup & SKL_RATE_FIXUP_MASK)
		fmt->s_freq = params->s_freq;
	if (fixup & SKL_CH_FIXUP_MASK) {
		fmt->channels = params->ch;
		skl_tplg_update_chmap(fmt, fmt->channels);
	}
	if (fixup & SKL_FMT_FIXUP_MASK) {
		fmt->valid_bit_depth = skl_get_bit_depth(params->s_fmt);

		/*
		 * 16 bit is 16 bit container whereas 24 bit is in 32 bit
		 * container so update bit depth accordingly
		 */
		switch (fmt->valid_bit_depth) {
		case SKL_DEPTH_16BIT:
			fmt->bit_depth = fmt->valid_bit_depth;
			break;

		default:
			fmt->bit_depth = SKL_DEPTH_32BIT;
			break;
		}
	}
}

/*
 * A pipeline may have modules which impact the pcm parameters, like SRC,
 * channel converter, format converter.
 * We need to calculate the output params by applying the 'fixup'.
 * Topology tells the driver which type of fixup is to be applied by
 * supplying the fixup mask, so based on that we calculate the output.
 *
 * For an FE, the pcm hw_params is the source/target format. The same is
 * applicable for a BE with its hw_params invoked.
 * Here, based on the FE/BE pipeline and the direction, we calculate the
 * input and output fixup and then apply that to the module.
 */
static void skl_tplg_update_params_fixup(struct skl_module_cfg *m_cfg,
		struct skl_pipe_params *params, bool is_fe)
{
	int in_fixup, out_fixup;
	struct skl_module_fmt *in_fmt, *out_fmt;

	/* Fixups will be applied to pin 0 only */
	in_fmt = &m_cfg->in_fmt[0];
	out_fmt = &m_cfg->out_fmt[0];

	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		if (is_fe) {
			in_fixup = m_cfg->params_fixup;
			out_fixup = (~m_cfg->converter) &
					m_cfg->params_fixup;
		} else {
			out_fixup = m_cfg->params_fixup;
			in_fixup = (~m_cfg->converter) &
					m_cfg->params_fixup;
		}
	} else {
		if (is_fe) {
			out_fixup = m_cfg->params_fixup;
			in_fixup = (~m_cfg->converter) &
					m_cfg->params_fixup;
		} else {
			in_fixup = m_cfg->params_fixup;
			out_fixup = (~m_cfg->converter) &
					m_cfg->params_fixup;
		}
	}

	skl_tplg_update_params(in_fmt, params, in_fixup);
	skl_tplg_update_params(out_fmt, params, out_fixup);
}

/*
 * A module needs input and output buffers, which are dependent upon pcm
 * params, so once we have calculated the params, we need the buffer
 * calculation as well.
 */
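/*
 * Roughly, the ibs/obs computed below are the bytes needed for 1 ms of
 * audio: (sample rate / 1000) * channels * bytes per sample, with a 5x
 * factor for the sample rate converter module.
 */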
static void skl_tplg_update_buffer_size(struct skl_sst *ctx,
				struct skl_module_cfg *mcfg)
{
	int multiplier = 1;
	struct skl_module_fmt *in_fmt, *out_fmt;
	int in_rate, out_rate;

	/*
	 * Since fixups are applied to pin 0 only, ibs and obs need to
	 * change for pin 0 only.
	 */
	in_fmt = &mcfg->in_fmt[0];
	out_fmt = &mcfg->out_fmt[0];

	if (mcfg->m_type == SKL_MODULE_TYPE_SRCINT)
		multiplier = 5;

	if (in_fmt->s_freq % 1000)
		in_rate = (in_fmt->s_freq / 1000) + 1;
	else
		in_rate = (in_fmt->s_freq / 1000);

	mcfg->ibs = in_rate * (mcfg->in_fmt->channels) *
			(mcfg->in_fmt->bit_depth >> 3) *
			multiplier;

	if (mcfg->out_fmt->s_freq % 1000)
		out_rate = (mcfg->out_fmt->s_freq / 1000) + 1;
	else
		out_rate = (mcfg->out_fmt->s_freq / 1000);

	mcfg->obs = out_rate * (mcfg->out_fmt->channels) *
			(mcfg->out_fmt->bit_depth >> 3) *
			multiplier;
}

static u8 skl_tplg_be_dev_type(int dev_type)
{
	int ret;

	switch (dev_type) {
	case SKL_DEVICE_BT:
		ret = NHLT_DEVICE_BT;
		break;

	case SKL_DEVICE_DMIC:
		ret = NHLT_DEVICE_DMIC;
		break;

	case SKL_DEVICE_I2S:
		ret = NHLT_DEVICE_I2S;
		break;

	default:
		ret = NHLT_DEVICE_INVALID;
		break;
	}

	return ret;
}

static int skl_tplg_update_be_blob(struct snd_soc_dapm_widget *w,
						struct skl_sst *ctx)
{
	struct skl_module_cfg *m_cfg = w->priv;
	int link_type, dir;
	u32 ch, s_freq, s_fmt;
	struct nhlt_specific_cfg *cfg;
	struct skl *skl = get_skl_ctx(ctx->dev);
	u8 dev_type = skl_tplg_be_dev_type(m_cfg->dev_type);

	/* check if we already have blob */
	if (m_cfg->formats_config.caps_size > 0)
		return 0;

	dev_dbg(ctx->dev, "Applying default cfg blob\n");
	switch (m_cfg->dev_type) {
	case SKL_DEVICE_DMIC:
		link_type = NHLT_LINK_DMIC;
		dir = SNDRV_PCM_STREAM_CAPTURE;
		s_freq = m_cfg->in_fmt[0].s_freq;
		s_fmt = m_cfg->in_fmt[0].bit_depth;
		ch = m_cfg->in_fmt[0].channels;
		break;

	case SKL_DEVICE_I2S:
		link_type = NHLT_LINK_SSP;
		if (m_cfg->hw_conn_type == SKL_CONN_SOURCE) {
			dir = SNDRV_PCM_STREAM_PLAYBACK;
			s_freq = m_cfg->out_fmt[0].s_freq;
			s_fmt = m_cfg->out_fmt[0].bit_depth;
			ch = m_cfg->out_fmt[0].channels;
		} else {
			dir = SNDRV_PCM_STREAM_CAPTURE;
			s_freq = m_cfg->in_fmt[0].s_freq;
			s_fmt = m_cfg->in_fmt[0].bit_depth;
			ch = m_cfg->in_fmt[0].channels;
		}
		break;

	default:
		return -EINVAL;
	}

	/* update the blob based on virtual bus_id and default params */
	cfg = skl_get_ep_blob(skl, m_cfg->vbus_id, link_type,
					s_fmt, ch, s_freq, dir, dev_type);
	if (cfg) {
		m_cfg->formats_config.caps_size = cfg->size;
		m_cfg->formats_config.caps = (u32 *) &cfg->caps;
	} else {
		dev_err(ctx->dev, "Blob NULL for id %x type %d dirn %d\n",
					m_cfg->vbus_id, link_type, dir);
		dev_err(ctx->dev, "PCM: ch %d, freq %d, fmt %d\n",
					ch, s_freq, s_fmt);
		return -EIO;
	}

	return 0;
}

static void skl_tplg_update_module_params(struct snd_soc_dapm_widget *w,
							struct skl_sst *ctx)
{
	struct skl_module_cfg *m_cfg = w->priv;
	struct skl_pipe_params *params = m_cfg->pipe->p_params;
	int p_conn_type = m_cfg->pipe->conn_type;
	bool is_fe;

	if (!m_cfg->params_fixup)
		return;

	dev_dbg(ctx->dev, "Mconfig for widget=%s BEFORE updation\n",
				w->name);

	skl_dump_mconfig(ctx, m_cfg);

	if (p_conn_type == SKL_PIPE_CONN_TYPE_FE)
		is_fe = true;
	else
		is_fe = false;

	skl_tplg_update_params_fixup(m_cfg, params, is_fe);
	skl_tplg_update_buffer_size(ctx, m_cfg);

	dev_dbg(ctx->dev, "Mconfig for widget=%s AFTER updation\n",
				w->name);

	skl_dump_mconfig(ctx, m_cfg);
}

/*
 * Some modules can have multiple params set from user controls, which
 * need to be applied after the module is initialized. If the set_params
 * flag is SKL_PARAM_SET, the module params are sent after module init.
 */
static int skl_tplg_set_module_params(struct snd_soc_dapm_widget *w,
						struct skl_sst *ctx)
{
	int i, ret;
	struct skl_module_cfg *mconfig = w->priv;
	const struct snd_kcontrol_new *k;
	struct soc_bytes_ext *sb;
	struct skl_algo_data *bc;
	struct skl_specific_cfg *sp_cfg;

	if (mconfig->formats_config.caps_size > 0 &&
		mconfig->formats_config.set_params == SKL_PARAM_SET) {
		sp_cfg = &mconfig->formats_config;
		ret = skl_set_module_params(ctx, sp_cfg->caps,
					sp_cfg->caps_size,
					sp_cfg->param_id, mconfig);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < w->num_kcontrols; i++) {
		k = &w->kcontrol_news[i];
		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
			sb = (void *) k->private_value;
			bc = (struct skl_algo_data *)sb->dobj.private;

			if (bc->set_params == SKL_PARAM_SET) {
				ret = skl_set_module_params(ctx,
						(u32 *)bc->params, bc->size,
						bc->param_id, mconfig);
				if (ret < 0)
					return ret;
			}
		}
	}

	return 0;
}

/*
 * Some module params can be set from user controls and are required at
 * module init time. Such a param is identified by its set_params flag
 * being SKL_PARAM_INIT, and it is then sent as part of the module init
 * data.
 */
static int skl_tplg_set_module_init_data(struct snd_soc_dapm_widget *w)
{
	const struct snd_kcontrol_new *k;
	struct soc_bytes_ext *sb;
	struct skl_algo_data *bc;
	struct skl_module_cfg *mconfig = w->priv;
	int i;

	for (i = 0; i < w->num_kcontrols; i++) {
		k = &w->kcontrol_news[i];
		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
			sb = (struct soc_bytes_ext *)k->private_value;
			bc = (struct skl_algo_data *)sb->dobj.private;

			if (bc->set_params != SKL_PARAM_INIT)
				continue;

			mconfig->formats_config.caps = (u32 *)&bc->params;
			mconfig->formats_config.caps_size = bc->size;

			break;
		}
	}

	return 0;
}

static int skl_tplg_module_prepare(struct skl_sst *ctx, struct skl_pipe *pipe,
		struct snd_soc_dapm_widget *w, struct skl_module_cfg *mcfg)
{
	switch (mcfg->dev_type) {
	case SKL_DEVICE_HDAHOST:
		return skl_pcm_host_dma_prepare(ctx->dev, pipe->p_params);

	case SKL_DEVICE_HDALINK:
		return skl_pcm_link_dma_prepare(ctx->dev, pipe->p_params);
	}

	return 0;
}

/*
 * Inside a pipe instance, we can have various modules. These modules need
 * to be instantiated in the DSP by invoking the INIT_MODULE IPC, which is
 * achieved by the skl_init_module() routine, so invoke that for all modules
 * in a pipeline.
 */
static int
skl_tplg_init_pipe_modules(struct skl *skl, struct skl_pipe *pipe)
{
	struct skl_pipe_module *w_module;
	struct snd_soc_dapm_widget *w;
	struct skl_module_cfg *mconfig;
	struct skl_sst *ctx = skl->skl_sst;
	int ret = 0;

	list_for_each_entry(w_module, &pipe->w_list, node) {
		w = w_module->w;
		mconfig = w->priv;

		/* check if module ids are populated */
		if (mconfig->id.module_id < 0) {
			dev_err(skl->skl_sst->dev,
					"module %pUL id not populated\n",
					(uuid_le *)mconfig->guid);
			return -EIO;
		}

		/* check resource available */
		if (!skl_is_pipe_mcps_avail(skl, mconfig))
			return -ENOMEM;

		if (mconfig->is_loadable && ctx->dsp->fw_ops.load_mod) {
			ret = ctx->dsp->fw_ops.load_mod(ctx->dsp,
				mconfig->id.module_id, mconfig->guid);
			if (ret < 0)
				return ret;

			mconfig->m_state = SKL_MODULE_LOADED;
		}

		/* prepare the DMA if the module is gateway cpr */
		ret = skl_tplg_module_prepare(ctx, pipe, w, mconfig);
		if (ret < 0)
			return ret;

		/* update blob if blob is null for be with default value */
		skl_tplg_update_be_blob(w, ctx);

		/*
		 * apply fix/conversion to module params based on
		 * FE/BE params
		 */
		skl_tplg_update_module_params(w, ctx);
		mconfig->id.pvt_id = skl_get_pvt_id(ctx, mconfig);
		if (mconfig->id.pvt_id < 0)
			return ret;
		skl_tplg_set_module_init_data(w);
		ret = skl_init_module(ctx, mconfig);
		if (ret < 0) {
			skl_put_pvt_id(ctx, mconfig);
			return ret;
		}
		skl_tplg_alloc_pipe_mcps(skl, mconfig);
		ret = skl_tplg_set_module_params(w, ctx);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static int skl_tplg_unload_pipe_modules(struct skl_sst *ctx,
	 struct skl_pipe *pipe)
{
	int ret;
	struct skl_pipe_module *w_module = NULL;
	struct skl_module_cfg *mconfig = NULL;

	list_for_each_entry(w_module, &pipe->w_list, node) {
		mconfig = w_module->w->priv;

		if (mconfig->is_loadable && ctx->dsp->fw_ops.unload_mod &&
			mconfig->m_state > SKL_MODULE_UNINIT) {
			ret = ctx->dsp->fw_ops.unload_mod(ctx->dsp,
						mconfig->id.module_id);
			if (ret < 0)
				return -EIO;
		}
		skl_put_pvt_id(ctx, mconfig);
	}

	/* no modules to unload in this path, so return */
	return 0;
}

/*
 * A mixer module represents a pipeline. So in the Pre-PMU event of the mixer
 * we need to create the pipeline. We do the following:
 *   - check the resources
 *   - Create the pipeline
 *   - Initialize the modules in the pipeline
 *   - finally, bind all modules together
 */
static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
							struct skl *skl)
{
	int ret;
	struct skl_module_cfg *mconfig = w->priv;
	struct skl_pipe_module *w_module;
	struct skl_pipe *s_pipe = mconfig->pipe;
	struct skl_module_cfg *src_module = NULL, *dst_module;
	struct skl_sst *ctx = skl->skl_sst;

	/* check resource available */
	if (!skl_is_pipe_mcps_avail(skl, mconfig))
		return -EBUSY;

	if (!skl_is_pipe_mem_avail(skl, mconfig))
		return -ENOMEM;

	/*
	 * Create a list of modules for pipe.
	 * This list contains modules from source to sink
	 */
	ret = skl_create_pipeline(ctx, mconfig->pipe);
	if (ret < 0)
		return ret;

	skl_tplg_alloc_pipe_mem(skl, mconfig);
	skl_tplg_alloc_pipe_mcps(skl, mconfig);

	/* Init all pipe modules from source to sink */
	ret = skl_tplg_init_pipe_modules(skl, s_pipe);
	if (ret < 0)
		return ret;

	/* Bind modules from source to sink */
	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		dst_module = w_module->w->priv;

		if (src_module == NULL) {
			src_module = dst_module;
			continue;
		}

		ret = skl_bind_modules(ctx, src_module, dst_module);
		if (ret < 0)
			return ret;

		src_module = dst_module;
	}

	return 0;
}
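
/*
 * Translate the (module id, instance id) pairs in the KPB parameter blob
 * into the driver-assigned private instance ids before the blob is sent
 * to the firmware.
 */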
static int skl_fill_sink_instance_id(struct skl_sst *ctx,
				struct skl_algo_data *alg_data)
{
	struct skl_kpb_params *params = (struct skl_kpb_params *)alg_data->params;
	struct skl_mod_inst_map *inst;
	int i, pvt_id;

	inst = params->map;

	for (i = 0; i < params->num_modules; i++) {
		pvt_id = skl_get_pvt_instance_id_map(ctx,
					inst->mod_id, inst->inst_id);
		if (pvt_id < 0)
			return -EINVAL;

		inst->inst_id = pvt_id;
		inst++;
	}

	return 0;
}

/*
 * Some modules require params to be set after the module is bound to
 * all its connected pins.
 *
 * The module provider initializes the set_params flag for such modules
 * and we send the params after binding.
 */
static int skl_tplg_set_module_bind_params(struct snd_soc_dapm_widget *w,
			struct skl_module_cfg *mcfg, struct skl_sst *ctx)
{
	int i, ret;
	struct skl_module_cfg *mconfig = w->priv;
	const struct snd_kcontrol_new *k;
	struct soc_bytes_ext *sb;
	struct skl_algo_data *bc;
	struct skl_specific_cfg *sp_cfg;

	/*
	 * check all out/in pins are in bind state.
	 * if so set the module param
	 */
	for (i = 0; i < mcfg->max_out_queue; i++) {
		if (mcfg->m_out_pin[i].pin_state != SKL_PIN_BIND_DONE)
			return 0;
	}

	for (i = 0; i < mcfg->max_in_queue; i++) {
		if (mcfg->m_in_pin[i].pin_state != SKL_PIN_BIND_DONE)
			return 0;
	}

	if (mconfig->formats_config.caps_size > 0 &&
		mconfig->formats_config.set_params == SKL_PARAM_BIND) {
		sp_cfg = &mconfig->formats_config;
		ret = skl_set_module_params(ctx, sp_cfg->caps,
					sp_cfg->caps_size,
					sp_cfg->param_id, mconfig);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < w->num_kcontrols; i++) {
		k = &w->kcontrol_news[i];
		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
			sb = (void *) k->private_value;
			bc = (struct skl_algo_data *)sb->dobj.private;

			if (bc->set_params == SKL_PARAM_BIND) {
				if (mconfig->m_type == SKL_MODULE_TYPE_KPB)
					skl_fill_sink_instance_id(ctx, bc);

				ret = skl_set_module_params(ctx,
						(u32 *)bc->params, bc->max,
						bc->param_id, mconfig);
				if (ret < 0)
					return ret;
			}
		}
	}

	return 0;
}

static int skl_tplg_bind_sinks(struct snd_soc_dapm_widget *w,
				struct skl *skl,
				struct snd_soc_dapm_widget *src_w,
				struct skl_module_cfg *src_mconfig)
{
	struct snd_soc_dapm_path *p;
	struct snd_soc_dapm_widget *sink = NULL, *next_sink = NULL;
	struct skl_module_cfg *sink_mconfig;
	struct skl_sst *ctx = skl->skl_sst;
	int ret;

	snd_soc_dapm_widget_for_each_sink_path(w, p) {
		if (!p->connect)
			continue;

		dev_dbg(ctx->dev, "%s: src widget=%s\n", __func__, w->name);
		dev_dbg(ctx->dev, "%s: sink widget=%s\n", __func__, p->sink->name);

		next_sink = p->sink;

		if (!is_skl_dsp_widget_type(p->sink))
			return skl_tplg_bind_sinks(p->sink, skl, src_w, src_mconfig);

		/*
		 * The widgets in the sink pipeline can be of any type; we
		 * are only interested in the ones handled by the SKL
		 * driver, so check that first.
		 */
		if ((p->sink->priv != NULL) &&
				is_skl_dsp_widget_type(p->sink)) {

			sink = p->sink;
			sink_mconfig = sink->priv;

			if (src_mconfig->m_state == SKL_MODULE_UNINIT ||
				sink_mconfig->m_state == SKL_MODULE_UNINIT)
				continue;

			/* Bind source to sink, mixin is always source */
			ret = skl_bind_modules(ctx, src_mconfig, sink_mconfig);
			if (ret)
				return ret;

			/* set module params after bind */
			skl_tplg_set_module_bind_params(src_w, src_mconfig, ctx);
			skl_tplg_set_module_bind_params(sink, sink_mconfig, ctx);

			/* Start sinks pipe first */
			if (sink_mconfig->pipe->state != SKL_PIPE_STARTED) {
				if (sink_mconfig->pipe->conn_type !=
							SKL_PIPE_CONN_TYPE_FE)
					ret = skl_run_pipe(ctx,
							sink_mconfig->pipe);
				if (ret)
					return ret;
			}
		}
	}

	if (!sink)
		return skl_tplg_bind_sinks(next_sink, skl, src_w, src_mconfig);

	return 0;
}

/*
 * A PGA represents a module in a pipeline. So in the Pre-PMU event of the
 * PGA we need to do the following:
 *   - Bind to the sink pipeline
 *     Since the sink pipes can be running and we don't get a mixer event on
 *     connect for an already running mixer, we need to find the sink pipes
 *     here and bind to them. This way dynamic connect works.
 *   - Start the sink pipeline, if not running
 *   - Then run the current pipe
 */
static int skl_tplg_pga_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
								struct skl *skl)
{
	struct skl_module_cfg *src_mconfig;
	struct skl_sst *ctx = skl->skl_sst;
	int ret = 0;

	src_mconfig = w->priv;

	/*
	 * find which sink it is connected to, bind with the sink,
	 * if sink is not started, start sink pipe first, then start
	 * this pipe
	 */
	ret = skl_tplg_bind_sinks(w, skl, w, src_mconfig);
	if (ret)
		return ret;

	/* Start source pipe last after starting all sinks */
	if (src_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
		return skl_run_pipe(ctx, src_mconfig->pipe);

	return 0;
}

static struct snd_soc_dapm_widget *skl_get_src_dsp_widget(
		struct snd_soc_dapm_widget *w, struct skl *skl)
{
	struct snd_soc_dapm_path *p;
	struct snd_soc_dapm_widget *src_w = NULL;
	struct skl_sst *ctx = skl->skl_sst;

	snd_soc_dapm_widget_for_each_source_path(w, p) {
		src_w = p->source;
		if (!p->connect)
			continue;

		dev_dbg(ctx->dev, "sink widget=%s\n", w->name);
		dev_dbg(ctx->dev, "src widget=%s\n", p->source->name);

		/*
		 * The widgets in the source pipeline can be of any type; we
		 * are only interested in the ones handled by the SKL
		 * driver, so check that first.
		 */
		if ((p->source->priv != NULL) &&
				is_skl_dsp_widget_type(p->source)) {
			return p->source;
		}
	}

	if (src_w != NULL)
		return skl_get_src_dsp_widget(src_w, skl);

	return NULL;
}

/*
 * In the Post-PMU event of the mixer we need to do the following:
 *   - Check if this pipe is running
 *   - if not, then
 *	- bind this pipeline to its source pipeline
 *	  if the source pipe is already running, this means it is a dynamic
 *	  connection and we need to bind only to that pipe
 *	- start this pipeline
 */
static int skl_tplg_mixer_dapm_post_pmu_event(struct snd_soc_dapm_widget *w,
							struct skl *skl)
{
	int ret = 0;
	struct snd_soc_dapm_widget *source, *sink;
	struct skl_module_cfg *src_mconfig, *sink_mconfig;
	struct skl_sst *ctx = skl->skl_sst;
	int src_pipe_started = 0;

	sink = w;
	sink_mconfig = sink->priv;

	/*
	 * If the source pipe is already started, it means the source is
	 * driving one more sink before this sink got connected. Since the
	 * source is started, bind this sink to the source and start this
	 * pipe.
	 */
	source = skl_get_src_dsp_widget(w, skl);
	if (source != NULL) {
		src_mconfig = source->priv;
		sink_mconfig = sink->priv;
		src_pipe_started = 1;

		/*
		 * check pipe state; if the source pipe is not started there
		 * is no need to bind or start the pipe
		 */
		if (src_mconfig->pipe->state != SKL_PIPE_STARTED)
			src_pipe_started = 0;
	}

	if (src_pipe_started) {
		ret = skl_bind_modules(ctx, src_mconfig, sink_mconfig);
		if (ret)
			return ret;

		/* set module params after bind */
		skl_tplg_set_module_bind_params(source, src_mconfig, ctx);
		skl_tplg_set_module_bind_params(sink, sink_mconfig, ctx);

		if (sink_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
			ret = skl_run_pipe(ctx, sink_mconfig->pipe);
	}

	return ret;
}

/*
 * In the Pre-PMD event of the mixer we need to do the following:
 *   - Stop the pipe
 *   - find the source connections and remove them from the dapm_path_list
 *   - unbind from the source pipelines if still connected
 */
static int skl_tplg_mixer_dapm_pre_pmd_event(struct snd_soc_dapm_widget *w,
							struct skl *skl)
{
	struct skl_module_cfg *src_mconfig, *sink_mconfig;
	int ret = 0, i;
	struct skl_sst *ctx = skl->skl_sst;

	sink_mconfig = w->priv;

	/* Stop the pipe */
	ret = skl_stop_pipe(ctx, sink_mconfig->pipe);
	if (ret)
		return ret;

	for (i = 0; i < sink_mconfig->max_in_queue; i++) {
		if (sink_mconfig->m_in_pin[i].pin_state == SKL_PIN_BIND_DONE) {
			src_mconfig = sink_mconfig->m_in_pin[i].tgt_mcfg;
			if (!src_mconfig)
				continue;
			/*
			 * If the pin is still bound, the PMD for the source
			 * pipe has not occurred and the source is connected
			 * to some other sink, so it is the sink's
			 * responsibility to unbind itself from the source.
			 */
			ret = skl_stop_pipe(ctx, src_mconfig->pipe);
			if (ret < 0)
				return ret;

			ret = skl_unbind_modules(ctx,
						src_mconfig, sink_mconfig);
		}
	}

	return ret;
}

/*
 * In the Post-PMD event of the mixer we need to do the following:
 *   - Free the mcps used
 *   - Free the mem used
 *   - Unbind the modules within the pipeline
 *   - Delete the pipeline (modules are not required to be explicitly
 *     deleted, pipeline delete is enough here)
 */
static int skl_tplg_mixer_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
							struct skl *skl)
{
	struct skl_module_cfg *mconfig = w->priv;
	struct skl_pipe_module *w_module;
	struct skl_module_cfg *src_module = NULL, *dst_module;
	struct skl_sst *ctx = skl->skl_sst;
	struct skl_pipe *s_pipe = mconfig->pipe;

	if (s_pipe->state == SKL_PIPE_INVALID)
		return -EINVAL;

	skl_tplg_free_pipe_mcps(skl, mconfig);
	skl_tplg_free_pipe_mem(skl, mconfig);

	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		dst_module = w_module->w->priv;

		if (mconfig->m_state >= SKL_MODULE_INIT_DONE)
			skl_tplg_free_pipe_mcps(skl, dst_module);
		if (src_module == NULL) {
			src_module = dst_module;
			continue;
		}

		skl_unbind_modules(ctx, src_module, dst_module);
		src_module = dst_module;
	}

	skl_delete_pipe(ctx, mconfig->pipe);

	return skl_tplg_unload_pipe_modules(ctx, s_pipe);
}

/*
 * In the Post-PMD event of the PGA we need to do the following:
 *   - Free the mcps used
 *   - Stop the pipeline
 *   - If a source pipe is connected, unbind from the source pipelines
 */
static int skl_tplg_pga_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
								struct skl *skl)
{
	struct skl_module_cfg *src_mconfig, *sink_mconfig;
	int ret = 0, i;
	struct skl_sst *ctx = skl->skl_sst;

	src_mconfig = w->priv;

	/* Stop the pipe since this is a mixin module */
	ret = skl_stop_pipe(ctx, src_mconfig->pipe);
	if (ret)
		return ret;

	for (i = 0; i < src_mconfig->max_out_queue; i++) {
		if (src_mconfig->m_out_pin[i].pin_state == SKL_PIN_BIND_DONE) {
			sink_mconfig = src_mconfig->m_out_pin[i].tgt_mcfg;
			if (!sink_mconfig)
				continue;
			/*
			 * This is a connector; if the pin is still bound,
			 * the unbind between source and sink has not
			 * happened yet.
			 */
			ret = skl_unbind_modules(ctx, src_mconfig,
							sink_mconfig);
		}
	}

	return ret;
}

/*
 * In modelling, we assume there will be ONLY one mixer in a pipeline. If
 * a mixer is not required then it is treated as a static mixer aka vmixer
 * with a hard path to the source module.
 * So we don't need to check if the source is started or not, as the hard
 * path puts a dependency on each other.
 */
static int skl_tplg_vmixer_event(struct snd_soc_dapm_widget *w,
				struct snd_kcontrol *k, int event)
{
	struct snd_soc_dapm_context *dapm = w->dapm;
	struct skl *skl = get_skl_ctx(dapm->dev);

	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		return skl_tplg_mixer_dapm_pre_pmu_event(w, skl);

	case SND_SOC_DAPM_POST_PMU:
		return skl_tplg_mixer_dapm_post_pmu_event(w, skl);

	case SND_SOC_DAPM_PRE_PMD:
		return skl_tplg_mixer_dapm_pre_pmd_event(w, skl);

	case SND_SOC_DAPM_POST_PMD:
		return skl_tplg_mixer_dapm_post_pmd_event(w, skl);
	}

	return 0;
}

/*
 * In modelling, we assume there will be ONLY one mixer in a pipeline. If a
 * second one is required, that is created as another pipe entity.
 * The mixer is responsible for pipe management and represents a pipeline
 * instance.
 */
static int skl_tplg_mixer_event(struct snd_soc_dapm_widget *w,
				struct snd_kcontrol *k, int event)
{
	struct snd_soc_dapm_context *dapm = w->dapm;
	struct skl *skl = get_skl_ctx(dapm->dev);

	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		return skl_tplg_mixer_dapm_pre_pmu_event(w, skl);

	case SND_SOC_DAPM_POST_PMU:
		return skl_tplg_mixer_dapm_post_pmu_event(w, skl);

	case SND_SOC_DAPM_PRE_PMD:
		return skl_tplg_mixer_dapm_pre_pmd_event(w, skl);

	case SND_SOC_DAPM_POST_PMD:
		return skl_tplg_mixer_dapm_post_pmd_event(w, skl);
	}

	return 0;
}

/*
 * In modelling, we assume the rest of the modules in a pipeline are PGAs.
 * But we are interested in the last PGA (leaf PGA) in a pipeline to
 * disconnect from the sink when it is running (two FE to one BE or one FE
 * to two BE) scenarios.
 */
static int skl_tplg_pga_event(struct snd_soc_dapm_widget *w,
			struct snd_kcontrol *k, int event)
{
	struct snd_soc_dapm_context *dapm = w->dapm;
	struct skl *skl = get_skl_ctx(dapm->dev);

	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		return skl_tplg_pga_dapm_pre_pmu_event(w, skl);

	case SND_SOC_DAPM_POST_PMD:
		return skl_tplg_pga_dapm_post_pmd_event(w, skl);
	}

	return 0;
}

static int skl_tplg_tlv_control_get(struct snd_kcontrol *kcontrol,
			unsigned int __user *data, unsigned int size)
{
	struct soc_bytes_ext *sb =
			(struct soc_bytes_ext *)kcontrol->private_value;
	struct skl_algo_data *bc = (struct skl_algo_data *)sb->dobj.private;
	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
	struct skl_module_cfg *mconfig = w->priv;
	struct skl *skl = get_skl_ctx(w->dapm->dev);

	if (w->power)
		skl_get_module_params(skl->skl_sst, (u32 *)bc->params,
				      bc->size, bc->param_id, mconfig);

	/* decrement size for TLV header */
	size -= 2 * sizeof(u32);

	/* check size as we don't want to send kernel data */
	if (size > bc->max)
		size = bc->max;

	if (bc->params) {
		if (copy_to_user(data, &bc->param_id, sizeof(u32)))
			return -EFAULT;
		if (copy_to_user(data + 1, &size, sizeof(u32)))
			return -EFAULT;
		if (copy_to_user(data + 2, bc->params, size))
			return -EFAULT;
	}

	return 0;
}

#define SKL_PARAM_VENDOR_ID 0xff

static int skl_tplg_tlv_control_set(struct snd_kcontrol *kcontrol,
			const unsigned int __user *data, unsigned int size)
{
	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
	struct skl_module_cfg *mconfig = w->priv;
	struct soc_bytes_ext *sb =
			(struct soc_bytes_ext *)kcontrol->private_value;
	struct skl_algo_data *ac = (struct skl_algo_data *)sb->dobj.private;
	struct skl *skl = get_skl_ctx(w->dapm->dev);

	if (ac->params) {
		if (size > ac->max)
			return -EINVAL;

		ac->size = size;
		/*
		 * if the param_id is of type Vendor, firmware expects the
		 * actual parameter id and size from the control.
		 */
		if (ac->param_id == SKL_PARAM_VENDOR_ID) {
			if (copy_from_user(ac->params, data, size))
				return -EFAULT;
		} else {
			if (copy_from_user(ac->params,
					   data + 2, size))
				return -EFAULT;
		}

		if (w->power)
			return skl_set_module_params(skl->skl_sst,
						(u32 *)ac->params, ac->size,
						ac->param_id, mconfig);
	}

	return 0;
}

/*
 * Fill the dma id for host and link. In case of a passthrough pipeline,
 * both host and link sit in the same pipeline, so copy the link and host
 * params based on dev_type.
 */
static void skl_tplg_fill_dma_id(struct skl_module_cfg *mcfg,
				struct skl_pipe_params *params)
{
	struct skl_pipe *pipe = mcfg->pipe;

	if (pipe->passthru) {
		switch (mcfg->dev_type) {
		case SKL_DEVICE_HDALINK:
			pipe->p_params->link_dma_id = params->link_dma_id;
			pipe->p_params->link_index = params->link_index;
			break;

		case SKL_DEVICE_HDAHOST:
			pipe->p_params->host_dma_id = params->host_dma_id;
			break;

		default:
			break;
		}
		pipe->p_params->s_fmt = params->s_fmt;
		pipe->p_params->ch = params->ch;
		pipe->p_params->s_freq = params->s_freq;
		pipe->p_params->stream = params->stream;
		pipe->p_params->format = params->format;

	} else {
		memcpy(pipe->p_params, params, sizeof(*params));
	}
}

/*
 * The FE params are passed by the hw_params of the DAI.
 * On hw_params, the params are stored in the Gateway module of the FE and
 * we need to calculate the format in the DSP module configuration; that
 * conversion is done here.
 */
int skl_tplg_update_pipe_params(struct device *dev,
			struct skl_module_cfg *mconfig,
			struct skl_pipe_params *params)
{
	struct skl_module_fmt *format = NULL;

	skl_tplg_fill_dma_id(mconfig, params);

	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK)
		format = &mconfig->in_fmt[0];
	else
		format = &mconfig->out_fmt[0];

	/* set the hw_params */
	format->s_freq = params->s_freq;
	format->channels = params->ch;
	format->valid_bit_depth = skl_get_bit_depth(params->s_fmt);

	/*
	 * 16 bit is 16 bit container whereas 24 bit is in 32 bit
	 * container so update bit depth accordingly
	 */
	switch (format->valid_bit_depth) {
	case SKL_DEPTH_16BIT:
		format->bit_depth = format->valid_bit_depth;
		break;

	case SKL_DEPTH_24BIT:
	case SKL_DEPTH_32BIT:
		format->bit_depth = SKL_DEPTH_32BIT;
		break;

	default:
		dev_err(dev, "Invalid bit depth %x for pipe\n",
				format->valid_bit_depth);
		return -EINVAL;
	}

	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		mconfig->ibs = (format->s_freq / 1000) *
				(format->channels) *
				(format->bit_depth >> 3);
	} else {
		mconfig->obs = (format->s_freq / 1000) *
				(format->channels) *
				(format->bit_depth >> 3);
	}

	return 0;
}

/*
 * Query the module config for the FE DAI.
 * This is used to find the hw_params set for that DAI and apply it to the
 * FE pipeline.
 */
struct skl_module_cfg *
skl_tplg_fe_get_cpr_module(struct snd_soc_dai *dai, int stream)
{
	struct snd_soc_dapm_widget *w;
	struct snd_soc_dapm_path *p = NULL;

	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
		w = dai->playback_widget;
		snd_soc_dapm_widget_for_each_sink_path(w, p) {
			if (p->connect && p->sink->power &&
					!is_skl_dsp_widget_type(p->sink))
				continue;

			if (p->sink->priv) {
				dev_dbg(dai->dev, "set params for %s\n",
						p->sink->name);
				return p->sink->priv;
			}
		}
	} else {
		w = dai->capture_widget;
		snd_soc_dapm_widget_for_each_source_path(w, p) {
			if (p->connect && p->source->power &&
					!is_skl_dsp_widget_type(p->source))
				continue;

			if (p->source->priv) {
				dev_dbg(dai->dev, "set params for %s\n",
						p->source->name);
				return p->source->priv;
			}
		}
	}

	return NULL;
}

static struct skl_module_cfg *skl_get_mconfig_pb_cpr(
		struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
{
	struct snd_soc_dapm_path *p;
	struct skl_module_cfg *mconfig = NULL;

	snd_soc_dapm_widget_for_each_source_path(w, p) {
		if (w->endpoints[SND_SOC_DAPM_DIR_OUT] > 0) {
			if (p->connect &&
				    (p->sink->id == snd_soc_dapm_aif_out) &&
				    p->source->priv) {
				mconfig = p->source->priv;
				return mconfig;
			}
			mconfig = skl_get_mconfig_pb_cpr(dai, p->source);
			if (mconfig)
				return mconfig;
		}
	}
	return mconfig;
}

static struct skl_module_cfg *skl_get_mconfig_cap_cpr(
		struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
{
	struct snd_soc_dapm_path *p;
	struct skl_module_cfg *mconfig = NULL;

	snd_soc_dapm_widget_for_each_sink_path(w, p) {
		if (w->endpoints[SND_SOC_DAPM_DIR_IN] > 0) {
			if (p->connect &&
				    (p->source->id == snd_soc_dapm_aif_in) &&
				    p->sink->priv) {
				mconfig = p->sink->priv;
				return mconfig;
			}
			mconfig = skl_get_mconfig_cap_cpr(dai, p->sink);
			if (mconfig)
				return mconfig;
		}
	}
	return mconfig;
}

struct skl_module_cfg *
skl_tplg_be_get_cpr_module(struct snd_soc_dai *dai, int stream)
{
	struct snd_soc_dapm_widget *w;
	struct skl_module_cfg *mconfig;

	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
		w = dai->playback_widget;
		mconfig = skl_get_mconfig_pb_cpr(dai, w);
	} else {
		w = dai->capture_widget;
		mconfig = skl_get_mconfig_cap_cpr(dai, w);
	}
	return mconfig;
}

static u8 skl_tplg_be_link_type(int dev_type)
{
	int ret;

	switch (dev_type) {
	case SKL_DEVICE_BT:
		ret = NHLT_LINK_SSP;
		break;

	case SKL_DEVICE_DMIC:
		ret = NHLT_LINK_DMIC;
		break;

	case SKL_DEVICE_I2S:
		ret = NHLT_LINK_SSP;
		break;

	case SKL_DEVICE_HDALINK:
		ret = NHLT_LINK_HDA;
		break;

	default:
		ret = NHLT_LINK_INVALID;
		break;
	}

	return ret;
}

/*
 * Fill the BE gateway parameters.
 * The BE gateway expects a blob of parameters which are kept in the ACPI
 * NHLT blob, so query the blob for interface type (i2s/pdm) and instance.
 * The port can have multiple settings, so pick based on the PCM
 * parameters.
 */
static int skl_tplg_be_fill_pipe_params(struct snd_soc_dai *dai,
				struct skl_module_cfg *mconfig,
				struct skl_pipe_params *params)
{
	struct nhlt_specific_cfg *cfg;
	struct skl *skl = get_skl_ctx(dai->dev);
	int link_type = skl_tplg_be_link_type(mconfig->dev_type);
	u8 dev_type = skl_tplg_be_dev_type(mconfig->dev_type);

	skl_tplg_fill_dma_id(mconfig, params);

	if (link_type == NHLT_LINK_HDA)
		return 0;

	/* update the blob based on virtual bus_id */
	cfg = skl_get_ep_blob(skl, mconfig->vbus_id, link_type,
					params->s_fmt, params->ch,
					params->s_freq, params->stream,
					dev_type);
	if (cfg) {
		mconfig->formats_config.caps_size = cfg->size;
		mconfig->formats_config.caps = (u32 *) &cfg->caps;
	} else {
		dev_err(dai->dev, "Blob NULL for id %x type %d dirn %d\n",
					mconfig->vbus_id, link_type,
					params->stream);
		dev_err(dai->dev, "PCM: ch %d, freq %d, fmt %d\n",
				 params->ch, params->s_freq, params->s_fmt);
		return -EINVAL;
	}

	return 0;
}

static int skl_tplg_be_set_src_pipe_params(struct snd_soc_dai *dai,
				struct snd_soc_dapm_widget *w,
				struct skl_pipe_params *params)
{
	struct snd_soc_dapm_path *p;
	int ret = -EIO;

	snd_soc_dapm_widget_for_each_source_path(w, p) {
		if (p->connect && is_skl_dsp_widget_type(p->source) &&
						p->source->priv) {

			ret = skl_tplg_be_fill_pipe_params(dai,
						p->source->priv, params);
			if (ret < 0)
				return ret;
		} else {
			ret = skl_tplg_be_set_src_pipe_params(dai,
						p->source, params);
			if (ret < 0)
				return ret;
		}
	}

	return ret;
}

static int skl_tplg_be_set_sink_pipe_params(struct snd_soc_dai *dai,
	struct snd_soc_dapm_widget *w, struct skl_pipe_params *params)
{
	struct snd_soc_dapm_path *p = NULL;
	int ret = -EIO;

	snd_soc_dapm_widget_for_each_sink_path(w, p) {
		if (p->connect && is_skl_dsp_widget_type(p->sink) &&
						p->sink->priv) {

			ret = skl_tplg_be_fill_pipe_params(dai,
						p->sink->priv, params);
			if (ret < 0)
				return ret;
		} else {
			ret = skl_tplg_be_set_sink_pipe_params(
						dai, p->sink, params);
			if (ret < 0)
				return ret;
		}
	}

	return ret;
}

/*
 * BE hw_params can be source parameters (capture) or sink parameters
 * (playback). Based on sink and source we need to either find the source
 * list or the sink list and set the pipeline parameters.
 */
int skl_tplg_be_update_params(struct snd_soc_dai *dai,
				struct skl_pipe_params *params)
{
	struct snd_soc_dapm_widget *w;

	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		w = dai->playback_widget;

		return skl_tplg_be_set_src_pipe_params(dai, w, params);

	} else {
		w = dai->capture_widget;

		return skl_tplg_be_set_sink_pipe_params(dai, w, params);
	}

	return 0;
}

static const struct snd_soc_tplg_widget_events skl_tplg_widget_ops[] = {
	{SKL_MIXER_EVENT, skl_tplg_mixer_event},
	{SKL_VMIXER_EVENT, skl_tplg_vmixer_event},
	{SKL_PGA_EVENT, skl_tplg_pga_event},
};

static const struct snd_soc_tplg_bytes_ext_ops skl_tlv_ops[] = {
	{SKL_CONTROL_TYPE_BYTE_TLV, skl_tplg_tlv_control_get,
					skl_tplg_tlv_control_set},
};

static int skl_tplg_fill_pipe_tkn(struct device *dev,
			struct skl_pipe *pipe, u32 tkn,
			u32 tkn_val)
{
	switch (tkn) {
	case SKL_TKN_U32_PIPE_CONN_TYPE:
		pipe->conn_type = tkn_val;
		break;

	case SKL_TKN_U32_PIPE_PRIORITY:
		pipe->pipe_priority = tkn_val;
		break;

	case SKL_TKN_U32_PIPE_MEM_PGS:
		pipe->memory_pages = tkn_val;
		break;

	case SKL_TKN_U32_PMODE:
		pipe->lp_mode = tkn_val;
		break;

	default:
		dev_err(dev, "Token not handled %d\n", tkn);
		return -EINVAL;
	}

	return 0;
}

/*
 * Add a pipeline by parsing the relevant tokens.
 * Returns a positive EEXIST if the pipe already exists.
 */
static int skl_tplg_add_pipe(struct device *dev,
		struct skl_module_cfg *mconfig, struct skl *skl,
		struct snd_soc_tplg_vendor_value_elem *tkn_elem)
{
	struct skl_pipeline *ppl;
	struct skl_pipe *pipe;
	struct skl_pipe_params *params;

	list_for_each_entry(ppl, &skl->ppl_list, node) {
		if (ppl->pipe->ppl_id == tkn_elem->value) {
			mconfig->pipe = ppl->pipe;
			return EEXIST;
		}
	}

	ppl = devm_kzalloc(dev, sizeof(*ppl), GFP_KERNEL);
	if (!ppl)
		return -ENOMEM;

	pipe = devm_kzalloc(dev, sizeof(*pipe), GFP_KERNEL);
	if (!pipe)
		return -ENOMEM;

	params = devm_kzalloc(dev, sizeof(*params), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	pipe->p_params = params;
	pipe->ppl_id = tkn_elem->value;
	INIT_LIST_HEAD(&pipe->w_list);

	ppl->pipe = pipe;
	list_add(&ppl->node, &skl->ppl_list);

	mconfig->pipe = pipe;
	mconfig->pipe->state = SKL_PIPE_INVALID;

	return 0;
}

static int skl_tplg_fill_pin(struct device *dev, u32 tkn,
			struct skl_module_pin *m_pin,
			int pin_index, u32 value)
{
	switch (tkn) {
	case SKL_TKN_U32_PIN_MOD_ID:
		m_pin[pin_index].id.module_id = value;
		break;

	case SKL_TKN_U32_PIN_INST_ID:
		m_pin[pin_index].id.instance_id = value;
		break;

	default:
		dev_err(dev, "%d Not a pin token\n", value);
		return -EINVAL;
	}

	return 0;
}

/*
 * Parse for pin config specific tokens to fill up the
 * module private data
 */
static int skl_tplg_fill_pins_info(struct device *dev,
		struct skl_module_cfg *mconfig,
		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
		int dir, int pin_count)
{
	int ret;
	struct skl_module_pin *m_pin;

	switch (dir) {
	case SKL_DIR_IN:
		m_pin = mconfig->m_in_pin;
		break;

	case SKL_DIR_OUT:
		m_pin = mconfig->m_out_pin;
		break;

	default:
		dev_err(dev, "Invalid direction value\n");
		return -EINVAL;
	}

	ret = skl_tplg_fill_pin(dev, tkn_elem->token,
			m_pin, pin_count, tkn_elem->value);

	if (ret < 0)
		return ret;

	m_pin[pin_count].in_use = false;
	m_pin[pin_count].pin_state = SKL_PIN_UNBIND;

	return 0;
}

/*
 * Fill up the input/output module config format based
 * on the direction
 */
static int skl_tplg_fill_fmt(struct device *dev,
		struct skl_module_cfg *mconfig, u32 tkn,
		u32 value, u32 dir, u32 pin_count)
{
	struct skl_module_fmt *dst_fmt;

	switch (dir) {
	case SKL_DIR_IN:
		dst_fmt = mconfig->in_fmt;
		dst_fmt += pin_count;
		break;

	case SKL_DIR_OUT:
		dst_fmt = mconfig->out_fmt;
		dst_fmt += pin_count;
		break;

	default:
		dev_err(dev, "Invalid direction value\n");
		return -EINVAL;
	}

	switch (tkn) {
	case SKL_TKN_U32_FMT_CH:
		dst_fmt->channels = value;
		break;

	case SKL_TKN_U32_FMT_FREQ:
		dst_fmt->s_freq = value;
		break;

	case SKL_TKN_U32_FMT_BIT_DEPTH:
		dst_fmt->bit_depth = value;
		break;

	case SKL_TKN_U32_FMT_SAMPLE_SIZE:
		dst_fmt->valid_bit_depth = value;
		break;

	case SKL_TKN_U32_FMT_CH_CONFIG:
		dst_fmt->ch_cfg = value;
		break;

	case SKL_TKN_U32_FMT_INTERLEAVE:
		dst_fmt->interleaving_style = value;
		break;

	case SKL_TKN_U32_FMT_SAMPLE_TYPE:
		dst_fmt->sample_type = value;
		break;

	case SKL_TKN_U32_FMT_CH_MAP:
		dst_fmt->ch_map = value;
		break;

	default:
		dev_err(dev, "Invalid token %d\n", tkn);
		return -EINVAL;
	}

	return 0;
}

static int skl_tplg_get_uuid(struct device *dev, struct skl_module_cfg *mconfig,
	      struct snd_soc_tplg_vendor_uuid_elem *uuid_tkn)
{
	if (uuid_tkn->token == SKL_TKN_UUID)
		memcpy(&mconfig->guid, &uuid_tkn->uuid, 16);
	else {
		dev_err(dev, "Not an UUID token tkn %d\n", uuid_tkn->token);
		return -EINVAL;
	}

	return 0;
}
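
/* Mark every pin of a module as dynamic/static per the topology token */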
static void skl_tplg_fill_pin_dynamic_val(
		struct skl_module_pin *mpin, u32 pin_count, u32 value)
{
	int i;

	for (i = 0; i < pin_count; i++)
		mpin[i].is_dynamic = value;
}

/*
 * Parse tokens to fill up the module private data
 */
static int skl_tplg_get_token(struct device *dev,
		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
		struct skl *skl, struct skl_module_cfg *mconfig)
{
	int tkn_count = 0;
	int ret;
	static int is_pipe_exists;
	static int pin_index, dir;

	if (tkn_elem->token > SKL_TKN_MAX)
		return -EINVAL;

	switch (tkn_elem->token) {
	case SKL_TKN_U8_IN_QUEUE_COUNT:
		mconfig->max_in_queue = tkn_elem->value;
		mconfig->m_in_pin = devm_kzalloc(dev, mconfig->max_in_queue *
					sizeof(*mconfig->m_in_pin),
					GFP_KERNEL);
		if (!mconfig->m_in_pin)
			return -ENOMEM;

		break;

	case SKL_TKN_U8_OUT_QUEUE_COUNT:
		mconfig->max_out_queue = tkn_elem->value;
		mconfig->m_out_pin = devm_kzalloc(dev, mconfig->max_out_queue *
					sizeof(*mconfig->m_out_pin),
					GFP_KERNEL);

		if (!mconfig->m_out_pin)
			return -ENOMEM;

		break;

	case SKL_TKN_U8_DYN_IN_PIN:
		if (!mconfig->m_in_pin)
			return -ENOMEM;

		skl_tplg_fill_pin_dynamic_val(mconfig->m_in_pin,
			mconfig->max_in_queue, tkn_elem->value);

		break;

	case SKL_TKN_U8_DYN_OUT_PIN:
		if (!mconfig->m_out_pin)
			return -ENOMEM;

		skl_tplg_fill_pin_dynamic_val(mconfig->m_out_pin,
			mconfig->max_out_queue, tkn_elem->value);

		break;

	case SKL_TKN_U8_TIME_SLOT:
		mconfig->time_slot = tkn_elem->value;
		break;

	case SKL_TKN_U8_CORE_ID:
		mconfig->core_id = tkn_elem->value;
		break;

	case SKL_TKN_U8_MOD_TYPE:
		mconfig->m_type = tkn_elem->value;
		break;

	case SKL_TKN_U8_DEV_TYPE:
		mconfig->dev_type = tkn_elem->value;
		break;

	case SKL_TKN_U8_HW_CONN_TYPE:
		mconfig->hw_conn_type = tkn_elem->value;
		break;

	case SKL_TKN_U16_MOD_INST_ID:
		mconfig->id.instance_id =
		tkn_elem->value;
		break;

	case SKL_TKN_U32_MEM_PAGES:
		mconfig->mem_pages = tkn_elem->value;
		break;

	case SKL_TKN_U32_MAX_MCPS:
		mconfig->mcps = tkn_elem->value;
		break;

	case SKL_TKN_U32_OBS:
		mconfig->obs = tkn_elem->value;
		break;

	case SKL_TKN_U32_IBS:
		mconfig->ibs = tkn_elem->value;
		break;

	case SKL_TKN_U32_VBUS_ID:
		mconfig->vbus_id = tkn_elem->value;
		break;

	case SKL_TKN_U32_PARAMS_FIXUP:
		mconfig->params_fixup = tkn_elem->value;
		break;

	case SKL_TKN_U32_CONVERTER:
		mconfig->converter = tkn_elem->value;
		break;

	case SKL_TKL_U32_D0I3_CAPS:
		mconfig->d0i3_caps = tkn_elem->value;
		break;

	case SKL_TKN_U32_PIPE_ID:
		ret = skl_tplg_add_pipe(dev,
				mconfig, skl, tkn_elem);

		if (ret < 0)
			return is_pipe_exists;

		if (ret == EEXIST)
			is_pipe_exists = 1;

		break;

	case SKL_TKN_U32_PIPE_CONN_TYPE:
	case SKL_TKN_U32_PIPE_PRIORITY:
	case SKL_TKN_U32_PIPE_MEM_PGS:
	case SKL_TKN_U32_PMODE:
		if (is_pipe_exists) {
			ret = skl_tplg_fill_pipe_tkn(dev, mconfig->pipe,
					tkn_elem->token, tkn_elem->value);
			if (ret < 0)
				return ret;
		}

		break;

	/*
	 * SKL_TKN_U32_DIR_PIN_COUNT token has the value for both
	 * direction and the pin count. The first four bits represent
	 * direction and next four the pin count.
	 */
	case SKL_TKN_U32_DIR_PIN_COUNT:
		dir = tkn_elem->value & SKL_IN_DIR_BIT_MASK;
		pin_index = (tkn_elem->value &
			SKL_PIN_COUNT_MASK) >> 4;

		break;

	case SKL_TKN_U32_FMT_CH:
	case SKL_TKN_U32_FMT_FREQ:
	case SKL_TKN_U32_FMT_BIT_DEPTH:
	case SKL_TKN_U32_FMT_SAMPLE_SIZE:
	case SKL_TKN_U32_FMT_CH_CONFIG:
	case SKL_TKN_U32_FMT_INTERLEAVE:
	case SKL_TKN_U32_FMT_SAMPLE_TYPE:
	case SKL_TKN_U32_FMT_CH_MAP:
		ret = skl_tplg_fill_fmt(dev, mconfig, tkn_elem->token,
				tkn_elem->value, dir, pin_index);

		if (ret < 0)
			return ret;

		break;

	case SKL_TKN_U32_PIN_MOD_ID:
	case SKL_TKN_U32_PIN_INST_ID:
		ret = skl_tplg_fill_pins_info(dev,
				mconfig, tkn_elem, dir,
				pin_index);
		if (ret < 0)
			return ret;

		break;

	case SKL_TKN_U32_CAPS_SIZE:
		mconfig->formats_config.caps_size =
			tkn_elem->value;

		break;

	case SKL_TKN_U32_PROC_DOMAIN:
		mconfig->domain =
			tkn_elem->value;

		break;

	case SKL_TKN_U8_IN_PIN_TYPE:
	case SKL_TKN_U8_OUT_PIN_TYPE:
	case SKL_TKN_U8_CONN_TYPE:
		break;

	default:
		dev_err(dev, "Token %d not handled\n",
				tkn_elem->token);
		return -EINVAL;
	}

	tkn_count++;

	return tkn_count;
}

/*
 * Parse the vendor array for specific tokens to construct
 * module private data
 */
static int skl_tplg_get_tokens(struct device *dev,
		char *pvt_data, struct skl *skl,
		struct skl_module_cfg *mconfig, int block_size)
{
	struct snd_soc_tplg_vendor_array *array;
	struct snd_soc_tplg_vendor_value_elem *tkn_elem;
	int tkn_count = 0, ret;
	int off = 0, tuple_size = 0;

	if (block_size <= 0)
		return -EINVAL;

	while (tuple_size < block_size) {
		array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off);

		off += array->size;

		switch (array->type) {
		case SND_SOC_TPLG_TUPLE_TYPE_STRING:
			dev_warn(dev, "no string tokens expected for skl tplg\n");
			continue;

		case SND_SOC_TPLG_TUPLE_TYPE_UUID:
			ret = skl_tplg_get_uuid(dev, mconfig, array->uuid);
			if (ret < 0)
				return ret;

			tuple_size += sizeof(*array->uuid);

			continue;

		default:
			tkn_elem = array->value;
			tkn_count = 0;
			break;
		}

		while (tkn_count <= (array->num_elems - 1)) {
			ret = skl_tplg_get_token(dev, tkn_elem,
					skl, mconfig);

			if (ret < 0)
				return ret;

			tkn_count = tkn_count + ret;
			tkn_elem++;
		}

		tuple_size += tkn_count * sizeof(*tkn_elem);
	}

	return 0;
}

/*
 * Every data block is preceded by a descriptor to read the number
 * of data blocks, the type of the block and its size.
 */
static int skl_tplg_get_desc_blocks(struct device *dev,
		struct snd_soc_tplg_vendor_array *array)
{
	struct snd_soc_tplg_vendor_value_elem *tkn_elem;

	tkn_elem = array->value;

	switch (tkn_elem->token) {
	case SKL_TKN_U8_NUM_BLOCKS:
	case SKL_TKN_U8_BLOCK_TYPE:
	case SKL_TKN_U16_BLOCK_SIZE:
		return tkn_elem->value;

	default:
		dev_err(dev, "Invalid descriptor token %d\n", tkn_elem->token);
		break;
	}

	return -EINVAL;
}

/*
 * Parse the private data for the token and corresponding value.
 * The private data can have multiple data blocks. So, a data block
 * is preceded by a descriptor for the number of blocks and a descriptor
 * for the type and size of the succeeding data block.
 */
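/*
 * Expected layout, as consumed by the parser below:
 *   [NUM_BLOCKS descriptor], then per block:
 *   [BLOCK_TYPE descriptor][BLOCK_SIZE descriptor][block data]
 */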
static int skl_tplg_get_pvt_data(struct snd_soc_tplg_dapm_widget *tplg_w,
				struct skl *skl, struct device *dev,
				struct skl_module_cfg *mconfig)
{
	struct snd_soc_tplg_vendor_array *array;
	int num_blocks, block_size = 0, block_type, off = 0;
	char *data;
	int ret;

	/* Read the NUM_DATA_BLOCKS descriptor */
	array = (struct snd_soc_tplg_vendor_array *)tplg_w->priv.data;
	ret = skl_tplg_get_desc_blocks(dev, array);
	if (ret < 0)
		return ret;
	num_blocks = ret;

	off += array->size;
	array = (struct snd_soc_tplg_vendor_array *)(tplg_w->priv.data + off);

	/* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */
	while (num_blocks > 0) {
		ret = skl_tplg_get_desc_blocks(dev, array);

		if (ret < 0)
			return ret;
		block_type = ret;
		off += array->size;

		array = (struct snd_soc_tplg_vendor_array *)
			(tplg_w->priv.data + off);

		ret = skl_tplg_get_desc_blocks(dev, array);

		if (ret < 0)
			return ret;
		block_size = ret;
		off += array->size;

		array = (struct snd_soc_tplg_vendor_array *)
			(tplg_w->priv.data + off);

		data = (tplg_w->priv.data + off);

		if (block_type == SKL_TYPE_TUPLE) {
			ret = skl_tplg_get_tokens(dev, data,
					skl, mconfig, block_size);

			if (ret < 0)
				return ret;

			--num_blocks;
		} else {
			if (mconfig->formats_config.caps_size > 0)
				memcpy(mconfig->formats_config.caps, data,
					mconfig->formats_config.caps_size);
			--num_blocks;
		}
	}

	return 0;
}

static void skl_clear_pin_config(struct snd_soc_platform *platform,
				struct snd_soc_dapm_widget *w)
{
	int i;
	struct skl_module_cfg *mconfig;
	struct skl_pipe *pipe;

	if (!strncmp(w->dapm->component->name, platform->component.name,
					strlen(platform->component.name))) {
		mconfig = w->priv;
		pipe = mconfig->pipe;
		for (i = 0; i < mconfig->max_in_queue; i++) {
			mconfig->m_in_pin[i].in_use = false;
			mconfig->m_in_pin[i].pin_state = SKL_PIN_UNBIND;
		}
		for (i = 0; i < mconfig->max_out_queue; i++) {
			mconfig->m_out_pin[i].in_use = false;
			mconfig->m_out_pin[i].pin_state = SKL_PIN_UNBIND;
		}
		pipe->state = SKL_PIPE_INVALID;
		mconfig->m_state = SKL_MODULE_UNINIT;
	}
}

void skl_cleanup_resources(struct skl *skl)
{
	struct skl_sst *ctx = skl->skl_sst;
	struct snd_soc_platform *soc_platform = skl->platform;
	struct snd_soc_dapm_widget *w;
	struct snd_soc_card *card;

	if (soc_platform == NULL)
		return;

	card = soc_platform->component.card;
	if (!card || !card->instantiated)
		return;

	skl->resource.mem = 0;
	skl->resource.mcps = 0;

	list_for_each_entry(w, &card->widgets, list) {
		if (is_skl_dsp_widget_type(w) && (w->priv != NULL))
			skl_clear_pin_config(soc_platform, w);
	}

	skl_clear_module_cnt(ctx->dsp);
}
2215 * Topology core widget load callback
2217 * This is used to save the private data for each widget which gives
2218 * information to the driver about module and pipeline parameters which DSP
2219 * FW expects like ids, resource values, formats etc
2221 static int skl_tplg_widget_load(struct snd_soc_component *cmpnt,
2222 struct snd_soc_dapm_widget *w,
2223 struct snd_soc_tplg_dapm_widget *tplg_w)
2225 int ret;
2226 struct hdac_ext_bus *ebus = snd_soc_component_get_drvdata(cmpnt);
2227 struct skl *skl = ebus_to_skl(ebus);
2228 struct hdac_bus *bus = ebus_to_hbus(ebus);
2229 struct skl_module_cfg *mconfig;
2231 if (!tplg_w->priv.size)
2232 goto bind_event;
2234 mconfig = devm_kzalloc(bus->dev, sizeof(*mconfig), GFP_KERNEL);
2236 if (!mconfig)
2237 return -ENOMEM;
2239 w->priv = mconfig;
2242 * module binary can be loaded later, so set it to query when
2243 * module is loaded for a use case
2245 mconfig->id.module_id = -1;
2247 /* Parse private data for tuples */
2248 ret = skl_tplg_get_pvt_data(tplg_w, skl, bus->dev, mconfig);
2249 if (ret < 0)
2250 return ret;
2251 bind_event:
2252 if (tplg_w->event_type == 0) {
2253 dev_dbg(bus->dev, "ASoC: No event handler required\n");
2254 return 0;
2257 ret = snd_soc_tplg_widget_bind_event(w, skl_tplg_widget_ops,
2258 ARRAY_SIZE(skl_tplg_widget_ops),
2259 tplg_w->event_type);
2261 if (ret) {
2262 dev_err(bus->dev, "%s: No matching event handlers found for %d\n",
2263 __func__, tplg_w->event_type);
2264 return -EINVAL;
2267 return 0;
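/*
 * Allocate a driver-private skl_algo_data for a bytes control and copy the
 * parameter block from the topology firmware (skl_dfw_algo_data) into it.
 */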
2270 static int skl_init_algo_data(struct device *dev, struct soc_bytes_ext *be,
2271 struct snd_soc_tplg_bytes_control *bc)
2273 struct skl_algo_data *ac;
2274 struct skl_dfw_algo_data *dfw_ac =
2275 (struct skl_dfw_algo_data *)bc->priv.data;
2277 ac = devm_kzalloc(dev, sizeof(*ac), GFP_KERNEL);
2278 if (!ac)
2279 return -ENOMEM;
2281 /* Fill private data */
2282 ac->max = dfw_ac->max;
2283 ac->param_id = dfw_ac->param_id;
2284 ac->set_params = dfw_ac->set_params;
2285 ac->size = dfw_ac->max;
2287 if (ac->max) {
2288 ac->params = (char *) devm_kzalloc(dev, ac->max, GFP_KERNEL);
2289 if (!ac->params)
2290 return -ENOMEM;
2292 memcpy(ac->params, dfw_ac->params, ac->max);
2295 be->dobj.private = ac;
2296 return 0;
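/*
 * Topology control load callback: for TLV bytes controls carrying private
 * data, set up the driver-private algo data used by the TLV handlers.
 */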
2299 static int skl_tplg_control_load(struct snd_soc_component *cmpnt,
2300 struct snd_kcontrol_new *kctl,
2301 struct snd_soc_tplg_ctl_hdr *hdr)
2303 struct soc_bytes_ext *sb;
2304 struct snd_soc_tplg_bytes_control *tplg_bc;
2305 struct hdac_ext_bus *ebus = snd_soc_component_get_drvdata(cmpnt);
2306 struct hdac_bus *bus = ebus_to_hbus(ebus);
2308 switch (hdr->ops.info) {
2309 case SND_SOC_TPLG_CTL_BYTES:
2310 tplg_bc = container_of(hdr,
2311 struct snd_soc_tplg_bytes_control, hdr);
2312 if (kctl->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
2313 sb = (struct soc_bytes_ext *)kctl->private_value;
2314 if (tplg_bc->priv.size)
2315 return skl_init_algo_data(
2316 bus->dev, sb, tplg_bc);
2318 break;
2320 default:
2321 dev_warn(bus->dev, "Control load not supported %d:%d:%d\n",
2322 hdr->ops.get, hdr->ops.put, hdr->ops.info);
2323 break;
2326 return 0;
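/*
 * Handle a single manifest string token. Only SKL_TKN_STR_LIB_NAME is
 * supported: it fills the next DSP library name, with ref_count tracking
 * the lib_info slot across successive calls.
 */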
2329 static int skl_tplg_fill_str_mfest_tkn(struct device *dev,
2330 struct snd_soc_tplg_vendor_string_elem *str_elem,
2331 struct skl *skl)
2333 int tkn_count = 0;
2334 static int ref_count;
2336 switch (str_elem->token) {
2337 case SKL_TKN_STR_LIB_NAME:
2338 if (ref_count > skl->skl_sst->lib_count - 1) {
2339 ref_count = 0;
2340 return -EINVAL;
2343 strncpy(skl->skl_sst->lib_info[ref_count].name,
2344 str_elem->string,
2345 ARRAY_SIZE(skl->skl_sst->lib_info[ref_count].name));
2346 ref_count++;
2347 tkn_count++;
2348 break;
2350 default:
2351 dev_err(dev, "Not a string token %d\n", str_elem->token);
2352 break;
2355 return tkn_count;
2358 static int skl_tplg_get_str_tkn(struct device *dev,
2359 struct snd_soc_tplg_vendor_array *array,
2360 struct skl *skl)
2362 int tkn_count = 0, ret;
2363 struct snd_soc_tplg_vendor_string_elem *str_elem;
2365 str_elem = (struct snd_soc_tplg_vendor_string_elem *)array->value;
2366 while (tkn_count < array->num_elems) {
2367 ret = skl_tplg_fill_str_mfest_tkn(dev, str_elem, skl);
2368 str_elem++;
2370 if (ret < 0)
2371 return ret;
2373 tkn_count = tkn_count + ret;
2376 return tkn_count;
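/*
 * Handle a single integer manifest token. Only SKL_TKN_U32_LIB_COUNT, the
 * number of loadable DSP libraries, is supported here.
 */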
2379 static int skl_tplg_get_int_tkn(struct device *dev,
2380 struct snd_soc_tplg_vendor_value_elem *tkn_elem,
2381 struct skl *skl)
2383 int tkn_count = 0;
2385 switch (tkn_elem->token) {
2386 case SKL_TKN_U32_LIB_COUNT:
2387 skl->skl_sst->lib_count = tkn_elem->value;
2388 tkn_count++;
2389 break;
2391 default:
2392 dev_err(dev, "Not a manifest token %d\n", tkn_elem->token);
2393 return -EINVAL;
2396 return tkn_count;
2400 * Fill the manifest structure by parsing the tokens based on the
2401 * type.
2403 static int skl_tplg_get_manifest_tkn(struct device *dev,
2404 char *pvt_data, struct skl *skl,
2405 int block_size)
2407 int tkn_count = 0, ret;
2408 int off = 0, tuple_size = 0;
2409 struct snd_soc_tplg_vendor_array *array;
2410 struct snd_soc_tplg_vendor_value_elem *tkn_elem;
2412 if (block_size <= 0)
2413 return -EINVAL;
2415 while (tuple_size < block_size) {
2416 array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off);
2417 off += array->size;
2418 switch (array->type) {
2419 case SND_SOC_TPLG_TUPLE_TYPE_STRING:
2420 ret = skl_tplg_get_str_tkn(dev, array, skl);
2422 if (ret < 0)
2423 return ret;
2424 tkn_count += ret;
2426 tuple_size += tkn_count *
2427 sizeof(struct snd_soc_tplg_vendor_string_elem);
2428 continue;
2430 case SND_SOC_TPLG_TUPLE_TYPE_UUID:
2431 dev_warn(dev, "no uuid tokens for skl tplg manifest\n");
2432 continue;
2434 default:
2435 tkn_elem = array->value;
2436 tkn_count = 0;
2437 break;
2440 while (tkn_count <= array->num_elems - 1) {
2441 ret = skl_tplg_get_int_tkn(dev,
2442 tkn_elem, skl);
2443 if (ret < 0)
2444 return ret;
2446 tkn_count = tkn_count + ret;
2447 tkn_elem++;
2448 tuple_size += tkn_count *
2449 sizeof(struct snd_soc_tplg_vendor_value_elem);
2450 break;
2452 tkn_count = 0;
2455 return 0;
2459 * Parse manifest private data for tokens. The private data block is
2460 * preceded by descriptors for the type and size of the data block.
2462 static int skl_tplg_get_manifest_data(struct snd_soc_tplg_manifest *manifest,
2463 struct device *dev, struct skl *skl)
2465 struct snd_soc_tplg_vendor_array *array;
2466 int num_blocks, block_size = 0, block_type, off = 0;
2467 char *data;
2468 int ret;
2470 /* Read the NUM_DATA_BLOCKS descriptor */
2471 array = (struct snd_soc_tplg_vendor_array *)manifest->priv.data;
2472 ret = skl_tplg_get_desc_blocks(dev, array);
2473 if (ret < 0)
2474 return ret;
2475 num_blocks = ret;
2477 off += array->size;
2478 array = (struct snd_soc_tplg_vendor_array *)
2479 (manifest->priv.data + off);
2481 /* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */
2482 while (num_blocks > 0) {
2483 ret = skl_tplg_get_desc_blocks(dev, array);
2485 if (ret < 0)
2486 return ret;
2487 block_type = ret;
2488 off += array->size;
2490 array = (struct snd_soc_tplg_vendor_array *)
2491 (manifest->priv.data + off);
2493 ret = skl_tplg_get_desc_blocks(dev, array);
2495 if (ret < 0)
2496 return ret;
2497 block_size = ret;
2498 off += array->size;
2500 array = (struct snd_soc_tplg_vendor_array *)
2501 (manifest->priv.data + off);
2503 data = (manifest->priv.data + off);
2505 if (block_type == SKL_TYPE_TUPLE) {
2506 ret = skl_tplg_get_manifest_tkn(dev, data, skl,
2507 block_size);
2509 if (ret < 0)
2510 return ret;
2512 --num_blocks;
2513 } else {
2514 return -EINVAL;
2518 return 0;
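/*
 * Topology manifest load callback: parse the manifest private data and
 * sanity-check the reported DSP library count against SKL_MAX_LIB.
 */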
2521 static int skl_manifest_load(struct snd_soc_component *cmpnt,
2522 struct snd_soc_tplg_manifest *manifest)
2524 struct hdac_ext_bus *ebus = snd_soc_component_get_drvdata(cmpnt);
2525 struct hdac_bus *bus = ebus_to_hbus(ebus);
2526 struct skl *skl = ebus_to_skl(ebus);
2528 /* proceed only if we have private data defined */
2529 if (manifest->priv.size == 0)
2530 return 0;
2532 skl_tplg_get_manifest_data(manifest, bus->dev, skl);
2534 if (skl->skl_sst->lib_count > SKL_MAX_LIB) {
2535 dev_err(bus->dev, "Exceeding max library count. Got: %d\n",
2536 skl->skl_sst->lib_count);
2537 return -EINVAL;
2540 return 0;
2543 static struct snd_soc_tplg_ops skl_tplg_ops = {
2544 .widget_load = skl_tplg_widget_load,
2545 .control_load = skl_tplg_control_load,
2546 .bytes_ext_ops = skl_tlv_ops,
2547 .bytes_ext_ops_count = ARRAY_SIZE(skl_tlv_ops),
2548 .manifest = skl_manifest_load,
2552 * A pipe can have multiple modules, each of which is also a DAPM widget.
2553 * While managing a pipeline we need the list of all the widgets in that
2554 * pipeline, so this helper - skl_tplg_create_pipe_widget_list() - collects
2555 * the SKL type widgets of each pipeline.
2557 static int skl_tplg_create_pipe_widget_list(struct snd_soc_platform *platform)
2559 struct snd_soc_dapm_widget *w;
2560 struct skl_module_cfg *mcfg = NULL;
2561 struct skl_pipe_module *p_module = NULL;
2562 struct skl_pipe *pipe;
2564 list_for_each_entry(w, &platform->component.card->widgets, list) {
2565 if (is_skl_dsp_widget_type(w) && w->priv != NULL) {
2566 mcfg = w->priv;
2567 pipe = mcfg->pipe;
2569 p_module = devm_kzalloc(platform->dev,
2570 sizeof(*p_module), GFP_KERNEL);
2571 if (!p_module)
2572 return -ENOMEM;
2574 p_module->w = w;
2575 list_add_tail(&p_module->node, &pipe->w_list);
2579 return 0;
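/*
 * Mark a pipe as passthru when it contains both a host DMA module
 * (SKL_DEVICE_HDAHOST) and a link/device module.
 */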
2582 static void skl_tplg_set_pipe_type(struct skl *skl, struct skl_pipe *pipe)
2584 struct skl_pipe_module *w_module;
2585 struct snd_soc_dapm_widget *w;
2586 struct skl_module_cfg *mconfig;
2587 bool host_found = false, link_found = false;
2589 list_for_each_entry(w_module, &pipe->w_list, node) {
2590 w = w_module->w;
2591 mconfig = w->priv;
2593 if (mconfig->dev_type == SKL_DEVICE_HDAHOST)
2594 host_found = true;
2595 else if (mconfig->dev_type != SKL_DEVICE_NONE)
2596 link_found = true;
2599 if (host_found && link_found)
2600 pipe->passthru = true;
2601 else
2602 pipe->passthru = false;
2605 /* This will be read from topology manifest, currently defined here */
2606 #define SKL_MAX_MCPS 30000000
2607 #define SKL_FW_MAX_MEM 1000000
2610 * SKL topology init routine
2612 int skl_tplg_init(struct snd_soc_platform *platform, struct hdac_ext_bus *ebus)
2614 int ret;
2615 const struct firmware *fw;
2616 struct hdac_bus *bus = ebus_to_hbus(ebus);
2617 struct skl *skl = ebus_to_skl(ebus);
2618 struct skl_pipeline *ppl;
2620 ret = request_firmware(&fw, skl->tplg_name, bus->dev);
2621 if (ret < 0) {
2622 dev_err(bus->dev, "tplg fw %s load failed with %d\n",
2623 skl->tplg_name, ret);
2624 ret = request_firmware(&fw, "dfw_sst.bin", bus->dev);
2625 if (ret < 0) {
2626 dev_err(bus->dev, "Fallback tplg fw %s load failed with %d\n",
2627 "dfw_sst.bin", ret);
2628 return ret;
2633 * The complete tplg for SKL is loaded as index 0; we don't use
2634 * any other index.
2636 ret = snd_soc_tplg_component_load(&platform->component,
2637 &skl_tplg_ops, fw, 0);
2638 if (ret < 0) {
2639 dev_err(bus->dev, "tplg component load failed %d\n", ret);
2640 release_firmware(fw);
2641 return -EINVAL;
2644 skl->resource.max_mcps = SKL_MAX_MCPS;
2645 skl->resource.max_mem = SKL_FW_MAX_MEM;
2647 skl->tplg = fw;
2648 ret = skl_tplg_create_pipe_widget_list(platform);
2649 if (ret < 0)
2650 return ret;
2652 list_for_each_entry(ppl, &skl->ppl_list, node)
2653 skl_tplg_set_pipe_type(skl, ppl->pipe);
2655 return 0;