/*
 * soc-cache.c  --  ASoC register cache helpers
 *
 * Copyright 2009 Wolfson Microelectronics PLC.
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
14 #include <linux/i2c.h>
15 #include <linux/spi/spi.h>
16 #include <sound/soc.h>
17 #include <linux/lzo.h>
18 #include <linux/bitmap.h>
19 #include <linux/rbtree.h>
21 #include <trace/events/asoc.h>
23 static bool snd_soc_set_cache_val(void *base, unsigned int idx,
24 unsigned int val, unsigned int word_size)
26 switch (word_size) {
27 case 1: {
28 u8 *cache = base;
29 if (cache[idx] == val)
30 return true;
31 cache[idx] = val;
32 break;
34 case 2: {
35 u16 *cache = base;
36 if (cache[idx] == val)
37 return true;
38 cache[idx] = val;
39 break;
41 default:
42 BUG();
44 return false;
47 static unsigned int snd_soc_get_cache_val(const void *base, unsigned int idx,
48 unsigned int word_size)
50 if (!base)
51 return -1;
53 switch (word_size) {
54 case 1: {
55 const u8 *cache = base;
56 return cache[idx];
58 case 2: {
59 const u16 *cache = base;
60 return cache[idx];
62 default:
63 BUG();
65 /* unreachable */
66 return -1;
69 struct snd_soc_rbtree_node {
70 struct rb_node node; /* the actual rbtree node holding this block */
71 unsigned int base_reg; /* base register handled by this block */
72 unsigned int word_size; /* number of bytes needed to represent the register index */
73 void *block; /* block of adjacent registers */
74 unsigned int blklen; /* number of registers available in the block */
75 } __attribute__ ((packed));
77 struct snd_soc_rbtree_ctx {
78 struct rb_root root;
79 struct snd_soc_rbtree_node *cached_rbnode;
82 static inline void snd_soc_rbtree_get_base_top_reg(
83 struct snd_soc_rbtree_node *rbnode,
84 unsigned int *base, unsigned int *top)
86 *base = rbnode->base_reg;
87 *top = rbnode->base_reg + rbnode->blklen - 1;
90 static unsigned int snd_soc_rbtree_get_register(
91 struct snd_soc_rbtree_node *rbnode, unsigned int idx)
93 unsigned int val;
95 switch (rbnode->word_size) {
96 case 1: {
97 u8 *p = rbnode->block;
98 val = p[idx];
99 return val;
101 case 2: {
102 u16 *p = rbnode->block;
103 val = p[idx];
104 return val;
106 default:
107 BUG();
108 break;
110 return -1;
113 static void snd_soc_rbtree_set_register(struct snd_soc_rbtree_node *rbnode,
114 unsigned int idx, unsigned int val)
116 switch (rbnode->word_size) {
117 case 1: {
118 u8 *p = rbnode->block;
119 p[idx] = val;
120 break;
122 case 2: {
123 u16 *p = rbnode->block;
124 p[idx] = val;
125 break;
127 default:
128 BUG();
129 break;
133 static struct snd_soc_rbtree_node *snd_soc_rbtree_lookup(
134 struct rb_root *root, unsigned int reg)
136 struct rb_node *node;
137 struct snd_soc_rbtree_node *rbnode;
138 unsigned int base_reg, top_reg;
140 node = root->rb_node;
141 while (node) {
142 rbnode = container_of(node, struct snd_soc_rbtree_node, node);
143 snd_soc_rbtree_get_base_top_reg(rbnode, &base_reg, &top_reg);
144 if (reg >= base_reg && reg <= top_reg)
145 return rbnode;
146 else if (reg > top_reg)
147 node = node->rb_right;
148 else if (reg < base_reg)
149 node = node->rb_left;
152 return NULL;
155 static int snd_soc_rbtree_insert(struct rb_root *root,
156 struct snd_soc_rbtree_node *rbnode)
158 struct rb_node **new, *parent;
159 struct snd_soc_rbtree_node *rbnode_tmp;
160 unsigned int base_reg_tmp, top_reg_tmp;
161 unsigned int base_reg;
163 parent = NULL;
164 new = &root->rb_node;
165 while (*new) {
166 rbnode_tmp = container_of(*new, struct snd_soc_rbtree_node,
167 node);
168 /* base and top registers of the current rbnode */
169 snd_soc_rbtree_get_base_top_reg(rbnode_tmp, &base_reg_tmp,
170 &top_reg_tmp);
171 /* base register of the rbnode to be added */
172 base_reg = rbnode->base_reg;
173 parent = *new;
174 /* if this register has already been inserted, just return */
175 if (base_reg >= base_reg_tmp &&
176 base_reg <= top_reg_tmp)
177 return 0;
178 else if (base_reg > top_reg_tmp)
179 new = &((*new)->rb_right);
180 else if (base_reg < base_reg_tmp)
181 new = &((*new)->rb_left);
184 /* insert the node into the rbtree */
185 rb_link_node(&rbnode->node, parent, new);
186 rb_insert_color(&rbnode->node, root);
188 return 1;
191 static int snd_soc_rbtree_cache_sync(struct snd_soc_codec *codec)
193 struct snd_soc_rbtree_ctx *rbtree_ctx;
194 struct rb_node *node;
195 struct snd_soc_rbtree_node *rbnode;
196 unsigned int regtmp;
197 unsigned int val, def;
198 int ret;
199 int i;
201 rbtree_ctx = codec->reg_cache;
202 for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
203 rbnode = rb_entry(node, struct snd_soc_rbtree_node, node);
204 for (i = 0; i < rbnode->blklen; ++i) {
205 regtmp = rbnode->base_reg + i;
206 val = snd_soc_rbtree_get_register(rbnode, i);
207 def = snd_soc_get_cache_val(codec->reg_def_copy, i,
208 rbnode->word_size);
209 if (val == def)
210 continue;
212 WARN_ON(!snd_soc_codec_writable_register(codec, regtmp));
214 codec->cache_bypass = 1;
215 ret = snd_soc_write(codec, regtmp, val);
216 codec->cache_bypass = 0;
217 if (ret)
218 return ret;
219 dev_dbg(codec->dev, "Synced register %#x, value = %#x\n",
220 regtmp, val);
224 return 0;
227 static int snd_soc_rbtree_insert_to_block(struct snd_soc_rbtree_node *rbnode,
228 unsigned int pos, unsigned int reg,
229 unsigned int value)
231 u8 *blk;
233 blk = krealloc(rbnode->block,
234 (rbnode->blklen + 1) * rbnode->word_size, GFP_KERNEL);
235 if (!blk)
236 return -ENOMEM;
238 /* insert the register value in the correct place in the rbnode block */
239 memmove(blk + (pos + 1) * rbnode->word_size,
240 blk + pos * rbnode->word_size,
241 (rbnode->blklen - pos) * rbnode->word_size);
243 /* update the rbnode block, its size and the base register */
244 rbnode->block = blk;
245 rbnode->blklen++;
246 if (!pos)
247 rbnode->base_reg = reg;
249 snd_soc_rbtree_set_register(rbnode, pos, value);
250 return 0;
253 static int snd_soc_rbtree_cache_write(struct snd_soc_codec *codec,
254 unsigned int reg, unsigned int value)
256 struct snd_soc_rbtree_ctx *rbtree_ctx;
257 struct snd_soc_rbtree_node *rbnode, *rbnode_tmp;
258 struct rb_node *node;
259 unsigned int val;
260 unsigned int reg_tmp;
261 unsigned int base_reg, top_reg;
262 unsigned int pos;
263 int i;
264 int ret;
266 rbtree_ctx = codec->reg_cache;
267 /* look up the required register in the cached rbnode */
268 rbnode = rbtree_ctx->cached_rbnode;
269 if (rbnode) {
270 snd_soc_rbtree_get_base_top_reg(rbnode, &base_reg, &top_reg);
271 if (reg >= base_reg && reg <= top_reg) {
272 reg_tmp = reg - base_reg;
273 val = snd_soc_rbtree_get_register(rbnode, reg_tmp);
274 if (val == value)
275 return 0;
276 snd_soc_rbtree_set_register(rbnode, reg_tmp, value);
277 return 0;
280 /* if we can't locate it in the cached rbnode we'll have
281 * to traverse the rbtree looking for it.
283 rbnode = snd_soc_rbtree_lookup(&rbtree_ctx->root, reg);
284 if (rbnode) {
285 reg_tmp = reg - rbnode->base_reg;
286 val = snd_soc_rbtree_get_register(rbnode, reg_tmp);
287 if (val == value)
288 return 0;
289 snd_soc_rbtree_set_register(rbnode, reg_tmp, value);
290 rbtree_ctx->cached_rbnode = rbnode;
291 } else {
292 /* bail out early, no need to create the rbnode yet */
293 if (!value)
294 return 0;
295 /* look for an adjacent register to the one we are about to add */
296 for (node = rb_first(&rbtree_ctx->root); node;
297 node = rb_next(node)) {
298 rbnode_tmp = rb_entry(node, struct snd_soc_rbtree_node, node);
299 for (i = 0; i < rbnode_tmp->blklen; ++i) {
300 reg_tmp = rbnode_tmp->base_reg + i;
301 if (abs(reg_tmp - reg) != 1)
302 continue;
303 /* decide where in the block to place our register */
304 if (reg_tmp + 1 == reg)
305 pos = i + 1;
306 else
307 pos = i;
308 ret = snd_soc_rbtree_insert_to_block(rbnode_tmp, pos,
309 reg, value);
310 if (ret)
311 return ret;
312 rbtree_ctx->cached_rbnode = rbnode_tmp;
313 return 0;
316 /* we did not manage to find a place to insert it in an existing
317 * block so create a new rbnode with a single register in its block.
318 * This block will get populated further if any other adjacent
319 * registers get modified in the future.
321 rbnode = kzalloc(sizeof *rbnode, GFP_KERNEL);
322 if (!rbnode)
323 return -ENOMEM;
324 rbnode->blklen = 1;
325 rbnode->base_reg = reg;
326 rbnode->word_size = codec->driver->reg_word_size;
327 rbnode->block = kmalloc(rbnode->blklen * rbnode->word_size,
328 GFP_KERNEL);
329 if (!rbnode->block) {
330 kfree(rbnode);
331 return -ENOMEM;
333 snd_soc_rbtree_set_register(rbnode, 0, value);
334 snd_soc_rbtree_insert(&rbtree_ctx->root, rbnode);
335 rbtree_ctx->cached_rbnode = rbnode;
338 return 0;
341 static int snd_soc_rbtree_cache_read(struct snd_soc_codec *codec,
342 unsigned int reg, unsigned int *value)
344 struct snd_soc_rbtree_ctx *rbtree_ctx;
345 struct snd_soc_rbtree_node *rbnode;
346 unsigned int base_reg, top_reg;
347 unsigned int reg_tmp;
349 rbtree_ctx = codec->reg_cache;
350 /* look up the required register in the cached rbnode */
351 rbnode = rbtree_ctx->cached_rbnode;
352 if (rbnode) {
353 snd_soc_rbtree_get_base_top_reg(rbnode, &base_reg, &top_reg);
354 if (reg >= base_reg && reg <= top_reg) {
355 reg_tmp = reg - base_reg;
356 *value = snd_soc_rbtree_get_register(rbnode, reg_tmp);
357 return 0;
360 /* if we can't locate it in the cached rbnode we'll have
361 * to traverse the rbtree looking for it.
363 rbnode = snd_soc_rbtree_lookup(&rbtree_ctx->root, reg);
364 if (rbnode) {
365 reg_tmp = reg - rbnode->base_reg;
366 *value = snd_soc_rbtree_get_register(rbnode, reg_tmp);
367 rbtree_ctx->cached_rbnode = rbnode;
368 } else {
369 /* uninitialized registers default to 0 */
370 *value = 0;
373 return 0;
376 static int snd_soc_rbtree_cache_exit(struct snd_soc_codec *codec)
378 struct rb_node *next;
379 struct snd_soc_rbtree_ctx *rbtree_ctx;
380 struct snd_soc_rbtree_node *rbtree_node;
382 /* if we've already been called then just return */
383 rbtree_ctx = codec->reg_cache;
384 if (!rbtree_ctx)
385 return 0;
387 /* free up the rbtree */
388 next = rb_first(&rbtree_ctx->root);
389 while (next) {
390 rbtree_node = rb_entry(next, struct snd_soc_rbtree_node, node);
391 next = rb_next(&rbtree_node->node);
392 rb_erase(&rbtree_node->node, &rbtree_ctx->root);
393 kfree(rbtree_node->block);
394 kfree(rbtree_node);
397 /* release the resources */
398 kfree(codec->reg_cache);
399 codec->reg_cache = NULL;
401 return 0;
404 static int snd_soc_rbtree_cache_init(struct snd_soc_codec *codec)
406 struct snd_soc_rbtree_ctx *rbtree_ctx;
407 unsigned int word_size;
408 unsigned int val;
409 int i;
410 int ret;
412 codec->reg_cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL);
413 if (!codec->reg_cache)
414 return -ENOMEM;
416 rbtree_ctx = codec->reg_cache;
417 rbtree_ctx->root = RB_ROOT;
418 rbtree_ctx->cached_rbnode = NULL;
420 if (!codec->reg_def_copy)
421 return 0;
423 word_size = codec->driver->reg_word_size;
424 for (i = 0; i < codec->driver->reg_cache_size; ++i) {
425 val = snd_soc_get_cache_val(codec->reg_def_copy, i,
426 word_size);
427 if (!val)
428 continue;
429 ret = snd_soc_rbtree_cache_write(codec, i, val);
430 if (ret)
431 goto err;
434 return 0;
436 err:
437 snd_soc_cache_exit(codec);
438 return ret;
441 #ifdef CONFIG_SND_SOC_CACHE_LZO
442 struct snd_soc_lzo_ctx {
443 void *wmem;
444 void *dst;
445 const void *src;
446 size_t src_len;
447 size_t dst_len;
448 size_t decompressed_size;
449 unsigned long *sync_bmp;
450 int sync_bmp_nbits;
453 #define LZO_BLOCK_NUM 8
454 static int snd_soc_lzo_block_count(void)
456 return LZO_BLOCK_NUM;
459 static int snd_soc_lzo_prepare(struct snd_soc_lzo_ctx *lzo_ctx)
461 lzo_ctx->wmem = kmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
462 if (!lzo_ctx->wmem)
463 return -ENOMEM;
464 return 0;
467 static int snd_soc_lzo_compress(struct snd_soc_lzo_ctx *lzo_ctx)
469 size_t compress_size;
470 int ret;
472 ret = lzo1x_1_compress(lzo_ctx->src, lzo_ctx->src_len,
473 lzo_ctx->dst, &compress_size, lzo_ctx->wmem);
474 if (ret != LZO_E_OK || compress_size > lzo_ctx->dst_len)
475 return -EINVAL;
476 lzo_ctx->dst_len = compress_size;
477 return 0;
480 static int snd_soc_lzo_decompress(struct snd_soc_lzo_ctx *lzo_ctx)
482 size_t dst_len;
483 int ret;
485 dst_len = lzo_ctx->dst_len;
486 ret = lzo1x_decompress_safe(lzo_ctx->src, lzo_ctx->src_len,
487 lzo_ctx->dst, &dst_len);
488 if (ret != LZO_E_OK || dst_len != lzo_ctx->dst_len)
489 return -EINVAL;
490 return 0;
493 static int snd_soc_lzo_compress_cache_block(struct snd_soc_codec *codec,
494 struct snd_soc_lzo_ctx *lzo_ctx)
496 int ret;
498 lzo_ctx->dst_len = lzo1x_worst_compress(PAGE_SIZE);
499 lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL);
500 if (!lzo_ctx->dst) {
501 lzo_ctx->dst_len = 0;
502 return -ENOMEM;
505 ret = snd_soc_lzo_compress(lzo_ctx);
506 if (ret < 0)
507 return ret;
508 return 0;
511 static int snd_soc_lzo_decompress_cache_block(struct snd_soc_codec *codec,
512 struct snd_soc_lzo_ctx *lzo_ctx)
514 int ret;
516 lzo_ctx->dst_len = lzo_ctx->decompressed_size;
517 lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL);
518 if (!lzo_ctx->dst) {
519 lzo_ctx->dst_len = 0;
520 return -ENOMEM;
523 ret = snd_soc_lzo_decompress(lzo_ctx);
524 if (ret < 0)
525 return ret;
526 return 0;
529 static inline int snd_soc_lzo_get_blkindex(struct snd_soc_codec *codec,
530 unsigned int reg)
532 const struct snd_soc_codec_driver *codec_drv;
534 codec_drv = codec->driver;
535 return (reg * codec_drv->reg_word_size) /
536 DIV_ROUND_UP(codec->reg_size, snd_soc_lzo_block_count());
539 static inline int snd_soc_lzo_get_blkpos(struct snd_soc_codec *codec,
540 unsigned int reg)
542 const struct snd_soc_codec_driver *codec_drv;
544 codec_drv = codec->driver;
545 return reg % (DIV_ROUND_UP(codec->reg_size, snd_soc_lzo_block_count()) /
546 codec_drv->reg_word_size);
549 static inline int snd_soc_lzo_get_blksize(struct snd_soc_codec *codec)
551 const struct snd_soc_codec_driver *codec_drv;
553 codec_drv = codec->driver;
554 return DIV_ROUND_UP(codec->reg_size, snd_soc_lzo_block_count());
557 static int snd_soc_lzo_cache_sync(struct snd_soc_codec *codec)
559 struct snd_soc_lzo_ctx **lzo_blocks;
560 unsigned int val;
561 int i;
562 int ret;
564 lzo_blocks = codec->reg_cache;
565 for_each_set_bit(i, lzo_blocks[0]->sync_bmp, lzo_blocks[0]->sync_bmp_nbits) {
566 WARN_ON(!snd_soc_codec_writable_register(codec, i));
567 ret = snd_soc_cache_read(codec, i, &val);
568 if (ret)
569 return ret;
570 codec->cache_bypass = 1;
571 ret = snd_soc_write(codec, i, val);
572 codec->cache_bypass = 0;
573 if (ret)
574 return ret;
575 dev_dbg(codec->dev, "Synced register %#x, value = %#x\n",
576 i, val);
579 return 0;
582 static int snd_soc_lzo_cache_write(struct snd_soc_codec *codec,
583 unsigned int reg, unsigned int value)
585 struct snd_soc_lzo_ctx *lzo_block, **lzo_blocks;
586 int ret, blkindex, blkpos;
587 size_t blksize, tmp_dst_len;
588 void *tmp_dst;
590 /* index of the compressed lzo block */
591 blkindex = snd_soc_lzo_get_blkindex(codec, reg);
592 /* register index within the decompressed block */
593 blkpos = snd_soc_lzo_get_blkpos(codec, reg);
594 /* size of the compressed block */
595 blksize = snd_soc_lzo_get_blksize(codec);
596 lzo_blocks = codec->reg_cache;
597 lzo_block = lzo_blocks[blkindex];
599 /* save the pointer and length of the compressed block */
600 tmp_dst = lzo_block->dst;
601 tmp_dst_len = lzo_block->dst_len;
603 /* prepare the source to be the compressed block */
604 lzo_block->src = lzo_block->dst;
605 lzo_block->src_len = lzo_block->dst_len;
607 /* decompress the block */
608 ret = snd_soc_lzo_decompress_cache_block(codec, lzo_block);
609 if (ret < 0) {
610 kfree(lzo_block->dst);
611 goto out;
614 /* write the new value to the cache */
615 if (snd_soc_set_cache_val(lzo_block->dst, blkpos, value,
616 codec->driver->reg_word_size)) {
617 kfree(lzo_block->dst);
618 goto out;
621 /* prepare the source to be the decompressed block */
622 lzo_block->src = lzo_block->dst;
623 lzo_block->src_len = lzo_block->dst_len;
625 /* compress the block */
626 ret = snd_soc_lzo_compress_cache_block(codec, lzo_block);
627 if (ret < 0) {
628 kfree(lzo_block->dst);
629 kfree(lzo_block->src);
630 goto out;
633 /* set the bit so we know we have to sync this register */
634 set_bit(reg, lzo_block->sync_bmp);
635 kfree(tmp_dst);
636 kfree(lzo_block->src);
637 return 0;
638 out:
639 lzo_block->dst = tmp_dst;
640 lzo_block->dst_len = tmp_dst_len;
641 return ret;
644 static int snd_soc_lzo_cache_read(struct snd_soc_codec *codec,
645 unsigned int reg, unsigned int *value)
647 struct snd_soc_lzo_ctx *lzo_block, **lzo_blocks;
648 int ret, blkindex, blkpos;
649 size_t blksize, tmp_dst_len;
650 void *tmp_dst;
652 *value = 0;
653 /* index of the compressed lzo block */
654 blkindex = snd_soc_lzo_get_blkindex(codec, reg);
655 /* register index within the decompressed block */
656 blkpos = snd_soc_lzo_get_blkpos(codec, reg);
657 /* size of the compressed block */
658 blksize = snd_soc_lzo_get_blksize(codec);
659 lzo_blocks = codec->reg_cache;
660 lzo_block = lzo_blocks[blkindex];
662 /* save the pointer and length of the compressed block */
663 tmp_dst = lzo_block->dst;
664 tmp_dst_len = lzo_block->dst_len;
666 /* prepare the source to be the compressed block */
667 lzo_block->src = lzo_block->dst;
668 lzo_block->src_len = lzo_block->dst_len;
670 /* decompress the block */
671 ret = snd_soc_lzo_decompress_cache_block(codec, lzo_block);
672 if (ret >= 0)
673 /* fetch the value from the cache */
674 *value = snd_soc_get_cache_val(lzo_block->dst, blkpos,
675 codec->driver->reg_word_size);
677 kfree(lzo_block->dst);
678 /* restore the pointer and length of the compressed block */
679 lzo_block->dst = tmp_dst;
680 lzo_block->dst_len = tmp_dst_len;
681 return 0;
684 static int snd_soc_lzo_cache_exit(struct snd_soc_codec *codec)
686 struct snd_soc_lzo_ctx **lzo_blocks;
687 int i, blkcount;
689 lzo_blocks = codec->reg_cache;
690 if (!lzo_blocks)
691 return 0;
693 blkcount = snd_soc_lzo_block_count();
695 * the pointer to the bitmap used for syncing the cache
696 * is shared amongst all lzo_blocks. Ensure it is freed
697 * only once.
699 if (lzo_blocks[0])
700 kfree(lzo_blocks[0]->sync_bmp);
701 for (i = 0; i < blkcount; ++i) {
702 if (lzo_blocks[i]) {
703 kfree(lzo_blocks[i]->wmem);
704 kfree(lzo_blocks[i]->dst);
706 /* each lzo_block is a pointer returned by kmalloc or NULL */
707 kfree(lzo_blocks[i]);
709 kfree(lzo_blocks);
710 codec->reg_cache = NULL;
711 return 0;
714 static int snd_soc_lzo_cache_init(struct snd_soc_codec *codec)
716 struct snd_soc_lzo_ctx **lzo_blocks;
717 size_t bmp_size;
718 const struct snd_soc_codec_driver *codec_drv;
719 int ret, tofree, i, blksize, blkcount;
720 const char *p, *end;
721 unsigned long *sync_bmp;
723 ret = 0;
724 codec_drv = codec->driver;
727 * If we have not been given a default register cache
728 * then allocate a dummy zero-ed out region, compress it
729 * and remember to free it afterwards.
731 tofree = 0;
732 if (!codec->reg_def_copy)
733 tofree = 1;
735 if (!codec->reg_def_copy) {
736 codec->reg_def_copy = kzalloc(codec->reg_size, GFP_KERNEL);
737 if (!codec->reg_def_copy)
738 return -ENOMEM;
741 blkcount = snd_soc_lzo_block_count();
742 codec->reg_cache = kzalloc(blkcount * sizeof *lzo_blocks,
743 GFP_KERNEL);
744 if (!codec->reg_cache) {
745 ret = -ENOMEM;
746 goto err_tofree;
748 lzo_blocks = codec->reg_cache;
751 * allocate a bitmap to be used when syncing the cache with
752 * the hardware. Each time a register is modified, the corresponding
753 * bit is set in the bitmap, so we know that we have to sync
754 * that register.
756 bmp_size = codec_drv->reg_cache_size;
757 sync_bmp = kmalloc(BITS_TO_LONGS(bmp_size) * sizeof(long),
758 GFP_KERNEL);
759 if (!sync_bmp) {
760 ret = -ENOMEM;
761 goto err;
763 bitmap_zero(sync_bmp, bmp_size);
765 /* allocate the lzo blocks and initialize them */
766 for (i = 0; i < blkcount; ++i) {
767 lzo_blocks[i] = kzalloc(sizeof **lzo_blocks,
768 GFP_KERNEL);
769 if (!lzo_blocks[i]) {
770 kfree(sync_bmp);
771 ret = -ENOMEM;
772 goto err;
774 lzo_blocks[i]->sync_bmp = sync_bmp;
775 lzo_blocks[i]->sync_bmp_nbits = bmp_size;
776 /* alloc the working space for the compressed block */
777 ret = snd_soc_lzo_prepare(lzo_blocks[i]);
778 if (ret < 0)
779 goto err;
782 blksize = snd_soc_lzo_get_blksize(codec);
783 p = codec->reg_def_copy;
784 end = codec->reg_def_copy + codec->reg_size;
785 /* compress the register map and fill the lzo blocks */
786 for (i = 0; i < blkcount; ++i, p += blksize) {
787 lzo_blocks[i]->src = p;
788 if (p + blksize > end)
789 lzo_blocks[i]->src_len = end - p;
790 else
791 lzo_blocks[i]->src_len = blksize;
792 ret = snd_soc_lzo_compress_cache_block(codec,
793 lzo_blocks[i]);
794 if (ret < 0)
795 goto err;
796 lzo_blocks[i]->decompressed_size =
797 lzo_blocks[i]->src_len;
800 if (tofree) {
801 kfree(codec->reg_def_copy);
802 codec->reg_def_copy = NULL;
804 return 0;
805 err:
806 snd_soc_cache_exit(codec);
807 err_tofree:
808 if (tofree) {
809 kfree(codec->reg_def_copy);
810 codec->reg_def_copy = NULL;
812 return ret;
814 #endif
816 static int snd_soc_flat_cache_sync(struct snd_soc_codec *codec)
818 int i;
819 int ret;
820 const struct snd_soc_codec_driver *codec_drv;
821 unsigned int val;
823 codec_drv = codec->driver;
824 for (i = 0; i < codec_drv->reg_cache_size; ++i) {
825 ret = snd_soc_cache_read(codec, i, &val);
826 if (ret)
827 return ret;
828 if (codec->reg_def_copy)
829 if (snd_soc_get_cache_val(codec->reg_def_copy,
830 i, codec_drv->reg_word_size) == val)
831 continue;
833 WARN_ON(!snd_soc_codec_writable_register(codec, i));
835 ret = snd_soc_write(codec, i, val);
836 if (ret)
837 return ret;
838 dev_dbg(codec->dev, "Synced register %#x, value = %#x\n",
839 i, val);
841 return 0;
844 static int snd_soc_flat_cache_write(struct snd_soc_codec *codec,
845 unsigned int reg, unsigned int value)
847 snd_soc_set_cache_val(codec->reg_cache, reg, value,
848 codec->driver->reg_word_size);
849 return 0;
852 static int snd_soc_flat_cache_read(struct snd_soc_codec *codec,
853 unsigned int reg, unsigned int *value)
855 *value = snd_soc_get_cache_val(codec->reg_cache, reg,
856 codec->driver->reg_word_size);
857 return 0;
860 static int snd_soc_flat_cache_exit(struct snd_soc_codec *codec)
862 if (!codec->reg_cache)
863 return 0;
864 kfree(codec->reg_cache);
865 codec->reg_cache = NULL;
866 return 0;
869 static int snd_soc_flat_cache_init(struct snd_soc_codec *codec)
871 const struct snd_soc_codec_driver *codec_drv;
873 codec_drv = codec->driver;
875 if (codec->reg_def_copy)
876 codec->reg_cache = kmemdup(codec->reg_def_copy,
877 codec->reg_size, GFP_KERNEL);
878 else
879 codec->reg_cache = kzalloc(codec->reg_size, GFP_KERNEL);
880 if (!codec->reg_cache)
881 return -ENOMEM;
883 return 0;
886 /* an array of all supported compression types */
887 static const struct snd_soc_cache_ops cache_types[] = {
888 /* Flat *must* be the first entry for fallback */
890 .id = SND_SOC_FLAT_COMPRESSION,
891 .name = "flat",
892 .init = snd_soc_flat_cache_init,
893 .exit = snd_soc_flat_cache_exit,
894 .read = snd_soc_flat_cache_read,
895 .write = snd_soc_flat_cache_write,
896 .sync = snd_soc_flat_cache_sync
898 #ifdef CONFIG_SND_SOC_CACHE_LZO
900 .id = SND_SOC_LZO_COMPRESSION,
901 .name = "LZO",
902 .init = snd_soc_lzo_cache_init,
903 .exit = snd_soc_lzo_cache_exit,
904 .read = snd_soc_lzo_cache_read,
905 .write = snd_soc_lzo_cache_write,
906 .sync = snd_soc_lzo_cache_sync
908 #endif
910 .id = SND_SOC_RBTREE_COMPRESSION,
911 .name = "rbtree",
912 .init = snd_soc_rbtree_cache_init,
913 .exit = snd_soc_rbtree_cache_exit,
914 .read = snd_soc_rbtree_cache_read,
915 .write = snd_soc_rbtree_cache_write,
916 .sync = snd_soc_rbtree_cache_sync
920 int snd_soc_cache_init(struct snd_soc_codec *codec)
922 int i;
924 for (i = 0; i < ARRAY_SIZE(cache_types); ++i)
925 if (cache_types[i].id == codec->compress_type)
926 break;
928 /* Fall back to flat compression */
929 if (i == ARRAY_SIZE(cache_types)) {
930 dev_warn(codec->dev, "Could not match compress type: %d\n",
931 codec->compress_type);
932 i = 0;
935 mutex_init(&codec->cache_rw_mutex);
936 codec->cache_ops = &cache_types[i];
938 if (codec->cache_ops->init) {
939 if (codec->cache_ops->name)
940 dev_dbg(codec->dev, "Initializing %s cache for %s codec\n",
941 codec->cache_ops->name, codec->name);
942 return codec->cache_ops->init(codec);
944 return -ENOSYS;
948 * NOTE: keep in mind that this function might be called
949 * multiple times.
951 int snd_soc_cache_exit(struct snd_soc_codec *codec)
953 if (codec->cache_ops && codec->cache_ops->exit) {
954 if (codec->cache_ops->name)
955 dev_dbg(codec->dev, "Destroying %s cache for %s codec\n",
956 codec->cache_ops->name, codec->name);
957 return codec->cache_ops->exit(codec);
959 return -ENOSYS;
963 * snd_soc_cache_read: Fetch the value of a given register from the cache.
965 * @codec: CODEC to configure.
966 * @reg: The register index.
967 * @value: The value to be returned.
969 int snd_soc_cache_read(struct snd_soc_codec *codec,
970 unsigned int reg, unsigned int *value)
972 int ret;
974 mutex_lock(&codec->cache_rw_mutex);
976 if (value && codec->cache_ops && codec->cache_ops->read) {
977 ret = codec->cache_ops->read(codec, reg, value);
978 mutex_unlock(&codec->cache_rw_mutex);
979 return ret;
982 mutex_unlock(&codec->cache_rw_mutex);
983 return -ENOSYS;
985 EXPORT_SYMBOL_GPL(snd_soc_cache_read);
988 * snd_soc_cache_write: Set the value of a given register in the cache.
990 * @codec: CODEC to configure.
991 * @reg: The register index.
992 * @value: The new register value.
994 int snd_soc_cache_write(struct snd_soc_codec *codec,
995 unsigned int reg, unsigned int value)
997 int ret;
999 mutex_lock(&codec->cache_rw_mutex);
1001 if (codec->cache_ops && codec->cache_ops->write) {
1002 ret = codec->cache_ops->write(codec, reg, value);
1003 mutex_unlock(&codec->cache_rw_mutex);
1004 return ret;
1007 mutex_unlock(&codec->cache_rw_mutex);
1008 return -ENOSYS;
1010 EXPORT_SYMBOL_GPL(snd_soc_cache_write);
1013 * snd_soc_cache_sync: Sync the register cache with the hardware.
1015 * @codec: CODEC to configure.
1017 * Any registers that should not be synced should be marked as
1018 * volatile. In general drivers can choose not to use the provided
1019 * syncing functionality if they so require.
1021 int snd_soc_cache_sync(struct snd_soc_codec *codec)
1023 int ret;
1024 const char *name;
1026 if (!codec->cache_sync) {
1027 return 0;
1030 if (!codec->cache_ops || !codec->cache_ops->sync)
1031 return -ENOSYS;
1033 if (codec->cache_ops->name)
1034 name = codec->cache_ops->name;
1035 else
1036 name = "unknown";
1038 if (codec->cache_ops->name)
1039 dev_dbg(codec->dev, "Syncing %s cache for %s codec\n",
1040 codec->cache_ops->name, codec->name);
1041 trace_snd_soc_cache_sync(codec, name, "start");
1042 ret = codec->cache_ops->sync(codec);
1043 if (!ret)
1044 codec->cache_sync = 0;
1045 trace_snd_soc_cache_sync(codec, name, "end");
1046 return ret;
1048 EXPORT_SYMBOL_GPL(snd_soc_cache_sync);
1050 static int snd_soc_get_reg_access_index(struct snd_soc_codec *codec,
1051 unsigned int reg)
1053 const struct snd_soc_codec_driver *codec_drv;
1054 unsigned int min, max, index;
1056 codec_drv = codec->driver;
1057 min = 0;
1058 max = codec_drv->reg_access_size - 1;
1059 do {
1060 index = (min + max) / 2;
1061 if (codec_drv->reg_access_default[index].reg == reg)
1062 return index;
1063 if (codec_drv->reg_access_default[index].reg < reg)
1064 min = index + 1;
1065 else
1066 max = index;
1067 } while (min <= max);
1068 return -1;
1071 int snd_soc_default_volatile_register(struct snd_soc_codec *codec,
1072 unsigned int reg)
1074 int index;
1076 if (reg >= codec->driver->reg_cache_size)
1077 return 1;
1078 index = snd_soc_get_reg_access_index(codec, reg);
1079 if (index < 0)
1080 return 0;
1081 return codec->driver->reg_access_default[index].vol;
1083 EXPORT_SYMBOL_GPL(snd_soc_default_volatile_register);
1085 int snd_soc_default_readable_register(struct snd_soc_codec *codec,
1086 unsigned int reg)
1088 int index;
1090 if (reg >= codec->driver->reg_cache_size)
1091 return 1;
1092 index = snd_soc_get_reg_access_index(codec, reg);
1093 if (index < 0)
1094 return 0;
1095 return codec->driver->reg_access_default[index].read;
1097 EXPORT_SYMBOL_GPL(snd_soc_default_readable_register);
1099 int snd_soc_default_writable_register(struct snd_soc_codec *codec,
1100 unsigned int reg)
1102 int index;
1104 if (reg >= codec->driver->reg_cache_size)
1105 return 1;
1106 index = snd_soc_get_reg_access_index(codec, reg);
1107 if (index < 0)
1108 return 0;
1109 return codec->driver->reg_access_default[index].write;
1111 EXPORT_SYMBOL_GPL(snd_soc_default_writable_register);