drivers/nvmem/core.c
// SPDX-License-Identifier: GPL-2.0
/*
 * nvmem framework core.
 *
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/slab.h>

#include "nvmem.h"

struct nvmem_cell {
	const char		*name;
	int			offset;
	int			bytes;
	int			bit_offset;
	int			nbits;
	struct device_node	*np;
	struct nvmem_device	*nvmem;
	struct list_head	node;
};

static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);

static DEFINE_MUTEX(nvmem_cell_mutex);
static LIST_HEAD(nvmem_cell_tables);

static DEFINE_MUTEX(nvmem_lookup_mutex);
static LIST_HEAD(nvmem_lookup_list);

static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);
static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			  void *val, size_t bytes)
{
	if (nvmem->reg_read)
		return nvmem->reg_read(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			   void *val, size_t bytes)
{
	if (nvmem->reg_write)
		return nvmem->reg_write(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

static void nvmem_release(struct device *dev)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	ida_simple_remove(&nvmem_ida, nvmem->id);
	kfree(nvmem);
}

static const struct device_type nvmem_provider_type = {
	.release	= nvmem_release,
};

static struct bus_type nvmem_bus_type = {
	.name		= "nvmem",
};

static void nvmem_cell_drop(struct nvmem_cell *cell)
{
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
	mutex_lock(&nvmem_mutex);
	list_del(&cell->node);
	mutex_unlock(&nvmem_mutex);
	of_node_put(cell->np);
	kfree(cell->name);
	kfree(cell);
}

static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
	struct nvmem_cell *cell, *p;

	list_for_each_entry_safe(cell, p, &nvmem->cells, node)
		nvmem_cell_drop(cell);
}

static void nvmem_cell_add(struct nvmem_cell *cell)
{
	mutex_lock(&nvmem_mutex);
	list_add_tail(&cell->node, &cell->nvmem->cells);
	mutex_unlock(&nvmem_mutex);
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
}

static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
					 const struct nvmem_cell_info *info,
					 struct nvmem_cell *cell)
{
	cell->nvmem = nvmem;
	cell->offset = info->offset;
	cell->bytes = info->bytes;
	cell->name = info->name;

	cell->bit_offset = info->bit_offset;
	cell->nbits = info->nbits;

	if (cell->nbits)
		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
					   BITS_PER_BYTE);

	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
		dev_err(&nvmem->dev,
			"cell %s unaligned to nvmem stride %d\n",
			cell->name, nvmem->stride);
		return -EINVAL;
	}

	return 0;
}
/**
 * nvmem_add_cells() - Add cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 * @ncells: number of cells in info
 *
 * Return: 0 or negative error code on failure.
 */
static int nvmem_add_cells(struct nvmem_device *nvmem,
			   const struct nvmem_cell_info *info,
			   int ncells)
{
	struct nvmem_cell **cells;
	int i, rval;

	cells = kcalloc(ncells, sizeof(*cells), GFP_KERNEL);
	if (!cells)
		return -ENOMEM;

	for (i = 0; i < ncells; i++) {
		cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL);
		if (!cells[i]) {
			rval = -ENOMEM;
			goto err;
		}

		rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]);
		if (rval) {
			kfree(cells[i]);
			goto err;
		}

		nvmem_cell_add(cells[i]);
	}

	/* remove tmp array */
	kfree(cells);

	return 0;
err:
	while (i--)
		nvmem_cell_drop(cells[i]);

	kfree(cells);

	return rval;
}

/**
 * nvmem_register_notifier() - Register a notifier block for nvmem events.
 *
 * @nb: notifier block to be called on nvmem events.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_register_notifier);

/**
 * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
 *
 * @nb: notifier block to be unregistered.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);
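
/*
 * Illustrative sketch (not code from this file): a consumer that wants to be
 * told when nvmem devices or cells come and go can hook the notifier chain.
 * The foo_* names and the handling shown are hypothetical.
 *
 *	static int foo_nvmem_event(struct notifier_block *nb,
 *				   unsigned long event, void *data)
 *	{
 *		if (event == NVMEM_CELL_ADD)
 *			pr_debug("an nvmem cell was registered\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_nvmem_event,
 *	};
 *
 *	... nvmem_register_notifier(&foo_nb); ...
 */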
static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
{
	const struct nvmem_cell_info *info;
	struct nvmem_cell_table *table;
	struct nvmem_cell *cell;
	int rval = 0, i;

	mutex_lock(&nvmem_cell_mutex);
	list_for_each_entry(table, &nvmem_cell_tables, node) {
		if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
			for (i = 0; i < table->ncells; i++) {
				info = &table->cells[i];

				cell = kzalloc(sizeof(*cell), GFP_KERNEL);
				if (!cell) {
					rval = -ENOMEM;
					goto out;
				}

				rval = nvmem_cell_info_to_nvmem_cell(nvmem,
								     info,
								     cell);
				if (rval) {
					kfree(cell);
					goto out;
				}

				nvmem_cell_add(cell);
			}
		}
	}

out:
	mutex_unlock(&nvmem_cell_mutex);
	return rval;
}

static struct nvmem_cell *
nvmem_find_cell_by_name(struct nvmem_device *nvmem, const char *cell_id)
{
	struct nvmem_cell *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (strcmp(cell_id, iter->name) == 0) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
{
	struct device_node *parent, *child;
	struct device *dev = &nvmem->dev;
	struct nvmem_cell *cell;
	const __be32 *addr;
	int len;

	parent = dev->of_node;

	for_each_child_of_node(parent, child) {
		addr = of_get_property(child, "reg", &len);
		if (!addr || (len < 2 * sizeof(u32))) {
			dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
			return -EINVAL;
		}

		cell = kzalloc(sizeof(*cell), GFP_KERNEL);
		if (!cell)
			return -ENOMEM;

		cell->nvmem = nvmem;
		cell->np = of_node_get(child);
		cell->offset = be32_to_cpup(addr++);
		cell->bytes = be32_to_cpup(addr);
		cell->name = kasprintf(GFP_KERNEL, "%pOFn", child);

		addr = of_get_property(child, "bits", &len);
		if (addr && len == (2 * sizeof(u32))) {
			cell->bit_offset = be32_to_cpup(addr++);
			cell->nbits = be32_to_cpup(addr);
		}

		if (cell->nbits)
			cell->bytes = DIV_ROUND_UP(
					cell->nbits + cell->bit_offset,
					BITS_PER_BYTE);

		if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
			dev_err(dev, "cell %s unaligned to nvmem stride %d\n",
				cell->name, nvmem->stride);
			/* Cells already added will be freed later. */
			kfree(cell->name);
			kfree(cell);
			return -EINVAL;
		}

		nvmem_cell_add(cell);
	}

	return 0;
}
/**
 * nvmem_register() - Register an nvmem device for the given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int rval;

	if (!config->dev)
		return ERR_PTR(-EINVAL);

	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
	if (!nvmem)
		return ERR_PTR(-ENOMEM);

	rval = ida_simple_get(&nvmem_ida, 0, 0, GFP_KERNEL);
	if (rval < 0) {
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	kref_init(&nvmem->refcnt);
	INIT_LIST_HEAD(&nvmem->cells);

	nvmem->id = rval;
	nvmem->owner = config->owner;
	if (!nvmem->owner && config->dev->driver)
		nvmem->owner = config->dev->driver->owner;
	nvmem->stride = config->stride ?: 1;
	nvmem->word_size = config->word_size ?: 1;
	nvmem->size = config->size;
	nvmem->dev.type = &nvmem_provider_type;
	nvmem->dev.bus = &nvmem_bus_type;
	nvmem->dev.parent = config->dev;
	nvmem->priv = config->priv;
	nvmem->type = config->type;
	nvmem->reg_read = config->reg_read;
	nvmem->reg_write = config->reg_write;
	if (!config->no_of_node)
		nvmem->dev.of_node = config->dev->of_node;

	if (config->id == -1 && config->name) {
		dev_set_name(&nvmem->dev, "%s", config->name);
	} else {
		dev_set_name(&nvmem->dev, "%s%d",
			     config->name ? : "nvmem",
			     config->name ? config->id : nvmem->id);
	}

	nvmem->read_only = device_property_present(config->dev, "read-only") ||
			   config->read_only || !nvmem->reg_write;

	nvmem->dev.groups = nvmem_sysfs_get_groups(nvmem, config);

	device_initialize(&nvmem->dev);

	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);

	rval = device_add(&nvmem->dev);
	if (rval)
		goto err_put_device;

	if (config->compat) {
		rval = nvmem_sysfs_setup_compat(nvmem, config);
		if (rval)
			goto err_device_del;
	}

	if (config->cells) {
		rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
		if (rval)
			goto err_teardown_compat;
	}

	rval = nvmem_add_cells_from_table(nvmem);
	if (rval)
		goto err_remove_cells;

	rval = nvmem_add_cells_from_of(nvmem);
	if (rval)
		goto err_remove_cells;

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);

	return nvmem;

err_remove_cells:
	nvmem_device_remove_all_cells(nvmem);
err_teardown_compat:
	if (config->compat)
		nvmem_sysfs_remove_compat(nvmem, config);
err_device_del:
	device_del(&nvmem->dev);
err_put_device:
	put_device(&nvmem->dev);

	return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);
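
/*
 * Illustrative provider sketch (not code from this file): a driver typically
 * fills in a struct nvmem_config and registers it from its probe path. The
 * foo_* names and the 256-byte size are hypothetical.
 *
 *	static int foo_reg_read(void *priv, unsigned int offset, void *val,
 *				size_t bytes)
 *	{
 *		struct foo_chip *chip = priv;
 *
 *		return foo_chip_read(chip, offset, val, bytes);
 *	}
 *
 *	static int foo_probe(struct device *dev, struct foo_chip *chip)
 *	{
 *		struct nvmem_config config = {
 *			.name = "foo-eeprom",
 *			.id = -1,
 *			.dev = dev,
 *			.priv = chip,
 *			.stride = 1,
 *			.word_size = 1,
 *			.size = 256,
 *			.reg_read = foo_reg_read,
 *		};
 *		struct nvmem_device *nvmem;
 *
 *		nvmem = devm_nvmem_register(dev, &config);
 *		return PTR_ERR_OR_ZERO(nvmem);
 *	}
 */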
static void nvmem_device_release(struct kref *kref)
{
	struct nvmem_device *nvmem;

	nvmem = container_of(kref, struct nvmem_device, refcnt);

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);

	if (nvmem->flags & FLAG_COMPAT)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);

	nvmem_device_remove_all_cells(nvmem);
	device_del(&nvmem->dev);
	put_device(&nvmem->dev);
}

/**
 * nvmem_unregister() - Unregister previously registered nvmem device
 *
 * @nvmem: Pointer to previously registered nvmem device.
 */
void nvmem_unregister(struct nvmem_device *nvmem)
{
	kref_put(&nvmem->refcnt, nvmem_device_release);
}
EXPORT_SYMBOL_GPL(nvmem_unregister);
static void devm_nvmem_release(struct device *dev, void *res)
{
	nvmem_unregister(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_register() - Register a managed nvmem device for the given
 * nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @dev: Device that uses the nvmem device.
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *devm_nvmem_register(struct device *dev,
					 const struct nvmem_config *config)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_register(config);

	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_register);

static int devm_nvmem_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **r = res;

	return *r == data;
}

/**
 * devm_nvmem_unregister() - Unregister a previously registered managed nvmem
 * device.
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: Pointer to previously registered nvmem device.
 *
 * Return: Will be a negative error code on failure or zero on success.
 */
int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
{
	return devres_release(dev, devm_nvmem_release, devm_nvmem_match, nvmem);
}
EXPORT_SYMBOL(devm_nvmem_unregister);
static struct nvmem_device *__nvmem_device_get(void *data,
			int (*match)(struct device *dev, const void *data))
{
	struct nvmem_device *nvmem = NULL;
	struct device *dev;

	mutex_lock(&nvmem_mutex);
	dev = bus_find_device(&nvmem_bus_type, NULL, data, match);
	if (dev)
		nvmem = to_nvmem_device(dev);
	mutex_unlock(&nvmem_mutex);
	if (!nvmem)
		return ERR_PTR(-EPROBE_DEFER);

	if (!try_module_get(nvmem->owner)) {
		dev_err(&nvmem->dev,
			"could not increase module refcount for cell %s\n",
			nvmem_dev_name(nvmem));

		put_device(&nvmem->dev);
		return ERR_PTR(-EINVAL);
	}

	kref_get(&nvmem->refcnt);

	return nvmem;
}

static void __nvmem_device_put(struct nvmem_device *nvmem)
{
	put_device(&nvmem->dev);
	module_put(nvmem->owner);
	kref_put(&nvmem->refcnt, nvmem_device_release);
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_device_get() - Get nvmem device from a given id
 *
 * @np: Device tree node that uses the nvmem device.
 * @id: nvmem name from nvmem-names property.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
	struct device_node *nvmem_np;
	int index = 0;

	if (id)
		index = of_property_match_string(np, "nvmem-names", id);

	nvmem_np = of_parse_phandle(np, "nvmem", index);
	if (!nvmem_np)
		return ERR_PTR(-ENOENT);

	return __nvmem_device_get(nvmem_np, device_match_of_node);
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif

/**
 * nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that uses the nvmem device.
 * @dev_name: name of the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
{
	if (dev->of_node) { /* try dt first */
		struct nvmem_device *nvmem;

		nvmem = of_nvmem_device_get(dev->of_node, dev_name);

		if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
			return nvmem;
	}

	return __nvmem_device_get((void *)dev_name, device_match_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);

/**
 * nvmem_device_find() - Find nvmem device with matching function
 *
 * @data: Data to pass to match function
 * @match: Callback function to check device
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_find(void *data,
			int (*match)(struct device *dev, const void *data))
{
	return __nvmem_device_get(data, match);
}
EXPORT_SYMBOL_GPL(nvmem_device_find);

static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **nvmem = res;

	if (WARN_ON(!nvmem || !*nvmem))
		return 0;

	return *nvmem == data;
}

static void devm_nvmem_device_release(struct device *dev, void *res)
{
	nvmem_device_put(*(struct nvmem_device **)res);
}
/**
 * devm_nvmem_device_put() - put an already-claimed nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to nvmem device allocated by devm_nvmem_device_get(),
 * that needs to be released.
 */
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_device_release,
			     devm_nvmem_device_match, nvmem);

	WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);

/**
 * nvmem_device_put() - put an already-claimed nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);

/**
 * devm_nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that requests the nvmem device.
 * @id: name id for the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success. The nvmem_device will be released automatically once the
 * device is freed.
 */
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_device_get(dev, id);
	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);
static struct nvmem_cell *
nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
{
	struct nvmem_cell *cell = ERR_PTR(-ENOENT);
	struct nvmem_cell_lookup *lookup;
	struct nvmem_device *nvmem;
	const char *dev_id;

	if (!dev)
		return ERR_PTR(-EINVAL);

	dev_id = dev_name(dev);

	mutex_lock(&nvmem_lookup_mutex);

	list_for_each_entry(lookup, &nvmem_lookup_list, node) {
		if ((strcmp(lookup->dev_id, dev_id) == 0) &&
		    (strcmp(lookup->con_id, con_id) == 0)) {
			/* This is the right entry. */
			nvmem = __nvmem_device_get((void *)lookup->nvmem_name,
						   device_match_name);
			if (IS_ERR(nvmem)) {
				/* Provider may not be registered yet. */
				cell = ERR_CAST(nvmem);
				break;
			}

			cell = nvmem_find_cell_by_name(nvmem,
						       lookup->cell_name);
			if (!cell) {
				__nvmem_device_put(nvmem);
				cell = ERR_PTR(-ENOENT);
			}
			break;
		}
	}

	mutex_unlock(&nvmem_lookup_mutex);
	return cell;
}

#if IS_ENABLED(CONFIG_OF)
static struct nvmem_cell *
nvmem_find_cell_by_node(struct nvmem_device *nvmem, struct device_node *np)
{
	struct nvmem_cell *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (np == iter->np) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}
/**
 * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
 *
 * @np: Device tree node that uses the nvmem cell.
 * @id: nvmem cell name from nvmem-cell-names property, or NULL
 *      for the cell at index 0 (the lone cell with no accompanying
 *      nvmem-cell-names property).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell must be released
 * with nvmem_cell_put().
 */
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
{
	struct device_node *cell_np, *nvmem_np;
	struct nvmem_device *nvmem;
	struct nvmem_cell *cell;
	int index = 0;

	/* if cell name exists, find index to the name */
	if (id)
		index = of_property_match_string(np, "nvmem-cell-names", id);

	cell_np = of_parse_phandle(np, "nvmem-cells", index);
	if (!cell_np)
		return ERR_PTR(-ENOENT);

	nvmem_np = of_get_next_parent(cell_np);
	if (!nvmem_np)
		return ERR_PTR(-EINVAL);

	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	if (IS_ERR(nvmem))
		return ERR_CAST(nvmem);

	cell = nvmem_find_cell_by_node(nvmem, cell_np);
	if (!cell) {
		__nvmem_device_put(nvmem);
		return ERR_PTR(-ENOENT);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
#endif

/**
 * nvmem_cell_get() - Get nvmem cell of device from a given cell name
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name to get (this corresponds with the name from the
 *      nvmem-cell-names property for DT systems and with the con_id from
 *      the lookup entry for non-DT systems).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell must be released
 * with nvmem_cell_put().
 */
struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell *cell;

	if (dev->of_node) { /* try dt first */
		cell = of_nvmem_cell_get(dev->of_node, id);
		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
			return cell;
	}

	/* NULL cell id only allowed for device tree; invalid otherwise */
	if (!id)
		return ERR_PTR(-EINVAL);

	return nvmem_cell_get_from_lookup(dev, id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);
static void devm_nvmem_cell_release(struct device *dev, void *res)
{
	nvmem_cell_put(*(struct nvmem_cell **)res);
}

/**
 * devm_nvmem_cell_get() - Get nvmem cell of device from a given id
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name id to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell is released
 * automatically once the device is freed.
 */
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell **ptr, *cell;

	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	cell = nvmem_cell_get(dev, id);
	if (!IS_ERR(cell)) {
		*ptr = cell;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);
static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
	struct nvmem_cell **c = res;

	if (WARN_ON(!c || !*c))
		return 0;

	return *c == data;
}

/**
 * devm_nvmem_cell_put() - Release previously allocated nvmem cell
 * from devm_nvmem_cell_get.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 */
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_cell_release,
			     devm_nvmem_cell_match, cell);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);

/**
 * nvmem_cell_put() - Release previously allocated nvmem cell.
 *
 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
 */
void nvmem_cell_put(struct nvmem_cell *cell)
{
	struct nvmem_device *nvmem = cell->nvmem;

	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);
static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf)
{
	u8 *p, *b;
	int i, extra, bit_offset = cell->bit_offset;

	p = b = buf;
	if (bit_offset) {
		/* First shift */
		*b++ >>= bit_offset;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get bits from next byte and shift them towards msb */
			*p |= *b << (BITS_PER_BYTE - bit_offset);

			p = b;
			*b++ >>= bit_offset;
		}
	} else {
		/* point to the msb */
		p += cell->bytes - 1;
	}

	/* result fits in less bytes */
	extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
	while (--extra >= 0)
		*p-- = 0;

	/* clear msb bits if any leftover in the last byte */
	*p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
}

static int __nvmem_cell_read(struct nvmem_device *nvmem,
			     struct nvmem_cell *cell,
			     void *buf, size_t *len)
{
	int rc;

	rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes);

	if (rc)
		return rc;

	/* shift bits in-place */
	if (cell->bit_offset || cell->nbits)
		nvmem_shift_read_buffer_in_place(cell, buf);

	if (len)
		*len = cell->bytes;

	return 0;
}
/**
 * nvmem_cell_read() - Read a given nvmem cell
 *
 * @cell: nvmem cell to be read.
 * @len: pointer to length of cell which will be populated on successful read;
 *	 can be NULL.
 *
 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
 * buffer should be freed by the consumer with a kfree().
 */
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	u8 *buf;
	int rc;

	if (!nvmem)
		return ERR_PTR(-EINVAL);

	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	rc = __nvmem_cell_read(nvmem, cell, buf, len);
	if (rc) {
		kfree(buf);
		return ERR_PTR(rc);
	}

	return buf;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);
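
/*
 * Illustrative consumer sketch (not code from this file): a typical caller
 * looks the cell up by name, reads it, then releases both the buffer and the
 * cell. The "mac-address" cell name and foo_dev pointer are hypothetical.
 *
 *	struct nvmem_cell *cell;
 *	size_t len;
 *	u8 *mac;
 *
 *	cell = nvmem_cell_get(foo_dev, "mac-address");
 *	if (IS_ERR(cell))
 *		return PTR_ERR(cell);
 *
 *	mac = nvmem_cell_read(cell, &len);
 *	nvmem_cell_put(cell);
 *	if (IS_ERR(mac))
 *		return PTR_ERR(mac);
 *
 *	... use mac[0..len - 1] ...
 *	kfree(mac);
 */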
static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
					     u8 *_buf, int len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int i, rc, nbits, bit_offset = cell->bit_offset;
	u8 v, *p, *buf, *b, pbyte, pbits;

	nbits = cell->nbits;
	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	memcpy(buf, _buf, len);
	p = b = buf;

	if (bit_offset) {
		pbyte = *b;
		*b <<= bit_offset;

		/* setup the first byte with lsb bits from nvmem */
		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
		if (rc)
			goto err;
		*b++ |= GENMASK(bit_offset - 1, 0) & v;

		/* setup rest of the byte if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get last byte bits and shift them towards lsb */
			pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
			pbyte = *b;
			p = b;
			*b <<= bit_offset;
			*b++ |= pbits;
		}
	}

	/* if it's not end on byte boundary */
	if ((nbits + bit_offset) % BITS_PER_BYTE) {
		/* setup the last byte with msb bits from nvmem */
		rc = nvmem_reg_read(nvmem,
				    cell->offset + cell->bytes - 1, &v, 1);
		if (rc)
			goto err;
		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
	}

	return buf;
err:
	kfree(buf);
	return ERR_PTR(rc);
}
/**
 * nvmem_cell_write() - Write to a given nvmem cell
 *
 * @cell: nvmem cell to be written.
 * @buf: Buffer to be written.
 * @len: length of buffer to be written to nvmem cell.
 *
 * Return: length of bytes written or negative on failure.
 */
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int rc;

	if (!nvmem || nvmem->read_only ||
	    (cell->bit_offset == 0 && len != cell->bytes))
		return -EINVAL;

	if (cell->bit_offset || cell->nbits) {
		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
		if (IS_ERR(buf))
			return PTR_ERR(buf);
	}

	rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);

	/* free the tmp buffer */
	if (cell->bit_offset || cell->nbits)
		kfree(buf);

	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);
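
/*
 * Illustrative write sketch (not code from this file): writing requires a
 * writable provider and, for byte-aligned cells, a buffer of exactly the
 * cell size. The "calibration" cell name and cal_data buffer are
 * hypothetical.
 *
 *	struct nvmem_cell *cell;
 *	int ret;
 *
 *	cell = nvmem_cell_get(foo_dev, "calibration");
 *	if (IS_ERR(cell))
 *		return PTR_ERR(cell);
 *
 *	ret = nvmem_cell_write(cell, cal_data, sizeof(cal_data));
 *	nvmem_cell_put(cell);
 *
 *	return ret < 0 ? ret : 0;
 */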
/**
 * nvmem_cell_read_u16() - Read a cell value as a u16
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	if (IS_ERR(buf)) {
		nvmem_cell_put(cell);
		return PTR_ERR(buf);
	}
	if (len != sizeof(*val)) {
		kfree(buf);
		nvmem_cell_put(cell);
		return -EINVAL;
	}
	memcpy(val, buf, sizeof(*val));
	kfree(buf);
	nvmem_cell_put(cell);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);

/**
 * nvmem_cell_read_u32() - Read a cell value as a u32
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	if (IS_ERR(buf)) {
		nvmem_cell_put(cell);
		return PTR_ERR(buf);
	}
	if (len != sizeof(*val)) {
		kfree(buf);
		nvmem_cell_put(cell);
		return -EINVAL;
	}
	memcpy(val, buf, sizeof(*val));

	kfree(buf);
	nvmem_cell_put(cell);
	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);
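
/*
 * Illustrative sketch (not code from this file): the u32 helper bundles
 * get/read/put for the common case of a 4-byte cell. The "boot-count" cell
 * name and foo_dev pointer are hypothetical.
 *
 *	u32 boot_count;
 *	int ret;
 *
 *	ret = nvmem_cell_read_u32(foo_dev, "boot-count", &boot_count);
 *	if (ret)
 *		return ret;
 */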
/**
 * nvmem_device_cell_read() - Read a given nvmem device and cell
 *
 * @nvmem: nvmem device to read from.
 * @info: nvmem cell info to be read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
			       struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;
	ssize_t len;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
	if (rc)
		return rc;

	rc = __nvmem_cell_read(nvmem, &cell, buf, &len);
	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_read);

/**
 * nvmem_device_cell_write() - Write cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @info: nvmem cell info to be written.
 * @buf: buffer to be written to cell.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_cell_write(struct nvmem_device *nvmem,
			    struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
	if (rc)
		return rc;

	return nvmem_cell_write(&cell, buf, cell.bytes);
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_write);
/**
 * nvmem_device_read() - Read from a given nvmem device
 *
 * @nvmem: nvmem device to read from.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
int nvmem_device_read(struct nvmem_device *nvmem,
		      unsigned int offset,
		      size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_read(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_read);
/**
 * nvmem_device_write() - Write to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to write.
 * @buf: buffer to be written.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_write(struct nvmem_device *nvmem,
		       unsigned int offset,
		       size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_write(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_write);
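
/*
 * Illustrative sketch (not code from this file): raw device-level access goes
 * through an nvmem device handle rather than a cell. The "foo-eeprom" name
 * and the 16-byte read at offset 0x10 are hypothetical.
 *
 *	struct nvmem_device *nvmem;
 *	u8 id[16];
 *	int ret;
 *
 *	nvmem = devm_nvmem_device_get(dev, "foo-eeprom");
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 *
 *	ret = nvmem_device_read(nvmem, 0x10, sizeof(id), id);
 *	if (ret < 0)
 *		return ret;
 */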
/**
 * nvmem_add_cell_table() - register a table of cell info entries
 *
 * @table: table of cell info entries
 */
void nvmem_add_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_add_tail(&table->node, &nvmem_cell_tables);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_table);
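
/*
 * Illustrative sketch (not code from this file): on non-DT platforms, board
 * code can describe cells for a provider by name. The "foo-eeprom0" provider
 * name and the cell layout are hypothetical.
 *
 *	static struct nvmem_cell_info foo_cells[] = {
 *		{
 *			.name = "serial-number",
 *			.offset = 0x0,
 *			.bytes = 8,
 *		},
 *	};
 *
 *	static struct nvmem_cell_table foo_cell_table = {
 *		.nvmem_name = "foo-eeprom0",
 *		.cells = foo_cells,
 *		.ncells = ARRAY_SIZE(foo_cells),
 *	};
 *
 *	... nvmem_add_cell_table(&foo_cell_table); ...
 */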
/**
 * nvmem_del_cell_table() - remove a previously registered cell info table
 *
 * @table: table of cell info entries
 */
void nvmem_del_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_del(&table->node);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_table);

/**
 * nvmem_add_cell_lookups() - register a list of cell lookup entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_add_tail(&entries[i].node, &nvmem_lookup_list);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);
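
/*
 * Illustrative sketch (not code from this file): lookup entries connect a
 * consumer device/con_id pair to a named cell on a named provider, so that
 * nvmem_cell_get() also works without device tree. All names are
 * hypothetical.
 *
 *	static struct nvmem_cell_lookup foo_lookups[] = {
 *		{
 *			.nvmem_name = "foo-eeprom0",
 *			.cell_name = "mac-address",
 *			.dev_id = "foo-ethernet.0",
 *			.con_id = "mac-address",
 *		},
 *	};
 *
 *	... nvmem_add_cell_lookups(foo_lookups, ARRAY_SIZE(foo_lookups)); ...
 */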
/**
 * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
 * entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_del(&entries[i].node);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);

/**
 * nvmem_dev_name() - Get the name of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: name of the nvmem device.
 */
const char *nvmem_dev_name(struct nvmem_device *nvmem)
{
	return dev_name(&nvmem->dev);
}
EXPORT_SYMBOL_GPL(nvmem_dev_name);
static int __init nvmem_init(void)
{
	return bus_register(&nvmem_bus_type);
}

static void __exit nvmem_exit(void)
{
	bus_unregister(&nvmem_bus_type);
}

subsys_initcall(nvmem_init);
module_exit(nvmem_exit);

MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("nvmem Driver Core");
MODULE_LICENSE("GPL v2");