1 /*
2 * SPI init/core code
4 * Copyright (C) 2005 David Brownell
5 * Copyright (C) 2008 Secret Lab Technologies Ltd.
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
18 #include <linux/kernel.h>
19 #include <linux/kmod.h>
20 #include <linux/device.h>
21 #include <linux/init.h>
22 #include <linux/cache.h>
23 #include <linux/dma-mapping.h>
24 #include <linux/dmaengine.h>
25 #include <linux/mutex.h>
26 #include <linux/of_device.h>
27 #include <linux/of_irq.h>
28 #include <linux/clk/clk-conf.h>
29 #include <linux/slab.h>
30 #include <linux/mod_devicetable.h>
31 #include <linux/spi/spi.h>
32 #include <linux/of_gpio.h>
33 #include <linux/pm_runtime.h>
34 #include <linux/pm_domain.h>
35 #include <linux/export.h>
36 #include <linux/sched/rt.h>
37 #include <linux/delay.h>
38 #include <linux/kthread.h>
39 #include <linux/ioport.h>
40 #include <linux/acpi.h>
42 #define CREATE_TRACE_POINTS
43 #include <trace/events/spi.h>
45 static void spidev_release(struct device *dev)
47 struct spi_device *spi = to_spi_device(dev);
49 /* spi masters may cleanup for released devices */
50 if (spi->master->cleanup)
51 spi->master->cleanup(spi);
53 spi_master_put(spi->master);
54 kfree(spi);
57 static ssize_t
58 modalias_show(struct device *dev, struct device_attribute *a, char *buf)
60 const struct spi_device *spi = to_spi_device(dev);
61 int len;
63 len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
64 if (len != -ENODEV)
65 return len;
67 return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
69 static DEVICE_ATTR_RO(modalias);
71 static struct attribute *spi_dev_attrs[] = {
72 &dev_attr_modalias.attr,
73 NULL,
75 ATTRIBUTE_GROUPS(spi_dev);
77 /* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
78 * and the sysfs version makes coldplug work too.
81 static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
82 const struct spi_device *sdev)
84 while (id->name[0]) {
85 if (!strcmp(sdev->modalias, id->name))
86 return id;
87 id++;
89 return NULL;
92 const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
94 const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);
96 return spi_match_id(sdrv->id_table, sdev);
98 EXPORT_SYMBOL_GPL(spi_get_device_id);
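/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a protocol driver that supplies an id_table can recover per-chip data in
 * its probe() via spi_get_device_id().  The "foo" names are hypothetical.
 */
static const struct spi_device_id foo_spi_ids[] = {
	{ "foo-rev1", 1 },
	{ "foo-rev2", 2 },
	{ }
};

static int foo_spi_probe(struct spi_device *spi)
{
	unsigned long revision = spi_get_device_id(spi)->driver_data;

	dev_info(&spi->dev, "chip revision %lu\n", revision);
	return 0;
}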
100 static int spi_match_device(struct device *dev, struct device_driver *drv)
102 const struct spi_device *spi = to_spi_device(dev);
103 const struct spi_driver *sdrv = to_spi_driver(drv);
105 /* Attempt an OF style match */
106 if (of_driver_match_device(dev, drv))
107 return 1;
109 /* Then try ACPI */
110 if (acpi_driver_match_device(dev, drv))
111 return 1;
113 if (sdrv->id_table)
114 return !!spi_match_id(sdrv->id_table, spi);
116 return strcmp(spi->modalias, drv->name) == 0;
119 static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
121 const struct spi_device *spi = to_spi_device(dev);
122 int rc;
124 rc = acpi_device_uevent_modalias(dev, env);
125 if (rc != -ENODEV)
126 return rc;
128 add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
129 return 0;
132 #ifdef CONFIG_PM_SLEEP
133 static int spi_legacy_suspend(struct device *dev, pm_message_t message)
135 int value = 0;
136 struct spi_driver *drv = to_spi_driver(dev->driver);
138 /* suspend will stop irqs and dma; no more i/o */
139 if (drv) {
140 if (drv->suspend)
141 value = drv->suspend(to_spi_device(dev), message);
142 else
143 dev_dbg(dev, "... can't suspend\n");
145 return value;
148 static int spi_legacy_resume(struct device *dev)
150 int value = 0;
151 struct spi_driver *drv = to_spi_driver(dev->driver);
153 /* resume may restart the i/o queue */
154 if (drv) {
155 if (drv->resume)
156 value = drv->resume(to_spi_device(dev));
157 else
158 dev_dbg(dev, "... can't resume\n");
160 return value;
163 static int spi_pm_suspend(struct device *dev)
165 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
167 if (pm)
168 return pm_generic_suspend(dev);
169 else
170 return spi_legacy_suspend(dev, PMSG_SUSPEND);
173 static int spi_pm_resume(struct device *dev)
175 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
177 if (pm)
178 return pm_generic_resume(dev);
179 else
180 return spi_legacy_resume(dev);
183 static int spi_pm_freeze(struct device *dev)
185 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
187 if (pm)
188 return pm_generic_freeze(dev);
189 else
190 return spi_legacy_suspend(dev, PMSG_FREEZE);
193 static int spi_pm_thaw(struct device *dev)
195 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
197 if (pm)
198 return pm_generic_thaw(dev);
199 else
200 return spi_legacy_resume(dev);
203 static int spi_pm_poweroff(struct device *dev)
205 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
207 if (pm)
208 return pm_generic_poweroff(dev);
209 else
210 return spi_legacy_suspend(dev, PMSG_HIBERNATE);
213 static int spi_pm_restore(struct device *dev)
215 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
217 if (pm)
218 return pm_generic_restore(dev);
219 else
220 return spi_legacy_resume(dev);
222 #else
223 #define spi_pm_suspend NULL
224 #define spi_pm_resume NULL
225 #define spi_pm_freeze NULL
226 #define spi_pm_thaw NULL
227 #define spi_pm_poweroff NULL
228 #define spi_pm_restore NULL
229 #endif
231 static const struct dev_pm_ops spi_pm = {
232 .suspend = spi_pm_suspend,
233 .resume = spi_pm_resume,
234 .freeze = spi_pm_freeze,
235 .thaw = spi_pm_thaw,
236 .poweroff = spi_pm_poweroff,
237 .restore = spi_pm_restore,
238 SET_RUNTIME_PM_OPS(
239 pm_generic_runtime_suspend,
240 pm_generic_runtime_resume,
241 NULL
245 struct bus_type spi_bus_type = {
246 .name = "spi",
247 .dev_groups = spi_dev_groups,
248 .match = spi_match_device,
249 .uevent = spi_uevent,
250 .pm = &spi_pm,
252 EXPORT_SYMBOL_GPL(spi_bus_type);
255 static int spi_drv_probe(struct device *dev)
257 const struct spi_driver *sdrv = to_spi_driver(dev->driver);
258 int ret;
260 ret = of_clk_set_defaults(dev->of_node, false);
261 if (ret)
262 return ret;
264 ret = dev_pm_domain_attach(dev, true);
265 if (ret != -EPROBE_DEFER) {
266 ret = sdrv->probe(to_spi_device(dev));
267 if (ret)
268 dev_pm_domain_detach(dev, true);
271 return ret;
274 static int spi_drv_remove(struct device *dev)
276 const struct spi_driver *sdrv = to_spi_driver(dev->driver);
277 int ret;
279 ret = sdrv->remove(to_spi_device(dev));
280 dev_pm_domain_detach(dev, true);
282 return ret;
285 static void spi_drv_shutdown(struct device *dev)
287 const struct spi_driver *sdrv = to_spi_driver(dev->driver);
289 sdrv->shutdown(to_spi_device(dev));
293 * spi_register_driver - register a SPI driver
294 * @sdrv: the driver to register
295 * Context: can sleep
297 int spi_register_driver(struct spi_driver *sdrv)
299 sdrv->driver.bus = &spi_bus_type;
300 if (sdrv->probe)
301 sdrv->driver.probe = spi_drv_probe;
302 if (sdrv->remove)
303 sdrv->driver.remove = spi_drv_remove;
304 if (sdrv->shutdown)
305 sdrv->driver.shutdown = spi_drv_shutdown;
306 return driver_register(&sdrv->driver);
308 EXPORT_SYMBOL_GPL(spi_register_driver);
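/*
 * Illustrative sketch (editor's addition): most protocol drivers call
 * spi_register_driver() indirectly through the module_spi_driver() helper,
 * roughly as below.  The "bar" names are hypothetical.
 */
static int bar_probe(struct spi_device *spi)
{
	/* allocate driver state, configure the chip, ... */
	return 0;
}

static int bar_remove(struct spi_device *spi)
{
	/* undo whatever probe() did */
	return 0;
}

static struct spi_driver bar_driver = {
	.driver = {
		.name	= "bar",
		.owner	= THIS_MODULE,
	},
	.probe	= bar_probe,
	.remove	= bar_remove,
};
module_spi_driver(bar_driver);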
310 /*-------------------------------------------------------------------------*/
312 /* SPI devices should normally not be created by SPI device drivers; that
313 * would make them board-specific. Similarly with SPI master drivers.
314 * Device registration normally goes into files like arch/.../mach.../board-YYY.c
315 * with other readonly (flashable) information about mainboard devices.
318 struct boardinfo {
319 struct list_head list;
320 struct spi_board_info board_info;
323 static LIST_HEAD(board_list);
324 static LIST_HEAD(spi_master_list);
327 * Used to protect add/del operations for the board_info list and
328 * spi_master list, and their matching process
330 static DEFINE_MUTEX(board_lock);
333 * spi_alloc_device - Allocate a new SPI device
334 * @master: Controller to which device is connected
335 * Context: can sleep
337 * Allows a driver to allocate and initialize a spi_device without
338 * registering it immediately. This allows a driver to directly
339 * fill the spi_device with device parameters before calling
340 * spi_add_device() on it.
342 * Caller is responsible to call spi_add_device() on the returned
343 * spi_device structure to add it to the SPI master. If the caller
344 * needs to discard the spi_device without adding it, then it should
345 * call spi_dev_put() on it.
347 * Returns a pointer to the new device, or NULL.
349 struct spi_device *spi_alloc_device(struct spi_master *master)
351 struct spi_device *spi;
353 if (!spi_master_get(master))
354 return NULL;
356 spi = kzalloc(sizeof(*spi), GFP_KERNEL);
357 if (!spi) {
358 spi_master_put(master);
359 return NULL;
362 spi->master = master;
363 spi->dev.parent = &master->dev;
364 spi->dev.bus = &spi_bus_type;
365 spi->dev.release = spidev_release;
366 spi->cs_gpio = -ENOENT;
367 device_initialize(&spi->dev);
368 return spi;
370 EXPORT_SYMBOL_GPL(spi_alloc_device);
372 static void spi_dev_set_name(struct spi_device *spi)
374 struct acpi_device *adev = ACPI_COMPANION(&spi->dev);
376 if (adev) {
377 dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
378 return;
381 dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
382 spi->chip_select);
385 static int spi_dev_check(struct device *dev, void *data)
387 struct spi_device *spi = to_spi_device(dev);
388 struct spi_device *new_spi = data;
390 if (spi->master == new_spi->master &&
391 spi->chip_select == new_spi->chip_select)
392 return -EBUSY;
393 return 0;
397 * spi_add_device - Add spi_device allocated with spi_alloc_device
398 * @spi: spi_device to register
400 * Companion function to spi_alloc_device. Devices allocated with
401 * spi_alloc_device can be added onto the spi bus with this function.
403 * Returns 0 on success; negative errno on failure
405 int spi_add_device(struct spi_device *spi)
407 static DEFINE_MUTEX(spi_add_lock);
408 struct spi_master *master = spi->master;
409 struct device *dev = master->dev.parent;
410 int status;
412 /* Chipselects are numbered 0..max; validate. */
413 if (spi->chip_select >= master->num_chipselect) {
414 dev_err(dev, "cs%d >= max %d\n",
415 spi->chip_select,
416 master->num_chipselect);
417 return -EINVAL;
420 /* Set the bus ID string */
421 spi_dev_set_name(spi);
423 /* We need to make sure there's no other device with this
424 * chipselect **BEFORE** we call setup(), else we'll trash
425 * its configuration. Lock against concurrent add() calls.
427 mutex_lock(&spi_add_lock);
429 status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
430 if (status) {
431 dev_err(dev, "chipselect %d already in use\n",
432 spi->chip_select);
433 goto done;
436 if (master->cs_gpios)
437 spi->cs_gpio = master->cs_gpios[spi->chip_select];
439 /* Drivers may modify this initial i/o setup, but will
440 * normally rely on the device being setup. Devices
441 * using SPI_CS_HIGH can't coexist well otherwise...
443 status = spi_setup(spi);
444 if (status < 0) {
445 dev_err(dev, "can't setup %s, status %d\n",
446 dev_name(&spi->dev), status);
447 goto done;
450 /* Device may be bound to an active driver when this returns */
451 status = device_add(&spi->dev);
452 if (status < 0)
453 dev_err(dev, "can't add %s, status %d\n",
454 dev_name(&spi->dev), status);
455 else
456 dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
458 done:
459 mutex_unlock(&spi_add_lock);
460 return status;
462 EXPORT_SYMBOL_GPL(spi_add_device);
465 * spi_new_device - instantiate one new SPI device
466 * @master: Controller to which device is connected
467 * @chip: Describes the SPI device
468 * Context: can sleep
470 * On typical mainboards, this is purely internal; and it's not needed
471 * after board init creates the hard-wired devices. Some development
472 * platforms may not be able to use spi_register_board_info though, and
473 * this is exported so that for example a USB or parport based adapter
474 * driver could add devices (which it would learn about out-of-band).
476 * Returns the new device, or NULL.
478 struct spi_device *spi_new_device(struct spi_master *master,
479 struct spi_board_info *chip)
481 struct spi_device *proxy;
482 int status;
484 /* NOTE: caller did any chip->bus_num checks necessary.
486 * Also, unless we change the return value convention to use
487 * error-or-pointer (not NULL-or-pointer), troubleshootability
488 * suggests syslogged diagnostics are best here (ugh).
491 proxy = spi_alloc_device(master);
492 if (!proxy)
493 return NULL;
495 WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));
497 proxy->chip_select = chip->chip_select;
498 proxy->max_speed_hz = chip->max_speed_hz;
499 proxy->mode = chip->mode;
500 proxy->irq = chip->irq;
501 strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
502 proxy->dev.platform_data = (void *) chip->platform_data;
503 proxy->controller_data = chip->controller_data;
504 proxy->controller_state = NULL;
506 status = spi_add_device(proxy);
507 if (status < 0) {
508 spi_dev_put(proxy);
509 return NULL;
512 return proxy;
514 EXPORT_SYMBOL_GPL(spi_new_device);
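/*
 * Illustrative sketch (editor's addition): an adapter driver that learns
 * about a slave at runtime could instantiate it like this; the values are
 * purely illustrative.
 */
static struct spi_device *example_add_slave(struct spi_master *master)
{
	struct spi_board_info chip = {
		.modalias	= "spidev",
		.max_speed_hz	= 1000000,
		.chip_select	= 0,
		.mode		= SPI_MODE_3,
	};

	return spi_new_device(master, &chip);
}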
516 static void spi_match_master_to_boardinfo(struct spi_master *master,
517 struct spi_board_info *bi)
519 struct spi_device *dev;
521 if (master->bus_num != bi->bus_num)
522 return;
524 dev = spi_new_device(master, bi);
525 if (!dev)
526 dev_err(master->dev.parent, "can't create new device for %s\n",
527 bi->modalias);
531 * spi_register_board_info - register SPI devices for a given board
532 * @info: array of chip descriptors
533 * @n: how many descriptors are provided
534 * Context: can sleep
536 * Board-specific early init code calls this (probably during arch_initcall)
537 * with segments of the SPI device table. Any device nodes are created later,
538 * after the relevant parent SPI controller (bus_num) is defined. We keep
539 * this table of devices forever, so that reloading a controller driver will
540 * not make Linux forget about these hard-wired devices.
542 * Other code can also call this, e.g. a particular add-on board might provide
543 * SPI devices through its expansion connector, so code initializing that board
544 * would naturally declare its SPI devices.
546 * The board info passed can safely be __initdata ... but be careful of
547 * any embedded pointers (platform_data, etc), they're copied as-is.
549 int spi_register_board_info(struct spi_board_info const *info, unsigned n)
551 struct boardinfo *bi;
552 int i;
554 if (!n)
555 return -EINVAL;
557 bi = kzalloc(n * sizeof(*bi), GFP_KERNEL);
558 if (!bi)
559 return -ENOMEM;
561 for (i = 0; i < n; i++, bi++, info++) {
562 struct spi_master *master;
564 memcpy(&bi->board_info, info, sizeof(*info));
565 mutex_lock(&board_lock);
566 list_add_tail(&bi->list, &board_list);
567 list_for_each_entry(master, &spi_master_list, list)
568 spi_match_master_to_boardinfo(master, &bi->board_info);
569 mutex_unlock(&board_lock);
572 return 0;
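/*
 * Illustrative sketch (editor's addition): board init code usually declares
 * its hard-wired devices in an __initdata table and registers it from an
 * arch_initcall(), e.g.:
 */
static struct spi_board_info example_board_spi_devices[] __initdata = {
	{
		.modalias	= "m25p80",
		.max_speed_hz	= 20000000,
		.bus_num	= 0,
		.chip_select	= 1,
		.mode		= SPI_MODE_0,
	},
};

/*
 * ... and then, from the board's init function:
 *	spi_register_board_info(example_board_spi_devices,
 *				ARRAY_SIZE(example_board_spi_devices));
 */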
575 /*-------------------------------------------------------------------------*/
577 static void spi_set_cs(struct spi_device *spi, bool enable)
579 if (spi->mode & SPI_CS_HIGH)
580 enable = !enable;
582 if (spi->cs_gpio >= 0)
583 gpio_set_value(spi->cs_gpio, !enable);
584 else if (spi->master->set_cs)
585 spi->master->set_cs(spi, !enable);
588 #ifdef CONFIG_HAS_DMA
589 static int spi_map_buf(struct spi_master *master, struct device *dev,
590 struct sg_table *sgt, void *buf, size_t len,
591 enum dma_data_direction dir)
593 const bool vmalloced_buf = is_vmalloc_addr(buf);
594 const int desc_len = vmalloced_buf ? PAGE_SIZE : master->max_dma_len;
595 const int sgs = DIV_ROUND_UP(len, desc_len);
596 struct page *vm_page;
597 void *sg_buf;
598 size_t min;
599 int i, ret;
601 ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
602 if (ret != 0)
603 return ret;
605 for (i = 0; i < sgs; i++) {
606 min = min_t(size_t, len, desc_len);
608 if (vmalloced_buf) {
609 vm_page = vmalloc_to_page(buf);
610 if (!vm_page) {
611 sg_free_table(sgt);
612 return -ENOMEM;
614 sg_set_page(&sgt->sgl[i], vm_page,
615 min, offset_in_page(buf));
616 } else {
617 sg_buf = buf;
618 sg_set_buf(&sgt->sgl[i], sg_buf, min);
622 buf += min;
623 len -= min;
626 ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
627 if (!ret)
628 ret = -ENOMEM;
629 if (ret < 0) {
630 sg_free_table(sgt);
631 return ret;
634 sgt->nents = ret;
636 return 0;
639 static void spi_unmap_buf(struct spi_master *master, struct device *dev,
640 struct sg_table *sgt, enum dma_data_direction dir)
642 if (sgt->orig_nents) {
643 dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
644 sg_free_table(sgt);
648 static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
650 struct device *tx_dev, *rx_dev;
651 struct spi_transfer *xfer;
652 int ret;
654 if (!master->can_dma)
655 return 0;
657 tx_dev = master->dma_tx->device->dev;
658 rx_dev = master->dma_rx->device->dev;
660 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
661 if (!master->can_dma(master, msg->spi, xfer))
662 continue;
664 if (xfer->tx_buf != NULL) {
665 ret = spi_map_buf(master, tx_dev, &xfer->tx_sg,
666 (void *)xfer->tx_buf, xfer->len,
667 DMA_TO_DEVICE);
668 if (ret != 0)
669 return ret;
672 if (xfer->rx_buf != NULL) {
673 ret = spi_map_buf(master, rx_dev, &xfer->rx_sg,
674 xfer->rx_buf, xfer->len,
675 DMA_FROM_DEVICE);
676 if (ret != 0) {
677 spi_unmap_buf(master, tx_dev, &xfer->tx_sg,
678 DMA_TO_DEVICE);
679 return ret;
684 master->cur_msg_mapped = true;
686 return 0;
689 static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
691 struct spi_transfer *xfer;
692 struct device *tx_dev, *rx_dev;
694 if (!master->cur_msg_mapped || !master->can_dma)
695 return 0;
697 tx_dev = master->dma_tx->device->dev;
698 rx_dev = master->dma_rx->device->dev;
700 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
701 if (!master->can_dma(master, msg->spi, xfer))
702 continue;
704 spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
705 spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
708 return 0;
710 #else /* !CONFIG_HAS_DMA */
711 static inline int __spi_map_msg(struct spi_master *master,
712 struct spi_message *msg)
714 return 0;
717 static inline int spi_unmap_msg(struct spi_master *master,
718 struct spi_message *msg)
720 return 0;
722 #endif /* !CONFIG_HAS_DMA */
724 static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
726 struct spi_transfer *xfer;
727 void *tmp;
728 unsigned int max_tx, max_rx;
730 if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
731 max_tx = 0;
732 max_rx = 0;
734 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
735 if ((master->flags & SPI_MASTER_MUST_TX) &&
736 !xfer->tx_buf)
737 max_tx = max(xfer->len, max_tx);
738 if ((master->flags & SPI_MASTER_MUST_RX) &&
739 !xfer->rx_buf)
740 max_rx = max(xfer->len, max_rx);
743 if (max_tx) {
744 tmp = krealloc(master->dummy_tx, max_tx,
745 GFP_KERNEL | GFP_DMA);
746 if (!tmp)
747 return -ENOMEM;
748 master->dummy_tx = tmp;
749 memset(tmp, 0, max_tx);
752 if (max_rx) {
753 tmp = krealloc(master->dummy_rx, max_rx,
754 GFP_KERNEL | GFP_DMA);
755 if (!tmp)
756 return -ENOMEM;
757 master->dummy_rx = tmp;
760 if (max_tx || max_rx) {
761 list_for_each_entry(xfer, &msg->transfers,
762 transfer_list) {
763 if (!xfer->tx_buf)
764 xfer->tx_buf = master->dummy_tx;
765 if (!xfer->rx_buf)
766 xfer->rx_buf = master->dummy_rx;
771 return __spi_map_msg(master, msg);
775 * spi_transfer_one_message - Default implementation of transfer_one_message()
777 * This is a standard implementation of transfer_one_message() for
778 * drivers which implement a transfer_one() operation. It provides
779 * standard handling of delays and chip select management.
781 static int spi_transfer_one_message(struct spi_master *master,
782 struct spi_message *msg)
784 struct spi_transfer *xfer;
785 bool keep_cs = false;
786 int ret = 0;
787 unsigned long ms = 1;
789 spi_set_cs(msg->spi, true);
791 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
792 trace_spi_transfer_start(msg, xfer);
794 if (xfer->tx_buf || xfer->rx_buf) {
795 reinit_completion(&master->xfer_completion);
797 ret = master->transfer_one(master, msg->spi, xfer);
798 if (ret < 0) {
799 dev_err(&msg->spi->dev,
800 "SPI transfer failed: %d\n", ret);
801 goto out;
804 if (ret > 0) {
805 ret = 0;
806 ms = xfer->len * 8 * 1000 / xfer->speed_hz;
807 ms += ms + 100; /* some tolerance */
809 ms = wait_for_completion_timeout(&master->xfer_completion,
810 msecs_to_jiffies(ms));
813 if (ms == 0) {
814 dev_err(&msg->spi->dev,
815 "SPI transfer timed out\n");
816 msg->status = -ETIMEDOUT;
818 } else {
819 if (xfer->len)
820 dev_err(&msg->spi->dev,
821 "Bufferless transfer has length %u\n",
822 xfer->len);
825 trace_spi_transfer_stop(msg, xfer);
827 if (msg->status != -EINPROGRESS)
828 goto out;
830 if (xfer->delay_usecs)
831 udelay(xfer->delay_usecs);
833 if (xfer->cs_change) {
834 if (list_is_last(&xfer->transfer_list,
835 &msg->transfers)) {
836 keep_cs = true;
837 } else {
838 spi_set_cs(msg->spi, false);
839 udelay(10);
840 spi_set_cs(msg->spi, true);
844 msg->actual_length += xfer->len;
847 out:
848 if (ret != 0 || !keep_cs)
849 spi_set_cs(msg->spi, false);
851 if (msg->status == -EINPROGRESS)
852 msg->status = ret;
854 spi_finalize_current_message(master);
856 return ret;
860 * spi_finalize_current_transfer - report completion of a transfer
861 * @master: the master reporting completion
863 * Called by SPI drivers using the core transfer_one_message()
864 * implementation to notify it that the current interrupt driven
865 * transfer has finished and the next one may be scheduled.
867 void spi_finalize_current_transfer(struct spi_master *master)
869 complete(&master->xfer_completion);
871 EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
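/*
 * Illustrative sketch (editor's addition): a controller driver implementing
 * transfer_one() typically calls spi_finalize_current_transfer() from its
 * interrupt handler once the hardware is done.  The "baz" names are
 * hypothetical and <linux/interrupt.h> is assumed to be included.
 */
static irqreturn_t baz_spi_irq(int irq, void *dev_id)
{
	struct spi_master *master = dev_id;

	/* ... acknowledge the interrupt, drain the RX FIFO, check errors ... */

	spi_finalize_current_transfer(master);
	return IRQ_HANDLED;
}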
874 * __spi_pump_messages - function which processes spi message queue
875 * @master: master to process queue for
876 * @in_kthread: true if we are in the context of the message pump thread
878 * This function checks if there is any spi message in the queue that
879 * needs processing and if so call out to the driver to initialize hardware
880 * and transfer each message.
882 * Note that it is called both from the kthread itself and also from
883 * inside spi_sync(); the queue extraction handling at the top of the
884 * function should deal with this safely.
886 static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
888 unsigned long flags;
889 bool was_busy = false;
890 int ret;
892 /* Lock queue */
893 spin_lock_irqsave(&master->queue_lock, flags);
895 /* Make sure we are not already running a message */
896 if (master->cur_msg) {
897 spin_unlock_irqrestore(&master->queue_lock, flags);
898 return;
901 /* If another context is idling the device then defer */
902 if (master->idling) {
903 queue_kthread_work(&master->kworker, &master->pump_messages);
904 spin_unlock_irqrestore(&master->queue_lock, flags);
905 return;
908 /* Check if the queue is idle */
909 if (list_empty(&master->queue) || !master->running) {
910 if (!master->busy) {
911 spin_unlock_irqrestore(&master->queue_lock, flags);
912 return;
915 /* Only do teardown in the thread */
916 if (!in_kthread) {
917 queue_kthread_work(&master->kworker,
918 &master->pump_messages);
919 spin_unlock_irqrestore(&master->queue_lock, flags);
920 return;
923 master->busy = false;
924 master->idling = true;
925 spin_unlock_irqrestore(&master->queue_lock, flags);
927 kfree(master->dummy_rx);
928 master->dummy_rx = NULL;
929 kfree(master->dummy_tx);
930 master->dummy_tx = NULL;
931 if (master->unprepare_transfer_hardware &&
932 master->unprepare_transfer_hardware(master))
933 dev_err(&master->dev,
934 "failed to unprepare transfer hardware\n");
935 if (master->auto_runtime_pm) {
936 pm_runtime_mark_last_busy(master->dev.parent);
937 pm_runtime_put_autosuspend(master->dev.parent);
939 trace_spi_master_idle(master);
941 spin_lock_irqsave(&master->queue_lock, flags);
942 master->idling = false;
943 spin_unlock_irqrestore(&master->queue_lock, flags);
944 return;
947 /* Extract head of queue */
948 master->cur_msg =
949 list_first_entry(&master->queue, struct spi_message, queue);
951 list_del_init(&master->cur_msg->queue);
952 if (master->busy)
953 was_busy = true;
954 else
955 master->busy = true;
956 spin_unlock_irqrestore(&master->queue_lock, flags);
958 if (!was_busy && master->auto_runtime_pm) {
959 ret = pm_runtime_get_sync(master->dev.parent);
960 if (ret < 0) {
961 dev_err(&master->dev, "Failed to power device: %d\n",
962 ret);
963 return;
967 if (!was_busy)
968 trace_spi_master_busy(master);
970 if (!was_busy && master->prepare_transfer_hardware) {
971 ret = master->prepare_transfer_hardware(master);
972 if (ret) {
973 dev_err(&master->dev,
974 "failed to prepare transfer hardware\n");
976 if (master->auto_runtime_pm)
977 pm_runtime_put(master->dev.parent);
978 return;
982 trace_spi_message_start(master->cur_msg);
984 if (master->prepare_message) {
985 ret = master->prepare_message(master, master->cur_msg);
986 if (ret) {
987 dev_err(&master->dev,
988 "failed to prepare message: %d\n", ret);
989 master->cur_msg->status = ret;
990 spi_finalize_current_message(master);
991 return;
993 master->cur_msg_prepared = true;
996 ret = spi_map_msg(master, master->cur_msg);
997 if (ret) {
998 master->cur_msg->status = ret;
999 spi_finalize_current_message(master);
1000 return;
1003 ret = master->transfer_one_message(master, master->cur_msg);
1004 if (ret) {
1005 dev_err(&master->dev,
1006 "failed to transfer one message from queue\n");
1007 return;
1012 * spi_pump_messages - kthread work function which processes spi message queue
1013 * @work: pointer to kthread work struct contained in the master struct
1015 static void spi_pump_messages(struct kthread_work *work)
1017 struct spi_master *master =
1018 container_of(work, struct spi_master, pump_messages);
1020 __spi_pump_messages(master, true);
1023 static int spi_init_queue(struct spi_master *master)
1025 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
1027 master->running = false;
1028 master->busy = false;
1030 init_kthread_worker(&master->kworker);
1031 master->kworker_task = kthread_run(kthread_worker_fn,
1032 &master->kworker, "%s",
1033 dev_name(&master->dev));
1034 if (IS_ERR(master->kworker_task)) {
1035 dev_err(&master->dev, "failed to create message pump task\n");
1036 return PTR_ERR(master->kworker_task);
1038 init_kthread_work(&master->pump_messages, spi_pump_messages);
1041 * Master config will indicate if this controller should run the
1042 * message pump with high (realtime) priority to reduce the transfer
1043 * latency on the bus by minimising the delay between a transfer
1044 * request and the scheduling of the message pump thread. Without this
1045 * setting the message pump thread will remain at default priority.
1047 if (master->rt) {
1048 dev_info(&master->dev,
1049 "will run message pump with realtime priority\n");
1050 sched_setscheduler(master->kworker_task, SCHED_FIFO, &param);
1053 return 0;
1057 * spi_get_next_queued_message() - called by driver to check for queued
1058 * messages
1059 * @master: the master to check for queued messages
1061 * If there are more messages in the queue, the next message is returned from
1062 * this call.
1064 struct spi_message *spi_get_next_queued_message(struct spi_master *master)
1066 struct spi_message *next;
1067 unsigned long flags;
1069 /* get a pointer to the next message, if any */
1070 spin_lock_irqsave(&master->queue_lock, flags);
1071 next = list_first_entry_or_null(&master->queue, struct spi_message,
1072 queue);
1073 spin_unlock_irqrestore(&master->queue_lock, flags);
1075 return next;
1077 EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
1080 * spi_finalize_current_message() - the current message is complete
1081 * @master: the master to return the message to
1083 * Called by the driver to notify the core that the message in the front of the
1084 * queue is complete and can be removed from the queue.
1086 void spi_finalize_current_message(struct spi_master *master)
1088 struct spi_message *mesg;
1089 unsigned long flags;
1090 int ret;
1092 spin_lock_irqsave(&master->queue_lock, flags);
1093 mesg = master->cur_msg;
1094 master->cur_msg = NULL;
1096 queue_kthread_work(&master->kworker, &master->pump_messages);
1097 spin_unlock_irqrestore(&master->queue_lock, flags);
1099 spi_unmap_msg(master, mesg);
1101 if (master->cur_msg_prepared && master->unprepare_message) {
1102 ret = master->unprepare_message(master, mesg);
1103 if (ret) {
1104 dev_err(&master->dev,
1105 "failed to unprepare message: %d\n", ret);
1109 trace_spi_message_done(mesg);
1111 master->cur_msg_prepared = false;
1113 mesg->state = NULL;
1114 if (mesg->complete)
1115 mesg->complete(mesg->context);
1117 EXPORT_SYMBOL_GPL(spi_finalize_current_message);
1119 static int spi_start_queue(struct spi_master *master)
1121 unsigned long flags;
1123 spin_lock_irqsave(&master->queue_lock, flags);
1125 if (master->running || master->busy) {
1126 spin_unlock_irqrestore(&master->queue_lock, flags);
1127 return -EBUSY;
1130 master->running = true;
1131 master->cur_msg = NULL;
1132 spin_unlock_irqrestore(&master->queue_lock, flags);
1134 queue_kthread_work(&master->kworker, &master->pump_messages);
1136 return 0;
1139 static int spi_stop_queue(struct spi_master *master)
1141 unsigned long flags;
1142 unsigned limit = 500;
1143 int ret = 0;
1145 spin_lock_irqsave(&master->queue_lock, flags);
1148 * This is a bit lame, but is optimized for the common execution path.
1149 * A wait_queue on the master->busy could be used, but then the common
1150 * execution path (pump_messages) would be required to call wake_up or
1151 * friends on every SPI message. Do this instead.
1153 while ((!list_empty(&master->queue) || master->busy) && limit--) {
1154 spin_unlock_irqrestore(&master->queue_lock, flags);
1155 usleep_range(10000, 11000);
1156 spin_lock_irqsave(&master->queue_lock, flags);
1159 if (!list_empty(&master->queue) || master->busy)
1160 ret = -EBUSY;
1161 else
1162 master->running = false;
1164 spin_unlock_irqrestore(&master->queue_lock, flags);
1166 if (ret) {
1167 dev_warn(&master->dev,
1168 "could not stop message queue\n");
1169 return ret;
1171 return ret;
1174 static int spi_destroy_queue(struct spi_master *master)
1176 int ret;
1178 ret = spi_stop_queue(master);
1181 * flush_kthread_worker will block until all work is done.
1182 * If the reason that stop_queue timed out is that the work will never
1183 * finish, then it does no good to call flush/stop thread, so
1184 * return anyway.
1186 if (ret) {
1187 dev_err(&master->dev, "problem destroying queue\n");
1188 return ret;
1191 flush_kthread_worker(&master->kworker);
1192 kthread_stop(master->kworker_task);
1194 return 0;
1197 static int __spi_queued_transfer(struct spi_device *spi,
1198 struct spi_message *msg,
1199 bool need_pump)
1201 struct spi_master *master = spi->master;
1202 unsigned long flags;
1204 spin_lock_irqsave(&master->queue_lock, flags);
1206 if (!master->running) {
1207 spin_unlock_irqrestore(&master->queue_lock, flags);
1208 return -ESHUTDOWN;
1210 msg->actual_length = 0;
1211 msg->status = -EINPROGRESS;
1213 list_add_tail(&msg->queue, &master->queue);
1214 if (!master->busy && need_pump)
1215 queue_kthread_work(&master->kworker, &master->pump_messages);
1217 spin_unlock_irqrestore(&master->queue_lock, flags);
1218 return 0;
1222 * spi_queued_transfer - transfer function for queued transfers
1223 * @spi: spi device which is requesting transfer
1224 * @msg: spi message which is to be queued onto the driver queue
1226 static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
1228 return __spi_queued_transfer(spi, msg, true);
1231 static int spi_master_initialize_queue(struct spi_master *master)
1233 int ret;
1235 master->transfer = spi_queued_transfer;
1236 if (!master->transfer_one_message)
1237 master->transfer_one_message = spi_transfer_one_message;
1239 /* Initialize and start queue */
1240 ret = spi_init_queue(master);
1241 if (ret) {
1242 dev_err(&master->dev, "problem initializing queue\n");
1243 goto err_init_queue;
1245 master->queued = true;
1246 ret = spi_start_queue(master);
1247 if (ret) {
1248 dev_err(&master->dev, "problem starting queue\n");
1249 goto err_start_queue;
1252 return 0;
1254 err_start_queue:
1255 spi_destroy_queue(master);
1256 err_init_queue:
1257 return ret;
1260 /*-------------------------------------------------------------------------*/
1262 #if defined(CONFIG_OF)
1263 static struct spi_device *
1264 of_register_spi_device(struct spi_master *master, struct device_node *nc)
1266 struct spi_device *spi;
1267 int rc;
1268 u32 value;
1270 /* Alloc an spi_device */
1271 spi = spi_alloc_device(master);
1272 if (!spi) {
1273 dev_err(&master->dev, "spi_device alloc error for %s\n",
1274 nc->full_name);
1275 rc = -ENOMEM;
1276 goto err_out;
1279 /* Select device driver */
1280 rc = of_modalias_node(nc, spi->modalias,
1281 sizeof(spi->modalias));
1282 if (rc < 0) {
1283 dev_err(&master->dev, "cannot find modalias for %s\n",
1284 nc->full_name);
1285 goto err_out;
1288 /* Device address */
1289 rc = of_property_read_u32(nc, "reg", &value);
1290 if (rc) {
1291 dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n",
1292 nc->full_name, rc);
1293 goto err_out;
1295 spi->chip_select = value;
1297 /* Mode (clock phase/polarity/etc.) */
1298 if (of_find_property(nc, "spi-cpha", NULL))
1299 spi->mode |= SPI_CPHA;
1300 if (of_find_property(nc, "spi-cpol", NULL))
1301 spi->mode |= SPI_CPOL;
1302 if (of_find_property(nc, "spi-cs-high", NULL))
1303 spi->mode |= SPI_CS_HIGH;
1304 if (of_find_property(nc, "spi-3wire", NULL))
1305 spi->mode |= SPI_3WIRE;
1306 if (of_find_property(nc, "spi-lsb-first", NULL))
1307 spi->mode |= SPI_LSB_FIRST;
1309 /* Device DUAL/QUAD mode */
1310 if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
1311 switch (value) {
1312 case 1:
1313 break;
1314 case 2:
1315 spi->mode |= SPI_TX_DUAL;
1316 break;
1317 case 4:
1318 spi->mode |= SPI_TX_QUAD;
1319 break;
1320 default:
1321 dev_warn(&master->dev,
1322 "spi-tx-bus-width %d not supported\n",
1323 value);
1324 break;
1328 if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
1329 switch (value) {
1330 case 1:
1331 break;
1332 case 2:
1333 spi->mode |= SPI_RX_DUAL;
1334 break;
1335 case 4:
1336 spi->mode |= SPI_RX_QUAD;
1337 break;
1338 default:
1339 dev_warn(&master->dev,
1340 "spi-rx-bus-width %d not supported\n",
1341 value);
1342 break;
1346 /* Device speed */
1347 rc = of_property_read_u32(nc, "spi-max-frequency", &value);
1348 if (rc) {
1349 dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n",
1350 nc->full_name, rc);
1351 goto err_out;
1353 spi->max_speed_hz = value;
1355 /* IRQ */
1356 spi->irq = irq_of_parse_and_map(nc, 0);
1358 /* Store a pointer to the node in the device structure */
1359 of_node_get(nc);
1360 spi->dev.of_node = nc;
1362 /* Register the new device */
1363 request_module("%s%s", SPI_MODULE_PREFIX, spi->modalias);
1364 rc = spi_add_device(spi);
1365 if (rc) {
1366 dev_err(&master->dev, "spi_device register error %s\n",
1367 nc->full_name);
1368 goto err_out;
1371 return spi;
1373 err_out:
1374 spi_dev_put(spi);
1375 return ERR_PTR(rc);
1379 * of_register_spi_devices() - Register child devices onto the SPI bus
1380 * @master: Pointer to spi_master device
1382 * Registers an spi_device for each child node of master node which has a 'reg'
1383 * property.
1385 static void of_register_spi_devices(struct spi_master *master)
1387 struct spi_device *spi;
1388 struct device_node *nc;
1390 if (!master->dev.of_node)
1391 return;
1393 for_each_available_child_of_node(master->dev.of_node, nc) {
1394 spi = of_register_spi_device(master, nc);
1395 if (IS_ERR(spi))
1396 dev_warn(&master->dev, "Failed to create SPI device for %s\n",
1397 nc->full_name);
1400 #else
1401 static void of_register_spi_devices(struct spi_master *master) { }
1402 #endif
1404 #ifdef CONFIG_ACPI
1405 static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
1407 struct spi_device *spi = data;
1409 if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
1410 struct acpi_resource_spi_serialbus *sb;
1412 sb = &ares->data.spi_serial_bus;
1413 if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
1414 spi->chip_select = sb->device_selection;
1415 spi->max_speed_hz = sb->connection_speed;
1417 if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
1418 spi->mode |= SPI_CPHA;
1419 if (sb->clock_polarity == ACPI_SPI_START_HIGH)
1420 spi->mode |= SPI_CPOL;
1421 if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
1422 spi->mode |= SPI_CS_HIGH;
1424 } else if (spi->irq < 0) {
1425 struct resource r;
1427 if (acpi_dev_resource_interrupt(ares, 0, &r))
1428 spi->irq = r.start;
1431 /* Always tell the ACPI core to skip this resource */
1432 return 1;
1435 static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
1436 void *data, void **return_value)
1438 struct spi_master *master = data;
1439 struct list_head resource_list;
1440 struct acpi_device *adev;
1441 struct spi_device *spi;
1442 int ret;
1444 if (acpi_bus_get_device(handle, &adev))
1445 return AE_OK;
1446 if (acpi_bus_get_status(adev) || !adev->status.present)
1447 return AE_OK;
1449 spi = spi_alloc_device(master);
1450 if (!spi) {
1451 dev_err(&master->dev, "failed to allocate SPI device for %s\n",
1452 dev_name(&adev->dev));
1453 return AE_NO_MEMORY;
1456 ACPI_COMPANION_SET(&spi->dev, adev);
1457 spi->irq = -1;
1459 INIT_LIST_HEAD(&resource_list);
1460 ret = acpi_dev_get_resources(adev, &resource_list,
1461 acpi_spi_add_resource, spi);
1462 acpi_dev_free_resource_list(&resource_list);
1464 if (ret < 0 || !spi->max_speed_hz) {
1465 spi_dev_put(spi);
1466 return AE_OK;
1469 adev->power.flags.ignore_parent = true;
1470 strlcpy(spi->modalias, acpi_device_hid(adev), sizeof(spi->modalias));
1471 if (spi_add_device(spi)) {
1472 adev->power.flags.ignore_parent = false;
1473 dev_err(&master->dev, "failed to add SPI device %s from ACPI\n",
1474 dev_name(&adev->dev));
1475 spi_dev_put(spi);
1478 return AE_OK;
1481 static void acpi_register_spi_devices(struct spi_master *master)
1483 acpi_status status;
1484 acpi_handle handle;
1486 handle = ACPI_HANDLE(master->dev.parent);
1487 if (!handle)
1488 return;
1490 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
1491 acpi_spi_add_device, NULL,
1492 master, NULL);
1493 if (ACPI_FAILURE(status))
1494 dev_warn(&master->dev, "failed to enumerate SPI slaves\n");
1496 #else
1497 static inline void acpi_register_spi_devices(struct spi_master *master) {}
1498 #endif /* CONFIG_ACPI */
1500 static void spi_master_release(struct device *dev)
1502 struct spi_master *master;
1504 master = container_of(dev, struct spi_master, dev);
1505 kfree(master);
1508 static struct class spi_master_class = {
1509 .name = "spi_master",
1510 .owner = THIS_MODULE,
1511 .dev_release = spi_master_release,
1517 * spi_alloc_master - allocate SPI master controller
1518 * @dev: the controller, possibly using the platform_bus
1519 * @size: how much zeroed driver-private data to allocate; the pointer to this
1520 * memory is in the driver_data field of the returned device,
1521 * accessible with spi_master_get_devdata().
1522 * Context: can sleep
1524 * This call is used only by SPI master controller drivers, which are the
1525 * only ones directly touching chip registers. It's how they allocate
1526 * an spi_master structure, prior to calling spi_register_master().
1528 * This must be called from context that can sleep. It returns the SPI
1529 * master structure on success, else NULL.
1531 * The caller is responsible for assigning the bus number and initializing
1532 * the master's methods before calling spi_register_master(); and (after errors
1533 * adding the device) calling spi_master_put() and kfree() to prevent a memory
1534 * leak.
1536 struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
1538 struct spi_master *master;
1540 if (!dev)
1541 return NULL;
1543 master = kzalloc(size + sizeof(*master), GFP_KERNEL);
1544 if (!master)
1545 return NULL;
1547 device_initialize(&master->dev);
1548 master->bus_num = -1;
1549 master->num_chipselect = 1;
1550 master->dev.class = &spi_master_class;
1551 master->dev.parent = get_device(dev);
1552 spi_master_set_devdata(master, &master[1]);
1554 return master;
1556 EXPORT_SYMBOL_GPL(spi_alloc_master);
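/*
 * Illustrative sketch (editor's addition): skeleton of a controller driver
 * probe() using spi_alloc_master() as described above.  The "qux" names are
 * hypothetical, most hardware setup is elided, and <linux/platform_device.h>
 * is assumed to be included.
 */
struct qux_spi {
	void __iomem *regs;
};

static int qux_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	int ret;

	master = spi_alloc_master(&pdev->dev, sizeof(struct qux_spi));
	if (!master)
		return -ENOMEM;

	master->bus_num = pdev->id;
	master->num_chipselect = 4;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
	master->dev.of_node = pdev->dev.of_node;
	/* master->set_cs, master->transfer_one, ... would be set here */

	platform_set_drvdata(pdev, master);

	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret)
		spi_master_put(master);
	return ret;
}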
1558 #ifdef CONFIG_OF
1559 static int of_spi_register_master(struct spi_master *master)
1561 int nb, i, *cs;
1562 struct device_node *np = master->dev.of_node;
1564 if (!np)
1565 return 0;
1567 nb = of_gpio_named_count(np, "cs-gpios");
1568 master->num_chipselect = max_t(int, nb, master->num_chipselect);
1570 /* Return error only for an incorrectly formed cs-gpios property */
1571 if (nb == 0 || nb == -ENOENT)
1572 return 0;
1573 else if (nb < 0)
1574 return nb;
1576 cs = devm_kzalloc(&master->dev,
1577 sizeof(int) * master->num_chipselect,
1578 GFP_KERNEL);
1579 master->cs_gpios = cs;
1581 if (!master->cs_gpios)
1582 return -ENOMEM;
1584 for (i = 0; i < master->num_chipselect; i++)
1585 cs[i] = -ENOENT;
1587 for (i = 0; i < nb; i++)
1588 cs[i] = of_get_named_gpio(np, "cs-gpios", i);
1590 return 0;
1592 #else
1593 static int of_spi_register_master(struct spi_master *master)
1595 return 0;
1597 #endif
1600 * spi_register_master - register SPI master controller
1601 * @master: initialized master, originally from spi_alloc_master()
1602 * Context: can sleep
1604 * SPI master controllers connect to their drivers using some non-SPI bus,
1605 * such as the platform bus. The final stage of probe() in that code
1606 * includes calling spi_register_master() to hook up to this SPI bus glue.
1608 * SPI controllers use board specific (often SOC specific) bus numbers,
1609 * and board-specific addressing for SPI devices combines those numbers
1610 * with chip select numbers. Since SPI does not directly support dynamic
1611 * device identification, boards need configuration tables telling which
1612 * chip is at which address.
1614 * This must be called from context that can sleep. It returns zero on
1615 * success, else a negative error code (dropping the master's refcount).
1616 * After a successful return, the caller is responsible for calling
1617 * spi_unregister_master().
1619 int spi_register_master(struct spi_master *master)
1621 static atomic_t dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
1622 struct device *dev = master->dev.parent;
1623 struct boardinfo *bi;
1624 int status = -ENODEV;
1625 int dynamic = 0;
1627 if (!dev)
1628 return -ENODEV;
1630 status = of_spi_register_master(master);
1631 if (status)
1632 return status;
1634 /* even if it's just one always-selected device, there must
1635 * be at least one chipselect
1637 if (master->num_chipselect == 0)
1638 return -EINVAL;
1640 if ((master->bus_num < 0) && master->dev.of_node)
1641 master->bus_num = of_alias_get_id(master->dev.of_node, "spi");
1643 /* convention: dynamically assigned bus IDs count down from the max */
1644 if (master->bus_num < 0) {
1645 /* FIXME switch to an IDR based scheme, something like
1646 * I2C now uses, so we can't run out of "dynamic" IDs
1648 master->bus_num = atomic_dec_return(&dyn_bus_id);
1649 dynamic = 1;
1652 INIT_LIST_HEAD(&master->queue);
1653 spin_lock_init(&master->queue_lock);
1654 spin_lock_init(&master->bus_lock_spinlock);
1655 mutex_init(&master->bus_lock_mutex);
1656 master->bus_lock_flag = 0;
1657 init_completion(&master->xfer_completion);
1658 if (!master->max_dma_len)
1659 master->max_dma_len = INT_MAX;
1661 /* register the device, then userspace will see it.
1662 * registration fails if the bus ID is in use.
1664 dev_set_name(&master->dev, "spi%u", master->bus_num);
1665 status = device_add(&master->dev);
1666 if (status < 0)
1667 goto done;
1668 dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
1669 dynamic ? " (dynamic)" : "");
1671 /* If we're using a queued driver, start the queue */
1672 if (master->transfer)
1673 dev_info(dev, "master is unqueued, this is deprecated\n");
1674 else {
1675 status = spi_master_initialize_queue(master);
1676 if (status) {
1677 device_del(&master->dev);
1678 goto done;
1682 mutex_lock(&board_lock);
1683 list_add_tail(&master->list, &spi_master_list);
1684 list_for_each_entry(bi, &board_list, list)
1685 spi_match_master_to_boardinfo(master, &bi->board_info);
1686 mutex_unlock(&board_lock);
1688 /* Register devices from the device tree and ACPI */
1689 of_register_spi_devices(master);
1690 acpi_register_spi_devices(master);
1691 done:
1692 return status;
1694 EXPORT_SYMBOL_GPL(spi_register_master);
1696 static void devm_spi_unregister(struct device *dev, void *res)
1698 spi_unregister_master(*(struct spi_master **)res);
1702 * devm_spi_register_master - register managed SPI master controller
1703 * @dev: device managing SPI master
1704 * @master: initialized master, originally from spi_alloc_master()
1705 * Context: can sleep
1707 * Register an SPI master as with spi_register_master(), which will
1708 * automatically be unregistered when the managing device is detached
1710 int devm_spi_register_master(struct device *dev, struct spi_master *master)
1712 struct spi_master **ptr;
1713 int ret;
1715 ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
1716 if (!ptr)
1717 return -ENOMEM;
1719 ret = spi_register_master(master);
1720 if (!ret) {
1721 *ptr = master;
1722 devres_add(dev, ptr);
1723 } else {
1724 devres_free(ptr);
1727 return ret;
1729 EXPORT_SYMBOL_GPL(devm_spi_register_master);
1731 static int __unregister(struct device *dev, void *null)
1733 spi_unregister_device(to_spi_device(dev));
1734 return 0;
1738 * spi_unregister_master - unregister SPI master controller
1739 * @master: the master being unregistered
1740 * Context: can sleep
1742 * This call is used only by SPI master controller drivers, which are the
1743 * only ones directly touching chip registers.
1745 * This must be called from context that can sleep.
1747 void spi_unregister_master(struct spi_master *master)
1749 int dummy;
1751 if (master->queued) {
1752 if (spi_destroy_queue(master))
1753 dev_err(&master->dev, "queue remove failed\n");
1756 mutex_lock(&board_lock);
1757 list_del(&master->list);
1758 mutex_unlock(&board_lock);
1760 dummy = device_for_each_child(&master->dev, NULL, __unregister);
1761 device_unregister(&master->dev);
1763 EXPORT_SYMBOL_GPL(spi_unregister_master);
1765 int spi_master_suspend(struct spi_master *master)
1767 int ret;
1769 /* Basically no-ops for non-queued masters */
1770 if (!master->queued)
1771 return 0;
1773 ret = spi_stop_queue(master);
1774 if (ret)
1775 dev_err(&master->dev, "queue stop failed\n");
1777 return ret;
1779 EXPORT_SYMBOL_GPL(spi_master_suspend);
1781 int spi_master_resume(struct spi_master *master)
1783 int ret;
1785 if (!master->queued)
1786 return 0;
1788 ret = spi_start_queue(master);
1789 if (ret)
1790 dev_err(&master->dev, "queue restart failed\n");
1792 return ret;
1794 EXPORT_SYMBOL_GPL(spi_master_resume);
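/*
 * Illustrative sketch (editor's addition): a controller driver's system
 * sleep callbacks usually just wrap spi_master_suspend()/spi_master_resume()
 * around their own clock and register handling.  This assumes probe() saved
 * the master with platform_set_drvdata(); the "qux" names are hypothetical.
 */
static int qux_spi_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);

	return spi_master_suspend(master);
}

static int qux_spi_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);

	return spi_master_resume(master);
}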
1796 static int __spi_master_match(struct device *dev, const void *data)
1798 struct spi_master *m;
1799 const u16 *bus_num = data;
1801 m = container_of(dev, struct spi_master, dev);
1802 return m->bus_num == *bus_num;
1806 * spi_busnum_to_master - look up master associated with bus_num
1807 * @bus_num: the master's bus number
1808 * Context: can sleep
1810 * This call may be used with devices that are registered after
1811 * arch init time. It returns a refcounted pointer to the relevant
1812 * spi_master (which the caller must release), or NULL if there is
1813 * no such master registered.
1815 struct spi_master *spi_busnum_to_master(u16 bus_num)
1817 struct device *dev;
1818 struct spi_master *master = NULL;
1820 dev = class_find_device(&spi_master_class, NULL, &bus_num,
1821 __spi_master_match);
1822 if (dev)
1823 master = container_of(dev, struct spi_master, dev);
1824 /* reference got in class_find_device */
1825 return master;
1827 EXPORT_SYMBOL_GPL(spi_busnum_to_master);
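/*
 * Illustrative sketch (editor's addition): the reference returned by
 * spi_busnum_to_master() comes from class_find_device() and must be dropped
 * with spi_master_put() once the caller is done with it.
 */
static void example_peek_bus(u16 bus)
{
	struct spi_master *master = spi_busnum_to_master(bus);

	if (!master)
		return;

	dev_info(&master->dev, "bus %u has %u chipselects\n",
		 bus, master->num_chipselect);
	spi_master_put(master);
}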
1830 /*-------------------------------------------------------------------------*/
1832 /* Core methods for SPI master protocol drivers. Some of the
1833 * other core methods are currently defined as inline functions.
1837 * spi_setup - setup SPI mode and clock rate
1838 * @spi: the device whose settings are being modified
1839 * Context: can sleep, and no requests are queued to the device
1841 * SPI protocol drivers may need to update the transfer mode if the
1842 * device doesn't work with its default. They may likewise need
1843 * to update clock rates or word sizes from initial values. This function
1844 * changes those settings, and must be called from a context that can sleep.
1845 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
1846 * effect the next time the device is selected and data is transferred to
1847 * or from it. When this function returns, the spi device is deselected.
1849 * Note that this call will fail if the protocol driver specifies an option
1850 * that the underlying controller or its driver does not support. For
1851 * example, not all hardware supports wire transfers using nine bit words,
1852 * LSB-first wire encoding, or active-high chipselects.
1854 int spi_setup(struct spi_device *spi)
1856 unsigned bad_bits, ugly_bits;
1857 int status = 0;
1859 /* check mode to prevent DUAL and QUAD from being set at the same time
1861 if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
1862 ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
1863 dev_err(&spi->dev,
1864 "setup: can not select dual and quad at the same time\n");
1865 return -EINVAL;
1867 /* if it is SPI_3WIRE mode, DUAL and QUAD should be forbidden
1869 if ((spi->mode & SPI_3WIRE) && (spi->mode &
1870 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
1871 return -EINVAL;
1872 /* help drivers fail *cleanly* when they need options
1873 * that aren't supported with their current master
1875 bad_bits = spi->mode & ~spi->master->mode_bits;
1876 ugly_bits = bad_bits &
1877 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
1878 if (ugly_bits) {
1879 dev_warn(&spi->dev,
1880 "setup: ignoring unsupported mode bits %x\n",
1881 ugly_bits);
1882 spi->mode &= ~ugly_bits;
1883 bad_bits &= ~ugly_bits;
1885 if (bad_bits) {
1886 dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
1887 bad_bits);
1888 return -EINVAL;
1891 if (!spi->bits_per_word)
1892 spi->bits_per_word = 8;
1894 if (!spi->max_speed_hz)
1895 spi->max_speed_hz = spi->master->max_speed_hz;
1897 if (spi->master->setup)
1898 status = spi->master->setup(spi);
1900 dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
1901 (int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
1902 (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
1903 (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
1904 (spi->mode & SPI_3WIRE) ? "3wire, " : "",
1905 (spi->mode & SPI_LOOP) ? "loopback, " : "",
1906 spi->bits_per_word, spi->max_speed_hz,
1907 status);
1909 return status;
1911 EXPORT_SYMBOL_GPL(spi_setup);
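/*
 * Illustrative sketch (editor's addition): a protocol driver overriding the
 * defaults it inherited from board or firmware description, as the comment
 * above explains.  The values are only examples.
 */
static int example_configure(struct spi_device *spi)
{
	spi->mode = SPI_MODE_0;
	spi->bits_per_word = 16;
	spi->max_speed_hz = 5000000;

	/* fails cleanly if the master can't support these options */
	return spi_setup(spi);
}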
1913 static int __spi_validate(struct spi_device *spi, struct spi_message *message)
1915 struct spi_master *master = spi->master;
1916 struct spi_transfer *xfer;
1917 int w_size;
1919 if (list_empty(&message->transfers))
1920 return -EINVAL;
1922 /* Half-duplex links include original MicroWire, and ones with
1923 * only one data pin like SPI_3WIRE (switches direction) or where
1924 * either MOSI or MISO is missing. They can also be caused by
1925 * software limitations.
1927 if ((master->flags & SPI_MASTER_HALF_DUPLEX)
1928 || (spi->mode & SPI_3WIRE)) {
1929 unsigned flags = master->flags;
1931 list_for_each_entry(xfer, &message->transfers, transfer_list) {
1932 if (xfer->rx_buf && xfer->tx_buf)
1933 return -EINVAL;
1934 if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
1935 return -EINVAL;
1936 if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
1937 return -EINVAL;
1942 * Set transfer bits_per_word and max speed as spi device default if
1943 * it is not set for this transfer.
1944 * Set transfer tx_nbits and rx_nbits as single transfer default
1945 * (SPI_NBITS_SINGLE) if it is not set for this transfer.
1947 list_for_each_entry(xfer, &message->transfers, transfer_list) {
1948 message->frame_length += xfer->len;
1949 if (!xfer->bits_per_word)
1950 xfer->bits_per_word = spi->bits_per_word;
1952 if (!xfer->speed_hz)
1953 xfer->speed_hz = spi->max_speed_hz;
1955 if (master->max_speed_hz &&
1956 xfer->speed_hz > master->max_speed_hz)
1957 xfer->speed_hz = master->max_speed_hz;
1959 if (master->bits_per_word_mask) {
1960 /* Only 32 bits fit in the mask */
1961 if (xfer->bits_per_word > 32)
1962 return -EINVAL;
1963 if (!(master->bits_per_word_mask &
1964 BIT(xfer->bits_per_word - 1)))
1965 return -EINVAL;
1969 * SPI transfer length should be multiple of SPI word size
1970 * where SPI word size should be power-of-two multiple
1972 if (xfer->bits_per_word <= 8)
1973 w_size = 1;
1974 else if (xfer->bits_per_word <= 16)
1975 w_size = 2;
1976 else
1977 w_size = 4;
1979 /* No partial transfers accepted */
1980 if (xfer->len % w_size)
1981 return -EINVAL;
1983 if (xfer->speed_hz && master->min_speed_hz &&
1984 xfer->speed_hz < master->min_speed_hz)
1985 return -EINVAL;
1987 if (xfer->tx_buf && !xfer->tx_nbits)
1988 xfer->tx_nbits = SPI_NBITS_SINGLE;
1989 if (xfer->rx_buf && !xfer->rx_nbits)
1990 xfer->rx_nbits = SPI_NBITS_SINGLE;
1991 /* check transfer tx/rx_nbits:
1992 * 1. check the value matches one of single, dual and quad
1993 * 2. check tx/rx_nbits match the mode in spi_device
1995 if (xfer->tx_buf) {
1996 if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
1997 xfer->tx_nbits != SPI_NBITS_DUAL &&
1998 xfer->tx_nbits != SPI_NBITS_QUAD)
1999 return -EINVAL;
2000 if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
2001 !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
2002 return -EINVAL;
2003 if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
2004 !(spi->mode & SPI_TX_QUAD))
2005 return -EINVAL;
2007 /* check transfer rx_nbits */
2008 if (xfer->rx_buf) {
2009 if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
2010 xfer->rx_nbits != SPI_NBITS_DUAL &&
2011 xfer->rx_nbits != SPI_NBITS_QUAD)
2012 return -EINVAL;
2013 if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
2014 !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
2015 return -EINVAL;
2016 if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
2017 !(spi->mode & SPI_RX_QUAD))
2018 return -EINVAL;
2022 message->status = -EINPROGRESS;
2024 return 0;
2027 static int __spi_async(struct spi_device *spi, struct spi_message *message)
2029 struct spi_master *master = spi->master;
2031 message->spi = spi;
2033 trace_spi_message_submit(message);
2035 return master->transfer(spi, message);
2039 * spi_async - asynchronous SPI transfer
2040 * @spi: device with which data will be exchanged
2041 * @message: describes the data transfers, including completion callback
2042 * Context: any (irqs may be blocked, etc)
2044 * This call may be used in_irq and other contexts which can't sleep,
2045 * as well as from task contexts which can sleep.
2047 * The completion callback is invoked in a context which can't sleep.
2048 * Before that invocation, the value of message->status is undefined.
2049 * When the callback is issued, message->status holds either zero (to
2050 * indicate complete success) or a negative error code. After that
2051 * callback returns, the driver which issued the transfer request may
2052 * deallocate the associated memory; it's no longer in use by any SPI
2053 * core or controller driver code.
2055 * Note that although all messages to a spi_device are handled in
2056 * FIFO order, messages may go to different devices in other orders.
2057 * Some device might be higher priority, or have various "hard" access
2058 * time requirements, for example.
2060 * On detection of any fault during the transfer, processing of
2061 * the entire message is aborted, and the device is deselected.
2062 * Until returning from the associated message completion callback,
2063 * no other spi_message queued to that device will be processed.
2064 * (This rule applies equally to all the synchronous transfer calls,
2065 * which are wrappers around this core asynchronous primitive.)
2067 int spi_async(struct spi_device *spi, struct spi_message *message)
2069 struct spi_master *master = spi->master;
2070 int ret;
2071 unsigned long flags;
2073 ret = __spi_validate(spi, message);
2074 if (ret != 0)
2075 return ret;
2077 spin_lock_irqsave(&master->bus_lock_spinlock, flags);
2079 if (master->bus_lock_flag)
2080 ret = -EBUSY;
2081 else
2082 ret = __spi_async(spi, message);
2084 spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
2086 return ret;
2088 EXPORT_SYMBOL_GPL(spi_async);
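/*
 * Illustrative sketch (not part of this driver): a hypothetical caller
 * submitting one receive-only transfer with spi_async().  The example_*
 * names are made up; the message, transfer, rx buffer and completion are
 * assumed to be owned by the caller and to stay allocated until the
 * completion callback has run.
 */
#if 0	/* example only, never built */
static void example_async_complete(void *context)
{
	complete(context);	/* runs in a context which can't sleep */
}

static int example_async_read(struct spi_device *spi,
			      struct spi_message *msg,
			      struct spi_transfer *xfer,
			      void *rx, size_t len,
			      struct completion *done)
{
	memset(xfer, 0, sizeof(*xfer));
	xfer->rx_buf = rx;
	xfer->len = len;

	spi_message_init(msg);
	spi_message_add_tail(xfer, msg);
	msg->complete = example_async_complete;
	msg->context = done;

	/* may be called from contexts which cannot sleep */
	return spi_async(spi, msg);
}
#endif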
2091 * spi_async_locked - version of spi_async with exclusive bus usage
2092 * @spi: device with which data will be exchanged
2093 * @message: describes the data transfers, including completion callback
2094 * Context: any (irqs may be blocked, etc)
2096 * This call may be used in_irq and other contexts which can't sleep,
2097 * as well as from task contexts which can sleep.
2099 * The completion callback is invoked in a context which can't sleep.
2100 * Before that invocation, the value of message->status is undefined.
2101 * When the callback is issued, message->status holds either zero (to
2102 * indicate complete success) or a negative error code. After that
2103 * callback returns, the driver which issued the transfer request may
2104 * deallocate the associated memory; it's no longer in use by any SPI
2105 * core or controller driver code.
2107 * Note that although all messages to a spi_device are handled in
2108 * FIFO order, messages may go to different devices in other orders.
2109 * Some device might be higher priority, or have various "hard" access
2110 * time requirements, for example.
2112 * On detection of any fault during the transfer, processing of
2113 * the entire message is aborted, and the device is deselected.
2114 * Until returning from the associated message completion callback,
2115 * no other spi_message queued to that device will be processed.
2116 * (This rule applies equally to all the synchronous transfer calls,
2117 * which are wrappers around this core asynchronous primitive.)
2119 int spi_async_locked(struct spi_device *spi, struct spi_message *message)
2121 struct spi_master *master = spi->master;
2122 int ret;
2123 unsigned long flags;
2125 ret = __spi_validate(spi, message);
2126 if (ret != 0)
2127 return ret;
2129 spin_lock_irqsave(&master->bus_lock_spinlock, flags);
2131 ret = __spi_async(spi, message);
2133 spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
2135 return ret;
2138 EXPORT_SYMBOL_GPL(spi_async_locked);
2141 /*-------------------------------------------------------------------------*/
2143 /* Utility methods for SPI master protocol drivers, layered on
2144 * top of the core. Some other utility methods are defined as
2145 * inline functions.
2148 static void spi_complete(void *arg)
2150 complete(arg);
2153 static int __spi_sync(struct spi_device *spi, struct spi_message *message,
2154 int bus_locked)
2156 DECLARE_COMPLETION_ONSTACK(done);
2157 int status;
2158 struct spi_master *master = spi->master;
2159 unsigned long flags;
2161 status = __spi_validate(spi, message);
2162 if (status != 0)
2163 return status;
2165 message->complete = spi_complete;
2166 message->context = &done;
2167 message->spi = spi;
2169 if (!bus_locked)
2170 mutex_lock(&master->bus_lock_mutex);
2172 /* If we're not using the legacy transfer method then we will
2173 * try to transfer in the calling context, so special-case that here.
2174 * This code would be less tricky if we could remove the
2175 * support for driver-implemented message queues.
2177 if (master->transfer == spi_queued_transfer) {
2178 spin_lock_irqsave(&master->bus_lock_spinlock, flags);
2180 trace_spi_message_submit(message);
2182 status = __spi_queued_transfer(spi, message, false);
2184 spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
2185 } else {
2186 status = spi_async_locked(spi, message);
2189 if (!bus_locked)
2190 mutex_unlock(&master->bus_lock_mutex);
2192 if (status == 0) {
2193 /* Push out the messages in the calling context if we
2194 * can.
2196 if (master->transfer == spi_queued_transfer)
2197 __spi_pump_messages(master, false);
2199 wait_for_completion(&done);
2200 status = message->status;
2202 message->context = NULL;
2203 return status;
2207 * spi_sync - blocking/synchronous SPI data transfers
2208 * @spi: device with which data will be exchanged
2209 * @message: describes the data transfers
2210 * Context: can sleep
2212 * This call may only be used from a context that may sleep. The sleep
2213 * is non-interruptible, and has no timeout. Low-overhead controller
2214 * drivers may DMA directly into and out of the message buffers.
2216 * Note that the SPI device's chip select is active during the message,
2217 * and then is normally disabled between messages. Drivers for some
2218 * frequently-used devices may want to minimize costs of selecting a chip,
2219 * by leaving it selected in anticipation that the next message will go
2220 * to the same chip. (That may increase power usage.)
2222 * Also, the caller is guaranteeing that the memory associated with the
2223 * message will not be freed before this call returns.
2225 * It returns zero on success, else a negative error code.
2227 int spi_sync(struct spi_device *spi, struct spi_message *message)
2229 return __spi_sync(spi, message, 0);
2231 EXPORT_SYMBOL_GPL(spi_sync);
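/*
 * Illustrative sketch (not part of this driver): a blocking full-duplex
 * transfer built on spi_sync().  The example_ prefix and the buffers are
 * hypothetical; the caller must be able to sleep.  Compare the
 * spi_sync_transfer() helper in <linux/spi/spi.h>, which wraps the same
 * pattern.
 */
#if 0	/* example only, never built */
static int example_sync_xfer(struct spi_device *spi,
			     const void *tx, void *rx, size_t len)
{
	struct spi_transfer xfer = {
		.tx_buf = tx,
		.rx_buf = rx,
		.len = len,
	};
	struct spi_message msg;

	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);

	/* returns zero on success, else a negative error code */
	return spi_sync(spi, &msg);
}
#endif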
2234 * spi_sync_locked - version of spi_sync with exclusive bus usage
2235 * @spi: device with which data will be exchanged
2236 * @message: describes the data transfers
2237 * Context: can sleep
2239 * This call may only be used from a context that may sleep. The sleep
2240 * is non-interruptible, and has no timeout. Low-overhead controller
2241 * drivers may DMA directly into and out of the message buffers.
2243 * This call should be used by drivers that require exclusive access to the
2244 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
2245 * be released by a spi_bus_unlock call when the exclusive access is over.
2247 * It returns zero on success, else a negative error code.
2249 int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
2251 return __spi_sync(spi, message, 1);
2253 EXPORT_SYMBOL_GPL(spi_sync_locked);
2256 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
2257 * @master: SPI bus master that should be locked for exclusive bus access
2258 * Context: can sleep
2260 * This call may only be used from a context that may sleep. The sleep
2261 * is non-interruptible, and has no timeout.
2263 * This call should be used by drivers that require exclusive access to the
2264 * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
2265 * exclusive access is over. Data transfer must be done by spi_sync_locked
2266 * and spi_async_locked calls when the SPI bus lock is held.
2268 * It returns zero on success, else a negative error code.
2270 int spi_bus_lock(struct spi_master *master)
2272 unsigned long flags;
2274 mutex_lock(&master->bus_lock_mutex);
2276 spin_lock_irqsave(&master->bus_lock_spinlock, flags);
2277 master->bus_lock_flag = 1;
2278 spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
2280 /* mutex remains locked until spi_bus_unlock is called */
2282 return 0;
2284 EXPORT_SYMBOL_GPL(spi_bus_lock);
2287 * spi_bus_unlock - release the lock for exclusive SPI bus usage
2288 * @master: SPI bus master that was locked for exclusive bus access
2289 * Context: can sleep
2291 * This call may only be used from a context that may sleep. The sleep
2292 * is non-interruptible, and has no timeout.
2294 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
2295 * call.
2297 * It returns zero on success, else a negative error code.
2299 int spi_bus_unlock(struct spi_master *master)
2301 master->bus_lock_flag = 0;
2303 mutex_unlock(&master->bus_lock_mutex);
2305 return 0;
2307 EXPORT_SYMBOL_GPL(spi_bus_unlock);
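/*
 * Illustrative sketch (not part of this driver): keeping the bus to
 * ourselves across two messages.  example_atomic_sequence and the
 * msg1/msg2 parameters are hypothetical; both messages are assumed to
 * be fully initialized by the caller.
 */
#if 0	/* example only, never built */
static int example_atomic_sequence(struct spi_device *spi,
				   struct spi_message *msg1,
				   struct spi_message *msg2)
{
	struct spi_master *master = spi->master;
	int ret;

	spi_bus_lock(master);		/* other devices now wait */

	ret = spi_sync_locked(spi, msg1);
	if (!ret)
		ret = spi_sync_locked(spi, msg2);

	spi_bus_unlock(master);		/* let other devices through again */

	return ret;
}
#endif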
2309 /* portable code must never pass more than 32 bytes */
2310 #define SPI_BUFSIZ max(32, SMP_CACHE_BYTES)
2312 static u8 *buf;
2315 * spi_write_then_read - SPI synchronous write followed by read
2316 * @spi: device with which data will be exchanged
2317 * @txbuf: data to be written (need not be dma-safe)
2318 * @n_tx: size of txbuf, in bytes
2319 * @rxbuf: buffer into which data will be read (need not be dma-safe)
2320 * @n_rx: size of rxbuf, in bytes
2321 * Context: can sleep
2323 * This performs a half duplex MicroWire style transaction with the
2324 * device, sending txbuf and then reading rxbuf. The return value
2325 * is zero for success, else a negative errno status code.
2326 * This call may only be used from a context that may sleep.
2328 * Parameters to this routine are always copied using a small buffer;
2329 * portable code should never use this for more than 32 bytes.
2330 * Performance-sensitive or bulk transfer code should instead use
2331 * spi_{async,sync}() calls with dma-safe buffers.
2333 int spi_write_then_read(struct spi_device *spi,
2334 const void *txbuf, unsigned n_tx,
2335 void *rxbuf, unsigned n_rx)
2337 static DEFINE_MUTEX(lock);
2339 int status;
2340 struct spi_message message;
2341 struct spi_transfer x[2];
2342 u8 *local_buf;
2344 /* Use the preallocated DMA-safe buffer if we can. We can't avoid
2345 * copying here (this is purely a convenience API), but we can
2346 * keep heap costs out of the hot path unless someone else is
2347 * using the pre-allocated buffer or the transfer is too large.
2349 if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
2350 local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
2351 GFP_KERNEL | GFP_DMA);
2352 if (!local_buf)
2353 return -ENOMEM;
2354 } else {
2355 local_buf = buf;
2358 spi_message_init(&message);
2359 memset(x, 0, sizeof(x));
2360 if (n_tx) {
2361 x[0].len = n_tx;
2362 spi_message_add_tail(&x[0], &message);
2364 if (n_rx) {
2365 x[1].len = n_rx;
2366 spi_message_add_tail(&x[1], &message);
2369 memcpy(local_buf, txbuf, n_tx);
2370 x[0].tx_buf = local_buf;
2371 x[1].rx_buf = local_buf + n_tx;
2373 /* do the i/o */
2374 status = spi_sync(spi, &message);
2375 if (status == 0)
2376 memcpy(rxbuf, x[1].rx_buf, n_rx);
2378 if (x[0].tx_buf == buf)
2379 mutex_unlock(&lock);
2380 else
2381 kfree(local_buf);
2383 return status;
2385 EXPORT_SYMBOL_GPL(spi_write_then_read);
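/*
 * Illustrative sketch (not part of this driver): reading one register of
 * a hypothetical device whose protocol is "send a one-byte command, then
 * read a one-byte reply".  The 0x80 read opcode is made up.  Stack
 * buffers are fine here because spi_write_then_read() bounces everything
 * through its own DMA-safe buffer.
 */
#if 0	/* example only, never built */
static int example_read_reg(struct spi_device *spi, u8 reg, u8 *val)
{
	u8 cmd = 0x80 | reg;	/* hypothetical "read register" command */

	return spi_write_then_read(spi, &cmd, 1, val, 1);
}
#endif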
2387 /*-------------------------------------------------------------------------*/
2389 #if IS_ENABLED(CONFIG_OF_DYNAMIC)
2390 static int __spi_of_device_match(struct device *dev, void *data)
2392 return dev->of_node == data;
2395 /* must call put_device() when done with the returned spi_device */
2396 static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
2398 struct device *dev = bus_find_device(&spi_bus_type, NULL, node,
2399 __spi_of_device_match);
2400 return dev ? to_spi_device(dev) : NULL;
2403 static int __spi_of_master_match(struct device *dev, const void *data)
2405 return dev->of_node == data;
2408 /* the spi masters are not using spi_bus, so we find them another way */
2409 static struct spi_master *of_find_spi_master_by_node(struct device_node *node)
2411 struct device *dev;
2413 dev = class_find_device(&spi_master_class, NULL, node,
2414 __spi_of_master_match);
2415 if (!dev)
2416 return NULL;
2418 /* reference got in class_find_device */
2419 return container_of(dev, struct spi_master, dev);
2422 static int of_spi_notify(struct notifier_block *nb, unsigned long action,
2423 void *arg)
2425 struct of_reconfig_data *rd = arg;
2426 struct spi_master *master;
2427 struct spi_device *spi;
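/*
 * Runs when device-tree nodes come and go at run time (e.g. a devicetree
 * overlay adding or removing a child of an SPI master): create or
 * unregister the matching spi_device.
 */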
2429 switch (of_reconfig_get_state_change(action, arg)) {
2430 case OF_RECONFIG_CHANGE_ADD:
2431 master = of_find_spi_master_by_node(rd->dn->parent);
2432 if (master == NULL)
2433 return NOTIFY_OK; /* not for us */
2435 spi = of_register_spi_device(master, rd->dn);
2436 put_device(&master->dev);
2438 if (IS_ERR(spi)) {
2439 pr_err("%s: failed to create for '%s'\n",
2440 __func__, rd->dn->full_name);
2441 return notifier_from_errno(PTR_ERR(spi));
2443 break;
2445 case OF_RECONFIG_CHANGE_REMOVE:
2446 /* find our device by node */
2447 spi = of_find_spi_device_by_node(rd->dn);
2448 if (spi == NULL)
2449 return NOTIFY_OK; /* no? not meant for us */
2451 /* unregister takes one ref away */
2452 spi_unregister_device(spi);
2454 /* and drop the reference taken by the find */
2455 put_device(&spi->dev);
2456 break;
2459 return NOTIFY_OK;
2462 static struct notifier_block spi_of_notifier = {
2463 .notifier_call = of_spi_notify,
2465 #else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
2466 extern struct notifier_block spi_of_notifier;
2467 #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
2469 static int __init spi_init(void)
2471 int status;
2473 buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
2474 if (!buf) {
2475 status = -ENOMEM;
2476 goto err0;
2479 status = bus_register(&spi_bus_type);
2480 if (status < 0)
2481 goto err1;
2483 status = class_register(&spi_master_class);
2484 if (status < 0)
2485 goto err2;
2487 if (IS_ENABLED(CONFIG_OF_DYNAMIC))
2488 WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
2490 return 0;
2492 err2:
2493 bus_unregister(&spi_bus_type);
2494 err1:
2495 kfree(buf);
2496 buf = NULL;
2497 err0:
2498 return status;
2501 /* board_info is normally registered in arch_initcall(),
2502 * but even essential drivers wait till later
2504 * REVISIT only boardinfo really needs static linking. The rest (device and
2505 * driver registration) _could_ be dynamically linked (modular) ... costs
2506 * include needing to have boardinfo data structures be much more public.
2508 postcore_initcall(spi_init);