/*
 * Copyright (C) 2009 Texas Instruments.
 * Copyright (C) 2010 EF Johnson Technologies
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/edma.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/slab.h>

#include <linux/platform_data/spi-davinci.h>
#define SPI_NO_RESOURCE		((resource_size_t)-1)

#define SPI_MAX_CHIPSELECT	2

#define CS_DEFAULT	0xFF

#define SPIFMT_PHASE_MASK	BIT(16)
#define SPIFMT_POLARITY_MASK	BIT(17)
#define SPIFMT_DISTIMER_MASK	BIT(18)
#define SPIFMT_SHIFTDIR_MASK	BIT(20)
#define SPIFMT_WAITENA_MASK	BIT(21)
#define SPIFMT_PARITYENA_MASK	BIT(22)
#define SPIFMT_ODD_PARITY_MASK	BIT(23)
#define SPIFMT_WDELAY_MASK	0x3f000000u
#define SPIFMT_WDELAY_SHIFT	24
#define SPIFMT_PRESCALE_SHIFT	8

#define SPIPC0_DIFUN_MASK	BIT(11)		/* MISO */
#define SPIPC0_DOFUN_MASK	BIT(10)		/* MOSI */
#define SPIPC0_CLKFUN_MASK	BIT(9)		/* CLK */
#define SPIPC0_SPIENA_MASK	BIT(8)		/* nREADY */

#define SPIINT_MASKALL		0x0101035F
#define SPIINT_MASKINT		0x0000015F
#define SPI_INTLVL_1		0x000001FF
#define SPI_INTLVL_0		0x00000000

/* SPIDAT1 (upper 16 bit defines) */
#define SPIDAT1_CSHOLD_MASK	BIT(12)

#define SPIGCR1_CLKMOD_MASK	BIT(1)
#define SPIGCR1_MASTER_MASK	BIT(0)
#define SPIGCR1_POWERDOWN_MASK	BIT(8)
#define SPIGCR1_LOOPBACK_MASK	BIT(16)
#define SPIGCR1_SPIENA_MASK	BIT(24)

#define SPIBUF_TXFULL_MASK	BIT(29)
#define SPIBUF_RXEMPTY_MASK	BIT(31)

#define SPIDELAY_C2TDELAY_SHIFT	24
#define SPIDELAY_C2TDELAY_MASK	(0xFF << SPIDELAY_C2TDELAY_SHIFT)
#define SPIDELAY_T2CDELAY_SHIFT	16
#define SPIDELAY_T2CDELAY_MASK	(0xFF << SPIDELAY_T2CDELAY_SHIFT)
#define SPIDELAY_T2EDELAY_SHIFT	8
#define SPIDELAY_T2EDELAY_MASK	(0xFF << SPIDELAY_T2EDELAY_SHIFT)
#define SPIDELAY_C2EDELAY_SHIFT	0
#define SPIDELAY_C2EDELAY_MASK	0xFF

/* Error Masks */
#define SPIFLG_DLEN_ERR_MASK		BIT(0)
#define SPIFLG_TIMEOUT_MASK		BIT(1)
#define SPIFLG_PARERR_MASK		BIT(2)
#define SPIFLG_DESYNC_MASK		BIT(3)
#define SPIFLG_BITERR_MASK		BIT(4)
#define SPIFLG_OVRRUN_MASK		BIT(6)
#define SPIFLG_BUF_INIT_ACTIVE_MASK	BIT(24)
#define SPIFLG_ERROR_MASK		(SPIFLG_DLEN_ERR_MASK \
				| SPIFLG_TIMEOUT_MASK | SPIFLG_PARERR_MASK \
				| SPIFLG_DESYNC_MASK | SPIFLG_BITERR_MASK \
				| SPIFLG_OVRRUN_MASK)

#define SPIINT_DMA_REQ_EN	BIT(16)

/* SPI Controller registers */
#define SPIGCR0		0x00
#define SPIGCR1		0x04
#define SPIINT		0x08
#define SPILVL		0x0c
#define SPIFLG		0x10
#define SPIPC0		0x14
#define SPIDAT1		0x3c
#define SPIBUF		0x40
#define SPIDELAY	0x48
#define SPIDEF		0x4c
#define SPIFMT0		0x50
/* SPI Controller driver's private data. */
struct davinci_spi {
	struct spi_bitbang	bitbang;
	struct clk		*clk;

	u8			version;
	resource_size_t		pbase;
	void __iomem		*base;
	int			irq;
	struct completion	done;

	const void		*tx;
	void			*rx;
	int			rcount;
	int			wcount;

	struct dma_chan		*dma_rx;
	struct dma_chan		*dma_tx;
	int			dma_rx_chnum;
	int			dma_tx_chnum;

	struct davinci_spi_platform_data pdata;

	void			(*get_rx)(u32 rx_data, struct davinci_spi *);
	u32			(*get_tx)(struct davinci_spi *);

	u8			bytes_per_word[SPI_MAX_CHIPSELECT];
};

static struct davinci_spi_config davinci_spi_default_cfg;
static void davinci_spi_rx_buf_u8(u32 data, struct davinci_spi *dspi)
{
	if (dspi->rx) {
		u8 *rx = dspi->rx;
		*rx++ = (u8)data;
		dspi->rx = rx;
	}
}

static void davinci_spi_rx_buf_u16(u32 data, struct davinci_spi *dspi)
{
	if (dspi->rx) {
		u16 *rx = dspi->rx;
		*rx++ = (u16)data;
		dspi->rx = rx;
	}
}

static u32 davinci_spi_tx_buf_u8(struct davinci_spi *dspi)
{
	u32 data = 0;
	if (dspi->tx) {
		const u8 *tx = dspi->tx;
		data = *tx++;
		dspi->tx = tx;
	}
	return data;
}

static u32 davinci_spi_tx_buf_u16(struct davinci_spi *dspi)
{
	u32 data = 0;
	if (dspi->tx) {
		const u16 *tx = dspi->tx;
		data = *tx++;
		dspi->tx = tx;
	}
	return data;
}
static inline void set_io_bits(void __iomem *addr, u32 bits)
{
	u32 v = ioread32(addr);

	v |= bits;
	iowrite32(v, addr);
}

static inline void clear_io_bits(void __iomem *addr, u32 bits)
{
	u32 v = ioread32(addr);

	v &= ~bits;
	iowrite32(v, addr);
}
/*
 * Interface to control the chip select signal
 */
static void davinci_spi_chipselect(struct spi_device *spi, int value)
{
	struct davinci_spi *dspi;
	struct davinci_spi_platform_data *pdata;
	u8 chip_sel = spi->chip_select;
	u16 spidat1 = CS_DEFAULT;
	bool gpio_chipsel = false;

	dspi = spi_master_get_devdata(spi->master);
	pdata = &dspi->pdata;

	if (pdata->chip_sel && chip_sel < pdata->num_chipselect &&
				pdata->chip_sel[chip_sel] != SPI_INTERN_CS)
		gpio_chipsel = true;

	/*
	 * Board specific chip select logic decides the polarity and cs
	 * line for the controller
	 */
	if (gpio_chipsel) {
		if (value == BITBANG_CS_ACTIVE)
			gpio_set_value(pdata->chip_sel[chip_sel], 0);
		else
			gpio_set_value(pdata->chip_sel[chip_sel], 1);
	} else {
		if (value == BITBANG_CS_ACTIVE) {
			spidat1 |= SPIDAT1_CSHOLD_MASK;
			spidat1 &= ~(0x1 << chip_sel);
		}

		iowrite16(spidat1, dspi->base + SPIDAT1 + 2);
	}
}
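/*
 * Note on the half-word write above: the chip-select hold and chip-select
 * defaults live in the upper 16 bits of SPIDAT1 (see the "SPIDAT1 (upper 16
 * bit defines)" masks earlier in this file), so the chipselect routine only
 * touches that half with a 16-bit write at SPIDAT1 + 2 and leaves the data
 * field in the lower half untouched.
 */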
/**
 * davinci_spi_get_prescale - Calculates the correct prescale value
 * @dspi: the controller data
 * @max_speed_hz: the maximum rate the SPI clock can run at
 *
 * This function calculates the prescale value that generates a clock rate
 * less than or equal to the specified maximum.
 *
 * Returns: calculated prescale value - 1 for easy programming into SPI
 * registers, or a negative error number if a valid prescale value cannot
 * be obtained.
 */
static inline int davinci_spi_get_prescale(struct davinci_spi *dspi,
							u32 max_speed_hz)
{
	int ret;

	ret = DIV_ROUND_UP(clk_get_rate(dspi->clk), max_speed_hz);

	if (ret < 3 || ret > 256)
		return -EINVAL;

	return ret - 1;
}
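/*
 * Worked example (illustrative numbers only, not taken from a real board):
 * with a 150 MHz module clock and a slave limited to 10 MHz,
 * DIV_ROUND_UP(150000000, 10000000) = 15, which is inside the valid 3..256
 * range, so the function returns 14. Programming 14 into the prescale field
 * divides the module clock by 15 and yields the requested 10 MHz on SCLK.
 */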
/**
 * davinci_spi_setup_transfer - This function determines the transfer method
 * @spi: spi device on which data transfer is to be done
 * @t: spi transfer in which transfer info is filled
 *
 * This function determines the data transfer method (8/16/32-bit transfer).
 * It also sets the SPI clock control register according to the SPI slave
 * device's frequency.
 */
static int davinci_spi_setup_transfer(struct spi_device *spi,
		struct spi_transfer *t)
{
	struct davinci_spi *dspi;
	struct davinci_spi_config *spicfg;
	u8 bits_per_word = 0;
	u32 hz = 0, spifmt = 0;
	int prescale;

	dspi = spi_master_get_devdata(spi->master);
	spicfg = (struct davinci_spi_config *)spi->controller_data;
	if (!spicfg)
		spicfg = &davinci_spi_default_cfg;

	if (t) {
		bits_per_word = t->bits_per_word;
		hz = t->speed_hz;
	}

	/* if bits_per_word is not set then set it default */
	if (!bits_per_word)
		bits_per_word = spi->bits_per_word;

	/*
	 * Assign function pointer to appropriate transfer method
	 * 8bit, 16bit or 32bit transfer
	 */
	if (bits_per_word <= 8) {
		dspi->get_rx = davinci_spi_rx_buf_u8;
		dspi->get_tx = davinci_spi_tx_buf_u8;
		dspi->bytes_per_word[spi->chip_select] = 1;
	} else {
		dspi->get_rx = davinci_spi_rx_buf_u16;
		dspi->get_tx = davinci_spi_tx_buf_u16;
		dspi->bytes_per_word[spi->chip_select] = 2;
	}

	if (!hz)
		hz = spi->max_speed_hz;

	/* Set up SPIFMTn register, unique to this chipselect. */

	prescale = davinci_spi_get_prescale(dspi, hz);
	if (prescale < 0)
		return prescale;

	spifmt = (prescale << SPIFMT_PRESCALE_SHIFT) | (bits_per_word & 0x1f);

	if (spi->mode & SPI_LSB_FIRST)
		spifmt |= SPIFMT_SHIFTDIR_MASK;

	if (spi->mode & SPI_CPOL)
		spifmt |= SPIFMT_POLARITY_MASK;

	if (!(spi->mode & SPI_CPHA))
		spifmt |= SPIFMT_PHASE_MASK;

	/*
	 * Version 1 hardware supports two basic SPI modes:
	 *  - Standard SPI mode uses 4 pins, with chipselect
	 *  - 3 pin SPI is a 4 pin variant without CS (SPI_NO_CS)
	 *	(distinct from SPI_3WIRE, with just one data wire;
	 *	or similar variants without MOSI or without MISO)
	 *
	 * Version 2 hardware supports an optional handshaking signal,
	 * so it can support two more modes:
	 *  - 5 pin SPI variant is standard SPI plus SPI_READY
	 *  - 4 pin with enable is (SPI_READY | SPI_NO_CS)
	 */
	if (dspi->version == SPI_VERSION_2) {

		u32 delay = 0;

		spifmt |= ((spicfg->wdelay << SPIFMT_WDELAY_SHIFT)
							& SPIFMT_WDELAY_MASK);

		if (spicfg->odd_parity)
			spifmt |= SPIFMT_ODD_PARITY_MASK;

		if (spicfg->parity_enable)
			spifmt |= SPIFMT_PARITYENA_MASK;

		if (spicfg->timer_disable) {
			spifmt |= SPIFMT_DISTIMER_MASK;
		} else {
			delay |= (spicfg->c2tdelay << SPIDELAY_C2TDELAY_SHIFT)
				& SPIDELAY_C2TDELAY_MASK;
			delay |= (spicfg->t2cdelay << SPIDELAY_T2CDELAY_SHIFT)
				& SPIDELAY_T2CDELAY_MASK;
		}

		if (spi->mode & SPI_READY) {
			spifmt |= SPIFMT_WAITENA_MASK;
			delay |= (spicfg->t2edelay << SPIDELAY_T2EDELAY_SHIFT)
				& SPIDELAY_T2EDELAY_MASK;
			delay |= (spicfg->c2edelay << SPIDELAY_C2EDELAY_SHIFT)
				& SPIDELAY_C2EDELAY_MASK;
		}

		iowrite32(delay, dspi->base + SPIDELAY);
	}

	iowrite32(spifmt, dspi->base + SPIFMT0);

	return 0;
}
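/*
 * Illustrative example of how the SPIFMT0 value is composed (values assumed,
 * not from a real board): a SPI_MODE_0 device (CPOL = 0, CPHA = 0) limited to
 * 10 MHz with 8-bit words on a 150 MHz module clock ends up with prescale = 14
 * (see the prescale example above), so SPIFMT0 is written with
 * (14 << SPIFMT_PRESCALE_SHIFT) | 8, plus SPIFMT_PHASE_MASK because CPHA is 0;
 * SPIFMT_POLARITY_MASK and SPIFMT_SHIFTDIR_MASK remain clear.
 */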
/**
 * davinci_spi_setup - This function sets the default transfer method
 * @spi: spi device on which data transfer is to be done
 *
 * This function sets the default transfer method.
 */
static int davinci_spi_setup(struct spi_device *spi)
{
	int retval = 0;
	struct davinci_spi *dspi;
	struct davinci_spi_platform_data *pdata;

	dspi = spi_master_get_devdata(spi->master);
	pdata = &dspi->pdata;

	/* if bits per word length is zero then set it default 8 */
	if (!spi->bits_per_word)
		spi->bits_per_word = 8;

	if (!(spi->mode & SPI_NO_CS)) {
		if ((pdata->chip_sel == NULL) ||
		    (pdata->chip_sel[spi->chip_select] == SPI_INTERN_CS))
			set_io_bits(dspi->base + SPIPC0, 1 << spi->chip_select);
	}

	if (spi->mode & SPI_READY)
		set_io_bits(dspi->base + SPIPC0, SPIPC0_SPIENA_MASK);

	if (spi->mode & SPI_LOOP)
		set_io_bits(dspi->base + SPIGCR1, SPIGCR1_LOOPBACK_MASK);
	else
		clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_LOOPBACK_MASK);

	return retval;
}
static int davinci_spi_check_error(struct davinci_spi *dspi, int int_status)
{
	struct device *sdev = dspi->bitbang.master->dev.parent;

	if (int_status & SPIFLG_TIMEOUT_MASK) {
		dev_dbg(sdev, "SPI Time-out Error\n");
		return -ETIMEDOUT;
	}
	if (int_status & SPIFLG_DESYNC_MASK) {
		dev_dbg(sdev, "SPI Desynchronization Error\n");
		return -EIO;
	}
	if (int_status & SPIFLG_BITERR_MASK) {
		dev_dbg(sdev, "SPI Bit error\n");
		return -EIO;
	}

	if (dspi->version == SPI_VERSION_2) {
		if (int_status & SPIFLG_DLEN_ERR_MASK) {
			dev_dbg(sdev, "SPI Data Length Error\n");
			return -EIO;
		}
		if (int_status & SPIFLG_PARERR_MASK) {
			dev_dbg(sdev, "SPI Parity Error\n");
			return -EIO;
		}
		if (int_status & SPIFLG_OVRRUN_MASK) {
			dev_dbg(sdev, "SPI Data Overrun error\n");
			return -EIO;
		}
		if (int_status & SPIFLG_BUF_INIT_ACTIVE_MASK) {
			dev_dbg(sdev, "SPI Buffer Init Active\n");
			return -EBUSY;
		}
	}

	return 0;
}
/**
 * davinci_spi_process_events - check for and handle any SPI controller events
 * @dspi: the controller data
 *
 * This function will check the SPIFLG register and handle any events that are
 * detected there.
 */
static int davinci_spi_process_events(struct davinci_spi *dspi)
{
	u32 buf, status, errors = 0, spidat1;

	buf = ioread32(dspi->base + SPIBUF);

	if (dspi->rcount > 0 && !(buf & SPIBUF_RXEMPTY_MASK)) {
		dspi->get_rx(buf & 0xFFFF, dspi);
		dspi->rcount--;
	}

	status = ioread32(dspi->base + SPIFLG);

	if (unlikely(status & SPIFLG_ERROR_MASK)) {
		errors = status & SPIFLG_ERROR_MASK;
		goto out;
	}

	if (dspi->wcount > 0 && !(buf & SPIBUF_TXFULL_MASK)) {
		spidat1 = ioread32(dspi->base + SPIDAT1);
		dspi->wcount--;
		spidat1 &= ~0xFFFF;
		spidat1 |= 0xFFFF & dspi->get_tx(dspi);
		iowrite32(spidat1, dspi->base + SPIDAT1);
	}

out:
	return errors;
}
static void davinci_spi_dma_rx_callback(void *data)
{
	struct davinci_spi *dspi = (struct davinci_spi *)data;

	dspi->rcount = 0;

	if (!dspi->wcount && !dspi->rcount)
		complete(&dspi->done);
}

static void davinci_spi_dma_tx_callback(void *data)
{
	struct davinci_spi *dspi = (struct davinci_spi *)data;

	dspi->wcount = 0;

	if (!dspi->wcount && !dspi->rcount)
		complete(&dspi->done);
}
/**
 * davinci_spi_bufs - function which handles the data transfer
 * @spi: spi device on which data transfer is to be done
 * @t: spi transfer in which transfer info is filled
 *
 * This function puts the data to be transferred into the data register of the
 * SPI controller and then waits until the completion is signalled by the IRQ
 * handler.
 */
static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
{
	struct davinci_spi *dspi;
	int data_type, ret = -ENOMEM;
	u32 tx_data, spidat1;
	u32 errors = 0;
	struct davinci_spi_config *spicfg;
	struct davinci_spi_platform_data *pdata;
	unsigned uninitialized_var(rx_buf_count);
	void *dummy_buf = NULL;
	struct scatterlist sg_rx, sg_tx;

	dspi = spi_master_get_devdata(spi->master);
	pdata = &dspi->pdata;
	spicfg = (struct davinci_spi_config *)spi->controller_data;
	if (!spicfg)
		spicfg = &davinci_spi_default_cfg;

	/* convert len to words based on bits_per_word */
	data_type = dspi->bytes_per_word[spi->chip_select];

	dspi->tx = t->tx_buf;
	dspi->rx = t->rx_buf;
	dspi->wcount = t->len / data_type;
	dspi->rcount = dspi->wcount;

	spidat1 = ioread32(dspi->base + SPIDAT1);

	clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK);
	set_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);

	INIT_COMPLETION(dspi->done);

	if (spicfg->io_type == SPI_IO_TYPE_INTR)
		set_io_bits(dspi->base + SPIINT, SPIINT_MASKINT);

	if (spicfg->io_type != SPI_IO_TYPE_DMA) {
		/* start the transfer */
		dspi->wcount--;
		tx_data = dspi->get_tx(dspi);
		spidat1 &= 0xFFFF0000;
		spidat1 |= tx_data & 0xFFFF;
		iowrite32(spidat1, dspi->base + SPIDAT1);
	} else {
		struct dma_slave_config dma_rx_conf = {
			.direction = DMA_DEV_TO_MEM,
			.src_addr = (unsigned long)dspi->pbase + SPIBUF,
			.src_addr_width = data_type,
			.src_maxburst = 1,
		};
		struct dma_slave_config dma_tx_conf = {
			.direction = DMA_MEM_TO_DEV,
			.dst_addr = (unsigned long)dspi->pbase + SPIDAT1,
			.dst_addr_width = data_type,
			.dst_maxburst = 1,
		};
		struct dma_async_tx_descriptor *rxdesc;
		struct dma_async_tx_descriptor *txdesc;
		void *buf;

		dummy_buf = kzalloc(t->len, GFP_KERNEL);
		if (!dummy_buf)
			goto err_alloc_dummy_buf;

		dmaengine_slave_config(dspi->dma_rx, &dma_rx_conf);
		dmaengine_slave_config(dspi->dma_tx, &dma_tx_conf);

		sg_init_table(&sg_rx, 1);
		if (!t->rx_buf)
			buf = dummy_buf;
		else
			buf = t->rx_buf;
		t->rx_dma = dma_map_single(&spi->dev, buf,
				t->len, DMA_FROM_DEVICE);
		if (!t->rx_dma) {
			ret = -EFAULT;
			goto err_rx_map;
		}
		sg_dma_address(&sg_rx) = t->rx_dma;
		sg_dma_len(&sg_rx) = t->len;

		sg_init_table(&sg_tx, 1);
		if (!t->tx_buf)
			buf = dummy_buf;
		else
			buf = (void *)t->tx_buf;
		t->tx_dma = dma_map_single(&spi->dev, buf,
				t->len, DMA_TO_DEVICE);
		if (!t->tx_dma) {
			ret = -EFAULT;
			goto err_tx_map;
		}
		sg_dma_address(&sg_tx) = t->tx_dma;
		sg_dma_len(&sg_tx) = t->len;

		rxdesc = dmaengine_prep_slave_sg(dspi->dma_rx,
				&sg_rx, 1, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!rxdesc)
			goto err_desc;

		txdesc = dmaengine_prep_slave_sg(dspi->dma_tx,
				&sg_tx, 1, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!txdesc)
			goto err_desc;

		rxdesc->callback = davinci_spi_dma_rx_callback;
		rxdesc->callback_param = (void *)dspi;
		txdesc->callback = davinci_spi_dma_tx_callback;
		txdesc->callback_param = (void *)dspi;

		if (pdata->cshold_bug)
			iowrite16(spidat1 >> 16, dspi->base + SPIDAT1 + 2);

		dmaengine_submit(rxdesc);
		dmaengine_submit(txdesc);

		dma_async_issue_pending(dspi->dma_rx);
		dma_async_issue_pending(dspi->dma_tx);

		set_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN);
	}

	/* Wait for the transfer to complete */
	if (spicfg->io_type != SPI_IO_TYPE_POLL) {
		wait_for_completion_interruptible(&(dspi->done));
	} else {
		while (dspi->rcount > 0 || dspi->wcount > 0) {
			errors = davinci_spi_process_events(dspi);
			if (errors)
				break;
			cpu_relax();
		}
	}

	clear_io_bits(dspi->base + SPIINT, SPIINT_MASKALL);
	if (spicfg->io_type == SPI_IO_TYPE_DMA) {
		clear_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN);

		dma_unmap_single(&spi->dev, t->rx_dma,
				t->len, DMA_FROM_DEVICE);
		dma_unmap_single(&spi->dev, t->tx_dma,
				t->len, DMA_TO_DEVICE);
		kfree(dummy_buf);
	}

	clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
	set_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK);

	/*
	 * Check for bit error, desync error, parity error, timeout error and
	 * receive overflow errors
	 */
	if (errors) {
		ret = davinci_spi_check_error(dspi, errors);
		WARN(!ret, "%s: error reported but no error found!\n",
							dev_name(&spi->dev));
		return ret;
	}

	if (dspi->rcount != 0 || dspi->wcount != 0) {
		dev_err(&spi->dev, "SPI data transfer error\n");
		return -EIO;
	}

	return t->len;

err_desc:
	dma_unmap_single(&spi->dev, t->tx_dma, t->len, DMA_TO_DEVICE);
err_tx_map:
	dma_unmap_single(&spi->dev, t->rx_dma, t->len, DMA_FROM_DEVICE);
err_rx_map:
	kfree(dummy_buf);
err_alloc_dummy_buf:
	return ret;
}
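/*
 * For reference, a minimal sketch (not part of this driver) of how a client
 * protocol driver ends up in davinci_spi_bufs(): it queues an spi_transfer
 * through the SPI core, and the bitbang layer calls back into txrx_bufs.
 * The buffer and length names below are illustrative only.
 *
 *	struct spi_transfer xfer = {
 *		.tx_buf = tx_buf,
 *		.rx_buf = rx_buf,
 *		.len    = len,
 *	};
 *	struct spi_message msg;
 *
 *	spi_message_init(&msg);
 *	spi_message_add_tail(&xfer, &msg);
 *	ret = spi_sync(spi, &msg);
 */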
/**
 * dummy_thread_fn - dummy thread function
 * @irq: IRQ number for this SPI Master
 * @data: structure for SPI Master controller davinci_spi
 *
 * This is to satisfy the request_threaded_irq() API so that the irq
 * handler is called in interrupt context.
 */
static irqreturn_t dummy_thread_fn(s32 irq, void *data)
{
	return IRQ_HANDLED;
}

/**
 * davinci_spi_irq - Interrupt handler for SPI Master Controller
 * @irq: IRQ number for this SPI Master
 * @data: structure for SPI Master controller davinci_spi
 *
 * The ISR determines whether the interrupt arrived for a READ or a WRITE
 * command and acts accordingly. It checks the remaining transfer length;
 * if it is not zero it dispatches the transfer again, and once it reaches
 * zero it signals completion so that davinci_spi_bufs() can go ahead.
 */
static irqreturn_t davinci_spi_irq(s32 irq, void *data)
{
	struct davinci_spi *dspi = data;
	int status;

	status = davinci_spi_process_events(dspi);
	if (unlikely(status != 0))
		clear_io_bits(dspi->base + SPIINT, SPIINT_MASKINT);

	if ((!dspi->rcount && !dspi->wcount) || status)
		complete(&dspi->done);

	return IRQ_HANDLED;
}
static int davinci_spi_request_dma(struct davinci_spi *dspi)
{
	dma_cap_mask_t mask;
	struct device *sdev = dspi->bitbang.master->dev.parent;
	int r;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	dspi->dma_rx = dma_request_channel(mask, edma_filter_fn,
					&dspi->dma_rx_chnum);
	if (!dspi->dma_rx) {
		dev_err(sdev, "request RX DMA channel failed\n");
		r = -ENODEV;
		goto rx_dma_failed;
	}

	dspi->dma_tx = dma_request_channel(mask, edma_filter_fn,
					&dspi->dma_tx_chnum);
	if (!dspi->dma_tx) {
		dev_err(sdev, "request TX DMA channel failed\n");
		r = -ENODEV;
		goto tx_dma_failed;
	}

	return 0;

tx_dma_failed:
	dma_release_channel(dspi->dma_rx);
rx_dma_failed:
	return r;
}
#if defined(CONFIG_OF)
static const struct of_device_id davinci_spi_of_match[] = {
	{
		.compatible = "ti,dm6441-spi",
	},
	{
		.compatible = "ti,da830-spi",
		.data = (void *)SPI_VERSION_2,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, davinci_spi_of_match);
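/*
 * For illustration only, a device tree node this table would match might look
 * roughly like the following; the address, register size, and interrupt are
 * placeholders and not values for a real board:
 *
 *	spi0: spi@1c41000 {
 *		compatible = "ti,da830-spi";
 *		reg = <0x01c41000 0x1000>;
 *		interrupts = <20>;
 *		num-cs = <4>;
 *		ti,davinci-spi-intr-line = <0>;
 *	};
 */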
/**
 * spi_davinci_get_pdata - Get platform data from DTS binding
 * @pdev: ptr to platform device
 * @dspi: ptr to driver data
 *
 * Parses and populates pdata in dspi from device tree bindings.
 *
 * NOTE: Not all platform data params are supported currently.
 */
static int spi_davinci_get_pdata(struct platform_device *pdev,
			struct davinci_spi *dspi)
{
	struct device_node *node = pdev->dev.of_node;
	struct davinci_spi_platform_data *pdata;
	unsigned int num_cs, intr_line = 0;
	const struct of_device_id *match;

	pdata = &dspi->pdata;

	pdata->version = SPI_VERSION_1;
	match = of_match_device(of_match_ptr(davinci_spi_of_match),
				&pdev->dev);
	if (!match)
		return -ENODEV;

	/* match data has the SPI version number for SPI_VERSION_2 */
	if (match->data == (void *)SPI_VERSION_2)
		pdata->version = SPI_VERSION_2;

	/*
	 * default num_cs is 1 and all chipsel are internal to the chip
	 * indicated by chip_sel being NULL. GPIO based CS is not
	 * supported yet in DT bindings.
	 */
	num_cs = 1;
	of_property_read_u32(node, "num-cs", &num_cs);
	pdata->num_chipselect = num_cs;
	of_property_read_u32(node, "ti,davinci-spi-intr-line", &intr_line);
	pdata->intr_line = intr_line;

	return 0;
}
#else
#define davinci_spi_of_match NULL
static int spi_davinci_get_pdata(struct platform_device *pdev,
			struct davinci_spi *dspi)
{
	return -ENODEV;
}
#endif
/**
 * davinci_spi_probe - probe function for SPI Master Controller
 * @pdev: platform_device structure which contains platform specific data
 *
 * According to the Linux Device Model this function will be invoked by Linux
 * with a platform_device struct which contains the device specific info.
 * This function will map the SPI controller's memory, register the IRQ,
 * reset the SPI controller and set its registers to default values.
 * It will invoke spi_bitbang_start to create a work queue so that the client
 * driver can register a transfer method with the work queue.
 */
static int davinci_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct davinci_spi *dspi;
	struct davinci_spi_platform_data *pdata;
	struct resource *r, *mem;
	resource_size_t dma_rx_chan = SPI_NO_RESOURCE;
	resource_size_t dma_tx_chan = SPI_NO_RESOURCE;
	int i = 0, ret = 0;
	u32 spipc0;

	master = spi_alloc_master(&pdev->dev, sizeof(struct davinci_spi));
	if (master == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	platform_set_drvdata(pdev, master);

	dspi = spi_master_get_devdata(master);
	if (dspi == NULL) {
		ret = -ENOENT;
		goto free_master;
	}

	if (pdev->dev.platform_data) {
		pdata = pdev->dev.platform_data;
		dspi->pdata = *pdata;
	} else {
		/* update dspi pdata with that from the DT */
		ret = spi_davinci_get_pdata(pdev, dspi);
		if (ret < 0)
			goto free_master;
	}

	/* pdata in dspi is now updated and point pdata to that */
	pdata = &dspi->pdata;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (r == NULL) {
		ret = -ENOENT;
		goto free_master;
	}

	dspi->pbase = r->start;

	mem = request_mem_region(r->start, resource_size(r), pdev->name);
	if (mem == NULL) {
		ret = -EBUSY;
		goto free_master;
	}

	dspi->base = ioremap(r->start, resource_size(r));
	if (dspi->base == NULL) {
		ret = -ENOMEM;
		goto release_region;
	}

	dspi->irq = platform_get_irq(pdev, 0);
	if (dspi->irq <= 0) {
		ret = -EINVAL;
		goto unmap_io;
	}

	ret = request_threaded_irq(dspi->irq, davinci_spi_irq, dummy_thread_fn,
				0, dev_name(&pdev->dev), dspi);
	if (ret)
		goto unmap_io;

	dspi->bitbang.master = spi_master_get(master);
	if (dspi->bitbang.master == NULL) {
		ret = -ENODEV;
		goto irq_free;
	}

	dspi->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(dspi->clk)) {
		ret = -ENODEV;
		goto put_master;
	}
	clk_prepare_enable(dspi->clk);

	master->dev.of_node = pdev->dev.of_node;
	master->bus_num = pdev->id;
	master->num_chipselect = pdata->num_chipselect;
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 16);
	master->setup = davinci_spi_setup;

	dspi->bitbang.chipselect = davinci_spi_chipselect;
	dspi->bitbang.setup_transfer = davinci_spi_setup_transfer;

	dspi->version = pdata->version;

	dspi->bitbang.flags = SPI_NO_CS | SPI_LSB_FIRST | SPI_LOOP;
	if (dspi->version == SPI_VERSION_2)
		dspi->bitbang.flags |= SPI_READY;

	r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (r)
		dma_rx_chan = r->start;
	r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
	if (r)
		dma_tx_chan = r->start;

	dspi->bitbang.txrx_bufs = davinci_spi_bufs;
	if (dma_rx_chan != SPI_NO_RESOURCE &&
	    dma_tx_chan != SPI_NO_RESOURCE) {
		dspi->dma_rx_chnum = dma_rx_chan;
		dspi->dma_tx_chnum = dma_tx_chan;

		ret = davinci_spi_request_dma(dspi);
		if (ret)
			goto free_clk;

		dev_info(&pdev->dev, "DMA: supported\n");
		dev_info(&pdev->dev, "DMA: RX channel: %d, TX channel: %d, "
				"event queue: %d\n", dma_rx_chan, dma_tx_chan,
				pdata->dma_event_q);
	}

	dspi->get_rx = davinci_spi_rx_buf_u8;
	dspi->get_tx = davinci_spi_tx_buf_u8;

	init_completion(&dspi->done);

	/* Reset In/OUT SPI module */
	iowrite32(0, dspi->base + SPIGCR0);
	udelay(100);
	iowrite32(1, dspi->base + SPIGCR0);

	/* Set up SPIPC0.  CS and ENA init is done in davinci_spi_setup */
	spipc0 = SPIPC0_DIFUN_MASK | SPIPC0_DOFUN_MASK | SPIPC0_CLKFUN_MASK;
	iowrite32(spipc0, dspi->base + SPIPC0);

	/* initialize chip selects */
	if (pdata->chip_sel) {
		for (i = 0; i < pdata->num_chipselect; i++) {
			if (pdata->chip_sel[i] != SPI_INTERN_CS)
				gpio_direction_output(pdata->chip_sel[i], 1);
		}
	}

	if (pdata->intr_line)
		iowrite32(SPI_INTLVL_1, dspi->base + SPILVL);
	else
		iowrite32(SPI_INTLVL_0, dspi->base + SPILVL);

	iowrite32(CS_DEFAULT, dspi->base + SPIDEF);

	/* master mode default */
	set_io_bits(dspi->base + SPIGCR1, SPIGCR1_CLKMOD_MASK);
	set_io_bits(dspi->base + SPIGCR1, SPIGCR1_MASTER_MASK);
	set_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK);

	ret = spi_bitbang_start(&dspi->bitbang);
	if (ret)
		goto free_dma;

	dev_info(&pdev->dev, "Controller at 0x%p\n", dspi->base);

	return ret;

free_dma:
	dma_release_channel(dspi->dma_rx);
	dma_release_channel(dspi->dma_tx);
free_clk:
	clk_disable_unprepare(dspi->clk);
	clk_put(dspi->clk);
put_master:
	spi_master_put(master);
irq_free:
	free_irq(dspi->irq, dspi);
unmap_io:
	iounmap(dspi->base);
release_region:
	release_mem_region(dspi->pbase, resource_size(r));
free_master:
	spi_master_put(master);
err:
	return ret;
}
/**
 * davinci_spi_remove - remove function for SPI Master Controller
 * @pdev: platform_device structure which contains platform specific data
 *
 * This function will do the reverse action of the davinci_spi_probe function:
 * it will free the IRQ and the SPI controller's memory region.
 * It will also call spi_bitbang_stop to destroy the work queue which was
 * created by spi_bitbang_start.
 */
static int davinci_spi_remove(struct platform_device *pdev)
{
	struct davinci_spi *dspi;
	struct spi_master *master;
	struct resource *r;

	master = platform_get_drvdata(pdev);
	dspi = spi_master_get_devdata(master);

	spi_bitbang_stop(&dspi->bitbang);

	clk_disable_unprepare(dspi->clk);
	clk_put(dspi->clk);
	spi_master_put(master);
	free_irq(dspi->irq, dspi);
	iounmap(dspi->base);
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(dspi->pbase, resource_size(r));

	return 0;
}
static struct platform_driver davinci_spi_driver = {
	.driver = {
		.name = "spi_davinci",
		.owner = THIS_MODULE,
		.of_match_table = davinci_spi_of_match,
	},
	.probe = davinci_spi_probe,
	.remove = davinci_spi_remove,
};
module_platform_driver(davinci_spi_driver);

MODULE_DESCRIPTION("TI DaVinci SPI Master Controller Driver");
MODULE_LICENSE("GPL");
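/*
 * A minimal sketch of the legacy (non-DT) registration path that this driver
 * also accepts through pdev->dev.platform_data. Everything below is
 * illustrative: the base address, IRQ and DMA channel numbers are
 * placeholders, not values for a real board. DMA resource 0 is the RX event
 * and resource 1 the TX event, matching the order probed above.
 *
 *	static struct davinci_spi_platform_data board_spi_pdata = {
 *		.version	= SPI_VERSION_1,
 *		.num_chipselect	= 2,
 *		.intr_line	= 0,
 *	};
 *
 *	static struct resource board_spi_resources[] = {
 *		{ .start = 0x01c41000, .end = 0x01c41fff, .flags = IORESOURCE_MEM },
 *		{ .start = 20, .end = 20, .flags = IORESOURCE_IRQ },
 *		{ .start = 14, .end = 14, .flags = IORESOURCE_DMA },
 *		{ .start = 15, .end = 15, .flags = IORESOURCE_DMA },
 *	};
 *
 *	static struct platform_device board_spi_device = {
 *		.name		= "spi_davinci",
 *		.id		= 0,
 *		.resource	= board_spi_resources,
 *		.num_resources	= ARRAY_SIZE(board_spi_resources),
 *		.dev		= { .platform_data = &board_spi_pdata },
 *	};
 */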