/*
 * Copyright (C) 2009 Texas Instruments.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
19 #include <linux/interrupt.h>
21 #include <linux/gpio.h>
22 #include <linux/module.h>
23 #include <linux/delay.h>
24 #include <linux/platform_device.h>
25 #include <linux/err.h>
26 #include <linux/clk.h>
27 #include <linux/dma-mapping.h>
28 #include <linux/spi/spi.h>
29 #include <linux/spi/spi_bitbang.h>
30 #include <linux/slab.h>
33 #include <mach/edma.h>
/* Sentinel meaning "no DMA resource was provided by the platform". */
#define SPI_NO_RESOURCE		((resource_size_t)-1)

#define SPI_MAX_CHIPSELECT	2

#define CS_DEFAULT	0xFF

/* SPIFMTn register bit fields */
#define SPIFMT_PHASE_MASK	BIT(16)
#define SPIFMT_POLARITY_MASK	BIT(17)
#define SPIFMT_DISTIMER_MASK	BIT(18)
#define SPIFMT_SHIFTDIR_MASK	BIT(20)
#define SPIFMT_WAITENA_MASK	BIT(21)
#define SPIFMT_PARITYENA_MASK	BIT(22)
#define SPIFMT_ODD_PARITY_MASK	BIT(23)
#define SPIFMT_WDELAY_MASK	0x3f000000u
#define SPIFMT_WDELAY_SHIFT	24
#define SPIFMT_PRESCALE_SHIFT	8

/* SPIPC0 register bit fields (pin function control) */
#define SPIPC0_DIFUN_MASK	BIT(11)		/* MISO */
#define SPIPC0_DOFUN_MASK	BIT(10)		/* MOSI */
#define SPIPC0_CLKFUN_MASK	BIT(9)		/* CLK */
#define SPIPC0_SPIENA_MASK	BIT(8)		/* nREADY */

/* SPIINT register masks and interrupt levels */
#define SPIINT_MASKALL		0x0101035F
#define SPIINT_MASKINT		0x0000015F
#define SPI_INTLVL_1		0x000001FF
#define SPI_INTLVL_0		0x00000000

/* SPIDAT1 (upper 16 bit defines) */
#define SPIDAT1_CSHOLD_MASK	BIT(12)

/* SPIGCR1 register bit fields */
#define SPIGCR1_CLKMOD_MASK	BIT(1)
#define SPIGCR1_MASTER_MASK	BIT(0)
#define SPIGCR1_LOOPBACK_MASK	BIT(16)
#define SPIGCR1_SPIENA_MASK	BIT(24)

/* SPIBUF register bit fields */
#define SPIBUF_TXFULL_MASK	BIT(29)
#define SPIBUF_RXEMPTY_MASK	BIT(31)

/* SPIDELAY register bit fields */
#define SPIDELAY_C2TDELAY_SHIFT	24
#define SPIDELAY_C2TDELAY_MASK	(0xFF << SPIDELAY_C2TDELAY_SHIFT)
#define SPIDELAY_T2CDELAY_SHIFT	16
#define SPIDELAY_T2CDELAY_MASK	(0xFF << SPIDELAY_T2CDELAY_SHIFT)
#define SPIDELAY_T2EDELAY_SHIFT	8
#define SPIDELAY_T2EDELAY_MASK	(0xFF << SPIDELAY_T2EDELAY_SHIFT)
#define SPIDELAY_C2EDELAY_SHIFT	0
#define SPIDELAY_C2EDELAY_MASK	0xFF

/* Error flags in the SPIFLG register */
#define SPIFLG_DLEN_ERR_MASK		BIT(0)
#define SPIFLG_TIMEOUT_MASK		BIT(1)
#define SPIFLG_PARERR_MASK		BIT(2)
#define SPIFLG_DESYNC_MASK		BIT(3)
#define SPIFLG_BITERR_MASK		BIT(4)
#define SPIFLG_OVRRUN_MASK		BIT(6)
#define SPIFLG_BUF_INIT_ACTIVE_MASK	BIT(24)
#define SPIFLG_ERROR_MASK		(SPIFLG_DLEN_ERR_MASK \
				| SPIFLG_TIMEOUT_MASK | SPIFLG_PARERR_MASK \
				| SPIFLG_DESYNC_MASK | SPIFLG_BITERR_MASK \
				| SPIFLG_OVRRUN_MASK)

#define SPIINT_DMA_REQ_EN	BIT(16)

/* SPI Controller registers */
#define SPIGCR0		0x00
#define SPIGCR1		0x04
#define SPIINT		0x08
#define SPILVL		0x0c
#define SPIFLG		0x10
#define SPIPC0		0x14
#define SPIDAT1		0x3c
#define SPIBUF		0x40
#define SPIDELAY	0x48
#define SPIDEF		0x4c
#define SPIFMT0		0x50
115 /* We have 2 DMA channels per CS, one for RX and one for TX */
116 struct davinci_spi_dma
{
121 enum dma_event_q eventq
;
123 struct completion dma_tx_completion
;
124 struct completion dma_rx_completion
;
127 /* SPI Controller driver's private data. */
129 struct spi_bitbang bitbang
;
133 resource_size_t pbase
;
137 struct completion done
;
141 #define SPI_TMP_BUFSZ (SMP_CACHE_BYTES + 1)
142 u8 rx_tmp_buf
[SPI_TMP_BUFSZ
];
145 struct davinci_spi_dma dma_channels
;
146 struct davinci_spi_platform_data
*pdata
;
148 void (*get_rx
)(u32 rx_data
, struct davinci_spi
*);
149 u32 (*get_tx
)(struct davinci_spi
*);
151 u8 bytes_per_word
[SPI_MAX_CHIPSELECT
];
154 static struct davinci_spi_config davinci_spi_default_cfg
;
156 static unsigned use_dma
;
158 static void davinci_spi_rx_buf_u8(u32 data
, struct davinci_spi
*davinci_spi
)
160 if (davinci_spi
->rx
) {
161 u8
*rx
= davinci_spi
->rx
;
163 davinci_spi
->rx
= rx
;
167 static void davinci_spi_rx_buf_u16(u32 data
, struct davinci_spi
*davinci_spi
)
169 if (davinci_spi
->rx
) {
170 u16
*rx
= davinci_spi
->rx
;
172 davinci_spi
->rx
= rx
;
176 static u32
davinci_spi_tx_buf_u8(struct davinci_spi
*davinci_spi
)
179 if (davinci_spi
->tx
) {
180 const u8
*tx
= davinci_spi
->tx
;
182 davinci_spi
->tx
= tx
;
187 static u32
davinci_spi_tx_buf_u16(struct davinci_spi
*davinci_spi
)
190 if (davinci_spi
->tx
) {
191 const u16
*tx
= davinci_spi
->tx
;
193 davinci_spi
->tx
= tx
;
198 static inline void set_io_bits(void __iomem
*addr
, u32 bits
)
200 u32 v
= ioread32(addr
);
206 static inline void clear_io_bits(void __iomem
*addr
, u32 bits
)
208 u32 v
= ioread32(addr
);
215 * Interface to control the chip select signal
217 static void davinci_spi_chipselect(struct spi_device
*spi
, int value
)
219 struct davinci_spi
*davinci_spi
;
220 struct davinci_spi_platform_data
*pdata
;
221 u8 chip_sel
= spi
->chip_select
;
222 u16 spidat1_cfg
= CS_DEFAULT
;
223 bool gpio_chipsel
= false;
225 davinci_spi
= spi_master_get_devdata(spi
->master
);
226 pdata
= davinci_spi
->pdata
;
228 if (pdata
->chip_sel
&& chip_sel
< pdata
->num_chipselect
&&
229 pdata
->chip_sel
[chip_sel
] != SPI_INTERN_CS
)
233 * Board specific chip select logic decides the polarity and cs
234 * line for the controller
237 if (value
== BITBANG_CS_ACTIVE
)
238 gpio_set_value(pdata
->chip_sel
[chip_sel
], 0);
240 gpio_set_value(pdata
->chip_sel
[chip_sel
], 1);
242 if (value
== BITBANG_CS_ACTIVE
) {
243 spidat1_cfg
|= SPIDAT1_CSHOLD_MASK
;
244 spidat1_cfg
&= ~(0x1 << chip_sel
);
247 iowrite16(spidat1_cfg
, davinci_spi
->base
+ SPIDAT1
+ 2);
252 * davinci_spi_get_prescale - Calculates the correct prescale value
253 * @maxspeed_hz: the maximum rate the SPI clock can run at
255 * This function calculates the prescale value that generates a clock rate
256 * less than or equal to the specified maximum.
258 * Returns: calculated prescale - 1 for easy programming into SPI registers
259 * or negative error number if valid prescalar cannot be updated.
261 static inline int davinci_spi_get_prescale(struct davinci_spi
*davinci_spi
,
266 ret
= DIV_ROUND_UP(clk_get_rate(davinci_spi
->clk
), max_speed_hz
);
268 if (ret
< 3 || ret
> 256)
275 * davinci_spi_setup_transfer - This functions will determine transfer method
276 * @spi: spi device on which data transfer to be done
277 * @t: spi transfer in which transfer info is filled
279 * This function determines data transfer method (8/16/32 bit transfer).
280 * It will also set the SPI Clock Control register according to
281 * SPI slave device freq.
283 static int davinci_spi_setup_transfer(struct spi_device
*spi
,
284 struct spi_transfer
*t
)
287 struct davinci_spi
*davinci_spi
;
288 struct davinci_spi_config
*spicfg
;
289 u8 bits_per_word
= 0;
290 u32 hz
= 0, spifmt
= 0, prescale
= 0;
292 davinci_spi
= spi_master_get_devdata(spi
->master
);
293 spicfg
= (struct davinci_spi_config
*)spi
->controller_data
;
295 spicfg
= &davinci_spi_default_cfg
;
298 bits_per_word
= t
->bits_per_word
;
302 /* if bits_per_word is not set then set it default */
304 bits_per_word
= spi
->bits_per_word
;
307 * Assign function pointer to appropriate transfer method
308 * 8bit, 16bit or 32bit transfer
310 if (bits_per_word
<= 8 && bits_per_word
>= 2) {
311 davinci_spi
->get_rx
= davinci_spi_rx_buf_u8
;
312 davinci_spi
->get_tx
= davinci_spi_tx_buf_u8
;
313 davinci_spi
->bytes_per_word
[spi
->chip_select
] = 1;
314 } else if (bits_per_word
<= 16 && bits_per_word
>= 2) {
315 davinci_spi
->get_rx
= davinci_spi_rx_buf_u16
;
316 davinci_spi
->get_tx
= davinci_spi_tx_buf_u16
;
317 davinci_spi
->bytes_per_word
[spi
->chip_select
] = 2;
322 hz
= spi
->max_speed_hz
;
324 /* Set up SPIFMTn register, unique to this chipselect. */
326 prescale
= davinci_spi_get_prescale(davinci_spi
, hz
);
330 spifmt
= (prescale
<< SPIFMT_PRESCALE_SHIFT
) | (bits_per_word
& 0x1f);
332 if (spi
->mode
& SPI_LSB_FIRST
)
333 spifmt
|= SPIFMT_SHIFTDIR_MASK
;
335 if (spi
->mode
& SPI_CPOL
)
336 spifmt
|= SPIFMT_POLARITY_MASK
;
338 if (!(spi
->mode
& SPI_CPHA
))
339 spifmt
|= SPIFMT_PHASE_MASK
;
342 * Version 1 hardware supports two basic SPI modes:
343 * - Standard SPI mode uses 4 pins, with chipselect
344 * - 3 pin SPI is a 4 pin variant without CS (SPI_NO_CS)
345 * (distinct from SPI_3WIRE, with just one data wire;
346 * or similar variants without MOSI or without MISO)
348 * Version 2 hardware supports an optional handshaking signal,
349 * so it can support two more modes:
350 * - 5 pin SPI variant is standard SPI plus SPI_READY
351 * - 4 pin with enable is (SPI_READY | SPI_NO_CS)
354 if (davinci_spi
->version
== SPI_VERSION_2
) {
358 spifmt
|= ((spicfg
->wdelay
<< SPIFMT_WDELAY_SHIFT
)
359 & SPIFMT_WDELAY_MASK
);
361 if (spicfg
->odd_parity
)
362 spifmt
|= SPIFMT_ODD_PARITY_MASK
;
364 if (spicfg
->parity_enable
)
365 spifmt
|= SPIFMT_PARITYENA_MASK
;
367 if (spicfg
->timer_disable
) {
368 spifmt
|= SPIFMT_DISTIMER_MASK
;
370 delay
|= (spicfg
->c2tdelay
<< SPIDELAY_C2TDELAY_SHIFT
)
371 & SPIDELAY_C2TDELAY_MASK
;
372 delay
|= (spicfg
->t2cdelay
<< SPIDELAY_T2CDELAY_SHIFT
)
373 & SPIDELAY_T2CDELAY_MASK
;
376 if (spi
->mode
& SPI_READY
) {
377 spifmt
|= SPIFMT_WAITENA_MASK
;
378 delay
|= (spicfg
->t2edelay
<< SPIDELAY_T2EDELAY_SHIFT
)
379 & SPIDELAY_T2EDELAY_MASK
;
380 delay
|= (spicfg
->c2edelay
<< SPIDELAY_C2EDELAY_SHIFT
)
381 & SPIDELAY_C2EDELAY_MASK
;
384 iowrite32(delay
, davinci_spi
->base
+ SPIDELAY
);
387 iowrite32(spifmt
, davinci_spi
->base
+ SPIFMT0
);
392 static void davinci_spi_dma_rx_callback(unsigned lch
, u16 ch_status
, void *data
)
394 struct spi_device
*spi
= (struct spi_device
*)data
;
395 struct davinci_spi
*davinci_spi
;
396 struct davinci_spi_dma
*davinci_spi_dma
;
398 davinci_spi
= spi_master_get_devdata(spi
->master
);
399 davinci_spi_dma
= &davinci_spi
->dma_channels
;
401 if (ch_status
== DMA_COMPLETE
)
402 edma_stop(davinci_spi_dma
->dma_rx_channel
);
404 edma_clean_channel(davinci_spi_dma
->dma_rx_channel
);
406 complete(&davinci_spi_dma
->dma_rx_completion
);
409 static void davinci_spi_dma_tx_callback(unsigned lch
, u16 ch_status
, void *data
)
411 struct spi_device
*spi
= (struct spi_device
*)data
;
412 struct davinci_spi
*davinci_spi
;
413 struct davinci_spi_dma
*davinci_spi_dma
;
415 davinci_spi
= spi_master_get_devdata(spi
->master
);
416 davinci_spi_dma
= &davinci_spi
->dma_channels
;
418 if (ch_status
== DMA_COMPLETE
)
419 edma_stop(davinci_spi_dma
->dma_tx_channel
);
421 edma_clean_channel(davinci_spi_dma
->dma_tx_channel
);
423 complete(&davinci_spi_dma
->dma_tx_completion
);
426 static int davinci_spi_request_dma(struct spi_device
*spi
)
428 struct davinci_spi
*davinci_spi
;
429 struct davinci_spi_dma
*davinci_spi_dma
;
433 davinci_spi
= spi_master_get_devdata(spi
->master
);
434 davinci_spi_dma
= &davinci_spi
->dma_channels
;
435 sdev
= davinci_spi
->bitbang
.master
->dev
.parent
;
437 r
= edma_alloc_channel(davinci_spi_dma
->dma_rx_sync_dev
,
438 davinci_spi_dma_rx_callback
, spi
,
439 davinci_spi_dma
->eventq
);
441 dev_dbg(sdev
, "Unable to request DMA channel for SPI RX\n");
444 davinci_spi_dma
->dma_rx_channel
= r
;
445 r
= edma_alloc_channel(davinci_spi_dma
->dma_tx_sync_dev
,
446 davinci_spi_dma_tx_callback
, spi
,
447 davinci_spi_dma
->eventq
);
449 edma_free_channel(davinci_spi_dma
->dma_rx_channel
);
450 davinci_spi_dma
->dma_rx_channel
= -1;
451 dev_dbg(sdev
, "Unable to request DMA channel for SPI TX\n");
454 davinci_spi_dma
->dma_tx_channel
= r
;
460 * davinci_spi_setup - This functions will set default transfer method
461 * @spi: spi device on which data transfer to be done
463 * This functions sets the default transfer method.
465 static int davinci_spi_setup(struct spi_device
*spi
)
468 struct davinci_spi
*davinci_spi
;
469 struct davinci_spi_dma
*davinci_spi_dma
;
470 struct davinci_spi_platform_data
*pdata
;
472 davinci_spi
= spi_master_get_devdata(spi
->master
);
473 pdata
= davinci_spi
->pdata
;
475 /* if bits per word length is zero then set it default 8 */
476 if (!spi
->bits_per_word
)
477 spi
->bits_per_word
= 8;
479 if (!(spi
->mode
& SPI_NO_CS
)) {
480 if ((pdata
->chip_sel
== NULL
) ||
481 (pdata
->chip_sel
[spi
->chip_select
] == SPI_INTERN_CS
))
482 set_io_bits(davinci_spi
->base
+ SPIPC0
,
483 1 << spi
->chip_select
);
487 if (spi
->mode
& SPI_READY
)
488 set_io_bits(davinci_spi
->base
+ SPIPC0
, SPIPC0_SPIENA_MASK
);
490 if (spi
->mode
& SPI_LOOP
)
491 set_io_bits(davinci_spi
->base
+ SPIGCR1
,
492 SPIGCR1_LOOPBACK_MASK
);
494 clear_io_bits(davinci_spi
->base
+ SPIGCR1
,
495 SPIGCR1_LOOPBACK_MASK
);
498 davinci_spi_dma
= &davinci_spi
->dma_channels
;
500 if ((davinci_spi_dma
->dma_rx_channel
== -1) ||
501 (davinci_spi_dma
->dma_tx_channel
== -1))
502 retval
= davinci_spi_request_dma(spi
);
508 static void davinci_spi_cleanup(struct spi_device
*spi
)
511 struct davinci_spi
*davinci_spi
=
512 spi_master_get_devdata(spi
->master
);
513 struct davinci_spi_dma
*davinci_spi_dma
=
514 &davinci_spi
->dma_channels
;
516 if ((davinci_spi_dma
->dma_rx_channel
!= -1)
517 && (davinci_spi_dma
->dma_tx_channel
!= -1)) {
518 edma_free_channel(davinci_spi_dma
->dma_tx_channel
);
519 edma_free_channel(davinci_spi_dma
->dma_rx_channel
);
524 static int davinci_spi_check_error(struct davinci_spi
*davinci_spi
,
527 struct device
*sdev
= davinci_spi
->bitbang
.master
->dev
.parent
;
529 if (int_status
& SPIFLG_TIMEOUT_MASK
) {
530 dev_dbg(sdev
, "SPI Time-out Error\n");
533 if (int_status
& SPIFLG_DESYNC_MASK
) {
534 dev_dbg(sdev
, "SPI Desynchronization Error\n");
537 if (int_status
& SPIFLG_BITERR_MASK
) {
538 dev_dbg(sdev
, "SPI Bit error\n");
542 if (davinci_spi
->version
== SPI_VERSION_2
) {
543 if (int_status
& SPIFLG_DLEN_ERR_MASK
) {
544 dev_dbg(sdev
, "SPI Data Length Error\n");
547 if (int_status
& SPIFLG_PARERR_MASK
) {
548 dev_dbg(sdev
, "SPI Parity Error\n");
551 if (int_status
& SPIFLG_OVRRUN_MASK
) {
552 dev_dbg(sdev
, "SPI Data Overrun error\n");
555 if (int_status
& SPIFLG_BUF_INIT_ACTIVE_MASK
) {
556 dev_dbg(sdev
, "SPI Buffer Init Active\n");
565 * davinci_spi_process_events - check for and handle any SPI controller events
566 * @davinci_spi: the controller data
568 * This function will check the SPIFLG register and handle any events that are
571 static int davinci_spi_process_events(struct davinci_spi
*davinci_spi
)
573 u32 buf
, status
, errors
= 0, data1_reg_val
;
575 buf
= ioread32(davinci_spi
->base
+ SPIBUF
);
577 if (davinci_spi
->rcount
> 0 && !(buf
& SPIBUF_RXEMPTY_MASK
)) {
578 davinci_spi
->get_rx(buf
& 0xFFFF, davinci_spi
);
579 davinci_spi
->rcount
--;
582 status
= ioread32(davinci_spi
->base
+ SPIFLG
);
584 if (unlikely(status
& SPIFLG_ERROR_MASK
)) {
585 errors
= status
& SPIFLG_ERROR_MASK
;
589 if (davinci_spi
->wcount
> 0 && !(buf
& SPIBUF_TXFULL_MASK
)) {
590 data1_reg_val
= ioread32(davinci_spi
->base
+ SPIDAT1
);
591 davinci_spi
->wcount
--;
592 data1_reg_val
&= ~0xFFFF;
593 data1_reg_val
|= 0xFFFF & davinci_spi
->get_tx(davinci_spi
);
594 iowrite32(data1_reg_val
, davinci_spi
->base
+ SPIDAT1
);
602 * davinci_spi_bufs - functions which will handle transfer data
603 * @spi: spi device on which data transfer to be done
604 * @t: spi transfer in which transfer info is filled
606 * This function will put data to be transferred into data register
607 * of SPI controller and then wait until the completion will be marked
608 * by the IRQ Handler.
610 static int davinci_spi_bufs_pio(struct spi_device
*spi
, struct spi_transfer
*t
)
612 struct davinci_spi
*davinci_spi
;
614 u32 tx_data
, data1_reg_val
;
616 struct davinci_spi_config
*spicfg
;
617 struct davinci_spi_platform_data
*pdata
;
619 davinci_spi
= spi_master_get_devdata(spi
->master
);
620 pdata
= davinci_spi
->pdata
;
621 spicfg
= (struct davinci_spi_config
*)spi
->controller_data
;
623 spicfg
= &davinci_spi_default_cfg
;
625 davinci_spi
->tx
= t
->tx_buf
;
626 davinci_spi
->rx
= t
->rx_buf
;
627 davinci_spi
->wcount
= t
->len
/
628 davinci_spi
->bytes_per_word
[spi
->chip_select
];
629 davinci_spi
->rcount
= davinci_spi
->wcount
;
631 data1_reg_val
= ioread32(davinci_spi
->base
+ SPIDAT1
);
634 set_io_bits(davinci_spi
->base
+ SPIGCR1
, SPIGCR1_SPIENA_MASK
);
636 if (spicfg
->io_type
== SPI_IO_TYPE_INTR
) {
637 set_io_bits(davinci_spi
->base
+ SPIINT
, SPIINT_MASKINT
);
638 INIT_COMPLETION(davinci_spi
->done
);
641 /* start the transfer */
642 davinci_spi
->wcount
--;
643 tx_data
= davinci_spi
->get_tx(davinci_spi
);
644 data1_reg_val
&= 0xFFFF0000;
645 data1_reg_val
|= tx_data
& 0xFFFF;
646 iowrite32(data1_reg_val
, davinci_spi
->base
+ SPIDAT1
);
648 /* Wait for the transfer to complete */
649 if (spicfg
->io_type
== SPI_IO_TYPE_INTR
) {
650 wait_for_completion_interruptible(&(davinci_spi
->done
));
652 while (davinci_spi
->rcount
> 0 || davinci_spi
->wcount
> 0) {
653 errors
= davinci_spi_process_events(davinci_spi
);
660 clear_io_bits(davinci_spi
->base
+ SPIINT
, SPIINT_MASKALL
);
663 * Check for bit error, desync error,parity error,timeout error and
664 * receive overflow errors
667 ret
= davinci_spi_check_error(davinci_spi
, errors
);
668 WARN(!ret
, "%s: error reported but no error found!\n",
669 dev_name(&spi
->dev
));
677 * davinci_spi_irq - Interrupt handler for SPI Master Controller
678 * @irq: IRQ number for this SPI Master
679 * @context_data: structure for SPI Master controller davinci_spi
681 * ISR will determine that interrupt arrives either for READ or WRITE command.
682 * According to command it will do the appropriate action. It will check
683 * transfer length and if it is not zero then dispatch transfer command again.
684 * If transfer length is zero then it will indicate the COMPLETION so that
685 * davinci_spi_bufs function can go ahead.
687 static irqreturn_t
davinci_spi_irq(s32 irq
, void *context_data
)
689 struct davinci_spi
*davinci_spi
= context_data
;
692 status
= davinci_spi_process_events(davinci_spi
);
693 if (unlikely(status
!= 0))
694 clear_io_bits(davinci_spi
->base
+ SPIINT
, SPIINT_MASKINT
);
696 if ((!davinci_spi
->rcount
&& !davinci_spi
->wcount
) || status
)
697 complete(&davinci_spi
->done
);
702 static int davinci_spi_bufs_dma(struct spi_device
*spi
, struct spi_transfer
*t
)
704 struct davinci_spi
*davinci_spi
;
707 unsigned rx_buf_count
;
708 struct davinci_spi_dma
*davinci_spi_dma
;
710 unsigned long tx_reg
, rx_reg
;
711 struct davinci_spi_platform_data
*pdata
;
714 struct edmacc_param param
;
716 davinci_spi
= spi_master_get_devdata(spi
->master
);
717 pdata
= davinci_spi
->pdata
;
718 sdev
= davinci_spi
->bitbang
.master
->dev
.parent
;
720 davinci_spi_dma
= &davinci_spi
->dma_channels
;
722 tx_reg
= (unsigned long)davinci_spi
->pbase
+ SPIDAT1
;
723 rx_reg
= (unsigned long)davinci_spi
->pbase
+ SPIBUF
;
725 davinci_spi
->tx
= t
->tx_buf
;
726 davinci_spi
->rx
= t
->rx_buf
;
728 /* convert len to words based on bits_per_word */
729 data_type
= davinci_spi
->bytes_per_word
[spi
->chip_select
];
731 init_completion(&davinci_spi_dma
->dma_rx_completion
);
732 init_completion(&davinci_spi_dma
->dma_tx_completion
);
734 count
= t
->len
/ data_type
; /* the number of elements */
736 /* disable all interrupts for dma transfers */
737 clear_io_bits(davinci_spi
->base
+ SPIINT
, SPIINT_MASKALL
);
739 set_io_bits(davinci_spi
->base
+ SPIGCR1
, SPIGCR1_SPIENA_MASK
);
744 * If there is transmit data, map the transmit buffer, set it as the
745 * source of data and set the source B index to data size.
746 * If there is no transmit data, set the transmit register as the
747 * source of data, and set the source B index to zero.
749 * The destination is always the transmit register itself. And the
750 * destination never increments.
754 t
->tx_dma
= dma_map_single(&spi
->dev
, (void *)t
->tx_buf
, count
,
756 if (dma_mapping_error(&spi
->dev
, t
->tx_dma
)) {
757 dev_dbg(sdev
, "Unable to DMA map a %d bytes"
758 " TX buffer\n", count
);
763 param
.opt
= TCINTEN
| EDMA_TCC(davinci_spi_dma
->dma_tx_channel
);
764 param
.src
= t
->tx_buf
? t
->tx_dma
: tx_reg
;
765 param
.a_b_cnt
= count
<< 16 | data_type
;
767 param
.src_dst_bidx
= t
->tx_buf
? data_type
: 0;
768 param
.link_bcntrld
= 0xffff;
769 param
.src_dst_cidx
= 0;
771 edma_write_slot(davinci_spi_dma
->dma_tx_channel
, ¶m
);
776 * If there is receive buffer, use it to receive data. If there
777 * is none provided, use a temporary receive buffer. Set the
778 * destination B index to 0 so effectively only one byte is used
779 * in the temporary buffer (address does not increment).
781 * The source of receive data is the receive data register. The
782 * source address never increments.
787 rx_buf_count
= count
;
789 rx_buf
= davinci_spi
->rx_tmp_buf
;
790 rx_buf_count
= sizeof(davinci_spi
->rx_tmp_buf
);
793 t
->rx_dma
= dma_map_single(&spi
->dev
, rx_buf
, rx_buf_count
,
795 if (dma_mapping_error(&spi
->dev
, t
->rx_dma
)) {
796 dev_dbg(sdev
, "Couldn't DMA map a %d bytes RX buffer\n",
799 dma_unmap_single(NULL
, t
->tx_dma
, count
, DMA_TO_DEVICE
);
803 param
.opt
= TCINTEN
| EDMA_TCC(davinci_spi_dma
->dma_rx_channel
);
805 param
.a_b_cnt
= count
<< 16 | data_type
;
806 param
.dst
= t
->rx_dma
;
807 param
.src_dst_bidx
= (t
->rx_buf
? data_type
: 0) << 16;
808 param
.link_bcntrld
= 0xffff;
809 param
.src_dst_cidx
= 0;
811 edma_write_slot(davinci_spi_dma
->dma_rx_channel
, ¶m
);
813 if (pdata
->cshold_bug
) {
814 u16 spidat1
= ioread16(davinci_spi
->base
+ SPIDAT1
+ 2);
815 iowrite16(spidat1
, davinci_spi
->base
+ SPIDAT1
+ 2);
818 edma_start(davinci_spi_dma
->dma_rx_channel
);
819 edma_start(davinci_spi_dma
->dma_tx_channel
);
820 set_io_bits(davinci_spi
->base
+ SPIINT
, SPIINT_DMA_REQ_EN
);
822 wait_for_completion_interruptible(&davinci_spi_dma
->dma_tx_completion
);
823 wait_for_completion_interruptible(&davinci_spi_dma
->dma_rx_completion
);
826 dma_unmap_single(NULL
, t
->tx_dma
, count
, DMA_TO_DEVICE
);
828 dma_unmap_single(NULL
, t
->rx_dma
, rx_buf_count
, DMA_FROM_DEVICE
);
830 clear_io_bits(davinci_spi
->base
+ SPIINT
, SPIINT_DMA_REQ_EN
);
833 * Check for bit error, desync error,parity error,timeout error and
834 * receive overflow errors
836 int_status
= ioread32(davinci_spi
->base
+ SPIFLG
);
838 ret
= davinci_spi_check_error(davinci_spi
, int_status
);
846 * davinci_spi_probe - probe function for SPI Master Controller
847 * @pdev: platform_device structure which contains plateform specific data
849 static int davinci_spi_probe(struct platform_device
*pdev
)
851 struct spi_master
*master
;
852 struct davinci_spi
*davinci_spi
;
853 struct davinci_spi_platform_data
*pdata
;
854 struct resource
*r
, *mem
;
855 resource_size_t dma_rx_chan
= SPI_NO_RESOURCE
;
856 resource_size_t dma_tx_chan
= SPI_NO_RESOURCE
;
857 resource_size_t dma_eventq
= SPI_NO_RESOURCE
;
861 pdata
= pdev
->dev
.platform_data
;
867 master
= spi_alloc_master(&pdev
->dev
, sizeof(struct davinci_spi
));
868 if (master
== NULL
) {
873 dev_set_drvdata(&pdev
->dev
, master
);
875 davinci_spi
= spi_master_get_devdata(master
);
876 if (davinci_spi
== NULL
) {
881 r
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
887 davinci_spi
->pbase
= r
->start
;
888 davinci_spi
->region_size
= resource_size(r
);
889 davinci_spi
->pdata
= pdata
;
891 mem
= request_mem_region(r
->start
, davinci_spi
->region_size
,
898 davinci_spi
->base
= ioremap(r
->start
, davinci_spi
->region_size
);
899 if (davinci_spi
->base
== NULL
) {
904 davinci_spi
->irq
= platform_get_irq(pdev
, 0);
905 if (davinci_spi
->irq
<= 0) {
910 ret
= request_irq(davinci_spi
->irq
, davinci_spi_irq
, 0,
911 dev_name(&pdev
->dev
), davinci_spi
);
915 davinci_spi
->bitbang
.master
= spi_master_get(master
);
916 if (davinci_spi
->bitbang
.master
== NULL
) {
921 davinci_spi
->clk
= clk_get(&pdev
->dev
, NULL
);
922 if (IS_ERR(davinci_spi
->clk
)) {
926 clk_enable(davinci_spi
->clk
);
928 master
->bus_num
= pdev
->id
;
929 master
->num_chipselect
= pdata
->num_chipselect
;
930 master
->setup
= davinci_spi_setup
;
931 master
->cleanup
= davinci_spi_cleanup
;
933 davinci_spi
->bitbang
.chipselect
= davinci_spi_chipselect
;
934 davinci_spi
->bitbang
.setup_transfer
= davinci_spi_setup_transfer
;
936 davinci_spi
->version
= pdata
->version
;
937 use_dma
= pdata
->use_dma
;
939 davinci_spi
->bitbang
.flags
= SPI_NO_CS
| SPI_LSB_FIRST
| SPI_LOOP
;
940 if (davinci_spi
->version
== SPI_VERSION_2
)
941 davinci_spi
->bitbang
.flags
|= SPI_READY
;
944 r
= platform_get_resource(pdev
, IORESOURCE_DMA
, 0);
946 dma_rx_chan
= r
->start
;
947 r
= platform_get_resource(pdev
, IORESOURCE_DMA
, 1);
949 dma_tx_chan
= r
->start
;
950 r
= platform_get_resource(pdev
, IORESOURCE_DMA
, 2);
952 dma_eventq
= r
->start
;
956 dma_rx_chan
== SPI_NO_RESOURCE
||
957 dma_tx_chan
== SPI_NO_RESOURCE
||
958 dma_eventq
== SPI_NO_RESOURCE
) {
959 davinci_spi
->bitbang
.txrx_bufs
= davinci_spi_bufs_pio
;
962 davinci_spi
->bitbang
.txrx_bufs
= davinci_spi_bufs_dma
;
964 davinci_spi
->dma_channels
.dma_rx_channel
= -1;
965 davinci_spi
->dma_channels
.dma_rx_sync_dev
= dma_rx_chan
;
966 davinci_spi
->dma_channels
.dma_tx_channel
= -1;
967 davinci_spi
->dma_channels
.dma_tx_sync_dev
= dma_tx_chan
;
968 davinci_spi
->dma_channels
.eventq
= dma_eventq
;
970 dev_info(&pdev
->dev
, "DaVinci SPI driver in EDMA mode\n"
971 "Using RX channel = %d , TX channel = %d and "
972 "event queue = %d", dma_rx_chan
, dma_tx_chan
,
976 davinci_spi
->get_rx
= davinci_spi_rx_buf_u8
;
977 davinci_spi
->get_tx
= davinci_spi_tx_buf_u8
;
979 init_completion(&davinci_spi
->done
);
981 /* Reset In/OUT SPI module */
982 iowrite32(0, davinci_spi
->base
+ SPIGCR0
);
984 iowrite32(1, davinci_spi
->base
+ SPIGCR0
);
986 /* Set up SPIPC0. CS and ENA init is done in davinci_spi_setup */
987 spipc0
= SPIPC0_DIFUN_MASK
| SPIPC0_DOFUN_MASK
| SPIPC0_CLKFUN_MASK
;
988 iowrite32(spipc0
, davinci_spi
->base
+ SPIPC0
);
990 /* initialize chip selects */
991 if (pdata
->chip_sel
) {
992 for (i
= 0; i
< pdata
->num_chipselect
; i
++) {
993 if (pdata
->chip_sel
[i
] != SPI_INTERN_CS
)
994 gpio_direction_output(pdata
->chip_sel
[i
], 1);
999 if (davinci_spi
->pdata
->clk_internal
)
1000 set_io_bits(davinci_spi
->base
+ SPIGCR1
,
1001 SPIGCR1_CLKMOD_MASK
);
1003 clear_io_bits(davinci_spi
->base
+ SPIGCR1
,
1004 SPIGCR1_CLKMOD_MASK
);
1006 if (pdata
->intr_line
)
1007 iowrite32(SPI_INTLVL_1
, davinci_spi
->base
+ SPILVL
);
1009 iowrite32(SPI_INTLVL_0
, davinci_spi
->base
+ SPILVL
);
1011 iowrite32(CS_DEFAULT
, davinci_spi
->base
+ SPIDEF
);
1013 /* master mode default */
1014 set_io_bits(davinci_spi
->base
+ SPIGCR1
, SPIGCR1_MASTER_MASK
);
1016 ret
= spi_bitbang_start(&davinci_spi
->bitbang
);
1020 dev_info(&pdev
->dev
, "Controller at 0x%p\n", davinci_spi
->base
);
1025 clk_disable(davinci_spi
->clk
);
1026 clk_put(davinci_spi
->clk
);
1028 spi_master_put(master
);
1030 free_irq(davinci_spi
->irq
, davinci_spi
);
1032 iounmap(davinci_spi
->base
);
1034 release_mem_region(davinci_spi
->pbase
, davinci_spi
->region_size
);
1042 * davinci_spi_remove - remove function for SPI Master Controller
1043 * @pdev: platform_device structure which contains plateform specific data
1045 * This function will do the reverse action of davinci_spi_probe function
1046 * It will free the IRQ and SPI controller's memory region.
1047 * It will also call spi_bitbang_stop to destroy the work queue which was
1048 * created by spi_bitbang_start.
1050 static int __exit
davinci_spi_remove(struct platform_device
*pdev
)
1052 struct davinci_spi
*davinci_spi
;
1053 struct spi_master
*master
;
1055 master
= dev_get_drvdata(&pdev
->dev
);
1056 davinci_spi
= spi_master_get_devdata(master
);
1058 spi_bitbang_stop(&davinci_spi
->bitbang
);
1060 clk_disable(davinci_spi
->clk
);
1061 clk_put(davinci_spi
->clk
);
1062 spi_master_put(master
);
1063 free_irq(davinci_spi
->irq
, davinci_spi
);
1064 iounmap(davinci_spi
->base
);
1065 release_mem_region(davinci_spi
->pbase
, davinci_spi
->region_size
);
1070 static struct platform_driver davinci_spi_driver
= {
1071 .driver
.name
= "spi_davinci",
1072 .remove
= __exit_p(davinci_spi_remove
),
1075 static int __init
davinci_spi_init(void)
1077 return platform_driver_probe(&davinci_spi_driver
, davinci_spi_probe
);
1079 module_init(davinci_spi_init
);
1081 static void __exit
davinci_spi_exit(void)
1083 platform_driver_unregister(&davinci_spi_driver
);
1085 module_exit(davinci_spi_exit
);
1087 MODULE_DESCRIPTION("TI DaVinci SPI Master Controller Driver");
1088 MODULE_LICENSE("GPL");