/*
 * Copyright (C) 2009 Texas Instruments.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/slab.h>

#include <mach/spi.h>
#include <mach/edma.h>
#define SPI_NO_RESOURCE		((resource_size_t)-1)

#define SPI_MAX_CHIPSELECT	2

#define CS_DEFAULT	0xFF

#define SPI_BUFSIZ	(SMP_CACHE_BYTES + 1)

/* SPIFMTn: data format register bits */
#define SPIFMT_PHASE_MASK	BIT(16)
#define SPIFMT_POLARITY_MASK	BIT(17)
#define SPIFMT_DISTIMER_MASK	BIT(18)
#define SPIFMT_SHIFTDIR_MASK	BIT(20)
#define SPIFMT_WAITENA_MASK	BIT(21)
#define SPIFMT_PARITYENA_MASK	BIT(22)
#define SPIFMT_ODD_PARITY_MASK	BIT(23)
#define SPIFMT_WDELAY_MASK	0x3f000000u
#define SPIFMT_WDELAY_SHIFT	24
#define SPIFMT_PRESCALE_SHIFT	8

/* SPIPC0: pin-function control */
#define SPIPC0_DIFUN_MASK	BIT(11)		/* MISO */
#define SPIPC0_DOFUN_MASK	BIT(10)		/* MOSI */
#define SPIPC0_CLKFUN_MASK	BIT(9)		/* CLK */
#define SPIPC0_SPIENA_MASK	BIT(8)		/* nREADY */

#define SPIINT_MASKALL		0x0101035F

/* SPIDAT1 (upper 16 bit defines) */
#define SPIDAT1_CSHOLD_MASK	BIT(12)

/* SPIGCR1: global control */
#define SPIGCR1_CLKMOD_MASK	BIT(1)
#define SPIGCR1_MASTER_MASK	BIT(0)
#define SPIGCR1_LOOPBACK_MASK	BIT(16)
#define SPIGCR1_SPIENA_MASK	BIT(24)

/* SPIBUF: receive buffer status */
#define SPIBUF_TXFULL_MASK	BIT(29)
#define SPIBUF_RXEMPTY_MASK	BIT(31)

/* SPIDELAY: chip-select timing */
#define SPIDELAY_C2TDELAY_SHIFT	24
#define SPIDELAY_C2TDELAY_MASK	(0xFF << SPIDELAY_C2TDELAY_SHIFT)
#define SPIDELAY_T2CDELAY_SHIFT	16
#define SPIDELAY_T2CDELAY_MASK	(0xFF << SPIDELAY_T2CDELAY_SHIFT)
#define SPIDELAY_T2EDELAY_SHIFT	8
#define SPIDELAY_T2EDELAY_MASK	(0xFF << SPIDELAY_T2EDELAY_SHIFT)
#define SPIDELAY_C2EDELAY_SHIFT	0
#define SPIDELAY_C2EDELAY_MASK	0xFF

/* SPIFLG: error flag bits */
#define SPIFLG_DLEN_ERR_MASK		BIT(0)
#define SPIFLG_TIMEOUT_MASK		BIT(1)
#define SPIFLG_PARERR_MASK		BIT(2)
#define SPIFLG_DESYNC_MASK		BIT(3)
#define SPIFLG_BITERR_MASK		BIT(4)
#define SPIFLG_OVRRUN_MASK		BIT(6)
#define SPIFLG_BUF_INIT_ACTIVE_MASK	BIT(24)

#define SPIINT_DMA_REQ_EN	BIT(16)

/*
 * SPI Controller registers (byte offsets from the controller base).
 * These are referenced throughout the driver (SPIGCR0 reset, SPIDAT1
 * shift-out, SPIBUF shift-in, etc.); offsets per the DaVinci SPI
 * peripheral register map.
 */
#define SPIGCR0		0x00
#define SPIGCR1		0x04
#define SPIINT		0x08
#define SPILVL		0x0c
#define SPIFLG		0x10
#define SPIPC0		0x14
#define SPIDAT1		0x3c
#define SPIBUF		0x40
#define SPIDELAY	0x48
#define SPIDEF		0x4c
#define SPIFMT0		0x50
110 /* We have 2 DMA channels per CS, one for RX and one for TX */
111 struct davinci_spi_dma
{
116 enum dma_event_q eventq
;
118 struct completion dma_tx_completion
;
119 struct completion dma_rx_completion
;
122 /* SPI Controller driver's private data. */
124 struct spi_bitbang bitbang
;
128 resource_size_t pbase
;
135 struct davinci_spi_dma
*dma_channels
;
136 struct davinci_spi_platform_data
*pdata
;
138 void (*get_rx
)(u32 rx_data
, struct davinci_spi
*);
139 u32 (*get_tx
)(struct davinci_spi
*);
141 u8 bytes_per_word
[SPI_MAX_CHIPSELECT
];
144 static struct davinci_spi_config davinci_spi_default_cfg
;
146 static unsigned use_dma
;
148 static void davinci_spi_rx_buf_u8(u32 data
, struct davinci_spi
*davinci_spi
)
150 if (davinci_spi
->rx
) {
151 u8
*rx
= davinci_spi
->rx
;
153 davinci_spi
->rx
= rx
;
157 static void davinci_spi_rx_buf_u16(u32 data
, struct davinci_spi
*davinci_spi
)
159 if (davinci_spi
->rx
) {
160 u16
*rx
= davinci_spi
->rx
;
162 davinci_spi
->rx
= rx
;
166 static u32
davinci_spi_tx_buf_u8(struct davinci_spi
*davinci_spi
)
169 if (davinci_spi
->tx
) {
170 const u8
*tx
= davinci_spi
->tx
;
172 davinci_spi
->tx
= tx
;
177 static u32
davinci_spi_tx_buf_u16(struct davinci_spi
*davinci_spi
)
180 if (davinci_spi
->tx
) {
181 const u16
*tx
= davinci_spi
->tx
;
183 davinci_spi
->tx
= tx
;
188 static inline void set_io_bits(void __iomem
*addr
, u32 bits
)
190 u32 v
= ioread32(addr
);
196 static inline void clear_io_bits(void __iomem
*addr
, u32 bits
)
198 u32 v
= ioread32(addr
);
204 static void davinci_spi_set_dma_req(const struct spi_device
*spi
, int enable
)
206 struct davinci_spi
*davinci_spi
= spi_master_get_devdata(spi
->master
);
209 set_io_bits(davinci_spi
->base
+ SPIINT
, SPIINT_DMA_REQ_EN
);
211 clear_io_bits(davinci_spi
->base
+ SPIINT
, SPIINT_DMA_REQ_EN
);
215 * Interface to control the chip select signal
217 static void davinci_spi_chipselect(struct spi_device
*spi
, int value
)
219 struct davinci_spi
*davinci_spi
;
220 struct davinci_spi_platform_data
*pdata
;
221 u8 chip_sel
= spi
->chip_select
;
222 u16 spidat1_cfg
= CS_DEFAULT
;
223 bool gpio_chipsel
= false;
225 davinci_spi
= spi_master_get_devdata(spi
->master
);
226 pdata
= davinci_spi
->pdata
;
228 if (pdata
->chip_sel
&& chip_sel
< pdata
->num_chipselect
&&
229 pdata
->chip_sel
[chip_sel
] != SPI_INTERN_CS
)
233 * Board specific chip select logic decides the polarity and cs
234 * line for the controller
237 if (value
== BITBANG_CS_ACTIVE
)
238 gpio_set_value(pdata
->chip_sel
[chip_sel
], 0);
240 gpio_set_value(pdata
->chip_sel
[chip_sel
], 1);
242 if (value
== BITBANG_CS_ACTIVE
) {
243 spidat1_cfg
|= SPIDAT1_CSHOLD_MASK
;
244 spidat1_cfg
&= ~(0x1 << chip_sel
);
247 iowrite16(spidat1_cfg
, davinci_spi
->base
+ SPIDAT1
+ 2);
252 * davinci_spi_get_prescale - Calculates the correct prescale value
253 * @maxspeed_hz: the maximum rate the SPI clock can run at
255 * This function calculates the prescale value that generates a clock rate
256 * less than or equal to the specified maximum.
258 * Returns: calculated prescale - 1 for easy programming into SPI registers
259 * or negative error number if valid prescalar cannot be updated.
261 static inline int davinci_spi_get_prescale(struct davinci_spi
*davinci_spi
,
266 ret
= DIV_ROUND_UP(clk_get_rate(davinci_spi
->clk
), max_speed_hz
);
268 if (ret
< 3 || ret
> 256)
275 * davinci_spi_setup_transfer - This functions will determine transfer method
276 * @spi: spi device on which data transfer to be done
277 * @t: spi transfer in which transfer info is filled
279 * This function determines data transfer method (8/16/32 bit transfer).
280 * It will also set the SPI Clock Control register according to
281 * SPI slave device freq.
283 static int davinci_spi_setup_transfer(struct spi_device
*spi
,
284 struct spi_transfer
*t
)
287 struct davinci_spi
*davinci_spi
;
288 struct davinci_spi_config
*spicfg
;
289 u8 bits_per_word
= 0;
290 u32 hz
= 0, spifmt
= 0, prescale
= 0;
292 davinci_spi
= spi_master_get_devdata(spi
->master
);
293 spicfg
= (struct davinci_spi_config
*)spi
->controller_data
;
295 spicfg
= &davinci_spi_default_cfg
;
298 bits_per_word
= t
->bits_per_word
;
302 /* if bits_per_word is not set then set it default */
304 bits_per_word
= spi
->bits_per_word
;
307 * Assign function pointer to appropriate transfer method
308 * 8bit, 16bit or 32bit transfer
310 if (bits_per_word
<= 8 && bits_per_word
>= 2) {
311 davinci_spi
->get_rx
= davinci_spi_rx_buf_u8
;
312 davinci_spi
->get_tx
= davinci_spi_tx_buf_u8
;
313 davinci_spi
->bytes_per_word
[spi
->chip_select
] = 1;
314 } else if (bits_per_word
<= 16 && bits_per_word
>= 2) {
315 davinci_spi
->get_rx
= davinci_spi_rx_buf_u16
;
316 davinci_spi
->get_tx
= davinci_spi_tx_buf_u16
;
317 davinci_spi
->bytes_per_word
[spi
->chip_select
] = 2;
322 hz
= spi
->max_speed_hz
;
324 /* Set up SPIFMTn register, unique to this chipselect. */
326 prescale
= davinci_spi_get_prescale(davinci_spi
, hz
);
330 spifmt
= (prescale
<< SPIFMT_PRESCALE_SHIFT
) | (bits_per_word
& 0x1f);
332 if (spi
->mode
& SPI_LSB_FIRST
)
333 spifmt
|= SPIFMT_SHIFTDIR_MASK
;
335 if (spi
->mode
& SPI_CPOL
)
336 spifmt
|= SPIFMT_POLARITY_MASK
;
338 if (!(spi
->mode
& SPI_CPHA
))
339 spifmt
|= SPIFMT_PHASE_MASK
;
342 * Version 1 hardware supports two basic SPI modes:
343 * - Standard SPI mode uses 4 pins, with chipselect
344 * - 3 pin SPI is a 4 pin variant without CS (SPI_NO_CS)
345 * (distinct from SPI_3WIRE, with just one data wire;
346 * or similar variants without MOSI or without MISO)
348 * Version 2 hardware supports an optional handshaking signal,
349 * so it can support two more modes:
350 * - 5 pin SPI variant is standard SPI plus SPI_READY
351 * - 4 pin with enable is (SPI_READY | SPI_NO_CS)
354 if (davinci_spi
->version
== SPI_VERSION_2
) {
358 spifmt
|= ((spicfg
->wdelay
<< SPIFMT_WDELAY_SHIFT
)
359 & SPIFMT_WDELAY_MASK
);
361 if (spicfg
->odd_parity
)
362 spifmt
|= SPIFMT_ODD_PARITY_MASK
;
364 if (spicfg
->parity_enable
)
365 spifmt
|= SPIFMT_PARITYENA_MASK
;
367 if (spicfg
->timer_disable
) {
368 spifmt
|= SPIFMT_DISTIMER_MASK
;
370 delay
|= (spicfg
->c2tdelay
<< SPIDELAY_C2TDELAY_SHIFT
)
371 & SPIDELAY_C2TDELAY_MASK
;
372 delay
|= (spicfg
->t2cdelay
<< SPIDELAY_T2CDELAY_SHIFT
)
373 & SPIDELAY_T2CDELAY_MASK
;
376 if (spi
->mode
& SPI_READY
) {
377 spifmt
|= SPIFMT_WAITENA_MASK
;
378 delay
|= (spicfg
->t2edelay
<< SPIDELAY_T2EDELAY_SHIFT
)
379 & SPIDELAY_T2EDELAY_MASK
;
380 delay
|= (spicfg
->c2edelay
<< SPIDELAY_C2EDELAY_SHIFT
)
381 & SPIDELAY_C2EDELAY_MASK
;
384 iowrite32(delay
, davinci_spi
->base
+ SPIDELAY
);
387 iowrite32(spifmt
, davinci_spi
->base
+ SPIFMT0
);
392 static void davinci_spi_dma_rx_callback(unsigned lch
, u16 ch_status
, void *data
)
394 struct spi_device
*spi
= (struct spi_device
*)data
;
395 struct davinci_spi
*davinci_spi
;
396 struct davinci_spi_dma
*davinci_spi_dma
;
398 davinci_spi
= spi_master_get_devdata(spi
->master
);
399 davinci_spi_dma
= &(davinci_spi
->dma_channels
[spi
->chip_select
]);
401 if (ch_status
== DMA_COMPLETE
)
402 edma_stop(davinci_spi_dma
->dma_rx_channel
);
404 edma_clean_channel(davinci_spi_dma
->dma_rx_channel
);
406 complete(&davinci_spi_dma
->dma_rx_completion
);
407 /* We must disable the DMA RX request */
408 davinci_spi_set_dma_req(spi
, 0);
411 static void davinci_spi_dma_tx_callback(unsigned lch
, u16 ch_status
, void *data
)
413 struct spi_device
*spi
= (struct spi_device
*)data
;
414 struct davinci_spi
*davinci_spi
;
415 struct davinci_spi_dma
*davinci_spi_dma
;
417 davinci_spi
= spi_master_get_devdata(spi
->master
);
418 davinci_spi_dma
= &(davinci_spi
->dma_channels
[spi
->chip_select
]);
420 if (ch_status
== DMA_COMPLETE
)
421 edma_stop(davinci_spi_dma
->dma_tx_channel
);
423 edma_clean_channel(davinci_spi_dma
->dma_tx_channel
);
425 complete(&davinci_spi_dma
->dma_tx_completion
);
426 /* We must disable the DMA TX request */
427 davinci_spi_set_dma_req(spi
, 0);
430 static int davinci_spi_request_dma(struct spi_device
*spi
)
432 struct davinci_spi
*davinci_spi
;
433 struct davinci_spi_dma
*davinci_spi_dma
;
437 davinci_spi
= spi_master_get_devdata(spi
->master
);
438 davinci_spi_dma
= &davinci_spi
->dma_channels
[spi
->chip_select
];
439 sdev
= davinci_spi
->bitbang
.master
->dev
.parent
;
441 r
= edma_alloc_channel(davinci_spi_dma
->dma_rx_sync_dev
,
442 davinci_spi_dma_rx_callback
, spi
,
443 davinci_spi_dma
->eventq
);
445 dev_dbg(sdev
, "Unable to request DMA channel for SPI RX\n");
448 davinci_spi_dma
->dma_rx_channel
= r
;
449 r
= edma_alloc_channel(davinci_spi_dma
->dma_tx_sync_dev
,
450 davinci_spi_dma_tx_callback
, spi
,
451 davinci_spi_dma
->eventq
);
453 edma_free_channel(davinci_spi_dma
->dma_rx_channel
);
454 davinci_spi_dma
->dma_rx_channel
= -1;
455 dev_dbg(sdev
, "Unable to request DMA channel for SPI TX\n");
458 davinci_spi_dma
->dma_tx_channel
= r
;
464 * davinci_spi_setup - This functions will set default transfer method
465 * @spi: spi device on which data transfer to be done
467 * This functions sets the default transfer method.
469 static int davinci_spi_setup(struct spi_device
*spi
)
472 struct davinci_spi
*davinci_spi
;
473 struct davinci_spi_dma
*davinci_spi_dma
;
475 davinci_spi
= spi_master_get_devdata(spi
->master
);
477 /* if bits per word length is zero then set it default 8 */
478 if (!spi
->bits_per_word
)
479 spi
->bits_per_word
= 8;
481 if (use_dma
&& davinci_spi
->dma_channels
) {
482 davinci_spi_dma
= &davinci_spi
->dma_channels
[spi
->chip_select
];
484 if ((davinci_spi_dma
->dma_rx_channel
== -1)
485 || (davinci_spi_dma
->dma_tx_channel
== -1)) {
486 retval
= davinci_spi_request_dma(spi
);
492 retval
= davinci_spi_setup_transfer(spi
, NULL
);
497 static void davinci_spi_cleanup(struct spi_device
*spi
)
499 struct davinci_spi
*davinci_spi
= spi_master_get_devdata(spi
->master
);
500 struct davinci_spi_dma
*davinci_spi_dma
;
502 davinci_spi_dma
= &davinci_spi
->dma_channels
[spi
->chip_select
];
504 if (use_dma
&& davinci_spi
->dma_channels
) {
505 davinci_spi_dma
= &davinci_spi
->dma_channels
[spi
->chip_select
];
507 if ((davinci_spi_dma
->dma_rx_channel
!= -1)
508 && (davinci_spi_dma
->dma_tx_channel
!= -1)) {
509 edma_free_channel(davinci_spi_dma
->dma_tx_channel
);
510 edma_free_channel(davinci_spi_dma
->dma_rx_channel
);
515 static int davinci_spi_bufs_prep(struct spi_device
*spi
,
516 struct davinci_spi
*davinci_spi
)
518 struct davinci_spi_platform_data
*pdata
;
522 * REVISIT unless devices disagree about SPI_LOOP or
523 * SPI_READY (SPI_NO_CS only allows one device!), this
524 * should not need to be done before each message...
525 * optimize for both flags staying cleared.
528 op_mode
= SPIPC0_DIFUN_MASK
530 | SPIPC0_CLKFUN_MASK
;
531 if (!(spi
->mode
& SPI_NO_CS
)) {
532 pdata
= davinci_spi
->pdata
;
533 if (!pdata
->chip_sel
||
534 pdata
->chip_sel
[spi
->chip_select
] == SPI_INTERN_CS
)
535 op_mode
|= 1 << spi
->chip_select
;
537 if (spi
->mode
& SPI_READY
)
538 op_mode
|= SPIPC0_SPIENA_MASK
;
540 iowrite32(op_mode
, davinci_spi
->base
+ SPIPC0
);
542 if (spi
->mode
& SPI_LOOP
)
543 set_io_bits(davinci_spi
->base
+ SPIGCR1
,
544 SPIGCR1_LOOPBACK_MASK
);
546 clear_io_bits(davinci_spi
->base
+ SPIGCR1
,
547 SPIGCR1_LOOPBACK_MASK
);
552 static int davinci_spi_check_error(struct davinci_spi
*davinci_spi
,
555 struct device
*sdev
= davinci_spi
->bitbang
.master
->dev
.parent
;
557 if (int_status
& SPIFLG_TIMEOUT_MASK
) {
558 dev_dbg(sdev
, "SPI Time-out Error\n");
561 if (int_status
& SPIFLG_DESYNC_MASK
) {
562 dev_dbg(sdev
, "SPI Desynchronization Error\n");
565 if (int_status
& SPIFLG_BITERR_MASK
) {
566 dev_dbg(sdev
, "SPI Bit error\n");
570 if (davinci_spi
->version
== SPI_VERSION_2
) {
571 if (int_status
& SPIFLG_DLEN_ERR_MASK
) {
572 dev_dbg(sdev
, "SPI Data Length Error\n");
575 if (int_status
& SPIFLG_PARERR_MASK
) {
576 dev_dbg(sdev
, "SPI Parity Error\n");
579 if (int_status
& SPIFLG_OVRRUN_MASK
) {
580 dev_dbg(sdev
, "SPI Data Overrun error\n");
583 if (int_status
& SPIFLG_BUF_INIT_ACTIVE_MASK
) {
584 dev_dbg(sdev
, "SPI Buffer Init Active\n");
593 * davinci_spi_bufs - functions which will handle transfer data
594 * @spi: spi device on which data transfer to be done
595 * @t: spi transfer in which transfer info is filled
597 * This function will put data to be transferred into data register
598 * of SPI controller and then wait until the completion will be marked
599 * by the IRQ Handler.
601 static int davinci_spi_bufs_pio(struct spi_device
*spi
, struct spi_transfer
*t
)
603 struct davinci_spi
*davinci_spi
;
604 int status
, count
, ret
;
606 u32 tx_data
, data1_reg_val
;
607 u32 buf_val
, flg_val
;
608 struct davinci_spi_platform_data
*pdata
;
610 davinci_spi
= spi_master_get_devdata(spi
->master
);
611 pdata
= davinci_spi
->pdata
;
613 davinci_spi
->tx
= t
->tx_buf
;
614 davinci_spi
->rx
= t
->rx_buf
;
616 /* convert len to words based on bits_per_word */
617 conv
= davinci_spi
->bytes_per_word
[spi
->chip_select
];
618 data1_reg_val
= ioread32(davinci_spi
->base
+ SPIDAT1
);
620 ret
= davinci_spi_bufs_prep(spi
, davinci_spi
);
625 set_io_bits(davinci_spi
->base
+ SPIGCR1
, SPIGCR1_SPIENA_MASK
);
627 count
= t
->len
/ conv
;
629 clear_io_bits(davinci_spi
->base
+ SPIINT
, SPIINT_MASKALL
);
631 /* Determine the command to execute READ or WRITE */
635 tx_data
= davinci_spi
->get_tx(davinci_spi
);
637 data1_reg_val
&= ~(0xFFFF);
638 data1_reg_val
|= (0xFFFF & tx_data
);
640 buf_val
= ioread32(davinci_spi
->base
+ SPIBUF
);
641 if ((buf_val
& SPIBUF_TXFULL_MASK
) == 0) {
642 iowrite32(data1_reg_val
,
643 davinci_spi
->base
+ SPIDAT1
);
647 while (ioread32(davinci_spi
->base
+ SPIBUF
)
648 & SPIBUF_RXEMPTY_MASK
)
651 /* getting the returned byte */
653 buf_val
= ioread32(davinci_spi
->base
+ SPIBUF
);
654 davinci_spi
->get_rx(buf_val
, davinci_spi
);
661 /* keeps the serial clock going */
662 if ((ioread32(davinci_spi
->base
+ SPIBUF
)
663 & SPIBUF_TXFULL_MASK
) == 0)
664 iowrite32(data1_reg_val
,
665 davinci_spi
->base
+ SPIDAT1
);
667 while (ioread32(davinci_spi
->base
+ SPIBUF
) &
671 flg_val
= ioread32(davinci_spi
->base
+ SPIFLG
);
672 buf_val
= ioread32(davinci_spi
->base
+ SPIBUF
);
674 davinci_spi
->get_rx(buf_val
, davinci_spi
);
683 * Check for bit error, desync error,parity error,timeout error and
684 * receive overflow errors
686 status
= ioread32(davinci_spi
->base
+ SPIFLG
);
688 ret
= davinci_spi_check_error(davinci_spi
, status
);
695 static int davinci_spi_bufs_dma(struct spi_device
*spi
, struct spi_transfer
*t
)
697 struct davinci_spi
*davinci_spi
;
699 int count
, temp_count
;
701 struct davinci_spi_dma
*davinci_spi_dma
;
703 unsigned long tx_reg
, rx_reg
;
706 davinci_spi
= spi_master_get_devdata(spi
->master
);
707 sdev
= davinci_spi
->bitbang
.master
->dev
.parent
;
709 davinci_spi_dma
= &davinci_spi
->dma_channels
[spi
->chip_select
];
711 tx_reg
= (unsigned long)davinci_spi
->pbase
+ SPIDAT1
;
712 rx_reg
= (unsigned long)davinci_spi
->pbase
+ SPIBUF
;
714 davinci_spi
->tx
= t
->tx_buf
;
715 davinci_spi
->rx
= t
->rx_buf
;
717 /* convert len to words based on bits_per_word */
718 data_type
= davinci_spi
->bytes_per_word
[spi
->chip_select
];
720 data1_reg_val
= ioread32(davinci_spi
->base
+ SPIDAT1
);
722 init_completion(&davinci_spi_dma
->dma_rx_completion
);
723 init_completion(&davinci_spi_dma
->dma_tx_completion
);
725 ret
= davinci_spi_bufs_prep(spi
, davinci_spi
);
729 count
= t
->len
/ data_type
; /* the number of elements */
731 /* disable all interrupts for dma transfers */
732 clear_io_bits(davinci_spi
->base
+ SPIINT
, SPIINT_MASKALL
);
734 set_io_bits(davinci_spi
->base
+ SPIGCR1
, SPIGCR1_SPIENA_MASK
);
737 t
->tx_dma
= dma_map_single(&spi
->dev
, (void *)t
->tx_buf
, count
,
739 if (dma_mapping_error(&spi
->dev
, t
->tx_dma
)) {
740 dev_dbg(sdev
, "Unable to DMA map a %d bytes"
741 " TX buffer\n", count
);
746 /* We need TX clocking for RX transaction */
747 t
->tx_dma
= dma_map_single(&spi
->dev
,
748 (void *)davinci_spi
->tmp_buf
, count
+ 1,
750 if (dma_mapping_error(&spi
->dev
, t
->tx_dma
)) {
751 dev_dbg(sdev
, "Unable to DMA map a %d bytes"
752 " TX tmp buffer\n", count
);
755 temp_count
= count
+ 1;
758 edma_set_transfer_params(davinci_spi_dma
->dma_tx_channel
,
759 data_type
, temp_count
, 1, 0, ASYNC
);
760 edma_set_dest(davinci_spi_dma
->dma_tx_channel
, tx_reg
, INCR
, W8BIT
);
761 edma_set_src(davinci_spi_dma
->dma_tx_channel
, t
->tx_dma
, INCR
, W8BIT
);
762 edma_set_src_index(davinci_spi_dma
->dma_tx_channel
, data_type
, 0);
763 edma_set_dest_index(davinci_spi_dma
->dma_tx_channel
, 0, 0);
766 /* initiate transaction */
767 iowrite32(data1_reg_val
, davinci_spi
->base
+ SPIDAT1
);
769 t
->rx_dma
= dma_map_single(&spi
->dev
, (void *)t
->rx_buf
, count
,
771 if (dma_mapping_error(&spi
->dev
, t
->rx_dma
)) {
772 dev_dbg(sdev
, "Couldn't DMA map a %d bytes RX buffer\n",
774 if (t
->tx_buf
!= NULL
)
775 dma_unmap_single(NULL
, t
->tx_dma
,
776 count
, DMA_TO_DEVICE
);
779 edma_set_transfer_params(davinci_spi_dma
->dma_rx_channel
,
780 data_type
, count
, 1, 0, ASYNC
);
781 edma_set_src(davinci_spi_dma
->dma_rx_channel
,
782 rx_reg
, INCR
, W8BIT
);
783 edma_set_dest(davinci_spi_dma
->dma_rx_channel
,
784 t
->rx_dma
, INCR
, W8BIT
);
785 edma_set_src_index(davinci_spi_dma
->dma_rx_channel
, 0, 0);
786 edma_set_dest_index(davinci_spi_dma
->dma_rx_channel
,
790 if ((t
->tx_buf
) || (t
->rx_buf
))
791 edma_start(davinci_spi_dma
->dma_tx_channel
);
794 edma_start(davinci_spi_dma
->dma_rx_channel
);
796 if ((t
->rx_buf
) || (t
->tx_buf
))
797 davinci_spi_set_dma_req(spi
, 1);
800 wait_for_completion_interruptible(
801 &davinci_spi_dma
->dma_tx_completion
);
804 wait_for_completion_interruptible(
805 &davinci_spi_dma
->dma_rx_completion
);
807 dma_unmap_single(NULL
, t
->tx_dma
, temp_count
, DMA_TO_DEVICE
);
810 dma_unmap_single(NULL
, t
->rx_dma
, count
, DMA_FROM_DEVICE
);
813 * Check for bit error, desync error,parity error,timeout error and
814 * receive overflow errors
816 int_status
= ioread32(davinci_spi
->base
+ SPIFLG
);
818 ret
= davinci_spi_check_error(davinci_spi
, int_status
);
826 * davinci_spi_probe - probe function for SPI Master Controller
827 * @pdev: platform_device structure which contains plateform specific data
829 static int davinci_spi_probe(struct platform_device
*pdev
)
831 struct spi_master
*master
;
832 struct davinci_spi
*davinci_spi
;
833 struct davinci_spi_platform_data
*pdata
;
834 struct resource
*r
, *mem
;
835 resource_size_t dma_rx_chan
= SPI_NO_RESOURCE
;
836 resource_size_t dma_tx_chan
= SPI_NO_RESOURCE
;
837 resource_size_t dma_eventq
= SPI_NO_RESOURCE
;
840 pdata
= pdev
->dev
.platform_data
;
846 master
= spi_alloc_master(&pdev
->dev
, sizeof(struct davinci_spi
));
847 if (master
== NULL
) {
852 dev_set_drvdata(&pdev
->dev
, master
);
854 davinci_spi
= spi_master_get_devdata(master
);
855 if (davinci_spi
== NULL
) {
860 r
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
866 davinci_spi
->pbase
= r
->start
;
867 davinci_spi
->region_size
= resource_size(r
);
868 davinci_spi
->pdata
= pdata
;
870 mem
= request_mem_region(r
->start
, davinci_spi
->region_size
,
877 davinci_spi
->base
= ioremap(r
->start
, davinci_spi
->region_size
);
878 if (davinci_spi
->base
== NULL
) {
883 /* Allocate tmp_buf for tx_buf */
884 davinci_spi
->tmp_buf
= kzalloc(SPI_BUFSIZ
, GFP_KERNEL
);
885 if (davinci_spi
->tmp_buf
== NULL
) {
890 davinci_spi
->bitbang
.master
= spi_master_get(master
);
891 if (davinci_spi
->bitbang
.master
== NULL
) {
896 davinci_spi
->clk
= clk_get(&pdev
->dev
, NULL
);
897 if (IS_ERR(davinci_spi
->clk
)) {
901 clk_enable(davinci_spi
->clk
);
903 master
->bus_num
= pdev
->id
;
904 master
->num_chipselect
= pdata
->num_chipselect
;
905 master
->setup
= davinci_spi_setup
;
906 master
->cleanup
= davinci_spi_cleanup
;
908 davinci_spi
->bitbang
.chipselect
= davinci_spi_chipselect
;
909 davinci_spi
->bitbang
.setup_transfer
= davinci_spi_setup_transfer
;
911 davinci_spi
->version
= pdata
->version
;
912 use_dma
= pdata
->use_dma
;
914 davinci_spi
->bitbang
.flags
= SPI_NO_CS
| SPI_LSB_FIRST
| SPI_LOOP
;
915 if (davinci_spi
->version
== SPI_VERSION_2
)
916 davinci_spi
->bitbang
.flags
|= SPI_READY
;
919 r
= platform_get_resource(pdev
, IORESOURCE_DMA
, 0);
921 dma_rx_chan
= r
->start
;
922 r
= platform_get_resource(pdev
, IORESOURCE_DMA
, 1);
924 dma_tx_chan
= r
->start
;
925 r
= platform_get_resource(pdev
, IORESOURCE_DMA
, 2);
927 dma_eventq
= r
->start
;
931 dma_rx_chan
== SPI_NO_RESOURCE
||
932 dma_tx_chan
== SPI_NO_RESOURCE
||
933 dma_eventq
== SPI_NO_RESOURCE
) {
934 davinci_spi
->bitbang
.txrx_bufs
= davinci_spi_bufs_pio
;
937 davinci_spi
->bitbang
.txrx_bufs
= davinci_spi_bufs_dma
;
938 davinci_spi
->dma_channels
= kzalloc(master
->num_chipselect
939 * sizeof(struct davinci_spi_dma
), GFP_KERNEL
);
940 if (davinci_spi
->dma_channels
== NULL
) {
945 for (i
= 0; i
< master
->num_chipselect
; i
++) {
946 davinci_spi
->dma_channels
[i
].dma_rx_channel
= -1;
947 davinci_spi
->dma_channels
[i
].dma_rx_sync_dev
=
949 davinci_spi
->dma_channels
[i
].dma_tx_channel
= -1;
950 davinci_spi
->dma_channels
[i
].dma_tx_sync_dev
=
952 davinci_spi
->dma_channels
[i
].eventq
= dma_eventq
;
954 dev_info(&pdev
->dev
, "DaVinci SPI driver in EDMA mode\n"
955 "Using RX channel = %d , TX channel = %d and "
956 "event queue = %d", dma_rx_chan
, dma_tx_chan
,
960 davinci_spi
->get_rx
= davinci_spi_rx_buf_u8
;
961 davinci_spi
->get_tx
= davinci_spi_tx_buf_u8
;
963 /* Reset In/OUT SPI module */
964 iowrite32(0, davinci_spi
->base
+ SPIGCR0
);
966 iowrite32(1, davinci_spi
->base
+ SPIGCR0
);
968 /* initialize chip selects */
969 if (pdata
->chip_sel
) {
970 for (i
= 0; i
< pdata
->num_chipselect
; i
++) {
971 if (pdata
->chip_sel
[i
] != SPI_INTERN_CS
)
972 gpio_direction_output(pdata
->chip_sel
[i
], 1);
977 if (davinci_spi
->pdata
->clk_internal
)
978 set_io_bits(davinci_spi
->base
+ SPIGCR1
,
979 SPIGCR1_CLKMOD_MASK
);
981 clear_io_bits(davinci_spi
->base
+ SPIGCR1
,
982 SPIGCR1_CLKMOD_MASK
);
984 iowrite32(CS_DEFAULT
, davinci_spi
->base
+ SPIDEF
);
986 /* master mode default */
987 set_io_bits(davinci_spi
->base
+ SPIGCR1
, SPIGCR1_MASTER_MASK
);
989 ret
= spi_bitbang_start(&davinci_spi
->bitbang
);
993 dev_info(&pdev
->dev
, "Controller at 0x%p\n", davinci_spi
->base
);
998 clk_disable(davinci_spi
->clk
);
999 clk_put(davinci_spi
->clk
);
1001 spi_master_put(master
);
1003 kfree(davinci_spi
->tmp_buf
);
1005 iounmap(davinci_spi
->base
);
1007 release_mem_region(davinci_spi
->pbase
, davinci_spi
->region_size
);
1015 * davinci_spi_remove - remove function for SPI Master Controller
1016 * @pdev: platform_device structure which contains plateform specific data
1018 * This function will do the reverse action of davinci_spi_probe function
1019 * It will free the IRQ and SPI controller's memory region.
1020 * It will also call spi_bitbang_stop to destroy the work queue which was
1021 * created by spi_bitbang_start.
1023 static int __exit
davinci_spi_remove(struct platform_device
*pdev
)
1025 struct davinci_spi
*davinci_spi
;
1026 struct spi_master
*master
;
1028 master
= dev_get_drvdata(&pdev
->dev
);
1029 davinci_spi
= spi_master_get_devdata(master
);
1031 spi_bitbang_stop(&davinci_spi
->bitbang
);
1033 clk_disable(davinci_spi
->clk
);
1034 clk_put(davinci_spi
->clk
);
1035 spi_master_put(master
);
1036 kfree(davinci_spi
->tmp_buf
);
1037 iounmap(davinci_spi
->base
);
1038 release_mem_region(davinci_spi
->pbase
, davinci_spi
->region_size
);
1043 static struct platform_driver davinci_spi_driver
= {
1044 .driver
.name
= "spi_davinci",
1045 .remove
= __exit_p(davinci_spi_remove
),
1048 static int __init
davinci_spi_init(void)
1050 return platform_driver_probe(&davinci_spi_driver
, davinci_spi_probe
);
1052 module_init(davinci_spi_init
);
1054 static void __exit
davinci_spi_exit(void)
1056 platform_driver_unregister(&davinci_spi_driver
);
1058 module_exit(davinci_spi_exit
);
1060 MODULE_DESCRIPTION("TI DaVinci SPI Master Controller Driver");
1061 MODULE_LICENSE("GPL");