/*
 * File:         drivers/spi/bfin5xx_spi.c
 * Author:       Luke Yang (Analog Devices Inc.)
 * Created:      March 10th, 2006
 * Description:  SPI controller driver for Blackfin 5xx
 * Bugs:         Enter bugs at http://blackfin.uclinux.org/
 *
 * Modified:
 *   March 10, 2006   bfin5xx_spi.c created. (Luke Yang)
 *   August 7, 2006   added full duplex mode (Axel Weiss & Luke Yang)
 *
 * Copyright 2004-2006 Analog Devices Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.
 * If not, write to the Free Software Foundation,
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/ioport.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/spi/spi.h>
#include <linux/workqueue.h>
#include <linux/delay.h>

#include <asm/dma.h>
#include <asm/delay.h>
#include <asm/bfin5xx_spi.h>

MODULE_AUTHOR("Luke Yang");
MODULE_DESCRIPTION("Blackfin 5xx SPI Controller");
MODULE_LICENSE("GPL");

#define IS_DMA_ALIGNED(x)	(((u32)(x) & 0x07) == 0)
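
/*
 * Note: the mask above tests for 8-byte alignment; presumably this is
 * meant for deciding whether a transfer buffer can be handed straight
 * to the DMA engine or must fall back to PIO.
 */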

#define DEFINE_SPI_REG(reg, off) \
static inline u16 read_##reg(void) \
	{ return *(volatile unsigned short *)(SPI0_REGBASE + off); } \
static inline void write_##reg(u16 v) \
	{ *(volatile unsigned short *)(SPI0_REGBASE + off) = v; }

DEFINE_SPI_REG(CTRL, 0x00)
DEFINE_SPI_REG(FLAG, 0x04)
DEFINE_SPI_REG(STAT, 0x08)
DEFINE_SPI_REG(TDBR, 0x0C)
DEFINE_SPI_REG(RDBR, 0x10)
DEFINE_SPI_REG(BAUD, 0x14)
DEFINE_SPI_REG(SHAW, 0x18)
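
/*
 * The macro expansions above provide read_CTRL()/write_CTRL(),
 * read_FLAG()/write_FLAG(), read_STAT()/write_STAT(), and so on:
 * simple memory-mapped accessors for the SPI controller registers
 * located at SPI0_REGBASE plus the given offset.
 */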

#define START_STATE	((void *)0)
#define RUNNING_STATE	((void *)1)
#define DONE_STATE	((void *)2)
#define ERROR_STATE	((void *)-1)

#define QUEUE_RUNNING	0
#define QUEUE_STOPPED	1

struct driver_data {
	/* Driver model hookup */
	struct platform_device *pdev;

	/* SPI framework hookup */
	struct spi_master *master;

	struct bfin5xx_spi_master *master_info;

	/* Driver message queue */
	struct workqueue_struct *workqueue;
	struct work_struct pump_messages;
	spinlock_t lock;
	struct list_head queue;
	int busy;
	int run;

	/* Message Transfer pump */
	struct tasklet_struct pump_transfers;

	/* Current message transfer state info */
	struct spi_message *cur_msg;
	struct spi_transfer *cur_transfer;
	struct chip_data *cur_chip;

	/* Transfer state used by the PIO and DMA pump routines below */
	size_t len_in_bytes;
	size_t len;
	void *tx;
	void *tx_end;
	void *rx;
	void *rx_end;
	dma_addr_t rx_dma;
	dma_addr_t tx_dma;
	u8 n_bytes;
	void (*write) (struct driver_data *);
	void (*read) (struct driver_data *);
	void (*duplex) (struct driver_data *);
};

struct chip_data {
	u16 ctl_reg;
	u16 baud;
	u16 flag;

	u8 chip_select_num;
	u8 n_bytes;
	u8 width;		/* 0 or 1 */
	u8 enable_dma;
	u8 bits_per_word;	/* 8 or 16 */
	u8 cs_change_per_word;
	u16 cs_chg_udelay;
	void (*write) (struct driver_data *);
	void (*read) (struct driver_data *);
	void (*duplex) (struct driver_data *);
};

static void bfin_spi_enable(struct driver_data *drv_data)
{
	u16 cr;

	cr = read_CTRL();
	write_CTRL(cr | BIT_CTL_ENABLE);
}

static void bfin_spi_disable(struct driver_data *drv_data)
{
	u16 cr;

	cr = read_CTRL();
	write_CTRL(cr & (~BIT_CTL_ENABLE));
}

/* Calculate the SPI_BAUD register value based on input HZ */
static u16 hz_to_spi_baud(u32 speed_hz)
{
	u_long sclk = get_sclk();
	u16 spi_baud = (sclk / (2 * speed_hz));

	if ((sclk % (2 * speed_hz)) > 0)
		spi_baud++;

	return spi_baud;
}
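
/*
 * The controller generates SCK = SCLK / (2 * SPI_BAUD), so rounding the
 * quotient up keeps the actual clock at or below the requested rate.
 * For example, with SCLK = 100 MHz and speed_hz = 8 MHz the division
 * gives 6 with a remainder, so SPI_BAUD becomes 7 and SCK ends up at
 * roughly 7.14 MHz rather than overshooting to 8.33 MHz.
 */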

static int flush(struct driver_data *drv_data)
{
	unsigned long limit = loops_per_jiffy << 1;

	/* wait for stop and clear stat */
	while (!(read_STAT() & BIT_STAT_SPIF) && limit--)
		continue;

	write_STAT(BIT_STAT_CLR);

	return limit;
}

/* stop controller and re-config current chip */
static void restore_state(struct driver_data *drv_data)
{
	struct chip_data *chip = drv_data->cur_chip;

	/* Clear status and disable clock */
	write_STAT(BIT_STAT_CLR);
	bfin_spi_disable(drv_data);
	dev_dbg(&drv_data->pdev->dev, "restoring spi ctl state\n");

#if defined(CONFIG_BF534) || defined(CONFIG_BF536) || defined(CONFIG_BF537)
	dev_dbg(&drv_data->pdev->dev,
		"chip select number is %d\n", chip->chip_select_num);

	switch (chip->chip_select_num) {
	case 1:
		bfin_write_PORTF_FER(bfin_read_PORTF_FER() | 0x3c00);
		break;

	case 2:
	case 3:
		bfin_write_PORT_MUX(bfin_read_PORT_MUX() | PJSE_SPI);
		bfin_write_PORTF_FER(bfin_read_PORTF_FER() | 0x3800);
		break;

	case 4:
		bfin_write_PORT_MUX(bfin_read_PORT_MUX() | PFS4E_SPI);
		bfin_write_PORTF_FER(bfin_read_PORTF_FER() | 0x3840);
		break;

	case 5:
		bfin_write_PORT_MUX(bfin_read_PORT_MUX() | PFS5E_SPI);
		bfin_write_PORTF_FER(bfin_read_PORTF_FER() | 0x3820);
		break;

	case 6:
		bfin_write_PORT_MUX(bfin_read_PORT_MUX() | PFS6E_SPI);
		bfin_write_PORTF_FER(bfin_read_PORTF_FER() | 0x3810);
		break;

	case 7:
		bfin_write_PORT_MUX(bfin_read_PORT_MUX() | PJCE_SPI);
		bfin_write_PORTF_FER(bfin_read_PORTF_FER() | 0x3800);
		break;
	}
#endif

	/* Load the registers */
	write_CTRL(chip->ctl_reg);
	write_BAUD(chip->baud);
	write_FLAG(chip->flag);
}
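
/*
 * Note: on the BF534/BF536/BF537 parts handled above, the PORT_MUX and
 * PORTF_FER writes route the SPI signals and the chosen SSEL line onto
 * the port pins; the FER masks are assumed to cover SCK/MISO/MOSI plus
 * the slave-select pin belonging to that chip select.
 */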

/* used to kick off transfer in rx mode */
static unsigned short dummy_read(void)
{
	unsigned short tmp;

	tmp = read_RDBR();

	return tmp;
}

static void null_writer(struct driver_data *drv_data)
{
	u8 n_bytes = drv_data->n_bytes;

	while (drv_data->tx < drv_data->tx_end) {
		write_TDBR(0);
		while ((read_STAT() & BIT_STAT_TXS))
			continue;
		drv_data->tx += n_bytes;
	}
}

static void null_reader(struct driver_data *drv_data)
{
	u8 n_bytes = drv_data->n_bytes;

	dummy_read();

	while (drv_data->rx < drv_data->rx_end) {
		while (!(read_STAT() & BIT_STAT_RXS))
			continue;
		dummy_read();
		drv_data->rx += n_bytes;
	}
}

static void u8_writer(struct driver_data *drv_data)
{
	dev_dbg(&drv_data->pdev->dev,
		"cr8-s is 0x%x\n", read_STAT());
	while (drv_data->tx < drv_data->tx_end) {
		write_TDBR(*(u8 *) (drv_data->tx));
		while (read_STAT() & BIT_STAT_TXS)
			continue;
		++drv_data->tx;
	}

	/* poll for SPI completion before returning */
	while (!(read_STAT() & BIT_STAT_SPIF))
		continue;
}

static void u8_cs_chg_writer(struct driver_data *drv_data)
{
	struct chip_data *chip = drv_data->cur_chip;

	while (drv_data->tx < drv_data->tx_end) {
		write_FLAG(chip->flag);

		write_TDBR(*(u8 *) (drv_data->tx));
		while (read_STAT() & BIT_STAT_TXS)
			continue;
		while (!(read_STAT() & BIT_STAT_SPIF))
			continue;
		write_FLAG(0xFF00 | chip->flag);

		if (chip->cs_chg_udelay)
			udelay(chip->cs_chg_udelay);
		++drv_data->tx;
	}
}
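
/*
 * The *_cs_chg_* variants (selected in setup() when cs_change_per_word
 * is set) re-assert the chip select via write_FLAG() before each word,
 * deassert it again afterwards with the 0xFF00 pattern, and optionally
 * insert a udelay() between words.
 */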

static void u8_reader(struct driver_data *drv_data)
{
	dev_dbg(&drv_data->pdev->dev,
		"cr-8 is 0x%x\n", read_STAT());

	/* clear TDBR buffer before read (else it will be shifted out) */
	write_TDBR(0xFFFF);

	dummy_read();

	while (drv_data->rx < drv_data->rx_end - 1) {
		while (!(read_STAT() & BIT_STAT_RXS))
			continue;
		*(u8 *) (drv_data->rx) = read_RDBR();
		++drv_data->rx;
	}

	while (!(read_STAT() & BIT_STAT_RXS))
		continue;
	*(u8 *) (drv_data->rx) = read_SHAW();
	++drv_data->rx;
}

static void u8_cs_chg_reader(struct driver_data *drv_data)
{
	struct chip_data *chip = drv_data->cur_chip;

	while (drv_data->rx < drv_data->rx_end) {
		write_FLAG(chip->flag);

		read_RDBR();	/* kick off */
		while (!(read_STAT() & BIT_STAT_RXS))
			continue;
		while (!(read_STAT() & BIT_STAT_SPIF))
			continue;
		*(u8 *) (drv_data->rx) = read_SHAW();
		write_FLAG(0xFF00 | chip->flag);

		if (chip->cs_chg_udelay)
			udelay(chip->cs_chg_udelay);
		++drv_data->rx;
	}
}
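
/*
 * In receive mode a read of RDBR is what starts the next word shifting
 * (hence the "kick off" reads above), so the final word is taken from
 * the SHAW shadow register instead, which returns the received data
 * without triggering another transfer.
 */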

static void u8_duplex(struct driver_data *drv_data)
{
	/* in duplex mode, clk is triggered by writing of TDBR */
	while (drv_data->rx < drv_data->rx_end) {
		write_TDBR(*(u8 *) (drv_data->tx));
		while (!(read_STAT() & BIT_STAT_SPIF))
			continue;
		while (!(read_STAT() & BIT_STAT_RXS))
			continue;
		*(u8 *) (drv_data->rx) = read_RDBR();
		++drv_data->rx;
		++drv_data->tx;
	}
}

static void u8_cs_chg_duplex(struct driver_data *drv_data)
{
	struct chip_data *chip = drv_data->cur_chip;

	while (drv_data->rx < drv_data->rx_end) {
		write_FLAG(chip->flag);

		write_TDBR(*(u8 *) (drv_data->tx));
		while (!(read_STAT() & BIT_STAT_SPIF))
			continue;
		while (!(read_STAT() & BIT_STAT_RXS))
			continue;
		*(u8 *) (drv_data->rx) = read_RDBR();
		write_FLAG(0xFF00 | chip->flag);

		if (chip->cs_chg_udelay)
			udelay(chip->cs_chg_udelay);
		++drv_data->rx;
		++drv_data->tx;
	}
}

static void u16_writer(struct driver_data *drv_data)
{
	dev_dbg(&drv_data->pdev->dev,
		"cr16 is 0x%x\n", read_STAT());

	while (drv_data->tx < drv_data->tx_end) {
		write_TDBR(*(u16 *) (drv_data->tx));
		while ((read_STAT() & BIT_STAT_TXS))
			continue;
		drv_data->tx += 2;
	}

	/* poll for SPI completion before returning */
	while (!(read_STAT() & BIT_STAT_SPIF))
		continue;
}

static void u16_cs_chg_writer(struct driver_data *drv_data)
{
	struct chip_data *chip = drv_data->cur_chip;

	while (drv_data->tx < drv_data->tx_end) {
		write_FLAG(chip->flag);

		write_TDBR(*(u16 *) (drv_data->tx));
		while ((read_STAT() & BIT_STAT_TXS))
			continue;
		while (!(read_STAT() & BIT_STAT_SPIF))
			continue;
		write_FLAG(0xFF00 | chip->flag);

		if (chip->cs_chg_udelay)
			udelay(chip->cs_chg_udelay);
		drv_data->tx += 2;
	}
}

static void u16_reader(struct driver_data *drv_data)
{
	dev_dbg(&drv_data->pdev->dev,
		"cr-16 is 0x%x\n", read_STAT());

	dummy_read();

	while (drv_data->rx < (drv_data->rx_end - 2)) {
		while (!(read_STAT() & BIT_STAT_RXS))
			continue;
		*(u16 *) (drv_data->rx) = read_RDBR();
		drv_data->rx += 2;
	}

	while (!(read_STAT() & BIT_STAT_RXS))
		continue;
	*(u16 *) (drv_data->rx) = read_SHAW();
	drv_data->rx += 2;
}

static void u16_cs_chg_reader(struct driver_data *drv_data)
{
	struct chip_data *chip = drv_data->cur_chip;

	while (drv_data->rx < drv_data->rx_end) {
		write_FLAG(chip->flag);

		read_RDBR();	/* kick off */
		while (!(read_STAT() & BIT_STAT_RXS))
			continue;
		while (!(read_STAT() & BIT_STAT_SPIF))
			continue;
		*(u16 *) (drv_data->rx) = read_SHAW();
		write_FLAG(0xFF00 | chip->flag);

		if (chip->cs_chg_udelay)
			udelay(chip->cs_chg_udelay);
		drv_data->rx += 2;
	}
}

static void u16_duplex(struct driver_data *drv_data)
{
	/* in duplex mode, clk is triggered by writing of TDBR */
	while (drv_data->tx < drv_data->tx_end) {
		write_TDBR(*(u16 *) (drv_data->tx));
		while (!(read_STAT() & BIT_STAT_SPIF))
			continue;
		while (!(read_STAT() & BIT_STAT_RXS))
			continue;
		*(u16 *) (drv_data->rx) = read_RDBR();
		drv_data->rx += 2;
		drv_data->tx += 2;
	}
}

static void u16_cs_chg_duplex(struct driver_data *drv_data)
{
	struct chip_data *chip = drv_data->cur_chip;

	while (drv_data->tx < drv_data->tx_end) {
		write_FLAG(chip->flag);

		write_TDBR(*(u16 *) (drv_data->tx));
		while (!(read_STAT() & BIT_STAT_SPIF))
			continue;
		while (!(read_STAT() & BIT_STAT_RXS))
			continue;
		*(u16 *) (drv_data->rx) = read_RDBR();
		write_FLAG(0xFF00 | chip->flag);

		if (chip->cs_chg_udelay)
			udelay(chip->cs_chg_udelay);
		drv_data->rx += 2;
		drv_data->tx += 2;
	}
}

/* test if there is more transfer to be done */
static void *next_transfer(struct driver_data *drv_data)
{
	struct spi_message *msg = drv_data->cur_msg;
	struct spi_transfer *trans = drv_data->cur_transfer;

	/* Move to next transfer */
	if (trans->transfer_list.next != &msg->transfers) {
		drv_data->cur_transfer =
		    list_entry(trans->transfer_list.next,
			       struct spi_transfer, transfer_list);
		return RUNNING_STATE;
	} else
		return DONE_STATE;
}

/*
 * caller already set message->status;
 * dma and pio irqs are blocked, give finished message back
 */
static void giveback(struct driver_data *drv_data)
{
	struct spi_transfer *last_transfer;
	unsigned long flags;
	struct spi_message *msg;

	spin_lock_irqsave(&drv_data->lock, flags);
	msg = drv_data->cur_msg;
	drv_data->cur_msg = NULL;
	drv_data->cur_transfer = NULL;
	drv_data->cur_chip = NULL;
	queue_work(drv_data->workqueue, &drv_data->pump_messages);
	spin_unlock_irqrestore(&drv_data->lock, flags);

	last_transfer = list_entry(msg->transfers.prev,
				   struct spi_transfer, transfer_list);

	/* disable chip select signal. And not stop spi in autobuffer mode */
	if (drv_data->tx_dma != 0xFFFF) {
		write_FLAG(0xFF00);
		bfin_spi_disable(drv_data);
	}

	msg->complete(msg->context);
}

static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
	struct driver_data *drv_data = (struct driver_data *)dev_id;
	struct spi_message *msg = drv_data->cur_msg;

	dev_dbg(&drv_data->pdev->dev, "in dma_irq_handler\n");
	clear_dma_irqstat(CH_SPI);

	/* Wait for DMA to complete */
	while (get_dma_curr_irqstat(CH_SPI) & DMA_RUN)
		continue;

	/*
	 * wait for the last transaction shifted out.  HRM states:
	 * at this point there may still be data in the SPI DMA FIFO waiting
	 * to be transmitted ... software needs to poll TXS in the SPI_STAT
	 * register until it goes low for 2 successive reads
	 */
	if (drv_data->tx != NULL) {
		while ((bfin_read_SPI_STAT() & TXS) ||
		       (bfin_read_SPI_STAT() & TXS))
			continue;
	}

	while (!(bfin_read_SPI_STAT() & SPIF))
		continue;

	bfin_spi_disable(drv_data);

	msg->actual_length += drv_data->len_in_bytes;

	/* Move to next transfer */
	msg->state = next_transfer(drv_data);

	/* Schedule transfer tasklet */
	tasklet_schedule(&drv_data->pump_transfers);

	/* free the irq handler before next transfer */
	dev_dbg(&drv_data->pdev->dev,
		"disable dma channel irq%d\n", CH_SPI);
	dma_disable_irq(CH_SPI);

	return IRQ_HANDLED;
}

static void pump_transfers(unsigned long data)
{
	struct driver_data *drv_data = (struct driver_data *)data;
	struct spi_message *message = NULL;
	struct spi_transfer *transfer = NULL;
	struct spi_transfer *previous = NULL;
	struct chip_data *chip = NULL;
	u8 width;
	u16 cr, dma_width, dma_config;
	u32 tranf_success = 1;

	/* Get current state information */
	message = drv_data->cur_msg;
	transfer = drv_data->cur_transfer;
	chip = drv_data->cur_chip;

	/*
	 * if msg is error or done, report it back using complete() callback
	 */

	/* Handle for abort */
	if (message->state == ERROR_STATE) {
		message->status = -EIO;
		giveback(drv_data);
		return;
	}

	/* Handle end of message */
	if (message->state == DONE_STATE) {
		message->status = 0;
		giveback(drv_data);
		return;
	}

	/* Delay if requested at end of transfer */
	if (message->state == RUNNING_STATE) {
		previous = list_entry(transfer->transfer_list.prev,
				      struct spi_transfer, transfer_list);
		if (previous->delay_usecs)
			udelay(previous->delay_usecs);
	}

	/* Setup the transfer state based on the type of transfer */
	if (flush(drv_data) == 0) {
		dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n");
		message->status = -EIO;
		giveback(drv_data);
		return;
	}

	if (transfer->tx_buf != NULL) {
		drv_data->tx = (void *)transfer->tx_buf;
		drv_data->tx_end = drv_data->tx + transfer->len;
		dev_dbg(&drv_data->pdev->dev, "tx_buf is %p, tx_end is %p\n",
			transfer->tx_buf, drv_data->tx_end);
	} else {
		drv_data->tx = NULL;
	}

	if (transfer->rx_buf != NULL) {
		drv_data->rx = transfer->rx_buf;
		drv_data->rx_end = drv_data->rx + transfer->len;
		dev_dbg(&drv_data->pdev->dev, "rx_buf is %p, rx_end is %p\n",
			transfer->rx_buf, drv_data->rx_end);
	} else {
		drv_data->rx = NULL;
	}

	drv_data->rx_dma = transfer->rx_dma;
	drv_data->tx_dma = transfer->tx_dma;
	drv_data->len_in_bytes = transfer->len;

	width = chip->width;
	if (width == CFG_SPI_WORDSIZE16) {
		drv_data->len = (transfer->len) >> 1;
	} else {
		drv_data->len = transfer->len;
	}
	drv_data->write = drv_data->tx ? chip->write : null_writer;
	drv_data->read = drv_data->rx ? chip->read : null_reader;
	drv_data->duplex = chip->duplex ? chip->duplex : null_writer;
	dev_dbg(&drv_data->pdev->dev,
		"transfer: drv_data->write is %p, chip->write is %p, null_wr is %p\n",
		drv_data->write, chip->write, null_writer);

	/* speed and width have been set up per message */
	message->state = RUNNING_STATE;

	/* restore spi status for each spi transfer */
	if (transfer->speed_hz) {
		write_BAUD(hz_to_spi_baud(transfer->speed_hz));
	} else {
		write_BAUD(chip->baud);
	}
	write_FLAG(chip->flag);

	dev_dbg(&drv_data->pdev->dev,
		"now pumping a transfer: width is %d, len is %d\n",
		width, transfer->len);

	/*
	 * Try to map dma buffer and do a dma transfer if
	 * successful, use different ways to r/w according to
	 * drv_data->cur_chip->enable_dma
	 */
	if (drv_data->cur_chip->enable_dma && drv_data->len > 6) {

		write_STAT(BIT_STAT_CLR);
		clear_dma_irqstat(CH_SPI);
		bfin_spi_disable(drv_data);

		/* config dma channel */
		dev_dbg(&drv_data->pdev->dev, "doing dma transfer\n");
		if (width == CFG_SPI_WORDSIZE16) {
			set_dma_x_count(CH_SPI, drv_data->len);
			set_dma_x_modify(CH_SPI, 2);
			dma_width = WDSIZE_16;
		} else {
			set_dma_x_count(CH_SPI, drv_data->len);
			set_dma_x_modify(CH_SPI, 1);
			dma_width = WDSIZE_8;
		}

		/* set transfer width and direction, and enable spi */
		cr = (read_CTRL() & (~BIT_CTL_TIMOD));

		/* dirty hack for autobuffer DMA mode */
		if (drv_data->tx_dma == 0xFFFF) {
			dev_dbg(&drv_data->pdev->dev,
				"doing autobuffer DMA out.\n");

			/* no irq in autobuffer mode */
			dma_config =
			    (DMAFLOW_AUTO | RESTART | dma_width | DI_EN);
			set_dma_config(CH_SPI, dma_config);
			set_dma_start_addr(CH_SPI,
					   (unsigned long)drv_data->tx);
			enable_dma(CH_SPI);
			write_CTRL(cr | CFG_SPI_DMAWRITE | (width << 8) |
				   (CFG_SPI_ENABLE << 14));

			/* just return here, there can only be one transfer in this mode */
			return;
		}
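
		/*
		 * A tx_dma value of 0xFFFF is used as a sentinel by callers
		 * that want continuous autobuffer output: the DMA channel
		 * loops over the buffer with no completion handling, and
		 * giveback() skips disabling the SPI for exactly this case.
		 */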

		/* In dma mode, rx or tx must be NULL in one transfer */
		if (drv_data->rx != NULL) {
			/* set transfer mode, and enable SPI */
			dev_dbg(&drv_data->pdev->dev, "doing DMA in.\n");

			/* disable SPI before write to TDBR */
			write_CTRL(cr & ~BIT_CTL_ENABLE);

			/* clear tx reg so former data is not shifted out */
			write_TDBR(0xFF);

			set_dma_x_count(CH_SPI, drv_data->len);

			/* start dma */
			dma_enable_irq(CH_SPI);
			dma_config = (WNR | RESTART | dma_width | DI_EN);
			set_dma_config(CH_SPI, dma_config);
			set_dma_start_addr(CH_SPI,
					   (unsigned long)drv_data->rx);
			enable_dma(CH_SPI);

			cr |= CFG_SPI_DMAREAD | (width << 8) |
			      (CFG_SPI_ENABLE << 14);
			/* set transfer mode, and enable SPI */
			write_CTRL(cr);
		} else if (drv_data->tx != NULL) {
			dev_dbg(&drv_data->pdev->dev, "doing DMA out.\n");

			/* start dma */
			dma_enable_irq(CH_SPI);
			dma_config = (RESTART | dma_width | DI_EN);
			set_dma_config(CH_SPI, dma_config);
			set_dma_start_addr(CH_SPI,
					   (unsigned long)drv_data->tx);
			enable_dma(CH_SPI);

			write_CTRL(cr | CFG_SPI_DMAWRITE | (width << 8) |
				   (CFG_SPI_ENABLE << 14));
		}
	} else {
		/* IO mode write then read */
		dev_dbg(&drv_data->pdev->dev, "doing IO transfer\n");

		write_STAT(BIT_STAT_CLR);

		if (drv_data->tx != NULL && drv_data->rx != NULL) {
			/* full duplex mode */
			BUG_ON((drv_data->tx_end - drv_data->tx) !=
			       (drv_data->rx_end - drv_data->rx));
			cr = (read_CTRL() & (~BIT_CTL_TIMOD));
			cr |= CFG_SPI_WRITE | (width << 8) |
			      (CFG_SPI_ENABLE << 14);
			dev_dbg(&drv_data->pdev->dev,
				"IO duplex: cr is 0x%x\n", cr);

			write_CTRL(cr);

			drv_data->duplex(drv_data);

			if (drv_data->tx != drv_data->tx_end)
				tranf_success = 0;
		} else if (drv_data->tx != NULL) {
			/* write only half duplex */
			cr = (read_CTRL() & (~BIT_CTL_TIMOD));
			cr |= CFG_SPI_WRITE | (width << 8) |
			      (CFG_SPI_ENABLE << 14);
			dev_dbg(&drv_data->pdev->dev,
				"IO write: cr is 0x%x\n", cr);

			write_CTRL(cr);

			drv_data->write(drv_data);

			if (drv_data->tx != drv_data->tx_end)
				tranf_success = 0;
		} else if (drv_data->rx != NULL) {
			/* read only half duplex */
			cr = (read_CTRL() & (~BIT_CTL_TIMOD));
			cr |= CFG_SPI_READ | (width << 8) |
			      (CFG_SPI_ENABLE << 14);
			dev_dbg(&drv_data->pdev->dev,
				"IO read: cr is 0x%x\n", cr);

			write_CTRL(cr);

			drv_data->read(drv_data);
			if (drv_data->rx != drv_data->rx_end)
				tranf_success = 0;
		}

		if (!tranf_success) {
			dev_dbg(&drv_data->pdev->dev,
				"IO write error!\n");
			message->state = ERROR_STATE;
		} else {
			/* Update total bytes transferred */
			message->actual_length += drv_data->len;

			/* Move to next transfer of this msg */
			message->state = next_transfer(drv_data);
		}

		/* Schedule next transfer tasklet */
		tasklet_schedule(&drv_data->pump_transfers);
	}
}
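
/*
 * Queue handling: transfer() appends messages to drv_data->queue, the
 * pump_messages work item pops the head and programs the controller via
 * restore_state(), and the pump_transfers tasklet then walks the message
 * through START_STATE/RUNNING_STATE until next_transfer() reports
 * DONE_STATE (or an error occurs), at which point giveback() completes
 * the message and re-queues the work item.
 */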

/* pop a msg from queue and kick off real transfer */
static void pump_messages(struct work_struct *work)
{
	struct driver_data *drv_data =
	    container_of(work, struct driver_data, pump_messages);
	unsigned long flags;

	/* Lock queue and check for queue work */
	spin_lock_irqsave(&drv_data->lock, flags);
	if (list_empty(&drv_data->queue) || drv_data->run == QUEUE_STOPPED) {
		/* pumper kicked off but no work to do */
		drv_data->busy = 0;
		spin_unlock_irqrestore(&drv_data->lock, flags);
		return;
	}

	/* Make sure we are not already running a message */
	if (drv_data->cur_msg) {
		spin_unlock_irqrestore(&drv_data->lock, flags);
		return;
	}

	/* Extract head of queue */
	drv_data->cur_msg = list_entry(drv_data->queue.next,
				       struct spi_message, queue);
	list_del_init(&drv_data->cur_msg->queue);

	/* Initial message state */
	drv_data->cur_msg->state = START_STATE;
	drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
					    struct spi_transfer, transfer_list);

	/* Setup the SSP using the per chip configuration */
	drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);
	restore_state(drv_data);
	dev_dbg(&drv_data->pdev->dev,
		"got a message to pump, state is set to: baud %d, flag 0x%x, ctl 0x%x\n",
		drv_data->cur_chip->baud, drv_data->cur_chip->flag,
		drv_data->cur_chip->ctl_reg);
	dev_dbg(&drv_data->pdev->dev,
		"the first transfer len is %d\n",
		drv_data->cur_transfer->len);

	/* Mark as busy and launch transfers */
	tasklet_schedule(&drv_data->pump_transfers);

	drv_data->busy = 1;
	spin_unlock_irqrestore(&drv_data->lock, flags);
}

/*
 * got a msg to transfer, queue it in drv_data->queue.
 * And kick off message pumper
 */
static int transfer(struct spi_device *spi, struct spi_message *msg)
{
	struct driver_data *drv_data = spi_master_get_devdata(spi->master);
	unsigned long flags;

	spin_lock_irqsave(&drv_data->lock, flags);

	if (drv_data->run == QUEUE_STOPPED) {
		spin_unlock_irqrestore(&drv_data->lock, flags);
		return -ESHUTDOWN;
	}

	msg->actual_length = 0;
	msg->status = -EINPROGRESS;
	msg->state = START_STATE;

	dev_dbg(&spi->dev, "adding a msg in transfer()\n");
	list_add_tail(&msg->queue, &drv_data->queue);

	if (drv_data->run == QUEUE_RUNNING && !drv_data->busy)
		queue_work(drv_data->workqueue, &drv_data->pump_messages);

	spin_unlock_irqrestore(&drv_data->lock, flags);

	return 0;
}

/* the SPI DMA channel is shared by every chip select, request it once */
static int dma_requested;

/* first setup for new devices */
static int setup(struct spi_device *spi)
{
	struct bfin5xx_spi_chip *chip_info = NULL;
	struct chip_data *chip;
	struct driver_data *drv_data = spi_master_get_devdata(spi->master);
	u8 spi_flg;

	/* Abort device setup if requested features are not supported */
	if (spi->mode & ~(SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST)) {
		dev_err(&spi->dev, "requested mode not fully supported\n");
		return -EINVAL;
	}

	/* Zero (the default) here means 8 bits */
	if (!spi->bits_per_word)
		spi->bits_per_word = 8;

	if (spi->bits_per_word != 8 && spi->bits_per_word != 16)
		return -EINVAL;

	/* Only alloc (or use chip_info) on first setup */
	chip = spi_get_ctldata(spi);
	if (chip == NULL) {
		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;

		chip->enable_dma = 0;
		chip_info = spi->controller_data;
	}

	/* chip_info isn't always needed */
	if (chip_info) {
		chip->enable_dma = chip_info->enable_dma != 0
		    && drv_data->master_info->enable_dma;
		chip->ctl_reg = chip_info->ctl_reg;
		chip->bits_per_word = chip_info->bits_per_word;
		chip->cs_change_per_word = chip_info->cs_change_per_word;
		chip->cs_chg_udelay = chip_info->cs_chg_udelay;
	}

	/* translate common spi framework into our register */
	if (spi->mode & SPI_CPOL)
		chip->ctl_reg |= CPOL;
	if (spi->mode & SPI_CPHA)
		chip->ctl_reg |= CPHA;
	if (spi->mode & SPI_LSB_FIRST)
		chip->ctl_reg |= LSBF;
	/* we dont support running in slave mode (yet?) */
	chip->ctl_reg |= MSTR;

	/*
	 * if any one SPI chip is registered and wants DMA, request the
	 * DMA channel for it
	 */
	if (chip->enable_dma && !dma_requested) {
		/* register dma irq handler */
		if (request_dma(CH_SPI, "BF53x_SPI_DMA") < 0) {
			dev_dbg(&spi->dev,
				"Unable to request BlackFin SPI DMA channel\n");
			return -ENODEV;
		}
		if (set_dma_callback(CH_SPI, (void *)dma_irq_handler, drv_data)
		    < 0) {
			dev_dbg(&spi->dev, "Unable to set dma callback\n");
			return -EPERM;
		}
		dma_disable_irq(CH_SPI);
		dma_requested = 1;
	}

	/*
	 * Notice: for blackfin, the speed_hz is the value of register
	 * SPI_BAUD, not the real baudrate
	 */
	chip->baud = hz_to_spi_baud(spi->max_speed_hz);

	/*
	 * SPI_FLAG layout: the low byte enables the slave select line
	 * (FLS bits), the high byte holds the active-low FLG values, so
	 * only this device's chip select ends up asserted.
	 */
	spi_flg = ~(1 << (spi->chip_select));
	chip->flag = ((u16) spi_flg << 8) | (1 << (spi->chip_select));
	chip->chip_select_num = spi->chip_select;

	switch (chip->bits_per_word) {
	case 8:
		chip->n_bytes = 1;
		chip->width = CFG_SPI_WORDSIZE8;
		chip->read = chip->cs_change_per_word ?
			u8_cs_chg_reader : u8_reader;
		chip->write = chip->cs_change_per_word ?
			u8_cs_chg_writer : u8_writer;
		chip->duplex = chip->cs_change_per_word ?
			u8_cs_chg_duplex : u8_duplex;
		break;

	case 16:
		chip->n_bytes = 2;
		chip->width = CFG_SPI_WORDSIZE16;
		chip->read = chip->cs_change_per_word ?
			u16_cs_chg_reader : u16_reader;
		chip->write = chip->cs_change_per_word ?
			u16_cs_chg_writer : u16_writer;
		chip->duplex = chip->cs_change_per_word ?
			u16_cs_chg_duplex : u16_duplex;
		break;

	default:
		dev_err(&spi->dev, "%d bits_per_word is not supported\n",
			chip->bits_per_word);
		return -ENODEV;
	}

	dev_dbg(&spi->dev, "setup spi chip %s, width is %d, dma is %d,",
		spi->modalias, chip->width, chip->enable_dma);
	dev_dbg(&spi->dev, "ctl_reg is 0x%x, flag_reg is 0x%x\n",
		chip->ctl_reg, chip->flag);

	spi_set_ctldata(spi, chip);

	return 0;
}

/*
 * callback for spi framework.
 * clean driver specific data
 */
static void cleanup(struct spi_device *spi)
{
	struct chip_data *chip = spi_get_ctldata(spi);

	kfree(chip);
}

static inline int init_queue(struct driver_data *drv_data)
{
	INIT_LIST_HEAD(&drv_data->queue);
	spin_lock_init(&drv_data->lock);

	drv_data->run = QUEUE_STOPPED;
	drv_data->busy = 0;

	/* init transfer tasklet */
	tasklet_init(&drv_data->pump_transfers,
		     pump_transfers, (unsigned long)drv_data);

	/* init messages workqueue */
	INIT_WORK(&drv_data->pump_messages, pump_messages);
	drv_data->workqueue =
	    create_singlethread_workqueue(drv_data->master->cdev.dev->bus_id);
	if (drv_data->workqueue == NULL)
		return -EBUSY;

	return 0;
}

static inline int start_queue(struct driver_data *drv_data)
{
	unsigned long flags;

	spin_lock_irqsave(&drv_data->lock, flags);

	if (drv_data->run == QUEUE_RUNNING || drv_data->busy) {
		spin_unlock_irqrestore(&drv_data->lock, flags);
		return -EBUSY;
	}

	drv_data->run = QUEUE_RUNNING;
	drv_data->cur_msg = NULL;
	drv_data->cur_transfer = NULL;
	drv_data->cur_chip = NULL;
	spin_unlock_irqrestore(&drv_data->lock, flags);

	queue_work(drv_data->workqueue, &drv_data->pump_messages);

	return 0;
}

static inline int stop_queue(struct driver_data *drv_data)
{
	unsigned long flags;
	unsigned limit = 500;
	int status = 0;

	spin_lock_irqsave(&drv_data->lock, flags);

	/*
	 * This is a bit lame, but is optimized for the common execution path.
	 * A wait_queue on the drv_data->busy could be used, but then the common
	 * execution path (pump_messages) would be required to call wake_up or
	 * friends on every SPI message. Do this instead
	 */
	drv_data->run = QUEUE_STOPPED;
	while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) {
		spin_unlock_irqrestore(&drv_data->lock, flags);
		msleep(10);
		spin_lock_irqsave(&drv_data->lock, flags);
	}

	if (!list_empty(&drv_data->queue) || drv_data->busy)
		status = -EBUSY;

	spin_unlock_irqrestore(&drv_data->lock, flags);

	return status;
}

static inline int destroy_queue(struct driver_data *drv_data)
{
	int status;

	status = stop_queue(drv_data);
	if (status != 0)
		return status;

	destroy_workqueue(drv_data->workqueue);

	return 0;
}

static int __init bfin5xx_spi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct bfin5xx_spi_master *platform_info;
	struct spi_master *master;
	struct driver_data *drv_data = NULL;
	int status = 0;

	platform_info = dev->platform_data;

	/* Allocate master with space for drv_data */
	master = spi_alloc_master(dev, sizeof(struct driver_data) + 16);
	if (!master) {
		dev_err(&pdev->dev, "can not alloc spi_master\n");
		return -ENOMEM;
	}

	drv_data = spi_master_get_devdata(master);
	drv_data->master = master;
	drv_data->master_info = platform_info;
	drv_data->pdev = pdev;

	master->bus_num = pdev->id;
	master->num_chipselect = platform_info->num_chipselect;
	master->cleanup = cleanup;
	master->setup = setup;
	master->transfer = transfer;

	/* Initial and start queue */
	status = init_queue(drv_data);
	if (status != 0) {
		dev_err(&pdev->dev, "problem initializing queue\n");
		goto out_error_queue_alloc;
	}
	status = start_queue(drv_data);
	if (status != 0) {
		dev_err(&pdev->dev, "problem starting queue\n");
		goto out_error_queue_alloc;
	}

	/* Register with the SPI framework */
	platform_set_drvdata(pdev, drv_data);
	status = spi_register_master(master);
	if (status != 0) {
		dev_err(&pdev->dev, "problem registering spi master\n");
		goto out_error_queue_alloc;
	}
	dev_dbg(&pdev->dev, "controller probe successful\n");
	return status;

out_error_queue_alloc:
	destroy_queue(drv_data);
	spi_master_put(master);
	return status;
}

/* stop hardware and remove the driver */
static int __devexit bfin5xx_spi_remove(struct platform_device *pdev)
{
	struct driver_data *drv_data = platform_get_drvdata(pdev);
	int status = 0;

	if (!drv_data)
		return 0;

	/* Remove the queue */
	status = destroy_queue(drv_data);
	if (status != 0)
		return status;

	/* Disable the SSP at the peripheral and SOC level */
	bfin_spi_disable(drv_data);

	/* Release DMA */
	if (drv_data->master_info->enable_dma) {
		if (dma_channel_active(CH_SPI))
			free_dma(CH_SPI);
	}

	/* Disconnect from the SPI framework */
	spi_unregister_master(drv_data->master);

	/* Prevent double remove */
	platform_set_drvdata(pdev, NULL);

	return 0;
}

#ifdef CONFIG_PM
static int bfin5xx_spi_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct driver_data *drv_data = platform_get_drvdata(pdev);
	int status = 0;

	status = stop_queue(drv_data);
	if (status != 0)
		return status;

	/* stop hardware */
	bfin_spi_disable(drv_data);

	return 0;
}

static int bfin5xx_spi_resume(struct platform_device *pdev)
{
	struct driver_data *drv_data = platform_get_drvdata(pdev);
	int status = 0;

	/* Enable the SPI interface */
	bfin_spi_enable(drv_data);

	/* Start the queue running */
	status = start_queue(drv_data);
	if (status != 0) {
		dev_err(&pdev->dev, "problem starting queue (%d)\n", status);
		return status;
	}

	return 0;
}
#else
#define bfin5xx_spi_suspend	NULL
#define bfin5xx_spi_resume	NULL
#endif				/* CONFIG_PM */

MODULE_ALIAS("bfin-spi-master");	/* for platform bus hotplug */

static struct platform_driver bfin5xx_spi_driver = {
	.driver = {
		.name	= "bfin-spi-master",
		.owner	= THIS_MODULE,
	},
	.suspend	= bfin5xx_spi_suspend,
	.resume		= bfin5xx_spi_resume,
	.remove		= __devexit_p(bfin5xx_spi_remove),
};

static int __init bfin5xx_spi_init(void)
{
	return platform_driver_probe(&bfin5xx_spi_driver, bfin5xx_spi_probe);
}
module_init(bfin5xx_spi_init);

static void __exit bfin5xx_spi_exit(void)
{
	platform_driver_unregister(&bfin5xx_spi_driver);
}
module_exit(bfin5xx_spi_exit);