/*
 * Copyright (C) 2012 Renesas Solutions Corp.
 * Copyright (C) 2011 Renesas Solutions Corp.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/sh_dma.h>
#include <linux/spi/spi.h>
#include <linux/spi/rspi.h>
/* RSPI register offsets (byte offsets from the ioremap()ed base) */
#define RSPI_SPCR		0x00
#define RSPI_SSLP		0x01
#define RSPI_SPPCR		0x02
#define RSPI_SPSR		0x03
#define RSPI_SPDR		0x04
#define RSPI_SPSCR		0x08
#define RSPI_SPSSR		0x09
#define RSPI_SPBR		0x0a
#define RSPI_SPDCR		0x0b
#define RSPI_SPCKD		0x0c
#define RSPI_SSLND		0x0d
#define RSPI_SPND		0x0e
#define RSPI_SPCR2		0x0f
#define RSPI_SPCMD0		0x10
#define RSPI_SPCMD1		0x12
#define RSPI_SPCMD2		0x14
#define RSPI_SPCMD3		0x16
#define RSPI_SPCMD4		0x18
#define RSPI_SPCMD5		0x1a
#define RSPI_SPCMD6		0x1c
#define RSPI_SPCMD7		0x1e

/* SPCR bits */
#define SPCR_SPRIE		0x80
/*
 * NOTE(review): SPCR_SPE was missing from this extraction but is used by
 * rspi_assert_ssl()/rspi_negate_ssl() below — restored; confirm the value
 * against the RSPI hardware manual.
 */
#define SPCR_SPE		0x40
#define SPCR_SPTIE		0x20
#define SPCR_SPEIE		0x10
#define SPCR_MSTR		0x08
#define SPCR_MODFEN		0x04
#define SPCR_TXMD		0x02
#define SPCR_SPMS		0x01

/* SSLP bits */
#define SSLP_SSL1P		0x02
#define SSLP_SSL0P		0x01

/* SPPCR bits */
#define SPPCR_MOIFE		0x20
#define SPPCR_MOIFV		0x10
#define SPPCR_SPOM		0x04
#define SPPCR_SPLP2		0x02
#define SPPCR_SPLP		0x01

/* SPSR bits */
#define SPSR_SPRF		0x80
#define SPSR_SPTEF		0x20
#define SPSR_PERF		0x08
#define SPSR_MODF		0x04
#define SPSR_IDLNF		0x02
#define SPSR_OVRF		0x01

/* SPSCR bits */
#define SPSCR_SPSLN_MASK	0x07

/* SPSSR bits */
#define SPSSR_SPECM_MASK	0x70
#define SPSSR_SPCP_MASK		0x07

/* SPDCR bits */
#define SPDCR_SPLW		0x20
#define SPDCR_SPRDTD		0x10
#define SPDCR_SLSEL1		0x08
#define SPDCR_SLSEL0		0x04
#define SPDCR_SLSEL_MASK	0x0c
#define SPDCR_SPFC1		0x02
#define SPDCR_SPFC0		0x01

/* SPCKD bits */
#define SPCKD_SCKDL_MASK	0x07

/* SSLND bits */
#define SSLND_SLNDL_MASK	0x07

/* SPND bits */
#define SPND_SPNDL_MASK		0x07

/* SPCR2 bits */
#define SPCR2_PTE		0x08
#define SPCR2_SPIE		0x04
#define SPCR2_SPOE		0x02
#define SPCR2_SPPE		0x01

/* SPCMDn bits */
#define SPCMD_SCKDEN		0x8000
#define SPCMD_SLNDEN		0x4000
#define SPCMD_SPNDEN		0x2000
#define SPCMD_LSBF		0x1000
#define SPCMD_SPB_MASK		0x0f00
#define SPCMD_SPB_8_TO_16(bit)	(((bit - 1) << 8) & SPCMD_SPB_MASK)
#define SPCMD_SPB_20BIT		0x0000
#define SPCMD_SPB_24BIT		0x0100
#define SPCMD_SPB_32BIT		0x0200
#define SPCMD_SSLKP		0x0080
#define SPCMD_SSLA_MASK		0x0030
#define SPCMD_BRDV_MASK		0x000c
#define SPCMD_CPOL		0x0002
#define SPCMD_CPHA		0x0001
141 struct spi_master
*master
;
142 struct list_head queue
;
143 struct work_struct ws
;
144 wait_queue_head_t wait
;
150 struct dma_chan
*chan_tx
;
151 struct dma_chan
*chan_rx
;
154 unsigned dma_width_16bit
:1;
155 unsigned dma_callbacked
:1;
158 static void rspi_write8(struct rspi_data
*rspi
, u8 data
, u16 offset
)
160 iowrite8(data
, rspi
->addr
+ offset
);
163 static void rspi_write16(struct rspi_data
*rspi
, u16 data
, u16 offset
)
165 iowrite16(data
, rspi
->addr
+ offset
);
168 static u8
rspi_read8(struct rspi_data
*rspi
, u16 offset
)
170 return ioread8(rspi
->addr
+ offset
);
173 static u16
rspi_read16(struct rspi_data
*rspi
, u16 offset
)
175 return ioread16(rspi
->addr
+ offset
);
178 static unsigned char rspi_calc_spbr(struct rspi_data
*rspi
)
183 tmp
= clk_get_rate(rspi
->clk
) / (2 * rspi
->max_speed_hz
) - 1;
184 spbr
= clamp(tmp
, 0, 255);
189 static void rspi_enable_irq(struct rspi_data
*rspi
, u8 enable
)
191 rspi_write8(rspi
, rspi_read8(rspi
, RSPI_SPCR
) | enable
, RSPI_SPCR
);
194 static void rspi_disable_irq(struct rspi_data
*rspi
, u8 disable
)
196 rspi_write8(rspi
, rspi_read8(rspi
, RSPI_SPCR
) & ~disable
, RSPI_SPCR
);
199 static int rspi_wait_for_interrupt(struct rspi_data
*rspi
, u8 wait_mask
,
204 rspi
->spsr
= rspi_read8(rspi
, RSPI_SPSR
);
205 rspi_enable_irq(rspi
, enable_bit
);
206 ret
= wait_event_timeout(rspi
->wait
, rspi
->spsr
& wait_mask
, HZ
);
207 if (ret
== 0 && !(rspi
->spsr
& wait_mask
))
213 static void rspi_assert_ssl(struct rspi_data
*rspi
)
215 rspi_write8(rspi
, rspi_read8(rspi
, RSPI_SPCR
) | SPCR_SPE
, RSPI_SPCR
);
218 static void rspi_negate_ssl(struct rspi_data
*rspi
)
220 rspi_write8(rspi
, rspi_read8(rspi
, RSPI_SPCR
) & ~SPCR_SPE
, RSPI_SPCR
);
223 static int rspi_set_config_register(struct rspi_data
*rspi
, int access_size
)
225 /* Sets output mode(CMOS) and MOSI signal(from previous transfer) */
226 rspi_write8(rspi
, 0x00, RSPI_SPPCR
);
228 /* Sets transfer bit rate */
229 rspi_write8(rspi
, rspi_calc_spbr(rspi
), RSPI_SPBR
);
231 /* Sets number of frames to be used: 1 frame */
232 rspi_write8(rspi
, 0x00, RSPI_SPDCR
);
234 /* Sets RSPCK, SSL, next-access delay value */
235 rspi_write8(rspi
, 0x00, RSPI_SPCKD
);
236 rspi_write8(rspi
, 0x00, RSPI_SSLND
);
237 rspi_write8(rspi
, 0x00, RSPI_SPND
);
239 /* Sets parity, interrupt mask */
240 rspi_write8(rspi
, 0x00, RSPI_SPCR2
);
243 rspi_write16(rspi
, SPCMD_SPB_8_TO_16(access_size
) | SPCMD_SSLKP
,
247 rspi_write8(rspi
, SPCR_MSTR
, RSPI_SPCR
);
252 static int rspi_send_pio(struct rspi_data
*rspi
, struct spi_message
*mesg
,
253 struct spi_transfer
*t
)
258 data
= (u8
*)t
->tx_buf
;
260 rspi_write8(rspi
, rspi_read8(rspi
, RSPI_SPCR
) | SPCR_TXMD
,
263 if (rspi_wait_for_interrupt(rspi
, SPSR_SPTEF
, SPCR_SPTIE
) < 0) {
264 dev_err(&rspi
->master
->dev
,
265 "%s: tx empty timeout\n", __func__
);
269 rspi_write16(rspi
, *data
, RSPI_SPDR
);
274 /* Waiting for the last transmition */
275 rspi_wait_for_interrupt(rspi
, SPSR_SPTEF
, SPCR_SPTIE
);
280 static void rspi_dma_complete(void *arg
)
282 struct rspi_data
*rspi
= arg
;
284 rspi
->dma_callbacked
= 1;
285 wake_up_interruptible(&rspi
->wait
);
288 static int rspi_dma_map_sg(struct scatterlist
*sg
, void *buf
, unsigned len
,
289 struct dma_chan
*chan
,
290 enum dma_transfer_direction dir
)
292 sg_init_table(sg
, 1);
293 sg_set_buf(sg
, buf
, len
);
294 sg_dma_len(sg
) = len
;
295 return dma_map_sg(chan
->device
->dev
, sg
, 1, dir
);
298 static void rspi_dma_unmap_sg(struct scatterlist
*sg
, struct dma_chan
*chan
,
299 enum dma_transfer_direction dir
)
301 dma_unmap_sg(chan
->device
->dev
, sg
, 1, dir
);
304 static void rspi_memory_to_8bit(void *buf
, const void *data
, unsigned len
)
307 const u8
*src
= data
;
310 *dst
++ = (u16
)(*src
++);
315 static void rspi_memory_from_8bit(void *buf
, const void *data
, unsigned len
)
318 const u16
*src
= data
;
326 static int rspi_send_dma(struct rspi_data
*rspi
, struct spi_transfer
*t
)
328 struct scatterlist sg
;
330 struct dma_async_tx_descriptor
*desc
;
334 if (rspi
->dma_width_16bit
) {
336 * If DMAC bus width is 16-bit, the driver allocates a dummy
337 * buffer. And, the driver converts original data into the
338 * DMAC data as the following format:
339 * original data: 1st byte, 2nd byte ...
340 * DMAC data: 1st byte, dummy, 2nd byte, dummy ...
343 buf
= kmalloc(len
, GFP_KERNEL
);
346 rspi_memory_to_8bit(buf
, t
->tx_buf
, t
->len
);
349 buf
= (void *)t
->tx_buf
;
352 if (!rspi_dma_map_sg(&sg
, buf
, len
, rspi
->chan_tx
, DMA_TO_DEVICE
)) {
356 desc
= dmaengine_prep_slave_sg(rspi
->chan_tx
, &sg
, 1, DMA_TO_DEVICE
,
357 DMA_PREP_INTERRUPT
| DMA_CTRL_ACK
);
364 * DMAC needs SPTIE, but if SPTIE is set, this IRQ routine will be
365 * called. So, this driver disables the IRQ while DMA transfer.
367 disable_irq(rspi
->irq
);
369 rspi_write8(rspi
, rspi_read8(rspi
, RSPI_SPCR
) | SPCR_TXMD
, RSPI_SPCR
);
370 rspi_enable_irq(rspi
, SPCR_SPTIE
);
371 rspi
->dma_callbacked
= 0;
373 desc
->callback
= rspi_dma_complete
;
374 desc
->callback_param
= rspi
;
375 dmaengine_submit(desc
);
376 dma_async_issue_pending(rspi
->chan_tx
);
378 ret
= wait_event_interruptible_timeout(rspi
->wait
,
379 rspi
->dma_callbacked
, HZ
);
380 if (ret
> 0 && rspi
->dma_callbacked
)
384 rspi_disable_irq(rspi
, SPCR_SPTIE
);
386 enable_irq(rspi
->irq
);
389 rspi_dma_unmap_sg(&sg
, rspi
->chan_tx
, DMA_TO_DEVICE
);
391 if (rspi
->dma_width_16bit
)
397 static void rspi_receive_init(struct rspi_data
*rspi
)
401 spsr
= rspi_read8(rspi
, RSPI_SPSR
);
402 if (spsr
& SPSR_SPRF
)
403 rspi_read16(rspi
, RSPI_SPDR
); /* dummy read */
404 if (spsr
& SPSR_OVRF
)
405 rspi_write8(rspi
, rspi_read8(rspi
, RSPI_SPSR
) & ~SPSR_OVRF
,
409 static int rspi_receive_pio(struct rspi_data
*rspi
, struct spi_message
*mesg
,
410 struct spi_transfer
*t
)
415 rspi_receive_init(rspi
);
417 data
= (u8
*)t
->rx_buf
;
419 rspi_write8(rspi
, rspi_read8(rspi
, RSPI_SPCR
) & ~SPCR_TXMD
,
422 if (rspi_wait_for_interrupt(rspi
, SPSR_SPTEF
, SPCR_SPTIE
) < 0) {
423 dev_err(&rspi
->master
->dev
,
424 "%s: tx empty timeout\n", __func__
);
427 /* dummy write for generate clock */
428 rspi_write16(rspi
, 0x00, RSPI_SPDR
);
430 if (rspi_wait_for_interrupt(rspi
, SPSR_SPRF
, SPCR_SPRIE
) < 0) {
431 dev_err(&rspi
->master
->dev
,
432 "%s: receive timeout\n", __func__
);
435 /* SPDR allows 16 or 32-bit access only */
436 *data
= (u8
)rspi_read16(rspi
, RSPI_SPDR
);
445 static int rspi_receive_dma(struct rspi_data
*rspi
, struct spi_transfer
*t
)
447 struct scatterlist sg
, sg_dummy
;
448 void *dummy
= NULL
, *rx_buf
= NULL
;
449 struct dma_async_tx_descriptor
*desc
, *desc_dummy
;
453 if (rspi
->dma_width_16bit
) {
455 * If DMAC bus width is 16-bit, the driver allocates a dummy
456 * buffer. And, finally the driver converts the DMAC data into
457 * actual data as the following format:
458 * DMAC data: 1st byte, dummy, 2nd byte, dummy ...
459 * actual data: 1st byte, 2nd byte ...
462 rx_buf
= kmalloc(len
, GFP_KERNEL
);
470 /* prepare dummy transfer to generate SPI clocks */
471 dummy
= kzalloc(len
, GFP_KERNEL
);
476 if (!rspi_dma_map_sg(&sg_dummy
, dummy
, len
, rspi
->chan_tx
,
481 desc_dummy
= dmaengine_prep_slave_sg(rspi
->chan_tx
, &sg_dummy
, 1,
482 DMA_TO_DEVICE
, DMA_PREP_INTERRUPT
| DMA_CTRL_ACK
);
485 goto end_dummy_mapped
;
488 /* prepare receive transfer */
489 if (!rspi_dma_map_sg(&sg
, rx_buf
, len
, rspi
->chan_rx
,
492 goto end_dummy_mapped
;
495 desc
= dmaengine_prep_slave_sg(rspi
->chan_rx
, &sg
, 1, DMA_FROM_DEVICE
,
496 DMA_PREP_INTERRUPT
| DMA_CTRL_ACK
);
502 rspi_receive_init(rspi
);
505 * DMAC needs SPTIE, but if SPTIE is set, this IRQ routine will be
506 * called. So, this driver disables the IRQ while DMA transfer.
508 disable_irq(rspi
->irq
);
510 rspi_write8(rspi
, rspi_read8(rspi
, RSPI_SPCR
) & ~SPCR_TXMD
, RSPI_SPCR
);
511 rspi_enable_irq(rspi
, SPCR_SPTIE
| SPCR_SPRIE
);
512 rspi
->dma_callbacked
= 0;
514 desc
->callback
= rspi_dma_complete
;
515 desc
->callback_param
= rspi
;
516 dmaengine_submit(desc
);
517 dma_async_issue_pending(rspi
->chan_rx
);
519 desc_dummy
->callback
= NULL
; /* No callback */
520 dmaengine_submit(desc_dummy
);
521 dma_async_issue_pending(rspi
->chan_tx
);
523 ret
= wait_event_interruptible_timeout(rspi
->wait
,
524 rspi
->dma_callbacked
, HZ
);
525 if (ret
> 0 && rspi
->dma_callbacked
)
529 rspi_disable_irq(rspi
, SPCR_SPTIE
| SPCR_SPRIE
);
531 enable_irq(rspi
->irq
);
534 rspi_dma_unmap_sg(&sg
, rspi
->chan_rx
, DMA_FROM_DEVICE
);
536 rspi_dma_unmap_sg(&sg_dummy
, rspi
->chan_tx
, DMA_TO_DEVICE
);
538 if (rspi
->dma_width_16bit
) {
540 rspi_memory_from_8bit(t
->rx_buf
, rx_buf
, t
->len
);
548 static int rspi_is_dma(struct rspi_data
*rspi
, struct spi_transfer
*t
)
550 if (t
->tx_buf
&& rspi
->chan_tx
)
552 /* If the module receives data by DMAC, it also needs TX DMAC */
553 if (t
->rx_buf
&& rspi
->chan_tx
&& rspi
->chan_rx
)
559 static void rspi_work(struct work_struct
*work
)
561 struct rspi_data
*rspi
= container_of(work
, struct rspi_data
, ws
);
562 struct spi_message
*mesg
;
563 struct spi_transfer
*t
;
567 spin_lock_irqsave(&rspi
->lock
, flags
);
568 while (!list_empty(&rspi
->queue
)) {
569 mesg
= list_entry(rspi
->queue
.next
, struct spi_message
, queue
);
570 list_del_init(&mesg
->queue
);
571 spin_unlock_irqrestore(&rspi
->lock
, flags
);
573 rspi_assert_ssl(rspi
);
575 list_for_each_entry(t
, &mesg
->transfers
, transfer_list
) {
577 if (rspi_is_dma(rspi
, t
))
578 ret
= rspi_send_dma(rspi
, t
);
580 ret
= rspi_send_pio(rspi
, mesg
, t
);
585 if (rspi_is_dma(rspi
, t
))
586 ret
= rspi_receive_dma(rspi
, t
);
588 ret
= rspi_receive_pio(rspi
, mesg
, t
);
592 mesg
->actual_length
+= t
->len
;
594 rspi_negate_ssl(rspi
);
597 mesg
->complete(mesg
->context
);
599 spin_lock_irqsave(&rspi
->lock
, flags
);
606 mesg
->complete(mesg
->context
);
609 static int rspi_setup(struct spi_device
*spi
)
611 struct rspi_data
*rspi
= spi_master_get_devdata(spi
->master
);
613 if (!spi
->bits_per_word
)
614 spi
->bits_per_word
= 8;
615 rspi
->max_speed_hz
= spi
->max_speed_hz
;
617 rspi_set_config_register(rspi
, 8);
622 static int rspi_transfer(struct spi_device
*spi
, struct spi_message
*mesg
)
624 struct rspi_data
*rspi
= spi_master_get_devdata(spi
->master
);
627 mesg
->actual_length
= 0;
628 mesg
->status
= -EINPROGRESS
;
630 spin_lock_irqsave(&rspi
->lock
, flags
);
631 list_add_tail(&mesg
->queue
, &rspi
->queue
);
632 schedule_work(&rspi
->ws
);
633 spin_unlock_irqrestore(&rspi
->lock
, flags
);
/* spi_master .cleanup callback: nothing per-device to release. */
static void rspi_cleanup(struct spi_device *spi)
{
}
642 static irqreturn_t
rspi_irq(int irq
, void *_sr
)
644 struct rspi_data
*rspi
= (struct rspi_data
*)_sr
;
646 irqreturn_t ret
= IRQ_NONE
;
647 unsigned char disable_irq
= 0;
649 rspi
->spsr
= spsr
= rspi_read8(rspi
, RSPI_SPSR
);
650 if (spsr
& SPSR_SPRF
)
651 disable_irq
|= SPCR_SPRIE
;
652 if (spsr
& SPSR_SPTEF
)
653 disable_irq
|= SPCR_SPTIE
;
657 rspi_disable_irq(rspi
, disable_irq
);
658 wake_up(&rspi
->wait
);
664 static int rspi_request_dma(struct rspi_data
*rspi
,
665 struct platform_device
*pdev
)
667 struct rspi_plat_data
*rspi_pd
= pdev
->dev
.platform_data
;
669 struct dma_slave_config cfg
;
673 return 0; /* The driver assumes no error. */
675 rspi
->dma_width_16bit
= rspi_pd
->dma_width_16bit
;
677 /* If the module receives data by DMAC, it also needs TX DMAC */
678 if (rspi_pd
->dma_rx_id
&& rspi_pd
->dma_tx_id
) {
680 dma_cap_set(DMA_SLAVE
, mask
);
681 rspi
->chan_rx
= dma_request_channel(mask
, shdma_chan_filter
,
682 (void *)rspi_pd
->dma_rx_id
);
684 cfg
.slave_id
= rspi_pd
->dma_rx_id
;
685 cfg
.direction
= DMA_DEV_TO_MEM
;
686 ret
= dmaengine_slave_config(rspi
->chan_rx
, &cfg
);
688 dev_info(&pdev
->dev
, "Use DMA when rx.\n");
693 if (rspi_pd
->dma_tx_id
) {
695 dma_cap_set(DMA_SLAVE
, mask
);
696 rspi
->chan_tx
= dma_request_channel(mask
, shdma_chan_filter
,
697 (void *)rspi_pd
->dma_tx_id
);
699 cfg
.slave_id
= rspi_pd
->dma_tx_id
;
700 cfg
.direction
= DMA_MEM_TO_DEV
;
701 ret
= dmaengine_slave_config(rspi
->chan_tx
, &cfg
);
703 dev_info(&pdev
->dev
, "Use DMA when tx\n");
712 static void rspi_release_dma(struct rspi_data
*rspi
)
715 dma_release_channel(rspi
->chan_tx
);
717 dma_release_channel(rspi
->chan_rx
);
720 static int rspi_remove(struct platform_device
*pdev
)
722 struct rspi_data
*rspi
= dev_get_drvdata(&pdev
->dev
);
724 spi_unregister_master(rspi
->master
);
725 rspi_release_dma(rspi
);
726 free_irq(platform_get_irq(pdev
, 0), rspi
);
729 spi_master_put(rspi
->master
);
734 static int rspi_probe(struct platform_device
*pdev
)
736 struct resource
*res
;
737 struct spi_master
*master
;
738 struct rspi_data
*rspi
;
743 res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
744 if (unlikely(res
== NULL
)) {
745 dev_err(&pdev
->dev
, "invalid resource\n");
749 irq
= platform_get_irq(pdev
, 0);
751 dev_err(&pdev
->dev
, "platform_get_irq error\n");
755 master
= spi_alloc_master(&pdev
->dev
, sizeof(struct rspi_data
));
756 if (master
== NULL
) {
757 dev_err(&pdev
->dev
, "spi_alloc_master error.\n");
761 rspi
= spi_master_get_devdata(master
);
762 dev_set_drvdata(&pdev
->dev
, rspi
);
764 rspi
->master
= master
;
765 rspi
->addr
= ioremap(res
->start
, resource_size(res
));
766 if (rspi
->addr
== NULL
) {
767 dev_err(&pdev
->dev
, "ioremap error.\n");
772 snprintf(clk_name
, sizeof(clk_name
), "rspi%d", pdev
->id
);
773 rspi
->clk
= clk_get(&pdev
->dev
, clk_name
);
774 if (IS_ERR(rspi
->clk
)) {
775 dev_err(&pdev
->dev
, "cannot get clock\n");
776 ret
= PTR_ERR(rspi
->clk
);
779 clk_enable(rspi
->clk
);
781 INIT_LIST_HEAD(&rspi
->queue
);
782 spin_lock_init(&rspi
->lock
);
783 INIT_WORK(&rspi
->ws
, rspi_work
);
784 init_waitqueue_head(&rspi
->wait
);
786 master
->num_chipselect
= 2;
787 master
->bus_num
= pdev
->id
;
788 master
->setup
= rspi_setup
;
789 master
->transfer
= rspi_transfer
;
790 master
->cleanup
= rspi_cleanup
;
792 ret
= request_irq(irq
, rspi_irq
, 0, dev_name(&pdev
->dev
), rspi
);
794 dev_err(&pdev
->dev
, "request_irq error\n");
799 ret
= rspi_request_dma(rspi
, pdev
);
801 dev_err(&pdev
->dev
, "rspi_request_dma failed.\n");
805 ret
= spi_register_master(master
);
807 dev_err(&pdev
->dev
, "spi_register_master error.\n");
811 dev_info(&pdev
->dev
, "probed\n");
816 rspi_release_dma(rspi
);
823 spi_master_put(master
);
828 static struct platform_driver rspi_driver
= {
830 .remove
= rspi_remove
,
833 .owner
= THIS_MODULE
,
836 module_platform_driver(rspi_driver
);
838 MODULE_DESCRIPTION("Renesas RSPI bus driver");
839 MODULE_LICENSE("GPL v2");
840 MODULE_AUTHOR("Yoshihiro Shimoda");
841 MODULE_ALIAS("platform:rspi");