tomato.git: release/src-rt-6.x.4708/linux/linux-2.6.36/drivers/spi/spi_mpc8xxx.c

/*
 * MPC8xxx SPI controller driver.
 *
 * Maintainer: Kumar Gala
 *
 * Copyright (C) 2006 Polycom, Inc.
 *
 * CPM SPI and QE buffer descriptors mode support:
 * Copyright (c) 2009 MontaVista Software, Inc.
 * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/device.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/platform_device.h>
#include <linux/fsl_devices.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/slab.h>

#include <sysdev/fsl_soc.h>
#include <asm/cpm.h>
#include <asm/qe.h>
#include <asm/irq.h>

/* CPM1 and CPM2 are mutually exclusive. */
#ifdef CONFIG_CPM1
#include <asm/cpm1.h>
#define CPM_SPI_CMD mk_cr_cmd(CPM_CR_CH_SPI, 0)
#else
#include <asm/cpm2.h>
#define CPM_SPI_CMD mk_cr_cmd(CPM_CR_SPI_PAGE, CPM_CR_SPI_SBLOCK, 0, 0)
#endif

/* SPI Controller registers */
struct mpc8xxx_spi_reg {
	u8 res1[0x20];
	__be32 mode;
	__be32 event;
	__be32 mask;
	__be32 command;
	__be32 transmit;
	__be32 receive;
};

/* SPI Controller mode register definitions */
#define SPMODE_LOOP		(1 << 30)
#define SPMODE_CI_INACTIVEHIGH	(1 << 29)
#define SPMODE_CP_BEGIN_EDGECLK	(1 << 28)
#define SPMODE_DIV16		(1 << 27)
#define SPMODE_REV		(1 << 26)
#define SPMODE_MS		(1 << 25)
#define SPMODE_ENABLE		(1 << 24)
#define SPMODE_LEN(x)		((x) << 20)
#define SPMODE_PM(x)		((x) << 16)
#define SPMODE_OP		(1 << 14)
#define SPMODE_CG(x)		((x) << 7)

/*
 * Default for SPI Mode:
 *	SPI MODE 0 (inactive low, phase middle, MSB, 8-bit length, slow clk)
 */
#define SPMODE_INIT_VAL (SPMODE_CI_INACTIVEHIGH | SPMODE_DIV16 | SPMODE_REV | \
			 SPMODE_MS | SPMODE_LEN(7) | SPMODE_PM(0xf))

/* SPIE register values */
#define SPIE_NE		0x00000200	/* Not empty */
#define SPIE_NF		0x00000100	/* Not full */

/* SPIM register values */
#define SPIM_NE		0x00000200	/* Not empty */
#define SPIM_NF		0x00000100	/* Not full */

#define SPIE_TXB	0x00000200	/* Last char is written to tx fifo */
#define SPIE_RXB	0x00000100	/* Last char is written to rx buf */

/* SPCOM register values */
#define SPCOM_STR	(1 << 23)	/* Start transmit */

#define SPI_PRAM_SIZE	0x100
#define SPI_MRBLR	((unsigned int)PAGE_SIZE)

/* SPI Controller driver's private data. */
struct mpc8xxx_spi {
	struct device *dev;
	struct mpc8xxx_spi_reg __iomem *base;

	/* rx & tx bufs from the spi_transfer */
	const void *tx;
	void *rx;

	int subblock;
	struct spi_pram __iomem *pram;
	struct cpm_buf_desc __iomem *tx_bd;
	struct cpm_buf_desc __iomem *rx_bd;

	struct spi_transfer *xfer_in_progress;

	/* dma addresses for CPM transfers */
	dma_addr_t tx_dma;
	dma_addr_t rx_dma;
	bool map_tx_dma;
	bool map_rx_dma;

	dma_addr_t dma_dummy_tx;
	dma_addr_t dma_dummy_rx;

	/* functions to deal with different sized buffers */
	void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *);
	u32 (*get_tx) (struct mpc8xxx_spi *);

	unsigned int count;
	unsigned int irq;

	unsigned nsecs;		/* (clock cycle time)/2 */

	u32 spibrg;		/* SPIBRG input clock */
	u32 rx_shift;		/* RX data reg shift when in qe mode */
	u32 tx_shift;		/* TX data reg shift when in qe mode */

	unsigned int flags;

	struct workqueue_struct *workqueue;
	struct work_struct work;

	struct list_head queue;
	spinlock_t lock;

	struct completion done;
};

static void *mpc8xxx_dummy_rx;
static DEFINE_MUTEX(mpc8xxx_dummy_rx_lock);
static int mpc8xxx_dummy_rx_refcnt;

struct spi_mpc8xxx_cs {
	/* functions to deal with different sized buffers */
	void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *);
	u32 (*get_tx) (struct mpc8xxx_spi *);
	u32 rx_shift;		/* RX data reg shift when in qe mode */
	u32 tx_shift;		/* TX data reg shift when in qe mode */
	u32 hw_mode;		/* Holds HW mode register settings */
};

static inline void mpc8xxx_spi_write_reg(__be32 __iomem *reg, u32 val)
{
	out_be32(reg, val);
}

static inline u32 mpc8xxx_spi_read_reg(__be32 __iomem *reg)
{
	return in_be32(reg);
}

#define MPC83XX_SPI_RX_BUF(type)					  \
static									  \
void mpc8xxx_spi_rx_buf_##type(u32 data, struct mpc8xxx_spi *mpc8xxx_spi) \
{									  \
	type *rx = mpc8xxx_spi->rx;					  \
	*rx++ = (type)(data >> mpc8xxx_spi->rx_shift);			  \
	mpc8xxx_spi->rx = rx;						  \
}

#define MPC83XX_SPI_TX_BUF(type)				\
static								\
u32 mpc8xxx_spi_tx_buf_##type(struct mpc8xxx_spi *mpc8xxx_spi)	\
{								\
	u32 data;						\
	const type *tx = mpc8xxx_spi->tx;			\
	if (!tx)						\
		return 0;					\
	data = *tx++ << mpc8xxx_spi->tx_shift;			\
	mpc8xxx_spi->tx = tx;					\
	return data;						\
}

MPC83XX_SPI_RX_BUF(u8)
MPC83XX_SPI_RX_BUF(u16)
MPC83XX_SPI_RX_BUF(u32)
MPC83XX_SPI_TX_BUF(u8)
MPC83XX_SPI_TX_BUF(u16)
MPC83XX_SPI_TX_BUF(u32)
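
/*
 * The macros above generate one RX/TX accessor per supported word size.
 * mspi_apply_cpu_mode_quirks() picks the matching get_rx()/get_tx() pair
 * for a transfer's bits_per_word and, in QE CPU mode, sets rx_shift and
 * tx_shift so the payload is moved to/from the bit positions that variant
 * of the controller uses.
 */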
static void mpc8xxx_spi_change_mode(struct spi_device *spi)
{
	struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master);
	struct spi_mpc8xxx_cs *cs = spi->controller_state;
	__be32 __iomem *mode = &mspi->base->mode;
	unsigned long flags;

	if (cs->hw_mode == mpc8xxx_spi_read_reg(mode))
		return;

	/* Turn off IRQs locally to minimize time that SPI is disabled. */
	local_irq_save(flags);

	/* Turn off the SPI unit prior to changing mode. */
	mpc8xxx_spi_write_reg(mode, cs->hw_mode & ~SPMODE_ENABLE);

	/* When in CPM mode, we need to reinit tx and rx. */
	if (mspi->flags & SPI_CPM_MODE) {
		if (mspi->flags & SPI_QE) {
			qe_issue_cmd(QE_INIT_TX_RX, mspi->subblock,
				     QE_CR_PROTOCOL_UNSPECIFIED, 0);
		} else {
			cpm_command(CPM_SPI_CMD, CPM_CR_INIT_TRX);
			if (mspi->flags & SPI_CPM1) {
				out_be16(&mspi->pram->rbptr,
					 in_be16(&mspi->pram->rbase));
				out_be16(&mspi->pram->tbptr,
					 in_be16(&mspi->pram->tbase));
			}
		}
	}
	mpc8xxx_spi_write_reg(mode, cs->hw_mode);
	local_irq_restore(flags);
}

static void mpc8xxx_spi_chipselect(struct spi_device *spi, int value)
{
	struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
	struct fsl_spi_platform_data *pdata = spi->dev.parent->platform_data;
	bool pol = spi->mode & SPI_CS_HIGH;
	struct spi_mpc8xxx_cs *cs = spi->controller_state;

	if (value == BITBANG_CS_INACTIVE) {
		if (pdata->cs_control)
			pdata->cs_control(spi, !pol);
	}

	if (value == BITBANG_CS_ACTIVE) {
		mpc8xxx_spi->rx_shift = cs->rx_shift;
		mpc8xxx_spi->tx_shift = cs->tx_shift;
		mpc8xxx_spi->get_rx = cs->get_rx;
		mpc8xxx_spi->get_tx = cs->get_tx;

		mpc8xxx_spi_change_mode(spi);

		if (pdata->cs_control)
			pdata->cs_control(spi, pol);
	}
}

static int
mspi_apply_cpu_mode_quirks(struct spi_mpc8xxx_cs *cs,
			   struct spi_device *spi,
			   struct mpc8xxx_spi *mpc8xxx_spi,
			   int bits_per_word)
{
	cs->rx_shift = 0;
	cs->tx_shift = 0;
	if (bits_per_word <= 8) {
		cs->get_rx = mpc8xxx_spi_rx_buf_u8;
		cs->get_tx = mpc8xxx_spi_tx_buf_u8;
		if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) {
			cs->rx_shift = 16;
			cs->tx_shift = 24;
		}
	} else if (bits_per_word <= 16) {
		cs->get_rx = mpc8xxx_spi_rx_buf_u16;
		cs->get_tx = mpc8xxx_spi_tx_buf_u16;
		if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) {
			cs->rx_shift = 16;
			cs->tx_shift = 16;
		}
	} else if (bits_per_word <= 32) {
		cs->get_rx = mpc8xxx_spi_rx_buf_u32;
		cs->get_tx = mpc8xxx_spi_tx_buf_u32;
	} else
		return -EINVAL;

	if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE &&
	    spi->mode & SPI_LSB_FIRST) {
		cs->tx_shift = 0;
		if (bits_per_word <= 8)
			cs->rx_shift = 8;
		else
			cs->rx_shift = 0;
	}
	mpc8xxx_spi->rx_shift = cs->rx_shift;
	mpc8xxx_spi->tx_shift = cs->tx_shift;
	mpc8xxx_spi->get_rx = cs->get_rx;
	mpc8xxx_spi->get_tx = cs->get_tx;

	return bits_per_word;
}

static int
mspi_apply_qe_mode_quirks(struct spi_mpc8xxx_cs *cs,
			  struct spi_device *spi,
			  int bits_per_word)
{
	/* QE uses Little Endian for words > 8
	 * so transform all words > 8 into 8 bits.
	 * Unfortunately that doesn't work for LSB-first, so
	 * reject these for now. */
	/* Note: 32-bit words with LSB-first work iff
	 * tfcr/rfcr is set to CPMFCR_GBL. */
	if (spi->mode & SPI_LSB_FIRST &&
	    bits_per_word > 8)
		return -EINVAL;
	if (bits_per_word > 8)
		return 8; /* pretend it's 8 bits */
	return bits_per_word;
}

static
int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
{
	struct mpc8xxx_spi *mpc8xxx_spi;
	int bits_per_word;
	u8 pm;
	u32 hz;
	struct spi_mpc8xxx_cs *cs = spi->controller_state;

	mpc8xxx_spi = spi_master_get_devdata(spi->master);

	if (t) {
		bits_per_word = t->bits_per_word;
		hz = t->speed_hz;
	} else {
		bits_per_word = 0;
		hz = 0;
	}

	/* spi_transfer level calls that work per-word */
	if (!bits_per_word)
		bits_per_word = spi->bits_per_word;

	/* Make sure it's a bit width we support [4..16, 32] */
	if ((bits_per_word < 4)
	    || ((bits_per_word > 16) && (bits_per_word != 32)))
		return -EINVAL;

	if (!hz)
		hz = spi->max_speed_hz;

	if (!(mpc8xxx_spi->flags & SPI_CPM_MODE))
		bits_per_word = mspi_apply_cpu_mode_quirks(cs, spi,
							   mpc8xxx_spi,
							   bits_per_word);
	else if (mpc8xxx_spi->flags & SPI_QE)
		bits_per_word = mspi_apply_qe_mode_quirks(cs, spi,
							  bits_per_word);

	if (bits_per_word < 0)
		return bits_per_word;

	if (bits_per_word == 32)
		bits_per_word = 0;
	else
		bits_per_word = bits_per_word - 1;

	/* mask out bits we are going to set */
	cs->hw_mode &= ~(SPMODE_LEN(0xF) | SPMODE_DIV16
				  | SPMODE_PM(0xF));

	cs->hw_mode |= SPMODE_LEN(bits_per_word);
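
	/*
	 * Size the prescale modulus (PM): the generated SPI clock is
	 * spibrg / (4 * (PM + 1)), or spibrg / (64 * (PM + 1)) once DIV16
	 * is set, so rates below spibrg/64 need DIV16 and the slowest
	 * reachable rate is spibrg/1024 (PM is a 4-bit field).
	 */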
	if ((mpc8xxx_spi->spibrg / hz) > 64) {
		cs->hw_mode |= SPMODE_DIV16;
		pm = (mpc8xxx_spi->spibrg - 1) / (hz * 64) + 1;

		WARN_ONCE(pm > 16, "%s: Requested speed is too low: %d Hz. "
			  "Will use %d Hz instead.\n", dev_name(&spi->dev),
			  hz, mpc8xxx_spi->spibrg / 1024);
		if (pm > 16)
			pm = 16;
	} else
		pm = (mpc8xxx_spi->spibrg - 1) / (hz * 4) + 1;
	if (pm)
		pm--;

	cs->hw_mode |= SPMODE_PM(pm);

	mpc8xxx_spi_change_mode(spi);
	return 0;
}

static void mpc8xxx_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi)
{
	struct cpm_buf_desc __iomem *tx_bd = mspi->tx_bd;
	struct cpm_buf_desc __iomem *rx_bd = mspi->rx_bd;
	unsigned int xfer_len = min(mspi->count, SPI_MRBLR);
	unsigned int xfer_ofs;

	xfer_ofs = mspi->xfer_in_progress->len - mspi->count;

	if (mspi->rx_dma == mspi->dma_dummy_rx)
		out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma);
	else
		out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma + xfer_ofs);
	out_be16(&rx_bd->cbd_datlen, 0);
	out_be16(&rx_bd->cbd_sc, BD_SC_EMPTY | BD_SC_INTRPT | BD_SC_WRAP);

	if (mspi->tx_dma == mspi->dma_dummy_tx)
		out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma);
	else
		out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma + xfer_ofs);
	out_be16(&tx_bd->cbd_datlen, xfer_len);
	out_be16(&tx_bd->cbd_sc, BD_SC_READY | BD_SC_INTRPT | BD_SC_WRAP |
				 BD_SC_LAST);

	/* start transfer */
	mpc8xxx_spi_write_reg(&mspi->base->command, SPCOM_STR);
}
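
/*
 * CPM/QE path: mpc8xxx_spi_cpm_bufs() maps the transfer buffers for DMA
 * (falling back to the dummy buffers when tx_buf or rx_buf is NULL), then
 * mpc8xxx_spi_cpm_bufs_start() programs one TX/RX buffer-descriptor pair
 * covering at most SPI_MRBLR bytes and kicks the controller with SPCOM_STR.
 * The RXB interrupt either restarts the next chunk or signals completion.
 */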
static int mpc8xxx_spi_cpm_bufs(struct mpc8xxx_spi *mspi,
				struct spi_transfer *t, bool is_dma_mapped)
{
	struct device *dev = mspi->dev;

	if (is_dma_mapped) {
		mspi->map_tx_dma = 0;
		mspi->map_rx_dma = 0;
	} else {
		mspi->map_tx_dma = 1;
		mspi->map_rx_dma = 1;
	}

	if (!t->tx_buf) {
		mspi->tx_dma = mspi->dma_dummy_tx;
		mspi->map_tx_dma = 0;
	}

	if (!t->rx_buf) {
		mspi->rx_dma = mspi->dma_dummy_rx;
		mspi->map_rx_dma = 0;
	}

	if (mspi->map_tx_dma) {
		void *nonconst_tx = (void *)mspi->tx; /* shut up gcc */

		mspi->tx_dma = dma_map_single(dev, nonconst_tx, t->len,
					      DMA_TO_DEVICE);
		if (dma_mapping_error(dev, mspi->tx_dma)) {
			dev_err(dev, "unable to map tx dma\n");
			return -ENOMEM;
		}
	} else if (t->tx_buf) {
		mspi->tx_dma = t->tx_dma;
	}

	if (mspi->map_rx_dma) {
		mspi->rx_dma = dma_map_single(dev, mspi->rx, t->len,
					      DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, mspi->rx_dma)) {
			dev_err(dev, "unable to map rx dma\n");
			goto err_rx_dma;
		}
	} else if (t->rx_buf) {
		mspi->rx_dma = t->rx_dma;
	}

	/* enable rx ints */
	mpc8xxx_spi_write_reg(&mspi->base->mask, SPIE_RXB);

	mspi->xfer_in_progress = t;
	mspi->count = t->len;

	/* start CPM transfers */
	mpc8xxx_spi_cpm_bufs_start(mspi);

	return 0;

err_rx_dma:
	if (mspi->map_tx_dma)
		dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE);
	return -ENOMEM;
}

static void mpc8xxx_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi)
{
	struct device *dev = mspi->dev;
	struct spi_transfer *t = mspi->xfer_in_progress;

	if (mspi->map_tx_dma)
		dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE);
	if (mspi->map_rx_dma)
		dma_unmap_single(dev, mspi->rx_dma, t->len, DMA_FROM_DEVICE);
	mspi->xfer_in_progress = NULL;
}

static int mpc8xxx_spi_cpu_bufs(struct mpc8xxx_spi *mspi,
				struct spi_transfer *t, unsigned int len)
{
	u32 word;

	mspi->count = len;

	/* enable rx ints */
	mpc8xxx_spi_write_reg(&mspi->base->mask, SPIM_NE);

	/* transmit word */
	word = mspi->get_tx(mspi);
	mpc8xxx_spi_write_reg(&mspi->base->transmit, word);

	return 0;
}
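
/*
 * Common transfer entry point: validates the length against bits_per_word
 * (the PIO path counts words, the CPM path works on the raw byte length),
 * kicks off the CPM or CPU routine and then sleeps on the completion that
 * the interrupt handler signals.  The value returned is the residual count,
 * so anything non-zero means the transfer did not finish.
 */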
static int mpc8xxx_spi_bufs(struct spi_device *spi, struct spi_transfer *t,
			    bool is_dma_mapped)
{
	struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
	unsigned int len = t->len;
	u8 bits_per_word;
	int ret;

	bits_per_word = spi->bits_per_word;
	if (t->bits_per_word)
		bits_per_word = t->bits_per_word;

	if (bits_per_word > 8) {
		/* invalid length? */
		if (len & 1)
			return -EINVAL;
		len /= 2;
	}
	if (bits_per_word > 16) {
		/* invalid length? */
		if (len & 1)
			return -EINVAL;
		len /= 2;
	}

	mpc8xxx_spi->tx = t->tx_buf;
	mpc8xxx_spi->rx = t->rx_buf;

	INIT_COMPLETION(mpc8xxx_spi->done);

	if (mpc8xxx_spi->flags & SPI_CPM_MODE)
		ret = mpc8xxx_spi_cpm_bufs(mpc8xxx_spi, t, is_dma_mapped);
	else
		ret = mpc8xxx_spi_cpu_bufs(mpc8xxx_spi, t, len);
	if (ret)
		return ret;

	wait_for_completion(&mpc8xxx_spi->done);

	/* disable rx ints */
	mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mask, 0);

	if (mpc8xxx_spi->flags & SPI_CPM_MODE)
		mpc8xxx_spi_cpm_bufs_complete(mpc8xxx_spi);

	return mpc8xxx_spi->count;
}

static void mpc8xxx_spi_do_one_msg(struct spi_message *m)
{
	struct spi_device *spi = m->spi;
	struct spi_transfer *t;
	unsigned int cs_change;
	const int nsecs = 50;
	int status;

	cs_change = 1;
	status = 0;
	list_for_each_entry(t, &m->transfers, transfer_list) {
		if (t->bits_per_word || t->speed_hz) {
			/* Don't allow changes if CS is active */
			status = -EINVAL;

			if (cs_change)
				status = mpc8xxx_spi_setup_transfer(spi, t);
			if (status < 0)
				break;
		}

		if (cs_change) {
			mpc8xxx_spi_chipselect(spi, BITBANG_CS_ACTIVE);
			ndelay(nsecs);
		}
		cs_change = t->cs_change;
		if (t->len)
			status = mpc8xxx_spi_bufs(spi, t, m->is_dma_mapped);
		if (status) {
			status = -EMSGSIZE;
			break;
		}
		m->actual_length += t->len;

		if (t->delay_usecs)
			udelay(t->delay_usecs);

		if (cs_change) {
			ndelay(nsecs);
			mpc8xxx_spi_chipselect(spi, BITBANG_CS_INACTIVE);
			ndelay(nsecs);
		}
	}

	m->status = status;
	m->complete(m->context);

	if (status || !cs_change) {
		ndelay(nsecs);
		mpc8xxx_spi_chipselect(spi, BITBANG_CS_INACTIVE);
	}

	mpc8xxx_spi_setup_transfer(spi, NULL);
}
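
/*
 * Message pump: mpc8xxx_spi_transfer() only queues the message and schedules
 * this work item; the single-threaded workqueue then drains the queue,
 * dropping the lock around each mpc8xxx_spi_do_one_msg() call so new
 * messages can be queued while one is in flight.
 */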
static void mpc8xxx_spi_work(struct work_struct *work)
{
	struct mpc8xxx_spi *mpc8xxx_spi = container_of(work, struct mpc8xxx_spi,
						       work);

	spin_lock_irq(&mpc8xxx_spi->lock);
	while (!list_empty(&mpc8xxx_spi->queue)) {
		struct spi_message *m = container_of(mpc8xxx_spi->queue.next,
						     struct spi_message, queue);

		list_del_init(&m->queue);
		spin_unlock_irq(&mpc8xxx_spi->lock);

		mpc8xxx_spi_do_one_msg(m);

		spin_lock_irq(&mpc8xxx_spi->lock);
	}
	spin_unlock_irq(&mpc8xxx_spi->lock);
}

static int mpc8xxx_spi_setup(struct spi_device *spi)
{
	struct mpc8xxx_spi *mpc8xxx_spi;
	int retval;
	u32 hw_mode;
	struct spi_mpc8xxx_cs *cs = spi->controller_state;

	if (!spi->max_speed_hz)
		return -EINVAL;

	if (!cs) {
		cs = kzalloc(sizeof *cs, GFP_KERNEL);
		if (!cs)
			return -ENOMEM;
		spi->controller_state = cs;
	}
	mpc8xxx_spi = spi_master_get_devdata(spi->master);

	hw_mode = cs->hw_mode; /* Save original settings */
	cs->hw_mode = mpc8xxx_spi_read_reg(&mpc8xxx_spi->base->mode);
	/* mask out bits we are going to set */
	cs->hw_mode &= ~(SPMODE_CP_BEGIN_EDGECLK | SPMODE_CI_INACTIVEHIGH
			 | SPMODE_REV | SPMODE_LOOP);

	if (spi->mode & SPI_CPHA)
		cs->hw_mode |= SPMODE_CP_BEGIN_EDGECLK;
	if (spi->mode & SPI_CPOL)
		cs->hw_mode |= SPMODE_CI_INACTIVEHIGH;
	if (!(spi->mode & SPI_LSB_FIRST))
		cs->hw_mode |= SPMODE_REV;
	if (spi->mode & SPI_LOOP)
		cs->hw_mode |= SPMODE_LOOP;

	retval = mpc8xxx_spi_setup_transfer(spi, NULL);
	if (retval < 0) {
		cs->hw_mode = hw_mode; /* Restore settings */
		return retval;
	}
	return 0;
}
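
/*
 * Interrupt handling: the CPM/QE path gets one RXB event per buffer
 * descriptor and either restarts the next chunk or completes the transfer;
 * the CPU path gets a "not empty" event per received word, spins briefly
 * for "not full" if needed, and feeds the next word to transmit from here.
 */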
static void mpc8xxx_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events)
{
	u16 len;

	dev_dbg(mspi->dev, "%s: bd datlen %d, count %d\n", __func__,
		in_be16(&mspi->rx_bd->cbd_datlen), mspi->count);

	len = in_be16(&mspi->rx_bd->cbd_datlen);
	if (len > mspi->count) {
		WARN_ON(1);
		len = mspi->count;
	}

	/* Clear the events */
	mpc8xxx_spi_write_reg(&mspi->base->event, events);

	mspi->count -= len;
	if (mspi->count)
		mpc8xxx_spi_cpm_bufs_start(mspi);
	else
		complete(&mspi->done);
}

static void mpc8xxx_spi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events)
{
	/* We need to handle RX first */
	if (events & SPIE_NE) {
		u32 rx_data = mpc8xxx_spi_read_reg(&mspi->base->receive);

		if (mspi->rx)
			mspi->get_rx(rx_data, mspi);
	}

	if ((events & SPIE_NF) == 0)
		/* spin until TX is done */
		while (((events =
			mpc8xxx_spi_read_reg(&mspi->base->event)) &
						SPIE_NF) == 0)
			cpu_relax();

	/* Clear the events */
	mpc8xxx_spi_write_reg(&mspi->base->event, events);

	mspi->count -= 1;
	if (mspi->count) {
		u32 word = mspi->get_tx(mspi);

		mpc8xxx_spi_write_reg(&mspi->base->transmit, word);
	} else {
		complete(&mspi->done);
	}
}

static irqreturn_t mpc8xxx_spi_irq(s32 irq, void *context_data)
{
	struct mpc8xxx_spi *mspi = context_data;
	irqreturn_t ret = IRQ_NONE;
	u32 events;

	/* Get interrupt events (tx/rx) */
	events = mpc8xxx_spi_read_reg(&mspi->base->event);
	if (events)
		ret = IRQ_HANDLED;

	dev_dbg(mspi->dev, "%s: events %x\n", __func__, events);

	if (mspi->flags & SPI_CPM_MODE)
		mpc8xxx_spi_cpm_irq(mspi, events);
	else
		mpc8xxx_spi_cpu_irq(mspi, events);

	return ret;
}

static int mpc8xxx_spi_transfer(struct spi_device *spi,
				struct spi_message *m)
{
	struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
	unsigned long flags;

	m->actual_length = 0;
	m->status = -EINPROGRESS;

	spin_lock_irqsave(&mpc8xxx_spi->lock, flags);
	list_add_tail(&m->queue, &mpc8xxx_spi->queue);
	queue_work(mpc8xxx_spi->workqueue, &mpc8xxx_spi->work);
	spin_unlock_irqrestore(&mpc8xxx_spi->lock, flags);

	return 0;
}

static void mpc8xxx_spi_cleanup(struct spi_device *spi)
{
	kfree(spi->controller_state);
}
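
/*
 * The CPM path always needs valid DMA addresses, so transfers without an
 * rx_buf receive into a single shared, refcounted dummy buffer allocated
 * here, while transfers without a tx_buf send from empty_zero_page (mapped
 * in mpc8xxx_spi_cpm_init()).
 */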
static void *mpc8xxx_spi_alloc_dummy_rx(void)
{
	mutex_lock(&mpc8xxx_dummy_rx_lock);

	if (!mpc8xxx_dummy_rx)
		mpc8xxx_dummy_rx = kmalloc(SPI_MRBLR, GFP_KERNEL);
	if (mpc8xxx_dummy_rx)
		mpc8xxx_dummy_rx_refcnt++;

	mutex_unlock(&mpc8xxx_dummy_rx_lock);

	return mpc8xxx_dummy_rx;
}

static void mpc8xxx_spi_free_dummy_rx(void)
{
	mutex_lock(&mpc8xxx_dummy_rx_lock);

	switch (mpc8xxx_dummy_rx_refcnt) {
	case 0:
		WARN_ON(1);
		break;
	case 1:
		kfree(mpc8xxx_dummy_rx);
		mpc8xxx_dummy_rx = NULL;
		/* fall through */
	default:
		mpc8xxx_dummy_rx_refcnt--;
		break;
	}

	mutex_unlock(&mpc8xxx_dummy_rx_lock);
}

static unsigned long mpc8xxx_spi_cpm_get_pram(struct mpc8xxx_spi *mspi)
{
	struct device *dev = mspi->dev;
	struct device_node *np = dev->of_node;
	const u32 *iprop;
	int size;
	unsigned long spi_base_ofs;
	unsigned long pram_ofs = -ENOMEM;

	/* Can't use of_address_to_resource(), QE muram isn't at 0. */
	iprop = of_get_property(np, "reg", &size);

	/* QE with a fixed pram location? */
	if (mspi->flags & SPI_QE && iprop && size == sizeof(*iprop) * 4)
		return cpm_muram_alloc_fixed(iprop[2], SPI_PRAM_SIZE);

	/* QE but with a dynamic pram location? */
	if (mspi->flags & SPI_QE) {
		pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
		qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, mspi->subblock,
			     QE_CR_PROTOCOL_UNSPECIFIED, pram_ofs);
		return pram_ofs;
	}

	/* CPM1 and CPM2 pram must be at a fixed addr. */
	if (!iprop || size != sizeof(*iprop) * 4)
		return -ENOMEM;

	spi_base_ofs = cpm_muram_alloc_fixed(iprop[2], 2);
	if (IS_ERR_VALUE(spi_base_ofs))
		return -ENOMEM;

	if (mspi->flags & SPI_CPM2) {
		pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
		if (!IS_ERR_VALUE(pram_ofs)) {
			u16 __iomem *spi_base = cpm_muram_addr(spi_base_ofs);

			out_be16(spi_base, pram_ofs);
		}
	} else {
		struct spi_pram __iomem *pram = cpm_muram_addr(spi_base_ofs);
		u16 rpbase = in_be16(&pram->rpbase);

		/* Microcode relocation patch applied? */
		if (rpbase)
			pram_ofs = rpbase;
		else
			return spi_base_ofs;
	}

	cpm_muram_free(spi_base_ofs);
	return pram_ofs;
}

static int mpc8xxx_spi_cpm_init(struct mpc8xxx_spi *mspi)
{
	struct device *dev = mspi->dev;
	struct device_node *np = dev->of_node;
	const u32 *iprop;
	int size;
	unsigned long pram_ofs;
	unsigned long bds_ofs;

	if (!(mspi->flags & SPI_CPM_MODE))
		return 0;

	if (!mpc8xxx_spi_alloc_dummy_rx())
		return -ENOMEM;

	if (mspi->flags & SPI_QE) {
		iprop = of_get_property(np, "cell-index", &size);
		if (iprop && size == sizeof(*iprop))
			mspi->subblock = *iprop;

		switch (mspi->subblock) {
		default:
			dev_warn(dev, "cell-index unspecified, assuming SPI1");
			/* fall through */
		case 0:
			mspi->subblock = QE_CR_SUBBLOCK_SPI1;
			break;
		case 1:
			mspi->subblock = QE_CR_SUBBLOCK_SPI2;
			break;
		}
	}

	pram_ofs = mpc8xxx_spi_cpm_get_pram(mspi);
	if (IS_ERR_VALUE(pram_ofs)) {
		dev_err(dev, "can't allocate spi parameter ram\n");
		goto err_pram;
	}

	bds_ofs = cpm_muram_alloc(sizeof(*mspi->tx_bd) +
				  sizeof(*mspi->rx_bd), 8);
	if (IS_ERR_VALUE(bds_ofs)) {
		dev_err(dev, "can't allocate bds\n");
		goto err_bds;
	}

	mspi->dma_dummy_tx = dma_map_single(dev, empty_zero_page, PAGE_SIZE,
					    DMA_TO_DEVICE);
	if (dma_mapping_error(dev, mspi->dma_dummy_tx)) {
		dev_err(dev, "unable to map dummy tx buffer\n");
		goto err_dummy_tx;
	}

	mspi->dma_dummy_rx = dma_map_single(dev, mpc8xxx_dummy_rx, SPI_MRBLR,
					    DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, mspi->dma_dummy_rx)) {
		dev_err(dev, "unable to map dummy rx buffer\n");
		goto err_dummy_rx;
	}

	mspi->pram = cpm_muram_addr(pram_ofs);

	mspi->tx_bd = cpm_muram_addr(bds_ofs);
	mspi->rx_bd = cpm_muram_addr(bds_ofs + sizeof(*mspi->tx_bd));

	/* Initialize parameter ram. */
	out_be16(&mspi->pram->tbase, cpm_muram_offset(mspi->tx_bd));
	out_be16(&mspi->pram->rbase, cpm_muram_offset(mspi->rx_bd));
	out_8(&mspi->pram->tfcr, CPMFCR_EB | CPMFCR_GBL);
	out_8(&mspi->pram->rfcr, CPMFCR_EB | CPMFCR_GBL);
	out_be16(&mspi->pram->mrblr, SPI_MRBLR);
	out_be32(&mspi->pram->rstate, 0);
	out_be32(&mspi->pram->rdp, 0);
	out_be16(&mspi->pram->rbptr, 0);
	out_be16(&mspi->pram->rbc, 0);
	out_be32(&mspi->pram->rxtmp, 0);
	out_be32(&mspi->pram->tstate, 0);
	out_be32(&mspi->pram->tdp, 0);
	out_be16(&mspi->pram->tbptr, 0);
	out_be16(&mspi->pram->tbc, 0);
	out_be32(&mspi->pram->txtmp, 0);

	return 0;

err_dummy_rx:
	dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE);
err_dummy_tx:
	cpm_muram_free(bds_ofs);
err_bds:
	cpm_muram_free(pram_ofs);
err_pram:
	mpc8xxx_spi_free_dummy_rx();
	return -ENOMEM;
}

static void mpc8xxx_spi_cpm_free(struct mpc8xxx_spi *mspi)
{
	struct device *dev = mspi->dev;

	dma_unmap_single(dev, mspi->dma_dummy_rx, SPI_MRBLR, DMA_FROM_DEVICE);
	dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE);
	cpm_muram_free(cpm_muram_offset(mspi->tx_bd));
	cpm_muram_free(cpm_muram_offset(mspi->pram));
	mpc8xxx_spi_free_dummy_rx();
}

static const char *mpc8xxx_spi_strmode(unsigned int flags)
{
	if (flags & SPI_QE_CPU_MODE) {
		return "QE CPU";
	} else if (flags & SPI_CPM_MODE) {
		if (flags & SPI_QE)
			return "QE";
		else if (flags & SPI_CPM2)
			return "CPM2";
		else
			return "CPM1";
	}
	return "CPU";
}
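
/*
 * Common probe path, shared by the OF binding and the legacy platform
 * binding below: allocate the master, set up CPM/QE resources when needed,
 * map the registers, request the IRQ, program SPMODE_INIT_VAL and finally
 * register the master together with its message-pump workqueue.
 */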
static struct spi_master * __devinit
mpc8xxx_spi_probe(struct device *dev, struct resource *mem, unsigned int irq)
{
	struct fsl_spi_platform_data *pdata = dev->platform_data;
	struct spi_master *master;
	struct mpc8xxx_spi *mpc8xxx_spi;
	u32 regval;
	int ret = 0;

	master = spi_alloc_master(dev, sizeof(struct mpc8xxx_spi));
	if (master == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	dev_set_drvdata(dev, master);

	/* the spi->mode bits understood by this driver: */
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH
			| SPI_LSB_FIRST | SPI_LOOP;

	master->setup = mpc8xxx_spi_setup;
	master->transfer = mpc8xxx_spi_transfer;
	master->cleanup = mpc8xxx_spi_cleanup;
	master->dev.of_node = dev->of_node;

	mpc8xxx_spi = spi_master_get_devdata(master);
	mpc8xxx_spi->dev = dev;
	mpc8xxx_spi->get_rx = mpc8xxx_spi_rx_buf_u8;
	mpc8xxx_spi->get_tx = mpc8xxx_spi_tx_buf_u8;
	mpc8xxx_spi->flags = pdata->flags;
	mpc8xxx_spi->spibrg = pdata->sysclk;

	ret = mpc8xxx_spi_cpm_init(mpc8xxx_spi);
	if (ret)
		goto err_cpm_init;

	mpc8xxx_spi->rx_shift = 0;
	mpc8xxx_spi->tx_shift = 0;
	if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) {
		mpc8xxx_spi->rx_shift = 16;
		mpc8xxx_spi->tx_shift = 24;
	}

	init_completion(&mpc8xxx_spi->done);

	mpc8xxx_spi->base = ioremap(mem->start, resource_size(mem));
	if (mpc8xxx_spi->base == NULL) {
		ret = -ENOMEM;
		goto err_ioremap;
	}

	mpc8xxx_spi->irq = irq;

	/* Register for SPI Interrupt */
	ret = request_irq(mpc8xxx_spi->irq, mpc8xxx_spi_irq,
			  0, "mpc8xxx_spi", mpc8xxx_spi);

	if (ret != 0)
		goto unmap_io;

	master->bus_num = pdata->bus_num;
	master->num_chipselect = pdata->max_chipselect;

	/* SPI controller initializations */
	mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mode, 0);
	mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mask, 0);
	mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->command, 0);
	mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->event, 0xffffffff);

	/* Enable SPI interface */
	regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE;
	if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE)
		regval |= SPMODE_OP;

	mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mode, regval);
	spin_lock_init(&mpc8xxx_spi->lock);
	init_completion(&mpc8xxx_spi->done);
	INIT_WORK(&mpc8xxx_spi->work, mpc8xxx_spi_work);
	INIT_LIST_HEAD(&mpc8xxx_spi->queue);

	mpc8xxx_spi->workqueue = create_singlethread_workqueue(
		dev_name(master->dev.parent));
	if (mpc8xxx_spi->workqueue == NULL) {
		ret = -EBUSY;
		goto free_irq;
	}

	ret = spi_register_master(master);
	if (ret < 0)
		goto unreg_master;

	dev_info(dev, "at 0x%p (irq = %d), %s mode\n", mpc8xxx_spi->base,
		 mpc8xxx_spi->irq, mpc8xxx_spi_strmode(mpc8xxx_spi->flags));

	return master;

unreg_master:
	destroy_workqueue(mpc8xxx_spi->workqueue);
free_irq:
	free_irq(mpc8xxx_spi->irq, mpc8xxx_spi);
unmap_io:
	iounmap(mpc8xxx_spi->base);
err_ioremap:
	mpc8xxx_spi_cpm_free(mpc8xxx_spi);
err_cpm_init:
	spi_master_put(master);
err:
	return ERR_PTR(ret);
}

static int __devexit mpc8xxx_spi_remove(struct device *dev)
{
	struct mpc8xxx_spi *mpc8xxx_spi;
	struct spi_master *master;

	master = dev_get_drvdata(dev);
	mpc8xxx_spi = spi_master_get_devdata(master);

	flush_workqueue(mpc8xxx_spi->workqueue);
	destroy_workqueue(mpc8xxx_spi->workqueue);
	spi_unregister_master(master);

	free_irq(mpc8xxx_spi->irq, mpc8xxx_spi);
	iounmap(mpc8xxx_spi->base);
	mpc8xxx_spi_cpm_free(mpc8xxx_spi);

	return 0;
}

struct mpc8xxx_spi_probe_info {
	struct fsl_spi_platform_data pdata;
	int *gpios;
	bool *alow_flags;
};

static struct mpc8xxx_spi_probe_info *
to_of_pinfo(struct fsl_spi_platform_data *pdata)
{
	return container_of(pdata, struct mpc8xxx_spi_probe_info, pdata);
}
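
/*
 * GPIO-driven chip select: "on ^ alow" drives the line high to assert an
 * active-high CS and low to assert an active-low one, matching the
 * OF_GPIO_ACTIVE_LOW flag recorded in of_mpc8xxx_spi_get_chipselects().
 */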
static void mpc8xxx_spi_cs_control(struct spi_device *spi, bool on)
{
	struct device *dev = spi->dev.parent;
	struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(dev->platform_data);
	u16 cs = spi->chip_select;
	int gpio = pinfo->gpios[cs];
	bool alow = pinfo->alow_flags[cs];

	gpio_set_value(gpio, on ^ alow);
}

static int of_mpc8xxx_spi_get_chipselects(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct fsl_spi_platform_data *pdata = dev->platform_data;
	struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
	unsigned int ngpios;
	int i = 0;
	int ret;

	ngpios = of_gpio_count(np);
	if (!ngpios) {
		/*
		 * SPI w/o chip-select line. One SPI device is still permitted
		 * though.
		 */
		pdata->max_chipselect = 1;
		return 0;
	}

	pinfo->gpios = kmalloc(ngpios * sizeof(*pinfo->gpios), GFP_KERNEL);
	if (!pinfo->gpios)
		return -ENOMEM;
	memset(pinfo->gpios, -1, ngpios * sizeof(*pinfo->gpios));

	pinfo->alow_flags = kzalloc(ngpios * sizeof(*pinfo->alow_flags),
				    GFP_KERNEL);
	if (!pinfo->alow_flags) {
		ret = -ENOMEM;
		goto err_alloc_flags;
	}

	for (; i < ngpios; i++) {
		int gpio;
		enum of_gpio_flags flags;

		gpio = of_get_gpio_flags(np, i, &flags);
		if (!gpio_is_valid(gpio)) {
			dev_err(dev, "invalid gpio #%d: %d\n", i, gpio);
			ret = gpio;
			goto err_loop;
		}

		ret = gpio_request(gpio, dev_name(dev));
		if (ret) {
			dev_err(dev, "can't request gpio #%d: %d\n", i, ret);
			goto err_loop;
		}

		pinfo->gpios[i] = gpio;
		pinfo->alow_flags[i] = flags & OF_GPIO_ACTIVE_LOW;

		ret = gpio_direction_output(pinfo->gpios[i],
					    pinfo->alow_flags[i]);
		if (ret) {
			dev_err(dev, "can't set output direction for gpio "
				"#%d: %d\n", i, ret);
			goto err_loop;
		}
	}

	pdata->max_chipselect = ngpios;
	pdata->cs_control = mpc8xxx_spi_cs_control;

	return 0;

err_loop:
	while (i >= 0) {
		if (gpio_is_valid(pinfo->gpios[i]))
			gpio_free(pinfo->gpios[i]);
		i--;
	}

	kfree(pinfo->alow_flags);
	pinfo->alow_flags = NULL;
err_alloc_flags:
	kfree(pinfo->gpios);
	pinfo->gpios = NULL;
	return ret;
}

static int of_mpc8xxx_spi_free_chipselects(struct device *dev)
{
	struct fsl_spi_platform_data *pdata = dev->platform_data;
	struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
	int i;

	if (!pinfo->gpios)
		return 0;

	for (i = 0; i < pdata->max_chipselect; i++) {
		if (gpio_is_valid(pinfo->gpios[i]))
			gpio_free(pinfo->gpios[i]);
	}

	kfree(pinfo->gpios);
	kfree(pinfo->alow_flags);
	return 0;
}
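
/*
 * OF probe: the input clock comes from the BRG (QE) or the SoC bus clock,
 * the "mode" property ("cpu-qe" or "qe") and the fsl,cpm2-spi/fsl,cpm1-spi
 * compatibles select the operating mode, and any GPIOs listed in the node
 * are used as chip-select lines.
 */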
static int __devinit of_mpc8xxx_spi_probe(struct platform_device *ofdev,
					  const struct of_device_id *ofid)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct mpc8xxx_spi_probe_info *pinfo;
	struct fsl_spi_platform_data *pdata;
	struct spi_master *master;
	struct resource mem;
	struct resource irq;
	const void *prop;
	int ret = -ENOMEM;

	pinfo = kzalloc(sizeof(*pinfo), GFP_KERNEL);
	if (!pinfo)
		return -ENOMEM;

	pdata = &pinfo->pdata;
	dev->platform_data = pdata;

	/* Allocate bus num dynamically. */
	pdata->bus_num = -1;

	/* SPI controller is either clocked from QE or SoC clock. */
	pdata->sysclk = get_brgfreq();
	if (pdata->sysclk == -1) {
		pdata->sysclk = fsl_get_sys_freq();
		if (pdata->sysclk == -1) {
			ret = -ENODEV;
			goto err_clk;
		}
	}

	prop = of_get_property(np, "mode", NULL);
	if (prop && !strcmp(prop, "cpu-qe"))
		pdata->flags = SPI_QE_CPU_MODE;
	else if (prop && !strcmp(prop, "qe"))
		pdata->flags = SPI_CPM_MODE | SPI_QE;
	else if (of_device_is_compatible(np, "fsl,cpm2-spi"))
		pdata->flags = SPI_CPM_MODE | SPI_CPM2;
	else if (of_device_is_compatible(np, "fsl,cpm1-spi"))
		pdata->flags = SPI_CPM_MODE | SPI_CPM1;

	ret = of_mpc8xxx_spi_get_chipselects(dev);
	if (ret)
		goto err;

	ret = of_address_to_resource(np, 0, &mem);
	if (ret)
		goto err;

	ret = of_irq_to_resource(np, 0, &irq);
	if (!ret) {
		ret = -EINVAL;
		goto err;
	}

	master = mpc8xxx_spi_probe(dev, &mem, irq.start);
	if (IS_ERR(master)) {
		ret = PTR_ERR(master);
		goto err;
	}

	return 0;

err:
	of_mpc8xxx_spi_free_chipselects(dev);
err_clk:
	kfree(pinfo);
	return ret;
}

static int __devexit of_mpc8xxx_spi_remove(struct platform_device *ofdev)
{
	int ret;

	ret = mpc8xxx_spi_remove(&ofdev->dev);
	if (ret)
		return ret;
	of_mpc8xxx_spi_free_chipselects(&ofdev->dev);
	return 0;
}

static const struct of_device_id of_mpc8xxx_spi_match[] = {
	{ .compatible = "fsl,spi" },
	{}
};
MODULE_DEVICE_TABLE(of, of_mpc8xxx_spi_match);

static struct of_platform_driver of_mpc8xxx_spi_driver = {
	.driver = {
		.name = "mpc8xxx_spi",
		.owner = THIS_MODULE,
		.of_match_table = of_mpc8xxx_spi_match,
	},
	.probe		= of_mpc8xxx_spi_probe,
	.remove		= __devexit_p(of_mpc8xxx_spi_remove),
};

#ifdef CONFIG_MPC832x_RDB
static int __devinit plat_mpc8xxx_spi_probe(struct platform_device *pdev)
{
	struct resource *mem;
	int irq;
	struct spi_master *master;

	if (!pdev->dev.platform_data)
		return -EINVAL;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0)
		return -EINVAL;

	master = mpc8xxx_spi_probe(&pdev->dev, mem, irq);
	if (IS_ERR(master))
		return PTR_ERR(master);
	return 0;
}

static int __devexit plat_mpc8xxx_spi_remove(struct platform_device *pdev)
{
	return mpc8xxx_spi_remove(&pdev->dev);
}

MODULE_ALIAS("platform:mpc8xxx_spi");
static struct platform_driver mpc8xxx_spi_driver = {
	.probe = plat_mpc8xxx_spi_probe,
	.remove = __devexit_p(plat_mpc8xxx_spi_remove),
	.driver = {
		.name = "mpc8xxx_spi",
		.owner = THIS_MODULE,
	},
};

static bool legacy_driver_failed;

static void __init legacy_driver_register(void)
{
	legacy_driver_failed = platform_driver_register(&mpc8xxx_spi_driver);
}

static void __exit legacy_driver_unregister(void)
{
	if (legacy_driver_failed)
		return;
	platform_driver_unregister(&mpc8xxx_spi_driver);
}
#else
static void __init legacy_driver_register(void) {}
static void __exit legacy_driver_unregister(void) {}
#endif /* CONFIG_MPC832x_RDB */

static int __init mpc8xxx_spi_init(void)
{
	legacy_driver_register();
	return of_register_platform_driver(&of_mpc8xxx_spi_driver);
}

static void __exit mpc8xxx_spi_exit(void)
{
	of_unregister_platform_driver(&of_mpc8xxx_spi_driver);
	legacy_driver_unregister();
}

module_init(mpc8xxx_spi_init);
module_exit(mpc8xxx_spi_exit);

MODULE_AUTHOR("Kumar Gala");
MODULE_DESCRIPTION("Simple MPC8xxx SPI Driver");
MODULE_LICENSE("GPL");