/*
 * dw_spi.c - Designware SPI core controller driver (refer pxa2xx_spi.c)
 *
 * Copyright (c) 2009, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/delay.h>
#include <linux/slab.h>

#include <linux/spi/dw_spi.h>
#include <linux/spi/spi.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#endif
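
/*
 * Transfer/message state values, stored in spi_message->state and consumed
 * by pump_messages()/pump_transfers() below.
 */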
#define START_STATE	((void *)0)
#define RUNNING_STATE	((void *)1)
#define DONE_STATE	((void *)2)
#define ERROR_STATE	((void *)-1)

#define QUEUE_RUNNING	0
#define QUEUE_STOPPED	1

#define MRST_SPI_DEASSERT	0
#define MRST_SPI_ASSERT		1
/* Slave spi_dev related */
struct chip_data {
	u16 cr0;
	u8 cs;			/* chip select pin */
	u8 n_bytes;		/* current is a 1/2/4 byte op */
	u8 tmode;		/* TR/TO/RO/EEPROM */
	u8 type;		/* SPI/SSP/MicroWire */

	u8 poll_mode;		/* 1 means use poll mode */

	u32 dma_width;
	u32 rx_threshold;
	u32 tx_threshold;
	u8 enable_dma;
	u8 bits_per_word;
	u16 clk_div;		/* baud rate divider */
	u32 speed_hz;		/* baud rate */
	int (*write)(struct dw_spi *dws);
	int (*read)(struct dw_spi *dws);
	void (*cs_control)(u32 command);
};
#ifdef CONFIG_DEBUG_FS
static int spi_show_regs_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

#define SPI_REGS_BUFSIZE	1024
static ssize_t spi_show_regs(struct file *file, char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct dw_spi *dws;
	char *buf;
	u32 len = 0;
	ssize_t ret;

	dws = file->private_data;

	buf = kzalloc(SPI_REGS_BUFSIZE, GFP_KERNEL);
	if (!buf)
		return 0;

	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"MRST SPI0 registers:\n");
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"=================================\n");
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"CTRL0: \t\t0x%08x\n", dw_readl(dws, ctrl0));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"CTRL1: \t\t0x%08x\n", dw_readl(dws, ctrl1));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"SSIENR: \t0x%08x\n", dw_readl(dws, ssienr));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"SER: \t\t0x%08x\n", dw_readl(dws, ser));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"BAUDR: \t\t0x%08x\n", dw_readl(dws, baudr));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"TXFTLR: \t0x%08x\n", dw_readl(dws, txfltr));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"RXFTLR: \t0x%08x\n", dw_readl(dws, rxfltr));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"TXFLR: \t\t0x%08x\n", dw_readl(dws, txflr));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"RXFLR: \t\t0x%08x\n", dw_readl(dws, rxflr));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"SR: \t\t0x%08x\n", dw_readl(dws, sr));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"IMR: \t\t0x%08x\n", dw_readl(dws, imr));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"ISR: \t\t0x%08x\n", dw_readl(dws, isr));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"DMACR: \t\t0x%08x\n", dw_readl(dws, dmacr));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"DMATDLR: \t0x%08x\n", dw_readl(dws, dmatdlr));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"DMARDLR: \t0x%08x\n", dw_readl(dws, dmardlr));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"=================================\n");

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
	kfree(buf);
	return ret;
}
static const struct file_operations mrst_spi_regs_ops = {
	.owner		= THIS_MODULE,
	.open		= spi_show_regs_open,
	.read		= spi_show_regs,
};
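
/*
 * With debugfs mounted (commonly at /sys/kernel/debug), the register dump
 * above is readable from <debugfs>/mrst_spi/registers.
 */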
static int mrst_spi_debugfs_init(struct dw_spi *dws)
{
	dws->debugfs = debugfs_create_dir("mrst_spi", NULL);
	if (!dws->debugfs)
		return -ENOMEM;

	debugfs_create_file("registers", S_IFREG | S_IRUGO,
		dws->debugfs, (void *)dws, &mrst_spi_regs_ops);
	return 0;
}
static void mrst_spi_debugfs_remove(struct dw_spi *dws)
{
	if (dws->debugfs)
		debugfs_remove_recursive(dws->debugfs);
}

#else
static inline int mrst_spi_debugfs_init(struct dw_spi *dws)
{
	return 0;
}

static inline void mrst_spi_debugfs_remove(struct dw_spi *dws)
{
}
#endif /* CONFIG_DEBUG_FS */
static void wait_till_not_busy(struct dw_spi *dws)
{
	unsigned long end = jiffies + 1 + usecs_to_jiffies(1000);

	while (time_before(jiffies, end)) {
		if (!(dw_readw(dws, sr) & SR_BUSY))
			return;
	}
	dev_err(&dws->master->dev,
		"DW SPI: Status keeps busy for 1000us after a read/write!\n");
}
static void flush(struct dw_spi *dws)
{
	while (dw_readw(dws, sr) & SR_RF_NOT_EMPT)
		dw_readw(dws, dr);

	wait_till_not_busy(dws);
}
static void null_cs_control(u32 command)
{
}
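
/*
 * PIO helpers: each *_writer pushes at most one entry into the TX FIFO per
 * call, and each *_reader drains whatever the RX FIFO currently holds.  The
 * null_* variants stand in when a transfer has no tx_buf or rx_buf, since the
 * controller always clocks both directions.
 */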
static int null_writer(struct dw_spi *dws)
{
	u8 n_bytes = dws->n_bytes;

	if (!(dw_readw(dws, sr) & SR_TF_NOT_FULL)
		|| (dws->tx == dws->tx_end))
		return 0;
	dw_writew(dws, dr, 0);
	dws->tx += n_bytes;

	wait_till_not_busy(dws);
	return 1;
}
static int null_reader(struct dw_spi *dws)
{
	u8 n_bytes = dws->n_bytes;

	while ((dw_readw(dws, sr) & SR_RF_NOT_EMPT)
		&& (dws->rx < dws->rx_end)) {
		dw_readw(dws, dr);
		dws->rx += n_bytes;
	}
	wait_till_not_busy(dws);
	return dws->rx == dws->rx_end;
}
static int u8_writer(struct dw_spi *dws)
{
	if (!(dw_readw(dws, sr) & SR_TF_NOT_FULL)
		|| (dws->tx == dws->tx_end))
		return 0;

	dw_writew(dws, dr, *(u8 *)(dws->tx));
	++dws->tx;

	wait_till_not_busy(dws);
	return 1;
}
static int u8_reader(struct dw_spi *dws)
{
	while ((dw_readw(dws, sr) & SR_RF_NOT_EMPT)
		&& (dws->rx < dws->rx_end)) {
		*(u8 *)(dws->rx) = dw_readw(dws, dr);
		++dws->rx;
	}

	wait_till_not_busy(dws);
	return dws->rx == dws->rx_end;
}
static int u16_writer(struct dw_spi *dws)
{
	if (!(dw_readw(dws, sr) & SR_TF_NOT_FULL)
		|| (dws->tx == dws->tx_end))
		return 0;

	dw_writew(dws, dr, *(u16 *)(dws->tx));
	dws->tx += 2;

	wait_till_not_busy(dws);
	return 1;
}
static int u16_reader(struct dw_spi *dws)
{
	u16 temp;

	while ((dw_readw(dws, sr) & SR_RF_NOT_EMPT)
		&& (dws->rx < dws->rx_end)) {
		temp = dw_readw(dws, dr);
		*(u16 *)(dws->rx) = temp;
		dws->rx += 2;
	}

	wait_till_not_busy(dws);
	return dws->rx == dws->rx_end;
}
static void *next_transfer(struct dw_spi *dws)
{
	struct spi_message *msg = dws->cur_msg;
	struct spi_transfer *trans = dws->cur_transfer;

	/* Move to next transfer */
	if (trans->transfer_list.next != &msg->transfers) {
		dws->cur_transfer =
			list_entry(trans->transfer_list.next,
					struct spi_transfer,
					transfer_list);
		return RUNNING_STATE;
	} else
		return DONE_STATE;
}
/*
 * Note: the protocol driver is expected to have prepared DMA-capable
 * buffers already, so this function only picks up the pre-mapped DMA
 * addresses from the current transfer.
 */
static int map_dma_buffers(struct dw_spi *dws)
{
	if (!dws->cur_msg->is_dma_mapped || !dws->dma_inited
		|| !dws->cur_chip->enable_dma)
		return 0;

	if (dws->cur_transfer->tx_dma)
		dws->tx_dma = dws->cur_transfer->tx_dma;

	if (dws->cur_transfer->rx_dma)
		dws->rx_dma = dws->cur_transfer->rx_dma;

	return 1;
}
/* Caller already set message->status; dma and pio irqs are blocked */
static void giveback(struct dw_spi *dws)
{
	struct spi_transfer *last_transfer;
	unsigned long flags;
	struct spi_message *msg;

	spin_lock_irqsave(&dws->lock, flags);
	msg = dws->cur_msg;
	dws->cur_msg = NULL;
	dws->cur_transfer = NULL;
	dws->prev_chip = dws->cur_chip;
	dws->cur_chip = NULL;
	dws->dma_mapped = 0;
	queue_work(dws->workqueue, &dws->pump_messages);
	spin_unlock_irqrestore(&dws->lock, flags);

	last_transfer = list_entry(msg->transfers.prev,
					struct spi_transfer,
					transfer_list);

	if (!last_transfer->cs_change)
		dws->cs_control(MRST_SPI_DEASSERT);

	msg->state = NULL;
	if (msg->complete)
		msg->complete(msg->context);
}
static void int_error_stop(struct dw_spi *dws, const char *msg)
{
	/* Stop and reset hw */
	flush(dws);
	spi_enable_chip(dws, 0);

	dev_err(&dws->master->dev, "%s\n", msg);
	dws->cur_msg->state = ERROR_STATE;
	tasklet_schedule(&dws->pump_transfers);
}
static void transfer_complete(struct dw_spi *dws)
{
	/* Update the total number of bytes transferred */
	dws->cur_msg->actual_length += dws->len;

	/* Move to next transfer */
	dws->cur_msg->state = next_transfer(dws);

	/* Handle end of message */
	if (dws->cur_msg->state == DONE_STATE) {
		dws->cur_msg->status = 0;
		giveback(dws);
	} else
		tasklet_schedule(&dws->pump_transfers);
}
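
/*
 * TXEI-driven pump: on each TX-empty interrupt refill up to half the FIFO,
 * drain whatever has arrived on the RX side, then either re-arm TXEI or
 * finish the current transfer.
 */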
static irqreturn_t interrupt_transfer(struct dw_spi *dws)
{
	u16 irq_status, irq_mask = 0x3f;
	u32 int_level = dws->fifo_len / 2;
	u32 left;

	irq_status = dw_readw(dws, isr) & irq_mask;
	/* Error handling */
	if (irq_status & (SPI_INT_TXOI | SPI_INT_RXOI | SPI_INT_RXUI)) {
		dw_readw(dws, txoicr);
		dw_readw(dws, rxoicr);
		dw_readw(dws, rxuicr);
		int_error_stop(dws, "interrupt_transfer: fifo overrun/underrun");
		return IRQ_HANDLED;
	}

	if (irq_status & SPI_INT_TXEI) {
		spi_mask_intr(dws, SPI_INT_TXEI);

		left = (dws->tx_end - dws->tx) / dws->n_bytes;
		left = (left > int_level) ? int_level : left;

		while (left--)
			dws->write(dws);
		dws->read(dws);

		/* Re-enable the IRQ if there is still data left to tx */
		if (dws->tx_end > dws->tx)
			spi_umask_intr(dws, SPI_INT_TXEI);
		else
			transfer_complete(dws);
	}

	return IRQ_HANDLED;
}
static irqreturn_t dw_spi_irq(int irq, void *dev_id)
{
	struct dw_spi *dws = dev_id;

	if (!dws->cur_msg) {
		spi_mask_intr(dws, SPI_INT_TXEI);
		/* Never fail */
		return IRQ_HANDLED;
	}

	return dws->transfer_handler(dws);
}
/* Must be called inside pump_transfers() */
static void poll_transfer(struct dw_spi *dws)
{
	while (dws->write(dws))
		dws->read(dws);

	transfer_complete(dws);
}
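
/*
 * The DMA path is not implemented in this core driver yet: dma_transfer()
 * below is an empty stub and dw_spi_add_host() leaves dma_inited at 0, so
 * map_dma_buffers() never reports a DMA-mapped transfer.
 */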
static void dma_transfer(struct dw_spi *dws, int cs_change)
{
}
static void pump_transfers(unsigned long data)
{
	struct dw_spi *dws = (struct dw_spi *)data;
	struct spi_message *message = NULL;
	struct spi_transfer *transfer = NULL;
	struct spi_transfer *previous = NULL;
	struct spi_device *spi = NULL;
	struct chip_data *chip = NULL;
	u8 bits = 0;
	u8 imask = 0;
	u8 cs_change = 0;
	u16 txint_level = 0;
	u16 clk_div = 0;
	u32 speed = 0;
	u32 cr0 = 0;

	/* Get current state information */
	message = dws->cur_msg;
	transfer = dws->cur_transfer;
	chip = dws->cur_chip;
	spi = message->spi;

	if (unlikely(!chip->clk_div))
		chip->clk_div = dws->max_freq / chip->speed_hz;

	if (message->state == ERROR_STATE) {
		message->status = -EIO;
		goto early_exit;
	}

	/* Handle end of message */
	if (message->state == DONE_STATE) {
		message->status = 0;
		goto early_exit;
	}

	/* Delay if requested at end of transfer */
	if (message->state == RUNNING_STATE) {
		previous = list_entry(transfer->transfer_list.prev,
					struct spi_transfer,
					transfer_list);
		if (previous->delay_usecs)
			udelay(previous->delay_usecs);
	}

	dws->n_bytes = chip->n_bytes;
	dws->dma_width = chip->dma_width;
	dws->cs_control = chip->cs_control;

	dws->rx_dma = transfer->rx_dma;
	dws->tx_dma = transfer->tx_dma;
	dws->tx = (void *)transfer->tx_buf;
	dws->tx_end = dws->tx + transfer->len;
	dws->rx = transfer->rx_buf;
	dws->rx_end = dws->rx + transfer->len;
	dws->write = dws->tx ? chip->write : null_writer;
	dws->read = dws->rx ? chip->read : null_reader;
	dws->cs_change = transfer->cs_change;
	dws->len = dws->cur_transfer->len;
	if (chip != dws->prev_chip)
		cs_change = 1;

	cr0 = chip->cr0;

	/* Handle per transfer options for bpw and speed */
	if (transfer->speed_hz) {
		speed = chip->speed_hz;

		if (transfer->speed_hz != speed) {
			speed = transfer->speed_hz;
			if (speed > dws->max_freq) {
				printk(KERN_ERR "MRST SPI0: unsupported "
					"freq: %dHz\n", speed);
				message->status = -EIO;
				goto early_exit;
			}

			/* clk_div doesn't support odd number */
			clk_div = dws->max_freq / speed;
			clk_div = (clk_div + 1) & 0xfffe;

			chip->speed_hz = speed;
			chip->clk_div = clk_div;
		}
	}
	if (transfer->bits_per_word) {
		bits = transfer->bits_per_word;

		switch (bits) {
		case 8:
			dws->n_bytes = 1;
			dws->dma_width = 1;
			dws->read = (dws->read != null_reader) ?
					u8_reader : null_reader;
			dws->write = (dws->write != null_writer) ?
					u8_writer : null_writer;
			break;
		case 16:
			dws->n_bytes = 2;
			dws->dma_width = 2;
			dws->read = (dws->read != null_reader) ?
					u16_reader : null_reader;
			dws->write = (dws->write != null_writer) ?
					u16_writer : null_writer;
			break;
		default:
			printk(KERN_ERR "MRST SPI0: unsupported bits: "
				"%db\n", bits);
			message->status = -EIO;
			goto early_exit;
		}

		cr0 = (bits - 1)
			| (chip->type << SPI_FRF_OFFSET)
			| (spi->mode << SPI_MODE_OFFSET)
			| (chip->tmode << SPI_TMOD_OFFSET);
	}
	message->state = RUNNING_STATE;

	/*
	 * Adjust transfer mode if necessary. Requires platform dependent
	 * chipselect mechanism.
	 */
	if (dws->cs_control) {
		if (dws->rx && dws->tx)
			chip->tmode = 0x00;	/* TR: transmit and receive */
		else if (dws->rx)
			chip->tmode = 0x02;	/* RO: receive only */
		else
			chip->tmode = 0x01;	/* TO: transmit only */

		cr0 &= ~(0x3 << SPI_TMOD_OFFSET);
		cr0 |= (chip->tmode << SPI_TMOD_OFFSET);
	}

	/* Check if current transfer is a DMA transaction */
	dws->dma_mapped = map_dma_buffers(dws);

	/*
	 * Interrupt mode
	 * we only need to set the TXEI IRQ, as TX/RX always happen synchronously
	 */
	if (!dws->dma_mapped && !chip->poll_mode) {
		int templen = dws->len / dws->n_bytes;
		txint_level = dws->fifo_len / 2;
		txint_level = (templen > txint_level) ? txint_level : templen;

		imask |= SPI_INT_TXEI;
		dws->transfer_handler = interrupt_transfer;
	}

	/*
	 * Reprogram registers only if
	 *	1. chip select changes
	 *	2. clk_div is changed
	 *	3. control value changes
	 */
	if (dw_readw(dws, ctrl0) != cr0 || cs_change || clk_div || imask) {
		spi_enable_chip(dws, 0);

		if (dw_readw(dws, ctrl0) != cr0)
			dw_writew(dws, ctrl0, cr0);

		spi_set_clk(dws, clk_div ? clk_div : chip->clk_div);
		spi_chip_sel(dws, spi->chip_select);

		/* Set the interrupt mask, for poll mode just disable all interrupts */
		spi_mask_intr(dws, 0xff);
		if (imask)
			spi_umask_intr(dws, imask);
		if (txint_level)
			dw_writew(dws, txfltr, txint_level);

		spi_enable_chip(dws, 1);
		if (cs_change)
			dws->prev_chip = chip;
	}

	if (dws->dma_mapped)
		dma_transfer(dws, cs_change);

	if (chip->poll_mode)
		poll_transfer(dws);

	return;

early_exit:
	giveback(dws);
	return;
}
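
/*
 * Queueing model: dw_spi_transfer() appends messages to dws->queue,
 * pump_messages() (run from the single-threaded workqueue) dequeues the next
 * message, and pump_transfers() (tasklet) walks its transfer list until
 * giveback() completes it and kicks the workqueue again.
 */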
static void pump_messages(struct work_struct *work)
{
	struct dw_spi *dws =
		container_of(work, struct dw_spi, pump_messages);
	unsigned long flags;

	/* Lock queue and check for queue work */
	spin_lock_irqsave(&dws->lock, flags);
	if (list_empty(&dws->queue) || dws->run == QUEUE_STOPPED) {
		dws->busy = 0;
		spin_unlock_irqrestore(&dws->lock, flags);
		return;
	}

	/* Make sure we are not already running a message */
	if (dws->cur_msg) {
		spin_unlock_irqrestore(&dws->lock, flags);
		return;
	}

	/* Extract head of queue */
	dws->cur_msg = list_entry(dws->queue.next, struct spi_message, queue);
	list_del_init(&dws->cur_msg->queue);

	/* Initial message state */
	dws->cur_msg->state = START_STATE;
	dws->cur_transfer = list_entry(dws->cur_msg->transfers.next,
						struct spi_transfer,
						transfer_list);
	dws->cur_chip = spi_get_ctldata(dws->cur_msg->spi);

	/* Mark as busy and launch transfers */
	tasklet_schedule(&dws->pump_transfers);

	dws->busy = 1;
	spin_unlock_irqrestore(&dws->lock, flags);
}
/* spi_device uses this to queue its spi_msg */
static int dw_spi_transfer(struct spi_device *spi, struct spi_message *msg)
{
	struct dw_spi *dws = spi_master_get_devdata(spi->master);
	unsigned long flags;

	spin_lock_irqsave(&dws->lock, flags);

	if (dws->run == QUEUE_STOPPED) {
		spin_unlock_irqrestore(&dws->lock, flags);
		return -ESHUTDOWN;
	}

	msg->actual_length = 0;
	msg->status = -EINPROGRESS;
	msg->state = START_STATE;

	list_add_tail(&msg->queue, &dws->queue);

	if (dws->run == QUEUE_RUNNING && !dws->busy) {

		if (dws->cur_transfer || dws->cur_msg)
			queue_work(dws->workqueue,
					&dws->pump_messages);
		else {
			/* If no other data transaction is in flight, just go */
			spin_unlock_irqrestore(&dws->lock, flags);
			pump_messages(&dws->pump_messages);
			return 0;
		}
	}

	spin_unlock_irqrestore(&dws->lock, flags);
	return 0;
}
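
/*
 * Example (sketch only, not taken from this driver): a board or protocol
 * driver can hand per-device settings to dw_spi_setup() below through
 * spi_device->controller_data.  Field names follow the chip_info usage in
 * dw_spi_setup(); board_chip_info and board_cs_control are hypothetical:
 *
 *	static struct dw_spi_chip board_chip_info = {
 *		.poll_mode	= 1,			// busy-poll instead of TXEI interrupts
 *		.type		= SSI_MOTO_SPI,		// frame format; same constant this file uses for dws->type
 *		.enable_dma	= 0,
 *		.cs_control	= board_cs_control,	// hypothetical GPIO chipselect helper
 *	};
 *
 *	spi_dev->controller_data = &board_chip_info;
 */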
/* This may be called twice for each spi dev */
static int dw_spi_setup(struct spi_device *spi)
{
	struct dw_spi_chip *chip_info = NULL;
	struct chip_data *chip;

	if (spi->bits_per_word != 8 && spi->bits_per_word != 16)
		return -EINVAL;

	/* Only alloc on first setup */
	chip = spi_get_ctldata(spi);
	if (!chip) {
		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;

		chip->cs_control = null_cs_control;
		chip->enable_dma = 0;
	}

	/*
	 * Protocol drivers may change the chip settings, so...
	 * if chip_info exists, use it
	 */
	chip_info = spi->controller_data;

	/* chip_info doesn't always exist */
	if (chip_info) {
		if (chip_info->cs_control)
			chip->cs_control = chip_info->cs_control;

		chip->poll_mode = chip_info->poll_mode;
		chip->type = chip_info->type;

		chip->rx_threshold = 0;
		chip->tx_threshold = 0;

		chip->enable_dma = chip_info->enable_dma;
	}

	if (spi->bits_per_word <= 8) {
		chip->n_bytes = 1;
		chip->dma_width = 1;
		chip->read = u8_reader;
		chip->write = u8_writer;
	} else if (spi->bits_per_word <= 16) {
		chip->n_bytes = 2;
		chip->dma_width = 2;
		chip->read = u16_reader;
		chip->write = u16_writer;
	} else {
		/* Never take >16b case for MRST SPIC */
		dev_err(&spi->dev, "invalid wordsize\n");
		return -EINVAL;
	}
	chip->bits_per_word = spi->bits_per_word;

	if (!spi->max_speed_hz) {
		dev_err(&spi->dev, "No max speed HZ parameter\n");
		return -EINVAL;
	}
	chip->speed_hz = spi->max_speed_hz;

	chip->tmode = 0; /* Tx & Rx */
	/* Default SPI mode is SCPOL = 0, SCPH = 0 */
	chip->cr0 = (chip->bits_per_word - 1)
		| (chip->type << SPI_FRF_OFFSET)
		| (spi->mode << SPI_MODE_OFFSET)
		| (chip->tmode << SPI_TMOD_OFFSET);

	spi_set_ctldata(spi, chip);
	return 0;
}
static void dw_spi_cleanup(struct spi_device *spi)
{
	struct chip_data *chip = spi_get_ctldata(spi);
	kfree(chip);
}
static int __devinit init_queue(struct dw_spi *dws)
{
	INIT_LIST_HEAD(&dws->queue);
	spin_lock_init(&dws->lock);

	dws->run = QUEUE_STOPPED;
	dws->busy = 0;

	tasklet_init(&dws->pump_transfers,
			pump_transfers, (unsigned long)dws);

	INIT_WORK(&dws->pump_messages, pump_messages);
	dws->workqueue = create_singlethread_workqueue(
					dev_name(dws->master->dev.parent));
	if (dws->workqueue == NULL)
		return -EBUSY;

	return 0;
}
static int start_queue(struct dw_spi *dws)
{
	unsigned long flags;

	spin_lock_irqsave(&dws->lock, flags);

	if (dws->run == QUEUE_RUNNING || dws->busy) {
		spin_unlock_irqrestore(&dws->lock, flags);
		return -EBUSY;
	}

	dws->run = QUEUE_RUNNING;
	dws->cur_msg = NULL;
	dws->cur_transfer = NULL;
	dws->cur_chip = NULL;
	dws->prev_chip = NULL;
	spin_unlock_irqrestore(&dws->lock, flags);

	queue_work(dws->workqueue, &dws->pump_messages);

	return 0;
}
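
/*
 * stop_queue() below gives queued work up to ~500ms (50 polls, 10ms apart)
 * to drain before reporting -EBUSY.
 */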
static int stop_queue(struct dw_spi *dws)
{
	unsigned long flags;
	unsigned limit = 50;
	int status = 0;

	spin_lock_irqsave(&dws->lock, flags);
	dws->run = QUEUE_STOPPED;
	while (!list_empty(&dws->queue) && dws->busy && limit--) {
		spin_unlock_irqrestore(&dws->lock, flags);
		msleep(10);
		spin_lock_irqsave(&dws->lock, flags);
	}

	if (!list_empty(&dws->queue) || dws->busy)
		status = -EBUSY;
	spin_unlock_irqrestore(&dws->lock, flags);

	return status;
}
static int destroy_queue(struct dw_spi *dws)
{
	int status;

	status = stop_queue(dws);
	if (status != 0)
		return status;
	destroy_workqueue(dws->workqueue);
	return 0;
}
/* Restart the controller, disable all interrupts, clean rx fifo */
static void spi_hw_init(struct dw_spi *dws)
{
	spi_enable_chip(dws, 0);
	spi_mask_intr(dws, 0xff);
	spi_enable_chip(dws, 1);
	flush(dws);

	/*
	 * Try to detect the FIFO depth if not set by interface driver,
	 * the depth could be from 2 to 256 from HW spec
	 */
	if (!dws->fifo_len) {
		u32 fifo;
		for (fifo = 2; fifo <= 257; fifo++) {
			dw_writew(dws, txfltr, fifo);
			if (fifo != dw_readw(dws, txfltr))
				break;
		}

		dws->fifo_len = (fifo == 257) ? 0 : fifo;
		dw_writew(dws, txfltr, 0);
	}
}
int __devinit dw_spi_add_host(struct dw_spi *dws)
{
	struct spi_master *master;
	int ret;

	BUG_ON(dws == NULL);

	master = spi_alloc_master(dws->parent_dev, 0);
	if (!master) {
		ret = -ENOMEM;
		goto exit;
	}

	dws->master = master;
	dws->type = SSI_MOTO_SPI;
	dws->prev_chip = NULL;
	dws->dma_inited = 0;
	dws->dma_addr = (dma_addr_t)(dws->paddr + 0x60);

	ret = request_irq(dws->irq, dw_spi_irq, 0,
			"dw_spi", dws);
	if (ret < 0) {
		dev_err(&master->dev, "can not get IRQ\n");
		goto err_free_master;
	}

	master->mode_bits = SPI_CPOL | SPI_CPHA;
	master->bus_num = dws->bus_num;
	master->num_chipselect = dws->num_cs;
	master->cleanup = dw_spi_cleanup;
	master->setup = dw_spi_setup;
	master->transfer = dw_spi_transfer;

	dws->dma_inited = 0;

	/* Basic HW init */
	spi_hw_init(dws);

	/* Initialize and start queue */
	ret = init_queue(dws);
	if (ret) {
		dev_err(&master->dev, "problem initializing queue\n");
		goto err_disable_hw;
	}
	ret = start_queue(dws);
	if (ret) {
		dev_err(&master->dev, "problem starting queue\n");
		goto err_disable_hw;
	}

	spi_master_set_devdata(master, dws);
	ret = spi_register_master(master);
	if (ret) {
		dev_err(&master->dev, "problem registering spi master\n");
		goto err_queue_alloc;
	}

	mrst_spi_debugfs_init(dws);
	return 0;

err_queue_alloc:
	destroy_queue(dws);
err_disable_hw:
	spi_enable_chip(dws, 0);
	free_irq(dws->irq, dws);
err_free_master:
	spi_master_put(master);
exit:
	return ret;
}
EXPORT_SYMBOL(dw_spi_add_host);
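
/*
 * Example (sketch only, not taken from this driver): a bus interface driver
 * (PCI or platform glue) is expected to fill in the resource fields consumed
 * above before calling dw_spi_add_host().  pdev, mem and clk_rate below are
 * hypothetical:
 *
 *	dws->parent_dev	= &pdev->dev;
 *	dws->paddr	= mem->start;	// physical base; paddr + 0x60 becomes dma_addr
 *	dws->irq	= pdev->irq;
 *	dws->bus_num	= 0;
 *	dws->num_cs	= 4;		// example value
 *	dws->max_freq	= clk_rate;	// SSI input clock in Hz, used for clk_div
 *	dws->fifo_len	= 0;		// 0 lets spi_hw_init() probe the FIFO depth
 *
 *	ret = dw_spi_add_host(dws);
 */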
void __devexit dw_spi_remove_host(struct dw_spi *dws)
{
	int status = 0;

	if (!dws)
		return;
	mrst_spi_debugfs_remove(dws);

	/* Remove the queue */
	status = destroy_queue(dws);
	if (status != 0)
		dev_err(&dws->master->dev, "dw_spi_remove: workqueue will not "
			"complete, message memory not freed\n");

	spi_enable_chip(dws, 0);
	/* Disable clk */
	spi_set_clk(dws, 0);
	free_irq(dws->irq, dws);

	/* Disconnect from the SPI framework */
	spi_unregister_master(dws->master);
}
EXPORT_SYMBOL(dw_spi_remove_host);
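
/*
 * Suspend/resume: dw_spi_suspend_host() stops the message queue and gates the
 * chip and clock; dw_spi_resume_host() re-runs spi_hw_init() and restarts the
 * queue.  The interface driver is expected to call them as a pair from its
 * PM hooks.
 */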
int dw_spi_suspend_host(struct dw_spi *dws)
{
	int ret = 0;

	ret = stop_queue(dws);
	if (ret)
		return ret;
	spi_enable_chip(dws, 0);
	spi_set_clk(dws, 0);
	return ret;
}
EXPORT_SYMBOL(dw_spi_suspend_host);
int dw_spi_resume_host(struct dw_spi *dws)
{
	int ret;

	spi_hw_init(dws);
	ret = start_queue(dws);
	if (ret)
		dev_err(&dws->master->dev, "failed to start queue (%d)\n", ret);
	return ret;
}
EXPORT_SYMBOL(dw_spi_resume_host);
MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>");
MODULE_DESCRIPTION("Driver for DesignWare SPI controller core");
MODULE_LICENSE("GPL v2");