/*
 * linux/drivers/mmc/tmio_mmc.c
 *
 * Copyright (C) 2004 Ian Molton
 * Copyright (C) 2007 Ian Molton
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Driver for the MMC / SD / SDIO cell found in:
 *
 * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3
 *
 * This driver draws mainly on scattered spec sheets, reverse engineering
 * of the Toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit
 * support). (Further 4 bit support from a later datasheet).
 *
 * TODO:
 *   Investigate using a workqueue for PIO transfers
 *   Better power management
 *   Handle MMC errors better
 *   Double buffer support
 */
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mfd/core.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#define CTL_SD_CMD 0x00
#define CTL_ARG_REG 0x04
#define CTL_STOP_INTERNAL_ACTION 0x08
#define CTL_XFER_BLK_COUNT 0xa
#define CTL_RESPONSE 0x0c
#define CTL_STATUS 0x1c
#define CTL_IRQ_MASK 0x20
#define CTL_SD_CARD_CLK_CTL 0x24
#define CTL_SD_XFER_LEN 0x26
#define CTL_SD_MEM_CARD_OPT 0x28
#define CTL_SD_ERROR_DETAIL_STATUS 0x2c
#define CTL_SD_DATA_PORT 0x30
#define CTL_TRANSACTION_CTL 0x34
#define CTL_SDIO_STATUS 0x36
#define CTL_SDIO_IRQ_MASK 0x38
#define CTL_RESET_SD 0xe0
#define CTL_SDIO_REGS 0x100
#define CTL_CLK_AND_WAIT_CTL 0x138
#define CTL_RESET_SDIO 0x1e0
/* Definitions for values the CTL_STATUS register can take. */
#define TMIO_STAT_CMDRESPEND    0x00000001
#define TMIO_STAT_DATAEND       0x00000004
#define TMIO_STAT_CARD_REMOVE   0x00000008
#define TMIO_STAT_CARD_INSERT   0x00000010
#define TMIO_STAT_SIGSTATE      0x00000020
#define TMIO_STAT_WRPROTECT     0x00000080
#define TMIO_STAT_CARD_REMOVE_A 0x00000100
#define TMIO_STAT_CARD_INSERT_A 0x00000200
#define TMIO_STAT_SIGSTATE_A    0x00000400
#define TMIO_STAT_CMD_IDX_ERR   0x00010000
#define TMIO_STAT_CRCFAIL       0x00020000
#define TMIO_STAT_STOPBIT_ERR   0x00040000
#define TMIO_STAT_DATATIMEOUT   0x00080000
#define TMIO_STAT_RXOVERFLOW    0x00100000
#define TMIO_STAT_TXUNDERRUN    0x00200000
#define TMIO_STAT_CMDTIMEOUT    0x00400000
#define TMIO_STAT_RXRDY         0x01000000
#define TMIO_STAT_TXRQ          0x02000000
#define TMIO_STAT_ILL_FUNC      0x20000000
#define TMIO_STAT_CMD_BUSY      0x40000000
#define TMIO_STAT_ILL_ACCESS    0x80000000

/* Definitions for values the CTL_SDIO_STATUS register can take. */
#define TMIO_SDIO_STAT_IOIRQ	0x0001
#define TMIO_SDIO_STAT_EXPUB52	0x4000
#define TMIO_SDIO_STAT_EXWT	0x8000
#define TMIO_SDIO_MASK_ALL	0xc007

/* Define some IRQ masks */
/* This is the mask used at reset by the chip */
#define TMIO_MASK_ALL           0x837f031d
#define TMIO_MASK_READOP  (TMIO_STAT_RXRDY | TMIO_STAT_DATAEND)
#define TMIO_MASK_WRITEOP (TMIO_STAT_TXRQ | TMIO_STAT_DATAEND)
#define TMIO_MASK_CMD     (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT | \
		TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT)
#define TMIO_MASK_IRQ     (TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD)

#define TMIO_MIN_DMA_LEN 8
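/*
 * Transfers shorter than TMIO_MIN_DMA_LEN are not worth the DMA setup
 * cost; tmio_mmc_start_dma_rx/tx below fall back to PIO for them by
 * setting host->force_pio.
 */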
#define enable_mmc_irqs(host, i) \
	do { \
		u32 mask; \
		mask  = sd_ctrl_read32((host), CTL_IRQ_MASK); \
		mask &= ~((i) & TMIO_MASK_IRQ); \
		sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \
	} while (0)

#define disable_mmc_irqs(host, i) \
	do { \
		u32 mask; \
		mask  = sd_ctrl_read32((host), CTL_IRQ_MASK); \
		mask |= ((i) & TMIO_MASK_IRQ); \
		sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \
	} while (0)

#define ack_mmc_irqs(host, i) \
	do { \
		sd_ctrl_write32((host), CTL_STATUS, ~(i)); \
	} while (0)
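/*
 * Note the polarity of CTL_IRQ_MASK: a set bit masks an interrupt, so
 * enable_mmc_irqs() clears bits and disable_mmc_irqs() sets them.
 * ack_mmc_irqs() writes ~(i) to CTL_STATUS, which suggests status bits
 * are acknowledged by writing 0 to them.
 */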
/* This is arbitrary, just no one needed any higher alignment yet */
#define MAX_ALIGN 32

struct tmio_mmc_host {
	void __iomem *ctl;
	unsigned long bus_shift;
	struct mmc_command      *cmd;
	struct mmc_request      *mrq;
	struct mmc_data         *data;
	struct mmc_host         *mmc;
	int                     irq;
	unsigned int		sdio_irq_enabled;

	/* Callbacks for clock / power control */
	void (*set_pwr)(struct platform_device *host, int state);
	void (*set_clk_div)(struct platform_device *host, int state);

	/* pio related stuff */
	struct scatterlist      *sg_ptr;
	struct scatterlist      *sg_orig;
	unsigned int            sg_len;
	unsigned int            sg_off;

	struct platform_device *pdev;

	/* DMA support */
	bool			force_pio;
	struct dma_chan		*chan_rx;
	struct dma_chan		*chan_tx;
	struct tasklet_struct	dma_complete;
	struct tasklet_struct	dma_issue;
#ifdef CONFIG_TMIO_MMC_DMA
	u8			bounce_buf[PAGE_CACHE_SIZE] __attribute__((aligned(MAX_ALIGN)));
	struct scatterlist	bounce_sg;
#endif

	/* Track lost interrupts */
	struct delayed_work	delayed_reset_work;
	spinlock_t		lock;
	unsigned long		last_req_ts;
};
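/*
 * host->lock protects cmd, mrq, data and force_pio; delayed_reset_work
 * recovers from lost interrupts by timing a request out roughly two
 * seconds after it was issued (see tmio_mmc_reset_work below).
 */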
static void tmio_check_bounce_buffer(struct tmio_mmc_host *host);
static u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr)
{
	return readw(host->ctl + (addr << host->bus_shift));
}

static void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr,
		u16 *buf, int count)
{
	readsw(host->ctl + (addr << host->bus_shift), buf, count);
}

static u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr)
{
	return readw(host->ctl + (addr << host->bus_shift)) |
	       readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
}

static void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val)
{
	writew(val, host->ctl + (addr << host->bus_shift));
}

static void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr,
		u16 *buf, int count)
{
	writesw(host->ctl + (addr << host->bus_shift), buf, count);
}

static void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val)
{
	writew(val, host->ctl + (addr << host->bus_shift));
	writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
}
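/*
 * Registers are declared at their 16-bit bus offsets and scaled by
 * bus_shift for wider buses: e.g. CTL_STATUS (0x1c) sits at offset
 * 0x1c with bus_shift = 0 and at 0x38 with bus_shift = 1, matching
 * the 0x200 / 0x400 register window sizes handled in probe. 32-bit
 * registers are simply accessed as two adjacent 16-bit halves.
 */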
static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data)
{
	host->sg_len = data->sg_len;
	host->sg_ptr = data->sg;
	host->sg_orig = data->sg;
	host->sg_off = 0;
}

static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
{
	host->sg_ptr = sg_next(host->sg_ptr);
	host->sg_off = 0;
	return --host->sg_len;
}
static char *tmio_mmc_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
	local_irq_save(*flags);
	return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
}

static void tmio_mmc_kunmap_atomic(struct scatterlist *sg, unsigned long *flags, void *virt)
{
	kunmap_atomic(virt - sg->offset, KM_BIO_SRC_IRQ);
	local_irq_restore(*flags);
}
#ifdef CONFIG_MMC_DEBUG

#define STATUS_TO_TEXT(a, status, i) \
	do { \
		if (status & TMIO_STAT_##a) { \
			if (i++) \
				printk(" | "); \
			printk(#a); \
		} \
	} while (0)

void pr_debug_status(u32 status)
{
	int i = 0;
	printk(KERN_DEBUG "status: %08x = ", status);
	STATUS_TO_TEXT(CARD_REMOVE, status, i);
	STATUS_TO_TEXT(CARD_INSERT, status, i);
	STATUS_TO_TEXT(SIGSTATE, status, i);
	STATUS_TO_TEXT(WRPROTECT, status, i);
	STATUS_TO_TEXT(CARD_REMOVE_A, status, i);
	STATUS_TO_TEXT(CARD_INSERT_A, status, i);
	STATUS_TO_TEXT(SIGSTATE_A, status, i);
	STATUS_TO_TEXT(CMD_IDX_ERR, status, i);
	STATUS_TO_TEXT(STOPBIT_ERR, status, i);
	STATUS_TO_TEXT(ILL_FUNC, status, i);
	STATUS_TO_TEXT(CMD_BUSY, status, i);
	STATUS_TO_TEXT(CMDRESPEND, status, i);
	STATUS_TO_TEXT(DATAEND, status, i);
	STATUS_TO_TEXT(CRCFAIL, status, i);
	STATUS_TO_TEXT(DATATIMEOUT, status, i);
	STATUS_TO_TEXT(CMDTIMEOUT, status, i);
	STATUS_TO_TEXT(RXOVERFLOW, status, i);
	STATUS_TO_TEXT(TXUNDERRUN, status, i);
	STATUS_TO_TEXT(RXRDY, status, i);
	STATUS_TO_TEXT(TXRQ, status, i);
	STATUS_TO_TEXT(ILL_ACCESS, status, i);
	printk("\n");
}

#else
#define pr_debug_status(s)  do { } while (0)
#endif
static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	if (enable) {
		host->sdio_irq_enabled = 1;
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK,
			(TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ));
	} else {
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, TMIO_SDIO_MASK_ALL);
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);
		host->sdio_irq_enabled = 0;
	}
}
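/*
 * Divider selection: the loop doubles a candidate rate starting at
 * f_min while shifting the divider pattern (initially 0x80000080)
 * right, then the low nine bits are written to CTL_SD_CARD_CLK_CTL.
 * Bit 22 of the final pattern is handed to the platform's
 * set_clk_div hook, presumably to select the undivided clock.
 */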
static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
{
	u32 clk = 0, clock;

	if (new_clock) {
		for (clock = host->mmc->f_min, clk = 0x80000080;
			new_clock >= (clock<<1); clk >>= 1)
			clock <<= 1;
		clk |= 0x100;
	}

	if (host->set_clk_div)
		host->set_clk_div(host->pdev, (clk>>22) & 1);

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff);
}
static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
{
	struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);

	/*
	 * Testing on sh-mobile showed that SDIO IRQs are unmasked when
	 * CTL_CLK_AND_WAIT_CTL gets written, so we have to disable the
	 * device IRQ here and restore the SDIO IRQ mask before
	 * re-enabling the device IRQ.
	 */
	if (pdata->flags & TMIO_MMC_SDIO_IRQ)
		disable_irq(host->irq);
	sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
	msleep(10);
	if (pdata->flags & TMIO_MMC_SDIO_IRQ) {
		tmio_mmc_enable_sdio_irq(host->mmc, host->sdio_irq_enabled);
		enable_irq(host->irq);
	}
	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 &
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	msleep(10);
}
static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
{
	struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 |
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	msleep(10);

	/* see comment in tmio_mmc_clk_stop above */
	if (pdata->flags & TMIO_MMC_SDIO_IRQ)
		disable_irq(host->irq);
	sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
	msleep(10);
	if (pdata->flags & TMIO_MMC_SDIO_IRQ) {
		tmio_mmc_enable_sdio_irq(host->mmc, host->sdio_irq_enabled);
		enable_irq(host->irq);
	}
}
static void reset(struct tmio_mmc_host *host)
{
	/* FIXME - should we set stop clock reg here */
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
	sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
	msleep(10);
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
	sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
	msleep(10);
}
static void tmio_mmc_reset_work(struct work_struct *work)
{
	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
						  delayed_reset_work.work);
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	mrq = host->mrq;

	/* request already finished */
	if (!mrq
	    || time_is_after_jiffies(host->last_req_ts +
		msecs_to_jiffies(2000))) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	dev_warn(&host->pdev->dev,
		"timeout waiting for hardware interrupt (CMD%u)\n",
		mrq->cmd->opcode);

	if (host->data)
		host->data->error = -ETIMEDOUT;
	else if (host->cmd)
		host->cmd->error = -ETIMEDOUT;
	else
		mrq->cmd->error = -ETIMEDOUT;

	host->cmd = NULL;
	host->data = NULL;
	host->mrq = NULL;
	host->force_pio = false;

	spin_unlock_irqrestore(&host->lock, flags);

	reset(host);

	mmc_request_done(host->mmc, mrq);
}
static void
tmio_mmc_finish_request(struct tmio_mmc_host *host)
{
	struct mmc_request *mrq = host->mrq;

	if (!mrq)
		return;

	host->mrq = NULL;
	host->cmd = NULL;
	host->data = NULL;
	host->force_pio = false;

	cancel_delayed_work(&host->delayed_reset_work);

	mmc_request_done(host->mmc, mrq);
}
/* These are the bitmasks the tmio chip requires to implement the MMC response
 * types. Note that R1 and R6 are the same in this scheme. */
#define APP_CMD        0x0040
#define RESP_NONE      0x0300
#define RESP_R1        0x0400
#define RESP_R1B       0x0500
#define RESP_R2        0x0600
#define RESP_R3        0x0700
#define DATA_PRESENT   0x0800
#define TRANSFER_READ  0x1000
#define TRANSFER_MULTI 0x2000
#define SECURITY_CMD   0x4000
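/*
 * Example: a single-block read (CMD17, R1 response, with data, read
 * direction) is encoded by tmio_mmc_start_command() below as
 * 17 | RESP_R1 | DATA_PRESENT | TRANSFER_READ = 0x1c11.
 */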
static int
tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
{
	struct mmc_data *data = host->data;
	int c = cmd->opcode;

	/* Command 12 is handled by hardware */
	if (cmd->opcode == 12 && !cmd->arg) {
		sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001);
		return 0;
	}

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE: c |= RESP_NONE; break;
	case MMC_RSP_R1:   c |= RESP_R1;   break;
	case MMC_RSP_R1B:  c |= RESP_R1B;  break;
	case MMC_RSP_R2:   c |= RESP_R2;   break;
	case MMC_RSP_R3:   c |= RESP_R3;   break;
	default:
		pr_debug("Unknown response type %d\n", mmc_resp_type(cmd));
		return -EINVAL;
	}

	host->cmd = cmd;

/* FIXME - this seems to be ok commented out but the spec suggests this bit
 * should be set when issuing app commands.
 *	if(cmd->flags & MMC_FLAG_ACMD)
 *		c |= APP_CMD;
 */
	if (data) {
		c |= DATA_PRESENT;
		if (data->blocks > 1) {
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100);
			c |= TRANSFER_MULTI;
		}
		if (data->flags & MMC_DATA_READ)
			c |= TRANSFER_READ;
	}

	enable_mmc_irqs(host, TMIO_MASK_CMD);

	/* Fire off the command */
	sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg);
	sd_ctrl_write16(host, CTL_SD_CMD, c);

	return 0;
}
/*
 * This chip always returns (at least?) as much data as you ask for.
 * I'm unsure what happens if you ask for less than a block. This should be
 * looked into to ensure that a funny length read doesn't hose the controller.
 */
static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	void *sg_virt;
	unsigned short *buf;
	unsigned int count;
	unsigned long flags;

	if ((host->chan_tx || host->chan_rx) && !host->force_pio) {
		pr_err("PIO IRQ in DMA mode!\n");
		return;
	} else if (!data) {
		pr_debug("Spurious PIO IRQ\n");
		return;
	}

	sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
	buf = (unsigned short *)(sg_virt + host->sg_off);

	count = host->sg_ptr->length - host->sg_off;
	if (count > data->blksz)
		count = data->blksz;

	pr_debug("count: %08x offset: %08x flags %08x\n",
		 count, host->sg_off, data->flags);

	/* Transfer the data */
	if (data->flags & MMC_DATA_READ)
		sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
	else
		sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);

	host->sg_off += count;

	tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt);

	if (host->sg_off == host->sg_ptr->length)
		tmio_mmc_next_sg(host);

	return;
}
/* needs to be called with host->lock held */
static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	struct mmc_command *stop;

	host->data = NULL;

	if (!data) {
		dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
		return;
	}
	stop = data->stop;

	/* FIXME - return correct transfer count on errors */
	if (!data->error)
		data->bytes_xfered = data->blocks * data->blksz;
	else
		data->bytes_xfered = 0;

	pr_debug("Completed data request\n");

	/*
	 * FIXME: other drivers allow an optional stop command of any given type
	 *        which we don't do, as the chip can auto generate them.
	 *        Perhaps we can be smarter about when to use auto CMD12 and
	 *        only issue the auto request when we know this is the desired
	 *        stop command, allowing fallback to the stop command the
	 *        upper layers expect. For now, we do what works.
	 */

	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx && !host->force_pio)
			tmio_check_bounce_buffer(host);
		dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
			host->mrq);
	} else {
		dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
			host->mrq);
	}

	if (stop) {
		if (stop->opcode == 12 && !stop->arg)
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000);
		else
			BUG();
	}

	tmio_mmc_finish_request(host);
}
static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data;
	spin_lock(&host->lock);
	data = host->data;

	if (!data)
		goto out;

	if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) {
		/*
		 * Has all data been written out yet? Testing on SuperH showed
		 * that in most cases the first interrupt arrives with the
		 * BUSY status bit already clear, but on some operations, like
		 * mount or at the beginning of a write / sync / umount, there
		 * is one DATAEND interrupt with the BUSY bit set; in these
		 * cases waiting for one more interrupt fixes the problem.
		 */
		if (!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_CMD_BUSY)) {
			disable_mmc_irqs(host, TMIO_STAT_DATAEND);
			tasklet_schedule(&host->dma_complete);
		}
	} else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) {
		disable_mmc_irqs(host, TMIO_STAT_DATAEND);
		tasklet_schedule(&host->dma_complete);
	} else {
		tmio_mmc_do_data_irq(host);
		disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP);
	}
out:
	spin_unlock(&host->lock);
}
static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
	unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
	int i, addr;

	spin_lock(&host->lock);

	if (!host->cmd) {
		pr_debug("Spurious CMD irq\n");
		goto out;
	}

	host->cmd = NULL;

	/* This controller is sicker than the PXA one. Not only do we need to
	 * drop the top 8 bits of the first response word, we also need to
	 * modify the order of the response for short response command types.
	 */
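	/*
	 * The loop below reads the raw response words highest-first;
	 * for 136-bit responses each word is then shifted left by eight
	 * bits, pulling in the top byte of the next word, e.g.
	 * resp[0] becomes (resp[0] << 8) | (resp[1] >> 24).
	 */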
	for (i = 3, addr = CTL_RESPONSE; i >= 0; i--, addr += 4)
		cmd->resp[i] = sd_ctrl_read32(host, addr);

	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
		cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24);
		cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24);
		cmd->resp[3] <<= 8;
	} else if (cmd->flags & MMC_RSP_R3) {
		cmd->resp[0] = cmd->resp[3];
	}

	if (stat & TMIO_STAT_CMDTIMEOUT)
		cmd->error = -ETIMEDOUT;
	else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC)
		cmd->error = -EILSEQ;

	/* If there is data to handle we enable data IRQs here, and
	 * we will ultimately finish the request in the data_end handler.
	 * If there's no data or we encountered an error, finish now.
	 */
	if (host->data && !cmd->error) {
		if (host->data->flags & MMC_DATA_READ) {
			if (host->force_pio || !host->chan_rx)
				enable_mmc_irqs(host, TMIO_MASK_READOP);
			else
				tasklet_schedule(&host->dma_issue);
		} else {
			if (host->force_pio || !host->chan_tx)
				enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
			else
				tasklet_schedule(&host->dma_issue);
		}
	} else {
		tmio_mmc_finish_request(host);
	}

out:
	spin_unlock(&host->lock);

	return;
}
static irqreturn_t tmio_mmc_irq(int irq, void *devid)
{
	struct tmio_mmc_host *host = devid;
	struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);
	unsigned int ireg, irq_mask, status;
	unsigned int sdio_ireg, sdio_irq_mask, sdio_status;

	pr_debug("MMC IRQ begin\n");

	status = sd_ctrl_read32(host, CTL_STATUS);
	irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
	ireg = status & TMIO_MASK_IRQ & ~irq_mask;

	sdio_ireg = 0;
	if (!ireg && pdata->flags & TMIO_MMC_SDIO_IRQ) {
		sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
		sdio_irq_mask = sd_ctrl_read16(host, CTL_SDIO_IRQ_MASK);
		sdio_ireg = sdio_status & TMIO_SDIO_MASK_ALL & ~sdio_irq_mask;

		sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status & ~TMIO_SDIO_MASK_ALL);

		if (sdio_ireg && !host->sdio_irq_enabled) {
			pr_warning("tmio_mmc: Spurious SDIO IRQ, disabling! 0x%04x 0x%04x 0x%04x\n",
				   sdio_status, sdio_irq_mask, sdio_ireg);
			tmio_mmc_enable_sdio_irq(host->mmc, 0);
			goto out;
		}

		if (host->mmc->caps & MMC_CAP_SDIO_IRQ &&
			sdio_ireg & TMIO_SDIO_STAT_IOIRQ)
			mmc_signal_sdio_irq(host->mmc);

		if (sdio_ireg)
			goto out;
	}

	pr_debug_status(status);
	pr_debug_status(ireg);

	if (!ireg) {
		disable_mmc_irqs(host, status & ~irq_mask);

		pr_warning("tmio_mmc: Spurious irq, disabling! "
			"0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg);
		pr_debug_status(status);

		goto out;
	}

	while (ireg) {
		/* Card insert / remove attempts */
		if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
			ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
				TMIO_STAT_CARD_REMOVE);
			mmc_detect_change(host->mmc, msecs_to_jiffies(100));
		}

		/* CRC and other errors */
/*		if (ireg & TMIO_STAT_ERR_IRQ)
 *			handled |= tmio_error_irq(host, irq, stat);
 */

		/* Command completion */
		if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
			ack_mmc_irqs(host,
				     TMIO_STAT_CMDRESPEND |
				     TMIO_STAT_CMDTIMEOUT);
			tmio_mmc_cmd_irq(host, status);
		}

		/* Data transfer */
		if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
			ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
			tmio_mmc_pio_irq(host);
		}

		/* Data transfer completion */
		if (ireg & TMIO_STAT_DATAEND) {
			ack_mmc_irqs(host, TMIO_STAT_DATAEND);
			tmio_mmc_data_irq(host);
		}

		/* Check status - keep going until we've handled it all */
		status = sd_ctrl_read32(host, CTL_STATUS);
		irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
		ireg = status & TMIO_MASK_IRQ & ~irq_mask;

		pr_debug("Status at end of loop: %08x\n", status);
		pr_debug_status(status);
	}
	pr_debug("MMC IRQ end\n");

out:
	return IRQ_HANDLED;
}
#ifdef CONFIG_TMIO_MMC_DMA
static void tmio_check_bounce_buffer(struct tmio_mmc_host *host)
{
	if (host->sg_ptr == &host->bounce_sg) {
		unsigned long flags;
		void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);
		memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
		tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr);
	}
}
static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
{
#if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE)
	/* Switch DMA mode on or off - SuperH specific? */
	sd_ctrl_write16(host, 0xd8, enable ? 2 : 0);
#endif
}
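/*
 * DMA is only attempted when every scatterlist element meets the
 * platform's alignment requirement. A single misaligned element of at
 * most PAGE_CACHE_SIZE bytes is staged through bounce_buf instead,
 * and transfers shorter than TMIO_MIN_DMA_LEN force PIO; anything
 * else falls back to PIO with both channels released.
 */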
static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
{
	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_rx;
	struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);
	dma_cookie_t cookie = 0;
	int ret, i;
	bool aligned = true, multiple = true;
	unsigned int align = (1 << pdata->dma->alignment_shift) - 1;

	for_each_sg(sg, sg_tmp, host->sg_len, i) {
		if (sg_tmp->offset & align)
			aligned = false;
		if (sg_tmp->length & align) {
			multiple = false;
			break;
		}
	}

	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
			  align >= MAX_ALIGN)) || !multiple) {
		ret = -EINVAL;
		goto pio;
	}

	if (sg->length < TMIO_MIN_DMA_LEN) {
		host->force_pio = true;
		return;
	}

	disable_mmc_irqs(host, TMIO_STAT_RXRDY);

	/* The only sg element can be unaligned, use our bounce buffer then */
	if (!aligned) {
		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
		host->sg_ptr = &host->bounce_sg;
		sg = host->sg_ptr;
	}

	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
	if (ret > 0)
		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
			DMA_FROM_DEVICE, DMA_CTRL_ACK);

	if (desc)
		cookie = dmaengine_submit(desc);

	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
		__func__, host->sg_len, ret, cookie, host->mrq);

pio:
	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_rx = NULL;
		dma_release_channel(chan);
		/* Free the Tx channel too */
		chan = host->chan_tx;
		if (chan) {
			host->chan_tx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pdev->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		tmio_mmc_enable_dma(host, false);
	}

	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
		desc, cookie, host->sg_len);
}
static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
{
	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_tx;
	struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);
	dma_cookie_t cookie = 0;
	int ret, i;
	bool aligned = true, multiple = true;
	unsigned int align = (1 << pdata->dma->alignment_shift) - 1;

	for_each_sg(sg, sg_tmp, host->sg_len, i) {
		if (sg_tmp->offset & align)
			aligned = false;
		if (sg_tmp->length & align) {
			multiple = false;
			break;
		}
	}

	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
			  align >= MAX_ALIGN)) || !multiple) {
		ret = -EINVAL;
		goto pio;
	}

	if (sg->length < TMIO_MIN_DMA_LEN) {
		host->force_pio = true;
		return;
	}

	disable_mmc_irqs(host, TMIO_STAT_TXRQ);

	/* The only sg element can be unaligned, use our bounce buffer then */
	if (!aligned) {
		unsigned long flags;
		void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);
		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
		memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length);
		tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr);
		host->sg_ptr = &host->bounce_sg;
		sg = host->sg_ptr;
	}

	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
	if (ret > 0)
		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
			DMA_TO_DEVICE, DMA_CTRL_ACK);

	if (desc)
		cookie = dmaengine_submit(desc);

	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
		__func__, host->sg_len, ret, cookie, host->mrq);

pio:
	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_tx = NULL;
		dma_release_channel(chan);
		/* Free the Rx channel too */
		chan = host->chan_rx;
		if (chan) {
			host->chan_rx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pdev->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		tmio_mmc_enable_dma(host, false);
	}

	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
		desc, cookie);
}
*host
,
940 struct mmc_data
*data
)
942 if (data
->flags
& MMC_DATA_READ
) {
944 tmio_mmc_start_dma_rx(host
);
947 tmio_mmc_start_dma_tx(host
);
static void tmio_issue_tasklet_fn(unsigned long priv)
{
	struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
	struct dma_chan *chan = NULL;

	spin_lock_irq(&host->lock);

	if (host && host->data) {
		if (host->data->flags & MMC_DATA_READ)
			chan = host->chan_rx;
		else
			chan = host->chan_tx;
	}

	spin_unlock_irq(&host->lock);

	enable_mmc_irqs(host, TMIO_STAT_DATAEND);

	if (chan)
		dma_async_issue_pending(chan);
}
static void tmio_tasklet_fn(unsigned long arg)
{
	struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;

	spin_lock_irq(&host->lock);

	if (!host->data)
		goto out;

	if (host->data->flags & MMC_DATA_READ)
		dma_unmap_sg(host->chan_rx->device->dev,
			     host->sg_ptr, host->sg_len,
			     DMA_FROM_DEVICE);
	else
		dma_unmap_sg(host->chan_tx->device->dev,
			     host->sg_ptr, host->sg_len,
			     DMA_TO_DEVICE);

	tmio_mmc_do_data_irq(host);
out:
	spin_unlock_irq(&host->lock);
}
/* It might be necessary to make filter MFD specific */
static bool tmio_mmc_filter(struct dma_chan *chan, void *arg)
{
	dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg);
	chan->private = arg;
	return true;
}
static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
				 struct tmio_mmc_data *pdata)
{
	/* We can only either use DMA for both Tx and Rx or not use it at all */
	if (pdata->dma) {
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		host->chan_tx = dma_request_channel(mask, tmio_mmc_filter,
						    pdata->dma->chan_priv_tx);
		dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
			host->chan_tx);

		if (!host->chan_tx)
			return;

		host->chan_rx = dma_request_channel(mask, tmio_mmc_filter,
						    pdata->dma->chan_priv_rx);
		dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
			host->chan_rx);

		if (!host->chan_rx) {
			dma_release_channel(host->chan_tx);
			host->chan_tx = NULL;
			return;
		}

		tasklet_init(&host->dma_complete, tmio_tasklet_fn, (unsigned long)host);
		tasklet_init(&host->dma_issue, tmio_issue_tasklet_fn, (unsigned long)host);

		tmio_mmc_enable_dma(host, true);
	}
}
static void tmio_mmc_release_dma(struct tmio_mmc_host *host)
{
	if (host->chan_tx) {
		struct dma_chan *chan = host->chan_tx;
		host->chan_tx = NULL;
		dma_release_channel(chan);
	}
	if (host->chan_rx) {
		struct dma_chan *chan = host->chan_rx;
		host->chan_rx = NULL;
		dma_release_channel(chan);
	}
}
#else
static void tmio_check_bounce_buffer(struct tmio_mmc_host *host)
{
}

static void tmio_mmc_start_dma(struct tmio_mmc_host *host,
			       struct mmc_data *data)
{
}

static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
				 struct tmio_mmc_data *pdata)
{
	host->chan_tx = NULL;
	host->chan_rx = NULL;
}

static void tmio_mmc_release_dma(struct tmio_mmc_host *host)
{
}
#endif
static int tmio_mmc_start_data(struct tmio_mmc_host *host,
	struct mmc_data *data)
{
	struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);

	pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
		 data->blksz, data->blocks);

	/* Some hardware cannot perform 2 byte requests in 4 bit mode */
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
		int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES;

		if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) {
			pr_err("%s: %d byte block unsupported in 4 bit mode\n",
			       mmc_hostname(host->mmc), data->blksz);
			return -EINVAL;
		}
	}

	tmio_mmc_init_sg(host, data);
	host->data = data;

	/* Set transfer length / blocksize */
	sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
	sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);

	tmio_mmc_start_dma(host, data);

	return 0;
}
/* Process requests from the MMC layer */
static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	int ret;

	if (host->mrq)
		pr_debug("request not null\n");

	host->last_req_ts = jiffies;
	wmb();
	host->mrq = mrq;

	if (mrq->data) {
		ret = tmio_mmc_start_data(host, mrq->data);
		if (ret)
			goto fail;
	}

	ret = tmio_mmc_start_command(host, mrq->cmd);
	if (!ret) {
		schedule_delayed_work(&host->delayed_reset_work,
				      msecs_to_jiffies(2000));
		return;
	}

fail:
	host->mrq = NULL;
	host->force_pio = false;
	mrq->cmd->error = ret;
	mmc_request_done(mmc, mrq);
}
/* Set MMC clock / power.
 * Note: This controller uses a simple divider scheme therefore it cannot
 * run an MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as
 * MMC won't run that fast, it has to be clocked at 12MHz which is the next
 * slowest setting.
 */
static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	if (ios->clock)
		tmio_mmc_set_clock(host, ios->clock);

	/* Power sequence - OFF -> ON -> UP */
	switch (ios->power_mode) {
	case MMC_POWER_OFF: /* power down SD bus */
		if (host->set_pwr)
			host->set_pwr(host->pdev, 0);
		tmio_mmc_clk_stop(host);
		break;
	case MMC_POWER_ON: /* power up SD bus */
		if (host->set_pwr)
			host->set_pwr(host->pdev, 1);
		break;
	case MMC_POWER_UP: /* start bus clock */
		tmio_mmc_clk_start(host);
		break;
	}

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0);
		break;
	case MMC_BUS_WIDTH_4:
		sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0);
		break;
	}

	/* Let things settle. delay taken from winCE driver */
	udelay(140);
}
static int tmio_mmc_get_ro(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);

	return ((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
		(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT)) ? 0 : 1;
}
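/*
 * Note the inversion: returning 0 means the card is writable. The
 * hardware bit apparently reads 1 when writing is permitted, and
 * platforms can set TMIO_MMC_WRPROTECT_DISABLE to ignore the write
 * protect switch entirely.
 */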
static int tmio_mmc_get_cd(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);

	if (!pdata->get_cd)
		return -ENOSYS;
	else
		return pdata->get_cd(host->pdev);
}
static const struct mmc_host_ops tmio_mmc_ops = {
	.request	= tmio_mmc_request,
	.set_ios	= tmio_mmc_set_ios,
	.get_ro		= tmio_mmc_get_ro,
	.get_cd		= tmio_mmc_get_cd,
	.enable_sdio_irq = tmio_mmc_enable_sdio_irq,
};
#ifdef CONFIG_PM
static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state)
{
	const struct mfd_cell *cell = mfd_get_cell(dev);
	struct mmc_host *mmc = platform_get_drvdata(dev);
	int ret;

	ret = mmc_suspend_host(mmc);

	/* Tell MFD core it can disable us now. */
	if (!ret && cell->disable)
		ret = cell->disable(dev);

	return ret;
}

static int tmio_mmc_resume(struct platform_device *dev)
{
	const struct mfd_cell *cell = mfd_get_cell(dev);
	struct mmc_host *mmc = platform_get_drvdata(dev);
	int ret = 0;

	/* Tell the MFD core we are ready to be enabled */
	if (cell->resume) {
		ret = cell->resume(dev);
		if (ret)
			goto out;
	}

	mmc_resume_host(mmc);

out:
	return ret;
}
#else
#define tmio_mmc_suspend NULL
#define tmio_mmc_resume NULL
#endif
static int __devinit tmio_mmc_probe(struct platform_device *dev)
{
	const struct mfd_cell *cell = mfd_get_cell(dev);
	struct tmio_mmc_data *pdata;
	struct resource *res_ctl;
	struct tmio_mmc_host *host;
	struct mmc_host *mmc;
	int ret = -EINVAL;
	u32 irq_mask = TMIO_MASK_CMD;

	if (dev->num_resources != 2)
		goto out;

	res_ctl = platform_get_resource(dev, IORESOURCE_MEM, 0);
	if (!res_ctl)
		goto out;

	pdata = mfd_get_data(dev);
	if (!pdata || !pdata->hclk)
		goto out;

	ret = -ENOMEM;

	mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &dev->dev);
	if (!mmc)
		goto out;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->pdev = dev;
	platform_set_drvdata(dev, mmc);

	host->set_pwr = pdata->set_pwr;
	host->set_clk_div = pdata->set_clk_div;

	/* SD control register space size is 0x200, 0x400 for bus_shift=1 */
	host->bus_shift = resource_size(res_ctl) >> 10;

	host->ctl = ioremap(res_ctl->start, resource_size(res_ctl));
	if (!host->ctl)
		goto host_free;

	mmc->ops = &tmio_mmc_ops;
	mmc->caps = MMC_CAP_4_BIT_DATA | pdata->capabilities;
	mmc->f_max = pdata->hclk;
	mmc->f_min = mmc->f_max / 512;
	mmc->max_segs = 32;
	mmc->max_blk_size = 512;
	mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) *
		mmc->max_segs;
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_seg_size = mmc->max_req_size;
	if (pdata->ocr_mask)
		mmc->ocr_avail = pdata->ocr_mask;
	else
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	/* Tell the MFD core we are ready to be enabled */
	if (cell->enable) {
		ret = cell->enable(dev);
		if (ret)
			goto unmap_ctl;
	}

	tmio_mmc_clk_stop(host);
	reset(host);

	ret = platform_get_irq(dev, 0);
	if (ret >= 0)
		host->irq = ret;
	else
		goto cell_disable;

	disable_mmc_irqs(host, TMIO_MASK_ALL);
	if (pdata->flags & TMIO_MMC_SDIO_IRQ)
		tmio_mmc_enable_sdio_irq(mmc, 0);

	ret = request_irq(host->irq, tmio_mmc_irq, IRQF_DISABLED |
		IRQF_TRIGGER_FALLING, dev_name(&dev->dev), host);
	if (ret)
		goto cell_disable;

	spin_lock_init(&host->lock);

	/* Init delayed work for request timeouts */
	INIT_DELAYED_WORK(&host->delayed_reset_work, tmio_mmc_reset_work);

	/* See if we also get DMA */
	tmio_mmc_request_dma(host, pdata);

	mmc_add_host(mmc);

	pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc),
		(unsigned long)host->ctl, host->irq);

	/* Unmask the IRQs we want to know about */
	if (!host->chan_rx)
		irq_mask |= TMIO_MASK_READOP;
	if (!host->chan_tx)
		irq_mask |= TMIO_MASK_WRITEOP;
	enable_mmc_irqs(host, irq_mask);

	return 0;

cell_disable:
	if (cell->disable)
		cell->disable(dev);
unmap_ctl:
	iounmap(host->ctl);
host_free:
	mmc_free_host(mmc);
out:
	return ret;
}
static int __devexit tmio_mmc_remove(struct platform_device *dev)
{
	const struct mfd_cell *cell = mfd_get_cell(dev);
	struct mmc_host *mmc = platform_get_drvdata(dev);

	platform_set_drvdata(dev, NULL);

	if (mmc) {
		struct tmio_mmc_host *host = mmc_priv(mmc);
		mmc_remove_host(mmc);
		cancel_delayed_work_sync(&host->delayed_reset_work);
		tmio_mmc_release_dma(host);
		free_irq(host->irq, host);
		if (cell->disable)
			cell->disable(dev);
		iounmap(host->ctl);
		mmc_free_host(mmc);
	}

	return 0;
}
/* ------------------- device registration ----------------------- */

static struct platform_driver tmio_mmc_driver = {
	.driver = {
		.name = "tmio-mmc",
		.owner = THIS_MODULE,
	},
	.probe = tmio_mmc_probe,
	.remove = __devexit_p(tmio_mmc_remove),
	.suspend = tmio_mmc_suspend,
	.resume = tmio_mmc_resume,
};

static int __init tmio_mmc_init(void)
{
	return platform_driver_register(&tmio_mmc_driver);
}

static void __exit tmio_mmc_exit(void)
{
	platform_driver_unregister(&tmio_mmc_driver);
}

module_init(tmio_mmc_init);
module_exit(tmio_mmc_exit);

MODULE_DESCRIPTION("Toshiba TMIO SD/MMC driver");
MODULE_AUTHOR("Ian Molton <spyro@f2s.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:tmio-mmc");