[tomato.git] / release / src-rt-6.x.4708 / linux / linux-2.6.36 / drivers / ata / pata_bf54x.c
1 /*
2 * File: drivers/ata/pata_bf54x.c
3 * Author: Sonic Zhang <sonic.zhang@analog.com>
5 * Created:
6 * Description: PATA Driver for blackfin 54x
8 * Modified:
9 * Copyright 2007 Analog Devices Inc.
11 * Bugs: Enter bugs at http://blackfin.uclinux.org/
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, see the file COPYING, or write
25 * to the Free Software Foundation, Inc.,
26 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
29 #include <linux/kernel.h>
30 #include <linux/module.h>
31 #include <linux/pci.h>
32 #include <linux/init.h>
33 #include <linux/blkdev.h>
34 #include <linux/delay.h>
35 #include <linux/device.h>
36 #include <scsi/scsi_host.h>
37 #include <linux/libata.h>
38 #include <linux/platform_device.h>
39 #include <asm/dma.h>
40 #include <asm/gpio.h>
41 #include <asm/portmux.h>
43 #define DRV_NAME "pata-bf54x"
44 #define DRV_VERSION "0.9"
46 #define ATA_REG_CTRL 0x0E
47 #define ATA_REG_ALTSTATUS ATA_REG_CTRL
49 /* These are the offset of the controller's registers */
50 #define ATAPI_OFFSET_CONTROL 0x00
51 #define ATAPI_OFFSET_STATUS 0x04
52 #define ATAPI_OFFSET_DEV_ADDR 0x08
53 #define ATAPI_OFFSET_DEV_TXBUF 0x0c
54 #define ATAPI_OFFSET_DEV_RXBUF 0x10
55 #define ATAPI_OFFSET_INT_MASK 0x14
56 #define ATAPI_OFFSET_INT_STATUS 0x18
57 #define ATAPI_OFFSET_XFER_LEN 0x1c
58 #define ATAPI_OFFSET_LINE_STATUS 0x20
59 #define ATAPI_OFFSET_SM_STATE 0x24
60 #define ATAPI_OFFSET_TERMINATE 0x28
61 #define ATAPI_OFFSET_PIO_TFRCNT 0x2c
62 #define ATAPI_OFFSET_DMA_TFRCNT 0x30
63 #define ATAPI_OFFSET_UMAIN_TFRCNT 0x34
64 #define ATAPI_OFFSET_UDMAOUT_TFRCNT 0x38
65 #define ATAPI_OFFSET_REG_TIM_0 0x40
66 #define ATAPI_OFFSET_PIO_TIM_0 0x44
67 #define ATAPI_OFFSET_PIO_TIM_1 0x48
68 #define ATAPI_OFFSET_MULTI_TIM_0 0x50
69 #define ATAPI_OFFSET_MULTI_TIM_1 0x54
70 #define ATAPI_OFFSET_MULTI_TIM_2 0x58
71 #define ATAPI_OFFSET_ULTRA_TIM_0 0x60
72 #define ATAPI_OFFSET_ULTRA_TIM_1 0x64
73 #define ATAPI_OFFSET_ULTRA_TIM_2 0x68
74 #define ATAPI_OFFSET_ULTRA_TIM_3 0x6c
77 #define ATAPI_GET_CONTROL(base)\
78 bfin_read16(base + ATAPI_OFFSET_CONTROL)
79 #define ATAPI_SET_CONTROL(base, val)\
80 bfin_write16(base + ATAPI_OFFSET_CONTROL, val)
81 #define ATAPI_GET_STATUS(base)\
82 bfin_read16(base + ATAPI_OFFSET_STATUS)
83 #define ATAPI_GET_DEV_ADDR(base)\
84 bfin_read16(base + ATAPI_OFFSET_DEV_ADDR)
85 #define ATAPI_SET_DEV_ADDR(base, val)\
86 bfin_write16(base + ATAPI_OFFSET_DEV_ADDR, val)
87 #define ATAPI_GET_DEV_TXBUF(base)\
88 bfin_read16(base + ATAPI_OFFSET_DEV_TXBUF)
89 #define ATAPI_SET_DEV_TXBUF(base, val)\
90 bfin_write16(base + ATAPI_OFFSET_DEV_TXBUF, val)
91 #define ATAPI_GET_DEV_RXBUF(base)\
92 bfin_read16(base + ATAPI_OFFSET_DEV_RXBUF)
93 #define ATAPI_SET_DEV_RXBUF(base, val)\
94 bfin_write16(base + ATAPI_OFFSET_DEV_RXBUF, val)
95 #define ATAPI_GET_INT_MASK(base)\
96 bfin_read16(base + ATAPI_OFFSET_INT_MASK)
97 #define ATAPI_SET_INT_MASK(base, val)\
98 bfin_write16(base + ATAPI_OFFSET_INT_MASK, val)
99 #define ATAPI_GET_INT_STATUS(base)\
100 bfin_read16(base + ATAPI_OFFSET_INT_STATUS)
101 #define ATAPI_SET_INT_STATUS(base, val)\
102 bfin_write16(base + ATAPI_OFFSET_INT_STATUS, val)
103 #define ATAPI_GET_XFER_LEN(base)\
104 bfin_read16(base + ATAPI_OFFSET_XFER_LEN)
105 #define ATAPI_SET_XFER_LEN(base, val)\
106 bfin_write16(base + ATAPI_OFFSET_XFER_LEN, val)
107 #define ATAPI_GET_LINE_STATUS(base)\
108 bfin_read16(base + ATAPI_OFFSET_LINE_STATUS)
109 #define ATAPI_GET_SM_STATE(base)\
110 bfin_read16(base + ATAPI_OFFSET_SM_STATE)
111 #define ATAPI_GET_TERMINATE(base)\
112 bfin_read16(base + ATAPI_OFFSET_TERMINATE)
113 #define ATAPI_SET_TERMINATE(base, val)\
114 bfin_write16(base + ATAPI_OFFSET_TERMINATE, val)
115 #define ATAPI_GET_PIO_TFRCNT(base)\
116 bfin_read16(base + ATAPI_OFFSET_PIO_TFRCNT)
117 #define ATAPI_GET_DMA_TFRCNT(base)\
118 bfin_read16(base + ATAPI_OFFSET_DMA_TFRCNT)
119 #define ATAPI_GET_UMAIN_TFRCNT(base)\
120 bfin_read16(base + ATAPI_OFFSET_UMAIN_TFRCNT)
121 #define ATAPI_GET_UDMAOUT_TFRCNT(base)\
122 bfin_read16(base + ATAPI_OFFSET_UDMAOUT_TFRCNT)
123 #define ATAPI_GET_REG_TIM_0(base)\
124 bfin_read16(base + ATAPI_OFFSET_REG_TIM_0)
125 #define ATAPI_SET_REG_TIM_0(base, val)\
126 bfin_write16(base + ATAPI_OFFSET_REG_TIM_0, val)
127 #define ATAPI_GET_PIO_TIM_0(base)\
128 bfin_read16(base + ATAPI_OFFSET_PIO_TIM_0)
129 #define ATAPI_SET_PIO_TIM_0(base, val)\
130 bfin_write16(base + ATAPI_OFFSET_PIO_TIM_0, val)
131 #define ATAPI_GET_PIO_TIM_1(base)\
132 bfin_read16(base + ATAPI_OFFSET_PIO_TIM_1)
133 #define ATAPI_SET_PIO_TIM_1(base, val)\
134 bfin_write16(base + ATAPI_OFFSET_PIO_TIM_1, val)
135 #define ATAPI_GET_MULTI_TIM_0(base)\
136 bfin_read16(base + ATAPI_OFFSET_MULTI_TIM_0)
137 #define ATAPI_SET_MULTI_TIM_0(base, val)\
138 bfin_write16(base + ATAPI_OFFSET_MULTI_TIM_0, val)
139 #define ATAPI_GET_MULTI_TIM_1(base)\
140 bfin_read16(base + ATAPI_OFFSET_MULTI_TIM_1)
141 #define ATAPI_SET_MULTI_TIM_1(base, val)\
142 bfin_write16(base + ATAPI_OFFSET_MULTI_TIM_1, val)
143 #define ATAPI_GET_MULTI_TIM_2(base)\
144 bfin_read16(base + ATAPI_OFFSET_MULTI_TIM_2)
145 #define ATAPI_SET_MULTI_TIM_2(base, val)\
146 bfin_write16(base + ATAPI_OFFSET_MULTI_TIM_2, val)
147 #define ATAPI_GET_ULTRA_TIM_0(base)\
148 bfin_read16(base + ATAPI_OFFSET_ULTRA_TIM_0)
149 #define ATAPI_SET_ULTRA_TIM_0(base, val)\
150 bfin_write16(base + ATAPI_OFFSET_ULTRA_TIM_0, val)
151 #define ATAPI_GET_ULTRA_TIM_1(base)\
152 bfin_read16(base + ATAPI_OFFSET_ULTRA_TIM_1)
153 #define ATAPI_SET_ULTRA_TIM_1(base, val)\
154 bfin_write16(base + ATAPI_OFFSET_ULTRA_TIM_1, val)
155 #define ATAPI_GET_ULTRA_TIM_2(base)\
156 bfin_read16(base + ATAPI_OFFSET_ULTRA_TIM_2)
157 #define ATAPI_SET_ULTRA_TIM_2(base, val)\
158 bfin_write16(base + ATAPI_OFFSET_ULTRA_TIM_2, val)
159 #define ATAPI_GET_ULTRA_TIM_3(base)\
160 bfin_read16(base + ATAPI_OFFSET_ULTRA_TIM_3)
161 #define ATAPI_SET_ULTRA_TIM_3(base, val)\
162 bfin_write16(base + ATAPI_OFFSET_ULTRA_TIM_3, val)
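/* These accessors are thin wrappers around the Blackfin MMIO helpers:
 * each GET reads a 16-bit register at (base + offset) via bfin_read16()
 * and each SET writes it via bfin_write16().  Typical read-modify-write
 * usage, as seen throughout this driver:
 *
 *	ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | PIO_START);
 */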
165 * PIO Mode - Frequency compatibility
167 /* mode: 0 1 2 3 4 */
168 static const u32 pio_fsclk[] =
169 { 33333333, 33333333, 33333333, 33333333, 33333333 };
172 * MDMA Mode - Frequency compatibility
174 /* mode: 0 1 2 */
175 static const u32 mdma_fsclk[] = { 33333333, 33333333, 33333333 };
178 * UDMA Mode - Frequency compatibility
180 * UDMA5 - 100 MB/s - SCLK = 133 MHz
181 * UDMA4 - 66 MB/s - SCLK >= 80 MHz
182 * UDMA3 - 44.4 MB/s - SCLK >= 50 MHz
183 * UDMA2 - 33 MB/s - SCLK >= 40 MHz
185 /* mode: 0 1 2 3 4 5 */
186 static const u32 udma_fsclk[] =
187 { 33333333, 33333333, 40000000, 50000000, 80000000, 133333333 };
190 * Register transfer timing table
192 /* mode: 0 1 2 3 4 */
193 /* Cycle Time */
194 static const u32 reg_t0min[] = { 600, 383, 330, 180, 120 };
195 /* DIOR/DIOW to end cycle */
196 static const u32 reg_t2min[] = { 290, 290, 290, 70, 25 };
197 /* DIOR/DIOW asserted pulse width */
198 static const u32 reg_teocmin[] = { 290, 290, 290, 80, 70 };
201 * PIO timing table
203 /* mode: 0 1 2 3 4 */
204 /* Cycle Time */
205 static const u32 pio_t0min[] = { 600, 383, 240, 180, 120 };
206 /* Address valid to DIOR/DIOW */
207 static const u32 pio_t1min[] = { 70, 50, 30, 30, 25 };
208 /* DIOR/DIOW to end cycle */
209 static const u32 pio_t2min[] = { 165, 125, 100, 80, 70 };
210 /* DIOR/DIOW asserted pulse width */
211 static const u32 pio_teocmin[] = { 165, 125, 100, 70, 25 };
212 /* DIOW data hold */
213 static const u32 pio_t4min[] = { 30, 20, 15, 10, 10 };
215 /* ******************************************************************
216 * Multiword DMA timing table
217 * ******************************************************************
219 /* mode: 0 1 2 */
220 /* Cycle Time */
221 static const u32 mdma_t0min[] = { 480, 150, 120 };
222 /* DIOR/DIOW asserted pulse width */
223 static const u32 mdma_tdmin[] = { 215, 80, 70 };
224 /* DIOW data hold */
225 static const u32 mdma_thmin[] = { 20, 15, 10 };
226 /* DIOR/DIOW to DMACK hold */
227 static const u32 mdma_tjmin[] = { 20, 5, 5 };
228 /* DIOR negated pulse width */
229 static const u32 mdma_tkrmin[] = { 50, 50, 25 };
230 /* DIOW negated pulse width */
231 static const u32 mdma_tkwmin[] = { 215, 50, 25 };
232 /* CS[1:0] valid to DIOR/DIOW */
233 static const u32 mdma_tmmin[] = { 50, 30, 25 };
234 /* DMACK to read data released */
235 static const u32 mdma_tzmax[] = { 20, 25, 25 };
238 * Ultra DMA timing table
240 /* mode: 0 1 2 3 4 5 */
241 static const u32 udma_tcycmin[] = { 112, 73, 54, 39, 25, 17 };
242 static const u32 udma_tdvsmin[] = { 70, 48, 31, 20, 7, 5 };
243 static const u32 udma_tenvmax[] = { 70, 70, 70, 55, 55, 50 };
244 static const u32 udma_trpmin[] = { 160, 125, 100, 100, 100, 85 };
245 static const u32 udma_tmin[] = { 5, 5, 5, 5, 3, 3 };
248 static const u32 udma_tmlimin = 20;
249 static const u32 udma_tzahmin = 20;
250 static const u32 udma_tenvmin = 20;
251 static const u32 udma_tackmin = 20;
252 static const u32 udma_tssmin = 50;
256 * Function: num_clocks_min
258 * Description:
259 * calculate number of SCLK cycles to meet minimum timing
261 static unsigned short num_clocks_min(unsigned long tmin,
262 unsigned long fsclk)
264 unsigned long tmp ;
265 unsigned short result;
267 tmp = tmin * (fsclk/1000/1000) / 1000;
268 result = (unsigned short)tmp;
269 if ((tmp*1000*1000) < (tmin*(fsclk/1000))) {
270 result++;
273 return result;
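/* Worked example: for pio_t2min[0] = 165 ns at fsclk = 133000000 Hz,
 * tmp = 165 * 133 / 1000 = 21, and since 21 * 1000 * 1000 is less than
 * 165 * 133000, the result is rounded up to 22 SCLK cycles.
 */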
277 * bfin_set_piomode - Initialize host controller PATA PIO timings
278 * @ap: Port whose timings we are configuring
279 * @adev: Device whose PIO timings we are configuring
281 * Set PIO mode for device.
283 * LOCKING:
284 * None (inherited from caller).
287 static void bfin_set_piomode(struct ata_port *ap, struct ata_device *adev)
289 int mode = adev->pio_mode - XFER_PIO_0;
290 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
291 unsigned int fsclk = get_sclk();
292 unsigned short teoc_reg, t2_reg, teoc_pio;
293 unsigned short t4_reg, t2_pio, t1_reg;
294 unsigned short n0, n6, t6min = 5;
296 /* the most restrictive timing values are t6 and tc, the DIOW data hold.
297 * If one SCLK pulse is longer than this minimum value then register
298 * transfers cannot be supported at this frequency.
300 n6 = num_clocks_min(t6min, fsclk);
301 if (mode >= 0 && mode <= 4 && n6 >= 1) {
302 dev_dbg(adev->link->ap->dev, "set piomode: mode=%d, fsclk=%u\n", mode, fsclk);
303 /* calculate the timing values for register transfers. */
304 while (mode > 0 && pio_fsclk[mode] > fsclk)
305 mode--;
307 /* DIOR/DIOW to end cycle time */
308 t2_reg = num_clocks_min(reg_t2min[mode], fsclk);
309 /* DIOR/DIOW asserted pulse width */
310 teoc_reg = num_clocks_min(reg_teocmin[mode], fsclk);
311 /* Cycle Time */
312 n0 = num_clocks_min(reg_t0min[mode], fsclk);
314 /* increase t2 until we meet the minimum cycle length */
315 if (t2_reg + teoc_reg < n0)
316 t2_reg = n0 - teoc_reg;
318 /* calculate the timing values for pio transfers. */
320 /* DIOR/DIOW to end cycle time */
321 t2_pio = num_clocks_min(pio_t2min[mode], fsclk);
322 /* DIOR/DIOW asserted pulse width */
323 teoc_pio = num_clocks_min(pio_teocmin[mode], fsclk);
324 /* Cycle Time */
325 n0 = num_clocks_min(pio_t0min[mode], fsclk);
327 /* increase t2 until we meet the minimum cycle length */
328 if (t2_pio + teoc_pio < n0)
329 t2_pio = n0 - teoc_pio;
331 /* Address valid to DIOR/DIOW */
332 t1_reg = num_clocks_min(pio_t1min[mode], fsclk);
334 /* DIOW data hold */
335 t4_reg = num_clocks_min(pio_t4min[mode], fsclk);
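/* Pack the computed cycle counts into the controller's timing
 * registers: REG_TIM_0 takes teoc_reg in its high byte and t2_reg in
 * its low byte, while PIO_TIM_0 packs t4_reg, t2_pio and t1_reg per
 * the shifts below.
 */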
337 ATAPI_SET_REG_TIM_0(base, (teoc_reg<<8 | t2_reg));
338 ATAPI_SET_PIO_TIM_0(base, (t4_reg<<12 | t2_pio<<4 | t1_reg));
339 ATAPI_SET_PIO_TIM_1(base, teoc_pio);
340 if (mode > 2) {
341 ATAPI_SET_CONTROL(base,
342 ATAPI_GET_CONTROL(base) | IORDY_EN);
343 } else {
344 ATAPI_SET_CONTROL(base,
345 ATAPI_GET_CONTROL(base) & ~IORDY_EN);
348 /* Disable host ATAPI PIO interrupts */
349 ATAPI_SET_INT_MASK(base, ATAPI_GET_INT_MASK(base)
350 & ~(PIO_DONE_MASK | HOST_TERM_XFER_MASK));
351 SSYNC();
356 * bfin_set_dmamode - Initialize host controller PATA DMA timings
357 * @ap: Port whose timings we are configuring
358 * @adev: Device whose DMA timings we are configuring
360 * Set UDMA mode for device.
362 * LOCKING:
363 * None (inherited from caller).
366 static void bfin_set_dmamode(struct ata_port *ap, struct ata_device *adev)
368 int mode;
369 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
370 unsigned long fsclk = get_sclk();
371 unsigned short tenv, tack, tcyc_tdvs, tdvs, tmli, tss, trp, tzah;
372 unsigned short tm, td, tkr, tkw, teoc, th;
373 unsigned short n0, nf, tfmin = 5;
374 unsigned short nmin, tcyc;
376 mode = adev->dma_mode - XFER_UDMA_0;
377 if (mode >= 0 && mode <= 5) {
378 dev_dbg(adev->link->ap->dev, "set udmamode: mode=%d\n", mode);
379 /* the most restrictive timing values are t6 and tc,
380 * the DIOW data hold. If one SCLK pulse is longer
381 * than this minimum value then register
382 * transfers cannot be supported at this frequency.
384 while (mode > 0 && udma_fsclk[mode] > fsclk)
385 mode--;
387 nmin = num_clocks_min(udma_tmin[mode], fsclk);
388 if (nmin >= 1) {
389 /* calculate the timing values for Ultra DMA. */
390 tdvs = num_clocks_min(udma_tdvsmin[mode], fsclk);
391 tcyc = num_clocks_min(udma_tcycmin[mode], fsclk);
392 tcyc_tdvs = 2;
394 /* increase tcyc - tdvs (tcyc_tdvs) until we meet
395 * the minimum cycle length
397 if (tdvs + tcyc_tdvs < tcyc)
398 tcyc_tdvs = tcyc - tdvs;
400 /* Now assign the values required for the timing
401 * registers
403 if (tcyc_tdvs < 2)
404 tcyc_tdvs = 2;
406 if (tdvs < 2)
407 tdvs = 2;
409 tack = num_clocks_min(udma_tackmin, fsclk);
410 tss = num_clocks_min(udma_tssmin, fsclk);
411 tmli = num_clocks_min(udma_tmlimin, fsclk);
412 tzah = num_clocks_min(udma_tzahmin, fsclk);
413 trp = num_clocks_min(udma_trpmin[mode], fsclk);
414 tenv = num_clocks_min(udma_tenvmin, fsclk);
415 if (tenv <= udma_tenvmax[mode]) {
416 ATAPI_SET_ULTRA_TIM_0(base, (tenv<<8 | tack));
417 ATAPI_SET_ULTRA_TIM_1(base,
418 (tcyc_tdvs<<8 | tdvs));
419 ATAPI_SET_ULTRA_TIM_2(base, (tmli<<8 | tss));
420 ATAPI_SET_ULTRA_TIM_3(base, (trp<<8 | tzah));
422 /* Enable host ATAPI Ultra DMA interrupts */
423 ATAPI_SET_INT_MASK(base,
424 ATAPI_GET_INT_MASK(base)
425 | UDMAIN_DONE_MASK
426 | UDMAOUT_DONE_MASK
427 | UDMAIN_TERM_MASK
428 | UDMAOUT_TERM_MASK);
433 mode = adev->dma_mode - XFER_MW_DMA_0;
434 if (mode >= 0 && mode <= 2) {
435 dev_dbg(adev->link->ap->dev, "set mdmamode: mode=%d\n", mode);
436 /* the most restrictive timing value is tf, the DMACK to
437 * read data released. If one SCLK pulse is longer than
438 * this maximum value then the MDMA mode
439 * cannot be supported at this frequency.
441 while (mode > 0 && mdma_fsclk[mode] > fsclk)
442 mode--;
444 nf = num_clocks_min(tfmin, fsclk);
445 if (nf >= 1) {
446 /* calculate the timing values for Multi-word DMA. */
448 /* DIOR/DIOW asserted pulse width */
449 td = num_clocks_min(mdma_tdmin[mode], fsclk);
451 /* DIOR negated pulse width */
452 tkw = num_clocks_min(mdma_tkwmin[mode], fsclk);
454 /* Cycle Time */
455 n0 = num_clocks_min(mdma_t0min[mode], fsclk);
457 /* increase tkw until we meet the minimum cycle length */
458 if (tkw + td < n0)
459 tkw = n0 - td;
461 /* DIOR negated pulse width - read */
462 tkr = num_clocks_min(mdma_tkrmin[mode], fsclk);
463 /* CS[1:0] valid to DIOR/DIOW */
464 tm = num_clocks_min(mdma_tmmin[mode], fsclk);
465 /* DIOR/DIOW to DMACK hold */
466 teoc = num_clocks_min(mdma_tjmin[mode], fsclk);
467 /* DIOW Data hold */
468 th = num_clocks_min(mdma_thmin[mode], fsclk);
470 ATAPI_SET_MULTI_TIM_0(base, (tm<<8 | td));
471 ATAPI_SET_MULTI_TIM_1(base, (tkr<<8 | tkw));
472 ATAPI_SET_MULTI_TIM_2(base, (teoc<<8 | th));
474 /* Enable host ATAPI Multi DMA interrupts */
475 ATAPI_SET_INT_MASK(base, ATAPI_GET_INT_MASK(base)
476 | MULTI_DONE_MASK | MULTI_TERM_MASK);
477 SSYNC();
480 return;
485 * Function: wait_complete
487 * Description: Waits for the interrupt from the device
490 static inline void wait_complete(void __iomem *base, unsigned short mask)
492 unsigned short status;
493 unsigned int i = 0;
495 #define PATA_BF54X_WAIT_TIMEOUT 10000
497 for (i = 0; i < PATA_BF54X_WAIT_TIMEOUT; i++) {
498 status = ATAPI_GET_INT_STATUS(base) & mask;
499 if (status)
500 break;
503 ATAPI_SET_INT_STATUS(base, mask);
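/* This is a bounded busy-wait (at most PATA_BF54X_WAIT_TIMEOUT polls of
 * ATA_INT_STATUS); writing the mask back afterwards clears the handled
 * status bits, which appear to be write-one-to-clear.
 */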
508 * Function: write_atapi_register
510 * Description: Writes to ATA Device Register
514 static void write_atapi_register(void __iomem *base,
515 unsigned long ata_reg, unsigned short value)
517 /* Program the ATA_DEV_TXBUF register with write data (to be
518 * written into the device).
520 ATAPI_SET_DEV_TXBUF(base, value);
522 /* Program the ATA_DEV_ADDR register with address of the
523 * device register (0x01 to 0x0F).
525 ATAPI_SET_DEV_ADDR(base, ata_reg);
527 /* Program the ATA_CTRL register with dir set to write (1)
529 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | XFER_DIR));
531 /* ensure PIO DMA is not set */
532 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~PIO_USE_DMA));
534 /* and start the transfer */
535 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | PIO_START));
537 /* Wait for the interrupt to indicate the end of the transfer.
538 * (We need to wait on and clear the ATA_DEV_INT interrupt status)
540 wait_complete(base, PIO_DONE_INT);
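/* The write path mirrors the register read path below: data is staged
 * in ATA_DEV_TXBUF before the address, direction and PIO_START bits
 * are programmed, and the routine returns once PIO_DONE_INT fires.
 */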
545 * Function: read_atapi_register
547 * Description: Reads from ATA Device Register
551 static unsigned short read_atapi_register(void __iomem *base,
552 unsigned long ata_reg)
554 /* Program the ATA_DEV_ADDR register with address of the
555 * device register (0x01 to 0x0F).
557 ATAPI_SET_DEV_ADDR(base, ata_reg);
559 /* Program the ATA_CTRL register with dir set to read (0) and
561 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~XFER_DIR));
563 /* ensure PIO DMA is not set */
564 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~PIO_USE_DMA));
566 /* and start the transfer */
567 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | PIO_START));
569 /* Wait for the interrupt to indicate the end of the transfer.
570 * (PIO_DONE interrupt is set and it doesn't seem to matter
571 * that we don't clear it)
573 wait_complete(base, PIO_DONE_INT);
575 /* Read the ATA_DEV_RXBUF register to get the data just read
576 * from the device.
578 return ATAPI_GET_DEV_RXBUF(base);
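/* Register reads thus follow a fixed sequence: latch the register
 * address, clear XFER_DIR for a read, clear PIO_USE_DMA, set PIO_START,
 * wait for PIO_DONE_INT, then pick the result out of ATA_DEV_RXBUF.
 */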
583 * Function: write_atapi_data
585 * Description: Writes data words to the ATA device data register
589 static void write_atapi_data(void __iomem *base,
590 int len, unsigned short *buf)
592 int i;
594 /* Set transfer length to 1 */
595 ATAPI_SET_XFER_LEN(base, 1);
597 /* Program the ATA_DEV_ADDR register with address of the
598 * ATA_REG_DATA
600 ATAPI_SET_DEV_ADDR(base, ATA_REG_DATA);
602 /* Program the ATA_CTRL register with dir set to write (1)
604 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | XFER_DIR));
606 /* ensure PIO DMA is not set */
607 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~PIO_USE_DMA));
609 for (i = 0; i < len; i++) {
610 /* Program the ATA_DEV_TXBUF register with write data (to be
611 * written into the device).
613 ATAPI_SET_DEV_TXBUF(base, buf[i]);
615 /* and start the transfer */
616 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | PIO_START));
618 /* Wait for the interrupt to indicate the end of the transfer.
619 * (We need to wait on and clear the ATA_DEV_INT
620 * interrupt status)
622 wait_complete(base, PIO_DONE_INT);
628 * Function: read_atapi_data
630 * Description: Reads data words from the ATA device data register
634 static void read_atapi_data(void __iomem *base,
635 int len, unsigned short *buf)
637 int i;
639 /* Set transfer length to 1 */
640 ATAPI_SET_XFER_LEN(base, 1);
642 /* Program the ATA_DEV_ADDR register with address of the
643 * ATA_REG_DATA
645 ATAPI_SET_DEV_ADDR(base, ATA_REG_DATA);
647 /* Program the ATA_CTRL register with dir set to read (0) and
649 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~XFER_DIR));
651 /* ensure PIO DMA is not set */
652 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~PIO_USE_DMA));
654 for (i = 0; i < len; i++) {
655 /* and start the transfer */
656 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | PIO_START));
658 /* Wait for the interrupt to indicate the end of the transfer.
659 * (PIO_DONE interrupt is set and it doesn't seem to matter
660 * that we don't clear it)
662 wait_complete(base, PIO_DONE_INT);
664 /* Read the ATA_DEV_RXBUF register to get the data just read
665 * from the device.
667 buf[i] = ATAPI_GET_DEV_RXBUF(base);
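/* Each loop iteration moves one 16-bit word: XFER_LEN stays at 1 and
 * PIO_START is re-armed for every word, so "len" counts words, not
 * bytes (see bfin_data_xfer(), which passes buflen >> 1).
 */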
672 * bfin_tf_load - send taskfile registers to host controller
673 * @ap: Port to which output is sent
674 * @tf: ATA taskfile register set
676 * Note: Original code is ata_sff_tf_load().
679 static void bfin_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
681 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
682 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
684 if (tf->ctl != ap->last_ctl) {
685 write_atapi_register(base, ATA_REG_CTRL, tf->ctl);
686 ap->last_ctl = tf->ctl;
687 ata_wait_idle(ap);
690 if (is_addr) {
691 if (tf->flags & ATA_TFLAG_LBA48) {
692 write_atapi_register(base, ATA_REG_FEATURE,
693 tf->hob_feature);
694 write_atapi_register(base, ATA_REG_NSECT,
695 tf->hob_nsect);
696 write_atapi_register(base, ATA_REG_LBAL, tf->hob_lbal);
697 write_atapi_register(base, ATA_REG_LBAM, tf->hob_lbam);
698 write_atapi_register(base, ATA_REG_LBAH, tf->hob_lbah);
699 dev_dbg(ap->dev, "hob: feat 0x%X nsect 0x%X, lba 0x%X "
700 "0x%X 0x%X\n",
701 tf->hob_feature,
702 tf->hob_nsect,
703 tf->hob_lbal,
704 tf->hob_lbam,
705 tf->hob_lbah);
708 write_atapi_register(base, ATA_REG_FEATURE, tf->feature);
709 write_atapi_register(base, ATA_REG_NSECT, tf->nsect);
710 write_atapi_register(base, ATA_REG_LBAL, tf->lbal);
711 write_atapi_register(base, ATA_REG_LBAM, tf->lbam);
712 write_atapi_register(base, ATA_REG_LBAH, tf->lbah);
713 dev_dbg(ap->dev, "feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
714 tf->feature,
715 tf->nsect,
716 tf->lbal,
717 tf->lbam,
718 tf->lbah);
721 if (tf->flags & ATA_TFLAG_DEVICE) {
722 write_atapi_register(base, ATA_REG_DEVICE, tf->device);
723 dev_dbg(ap->dev, "device 0x%X\n", tf->device);
726 ata_wait_idle(ap);
730 * bfin_check_status - Read device status reg & clear interrupt
731 * @ap: port where the device is
733 * Note: Original code is ata_check_status().
736 static u8 bfin_check_status(struct ata_port *ap)
738 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
739 return read_atapi_register(base, ATA_REG_STATUS);
743 * bfin_tf_read - input device's ATA taskfile shadow registers
744 * @ap: Port from which input is read
745 * @tf: ATA taskfile register set for storing input
747 * Note: Original code is ata_sff_tf_read().
750 static void bfin_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
752 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
754 tf->command = bfin_check_status(ap);
755 tf->feature = read_atapi_register(base, ATA_REG_ERR);
756 tf->nsect = read_atapi_register(base, ATA_REG_NSECT);
757 tf->lbal = read_atapi_register(base, ATA_REG_LBAL);
758 tf->lbam = read_atapi_register(base, ATA_REG_LBAM);
759 tf->lbah = read_atapi_register(base, ATA_REG_LBAH);
760 tf->device = read_atapi_register(base, ATA_REG_DEVICE);
762 if (tf->flags & ATA_TFLAG_LBA48) {
763 write_atapi_register(base, ATA_REG_CTRL, tf->ctl | ATA_HOB);
764 tf->hob_feature = read_atapi_register(base, ATA_REG_ERR);
765 tf->hob_nsect = read_atapi_register(base, ATA_REG_NSECT);
766 tf->hob_lbal = read_atapi_register(base, ATA_REG_LBAL);
767 tf->hob_lbam = read_atapi_register(base, ATA_REG_LBAM);
768 tf->hob_lbah = read_atapi_register(base, ATA_REG_LBAH);
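/* Setting ATA_HOB in the device control register exposes the
 * high-order byte copies of the taskfile registers, which is how the
 * LBA48 "hob" fields are read back here.
 */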
773 * bfin_exec_command - issue ATA command to host controller
774 * @ap: port to which command is being issued
775 * @tf: ATA taskfile register set
777 * Note: Original code is ata_sff_exec_command().
780 static void bfin_exec_command(struct ata_port *ap,
781 const struct ata_taskfile *tf)
783 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
784 dev_dbg(ap->dev, "ata%u: cmd 0x%X\n", ap->print_id, tf->command);
786 write_atapi_register(base, ATA_REG_CMD, tf->command);
787 ata_sff_pause(ap);
791 * bfin_check_altstatus - Read device alternate status reg
792 * @ap: port where the device is
795 static u8 bfin_check_altstatus(struct ata_port *ap)
797 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
798 return read_atapi_register(base, ATA_REG_ALTSTATUS);
802 * bfin_dev_select - Select device 0/1 on ATA bus
803 * @ap: ATA channel to manipulate
804 * @device: ATA device (numbered from zero) to select
806 * Note: Original code is ata_sff_dev_select().
809 static void bfin_dev_select(struct ata_port *ap, unsigned int device)
811 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
812 u8 tmp;
814 if (device == 0)
815 tmp = ATA_DEVICE_OBS;
816 else
817 tmp = ATA_DEVICE_OBS | ATA_DEV1;
819 write_atapi_register(base, ATA_REG_DEVICE, tmp);
820 ata_sff_pause(ap);
824 * bfin_set_devctl - Write device control reg
825 * @ap: port where the device is
826 * @ctl: value to write
829 static u8 bfin_set_devctl(struct ata_port *ap, u8 ctl)
831 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
832 write_atapi_register(base, ATA_REG_CTRL, ctl);
836 * bfin_bmdma_setup - Set up IDE DMA transaction
837 * @qc: Info associated with this ATA transaction.
839 * Note: Original code is ata_bmdma_setup().
842 static void bfin_bmdma_setup(struct ata_queued_cmd *qc)
844 unsigned short config = WDSIZE_16;
845 struct scatterlist *sg;
846 unsigned int si;
848 dev_dbg(qc->ap->dev, "in atapi dma setup\n");
849 /* Program the ATA_CTRL register with dir */
850 if (qc->tf.flags & ATA_TFLAG_WRITE) {
851 /* fill the ATAPI DMA controller */
852 set_dma_config(CH_ATAPI_TX, config);
853 set_dma_x_modify(CH_ATAPI_TX, 2);
854 for_each_sg(qc->sg, sg, qc->n_elem, si) {
855 set_dma_start_addr(CH_ATAPI_TX, sg_dma_address(sg));
856 set_dma_x_count(CH_ATAPI_TX, sg_dma_len(sg) >> 1);
858 } else {
859 config |= WNR;
860 /* fill the ATAPI DMA controller */
861 set_dma_config(CH_ATAPI_RX, config);
862 set_dma_x_modify(CH_ATAPI_RX, 2);
863 for_each_sg(qc->sg, sg, qc->n_elem, si) {
864 set_dma_start_addr(CH_ATAPI_RX, sg_dma_address(sg));
865 set_dma_x_count(CH_ATAPI_RX, sg_dma_len(sg) >> 1);
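/* In both directions the DMA channel is programmed for 16-bit units:
 * WDSIZE_16 with an x_modify of 2 bytes, and the element count is the
 * scatterlist length in words (sg_dma_len >> 1). WNR is set only on
 * the receive (read) channel, i.e. when the DMA engine writes into
 * memory.
 */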
871 * bfin_bmdma_start - Start an IDE DMA transaction
872 * @qc: Info associated with this ATA transaction.
874 * Note: Original code is ata_bmdma_start().
877 static void bfin_bmdma_start(struct ata_queued_cmd *qc)
879 struct ata_port *ap = qc->ap;
880 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
881 struct scatterlist *sg;
882 unsigned int si;
884 dev_dbg(qc->ap->dev, "in atapi dma start\n");
885 if (!(ap->udma_mask || ap->mwdma_mask))
886 return;
888 /* start ATAPI DMA controller*/
889 if (qc->tf.flags & ATA_TFLAG_WRITE) {
891 * On blackfin arch, uncacheable memory is not
892 * allocated with flag GFP_DMA. DMA buffer from
893 * common kernel code should be flushed if WB
894 * data cache is enabled. Otherwise, this loop
895 * is an empty loop and optimized out.
897 for_each_sg(qc->sg, sg, qc->n_elem, si) {
898 flush_dcache_range(sg_dma_address(sg),
899 sg_dma_address(sg) + sg_dma_len(sg));
901 enable_dma(CH_ATAPI_TX);
902 dev_dbg(qc->ap->dev, "enable udma write\n");
904 /* Send ATA DMA write command */
905 bfin_exec_command(ap, &qc->tf);
907 /* set ATA DMA write direction */
908 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base)
909 | XFER_DIR));
910 } else {
911 enable_dma(CH_ATAPI_RX);
912 dev_dbg(qc->ap->dev, "enable udma read\n");
914 /* Send ATA DMA read command */
915 bfin_exec_command(ap, &qc->tf);
917 /* set ATA DMA read direction */
918 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base)
919 & ~XFER_DIR));
922 /* Reset all transfer count */
923 ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | TFRCNT_RST);
925 /* Set ATAPI state machine control in terminate sequence */
926 ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | END_ON_TERM);
928 /* Set transfer length to buffer len */
929 for_each_sg(qc->sg, sg, qc->n_elem, si) {
930 ATAPI_SET_XFER_LEN(base, (sg_dma_len(sg) >> 1));
933 /* Enable ATA DMA operation*/
934 if (ap->udma_mask)
935 ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base)
936 | ULTRA_START);
937 else
938 ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base)
939 | MULTI_START);
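/* Sequence for a DMA transfer, as coded above: flush the data cache
 * for writes, enable the Blackfin DMA channel, issue the ATA command,
 * set XFER_DIR, reset the transfer counters, arm END_ON_TERM, program
 * XFER_LEN, then kick ULTRA_START or MULTI_START depending on whether
 * UDMA is in use.
 */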
943 * bfin_bmdma_stop - Stop IDE DMA transfer
944 * @qc: Command we are ending DMA for
947 static void bfin_bmdma_stop(struct ata_queued_cmd *qc)
949 struct ata_port *ap = qc->ap;
950 struct scatterlist *sg;
951 unsigned int si;
953 dev_dbg(qc->ap->dev, "in atapi dma stop\n");
954 if (!(ap->udma_mask || ap->mwdma_mask))
955 return;
957 /* stop ATAPI DMA controller*/
958 if (qc->tf.flags & ATA_TFLAG_WRITE)
959 disable_dma(CH_ATAPI_TX);
960 else {
961 disable_dma(CH_ATAPI_RX);
962 if (ap->hsm_task_state & HSM_ST_LAST) {
964 * On blackfin arch, uncacheable memory is not
965 * allocated with flag GFP_DMA. DMA buffer from
966 * common kernel code should be invalidated if
967 * data cache is enabled. Otherwise, this loop
968 * is an empty loop and optimized out.
970 for_each_sg(qc->sg, sg, qc->n_elem, si) {
971 invalidate_dcache_range(
972 sg_dma_address(sg),
973 sg_dma_address(sg)
974 + sg_dma_len(sg));
981 * bfin_devchk - PATA device presence detection
982 * @ap: ATA channel to examine
983 * @device: Device to examine (starting at zero)
985 * Note: Original code is ata_devchk().
988 static unsigned int bfin_devchk(struct ata_port *ap,
989 unsigned int device)
991 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
992 u8 nsect, lbal;
994 bfin_dev_select(ap, device);
996 write_atapi_register(base, ATA_REG_NSECT, 0x55);
997 write_atapi_register(base, ATA_REG_LBAL, 0xaa);
999 write_atapi_register(base, ATA_REG_NSECT, 0xaa);
1000 write_atapi_register(base, ATA_REG_LBAL, 0x55);
1002 write_atapi_register(base, ATA_REG_NSECT, 0x55);
1003 write_atapi_register(base, ATA_REG_LBAL, 0xaa);
1005 nsect = read_atapi_register(base, ATA_REG_NSECT);
1006 lbal = read_atapi_register(base, ATA_REG_LBAL);
1008 if ((nsect == 0x55) && (lbal == 0xaa))
1009 return 1; /* we found a device */
1011 return 0; /* nothing found */
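/* Presence is detected the classic IDE way: scribble 0x55/0xaa into
 * the sector count and LBA low registers and check whether the last
 * pair reads back, which only happens if a device is actually decoding
 * the register block.
 */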
1015 * bfin_bus_post_reset - PATA device post reset
1017 * Note: Original code is ata_bus_post_reset().
1020 static void bfin_bus_post_reset(struct ata_port *ap, unsigned int devmask)
1022 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
1023 unsigned int dev0 = devmask & (1 << 0);
1024 unsigned int dev1 = devmask & (1 << 1);
1025 unsigned long deadline;
1027 /* if device 0 was found in ata_devchk, wait for its
1028 * BSY bit to clear
1030 if (dev0)
1031 ata_sff_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
1033 /* if device 1 was found in ata_devchk, wait for
1034 * register access, then wait for BSY to clear
1036 deadline = ata_deadline(jiffies, ATA_TMOUT_BOOT);
1037 while (dev1) {
1038 u8 nsect, lbal;
1040 bfin_dev_select(ap, 1);
1041 nsect = read_atapi_register(base, ATA_REG_NSECT);
1042 lbal = read_atapi_register(base, ATA_REG_LBAL);
1043 if ((nsect == 1) && (lbal == 1))
1044 break;
1045 if (time_after(jiffies, deadline)) {
1046 dev1 = 0;
1047 break;
1049 msleep(50); /* give drive a breather */
1051 if (dev1)
1052 ata_sff_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
1054 /* is all this really necessary? */
1055 bfin_dev_select(ap, 0);
1056 if (dev1)
1057 bfin_dev_select(ap, 1);
1058 if (dev0)
1059 bfin_dev_select(ap, 0);
1063 * bfin_bus_softreset - PATA device software reset
1065 * Note: Original code is ata_bus_softreset().
1068 static unsigned int bfin_bus_softreset(struct ata_port *ap,
1069 unsigned int devmask)
1071 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
1073 /* software reset. causes dev0 to be selected */
1074 write_atapi_register(base, ATA_REG_CTRL, ap->ctl);
1075 udelay(20);
1076 write_atapi_register(base, ATA_REG_CTRL, ap->ctl | ATA_SRST);
1077 udelay(20);
1078 write_atapi_register(base, ATA_REG_CTRL, ap->ctl);
1080 /* spec mandates ">= 2ms" before checking status.
1081 * We wait 150ms, because that was the magic delay used for
1082 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
1083 * between when the ATA command register is written, and then
1084 * status is checked. Because waiting for "a while" before
1085 * checking status is fine, post SRST, we perform this magic
1086 * delay here as well.
1088 * Old drivers/ide uses the 2mS rule and then waits for ready
1090 msleep(150);
1092 /* Before we perform post reset processing we want to see if
1093 * the bus shows 0xFF because the odd clown forgets the D7
1094 * pulldown resistor.
1096 if (bfin_check_status(ap) == 0xFF)
1097 return 0;
1099 bfin_bus_post_reset(ap, devmask);
1101 return 0;
1105 * bfin_softreset - reset host port via ATA SRST
1106 * @ap: port to reset
1107 * @classes: resulting classes of attached devices
1109 * Note: Original code is ata_sff_softreset().
1112 static int bfin_softreset(struct ata_link *link, unsigned int *classes,
1113 unsigned long deadline)
1115 struct ata_port *ap = link->ap;
1116 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
1117 unsigned int devmask = 0, err_mask;
1118 u8 err;
1120 /* determine if device 0/1 are present */
1121 if (bfin_devchk(ap, 0))
1122 devmask |= (1 << 0);
1123 if (slave_possible && bfin_devchk(ap, 1))
1124 devmask |= (1 << 1);
1126 /* select device 0 again */
1127 bfin_dev_select(ap, 0);
1129 /* issue bus reset */
1130 err_mask = bfin_bus_softreset(ap, devmask);
1131 if (err_mask) {
1132 ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
1133 err_mask);
1134 return -EIO;
1137 /* determine by signature whether we have ATA or ATAPI devices */
1138 classes[0] = ata_sff_dev_classify(&ap->link.device[0],
1139 devmask & (1 << 0), &err);
1140 if (slave_possible && err != 0x81)
1141 classes[1] = ata_sff_dev_classify(&ap->link.device[1],
1142 devmask & (1 << 1), &err);
1144 return 0;
1148 * bfin_bmdma_status - Read IDE DMA status
1149 * @ap: Port associated with this ATA transaction.
1152 static unsigned char bfin_bmdma_status(struct ata_port *ap)
1154 unsigned char host_stat = 0;
1155 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
1156 unsigned short int_status = ATAPI_GET_INT_STATUS(base);
1158 if (ATAPI_GET_STATUS(base) & (MULTI_XFER_ON|ULTRA_XFER_ON))
1159 host_stat |= ATA_DMA_ACTIVE;
1160 if (int_status & (MULTI_DONE_INT|UDMAIN_DONE_INT|UDMAOUT_DONE_INT|
1161 ATAPI_DEV_INT))
1162 host_stat |= ATA_DMA_INTR;
1163 if (int_status & (MULTI_TERM_INT|UDMAIN_TERM_INT|UDMAOUT_TERM_INT))
1164 host_stat |= ATA_DMA_ERR|ATA_DMA_INTR;
1166 dev_dbg(ap->dev, "ATAPI: host_stat=0x%x\n", host_stat);
1168 return host_stat;
1172 * bfin_data_xfer - Transfer data by PIO
1173 * @adev: device for this I/O
1174 * @buf: data buffer
1175 * @buflen: buffer length
1176 * @write_data: read/write
1178 * Note: Original code is ata_sff_data_xfer().
1181 static unsigned int bfin_data_xfer(struct ata_device *dev, unsigned char *buf,
1182 unsigned int buflen, int rw)
1184 struct ata_port *ap = dev->link->ap;
1185 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
1186 unsigned int words = buflen >> 1;
1187 unsigned short *buf16 = (u16 *)buf;
1189 /* Transfer multiple of 2 bytes */
1190 if (rw == READ)
1191 read_atapi_data(base, words, buf16);
1192 else
1193 write_atapi_data(base, words, buf16);
1195 /* Transfer trailing 1 byte, if any. */
1196 if (unlikely(buflen & 0x01)) {
1197 unsigned short align_buf[1] = { 0 };
1198 unsigned char *trailing_buf = buf + buflen - 1;
1200 if (rw == READ) {
1201 read_atapi_data(base, 1, align_buf);
1202 memcpy(trailing_buf, align_buf, 1);
1203 } else {
1204 memcpy(align_buf, trailing_buf, 1);
1205 write_atapi_data(base, 1, align_buf);
1207 words++;
1210 return words << 1;
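/* PIO data is always moved as 16-bit words; a trailing odd byte is
 * bounced through align_buf so the device still sees a full word, and
 * the returned byte count is rounded up accordingly (words << 1).
 */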
1214 * bfin_irq_clear - Clear ATAPI interrupt.
1215 * @ap: Port associated with this ATA transaction.
1217 * Note: Original code is ata_bmdma_irq_clear().
1220 static void bfin_irq_clear(struct ata_port *ap)
1222 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
1224 dev_dbg(ap->dev, "in atapi irq clear\n");
1225 ATAPI_SET_INT_STATUS(base, ATAPI_GET_INT_STATUS(base)|ATAPI_DEV_INT
1226 | MULTI_DONE_INT | UDMAIN_DONE_INT | UDMAOUT_DONE_INT
1227 | MULTI_TERM_INT | UDMAIN_TERM_INT | UDMAOUT_TERM_INT);
1231 * bfin_thaw - Thaw DMA controller port
1232 * @ap: port to thaw
1234 * Note: Original code is ata_sff_thaw().
1237 void bfin_thaw(struct ata_port *ap)
1239 dev_dbg(ap->dev, "in atapi dma thaw\n");
1240 bfin_check_status(ap);
1241 ata_sff_irq_on(ap);
1245 * bfin_postreset - standard postreset callback
1246 * @ap: the target ata_port
1247 * @classes: classes of attached devices
1249 * Note: Original code is ata_sff_postreset().
1252 static void bfin_postreset(struct ata_link *link, unsigned int *classes)
1254 struct ata_port *ap = link->ap;
1255 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
1257 /* re-enable interrupts */
1258 ata_sff_irq_on(ap);
1260 /* is double-select really necessary? */
1261 if (classes[0] != ATA_DEV_NONE)
1262 bfin_dev_select(ap, 1);
1263 if (classes[1] != ATA_DEV_NONE)
1264 bfin_dev_select(ap, 0);
1266 /* bail out if no device is present */
1267 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
1268 return;
1271 /* set up device control */
1272 write_atapi_register(base, ATA_REG_CTRL, ap->ctl);
1275 static void bfin_port_stop(struct ata_port *ap)
1277 dev_dbg(ap->dev, "in atapi port stop\n");
1278 if (ap->udma_mask != 0 || ap->mwdma_mask != 0) {
1279 free_dma(CH_ATAPI_RX);
1280 free_dma(CH_ATAPI_TX);
1284 static int bfin_port_start(struct ata_port *ap)
1286 dev_dbg(ap->dev, "in atapi port start\n");
1287 if (!(ap->udma_mask || ap->mwdma_mask))
1288 return 0;
1290 if (request_dma(CH_ATAPI_RX, "BFIN ATAPI RX DMA") >= 0) {
1291 if (request_dma(CH_ATAPI_TX,
1292 "BFIN ATAPI TX DMA") >= 0)
1293 return 0;
1295 free_dma(CH_ATAPI_RX);
1298 ap->udma_mask = 0;
1299 ap->mwdma_mask = 0;
1300 dev_err(ap->dev, "Unable to request ATAPI DMA!"
1301 " Continue in PIO mode.\n");
1303 return 0;
1306 static unsigned int bfin_ata_host_intr(struct ata_port *ap,
1307 struct ata_queued_cmd *qc)
1309 struct ata_eh_info *ehi = &ap->link.eh_info;
1310 u8 status, host_stat = 0;
1312 VPRINTK("ata%u: protocol %d task_state %d\n",
1313 ap->print_id, qc->tf.protocol, ap->hsm_task_state);
1315 /* Check whether we are expecting interrupt in this state */
1316 switch (ap->hsm_task_state) {
1317 case HSM_ST_FIRST:
1318 /* Some pre-ATAPI-4 devices assert INTRQ
1319 * at this state when ready to receive CDB.
1322 /* Check the ATA_DFLAG_CDB_INTR flag is enough here.
1323 * The flag was turned on only for atapi devices.
1324 * No need to check is_atapi_taskfile(&qc->tf) again.
1326 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
1327 goto idle_irq;
1328 break;
1329 case HSM_ST_LAST:
1330 if (qc->tf.protocol == ATA_PROT_DMA ||
1331 qc->tf.protocol == ATAPI_PROT_DMA) {
1332 /* check status of DMA engine */
1333 host_stat = ap->ops->bmdma_status(ap);
1334 VPRINTK("ata%u: host_stat 0x%X\n",
1335 ap->print_id, host_stat);
1337 /* if it's not our irq... */
1338 if (!(host_stat & ATA_DMA_INTR))
1339 goto idle_irq;
1341 /* before we do anything else, clear DMA-Start bit */
1342 ap->ops->bmdma_stop(qc);
1344 if (unlikely(host_stat & ATA_DMA_ERR)) {
1345 /* error when transferring data to/from memory */
1346 qc->err_mask |= AC_ERR_HOST_BUS;
1347 ap->hsm_task_state = HSM_ST_ERR;
1350 break;
1351 case HSM_ST:
1352 break;
1353 default:
1354 goto idle_irq;
1357 /* check altstatus */
1358 status = ap->ops->sff_check_altstatus(ap);
1359 if (status & ATA_BUSY)
1360 goto busy_ata;
1362 /* check main status, clearing INTRQ */
1363 status = ap->ops->sff_check_status(ap);
1364 if (unlikely(status & ATA_BUSY))
1365 goto busy_ata;
1367 /* ack bmdma irq events */
1368 ap->ops->sff_irq_clear(ap);
1370 ata_sff_hsm_move(ap, qc, status, 0);
1372 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
1373 qc->tf.protocol == ATAPI_PROT_DMA))
1374 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
1376 busy_ata:
1377 return 1; /* irq handled */
1379 idle_irq:
1380 ap->stats.idle_irq++;
1382 #ifdef ATA_IRQ_TRAP
1383 if ((ap->stats.idle_irq % 1000) == 0) {
1384 ap->ops->irq_ack(ap, 0); /* debug trap */
1385 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
1386 return 1;
1388 #endif
1389 return 0; /* irq not handled */
1392 static irqreturn_t bfin_ata_interrupt(int irq, void *dev_instance)
1394 struct ata_host *host = dev_instance;
1395 unsigned int i;
1396 unsigned int handled = 0;
1397 unsigned long flags;
1399 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
1400 spin_lock_irqsave(&host->lock, flags);
1402 for (i = 0; i < host->n_ports; i++) {
1403 struct ata_port *ap = host->ports[i];
1404 struct ata_queued_cmd *qc;
1406 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1407 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
1408 handled |= bfin_ata_host_intr(ap, qc);
1411 spin_unlock_irqrestore(&host->lock, flags);
1413 return IRQ_RETVAL(handled);
1417 static struct scsi_host_template bfin_sht = {
1418 ATA_BASE_SHT(DRV_NAME),
1419 .sg_tablesize = SG_NONE,
1420 .dma_boundary = ATA_DMA_BOUNDARY,
1423 static struct ata_port_operations bfin_pata_ops = {
1424 .inherits = &ata_bmdma_port_ops,
1426 .set_piomode = bfin_set_piomode,
1427 .set_dmamode = bfin_set_dmamode,
1429 .sff_tf_load = bfin_tf_load,
1430 .sff_tf_read = bfin_tf_read,
1431 .sff_exec_command = bfin_exec_command,
1432 .sff_check_status = bfin_check_status,
1433 .sff_check_altstatus = bfin_check_altstatus,
1434 .sff_dev_select = bfin_dev_select,
1435 .sff_set_devctl = bfin_set_devctl,
1437 .bmdma_setup = bfin_bmdma_setup,
1438 .bmdma_start = bfin_bmdma_start,
1439 .bmdma_stop = bfin_bmdma_stop,
1440 .bmdma_status = bfin_bmdma_status,
1441 .sff_data_xfer = bfin_data_xfer,
1443 .qc_prep = ata_noop_qc_prep,
1445 .thaw = bfin_thaw,
1446 .softreset = bfin_softreset,
1447 .postreset = bfin_postreset,
1449 .sff_irq_clear = bfin_irq_clear,
1451 .port_start = bfin_port_start,
1452 .port_stop = bfin_port_stop,
1455 static struct ata_port_info bfin_port_info[] = {
1457 .flags = ATA_FLAG_SLAVE_POSS
1458 | ATA_FLAG_MMIO
1459 | ATA_FLAG_NO_LEGACY,
1460 .pio_mask = ATA_PIO4,
1461 .mwdma_mask = 0,
1462 .udma_mask = 0,
1463 .port_ops = &bfin_pata_ops,
1468 * bfin_reset_controller - initialize BF54x ATAPI controller.
1471 static int bfin_reset_controller(struct ata_host *host)
1473 void __iomem *base = (void __iomem *)host->ports[0]->ioaddr.ctl_addr;
1474 int count;
1475 unsigned short status;
1477 /* Disable all ATAPI interrupts */
1478 ATAPI_SET_INT_MASK(base, 0);
1479 SSYNC();
1481 /* Assert the RESET signal for 25 us */
1482 ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | DEV_RST);
1483 udelay(30);
1485 /* Negate the RESET signal for 2 ms */
1486 ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) & ~DEV_RST);
1487 msleep(2);
1489 /* Wait on Busy flag to clear */
1490 count = 10000000;
1491 do {
1492 status = read_atapi_register(base, ATA_REG_STATUS);
1493 } while (--count && (status & ATA_BUSY));
1495 /* Enable only ATAPI Device interrupt */
1496 ATAPI_SET_INT_MASK(base, 1);
1497 SSYNC();
1499 return (!count);
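/* Writing 1 to ATA_INT_MASK leaves only bit 0 unmasked, which per the
 * preceding comment is the ATAPI device interrupt. The DMA
 * done/terminate interrupts are unmasked later by bfin_set_dmamode()
 * once a DMA mode is negotiated.
 */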
1503 * atapi_io_port - define atapi peripheral port pins.
1505 static unsigned short atapi_io_port[] = {
1506 P_ATAPI_RESET,
1507 P_ATAPI_DIOR,
1508 P_ATAPI_DIOW,
1509 P_ATAPI_CS0,
1510 P_ATAPI_CS1,
1511 P_ATAPI_DMACK,
1512 P_ATAPI_DMARQ,
1513 P_ATAPI_INTRQ,
1514 P_ATAPI_IORDY,
1515 P_ATAPI_D0A,
1516 P_ATAPI_D1A,
1517 P_ATAPI_D2A,
1518 P_ATAPI_D3A,
1519 P_ATAPI_D4A,
1520 P_ATAPI_D5A,
1521 P_ATAPI_D6A,
1522 P_ATAPI_D7A,
1523 P_ATAPI_D8A,
1524 P_ATAPI_D9A,
1525 P_ATAPI_D10A,
1526 P_ATAPI_D11A,
1527 P_ATAPI_D12A,
1528 P_ATAPI_D13A,
1529 P_ATAPI_D14A,
1530 P_ATAPI_D15A,
1531 P_ATAPI_A0A,
1532 P_ATAPI_A1A,
1533 P_ATAPI_A2A,
1538 * bfin_atapi_probe - attach a bfin atapi interface
1539 * @pdev: platform device
1541 * Register a bfin atapi interface.
1544 * Platform devices are expected to contain 2 resources per port:
1546 * - I/O Base (IORESOURCE_IO)
1547 * - IRQ (IORESOURCE_IRQ)
1550 static int __devinit bfin_atapi_probe(struct platform_device *pdev)
1552 int board_idx = 0;
1553 struct resource *res;
1554 struct ata_host *host;
1555 unsigned int fsclk = get_sclk();
1556 int udma_mode = 5;
1557 const struct ata_port_info *ppi[] =
1558 { &bfin_port_info[board_idx], NULL };
1561 * Simple resource validation ..
1563 if (unlikely(pdev->num_resources != 2)) {
1564 dev_err(&pdev->dev, "invalid number of resources\n");
1565 return -EINVAL;
1569 * Get the register base first
1571 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1572 if (res == NULL)
1573 return -EINVAL;
1575 while (bfin_port_info[board_idx].udma_mask > 0 &&
1576 udma_fsclk[udma_mode] > fsclk) {
1577 udma_mode--;
1578 bfin_port_info[board_idx].udma_mask >>= 1;
1582 * Now that that's out of the way, wire up the port..
1584 host = ata_host_alloc_pinfo(&pdev->dev, ppi, 1);
1585 if (!host)
1586 return -ENOMEM;
1588 host->ports[0]->ioaddr.ctl_addr = (void *)res->start;
1590 if (peripheral_request_list(atapi_io_port, "atapi-io-port")) {
1591 dev_err(&pdev->dev, "Requesting Peripherals failed\n");
1592 return -EFAULT;
1595 if (bfin_reset_controller(host)) {
1596 peripheral_free_list(atapi_io_port);
1597 dev_err(&pdev->dev, "Failed to reset ATAPI device\n");
1598 return -EFAULT;
1601 if (ata_host_activate(host, platform_get_irq(pdev, 0),
1602 bfin_ata_interrupt, IRQF_SHARED, &bfin_sht) != 0) {
1603 peripheral_free_list(atapi_io_port);
1604 dev_err(&pdev->dev, "Failed to attach ATAPI device\n");
1605 return -ENODEV;
1608 dev_set_drvdata(&pdev->dev, host);
1610 return 0;
1614 * bfin_atapi_remove - unplug a bfin atapi interface
1615 * @pdev: platform device
1617 * A bfin atapi device has been unplugged. Perform the needed
1618 * cleanup. Also called on module unload for any active devices.
1620 static int __devexit bfin_atapi_remove(struct platform_device *pdev)
1622 struct device *dev = &pdev->dev;
1623 struct ata_host *host = dev_get_drvdata(dev);
1625 ata_host_detach(host);
1626 dev_set_drvdata(&pdev->dev, NULL);
1628 peripheral_free_list(atapi_io_port);
1630 return 0;
1633 #ifdef CONFIG_PM
1634 static int bfin_atapi_suspend(struct platform_device *pdev, pm_message_t state)
1636 struct ata_host *host = dev_get_drvdata(&pdev->dev);
1637 if (host)
1638 return ata_host_suspend(host, state);
1639 else
1640 return 0;
1643 static int bfin_atapi_resume(struct platform_device *pdev)
1645 struct ata_host *host = dev_get_drvdata(&pdev->dev);
1646 int ret;
1648 if (host) {
1649 ret = bfin_reset_controller(host);
1650 if (ret) {
1651 printk(KERN_ERR DRV_NAME ": Error during HW init\n");
1652 return ret;
1654 ata_host_resume(host);
1657 return 0;
1659 #else
1660 #define bfin_atapi_suspend NULL
1661 #define bfin_atapi_resume NULL
1662 #endif
1664 static struct platform_driver bfin_atapi_driver = {
1665 .probe = bfin_atapi_probe,
1666 .remove = __devexit_p(bfin_atapi_remove),
1667 .suspend = bfin_atapi_suspend,
1668 .resume = bfin_atapi_resume,
1669 .driver = {
1670 .name = DRV_NAME,
1671 .owner = THIS_MODULE,
1675 #define ATAPI_MODE_SIZE 10
1676 static char bfin_atapi_mode[ATAPI_MODE_SIZE];
1678 static int __init bfin_atapi_init(void)
1680 pr_info("register bfin atapi driver\n");
1682 switch(bfin_atapi_mode[0]) {
1683 case 'p':
1684 case 'P':
1685 break;
1686 case 'm':
1687 case 'M':
1688 bfin_port_info[0].mwdma_mask = ATA_MWDMA2;
1689 break;
1690 default:
1691 bfin_port_info[0].udma_mask = ATA_UDMA5;
1694 return platform_driver_register(&bfin_atapi_driver);
1697 static void __exit bfin_atapi_exit(void)
1699 platform_driver_unregister(&bfin_atapi_driver);
1702 module_init(bfin_atapi_init);
1703 module_exit(bfin_atapi_exit);
1705 * ATAPI mode:
1706 * pio/PIO
1707 * udma/UDMA (default)
1708 * mwdma/MWDMA
1710 module_param_string(bfin_atapi_mode, bfin_atapi_mode, ATAPI_MODE_SIZE, 0);
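/* Example (assuming the module is built as pata-bf54x.ko):
 *	modprobe pata-bf54x bfin_atapi_mode=pio
 * Any value not starting with 'p'/'P' or 'm'/'M' falls back to UDMA5,
 * per bfin_atapi_init() above.
 */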
1712 MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
1713 MODULE_DESCRIPTION("PATA driver for blackfin 54x ATAPI controller");
1714 MODULE_LICENSE("GPL");
1715 MODULE_VERSION(DRV_VERSION);
1716 MODULE_ALIAS("platform:" DRV_NAME);