/*
 *  ahci.c - AHCI SATA support
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *                  Please ALWAYS copy linux-ide@vger.kernel.org
 *
 *  Copyright 2004-2005 Red Hat, Inc.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  AHCI hardware documentation:
 *    http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
 *    http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
 *
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>
#define DRV_NAME	"ahci"
#define DRV_VERSION	"2.0"
enum {
	AHCI_PCI_BAR		= 5,
	AHCI_MAX_SG		= 168, /* hardware max is 64K */
	AHCI_DMA_BOUNDARY	= 0xffffffff,
	AHCI_USE_CLUSTERING	= 0,
	AHCI_MAX_CMDS		= 32,
	AHCI_CMD_SZ		= 32,
	AHCI_CMD_SLOT_SZ	= AHCI_MAX_CMDS * AHCI_CMD_SZ,
	AHCI_RX_FIS_SZ		= 256,
	AHCI_CMD_TBL_CDB	= 0x40,
	AHCI_CMD_TBL_HDR_SZ	= 0x80,
	AHCI_CMD_TBL_SZ		= AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
	AHCI_CMD_TBL_AR_SZ	= AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
	AHCI_PORT_PRIV_DMA_SZ	= AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
				  AHCI_RX_FIS_SZ,

	AHCI_IRQ_ON_SG		= (1 << 31),
	AHCI_CMD_ATAPI		= (1 << 5),
	AHCI_CMD_WRITE		= (1 << 6),
	AHCI_CMD_PREFETCH	= (1 << 7),
	AHCI_CMD_RESET		= (1 << 8),
	AHCI_CMD_CLR_BUSY	= (1 << 10),

	RX_FIS_D2H_REG		= 0x40,	/* offset of D2H Register FIS data */
	RX_FIS_UNK		= 0x60,	/* offset of Unknown FIS data */

	board_ahci		= 0,
	board_ahci_vt8251	= 1,

	/* global controller registers */
	HOST_CAP		= 0x00, /* host capabilities */
	HOST_CTL		= 0x04, /* global host control */
	HOST_IRQ_STAT		= 0x08, /* interrupt status */
	HOST_PORTS_IMPL		= 0x0c, /* bitmap of implemented ports */
	HOST_VERSION		= 0x10, /* AHCI spec. version compliancy */

	/* HOST_CTL bits */
	HOST_RESET		= (1 << 0),  /* reset controller; self-clear */
	HOST_IRQ_EN		= (1 << 1),  /* global IRQ enable */
	HOST_AHCI_EN		= (1 << 31), /* AHCI enabled */

	/* HOST_CAP bits */
	HOST_CAP_SSC		= (1 << 14), /* Slumber capable */
	HOST_CAP_CLO		= (1 << 24), /* Command List Override support */
	HOST_CAP_SSS		= (1 << 27), /* Staggered Spin-up */
	HOST_CAP_NCQ		= (1 << 30), /* Native Command Queueing */
	HOST_CAP_64		= (1 << 31), /* PCI DAC (64-bit DMA) support */

	/* registers for each SATA port */
	PORT_LST_ADDR		= 0x00, /* command list DMA addr */
	PORT_LST_ADDR_HI	= 0x04, /* command list DMA addr hi */
	PORT_FIS_ADDR		= 0x08, /* FIS rx buf addr */
	PORT_FIS_ADDR_HI	= 0x0c, /* FIS rx buf addr hi */
	PORT_IRQ_STAT		= 0x10, /* interrupt status */
	PORT_IRQ_MASK		= 0x14, /* interrupt enable/disable mask */
	PORT_CMD		= 0x18, /* port command */
	PORT_TFDATA		= 0x20, /* taskfile data */
	PORT_SIG		= 0x24, /* device TF signature */
	PORT_CMD_ISSUE		= 0x38, /* command issue */
	PORT_SCR		= 0x28, /* SATA phy register block */
	PORT_SCR_STAT		= 0x28, /* SATA phy register: SStatus */
	PORT_SCR_CTL		= 0x2c, /* SATA phy register: SControl */
	PORT_SCR_ERR		= 0x30, /* SATA phy register: SError */
	PORT_SCR_ACT		= 0x34, /* SATA phy register: SActive */

	/* PORT_IRQ_{STAT,MASK} bits */
	PORT_IRQ_COLD_PRES	= (1 << 31), /* cold presence detect */
	PORT_IRQ_TF_ERR		= (1 << 30), /* task file error */
	PORT_IRQ_HBUS_ERR	= (1 << 29), /* host bus fatal error */
	PORT_IRQ_HBUS_DATA_ERR	= (1 << 28), /* host bus data error */
	PORT_IRQ_IF_ERR		= (1 << 27), /* interface fatal error */
	PORT_IRQ_IF_NONFATAL	= (1 << 26), /* interface non-fatal error */
	PORT_IRQ_OVERFLOW	= (1 << 24), /* xfer exhausted available S/G */
	PORT_IRQ_BAD_PMP	= (1 << 23), /* incorrect port multiplier */

	PORT_IRQ_PHYRDY		= (1 << 22), /* PhyRdy changed */
	PORT_IRQ_DEV_ILCK	= (1 << 7),  /* device interlock */
	PORT_IRQ_CONNECT	= (1 << 6),  /* port connect change status */
	PORT_IRQ_SG_DONE	= (1 << 5),  /* descriptor processed */
	PORT_IRQ_UNK_FIS	= (1 << 4),  /* unknown FIS rx'd */
	PORT_IRQ_SDB_FIS	= (1 << 3),  /* Set Device Bits FIS rx'd */
	PORT_IRQ_DMAS_FIS	= (1 << 2),  /* DMA Setup FIS rx'd */
	PORT_IRQ_PIOS_FIS	= (1 << 1),  /* PIO Setup FIS rx'd */
	PORT_IRQ_D2H_REG_FIS	= (1 << 0),  /* D2H Register FIS rx'd */

	PORT_IRQ_FREEZE		= PORT_IRQ_HBUS_ERR |
				  PORT_IRQ_IF_ERR |
				  PORT_IRQ_CONNECT |
				  PORT_IRQ_PHYRDY |
				  PORT_IRQ_UNK_FIS,
	PORT_IRQ_ERROR		= PORT_IRQ_FREEZE |
				  PORT_IRQ_TF_ERR |
				  PORT_IRQ_HBUS_DATA_ERR,
	DEF_PORT_IRQ		= PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
				  PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
				  PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,

	/* PORT_CMD bits */
	PORT_CMD_ATAPI		= (1 << 24), /* Device is ATAPI */
	PORT_CMD_LIST_ON	= (1 << 15), /* cmd list DMA engine running */
	PORT_CMD_FIS_ON		= (1 << 14), /* FIS DMA engine running */
	PORT_CMD_FIS_RX		= (1 << 4),  /* Enable FIS receive DMA engine */
	PORT_CMD_CLO		= (1 << 3),  /* Command list override */
	PORT_CMD_POWER_ON	= (1 << 2),  /* Power up device */
	PORT_CMD_SPIN_UP	= (1 << 1),  /* Spin up device */
	PORT_CMD_START		= (1 << 0),  /* Enable port DMA engine */

	PORT_CMD_ICC_MASK	= (0xf << 28), /* i/f ICC state mask */
	PORT_CMD_ICC_ACTIVE	= (0x1 << 28), /* Put i/f in active state */
	PORT_CMD_ICC_PARTIAL	= (0x2 << 28), /* Put i/f in partial state */
	PORT_CMD_ICC_SLUMBER	= (0x6 << 28), /* Put i/f in slumber state */

	/* hpriv->flags bits */
	AHCI_FLAG_MSI		= (1 << 0),

	/* ap->flags bits */
	AHCI_FLAG_RESET_NEEDS_CLO	= (1 << 24),
	AHCI_FLAG_NO_NCQ		= (1 << 25),
};
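/*
 * Rough per-port DMA footprint implied by the sizes above (assuming the
 * 32-slot / 32-byte command list and 256-byte RX FIS area used here):
 * command list 32 * 32 = 1024 bytes, each command table 0x80 + 168 * 16 =
 * 2816 bytes (32 * 2816 = 90112 bytes for all tags), plus 256 bytes of
 * RX FIS space -- roughly 89 KB per port, allocated in one chunk by
 * ahci_port_start().
 */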
struct ahci_cmd_hdr {
	u32			opts;
	u32			status;
	u32			tbl_addr;
	u32			tbl_addr_hi;
	u32			reserved[4];
};

struct ahci_sg {
	u32			addr;
	u32			addr_hi;
	u32			reserved;
	u32			flags_size;
};

struct ahci_host_priv {
	unsigned long		flags;
	u32			cap;	/* cache of HOST_CAP register */
	u32			port_map; /* cache of HOST_PORTS_IMPL reg */
};

struct ahci_port_priv {
	struct ahci_cmd_hdr	*cmd_slot;
	dma_addr_t		cmd_slot_dma;
	void			*cmd_tbl;
	dma_addr_t		cmd_tbl_dma;
	void			*rx_fis;
	dma_addr_t		rx_fis_dma;
};
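/*
 * Note: ahci_cmd_hdr mirrors one entry of the hardware command list as
 * described by the AHCI spec: 'opts' carries the FIS length, option flags
 * and PRDT entry count, 'status' is the word the HBA writes back with the
 * PRD byte count, and 'tbl_addr'/'tbl_addr_hi' hold the bus address of the
 * per-tag command table.  struct ahci_sg is one PRDT entry of that table.
 */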
static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg);
static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
static void ahci_irq_clear(struct ata_port *ap);
static int ahci_port_start(struct ata_port *ap);
static void ahci_port_stop(struct ata_port *ap);
static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
static void ahci_qc_prep(struct ata_queued_cmd *qc);
static u8 ahci_check_status(struct ata_port *ap);
static void ahci_freeze(struct ata_port *ap);
static void ahci_thaw(struct ata_port *ap);
static void ahci_error_handler(struct ata_port *ap);
static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int ahci_port_resume(struct ata_port *ap);
static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
static int ahci_pci_device_resume(struct pci_dev *pdev);
static void ahci_remove_one (struct pci_dev *pdev);
static struct scsi_host_template ahci_sht = {
	.module			= THIS_MODULE,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.change_queue_depth	= ata_scsi_change_queue_depth,
	.can_queue		= AHCI_MAX_CMDS - 1,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= AHCI_MAX_SG,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= AHCI_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= AHCI_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
	.suspend		= ata_scsi_device_suspend,
	.resume			= ata_scsi_device_resume,
};
static const struct ata_port_operations ahci_ops = {
	.port_disable		= ata_port_disable,

	.check_status		= ahci_check_status,
	.check_altstatus	= ahci_check_status,
	.dev_select		= ata_noop_dev_select,

	.tf_read		= ahci_tf_read,

	.qc_prep		= ahci_qc_prep,
	.qc_issue		= ahci_qc_issue,

	.irq_handler		= ahci_interrupt,
	.irq_clear		= ahci_irq_clear,

	.scr_read		= ahci_scr_read,
	.scr_write		= ahci_scr_write,

	.freeze			= ahci_freeze,
	.thaw			= ahci_thaw,

	.error_handler		= ahci_error_handler,
	.post_internal_cmd	= ahci_post_internal_cmd,

	.port_suspend		= ahci_port_suspend,
	.port_resume		= ahci_port_resume,

	.port_start		= ahci_port_start,
	.port_stop		= ahci_port_stop,
};
static const struct ata_port_info ahci_port_info[] = {
	/* board_ahci */
	{
		.sht		= &ahci_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
				  ATA_FLAG_SKIP_D2H_BSY,
		.pio_mask	= 0x1f, /* pio0-4 */
		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
		.port_ops	= &ahci_ops,
	},
	/* board_ahci_vt8251 */
	{
		.sht		= &ahci_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
				  ATA_FLAG_SKIP_D2H_BSY |
				  AHCI_FLAG_RESET_NEEDS_CLO | AHCI_FLAG_NO_NCQ,
		.pio_mask	= 0x1f, /* pio0-4 */
		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
		.port_ops	= &ahci_ops,
	},
};
static const struct pci_device_id ahci_pci_tbl[] = {
	{ PCI_VENDOR_ID_INTEL, 0x2652, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  board_ahci }, /* ICH6 */
	{ PCI_VENDOR_ID_INTEL, 0x2653, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  board_ahci }, /* ICH6M */
	{ PCI_VENDOR_ID_INTEL, 0x27c1, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  board_ahci }, /* ICH7 */
	{ PCI_VENDOR_ID_INTEL, 0x27c5, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  board_ahci }, /* ICH7M */
	{ PCI_VENDOR_ID_INTEL, 0x27c3, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  board_ahci }, /* ICH7R */
	{ PCI_VENDOR_ID_AL, 0x5288, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  board_ahci }, /* ULi M5288 */
	{ PCI_VENDOR_ID_INTEL, 0x2681, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  board_ahci }, /* ESB2 */
	{ PCI_VENDOR_ID_INTEL, 0x2682, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  board_ahci }, /* ESB2 */
	{ PCI_VENDOR_ID_INTEL, 0x2683, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  board_ahci }, /* ESB2 */
	{ PCI_VENDOR_ID_INTEL, 0x27c6, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  board_ahci }, /* ICH7-M DH */
	{ PCI_VENDOR_ID_INTEL, 0x2821, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  board_ahci }, /* ICH8 */
	{ PCI_VENDOR_ID_INTEL, 0x2822, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  board_ahci }, /* ICH8 */
	{ PCI_VENDOR_ID_INTEL, 0x2824, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  board_ahci }, /* ICH8 */
	{ PCI_VENDOR_ID_INTEL, 0x2829, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  board_ahci }, /* ICH8M */
	{ PCI_VENDOR_ID_INTEL, 0x282a, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  board_ahci }, /* ICH8M */

	{ 0x197b, 0x2360, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  board_ahci }, /* JMicron JMB360 */
	{ 0x197b, 0x2361, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  board_ahci }, /* JMicron JMB361 */
	{ 0x197b, 0x2363, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  board_ahci }, /* JMicron JMB363 */
	{ 0x197b, 0x2365, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  board_ahci }, /* JMicron JMB365 */
	{ 0x197b, 0x2366, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  board_ahci }, /* JMicron JMB366 */

	{ PCI_VENDOR_ID_ATI, 0x4380, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  board_ahci }, /* ATI SB600 non-raid */
	{ PCI_VENDOR_ID_ATI, 0x4381, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  board_ahci }, /* ATI SB600 raid */

	{ PCI_VENDOR_ID_VIA, 0x3349, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  board_ahci_vt8251 }, /* VIA VT8251 */

	{ PCI_VENDOR_ID_NVIDIA, 0x044c, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  board_ahci }, /* MCP65 */
	{ PCI_VENDOR_ID_NVIDIA, 0x044d, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  board_ahci }, /* MCP65 */
	{ PCI_VENDOR_ID_NVIDIA, 0x044e, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  board_ahci }, /* MCP65 */
	{ PCI_VENDOR_ID_NVIDIA, 0x044f, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  board_ahci }, /* MCP65 */

	{ PCI_VENDOR_ID_SI, 0x1184, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  board_ahci }, /* SiS 966 */
	{ PCI_VENDOR_ID_SI, 0x1185, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  board_ahci }, /* SiS 966 */
	{ PCI_VENDOR_ID_SI, 0x0186, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  board_ahci }, /* SiS 968 */

	{ }	/* terminate list */
};
static struct pci_driver ahci_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= ahci_pci_tbl,
	.probe			= ahci_init_one,
	.suspend		= ahci_pci_device_suspend,
	.resume			= ahci_pci_device_resume,
	.remove			= ahci_remove_one,
};
static inline unsigned long ahci_port_base_ul (unsigned long base, unsigned int port)
{
	return base + 0x100 + (port * 0x80);
}

static inline void __iomem *ahci_port_base (void __iomem *base, unsigned int port)
{
	return (void __iomem *) ahci_port_base_ul((unsigned long)base, port);
}
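/*
 * Per the AHCI spec, the per-port register banks follow the 0x100-byte
 * global register block and are 0x80 bytes apart, so port N's registers
 * live at offset 0x100 + N * 0x80 from the start of the ABAR -- which is
 * exactly what the two helpers above compute.
 */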
static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg_in)
{
	unsigned int sc_reg;

	switch (sc_reg_in) {
	case SCR_STATUS:	sc_reg = 0; break;
	case SCR_CONTROL:	sc_reg = 1; break;
	case SCR_ERROR:		sc_reg = 2; break;
	case SCR_ACTIVE:	sc_reg = 3; break;
	default:
		return 0xffffffffU;
	}

	return readl((void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
}


static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg_in,
			    u32 val)
{
	unsigned int sc_reg;

	switch (sc_reg_in) {
	case SCR_STATUS:	sc_reg = 0; break;
	case SCR_CONTROL:	sc_reg = 1; break;
	case SCR_ERROR:		sc_reg = 2; break;
	case SCR_ACTIVE:	sc_reg = 3; break;
	default:
		return;
	}

	writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
}
static void ahci_start_engine(void __iomem *port_mmio)
{
	u32 tmp;

	/* start DMA */
	tmp = readl(port_mmio + PORT_CMD);
	tmp |= PORT_CMD_START;
	writel(tmp, port_mmio + PORT_CMD);
	readl(port_mmio + PORT_CMD); /* flush */
}
static int ahci_stop_engine(void __iomem *port_mmio)
{
	u32 tmp;

	tmp = readl(port_mmio + PORT_CMD);

	/* check if the HBA is idle */
	if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
		return 0;

	/* setting HBA to idle */
	tmp &= ~PORT_CMD_START;
	writel(tmp, port_mmio + PORT_CMD);

	/* wait for engine to stop. This could be as long as 500 msec */
	tmp = ata_wait_register(port_mmio + PORT_CMD,
				PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
	if (tmp & PORT_CMD_LIST_ON)
		return -EIO;

	return 0;
}
static void ahci_start_fis_rx(void __iomem *port_mmio, u32 cap,
			      dma_addr_t cmd_slot_dma, dma_addr_t rx_fis_dma)
{
	u32 tmp;

	/* set FIS registers */
	if (cap & HOST_CAP_64)
		writel((cmd_slot_dma >> 16) >> 16, port_mmio + PORT_LST_ADDR_HI);
	writel(cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);

	if (cap & HOST_CAP_64)
		writel((rx_fis_dma >> 16) >> 16, port_mmio + PORT_FIS_ADDR_HI);
	writel(rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);

	/* enable FIS reception */
	tmp = readl(port_mmio + PORT_CMD);
	tmp |= PORT_CMD_FIS_RX;
	writel(tmp, port_mmio + PORT_CMD);

	/* flush */
	readl(port_mmio + PORT_CMD);
}
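/*
 * The "(dma >> 16) >> 16" construct above (and elsewhere in this file) is
 * the usual way to take the high 32 bits of a dma_addr_t without a ">> 32"
 * that would warn when dma_addr_t is only 32 bits wide; in that case the
 * expression simply evaluates to 0.
 */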
static int ahci_stop_fis_rx(void __iomem *port_mmio)
{
	u32 tmp;

	/* disable FIS reception */
	tmp = readl(port_mmio + PORT_CMD);
	tmp &= ~PORT_CMD_FIS_RX;
	writel(tmp, port_mmio + PORT_CMD);

	/* wait for completion, spec says 500ms, give it 1000 */
	tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
				PORT_CMD_FIS_ON, 10, 1000);
	if (tmp & PORT_CMD_FIS_ON)
		return -EBUSY;

	return 0;
}
static void ahci_power_up(void __iomem *port_mmio, u32 cap)
{
	u32 cmd;

	cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;

	/* spin up device */
	if (cap & HOST_CAP_SSS) {
		cmd |= PORT_CMD_SPIN_UP;
		writel(cmd, port_mmio + PORT_CMD);
	}

	/* wake up link */
	writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
}
static void ahci_power_down(void __iomem *port_mmio, u32 cap)
{
	u32 cmd, scontrol;

	cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;

	if (cap & HOST_CAP_SSC) {
		/* enable transitions to slumber mode */
		scontrol = readl(port_mmio + PORT_SCR_CTL);
		if ((scontrol & 0x0f00) > 0x100) {
			scontrol &= ~0xf00;
			writel(scontrol, port_mmio + PORT_SCR_CTL);
		}

		/* put device into slumber mode */
		writel(cmd | PORT_CMD_ICC_SLUMBER, port_mmio + PORT_CMD);

		/* wait for the transition to complete */
		ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_ICC_SLUMBER,
				  PORT_CMD_ICC_SLUMBER, 1, 50);
	}

	/* put device into listen mode */
	if (cap & HOST_CAP_SSS) {
		/* first set PxSCTL.DET to 0 */
		scontrol = readl(port_mmio + PORT_SCR_CTL);
		scontrol &= ~0xf;
		writel(scontrol, port_mmio + PORT_SCR_CTL);

		/* then set PxCMD.SUD to 0 */
		cmd &= ~PORT_CMD_SPIN_UP;
		writel(cmd, port_mmio + PORT_CMD);
	}
}
static void ahci_init_port(void __iomem *port_mmio, u32 cap,
			   dma_addr_t cmd_slot_dma, dma_addr_t rx_fis_dma)
{
	/* power up */
	ahci_power_up(port_mmio, cap);

	/* enable FIS reception */
	ahci_start_fis_rx(port_mmio, cap, cmd_slot_dma, rx_fis_dma);

	/* enable DMA */
	ahci_start_engine(port_mmio);
}
static int ahci_deinit_port(void __iomem *port_mmio, u32 cap, const char **emsg)
{
	int rc;

	/* disable DMA */
	rc = ahci_stop_engine(port_mmio);
	if (rc) {
		*emsg = "failed to stop engine";
		return rc;
	}

	/* disable FIS reception */
	rc = ahci_stop_fis_rx(port_mmio);
	if (rc) {
		*emsg = "failed stop FIS RX";
		return rc;
	}

	/* put device into slumber mode */
	ahci_power_down(port_mmio, cap);

	return 0;
}
static int ahci_reset_controller(void __iomem *mmio, struct pci_dev *pdev)
{
	u32 cap_save, tmp;

	cap_save = readl(mmio + HOST_CAP);
	cap_save &= ((1 << 28) | (1 << 17));
	cap_save |= (1 << 27);

	/* global controller reset */
	tmp = readl(mmio + HOST_CTL);
	if ((tmp & HOST_RESET) == 0) {
		writel(tmp | HOST_RESET, mmio + HOST_CTL);
		readl(mmio + HOST_CTL); /* flush */
	}

	/* reset must complete within 1 second, or
	 * the hardware should be considered fried.
	 */
	ssleep(1);

	tmp = readl(mmio + HOST_CTL);
	if (tmp & HOST_RESET) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "controller reset failed (0x%x)\n", tmp);
		return -EIO;
	}

	writel(HOST_AHCI_EN, mmio + HOST_CTL);
	(void) readl(mmio + HOST_CTL);	/* flush */
	writel(cap_save, mmio + HOST_CAP);
	writel(0xf, mmio + HOST_PORTS_IMPL);
	(void) readl(mmio + HOST_PORTS_IMPL);	/* flush */

	if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
		u16 tmp16;

		/* configure PCS register for all ports */
		pci_read_config_word(pdev, 0x92, &tmp16);
		tmp16 |= 0xf;
		pci_write_config_word(pdev, 0x92, tmp16);
	}

	return 0;
}
static void ahci_init_controller(void __iomem *mmio, struct pci_dev *pdev,
				 int n_ports, u32 cap)
{
	int i, rc;
	u32 tmp;

	for (i = 0; i < n_ports; i++) {
		void __iomem *port_mmio = ahci_port_base(mmio, i);
		const char *emsg = NULL;

#if 0 /* BIOSen initialize this incorrectly */
		if (!(hpriv->port_map & (1 << i)))
			continue;
#endif

		/* make sure port is not active */
		rc = ahci_deinit_port(port_mmio, cap, &emsg);
		if (rc)
			dev_printk(KERN_WARNING, &pdev->dev,
				   "%s (%d)\n", emsg, rc);

		/* clear SError */
		tmp = readl(port_mmio + PORT_SCR_ERR);
		VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
		writel(tmp, port_mmio + PORT_SCR_ERR);

		/* clear port IRQ */
		tmp = readl(port_mmio + PORT_IRQ_STAT);
		VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
		if (tmp)
			writel(tmp, port_mmio + PORT_IRQ_STAT);

		writel(1 << i, mmio + HOST_IRQ_STAT);
	}

	tmp = readl(mmio + HOST_CTL);
	VPRINTK("HOST_CTL 0x%x\n", tmp);
	writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
	tmp = readl(mmio + HOST_CTL);
	VPRINTK("HOST_CTL 0x%x\n", tmp);
}
static unsigned int ahci_dev_classify(struct ata_port *ap)
{
	void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
	struct ata_taskfile tf;
	u32 tmp;

	tmp = readl(port_mmio + PORT_SIG);
	tf.lbah		= (tmp >> 24)	& 0xff;
	tf.lbam		= (tmp >> 16)	& 0xff;
	tf.lbal		= (tmp >> 8)	& 0xff;
	tf.nsect	= (tmp)		& 0xff;

	return ata_dev_classify(&tf);
}
static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
			       u32 opts)
{
	dma_addr_t cmd_tbl_dma;

	cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;

	pp->cmd_slot[tag].opts = cpu_to_le32(opts);
	pp->cmd_slot[tag].status = 0;
	pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
	pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
}
static int ahci_clo(struct ata_port *ap)
{
	void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
	struct ahci_host_priv *hpriv = ap->host->private_data;
	u32 tmp;

	if (!(hpriv->cap & HOST_CAP_CLO))
		return -EOPNOTSUPP;

	tmp = readl(port_mmio + PORT_CMD);
	tmp |= PORT_CMD_CLO;
	writel(tmp, port_mmio + PORT_CMD);

	tmp = ata_wait_register(port_mmio + PORT_CMD,
				PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
	if (tmp & PORT_CMD_CLO)
		return -EIO;

	return 0;
}
static int ahci_prereset(struct ata_port *ap)
{
	if ((ap->flags & AHCI_FLAG_RESET_NEEDS_CLO) &&
	    (ata_busy_wait(ap, ATA_BUSY, 1000) & ATA_BUSY)) {
		/* ATA_BUSY hasn't cleared, so send a CLO */
		ahci_clo(ap);
	}

	return ata_std_prereset(ap);
}
static int ahci_softreset(struct ata_port *ap, unsigned int *class)
{
	struct ahci_port_priv *pp = ap->private_data;
	void __iomem *mmio = ap->host->mmio_base;
	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
	const u32 cmd_fis_len = 5; /* five dwords */
	const char *reason = NULL;
	struct ata_taskfile tf;
	u32 tmp;
	u8 *fis;
	int rc;

	if (ata_port_offline(ap)) {
		DPRINTK("PHY reports no device\n");
		*class = ATA_DEV_NONE;
		return 0;
	}

	/* prepare for SRST (AHCI-1.1 10.4.1) */
	rc = ahci_stop_engine(port_mmio);
	if (rc) {
		reason = "failed to stop engine";
		goto fail_restart;
	}

	/* check BUSY/DRQ, perform Command List Override if necessary */
	ahci_tf_read(ap, &tf);
	if (tf.command & (ATA_BUSY | ATA_DRQ)) {
		rc = ahci_clo(ap);

		if (rc == -EOPNOTSUPP) {
			reason = "port busy but CLO unavailable";
			goto fail_restart;
		} else if (rc) {
			reason = "port busy but CLO failed";
			goto fail_restart;
		}
	}

	/* restart engine */
	ahci_start_engine(port_mmio);

	ata_tf_init(ap->device, &tf);
	fis = pp->cmd_tbl;

	/* issue the first D2H Register FIS */
	ahci_fill_cmd_slot(pp, 0,
			   cmd_fis_len | AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY);

	tf.ctl |= ATA_SRST;
	ata_tf_to_fis(&tf, fis, 0);
	fis[1] &= ~(1 << 7);	/* turn off Command FIS bit */

	writel(1, port_mmio + PORT_CMD_ISSUE);

	tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1, 1, 500);
	if (tmp & 0x1) {
		rc = -EIO;
		reason = "1st FIS failed";
		goto fail;
	}

	/* spec says at least 5us, but be generous and sleep for 1ms */
	msleep(1);

	/* issue the second D2H Register FIS */
	ahci_fill_cmd_slot(pp, 0, cmd_fis_len);

	tf.ctl &= ~ATA_SRST;
	ata_tf_to_fis(&tf, fis, 0);
	fis[1] &= ~(1 << 7);	/* turn off Command FIS bit */

	writel(1, port_mmio + PORT_CMD_ISSUE);
	readl(port_mmio + PORT_CMD_ISSUE);	/* flush */

	/* spec mandates ">= 2ms" before checking status.
	 * We wait 150ms, because that was the magic delay used for
	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked.  Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 */
	msleep(150);

	*class = ATA_DEV_NONE;
	if (ata_port_online(ap)) {
		if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
			rc = -EIO;
			reason = "device not ready";
			goto fail;
		}
		*class = ahci_dev_classify(ap);
	}

	DPRINTK("EXIT, class=%u\n", *class);
	return 0;

 fail_restart:
	ahci_start_engine(port_mmio);
 fail:
	ata_port_printk(ap, KERN_ERR, "softreset failed (%s)\n", reason);
	return rc;
}
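/*
 * The sequence above is software reset carried over AHCI: a first H2D
 * Register FIS is queued with SRST set in the device control field (the
 * AHCI_CMD_RESET/AHCI_CMD_CLR_BUSY slot options mark it as a reset FIS),
 * then a second FIS with SRST cleared completes the reset, after which the
 * signature in PxSIG is used to classify the attached device.
 */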
static int ahci_hardreset(struct ata_port *ap, unsigned int *class)
{
	struct ahci_port_priv *pp = ap->private_data;
	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
	struct ata_taskfile tf;
	void __iomem *mmio = ap->host->mmio_base;
	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
	int rc;

	ahci_stop_engine(port_mmio);

	/* clear D2H reception area to properly wait for D2H FIS */
	ata_tf_init(ap->device, &tf);
	tf.command = 0x80;
	ata_tf_to_fis(&tf, d2h_fis, 0);

	rc = sata_std_hardreset(ap, class);

	ahci_start_engine(port_mmio);

	if (rc == 0 && ata_port_online(ap))
		*class = ahci_dev_classify(ap);
	if (*class == ATA_DEV_UNKNOWN)
		*class = ATA_DEV_NONE;

	DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
	return rc;
}
static void ahci_postreset(struct ata_port *ap, unsigned int *class)
{
	void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
	u32 new_tmp, tmp;

	ata_std_postreset(ap, class);

	/* Make sure port's ATAPI bit is set appropriately */
	new_tmp = tmp = readl(port_mmio + PORT_CMD);
	if (*class == ATA_DEV_ATAPI)
		new_tmp |= PORT_CMD_ATAPI;
	else
		new_tmp &= ~PORT_CMD_ATAPI;
	if (new_tmp != tmp) {
		writel(new_tmp, port_mmio + PORT_CMD);
		readl(port_mmio + PORT_CMD); /* flush */
	}
}
static u8 ahci_check_status(struct ata_port *ap)
{
	void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr;

	return readl(mmio + PORT_TFDATA) & 0xFF;
}
static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ahci_port_priv *pp = ap->private_data;
	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;

	ata_tf_from_fis(d2h_fis, tf);
}
static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
{
	struct scatterlist *sg;
	struct ahci_sg *ahci_sg;
	unsigned int n_sg = 0;

	/*
	 * Next, the S/G list.
	 */
	ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
	ata_for_each_sg(sg, qc) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		ahci_sg->addr = cpu_to_le32(addr & 0xffffffff);
		ahci_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
		ahci_sg->flags_size = cpu_to_le32(sg_len - 1);

		ahci_sg++;
		n_sg++;
	}

	return n_sg;
}
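/*
 * Each PRDT entry built above describes one scatterlist segment; per the
 * AHCI spec the byte-count field is stored as (length - 1), which is why
 * sg_len - 1 is written into flags_size.
 */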
static void ahci_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ahci_port_priv *pp = ap->private_data;
	int is_atapi = is_atapi_taskfile(&qc->tf);
	void *cmd_tbl;
	u32 opts;
	const u32 cmd_fis_len = 5; /* five dwords */
	unsigned int n_elem;

	/*
	 * Fill in command table information.  First, the header,
	 * a SATA Register - Host to Device command FIS.
	 */
	cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;

	ata_tf_to_fis(&qc->tf, cmd_tbl, 0);
	if (is_atapi) {
		memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
		memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
	}

	n_elem = 0;
	if (qc->flags & ATA_QCFLAG_DMAMAP)
		n_elem = ahci_fill_sg(qc, cmd_tbl);

	/*
	 * Fill in command slot information.
	 */
	opts = cmd_fis_len | n_elem << 16;
	if (qc->tf.flags & ATA_TFLAG_WRITE)
		opts |= AHCI_CMD_WRITE;
	if (is_atapi)
		opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;

	ahci_fill_cmd_slot(pp, qc->tag, opts);
}
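/*
 * Layout of the per-tag command table filled in above, matching the
 * offsets defined at the top of the file: the command FIS starts at
 * offset 0, an ATAPI CDB (if any) goes at AHCI_CMD_TBL_CDB (0x40), and
 * the PRDT begins at AHCI_CMD_TBL_HDR_SZ (0x80).
 */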
static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
{
	struct ahci_port_priv *pp = ap->private_data;
	struct ata_eh_info *ehi = &ap->eh_info;
	unsigned int err_mask = 0, action = 0;
	struct ata_queued_cmd *qc;
	u32 serror;

	ata_ehi_clear_desc(ehi);

	/* AHCI needs SError cleared; otherwise, it might lock up */
	serror = ahci_scr_read(ap, SCR_ERROR);
	ahci_scr_write(ap, SCR_ERROR, serror);

	/* analyze @irq_stat */
	ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);

	if (irq_stat & PORT_IRQ_TF_ERR)
		err_mask |= AC_ERR_DEV;

	if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
		err_mask |= AC_ERR_HOST_BUS;
		action |= ATA_EH_SOFTRESET;
	}

	if (irq_stat & PORT_IRQ_IF_ERR) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_SOFTRESET;
		ata_ehi_push_desc(ehi, ", interface fatal error");
	}

	if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, ", %s", irq_stat & PORT_IRQ_CONNECT ?
			"connection status changed" : "PHY RDY changed");
	}

	if (irq_stat & PORT_IRQ_UNK_FIS) {
		u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);

		err_mask |= AC_ERR_HSM;
		action |= ATA_EH_SOFTRESET;
		ata_ehi_push_desc(ehi, ", unknown FIS %08x %08x %08x %08x",
				  unk[0], unk[1], unk[2], unk[3]);
	}

	/* okay, let's hand over to EH */
	ehi->serror |= serror;
	ehi->action |= action;

	qc = ata_qc_from_tag(ap, ap->active_tag);
	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (irq_stat & PORT_IRQ_FREEZE)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}
static void ahci_host_intr(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->mmio_base;
	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
	struct ata_eh_info *ehi = &ap->eh_info;
	u32 status, qc_active;
	int rc;

	status = readl(port_mmio + PORT_IRQ_STAT);
	writel(status, port_mmio + PORT_IRQ_STAT);

	if (unlikely(status & PORT_IRQ_ERROR)) {
		ahci_error_intr(ap, status);
		return;
	}

	if (ap->sactive)
		qc_active = readl(port_mmio + PORT_SCR_ACT);
	else
		qc_active = readl(port_mmio + PORT_CMD_ISSUE);

	rc = ata_qc_complete_multiple(ap, qc_active, NULL);
	if (rc > 0)
		return;
	if (rc < 0) {
		ehi->err_mask |= AC_ERR_HSM;
		ehi->action |= ATA_EH_SOFTRESET;
		ata_port_freeze(ap);
		return;
	}

	/* hmmm... a spurious interrupt */

	/* some devices send D2H reg with I bit set during NCQ command phase */
	if (ap->sactive && status & PORT_IRQ_D2H_REG_FIS)
		return;

	/* ignore interim PIO setup fis interrupts */
	if (ata_tag_valid(ap->active_tag) && (status & PORT_IRQ_PIOS_FIS))
		return;

	if (ata_ratelimit())
		ata_port_printk(ap, KERN_INFO, "spurious interrupt "
				"(irq_stat 0x%x active_tag %d sactive 0x%x)\n",
				status, ap->active_tag, ap->sactive);
}
static void ahci_irq_clear(struct ata_port *ap)
{
	/* TODO */
}
static irqreturn_t ahci_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct ata_host *host = dev_instance;
	struct ahci_host_priv *hpriv;
	unsigned int i, handled = 0;
	void __iomem *mmio;
	u32 irq_stat, irq_ack = 0;

	hpriv = host->private_data;
	mmio = host->mmio_base;

	/* sigh.  0xffffffff is a valid return from h/w */
	irq_stat = readl(mmio + HOST_IRQ_STAT);
	irq_stat &= hpriv->port_map;
	if (!irq_stat)
		return IRQ_NONE;

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		if (!(irq_stat & (1 << i)))
			continue;

		ap = host->ports[i];
		if (ap) {
			ahci_host_intr(ap);
			VPRINTK("port %u\n", i);
		} else {
			VPRINTK("port %u (no irq)\n", i);
			if (ata_ratelimit())
				dev_printk(KERN_WARNING, host->dev,
					   "interrupt on disabled port %u\n", i);
		}

		irq_ack |= (1 << i);
	}

	if (irq_ack) {
		writel(irq_ack, mmio + HOST_IRQ_STAT);
		handled = 1;
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;

	if (qc->tf.protocol == ATA_PROT_NCQ)
		writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
	writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
	readl(port_mmio + PORT_CMD_ISSUE); /* flush */

	return 0;
}
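/*
 * For NCQ commands the tag's bit must be set in PxSACT before the same bit
 * is set in PxCI, which is the ordering ahci_qc_issue() uses; the trailing
 * readl() flushes the posted writes so the command is actually on its way
 * before we return.
 */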
static void ahci_freeze(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->mmio_base;
	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);

	/* turn IRQ off */
	writel(0, port_mmio + PORT_IRQ_MASK);
}
static void ahci_thaw(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->mmio_base;
	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
	u32 tmp;

	/* clear IRQ */
	tmp = readl(port_mmio + PORT_IRQ_STAT);
	writel(tmp, port_mmio + PORT_IRQ_STAT);
	writel(1 << ap->id, mmio + HOST_IRQ_STAT);

	/* turn IRQ back on */
	writel(DEF_PORT_IRQ, port_mmio + PORT_IRQ_MASK);
}
static void ahci_error_handler(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->mmio_base;
	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);

	if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
		/* restart engine */
		ahci_stop_engine(port_mmio);
		ahci_start_engine(port_mmio);
	}

	/* perform recovery */
	ata_do_eh(ap, ahci_prereset, ahci_softreset, ahci_hardreset,
		  ahci_postreset);
}
static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *mmio = ap->host->mmio_base;
	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);

	if (qc->flags & ATA_QCFLAG_FAILED)
		qc->err_mask |= AC_ERR_OTHER;

	if (qc->err_mask) {
		/* make DMA engine forget about the failed command */
		ahci_stop_engine(port_mmio);
		ahci_start_engine(port_mmio);
	}
}
static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ahci_port_priv *pp = ap->private_data;
	void __iomem *mmio = ap->host->mmio_base;
	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
	const char *emsg = NULL;
	int rc;

	rc = ahci_deinit_port(port_mmio, hpriv->cap, &emsg);
	if (rc) {
		ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
		ahci_init_port(port_mmio, hpriv->cap,
			       pp->cmd_slot_dma, pp->rx_fis_dma);
	}

	return rc;
}
static int ahci_port_resume(struct ata_port *ap)
{
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = ap->host->mmio_base;
	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);

	ahci_init_port(port_mmio, hpriv->cap, pp->cmd_slot_dma, pp->rx_fis_dma);

	return 0;
}
static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	void __iomem *mmio = host->mmio_base;
	u32 ctl;

	if (mesg.event == PM_EVENT_SUSPEND) {
		/* AHCI spec rev1.1 section 8.3.3:
		 * Software must disable interrupts prior to requesting a
		 * transition of the HBA to D3 state.
		 */
		ctl = readl(mmio + HOST_CTL);
		ctl &= ~HOST_IRQ_EN;
		writel(ctl, mmio + HOST_CTL);
		readl(mmio + HOST_CTL); /* flush */
	}

	return ata_pci_device_suspend(pdev, mesg);
}
static int ahci_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct ahci_host_priv *hpriv = host->private_data;
	void __iomem *mmio = host->mmio_base;
	int rc;

	ata_pci_device_do_resume(pdev);

	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		rc = ahci_reset_controller(mmio, pdev);
		if (rc)
			return rc;

		ahci_init_controller(mmio, pdev, host->n_ports, hpriv->cap);
	}

	ata_host_resume(host);

	return 0;
}
static int ahci_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ahci_port_priv *pp;
	void __iomem *mmio = ap->host->mmio_base;
	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
	void *mem;
	dma_addr_t mem_dma;
	int rc;

	pp = kmalloc(sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	memset(pp, 0, sizeof(*pp));

	rc = ata_pad_alloc(ap, dev);
	if (rc) {
		kfree(pp);
		return rc;
	}

	mem = dma_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma, GFP_KERNEL);
	if (!mem) {
		ata_pad_free(ap, dev);
		kfree(pp);
		return -ENOMEM;
	}
	memset(mem, 0, AHCI_PORT_PRIV_DMA_SZ);

	/*
	 * First item in chunk of DMA memory: 32-slot command table,
	 * 32 bytes each in size
	 */
	pp->cmd_slot = mem;
	pp->cmd_slot_dma = mem_dma;

	mem += AHCI_CMD_SLOT_SZ;
	mem_dma += AHCI_CMD_SLOT_SZ;

	/*
	 * Second item: Received-FIS area
	 */
	pp->rx_fis = mem;
	pp->rx_fis_dma = mem_dma;

	mem += AHCI_RX_FIS_SZ;
	mem_dma += AHCI_RX_FIS_SZ;

	/*
	 * Third item: data area for storing a single command
	 * and its scatter-gather table
	 */
	pp->cmd_tbl = mem;
	pp->cmd_tbl_dma = mem_dma;

	ap->private_data = pp;

	/* initialize port */
	ahci_init_port(port_mmio, hpriv->cap, pp->cmd_slot_dma, pp->rx_fis_dma);

	return 0;
}
static void ahci_port_stop(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ahci_port_priv *pp = ap->private_data;
	void __iomem *mmio = ap->host->mmio_base;
	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
	const char *emsg = NULL;
	int rc;

	/* de-initialize port */
	rc = ahci_deinit_port(port_mmio, hpriv->cap, &emsg);
	if (rc)
		ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);

	ap->private_data = NULL;
	dma_free_coherent(dev, AHCI_PORT_PRIV_DMA_SZ,
			  pp->cmd_slot, pp->cmd_slot_dma);
	ata_pad_free(ap, dev);
	kfree(pp);
}
*port
, unsigned long base
,
1374 unsigned int port_idx
)
1376 VPRINTK("ENTER, base==0x%lx, port_idx %u\n", base
, port_idx
);
1377 base
= ahci_port_base_ul(base
, port_idx
);
1378 VPRINTK("base now==0x%lx\n", base
);
1380 port
->cmd_addr
= base
;
1381 port
->scr_addr
= base
+ PORT_SCR
;
static int ahci_host_init(struct ata_probe_ent *probe_ent)
{
	struct ahci_host_priv *hpriv = probe_ent->private_data;
	struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
	void __iomem *mmio = probe_ent->mmio_base;
	unsigned int i, using_dac;
	int rc;

	rc = ahci_reset_controller(mmio, pdev);
	if (rc)
		return rc;

	hpriv->cap = readl(mmio + HOST_CAP);
	hpriv->port_map = readl(mmio + HOST_PORTS_IMPL);
	probe_ent->n_ports = (hpriv->cap & 0x1f) + 1;

	VPRINTK("cap 0x%x port_map 0x%x n_ports %d\n",
		hpriv->cap, hpriv->port_map, probe_ent->n_ports);

	using_dac = hpriv->cap & HOST_CAP_64;
	if (using_dac &&
	    !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	for (i = 0; i < probe_ent->n_ports; i++)
		ahci_setup_port(&probe_ent->port[i], (unsigned long) mmio, i);

	ahci_init_controller(mmio, pdev, probe_ent->n_ports, hpriv->cap);

	pci_set_master(pdev);

	return 0;
}
static void ahci_print_info(struct ata_probe_ent *probe_ent)
{
	struct ahci_host_priv *hpriv = probe_ent->private_data;
	struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
	void __iomem *mmio = probe_ent->mmio_base;
	u32 vers, cap, impl, speed;
	const char *speed_s;
	u16 cc;
	const char *scc_s;

	vers = readl(mmio + HOST_VERSION);
	cap = hpriv->cap;
	impl = hpriv->port_map;

	speed = (cap >> 20) & 0xf;
	if (speed == 1)
		speed_s = "1.5";
	else if (speed == 2)
		speed_s = "3";
	else
		speed_s = "?";

	pci_read_config_word(pdev, 0x0a, &cc);
	if (cc == 0x0101)
		scc_s = "IDE";
	else if (cc == 0x0106)
		scc_s = "SATA";
	else if (cc == 0x0104)
		scc_s = "RAID";
	else
		scc_s = "unknown";

	dev_printk(KERN_INFO, &pdev->dev,
		"AHCI %02x%02x.%02x%02x "
		"%u slots %u ports %s Gbps 0x%x impl %s mode\n",

		(vers >> 24) & 0xff,
		(vers >> 16) & 0xff,
		(vers >> 8) & 0xff,
		vers & 0xff,

		((cap >> 8) & 0x1f) + 1,
		(cap & 0x1f) + 1,
		speed_s,
		impl,
		scc_s);

	dev_printk(KERN_INFO, &pdev->dev,
		"flags: "
		"%s%s%s%s%s%s"
		"%s%s%s%s%s%s%s\n",

		cap & (1 << 31) ? "64bit " : "",
		cap & (1 << 30) ? "ncq " : "",
		cap & (1 << 28) ? "ilck " : "",
		cap & (1 << 27) ? "stag " : "",
		cap & (1 << 26) ? "pm " : "",
		cap & (1 << 25) ? "led " : "",

		cap & (1 << 24) ? "clo " : "",
		cap & (1 << 19) ? "nz " : "",
		cap & (1 << 18) ? "only " : "",
		cap & (1 << 17) ? "pmp " : "",
		cap & (1 << 15) ? "pio " : "",
		cap & (1 << 14) ? "slum " : "",
		cap & (1 << 13) ? "part " : ""
		);
}
static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	struct ata_probe_ent *probe_ent = NULL;
	struct ahci_host_priv *hpriv;
	unsigned long base;
	void __iomem *mmio_base;
	unsigned int board_idx = (unsigned int) ent->driver_data;
	int have_msi, pci_dev_busy = 0;
	int rc;

	WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS);

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	/* JMicron-specific fixup: make sure we're in AHCI mode */
	/* This is protected from races with ata_jmicron by the pci probe
	   locking */
	if (pdev->vendor == PCI_VENDOR_ID_JMICRON) {
		/* AHCI enable, AHCI on function 0 */
		pci_write_config_byte(pdev, 0x41, 0xa1);
		/* Function 1 is the PATA controller */
		if (PCI_FUNC(pdev->devfn))
			return -ENODEV;
	}

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc) {
		pci_dev_busy = 1;
		goto err_out;
	}

	if (pci_enable_msi(pdev) == 0)
		have_msi = 1;
	else {
		pci_intx(pdev, 1);
		have_msi = 0;
	}

	probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
	if (probe_ent == NULL) {
		rc = -ENOMEM;
		goto err_out_msi;
	}

	memset(probe_ent, 0, sizeof(*probe_ent));
	probe_ent->dev = pci_dev_to_dev(pdev);
	INIT_LIST_HEAD(&probe_ent->node);

	mmio_base = pci_iomap(pdev, AHCI_PCI_BAR, 0);
	if (mmio_base == NULL) {
		rc = -ENOMEM;
		goto err_out_free_ent;
	}
	base = (unsigned long) mmio_base;

	hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv) {
		rc = -ENOMEM;
		goto err_out_iounmap;
	}
	memset(hpriv, 0, sizeof(*hpriv));

	probe_ent->sht		= ahci_port_info[board_idx].sht;
	probe_ent->port_flags	= ahci_port_info[board_idx].flags;
	probe_ent->pio_mask	= ahci_port_info[board_idx].pio_mask;
	probe_ent->udma_mask	= ahci_port_info[board_idx].udma_mask;
	probe_ent->port_ops	= ahci_port_info[board_idx].port_ops;

	probe_ent->irq		= pdev->irq;
	probe_ent->irq_flags	= IRQF_SHARED;
	probe_ent->mmio_base	= mmio_base;
	probe_ent->private_data	= hpriv;

	if (have_msi)
		hpriv->flags |= AHCI_FLAG_MSI;

	/* initialize adapter */
	rc = ahci_host_init(probe_ent);
	if (rc)
		goto err_out_hpriv;

	if (!(probe_ent->port_flags & AHCI_FLAG_NO_NCQ) &&
	    (hpriv->cap & HOST_CAP_NCQ))
		probe_ent->port_flags |= ATA_FLAG_NCQ;

	ahci_print_info(probe_ent);

	/* FIXME: check ata_device_add return value */
	ata_device_add(probe_ent);

	kfree(probe_ent);

	return 0;

err_out_hpriv:
	kfree(hpriv);
err_out_iounmap:
	pci_iounmap(pdev, mmio_base);
err_out_free_ent:
	kfree(probe_ent);
err_out_msi:
	if (have_msi)
		pci_disable_msi(pdev);
	else
		pci_intx(pdev, 0);
	pci_release_regions(pdev);
err_out:
	if (!pci_dev_busy)
		pci_disable_device(pdev);
	return rc;
}
static void ahci_remove_one (struct pci_dev *pdev)
{
	struct device *dev = pci_dev_to_dev(pdev);
	struct ata_host *host = dev_get_drvdata(dev);
	struct ahci_host_priv *hpriv = host->private_data;
	unsigned int i;
	int have_msi;

	for (i = 0; i < host->n_ports; i++)
		ata_port_detach(host->ports[i]);

	have_msi = hpriv->flags & AHCI_FLAG_MSI;
	free_irq(host->irq, host);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_scsi_release(ap->scsi_host);
		scsi_host_put(ap->scsi_host);
	}

	kfree(hpriv);
	pci_iounmap(pdev, host->mmio_base);
	kfree(host);

	if (have_msi)
		pci_disable_msi(pdev);
	else
		pci_intx(pdev, 0);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	dev_set_drvdata(dev, NULL);
}
static int __init ahci_init(void)
{
	return pci_register_driver(&ahci_pci_driver);
}

static void __exit ahci_exit(void)
{
	pci_unregister_driver(&ahci_pci_driver);
}


MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("AHCI SATA low-level driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, ahci_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_init(ahci_init);
module_exit(ahci_exit);