/*
 *  ahci.c - AHCI SATA support
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *                  Please ALWAYS copy linux-ide@vger.kernel.org
 *                  on emails.
 *
 *  Copyright 2004-2005 Red Hat, Inc.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/DocBook/libata.*
 *
 * AHCI hardware documentation:
 * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
 * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
 */
35 #include <linux/kernel.h>
36 #include <linux/module.h>
37 #include <linux/pci.h>
38 #include <linux/init.h>
39 #include <linux/blkdev.h>
40 #include <linux/delay.h>
41 #include <linux/interrupt.h>
42 #include <linux/sched.h>
43 #include <linux/dma-mapping.h>
44 #include <linux/device.h>
45 #include <scsi/scsi_host.h>
46 #include <scsi/scsi_cmnd.h>
47 #include <linux/libata.h>
/* Driver identification used for registration and log messages. */
#define DRV_NAME	"ahci"
#define DRV_VERSION	"2.0"
/*
 * Driver-wide constants: DMA layout sizes, command-header option bits,
 * HBA/port register offsets, and register bit definitions.
 *
 * NOTE(review): this enum was reconstructed from a mangled extraction;
 * several lines (AHCI_PCI_BAR, AHCI_MAX_CMDS, AHCI_CMD_SZ, AHCI_RX_FIS_SZ,
 * board_ahci, parts of PORT_IRQ_FREEZE/ERROR) were missing and have been
 * restored to their conventional values -- confirm against the original.
 */
enum {
	AHCI_PCI_BAR		= 5,	/* assumed: AHCI ABAR is BAR 5 -- confirm */
	AHCI_MAX_SG		= 168, /* hardware max is 64K */
	AHCI_DMA_BOUNDARY	= 0xffffffff,
	AHCI_USE_CLUSTERING	= 0,
	AHCI_MAX_CMDS		= 32,	/* assumed from .can_queue = AHCI_MAX_CMDS - 1 -- confirm */
	AHCI_CMD_SZ		= 32,	/* assumed: one command header is 32 bytes -- confirm */
	AHCI_CMD_SLOT_SZ	= AHCI_MAX_CMDS * AHCI_CMD_SZ,
	AHCI_RX_FIS_SZ		= 256,	/* assumed: received-FIS area size -- confirm */
	AHCI_CMD_TBL_CDB	= 0x40,
	AHCI_CMD_TBL_HDR_SZ	= 0x80,
	AHCI_CMD_TBL_SZ		= AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
	AHCI_CMD_TBL_AR_SZ	= AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
	AHCI_PORT_PRIV_DMA_SZ	= AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
				  AHCI_RX_FIS_SZ,

	/* command header option bits */
	AHCI_IRQ_ON_SG		= (1 << 31),
	AHCI_CMD_ATAPI		= (1 << 5),
	AHCI_CMD_WRITE		= (1 << 6),
	AHCI_CMD_PREFETCH	= (1 << 7),
	AHCI_CMD_RESET		= (1 << 8),
	AHCI_CMD_CLR_BUSY	= (1 << 10),

	RX_FIS_D2H_REG		= 0x40,	/* offset of D2H Register FIS data */
	RX_FIS_UNK		= 0x60,	/* offset of Unknown FIS data */

	/* board IDs, indices into ahci_port_info[] */
	board_ahci		= 0,
	board_ahci_vt8251	= 1,

	/* global controller registers */
	HOST_CAP		= 0x00, /* host capabilities */
	HOST_CTL		= 0x04, /* global host control */
	HOST_IRQ_STAT		= 0x08, /* interrupt status */
	HOST_PORTS_IMPL		= 0x0c, /* bitmap of implemented ports */
	HOST_VERSION		= 0x10, /* AHCI spec. version compliancy */

	/* HOST_CTL bits */
	HOST_RESET		= (1 << 0),  /* reset controller; self-clear */
	HOST_IRQ_EN		= (1 << 1),  /* global IRQ enable */
	HOST_AHCI_EN		= (1 << 31), /* AHCI enabled */

	/* HOST_CAP bits */
	HOST_CAP_SSC		= (1 << 14), /* Slumber capable */
	HOST_CAP_CLO		= (1 << 24), /* Command List Override support */
	HOST_CAP_SSS		= (1 << 27), /* Staggered Spin-up */
	HOST_CAP_NCQ		= (1 << 30), /* Native Command Queueing */
	HOST_CAP_64		= (1 << 31), /* PCI DAC (64-bit DMA) support */

	/* registers for each SATA port */
	PORT_LST_ADDR		= 0x00, /* command list DMA addr */
	PORT_LST_ADDR_HI	= 0x04, /* command list DMA addr hi */
	PORT_FIS_ADDR		= 0x08, /* FIS rx buf addr */
	PORT_FIS_ADDR_HI	= 0x0c, /* FIS rx buf addr hi */
	PORT_IRQ_STAT		= 0x10, /* interrupt status */
	PORT_IRQ_MASK		= 0x14, /* interrupt enable/disable mask */
	PORT_CMD		= 0x18, /* port command */
	PORT_TFDATA		= 0x20, /* taskfile data */
	PORT_SIG		= 0x24, /* device TF signature */
	PORT_CMD_ISSUE		= 0x38, /* command issue */
	PORT_SCR		= 0x28, /* SATA phy register block */
	PORT_SCR_STAT		= 0x28, /* SATA phy register: SStatus */
	PORT_SCR_CTL		= 0x2c, /* SATA phy register: SControl */
	PORT_SCR_ERR		= 0x30, /* SATA phy register: SError */
	PORT_SCR_ACT		= 0x34, /* SATA phy register: SActive */

	/* PORT_IRQ_{STAT,MASK} bits */
	PORT_IRQ_COLD_PRES	= (1 << 31), /* cold presence detect */
	PORT_IRQ_TF_ERR		= (1 << 30), /* task file error */
	PORT_IRQ_HBUS_ERR	= (1 << 29), /* host bus fatal error */
	PORT_IRQ_HBUS_DATA_ERR	= (1 << 28), /* host bus data error */
	PORT_IRQ_IF_ERR		= (1 << 27), /* interface fatal error */
	PORT_IRQ_IF_NONFATAL	= (1 << 26), /* interface non-fatal error */
	PORT_IRQ_OVERFLOW	= (1 << 24), /* xfer exhausted available S/G */
	PORT_IRQ_BAD_PMP	= (1 << 23), /* incorrect port multiplier */

	PORT_IRQ_PHYRDY		= (1 << 22), /* PhyRdy changed */
	PORT_IRQ_DEV_ILCK	= (1 << 7),  /* device interlock */
	PORT_IRQ_CONNECT	= (1 << 6),  /* port connect change status */
	PORT_IRQ_SG_DONE	= (1 << 5),  /* descriptor processed */
	PORT_IRQ_UNK_FIS	= (1 << 4),  /* unknown FIS rx'd */
	PORT_IRQ_SDB_FIS	= (1 << 3),  /* Set Device Bits FIS rx'd */
	PORT_IRQ_DMAS_FIS	= (1 << 2),  /* DMA Setup FIS rx'd */
	PORT_IRQ_PIOS_FIS	= (1 << 1),  /* PIO Setup FIS rx'd */
	PORT_IRQ_D2H_REG_FIS	= (1 << 0),  /* D2H Register FIS rx'd */

	/* conditions that freeze the port (hotplug / fatal interface errors)
	 * NOTE(review): middle terms restored from upstream -- confirm */
	PORT_IRQ_FREEZE		= PORT_IRQ_HBUS_ERR |
				  PORT_IRQ_IF_ERR |
				  PORT_IRQ_CONNECT |
				  PORT_IRQ_PHYRDY |
				  PORT_IRQ_UNK_FIS,
	PORT_IRQ_ERROR		= PORT_IRQ_FREEZE |
				  PORT_IRQ_TF_ERR |
				  PORT_IRQ_HBUS_DATA_ERR,
	DEF_PORT_IRQ		= PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
				  PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
				  PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,

	/* PORT_CMD bits */
	PORT_CMD_ATAPI		= (1 << 24), /* Device is ATAPI */
	PORT_CMD_LIST_ON	= (1 << 15), /* cmd list DMA engine running */
	PORT_CMD_FIS_ON		= (1 << 14), /* FIS DMA engine running */
	PORT_CMD_FIS_RX		= (1 << 4),  /* Enable FIS receive DMA engine */
	PORT_CMD_CLO		= (1 << 3),  /* Command list override */
	PORT_CMD_POWER_ON	= (1 << 2),  /* Power up device */
	PORT_CMD_SPIN_UP	= (1 << 1),  /* Spin up device */
	PORT_CMD_START		= (1 << 0),  /* Enable port DMA engine */

	PORT_CMD_ICC_MASK	= (0xf << 28), /* i/f ICC state mask */
	PORT_CMD_ICC_ACTIVE	= (0x1 << 28), /* Put i/f in active state */
	PORT_CMD_ICC_PARTIAL	= (0x2 << 28), /* Put i/f in partial state */
	PORT_CMD_ICC_SLUMBER	= (0x6 << 28), /* Put i/f in slumber state */

	/* hpriv->flags bits */
	AHCI_FLAG_MSI		= (1 << 0),

	/* ap->flags bits */
	AHCI_FLAG_RESET_NEEDS_CLO	= (1 << 24),
	AHCI_FLAG_NO_NCQ		= (1 << 25),
};
173 struct ahci_cmd_hdr
{
188 struct ahci_host_priv
{
190 u32 cap
; /* cache of HOST_CAP register */
191 u32 port_map
; /* cache of HOST_PORTS_IMPL reg */
194 struct ahci_port_priv
{
195 struct ahci_cmd_hdr
*cmd_slot
;
196 dma_addr_t cmd_slot_dma
;
198 dma_addr_t cmd_tbl_dma
;
200 dma_addr_t rx_fis_dma
;
203 static u32
ahci_scr_read (struct ata_port
*ap
, unsigned int sc_reg
);
204 static void ahci_scr_write (struct ata_port
*ap
, unsigned int sc_reg
, u32 val
);
205 static int ahci_init_one (struct pci_dev
*pdev
, const struct pci_device_id
*ent
);
206 static unsigned int ahci_qc_issue(struct ata_queued_cmd
*qc
);
207 static irqreturn_t
ahci_interrupt (int irq
, void *dev_instance
, struct pt_regs
*regs
);
208 static void ahci_irq_clear(struct ata_port
*ap
);
209 static int ahci_port_start(struct ata_port
*ap
);
210 static void ahci_port_stop(struct ata_port
*ap
);
211 static void ahci_tf_read(struct ata_port
*ap
, struct ata_taskfile
*tf
);
212 static void ahci_qc_prep(struct ata_queued_cmd
*qc
);
213 static u8
ahci_check_status(struct ata_port
*ap
);
214 static void ahci_freeze(struct ata_port
*ap
);
215 static void ahci_thaw(struct ata_port
*ap
);
216 static void ahci_error_handler(struct ata_port
*ap
);
217 static void ahci_post_internal_cmd(struct ata_queued_cmd
*qc
);
218 static int ahci_port_suspend(struct ata_port
*ap
, pm_message_t mesg
);
219 static int ahci_port_resume(struct ata_port
*ap
);
220 static int ahci_pci_device_suspend(struct pci_dev
*pdev
, pm_message_t mesg
);
221 static int ahci_pci_device_resume(struct pci_dev
*pdev
);
222 static void ahci_remove_one (struct pci_dev
*pdev
);
224 static struct scsi_host_template ahci_sht
= {
225 .module
= THIS_MODULE
,
227 .ioctl
= ata_scsi_ioctl
,
228 .queuecommand
= ata_scsi_queuecmd
,
229 .change_queue_depth
= ata_scsi_change_queue_depth
,
230 .can_queue
= AHCI_MAX_CMDS
- 1,
231 .this_id
= ATA_SHT_THIS_ID
,
232 .sg_tablesize
= AHCI_MAX_SG
,
233 .cmd_per_lun
= ATA_SHT_CMD_PER_LUN
,
234 .emulated
= ATA_SHT_EMULATED
,
235 .use_clustering
= AHCI_USE_CLUSTERING
,
236 .proc_name
= DRV_NAME
,
237 .dma_boundary
= AHCI_DMA_BOUNDARY
,
238 .slave_configure
= ata_scsi_slave_config
,
239 .slave_destroy
= ata_scsi_slave_destroy
,
240 .bios_param
= ata_std_bios_param
,
241 .suspend
= ata_scsi_device_suspend
,
242 .resume
= ata_scsi_device_resume
,
245 static const struct ata_port_operations ahci_ops
= {
246 .port_disable
= ata_port_disable
,
248 .check_status
= ahci_check_status
,
249 .check_altstatus
= ahci_check_status
,
250 .dev_select
= ata_noop_dev_select
,
252 .tf_read
= ahci_tf_read
,
254 .qc_prep
= ahci_qc_prep
,
255 .qc_issue
= ahci_qc_issue
,
257 .irq_handler
= ahci_interrupt
,
258 .irq_clear
= ahci_irq_clear
,
260 .scr_read
= ahci_scr_read
,
261 .scr_write
= ahci_scr_write
,
263 .freeze
= ahci_freeze
,
266 .error_handler
= ahci_error_handler
,
267 .post_internal_cmd
= ahci_post_internal_cmd
,
269 .port_suspend
= ahci_port_suspend
,
270 .port_resume
= ahci_port_resume
,
272 .port_start
= ahci_port_start
,
273 .port_stop
= ahci_port_stop
,
276 static const struct ata_port_info ahci_port_info
[] = {
280 .host_flags
= ATA_FLAG_SATA
| ATA_FLAG_NO_LEGACY
|
281 ATA_FLAG_MMIO
| ATA_FLAG_PIO_DMA
|
282 ATA_FLAG_SKIP_D2H_BSY
,
283 .pio_mask
= 0x1f, /* pio0-4 */
284 .udma_mask
= 0x7f, /* udma0-6 ; FIXME */
285 .port_ops
= &ahci_ops
,
287 /* board_ahci_vt8251 */
290 .host_flags
= ATA_FLAG_SATA
| ATA_FLAG_NO_LEGACY
|
291 ATA_FLAG_MMIO
| ATA_FLAG_PIO_DMA
|
292 ATA_FLAG_SKIP_D2H_BSY
|
293 AHCI_FLAG_RESET_NEEDS_CLO
| AHCI_FLAG_NO_NCQ
,
294 .pio_mask
= 0x1f, /* pio0-4 */
295 .udma_mask
= 0x7f, /* udma0-6 ; FIXME */
296 .port_ops
= &ahci_ops
,
300 static const struct pci_device_id ahci_pci_tbl
[] = {
302 { PCI_VENDOR_ID_INTEL
, 0x2652, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0,
303 board_ahci
}, /* ICH6 */
304 { PCI_VENDOR_ID_INTEL
, 0x2653, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0,
305 board_ahci
}, /* ICH6M */
306 { PCI_VENDOR_ID_INTEL
, 0x27c1, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0,
307 board_ahci
}, /* ICH7 */
308 { PCI_VENDOR_ID_INTEL
, 0x27c5, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0,
309 board_ahci
}, /* ICH7M */
310 { PCI_VENDOR_ID_INTEL
, 0x27c3, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0,
311 board_ahci
}, /* ICH7R */
312 { PCI_VENDOR_ID_AL
, 0x5288, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0,
313 board_ahci
}, /* ULi M5288 */
314 { PCI_VENDOR_ID_INTEL
, 0x2681, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0,
315 board_ahci
}, /* ESB2 */
316 { PCI_VENDOR_ID_INTEL
, 0x2682, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0,
317 board_ahci
}, /* ESB2 */
318 { PCI_VENDOR_ID_INTEL
, 0x2683, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0,
319 board_ahci
}, /* ESB2 */
320 { PCI_VENDOR_ID_INTEL
, 0x27c6, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0,
321 board_ahci
}, /* ICH7-M DH */
322 { PCI_VENDOR_ID_INTEL
, 0x2821, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0,
323 board_ahci
}, /* ICH8 */
324 { PCI_VENDOR_ID_INTEL
, 0x2822, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0,
325 board_ahci
}, /* ICH8 */
326 { PCI_VENDOR_ID_INTEL
, 0x2824, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0,
327 board_ahci
}, /* ICH8 */
328 { PCI_VENDOR_ID_INTEL
, 0x2829, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0,
329 board_ahci
}, /* ICH8M */
330 { PCI_VENDOR_ID_INTEL
, 0x282a, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0,
331 board_ahci
}, /* ICH8M */
334 { 0x197b, 0x2360, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0,
335 board_ahci
}, /* JMicron JMB360 */
336 { 0x197b, 0x2361, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0,
337 board_ahci
}, /* JMicron JMB361 */
338 { 0x197b, 0x2363, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0,
339 board_ahci
}, /* JMicron JMB363 */
340 { 0x197b, 0x2365, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0,
341 board_ahci
}, /* JMicron JMB365 */
342 { 0x197b, 0x2366, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0,
343 board_ahci
}, /* JMicron JMB366 */
346 { PCI_VENDOR_ID_ATI
, 0x4380, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0,
347 board_ahci
}, /* ATI SB600 non-raid */
348 { PCI_VENDOR_ID_ATI
, 0x4381, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0,
349 board_ahci
}, /* ATI SB600 raid */
352 { PCI_VENDOR_ID_VIA
, 0x3349, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0,
353 board_ahci_vt8251
}, /* VIA VT8251 */
356 { PCI_VENDOR_ID_NVIDIA
, 0x044c, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0,
357 board_ahci
}, /* MCP65 */
358 { PCI_VENDOR_ID_NVIDIA
, 0x044d, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0,
359 board_ahci
}, /* MCP65 */
360 { PCI_VENDOR_ID_NVIDIA
, 0x044e, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0,
361 board_ahci
}, /* MCP65 */
362 { PCI_VENDOR_ID_NVIDIA
, 0x044f, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0,
363 board_ahci
}, /* MCP65 */
365 { } /* terminate list */
369 static struct pci_driver ahci_pci_driver
= {
371 .id_table
= ahci_pci_tbl
,
372 .probe
= ahci_init_one
,
373 .suspend
= ahci_pci_device_suspend
,
374 .resume
= ahci_pci_device_resume
,
375 .remove
= ahci_remove_one
,
/*
 * Return the MMIO base of port @port: port registers start 0x100 past
 * the global registers and each port occupies 0x80 bytes.
 */
static inline unsigned long ahci_port_base_ul(unsigned long base,
					      unsigned int port)
{
	return base + 0x100 + (port * 0x80);
}
384 static inline void __iomem
*ahci_port_base (void __iomem
*base
, unsigned int port
)
386 return (void __iomem
*) ahci_port_base_ul((unsigned long)base
, port
);
389 static u32
ahci_scr_read (struct ata_port
*ap
, unsigned int sc_reg_in
)
394 case SCR_STATUS
: sc_reg
= 0; break;
395 case SCR_CONTROL
: sc_reg
= 1; break;
396 case SCR_ERROR
: sc_reg
= 2; break;
397 case SCR_ACTIVE
: sc_reg
= 3; break;
402 return readl((void __iomem
*) ap
->ioaddr
.scr_addr
+ (sc_reg
* 4));
406 static void ahci_scr_write (struct ata_port
*ap
, unsigned int sc_reg_in
,
412 case SCR_STATUS
: sc_reg
= 0; break;
413 case SCR_CONTROL
: sc_reg
= 1; break;
414 case SCR_ERROR
: sc_reg
= 2; break;
415 case SCR_ACTIVE
: sc_reg
= 3; break;
420 writel(val
, (void __iomem
*) ap
->ioaddr
.scr_addr
+ (sc_reg
* 4));
423 static void ahci_start_engine(void __iomem
*port_mmio
)
428 tmp
= readl(port_mmio
+ PORT_CMD
);
429 tmp
|= PORT_CMD_START
;
430 writel(tmp
, port_mmio
+ PORT_CMD
);
431 readl(port_mmio
+ PORT_CMD
); /* flush */
434 static int ahci_stop_engine(void __iomem
*port_mmio
)
438 tmp
= readl(port_mmio
+ PORT_CMD
);
440 /* check if the HBA is idle */
441 if ((tmp
& (PORT_CMD_START
| PORT_CMD_LIST_ON
)) == 0)
444 /* setting HBA to idle */
445 tmp
&= ~PORT_CMD_START
;
446 writel(tmp
, port_mmio
+ PORT_CMD
);
448 /* wait for engine to stop. This could be as long as 500 msec */
449 tmp
= ata_wait_register(port_mmio
+ PORT_CMD
,
450 PORT_CMD_LIST_ON
, PORT_CMD_LIST_ON
, 1, 500);
451 if (tmp
& PORT_CMD_LIST_ON
)
457 static void ahci_start_fis_rx(void __iomem
*port_mmio
, u32 cap
,
458 dma_addr_t cmd_slot_dma
, dma_addr_t rx_fis_dma
)
462 /* set FIS registers */
463 if (cap
& HOST_CAP_64
)
464 writel((cmd_slot_dma
>> 16) >> 16, port_mmio
+ PORT_LST_ADDR_HI
);
465 writel(cmd_slot_dma
& 0xffffffff, port_mmio
+ PORT_LST_ADDR
);
467 if (cap
& HOST_CAP_64
)
468 writel((rx_fis_dma
>> 16) >> 16, port_mmio
+ PORT_FIS_ADDR_HI
);
469 writel(rx_fis_dma
& 0xffffffff, port_mmio
+ PORT_FIS_ADDR
);
471 /* enable FIS reception */
472 tmp
= readl(port_mmio
+ PORT_CMD
);
473 tmp
|= PORT_CMD_FIS_RX
;
474 writel(tmp
, port_mmio
+ PORT_CMD
);
477 readl(port_mmio
+ PORT_CMD
);
480 static int ahci_stop_fis_rx(void __iomem
*port_mmio
)
484 /* disable FIS reception */
485 tmp
= readl(port_mmio
+ PORT_CMD
);
486 tmp
&= ~PORT_CMD_FIS_RX
;
487 writel(tmp
, port_mmio
+ PORT_CMD
);
489 /* wait for completion, spec says 500ms, give it 1000 */
490 tmp
= ata_wait_register(port_mmio
+ PORT_CMD
, PORT_CMD_FIS_ON
,
491 PORT_CMD_FIS_ON
, 10, 1000);
492 if (tmp
& PORT_CMD_FIS_ON
)
498 static void ahci_power_up(void __iomem
*port_mmio
, u32 cap
)
502 cmd
= readl(port_mmio
+ PORT_CMD
) & ~PORT_CMD_ICC_MASK
;
505 if (cap
& HOST_CAP_SSS
) {
506 cmd
|= PORT_CMD_SPIN_UP
;
507 writel(cmd
, port_mmio
+ PORT_CMD
);
511 writel(cmd
| PORT_CMD_ICC_ACTIVE
, port_mmio
+ PORT_CMD
);
514 static void ahci_power_down(void __iomem
*port_mmio
, u32 cap
)
518 cmd
= readl(port_mmio
+ PORT_CMD
) & ~PORT_CMD_ICC_MASK
;
520 if (cap
& HOST_CAP_SSC
) {
521 /* enable transitions to slumber mode */
522 scontrol
= readl(port_mmio
+ PORT_SCR_CTL
);
523 if ((scontrol
& 0x0f00) > 0x100) {
525 writel(scontrol
, port_mmio
+ PORT_SCR_CTL
);
528 /* put device into slumber mode */
529 writel(cmd
| PORT_CMD_ICC_SLUMBER
, port_mmio
+ PORT_CMD
);
531 /* wait for the transition to complete */
532 ata_wait_register(port_mmio
+ PORT_CMD
, PORT_CMD_ICC_SLUMBER
,
533 PORT_CMD_ICC_SLUMBER
, 1, 50);
536 /* put device into listen mode */
537 if (cap
& HOST_CAP_SSS
) {
538 /* first set PxSCTL.DET to 0 */
539 scontrol
= readl(port_mmio
+ PORT_SCR_CTL
);
541 writel(scontrol
, port_mmio
+ PORT_SCR_CTL
);
543 /* then set PxCMD.SUD to 0 */
544 cmd
&= ~PORT_CMD_SPIN_UP
;
545 writel(cmd
, port_mmio
+ PORT_CMD
);
549 static void ahci_init_port(void __iomem
*port_mmio
, u32 cap
,
550 dma_addr_t cmd_slot_dma
, dma_addr_t rx_fis_dma
)
553 ahci_power_up(port_mmio
, cap
);
555 /* enable FIS reception */
556 ahci_start_fis_rx(port_mmio
, cap
, cmd_slot_dma
, rx_fis_dma
);
559 ahci_start_engine(port_mmio
);
562 static int ahci_deinit_port(void __iomem
*port_mmio
, u32 cap
, const char **emsg
)
567 rc
= ahci_stop_engine(port_mmio
);
569 *emsg
= "failed to stop engine";
573 /* disable FIS reception */
574 rc
= ahci_stop_fis_rx(port_mmio
);
576 *emsg
= "failed stop FIS RX";
580 /* put device into slumber mode */
581 ahci_power_down(port_mmio
, cap
);
586 static int ahci_reset_controller(void __iomem
*mmio
, struct pci_dev
*pdev
)
590 cap_save
= readl(mmio
+ HOST_CAP
);
591 cap_save
&= ( (1<<28) | (1<<17) );
592 cap_save
|= (1 << 27);
594 /* global controller reset */
595 tmp
= readl(mmio
+ HOST_CTL
);
596 if ((tmp
& HOST_RESET
) == 0) {
597 writel(tmp
| HOST_RESET
, mmio
+ HOST_CTL
);
598 readl(mmio
+ HOST_CTL
); /* flush */
601 /* reset must complete within 1 second, or
602 * the hardware should be considered fried.
606 tmp
= readl(mmio
+ HOST_CTL
);
607 if (tmp
& HOST_RESET
) {
608 dev_printk(KERN_ERR
, &pdev
->dev
,
609 "controller reset failed (0x%x)\n", tmp
);
613 writel(HOST_AHCI_EN
, mmio
+ HOST_CTL
);
614 (void) readl(mmio
+ HOST_CTL
); /* flush */
615 writel(cap_save
, mmio
+ HOST_CAP
);
616 writel(0xf, mmio
+ HOST_PORTS_IMPL
);
617 (void) readl(mmio
+ HOST_PORTS_IMPL
); /* flush */
619 if (pdev
->vendor
== PCI_VENDOR_ID_INTEL
) {
623 pci_read_config_word(pdev
, 0x92, &tmp16
);
625 pci_write_config_word(pdev
, 0x92, tmp16
);
631 static void ahci_init_controller(void __iomem
*mmio
, struct pci_dev
*pdev
,
632 int n_ports
, u32 cap
)
637 for (i
= 0; i
< n_ports
; i
++) {
638 void __iomem
*port_mmio
= ahci_port_base(mmio
, i
);
639 const char *emsg
= NULL
;
641 #if 0 /* BIOSen initialize this incorrectly */
642 if (!(hpriv
->port_map
& (1 << i
)))
646 /* make sure port is not active */
647 rc
= ahci_deinit_port(port_mmio
, cap
, &emsg
);
649 dev_printk(KERN_WARNING
, &pdev
->dev
,
650 "%s (%d)\n", emsg
, rc
);
653 tmp
= readl(port_mmio
+ PORT_SCR_ERR
);
654 VPRINTK("PORT_SCR_ERR 0x%x\n", tmp
);
655 writel(tmp
, port_mmio
+ PORT_SCR_ERR
);
657 /* clear & turn off port IRQ */
658 tmp
= readl(port_mmio
+ PORT_IRQ_STAT
);
659 VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp
);
661 writel(tmp
, port_mmio
+ PORT_IRQ_STAT
);
663 writel(1 << i
, mmio
+ HOST_IRQ_STAT
);
664 writel(0, port_mmio
+ PORT_IRQ_MASK
);
667 tmp
= readl(mmio
+ HOST_CTL
);
668 VPRINTK("HOST_CTL 0x%x\n", tmp
);
669 writel(tmp
| HOST_IRQ_EN
, mmio
+ HOST_CTL
);
670 tmp
= readl(mmio
+ HOST_CTL
);
671 VPRINTK("HOST_CTL 0x%x\n", tmp
);
674 static unsigned int ahci_dev_classify(struct ata_port
*ap
)
676 void __iomem
*port_mmio
= (void __iomem
*) ap
->ioaddr
.cmd_addr
;
677 struct ata_taskfile tf
;
680 tmp
= readl(port_mmio
+ PORT_SIG
);
681 tf
.lbah
= (tmp
>> 24) & 0xff;
682 tf
.lbam
= (tmp
>> 16) & 0xff;
683 tf
.lbal
= (tmp
>> 8) & 0xff;
684 tf
.nsect
= (tmp
) & 0xff;
686 return ata_dev_classify(&tf
);
689 static void ahci_fill_cmd_slot(struct ahci_port_priv
*pp
, unsigned int tag
,
692 dma_addr_t cmd_tbl_dma
;
694 cmd_tbl_dma
= pp
->cmd_tbl_dma
+ tag
* AHCI_CMD_TBL_SZ
;
696 pp
->cmd_slot
[tag
].opts
= cpu_to_le32(opts
);
697 pp
->cmd_slot
[tag
].status
= 0;
698 pp
->cmd_slot
[tag
].tbl_addr
= cpu_to_le32(cmd_tbl_dma
& 0xffffffff);
699 pp
->cmd_slot
[tag
].tbl_addr_hi
= cpu_to_le32((cmd_tbl_dma
>> 16) >> 16);
702 static int ahci_clo(struct ata_port
*ap
)
704 void __iomem
*port_mmio
= (void __iomem
*) ap
->ioaddr
.cmd_addr
;
705 struct ahci_host_priv
*hpriv
= ap
->host_set
->private_data
;
708 if (!(hpriv
->cap
& HOST_CAP_CLO
))
711 tmp
= readl(port_mmio
+ PORT_CMD
);
713 writel(tmp
, port_mmio
+ PORT_CMD
);
715 tmp
= ata_wait_register(port_mmio
+ PORT_CMD
,
716 PORT_CMD_CLO
, PORT_CMD_CLO
, 1, 500);
717 if (tmp
& PORT_CMD_CLO
)
723 static int ahci_prereset(struct ata_port
*ap
)
725 if ((ap
->flags
& AHCI_FLAG_RESET_NEEDS_CLO
) &&
726 (ata_busy_wait(ap
, ATA_BUSY
, 1000) & ATA_BUSY
)) {
727 /* ATA_BUSY hasn't cleared, so send a CLO */
731 return ata_std_prereset(ap
);
734 static int ahci_softreset(struct ata_port
*ap
, unsigned int *class)
736 struct ahci_port_priv
*pp
= ap
->private_data
;
737 void __iomem
*mmio
= ap
->host_set
->mmio_base
;
738 void __iomem
*port_mmio
= ahci_port_base(mmio
, ap
->port_no
);
739 const u32 cmd_fis_len
= 5; /* five dwords */
740 const char *reason
= NULL
;
741 struct ata_taskfile tf
;
748 if (ata_port_offline(ap
)) {
749 DPRINTK("PHY reports no device\n");
750 *class = ATA_DEV_NONE
;
754 /* prepare for SRST (AHCI-1.1 10.4.1) */
755 rc
= ahci_stop_engine(port_mmio
);
757 reason
= "failed to stop engine";
761 /* check BUSY/DRQ, perform Command List Override if necessary */
762 ahci_tf_read(ap
, &tf
);
763 if (tf
.command
& (ATA_BUSY
| ATA_DRQ
)) {
766 if (rc
== -EOPNOTSUPP
) {
767 reason
= "port busy but CLO unavailable";
770 reason
= "port busy but CLO failed";
776 ahci_start_engine(port_mmio
);
778 ata_tf_init(ap
->device
, &tf
);
781 /* issue the first D2H Register FIS */
782 ahci_fill_cmd_slot(pp
, 0,
783 cmd_fis_len
| AHCI_CMD_RESET
| AHCI_CMD_CLR_BUSY
);
786 ata_tf_to_fis(&tf
, fis
, 0);
787 fis
[1] &= ~(1 << 7); /* turn off Command FIS bit */
789 writel(1, port_mmio
+ PORT_CMD_ISSUE
);
791 tmp
= ata_wait_register(port_mmio
+ PORT_CMD_ISSUE
, 0x1, 0x1, 1, 500);
794 reason
= "1st FIS failed";
798 /* spec says at least 5us, but be generous and sleep for 1ms */
801 /* issue the second D2H Register FIS */
802 ahci_fill_cmd_slot(pp
, 0, cmd_fis_len
);
805 ata_tf_to_fis(&tf
, fis
, 0);
806 fis
[1] &= ~(1 << 7); /* turn off Command FIS bit */
808 writel(1, port_mmio
+ PORT_CMD_ISSUE
);
809 readl(port_mmio
+ PORT_CMD_ISSUE
); /* flush */
811 /* spec mandates ">= 2ms" before checking status.
812 * We wait 150ms, because that was the magic delay used for
813 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
814 * between when the ATA command register is written, and then
815 * status is checked. Because waiting for "a while" before
816 * checking status is fine, post SRST, we perform this magic
817 * delay here as well.
821 *class = ATA_DEV_NONE
;
822 if (ata_port_online(ap
)) {
823 if (ata_busy_sleep(ap
, ATA_TMOUT_BOOT_QUICK
, ATA_TMOUT_BOOT
)) {
825 reason
= "device not ready";
828 *class = ahci_dev_classify(ap
);
831 DPRINTK("EXIT, class=%u\n", *class);
835 ahci_start_engine(port_mmio
);
837 ata_port_printk(ap
, KERN_ERR
, "softreset failed (%s)\n", reason
);
841 static int ahci_hardreset(struct ata_port
*ap
, unsigned int *class)
843 struct ahci_port_priv
*pp
= ap
->private_data
;
844 u8
*d2h_fis
= pp
->rx_fis
+ RX_FIS_D2H_REG
;
845 struct ata_taskfile tf
;
846 void __iomem
*mmio
= ap
->host_set
->mmio_base
;
847 void __iomem
*port_mmio
= ahci_port_base(mmio
, ap
->port_no
);
852 ahci_stop_engine(port_mmio
);
854 /* clear D2H reception area to properly wait for D2H FIS */
855 ata_tf_init(ap
->device
, &tf
);
857 ata_tf_to_fis(&tf
, d2h_fis
, 0);
859 rc
= sata_std_hardreset(ap
, class);
861 ahci_start_engine(port_mmio
);
863 if (rc
== 0 && ata_port_online(ap
))
864 *class = ahci_dev_classify(ap
);
865 if (*class == ATA_DEV_UNKNOWN
)
866 *class = ATA_DEV_NONE
;
868 DPRINTK("EXIT, rc=%d, class=%u\n", rc
, *class);
872 static void ahci_postreset(struct ata_port
*ap
, unsigned int *class)
874 void __iomem
*port_mmio
= (void __iomem
*) ap
->ioaddr
.cmd_addr
;
877 ata_std_postreset(ap
, class);
879 /* Make sure port's ATAPI bit is set appropriately */
880 new_tmp
= tmp
= readl(port_mmio
+ PORT_CMD
);
881 if (*class == ATA_DEV_ATAPI
)
882 new_tmp
|= PORT_CMD_ATAPI
;
884 new_tmp
&= ~PORT_CMD_ATAPI
;
885 if (new_tmp
!= tmp
) {
886 writel(new_tmp
, port_mmio
+ PORT_CMD
);
887 readl(port_mmio
+ PORT_CMD
); /* flush */
891 static u8
ahci_check_status(struct ata_port
*ap
)
893 void __iomem
*mmio
= (void __iomem
*) ap
->ioaddr
.cmd_addr
;
895 return readl(mmio
+ PORT_TFDATA
) & 0xFF;
898 static void ahci_tf_read(struct ata_port
*ap
, struct ata_taskfile
*tf
)
900 struct ahci_port_priv
*pp
= ap
->private_data
;
901 u8
*d2h_fis
= pp
->rx_fis
+ RX_FIS_D2H_REG
;
903 ata_tf_from_fis(d2h_fis
, tf
);
906 static unsigned int ahci_fill_sg(struct ata_queued_cmd
*qc
, void *cmd_tbl
)
908 struct scatterlist
*sg
;
909 struct ahci_sg
*ahci_sg
;
910 unsigned int n_sg
= 0;
915 * Next, the S/G list.
917 ahci_sg
= cmd_tbl
+ AHCI_CMD_TBL_HDR_SZ
;
918 ata_for_each_sg(sg
, qc
) {
919 dma_addr_t addr
= sg_dma_address(sg
);
920 u32 sg_len
= sg_dma_len(sg
);
922 ahci_sg
->addr
= cpu_to_le32(addr
& 0xffffffff);
923 ahci_sg
->addr_hi
= cpu_to_le32((addr
>> 16) >> 16);
924 ahci_sg
->flags_size
= cpu_to_le32(sg_len
- 1);
933 static void ahci_qc_prep(struct ata_queued_cmd
*qc
)
935 struct ata_port
*ap
= qc
->ap
;
936 struct ahci_port_priv
*pp
= ap
->private_data
;
937 int is_atapi
= is_atapi_taskfile(&qc
->tf
);
940 const u32 cmd_fis_len
= 5; /* five dwords */
944 * Fill in command table information. First, the header,
945 * a SATA Register - Host to Device command FIS.
947 cmd_tbl
= pp
->cmd_tbl
+ qc
->tag
* AHCI_CMD_TBL_SZ
;
949 ata_tf_to_fis(&qc
->tf
, cmd_tbl
, 0);
951 memset(cmd_tbl
+ AHCI_CMD_TBL_CDB
, 0, 32);
952 memcpy(cmd_tbl
+ AHCI_CMD_TBL_CDB
, qc
->cdb
, qc
->dev
->cdb_len
);
956 if (qc
->flags
& ATA_QCFLAG_DMAMAP
)
957 n_elem
= ahci_fill_sg(qc
, cmd_tbl
);
960 * Fill in command slot information.
962 opts
= cmd_fis_len
| n_elem
<< 16;
963 if (qc
->tf
.flags
& ATA_TFLAG_WRITE
)
964 opts
|= AHCI_CMD_WRITE
;
966 opts
|= AHCI_CMD_ATAPI
| AHCI_CMD_PREFETCH
;
968 ahci_fill_cmd_slot(pp
, qc
->tag
, opts
);
971 static void ahci_error_intr(struct ata_port
*ap
, u32 irq_stat
)
973 struct ahci_port_priv
*pp
= ap
->private_data
;
974 struct ata_eh_info
*ehi
= &ap
->eh_info
;
975 unsigned int err_mask
= 0, action
= 0;
976 struct ata_queued_cmd
*qc
;
979 ata_ehi_clear_desc(ehi
);
981 /* AHCI needs SError cleared; otherwise, it might lock up */
982 serror
= ahci_scr_read(ap
, SCR_ERROR
);
983 ahci_scr_write(ap
, SCR_ERROR
, serror
);
985 /* analyze @irq_stat */
986 ata_ehi_push_desc(ehi
, "irq_stat 0x%08x", irq_stat
);
988 if (irq_stat
& PORT_IRQ_TF_ERR
)
989 err_mask
|= AC_ERR_DEV
;
991 if (irq_stat
& (PORT_IRQ_HBUS_ERR
| PORT_IRQ_HBUS_DATA_ERR
)) {
992 err_mask
|= AC_ERR_HOST_BUS
;
993 action
|= ATA_EH_SOFTRESET
;
996 if (irq_stat
& PORT_IRQ_IF_ERR
) {
997 err_mask
|= AC_ERR_ATA_BUS
;
998 action
|= ATA_EH_SOFTRESET
;
999 ata_ehi_push_desc(ehi
, ", interface fatal error");
1002 if (irq_stat
& (PORT_IRQ_CONNECT
| PORT_IRQ_PHYRDY
)) {
1003 ata_ehi_hotplugged(ehi
);
1004 ata_ehi_push_desc(ehi
, ", %s", irq_stat
& PORT_IRQ_CONNECT
?
1005 "connection status changed" : "PHY RDY changed");
1008 if (irq_stat
& PORT_IRQ_UNK_FIS
) {
1009 u32
*unk
= (u32
*)(pp
->rx_fis
+ RX_FIS_UNK
);
1011 err_mask
|= AC_ERR_HSM
;
1012 action
|= ATA_EH_SOFTRESET
;
1013 ata_ehi_push_desc(ehi
, ", unknown FIS %08x %08x %08x %08x",
1014 unk
[0], unk
[1], unk
[2], unk
[3]);
1017 /* okay, let's hand over to EH */
1018 ehi
->serror
|= serror
;
1019 ehi
->action
|= action
;
1021 qc
= ata_qc_from_tag(ap
, ap
->active_tag
);
1023 qc
->err_mask
|= err_mask
;
1025 ehi
->err_mask
|= err_mask
;
1027 if (irq_stat
& PORT_IRQ_FREEZE
)
1028 ata_port_freeze(ap
);
1033 static void ahci_host_intr(struct ata_port
*ap
)
1035 void __iomem
*mmio
= ap
->host_set
->mmio_base
;
1036 void __iomem
*port_mmio
= ahci_port_base(mmio
, ap
->port_no
);
1037 struct ata_eh_info
*ehi
= &ap
->eh_info
;
1038 u32 status
, qc_active
;
1041 status
= readl(port_mmio
+ PORT_IRQ_STAT
);
1042 writel(status
, port_mmio
+ PORT_IRQ_STAT
);
1044 if (unlikely(status
& PORT_IRQ_ERROR
)) {
1045 ahci_error_intr(ap
, status
);
1050 qc_active
= readl(port_mmio
+ PORT_SCR_ACT
);
1052 qc_active
= readl(port_mmio
+ PORT_CMD_ISSUE
);
1054 rc
= ata_qc_complete_multiple(ap
, qc_active
, NULL
);
1058 ehi
->err_mask
|= AC_ERR_HSM
;
1059 ehi
->action
|= ATA_EH_SOFTRESET
;
1060 ata_port_freeze(ap
);
1064 /* hmmm... a spurious interupt */
1066 /* some devices send D2H reg with I bit set during NCQ command phase */
1067 if (ap
->sactive
&& status
& PORT_IRQ_D2H_REG_FIS
)
1070 /* ignore interim PIO setup fis interrupts */
1071 if (ata_tag_valid(ap
->active_tag
)) {
1072 struct ata_queued_cmd
*qc
=
1073 ata_qc_from_tag(ap
, ap
->active_tag
);
1075 if (qc
&& qc
->tf
.protocol
== ATA_PROT_PIO
&&
1076 (status
& PORT_IRQ_PIOS_FIS
))
1080 if (ata_ratelimit())
1081 ata_port_printk(ap
, KERN_INFO
, "spurious interrupt "
1082 "(irq_stat 0x%x active_tag %d sactive 0x%x)\n",
1083 status
, ap
->active_tag
, ap
->sactive
);
/*
 * libata irq_clear hook.  Intentionally empty: per-port interrupt
 * status is acknowledged directly in ahci_host_intr()/ahci_thaw().
 * NOTE(review): body was dropped by the extraction; restored as the
 * conventional no-op -- confirm.
 */
static void ahci_irq_clear(struct ata_port *ap)
{
	/* TODO */
}
1091 static irqreturn_t
ahci_interrupt(int irq
, void *dev_instance
, struct pt_regs
*regs
)
1093 struct ata_host_set
*host_set
= dev_instance
;
1094 struct ahci_host_priv
*hpriv
;
1095 unsigned int i
, handled
= 0;
1097 u32 irq_stat
, irq_ack
= 0;
1101 hpriv
= host_set
->private_data
;
1102 mmio
= host_set
->mmio_base
;
1104 /* sigh. 0xffffffff is a valid return from h/w */
1105 irq_stat
= readl(mmio
+ HOST_IRQ_STAT
);
1106 irq_stat
&= hpriv
->port_map
;
1110 spin_lock(&host_set
->lock
);
1112 for (i
= 0; i
< host_set
->n_ports
; i
++) {
1113 struct ata_port
*ap
;
1115 if (!(irq_stat
& (1 << i
)))
1118 ap
= host_set
->ports
[i
];
1121 VPRINTK("port %u\n", i
);
1123 VPRINTK("port %u (no irq)\n", i
);
1124 if (ata_ratelimit())
1125 dev_printk(KERN_WARNING
, host_set
->dev
,
1126 "interrupt on disabled port %u\n", i
);
1129 irq_ack
|= (1 << i
);
1133 writel(irq_ack
, mmio
+ HOST_IRQ_STAT
);
1137 spin_unlock(&host_set
->lock
);
1141 return IRQ_RETVAL(handled
);
1144 static unsigned int ahci_qc_issue(struct ata_queued_cmd
*qc
)
1146 struct ata_port
*ap
= qc
->ap
;
1147 void __iomem
*port_mmio
= (void __iomem
*) ap
->ioaddr
.cmd_addr
;
1149 if (qc
->tf
.protocol
== ATA_PROT_NCQ
)
1150 writel(1 << qc
->tag
, port_mmio
+ PORT_SCR_ACT
);
1151 writel(1 << qc
->tag
, port_mmio
+ PORT_CMD_ISSUE
);
1152 readl(port_mmio
+ PORT_CMD_ISSUE
); /* flush */
1157 static void ahci_freeze(struct ata_port
*ap
)
1159 void __iomem
*mmio
= ap
->host_set
->mmio_base
;
1160 void __iomem
*port_mmio
= ahci_port_base(mmio
, ap
->port_no
);
1163 writel(0, port_mmio
+ PORT_IRQ_MASK
);
1166 static void ahci_thaw(struct ata_port
*ap
)
1168 void __iomem
*mmio
= ap
->host_set
->mmio_base
;
1169 void __iomem
*port_mmio
= ahci_port_base(mmio
, ap
->port_no
);
1173 tmp
= readl(port_mmio
+ PORT_IRQ_STAT
);
1174 writel(tmp
, port_mmio
+ PORT_IRQ_STAT
);
1175 writel(1 << ap
->id
, mmio
+ HOST_IRQ_STAT
);
1177 /* turn IRQ back on */
1178 writel(DEF_PORT_IRQ
, port_mmio
+ PORT_IRQ_MASK
);
1181 static void ahci_error_handler(struct ata_port
*ap
)
1183 void __iomem
*mmio
= ap
->host_set
->mmio_base
;
1184 void __iomem
*port_mmio
= ahci_port_base(mmio
, ap
->port_no
);
1186 if (!(ap
->pflags
& ATA_PFLAG_FROZEN
)) {
1187 /* restart engine */
1188 ahci_stop_engine(port_mmio
);
1189 ahci_start_engine(port_mmio
);
1192 /* perform recovery */
1193 ata_do_eh(ap
, ahci_prereset
, ahci_softreset
, ahci_hardreset
,
1197 static void ahci_post_internal_cmd(struct ata_queued_cmd
*qc
)
1199 struct ata_port
*ap
= qc
->ap
;
1200 void __iomem
*mmio
= ap
->host_set
->mmio_base
;
1201 void __iomem
*port_mmio
= ahci_port_base(mmio
, ap
->port_no
);
1203 if (qc
->flags
& ATA_QCFLAG_FAILED
)
1204 qc
->err_mask
|= AC_ERR_OTHER
;
1207 /* make DMA engine forget about the failed command */
1208 ahci_stop_engine(port_mmio
);
1209 ahci_start_engine(port_mmio
);
1213 static int ahci_port_suspend(struct ata_port
*ap
, pm_message_t mesg
)
1215 struct ahci_host_priv
*hpriv
= ap
->host_set
->private_data
;
1216 struct ahci_port_priv
*pp
= ap
->private_data
;
1217 void __iomem
*mmio
= ap
->host_set
->mmio_base
;
1218 void __iomem
*port_mmio
= ahci_port_base(mmio
, ap
->port_no
);
1219 const char *emsg
= NULL
;
1222 rc
= ahci_deinit_port(port_mmio
, hpriv
->cap
, &emsg
);
1224 ata_port_printk(ap
, KERN_ERR
, "%s (%d)\n", emsg
, rc
);
1225 ahci_init_port(port_mmio
, hpriv
->cap
,
1226 pp
->cmd_slot_dma
, pp
->rx_fis_dma
);
1232 static int ahci_port_resume(struct ata_port
*ap
)
1234 struct ahci_port_priv
*pp
= ap
->private_data
;
1235 struct ahci_host_priv
*hpriv
= ap
->host_set
->private_data
;
1236 void __iomem
*mmio
= ap
->host_set
->mmio_base
;
1237 void __iomem
*port_mmio
= ahci_port_base(mmio
, ap
->port_no
);
1239 ahci_init_port(port_mmio
, hpriv
->cap
, pp
->cmd_slot_dma
, pp
->rx_fis_dma
);
1244 static int ahci_pci_device_suspend(struct pci_dev
*pdev
, pm_message_t mesg
)
1246 struct ata_host_set
*host_set
= dev_get_drvdata(&pdev
->dev
);
1247 void __iomem
*mmio
= host_set
->mmio_base
;
1250 if (mesg
.event
== PM_EVENT_SUSPEND
) {
1251 /* AHCI spec rev1.1 section 8.3.3:
1252 * Software must disable interrupts prior to requesting a
1253 * transition of the HBA to D3 state.
1255 ctl
= readl(mmio
+ HOST_CTL
);
1256 ctl
&= ~HOST_IRQ_EN
;
1257 writel(ctl
, mmio
+ HOST_CTL
);
1258 readl(mmio
+ HOST_CTL
); /* flush */
1261 return ata_pci_device_suspend(pdev
, mesg
);
1264 static int ahci_pci_device_resume(struct pci_dev
*pdev
)
1266 struct ata_host_set
*host_set
= dev_get_drvdata(&pdev
->dev
);
1267 struct ahci_host_priv
*hpriv
= host_set
->private_data
;
1268 void __iomem
*mmio
= host_set
->mmio_base
;
1271 ata_pci_device_do_resume(pdev
);
1273 if (pdev
->dev
.power
.power_state
.event
== PM_EVENT_SUSPEND
) {
1274 rc
= ahci_reset_controller(mmio
, pdev
);
1278 ahci_init_controller(mmio
, pdev
, host_set
->n_ports
, hpriv
->cap
);
1281 ata_host_set_resume(host_set
);
1286 static int ahci_port_start(struct ata_port
*ap
)
1288 struct device
*dev
= ap
->host_set
->dev
;
1289 struct ahci_host_priv
*hpriv
= ap
->host_set
->private_data
;
1290 struct ahci_port_priv
*pp
;
1291 void __iomem
*mmio
= ap
->host_set
->mmio_base
;
1292 void __iomem
*port_mmio
= ahci_port_base(mmio
, ap
->port_no
);
1297 pp
= kmalloc(sizeof(*pp
), GFP_KERNEL
);
1300 memset(pp
, 0, sizeof(*pp
));
1302 rc
= ata_pad_alloc(ap
, dev
);
1308 mem
= dma_alloc_coherent(dev
, AHCI_PORT_PRIV_DMA_SZ
, &mem_dma
, GFP_KERNEL
);
1310 ata_pad_free(ap
, dev
);
1314 memset(mem
, 0, AHCI_PORT_PRIV_DMA_SZ
);
1317 * First item in chunk of DMA memory: 32-slot command table,
1318 * 32 bytes each in size
1321 pp
->cmd_slot_dma
= mem_dma
;
1323 mem
+= AHCI_CMD_SLOT_SZ
;
1324 mem_dma
+= AHCI_CMD_SLOT_SZ
;
1327 * Second item: Received-FIS area
1330 pp
->rx_fis_dma
= mem_dma
;
1332 mem
+= AHCI_RX_FIS_SZ
;
1333 mem_dma
+= AHCI_RX_FIS_SZ
;
1336 * Third item: data area for storing a single command
1337 * and its scatter-gather table
1340 pp
->cmd_tbl_dma
= mem_dma
;
1342 ap
->private_data
= pp
;
1344 /* initialize port */
1345 ahci_init_port(port_mmio
, hpriv
->cap
, pp
->cmd_slot_dma
, pp
->rx_fis_dma
);
1350 static void ahci_port_stop(struct ata_port
*ap
)
1352 struct device
*dev
= ap
->host_set
->dev
;
1353 struct ahci_host_priv
*hpriv
= ap
->host_set
->private_data
;
1354 struct ahci_port_priv
*pp
= ap
->private_data
;
1355 void __iomem
*mmio
= ap
->host_set
->mmio_base
;
1356 void __iomem
*port_mmio
= ahci_port_base(mmio
, ap
->port_no
);
1357 const char *emsg
= NULL
;
1360 /* de-initialize port */
1361 rc
= ahci_deinit_port(port_mmio
, hpriv
->cap
, &emsg
);
1363 ata_port_printk(ap
, KERN_WARNING
, "%s (%d)\n", emsg
, rc
);
1365 ap
->private_data
= NULL
;
1366 dma_free_coherent(dev
, AHCI_PORT_PRIV_DMA_SZ
,
1367 pp
->cmd_slot
, pp
->cmd_slot_dma
);
1368 ata_pad_free(ap
, dev
);
1372 static void ahci_setup_port(struct ata_ioports
*port
, unsigned long base
,
1373 unsigned int port_idx
)
1375 VPRINTK("ENTER, base==0x%lx, port_idx %u\n", base
, port_idx
);
1376 base
= ahci_port_base_ul(base
, port_idx
);
1377 VPRINTK("base now==0x%lx\n", base
);
1379 port
->cmd_addr
= base
;
1380 port
->scr_addr
= base
+ PORT_SCR
;
1385 static int ahci_host_init(struct ata_probe_ent
*probe_ent
)
1387 struct ahci_host_priv
*hpriv
= probe_ent
->private_data
;
1388 struct pci_dev
*pdev
= to_pci_dev(probe_ent
->dev
);
1389 void __iomem
*mmio
= probe_ent
->mmio_base
;
1390 unsigned int i
, using_dac
;
1393 rc
= ahci_reset_controller(mmio
, pdev
);
1397 hpriv
->cap
= readl(mmio
+ HOST_CAP
);
1398 hpriv
->port_map
= readl(mmio
+ HOST_PORTS_IMPL
);
1399 probe_ent
->n_ports
= (hpriv
->cap
& 0x1f) + 1;
1401 VPRINTK("cap 0x%x port_map 0x%x n_ports %d\n",
1402 hpriv
->cap
, hpriv
->port_map
, probe_ent
->n_ports
);
1404 using_dac
= hpriv
->cap
& HOST_CAP_64
;
1406 !pci_set_dma_mask(pdev
, DMA_64BIT_MASK
)) {
1407 rc
= pci_set_consistent_dma_mask(pdev
, DMA_64BIT_MASK
);
1409 rc
= pci_set_consistent_dma_mask(pdev
, DMA_32BIT_MASK
);
1411 dev_printk(KERN_ERR
, &pdev
->dev
,
1412 "64-bit DMA enable failed\n");
1417 rc
= pci_set_dma_mask(pdev
, DMA_32BIT_MASK
);
1419 dev_printk(KERN_ERR
, &pdev
->dev
,
1420 "32-bit DMA enable failed\n");
1423 rc
= pci_set_consistent_dma_mask(pdev
, DMA_32BIT_MASK
);
1425 dev_printk(KERN_ERR
, &pdev
->dev
,
1426 "32-bit consistent DMA enable failed\n");
1431 for (i
= 0; i
< probe_ent
->n_ports
; i
++)
1432 ahci_setup_port(&probe_ent
->port
[i
], (unsigned long) mmio
, i
);
1434 ahci_init_controller(mmio
, pdev
, probe_ent
->n_ports
, hpriv
->cap
);
1436 pci_set_master(pdev
);
1441 static void ahci_print_info(struct ata_probe_ent
*probe_ent
)
1443 struct ahci_host_priv
*hpriv
= probe_ent
->private_data
;
1444 struct pci_dev
*pdev
= to_pci_dev(probe_ent
->dev
);
1445 void __iomem
*mmio
= probe_ent
->mmio_base
;
1446 u32 vers
, cap
, impl
, speed
;
1447 const char *speed_s
;
1451 vers
= readl(mmio
+ HOST_VERSION
);
1453 impl
= hpriv
->port_map
;
1455 speed
= (cap
>> 20) & 0xf;
1458 else if (speed
== 2)
1463 pci_read_config_word(pdev
, 0x0a, &cc
);
1466 else if (cc
== 0x0106)
1468 else if (cc
== 0x0104)
1473 dev_printk(KERN_INFO
, &pdev
->dev
,
1474 "AHCI %02x%02x.%02x%02x "
1475 "%u slots %u ports %s Gbps 0x%x impl %s mode\n"
1478 (vers
>> 24) & 0xff,
1479 (vers
>> 16) & 0xff,
1483 ((cap
>> 8) & 0x1f) + 1,
1489 dev_printk(KERN_INFO
, &pdev
->dev
,
1495 cap
& (1 << 31) ? "64bit " : "",
1496 cap
& (1 << 30) ? "ncq " : "",
1497 cap
& (1 << 28) ? "ilck " : "",
1498 cap
& (1 << 27) ? "stag " : "",
1499 cap
& (1 << 26) ? "pm " : "",
1500 cap
& (1 << 25) ? "led " : "",
1502 cap
& (1 << 24) ? "clo " : "",
1503 cap
& (1 << 19) ? "nz " : "",
1504 cap
& (1 << 18) ? "only " : "",
1505 cap
& (1 << 17) ? "pmp " : "",
1506 cap
& (1 << 15) ? "pio " : "",
1507 cap
& (1 << 14) ? "slum " : "",
1508 cap
& (1 << 13) ? "part " : ""
1512 static int ahci_init_one (struct pci_dev
*pdev
, const struct pci_device_id
*ent
)
1514 static int printed_version
;
1515 struct ata_probe_ent
*probe_ent
= NULL
;
1516 struct ahci_host_priv
*hpriv
;
1518 void __iomem
*mmio_base
;
1519 unsigned int board_idx
= (unsigned int) ent
->driver_data
;
1520 int have_msi
, pci_dev_busy
= 0;
1525 WARN_ON(ATA_MAX_QUEUE
> AHCI_MAX_CMDS
);
1527 if (!printed_version
++)
1528 dev_printk(KERN_DEBUG
, &pdev
->dev
, "version " DRV_VERSION
"\n");
1530 /* JMicron-specific fixup: make sure we're in AHCI mode */
1531 /* This is protected from races with ata_jmicron by the pci probe
1533 if (pdev
->vendor
== PCI_VENDOR_ID_JMICRON
) {
1534 /* AHCI enable, AHCI on function 0 */
1535 pci_write_config_byte(pdev
, 0x41, 0xa1);
1536 /* Function 1 is the PATA controller */
1537 if (PCI_FUNC(pdev
->devfn
))
1541 rc
= pci_enable_device(pdev
);
1545 rc
= pci_request_regions(pdev
, DRV_NAME
);
1551 if (pci_enable_msi(pdev
) == 0)
1558 probe_ent
= kmalloc(sizeof(*probe_ent
), GFP_KERNEL
);
1559 if (probe_ent
== NULL
) {
1564 memset(probe_ent
, 0, sizeof(*probe_ent
));
1565 probe_ent
->dev
= pci_dev_to_dev(pdev
);
1566 INIT_LIST_HEAD(&probe_ent
->node
);
1568 mmio_base
= pci_iomap(pdev
, AHCI_PCI_BAR
, 0);
1569 if (mmio_base
== NULL
) {
1571 goto err_out_free_ent
;
1573 base
= (unsigned long) mmio_base
;
1575 hpriv
= kmalloc(sizeof(*hpriv
), GFP_KERNEL
);
1578 goto err_out_iounmap
;
1580 memset(hpriv
, 0, sizeof(*hpriv
));
1582 probe_ent
->sht
= ahci_port_info
[board_idx
].sht
;
1583 probe_ent
->host_flags
= ahci_port_info
[board_idx
].host_flags
;
1584 probe_ent
->pio_mask
= ahci_port_info
[board_idx
].pio_mask
;
1585 probe_ent
->udma_mask
= ahci_port_info
[board_idx
].udma_mask
;
1586 probe_ent
->port_ops
= ahci_port_info
[board_idx
].port_ops
;
1588 probe_ent
->irq
= pdev
->irq
;
1589 probe_ent
->irq_flags
= IRQF_SHARED
;
1590 probe_ent
->mmio_base
= mmio_base
;
1591 probe_ent
->private_data
= hpriv
;
1594 hpriv
->flags
|= AHCI_FLAG_MSI
;
1596 /* initialize adapter */
1597 rc
= ahci_host_init(probe_ent
);
1601 if (!(probe_ent
->host_flags
& AHCI_FLAG_NO_NCQ
) &&
1602 (hpriv
->cap
& HOST_CAP_NCQ
))
1603 probe_ent
->host_flags
|= ATA_FLAG_NCQ
;
1605 ahci_print_info(probe_ent
);
1607 /* FIXME: check ata_device_add return value */
1608 ata_device_add(probe_ent
);
1616 pci_iounmap(pdev
, mmio_base
);
1621 pci_disable_msi(pdev
);
1624 pci_release_regions(pdev
);
1627 pci_disable_device(pdev
);
1631 static void ahci_remove_one (struct pci_dev
*pdev
)
1633 struct device
*dev
= pci_dev_to_dev(pdev
);
1634 struct ata_host_set
*host_set
= dev_get_drvdata(dev
);
1635 struct ahci_host_priv
*hpriv
= host_set
->private_data
;
1639 for (i
= 0; i
< host_set
->n_ports
; i
++)
1640 ata_port_detach(host_set
->ports
[i
]);
1642 have_msi
= hpriv
->flags
& AHCI_FLAG_MSI
;
1643 free_irq(host_set
->irq
, host_set
);
1645 for (i
= 0; i
< host_set
->n_ports
; i
++) {
1646 struct ata_port
*ap
= host_set
->ports
[i
];
1648 ata_scsi_release(ap
->host
);
1649 scsi_host_put(ap
->host
);
1653 pci_iounmap(pdev
, host_set
->mmio_base
);
1657 pci_disable_msi(pdev
);
1660 pci_release_regions(pdev
);
1661 pci_disable_device(pdev
);
1662 dev_set_drvdata(dev
, NULL
);
1665 static int __init
ahci_init(void)
1667 return pci_module_init(&ahci_pci_driver
);
1670 static void __exit
ahci_exit(void)
1672 pci_unregister_driver(&ahci_pci_driver
);
1676 MODULE_AUTHOR("Jeff Garzik");
1677 MODULE_DESCRIPTION("AHCI SATA low-level driver");
1678 MODULE_LICENSE("GPL");
1679 MODULE_DEVICE_TABLE(pci
, ahci_pci_tbl
);
1680 MODULE_VERSION(DRV_VERSION
);
1682 module_init(ahci_init
);
1683 module_exit(ahci_exit
);