ahci: Get rid of pci_dev argument in ahci_save_initial_config()
drivers/ata/ahci.c (linux-2.6.git)
1 /*
2 * ahci.c - AHCI SATA support
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
8 * Copyright 2004-2005 Red Hat, Inc.
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/DocBook/libata.*
29 * AHCI hardware documentation:
30 * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
31 * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
35 #include <linux/kernel.h>
36 #include <linux/module.h>
37 #include <linux/pci.h>
38 #include <linux/init.h>
39 #include <linux/blkdev.h>
40 #include <linux/delay.h>
41 #include <linux/interrupt.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/device.h>
44 #include <linux/dmi.h>
45 #include <linux/gfp.h>
46 #include <scsi/scsi_host.h>
47 #include <scsi/scsi_cmnd.h>
48 #include <linux/libata.h>
50 #define DRV_NAME "ahci"
51 #define DRV_VERSION "3.0"
53 /* Enclosure Management Control */
54 #define EM_CTRL_MSG_TYPE 0x000f0000
56 /* Enclosure Management LED Message Type */
57 #define EM_MSG_LED_HBA_PORT 0x0000000f
58 #define EM_MSG_LED_PMP_SLOT 0x0000ff00
59 #define EM_MSG_LED_VALUE 0xffff0000
60 #define EM_MSG_LED_VALUE_ACTIVITY 0x00070000
61 #define EM_MSG_LED_VALUE_OFF 0xfff80000
62 #define EM_MSG_LED_VALUE_ON 0x00010000
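/*
 * Editor's note (illustrative sketch, not in the original file): an
 * enclosure-management LED message word packs the HBA port number into
 * bits 3:0, the PMP slot into bits 15:8 and the LED state into bits 31:16,
 * per the masks above.  Composing "LED on" for HBA port 1, PMP slot 0
 * could look like:
 *
 *	u32 msg = (1 & EM_MSG_LED_HBA_PORT) |	/* HBA port 1  */
 *		  (0 << 8) |			/* PMP slot 0  */
 *		  EM_MSG_LED_VALUE_ON;		/* LED on      */
 *
 * ahci_transmit_led_message() further down consumes words in this format.
 */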
64 static int ahci_skip_host_reset;
65 static int ahci_ignore_sss;
67 module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444);
68 MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)");
70 module_param_named(ignore_sss, ahci_ignore_sss, int, 0444);
71 MODULE_PARM_DESC(ignore_sss, "Ignore staggered spinup flag (0=don't ignore, 1=ignore)");
73 static int ahci_enable_alpm(struct ata_port *ap,
74 enum link_pm policy);
75 static void ahci_disable_alpm(struct ata_port *ap);
76 static ssize_t ahci_led_show(struct ata_port *ap, char *buf);
77 static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
78 size_t size);
79 static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
80 ssize_t size);
82 enum {
83 AHCI_PCI_BAR = 5,
84 AHCI_MAX_PORTS = 32,
85 AHCI_MAX_SG = 168, /* hardware max is 64K */
86 AHCI_DMA_BOUNDARY = 0xffffffff,
87 AHCI_MAX_CMDS = 32,
88 AHCI_CMD_SZ = 32,
89 AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ,
90 AHCI_RX_FIS_SZ = 256,
91 AHCI_CMD_TBL_CDB = 0x40,
92 AHCI_CMD_TBL_HDR_SZ = 0x80,
93 AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
94 AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
95 AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
96 AHCI_RX_FIS_SZ,
97 AHCI_PORT_PRIV_FBS_DMA_SZ = AHCI_CMD_SLOT_SZ +
98 AHCI_CMD_TBL_AR_SZ +
99 (AHCI_RX_FIS_SZ * 16),
100 AHCI_IRQ_ON_SG = (1 << 31),
101 AHCI_CMD_ATAPI = (1 << 5),
102 AHCI_CMD_WRITE = (1 << 6),
103 AHCI_CMD_PREFETCH = (1 << 7),
104 AHCI_CMD_RESET = (1 << 8),
105 AHCI_CMD_CLR_BUSY = (1 << 10),
107 RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
108 RX_FIS_SDB = 0x58, /* offset of SDB FIS data */
109 RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */
111 board_ahci = 0,
112 board_ahci_vt8251 = 1,
113 board_ahci_ign_iferr = 2,
114 board_ahci_sb600 = 3,
115 board_ahci_mv = 4,
116 board_ahci_sb700 = 5, /* for SB700 and SB800 */
117 board_ahci_mcp65 = 6,
118 board_ahci_nopmp = 7,
119 board_ahci_yesncq = 8,
120 board_ahci_nosntf = 9,
122 /* global controller registers */
123 HOST_CAP = 0x00, /* host capabilities */
124 HOST_CTL = 0x04, /* global host control */
125 HOST_IRQ_STAT = 0x08, /* interrupt status */
126 HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */
127 HOST_VERSION = 0x10, /* AHCI spec. version compliance */
128 HOST_EM_LOC = 0x1c, /* Enclosure Management location */
129 HOST_EM_CTL = 0x20, /* Enclosure Management Control */
130 HOST_CAP2 = 0x24, /* host capabilities, extended */
132 /* HOST_CTL bits */
133 HOST_RESET = (1 << 0), /* reset controller; self-clear */
134 HOST_IRQ_EN = (1 << 1), /* global IRQ enable */
135 HOST_AHCI_EN = (1 << 31), /* AHCI enabled */
137 /* HOST_CAP bits */
138 HOST_CAP_SXS = (1 << 5), /* Supports External SATA */
139 HOST_CAP_EMS = (1 << 6), /* Enclosure Management support */
140 HOST_CAP_CCC = (1 << 7), /* Command Completion Coalescing */
141 HOST_CAP_PART = (1 << 13), /* Partial state capable */
142 HOST_CAP_SSC = (1 << 14), /* Slumber state capable */
143 HOST_CAP_PIO_MULTI = (1 << 15), /* PIO multiple DRQ support */
144 HOST_CAP_FBS = (1 << 16), /* FIS-based switching support */
145 HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */
146 HOST_CAP_ONLY = (1 << 18), /* Supports AHCI mode only */
147 HOST_CAP_CLO = (1 << 24), /* Command List Override support */
148 HOST_CAP_LED = (1 << 25), /* Supports activity LED */
149 HOST_CAP_ALPM = (1 << 26), /* Aggressive Link PM support */
150 HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */
151 HOST_CAP_MPS = (1 << 28), /* Mechanical presence switch */
152 HOST_CAP_SNTF = (1 << 29), /* SNotification register */
153 HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
154 HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
156 /* HOST_CAP2 bits */
157 HOST_CAP2_BOH = (1 << 0), /* BIOS/OS handoff supported */
158 HOST_CAP2_NVMHCI = (1 << 1), /* NVMHCI supported */
159 HOST_CAP2_APST = (1 << 2), /* Automatic partial to slumber */
161 /* registers for each SATA port */
162 PORT_LST_ADDR = 0x00, /* command list DMA addr */
163 PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */
164 PORT_FIS_ADDR = 0x08, /* FIS rx buf addr */
165 PORT_FIS_ADDR_HI = 0x0c, /* FIS rx buf addr hi */
166 PORT_IRQ_STAT = 0x10, /* interrupt status */
167 PORT_IRQ_MASK = 0x14, /* interrupt enable/disable mask */
168 PORT_CMD = 0x18, /* port command */
169 PORT_TFDATA = 0x20, /* taskfile data */
170 PORT_SIG = 0x24, /* device TF signature */
171 PORT_CMD_ISSUE = 0x38, /* command issue */
172 PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */
173 PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */
174 PORT_SCR_ERR = 0x30, /* SATA phy register: SError */
175 PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */
176 PORT_SCR_NTF = 0x3c, /* SATA phy register: SNotification */
177 PORT_FBS = 0x40, /* FIS-based Switching */
179 /* PORT_IRQ_{STAT,MASK} bits */
180 PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */
181 PORT_IRQ_TF_ERR = (1 << 30), /* task file error */
182 PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */
183 PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */
184 PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */
185 PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */
186 PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */
187 PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */
189 PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */
190 PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */
191 PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */
192 PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */
193 PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */
194 PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */
195 PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */
196 PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */
197 PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */
199 PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR |
200 PORT_IRQ_IF_ERR |
201 PORT_IRQ_CONNECT |
202 PORT_IRQ_PHYRDY |
203 PORT_IRQ_UNK_FIS |
204 PORT_IRQ_BAD_PMP,
205 PORT_IRQ_ERROR = PORT_IRQ_FREEZE |
206 PORT_IRQ_TF_ERR |
207 PORT_IRQ_HBUS_DATA_ERR,
208 DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
209 PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
210 PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
212 /* PORT_CMD bits */
213 PORT_CMD_ASP = (1 << 27), /* Aggressive Slumber/Partial */
214 PORT_CMD_ALPE = (1 << 26), /* Aggressive Link PM enable */
215 PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
216 PORT_CMD_FBSCP = (1 << 22), /* FBS Capable Port */
217 PORT_CMD_PMP = (1 << 17), /* PMP attached */
218 PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
219 PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
220 PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */
221 PORT_CMD_CLO = (1 << 3), /* Command list override */
222 PORT_CMD_POWER_ON = (1 << 2), /* Power up device */
223 PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */
224 PORT_CMD_START = (1 << 0), /* Enable port DMA engine */
226 PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */
227 PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */
228 PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
229 PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */
231 PORT_FBS_DWE_OFFSET = 16, /* FBS device with error offset */
232 PORT_FBS_ADO_OFFSET = 12, /* FBS active dev optimization offset */
233 PORT_FBS_DEV_OFFSET = 8, /* FBS device to issue offset */
234 PORT_FBS_DEV_MASK = (0xf << PORT_FBS_DEV_OFFSET), /* FBS.DEV */
235 PORT_FBS_SDE = (1 << 2), /* FBS single device error */
236 PORT_FBS_DEC = (1 << 1), /* FBS device error clear */
237 PORT_FBS_EN = (1 << 0), /* Enable FBS */
239 /* hpriv->flags bits */
240 AHCI_HFLAG_NO_NCQ = (1 << 0),
241 AHCI_HFLAG_IGN_IRQ_IF_ERR = (1 << 1), /* ignore IRQ_IF_ERR */
242 AHCI_HFLAG_IGN_SERR_INTERNAL = (1 << 2), /* ignore SERR_INTERNAL */
243 AHCI_HFLAG_32BIT_ONLY = (1 << 3), /* force 32bit */
244 AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */
245 AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */
246 AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */
247 AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */
248 AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */
249 AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */
250 AHCI_HFLAG_NO_SUSPEND = (1 << 10), /* don't suspend */
251 AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = (1 << 11), /* treat SRST timeout as
252 link offline */
253 AHCI_HFLAG_NO_SNTF = (1 << 12), /* no sntf */
255 /* ap->flags bits */
257 AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
258 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
259 ATA_FLAG_ACPI_SATA | ATA_FLAG_AN |
260 ATA_FLAG_IPM,
262 ICH_MAP = 0x90, /* ICH MAP register */
264 /* em constants */
265 EM_MAX_SLOTS = 8,
266 EM_MAX_RETRY = 5,
268 /* em_ctl bits */
269 EM_CTL_RST = (1 << 9), /* Reset */
270 EM_CTL_TM = (1 << 8), /* Transmit Message */
271 EM_CTL_ALHD = (1 << 26), /* Activity LED */
274 struct ahci_cmd_hdr {
275 __le32 opts;
276 __le32 status;
277 __le32 tbl_addr;
278 __le32 tbl_addr_hi;
279 __le32 reserved[4];
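/*
 * Editor's note (illustrative, not in the original file): per the AHCI
 * spec the 'opts' dword of a command header carries the command FIS
 * length in dwords in bits 4:0, the AHCI_CMD_* flags defined above
 * (ATAPI, write, prefetch, ...), the target PMP port in bits 15:12 and
 * the PRD table length in bits 31:16.  ahci_exec_polled_cmd() below
 * builds it as "cmd_fis_len | flags | (pmp << 12)", e.g. 5 | (2 << 12)
 * for a plain 5-dword H2D FIS aimed at PMP port 2 with no PRD entries.
 */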
282 struct ahci_sg {
283 __le32 addr;
284 __le32 addr_hi;
285 __le32 reserved;
286 __le32 flags_size;
289 struct ahci_em_priv {
290 enum sw_activity blink_policy;
291 struct timer_list timer;
292 unsigned long saved_activity;
293 unsigned long activity;
294 unsigned long led_state;
297 struct ahci_host_priv {
298 void __iomem *mmio; /* bus-independent mem map */
299 unsigned int flags; /* AHCI_HFLAG_* */
300 u32 cap; /* cap to use */
301 u32 cap2; /* cap2 to use */
302 u32 port_map; /* port map to use */
303 u32 saved_cap; /* saved initial cap */
304 u32 saved_cap2; /* saved initial cap2 */
305 u32 saved_port_map; /* saved initial port_map */
306 u32 em_loc; /* enclosure management location */
309 struct ahci_port_priv {
310 struct ata_link *active_link;
311 struct ahci_cmd_hdr *cmd_slot;
312 dma_addr_t cmd_slot_dma;
313 void *cmd_tbl;
314 dma_addr_t cmd_tbl_dma;
315 void *rx_fis;
316 dma_addr_t rx_fis_dma;
317 /* for NCQ spurious interrupt analysis */
318 unsigned int ncq_saw_d2h:1;
319 unsigned int ncq_saw_dmas:1;
320 unsigned int ncq_saw_sdb:1;
321 u32 intr_mask; /* interrupts to enable */
322 bool fbs_supported; /* set iff FBS is supported */
323 bool fbs_enabled; /* set iff FBS is enabled */
324 int fbs_last_dev; /* save FBS.DEV of last FIS */
325 /* enclosure management info per PM slot */
326 struct ahci_em_priv em_priv[EM_MAX_SLOTS];
329 static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
330 static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
331 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
332 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
333 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
334 static int ahci_port_start(struct ata_port *ap);
335 static void ahci_port_stop(struct ata_port *ap);
336 static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc);
337 static void ahci_qc_prep(struct ata_queued_cmd *qc);
338 static void ahci_freeze(struct ata_port *ap);
339 static void ahci_thaw(struct ata_port *ap);
340 static void ahci_enable_fbs(struct ata_port *ap);
341 static void ahci_disable_fbs(struct ata_port *ap);
342 static void ahci_pmp_attach(struct ata_port *ap);
343 static void ahci_pmp_detach(struct ata_port *ap);
344 static int ahci_softreset(struct ata_link *link, unsigned int *class,
345 unsigned long deadline);
346 static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
347 unsigned long deadline);
348 static int ahci_hardreset(struct ata_link *link, unsigned int *class,
349 unsigned long deadline);
350 static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
351 unsigned long deadline);
352 static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
353 unsigned long deadline);
354 static void ahci_postreset(struct ata_link *link, unsigned int *class);
355 static void ahci_error_handler(struct ata_port *ap);
356 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
357 static int ahci_port_resume(struct ata_port *ap);
358 static void ahci_dev_config(struct ata_device *dev);
359 static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
360 u32 opts);
361 #ifdef CONFIG_PM
362 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
363 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
364 static int ahci_pci_device_resume(struct pci_dev *pdev);
365 #endif
366 static ssize_t ahci_activity_show(struct ata_device *dev, char *buf);
367 static ssize_t ahci_activity_store(struct ata_device *dev,
368 enum sw_activity val);
369 static void ahci_init_sw_activity(struct ata_link *link);
371 static ssize_t ahci_show_host_caps(struct device *dev,
372 struct device_attribute *attr, char *buf);
373 static ssize_t ahci_show_host_cap2(struct device *dev,
374 struct device_attribute *attr, char *buf);
375 static ssize_t ahci_show_host_version(struct device *dev,
376 struct device_attribute *attr, char *buf);
377 static ssize_t ahci_show_port_cmd(struct device *dev,
378 struct device_attribute *attr, char *buf);
380 static DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL);
381 static DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL);
382 static DEVICE_ATTR(ahci_host_version, S_IRUGO, ahci_show_host_version, NULL);
383 static DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL);
385 static struct device_attribute *ahci_shost_attrs[] = {
386 &dev_attr_link_power_management_policy,
387 &dev_attr_em_message_type,
388 &dev_attr_em_message,
389 &dev_attr_ahci_host_caps,
390 &dev_attr_ahci_host_cap2,
391 &dev_attr_ahci_host_version,
392 &dev_attr_ahci_port_cmd,
393 NULL
396 static struct device_attribute *ahci_sdev_attrs[] = {
397 &dev_attr_sw_activity,
398 &dev_attr_unload_heads,
399 NULL
402 static struct scsi_host_template ahci_sht = {
403 ATA_NCQ_SHT(DRV_NAME),
404 .can_queue = AHCI_MAX_CMDS - 1,
405 .sg_tablesize = AHCI_MAX_SG,
406 .dma_boundary = AHCI_DMA_BOUNDARY,
407 .shost_attrs = ahci_shost_attrs,
408 .sdev_attrs = ahci_sdev_attrs,
411 static struct ata_port_operations ahci_ops = {
412 .inherits = &sata_pmp_port_ops,
414 .qc_defer = ahci_pmp_qc_defer,
415 .qc_prep = ahci_qc_prep,
416 .qc_issue = ahci_qc_issue,
417 .qc_fill_rtf = ahci_qc_fill_rtf,
419 .freeze = ahci_freeze,
420 .thaw = ahci_thaw,
421 .softreset = ahci_softreset,
422 .hardreset = ahci_hardreset,
423 .postreset = ahci_postreset,
424 .pmp_softreset = ahci_softreset,
425 .error_handler = ahci_error_handler,
426 .post_internal_cmd = ahci_post_internal_cmd,
427 .dev_config = ahci_dev_config,
429 .scr_read = ahci_scr_read,
430 .scr_write = ahci_scr_write,
431 .pmp_attach = ahci_pmp_attach,
432 .pmp_detach = ahci_pmp_detach,
434 .enable_pm = ahci_enable_alpm,
435 .disable_pm = ahci_disable_alpm,
436 .em_show = ahci_led_show,
437 .em_store = ahci_led_store,
438 .sw_activity_show = ahci_activity_show,
439 .sw_activity_store = ahci_activity_store,
440 #ifdef CONFIG_PM
441 .port_suspend = ahci_port_suspend,
442 .port_resume = ahci_port_resume,
443 #endif
444 .port_start = ahci_port_start,
445 .port_stop = ahci_port_stop,
448 static struct ata_port_operations ahci_vt8251_ops = {
449 .inherits = &ahci_ops,
450 .hardreset = ahci_vt8251_hardreset,
453 static struct ata_port_operations ahci_p5wdh_ops = {
454 .inherits = &ahci_ops,
455 .hardreset = ahci_p5wdh_hardreset,
458 static struct ata_port_operations ahci_sb600_ops = {
459 .inherits = &ahci_ops,
460 .softreset = ahci_sb600_softreset,
461 .pmp_softreset = ahci_sb600_softreset,
464 #define AHCI_HFLAGS(flags) .private_data = (void *)(flags)
466 static const struct ata_port_info ahci_port_info[] = {
467 [board_ahci] =
469 .flags = AHCI_FLAG_COMMON,
470 .pio_mask = ATA_PIO4,
471 .udma_mask = ATA_UDMA6,
472 .port_ops = &ahci_ops,
474 [board_ahci_vt8251] =
476 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP),
477 .flags = AHCI_FLAG_COMMON,
478 .pio_mask = ATA_PIO4,
479 .udma_mask = ATA_UDMA6,
480 .port_ops = &ahci_vt8251_ops,
482 [board_ahci_ign_iferr] =
484 AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR),
485 .flags = AHCI_FLAG_COMMON,
486 .pio_mask = ATA_PIO4,
487 .udma_mask = ATA_UDMA6,
488 .port_ops = &ahci_ops,
490 [board_ahci_sb600] =
492 AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL |
493 AHCI_HFLAG_NO_MSI | AHCI_HFLAG_SECT255 |
494 AHCI_HFLAG_32BIT_ONLY),
495 .flags = AHCI_FLAG_COMMON,
496 .pio_mask = ATA_PIO4,
497 .udma_mask = ATA_UDMA6,
498 .port_ops = &ahci_sb600_ops,
500 [board_ahci_mv] =
502 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
503 AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP),
504 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
505 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
506 .pio_mask = ATA_PIO4,
507 .udma_mask = ATA_UDMA6,
508 .port_ops = &ahci_ops,
510 [board_ahci_sb700] = /* for SB700 and SB800 */
512 AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL),
513 .flags = AHCI_FLAG_COMMON,
514 .pio_mask = ATA_PIO4,
515 .udma_mask = ATA_UDMA6,
516 .port_ops = &ahci_sb600_ops,
518 [board_ahci_mcp65] =
520 AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ),
521 .flags = AHCI_FLAG_COMMON,
522 .pio_mask = ATA_PIO4,
523 .udma_mask = ATA_UDMA6,
524 .port_ops = &ahci_ops,
526 [board_ahci_nopmp] =
528 AHCI_HFLAGS (AHCI_HFLAG_NO_PMP),
529 .flags = AHCI_FLAG_COMMON,
530 .pio_mask = ATA_PIO4,
531 .udma_mask = ATA_UDMA6,
532 .port_ops = &ahci_ops,
534 [board_ahci_yesncq] =
536 AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ),
537 .flags = AHCI_FLAG_COMMON,
538 .pio_mask = ATA_PIO4,
539 .udma_mask = ATA_UDMA6,
540 .port_ops = &ahci_ops,
542 [board_ahci_nosntf] =
544 AHCI_HFLAGS (AHCI_HFLAG_NO_SNTF),
545 .flags = AHCI_FLAG_COMMON,
546 .pio_mask = ATA_PIO4,
547 .udma_mask = ATA_UDMA6,
548 .port_ops = &ahci_ops,
552 static const struct pci_device_id ahci_pci_tbl[] = {
553 /* Intel */
554 { PCI_VDEVICE(INTEL, 0x2652), board_ahci }, /* ICH6 */
555 { PCI_VDEVICE(INTEL, 0x2653), board_ahci }, /* ICH6M */
556 { PCI_VDEVICE(INTEL, 0x27c1), board_ahci }, /* ICH7 */
557 { PCI_VDEVICE(INTEL, 0x27c5), board_ahci }, /* ICH7M */
558 { PCI_VDEVICE(INTEL, 0x27c3), board_ahci }, /* ICH7R */
559 { PCI_VDEVICE(AL, 0x5288), board_ahci_ign_iferr }, /* ULi M5288 */
560 { PCI_VDEVICE(INTEL, 0x2681), board_ahci }, /* ESB2 */
561 { PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */
562 { PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */
563 { PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */
564 { PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */
565 { PCI_VDEVICE(INTEL, 0x2822), board_ahci_nosntf }, /* ICH8 */
566 { PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */
567 { PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */
568 { PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */
569 { PCI_VDEVICE(INTEL, 0x2922), board_ahci }, /* ICH9 */
570 { PCI_VDEVICE(INTEL, 0x2923), board_ahci }, /* ICH9 */
571 { PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */
572 { PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */
573 { PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */
574 { PCI_VDEVICE(INTEL, 0x2929), board_ahci }, /* ICH9M */
575 { PCI_VDEVICE(INTEL, 0x292a), board_ahci }, /* ICH9M */
576 { PCI_VDEVICE(INTEL, 0x292b), board_ahci }, /* ICH9M */
577 { PCI_VDEVICE(INTEL, 0x292c), board_ahci }, /* ICH9M */
578 { PCI_VDEVICE(INTEL, 0x292f), board_ahci }, /* ICH9M */
579 { PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */
580 { PCI_VDEVICE(INTEL, 0x294e), board_ahci }, /* ICH9M */
581 { PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */
582 { PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */
583 { PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */
584 { PCI_VDEVICE(INTEL, 0x3a22), board_ahci }, /* ICH10 */
585 { PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */
586 { PCI_VDEVICE(INTEL, 0x3b22), board_ahci }, /* PCH AHCI */
587 { PCI_VDEVICE(INTEL, 0x3b23), board_ahci }, /* PCH AHCI */
588 { PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */
589 { PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */
590 { PCI_VDEVICE(INTEL, 0x3b29), board_ahci }, /* PCH AHCI */
591 { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
592 { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
593 { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
594 { PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
595 { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */
596 { PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */
597 { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */
598 { PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */
599 { PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */
601 /* JMicron 360/1/3/5/6, match class to avoid IDE function */
602 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
603 PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci_ign_iferr },
605 /* ATI */
606 { PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */
607 { PCI_VDEVICE(ATI, 0x4390), board_ahci_sb700 }, /* ATI SB700/800 */
608 { PCI_VDEVICE(ATI, 0x4391), board_ahci_sb700 }, /* ATI SB700/800 */
609 { PCI_VDEVICE(ATI, 0x4392), board_ahci_sb700 }, /* ATI SB700/800 */
610 { PCI_VDEVICE(ATI, 0x4393), board_ahci_sb700 }, /* ATI SB700/800 */
611 { PCI_VDEVICE(ATI, 0x4394), board_ahci_sb700 }, /* ATI SB700/800 */
612 { PCI_VDEVICE(ATI, 0x4395), board_ahci_sb700 }, /* ATI SB700/800 */
614 /* AMD */
615 { PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD Hudson-2 */
616 /* AMD is using RAID class only for ahci controllers */
617 { PCI_VENDOR_ID_AMD, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
618 PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci },
620 /* VIA */
621 { PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */
622 { PCI_VDEVICE(VIA, 0x6287), board_ahci_vt8251 }, /* VIA VT8251 */
624 /* NVIDIA */
625 { PCI_VDEVICE(NVIDIA, 0x044c), board_ahci_mcp65 }, /* MCP65 */
626 { PCI_VDEVICE(NVIDIA, 0x044d), board_ahci_mcp65 }, /* MCP65 */
627 { PCI_VDEVICE(NVIDIA, 0x044e), board_ahci_mcp65 }, /* MCP65 */
628 { PCI_VDEVICE(NVIDIA, 0x044f), board_ahci_mcp65 }, /* MCP65 */
629 { PCI_VDEVICE(NVIDIA, 0x045c), board_ahci_mcp65 }, /* MCP65 */
630 { PCI_VDEVICE(NVIDIA, 0x045d), board_ahci_mcp65 }, /* MCP65 */
631 { PCI_VDEVICE(NVIDIA, 0x045e), board_ahci_mcp65 }, /* MCP65 */
632 { PCI_VDEVICE(NVIDIA, 0x045f), board_ahci_mcp65 }, /* MCP65 */
633 { PCI_VDEVICE(NVIDIA, 0x0550), board_ahci_yesncq }, /* MCP67 */
634 { PCI_VDEVICE(NVIDIA, 0x0551), board_ahci_yesncq }, /* MCP67 */
635 { PCI_VDEVICE(NVIDIA, 0x0552), board_ahci_yesncq }, /* MCP67 */
636 { PCI_VDEVICE(NVIDIA, 0x0553), board_ahci_yesncq }, /* MCP67 */
637 { PCI_VDEVICE(NVIDIA, 0x0554), board_ahci_yesncq }, /* MCP67 */
638 { PCI_VDEVICE(NVIDIA, 0x0555), board_ahci_yesncq }, /* MCP67 */
639 { PCI_VDEVICE(NVIDIA, 0x0556), board_ahci_yesncq }, /* MCP67 */
640 { PCI_VDEVICE(NVIDIA, 0x0557), board_ahci_yesncq }, /* MCP67 */
641 { PCI_VDEVICE(NVIDIA, 0x0558), board_ahci_yesncq }, /* MCP67 */
642 { PCI_VDEVICE(NVIDIA, 0x0559), board_ahci_yesncq }, /* MCP67 */
643 { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci_yesncq }, /* MCP67 */
644 { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci_yesncq }, /* MCP67 */
645 { PCI_VDEVICE(NVIDIA, 0x0580), board_ahci_yesncq }, /* Linux ID */
646 { PCI_VDEVICE(NVIDIA, 0x0581), board_ahci_yesncq }, /* Linux ID */
647 { PCI_VDEVICE(NVIDIA, 0x0582), board_ahci_yesncq }, /* Linux ID */
648 { PCI_VDEVICE(NVIDIA, 0x0583), board_ahci_yesncq }, /* Linux ID */
649 { PCI_VDEVICE(NVIDIA, 0x0584), board_ahci_yesncq }, /* Linux ID */
650 { PCI_VDEVICE(NVIDIA, 0x0585), board_ahci_yesncq }, /* Linux ID */
651 { PCI_VDEVICE(NVIDIA, 0x0586), board_ahci_yesncq }, /* Linux ID */
652 { PCI_VDEVICE(NVIDIA, 0x0587), board_ahci_yesncq }, /* Linux ID */
653 { PCI_VDEVICE(NVIDIA, 0x0588), board_ahci_yesncq }, /* Linux ID */
654 { PCI_VDEVICE(NVIDIA, 0x0589), board_ahci_yesncq }, /* Linux ID */
655 { PCI_VDEVICE(NVIDIA, 0x058a), board_ahci_yesncq }, /* Linux ID */
656 { PCI_VDEVICE(NVIDIA, 0x058b), board_ahci_yesncq }, /* Linux ID */
657 { PCI_VDEVICE(NVIDIA, 0x058c), board_ahci_yesncq }, /* Linux ID */
658 { PCI_VDEVICE(NVIDIA, 0x058d), board_ahci_yesncq }, /* Linux ID */
659 { PCI_VDEVICE(NVIDIA, 0x058e), board_ahci_yesncq }, /* Linux ID */
660 { PCI_VDEVICE(NVIDIA, 0x058f), board_ahci_yesncq }, /* Linux ID */
661 { PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci_yesncq }, /* MCP73 */
662 { PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci_yesncq }, /* MCP73 */
663 { PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci_yesncq }, /* MCP73 */
664 { PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci_yesncq }, /* MCP73 */
665 { PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci_yesncq }, /* MCP73 */
666 { PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci_yesncq }, /* MCP73 */
667 { PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci_yesncq }, /* MCP73 */
668 { PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci_yesncq }, /* MCP73 */
669 { PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci_yesncq }, /* MCP73 */
670 { PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci_yesncq }, /* MCP73 */
671 { PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci_yesncq }, /* MCP73 */
672 { PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci_yesncq }, /* MCP73 */
673 { PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci }, /* MCP77 */
674 { PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci }, /* MCP77 */
675 { PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci }, /* MCP77 */
676 { PCI_VDEVICE(NVIDIA, 0x0ad3), board_ahci }, /* MCP77 */
677 { PCI_VDEVICE(NVIDIA, 0x0ad4), board_ahci }, /* MCP77 */
678 { PCI_VDEVICE(NVIDIA, 0x0ad5), board_ahci }, /* MCP77 */
679 { PCI_VDEVICE(NVIDIA, 0x0ad6), board_ahci }, /* MCP77 */
680 { PCI_VDEVICE(NVIDIA, 0x0ad7), board_ahci }, /* MCP77 */
681 { PCI_VDEVICE(NVIDIA, 0x0ad8), board_ahci }, /* MCP77 */
682 { PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci }, /* MCP77 */
683 { PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci }, /* MCP77 */
684 { PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci }, /* MCP77 */
685 { PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci }, /* MCP79 */
686 { PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci }, /* MCP79 */
687 { PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci }, /* MCP79 */
688 { PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci }, /* MCP79 */
689 { PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci }, /* MCP79 */
690 { PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci }, /* MCP79 */
691 { PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci }, /* MCP79 */
692 { PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci }, /* MCP79 */
693 { PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci }, /* MCP79 */
694 { PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci }, /* MCP79 */
695 { PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci }, /* MCP79 */
696 { PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci }, /* MCP79 */
697 { PCI_VDEVICE(NVIDIA, 0x0d84), board_ahci }, /* MCP89 */
698 { PCI_VDEVICE(NVIDIA, 0x0d85), board_ahci }, /* MCP89 */
699 { PCI_VDEVICE(NVIDIA, 0x0d86), board_ahci }, /* MCP89 */
700 { PCI_VDEVICE(NVIDIA, 0x0d87), board_ahci }, /* MCP89 */
701 { PCI_VDEVICE(NVIDIA, 0x0d88), board_ahci }, /* MCP89 */
702 { PCI_VDEVICE(NVIDIA, 0x0d89), board_ahci }, /* MCP89 */
703 { PCI_VDEVICE(NVIDIA, 0x0d8a), board_ahci }, /* MCP89 */
704 { PCI_VDEVICE(NVIDIA, 0x0d8b), board_ahci }, /* MCP89 */
705 { PCI_VDEVICE(NVIDIA, 0x0d8c), board_ahci }, /* MCP89 */
706 { PCI_VDEVICE(NVIDIA, 0x0d8d), board_ahci }, /* MCP89 */
707 { PCI_VDEVICE(NVIDIA, 0x0d8e), board_ahci }, /* MCP89 */
708 { PCI_VDEVICE(NVIDIA, 0x0d8f), board_ahci }, /* MCP89 */
710 /* SiS */
711 { PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */
712 { PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 968 */
713 { PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */
715 /* Marvell */
716 { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
717 { PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */
719 /* Promise */
720 { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
722 /* Generic, PCI class code for AHCI */
723 { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
724 PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
726 { } /* terminate list */
730 static struct pci_driver ahci_pci_driver = {
731 .name = DRV_NAME,
732 .id_table = ahci_pci_tbl,
733 .probe = ahci_init_one,
734 .remove = ata_pci_remove_one,
735 #ifdef CONFIG_PM
736 .suspend = ahci_pci_device_suspend,
737 .resume = ahci_pci_device_resume,
738 #endif
741 static int ahci_em_messages = 1;
742 module_param(ahci_em_messages, int, 0444);
743 /* add other LED protocol types when they become supported */
744 MODULE_PARM_DESC(ahci_em_messages,
745 "Set AHCI Enclosure Management Message type (0 = disabled, 1 = LED");
747 #if defined(CONFIG_PATA_MARVELL) || defined(CONFIG_PATA_MARVELL_MODULE)
748 static int marvell_enable;
749 #else
750 static int marvell_enable = 1;
751 #endif
752 module_param(marvell_enable, int, 0644);
753 MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)");
756 static inline int ahci_nr_ports(u32 cap)
758 return (cap & 0x1f) + 1;
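/*
 * Editor's note: CAP.NP (bits 4:0) is zero-based, hence the "+ 1" above;
 * e.g. a raw field value of 0x05 means the HBA exposes six ports.
 */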
761 static inline void __iomem *__ahci_port_base(struct ata_host *host,
762 unsigned int port_no)
764 struct ahci_host_priv *hpriv = host->private_data;
765 void __iomem *mmio = hpriv->mmio;
767 return mmio + 0x100 + (port_no * 0x80);
770 static inline void __iomem *ahci_port_base(struct ata_port *ap)
772 return __ahci_port_base(ap->host, ap->port_no);
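/*
 * Editor's note: per the AHCI spec each port owns an 0x80-byte register
 * block starting at ABAR offset 0x100, so e.g. port 2 is reached at
 * hpriv->mmio + 0x100 + 2 * 0x80 == mmio + 0x200.
 */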
775 static void ahci_enable_ahci(void __iomem *mmio)
777 int i;
778 u32 tmp;
780 /* turn on AHCI_EN */
781 tmp = readl(mmio + HOST_CTL);
782 if (tmp & HOST_AHCI_EN)
783 return;
785 /* Some controllers need AHCI_EN to be written multiple times.
786 * Try a few times before giving up.
788 for (i = 0; i < 5; i++) {
789 tmp |= HOST_AHCI_EN;
790 writel(tmp, mmio + HOST_CTL);
791 tmp = readl(mmio + HOST_CTL); /* flush && sanity check */
792 if (tmp & HOST_AHCI_EN)
793 return;
794 msleep(10);
797 WARN_ON(1);
800 static ssize_t ahci_show_host_caps(struct device *dev,
801 struct device_attribute *attr, char *buf)
803 struct Scsi_Host *shost = class_to_shost(dev);
804 struct ata_port *ap = ata_shost_to_port(shost);
805 struct ahci_host_priv *hpriv = ap->host->private_data;
807 return sprintf(buf, "%x\n", hpriv->cap);
810 static ssize_t ahci_show_host_cap2(struct device *dev,
811 struct device_attribute *attr, char *buf)
813 struct Scsi_Host *shost = class_to_shost(dev);
814 struct ata_port *ap = ata_shost_to_port(shost);
815 struct ahci_host_priv *hpriv = ap->host->private_data;
817 return sprintf(buf, "%x\n", hpriv->cap2);
820 static ssize_t ahci_show_host_version(struct device *dev,
821 struct device_attribute *attr, char *buf)
823 struct Scsi_Host *shost = class_to_shost(dev);
824 struct ata_port *ap = ata_shost_to_port(shost);
825 struct ahci_host_priv *hpriv = ap->host->private_data;
826 void __iomem *mmio = hpriv->mmio;
828 return sprintf(buf, "%x\n", readl(mmio + HOST_VERSION));
831 static ssize_t ahci_show_port_cmd(struct device *dev,
832 struct device_attribute *attr, char *buf)
834 struct Scsi_Host *shost = class_to_shost(dev);
835 struct ata_port *ap = ata_shost_to_port(shost);
836 void __iomem *port_mmio = ahci_port_base(ap);
838 return sprintf(buf, "%x\n", readl(port_mmio + PORT_CMD));
842 * ahci_save_initial_config - Save and fixup initial config values
843 * @dev: target AHCI device
844 * @hpriv: host private area to store config values
845 * @force_port_map: force port map to a specified value
846 * @mask_port_map: mask out particular bits from port map
848 * Some registers containing configuration info might be set up by
849 * BIOS and might be cleared on reset. This function saves the
850 * initial values of those registers into @hpriv such that they
851 * can be restored after controller reset.
853 * If inconsistent, config values are fixed up by this function.
855 * LOCKING:
856 * None.
858 static void ahci_save_initial_config(struct device *dev,
859 struct ahci_host_priv *hpriv,
860 unsigned int force_port_map,
861 unsigned int mask_port_map)
863 void __iomem *mmio = hpriv->mmio;
864 u32 cap, cap2, vers, port_map;
865 int i;
867 /* make sure AHCI mode is enabled before accessing CAP */
868 ahci_enable_ahci(mmio);
870 /* Values prefixed with saved_ are written back to host after
871 * reset. Values without are used for driver operation.
873 hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
874 hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);
876 /* CAP2 register is only defined for AHCI 1.2 and later */
877 vers = readl(mmio + HOST_VERSION);
878 if ((vers >> 16) > 1 ||
879 ((vers >> 16) == 1 && (vers & 0xFFFF) >= 0x200))
880 hpriv->saved_cap2 = cap2 = readl(mmio + HOST_CAP2);
881 else
882 hpriv->saved_cap2 = cap2 = 0;
884 /* some chips have errata preventing 64bit use */
885 if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
886 dev_printk(KERN_INFO, dev,
887 "controller can't do 64bit DMA, forcing 32bit\n");
888 cap &= ~HOST_CAP_64;
891 if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
892 dev_printk(KERN_INFO, dev,
893 "controller can't do NCQ, turning off CAP_NCQ\n");
894 cap &= ~HOST_CAP_NCQ;
897 if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) {
898 dev_printk(KERN_INFO, dev,
899 "controller can do NCQ, turning on CAP_NCQ\n");
900 cap |= HOST_CAP_NCQ;
903 if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
904 dev_printk(KERN_INFO, dev,
905 "controller can't do PMP, turning off CAP_PMP\n");
906 cap &= ~HOST_CAP_PMP;
909 if ((cap & HOST_CAP_SNTF) && (hpriv->flags & AHCI_HFLAG_NO_SNTF)) {
910 dev_printk(KERN_INFO, dev,
911 "controller can't do SNTF, turning off CAP_SNTF\n");
912 cap &= ~HOST_CAP_SNTF;
915 if (force_port_map && port_map != force_port_map) {
916 dev_printk(KERN_INFO, dev, "forcing port_map 0x%x -> 0x%x\n",
917 port_map, force_port_map);
918 port_map = force_port_map;
921 if (mask_port_map) {
922 dev_printk(KERN_ERR, dev, "masking port_map 0x%x -> 0x%x\n",
923 port_map,
924 port_map & mask_port_map);
925 port_map &= mask_port_map;
928 /* cross check port_map and cap.n_ports */
929 if (port_map) {
930 int map_ports = 0;
932 for (i = 0; i < AHCI_MAX_PORTS; i++)
933 if (port_map & (1 << i))
934 map_ports++;
936 /* If PI has more ports than n_ports, whine, clear
937 * port_map and let it be generated from n_ports.
939 if (map_ports > ahci_nr_ports(cap)) {
940 dev_printk(KERN_WARNING, dev,
941 "implemented port map (0x%x) contains more "
942 "ports than nr_ports (%u), using nr_ports\n",
943 port_map, ahci_nr_ports(cap));
944 port_map = 0;
948 /* fabricate port_map from cap.nr_ports */
949 if (!port_map) {
950 port_map = (1 << ahci_nr_ports(cap)) - 1;
951 dev_printk(KERN_WARNING, dev,
952 "forcing PORTS_IMPL to 0x%x\n", port_map);
954 /* write the fixed up value to the PI register */
955 hpriv->saved_port_map = port_map;
958 /* record values to use during operation */
959 hpriv->cap = cap;
960 hpriv->cap2 = cap2;
961 hpriv->port_map = port_map;
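/*
 * Editor's note (worked example, not in the original file): if PORTS_IMPL
 * reads back as zero but CAP.NP reports four ports, the fabrication above
 * yields port_map = (1 << 4) - 1 = 0xf, and that value is also stored in
 * saved_port_map so ahci_restore_initial_config() writes back a sane PI.
 */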
964 static void ahci_pci_save_initial_config(struct pci_dev *pdev,
965 struct ahci_host_priv *hpriv)
967 unsigned int force_port_map = 0;
968 unsigned int mask_port_map = 0;
970 if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361) {
971 dev_info(&pdev->dev, "JMB361 has only one port\n");
972 force_port_map = 1;
976 * Temporary Marvell 6145 hack: PATA port presence
977 * is asserted through the standard AHCI port
978 * presence register, as bit 4 (counting from 0)
980 if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
981 if (pdev->device == 0x6121)
982 mask_port_map = 0x3;
983 else
984 mask_port_map = 0xf;
985 dev_info(&pdev->dev,
986 "Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n");
989 ahci_save_initial_config(&pdev->dev, hpriv, force_port_map,
990 mask_port_map);
994 * ahci_restore_initial_config - Restore initial config
995 * @host: target ATA host
997 * Restore initial config stored by ahci_save_initial_config().
999 * LOCKING:
1000 * None.
1002 static void ahci_restore_initial_config(struct ata_host *host)
1004 struct ahci_host_priv *hpriv = host->private_data;
1005 void __iomem *mmio = hpriv->mmio;
1007 writel(hpriv->saved_cap, mmio + HOST_CAP);
1008 if (hpriv->saved_cap2)
1009 writel(hpriv->saved_cap2, mmio + HOST_CAP2);
1010 writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL);
1011 (void) readl(mmio + HOST_PORTS_IMPL); /* flush */
1014 static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
1016 static const int offset[] = {
1017 [SCR_STATUS] = PORT_SCR_STAT,
1018 [SCR_CONTROL] = PORT_SCR_CTL,
1019 [SCR_ERROR] = PORT_SCR_ERR,
1020 [SCR_ACTIVE] = PORT_SCR_ACT,
1021 [SCR_NOTIFICATION] = PORT_SCR_NTF,
1023 struct ahci_host_priv *hpriv = ap->host->private_data;
1025 if (sc_reg < ARRAY_SIZE(offset) &&
1026 (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF)))
1027 return offset[sc_reg];
1028 return 0;
1031 static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
1033 void __iomem *port_mmio = ahci_port_base(link->ap);
1034 int offset = ahci_scr_offset(link->ap, sc_reg);
1036 if (offset) {
1037 *val = readl(port_mmio + offset);
1038 return 0;
1040 return -EINVAL;
1043 static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
1045 void __iomem *port_mmio = ahci_port_base(link->ap);
1046 int offset = ahci_scr_offset(link->ap, sc_reg);
1048 if (offset) {
1049 writel(val, port_mmio + offset);
1050 return 0;
1052 return -EINVAL;
1055 static void ahci_start_engine(struct ata_port *ap)
1057 void __iomem *port_mmio = ahci_port_base(ap);
1058 u32 tmp;
1060 /* start DMA */
1061 tmp = readl(port_mmio + PORT_CMD);
1062 tmp |= PORT_CMD_START;
1063 writel(tmp, port_mmio + PORT_CMD);
1064 readl(port_mmio + PORT_CMD); /* flush */
1067 static int ahci_stop_engine(struct ata_port *ap)
1069 void __iomem *port_mmio = ahci_port_base(ap);
1070 u32 tmp;
1072 tmp = readl(port_mmio + PORT_CMD);
1074 /* check if the HBA is idle */
1075 if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
1076 return 0;
1078 /* setting HBA to idle */
1079 tmp &= ~PORT_CMD_START;
1080 writel(tmp, port_mmio + PORT_CMD);
1082 /* wait for engine to stop. This could be as long as 500 msec */
1083 tmp = ata_wait_register(port_mmio + PORT_CMD,
1084 PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
1085 if (tmp & PORT_CMD_LIST_ON)
1086 return -EIO;
1088 return 0;
1091 static void ahci_start_fis_rx(struct ata_port *ap)
1093 void __iomem *port_mmio = ahci_port_base(ap);
1094 struct ahci_host_priv *hpriv = ap->host->private_data;
1095 struct ahci_port_priv *pp = ap->private_data;
1096 u32 tmp;
1098 /* set FIS registers */
1099 if (hpriv->cap & HOST_CAP_64)
1100 writel((pp->cmd_slot_dma >> 16) >> 16,
1101 port_mmio + PORT_LST_ADDR_HI);
1102 writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
1104 if (hpriv->cap & HOST_CAP_64)
1105 writel((pp->rx_fis_dma >> 16) >> 16,
1106 port_mmio + PORT_FIS_ADDR_HI);
1107 writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
1109 /* enable FIS reception */
1110 tmp = readl(port_mmio + PORT_CMD);
1111 tmp |= PORT_CMD_FIS_RX;
1112 writel(tmp, port_mmio + PORT_CMD);
1114 /* flush */
1115 readl(port_mmio + PORT_CMD);
1118 static int ahci_stop_fis_rx(struct ata_port *ap)
1120 void __iomem *port_mmio = ahci_port_base(ap);
1121 u32 tmp;
1123 /* disable FIS reception */
1124 tmp = readl(port_mmio + PORT_CMD);
1125 tmp &= ~PORT_CMD_FIS_RX;
1126 writel(tmp, port_mmio + PORT_CMD);
1128 /* wait for completion, spec says 500ms, give it 1000 */
1129 tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
1130 PORT_CMD_FIS_ON, 10, 1000);
1131 if (tmp & PORT_CMD_FIS_ON)
1132 return -EBUSY;
1134 return 0;
1137 static void ahci_power_up(struct ata_port *ap)
1139 struct ahci_host_priv *hpriv = ap->host->private_data;
1140 void __iomem *port_mmio = ahci_port_base(ap);
1141 u32 cmd;
1143 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
1145 /* spin up device */
1146 if (hpriv->cap & HOST_CAP_SSS) {
1147 cmd |= PORT_CMD_SPIN_UP;
1148 writel(cmd, port_mmio + PORT_CMD);
1151 /* wake up link */
1152 writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
1155 static void ahci_disable_alpm(struct ata_port *ap)
1157 struct ahci_host_priv *hpriv = ap->host->private_data;
1158 void __iomem *port_mmio = ahci_port_base(ap);
1159 u32 cmd;
1160 struct ahci_port_priv *pp = ap->private_data;
1162 /* IPM bits should be disabled by libata-core */
1163 /* get the existing command bits */
1164 cmd = readl(port_mmio + PORT_CMD);
1166 /* disable ALPM and ASP */
1167 cmd &= ~PORT_CMD_ASP;
1168 cmd &= ~PORT_CMD_ALPE;
1170 /* force the interface back to active */
1171 cmd |= PORT_CMD_ICC_ACTIVE;
1173 /* write out new cmd value */
1174 writel(cmd, port_mmio + PORT_CMD);
1175 cmd = readl(port_mmio + PORT_CMD);
1177 /* wait 10ms to be sure we've come out of any low power state */
1178 msleep(10);
1180 /* clear out any PhyRdy stuff from interrupt status */
1181 writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT);
1183 /* go ahead and clean out PhyRdy Change from Serror too */
1184 ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
1187 * Clear flag to indicate that we should ignore all PhyRdy
1188 * state changes
1190 hpriv->flags &= ~AHCI_HFLAG_NO_HOTPLUG;
1193 * Enable interrupts on Phy Ready.
1195 pp->intr_mask |= PORT_IRQ_PHYRDY;
1196 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1199 * don't change the link pm policy - we can be called
1200 * just to turn off link pm temporarily
1204 static int ahci_enable_alpm(struct ata_port *ap,
1205 enum link_pm policy)
1207 struct ahci_host_priv *hpriv = ap->host->private_data;
1208 void __iomem *port_mmio = ahci_port_base(ap);
1209 u32 cmd;
1210 struct ahci_port_priv *pp = ap->private_data;
1211 u32 asp;
1213 /* Make sure the host is capable of link power management */
1214 if (!(hpriv->cap & HOST_CAP_ALPM))
1215 return -EINVAL;
1217 switch (policy) {
1218 case MAX_PERFORMANCE:
1219 case NOT_AVAILABLE:
1221 * if we came here with NOT_AVAILABLE,
1222 * it just means this is the first time we
1223 * have tried to enable - default to max performance,
1224 * and let the user go to lower power modes on request.
1226 ahci_disable_alpm(ap);
1227 return 0;
1228 case MIN_POWER:
1229 /* configure HBA to enter SLUMBER */
1230 asp = PORT_CMD_ASP;
1231 break;
1232 case MEDIUM_POWER:
1233 /* configure HBA to enter PARTIAL */
1234 asp = 0;
1235 break;
1236 default:
1237 return -EINVAL;
1241 * Disable interrupts on Phy Ready. This keeps us from
1242 * getting woken up due to spurious phy ready interrupts
1243 * TBD - Hot plug should be done via polling now, is
1244 * that even supported?
1246 pp->intr_mask &= ~PORT_IRQ_PHYRDY;
1247 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1250 * Set a flag to indicate that we should ignore all PhyRdy
1251 * state changes since these can happen now whenever we
1252 * change link state
1254 hpriv->flags |= AHCI_HFLAG_NO_HOTPLUG;
1256 /* get the existing command bits */
1257 cmd = readl(port_mmio + PORT_CMD);
1260 * Set ASP based on Policy
1262 cmd |= asp;
1265 * Setting this bit will instruct the HBA to aggressively
1266 * enter a lower power link state when it's appropriate and
1267 * based on the value set above for ASP
1269 cmd |= PORT_CMD_ALPE;
1271 /* write out new cmd value */
1272 writel(cmd, port_mmio + PORT_CMD);
1273 cmd = readl(port_mmio + PORT_CMD);
1275 /* IPM bits should be set by libata-core */
1276 return 0;
1279 #ifdef CONFIG_PM
1280 static void ahci_power_down(struct ata_port *ap)
1282 struct ahci_host_priv *hpriv = ap->host->private_data;
1283 void __iomem *port_mmio = ahci_port_base(ap);
1284 u32 cmd, scontrol;
1286 if (!(hpriv->cap & HOST_CAP_SSS))
1287 return;
1289 /* put device into listen mode, first set PxSCTL.DET to 0 */
1290 scontrol = readl(port_mmio + PORT_SCR_CTL);
1291 scontrol &= ~0xf;
1292 writel(scontrol, port_mmio + PORT_SCR_CTL);
1294 /* then set PxCMD.SUD to 0 */
1295 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
1296 cmd &= ~PORT_CMD_SPIN_UP;
1297 writel(cmd, port_mmio + PORT_CMD);
1299 #endif
1301 static void ahci_start_port(struct ata_port *ap)
1303 struct ahci_port_priv *pp = ap->private_data;
1304 struct ata_link *link;
1305 struct ahci_em_priv *emp;
1306 ssize_t rc;
1307 int i;
1309 /* enable FIS reception */
1310 ahci_start_fis_rx(ap);
1312 /* enable DMA */
1313 ahci_start_engine(ap);
1315 /* turn on LEDs */
1316 if (ap->flags & ATA_FLAG_EM) {
1317 ata_for_each_link(link, ap, EDGE) {
1318 emp = &pp->em_priv[link->pmp];
1320 /* EM Transmit bit may be busy during init */
1321 for (i = 0; i < EM_MAX_RETRY; i++) {
1322 rc = ahci_transmit_led_message(ap,
1323 emp->led_state,
1325 if (rc == -EBUSY)
1326 msleep(1);
1327 else
1328 break;
1333 if (ap->flags & ATA_FLAG_SW_ACTIVITY)
1334 ata_for_each_link(link, ap, EDGE)
1335 ahci_init_sw_activity(link);
1339 static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
1341 int rc;
1343 /* disable DMA */
1344 rc = ahci_stop_engine(ap);
1345 if (rc) {
1346 *emsg = "failed to stop engine";
1347 return rc;
1350 /* disable FIS reception */
1351 rc = ahci_stop_fis_rx(ap);
1352 if (rc) {
1353 *emsg = "failed stop FIS RX";
1354 return rc;
1357 return 0;
1360 static int ahci_reset_controller(struct ata_host *host)
1362 struct pci_dev *pdev = to_pci_dev(host->dev);
1363 struct ahci_host_priv *hpriv = host->private_data;
1364 void __iomem *mmio = hpriv->mmio;
1365 u32 tmp;
1367 /* we must be in AHCI mode, before using anything
1368 * AHCI-specific, such as HOST_RESET.
1370 ahci_enable_ahci(mmio);
1372 /* global controller reset */
1373 if (!ahci_skip_host_reset) {
1374 tmp = readl(mmio + HOST_CTL);
1375 if ((tmp & HOST_RESET) == 0) {
1376 writel(tmp | HOST_RESET, mmio + HOST_CTL);
1377 readl(mmio + HOST_CTL); /* flush */
1381 * to perform host reset, OS should set HOST_RESET
1382 * and poll until this bit is read to be "0".
1383 * reset must complete within 1 second, or
1384 * the hardware should be considered fried.
1386 tmp = ata_wait_register(mmio + HOST_CTL, HOST_RESET,
1387 HOST_RESET, 10, 1000);
1389 if (tmp & HOST_RESET) {
1390 dev_printk(KERN_ERR, host->dev,
1391 "controller reset failed (0x%x)\n", tmp);
1392 return -EIO;
1395 /* turn on AHCI mode */
1396 ahci_enable_ahci(mmio);
1398 /* Some registers might be cleared on reset. Restore
1399 * initial values.
1401 ahci_restore_initial_config(host);
1402 } else
1403 dev_printk(KERN_INFO, host->dev,
1404 "skipping global host reset\n");
1406 if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
1407 u16 tmp16;
1409 /* configure PCS */
1410 pci_read_config_word(pdev, 0x92, &tmp16);
1411 if ((tmp16 & hpriv->port_map) != hpriv->port_map) {
1412 tmp16 |= hpriv->port_map;
1413 pci_write_config_word(pdev, 0x92, tmp16);
1417 return 0;
1420 static void ahci_sw_activity(struct ata_link *link)
1422 struct ata_port *ap = link->ap;
1423 struct ahci_port_priv *pp = ap->private_data;
1424 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1426 if (!(link->flags & ATA_LFLAG_SW_ACTIVITY))
1427 return;
1429 emp->activity++;
1430 if (!timer_pending(&emp->timer))
1431 mod_timer(&emp->timer, jiffies + msecs_to_jiffies(10));
1434 static void ahci_sw_activity_blink(unsigned long arg)
1436 struct ata_link *link = (struct ata_link *)arg;
1437 struct ata_port *ap = link->ap;
1438 struct ahci_port_priv *pp = ap->private_data;
1439 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1440 unsigned long led_message = emp->led_state;
1441 u32 activity_led_state;
1442 unsigned long flags;
1444 led_message &= EM_MSG_LED_VALUE;
1445 led_message |= ap->port_no | (link->pmp << 8);
1447 /* check to see if we've had activity. If so,
1448 * toggle state of LED and reset timer. If not,
1449 * turn LED to desired idle state.
1451 spin_lock_irqsave(ap->lock, flags);
1452 if (emp->saved_activity != emp->activity) {
1453 emp->saved_activity = emp->activity;
1454 /* get the current LED state */
1455 activity_led_state = led_message & EM_MSG_LED_VALUE_ON;
1457 if (activity_led_state)
1458 activity_led_state = 0;
1459 else
1460 activity_led_state = 1;
1462 /* clear old state */
1463 led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
1465 /* toggle state */
1466 led_message |= (activity_led_state << 16);
1467 mod_timer(&emp->timer, jiffies + msecs_to_jiffies(100));
1468 } else {
1469 /* switch to idle */
1470 led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
1471 if (emp->blink_policy == BLINK_OFF)
1472 led_message |= (1 << 16);
1474 spin_unlock_irqrestore(ap->lock, flags);
1475 ahci_transmit_led_message(ap, led_message, 4);
1478 static void ahci_init_sw_activity(struct ata_link *link)
1480 struct ata_port *ap = link->ap;
1481 struct ahci_port_priv *pp = ap->private_data;
1482 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1484 /* init activity stats, setup timer */
1485 emp->saved_activity = emp->activity = 0;
1486 setup_timer(&emp->timer, ahci_sw_activity_blink, (unsigned long)link);
1488 /* check our blink policy and set flag for link if it's enabled */
1489 if (emp->blink_policy)
1490 link->flags |= ATA_LFLAG_SW_ACTIVITY;
1493 static int ahci_reset_em(struct ata_host *host)
1495 struct ahci_host_priv *hpriv = host->private_data;
1496 void __iomem *mmio = hpriv->mmio;
1497 u32 em_ctl;
1499 em_ctl = readl(mmio + HOST_EM_CTL);
1500 if ((em_ctl & EM_CTL_TM) || (em_ctl & EM_CTL_RST))
1501 return -EINVAL;
1503 writel(em_ctl | EM_CTL_RST, mmio + HOST_EM_CTL);
1504 return 0;
1507 static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
1508 ssize_t size)
1510 struct ahci_host_priv *hpriv = ap->host->private_data;
1511 struct ahci_port_priv *pp = ap->private_data;
1512 void __iomem *mmio = hpriv->mmio;
1513 u32 em_ctl;
1514 u32 message[] = {0, 0};
1515 unsigned long flags;
1516 int pmp;
1517 struct ahci_em_priv *emp;
1519 /* get the slot number from the message */
1520 pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
1521 if (pmp < EM_MAX_SLOTS)
1522 emp = &pp->em_priv[pmp];
1523 else
1524 return -EINVAL;
1526 spin_lock_irqsave(ap->lock, flags);
1529 * if we are still busy transmitting a previous message,
1530 * do not allow a new transmission
1532 em_ctl = readl(mmio + HOST_EM_CTL);
1533 if (em_ctl & EM_CTL_TM) {
1534 spin_unlock_irqrestore(ap->lock, flags);
1535 return -EBUSY;
1539 * create message header - this is all zero except for
1540 * the message size, which is 4 bytes.
1542 message[0] |= (4 << 8);
1544 /* ignore 0:4 of byte zero, fill in port info yourself */
1545 message[1] = ((state & ~EM_MSG_LED_HBA_PORT) | ap->port_no);
1547 /* write message to EM_LOC */
1548 writel(message[0], mmio + hpriv->em_loc);
1549 writel(message[1], mmio + hpriv->em_loc+4);
1551 /* save off new led state for port/slot */
1552 emp->led_state = state;
1555 * tell hardware to transmit the message
1557 writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);
1559 spin_unlock_irqrestore(ap->lock, flags);
1560 return size;
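/*
 * Editor's note (illustrative): for a single LED message the two dwords
 * written to EM_LOC above end up as message[0] = 4 << 8 = 0x400 (a
 * 4-byte payload, all other header fields left zero as the comment above
 * notes) and message[1] = the caller's state with the HBA port number
 * substituted into bits 3:0.
 */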
1563 static ssize_t ahci_led_show(struct ata_port *ap, char *buf)
1565 struct ahci_port_priv *pp = ap->private_data;
1566 struct ata_link *link;
1567 struct ahci_em_priv *emp;
1568 int rc = 0;
1570 ata_for_each_link(link, ap, EDGE) {
1571 emp = &pp->em_priv[link->pmp];
1572 rc += sprintf(buf + rc, "%lx\n", emp->led_state);
1574 return rc;
1577 static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
1578 size_t size)
1580 int state;
1581 int pmp;
1582 struct ahci_port_priv *pp = ap->private_data;
1583 struct ahci_em_priv *emp;
1585 state = simple_strtoul(buf, NULL, 0);
1587 /* get the slot number from the message */
1588 pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
1589 if (pmp < EM_MAX_SLOTS)
1590 emp = &pp->em_priv[pmp];
1591 else
1592 return -EINVAL;
1594 /* mask off the activity bits if we are in sw_activity
1595 * mode, user should turn off sw_activity before setting
1596 * activity led through em_message
1598 if (emp->blink_policy)
1599 state &= ~EM_MSG_LED_VALUE_ACTIVITY;
1601 return ahci_transmit_led_message(ap, state, size);
1604 static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val)
1606 struct ata_link *link = dev->link;
1607 struct ata_port *ap = link->ap;
1608 struct ahci_port_priv *pp = ap->private_data;
1609 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1610 u32 port_led_state = emp->led_state;
1612 /* save the desired Activity LED behavior */
1613 if (val == OFF) {
1614 /* clear LFLAG */
1615 link->flags &= ~(ATA_LFLAG_SW_ACTIVITY);
1617 /* set the LED to OFF */
1618 port_led_state &= EM_MSG_LED_VALUE_OFF;
1619 port_led_state |= (ap->port_no | (link->pmp << 8));
1620 ahci_transmit_led_message(ap, port_led_state, 4);
1621 } else {
1622 link->flags |= ATA_LFLAG_SW_ACTIVITY;
1623 if (val == BLINK_OFF) {
1624 /* set LED to ON for idle */
1625 port_led_state &= EM_MSG_LED_VALUE_OFF;
1626 port_led_state |= (ap->port_no | (link->pmp << 8));
1627 port_led_state |= EM_MSG_LED_VALUE_ON; /* check this */
1628 ahci_transmit_led_message(ap, port_led_state, 4);
1631 emp->blink_policy = val;
1632 return 0;
1635 static ssize_t ahci_activity_show(struct ata_device *dev, char *buf)
1637 struct ata_link *link = dev->link;
1638 struct ata_port *ap = link->ap;
1639 struct ahci_port_priv *pp = ap->private_data;
1640 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1642 /* display the saved value of activity behavior for this
1643 * disk.
1645 return sprintf(buf, "%d\n", emp->blink_policy);
1648 static void ahci_port_init(struct pci_dev *pdev, struct ata_port *ap,
1649 int port_no, void __iomem *mmio,
1650 void __iomem *port_mmio)
1652 const char *emsg = NULL;
1653 int rc;
1654 u32 tmp;
1656 /* make sure port is not active */
1657 rc = ahci_deinit_port(ap, &emsg);
1658 if (rc)
1659 dev_printk(KERN_WARNING, &pdev->dev,
1660 "%s (%d)\n", emsg, rc);
1662 /* clear SError */
1663 tmp = readl(port_mmio + PORT_SCR_ERR);
1664 VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
1665 writel(tmp, port_mmio + PORT_SCR_ERR);
1667 /* clear port IRQ */
1668 tmp = readl(port_mmio + PORT_IRQ_STAT);
1669 VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
1670 if (tmp)
1671 writel(tmp, port_mmio + PORT_IRQ_STAT);
1673 writel(1 << port_no, mmio + HOST_IRQ_STAT);
1676 static void ahci_init_controller(struct ata_host *host)
1678 struct ahci_host_priv *hpriv = host->private_data;
1679 struct pci_dev *pdev = to_pci_dev(host->dev);
1680 void __iomem *mmio = hpriv->mmio;
1681 int i;
1682 void __iomem *port_mmio;
1683 u32 tmp;
1684 int mv;
1686 if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
1687 if (pdev->device == 0x6121)
1688 mv = 2;
1689 else
1690 mv = 4;
1691 port_mmio = __ahci_port_base(host, mv);
1693 writel(0, port_mmio + PORT_IRQ_MASK);
1695 /* clear port IRQ */
1696 tmp = readl(port_mmio + PORT_IRQ_STAT);
1697 VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
1698 if (tmp)
1699 writel(tmp, port_mmio + PORT_IRQ_STAT);
1702 for (i = 0; i < host->n_ports; i++) {
1703 struct ata_port *ap = host->ports[i];
1705 port_mmio = ahci_port_base(ap);
1706 if (ata_port_is_dummy(ap))
1707 continue;
1709 ahci_port_init(pdev, ap, i, mmio, port_mmio);
1712 tmp = readl(mmio + HOST_CTL);
1713 VPRINTK("HOST_CTL 0x%x\n", tmp);
1714 writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
1715 tmp = readl(mmio + HOST_CTL);
1716 VPRINTK("HOST_CTL 0x%x\n", tmp);
1719 static void ahci_dev_config(struct ata_device *dev)
1721 struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;
1723 if (hpriv->flags & AHCI_HFLAG_SECT255) {
1724 dev->max_sectors = 255;
1725 ata_dev_printk(dev, KERN_INFO,
1726 "SB600 AHCI: limiting to 255 sectors per cmd\n");
1730 static unsigned int ahci_dev_classify(struct ata_port *ap)
1732 void __iomem *port_mmio = ahci_port_base(ap);
1733 struct ata_taskfile tf;
1734 u32 tmp;
1736 tmp = readl(port_mmio + PORT_SIG);
1737 tf.lbah = (tmp >> 24) & 0xff;
1738 tf.lbam = (tmp >> 16) & 0xff;
1739 tf.lbal = (tmp >> 8) & 0xff;
1740 tf.nsect = (tmp) & 0xff;
1742 return ata_dev_classify(&tf);
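/*
 * Fill the command list header for @tag: the command options word, a
 * cleared status field, and the 64-bit bus address of that tag's command
 * table.
 */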
1745 static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
1746 u32 opts)
1748 dma_addr_t cmd_tbl_dma;
1750 cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
1752 pp->cmd_slot[tag].opts = cpu_to_le32(opts);
1753 pp->cmd_slot[tag].status = 0;
1754 pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
1755 pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
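/*
 * Recover the port to an idle state: stop the DMA engine, issue a Command
 * List Override (CLO) if the device is busy or a PMP is attached and the
 * HBA supports it, then restart the engine.
 */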
1758 static int ahci_kick_engine(struct ata_port *ap)
1760 void __iomem *port_mmio = ahci_port_base(ap);
1761 struct ahci_host_priv *hpriv = ap->host->private_data;
1762 u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1763 u32 tmp;
1764 int busy, rc;
1766 /* stop engine */
1767 rc = ahci_stop_engine(ap);
1768 if (rc)
1769 goto out_restart;
1771 /* need to do CLO?
1772 * always do CLO if PMP is attached (AHCI-1.3 9.2)
1774 busy = status & (ATA_BUSY | ATA_DRQ);
1775 if (!busy && !sata_pmp_attached(ap)) {
1776 rc = 0;
1777 goto out_restart;
1780 if (!(hpriv->cap & HOST_CAP_CLO)) {
1781 rc = -EOPNOTSUPP;
1782 goto out_restart;
1785 /* perform CLO */
1786 tmp = readl(port_mmio + PORT_CMD);
1787 tmp |= PORT_CMD_CLO;
1788 writel(tmp, port_mmio + PORT_CMD);
1790 rc = 0;
1791 tmp = ata_wait_register(port_mmio + PORT_CMD,
1792 PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
1793 if (tmp & PORT_CMD_CLO)
1794 rc = -EIO;
1796 /* restart engine */
1797 out_restart:
1798 ahci_start_engine(ap);
1799 return rc;
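/*
 * Build a Register - Host to Device FIS for @tf in command slot 0, issue
 * it, and, when @timeout_msec is non-zero, poll PORT_CMD_ISSUE until the
 * HBA clears the bit; kick the engine and return -EBUSY on timeout.
 */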
1802 static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
1803 struct ata_taskfile *tf, int is_cmd, u16 flags,
1804 unsigned long timeout_msec)
1806 const u32 cmd_fis_len = 5; /* five dwords */
1807 struct ahci_port_priv *pp = ap->private_data;
1808 void __iomem *port_mmio = ahci_port_base(ap);
1809 u8 *fis = pp->cmd_tbl;
1810 u32 tmp;
1812 /* prep the command */
1813 ata_tf_to_fis(tf, pmp, is_cmd, fis);
1814 ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));
1816 /* issue & wait */
1817 writel(1, port_mmio + PORT_CMD_ISSUE);
1819 if (timeout_msec) {
1820 tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1,
1821 1, timeout_msec);
1822 if (tmp & 0x1) {
1823 ahci_kick_engine(ap);
1824 return -EBUSY;
1826 } else
1827 readl(port_mmio + PORT_CMD_ISSUE); /* flush */
1829 return 0;
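/*
 * Common softreset implementation: kick the engine into a sane state,
 * issue an SRST FIS followed by one with SRST cleared, then wait for the
 * link to become ready via @check_ready and classify the device signature.
 */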
1832 static int ahci_do_softreset(struct ata_link *link, unsigned int *class,
1833 int pmp, unsigned long deadline,
1834 int (*check_ready)(struct ata_link *link))
1836 struct ata_port *ap = link->ap;
1837 struct ahci_host_priv *hpriv = ap->host->private_data;
1838 const char *reason = NULL;
1839 unsigned long now, msecs;
1840 struct ata_taskfile tf;
1841 int rc;
1843 DPRINTK("ENTER\n");
1845 /* prepare for SRST (AHCI-1.1 10.4.1) */
1846 rc = ahci_kick_engine(ap);
1847 if (rc && rc != -EOPNOTSUPP)
1848 ata_link_printk(link, KERN_WARNING,
1849 "failed to reset engine (errno=%d)\n", rc);
1851 ata_tf_init(link->device, &tf);
1853 /* issue the first D2H Register FIS */
1854 msecs = 0;
1855 now = jiffies;
1856 if (time_after(deadline, now))
1857 msecs = jiffies_to_msecs(deadline - now);
1859 tf.ctl |= ATA_SRST;
1860 if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
1861 AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
1862 rc = -EIO;
1863 reason = "1st FIS failed";
1864 goto fail;
1867 /* spec says at least 5us, but be generous and sleep for 1ms */
1868 msleep(1);
1870 /* issue the second D2H Register FIS */
1871 tf.ctl &= ~ATA_SRST;
1872 ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);
1874 /* wait for link to become ready */
1875 rc = ata_wait_after_reset(link, deadline, check_ready);
1876 if (rc == -EBUSY && hpriv->flags & AHCI_HFLAG_SRST_TOUT_IS_OFFLINE) {
1878 * Workaround for cases where link online status can't
1879 * be trusted. Treat device readiness timeout as link
1880 * offline.
1882 ata_link_printk(link, KERN_INFO,
1883 "device not ready, treating as offline\n");
1884 *class = ATA_DEV_NONE;
1885 } else if (rc) {
1886 /* link occupied, -ENODEV too is an error */
1887 reason = "device not ready";
1888 goto fail;
1889 } else
1890 *class = ahci_dev_classify(ap);
1892 DPRINTK("EXIT, class=%u\n", *class);
1893 return 0;
1895 fail:
1896 ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
1897 return rc;
1900 static int ahci_check_ready(struct ata_link *link)
1902 void __iomem *port_mmio = ahci_port_base(link->ap);
1903 u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1905 return ata_check_ready(status);
1908 static int ahci_softreset(struct ata_link *link, unsigned int *class,
1909 unsigned long deadline)
1911 int pmp = sata_srst_pmp(link);
1913 DPRINTK("ENTER\n");
1915 return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
1918 static int ahci_sb600_check_ready(struct ata_link *link)
1920 void __iomem *port_mmio = ahci_port_base(link->ap);
1921 u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1922 u32 irq_status = readl(port_mmio + PORT_IRQ_STAT);
1925 * There is no need to check TFDATA if BAD PMP is found due to a HW bug;
1926 * skipping the check saves the timeout delay.
1928 if (irq_status & PORT_IRQ_BAD_PMP)
1929 return -EIO;
1931 return ata_check_ready(status);
1934 static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
1935 unsigned long deadline)
1937 struct ata_port *ap = link->ap;
1938 void __iomem *port_mmio = ahci_port_base(ap);
1939 int pmp = sata_srst_pmp(link);
1940 int rc;
1941 u32 irq_sts;
1943 DPRINTK("ENTER\n");
1945 rc = ahci_do_softreset(link, class, pmp, deadline,
1946 ahci_sb600_check_ready);
1949 * Soft reset fails on some ATI chips with IPMS set when PMP
1950 * is enabled but a SATA HDD/ODD is connected to the SATA port;
1951 * do soft reset again to port 0.
1953 if (rc == -EIO) {
1954 irq_sts = readl(port_mmio + PORT_IRQ_STAT);
1955 if (irq_sts & PORT_IRQ_BAD_PMP) {
1956 ata_link_printk(link, KERN_WARNING,
1957 "applying SB600 PMP SRST workaround "
1958 "and retrying\n");
1959 rc = ahci_do_softreset(link, class, 0, deadline,
1960 ahci_check_ready);
1964 return rc;
1967 static int ahci_hardreset(struct ata_link *link, unsigned int *class,
1968 unsigned long deadline)
1970 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
1971 struct ata_port *ap = link->ap;
1972 struct ahci_port_priv *pp = ap->private_data;
1973 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
1974 struct ata_taskfile tf;
1975 bool online;
1976 int rc;
1978 DPRINTK("ENTER\n");
1980 ahci_stop_engine(ap);
1982 /* clear D2H reception area to properly wait for D2H FIS */
1983 ata_tf_init(link->device, &tf);
1984 tf.command = 0x80;
1985 ata_tf_to_fis(&tf, 0, 0, d2h_fis);
1987 rc = sata_link_hardreset(link, timing, deadline, &online,
1988 ahci_check_ready);
1990 ahci_start_engine(ap);
1992 if (online)
1993 *class = ahci_dev_classify(ap);
1995 DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
1996 return rc;
1999 static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
2000 unsigned long deadline)
2002 struct ata_port *ap = link->ap;
2003 bool online;
2004 int rc;
2006 DPRINTK("ENTER\n");
2008 ahci_stop_engine(ap);
2010 rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
2011 deadline, &online, NULL);
2013 ahci_start_engine(ap);
2015 DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
2017 /* vt8251 doesn't clear BSY on signature FIS reception,
2018 * request follow-up softreset.
2020 return online ? -EAGAIN : rc;
2023 static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
2024 unsigned long deadline)
2026 struct ata_port *ap = link->ap;
2027 struct ahci_port_priv *pp = ap->private_data;
2028 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
2029 struct ata_taskfile tf;
2030 bool online;
2031 int rc;
2033 ahci_stop_engine(ap);
2035 /* clear D2H reception area to properly wait for D2H FIS */
2036 ata_tf_init(link->device, &tf);
2037 tf.command = 0x80;
2038 ata_tf_to_fis(&tf, 0, 0, d2h_fis);
2040 rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
2041 deadline, &online, NULL);
2043 ahci_start_engine(ap);
2045 /* The pseudo configuration device on SIMG4726 attached to
2046 * ASUS P5W-DH Deluxe doesn't send signature FIS after
2047 * hardreset if no device is attached to the first downstream
2048 * port && the pseudo device locks up on SRST w/ PMP==0. To
2049 * work around this, wait for !BSY only briefly. If BSY isn't
2050 * cleared, perform CLO and proceed to IDENTIFY (achieved by
2051 * ATA_LFLAG_NO_SRST and ATA_LFLAG_ASSUME_ATA).
2053 * Wait for two seconds. Devices attached to downstream port
2054 * which can't process the following IDENTIFY after this will
2055 * have to be reset again. For most cases, this should
2056 * suffice while keeping probing snappy enough.
2058 if (online) {
2059 rc = ata_wait_after_reset(link, jiffies + 2 * HZ,
2060 ahci_check_ready);
2061 if (rc)
2062 ahci_kick_engine(ap);
2064 return rc;
2067 static void ahci_postreset(struct ata_link *link, unsigned int *class)
2069 struct ata_port *ap = link->ap;
2070 void __iomem *port_mmio = ahci_port_base(ap);
2071 u32 new_tmp, tmp;
2073 ata_std_postreset(link, class);
2075 /* Make sure port's ATAPI bit is set appropriately */
2076 new_tmp = tmp = readl(port_mmio + PORT_CMD);
2077 if (*class == ATA_DEV_ATAPI)
2078 new_tmp |= PORT_CMD_ATAPI;
2079 else
2080 new_tmp &= ~PORT_CMD_ATAPI;
2081 if (new_tmp != tmp) {
2082 writel(new_tmp, port_mmio + PORT_CMD);
2083 readl(port_mmio + PORT_CMD); /* flush */
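/*
 * Convert the qc's scatterlist into AHCI PRD entries placed right after
 * the command FIS/ATAPI area in the command table; returns the number of
 * entries filled in.
 */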
2087 static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
2089 struct scatterlist *sg;
2090 struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
2091 unsigned int si;
2093 VPRINTK("ENTER\n");
2096 * Next, the S/G list.
2098 for_each_sg(qc->sg, sg, qc->n_elem, si) {
2099 dma_addr_t addr = sg_dma_address(sg);
2100 u32 sg_len = sg_dma_len(sg);
2102 ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
2103 ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
2104 ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1);
2107 return si;
2110 static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc)
2112 struct ata_port *ap = qc->ap;
2113 struct ahci_port_priv *pp = ap->private_data;
2115 if (!sata_pmp_attached(ap) || pp->fbs_enabled)
2116 return ata_std_qc_defer(qc);
2117 else
2118 return sata_pmp_qc_defer_cmd_switch(qc);
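/*
 * Prepare a queued command for issue: build the command FIS (plus the
 * ATAPI CDB if needed) and the PRD table in the tag's command table, then
 * fill in the matching command list slot.
 */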
2121 static void ahci_qc_prep(struct ata_queued_cmd *qc)
2123 struct ata_port *ap = qc->ap;
2124 struct ahci_port_priv *pp = ap->private_data;
2125 int is_atapi = ata_is_atapi(qc->tf.protocol);
2126 void *cmd_tbl;
2127 u32 opts;
2128 const u32 cmd_fis_len = 5; /* five dwords */
2129 unsigned int n_elem;
2132 * Fill in command table information. First, the header,
2133 * a SATA Register - Host to Device command FIS.
2135 cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
2137 ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
2138 if (is_atapi) {
2139 memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
2140 memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
2143 n_elem = 0;
2144 if (qc->flags & ATA_QCFLAG_DMAMAP)
2145 n_elem = ahci_fill_sg(qc, cmd_tbl);
2148 * Fill in command slot information.
2150 opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12);
2151 if (qc->tf.flags & ATA_TFLAG_WRITE)
2152 opts |= AHCI_CMD_WRITE;
2153 if (is_atapi)
2154 opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
2156 ahci_fill_cmd_slot(pp, qc->tag, opts);
2159 static void ahci_fbs_dec_intr(struct ata_port *ap)
2161 struct ahci_port_priv *pp = ap->private_data;
2162 void __iomem *port_mmio = ahci_port_base(ap);
2163 u32 fbs = readl(port_mmio + PORT_FBS);
2164 int retries = 3;
2166 DPRINTK("ENTER\n");
2167 BUG_ON(!pp->fbs_enabled);
2169 /* the time to wait for DEC is not specified by the AHCI spec,
2170 * so add a retry loop for safety.
2172 writel(fbs | PORT_FBS_DEC, port_mmio + PORT_FBS);
2173 fbs = readl(port_mmio + PORT_FBS);
2174 while ((fbs & PORT_FBS_DEC) && retries--) {
2175 udelay(1);
2176 fbs = readl(port_mmio + PORT_FBS);
2179 if (fbs & PORT_FBS_DEC)
2180 dev_printk(KERN_ERR, ap->host->dev,
2181 "failed to clear device error\n");
2184 static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
2186 struct ahci_host_priv *hpriv = ap->host->private_data;
2187 struct ahci_port_priv *pp = ap->private_data;
2188 struct ata_eh_info *host_ehi = &ap->link.eh_info;
2189 struct ata_link *link = NULL;
2190 struct ata_queued_cmd *active_qc;
2191 struct ata_eh_info *active_ehi;
2192 bool fbs_need_dec = false;
2193 u32 serror;
2195 /* determine active link with error */
2196 if (pp->fbs_enabled) {
2197 void __iomem *port_mmio = ahci_port_base(ap);
2198 u32 fbs = readl(port_mmio + PORT_FBS);
2199 int pmp = fbs >> PORT_FBS_DWE_OFFSET;
2201 if ((fbs & PORT_FBS_SDE) && (pmp < ap->nr_pmp_links) &&
2202 ata_link_online(&ap->pmp_link[pmp])) {
2203 link = &ap->pmp_link[pmp];
2204 fbs_need_dec = true;
2207 } else
2208 ata_for_each_link(link, ap, EDGE)
2209 if (ata_link_active(link))
2210 break;
2212 if (!link)
2213 link = &ap->link;
2215 active_qc = ata_qc_from_tag(ap, link->active_tag);
2216 active_ehi = &link->eh_info;
2218 /* record irq stat */
2219 ata_ehi_clear_desc(host_ehi);
2220 ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);
2222 /* AHCI needs SError cleared; otherwise, it might lock up */
2223 ahci_scr_read(&ap->link, SCR_ERROR, &serror);
2224 ahci_scr_write(&ap->link, SCR_ERROR, serror);
2225 host_ehi->serror |= serror;
2227 /* some controllers set IRQ_IF_ERR on device errors, ignore it */
2228 if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR)
2229 irq_stat &= ~PORT_IRQ_IF_ERR;
2231 if (irq_stat & PORT_IRQ_TF_ERR) {
2232 /* If qc is active, charge it; otherwise, the active
2233 * link. There's no active qc on NCQ errors. It will
2234 * be determined by EH by reading log page 10h.
2236 if (active_qc)
2237 active_qc->err_mask |= AC_ERR_DEV;
2238 else
2239 active_ehi->err_mask |= AC_ERR_DEV;
2241 if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL)
2242 host_ehi->serror &= ~SERR_INTERNAL;
2245 if (irq_stat & PORT_IRQ_UNK_FIS) {
2246 u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
2248 active_ehi->err_mask |= AC_ERR_HSM;
2249 active_ehi->action |= ATA_EH_RESET;
2250 ata_ehi_push_desc(active_ehi,
2251 "unknown FIS %08x %08x %08x %08x" ,
2252 unk[0], unk[1], unk[2], unk[3]);
2255 if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) {
2256 active_ehi->err_mask |= AC_ERR_HSM;
2257 active_ehi->action |= ATA_EH_RESET;
2258 ata_ehi_push_desc(active_ehi, "incorrect PMP");
2261 if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
2262 host_ehi->err_mask |= AC_ERR_HOST_BUS;
2263 host_ehi->action |= ATA_EH_RESET;
2264 ata_ehi_push_desc(host_ehi, "host bus error");
2267 if (irq_stat & PORT_IRQ_IF_ERR) {
2268 if (fbs_need_dec)
2269 active_ehi->err_mask |= AC_ERR_DEV;
2270 else {
2271 host_ehi->err_mask |= AC_ERR_ATA_BUS;
2272 host_ehi->action |= ATA_EH_RESET;
2275 ata_ehi_push_desc(host_ehi, "interface fatal error");
2278 if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
2279 ata_ehi_hotplugged(host_ehi);
2280 ata_ehi_push_desc(host_ehi, "%s",
2281 irq_stat & PORT_IRQ_CONNECT ?
2282 "connection status changed" : "PHY RDY changed");
2285 /* okay, let's hand over to EH */
2287 if (irq_stat & PORT_IRQ_FREEZE)
2288 ata_port_freeze(ap);
2289 else if (fbs_need_dec) {
2290 ata_link_abort(link);
2291 ahci_fbs_dec_intr(ap);
2292 } else
2293 ata_port_abort(ap);
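/*
 * Per-port interrupt handler: read and ack PORT_IRQ_STAT, hand error bits
 * to ahci_error_intr(), handle SDB FIS based asynchronous notification,
 * and complete finished commands based on PORT_SCR_ACT / PORT_CMD_ISSUE.
 */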
2296 static void ahci_port_intr(struct ata_port *ap)
2298 void __iomem *port_mmio = ahci_port_base(ap);
2299 struct ata_eh_info *ehi = &ap->link.eh_info;
2300 struct ahci_port_priv *pp = ap->private_data;
2301 struct ahci_host_priv *hpriv = ap->host->private_data;
2302 int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
2303 u32 status, qc_active = 0;
2304 int rc;
2306 status = readl(port_mmio + PORT_IRQ_STAT);
2307 writel(status, port_mmio + PORT_IRQ_STAT);
2309 /* ignore BAD_PMP while resetting */
2310 if (unlikely(resetting))
2311 status &= ~PORT_IRQ_BAD_PMP;
2313 /* If we are getting PhyRdy, this is
2314 * just a power state change; clear it
2315 * out, plus the PhyRdy/Comm Wake bits,
2316 * from SError
2318 if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) &&
2319 (status & PORT_IRQ_PHYRDY)) {
2320 status &= ~PORT_IRQ_PHYRDY;
2321 ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
2324 if (unlikely(status & PORT_IRQ_ERROR)) {
2325 ahci_error_intr(ap, status);
2326 return;
2329 if (status & PORT_IRQ_SDB_FIS) {
2330 /* If SNotification is available, leave notification
2331 * handling to sata_async_notification(). If not,
2332 * emulate it by snooping SDB FIS RX area.
2334 * Snooping FIS RX area is probably cheaper than
2335 * poking SNotification but some controllers which
2336 * implement SNotification, ICH9 for example, don't
2337 * store AN SDB FIS into receive area.
2339 if (hpriv->cap & HOST_CAP_SNTF)
2340 sata_async_notification(ap);
2341 else {
2342 /* If the 'N' bit in word 0 of the FIS is set,
2343 * we just received asynchronous notification.
2344 * Tell libata about it.
2346 * Lack of SNotification should not appear in
2347 * ahci 1.2, so the workaround is unnecessary
2348 * when FBS is enabled.
2350 if (pp->fbs_enabled)
2351 WARN_ON_ONCE(1);
2352 else {
2353 const __le32 *f = pp->rx_fis + RX_FIS_SDB;
2354 u32 f0 = le32_to_cpu(f[0]);
2355 if (f0 & (1 << 15))
2356 sata_async_notification(ap);
2361 /* pp->active_link is not reliable once FBS is enabled, both
2362 * PORT_SCR_ACT and PORT_CMD_ISSUE should be checked because
2363 * NCQ and non-NCQ commands may be in flight at the same time.
2365 if (pp->fbs_enabled) {
2366 if (ap->qc_active) {
2367 qc_active = readl(port_mmio + PORT_SCR_ACT);
2368 qc_active |= readl(port_mmio + PORT_CMD_ISSUE);
2370 } else {
2371 /* pp->active_link is valid iff any command is in flight */
2372 if (ap->qc_active && pp->active_link->sactive)
2373 qc_active = readl(port_mmio + PORT_SCR_ACT);
2374 else
2375 qc_active = readl(port_mmio + PORT_CMD_ISSUE);
2378 rc = ata_qc_complete_multiple(ap, qc_active);
2380 /* while resetting, invalid completions are expected */
2381 if (unlikely(rc < 0 && !resetting)) {
2382 ehi->err_mask |= AC_ERR_HSM;
2383 ehi->action |= ATA_EH_RESET;
2384 ata_port_freeze(ap);
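/*
 * Top-level shared interrupt handler: read HOST_IRQ_STAT, call
 * ahci_port_intr() for every implemented port with a pending bit, and
 * clear HOST_IRQ_STAT only after all port events have been handled.
 */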
2388 static irqreturn_t ahci_interrupt(int irq, void *dev_instance)
2390 struct ata_host *host = dev_instance;
2391 struct ahci_host_priv *hpriv;
2392 unsigned int i, handled = 0;
2393 void __iomem *mmio;
2394 u32 irq_stat, irq_masked;
2396 VPRINTK("ENTER\n");
2398 hpriv = host->private_data;
2399 mmio = hpriv->mmio;
2401 /* sigh. 0xffffffff is a valid return from h/w */
2402 irq_stat = readl(mmio + HOST_IRQ_STAT);
2403 if (!irq_stat)
2404 return IRQ_NONE;
2406 irq_masked = irq_stat & hpriv->port_map;
2408 spin_lock(&host->lock);
2410 for (i = 0; i < host->n_ports; i++) {
2411 struct ata_port *ap;
2413 if (!(irq_masked & (1 << i)))
2414 continue;
2416 ap = host->ports[i];
2417 if (ap) {
2418 ahci_port_intr(ap);
2419 VPRINTK("port %u\n", i);
2420 } else {
2421 VPRINTK("port %u (no irq)\n", i);
2422 if (ata_ratelimit())
2423 dev_printk(KERN_WARNING, host->dev,
2424 "interrupt on disabled port %u\n", i);
2427 handled = 1;
2430 /* HOST_IRQ_STAT behaves as a level-triggered latch, meaning that
2431 * it should be cleared after all the port events are cleared;
2432 * otherwise, it will raise a spurious interrupt after each
2433 * valid one. Please read section 10.6.2 of ahci 1.1 for more
2434 * information.
2436 * Also, use the unmasked value to clear interrupt as spurious
2437 * pending event on a dummy port might cause screaming IRQ.
2439 writel(irq_stat, mmio + HOST_IRQ_STAT);
2441 spin_unlock(&host->lock);
2443 VPRINTK("EXIT\n");
2445 return IRQ_RETVAL(handled);
2448 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
2450 struct ata_port *ap = qc->ap;
2451 void __iomem *port_mmio = ahci_port_base(ap);
2452 struct ahci_port_priv *pp = ap->private_data;
2454 /* Keep track of the currently active link. It will be used
2455 * in completion path to determine whether NCQ phase is in
2456 * progress.
2458 pp->active_link = qc->dev->link;
2460 if (qc->tf.protocol == ATA_PROT_NCQ)
2461 writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
2463 if (pp->fbs_enabled && pp->fbs_last_dev != qc->dev->link->pmp) {
2464 u32 fbs = readl(port_mmio + PORT_FBS);
2465 fbs &= ~(PORT_FBS_DEV_MASK | PORT_FBS_DEC);
2466 fbs |= qc->dev->link->pmp << PORT_FBS_DEV_OFFSET;
2467 writel(fbs, port_mmio + PORT_FBS);
2468 pp->fbs_last_dev = qc->dev->link->pmp;
2471 writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
2473 ahci_sw_activity(qc->dev->link);
2475 return 0;
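/*
 * Fill the result taskfile from the D2H Register FIS in the receive area;
 * with FBS enabled each PMP link has its own slot in the RX FIS area.
 */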
2478 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
2480 struct ahci_port_priv *pp = qc->ap->private_data;
2481 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
2483 if (pp->fbs_enabled)
2484 d2h_fis += qc->dev->link->pmp * AHCI_RX_FIS_SZ;
2486 ata_tf_from_fis(d2h_fis, &qc->result_tf);
2487 return true;
2490 static void ahci_freeze(struct ata_port *ap)
2492 void __iomem *port_mmio = ahci_port_base(ap);
2494 /* turn IRQ off */
2495 writel(0, port_mmio + PORT_IRQ_MASK);
2498 static void ahci_thaw(struct ata_port *ap)
2500 struct ahci_host_priv *hpriv = ap->host->private_data;
2501 void __iomem *mmio = hpriv->mmio;
2502 void __iomem *port_mmio = ahci_port_base(ap);
2503 u32 tmp;
2504 struct ahci_port_priv *pp = ap->private_data;
2506 /* clear IRQ */
2507 tmp = readl(port_mmio + PORT_IRQ_STAT);
2508 writel(tmp, port_mmio + PORT_IRQ_STAT);
2509 writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);
2511 /* turn IRQ back on */
2512 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2515 static void ahci_error_handler(struct ata_port *ap)
2517 if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
2518 /* restart engine */
2519 ahci_stop_engine(ap);
2520 ahci_start_engine(ap);
2523 sata_pmp_error_handler(ap);
2526 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
2528 struct ata_port *ap = qc->ap;
2530 /* make DMA engine forget about the failed command */
2531 if (qc->flags & ATA_QCFLAG_FAILED)
2532 ahci_kick_engine(ap);
2535 static void ahci_enable_fbs(struct ata_port *ap)
2537 struct ahci_port_priv *pp = ap->private_data;
2538 void __iomem *port_mmio = ahci_port_base(ap);
2539 u32 fbs;
2540 int rc;
2542 if (!pp->fbs_supported)
2543 return;
2545 fbs = readl(port_mmio + PORT_FBS);
2546 if (fbs & PORT_FBS_EN) {
2547 pp->fbs_enabled = true;
2548 pp->fbs_last_dev = -1; /* initialization */
2549 return;
2552 rc = ahci_stop_engine(ap);
2553 if (rc)
2554 return;
2556 writel(fbs | PORT_FBS_EN, port_mmio + PORT_FBS);
2557 fbs = readl(port_mmio + PORT_FBS);
2558 if (fbs & PORT_FBS_EN) {
2559 dev_printk(KERN_INFO, ap->host->dev, "FBS is enabled.\n");
2560 pp->fbs_enabled = true;
2561 pp->fbs_last_dev = -1; /* initialization */
2562 } else
2563 dev_printk(KERN_ERR, ap->host->dev, "Failed to enable FBS\n");
2565 ahci_start_engine(ap);
2568 static void ahci_disable_fbs(struct ata_port *ap)
2570 struct ahci_port_priv *pp = ap->private_data;
2571 void __iomem *port_mmio = ahci_port_base(ap);
2572 u32 fbs;
2573 int rc;
2575 if (!pp->fbs_supported)
2576 return;
2578 fbs = readl(port_mmio + PORT_FBS);
2579 if ((fbs & PORT_FBS_EN) == 0) {
2580 pp->fbs_enabled = false;
2581 return;
2584 rc = ahci_stop_engine(ap);
2585 if (rc)
2586 return;
2588 writel(fbs & ~PORT_FBS_EN, port_mmio + PORT_FBS);
2589 fbs = readl(port_mmio + PORT_FBS);
2590 if (fbs & PORT_FBS_EN)
2591 dev_printk(KERN_ERR, ap->host->dev, "Failed to disable FBS\n");
2592 else {
2593 dev_printk(KERN_INFO, ap->host->dev, "FBS is disabled.\n");
2594 pp->fbs_enabled = false;
2597 ahci_start_engine(ap);
2600 static void ahci_pmp_attach(struct ata_port *ap)
2602 void __iomem *port_mmio = ahci_port_base(ap);
2603 struct ahci_port_priv *pp = ap->private_data;
2604 u32 cmd;
2606 cmd = readl(port_mmio + PORT_CMD);
2607 cmd |= PORT_CMD_PMP;
2608 writel(cmd, port_mmio + PORT_CMD);
2610 ahci_enable_fbs(ap);
2612 pp->intr_mask |= PORT_IRQ_BAD_PMP;
2613 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2616 static void ahci_pmp_detach(struct ata_port *ap)
2618 void __iomem *port_mmio = ahci_port_base(ap);
2619 struct ahci_port_priv *pp = ap->private_data;
2620 u32 cmd;
2622 ahci_disable_fbs(ap);
2624 cmd = readl(port_mmio + PORT_CMD);
2625 cmd &= ~PORT_CMD_PMP;
2626 writel(cmd, port_mmio + PORT_CMD);
2628 pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
2629 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2632 static int ahci_port_resume(struct ata_port *ap)
2634 ahci_power_up(ap);
2635 ahci_start_port(ap);
2637 if (sata_pmp_attached(ap))
2638 ahci_pmp_attach(ap);
2639 else
2640 ahci_pmp_detach(ap);
2642 return 0;
2645 #ifdef CONFIG_PM
2646 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
2648 const char *emsg = NULL;
2649 int rc;
2651 rc = ahci_deinit_port(ap, &emsg);
2652 if (rc == 0)
2653 ahci_power_down(ap);
2654 else {
2655 ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
2656 ahci_start_port(ap);
2659 return rc;
2662 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
2664 struct ata_host *host = dev_get_drvdata(&pdev->dev);
2665 struct ahci_host_priv *hpriv = host->private_data;
2666 void __iomem *mmio = hpriv->mmio;
2667 u32 ctl;
2669 if (mesg.event & PM_EVENT_SUSPEND &&
2670 hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
2671 dev_printk(KERN_ERR, &pdev->dev,
2672 "BIOS update required for suspend/resume\n");
2673 return -EIO;
2676 if (mesg.event & PM_EVENT_SLEEP) {
2677 /* AHCI spec rev1.1 section 8.3.3:
2678 * Software must disable interrupts prior to requesting a
2679 * transition of the HBA to D3 state.
2681 ctl = readl(mmio + HOST_CTL);
2682 ctl &= ~HOST_IRQ_EN;
2683 writel(ctl, mmio + HOST_CTL);
2684 readl(mmio + HOST_CTL); /* flush */
2687 return ata_pci_device_suspend(pdev, mesg);
2690 static int ahci_pci_device_resume(struct pci_dev *pdev)
2692 struct ata_host *host = dev_get_drvdata(&pdev->dev);
2693 int rc;
2695 rc = ata_pci_device_do_resume(pdev);
2696 if (rc)
2697 return rc;
2699 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2700 rc = ahci_reset_controller(host);
2701 if (rc)
2702 return rc;
2704 ahci_init_controller(host);
2707 ata_host_resume(host);
2709 return 0;
2711 #endif
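/*
 * Allocate per-port private data and one coherent DMA chunk holding the
 * command slot list, the received-FIS area and the command tables, detect
 * FIS-based switching support, then bring the port up via ahci_port_resume().
 */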
2713 static int ahci_port_start(struct ata_port *ap)
2715 struct ahci_host_priv *hpriv = ap->host->private_data;
2716 struct device *dev = ap->host->dev;
2717 struct ahci_port_priv *pp;
2718 void *mem;
2719 dma_addr_t mem_dma;
2720 size_t dma_sz, rx_fis_sz;
2722 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
2723 if (!pp)
2724 return -ENOMEM;
2726 /* check FBS capability */
2727 if ((hpriv->cap & HOST_CAP_FBS) && sata_pmp_supported(ap)) {
2728 void __iomem *port_mmio = ahci_port_base(ap);
2729 u32 cmd = readl(port_mmio + PORT_CMD);
2730 if (cmd & PORT_CMD_FBSCP)
2731 pp->fbs_supported = true;
2732 else
2733 dev_printk(KERN_WARNING, dev,
2734 "The port is not capable of FBS\n");
2737 if (pp->fbs_supported) {
2738 dma_sz = AHCI_PORT_PRIV_FBS_DMA_SZ;
2739 rx_fis_sz = AHCI_RX_FIS_SZ * 16;
2740 } else {
2741 dma_sz = AHCI_PORT_PRIV_DMA_SZ;
2742 rx_fis_sz = AHCI_RX_FIS_SZ;
2745 mem = dmam_alloc_coherent(dev, dma_sz, &mem_dma, GFP_KERNEL);
2746 if (!mem)
2747 return -ENOMEM;
2748 memset(mem, 0, dma_sz);
2751 * First item in chunk of DMA memory: 32-slot command table,
2752 * 32 bytes each in size
2754 pp->cmd_slot = mem;
2755 pp->cmd_slot_dma = mem_dma;
2757 mem += AHCI_CMD_SLOT_SZ;
2758 mem_dma += AHCI_CMD_SLOT_SZ;
2761 * Second item: Received-FIS area
2763 pp->rx_fis = mem;
2764 pp->rx_fis_dma = mem_dma;
2766 mem += rx_fis_sz;
2767 mem_dma += rx_fis_sz;
2770 * Third item: data area for storing a single command
2771 * and its scatter-gather table
2773 pp->cmd_tbl = mem;
2774 pp->cmd_tbl_dma = mem_dma;
2777 * Save off initial list of interrupts to be enabled.
2778 * This could be changed later
2780 pp->intr_mask = DEF_PORT_IRQ;
2782 ap->private_data = pp;
2784 /* engage engines, captain */
2785 return ahci_port_resume(ap);
2788 static void ahci_port_stop(struct ata_port *ap)
2790 const char *emsg = NULL;
2791 int rc;
2793 /* de-initialize port */
2794 rc = ahci_deinit_port(ap, &emsg);
2795 if (rc)
2796 ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
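/*
 * Set the PCI DMA masks: try 64-bit streaming/consistent masks when the
 * HBA advertises 64-bit addressing (falling back to a 32-bit consistent
 * mask if needed), otherwise use 32-bit masks for both.
 */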
2799 static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
2801 int rc;
2803 if (using_dac &&
2804 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
2805 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
2806 if (rc) {
2807 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2808 if (rc) {
2809 dev_printk(KERN_ERR, &pdev->dev,
2810 "64-bit DMA enable failed\n");
2811 return rc;
2814 } else {
2815 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2816 if (rc) {
2817 dev_printk(KERN_ERR, &pdev->dev,
2818 "32-bit DMA enable failed\n");
2819 return rc;
2821 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2822 if (rc) {
2823 dev_printk(KERN_ERR, &pdev->dev,
2824 "32-bit consistent DMA enable failed\n");
2825 return rc;
2828 return 0;
2831 static void ahci_print_info(struct ata_host *host)
2833 struct ahci_host_priv *hpriv = host->private_data;
2834 struct pci_dev *pdev = to_pci_dev(host->dev);
2835 void __iomem *mmio = hpriv->mmio;
2836 u32 vers, cap, cap2, impl, speed;
2837 const char *speed_s;
2838 u16 cc;
2839 const char *scc_s;
2841 vers = readl(mmio + HOST_VERSION);
2842 cap = hpriv->cap;
2843 cap2 = hpriv->cap2;
2844 impl = hpriv->port_map;
2846 speed = (cap >> 20) & 0xf;
2847 if (speed == 1)
2848 speed_s = "1.5";
2849 else if (speed == 2)
2850 speed_s = "3";
2851 else if (speed == 3)
2852 speed_s = "6";
2853 else
2854 speed_s = "?";
2856 pci_read_config_word(pdev, 0x0a, &cc);
2857 if (cc == PCI_CLASS_STORAGE_IDE)
2858 scc_s = "IDE";
2859 else if (cc == PCI_CLASS_STORAGE_SATA)
2860 scc_s = "SATA";
2861 else if (cc == PCI_CLASS_STORAGE_RAID)
2862 scc_s = "RAID";
2863 else
2864 scc_s = "unknown";
2866 dev_printk(KERN_INFO, &pdev->dev,
2867 "AHCI %02x%02x.%02x%02x "
2868 "%u slots %u ports %s Gbps 0x%x impl %s mode\n"
2871 (vers >> 24) & 0xff,
2872 (vers >> 16) & 0xff,
2873 (vers >> 8) & 0xff,
2874 vers & 0xff,
2876 ((cap >> 8) & 0x1f) + 1,
2877 (cap & 0x1f) + 1,
2878 speed_s,
2879 impl,
2880 scc_s);
2882 dev_printk(KERN_INFO, &pdev->dev,
2883 "flags: "
2884 "%s%s%s%s%s%s%s"
2885 "%s%s%s%s%s%s%s"
2886 "%s%s%s%s%s%s\n"
2889 cap & HOST_CAP_64 ? "64bit " : "",
2890 cap & HOST_CAP_NCQ ? "ncq " : "",
2891 cap & HOST_CAP_SNTF ? "sntf " : "",
2892 cap & HOST_CAP_MPS ? "ilck " : "",
2893 cap & HOST_CAP_SSS ? "stag " : "",
2894 cap & HOST_CAP_ALPM ? "pm " : "",
2895 cap & HOST_CAP_LED ? "led " : "",
2896 cap & HOST_CAP_CLO ? "clo " : "",
2897 cap & HOST_CAP_ONLY ? "only " : "",
2898 cap & HOST_CAP_PMP ? "pmp " : "",
2899 cap & HOST_CAP_FBS ? "fbs " : "",
2900 cap & HOST_CAP_PIO_MULTI ? "pio " : "",
2901 cap & HOST_CAP_SSC ? "slum " : "",
2902 cap & HOST_CAP_PART ? "part " : "",
2903 cap & HOST_CAP_CCC ? "ccc " : "",
2904 cap & HOST_CAP_EMS ? "ems " : "",
2905 cap & HOST_CAP_SXS ? "sxs " : "",
2906 cap2 & HOST_CAP2_APST ? "apst " : "",
2907 cap2 & HOST_CAP2_NVMHCI ? "nvmp " : "",
2908 cap2 & HOST_CAP2_BOH ? "boh " : ""
2912 /* On ASUS P5W DH Deluxe, the second port of PCI device 00:1f.2 is
2913 * hardwired to on-board SIMG 4726. The chipset is ICH8 and doesn't
2914 * support PMP and the 4726 either directly exports the device
2915 * attached to the first downstream port or acts as a hardware storage
2916 * controller and emulates a single ATA device (can be RAID 0/1 or some
2917 * other configuration).
2919 * When there's no device attached to the first downstream port of the
2920 * 4726, "Config Disk" appears, which is a pseudo ATA device to
2921 * configure the 4726. However, ATA emulation of the device is very
2922 * lame. It doesn't send signature D2H Reg FIS after the initial
2923 * hardreset, pukes on SRST w/ PMP==0 and has a bunch of other issues.
2925 * The following function works around the problem by always using
2926 * hardreset on the port and not depending on receiving signature FIS
2927 * afterward. If signature FIS isn't received soon, ATA class is
2928 * assumed without follow-up softreset.
2930 static void ahci_p5wdh_workaround(struct ata_host *host)
2932 static struct dmi_system_id sysids[] = {
2934 .ident = "P5W DH Deluxe",
2935 .matches = {
2936 DMI_MATCH(DMI_SYS_VENDOR,
2937 "ASUSTEK COMPUTER INC"),
2938 DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"),
2943 struct pci_dev *pdev = to_pci_dev(host->dev);
2945 if (pdev->bus->number == 0 && pdev->devfn == PCI_DEVFN(0x1f, 2) &&
2946 dmi_check_system(sysids)) {
2947 struct ata_port *ap = host->ports[1];
2949 dev_printk(KERN_INFO, &pdev->dev, "enabling ASUS P5W DH "
2950 "Deluxe on-board SIMG4726 workaround\n");
2952 ap->ops = &ahci_p5wdh_ops;
2953 ap->link.flags |= ATA_LFLAG_NO_SRST | ATA_LFLAG_ASSUME_ATA;
2957 /* only some SB600 ahci controllers can do 64bit DMA */
2958 static bool ahci_sb600_enable_64bit(struct pci_dev *pdev)
2960 static const struct dmi_system_id sysids[] = {
2962 * The oldest version known to be broken is 0901; the oldest
2963 * known to work is 1501, which was released on 2007-10-26.
2964 * Enable 64bit DMA on 1501 and anything newer.
2966 * Please read bko#9412 for more info.
2969 .ident = "ASUS M2A-VM",
2970 .matches = {
2971 DMI_MATCH(DMI_BOARD_VENDOR,
2972 "ASUSTeK Computer INC."),
2973 DMI_MATCH(DMI_BOARD_NAME, "M2A-VM"),
2975 .driver_data = "20071026", /* yyyymmdd */
2978 * All BIOS versions for the MSI K9A2 Platinum (MS-7376)
2979 * support 64bit DMA.
2981 * BIOS versions earlier than 1.5 had the Manufacturer DMI
2982 * fields as "MICRO-STAR INTERANTIONAL CO.,LTD".
2983 * This spelling mistake was fixed in BIOS version 1.5, so
2984 * 1.5 and later have the Manufacturer as
2985 * "MICRO-STAR INTERNATIONAL CO.,LTD".
2986 * So try to match on DMI_BOARD_VENDOR of "MICRO-STAR INTER".
2988 * BIOS versions earlier than 1.9 had a Board Product Name
2989 * DMI field of "MS-7376". This was changed to be
2990 * "K9A2 Platinum (MS-7376)" in version 1.9, but we can still
2991 * match on DMI_BOARD_NAME of "MS-7376".
2994 .ident = "MSI K9A2 Platinum",
2995 .matches = {
2996 DMI_MATCH(DMI_BOARD_VENDOR,
2997 "MICRO-STAR INTER"),
2998 DMI_MATCH(DMI_BOARD_NAME, "MS-7376"),
3003 const struct dmi_system_id *match;
3004 int year, month, date;
3005 char buf[9];
3007 match = dmi_first_match(sysids);
3008 if (pdev->bus->number != 0 || pdev->devfn != PCI_DEVFN(0x12, 0) ||
3009 !match)
3010 return false;
3012 if (!match->driver_data)
3013 goto enable_64bit;
3015 dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
3016 snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
3018 if (strcmp(buf, match->driver_data) >= 0)
3019 goto enable_64bit;
3020 else {
3021 dev_printk(KERN_WARNING, &pdev->dev, "%s: BIOS too old, "
3022 "forcing 32bit DMA, update BIOS\n", match->ident);
3023 return false;
3026 enable_64bit:
3027 dev_printk(KERN_WARNING, &pdev->dev, "%s: enabling 64bit DMA\n",
3028 match->ident);
3029 return true;
3032 static bool ahci_broken_system_poweroff(struct pci_dev *pdev)
3034 static const struct dmi_system_id broken_systems[] = {
3036 .ident = "HP Compaq nx6310",
3037 .matches = {
3038 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
3039 DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6310"),
3041 /* PCI slot number of the controller */
3042 .driver_data = (void *)0x1FUL,
3045 .ident = "HP Compaq 6720s",
3046 .matches = {
3047 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
3048 DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6720s"),
3050 /* PCI slot number of the controller */
3051 .driver_data = (void *)0x1FUL,
3054 { } /* terminate list */
3056 const struct dmi_system_id *dmi = dmi_first_match(broken_systems);
3058 if (dmi) {
3059 unsigned long slot = (unsigned long)dmi->driver_data;
3060 /* apply the quirk only to on-board controllers */
3061 return slot == PCI_SLOT(pdev->devfn);
3064 return false;
3067 static bool ahci_broken_suspend(struct pci_dev *pdev)
3069 static const struct dmi_system_id sysids[] = {
3071 * On HP dv[4-6] and HDX18 with earlier BIOSen, the link
3072 * to the hard disk doesn't come online after
3073 * resuming from STR. Warn and fail suspend.
3075 * http://bugzilla.kernel.org/show_bug.cgi?id=12276
3077 * Use dates instead of versions to match as HP is
3078 * apparently recycling both product and version
3079 * strings.
3081 * http://bugzilla.kernel.org/show_bug.cgi?id=15462
3084 .ident = "dv4",
3085 .matches = {
3086 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
3087 DMI_MATCH(DMI_PRODUCT_NAME,
3088 "HP Pavilion dv4 Notebook PC"),
3090 .driver_data = "20090105", /* F.30 */
3093 .ident = "dv5",
3094 .matches = {
3095 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
3096 DMI_MATCH(DMI_PRODUCT_NAME,
3097 "HP Pavilion dv5 Notebook PC"),
3099 .driver_data = "20090506", /* F.16 */
3102 .ident = "dv6",
3103 .matches = {
3104 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
3105 DMI_MATCH(DMI_PRODUCT_NAME,
3106 "HP Pavilion dv6 Notebook PC"),
3108 .driver_data = "20090423", /* F.21 */
3111 .ident = "HDX18",
3112 .matches = {
3113 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
3114 DMI_MATCH(DMI_PRODUCT_NAME,
3115 "HP HDX18 Notebook PC"),
3117 .driver_data = "20090430", /* F.23 */
3120 * Acer eMachines G725 has the same problem. BIOS
3121 * V1.03 is known to be broken. V3.04 is known to
3122 * work. In between, there are V1.06, V2.06 and V3.03
3123 * that we don't have much idea about. For now,
3124 * blacklist anything older than V3.04.
3126 * http://bugzilla.kernel.org/show_bug.cgi?id=15104
3129 .ident = "G725",
3130 .matches = {
3131 DMI_MATCH(DMI_SYS_VENDOR, "eMachines"),
3132 DMI_MATCH(DMI_PRODUCT_NAME, "eMachines G725"),
3134 .driver_data = "20091216", /* V3.04 */
3136 { } /* terminate list */
3138 const struct dmi_system_id *dmi = dmi_first_match(sysids);
3139 int year, month, date;
3140 char buf[9];
3142 if (!dmi || pdev->bus->number || pdev->devfn != PCI_DEVFN(0x1f, 2))
3143 return false;
3145 dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
3146 snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
3148 return strcmp(buf, dmi->driver_data) < 0;
3151 static bool ahci_broken_online(struct pci_dev *pdev)
3153 #define ENCODE_BUSDEVFN(bus, slot, func) \
3154 (void *)(unsigned long)(((bus) << 8) | PCI_DEVFN((slot), (func)))
3155 static const struct dmi_system_id sysids[] = {
3157 * There are several gigabyte boards which use
3158 * SIMG5723s configured as hardware RAID. Certain
3159 * 5723 firmware revisions shipped there keep the link
3160 * online but fail to answer properly to SRST or
3161 * IDENTIFY when no device is attached downstream,
3162 * causing libata to retry quite a few times and leading
3163 * to excessive detection delay.
3165 * As these firmwares respond to the second reset try
3166 * with invalid device signature, considering unknown
3167 * sig as offline works around the problem acceptably.
3170 .ident = "EP45-DQ6",
3171 .matches = {
3172 DMI_MATCH(DMI_BOARD_VENDOR,
3173 "Gigabyte Technology Co., Ltd."),
3174 DMI_MATCH(DMI_BOARD_NAME, "EP45-DQ6"),
3176 .driver_data = ENCODE_BUSDEVFN(0x0a, 0x00, 0),
3179 .ident = "EP45-DS5",
3180 .matches = {
3181 DMI_MATCH(DMI_BOARD_VENDOR,
3182 "Gigabyte Technology Co., Ltd."),
3183 DMI_MATCH(DMI_BOARD_NAME, "EP45-DS5"),
3185 .driver_data = ENCODE_BUSDEVFN(0x03, 0x00, 0),
3187 { } /* terminate list */
3189 #undef ENCODE_BUSDEVFN
3190 const struct dmi_system_id *dmi = dmi_first_match(sysids);
3191 unsigned int val;
3193 if (!dmi)
3194 return false;
3196 val = (unsigned long)dmi->driver_data;
3198 return pdev->bus->number == (val >> 8) && pdev->devfn == (val & 0xff);
3201 #ifdef CONFIG_ATA_ACPI
3202 static void ahci_gtf_filter_workaround(struct ata_host *host)
3204 static const struct dmi_system_id sysids[] = {
3206 * Aspire 3810T issues a bunch of SATA enable commands
3207 * via _GTF including an invalid one and one which is
3208 * rejected by the device. Among the successful ones
3209 * is FPDMA non-zero offset enable which when enabled
3210 * only on the drive side leads to NCQ command
3211 * failures. Filter it out.
3214 .ident = "Aspire 3810T",
3215 .matches = {
3216 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
3217 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 3810T"),
3219 .driver_data = (void *)ATA_ACPI_FILTER_FPDMA_OFFSET,
3223 const struct dmi_system_id *dmi = dmi_first_match(sysids);
3224 unsigned int filter;
3225 int i;
3227 if (!dmi)
3228 return;
3230 filter = (unsigned long)dmi->driver_data;
3231 dev_printk(KERN_INFO, host->dev,
3232 "applying extra ACPI _GTF filter 0x%x for %s\n",
3233 filter, dmi->ident);
3235 for (i = 0; i < host->n_ports; i++) {
3236 struct ata_port *ap = host->ports[i];
3237 struct ata_link *link;
3238 struct ata_device *dev;
3240 ata_for_each_link(link, ap, EDGE)
3241 ata_for_each_dev(dev, link, ALL)
3242 dev->gtf_filter |= filter;
3245 #else
3246 static inline void ahci_gtf_filter_workaround(struct ata_host *host)
3248 #endif
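/*
 * PCI probe: enable the device, map the AHCI BAR, apply chipset/board/BIOS
 * quirks, save the initial controller configuration, allocate and wire up
 * the ATA host, reset and initialize the controller, then activate the host.
 */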
3250 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3252 static int printed_version;
3253 unsigned int board_id = ent->driver_data;
3254 struct ata_port_info pi = ahci_port_info[board_id];
3255 const struct ata_port_info *ppi[] = { &pi, NULL };
3256 struct device *dev = &pdev->dev;
3257 struct ahci_host_priv *hpriv;
3258 struct ata_host *host;
3259 int n_ports, i, rc;
3261 VPRINTK("ENTER\n");
3263 WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS);
3265 if (!printed_version++)
3266 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
3268 /* The AHCI driver can only drive the SATA ports; the PATA driver
3269 can drive them all, so if both drivers are selected make sure
3270 AHCI stays out of the way */
3271 if (pdev->vendor == PCI_VENDOR_ID_MARVELL && !marvell_enable)
3272 return -ENODEV;
3274 /* Promise's PDC42819 is a SAS/SATA controller that has an AHCI mode.
3275 * At the moment, we can only use the AHCI mode. Let the users know
3276 * that for SAS drives they're out of luck.
3278 if (pdev->vendor == PCI_VENDOR_ID_PROMISE)
3279 dev_printk(KERN_INFO, &pdev->dev, "PDC42819 "
3280 "can only drive SATA devices with this driver\n");
3282 /* acquire resources */
3283 rc = pcim_enable_device(pdev);
3284 if (rc)
3285 return rc;
3287 /* AHCI controllers often implement SFF compatible interface.
3288 * Grab all PCI BARs just in case.
3290 rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
3291 if (rc == -EBUSY)
3292 pcim_pin_device(pdev);
3293 if (rc)
3294 return rc;
3296 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
3297 (pdev->device == 0x2652 || pdev->device == 0x2653)) {
3298 u8 map;
3300 /* ICH6s share the same PCI ID for both piix and ahci
3301 * modes. Enabling ahci mode while MAP indicates
3302 * combined mode is a bad idea. Yield to ata_piix.
3304 pci_read_config_byte(pdev, ICH_MAP, &map);
3305 if (map & 0x3) {
3306 dev_printk(KERN_INFO, &pdev->dev, "controller is in "
3307 "combined mode, can't enable AHCI mode\n");
3308 return -ENODEV;
3312 hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
3313 if (!hpriv)
3314 return -ENOMEM;
3315 hpriv->flags |= (unsigned long)pi.private_data;
3317 /* MCP65 revision A1 and A2 can't do MSI */
3318 if (board_id == board_ahci_mcp65 &&
3319 (pdev->revision == 0xa1 || pdev->revision == 0xa2))
3320 hpriv->flags |= AHCI_HFLAG_NO_MSI;
3322 /* SB800 does NOT need the workaround to ignore SERR_INTERNAL */
3323 if (board_id == board_ahci_sb700 && pdev->revision >= 0x40)
3324 hpriv->flags &= ~AHCI_HFLAG_IGN_SERR_INTERNAL;
3326 /* only some SB600s can do 64bit DMA */
3327 if (ahci_sb600_enable_64bit(pdev))
3328 hpriv->flags &= ~AHCI_HFLAG_32BIT_ONLY;
3330 if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev))
3331 pci_intx(pdev, 1);
3333 hpriv->mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
3335 /* save initial config */
3336 ahci_pci_save_initial_config(pdev, hpriv);
3338 /* prepare host */
3339 if (hpriv->cap & HOST_CAP_NCQ) {
3340 pi.flags |= ATA_FLAG_NCQ;
3341 /* Auto-activate optimization is supposed to be supported on
3342 all AHCI controllers indicating NCQ support, but it seems
3343 to be broken at least on some NVIDIA MCP79 chipsets.
3344 Until we get info on which NVIDIA chipsets don't have this
3345 issue, if any, disable AA on all NVIDIA AHCIs. */
3346 if (pdev->vendor != PCI_VENDOR_ID_NVIDIA)
3347 pi.flags |= ATA_FLAG_FPDMA_AA;
3350 if (hpriv->cap & HOST_CAP_PMP)
3351 pi.flags |= ATA_FLAG_PMP;
3353 if (ahci_em_messages && (hpriv->cap & HOST_CAP_EMS)) {
3354 u8 messages;
3355 void __iomem *mmio = hpriv->mmio;
3356 u32 em_loc = readl(mmio + HOST_EM_LOC);
3357 u32 em_ctl = readl(mmio + HOST_EM_CTL);
3359 messages = (em_ctl & EM_CTRL_MSG_TYPE) >> 16;
3361 /* we only support LED message type right now */
3362 if ((messages & 0x01) && (ahci_em_messages == 1)) {
3363 /* store em_loc */
3364 hpriv->em_loc = ((em_loc >> 16) * 4);
3365 pi.flags |= ATA_FLAG_EM;
3366 if (!(em_ctl & EM_CTL_ALHD))
3367 pi.flags |= ATA_FLAG_SW_ACTIVITY;
3371 if (ahci_broken_system_poweroff(pdev)) {
3372 pi.flags |= ATA_FLAG_NO_POWEROFF_SPINDOWN;
3373 dev_info(&pdev->dev,
3374 "quirky BIOS, skipping spindown on poweroff\n");
3377 if (ahci_broken_suspend(pdev)) {
3378 hpriv->flags |= AHCI_HFLAG_NO_SUSPEND;
3379 dev_printk(KERN_WARNING, &pdev->dev,
3380 "BIOS update required for suspend/resume\n");
3383 if (ahci_broken_online(pdev)) {
3384 hpriv->flags |= AHCI_HFLAG_SRST_TOUT_IS_OFFLINE;
3385 dev_info(&pdev->dev,
3386 "online status unreliable, applying workaround\n");
3389 /* CAP.NP sometimes indicates the index of the last enabled
3390 * port, at other times, that of the last possible port, so
3391 * determining the maximum port number requires looking at
3392 * both CAP.NP and port_map.
3394 n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
3396 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
3397 if (!host)
3398 return -ENOMEM;
3399 host->private_data = hpriv;
3401 if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
3402 host->flags |= ATA_HOST_PARALLEL_SCAN;
3403 else
3404 printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n");
3406 if (pi.flags & ATA_FLAG_EM)
3407 ahci_reset_em(host);
3409 for (i = 0; i < host->n_ports; i++) {
3410 struct ata_port *ap = host->ports[i];
3412 ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar");
3413 ata_port_pbar_desc(ap, AHCI_PCI_BAR,
3414 0x100 + ap->port_no * 0x80, "port");
3416 /* set initial link pm policy */
3417 ap->pm_policy = NOT_AVAILABLE;
3419 /* set enclosure management message type */
3420 if (ap->flags & ATA_FLAG_EM)
3421 ap->em_message_type = ahci_em_messages;
3424 /* disabled/not-implemented port */
3425 if (!(hpriv->port_map & (1 << i)))
3426 ap->ops = &ata_dummy_port_ops;
3429 /* apply workaround for ASUS P5W DH Deluxe mainboard */
3430 ahci_p5wdh_workaround(host);
3432 /* apply gtf filter quirk */
3433 ahci_gtf_filter_workaround(host);
3435 /* initialize adapter */
3436 rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
3437 if (rc)
3438 return rc;
3440 rc = ahci_reset_controller(host);
3441 if (rc)
3442 return rc;
3444 ahci_init_controller(host);
3445 ahci_print_info(host);
3447 pci_set_master(pdev);
3448 return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED,
3449 &ahci_sht);
3452 static int __init ahci_init(void)
3454 return pci_register_driver(&ahci_pci_driver);
3457 static void __exit ahci_exit(void)
3459 pci_unregister_driver(&ahci_pci_driver);
3463 MODULE_AUTHOR("Jeff Garzik");
3464 MODULE_DESCRIPTION("AHCI SATA low-level driver");
3465 MODULE_LICENSE("GPL");
3466 MODULE_DEVICE_TABLE(pci, ahci_pci_tbl);
3467 MODULE_VERSION(DRV_VERSION);
3469 module_init(ahci_init);
3470 module_exit(ahci_exit);