#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/ide.h>

int generic_ide_suspend(struct device *dev, pm_message_t mesg)
{
	ide_drive_t *drive = dev_get_drvdata(dev);
	ide_drive_t *pair = ide_get_pair_dev(drive);
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq;
	struct request_pm_state rqpm;
	int ret;

	if (ide_port_acpi(hwif)) {
		/* call ACPI _GTM only once */
		if ((drive->dn & 1) == 0 || pair == NULL)
			ide_acpi_get_timing(hwif);
	}
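
	/*
	 * Suspend is driven through the block layer: a special PM request
	 * is queued and executed synchronously, so it is ordered against
	 * any I/O already in flight on this drive's queue.
	 */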
	memset(&rqpm, 0, sizeof(rqpm));
	rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
	rq->cmd_type = REQ_TYPE_PM_SUSPEND;
	rq->special = &rqpm;
	rqpm.pm_step = IDE_PM_START_SUSPEND;
	if (mesg.event == PM_EVENT_PRETHAW)
		mesg.event = PM_EVENT_FREEZE;
	rqpm.pm_state = mesg.event;

	ret = blk_execute_rq(drive->queue, NULL, rq, 0);
	blk_put_request(rq);

	if (ret == 0 && ide_port_acpi(hwif)) {
		/* call ACPI _PS3 only after both devices are suspended */
		if ((drive->dn & 1) || pair == NULL)
			ide_acpi_set_state(hwif, 0);
	}

	return ret;
}
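
/*
 * Resume mirrors suspend: bring the port back up through ACPI first,
 * then queue a PM resume request.  The request carries REQ_PREEMPT so
 * it may run while the drive is still marked IDE_DFLAG_BLOCKED.
 */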
int generic_ide_resume(struct device *dev)
{
	ide_drive_t *drive = dev_get_drvdata(dev);
	ide_drive_t *pair = ide_get_pair_dev(drive);
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq;
	struct request_pm_state rqpm;
	int err;

	if (ide_port_acpi(hwif)) {
		/* call ACPI _PS0 / _STM only once */
		if ((drive->dn & 1) == 0 || pair == NULL) {
			ide_acpi_set_state(hwif, 1);
			ide_acpi_push_timing(hwif);
		}

		ide_acpi_exec_tfs(drive);
	}
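
	/*
	 * Build the resume request.  rqpm lives on the stack; that is
	 * safe because blk_execute_rq() does not return until the
	 * request has completed.
	 */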
	memset(&rqpm, 0, sizeof(rqpm));
	rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
	rq->cmd_type = REQ_TYPE_PM_RESUME;
	rq->cmd_flags |= REQ_PREEMPT;
	rq->special = &rqpm;
	rqpm.pm_step = IDE_PM_START_RESUME;
	rqpm.pm_state = PM_EVENT_ON;

	err = blk_execute_rq(drive->queue, NULL, rq, 1);
	blk_put_request(rq);

	if (err == 0 && dev->driver) {
		struct ide_driver *drv = to_ide_driver(dev->driver);

		if (drv->resume)
			drv->resume(drive);
	}

	return err;
}
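
/*
 * PM requests advance through a small state machine kept in
 * request_pm_state.pm_step:
 *
 *   suspend: IDE_PM_FLUSH_CACHE -> IDE_PM_STANDBY -> IDE_PM_COMPLETED
 *   resume:  IDE_PM_RESTORE_PIO -> IDE_PM_IDLE -> IDE_PM_RESTORE_DMA
 *            -> IDE_PM_COMPLETED
 *
 * ide_complete_power_step() advances pm_step once the command for the
 * current step has finished.
 */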
void ide_complete_power_step(ide_drive_t *drive, struct request *rq)
{
	struct request_pm_state *pm = rq->special;

#ifdef DEBUG_PM
	printk(KERN_INFO "%s: complete_power_step(step: %d)\n",
	       drive->name, pm->pm_step);
#endif
	if (drive->media != ide_disk)
		return;

	switch (pm->pm_step) {
	case IDE_PM_FLUSH_CACHE:	/* Suspend step 1 (flush cache) */
		if (pm->pm_state == PM_EVENT_FREEZE)
			pm->pm_step = IDE_PM_COMPLETED;
		else
			pm->pm_step = IDE_PM_STANDBY;
		break;
	case IDE_PM_STANDBY:		/* Suspend step 2 (standby) */
		pm->pm_step = IDE_PM_COMPLETED;
		break;
	case IDE_PM_RESTORE_PIO:	/* Resume step 1 (restore PIO) */
		pm->pm_step = IDE_PM_IDLE;
		break;
	case IDE_PM_IDLE:		/* Resume step 2 (idle) */
		pm->pm_step = IDE_PM_RESTORE_DMA;
		break;
	}
}
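
/*
 * Issue the ATA command for the current pm_step as a no-data taskfile,
 * or skip ahead when the step does not apply to this device.
 */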
ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
{
	struct request_pm_state *pm = rq->special;
	struct ide_cmd cmd = { };

	switch (pm->pm_step) {
	case IDE_PM_FLUSH_CACHE:	/* Suspend step 1 (flush cache) */
		if (drive->media != ide_disk)
			break;
		/* Not supported? Switch to next step now. */
		if (ata_id_flush_enabled(drive->id) == 0 ||
		    (drive->dev_flags & IDE_DFLAG_WCACHE) == 0) {
			ide_complete_power_step(drive, rq);
			return ide_stopped;
		}
		if (ata_id_flush_ext_enabled(drive->id))
			cmd.tf.command = ATA_CMD_FLUSH_EXT;
		else
			cmd.tf.command = ATA_CMD_FLUSH;
		goto out_do_tf;
	case IDE_PM_STANDBY:		/* Suspend step 2 (standby) */
		cmd.tf.command = ATA_CMD_STANDBYNOW1;
		goto out_do_tf;
	case IDE_PM_RESTORE_PIO:	/* Resume step 1 (restore PIO) */
		ide_set_max_pio(drive);
		/*
		 * skip IDE_PM_IDLE for ATAPI devices
		 */
		if (drive->media != ide_disk)
			pm->pm_step = IDE_PM_RESTORE_DMA;
		else
			ide_complete_power_step(drive, rq);
		return ide_stopped;
	case IDE_PM_IDLE:		/* Resume step 2 (idle) */
		cmd.tf.command = ATA_CMD_IDLEIMMEDIATE;
		goto out_do_tf;
	case IDE_PM_RESTORE_DMA:	/* Resume step 3 (restore DMA) */
		/*
		 * Right now, all we do is call ide_set_dma(drive),
		 * we could be smarter and check for current xfer_speed
		 * in struct drive etc...
		 */
		if (drive->hwif->dma_ops == NULL)
			break;
		/*
		 * TODO: respect IDE_DFLAG_USING_DMA
		 */
		ide_set_dma(drive);
		break;
	}

	pm->pm_step = IDE_PM_COMPLETED;

	return ide_stopped;
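
	/*
	 * Common exit for steps that issue a command: send it to the
	 * device as a no-data taskfile.
	 */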
out_do_tf:
	cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
	cmd.valid.in.tf  = IDE_VALID_IN_TF  | IDE_VALID_DEVICE;
	cmd.protocol = ATA_PROT_NODATA;

	return do_rw_taskfile(drive, &cmd);
}

/**
 *	ide_complete_pm_rq - end the current Power Management request
 *	@drive: target drive
 *	@rq: request
 *
 *	This function cleans up the current PM request and stops the queue
 *	if necessary.
 */
void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq)
{
	struct request_queue *q = drive->queue;
	struct request_pm_state *pm = rq->special;
	unsigned long flags;

	ide_complete_power_step(drive, rq);
	if (pm->pm_step != IDE_PM_COMPLETED)
		return;

#ifdef DEBUG_PM
	printk("%s: completing PM request, %s\n", drive->name,
	       (rq->cmd_type == REQ_TYPE_PM_SUSPEND) ? "suspend" : "resume");
#endif
	spin_lock_irqsave(q->queue_lock, flags);
	if (rq->cmd_type == REQ_TYPE_PM_SUSPEND)
		blk_stop_queue(q);
	else
		drive->dev_flags &= ~IDE_DFLAG_BLOCKED;
	spin_unlock_irqrestore(q->queue_lock, flags);

	drive->hwif->rq = NULL;

	if (blk_end_request(rq, 0, 0))
		BUG();
}
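
/*
 * Prepare for a PM request: mark the drive blocked at the start of a
 * suspend; at the start of a resume, wait for the drive to come out
 * of BSY and restart the queue.
 */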
void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
{
	struct request_pm_state *pm = rq->special;

	if (rq->cmd_type == REQ_TYPE_PM_SUSPEND &&
	    pm->pm_step == IDE_PM_START_SUSPEND)
		/* Mark drive blocked when starting the suspend sequence. */
		drive->dev_flags |= IDE_DFLAG_BLOCKED;
	else if (rq->cmd_type == REQ_TYPE_PM_RESUME &&
		 pm->pm_step == IDE_PM_START_RESUME) {
		/*
		 * The first thing we do on wakeup is to wait for BSY bit to
		 * go away (with a looong timeout) as a drive on this hwif may
		 * just be POSTing itself.
		 * We do that before even selecting as the "other" device on
		 * the bus may be broken enough to walk on our toes at this
		 * point.
		 */
		ide_hwif_t *hwif = drive->hwif;
		const struct ide_tp_ops *tp_ops = hwif->tp_ops;
		struct request_queue *q = drive->queue;
		unsigned long flags;
		int rc;
#ifdef DEBUG_PM
		printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);
#endif
		rc = ide_wait_not_busy(hwif, 35000);
		if (rc)
			printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
		tp_ops->dev_select(drive);
		tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
		rc = ide_wait_not_busy(hwif, 100000);
		if (rc)
			printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}