1 #include <linux/kernel.h>
/*
 * generic_ide_suspend - power down an IDE device for system suspend
 * @dev:  generic device backing the IDE drive
 * @mesg: PM event being handled (suspend / freeze / prethaw)
 *
 * Issues a REQ_TYPE_PM_SUSPEND request through the drive's request queue
 * and lets the PM state machine (ide_start_power_step() /
 * ide_complete_power_step()) walk the suspend steps.  Returns 0 on
 * success or the blk_execute_rq() error.
 */
int generic_ide_suspend(struct device *dev, pm_message_t mesg)
{
	ide_drive_t *drive = dev_get_drvdata(dev);
	ide_drive_t *pair = ide_get_pair_dev(drive);
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq;
	struct request_pm_state rqpm;
	int ret;

	if (ide_port_acpi(hwif)) {
		/* call ACPI _GTM only once */
		if ((drive->dn & 1) == 0 || pair == NULL)
			ide_acpi_get_timing(hwif);
	}

	memset(&rqpm, 0, sizeof(rqpm));
	rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
	rq->cmd_type = REQ_TYPE_PM_SUSPEND;
	/* PM state travels with the request via ->special */
	rq->special = &rqpm;
	rqpm.pm_step = IDE_PM_START_SUSPEND;
	/* PRETHAW is handled like FREEZE: flush the cache, no spin-down */
	if (mesg.event == PM_EVENT_PRETHAW)
		mesg.event = PM_EVENT_FREEZE;
	rqpm.pm_state = mesg.event;

	ret = blk_execute_rq(drive->queue, NULL, rq, 0);
	blk_put_request(rq);

	if (ret == 0 && ide_port_acpi(hwif)) {
		/* call ACPI _PS3 only after both devices are suspended */
		if ((drive->dn & 1) || pair == NULL)
			ide_acpi_set_state(hwif, 0);
	}

	return ret;
}
/*
 * generic_ide_resume - power an IDE device back up on system resume
 * @dev: generic device backing the IDE drive
 *
 * Restores ACPI power state and timings, issues a REQ_TYPE_PM_RESUME
 * request (marked REQ_PREEMPT so it is accepted while the queue is still
 * blocked from suspend) and finally invokes the attached ide_driver's
 * resume hook, if any.  Returns 0 on success.
 */
int generic_ide_resume(struct device *dev)
{
	ide_drive_t *drive = dev_get_drvdata(dev);
	ide_drive_t *pair = ide_get_pair_dev(drive);
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq;
	struct request_pm_state rqpm;
	int err;

	if (ide_port_acpi(hwif)) {
		/* call ACPI _PS0 / _STM only once */
		if ((drive->dn & 1) == 0 || pair == NULL) {
			ide_acpi_set_state(hwif, 1);
			ide_acpi_push_timing(hwif);
		}

		ide_acpi_exec_tfs(drive);
	}

	memset(&rqpm, 0, sizeof(rqpm));
	rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
	rq->cmd_type = REQ_TYPE_PM_RESUME;
	/* queue is blocked during resume; let this request through */
	rq->cmd_flags |= REQ_PREEMPT;
	rq->special = &rqpm;
	rqpm.pm_step = IDE_PM_START_RESUME;
	rqpm.pm_state = PM_EVENT_ON;

	err = blk_execute_rq(drive->queue, NULL, rq, 1);
	blk_put_request(rq);

	if (err == 0 && dev->driver) {
		struct ide_driver *drv = to_ide_driver(dev->driver);

		if (drv->resume)
			drv->resume(drive);
	}

	return err;
}
/*
 * ide_complete_power_step - advance the PM state machine by one step
 * @drive: target drive
 * @rq:    PM request whose ->special holds the struct request_pm_state
 *
 * Called when the current step's work has finished; moves pm->pm_step to
 * the next suspend/resume step.  Non-disk (ATAPI) devices skip the
 * disk-specific step sequencing entirely.
 */
void ide_complete_power_step(ide_drive_t *drive, struct request *rq)
{
	struct request_pm_state *pm = rq->special;

	/* NOTE(review): upstream guards this debug printk with #ifdef DEBUG_PM — confirm */
	printk(KERN_INFO "%s: complete_power_step(step: %d)\n",
	       drive->name, pm->pm_step);

	if (drive->media != ide_disk)
		return;

	switch (pm->pm_step) {
	case IDE_PM_FLUSH_CACHE:	/* Suspend step 1 (flush cache) */
		if (pm->pm_state == PM_EVENT_FREEZE)
			/* freeze: cache flushed, no spin-down needed */
			pm->pm_step = IDE_PM_COMPLETED;
		else
			pm->pm_step = IDE_PM_STANDBY;
		break;
	case IDE_PM_STANDBY:		/* Suspend step 2 (standby) */
		pm->pm_step = IDE_PM_COMPLETED;
		break;
	case IDE_PM_RESTORE_PIO:	/* Resume step 1 (restore PIO) */
		pm->pm_step = IDE_PM_IDLE;
		break;
	case IDE_PM_IDLE:		/* Resume step 2 (idle)*/
		pm->pm_step = IDE_PM_RESTORE_DMA;
		break;
	}
}
/*
 * ide_start_power_step - issue the ATA command for the current PM step
 * @drive: target drive
 * @rq:    PM request whose ->special holds the struct request_pm_state
 *
 * Builds and submits the taskfile appropriate for pm->pm_step, or, for
 * steps that need no device command, advances/completes the step in
 * software and returns ide_stopped.
 */
ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
{
	struct request_pm_state *pm = rq->special;
	struct ide_cmd cmd = { };

	switch (pm->pm_step) {
	case IDE_PM_FLUSH_CACHE:	/* Suspend step 1 (flush cache) */
		if (drive->media != ide_disk)
			break;
		/* Not supported? Switch to next step now. */
		if (ata_id_flush_enabled(drive->id) == 0 ||
		    (drive->dev_flags & IDE_DFLAG_WCACHE) == 0) {
			ide_complete_power_step(drive, rq);
			return ide_stopped;
		}
		if (ata_id_flush_ext_enabled(drive->id))
			cmd.tf.command = ATA_CMD_FLUSH_EXT;
		else
			cmd.tf.command = ATA_CMD_FLUSH;
		goto out_do_tf;
	case IDE_PM_STANDBY:		/* Suspend step 2 (standby) */
		cmd.tf.command = ATA_CMD_STANDBYNOW1;
		goto out_do_tf;
	case IDE_PM_RESTORE_PIO:	/* Resume step 1 (restore PIO) */
		ide_set_max_pio(drive);
		/*
		 * skip IDE_PM_IDLE for ATAPI devices
		 */
		if (drive->media != ide_disk)
			pm->pm_step = IDE_PM_RESTORE_DMA;
		else
			ide_complete_power_step(drive, rq);
		return ide_stopped;
	case IDE_PM_IDLE:		/* Resume step 2 (idle) */
		cmd.tf.command = ATA_CMD_IDLEIMMEDIATE;
		goto out_do_tf;
	case IDE_PM_RESTORE_DMA:	/* Resume step 3 (restore DMA) */
		/*
		 * Right now, all we do is call ide_set_dma(drive),
		 * we could be smarter and check for current xfer_speed
		 * in struct drive etc...
		 */
		if (drive->hwif->dma_ops == NULL)
			break;
		/*
		 * TODO: respect IDE_DFLAG_USING_DMA
		 */
		ide_set_dma(drive);
		break;
	}

	/* no (more) device command needed for this request */
	pm->pm_step = IDE_PM_COMPLETED;

	return ide_stopped;

out_do_tf:
	/* non-data taskfile; device register valid in both directions */
	cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
	cmd.valid.in.tf  = IDE_VALID_IN_TF  | IDE_VALID_DEVICE;
	cmd.protocol = ATA_PROT_NODATA;

	return do_rw_taskfile(drive, &cmd);
}
/**
 *	ide_complete_pm_rq - end the current Power Management request
 *	@drive: target drive
 *	@rq: request
 *
 *	This function cleans up the current PM request and stops the queue
 *	if necessary.
 */
void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq)
{
	struct request_queue *q = drive->queue;
	struct request_pm_state *pm = rq->special;
	unsigned long flags;

	ide_complete_power_step(drive, rq);
	/* more steps remain: the request stays in flight */
	if (pm->pm_step != IDE_PM_COMPLETED)
		return;

	/* NOTE(review): upstream guards this debug printk with #ifdef DEBUG_PM — confirm */
	printk("%s: completing PM request, %s\n", drive->name,
	       blk_pm_suspend_request(rq) ? "suspend" : "resume");

	spin_lock_irqsave(q->queue_lock, flags);
	if (blk_pm_suspend_request(rq))
		/* device suspended: stop feeding requests */
		blk_stop_queue(q);
	else
		/* resume finished: allow normal requests again */
		drive->dev_flags &= ~IDE_DFLAG_BLOCKED;
	spin_unlock_irqrestore(q->queue_lock, flags);

	drive->hwif->rq = NULL;

	/* a PM request must complete fully here */
	if (blk_end_request(rq, 0, 0))
		BUG();
}
/*
 * ide_check_pm_state - prepare drive/queue when a PM request starts
 * @drive: target drive
 * @rq:    PM request whose ->special holds the struct request_pm_state
 *
 * At the start of a suspend sequence, blocks the drive.  At the start of
 * a resume sequence, waits for the hardware to become ready and restarts
 * the request queue.
 */
void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
{
	struct request_pm_state *pm = rq->special;

	if (blk_pm_suspend_request(rq) &&
	    pm->pm_step == IDE_PM_START_SUSPEND)
		/* Mark drive blocked when starting the suspend sequence. */
		drive->dev_flags |= IDE_DFLAG_BLOCKED;
	else if (blk_pm_resume_request(rq) &&
		 pm->pm_step == IDE_PM_START_RESUME) {
		/*
		 * The first thing we do on wakeup is to wait for BSY bit to
		 * go away (with a looong timeout) as a drive on this hwif may
		 * just be POSTing itself.
		 * We do that before even selecting as the "other" device on
		 * the bus may be broken enough to walk on our toes at this
		 * point.
		 */
		ide_hwif_t *hwif = drive->hwif;
		const struct ide_tp_ops *tp_ops = hwif->tp_ops;
		struct request_queue *q = drive->queue;
		unsigned long flags;
		int rc;

		/* NOTE(review): upstream guards this debug printk with #ifdef DEBUG_PM — confirm */
		printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);

		rc = ide_wait_not_busy(hwif, 35000);
		if (rc)
			printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
		tp_ops->dev_select(drive);
		tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
		rc = ide_wait_not_busy(hwif, 100000);
		if (rc)
			printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);

		/* drive is awake: let queued requests flow again */
		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}