/*
 * drivers/s390/cio/device_ops.c
 *
 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
 *                    IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *               Cornelia Huck (cornelia.huck@de.ibm.com)
 */
9 #include <linux/module.h>
10 #include <linux/init.h>
11 #include <linux/errno.h>
12 #include <linux/slab.h>
13 #include <linux/list.h>
14 #include <linux/device.h>
15 #include <linux/delay.h>
17 #include <asm/ccwdev.h>
18 #include <asm/idals.h>
19 #include <asm/chpid.h>
22 #include "cio_debug.h"
28 int ccw_device_set_options_mask(struct ccw_device
*cdev
, unsigned long flags
)
31 * The flag usage is mutal exclusive ...
33 if ((flags
& CCWDEV_EARLY_NOTIFICATION
) &&
34 (flags
& CCWDEV_REPORT_ALL
))
36 cdev
->private->options
.fast
= (flags
& CCWDEV_EARLY_NOTIFICATION
) != 0;
37 cdev
->private->options
.repall
= (flags
& CCWDEV_REPORT_ALL
) != 0;
38 cdev
->private->options
.pgroup
= (flags
& CCWDEV_DO_PATHGROUP
) != 0;
39 cdev
->private->options
.force
= (flags
& CCWDEV_ALLOW_FORCE
) != 0;
43 int ccw_device_set_options(struct ccw_device
*cdev
, unsigned long flags
)
46 * The flag usage is mutal exclusive ...
48 if (((flags
& CCWDEV_EARLY_NOTIFICATION
) &&
49 (flags
& CCWDEV_REPORT_ALL
)) ||
50 ((flags
& CCWDEV_EARLY_NOTIFICATION
) &&
51 cdev
->private->options
.repall
) ||
52 ((flags
& CCWDEV_REPORT_ALL
) &&
53 cdev
->private->options
.fast
))
55 cdev
->private->options
.fast
|= (flags
& CCWDEV_EARLY_NOTIFICATION
) != 0;
56 cdev
->private->options
.repall
|= (flags
& CCWDEV_REPORT_ALL
) != 0;
57 cdev
->private->options
.pgroup
|= (flags
& CCWDEV_DO_PATHGROUP
) != 0;
58 cdev
->private->options
.force
|= (flags
& CCWDEV_ALLOW_FORCE
) != 0;
62 void ccw_device_clear_options(struct ccw_device
*cdev
, unsigned long flags
)
64 cdev
->private->options
.fast
&= (flags
& CCWDEV_EARLY_NOTIFICATION
) == 0;
65 cdev
->private->options
.repall
&= (flags
& CCWDEV_REPORT_ALL
) == 0;
66 cdev
->private->options
.pgroup
&= (flags
& CCWDEV_DO_PATHGROUP
) == 0;
67 cdev
->private->options
.force
&= (flags
& CCWDEV_ALLOW_FORCE
) == 0;
71 ccw_device_clear(struct ccw_device
*cdev
, unsigned long intparm
)
73 struct subchannel
*sch
;
78 if (cdev
->private->state
== DEV_STATE_NOT_OPER
)
80 if (cdev
->private->state
!= DEV_STATE_ONLINE
&&
81 cdev
->private->state
!= DEV_STATE_W4SENSE
)
83 sch
= to_subchannel(cdev
->dev
.parent
);
88 cdev
->private->intparm
= intparm
;
93 ccw_device_start_key(struct ccw_device
*cdev
, struct ccw1
*cpa
,
94 unsigned long intparm
, __u8 lpm
, __u8 key
,
97 struct subchannel
*sch
;
102 sch
= to_subchannel(cdev
->dev
.parent
);
105 if (cdev
->private->state
== DEV_STATE_NOT_OPER
)
107 if (cdev
->private->state
== DEV_STATE_VERIFY
||
108 cdev
->private->state
== DEV_STATE_CLEAR_VERIFY
) {
109 /* Remember to fake irb when finished. */
110 if (!cdev
->private->flags
.fake_irb
) {
111 cdev
->private->flags
.fake_irb
= 1;
112 cdev
->private->intparm
= intparm
;
115 /* There's already a fake I/O around. */
118 if (cdev
->private->state
!= DEV_STATE_ONLINE
||
119 ((sch
->schib
.scsw
.stctl
& SCSW_STCTL_PRIM_STATUS
) &&
120 !(sch
->schib
.scsw
.stctl
& SCSW_STCTL_SEC_STATUS
)) ||
121 cdev
->private->flags
.doverify
)
123 ret
= cio_set_options (sch
, flags
);
126 /* Adjust requested path mask to excluded varied off paths. */
132 ret
= cio_start_key (sch
, cpa
, lpm
, key
);
134 cdev
->private->intparm
= intparm
;
140 ccw_device_start_timeout_key(struct ccw_device
*cdev
, struct ccw1
*cpa
,
141 unsigned long intparm
, __u8 lpm
, __u8 key
,
142 unsigned long flags
, int expires
)
148 ccw_device_set_timeout(cdev
, expires
);
149 ret
= ccw_device_start_key(cdev
, cpa
, intparm
, lpm
, key
, flags
);
151 ccw_device_set_timeout(cdev
, 0);
156 ccw_device_start(struct ccw_device
*cdev
, struct ccw1
*cpa
,
157 unsigned long intparm
, __u8 lpm
, unsigned long flags
)
159 return ccw_device_start_key(cdev
, cpa
, intparm
, lpm
,
160 PAGE_DEFAULT_KEY
, flags
);
164 ccw_device_start_timeout(struct ccw_device
*cdev
, struct ccw1
*cpa
,
165 unsigned long intparm
, __u8 lpm
, unsigned long flags
,
168 return ccw_device_start_timeout_key(cdev
, cpa
, intparm
, lpm
,
169 PAGE_DEFAULT_KEY
, flags
,
175 ccw_device_halt(struct ccw_device
*cdev
, unsigned long intparm
)
177 struct subchannel
*sch
;
182 if (cdev
->private->state
== DEV_STATE_NOT_OPER
)
184 if (cdev
->private->state
!= DEV_STATE_ONLINE
&&
185 cdev
->private->state
!= DEV_STATE_W4SENSE
)
187 sch
= to_subchannel(cdev
->dev
.parent
);
192 cdev
->private->intparm
= intparm
;
197 ccw_device_resume(struct ccw_device
*cdev
)
199 struct subchannel
*sch
;
203 sch
= to_subchannel(cdev
->dev
.parent
);
206 if (cdev
->private->state
== DEV_STATE_NOT_OPER
)
208 if (cdev
->private->state
!= DEV_STATE_ONLINE
||
209 !(sch
->schib
.scsw
.actl
& SCSW_ACTL_SUSPENDED
))
211 return cio_resume(sch
);
215 * Pass interrupt to device driver.
218 ccw_device_call_handler(struct ccw_device
*cdev
)
220 struct subchannel
*sch
;
224 sch
= to_subchannel(cdev
->dev
.parent
);
227 * we allow for the device action handler if .
228 * - we received ending status
229 * - the action handler requested to see all interrupts
230 * - we received an intermediate status
231 * - fast notification was requested (primary status)
232 * - unsolicited interrupts
234 stctl
= cdev
->private->irb
.scsw
.stctl
;
235 ending_status
= (stctl
& SCSW_STCTL_SEC_STATUS
) ||
236 (stctl
== (SCSW_STCTL_ALERT_STATUS
| SCSW_STCTL_STATUS_PEND
)) ||
237 (stctl
== SCSW_STCTL_STATUS_PEND
);
238 if (!ending_status
&&
239 !cdev
->private->options
.repall
&&
240 !(stctl
& SCSW_STCTL_INTER_STATUS
) &&
241 !(cdev
->private->options
.fast
&&
242 (stctl
& SCSW_STCTL_PRIM_STATUS
)))
245 /* Clear pending timers for device driver initiated I/O. */
247 ccw_device_set_timeout(cdev
, 0);
249 * Now we are ready to call the device driver interrupt handler.
252 cdev
->handler(cdev
, cdev
->private->intparm
,
253 &cdev
->private->irb
);
256 * Clear the old and now useless interrupt response block.
258 memset(&cdev
->private->irb
, 0, sizeof(struct irb
));
264 * Search for CIW command in extended sense data.
267 ccw_device_get_ciw(struct ccw_device
*cdev
, __u32 ct
)
271 if (cdev
->private->flags
.esid
== 0)
273 for (ciw_cnt
= 0; ciw_cnt
< MAX_CIWS
; ciw_cnt
++)
274 if (cdev
->private->senseid
.ciw
[ciw_cnt
].ct
== ct
)
275 return cdev
->private->senseid
.ciw
+ ciw_cnt
;
280 ccw_device_get_path_mask(struct ccw_device
*cdev
)
282 struct subchannel
*sch
;
284 sch
= to_subchannel(cdev
->dev
.parent
);
292 * Try to break the lock on a boxed device.
295 ccw_device_stlck(struct ccw_device
*cdev
)
299 struct subchannel
*sch
;
305 if (cdev
->drv
&& !cdev
->private->options
.force
)
308 sch
= to_subchannel(cdev
->dev
.parent
);
310 CIO_TRACE_EVENT(2, "stl lock");
311 CIO_TRACE_EVENT(2, cdev
->dev
.bus_id
);
313 buf
= kmalloc(32*sizeof(char), GFP_DMA
|GFP_KERNEL
);
316 buf2
= kmalloc(32*sizeof(char), GFP_DMA
|GFP_KERNEL
);
321 spin_lock_irqsave(sch
->lock
, flags
);
322 ret
= cio_enable_subchannel(sch
, 3);
326 * Setup ccw. We chain an unconditional reserve and a release so we
327 * only break the lock.
329 cdev
->private->iccws
[0].cmd_code
= CCW_CMD_STLCK
;
330 cdev
->private->iccws
[0].cda
= (__u32
) __pa(buf
);
331 cdev
->private->iccws
[0].count
= 32;
332 cdev
->private->iccws
[0].flags
= CCW_FLAG_CC
;
333 cdev
->private->iccws
[1].cmd_code
= CCW_CMD_RELEASE
;
334 cdev
->private->iccws
[1].cda
= (__u32
) __pa(buf2
);
335 cdev
->private->iccws
[1].count
= 32;
336 cdev
->private->iccws
[1].flags
= 0;
337 ret
= cio_start(sch
, cdev
->private->iccws
, 0);
339 cio_disable_subchannel(sch
); //FIXME: return code?
342 cdev
->private->irb
.scsw
.actl
|= SCSW_ACTL_START_PEND
;
343 spin_unlock_irqrestore(sch
->lock
, flags
);
344 wait_event(cdev
->private->wait_q
, cdev
->private->irb
.scsw
.actl
== 0);
345 spin_lock_irqsave(sch
->lock
, flags
);
346 cio_disable_subchannel(sch
); //FIXME: return code?
347 if ((cdev
->private->irb
.scsw
.dstat
!=
348 (DEV_STAT_CHN_END
|DEV_STAT_DEV_END
)) ||
349 (cdev
->private->irb
.scsw
.cstat
!= 0))
352 memset(&cdev
->private->irb
, 0, sizeof(struct irb
));
356 spin_unlock_irqrestore(sch
->lock
, flags
);
361 ccw_device_get_chp_desc(struct ccw_device
*cdev
, int chp_no
)
363 struct subchannel
*sch
;
366 sch
= to_subchannel(cdev
->dev
.parent
);
368 chpid
.id
= sch
->schib
.pmcw
.chpid
[chp_no
];
369 return chp_get_chp_desc(chpid
);
373 * ccw_device_get_id - obtain a ccw device id
374 * @cdev: device to obtain the id for
375 * @dev_id: where to fill in the values
377 void ccw_device_get_id(struct ccw_device
*cdev
, struct ccw_dev_id
*dev_id
)
379 *dev_id
= cdev
->private->dev_id
;
381 EXPORT_SYMBOL(ccw_device_get_id
);
383 // FIXME: these have to go:
386 _ccw_device_get_subchannel_number(struct ccw_device
*cdev
)
388 return cdev
->private->schid
.sch_no
;
392 MODULE_LICENSE("GPL");
393 EXPORT_SYMBOL(ccw_device_set_options_mask
);
394 EXPORT_SYMBOL(ccw_device_set_options
);
395 EXPORT_SYMBOL(ccw_device_clear_options
);
396 EXPORT_SYMBOL(ccw_device_clear
);
397 EXPORT_SYMBOL(ccw_device_halt
);
398 EXPORT_SYMBOL(ccw_device_resume
);
399 EXPORT_SYMBOL(ccw_device_start_timeout
);
400 EXPORT_SYMBOL(ccw_device_start
);
401 EXPORT_SYMBOL(ccw_device_start_timeout_key
);
402 EXPORT_SYMBOL(ccw_device_start_key
);
403 EXPORT_SYMBOL(ccw_device_get_ciw
);
404 EXPORT_SYMBOL(ccw_device_get_path_mask
);
405 EXPORT_SYMBOL(_ccw_device_get_subchannel_number
);
406 EXPORT_SYMBOL_GPL(ccw_device_get_chp_desc
);