/*
 *  drivers/s390/cio/chsc.c
 *   S/390 common I/O routines -- channel subsystem call
 *
 *    Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
 *                            IBM Corporation
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
 *               Cornelia Huck (cornelia.huck@de.ibm.com)
 *               Arnd Bergmann (arndb@de.ibm.com)
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>

#include <asm/cio.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
static void *sei_page;

static int new_channel_path(struct chp_id chpid);

static inline struct channel_path *chpid_to_chp(struct chp_id chpid)
{
	return css[chpid.cssid]->chps[chpid.id];
}

static void set_chp_logically_online(struct chp_id chpid, int onoff)
{
	chpid_to_chp(chpid)->state = onoff;
}

static int get_chp_status(struct chp_id chpid)
{
	return (chpid_to_chp(chpid) ? chpid_to_chp(chpid)->state : -ENODEV);
}
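/*
 * chsc_validate_chpids() below prunes a subchannel's operational path
 * mask: every path whose chpid is logically offline (state 0) is
 * disabled in sch->opm.
 */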
void chsc_validate_chpids(struct subchannel *sch)
{
	int mask, chp;
	struct chp_id chpid;

	chp_id_init(&chpid);
	for (chp = 0; chp <= 7; chp++) {
		mask = 0x80 >> chp;
		chpid.id = sch->schib.pmcw.chpid[chp];
		if (!get_chp_status(chpid))
			/* disable using this path */
			sch->opm &= ~mask;
	}
}
void chpid_is_actually_online(struct chp_id chpid)
{
	int state;

	state = get_chp_status(chpid);
	if (state < 0) {
		need_rescan = 1;
		queue_work(slow_path_wq, &slow_path_work);
	} else
		WARN_ON(!state);
}
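/*
 * chsc_get_sch_desc_irq() issues a CHSC store-subchannel-description
 * (ssd) request for a single subchannel and caches the returned chpid
 * and full-link-address information in sch->ssd_info.
 */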
/* FIXME: this is _always_ called for every subchannel. shouldn't we
 *	  process more than one at a time? */
static int
chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
{
	int ccode, j;

	struct {
		struct chsc_header request;
		u16 reserved1a:10;
		u16 ssid:2;
		u16 reserved1b:4;
		u16 f_sch;	  /* first subchannel */
		u16 reserved2;
		u16 l_sch;	  /* last subchannel */
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u8 sch_valid : 1;
		u8 dev_valid : 1;
		u8 st	     : 3; /* subchannel type */
		u8 zeroes    : 3;
		u8 unit_addr;	  /* unit address */
		u16 devno;	  /* device number */
		u8 path_mask;
		u8 fla_valid_mask;
		u16 sch;	  /* subchannel */
		u8 chpid[8];	  /* chpids 0-7 */
		u16 fla[8];	  /* full link addresses 0-7 */
	} __attribute__ ((packed)) *ssd_area;

	ssd_area = page;

	ssd_area->request.length = 0x0010;
	ssd_area->request.code = 0x0004;

	ssd_area->ssid = sch->schid.ssid;
	ssd_area->f_sch = sch->schid.sch_no;
	ssd_area->l_sch = sch->schid.sch_no;

	ccode = chsc(ssd_area);
	if (ccode > 0) {
		pr_debug("chsc returned with ccode = %d\n", ccode);
		return (ccode == 3) ? -ENODEV : -EBUSY;
	}

	switch (ssd_area->response.code) {
	case 0x0001: /* everything ok */
		break;
	case 0x0002:
		CIO_CRW_EVENT(2, "Invalid command!\n");
		return -EINVAL;
	case 0x0003:
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		return -EINVAL;
	case 0x0004:
		CIO_CRW_EVENT(2, "Model does not provide ssd\n");
		return -EOPNOTSUPP;
	default:
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      ssd_area->response.code);
		return -EIO;
	}

	/*
	 * ssd_area->st stores the type of the detected
	 * subchannel, with the following definitions:
	 *
	 * 0: I/O subchannel:     All fields have meaning
	 * 1: CHSC subchannel:    Only sch_val, st and sch
	 *                        have meaning
	 * 2: Message subchannel: All fields except unit_addr
	 *                        have meaning
	 * 3: ADM subchannel:     Only sch_val, st and sch
	 *                        have meaning
	 *
	 * Other types are currently undefined.
	 */
	if (ssd_area->st > 3) { /* uhm, that looks strange... */
		CIO_CRW_EVENT(0, "Strange subchannel type %d"
			      " for sch 0.%x.%04x\n", ssd_area->st,
			      sch->schid.ssid, sch->schid.sch_no);
		/*
		 * There may have been a new subchannel type defined in the
		 * time since this code was written; since we don't know which
		 * fields have meaning and what to do with it we just jump out
		 */
		return 0;
	} else {
		const char *type[4] = {"I/O", "chsc", "message", "ADM"};
		CIO_CRW_EVENT(6, "ssd: sch 0.%x.%04x is %s subchannel\n",
			      sch->schid.ssid, sch->schid.sch_no,
			      type[ssd_area->st]);

		sch->ssd_info.valid = 1;
		sch->ssd_info.type = ssd_area->st;
	}

	if (ssd_area->st == 0 || ssd_area->st == 2) {
		for (j = 0; j < 8; j++) {
			if (!((0x80 >> j) & ssd_area->path_mask &
			      ssd_area->fla_valid_mask))
				continue;
			sch->ssd_info.chpid[j] = ssd_area->chpid[j];
			sch->ssd_info.fla[j] = ssd_area->fla[j];
		}
	}
	return 0;
}
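/*
 * css_get_ssd_info() is the allocating wrapper around the ssd request:
 * it grabs a scratch page, runs chsc_get_sch_desc_irq() under the
 * subchannel lock and, on success, creates channel-path structures for
 * any chpid in the path-installed mask not yet known to the css.
 */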
int
css_get_ssd_info(struct subchannel *sch)
{
	int ret;
	void *page;

	page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!page)
		return -ENOMEM;
	spin_lock_irq(sch->lock);
	ret = chsc_get_sch_desc_irq(sch, page);
	if (ret) {
		static int cio_chsc_err_msg;

		if (!cio_chsc_err_msg) {
			printk(KERN_ERR
			       "chsc_get_sch_descriptions:"
			       " Error %d while doing chsc; "
			       "processing some machine checks may "
			       "not work\n", ret);
			cio_chsc_err_msg = 1;
		}
	}
	spin_unlock_irq(sch->lock);
	free_page((unsigned long)page);
	if (!ret) {
		int j, mask;
		struct chp_id chpid;

		chp_id_init(&chpid);
		/* Allocate channel path structures, if needed. */
		for (j = 0; j < 8; j++) {
			mask = 0x80 >> j;
			chpid.id = sch->ssd_info.chpid[j];
			if ((sch->schib.pmcw.pim & mask) &&
			    (get_chp_status(chpid) < 0))
				new_channel_path(chpid);
		}
	}
	return ret;
}
static int
s390_subchannel_remove_chpid(struct device *dev, void *data)
{
	int j;
	int mask;
	struct subchannel *sch;
	struct channel_path *chpid;
	struct schib schib;

	sch = to_subchannel(dev);
	chpid = data;
	for (j = 0; j < 8; j++) {
		mask = 0x80 >> j;
		if ((sch->schib.pmcw.pim & mask) &&
		    (sch->schib.pmcw.chpid[j] == chpid->chpid.id))
			break;
	}
	if (j >= 8)
		return 0;

	spin_lock_irq(sch->lock);

	stsch(sch->schid, &schib);
	if (!schib.pmcw.dnv)
		goto out_unreg;
	memcpy(&sch->schib, &schib, sizeof(struct schib));
	/* Check for single path devices. */
	if (sch->schib.pmcw.pim == 0x80)
		goto out_unreg;

	if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) &&
	    (sch->schib.scsw.actl & SCSW_ACTL_SCHACT) &&
	    (sch->schib.pmcw.lpum == mask)) {
		int cc;

		cc = cio_clear(sch);
		if (cc == -ENODEV)
			goto out_unreg;
		/* Request retry of internal operation. */
		device_set_intretry(sch);
		/* Call handler. */
		if (sch->driver && sch->driver->termination)
			sch->driver->termination(&sch->dev);
		goto out_unlock;
	}

	/* trigger path verification. */
	if (sch->driver && sch->driver->verify)
		sch->driver->verify(&sch->dev);
	else if (sch->lpm == mask)
		goto out_unreg;
out_unlock:
	spin_unlock_irq(sch->lock);
	return 0;
out_unreg:
	spin_unlock_irq(sch->lock);
	sch->lpm = 0;
	if (css_enqueue_subchannel_slow(sch->schid)) {
		css_clear_subchannel_slow_list();
		need_rescan = 1;
	}
	return 0;
}
static void s390_set_chpid_offline(struct chp_id chpid)
{
	char dbf_txt[15];
	struct device *dev;

	sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (get_chp_status(chpid) <= 0)
		return;
	dev = get_device(&(chpid_to_chp(chpid)->dev));
	bus_for_each_dev(&css_bus_type, NULL, to_channelpath(dev),
			 s390_subchannel_remove_chpid);

	if (need_rescan || css_slow_subchannels_exist())
		queue_work(slow_path_wq, &slow_path_work);
	put_device(dev);
}
struct res_acc_data {
	struct channel_path *chp;
	u32 fla_mask;
	u16 fla;
};
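/*
 * fla/fla_mask narrow a resource-accessibility event to part of the
 * fabric: fla_mask is 0xffff when a full link address is valid and
 * 0xff00 when only the link address part is (see
 * chsc_process_sei_res_acc() below).
 */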
static int
s390_process_res_acc_sch(struct res_acc_data *res_data, struct subchannel *sch)
{
	int found;
	int chp;
	int ccode;

	found = 0;
	for (chp = 0; chp <= 7; chp++)
		/*
		 * check if chpid is in information updated by ssd
		 */
		if (sch->ssd_info.valid &&
		    sch->ssd_info.chpid[chp] == res_data->chp->chpid.id &&
		    (sch->ssd_info.fla[chp] & res_data->fla_mask)
		    == res_data->fla) {
			found = 1;
			break;
		}

	if (found == 0)
		return 0;

	/*
	 * Do a stsch to update our subchannel structure with the
	 * new path information and eventually check for logically
	 * offline chpids.
	 */
	ccode = stsch(sch->schid, &sch->schib);
	if (ccode > 0)
		return 0;

	return 0x80 >> chp;
}

static int
s390_process_res_acc_new_sch(struct subchannel_id schid)
{
	struct schib schib;
	int ret;
	/*
	 * We don't know the device yet, but since a path
	 * may be available now to the device we'll have
	 * to do recognition again.
	 * Since we don't have any idea about which chpid
	 * that beast may be on we'll have to do a stsch
	 * on all devices, grr...
	 */
	if (stsch_err(schid, &schib))
		/* We're through */
		return need_rescan ? -EAGAIN : -ENXIO;

	/* Put it on the slow path. */
	ret = css_enqueue_subchannel_slow(schid);
	if (ret) {
		css_clear_subchannel_slow_list();
		need_rescan = 1;
		return -EAGAIN;
	}
	return 0;
}
static int
__s390_process_res_acc(struct subchannel_id schid, void *data)
{
	int chp_mask, old_lpm;
	struct res_acc_data *res_data;
	struct subchannel *sch;

	res_data = data;
	sch = get_subchannel_by_schid(schid);
	if (!sch)
		/* Check if a subchannel is newly available. */
		return s390_process_res_acc_new_sch(schid);

	spin_lock_irq(sch->lock);

	chp_mask = s390_process_res_acc_sch(res_data, sch);

	if (chp_mask == 0) {
		spin_unlock_irq(sch->lock);
		put_device(&sch->dev);
		return 0;
	}
	old_lpm = sch->lpm;
	sch->lpm = ((sch->schib.pmcw.pim &
		     sch->schib.pmcw.pam &
		     sch->schib.pmcw.pom)
		    | chp_mask) & sch->opm;
	if (!old_lpm && sch->lpm)
		device_trigger_reprobe(sch);
	else if (sch->driver && sch->driver->verify)
		sch->driver->verify(&sch->dev);

	spin_unlock_irq(sch->lock);
	put_device(&sch->dev);
	return 0;
}
static int
s390_process_res_acc (struct res_acc_data *res_data)
{
	int rc;
	char dbf_txt[15];

	sprintf(dbf_txt, "accpr%x.%02x", res_data->chp->chpid.cssid,
		res_data->chp->chpid.id);
	CIO_TRACE_EVENT( 2, dbf_txt);
	if (res_data->fla != 0) {
		sprintf(dbf_txt, "fla%x", res_data->fla);
		CIO_TRACE_EVENT( 2, dbf_txt);
	}

	/*
	 * I/O resources may have become accessible.
	 * Scan through all subchannels that may be concerned and
	 * do a validation on those.
	 * The more information we have (info), the less scanning
	 * will we have to do.
	 */
	rc = for_each_subchannel(__s390_process_res_acc, res_data);
	if (css_slow_subchannels_exist())
		rc = -EAGAIN;
	else if (rc != -EAGAIN)
		rc = 0;
	return rc;
}
static int
__get_chpid_from_lir(void *data)
{
	struct lir {
		u8  iq;
		u8  ic;
		u16 sci;
		/* incident-node descriptor */
		u32 indesc[28];
		/* attached-node descriptor */
		u32 andesc[28];
		/* incident-specific information */
		u32 isinfo[28];
	} __attribute__ ((packed)) *lir;

	lir = data;
	if (!(lir->iq&0x80))
		/* NULL link incident record */
		return -EINVAL;
	if (!(lir->indesc[0]&0xc0000000))
		/* node descriptor not valid */
		return -EINVAL;
	if (!(lir->indesc[0]&0x10000000))
		/* don't handle device-type nodes - FIXME */
		return -EINVAL;
	/* Byte 3 contains the chpid. Could also be CTCA, but we don't care */

	return (u16) (lir->indesc[0]&0x000000ff);
}
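/*
 * The masks above follow the link-incident-record layout: the top bit
 * of iq flags a valid record, the top bits of indesc[0] flag a valid
 * node descriptor, a clear 0x10000000 bit marks a device-type node
 * (which is not handled), and the low byte of indesc[0] carries the
 * chpid.
 */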
struct chsc_sei_area {
	struct chsc_header request;
	u32 reserved1;
	u32 reserved2;
	u32 reserved3;
	struct chsc_header response;
	u32 reserved4;
	u8  flags;
	u8  vf;		/* validity flags */
	u8  rs;		/* reporting source */
	u8  cc;		/* content code */
	u16 fla;	/* full link address */
	u16 rsid;	/* reporting source id */
	u32 reserved5;
	u32 reserved6;
	u8 ccdf[4096 - 16 - 24]; /* content-code dependent field */
	/* ccdf has to be big enough for a link-incident record */
} __attribute__ ((packed));
static int chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
{
	struct chp_id chpid;
	int id;

	CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
		      sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return 0;
	id = __get_chpid_from_lir(sei_area->ccdf);
	if (id < 0)
		CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
	else {
		chp_id_init(&chpid);
		chpid.id = id;
		s390_set_chpid_offline(chpid);
	}

	return 0;
}
static int chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
{
	struct res_acc_data res_data;
	struct device *dev;
	struct chp_id chpid;
	int status;
	int rc;

	CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
		      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return 0;
	chp_id_init(&chpid);
	chpid.id = sei_area->rsid;
	/* allocate a new channel path structure, if needed */
	status = get_chp_status(chpid);
	if (status < 0)
		new_channel_path(chpid);
	else if (!status)
		return 0;
	dev = get_device(&(chpid_to_chp(chpid)->dev));
	memset(&res_data, 0, sizeof(struct res_acc_data));
	res_data.chp = to_channelpath(dev);
	if ((sei_area->vf & 0xc0) != 0) {
		res_data.fla = sei_area->fla;
		if ((sei_area->vf & 0xc0) == 0xc0)
			/* full link address */
			res_data.fla_mask = 0xffff;
		else
			/* link address */
			res_data.fla_mask = 0xff00;
	}
	rc = s390_process_res_acc(&res_data);
	put_device(dev);

	return rc;
}
static int chsc_process_sei(struct chsc_sei_area *sei_area)
{
	int rc;

	/* Check if we might have lost some information. */
	if (sei_area->flags & 0x40)
		CIO_CRW_EVENT(2, "chsc: event overflow\n");
	/* which kind of information was stored? */
	rc = 0;
	switch (sei_area->cc) {
	case 1: /* link incident*/
		rc = chsc_process_sei_link_incident(sei_area);
		break;
	case 2: /* i/o resource accessibility */
		rc = chsc_process_sei_res_acc(sei_area);
		break;
	default: /* other stuff */
		CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
			      sei_area->cc);
		break;
	}

	return rc;
}
int chsc_process_crw(void)
{
	struct chsc_sei_area *sei_area;
	int ret;
	int rc;

	if (!sei_page)
		return 0;
	/* Access to sei_page is serialized through machine check handler
	 * thread, so no need for locking. */
	sei_area = sei_page;

	CIO_TRACE_EVENT( 2, "prcss");
	ret = 0;
	do {
		memset(sei_area, 0, sizeof(*sei_area));
		sei_area->request.length = 0x0010;
		sei_area->request.code = 0x000e;
		if (chsc(sei_area))
			break;

		if (sei_area->response.code == 0x0001) {
			CIO_CRW_EVENT(4, "chsc: sei successful\n");
			rc = chsc_process_sei(sei_area);
			if (rc)
				ret = rc;
		} else {
			CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
				      sei_area->response.code);
			ret = 0;
			break;
		}
	} while (sei_area->flags & 0x80);

	return ret;
}
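/*
 * Note that chsc_process_crw() loops: bit 0x80 in the response flags
 * signals that more event information is pending, so the
 * store-event-information request is repeated until the queue is
 * drained.
 */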
static int
__chp_add_new_sch(struct subchannel_id schid)
{
	struct schib schib;
	int ret;

	if (stsch_err(schid, &schib))
		/* We're through */
		return need_rescan ? -EAGAIN : -ENXIO;

	/* Put it on the slow path. */
	ret = css_enqueue_subchannel_slow(schid);
	if (ret) {
		css_clear_subchannel_slow_list();
		need_rescan = 1;
		return -EAGAIN;
	}
	return 0;
}

static int
__chp_add(struct subchannel_id schid, void *data)
{
	int i, mask;
	struct channel_path *chp;
	struct subchannel *sch;

	chp = data;
	sch = get_subchannel_by_schid(schid);
	if (!sch)
		/* Check if the subchannel is now available. */
		return __chp_add_new_sch(schid);
	spin_lock_irq(sch->lock);
	for (i=0; i<8; i++) {
		mask = 0x80 >> i;
		if ((sch->schib.pmcw.pim & mask) &&
		    (sch->schib.pmcw.chpid[i] == chp->chpid.id)) {
			if (stsch(sch->schid, &sch->schib) != 0) {
				/* Endgame. */
				spin_unlock_irq(sch->lock);
				return -ENXIO;
			}
			break;
		}
	}
	if (i==8) {
		spin_unlock_irq(sch->lock);
		return 0;
	}
	sch->lpm = ((sch->schib.pmcw.pim &
		     sch->schib.pmcw.pam &
		     sch->schib.pmcw.pom)
		    | mask) & sch->opm;

	if (sch->driver && sch->driver->verify)
		sch->driver->verify(&sch->dev);

	spin_unlock_irq(sch->lock);
	put_device(&sch->dev);
	return 0;
}
static int chp_add(struct chp_id chpid)
{
	int rc;
	char dbf_txt[15];
	struct device *dev;

	if (!get_chp_status(chpid))
		return 0; /* no need to do the rest */

	sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	dev = get_device(&(chpid_to_chp(chpid)->dev));
	rc = for_each_subchannel(__chp_add, to_channelpath(dev));
	if (css_slow_subchannels_exist())
		rc = -EAGAIN;
	if (rc != -EAGAIN)
		rc = 0;
	put_device(dev);
	return rc;
}
/*
 * Handling of crw machine checks with channel path source.
 */
int chp_process_crw(int id, int on)
{
	struct chp_id chpid;

	chp_id_init(&chpid);
	chpid.id = id;
	if (!on) {
		/* Path has gone. We use the link incident routine.*/
		s390_set_chpid_offline(chpid);
		return 0; /* De-register is async anyway. */
	}
	/*
	 * Path has come. Allocate a new channel path structure,
	 * if needed.
	 */
	if (get_chp_status(chpid) < 0)
		new_channel_path(chpid);
	/* Avoid the extra overhead in process_rec_acc. */
	return chp_add(chpid);
}
static int check_for_io_on_path(struct subchannel *sch, int index)
{
	int cc;

	cc = stsch(sch->schid, &sch->schib);
	if (cc)
		return 0;
	if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index))
		return 1;
	return 0;
}

static void terminate_internal_io(struct subchannel *sch)
{
	if (cio_clear(sch)) {
		/* Recheck device in case clear failed. */
		sch->lpm = 0;
		if (device_trigger_verify(sch) != 0) {
			if (css_enqueue_subchannel_slow(sch->schid)) {
				css_clear_subchannel_slow_list();
				need_rescan = 1;
			}
		}
		return;
	}
	/* Request retry of internal operation. */
	device_set_intretry(sch);
	/* Call handler. */
	if (sch->driver && sch->driver->termination)
		sch->driver->termination(&sch->dev);
}
static void __s390_subchannel_vary_chpid(struct subchannel *sch,
					 struct chp_id chpid, int on)
{
	int chp, old_lpm;
	unsigned long flags;

	if (!sch->ssd_info.valid)
		return;

	spin_lock_irqsave(sch->lock, flags);
	old_lpm = sch->lpm;
	for (chp = 0; chp < 8; chp++) {
		if (sch->ssd_info.chpid[chp] != chpid.id)
			continue;

		if (on) {
			sch->opm |= (0x80 >> chp);
			sch->lpm |= (0x80 >> chp);
			if (!old_lpm)
				device_trigger_reprobe(sch);
			else if (sch->driver && sch->driver->verify)
				sch->driver->verify(&sch->dev);
			break;
		}
		sch->opm &= ~(0x80 >> chp);
		sch->lpm &= ~(0x80 >> chp);
		if (check_for_io_on_path(sch, chp)) {
			if (device_is_online(sch))
				/* Path verification is done after killing. */
				device_kill_io(sch);
			else
				/* Kill and retry internal I/O. */
				terminate_internal_io(sch);
		} else if (!sch->lpm) {
			if (device_trigger_verify(sch) != 0) {
				if (css_enqueue_subchannel_slow(sch->schid)) {
					css_clear_subchannel_slow_list();
					need_rescan = 1;
				}
			}
		} else if (sch->driver && sch->driver->verify)
			sch->driver->verify(&sch->dev);
		break;
	}
	spin_unlock_irqrestore(sch->lock, flags);
}
static int s390_subchannel_vary_chpid_off(struct device *dev, void *data)
{
	struct subchannel *sch;
	struct chp_id *chpid;

	sch = to_subchannel(dev);
	chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 0);
	return 0;
}

static int s390_subchannel_vary_chpid_on(struct device *dev, void *data)
{
	struct subchannel *sch;
	struct chp_id *chpid;

	sch = to_subchannel(dev);
	chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 1);
	return 0;
}

static int
__s390_vary_chpid_on(struct subchannel_id schid, void *data)
{
	struct schib schib;
	struct subchannel *sch;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		put_device(&sch->dev);
		return 0;
	}
	if (stsch_err(schid, &schib))
		/* We're through */
		return -ENXIO;
	/* Put it on the slow path. */
	if (css_enqueue_subchannel_slow(schid)) {
		css_clear_subchannel_slow_list();
		need_rescan = 1;
		return -EAGAIN;
	}
	return 0;
}
/*
 * Function: s390_vary_chpid
 * Varies the specified chpid online or offline
 */
static int s390_vary_chpid(struct chp_id chpid, int on)
{
	char dbf_text[15];
	int status;

	sprintf(dbf_text, on?"varyon%x.%02x":"varyoff%x.%02x", chpid.cssid,
		chpid.id);
	CIO_TRACE_EVENT( 2, dbf_text);

	status = get_chp_status(chpid);
	if (status < 0) {
		printk(KERN_ERR "Can't vary unknown chpid %x.%02x\n",
		       chpid.cssid, chpid.id);
		return -EINVAL;
	}

	if (!on && !status) {
		printk(KERN_ERR "chpid %x.%02x is already offline\n",
		       chpid.cssid, chpid.id);
		return -EINVAL;
	}

	set_chp_logically_online(chpid, on);

	/*
	 * Redo PathVerification on the devices the chpid connects to
	 */

	bus_for_each_dev(&css_bus_type, NULL, &chpid, on ?
			 s390_subchannel_vary_chpid_on :
			 s390_subchannel_vary_chpid_off);
	if (on)
		/* Scan for new devices on varied on path. */
		for_each_subchannel(__s390_vary_chpid_on, NULL);
	if (need_rescan || css_slow_subchannels_exist())
		queue_work(slow_path_wq, &slow_path_work);
	return 0;
}
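/*
 * Note: varying a chpid only flips its logical state and the path
 * masks (opm/lpm) of the affected subchannels; the physical path
 * itself is untouched, which is why varying on is followed by a scan
 * for devices that may have become reachable.
 */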
/*
 * Channel measurement related functions
 */
static ssize_t
chp_measurement_chars_read(struct kobject *kobj, char *buf, loff_t off,
			   size_t count)
{
	struct channel_path *chp;
	unsigned int size;

	chp = to_channelpath(container_of(kobj, struct device, kobj));
	if (!chp->cmg_chars)
		return 0;

	size = sizeof(struct cmg_chars);

	if (off > size)
		return 0;
	if (off + count > size)
		count = size - off;
	memcpy(buf, chp->cmg_chars + off, count);
	return count;
}
static struct bin_attribute chp_measurement_chars_attr = {
	.attr = {
		.name = "measurement_chars",
		.mode = S_IRUSR,
		.owner = THIS_MODULE,
	},
	.size = sizeof(struct cmg_chars),
	.read = chp_measurement_chars_read,
};

static void
chp_measurement_copy_block(struct cmg_entry *buf,
			   struct channel_subsystem *css, struct chp_id chpid)
{
	void *area;
	struct cmg_entry *entry, reference_buf;
	int idx;

	if (chpid.id < 128) {
		area = css->cub_addr1;
		idx = chpid.id;
	} else {
		area = css->cub_addr2;
		idx = chpid.id - 128;
	}
	entry = area + (idx * sizeof(struct cmg_entry));
	do {
		memcpy(buf, entry, sizeof(*entry));
		memcpy(&reference_buf, entry, sizeof(*entry));
	} while (reference_buf.values[0] != buf->values[0]);
}
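/*
 * The double memcpy() plus retry loop above is a lock-free consistent
 * read: the measurement block is updated concurrently by the channel
 * subsystem, so the entry is read twice and re-read until both copies
 * agree in their first word, i.e. no update raced with the read.
 */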
static ssize_t
chp_measurement_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
{
	struct channel_path *chp;
	struct channel_subsystem *css;
	unsigned int size;

	chp = to_channelpath(container_of(kobj, struct device, kobj));
	css = to_css(chp->dev.parent);

	size = sizeof(struct cmg_entry);

	/* Only allow single reads. */
	if (off || count < size)
		return 0;
	chp_measurement_copy_block((struct cmg_entry *)buf, css, chp->chpid);
	count = size;
	return count;
}

static struct bin_attribute chp_measurement_attr = {
	.attr = {
		.name = "measurement",
		.mode = S_IRUSR,
		.owner = THIS_MODULE,
	},
	.size = sizeof(struct cmg_entry),
	.read = chp_measurement_read,
};
static void
chsc_remove_chp_cmg_attr(struct channel_path *chp)
{
	device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
	device_remove_bin_file(&chp->dev, &chp_measurement_attr);
}

static int
chsc_add_chp_cmg_attr(struct channel_path *chp)
{
	int ret;

	ret = device_create_bin_file(&chp->dev, &chp_measurement_chars_attr);
	if (ret)
		return ret;
	ret = device_create_bin_file(&chp->dev, &chp_measurement_attr);
	if (ret)
		device_remove_bin_file(&chp->dev,
				       &chp_measurement_chars_attr);
	return ret;
}

static void
chsc_remove_cmg_attr(struct channel_subsystem *css)
{
	int i;

	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		chsc_remove_chp_cmg_attr(css->chps[i]);
	}
}

static int
chsc_add_cmg_attr(struct channel_subsystem *css)
{
	int i, ret;

	ret = 0;
	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		ret = chsc_add_chp_cmg_attr(css->chps[i]);
		if (ret)
			goto cleanup;
	}
	return ret;
cleanup:
	for (--i; i >= 0; i--) {
		if (!css->chps[i])
			continue;
		chsc_remove_chp_cmg_attr(css->chps[i]);
	}
	return ret;
}
static int
__chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
{
	struct {
		struct chsc_header request;
		u32 operation_code : 2;
		u32 : 30;
		u32 key : 4;
		u32 : 28;
		u64 cub_addr1;
		u64 cub_addr2;
		u32 reserved[13];
		struct chsc_header response;
		u32 status : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
	} __attribute__ ((packed)) *secm_area;
	int ret, ccode;

	secm_area = page;
	secm_area->request.length = 0x0050;
	secm_area->request.code = 0x0016;

	secm_area->key = PAGE_DEFAULT_KEY;
	secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
	secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

	secm_area->operation_code = enable ? 0 : 1;

	ccode = chsc(secm_area);
	if (ccode > 0)
		return (ccode == 3) ? -ENODEV : -EBUSY;

	switch (secm_area->response.code) {
	case 0x0001: /* Success. */
		ret = 0;
		break;
	case 0x0003: /* Invalid block. */
	case 0x0007: /* Invalid format. */
	case 0x0008: /* Other invalid block. */
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		ret = -EINVAL;
		break;
	case 0x0004: /* Command not provided in model. */
		CIO_CRW_EVENT(2, "Model does not provide secm\n");
		ret = -EOPNOTSUPP;
		break;
	case 0x0102: /* cub addresses incorrect */
		CIO_CRW_EVENT(2, "Invalid addresses in chsc request block\n");
		ret = -EINVAL;
		break;
	case 0x0103: /* key error */
		CIO_CRW_EVENT(2, "Access key error in secm\n");
		ret = -EINVAL;
		break;
	case 0x0105: /* error while starting */
		CIO_CRW_EVENT(2, "Error while starting channel measurement\n");
		ret = -EIO;
		break;
	default:
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      secm_area->response.code);
		ret = -EIO;
	}
	return ret;
}
int
chsc_secm(struct channel_subsystem *css, int enable)
{
	void *secm_area;
	int ret;

	secm_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!secm_area)
		return -ENOMEM;

	mutex_lock(&css->mutex);
	if (enable && !css->cm_enabled) {
		css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!css->cub_addr1 || !css->cub_addr2) {
			free_page((unsigned long)css->cub_addr1);
			free_page((unsigned long)css->cub_addr2);
			free_page((unsigned long)secm_area);
			mutex_unlock(&css->mutex);
			return -ENOMEM;
		}
	}
	ret = __chsc_do_secm(css, enable, secm_area);
	if (!ret) {
		css->cm_enabled = enable;
		if (css->cm_enabled) {
			ret = chsc_add_cmg_attr(css);
			if (ret) {
				memset(secm_area, 0, PAGE_SIZE);
				__chsc_do_secm(css, 0, secm_area);
				css->cm_enabled = 0;
			}
		} else
			chsc_remove_cmg_attr(css);
	}
	if (enable && !css->cm_enabled) {
		free_page((unsigned long)css->cub_addr1);
		free_page((unsigned long)css->cub_addr2);
	}
	mutex_unlock(&css->mutex);
	free_page((unsigned long)secm_area);
	return ret;
}
/*
 * Files for the channel path entries.
 */
static ssize_t
chp_status_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct channel_path *chp = container_of(dev, struct channel_path, dev);

	if (!chp)
		return 0;
	return (get_chp_status(chp->chpid) ? sprintf(buf, "online\n") :
		sprintf(buf, "offline\n"));
}
static ssize_t
chp_status_write(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t count)
{
	struct channel_path *cp = container_of(dev, struct channel_path, dev);
	char cmd[10];
	int num_args;
	int error;

	num_args = sscanf(buf, "%5s", cmd);
	if (!num_args)
		return count;

	if (!strnicmp(cmd, "on", 2))
		error = s390_vary_chpid(cp->chpid, 1);
	else if (!strnicmp(cmd, "off", 3))
		error = s390_vary_chpid(cp->chpid, 0);
	else
		error = -EINVAL;

	return error < 0 ? error : count;
}

static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write);
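/*
 * The status attribute is the user-space vary interface; assuming sysfs
 * is mounted at /sys, a chpid shows up under its css device, e.g.
 * (hypothetical chpid 0.4a):
 *
 *	echo on > /sys/devices/css0/chp0.4a/status
 *	echo off > /sys/devices/css0/chp0.4a/status
 */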
static ssize_t
chp_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct channel_path *chp = container_of(dev, struct channel_path, dev);

	if (!chp)
		return 0;
	return sprintf(buf, "%x\n", chp->desc.desc);
}

static DEVICE_ATTR(type, 0444, chp_type_show, NULL);

static ssize_t
chp_cmg_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct channel_path *chp = to_channelpath(dev);

	if (!chp)
		return 0;
	if (chp->cmg == -1) /* channel measurements not available */
		return sprintf(buf, "unknown\n");
	return sprintf(buf, "%x\n", chp->cmg);
}

static DEVICE_ATTR(cmg, 0444, chp_cmg_show, NULL);

static ssize_t
chp_shared_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct channel_path *chp = to_channelpath(dev);

	if (!chp)
		return 0;
	if (chp->shared == -1) /* channel measurements not available */
		return sprintf(buf, "unknown\n");
	return sprintf(buf, "%x\n", chp->shared);
}

static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL);

static struct attribute * chp_attrs[] = {
	&dev_attr_status.attr,
	&dev_attr_type.attr,
	&dev_attr_cmg.attr,
	&dev_attr_shared.attr,
	NULL,
};

static struct attribute_group chp_attr_group = {
	.attrs = chp_attrs,
};

static void
chp_release(struct device *dev)
{
	struct channel_path *cp;

	cp = container_of(dev, struct channel_path, dev);
	kfree(cp);
}
static int chsc_determine_channel_path_description(struct chp_id chpid,
					struct channel_path_desc *desc)
{
	int ccode, ret;

	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		struct channel_path_desc desc;
	} __attribute__ ((packed)) *scpd_area;

	scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scpd_area)
		return -ENOMEM;

	scpd_area->request.length = 0x0010;
	scpd_area->request.code = 0x0002;

	scpd_area->first_chpid = chpid.id;
	scpd_area->last_chpid = chpid.id;

	ccode = chsc(scpd_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (scpd_area->response.code) {
	case 0x0001: /* Success. */
		memcpy(desc, &scpd_area->desc,
		       sizeof(struct channel_path_desc));
		ret = 0;
		break;
	case 0x0003: /* Invalid block. */
	case 0x0007: /* Invalid format. */
	case 0x0008: /* Other invalid block. */
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		ret = -EINVAL;
		break;
	case 0x0004: /* Command not provided in model. */
		CIO_CRW_EVENT(2, "Model does not provide scpd\n");
		ret = -EOPNOTSUPP;
		break;
	default:
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      scpd_area->response.code);
		ret = -EIO;
	}
out:
	free_page((unsigned long)scpd_area);
	return ret;
}
static void
chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
			  struct cmg_chars *chars)
{
	switch (chp->cmg) {
	case 2:
	case 3:
		chp->cmg_chars = kmalloc(sizeof(struct cmg_chars),
					 GFP_KERNEL);
		if (chp->cmg_chars) {
			int i, mask;
			struct cmg_chars *cmg_chars;

			cmg_chars = chp->cmg_chars;
			for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
				mask = 0x80 >> (i + 3);
				if (cmcv & mask)
					cmg_chars->values[i] =
						chars->values[i];
				else
					cmg_chars->values[i] = 0;
			}
		}
		break;
	default:
		/* No cmg-dependent data. */
		break;
	}
}
static int
chsc_get_channel_measurement_chars(struct channel_path *chp)
{
	int ccode, ret;

	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		u32 not_valid : 1;
		u32 shared : 1;
		u32 : 22;
		u32 chpid : 8;
		u32 cmcv : 5;
		u32 : 11;
		u32 cmgq : 8;
		u32 cmg : 8;
		u32 zeroes3;
		u32 data[NR_MEASUREMENT_CHARS];
	} __attribute__ ((packed)) *scmc_area;

	scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scmc_area)
		return -ENOMEM;

	scmc_area->request.length = 0x0010;
	scmc_area->request.code = 0x0022;

	scmc_area->first_chpid = chp->chpid.id;
	scmc_area->last_chpid = chp->chpid.id;

	ccode = chsc(scmc_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (scmc_area->response.code) {
	case 0x0001: /* Success. */
		if (!scmc_area->not_valid) {
			chp->cmg = scmc_area->cmg;
			chp->shared = scmc_area->shared;
			chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
						  (struct cmg_chars *)
						  &scmc_area->data);
		} else {
			chp->cmg = -1;
			chp->shared = -1;
		}
		ret = 0;
		break;
	case 0x0003: /* Invalid block. */
	case 0x0007: /* Invalid format. */
	case 0x0008: /* Invalid bit combination. */
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		ret = -EINVAL;
		break;
	case 0x0004: /* Command not provided. */
		CIO_CRW_EVENT(2, "Model does not provide scmc\n");
		ret = -EOPNOTSUPP;
		break;
	default:
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      scmc_area->response.code);
		ret = -EIO;
	}
out:
	free_page((unsigned long)scmc_area);
	return ret;
}
/*
 * Entries for chpids on the system bus.
 * This replaces /proc/chpids.
 */
static int new_channel_path(struct chp_id chpid)
{
	struct channel_path *chp;
	int ret;

	chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL);
	if (!chp)
		return -ENOMEM;

	/* fill in status, etc. */
	chp->chpid = chpid;
	chp->state = 1;
	chp->dev.parent = &css[chpid.cssid]->device;
	chp->dev.release = chp_release;
	snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp%x.%02x", chpid.cssid,
		 chpid.id);

	/* Obtain channel path description and fill it in. */
	ret = chsc_determine_channel_path_description(chpid, &chp->desc);
	if (ret)
		goto out_free;
	/* Get channel-measurement characteristics. */
	if (css_characteristics_avail && css_chsc_characteristics.scmc
	    && css_chsc_characteristics.secm) {
		ret = chsc_get_channel_measurement_chars(chp);
		if (ret)
			goto out_free;
	} else {
		static int msg_done;

		if (!msg_done) {
			printk(KERN_WARNING "cio: Channel measurements not "
			       "available, continuing.\n");
			msg_done = 1;
		}
		chp->cmg = -1;
	}

	/* make it known to the system */
	ret = device_register(&chp->dev);
	if (ret) {
		printk(KERN_WARNING "%s: could not register %x.%02x\n",
		       __func__, chpid.cssid, chpid.id);
		goto out_free;
	}
	ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group);
	if (ret) {
		device_unregister(&chp->dev);
		goto out_free;
	}
	mutex_lock(&css[chpid.cssid]->mutex);
	if (css[chpid.cssid]->cm_enabled) {
		ret = chsc_add_chp_cmg_attr(chp);
		if (ret) {
			sysfs_remove_group(&chp->dev.kobj, &chp_attr_group);
			device_unregister(&chp->dev);
			mutex_unlock(&css[chpid.cssid]->mutex);
			goto out_free;
		}
	}
	css[chpid.cssid]->chps[chpid.id] = chp;
	mutex_unlock(&css[chpid.cssid]->mutex);
	return ret;
out_free:
	kfree(chp);
	return ret;
}
void *
chsc_get_chp_desc(struct subchannel *sch, int chp_no)
{
	struct channel_path *chp;
	struct channel_path_desc *desc;
	struct chp_id chpid;

	chp_id_init(&chpid);
	chpid.id = sch->schib.pmcw.chpid[chp_no];
	chp = chpid_to_chp(chpid);
	if (!chp)
		return NULL;
	desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL);
	if (!desc)
		return NULL;
	memcpy(desc, &chp->desc, sizeof(struct channel_path_desc));
	return desc;
}
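/*
 * Note: chsc_get_chp_desc() returns a kmalloc()ed copy of the cached
 * channel-path descriptor; the caller owns it and must kfree() it.
 */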
static int __init
chsc_alloc_sei_area(void)
{
	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sei_page)
		printk(KERN_WARNING "Can't allocate page for processing of "
		       "chsc machine checks!\n");
	return (sei_page ? 0 : -ENOMEM);
}
int __init
chsc_enable_facility(int operation_code)
{
	int ret;
	struct {
		struct chsc_header request;
		u8 reserved1:4;
		u8 format:4;
		u8 reserved2;
		u16 operation_code;
		u32 reserved3;
		u32 reserved4;
		u32 operation_data_area[252];
		struct chsc_header response;
		u32 reserved5:4;
		u32 format2:4;
		u32 reserved6:24;
	} __attribute__ ((packed)) *sda_area;

	sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!sda_area)
		return -ENOMEM;
	sda_area->request.length = 0x0400;
	sda_area->request.code = 0x0031;
	sda_area->operation_code = operation_code;

	ret = chsc(sda_area);
	if (ret > 0) {
		ret = (ret == 3) ? -ENODEV : -EBUSY;
		goto out;
	}
	switch (sda_area->response.code) {
	case 0x0001: /* everything ok */
		ret = 0;
		break;
	case 0x0003: /* invalid request block */
	case 0x0007:
		ret = -EINVAL;
		break;
	case 0x0004: /* command not provided */
	case 0x0101: /* facility not provided */
		ret = -EOPNOTSUPP;
		break;
	default: /* something went wrong */
		ret = -EIO;
	}
out:
	free_page((unsigned long)sda_area);
	return ret;
}
subsys_initcall(chsc_alloc_sei_area);

struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;
int __init
chsc_determine_css_characteristics(void)
{
	int result;
	struct {
		struct chsc_header request;
		u32 reserved1;
		u32 reserved2;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u32 general_char[510];
		u32 chsc_char[518];
	} __attribute__ ((packed)) *scsc_area;

	scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scsc_area) {
		printk(KERN_WARNING "cio: Was not able to determine "
		       "available CHSCs due to no memory.\n");
		return -ENOMEM;
	}

	scsc_area->request.length = 0x0010;
	scsc_area->request.code = 0x0010;

	result = chsc(scsc_area);
	if (result) {
		printk(KERN_WARNING "cio: Was not able to determine "
		       "available CHSCs, cc=%i.\n", result);
		result = -EIO;
		goto exit;
	}

	if (scsc_area->response.code != 1) {
		printk(KERN_WARNING "cio: Was not able to determine "
		       "available CHSCs.\n");
		result = -EIO;
		goto exit;
	}

	memcpy(&css_general_characteristics, scsc_area->general_char,
	       sizeof(css_general_characteristics));
	memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
	       sizeof(css_chsc_characteristics));
exit:
	free_page((unsigned long)scsc_area);
	return result;
}
EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);