/*
 *  drivers/s390/cio/chsc.c
 *   S/390 common I/O routines -- channel subsystem call
 *
 *    Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
 *			      IBM Corporation
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
 *		 Cornelia Huck (cornelia.huck@de.ibm.com)
 *		 Arnd Bergmann (arndb@de.ibm.com)
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>

#include <asm/cio.h>
#include <asm/chpid.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chp.h"
#include "chsc.h"
/*
 * Single DMA-capable page used for store-event-information (SEI) requests;
 * allocated once by chsc_alloc_sei_area() and serialized by the machine
 * check handler thread (see chsc_process_crw()).
 */
static void *sei_page;
29 struct chsc_ssd_area
{
30 struct chsc_header request
;
34 u16 f_sch
; /* first subchannel */
36 u16 l_sch
; /* last subchannel */
38 struct chsc_header response
;
42 u8 st
: 3; /* subchannel type */
44 u8 unit_addr
; /* unit address */
45 u16 devno
; /* device number */
48 u16 sch
; /* subchannel */
49 u8 chpid
[8]; /* chpids 0-7 */
50 u16 fla
[8]; /* full link addresses 0-7 */
51 } __attribute__ ((packed
));
53 int chsc_get_ssd_info(struct subchannel_id schid
, struct chsc_ssd_info
*ssd
)
56 struct chsc_ssd_area
*ssd_area
;
62 page
= get_zeroed_page(GFP_KERNEL
| GFP_DMA
);
65 ssd_area
= (struct chsc_ssd_area
*) page
;
66 ssd_area
->request
.length
= 0x0010;
67 ssd_area
->request
.code
= 0x0004;
68 ssd_area
->ssid
= schid
.ssid
;
69 ssd_area
->f_sch
= schid
.sch_no
;
70 ssd_area
->l_sch
= schid
.sch_no
;
72 ccode
= chsc(ssd_area
);
75 ret
= (ccode
== 3) ? -ENODEV
: -EBUSY
;
78 if (ssd_area
->response
.code
!= 0x0001) {
79 CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
80 schid
.ssid
, schid
.sch_no
,
81 ssd_area
->response
.code
);
85 if (!ssd_area
->sch_valid
) {
91 memset(ssd
, 0, sizeof(struct chsc_ssd_info
));
92 if ((ssd_area
->st
!= SUBCHANNEL_TYPE_IO
) &&
93 (ssd_area
->st
!= SUBCHANNEL_TYPE_MSG
))
95 ssd
->path_mask
= ssd_area
->path_mask
;
96 ssd
->fla_valid_mask
= ssd_area
->fla_valid_mask
;
97 for (i
= 0; i
< 8; i
++) {
99 if (ssd_area
->path_mask
& mask
) {
100 chp_id_init(&ssd
->chpid
[i
]);
101 ssd
->chpid
[i
].id
= ssd_area
->chpid
[i
];
103 if (ssd_area
->fla_valid_mask
& mask
)
104 ssd
->fla
[i
] = ssd_area
->fla
[i
];
111 static int check_for_io_on_path(struct subchannel
*sch
, int mask
)
115 cc
= stsch(sch
->schid
, &sch
->schib
);
118 if (sch
->schib
.scsw
.actl
&& sch
->schib
.pmcw
.lpum
== mask
)
123 static void terminate_internal_io(struct subchannel
*sch
)
125 if (cio_clear(sch
)) {
126 /* Recheck device in case clear failed. */
128 if (device_trigger_verify(sch
) != 0)
129 css_schedule_eval(sch
->schid
);
132 /* Request retry of internal operation. */
133 device_set_intretry(sch
);
135 if (sch
->driver
&& sch
->driver
->termination
)
136 sch
->driver
->termination(sch
);
139 static int s390_subchannel_remove_chpid(struct subchannel
*sch
, void *data
)
143 struct chp_id
*chpid
= data
;
146 for (j
= 0; j
< 8; j
++) {
148 if ((sch
->schib
.pmcw
.pim
& mask
) &&
149 (sch
->schib
.pmcw
.chpid
[j
] == chpid
->id
))
155 spin_lock_irq(sch
->lock
);
157 stsch(sch
->schid
, &schib
);
158 if (!css_sch_is_valid(&schib
))
160 memcpy(&sch
->schib
, &schib
, sizeof(struct schib
));
161 /* Check for single path devices. */
162 if (sch
->schib
.pmcw
.pim
== 0x80)
165 if (check_for_io_on_path(sch
, mask
)) {
166 if (device_is_online(sch
))
169 terminate_internal_io(sch
);
170 /* Re-start path verification. */
171 if (sch
->driver
&& sch
->driver
->verify
)
172 sch
->driver
->verify(sch
);
175 /* trigger path verification. */
176 if (sch
->driver
&& sch
->driver
->verify
)
177 sch
->driver
->verify(sch
);
178 else if (sch
->lpm
== mask
)
182 spin_unlock_irq(sch
->lock
);
187 spin_unlock_irq(sch
->lock
);
188 css_schedule_eval(sch
->schid
);
192 void chsc_chp_offline(struct chp_id chpid
)
196 sprintf(dbf_txt
, "chpr%x.%02x", chpid
.cssid
, chpid
.id
);
197 CIO_TRACE_EVENT(2, dbf_txt
);
199 if (chp_get_status(chpid
) <= 0)
201 for_each_subchannel_staged(s390_subchannel_remove_chpid
, NULL
, &chpid
);
204 static int s390_process_res_acc_new_sch(struct subchannel_id schid
, void *data
)
208 * We don't know the device yet, but since a path
209 * may be available now to the device we'll have
210 * to do recognition again.
211 * Since we don't have any idea about which chpid
212 * that beast may be on we'll have to do a stsch
213 * on all devices, grr...
215 if (stsch_err(schid
, &schib
))
219 /* Put it on the slow path. */
220 css_schedule_eval(schid
);
224 struct res_acc_data
{
230 static int get_res_chpid_mask(struct chsc_ssd_info
*ssd
,
231 struct res_acc_data
*data
)
236 for (i
= 0; i
< 8; i
++) {
238 if (!(ssd
->path_mask
& mask
))
240 if (!chp_id_is_equal(&ssd
->chpid
[i
], &data
->chpid
))
242 if ((ssd
->fla_valid_mask
& mask
) &&
243 ((ssd
->fla
[i
] & data
->fla_mask
) != data
->fla
))
250 static int __s390_process_res_acc(struct subchannel
*sch
, void *data
)
252 int chp_mask
, old_lpm
;
253 struct res_acc_data
*res_data
= data
;
255 spin_lock_irq(sch
->lock
);
256 chp_mask
= get_res_chpid_mask(&sch
->ssd_info
, res_data
);
259 if (stsch(sch
->schid
, &sch
->schib
))
262 sch
->lpm
= ((sch
->schib
.pmcw
.pim
&
263 sch
->schib
.pmcw
.pam
&
265 | chp_mask
) & sch
->opm
;
266 if (!old_lpm
&& sch
->lpm
)
267 device_trigger_reprobe(sch
);
268 else if (sch
->driver
&& sch
->driver
->verify
)
269 sch
->driver
->verify(sch
);
271 spin_unlock_irq(sch
->lock
);
276 static void s390_process_res_acc (struct res_acc_data
*res_data
)
280 sprintf(dbf_txt
, "accpr%x.%02x", res_data
->chpid
.cssid
,
282 CIO_TRACE_EVENT( 2, dbf_txt
);
283 if (res_data
->fla
!= 0) {
284 sprintf(dbf_txt
, "fla%x", res_data
->fla
);
285 CIO_TRACE_EVENT( 2, dbf_txt
);
289 * I/O resources may have become accessible.
290 * Scan through all subchannels that may be concerned and
291 * do a validation on those.
292 * The more information we have (info), the less scanning
293 * will we have to do.
295 for_each_subchannel_staged(__s390_process_res_acc
,
296 s390_process_res_acc_new_sch
, res_data
);
300 __get_chpid_from_lir(void *data
)
306 /* incident-node descriptor */
308 /* attached-node descriptor */
310 /* incident-specific information */
312 } __attribute__ ((packed
)) *lir
;
316 /* NULL link incident record */
318 if (!(lir
->indesc
[0]&0xc0000000))
319 /* node descriptor not valid */
321 if (!(lir
->indesc
[0]&0x10000000))
322 /* don't handle device-type nodes - FIXME */
324 /* Byte 3 contains the chpid. Could also be CTCA, but we don't care */
326 return (u16
) (lir
->indesc
[0]&0x000000ff);
329 struct chsc_sei_area
{
330 struct chsc_header request
;
334 struct chsc_header response
;
337 u8 vf
; /* validity flags */
338 u8 rs
; /* reporting source */
339 u8 cc
; /* content code */
340 u16 fla
; /* full link address */
341 u16 rsid
; /* reporting source id */
344 u8 ccdf
[4096 - 16 - 24]; /* content-code dependent field */
345 /* ccdf has to be big enough for a link-incident record */
346 } __attribute__ ((packed
));
348 static void chsc_process_sei_link_incident(struct chsc_sei_area
*sei_area
)
353 CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
354 sei_area
->rs
, sei_area
->rsid
);
355 if (sei_area
->rs
!= 4)
357 id
= __get_chpid_from_lir(sei_area
->ccdf
);
359 CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
363 chsc_chp_offline(chpid
);
367 static void chsc_process_sei_res_acc(struct chsc_sei_area
*sei_area
)
369 struct res_acc_data res_data
;
373 CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
374 "rs_id=%04x)\n", sei_area
->rs
, sei_area
->rsid
);
375 if (sei_area
->rs
!= 4)
378 chpid
.id
= sei_area
->rsid
;
379 /* allocate a new channel path structure, if needed */
380 status
= chp_get_status(chpid
);
385 memset(&res_data
, 0, sizeof(struct res_acc_data
));
386 res_data
.chpid
= chpid
;
387 if ((sei_area
->vf
& 0xc0) != 0) {
388 res_data
.fla
= sei_area
->fla
;
389 if ((sei_area
->vf
& 0xc0) == 0xc0)
390 /* full link address */
391 res_data
.fla_mask
= 0xffff;
394 res_data
.fla_mask
= 0xff00;
396 s390_process_res_acc(&res_data
);
399 struct chp_config_data
{
405 static void chsc_process_sei_chp_config(struct chsc_sei_area
*sei_area
)
407 struct chp_config_data
*data
;
411 CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
412 if (sei_area
->rs
!= 0)
414 data
= (struct chp_config_data
*) &(sei_area
->ccdf
);
416 for (num
= 0; num
<= __MAX_CHPID
; num
++) {
417 if (!chp_test_bit(data
->map
, num
))
420 printk(KERN_WARNING
"cio: processing configure event %d for "
421 "chpid %x.%02x\n", data
->op
, chpid
.cssid
, chpid
.id
);
424 chp_cfg_schedule(chpid
, 1);
427 chp_cfg_schedule(chpid
, 0);
430 chp_cfg_cancel_deconfigure(chpid
);
436 static void chsc_process_sei(struct chsc_sei_area
*sei_area
)
438 /* Check if we might have lost some information. */
439 if (sei_area
->flags
& 0x40) {
440 CIO_CRW_EVENT(2, "chsc: event overflow\n");
441 css_schedule_eval_all();
443 /* which kind of information was stored? */
444 switch (sei_area
->cc
) {
445 case 1: /* link incident*/
446 chsc_process_sei_link_incident(sei_area
);
448 case 2: /* i/o resource accessibiliy */
449 chsc_process_sei_res_acc(sei_area
);
451 case 8: /* channel-path-configuration notification */
452 chsc_process_sei_chp_config(sei_area
);
454 default: /* other stuff */
455 CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
461 void chsc_process_crw(void)
463 struct chsc_sei_area
*sei_area
;
467 /* Access to sei_page is serialized through machine check handler
468 * thread, so no need for locking. */
471 CIO_TRACE_EVENT( 2, "prcss");
473 memset(sei_area
, 0, sizeof(*sei_area
));
474 sei_area
->request
.length
= 0x0010;
475 sei_area
->request
.code
= 0x000e;
479 if (sei_area
->response
.code
== 0x0001) {
480 CIO_CRW_EVENT(4, "chsc: sei successful\n");
481 chsc_process_sei(sei_area
);
483 CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
484 sei_area
->response
.code
);
487 } while (sei_area
->flags
& 0x80);
490 static int __chp_add_new_sch(struct subchannel_id schid
, void *data
)
494 if (stsch_err(schid
, &schib
))
498 /* Put it on the slow path. */
499 css_schedule_eval(schid
);
504 static int __chp_add(struct subchannel
*sch
, void *data
)
507 struct chp_id
*chpid
= data
;
509 spin_lock_irq(sch
->lock
);
510 for (i
=0; i
<8; i
++) {
512 if ((sch
->schib
.pmcw
.pim
& mask
) &&
513 (sch
->schib
.pmcw
.chpid
[i
] == chpid
->id
))
517 spin_unlock_irq(sch
->lock
);
520 if (stsch(sch
->schid
, &sch
->schib
)) {
521 spin_unlock_irq(sch
->lock
);
522 css_schedule_eval(sch
->schid
);
525 sch
->lpm
= ((sch
->schib
.pmcw
.pim
&
526 sch
->schib
.pmcw
.pam
&
530 if (sch
->driver
&& sch
->driver
->verify
)
531 sch
->driver
->verify(sch
);
533 spin_unlock_irq(sch
->lock
);
538 void chsc_chp_online(struct chp_id chpid
)
542 sprintf(dbf_txt
, "cadd%x.%02x", chpid
.cssid
, chpid
.id
);
543 CIO_TRACE_EVENT(2, dbf_txt
);
545 if (chp_get_status(chpid
) != 0)
546 for_each_subchannel_staged(__chp_add
, __chp_add_new_sch
,
550 static void __s390_subchannel_vary_chpid(struct subchannel
*sch
,
551 struct chp_id chpid
, int on
)
557 spin_lock_irqsave(sch
->lock
, flags
);
559 for (chp
= 0; chp
< 8; chp
++) {
561 if (!(sch
->ssd_info
.path_mask
& mask
))
563 if (!chp_id_is_equal(&sch
->ssd_info
.chpid
[chp
], &chpid
))
570 device_trigger_reprobe(sch
);
571 else if (sch
->driver
&& sch
->driver
->verify
)
572 sch
->driver
->verify(sch
);
577 if (check_for_io_on_path(sch
, mask
)) {
578 if (device_is_online(sch
))
579 /* Path verification is done after killing. */
582 /* Kill and retry internal I/O. */
583 terminate_internal_io(sch
);
584 /* Re-start path verification. */
585 if (sch
->driver
&& sch
->driver
->verify
)
586 sch
->driver
->verify(sch
);
588 } else if (!sch
->lpm
) {
589 if (device_trigger_verify(sch
) != 0)
590 css_schedule_eval(sch
->schid
);
591 } else if (sch
->driver
&& sch
->driver
->verify
)
592 sch
->driver
->verify(sch
);
595 spin_unlock_irqrestore(sch
->lock
, flags
);
598 static int s390_subchannel_vary_chpid_off(struct subchannel
*sch
, void *data
)
600 struct chp_id
*chpid
= data
;
602 __s390_subchannel_vary_chpid(sch
, *chpid
, 0);
606 static int s390_subchannel_vary_chpid_on(struct subchannel
*sch
, void *data
)
608 struct chp_id
*chpid
= data
;
610 __s390_subchannel_vary_chpid(sch
, *chpid
, 1);
615 __s390_vary_chpid_on(struct subchannel_id schid
, void *data
)
619 if (stsch_err(schid
, &schib
))
622 /* Put it on the slow path. */
623 css_schedule_eval(schid
);
628 * chsc_chp_vary - propagate channel-path vary operation to subchannels
629 * @chpid: channl-path ID
630 * @on: non-zero for vary online, zero for vary offline
632 int chsc_chp_vary(struct chp_id chpid
, int on
)
635 * Redo PathVerification on the devices the chpid connects to
639 for_each_subchannel_staged(s390_subchannel_vary_chpid_on
,
640 __s390_vary_chpid_on
, &chpid
);
642 for_each_subchannel_staged(s390_subchannel_vary_chpid_off
,
649 chsc_remove_cmg_attr(struct channel_subsystem
*css
)
653 for (i
= 0; i
<= __MAX_CHPID
; i
++) {
656 chp_remove_cmg_attr(css
->chps
[i
]);
661 chsc_add_cmg_attr(struct channel_subsystem
*css
)
666 for (i
= 0; i
<= __MAX_CHPID
; i
++) {
669 ret
= chp_add_cmg_attr(css
->chps
[i
]);
675 for (--i
; i
>= 0; i
--) {
678 chp_remove_cmg_attr(css
->chps
[i
]);
684 __chsc_do_secm(struct channel_subsystem
*css
, int enable
, void *page
)
687 struct chsc_header request
;
688 u32 operation_code
: 2;
697 struct chsc_header response
;
702 } __attribute__ ((packed
)) *secm_area
;
706 secm_area
->request
.length
= 0x0050;
707 secm_area
->request
.code
= 0x0016;
709 secm_area
->key
= PAGE_DEFAULT_KEY
;
710 secm_area
->cub_addr1
= (u64
)(unsigned long)css
->cub_addr1
;
711 secm_area
->cub_addr2
= (u64
)(unsigned long)css
->cub_addr2
;
713 secm_area
->operation_code
= enable
? 0 : 1;
715 ccode
= chsc(secm_area
);
717 return (ccode
== 3) ? -ENODEV
: -EBUSY
;
719 switch (secm_area
->response
.code
) {
720 case 0x0001: /* Success. */
723 case 0x0003: /* Invalid block. */
724 case 0x0007: /* Invalid format. */
725 case 0x0008: /* Other invalid block. */
726 CIO_CRW_EVENT(2, "Error in chsc request block!\n");
729 case 0x0004: /* Command not provided in model. */
730 CIO_CRW_EVENT(2, "Model does not provide secm\n");
733 case 0x0102: /* cub adresses incorrect */
734 CIO_CRW_EVENT(2, "Invalid addresses in chsc request block\n");
737 case 0x0103: /* key error */
738 CIO_CRW_EVENT(2, "Access key error in secm\n");
741 case 0x0105: /* error while starting */
742 CIO_CRW_EVENT(2, "Error while starting channel measurement\n");
746 CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
747 secm_area
->response
.code
);
754 chsc_secm(struct channel_subsystem
*css
, int enable
)
759 secm_area
= (void *)get_zeroed_page(GFP_KERNEL
| GFP_DMA
);
763 mutex_lock(&css
->mutex
);
764 if (enable
&& !css
->cm_enabled
) {
765 css
->cub_addr1
= (void *)get_zeroed_page(GFP_KERNEL
| GFP_DMA
);
766 css
->cub_addr2
= (void *)get_zeroed_page(GFP_KERNEL
| GFP_DMA
);
767 if (!css
->cub_addr1
|| !css
->cub_addr2
) {
768 free_page((unsigned long)css
->cub_addr1
);
769 free_page((unsigned long)css
->cub_addr2
);
770 free_page((unsigned long)secm_area
);
771 mutex_unlock(&css
->mutex
);
775 ret
= __chsc_do_secm(css
, enable
, secm_area
);
777 css
->cm_enabled
= enable
;
778 if (css
->cm_enabled
) {
779 ret
= chsc_add_cmg_attr(css
);
781 memset(secm_area
, 0, PAGE_SIZE
);
782 __chsc_do_secm(css
, 0, secm_area
);
786 chsc_remove_cmg_attr(css
);
788 if (!css
->cm_enabled
) {
789 free_page((unsigned long)css
->cub_addr1
);
790 free_page((unsigned long)css
->cub_addr2
);
792 mutex_unlock(&css
->mutex
);
793 free_page((unsigned long)secm_area
);
797 int chsc_determine_channel_path_description(struct chp_id chpid
,
798 struct channel_path_desc
*desc
)
803 struct chsc_header request
;
809 struct chsc_header response
;
811 struct channel_path_desc desc
;
812 } __attribute__ ((packed
)) *scpd_area
;
814 scpd_area
= (void *)get_zeroed_page(GFP_KERNEL
| GFP_DMA
);
818 scpd_area
->request
.length
= 0x0010;
819 scpd_area
->request
.code
= 0x0002;
821 scpd_area
->first_chpid
= chpid
.id
;
822 scpd_area
->last_chpid
= chpid
.id
;
824 ccode
= chsc(scpd_area
);
826 ret
= (ccode
== 3) ? -ENODEV
: -EBUSY
;
830 switch (scpd_area
->response
.code
) {
831 case 0x0001: /* Success. */
832 memcpy(desc
, &scpd_area
->desc
,
833 sizeof(struct channel_path_desc
));
836 case 0x0003: /* Invalid block. */
837 case 0x0007: /* Invalid format. */
838 case 0x0008: /* Other invalid block. */
839 CIO_CRW_EVENT(2, "Error in chsc request block!\n");
842 case 0x0004: /* Command not provided in model. */
843 CIO_CRW_EVENT(2, "Model does not provide scpd\n");
847 CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
848 scpd_area
->response
.code
);
852 free_page((unsigned long)scpd_area
);
857 chsc_initialize_cmg_chars(struct channel_path
*chp
, u8 cmcv
,
858 struct cmg_chars
*chars
)
863 chp
->cmg_chars
= kmalloc(sizeof(struct cmg_chars
),
865 if (chp
->cmg_chars
) {
867 struct cmg_chars
*cmg_chars
;
869 cmg_chars
= chp
->cmg_chars
;
870 for (i
= 0; i
< NR_MEASUREMENT_CHARS
; i
++) {
871 mask
= 0x80 >> (i
+ 3);
873 cmg_chars
->values
[i
] = chars
->values
[i
];
875 cmg_chars
->values
[i
] = 0;
880 /* No cmg-dependent data. */
885 int chsc_get_channel_measurement_chars(struct channel_path
*chp
)
890 struct chsc_header request
;
896 struct chsc_header response
;
907 u32 data
[NR_MEASUREMENT_CHARS
];
908 } __attribute__ ((packed
)) *scmc_area
;
910 scmc_area
= (void *)get_zeroed_page(GFP_KERNEL
| GFP_DMA
);
914 scmc_area
->request
.length
= 0x0010;
915 scmc_area
->request
.code
= 0x0022;
917 scmc_area
->first_chpid
= chp
->chpid
.id
;
918 scmc_area
->last_chpid
= chp
->chpid
.id
;
920 ccode
= chsc(scmc_area
);
922 ret
= (ccode
== 3) ? -ENODEV
: -EBUSY
;
926 switch (scmc_area
->response
.code
) {
927 case 0x0001: /* Success. */
928 if (!scmc_area
->not_valid
) {
929 chp
->cmg
= scmc_area
->cmg
;
930 chp
->shared
= scmc_area
->shared
;
931 chsc_initialize_cmg_chars(chp
, scmc_area
->cmcv
,
940 case 0x0003: /* Invalid block. */
941 case 0x0007: /* Invalid format. */
942 case 0x0008: /* Invalid bit combination. */
943 CIO_CRW_EVENT(2, "Error in chsc request block!\n");
946 case 0x0004: /* Command not provided. */
947 CIO_CRW_EVENT(2, "Model does not provide scmc\n");
951 CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
952 scmc_area
->response
.code
);
956 free_page((unsigned long)scmc_area
);
960 int __init
chsc_alloc_sei_area(void)
962 sei_page
= (void *)get_zeroed_page(GFP_KERNEL
| GFP_DMA
);
964 CIO_MSG_EVENT(0, "Can't allocate page for processing of "
965 "chsc machine checks!\n");
966 return (sei_page
? 0 : -ENOMEM
);
969 void __init
chsc_free_sei_area(void)
975 chsc_enable_facility(int operation_code
)
979 struct chsc_header request
;
986 u32 operation_data_area
[252];
987 struct chsc_header response
;
991 } __attribute__ ((packed
)) *sda_area
;
993 sda_area
= (void *)get_zeroed_page(GFP_KERNEL
|GFP_DMA
);
996 sda_area
->request
.length
= 0x0400;
997 sda_area
->request
.code
= 0x0031;
998 sda_area
->operation_code
= operation_code
;
1000 ret
= chsc(sda_area
);
1002 ret
= (ret
== 3) ? -ENODEV
: -EBUSY
;
1005 switch (sda_area
->response
.code
) {
1006 case 0x0001: /* everything ok */
1009 case 0x0003: /* invalid request block */
1013 case 0x0004: /* command not provided */
1014 case 0x0101: /* facility not provided */
1017 default: /* something went wrong */
1021 free_page((unsigned long)sda_area
);
1025 struct css_general_char css_general_characteristics
;
1026 struct css_chsc_char css_chsc_characteristics
;
1029 chsc_determine_css_characteristics(void)
1033 struct chsc_header request
;
1037 struct chsc_header response
;
1039 u32 general_char
[510];
1041 } __attribute__ ((packed
)) *scsc_area
;
1043 scsc_area
= (void *)get_zeroed_page(GFP_KERNEL
| GFP_DMA
);
1045 CIO_MSG_EVENT(0, "Was not able to determine available"
1046 "CHSCs due to no memory.\n");
1050 scsc_area
->request
.length
= 0x0010;
1051 scsc_area
->request
.code
= 0x0010;
1053 result
= chsc(scsc_area
);
1055 CIO_MSG_EVENT(0, "Was not able to determine available CHSCs, "
1056 "cc=%i.\n", result
);
1061 if (scsc_area
->response
.code
!= 1) {
1062 CIO_MSG_EVENT(0, "Was not able to determine "
1063 "available CHSCs.\n");
1067 memcpy(&css_general_characteristics
, scsc_area
->general_char
,
1068 sizeof(css_general_characteristics
));
1069 memcpy(&css_chsc_characteristics
, scsc_area
->chsc_char
,
1070 sizeof(css_chsc_characteristics
));
1072 free_page ((unsigned long) scsc_area
);
EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);