[S390] cio: Introduce struct chp_id.
/*
 *  drivers/s390/cio/chsc.c
 *   S/390 common I/O routines -- channel subsystem call
 *
 *    Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
 *                            IBM Corporation
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
 *               Cornelia Huck (cornelia.huck@de.ibm.com)
 *               Arnd Bergmann (arndb@de.ibm.com)
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>

#include <asm/cio.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chpid.h"
#include "chsc.h"

static void *sei_page;

static int new_channel_path(struct chp_id chpid);
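
/* Map a chpid to the channel_path structure registered for it, if any. */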
static inline struct channel_path *chpid_to_chp(struct chp_id chpid)
{
        return css[chpid.cssid]->chps[chpid.id];
}

static void set_chp_logically_online(struct chp_id chpid, int onoff)
{
        chpid_to_chp(chpid)->state = onoff;
}

static int get_chp_status(struct chp_id chpid)
{
        return (chpid_to_chp(chpid) ? chpid_to_chp(chpid)->state : -ENODEV);
}
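
/*
 * Clear logically offline channel paths from the subchannel's mask of
 * operational paths (opm) so they are no longer used for I/O.
 */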
void chsc_validate_chpids(struct subchannel *sch)
{
        int mask, chp;
        struct chp_id chpid;

        chp_id_init(&chpid);
        for (chp = 0; chp <= 7; chp++) {
                mask = 0x80 >> chp;
                chpid.id = sch->schib.pmcw.chpid[chp];
                if (!get_chp_status(chpid))
                        /* disable using this path */
                        sch->opm &= ~mask;
        }
}
void chpid_is_actually_online(struct chp_id chpid)
{
        int state;

        state = get_chp_status(chpid);
        if (state < 0) {
                need_rescan = 1;
                queue_work(slow_path_wq, &slow_path_work);
        } else
                WARN_ON(!state);
}
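
/*
 * Issue a store-subchannel-description (ssd) chsc for the given subchannel
 * and cache the returned chpid and full-link-address data in sch->ssd_info.
 */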
/* FIXME: this is _always_ called for every subchannel. shouldn't we
 *        process more than one at a time? */
static int
chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
{
        int ccode, j;

        struct {
                struct chsc_header request;
                u16 reserved1a:10;
                u16 ssid:2;
                u16 reserved1b:4;
                u16 f_sch;        /* first subchannel */
                u16 reserved2;
                u16 l_sch;        /* last subchannel */
                u32 reserved3;
                struct chsc_header response;
                u32 reserved4;
                u8 sch_valid : 1;
                u8 dev_valid : 1;
                u8 st        : 3; /* subchannel type */
                u8 zeroes    : 3;
                u8 unit_addr;     /* unit address */
                u16 devno;        /* device number */
                u8 path_mask;
                u8 fla_valid_mask;
                u16 sch;          /* subchannel */
                u8 chpid[8];      /* chpids 0-7 */
                u16 fla[8];       /* full link addresses 0-7 */
        } __attribute__ ((packed)) *ssd_area;

        ssd_area = page;

        ssd_area->request.length = 0x0010;
        ssd_area->request.code = 0x0004;

        ssd_area->ssid = sch->schid.ssid;
        ssd_area->f_sch = sch->schid.sch_no;
        ssd_area->l_sch = sch->schid.sch_no;

        ccode = chsc(ssd_area);
        if (ccode > 0) {
                pr_debug("chsc returned with ccode = %d\n", ccode);
                return (ccode == 3) ? -ENODEV : -EBUSY;
        }

        switch (ssd_area->response.code) {
        case 0x0001: /* everything ok */
                break;
        case 0x0002:
                CIO_CRW_EVENT(2, "Invalid command!\n");
                return -EINVAL;
        case 0x0003:
                CIO_CRW_EVENT(2, "Error in chsc request block!\n");
                return -EINVAL;
        case 0x0004:
                CIO_CRW_EVENT(2, "Model does not provide ssd\n");
                return -EOPNOTSUPP;
        default:
                CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
                              ssd_area->response.code);
                return -EIO;
        }

        /*
         * ssd_area->st stores the type of the detected
         * subchannel, with the following definitions:
         *
         * 0: I/O subchannel:     All fields have meaning
         * 1: CHSC subchannel:    Only sch_val, st and sch
         *                        have meaning
         * 2: Message subchannel: All fields except unit_addr
         *                        have meaning
         * 3: ADM subchannel:     Only sch_val, st and sch
         *                        have meaning
         *
         * Other types are currently undefined.
         */
        if (ssd_area->st > 3) { /* uhm, that looks strange... */
                CIO_CRW_EVENT(0, "Strange subchannel type %d"
                              " for sch 0.%x.%04x\n", ssd_area->st,
                              sch->schid.ssid, sch->schid.sch_no);
                /*
                 * There may have been a new subchannel type defined in the
                 * time since this code was written; since we don't know which
                 * fields have meaning and what to do with it we just jump out
                 */
                return 0;
        } else {
                const char *type[4] = {"I/O", "chsc", "message", "ADM"};
                CIO_CRW_EVENT(6, "ssd: sch 0.%x.%04x is %s subchannel\n",
                              sch->schid.ssid, sch->schid.sch_no,
                              type[ssd_area->st]);

                sch->ssd_info.valid = 1;
                sch->ssd_info.type = ssd_area->st;
        }

        if (ssd_area->st == 0 || ssd_area->st == 2) {
                for (j = 0; j < 8; j++) {
                        if (!((0x80 >> j) & ssd_area->path_mask &
                              ssd_area->fla_valid_mask))
                                continue;
                        sch->ssd_info.chpid[j] = ssd_area->chpid[j];
                        sch->ssd_info.fla[j] = ssd_area->fla[j];
                }
        }
        return 0;
}
int
css_get_ssd_info(struct subchannel *sch)
{
        int ret;
        void *page;

        page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!page)
                return -ENOMEM;
        spin_lock_irq(sch->lock);
        ret = chsc_get_sch_desc_irq(sch, page);
        if (ret) {
                static int cio_chsc_err_msg;

                if (!cio_chsc_err_msg) {
                        printk(KERN_ERR
                               "chsc_get_sch_descriptions:"
                               " Error %d while doing chsc; "
                               "processing some machine checks may "
                               "not work\n", ret);
                        cio_chsc_err_msg = 1;
                }
        }
        spin_unlock_irq(sch->lock);
        free_page((unsigned long)page);
        if (!ret) {
                int j, mask;
                struct chp_id chpid;

                chp_id_init(&chpid);
                /* Allocate channel path structures, if needed. */
                for (j = 0; j < 8; j++) {
                        mask = 0x80 >> j;
                        chpid.id = sch->ssd_info.chpid[j];
                        if ((sch->schib.pmcw.pim & mask) &&
                            (get_chp_status(chpid) < 0))
                                new_channel_path(chpid);
                }
        }
        return ret;
}
static int
s390_subchannel_remove_chpid(struct device *dev, void *data)
{
        int j;
        int mask;
        struct subchannel *sch;
        struct channel_path *chpid;
        struct schib schib;

        sch = to_subchannel(dev);
        chpid = data;
        for (j = 0; j < 8; j++) {
                mask = 0x80 >> j;
                if ((sch->schib.pmcw.pim & mask) &&
                    (sch->schib.pmcw.chpid[j] == chpid->chpid.id))
                        break;
        }
        if (j >= 8)
                return 0;

        spin_lock_irq(sch->lock);

        stsch(sch->schid, &schib);
        if (!schib.pmcw.dnv)
                goto out_unreg;
        memcpy(&sch->schib, &schib, sizeof(struct schib));
        /* Check for single path devices. */
        if (sch->schib.pmcw.pim == 0x80)
                goto out_unreg;

        if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) &&
            (sch->schib.scsw.actl & SCSW_ACTL_SCHACT) &&
            (sch->schib.pmcw.lpum == mask)) {
                int cc;

                cc = cio_clear(sch);
                if (cc == -ENODEV)
                        goto out_unreg;
                /* Request retry of internal operation. */
                device_set_intretry(sch);
                /* Call handler. */
                if (sch->driver && sch->driver->termination)
                        sch->driver->termination(&sch->dev);
                goto out_unlock;
        }

        /* trigger path verification. */
        if (sch->driver && sch->driver->verify)
                sch->driver->verify(&sch->dev);
        else if (sch->lpm == mask)
                goto out_unreg;
out_unlock:
        spin_unlock_irq(sch->lock);
        return 0;
out_unreg:
        spin_unlock_irq(sch->lock);
        sch->lpm = 0;
        if (css_enqueue_subchannel_slow(sch->schid)) {
                css_clear_subchannel_slow_list();
                need_rescan = 1;
        }
        return 0;
}
static void s390_set_chpid_offline(struct chp_id chpid)
{
        char dbf_txt[15];
        struct device *dev;

        sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
        CIO_TRACE_EVENT(2, dbf_txt);

        if (get_chp_status(chpid) <= 0)
                return;
        dev = get_device(&(chpid_to_chp(chpid)->dev));
        bus_for_each_dev(&css_bus_type, NULL, to_channelpath(dev),
                         s390_subchannel_remove_chpid);

        if (need_rescan || css_slow_subchannels_exist())
                queue_work(slow_path_wq, &slow_path_work);
        put_device(dev);
}
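
/*
 * Context for resource-accessibility processing: the channel path that
 * became accessible and an optional link-address filter (fla/fla_mask).
 */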
struct res_acc_data {
        struct channel_path *chp;
        u32 fla_mask;
        u16 fla;
};
static int
s390_process_res_acc_sch(struct res_acc_data *res_data, struct subchannel *sch)
{
        int found;
        int chp;
        int ccode;

        found = 0;
        for (chp = 0; chp <= 7; chp++)
                /*
                 * check if chpid is in information updated by ssd
                 */
                if (sch->ssd_info.valid &&
                    sch->ssd_info.chpid[chp] == res_data->chp->chpid.id &&
                    (sch->ssd_info.fla[chp] & res_data->fla_mask)
                    == res_data->fla) {
                        found = 1;
                        break;
                }

        if (found == 0)
                return 0;

        /*
         * Do a stsch to update our subchannel structure with the
         * new path information and, if necessary, check for logically
         * offline chpids.
         */
        ccode = stsch(sch->schid, &sch->schib);
        if (ccode > 0)
                return 0;

        return 0x80 >> chp;
}
static int
s390_process_res_acc_new_sch(struct subchannel_id schid)
{
        struct schib schib;
        int ret;
        /*
         * We don't know the device yet, but since a path
         * may be available now to the device we'll have
         * to do recognition again.
         * Since we don't have any idea about which chpid
         * that beast may be on we'll have to do a stsch
         * on all devices, grr...
         */
        if (stsch_err(schid, &schib))
                /* We're through */
                return need_rescan ? -EAGAIN : -ENXIO;

        /* Put it on the slow path. */
        ret = css_enqueue_subchannel_slow(schid);
        if (ret) {
                css_clear_subchannel_slow_list();
                need_rescan = 1;
                return -EAGAIN;
        }
        return 0;
}
static int
__s390_process_res_acc(struct subchannel_id schid, void *data)
{
        int chp_mask, old_lpm;
        struct res_acc_data *res_data;
        struct subchannel *sch;

        res_data = data;
        sch = get_subchannel_by_schid(schid);
        if (!sch)
                /* Check if a subchannel is newly available. */
                return s390_process_res_acc_new_sch(schid);

        spin_lock_irq(sch->lock);

        chp_mask = s390_process_res_acc_sch(res_data, sch);

        if (chp_mask == 0) {
                spin_unlock_irq(sch->lock);
                put_device(&sch->dev);
                return 0;
        }
        old_lpm = sch->lpm;
        sch->lpm = ((sch->schib.pmcw.pim &
                     sch->schib.pmcw.pam &
                     sch->schib.pmcw.pom)
                    | chp_mask) & sch->opm;
        if (!old_lpm && sch->lpm)
                device_trigger_reprobe(sch);
        else if (sch->driver && sch->driver->verify)
                sch->driver->verify(&sch->dev);

        spin_unlock_irq(sch->lock);
        put_device(&sch->dev);
        return 0;
}
static int
s390_process_res_acc(struct res_acc_data *res_data)
{
        int rc;
        char dbf_txt[15];

        sprintf(dbf_txt, "accpr%x.%02x", res_data->chp->chpid.cssid,
                res_data->chp->chpid.id);
        CIO_TRACE_EVENT(2, dbf_txt);
        if (res_data->fla != 0) {
                sprintf(dbf_txt, "fla%x", res_data->fla);
                CIO_TRACE_EVENT(2, dbf_txt);
        }

        /*
         * I/O resources may have become accessible.
         * Scan through all subchannels that may be concerned and
         * do a validation on those.
         * The more information we have (info), the less scanning
         * we will have to do.
         */
        rc = for_each_subchannel(__s390_process_res_acc, res_data);
        if (css_slow_subchannels_exist())
                rc = -EAGAIN;
        else if (rc != -EAGAIN)
                rc = 0;
        return rc;
}
static int
__get_chpid_from_lir(void *data)
{
        struct lir {
                u8  iq;
                u8  ic;
                u16 sci;
                /* incident-node descriptor */
                u32 indesc[28];
                /* attached-node descriptor */
                u32 andesc[28];
                /* incident-specific information */
                u32 isinfo[28];
        } __attribute__ ((packed)) *lir;

        lir = data;
        if (!(lir->iq&0x80))
                /* NULL link incident record */
                return -EINVAL;
        if (!(lir->indesc[0]&0xc0000000))
                /* node descriptor not valid */
                return -EINVAL;
        if (!(lir->indesc[0]&0x10000000))
                /* don't handle device-type nodes - FIXME */
                return -EINVAL;
        /* Byte 3 contains the chpid. Could also be CTCA, but we don't care */

        return (u16) (lir->indesc[0]&0x000000ff);
}
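
/* Request/response block for the store-event-information (sei) chsc. */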
struct chsc_sei_area {
        struct chsc_header request;
        u32 reserved1;
        u32 reserved2;
        u32 reserved3;
        struct chsc_header response;
        u32 reserved4;
        u8  flags;
        u8  vf;           /* validity flags */
        u8  rs;           /* reporting source */
        u8  cc;           /* content code */
        u16 fla;          /* full link address */
        u16 rsid;         /* reporting source id */
        u32 reserved5;
        u32 reserved6;
        u8 ccdf[4096 - 16 - 24]; /* content-code dependent field */
        /* ccdf has to be big enough for a link-incident record */
} __attribute__ ((packed));
static int chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
{
        struct chp_id chpid;
        int id;

        CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
                      sei_area->rs, sei_area->rsid);
        if (sei_area->rs != 4)
                return 0;
        id = __get_chpid_from_lir(sei_area->ccdf);
        if (id < 0)
                CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
        else {
                chp_id_init(&chpid);
                chpid.id = id;
                s390_set_chpid_offline(chpid);
        }

        return 0;
}
static int chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
{
        struct res_acc_data res_data;
        struct device *dev;
        struct chp_id chpid;
        int status;
        int rc;

        CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
                      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
        if (sei_area->rs != 4)
                return 0;
        chp_id_init(&chpid);
        chpid.id = sei_area->rsid;
        /* allocate a new channel path structure, if needed */
        status = get_chp_status(chpid);
        if (status < 0)
                new_channel_path(chpid);
        else if (!status)
                return 0;
        dev = get_device(&(chpid_to_chp(chpid)->dev));
        memset(&res_data, 0, sizeof(struct res_acc_data));
        res_data.chp = to_channelpath(dev);
        if ((sei_area->vf & 0xc0) != 0) {
                res_data.fla = sei_area->fla;
                if ((sei_area->vf & 0xc0) == 0xc0)
                        /* full link address */
                        res_data.fla_mask = 0xffff;
                else
                        /* link address */
                        res_data.fla_mask = 0xff00;
        }
        rc = s390_process_res_acc(&res_data);
        put_device(dev);

        return rc;
}
static int chsc_process_sei(struct chsc_sei_area *sei_area)
{
        int rc;

        /* Check if we might have lost some information. */
        if (sei_area->flags & 0x40)
                CIO_CRW_EVENT(2, "chsc: event overflow\n");
        /* which kind of information was stored? */
        rc = 0;
        switch (sei_area->cc) {
        case 1: /* link incident */
                rc = chsc_process_sei_link_incident(sei_area);
                break;
        case 2: /* i/o resource accessibility */
                rc = chsc_process_sei_res_acc(sei_area);
                break;
        default: /* other stuff */
                CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
                              sei_area->cc);
                break;
        }

        return rc;
}
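
/*
 * Called for channel report words with a channel-subsystem source: drain
 * pending events with store-event-information (sei) chsc calls, repeating
 * as long as flag 0x80 signals that more event information is pending.
 */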
int chsc_process_crw(void)
{
        struct chsc_sei_area *sei_area;
        int ret;
        int rc;

        if (!sei_page)
                return 0;
        /* Access to sei_page is serialized through machine check handler
         * thread, so no need for locking. */
        sei_area = sei_page;

        CIO_TRACE_EVENT(2, "prcss");
        ret = 0;
        do {
                memset(sei_area, 0, sizeof(*sei_area));
                sei_area->request.length = 0x0010;
                sei_area->request.code = 0x000e;
                if (chsc(sei_area))
                        break;

                if (sei_area->response.code == 0x0001) {
                        CIO_CRW_EVENT(4, "chsc: sei successful\n");
                        rc = chsc_process_sei(sei_area);
                        if (rc)
                                ret = rc;
                } else {
                        CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
                                      sei_area->response.code);
                        ret = 0;
                        break;
                }
        } while (sei_area->flags & 0x80);

        return ret;
}
static int
__chp_add_new_sch(struct subchannel_id schid)
{
        struct schib schib;
        int ret;

        if (stsch_err(schid, &schib))
                /* We're through */
                return need_rescan ? -EAGAIN : -ENXIO;

        /* Put it on the slow path. */
        ret = css_enqueue_subchannel_slow(schid);
        if (ret) {
                css_clear_subchannel_slow_list();
                need_rescan = 1;
                return -EAGAIN;
        }
        return 0;
}
static int
__chp_add(struct subchannel_id schid, void *data)
{
        int i, mask;
        struct channel_path *chp;
        struct subchannel *sch;

        chp = data;
        sch = get_subchannel_by_schid(schid);
        if (!sch)
                /* Check if the subchannel is now available. */
                return __chp_add_new_sch(schid);
        spin_lock_irq(sch->lock);
        for (i = 0; i < 8; i++) {
                mask = 0x80 >> i;
                if ((sch->schib.pmcw.pim & mask) &&
                    (sch->schib.pmcw.chpid[i] == chp->chpid.id)) {
                        if (stsch(sch->schid, &sch->schib) != 0) {
                                /* Endgame. */
                                spin_unlock_irq(sch->lock);
                                /* Drop the reference taken by
                                 * get_subchannel_by_schid(). */
                                put_device(&sch->dev);
                                return -ENXIO;
                        }
                        break;
                }
        }
        if (i == 8) {
                spin_unlock_irq(sch->lock);
                put_device(&sch->dev);
                return 0;
        }
        sch->lpm = ((sch->schib.pmcw.pim &
                     sch->schib.pmcw.pam &
                     sch->schib.pmcw.pom)
                    | mask) & sch->opm;

        if (sch->driver && sch->driver->verify)
                sch->driver->verify(&sch->dev);

        spin_unlock_irq(sch->lock);
        put_device(&sch->dev);
        return 0;
}
static int chp_add(struct chp_id chpid)
{
        int rc;
        char dbf_txt[15];
        struct device *dev;

        if (!get_chp_status(chpid))
                return 0; /* no need to do the rest */

        sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
        CIO_TRACE_EVENT(2, dbf_txt);

        dev = get_device(&(chpid_to_chp(chpid)->dev));
        rc = for_each_subchannel(__chp_add, to_channelpath(dev));
        if (css_slow_subchannels_exist())
                rc = -EAGAIN;
        if (rc != -EAGAIN)
                rc = 0;
        put_device(dev);
        return rc;
}
/*
 * Handling of crw machine checks with channel path source.
 */
int chp_process_crw(int id, int on)
{
        struct chp_id chpid;

        chp_id_init(&chpid);
        chpid.id = id;
        if (on == 0) {
                /* Path has gone. We use the link incident routine.*/
                s390_set_chpid_offline(chpid);
                return 0; /* De-register is async anyway. */
        }
        /*
         * Path has come. Allocate a new channel path structure,
         * if needed.
         */
        if (get_chp_status(chpid) < 0)
                new_channel_path(chpid);
        /* Avoid the extra overhead in process_rec_acc. */
        return chp_add(chpid);
}
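
/*
 * Returns 1 if the subchannel has activity pending and its last-path-used
 * mask (lpum) points at the path with the given index.
 */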
static int check_for_io_on_path(struct subchannel *sch, int index)
{
        int cc;

        cc = stsch(sch->schid, &sch->schib);
        if (cc)
                return 0;
        if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index))
                return 1;
        return 0;
}
static void terminate_internal_io(struct subchannel *sch)
{
        if (cio_clear(sch)) {
                /* Recheck device in case clear failed. */
                sch->lpm = 0;
                if (device_trigger_verify(sch) != 0) {
                        if (css_enqueue_subchannel_slow(sch->schid)) {
                                css_clear_subchannel_slow_list();
                                need_rescan = 1;
                        }
                }
                return;
        }
        /* Request retry of internal operation. */
        device_set_intretry(sch);
        /* Call handler. */
        if (sch->driver && sch->driver->termination)
                sch->driver->termination(&sch->dev);
}
static void __s390_subchannel_vary_chpid(struct subchannel *sch,
                                         struct chp_id chpid, int on)
{
        int chp, old_lpm;
        unsigned long flags;

        if (!sch->ssd_info.valid)
                return;

        spin_lock_irqsave(sch->lock, flags);
        old_lpm = sch->lpm;
        for (chp = 0; chp < 8; chp++) {
                if (sch->ssd_info.chpid[chp] != chpid.id)
                        continue;

                if (on) {
                        sch->opm |= (0x80 >> chp);
                        sch->lpm |= (0x80 >> chp);
                        if (!old_lpm)
                                device_trigger_reprobe(sch);
                        else if (sch->driver && sch->driver->verify)
                                sch->driver->verify(&sch->dev);
                        break;
                }
                sch->opm &= ~(0x80 >> chp);
                sch->lpm &= ~(0x80 >> chp);
                if (check_for_io_on_path(sch, chp)) {
                        if (device_is_online(sch))
                                /* Path verification is done after killing. */
                                device_kill_io(sch);
                        else
                                /* Kill and retry internal I/O. */
                                terminate_internal_io(sch);
                } else if (!sch->lpm) {
                        if (device_trigger_verify(sch) != 0) {
                                if (css_enqueue_subchannel_slow(sch->schid)) {
                                        css_clear_subchannel_slow_list();
                                        need_rescan = 1;
                                }
                        }
                } else if (sch->driver && sch->driver->verify)
                        sch->driver->verify(&sch->dev);
                break;
        }
        spin_unlock_irqrestore(sch->lock, flags);
}
static int s390_subchannel_vary_chpid_off(struct device *dev, void *data)
{
        struct subchannel *sch;
        struct chp_id *chpid;

        sch = to_subchannel(dev);
        chpid = data;

        __s390_subchannel_vary_chpid(sch, *chpid, 0);
        return 0;
}

static int s390_subchannel_vary_chpid_on(struct device *dev, void *data)
{
        struct subchannel *sch;
        struct chp_id *chpid;

        sch = to_subchannel(dev);
        chpid = data;

        __s390_subchannel_vary_chpid(sch, *chpid, 1);
        return 0;
}
static int
__s390_vary_chpid_on(struct subchannel_id schid, void *data)
{
        struct schib schib;
        struct subchannel *sch;

        sch = get_subchannel_by_schid(schid);
        if (sch) {
                put_device(&sch->dev);
                return 0;
        }
        if (stsch_err(schid, &schib))
                /* We're through */
                return -ENXIO;
        /* Put it on the slow path. */
        if (css_enqueue_subchannel_slow(schid)) {
                css_clear_subchannel_slow_list();
                need_rescan = 1;
                return -EAGAIN;
        }
        return 0;
}
/*
 * Function: s390_vary_chpid
 * Varies the specified chpid online or offline
 */
static int s390_vary_chpid(struct chp_id chpid, int on)
{
        char dbf_text[15];
        int status;

        sprintf(dbf_text, on ? "varyon%x.%02x" : "varyoff%x.%02x",
                chpid.cssid, chpid.id);
        CIO_TRACE_EVENT(2, dbf_text);

        status = get_chp_status(chpid);
        if (status < 0) {
                printk(KERN_ERR "Can't vary unknown chpid %x.%02x\n",
                       chpid.cssid, chpid.id);
                return -EINVAL;
        }

        if (!on && !status) {
                printk(KERN_ERR "chpid %x.%02x is already offline\n",
                       chpid.cssid, chpid.id);
                return -EINVAL;
        }

        set_chp_logically_online(chpid, on);

        /*
         * Redo PathVerification on the devices the chpid connects to
         */
        bus_for_each_dev(&css_bus_type, NULL, &chpid, on ?
                         s390_subchannel_vary_chpid_on :
                         s390_subchannel_vary_chpid_off);
        if (on)
                /* Scan for new devices on varied on path. */
                for_each_subchannel(__s390_vary_chpid_on, NULL);
        if (need_rescan || css_slow_subchannels_exist())
                queue_work(slow_path_wq, &slow_path_work);
        return 0;
}
/*
 * Channel measurement related functions
 */
static ssize_t
chp_measurement_chars_read(struct kobject *kobj, char *buf, loff_t off,
                           size_t count)
{
        struct channel_path *chp;
        unsigned int size;

        chp = to_channelpath(container_of(kobj, struct device, kobj));
        if (!chp->cmg_chars)
                return 0;

        size = sizeof(struct cmg_chars);

        if (off > size)
                return 0;
        if (off + count > size)
                count = size - off;
        memcpy(buf, chp->cmg_chars + off, count);
        return count;
}

static struct bin_attribute chp_measurement_chars_attr = {
        .attr = {
                .name = "measurement_chars",
                .mode = S_IRUSR,
                .owner = THIS_MODULE,
        },
        .size = sizeof(struct cmg_chars),
        .read = chp_measurement_chars_read,
};
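
/*
 * Copy one entry out of the channel-measurement block. The hardware may
 * update the block concurrently, so the entry is copied twice and re-read
 * until both copies agree on the first value, giving a consistent
 * snapshot without locking.
 */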
static void chp_measurement_copy_block(struct cmg_entry *buf,
                struct channel_subsystem *css, struct chp_id chpid)
{
        void *area;
        struct cmg_entry *entry, reference_buf;
        int idx;

        if (chpid.id < 128) {
                area = css->cub_addr1;
                idx = chpid.id;
        } else {
                area = css->cub_addr2;
                idx = chpid.id - 128;
        }
        entry = area + (idx * sizeof(struct cmg_entry));
        do {
                memcpy(buf, entry, sizeof(*entry));
                memcpy(&reference_buf, entry, sizeof(*entry));
        } while (reference_buf.values[0] != buf->values[0]);
}
static ssize_t
chp_measurement_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
{
        struct channel_path *chp;
        struct channel_subsystem *css;
        unsigned int size;

        chp = to_channelpath(container_of(kobj, struct device, kobj));
        css = to_css(chp->dev.parent);

        size = sizeof(struct cmg_entry);

        /* Only allow single reads. */
        if (off || count < size)
                return 0;
        chp_measurement_copy_block((struct cmg_entry *)buf, css, chp->chpid);
        count = size;
        return count;
}

static struct bin_attribute chp_measurement_attr = {
        .attr = {
                .name = "measurement",
                .mode = S_IRUSR,
                .owner = THIS_MODULE,
        },
        .size = sizeof(struct cmg_entry),
        .read = chp_measurement_read,
};
static void
chsc_remove_chp_cmg_attr(struct channel_path *chp)
{
        device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
        device_remove_bin_file(&chp->dev, &chp_measurement_attr);
}

static int
chsc_add_chp_cmg_attr(struct channel_path *chp)
{
        int ret;

        ret = device_create_bin_file(&chp->dev, &chp_measurement_chars_attr);
        if (ret)
                return ret;
        ret = device_create_bin_file(&chp->dev, &chp_measurement_attr);
        if (ret)
                device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
        return ret;
}
static void
chsc_remove_cmg_attr(struct channel_subsystem *css)
{
        int i;

        for (i = 0; i <= __MAX_CHPID; i++) {
                if (!css->chps[i])
                        continue;
                chsc_remove_chp_cmg_attr(css->chps[i]);
        }
}

static int
chsc_add_cmg_attr(struct channel_subsystem *css)
{
        int i, ret;

        ret = 0;
        for (i = 0; i <= __MAX_CHPID; i++) {
                if (!css->chps[i])
                        continue;
                ret = chsc_add_chp_cmg_attr(css->chps[i]);
                if (ret)
                        goto cleanup;
        }
        return ret;
cleanup:
        for (--i; i >= 0; i--) {
                if (!css->chps[i])
                        continue;
                chsc_remove_chp_cmg_attr(css->chps[i]);
        }
        return ret;
}
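
/*
 * Enable or disable channel measurement via the secm chsc (request code
 * 0x0016); the two cub pages handed over in the request block receive the
 * measurement data.
 */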
static int
__chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
{
        struct {
                struct chsc_header request;
                u32 operation_code : 2;
                u32 : 30;
                u32 key : 4;
                u32 : 28;
                u32 zeroes1;
                u32 cub_addr1;
                u32 zeroes2;
                u32 cub_addr2;
                u32 reserved[13];
                struct chsc_header response;
                u32 status : 8;
                u32 : 4;
                u32 fmt : 4;
                u32 : 16;
        } __attribute__ ((packed)) *secm_area;
        int ret, ccode;

        secm_area = page;
        secm_area->request.length = 0x0050;
        secm_area->request.code = 0x0016;

        secm_area->key = PAGE_DEFAULT_KEY;
        secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
        secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

        secm_area->operation_code = enable ? 0 : 1;

        ccode = chsc(secm_area);
        if (ccode > 0)
                return (ccode == 3) ? -ENODEV : -EBUSY;

        switch (secm_area->response.code) {
        case 0x0001: /* Success. */
                ret = 0;
                break;
        case 0x0003: /* Invalid block. */
        case 0x0007: /* Invalid format. */
        case 0x0008: /* Other invalid block. */
                CIO_CRW_EVENT(2, "Error in chsc request block!\n");
                ret = -EINVAL;
                break;
        case 0x0004: /* Command not provided in model. */
                CIO_CRW_EVENT(2, "Model does not provide secm\n");
                ret = -EOPNOTSUPP;
                break;
        case 0x0102: /* cub addresses incorrect */
                CIO_CRW_EVENT(2, "Invalid addresses in chsc request block\n");
                ret = -EINVAL;
                break;
        case 0x0103: /* key error */
                CIO_CRW_EVENT(2, "Access key error in secm\n");
                ret = -EINVAL;
                break;
        case 0x0105: /* error while starting */
                CIO_CRW_EVENT(2, "Error while starting channel measurement\n");
                ret = -EIO;
                break;
        default:
                CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
                              secm_area->response.code);
                ret = -EIO;
        }
        return ret;
}
int
chsc_secm(struct channel_subsystem *css, int enable)
{
        void *secm_area;
        int ret;

        secm_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!secm_area)
                return -ENOMEM;

        mutex_lock(&css->mutex);
        if (enable && !css->cm_enabled) {
                css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
                css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
                if (!css->cub_addr1 || !css->cub_addr2) {
                        free_page((unsigned long)css->cub_addr1);
                        free_page((unsigned long)css->cub_addr2);
                        free_page((unsigned long)secm_area);
                        mutex_unlock(&css->mutex);
                        return -ENOMEM;
                }
        }
        ret = __chsc_do_secm(css, enable, secm_area);
        if (!ret) {
                css->cm_enabled = enable;
                if (css->cm_enabled) {
                        ret = chsc_add_cmg_attr(css);
                        if (ret) {
                                memset(secm_area, 0, PAGE_SIZE);
                                __chsc_do_secm(css, 0, secm_area);
                                css->cm_enabled = 0;
                        }
                } else
                        chsc_remove_cmg_attr(css);
        }
        if (enable && !css->cm_enabled) {
                free_page((unsigned long)css->cub_addr1);
                free_page((unsigned long)css->cub_addr2);
        }
        mutex_unlock(&css->mutex);
        free_page((unsigned long)secm_area);
        return ret;
}
/*
 * Files for the channel path entries.
 */
static ssize_t
chp_status_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct channel_path *chp = container_of(dev, struct channel_path, dev);

        if (!chp)
                return 0;
        return (get_chp_status(chp->chpid) ? sprintf(buf, "online\n") :
                sprintf(buf, "offline\n"));
}

static ssize_t
chp_status_write(struct device *dev, struct device_attribute *attr,
                 const char *buf, size_t count)
{
        struct channel_path *cp = container_of(dev, struct channel_path, dev);
        char cmd[10];
        int num_args;
        int error;

        num_args = sscanf(buf, "%5s", cmd);
        if (!num_args)
                return count;

        if (!strnicmp(cmd, "on", 2))
                error = s390_vary_chpid(cp->chpid, 1);
        else if (!strnicmp(cmd, "off", 3))
                error = s390_vary_chpid(cp->chpid, 0);
        else
                error = -EINVAL;

        return error < 0 ? error : count;
}
static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write);

static ssize_t
chp_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct channel_path *chp = container_of(dev, struct channel_path, dev);

        if (!chp)
                return 0;
        return sprintf(buf, "%x\n", chp->desc.desc);
}

static DEVICE_ATTR(type, 0444, chp_type_show, NULL);

static ssize_t
chp_cmg_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct channel_path *chp = to_channelpath(dev);

        if (!chp)
                return 0;
        if (chp->cmg == -1) /* channel measurements not available */
                return sprintf(buf, "unknown\n");
        return sprintf(buf, "%x\n", chp->cmg);
}

static DEVICE_ATTR(cmg, 0444, chp_cmg_show, NULL);

static ssize_t
chp_shared_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct channel_path *chp = to_channelpath(dev);

        if (!chp)
                return 0;
        if (chp->shared == -1) /* channel measurements not available */
                return sprintf(buf, "unknown\n");
        return sprintf(buf, "%x\n", chp->shared);
}

static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL);
static struct attribute *chp_attrs[] = {
        &dev_attr_status.attr,
        &dev_attr_type.attr,
        &dev_attr_cmg.attr,
        &dev_attr_shared.attr,
        NULL,
};

static struct attribute_group chp_attr_group = {
        .attrs = chp_attrs,
};

static void
chp_release(struct device *dev)
{
        struct channel_path *cp;

        cp = container_of(dev, struct channel_path, dev);
        kfree(cp);
}
static int chsc_determine_channel_path_description(struct chp_id chpid,
                                                   struct channel_path_desc *desc)
{
        int ccode, ret;

        struct {
                struct chsc_header request;
                u32 : 24;
                u32 first_chpid : 8;
                u32 : 24;
                u32 last_chpid : 8;
                u32 zeroes1;
                struct chsc_header response;
                u32 zeroes2;
                struct channel_path_desc desc;
        } __attribute__ ((packed)) *scpd_area;

        scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!scpd_area)
                return -ENOMEM;

        scpd_area->request.length = 0x0010;
        scpd_area->request.code = 0x0002;

        scpd_area->first_chpid = chpid.id;
        scpd_area->last_chpid = chpid.id;

        ccode = chsc(scpd_area);
        if (ccode > 0) {
                ret = (ccode == 3) ? -ENODEV : -EBUSY;
                goto out;
        }

        switch (scpd_area->response.code) {
        case 0x0001: /* Success. */
                memcpy(desc, &scpd_area->desc,
                       sizeof(struct channel_path_desc));
                ret = 0;
                break;
        case 0x0003: /* Invalid block. */
        case 0x0007: /* Invalid format. */
        case 0x0008: /* Other invalid block. */
                CIO_CRW_EVENT(2, "Error in chsc request block!\n");
                ret = -EINVAL;
                break;
        case 0x0004: /* Command not provided in model. */
                CIO_CRW_EVENT(2, "Model does not provide scpd\n");
                ret = -EOPNOTSUPP;
                break;
        default:
                CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
                              scpd_area->response.code);
                ret = -EIO;
        }
out:
        free_page((unsigned long)scpd_area);
        return ret;
}
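
/*
 * Copy the measurement characteristics flagged as valid in the cmcv mask;
 * values without a valid bit are zeroed. Only cmg formats 2 and 3 carry
 * cmg-dependent data.
 */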
static void
chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
                          struct cmg_chars *chars)
{
        switch (chp->cmg) {
        case 2:
        case 3:
                chp->cmg_chars = kmalloc(sizeof(struct cmg_chars),
                                         GFP_KERNEL);
                if (chp->cmg_chars) {
                        int i, mask;
                        struct cmg_chars *cmg_chars;

                        cmg_chars = chp->cmg_chars;
                        for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
                                mask = 0x80 >> (i + 3);
                                if (cmcv & mask)
                                        cmg_chars->values[i] = chars->values[i];
                                else
                                        cmg_chars->values[i] = 0;
                        }
                }
                break;
        default:
                /* No cmg-dependent data. */
                break;
        }
}
static int
chsc_get_channel_measurement_chars(struct channel_path *chp)
{
        int ccode, ret;

        struct {
                struct chsc_header request;
                u32 : 24;
                u32 first_chpid : 8;
                u32 : 24;
                u32 last_chpid : 8;
                u32 zeroes1;
                struct chsc_header response;
                u32 zeroes2;
                u32 not_valid : 1;
                u32 shared : 1;
                u32 : 22;
                u32 chpid : 8;
                u32 cmcv : 5;
                u32 : 11;
                u32 cmgq : 8;
                u32 cmg : 8;
                u32 zeroes3;
                u32 data[NR_MEASUREMENT_CHARS];
        } __attribute__ ((packed)) *scmc_area;

        scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!scmc_area)
                return -ENOMEM;

        scmc_area->request.length = 0x0010;
        scmc_area->request.code = 0x0022;

        scmc_area->first_chpid = chp->chpid.id;
        scmc_area->last_chpid = chp->chpid.id;

        ccode = chsc(scmc_area);
        if (ccode > 0) {
                ret = (ccode == 3) ? -ENODEV : -EBUSY;
                goto out;
        }

        switch (scmc_area->response.code) {
        case 0x0001: /* Success. */
                if (!scmc_area->not_valid) {
                        chp->cmg = scmc_area->cmg;
                        chp->shared = scmc_area->shared;
                        chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
                                                  (struct cmg_chars *)
                                                  &scmc_area->data);
                } else {
                        chp->cmg = -1;
                        chp->shared = -1;
                }
                ret = 0;
                break;
        case 0x0003: /* Invalid block. */
        case 0x0007: /* Invalid format. */
        case 0x0008: /* Invalid bit combination. */
                CIO_CRW_EVENT(2, "Error in chsc request block!\n");
                ret = -EINVAL;
                break;
        case 0x0004: /* Command not provided. */
                CIO_CRW_EVENT(2, "Model does not provide scmc\n");
                ret = -EOPNOTSUPP;
                break;
        default:
                CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
                              scmc_area->response.code);
                ret = -EIO;
        }
out:
        free_page((unsigned long)scmc_area);
        return ret;
}
/*
 * Entries for chpids on the system bus.
 * This replaces /proc/chpids.
 */
static int new_channel_path(struct chp_id chpid)
{
        struct channel_path *chp;
        int ret;

        chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL);
        if (!chp)
                return -ENOMEM;

        /* fill in status, etc. */
        chp->chpid = chpid;
        chp->state = 1;
        chp->dev.parent = &css[chpid.cssid]->device;
        chp->dev.release = chp_release;
        snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp%x.%02x", chpid.cssid,
                 chpid.id);

        /* Obtain channel path description and fill it in. */
        ret = chsc_determine_channel_path_description(chpid, &chp->desc);
        if (ret)
                goto out_free;
        /* Get channel-measurement characteristics. */
        if (css_characteristics_avail && css_chsc_characteristics.scmc
            && css_chsc_characteristics.secm) {
                ret = chsc_get_channel_measurement_chars(chp);
                if (ret)
                        goto out_free;
        } else {
                static int msg_done;

                if (!msg_done) {
                        printk(KERN_WARNING "cio: Channel measurements not "
                               "available, continuing.\n");
                        msg_done = 1;
                }
                chp->cmg = -1;
        }

        /* make it known to the system */
        ret = device_register(&chp->dev);
        if (ret) {
                printk(KERN_WARNING "%s: could not register %x.%02x\n",
                       __func__, chpid.cssid, chpid.id);
                goto out_free;
        }
        ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group);
        if (ret) {
                device_unregister(&chp->dev);
                /* chp is freed by chp_release() when the last reference
                 * is dropped, so don't kfree() it again here. */
                return ret;
        }
        mutex_lock(&css[chpid.cssid]->mutex);
        if (css[chpid.cssid]->cm_enabled) {
                ret = chsc_add_chp_cmg_attr(chp);
                if (ret) {
                        sysfs_remove_group(&chp->dev.kobj, &chp_attr_group);
                        device_unregister(&chp->dev);
                        mutex_unlock(&css[chpid.cssid]->mutex);
                        return ret;
                }
        }
        css[chpid.cssid]->chps[chpid.id] = chp;
        mutex_unlock(&css[chpid.cssid]->mutex);
        return ret;
out_free:
        kfree(chp);
        return ret;
}
void *
chsc_get_chp_desc(struct subchannel *sch, int chp_no)
{
        struct channel_path *chp;
        struct channel_path_desc *desc;
        struct chp_id chpid;

        chp_id_init(&chpid);
        chpid.id = sch->schib.pmcw.chpid[chp_no];
        chp = chpid_to_chp(chpid);
        if (!chp)
                return NULL;
        desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL);
        if (!desc)
                return NULL;
        memcpy(desc, &chp->desc, sizeof(struct channel_path_desc));
        return desc;
}
static int __init
chsc_alloc_sei_area(void)
{
        sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!sei_page)
                printk(KERN_WARNING "Can't allocate page for processing of "
                       "chsc machine checks!\n");
        return (sei_page ? 0 : -ENOMEM);
}
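
/*
 * Enable an optional facility via the sda (set domain attributes) chsc,
 * request code 0x0031; the facility is selected by @operation_code.
 */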
int __init
chsc_enable_facility(int operation_code)
{
        int ret;
        struct {
                struct chsc_header request;
                u8 reserved1:4;
                u8 format:4;
                u8 reserved2;
                u16 operation_code;
                u32 reserved3;
                u32 reserved4;
                u32 operation_data_area[252];
                struct chsc_header response;
                u32 reserved5:4;
                u32 format2:4;
                u32 reserved6:24;
        } __attribute__ ((packed)) *sda_area;

        sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
        if (!sda_area)
                return -ENOMEM;
        sda_area->request.length = 0x0400;
        sda_area->request.code = 0x0031;
        sda_area->operation_code = operation_code;

        ret = chsc(sda_area);
        if (ret > 0) {
                ret = (ret == 3) ? -ENODEV : -EBUSY;
                goto out;
        }
        switch (sda_area->response.code) {
        case 0x0001: /* everything ok */
                ret = 0;
                break;
        case 0x0003: /* invalid request block */
        case 0x0007:
                ret = -EINVAL;
                break;
        case 0x0004: /* command not provided */
        case 0x0101: /* facility not provided */
                ret = -EOPNOTSUPP;
                break;
        default: /* something went wrong */
                ret = -EIO;
        }
out:
        free_page((unsigned long)sda_area);
        return ret;
}
subsys_initcall(chsc_alloc_sei_area);

struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;
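
/*
 * Fetch the general and chsc characteristics of the channel subsystem
 * (scsc chsc, request code 0x0010) into the two globals above.
 */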
int __init
chsc_determine_css_characteristics(void)
{
        int result;
        struct {
                struct chsc_header request;
                u32 reserved1;
                u32 reserved2;
                u32 reserved3;
                struct chsc_header response;
                u32 reserved4;
                u32 general_char[510];
                u32 chsc_char[518];
        } __attribute__ ((packed)) *scsc_area;

        scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!scsc_area) {
                printk(KERN_WARNING "cio: Was not able to determine "
                       "available CHSCs due to no memory.\n");
                return -ENOMEM;
        }

        scsc_area->request.length = 0x0010;
        scsc_area->request.code = 0x0010;

        result = chsc(scsc_area);
        if (result) {
                printk(KERN_WARNING "cio: Was not able to determine "
                       "available CHSCs, cc=%i.\n", result);
                result = -EIO;
                goto exit;
        }

        if (scsc_area->response.code != 1) {
                printk(KERN_WARNING "cio: Was not able to determine "
                       "available CHSCs.\n");
                result = -EIO;
                goto exit;
        }
        memcpy(&css_general_characteristics, scsc_area->general_char,
               sizeof(css_general_characteristics));
        memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
               sizeof(css_chsc_characteristics));
exit:
        free_page((unsigned long)scsc_area);
        return result;
}

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);