[S390] cio: fix storage key handling
drivers/s390/cio/chsc_sch.c
/*
 * Driver for s390 chsc subchannels
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
 */
#include <linux/device.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/miscdevice.h>

#include <asm/compat.h>
#include <asm/cio.h>
#include <asm/chsc.h>
#include <asm/isc.h>

#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "chsc_sch.h"
#include "ioasm.h"
static debug_info_t *chsc_debug_msg_id;
static debug_info_t *chsc_debug_log_id;

#define CHSC_MSG(imp, args...) do {					\
		debug_sprintf_event(chsc_debug_msg_id, imp , ##args);	\
	} while (0)

#define CHSC_LOG(imp, txt) do {					\
		debug_text_event(chsc_debug_log_id, imp , txt);	\
	} while (0)
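
/* Dump a buffer into the hex/ascii debug area in chunks of at most buf_size bytes. */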
static void CHSC_LOG_HEX(int level, void *data, int length)
{
	while (length > 0) {
		debug_event(chsc_debug_log_id, level, data, length);
		length -= chsc_debug_log_id->buf_size;
		data += chsc_debug_log_id->buf_size;
	}
}

MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("driver for s390 chsc subchannels");
MODULE_LICENSE("GPL");
static void chsc_subchannel_irq(struct subchannel *sch)
{
	struct chsc_private *private = sch->private;
	struct chsc_request *request = private->request;
	struct irb *irb = (struct irb *)__LC_IRB;

	CHSC_LOG(4, "irb");
	CHSC_LOG_HEX(4, irb, sizeof(*irb));
	/* Copy irb to provided request and set done. */
	if (!request) {
		CHSC_MSG(0, "Interrupt on sch 0.%x.%04x with no request\n",
			 sch->schid.ssid, sch->schid.sch_no);
		return;
	}
	private->request = NULL;
	memcpy(&request->irb, irb, sizeof(*irb));
	cio_update_schib(sch);
	complete(&request->completion);
	put_device(&sch->dev);
}
static int chsc_subchannel_probe(struct subchannel *sch)
{
	struct chsc_private *private;
	int ret;

	CHSC_MSG(6, "Detected chsc subchannel 0.%x.%04x\n",
		 sch->schid.ssid, sch->schid.sch_no);
	sch->isc = CHSC_SCH_ISC;
	private = kzalloc(sizeof(*private), GFP_KERNEL);
	if (!private)
		return -ENOMEM;
	ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
	if (ret) {
		CHSC_MSG(0, "Failed to enable 0.%x.%04x: %d\n",
			 sch->schid.ssid, sch->schid.sch_no, ret);
		kfree(private);
	} else {
		sch->private = private;
		if (dev_get_uevent_suppress(&sch->dev)) {
			dev_set_uevent_suppress(&sch->dev, 0);
			kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
		}
	}
	return ret;
}
static int chsc_subchannel_remove(struct subchannel *sch)
{
	struct chsc_private *private;

	cio_disable_subchannel(sch);
	private = sch->private;
	sch->private = NULL;
	if (private->request) {
		complete(&private->request->completion);
		put_device(&sch->dev);
	}
	kfree(private);
	return 0;
}

static void chsc_subchannel_shutdown(struct subchannel *sch)
{
	cio_disable_subchannel(sch);
}
static int chsc_subchannel_prepare(struct subchannel *sch)
{
	int cc;
	struct schib schib;
	/*
	 * Don't allow suspend while the subchannel is not idle
	 * since we don't have a way to clear the subchannel and
	 * cannot disable it with a request running.
	 */
	cc = stsch(sch->schid, &schib);
	if (!cc && scsw_stctl(&schib.scsw))
		return -EAGAIN;
	return 0;
}

static int chsc_subchannel_freeze(struct subchannel *sch)
{
	return cio_disable_subchannel(sch);
}

static int chsc_subchannel_restore(struct subchannel *sch)
{
	return cio_enable_subchannel(sch, (u32)(unsigned long)sch);
}
static struct css_device_id chsc_subchannel_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_CHSC, },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(css, chsc_subchannel_ids);

static struct css_driver chsc_subchannel_driver = {
	.owner = THIS_MODULE,
	.subchannel_type = chsc_subchannel_ids,
	.irq = chsc_subchannel_irq,
	.probe = chsc_subchannel_probe,
	.remove = chsc_subchannel_remove,
	.shutdown = chsc_subchannel_shutdown,
	.prepare = chsc_subchannel_prepare,
	.freeze = chsc_subchannel_freeze,
	.thaw = chsc_subchannel_restore,
	.restore = chsc_subchannel_restore,
	.name = "chsc_subchannel",
};
static int __init chsc_init_dbfs(void)
{
	chsc_debug_msg_id = debug_register("chsc_msg", 16, 1,
					   16 * sizeof(long));
	if (!chsc_debug_msg_id)
		goto out;
	debug_register_view(chsc_debug_msg_id, &debug_sprintf_view);
	debug_set_level(chsc_debug_msg_id, 2);
	chsc_debug_log_id = debug_register("chsc_log", 16, 1, 16);
	if (!chsc_debug_log_id)
		goto out;
	debug_register_view(chsc_debug_log_id, &debug_hex_ascii_view);
	debug_set_level(chsc_debug_log_id, 2);
	return 0;
out:
	if (chsc_debug_msg_id)
		debug_unregister(chsc_debug_msg_id);
	return -ENOMEM;
}

static void chsc_remove_dbfs(void)
{
	debug_unregister(chsc_debug_log_id);
	debug_unregister(chsc_debug_msg_id);
}

static int __init chsc_init_sch_driver(void)
{
	return css_driver_register(&chsc_subchannel_driver);
}

static void chsc_cleanup_sch_driver(void)
{
	css_driver_unregister(&chsc_subchannel_driver);
}
static DEFINE_SPINLOCK(chsc_lock);

static int chsc_subchannel_match_next_free(struct device *dev, void *data)
{
	struct subchannel *sch = to_subchannel(dev);

	return sch->schib.pmcw.ena && !scsw_fctl(&sch->schib.scsw);
}

static struct subchannel *chsc_get_next_subchannel(struct subchannel *sch)
{
	struct device *dev;

	dev = driver_find_device(&chsc_subchannel_driver.drv,
				 sch ? &sch->dev : NULL, NULL,
				 chsc_subchannel_match_next_free);
	return dev ? to_subchannel(dev) : NULL;
}
/**
 * chsc_async() - try to start a chsc request asynchronously
 * @chsc_area: request to be started
 * @request: request structure to associate
 *
 * Tries to start a chsc request on one of the existing chsc subchannels.
 * Returns:
 *  %0 if the request was performed synchronously
 *  %-EINPROGRESS if the request was successfully started
 *  %-EBUSY if all chsc subchannels are busy
 *  %-ENODEV if no chsc subchannels are available
 * Context:
 *  interrupts disabled, chsc_lock held
 */
static int chsc_async(struct chsc_async_area *chsc_area,
		      struct chsc_request *request)
{
	int cc;
	struct chsc_private *private;
	struct subchannel *sch = NULL;
	int ret = -ENODEV;
	char dbf[10];

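	/*
	 * header.key is the 4-bit access key of the chsc command block;
	 * PAGE_DEFAULT_KEY presumably carries the key in the upper four
	 * bits of the storage-key byte, hence the shift below (cf. the
	 * "fix storage key handling" commit title).
	 */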
	chsc_area->header.key = PAGE_DEFAULT_KEY >> 4;
	while ((sch = chsc_get_next_subchannel(sch))) {
		spin_lock(sch->lock);
		private = sch->private;
		if (private->request) {
			spin_unlock(sch->lock);
			ret = -EBUSY;
			continue;
		}
		chsc_area->header.sid = sch->schid;
		CHSC_LOG(2, "schid");
		CHSC_LOG_HEX(2, &sch->schid, sizeof(sch->schid));
		cc = chsc(chsc_area);
		sprintf(dbf, "cc:%d", cc);
		CHSC_LOG(2, dbf);
		switch (cc) {
		case 0:
			ret = 0;
			break;
		case 1:
			sch->schib.scsw.cmd.fctl |= SCSW_FCTL_START_FUNC;
			ret = -EINPROGRESS;
			private->request = request;
			break;
		case 2:
			ret = -EBUSY;
			break;
		default:
			ret = -ENODEV;
		}
		spin_unlock(sch->lock);
		CHSC_MSG(2, "chsc on 0.%x.%04x returned cc=%d\n",
			 sch->schid.ssid, sch->schid.sch_no, cc);
		if (ret == -EINPROGRESS)
			return -EINPROGRESS;
		put_device(&sch->dev);
		if (ret == 0)
			return 0;
	}
	return ret;
}
static void chsc_log_command(struct chsc_async_area *chsc_area)
{
	char dbf[10];

	sprintf(dbf, "CHSC:%x", chsc_area->header.code);
	CHSC_LOG(0, dbf);
	CHSC_LOG_HEX(0, chsc_area, 32);
}
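
/*
 * Evaluate the irb of a completed request: require a pending status and
 * map any remaining channel status to an errno.
 */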
static int chsc_examine_irb(struct chsc_request *request)
{
	int backed_up;

	if (!(scsw_stctl(&request->irb.scsw) & SCSW_STCTL_STATUS_PEND))
		return -EIO;
	backed_up = scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHAIN_CHECK;
	request->irb.scsw.cmd.cstat &= ~SCHN_STAT_CHAIN_CHECK;
	if (scsw_cstat(&request->irb.scsw) == 0)
		return 0;
	if (!backed_up)
		return 0;
	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_PROG_CHECK)
		return -EIO;
	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_PROT_CHECK)
		return -EPERM;
	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHN_DATA_CHK)
		return -EAGAIN;
	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHN_CTRL_CHK)
		return -EAGAIN;
	return -EIO;
}
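
/*
 * CHSC_START: copy a page-sized chsc command block from user space, run it
 * on a free chsc subchannel (waiting for the interrupt if it was started
 * asynchronously) and copy the result back.
 */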
static int chsc_ioctl_start(void __user *user_area)
{
	struct chsc_request *request;
	struct chsc_async_area *chsc_area;
	int ret;
	char dbf[10];

	if (!css_general_characteristics.dynio)
		/* It makes no sense to try. */
		return -EOPNOTSUPP;
	chsc_area = (void *)get_zeroed_page(GFP_DMA | GFP_KERNEL);
	if (!chsc_area)
		return -ENOMEM;
	request = kzalloc(sizeof(*request), GFP_KERNEL);
	if (!request) {
		ret = -ENOMEM;
		goto out_free;
	}
	init_completion(&request->completion);
	if (copy_from_user(chsc_area, user_area, PAGE_SIZE)) {
		ret = -EFAULT;
		goto out_free;
	}
	chsc_log_command(chsc_area);
	spin_lock_irq(&chsc_lock);
	ret = chsc_async(chsc_area, request);
	spin_unlock_irq(&chsc_lock);
	if (ret == -EINPROGRESS) {
		wait_for_completion(&request->completion);
		ret = chsc_examine_irb(request);
	}
	/* copy area back to user */
	if (!ret)
		if (copy_to_user(user_area, chsc_area, PAGE_SIZE))
			ret = -EFAULT;
out_free:
	sprintf(dbf, "ret:%d", ret);
	CHSC_LOG(0, dbf);
	kfree(request);
	free_page((unsigned long)chsc_area);
	return ret;
}
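
/*
 * CHSC_INFO_CHANNEL_PATH: build an scpcd request (command code 0x0028) from
 * the user-supplied chpid and copy the response block back to user space.
 */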
static int chsc_ioctl_info_channel_path(void __user *user_cd)
{
	struct chsc_chp_cd *cd;
	int ret, ccode;
	struct {
		struct chsc_header request;
		u32 : 2;
		u32 m : 1;
		u32 : 1;
		u32 fmt1 : 4;
		u32 cssid : 8;
		u32 : 8;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 : 32;
		struct chsc_header response;
		u8 data[PAGE_SIZE - 20];
	} __attribute__ ((packed)) *scpcd_area;

	scpcd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scpcd_area)
		return -ENOMEM;
	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
	if (!cd) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(cd, user_cd, sizeof(*cd))) {
		ret = -EFAULT;
		goto out_free;
	}
	scpcd_area->request.length = 0x0010;
	scpcd_area->request.code = 0x0028;
	scpcd_area->m = cd->m;
	scpcd_area->fmt1 = cd->fmt;
	scpcd_area->cssid = cd->chpid.cssid;
	scpcd_area->first_chpid = cd->chpid.id;
	scpcd_area->last_chpid = cd->chpid.id;

	ccode = chsc(scpcd_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (scpcd_area->response.code != 0x0001) {
		ret = -EIO;
		CHSC_MSG(0, "scpcd: response code=%x\n",
			 scpcd_area->response.code);
		goto out_free;
	}
	memcpy(&cd->cpcb, &scpcd_area->response, scpcd_area->response.length);
	if (copy_to_user(user_cd, cd, sizeof(*cd)))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	kfree(cd);
	free_page((unsigned long)scpcd_area);
	return ret;
}
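
/*
 * CHSC_INFO_CU: build an scucd request (command code 0x0028) from the
 * user-supplied control-unit number and copy the response back.
 */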
static int chsc_ioctl_info_cu(void __user *user_cd)
{
	struct chsc_cu_cd *cd;
	int ret, ccode;
	struct {
		struct chsc_header request;
		u32 : 2;
		u32 m : 1;
		u32 : 1;
		u32 fmt1 : 4;
		u32 cssid : 8;
		u32 : 8;
		u32 first_cun : 8;
		u32 : 24;
		u32 last_cun : 8;
		u32 : 32;
		struct chsc_header response;
		u8 data[PAGE_SIZE - 20];
	} __attribute__ ((packed)) *scucd_area;

	scucd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scucd_area)
		return -ENOMEM;
	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
	if (!cd) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(cd, user_cd, sizeof(*cd))) {
		ret = -EFAULT;
		goto out_free;
	}
	scucd_area->request.length = 0x0010;
	scucd_area->request.code = 0x0028;
	scucd_area->m = cd->m;
	scucd_area->fmt1 = cd->fmt;
	scucd_area->cssid = cd->cssid;
	scucd_area->first_cun = cd->cun;
	scucd_area->last_cun = cd->cun;

	ccode = chsc(scucd_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (scucd_area->response.code != 0x0001) {
		ret = -EIO;
		CHSC_MSG(0, "scucd: response code=%x\n",
			 scucd_area->response.code);
		goto out_free;
	}
	memcpy(&cd->cucb, &scucd_area->response, scucd_area->response.length);
	if (copy_to_user(user_cd, cd, sizeof(*cd)))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	kfree(cd);
	free_page((unsigned long)scucd_area);
	return ret;
}
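
/*
 * CHSC_INFO_SCH_CU: build an sscud request (command code 0x0006) for the
 * given subchannel id and copy the response back to user space.
 */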
static int chsc_ioctl_info_sch_cu(void __user *user_cud)
{
	struct chsc_sch_cud *cud;
	int ret, ccode;
	struct {
		struct chsc_header request;
		u32 : 2;
		u32 m : 1;
		u32 : 5;
		u32 fmt1 : 4;
		u32 : 2;
		u32 ssid : 2;
		u32 first_sch : 16;
		u32 : 8;
		u32 cssid : 8;
		u32 last_sch : 16;
		u32 : 32;
		struct chsc_header response;
		u8 data[PAGE_SIZE - 20];
	} __attribute__ ((packed)) *sscud_area;

	sscud_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sscud_area)
		return -ENOMEM;
	cud = kzalloc(sizeof(*cud), GFP_KERNEL);
	if (!cud) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(cud, user_cud, sizeof(*cud))) {
		ret = -EFAULT;
		goto out_free;
	}
	sscud_area->request.length = 0x0010;
	sscud_area->request.code = 0x0006;
	sscud_area->m = cud->schid.m;
	sscud_area->fmt1 = cud->fmt;
	sscud_area->ssid = cud->schid.ssid;
	sscud_area->first_sch = cud->schid.sch_no;
	sscud_area->cssid = cud->schid.cssid;
	sscud_area->last_sch = cud->schid.sch_no;

	ccode = chsc(sscud_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (sscud_area->response.code != 0x0001) {
		ret = -EIO;
		CHSC_MSG(0, "sscud: response code=%x\n",
			 sscud_area->response.code);
		goto out_free;
	}
	memcpy(&cud->scub, &sscud_area->response, sscud_area->response.length);
	if (copy_to_user(user_cud, cud, sizeof(*cud)))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	kfree(cud);
	free_page((unsigned long)sscud_area);
	return ret;
}
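
/*
 * CHSC_INFO_CI: build an sci request (command code 0x0012) for the given
 * css/ssid and copy the configuration information back to user space.
 */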
static int chsc_ioctl_conf_info(void __user *user_ci)
{
	struct chsc_conf_info *ci;
	int ret, ccode;
	struct {
		struct chsc_header request;
		u32 : 2;
		u32 m : 1;
		u32 : 1;
		u32 fmt1 : 4;
		u32 cssid : 8;
		u32 : 6;
		u32 ssid : 2;
		u32 : 8;
		u64 : 64;
		struct chsc_header response;
		u8 data[PAGE_SIZE - 20];
	} __attribute__ ((packed)) *sci_area;

	sci_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sci_area)
		return -ENOMEM;
	ci = kzalloc(sizeof(*ci), GFP_KERNEL);
	if (!ci) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(ci, user_ci, sizeof(*ci))) {
		ret = -EFAULT;
		goto out_free;
	}
	sci_area->request.length = 0x0010;
	sci_area->request.code = 0x0012;
	sci_area->m = ci->id.m;
	sci_area->fmt1 = ci->fmt;
	sci_area->cssid = ci->id.cssid;
	sci_area->ssid = ci->id.ssid;

	ccode = chsc(sci_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (sci_area->response.code != 0x0001) {
		ret = -EIO;
		CHSC_MSG(0, "sci: response code=%x\n",
			 sci_area->response.code);
		goto out_free;
	}
	memcpy(&ci->scid, &sci_area->response, sci_area->response.length);
	if (copy_to_user(user_ci, ci, sizeof(*ci)))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	kfree(ci);
	free_page((unsigned long)sci_area);
	return ret;
}
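
/*
 * CHSC_INFO_CCL: build an sccl request (command code 0x0030); the list
 * parameters depend on the requested component type (chpid vs. cssid range).
 */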
static int chsc_ioctl_conf_comp_list(void __user *user_ccl)
{
	struct chsc_comp_list *ccl;
	int ret, ccode;
	struct {
		struct chsc_header request;
		u32 ctype : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
		u64 : 64;
		u32 list_parm[2];
		u64 : 64;
		struct chsc_header response;
		u8 data[PAGE_SIZE - 36];
	} __attribute__ ((packed)) *sccl_area;
	struct {
		u32 m : 1;
		u32 : 31;
		u32 cssid : 8;
		u32 : 16;
		u32 chpid : 8;
	} __attribute__ ((packed)) *chpid_parm;
	struct {
		u32 f_cssid : 8;
		u32 l_cssid : 8;
		u32 : 16;
		u32 res;
	} __attribute__ ((packed)) *cssids_parm;

	sccl_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccl_area)
		return -ENOMEM;
	ccl = kzalloc(sizeof(*ccl), GFP_KERNEL);
	if (!ccl) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(ccl, user_ccl, sizeof(*ccl))) {
		ret = -EFAULT;
		goto out_free;
	}
	sccl_area->request.length = 0x0020;
	sccl_area->request.code = 0x0030;
	sccl_area->fmt = ccl->req.fmt;
	sccl_area->ctype = ccl->req.ctype;
	switch (sccl_area->ctype) {
	case CCL_CU_ON_CHP:
	case CCL_IOP_CHP:
		chpid_parm = (void *)&sccl_area->list_parm;
		chpid_parm->m = ccl->req.chpid.m;
		chpid_parm->cssid = ccl->req.chpid.chp.cssid;
		chpid_parm->chpid = ccl->req.chpid.chp.id;
		break;
	case CCL_CSS_IMG:
	case CCL_CSS_IMG_CONF_CHAR:
		cssids_parm = (void *)&sccl_area->list_parm;
		cssids_parm->f_cssid = ccl->req.cssids.f_cssid;
		cssids_parm->l_cssid = ccl->req.cssids.l_cssid;
		break;
	}

	ccode = chsc(sccl_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (sccl_area->response.code != 0x0001) {
		ret = -EIO;
		CHSC_MSG(0, "sccl: response code=%x\n",
			 sccl_area->response.code);
		goto out_free;
	}
	memcpy(&ccl->sccl, &sccl_area->response, sccl_area->response.length);
	if (copy_to_user(user_ccl, ccl, sizeof(*ccl)))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	kfree(ccl);
	free_page((unsigned long)sccl_area);
	return ret;
}
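
/*
 * CHSC_INFO_CPD: fetch a channel-path description via
 * chsc_determine_channel_path_desc() and copy it back to user space.
 */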
static int chsc_ioctl_chpd(void __user *user_chpd)
{
	struct chsc_cpd_info *chpd;
	int ret;

	chpd = kzalloc(sizeof(*chpd), GFP_KERNEL);
	if (!chpd)
		return -ENOMEM;
	if (copy_from_user(chpd, user_chpd, sizeof(*chpd))) {
		ret = -EFAULT;
		goto out_free;
	}
	ret = chsc_determine_channel_path_desc(chpd->chpid, chpd->fmt,
					       chpd->rfmt, chpd->c, chpd->m,
					       &chpd->chpdb);
	if (ret)
		goto out_free;
	if (copy_to_user(user_chpd, chpd, sizeof(*chpd)))
		ret = -EFAULT;
out_free:
	kfree(chpd);
	return ret;
}
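
/*
 * CHSC_INFO_DCAL: build an sdcal request (command code 0x0034) from the
 * user-supplied parameters and copy the response back.
 */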
static int chsc_ioctl_dcal(void __user *user_dcal)
{
	struct chsc_dcal *dcal;
	int ret, ccode;
	struct {
		struct chsc_header request;
		u32 atype : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
		u32 res0[2];
		u32 list_parm[2];
		u32 res1[2];
		struct chsc_header response;
		u8 data[PAGE_SIZE - 36];
	} __attribute__ ((packed)) *sdcal_area;

	sdcal_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sdcal_area)
		return -ENOMEM;
	dcal = kzalloc(sizeof(*dcal), GFP_KERNEL);
	if (!dcal) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(dcal, user_dcal, sizeof(*dcal))) {
		ret = -EFAULT;
		goto out_free;
	}
	sdcal_area->request.length = 0x0020;
	sdcal_area->request.code = 0x0034;
	sdcal_area->atype = dcal->req.atype;
	sdcal_area->fmt = dcal->req.fmt;
	memcpy(&sdcal_area->list_parm, &dcal->req.list_parm,
	       sizeof(sdcal_area->list_parm));

	ccode = chsc(sdcal_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (sdcal_area->response.code != 0x0001) {
		ret = -EIO;
		CHSC_MSG(0, "sdcal: response code=%x\n",
			 sdcal_area->response.code);
		goto out_free;
	}
	memcpy(&dcal->sdcal, &sdcal_area->response,
	       sdcal_area->response.length);
	if (copy_to_user(user_dcal, dcal, sizeof(*dcal)))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	kfree(dcal);
	free_page((unsigned long)sdcal_area);
	return ret;
}
static long chsc_ioctl(struct file *filp, unsigned int cmd,
		       unsigned long arg)
{
	void __user *argp;

	CHSC_MSG(2, "chsc_ioctl called, cmd=%x\n", cmd);
	if (is_compat_task())
		argp = compat_ptr(arg);
	else
		argp = (void __user *)arg;
	switch (cmd) {
	case CHSC_START:
		return chsc_ioctl_start(argp);
	case CHSC_INFO_CHANNEL_PATH:
		return chsc_ioctl_info_channel_path(argp);
	case CHSC_INFO_CU:
		return chsc_ioctl_info_cu(argp);
	case CHSC_INFO_SCH_CU:
		return chsc_ioctl_info_sch_cu(argp);
	case CHSC_INFO_CI:
		return chsc_ioctl_conf_info(argp);
	case CHSC_INFO_CCL:
		return chsc_ioctl_conf_comp_list(argp);
	case CHSC_INFO_CPD:
		return chsc_ioctl_chpd(argp);
	case CHSC_INFO_DCAL:
		return chsc_ioctl_dcal(argp);
	default: /* unknown ioctl number */
		return -ENOIOCTLCMD;
	}
}
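
/*
 * Example usage from user space (a minimal sketch, not a definitive
 * interface description): it assumes <asm/chsc.h> is visible to user space
 * and that the caller already knows how to fill a valid chsc command into
 * a struct chsc_async_area.  The CHSC_START ioctl copies one full page in
 * each direction, so the buffer must be PAGE_SIZE bytes (4096 on s390);
 * on rc == 0 the chsc response has been copied back into *area.
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <unistd.h>
 *	#include <asm/chsc.h>
 *
 *	int run_chsc(struct chsc_async_area *area)
 *	{
 *		int fd, rc;
 *
 *		fd = open("/dev/chsc", O_RDWR);
 *		if (fd < 0)
 *			return -1;
 *		rc = ioctl(fd, CHSC_START, area);
 *		close(fd);
 *		return rc;
 *	}
 */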
static const struct file_operations chsc_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = chsc_ioctl,
	.compat_ioctl = chsc_ioctl,
};

static struct miscdevice chsc_misc_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "chsc",
	.fops = &chsc_fops,
};

static int __init chsc_misc_init(void)
{
	return misc_register(&chsc_misc_device);
}

static void chsc_misc_cleanup(void)
{
	misc_deregister(&chsc_misc_device);
}
static int __init chsc_sch_init(void)
{
	int ret;

	ret = chsc_init_dbfs();
	if (ret)
		return ret;
	isc_register(CHSC_SCH_ISC);
	ret = chsc_init_sch_driver();
	if (ret)
		goto out_dbf;
	ret = chsc_misc_init();
	if (ret)
		goto out_driver;
	return ret;
out_driver:
	chsc_cleanup_sch_driver();
out_dbf:
	isc_unregister(CHSC_SCH_ISC);
	chsc_remove_dbfs();
	return ret;
}

static void __exit chsc_sch_exit(void)
{
	chsc_misc_cleanup();
	chsc_cleanup_sch_driver();
	isc_unregister(CHSC_SCH_ISC);
	chsc_remove_dbfs();
}

module_init(chsc_sch_init);
module_exit(chsc_sch_exit);