[PATCH] remove CONFIG_KOBJECT_UEVENT option
[linux-2.6/kvm.git] drivers/s390/crypto/z90main.c
blob 790fcbb74b436bd8a48de9660611d0ef1c2df409
1 /*
2 * linux/drivers/s390/crypto/z90main.c
4 * z90crypt 1.3.2
6 * Copyright (C) 2001, 2004 IBM Corporation
7 * Author(s): Robert Burroughs (burrough@us.ibm.com)
8 * Eric Rossman (edrossma@us.ibm.com)
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 #include <asm/uaccess.h> // copy_(from|to)_user
28 #include <linux/compat.h>
29 #include <linux/compiler.h>
30 #include <linux/delay.h> // mdelay
31 #include <linux/init.h>
32 #include <linux/interrupt.h> // for tasklets
33 #include <linux/ioctl32.h>
34 #include <linux/miscdevice.h>
35 #include <linux/module.h>
36 #include <linux/moduleparam.h>
37 #include <linux/proc_fs.h>
38 #include <linux/syscalls.h>
39 #include "z90crypt.h"
40 #include "z90common.h"
42 #define VERSION_Z90MAIN_C "$Revision: 1.62 $"
44 static char z90main_version[] __initdata =
45 "z90main.o (" VERSION_Z90MAIN_C "/"
46 VERSION_Z90COMMON_H "/" VERSION_Z90CRYPT_H ")";
48 extern char z90hardware_version[];
50 /**
51 * Defaults that may be modified.
54 /**
55 * You can specify a different minor at compile time.
57 #ifndef Z90CRYPT_MINOR
58 #define Z90CRYPT_MINOR MISC_DYNAMIC_MINOR
59 #endif
61 /**
62 * You can specify a different domain at compile time or on the insmod
63 * command line.
65 #ifndef DOMAIN_INDEX
66 #define DOMAIN_INDEX -1
67 #endif
69 /**
70 * This is the name under which the device is registered in /proc/modules.
72 #define REG_NAME "z90crypt"
74 /**
75 * Cleanup should run every CLEANUPTIME seconds and should clean up requests
76 * older than CLEANUPTIME seconds in the past.
78 #ifndef CLEANUPTIME
79 #define CLEANUPTIME 15
80 #endif
82 /**
83 * Config should run every CONFIGTIME seconds
85 #ifndef CONFIGTIME
86 #define CONFIGTIME 30
87 #endif
89 /**
90 * The first execution of the config task should take place
91 * immediately after initialization
93 #ifndef INITIAL_CONFIGTIME
94 #define INITIAL_CONFIGTIME 1
95 #endif
97 /**
98 * Reader should run every READERTIME milliseconds
99 * With the 100Hz patch for s390, z90crypt can lock the system solid while
100 * under heavy load. We'll try to avoid that.
102 #ifndef READERTIME
103 #if HZ > 1000
104 #define READERTIME 2
105 #else
106 #define READERTIME 10
107 #endif
108 #endif
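/*
 * Editorial note, not part of the original driver: READERTIME is a period
 * in milliseconds. The reader timer in z90crypt_init_module() is armed
 * with "jiffies + (READERTIME * HZ / 1000)", so with HZ = 100 and
 * READERTIME = 10 the reader fires one jiffy (10 ms) later, while kernels
 * with HZ > 1000 use the shorter 2 ms period.
 */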
111 * turn long device array index into device pointer
113 #define LONG2DEVPTR(ndx) (z90crypt.device_p[(ndx)])
116 * turn short device array index into long device array index
118 #define SHRT2LONG(ndx) (z90crypt.overall_device_x.device_index[(ndx)])
121 * turn short device array index into device pointer
123 #define SHRT2DEVPTR(ndx) LONG2DEVPTR(SHRT2LONG(ndx))
126 * Status for a work-element
128 #define STAT_DEFAULT 0x00 // request has not been processed
130 #define STAT_ROUTED 0x80 // bit 7: requests get routed to specific device
131 // else, device is determined each write
132 #define STAT_FAILED 0x40 // bit 6: this bit is set if the request failed
133 // before being sent to the hardware.
134 #define STAT_WRITTEN 0x30 // bits 5-4: work to be done, not sent to device
135 // 0x20 // UNUSED state
136 #define STAT_READPEND 0x10 // bits 5-4: work done, we're returning data now
137 #define STAT_NOWORK 0x00 // bits off: no work on any queue
138 #define STAT_RDWRMASK 0x30 // mask for bits 5-4
141 * Macros to check the status RDWRMASK
143 #define CHK_RDWRMASK(statbyte) ((statbyte) & STAT_RDWRMASK)
144 #define SET_RDWRMASK(statbyte, newval) \
145 {(statbyte) &= ~STAT_RDWRMASK; (statbyte) |= newval;}
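/*
 * Editorial sketch, not part of the original driver: how the RDWR macros
 * are used on a work element's status byte, mirroring z90crypt_send() and
 * z90crypt_process_results() below:
 *
 *	unsigned char stat = STAT_DEFAULT;
 *	SET_RDWRMASK(stat, STAT_WRITTEN);	// request accepted for sending
 *	if (CHK_RDWRMASK(stat) == STAT_WRITTEN)
 *		SET_RDWRMASK(stat, STAT_READPEND); // results ready to copy out
 *	SET_RDWRMASK(stat, STAT_NOWORK);	// request fully completed
 */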
148 * Audit Trail. Progress of a Work element
149 * audit[0]: Unless noted otherwise, these bits are all set by the process
151 #define FP_COPYFROM 0x80 // Caller's buffer has been copied to work element
152 #define FP_BUFFREQ 0x40 // Low Level buffer requested
153 #define FP_BUFFGOT 0x20 // Low Level buffer obtained
154 #define FP_SENT 0x10 // Work element sent to a crypto device
155 // (may be set by process or by reader task)
156 #define FP_PENDING 0x08 // Work element placed on pending queue
157 // (may be set by process or by reader task)
158 #define FP_REQUEST 0x04 // Work element placed on request queue
159 #define FP_ASLEEP 0x02 // Work element about to sleep
160 #define FP_AWAKE 0x01 // Work element has been awakened
163 * audit[1]: These bits are set by the reader task and/or the cleanup task
165 #define FP_NOTPENDING 0x80 // Work element removed from pending queue
166 #define FP_AWAKENING 0x40 // Caller about to be awakened
167 #define FP_TIMEDOUT 0x20 // Caller timed out
168 #define FP_RESPSIZESET 0x10 // Response size copied to work element
169 #define FP_RESPADDRCOPIED 0x08 // Response address copied to work element
170 #define FP_RESPBUFFCOPIED 0x04 // Response buffer copied to work element
171 #define FP_REMREQUEST 0x02 // Work element removed from request queue
172 #define FP_SIGNALED 0x01 // Work element was awakened by a signal
175 * audit[2]: unused
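/*
 * Editorial note, not part of the original driver: for a request that is
 * sent to a device immediately and completes normally, audit[0] collects
 * FP_COPYFROM | FP_BUFFREQ | FP_BUFFGOT in z90crypt_prepare(),
 * FP_SENT | FP_PENDING in z90crypt_send() and FP_ASLEEP | FP_AWAKE in
 * z90crypt_rsa(), i.e. 0xfb; FP_REQUEST (0x04) is recorded instead when
 * the device queue was full and the element was parked on the request
 * queue first.
 */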
179 * state of the file handle in private_data.status
181 #define STAT_OPEN 0
182 #define STAT_CLOSED 1
185 * PID() expands to the process ID of the current process
187 #define PID() (current->pid)
190 * Selected Constants. The number of APs and the number of devices
192 #ifndef Z90CRYPT_NUM_APS
193 #define Z90CRYPT_NUM_APS 64
194 #endif
195 #ifndef Z90CRYPT_NUM_DEVS
196 #define Z90CRYPT_NUM_DEVS Z90CRYPT_NUM_APS
197 #endif
200 * Buffer size for receiving responses. The maximum Response Size
201 * is actually the maximum request size, since in an error condition
202 * the request itself may be returned unchanged.
204 #define MAX_RESPONSE_SIZE 0x0000077C
207 * A count and status-byte mask
209 struct status {
210 int st_count; // # of enabled devices
211 int disabled_count; // # of disabled devices
212 int user_disabled_count; // # of devices disabled via proc fs
213 unsigned char st_mask[Z90CRYPT_NUM_APS]; // current status mask
217 * The array of device indexes is a mechanism for fast indexing into
218 * a long (and sparse) array. For instance, if APs 3, 9 and 47 are
219 * installed, device_index[0] is 3, device_index[1] is 9, and
220 * device_index[2] is 47.
222 struct device_x {
223 int device_index[Z90CRYPT_NUM_DEVS];
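/*
 * Editorial sketch, not part of the original driver: the status helpers
 * below walk only the installed devices through this index array, e.g.
 *
 *	for (i = 0; i < get_status_totalcount(); i++) {
 *		ix = SHRT2LONG(i);		// short index -> AP number
 *		devp = LONG2DEVPTR(ix);		// AP number -> struct device *
 *	}
 *
 * which is the pattern used by get_status_qdepth_mask() and
 * get_status_perdevice_reqcnt().
 */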
227 * All devices are arranged in a single array: 64 APs
229 struct device {
230 int dev_type; // PCICA, PCICC, PCIXCC_MCL2,
231 // PCIXCC_MCL3, CEX2C
232 enum devstat dev_stat; // current device status
233 int dev_self_x; // Index in array
234 int disabled; // Set when device is in error
235 int user_disabled; // Set when device is disabled by user
236 int dev_q_depth; // q depth
237 unsigned char * dev_resp_p; // Response buffer address
238 int dev_resp_l; // Response Buffer length
239 int dev_caller_count; // Number of callers
240 int dev_total_req_cnt; // # requests for device since load
241 struct list_head dev_caller_list; // List of callers
245 * There's a struct status and a struct device_x for each device type.
247 struct hdware_block {
248 struct status hdware_mask;
249 struct status type_mask[Z90CRYPT_NUM_TYPES];
250 struct device_x type_x_addr[Z90CRYPT_NUM_TYPES];
251 unsigned char device_type_array[Z90CRYPT_NUM_APS];
255 * z90crypt is the topmost data structure in the hierarchy.
257 struct z90crypt {
258 int max_count; // Nr of possible crypto devices
259 struct status mask;
260 int q_depth_array[Z90CRYPT_NUM_DEVS];
261 int dev_type_array[Z90CRYPT_NUM_DEVS];
262 struct device_x overall_device_x; // array device indexes
263 struct device * device_p[Z90CRYPT_NUM_DEVS];
264 int terminating;
265 int domain_established;// TRUE: domain has been found
266 int cdx; // Crypto Domain Index
267 int len; // Length of this data structure
268 struct hdware_block *hdware_info;
272 * An array of these structures is pointed to from dev_caller_list
273 * The length of the array depends on the device type. For APs,
274 * there are 8.
276 * The caller buffer is allocated to the user at OPEN. At WRITE,
277 * it contains the request; at READ, the response. The function
278 * send_to_crypto_device converts the request to device-dependent
279 * form and uses the caller's OPEN-allocated buffer for the response.
281 * For the contents of caller_dev_dep_req (and caller_dev_dep_req_p,
282 * which points to it), see the discussion in z90hardware.c.
283 * Search for "extended request message block".
285 struct caller {
286 int caller_buf_l; // length of original request
287 unsigned char * caller_buf_p; // Original request on WRITE
288 int caller_dev_dep_req_l; // len device dependent request
289 unsigned char * caller_dev_dep_req_p; // Device dependent form
290 unsigned char caller_id[8]; // caller-supplied message id
291 struct list_head caller_liste;
292 unsigned char caller_dev_dep_req[MAX_RESPONSE_SIZE];
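/*
 * Editorial sketch, not part of the original driver: allocate_work_element()
 * takes one zeroed page per request, and the code lays it out as
 *
 *	we_p					struct work_element
 *	we_p + sizeof(struct work_element)	struct caller (we_p->requestptr,
 *						set in init_work_element())
 *	... + sizeof(struct caller)		copied-in input data (temp_buffer
 *						in get_crypto_request_buffer())
 *
 * The whole page is released with free_page() at the end of z90crypt_rsa().
 */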
296 * Function prototypes from z90hardware.c
298 enum hdstat query_online(int, int, int, int *, int *);
299 enum devstat reset_device(int, int, int);
300 enum devstat send_to_AP(int, int, int, unsigned char *);
301 enum devstat receive_from_AP(int, int, int, unsigned char *, unsigned char *);
302 int convert_request(unsigned char *, int, short, int, int, int *,
303 unsigned char *);
304 int convert_response(unsigned char *, unsigned char *, int *, unsigned char *);
307 * Low level function prototypes
309 static int create_z90crypt(int *);
310 static int refresh_z90crypt(int *);
311 static int find_crypto_devices(struct status *);
312 static int create_crypto_device(int);
313 static int destroy_crypto_device(int);
314 static void destroy_z90crypt(void);
315 static int refresh_index_array(struct status *, struct device_x *);
316 static int probe_device_type(struct device *);
317 static int probe_PCIXCC_type(struct device *);
320 * proc fs definitions
322 static struct proc_dir_entry *z90crypt_entry;
325 * data structures
329 * work_element.opener points back to this structure
331 struct priv_data {
332 pid_t opener_pid;
333 unsigned char status; // 0: open 1: closed
337 * A work element is allocated for each request
339 struct work_element {
340 struct priv_data *priv_data;
341 pid_t pid;
342 int devindex; // index of device processing this w_e
343 // (If request did not specify device,
344 // -1 until placed onto a queue)
345 int devtype;
346 struct list_head liste; // used for requestq and pendingq
347 char buffer[128]; // local copy of user request
348 int buff_size; // size of the buffer for the request
349 char resp_buff[RESPBUFFSIZE];
350 int resp_buff_size;
351 char __user * resp_addr; // address of response in user space
352 unsigned int funccode; // function code of request
353 wait_queue_head_t waitq;
354 unsigned long requestsent; // time at which the request was sent
355 atomic_t alarmrung; // wake-up signal
356 unsigned char caller_id[8]; // pid + counter, for this w_e
357 unsigned char status[1]; // bits to mark status of the request
358 unsigned char audit[3]; // record of work element's progress
359 unsigned char * requestptr; // address of request buffer
360 int retcode; // return code of request
364 * High level function prototypes
366 static int z90crypt_open(struct inode *, struct file *);
367 static int z90crypt_release(struct inode *, struct file *);
368 static ssize_t z90crypt_read(struct file *, char __user *, size_t, loff_t *);
369 static ssize_t z90crypt_write(struct file *, const char __user *,
370 size_t, loff_t *);
371 static long z90crypt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
372 static long z90crypt_compat_ioctl(struct file *, unsigned int, unsigned long);
374 static void z90crypt_reader_task(unsigned long);
375 static void z90crypt_schedule_reader_task(unsigned long);
376 static void z90crypt_config_task(unsigned long);
377 static void z90crypt_cleanup_task(unsigned long);
379 static int z90crypt_status(char *, char **, off_t, int, int *, void *);
380 static int z90crypt_status_write(struct file *, const char __user *,
381 unsigned long, void *);
384 * Storage allocated at initialization and used throughout the life of
385 * this insmod
387 static int domain = DOMAIN_INDEX;
388 static struct z90crypt z90crypt;
389 static int quiesce_z90crypt;
390 static spinlock_t queuespinlock;
391 static struct list_head request_list;
392 static int requestq_count;
393 static struct list_head pending_list;
394 static int pendingq_count;
396 static struct tasklet_struct reader_tasklet;
397 static struct timer_list reader_timer;
398 static struct timer_list config_timer;
399 static struct timer_list cleanup_timer;
400 static atomic_t total_open;
401 static atomic_t z90crypt_step;
403 static struct file_operations z90crypt_fops = {
404 .owner = THIS_MODULE,
405 .read = z90crypt_read,
406 .write = z90crypt_write,
407 .unlocked_ioctl = z90crypt_unlocked_ioctl,
408 #ifdef CONFIG_COMPAT
409 .compat_ioctl = z90crypt_compat_ioctl,
410 #endif
411 .open = z90crypt_open,
412 .release = z90crypt_release
415 static struct miscdevice z90crypt_misc_device = {
416 .minor = Z90CRYPT_MINOR,
417 .name = DEV_NAME,
418 .fops = &z90crypt_fops,
419 .devfs_name = DEV_NAME
423 * Documentation values.
425 MODULE_AUTHOR("zSeries Linux Crypto Team: Robert H. Burroughs, Eric D. Rossman"
426 "and Jochen Roehrig");
427 MODULE_DESCRIPTION("zSeries Linux Cryptographic Coprocessor device driver, "
428 "Copyright 2001, 2004 IBM Corporation");
429 MODULE_LICENSE("GPL");
430 module_param(domain, int, 0);
431 MODULE_PARM_DESC(domain, "domain index for device");
433 #ifdef CONFIG_COMPAT
435 * ioctl32 conversion routines
437 struct ica_rsa_modexpo_32 { // For 32-bit callers
438 compat_uptr_t inputdata;
439 unsigned int inputdatalength;
440 compat_uptr_t outputdata;
441 unsigned int outputdatalength;
442 compat_uptr_t b_key;
443 compat_uptr_t n_modulus;
446 static long
447 trans_modexpo32(struct file *filp, unsigned int cmd, unsigned long arg)
449 struct ica_rsa_modexpo_32 __user *mex32u = compat_ptr(arg);
450 struct ica_rsa_modexpo_32 mex32k;
451 struct ica_rsa_modexpo __user *mex64;
452 long ret = 0;
453 unsigned int i;
455 if (!access_ok(VERIFY_WRITE, mex32u, sizeof(struct ica_rsa_modexpo_32)))
456 return -EFAULT;
457 mex64 = compat_alloc_user_space(sizeof(struct ica_rsa_modexpo));
458 if (!access_ok(VERIFY_WRITE, mex64, sizeof(struct ica_rsa_modexpo)))
459 return -EFAULT;
460 if (copy_from_user(&mex32k, mex32u, sizeof(struct ica_rsa_modexpo_32)))
461 return -EFAULT;
462 if (__put_user(compat_ptr(mex32k.inputdata), &mex64->inputdata) ||
463 __put_user(mex32k.inputdatalength, &mex64->inputdatalength) ||
464 __put_user(compat_ptr(mex32k.outputdata), &mex64->outputdata) ||
465 __put_user(mex32k.outputdatalength, &mex64->outputdatalength) ||
466 __put_user(compat_ptr(mex32k.b_key), &mex64->b_key) ||
467 __put_user(compat_ptr(mex32k.n_modulus), &mex64->n_modulus))
468 return -EFAULT;
469 ret = z90crypt_unlocked_ioctl(filp, cmd, (unsigned long)mex64);
470 if (!ret)
471 if (__get_user(i, &mex64->outputdatalength) ||
472 __put_user(i, &mex32u->outputdatalength))
473 ret = -EFAULT;
474 return ret;
477 struct ica_rsa_modexpo_crt_32 { // For 32-bit callers
478 compat_uptr_t inputdata;
479 unsigned int inputdatalength;
480 compat_uptr_t outputdata;
481 unsigned int outputdatalength;
482 compat_uptr_t bp_key;
483 compat_uptr_t bq_key;
484 compat_uptr_t np_prime;
485 compat_uptr_t nq_prime;
486 compat_uptr_t u_mult_inv;
489 static long
490 trans_modexpo_crt32(struct file *filp, unsigned int cmd, unsigned long arg)
492 struct ica_rsa_modexpo_crt_32 __user *crt32u = compat_ptr(arg);
493 struct ica_rsa_modexpo_crt_32 crt32k;
494 struct ica_rsa_modexpo_crt __user *crt64;
495 long ret = 0;
496 unsigned int i;
498 if (!access_ok(VERIFY_WRITE, crt32u,
499 sizeof(struct ica_rsa_modexpo_crt_32)))
500 return -EFAULT;
501 crt64 = compat_alloc_user_space(sizeof(struct ica_rsa_modexpo_crt));
502 if (!access_ok(VERIFY_WRITE, crt64, sizeof(struct ica_rsa_modexpo_crt)))
503 return -EFAULT;
504 if (copy_from_user(&crt32k, crt32u,
505 sizeof(struct ica_rsa_modexpo_crt_32)))
506 return -EFAULT;
507 if (__put_user(compat_ptr(crt32k.inputdata), &crt64->inputdata) ||
508 __put_user(crt32k.inputdatalength, &crt64->inputdatalength) ||
509 __put_user(compat_ptr(crt32k.outputdata), &crt64->outputdata) ||
510 __put_user(crt32k.outputdatalength, &crt64->outputdatalength) ||
511 __put_user(compat_ptr(crt32k.bp_key), &crt64->bp_key) ||
512 __put_user(compat_ptr(crt32k.bq_key), &crt64->bq_key) ||
513 __put_user(compat_ptr(crt32k.np_prime), &crt64->np_prime) ||
514 __put_user(compat_ptr(crt32k.nq_prime), &crt64->nq_prime) ||
515 __put_user(compat_ptr(crt32k.u_mult_inv), &crt64->u_mult_inv))
516 return -EFAULT;
517 ret = z90crypt_unlocked_ioctl(filp, cmd, (unsigned long)crt64);
518 if (!ret)
519 if (__get_user(i, &crt64->outputdatalength) ||
520 __put_user(i, &crt32u->outputdatalength))
521 ret = -EFAULT;
522 return ret;
525 static long
526 z90crypt_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
528 switch (cmd) {
529 case ICAZ90STATUS:
530 case Z90QUIESCE:
531 case Z90STAT_TOTALCOUNT:
532 case Z90STAT_PCICACOUNT:
533 case Z90STAT_PCICCCOUNT:
534 case Z90STAT_PCIXCCCOUNT:
535 case Z90STAT_PCIXCCMCL2COUNT:
536 case Z90STAT_PCIXCCMCL3COUNT:
537 case Z90STAT_CEX2CCOUNT:
538 case Z90STAT_REQUESTQ_COUNT:
539 case Z90STAT_PENDINGQ_COUNT:
540 case Z90STAT_TOTALOPEN_COUNT:
541 case Z90STAT_DOMAIN_INDEX:
542 case Z90STAT_STATUS_MASK:
543 case Z90STAT_QDEPTH_MASK:
544 case Z90STAT_PERDEV_REQCNT:
545 return z90crypt_unlocked_ioctl(filp, cmd, arg);
546 case ICARSAMODEXPO:
547 return trans_modexpo32(filp, cmd, arg);
548 case ICARSACRT:
549 return trans_modexpo_crt32(filp, cmd, arg);
550 default:
551 return -ENOIOCTLCMD;
554 #endif
557 * The module initialization code.
559 static int __init
560 z90crypt_init_module(void)
562 int result, nresult;
563 struct proc_dir_entry *entry;
565 PDEBUG("PID %d\n", PID());
567 if ((domain < -1) || (domain > 15)) {
568 PRINTKW("Invalid param: domain = %d. Not loading.\n", domain);
569 return -EINVAL;
572 /* Register as misc device with given minor (or get a dynamic one). */
573 result = misc_register(&z90crypt_misc_device);
574 if (result < 0) {
575 PRINTKW(KERN_ERR "misc_register (minor %d) failed with %d\n",
576 z90crypt_misc_device.minor, result);
577 return result;
580 PDEBUG("Registered " DEV_NAME " with result %d\n", result);
582 result = create_z90crypt(&domain);
583 if (result != 0) {
584 PRINTKW("create_z90crypt (domain index %d) failed with %d.\n",
585 domain, result);
586 result = -ENOMEM;
587 goto init_module_cleanup;
590 if (result == 0) {
591 PRINTKN("Version %d.%d.%d loaded, built on %s %s\n",
592 z90crypt_VERSION, z90crypt_RELEASE, z90crypt_VARIANT,
593 __DATE__, __TIME__);
594 PRINTKN("%s\n", z90main_version);
595 PRINTKN("%s\n", z90hardware_version);
596 PDEBUG("create_z90crypt (domain index %d) successful.\n",
597 domain);
598 } else
599 PRINTK("No devices at startup\n");
601 /* Initialize globals. */
602 spin_lock_init(&queuespinlock);
604 INIT_LIST_HEAD(&pending_list);
605 pendingq_count = 0;
607 INIT_LIST_HEAD(&request_list);
608 requestq_count = 0;
610 quiesce_z90crypt = 0;
612 atomic_set(&total_open, 0);
613 atomic_set(&z90crypt_step, 0);
615 /* Set up the cleanup task. */
616 init_timer(&cleanup_timer);
617 cleanup_timer.function = z90crypt_cleanup_task;
618 cleanup_timer.data = 0;
619 cleanup_timer.expires = jiffies + (CLEANUPTIME * HZ);
620 add_timer(&cleanup_timer);
622 /* Set up the proc file system */
623 entry = create_proc_entry("driver/z90crypt", 0644, 0);
624 if (entry) {
625 entry->nlink = 1;
626 entry->data = 0;
627 entry->read_proc = z90crypt_status;
628 entry->write_proc = z90crypt_status_write;
630 else
631 PRINTK("Couldn't create z90crypt proc entry\n");
632 z90crypt_entry = entry;
634 /* Set up the configuration task. */
635 init_timer(&config_timer);
636 config_timer.function = z90crypt_config_task;
637 config_timer.data = 0;
638 config_timer.expires = jiffies + (INITIAL_CONFIGTIME * HZ);
639 add_timer(&config_timer);
641 /* Set up the reader task */
642 tasklet_init(&reader_tasklet, z90crypt_reader_task, 0);
643 init_timer(&reader_timer);
644 reader_timer.function = z90crypt_schedule_reader_task;
645 reader_timer.data = 0;
646 reader_timer.expires = jiffies + (READERTIME * HZ / 1000);
647 add_timer(&reader_timer);
649 return 0; // success
651 init_module_cleanup:
652 if ((nresult = misc_deregister(&z90crypt_misc_device)))
653 PRINTK("misc_deregister failed with %d.\n", nresult);
654 else
655 PDEBUG("misc_deregister successful.\n");
657 return result; // failure
661 * The module termination code
663 static void __exit
664 z90crypt_cleanup_module(void)
666 int nresult;
668 PDEBUG("PID %d\n", PID());
670 remove_proc_entry("driver/z90crypt", 0);
672 if ((nresult = misc_deregister(&z90crypt_misc_device)))
673 PRINTK("misc_deregister failed with %d.\n", nresult);
674 else
675 PDEBUG("misc_deregister successful.\n");
677 /* Remove the tasks */
678 tasklet_kill(&reader_tasklet);
679 del_timer(&reader_timer);
680 del_timer(&config_timer);
681 del_timer(&cleanup_timer);
683 destroy_z90crypt();
685 PRINTKN("Unloaded.\n");
689 * Functions running under a process id
691 * The I/O functions:
692 * z90crypt_open
693 * z90crypt_release
694 * z90crypt_read
695 * z90crypt_write
696 * z90crypt_unlocked_ioctl
697 * z90crypt_status
698 * z90crypt_status_write
699 * disable_card
700 * enable_card
702 * Helper functions:
703 * z90crypt_rsa
704 * z90crypt_prepare
705 * z90crypt_send
706 * z90crypt_process_results
709 static int
710 z90crypt_open(struct inode *inode, struct file *filp)
712 struct priv_data *private_data_p;
714 if (quiesce_z90crypt)
715 return -EQUIESCE;
717 private_data_p = kmalloc(sizeof(struct priv_data), GFP_KERNEL);
718 if (!private_data_p) {
719 PRINTK("Memory allocate failed\n");
720 return -ENOMEM;
723 memset((void *)private_data_p, 0, sizeof(struct priv_data));
724 private_data_p->status = STAT_OPEN;
725 private_data_p->opener_pid = PID();
726 filp->private_data = private_data_p;
727 atomic_inc(&total_open);
729 return 0;
732 static int
733 z90crypt_release(struct inode *inode, struct file *filp)
735 struct priv_data *private_data_p = filp->private_data;
737 PDEBUG("PID %d (filp %p)\n", PID(), filp);
739 private_data_p->status = STAT_CLOSED;
740 memset(private_data_p, 0, sizeof(struct priv_data));
741 kfree(private_data_p);
742 atomic_dec(&total_open);
744 return 0;
748 * there are two read functions, of which compile options will choose one
749 * without USE_GET_RANDOM_BYTES
750 * => read() always returns -EPERM;
751 * otherwise
752 * => read() uses get_random_bytes() kernel function
754 #ifndef USE_GET_RANDOM_BYTES
756 * z90crypt_read will not be supported beyond z90crypt 1.3.1
758 static ssize_t
759 z90crypt_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
761 PDEBUG("filp %p (PID %d)\n", filp, PID());
762 return -EPERM;
764 #else // we want to use get_random_bytes
766 * read() just returns a string of random bytes. Since we have no way
767 * to generate these cryptographically, we just execute get_random_bytes
768 * for the length specified.
770 #include <linux/random.h>
771 static ssize_t
772 z90crypt_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
774 unsigned char *temp_buff;
776 PDEBUG("filp %p (PID %d)\n", filp, PID());
778 if (quiesce_z90crypt)
779 return -EQUIESCE;
780 if (count < 0) {
781 PRINTK("Requested random byte count negative: %ld\n", count);
782 return -EINVAL;
784 if (count > RESPBUFFSIZE) {
785 PDEBUG("count[%d] > RESPBUFFSIZE", count);
786 return -EINVAL;
788 if (count == 0)
789 return 0;
790 temp_buff = kmalloc(RESPBUFFSIZE, GFP_KERNEL);
791 if (!temp_buff) {
792 PRINTK("Memory allocate failed\n");
793 return -ENOMEM;
795 get_random_bytes(temp_buff, count);
797 if (copy_to_user(buf, temp_buff, count) != 0) {
798 kfree(temp_buff);
799 return -EFAULT;
801 kfree(temp_buff);
802 return count;
804 #endif
807 * Write is not allowed
809 static ssize_t
810 z90crypt_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos)
812 PDEBUG("filp %p (PID %d)\n", filp, PID());
813 return -EPERM;
817 * New status functions
819 static inline int
820 get_status_totalcount(void)
822 return z90crypt.hdware_info->hdware_mask.st_count;
825 static inline int
826 get_status_PCICAcount(void)
828 return z90crypt.hdware_info->type_mask[PCICA].st_count;
831 static inline int
832 get_status_PCICCcount(void)
834 return z90crypt.hdware_info->type_mask[PCICC].st_count;
837 static inline int
838 get_status_PCIXCCcount(void)
840 return z90crypt.hdware_info->type_mask[PCIXCC_MCL2].st_count +
841 z90crypt.hdware_info->type_mask[PCIXCC_MCL3].st_count;
844 static inline int
845 get_status_PCIXCCMCL2count(void)
847 return z90crypt.hdware_info->type_mask[PCIXCC_MCL2].st_count;
850 static inline int
851 get_status_PCIXCCMCL3count(void)
853 return z90crypt.hdware_info->type_mask[PCIXCC_MCL3].st_count;
856 static inline int
857 get_status_CEX2Ccount(void)
859 return z90crypt.hdware_info->type_mask[CEX2C].st_count;
862 static inline int
863 get_status_requestq_count(void)
865 return requestq_count;
868 static inline int
869 get_status_pendingq_count(void)
871 return pendingq_count;
874 static inline int
875 get_status_totalopen_count(void)
877 return atomic_read(&total_open);
880 static inline int
881 get_status_domain_index(void)
883 return z90crypt.cdx;
886 static inline unsigned char *
887 get_status_status_mask(unsigned char status[Z90CRYPT_NUM_APS])
889 int i, ix;
891 memcpy(status, z90crypt.hdware_info->device_type_array,
892 Z90CRYPT_NUM_APS);
894 for (i = 0; i < get_status_totalcount(); i++) {
895 ix = SHRT2LONG(i);
896 if (LONG2DEVPTR(ix)->user_disabled)
897 status[ix] = 0x0d;
900 return status;
903 static inline unsigned char *
904 get_status_qdepth_mask(unsigned char qdepth[Z90CRYPT_NUM_APS])
906 int i, ix;
908 memset(qdepth, 0, Z90CRYPT_NUM_APS);
910 for (i = 0; i < get_status_totalcount(); i++) {
911 ix = SHRT2LONG(i);
912 qdepth[ix] = LONG2DEVPTR(ix)->dev_caller_count;
915 return qdepth;
918 static inline unsigned int *
919 get_status_perdevice_reqcnt(unsigned int reqcnt[Z90CRYPT_NUM_APS])
921 int i, ix;
923 memset(reqcnt, 0, Z90CRYPT_NUM_APS * sizeof(int));
925 for (i = 0; i < get_status_totalcount(); i++) {
926 ix = SHRT2LONG(i);
927 reqcnt[ix] = LONG2DEVPTR(ix)->dev_total_req_cnt;
930 return reqcnt;
933 static inline void
934 init_work_element(struct work_element *we_p,
935 struct priv_data *priv_data, pid_t pid)
937 int step;
939 we_p->requestptr = (unsigned char *)we_p + sizeof(struct work_element);
940 /* Come up with a unique id for this caller. */
941 step = atomic_inc_return(&z90crypt_step);
942 memcpy(we_p->caller_id+0, (void *) &pid, sizeof(pid));
943 memcpy(we_p->caller_id+4, (void *) &step, sizeof(step));
944 we_p->pid = pid;
945 we_p->priv_data = priv_data;
946 we_p->status[0] = STAT_DEFAULT;
947 we_p->audit[0] = 0x00;
948 we_p->audit[1] = 0x00;
949 we_p->audit[2] = 0x00;
950 we_p->resp_buff_size = 0;
951 we_p->retcode = 0;
952 we_p->devindex = -1;
953 we_p->devtype = -1;
954 atomic_set(&we_p->alarmrung, 0);
955 init_waitqueue_head(&we_p->waitq);
956 INIT_LIST_HEAD(&(we_p->liste));
959 static inline int
960 allocate_work_element(struct work_element **we_pp,
961 struct priv_data *priv_data_p, pid_t pid)
963 struct work_element *we_p;
965 we_p = (struct work_element *) get_zeroed_page(GFP_KERNEL);
966 if (!we_p)
967 return -ENOMEM;
968 init_work_element(we_p, priv_data_p, pid);
969 *we_pp = we_p;
970 return 0;
973 static inline void
974 remove_device(struct device *device_p)
976 if (!device_p || (device_p->disabled != 0))
977 return;
978 device_p->disabled = 1;
979 z90crypt.hdware_info->type_mask[device_p->dev_type].disabled_count++;
980 z90crypt.hdware_info->hdware_mask.disabled_count++;
984 * Bitlength limits for each card
986 * There are new MCLs which allow more bitlengths. See the table for details.
987 * The MCL must be applied and the newer bitlengths enabled for these to work.
989 * Card Type Old limit New limit
990 * PCICA ??-2048 same (the lower limit is less than 128 bit...)
991 * PCICC 512-1024 512-2048
992 * PCIXCC_MCL2 512-2048 ----- (applying any GA LIC will make an MCL3 card)
993 * PCIXCC_MCL3 ----- 128-2048
994 * CEX2C 512-2048 128-2048
996 * ext_bitlens (extended bitlengths) is a global, since you should not apply an
997 * MCL to just one card in a machine. We assume, at first, that all cards have
998 * these capabilities.
1000 int ext_bitlens = 1; // This is global
1001 #define PCIXCC_MIN_MOD_SIZE 16 // 128 bits
1002 #define OLD_PCIXCC_MIN_MOD_SIZE 64 // 512 bits
1003 #define PCICC_MIN_MOD_SIZE 64 // 512 bits
1004 #define OLD_PCICC_MAX_MOD_SIZE 128 // 1024 bits
1005 #define MAX_MOD_SIZE 256 // 2048 bits
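/*
 * Editorial note, not part of the original driver: the *_MOD_SIZE values
 * are modulus lengths in bytes, so the table above maps onto them as
 * 16 bytes = 128 bits, 64 bytes = 512 bits, 128 bytes = 1024 bits and
 * 256 bytes = 2048 bits. select_device_type() compares the request's
 * inputdatalength (a byte count) against these limits when choosing a
 * card type.
 */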
1007 static inline int
1008 select_device_type(int *dev_type_p, int bytelength)
1010 static int count = 0;
1011 int PCICA_avail, PCIXCC_MCL3_avail, CEX2C_avail, index_to_use;
1012 struct status *stat;
1013 if ((*dev_type_p != PCICC) && (*dev_type_p != PCICA) &&
1014 (*dev_type_p != PCIXCC_MCL2) && (*dev_type_p != PCIXCC_MCL3) &&
1015 (*dev_type_p != CEX2C) && (*dev_type_p != ANYDEV))
1016 return -1;
1017 if (*dev_type_p != ANYDEV) {
1018 stat = &z90crypt.hdware_info->type_mask[*dev_type_p];
1019 if (stat->st_count >
1020 (stat->disabled_count + stat->user_disabled_count))
1021 return 0;
1022 return -1;
1025 /* Assumption: PCICA, PCIXCC_MCL3, and CEX2C are all similar in speed */
1026 stat = &z90crypt.hdware_info->type_mask[PCICA];
1027 PCICA_avail = stat->st_count -
1028 (stat->disabled_count + stat->user_disabled_count);
1029 stat = &z90crypt.hdware_info->type_mask[PCIXCC_MCL3];
1030 PCIXCC_MCL3_avail = stat->st_count -
1031 (stat->disabled_count + stat->user_disabled_count);
1032 stat = &z90crypt.hdware_info->type_mask[CEX2C];
1033 CEX2C_avail = stat->st_count -
1034 (stat->disabled_count + stat->user_disabled_count);
1035 if (PCICA_avail || PCIXCC_MCL3_avail || CEX2C_avail) {
1037 * When bitlength is a factor, PCICA is the most capable card, even
1038 * with the new MCL for PCIXCC.
1040 if ((bytelength < PCIXCC_MIN_MOD_SIZE) ||
1041 (!ext_bitlens && (bytelength < OLD_PCIXCC_MIN_MOD_SIZE))) {
1042 if (!PCICA_avail)
1043 return -1;
1044 else {
1045 *dev_type_p = PCICA;
1046 return 0;
1050 index_to_use = count % (PCICA_avail + PCIXCC_MCL3_avail +
1051 CEX2C_avail);
1052 if (index_to_use < PCICA_avail)
1053 *dev_type_p = PCICA;
1054 else if (index_to_use < (PCICA_avail + PCIXCC_MCL3_avail))
1055 *dev_type_p = PCIXCC_MCL3;
1056 else
1057 *dev_type_p = CEX2C;
1058 count++;
1059 return 0;
1062 /* Less than OLD_PCIXCC_MIN_MOD_SIZE cannot go to a PCIXCC_MCL2 */
1063 if (bytelength < OLD_PCIXCC_MIN_MOD_SIZE)
1064 return -1;
1065 stat = &z90crypt.hdware_info->type_mask[PCIXCC_MCL2];
1066 if (stat->st_count >
1067 (stat->disabled_count + stat->user_disabled_count)) {
1068 *dev_type_p = PCIXCC_MCL2;
1069 return 0;
1073 * Less than PCICC_MIN_MOD_SIZE or more than OLD_PCICC_MAX_MOD_SIZE
1074 * (if we don't have the MCL applied and the newer bitlengths enabled)
1075 * cannot go to a PCICC
1077 if ((bytelength < PCICC_MIN_MOD_SIZE) ||
1078 (!ext_bitlens && (bytelength > OLD_PCICC_MAX_MOD_SIZE))) {
1079 return -1;
1081 stat = &z90crypt.hdware_info->type_mask[PCICC];
1082 if (stat->st_count >
1083 (stat->disabled_count + stat->user_disabled_count)) {
1084 *dev_type_p = PCICC;
1085 return 0;
1088 return -1;
1092 * Try the selected number, then the selected type (can be ANYDEV)
1094 static inline int
1095 select_device(int *dev_type_p, int *device_nr_p, int bytelength)
1097 int i, indx, devTp, low_count, low_indx;
1098 struct device_x *index_p;
1099 struct device *dev_ptr;
1101 PDEBUG("device type = %d, index = %d\n", *dev_type_p, *device_nr_p);
1102 if ((*device_nr_p >= 0) && (*device_nr_p < Z90CRYPT_NUM_DEVS)) {
1103 PDEBUG("trying index = %d\n", *device_nr_p);
1104 dev_ptr = z90crypt.device_p[*device_nr_p];
1106 if (dev_ptr &&
1107 (dev_ptr->dev_stat != DEV_GONE) &&
1108 (dev_ptr->disabled == 0) &&
1109 (dev_ptr->user_disabled == 0)) {
1110 PDEBUG("selected by number, index = %d\n",
1111 *device_nr_p);
1112 *dev_type_p = dev_ptr->dev_type;
1113 return *device_nr_p;
1116 *device_nr_p = -1;
1117 PDEBUG("trying type = %d\n", *dev_type_p);
1118 devTp = *dev_type_p;
1119 if (select_device_type(&devTp, bytelength) == -1) {
1120 PDEBUG("failed to select by type\n");
1121 return -1;
1123 PDEBUG("selected type = %d\n", devTp);
1124 index_p = &z90crypt.hdware_info->type_x_addr[devTp];
1125 low_count = 0x0000FFFF;
1126 low_indx = -1;
1127 for (i = 0; i < z90crypt.hdware_info->type_mask[devTp].st_count; i++) {
1128 indx = index_p->device_index[i];
1129 dev_ptr = z90crypt.device_p[indx];
1130 if (dev_ptr &&
1131 (dev_ptr->dev_stat != DEV_GONE) &&
1132 (dev_ptr->disabled == 0) &&
1133 (dev_ptr->user_disabled == 0) &&
1134 (devTp == dev_ptr->dev_type) &&
1135 (low_count > dev_ptr->dev_caller_count)) {
1136 low_count = dev_ptr->dev_caller_count;
1137 low_indx = indx;
1140 *device_nr_p = low_indx;
1141 return low_indx;
1144 static inline int
1145 send_to_crypto_device(struct work_element *we_p)
1147 struct caller *caller_p;
1148 struct device *device_p;
1149 int dev_nr;
1150 int bytelen = ((struct ica_rsa_modexpo *)we_p->buffer)->inputdatalength;
1152 if (!we_p->requestptr)
1153 return SEN_FATAL_ERROR;
1154 caller_p = (struct caller *)we_p->requestptr;
1155 dev_nr = we_p->devindex;
1156 if (select_device(&we_p->devtype, &dev_nr, bytelen) == -1) {
1157 if (z90crypt.hdware_info->hdware_mask.st_count != 0)
1158 return SEN_RETRY;
1159 else
1160 return SEN_NOT_AVAIL;
1162 we_p->devindex = dev_nr;
1163 device_p = z90crypt.device_p[dev_nr];
1164 if (!device_p)
1165 return SEN_NOT_AVAIL;
1166 if (device_p->dev_type != we_p->devtype)
1167 return SEN_RETRY;
1168 if (device_p->dev_caller_count >= device_p->dev_q_depth)
1169 return SEN_QUEUE_FULL;
1170 PDEBUG("device number prior to send: %d\n", dev_nr);
1171 switch (send_to_AP(dev_nr, z90crypt.cdx,
1172 caller_p->caller_dev_dep_req_l,
1173 caller_p->caller_dev_dep_req_p)) {
1174 case DEV_SEN_EXCEPTION:
1175 PRINTKC("Exception during send to device %d\n", dev_nr);
1176 z90crypt.terminating = 1;
1177 return SEN_FATAL_ERROR;
1178 case DEV_GONE:
1179 PRINTK("Device %d not available\n", dev_nr);
1180 remove_device(device_p);
1181 return SEN_NOT_AVAIL;
1182 case DEV_EMPTY:
1183 return SEN_NOT_AVAIL;
1184 case DEV_NO_WORK:
1185 return SEN_FATAL_ERROR;
1186 case DEV_BAD_MESSAGE:
1187 return SEN_USER_ERROR;
1188 case DEV_QUEUE_FULL:
1189 return SEN_QUEUE_FULL;
1190 default:
1191 case DEV_ONLINE:
1192 break;
1194 list_add_tail(&(caller_p->caller_liste), &(device_p->dev_caller_list));
1195 device_p->dev_caller_count++;
1196 return 0;
1200 * Send puts the user's work on one of two queues:
1201 * the pending queue if the send was successful
1202 * the request queue if the send failed because the device was full or busy
1204 static inline int
1205 z90crypt_send(struct work_element *we_p, const char *buf)
1207 int rv;
1209 PDEBUG("PID %d\n", PID());
1211 if (CHK_RDWRMASK(we_p->status[0]) != STAT_NOWORK) {
1212 PDEBUG("PID %d tried to send more work but has outstanding "
1213 "work.\n", PID());
1214 return -EWORKPEND;
1216 we_p->devindex = -1; // Reset device number
1217 spin_lock_irq(&queuespinlock);
1218 rv = send_to_crypto_device(we_p);
1219 switch (rv) {
1220 case 0:
1221 we_p->requestsent = jiffies;
1222 we_p->audit[0] |= FP_SENT;
1223 list_add_tail(&we_p->liste, &pending_list);
1224 ++pendingq_count;
1225 we_p->audit[0] |= FP_PENDING;
1226 break;
1227 case SEN_BUSY:
1228 case SEN_QUEUE_FULL:
1229 rv = 0;
1230 we_p->devindex = -1; // any device will do
1231 we_p->requestsent = jiffies;
1232 list_add_tail(&we_p->liste, &request_list);
1233 ++requestq_count;
1234 we_p->audit[0] |= FP_REQUEST;
1235 break;
1236 case SEN_RETRY:
1237 rv = -ERESTARTSYS;
1238 break;
1239 case SEN_NOT_AVAIL:
1240 PRINTK("*** No devices available.\n");
1241 rv = we_p->retcode = -ENODEV;
1242 we_p->status[0] |= STAT_FAILED;
1243 break;
1244 case REC_OPERAND_INV:
1245 case REC_OPERAND_SIZE:
1246 case REC_EVEN_MOD:
1247 case REC_INVALID_PAD:
1248 rv = we_p->retcode = -EINVAL;
1249 we_p->status[0] |= STAT_FAILED;
1250 break;
1251 default:
1252 we_p->retcode = rv;
1253 we_p->status[0] |= STAT_FAILED;
1254 break;
1256 if (rv != -ERESTARTSYS)
1257 SET_RDWRMASK(we_p->status[0], STAT_WRITTEN);
1258 spin_unlock_irq(&queuespinlock);
1259 if (rv == 0)
1260 tasklet_schedule(&reader_tasklet);
1261 return rv;
1265 * process_results copies the results from kernel space back to the caller's user-space buffers.
1267 static inline int
1268 z90crypt_process_results(struct work_element *we_p, char __user *buf)
1270 int rv;
1272 PDEBUG("we_p %p (PID %d)\n", we_p, PID());
1274 LONG2DEVPTR(we_p->devindex)->dev_total_req_cnt++;
1275 SET_RDWRMASK(we_p->status[0], STAT_READPEND);
1277 rv = 0;
1278 if (!we_p->buffer) {
1279 PRINTK("we_p %p PID %d in STAT_READPEND: buffer NULL.\n",
1280 we_p, PID());
1281 rv = -ENOBUFF;
1284 if (!rv)
1285 if ((rv = copy_to_user(buf, we_p->buffer, we_p->buff_size))) {
1286 PDEBUG("copy_to_user failed: rv = %d\n", rv);
1287 rv = -EFAULT;
1290 if (!rv)
1291 rv = we_p->retcode;
1292 if (!rv)
1293 if (we_p->resp_buff_size
1294 && copy_to_user(we_p->resp_addr, we_p->resp_buff,
1295 we_p->resp_buff_size))
1296 rv = -EFAULT;
1298 SET_RDWRMASK(we_p->status[0], STAT_NOWORK);
1299 return rv;
1302 static unsigned char NULL_psmid[8] =
1303 {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
1306 * Used in device configuration functions
1308 #define MAX_RESET 90
1311 * This is used only for PCICC support
1313 static inline int
1314 is_PKCS11_padded(unsigned char *buffer, int length)
1316 int i;
1317 if ((buffer[0] != 0x00) || (buffer[1] != 0x01))
1318 return 0;
1319 for (i = 2; i < length; i++)
1320 if (buffer[i] != 0xFF)
1321 break;
1322 if ((i < 10) || (i == length))
1323 return 0;
1324 if (buffer[i] != 0x00)
1325 return 0;
1326 return 1;
1330 * This is used only for PCICC support
1332 static inline int
1333 is_PKCS12_padded(unsigned char *buffer, int length)
1335 int i;
1336 if ((buffer[0] != 0x00) || (buffer[1] != 0x02))
1337 return 0;
1338 for (i = 2; i < length; i++)
1339 if (buffer[i] == 0x00)
1340 break;
1341 if ((i < 10) || (i == length))
1342 return 0;
1343 if (buffer[i] != 0x00)
1344 return 0;
1345 return 1;
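/*
 * Editorial note, not part of the original driver: the two helpers above
 * recognize PKCS#1 v1.5 style block formatting in the clear-text input:
 *
 *	block type 1:  0x00 0x01 0xFF ... 0xFF 0x00 <data>    (is_PKCS11_padded)
 *	block type 2:  0x00 0x02 <nonzero pad> 0x00 <data>    (is_PKCS12_padded)
 *
 * Both require at least eight padding bytes before the 0x00 separator,
 * which is why a separator found at an offset below 10 is rejected.
 */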
1349 * builds struct caller and converts message from generic format to
1350 * device-dependent format
1351 * func is ICARSAMODEXPO or ICARSACRT
1352 * function is PCI_FUNC_KEY_ENCRYPT or PCI_FUNC_KEY_DECRYPT
1354 static inline int
1355 build_caller(struct work_element *we_p, short function)
1357 int rv;
1358 struct caller *caller_p = (struct caller *)we_p->requestptr;
1360 if ((we_p->devtype != PCICC) && (we_p->devtype != PCICA) &&
1361 (we_p->devtype != PCIXCC_MCL2) && (we_p->devtype != PCIXCC_MCL3) &&
1362 (we_p->devtype != CEX2C))
1363 return SEN_NOT_AVAIL;
1365 memcpy(caller_p->caller_id, we_p->caller_id,
1366 sizeof(caller_p->caller_id));
1367 caller_p->caller_dev_dep_req_p = caller_p->caller_dev_dep_req;
1368 caller_p->caller_dev_dep_req_l = MAX_RESPONSE_SIZE;
1369 caller_p->caller_buf_p = we_p->buffer;
1370 INIT_LIST_HEAD(&(caller_p->caller_liste));
1372 rv = convert_request(we_p->buffer, we_p->funccode, function,
1373 z90crypt.cdx, we_p->devtype,
1374 &caller_p->caller_dev_dep_req_l,
1375 caller_p->caller_dev_dep_req_p);
1376 if (rv) {
1377 if (rv == SEN_NOT_AVAIL)
1378 PDEBUG("request can't be processed on hdwr avail\n");
1379 else
1380 PRINTK("Error from convert_request: %d\n", rv);
1382 else
1383 memcpy(&(caller_p->caller_dev_dep_req_p[4]), we_p->caller_id,8);
1384 return rv;
1387 static inline void
1388 unbuild_caller(struct device *device_p, struct caller *caller_p)
1390 if (!caller_p)
1391 return;
1392 if (caller_p->caller_liste.next && caller_p->caller_liste.prev)
1393 if (!list_empty(&caller_p->caller_liste)) {
1394 list_del_init(&caller_p->caller_liste);
1395 device_p->dev_caller_count--;
1397 memset(caller_p->caller_id, 0, sizeof(caller_p->caller_id));
1400 static inline int
1401 get_crypto_request_buffer(struct work_element *we_p)
1403 struct ica_rsa_modexpo *mex_p;
1404 struct ica_rsa_modexpo_crt *crt_p;
1405 unsigned char *temp_buffer;
1406 short function;
1407 int rv;
1409 mex_p = (struct ica_rsa_modexpo *) we_p->buffer;
1410 crt_p = (struct ica_rsa_modexpo_crt *) we_p->buffer;
1412 PDEBUG("device type input = %d\n", we_p->devtype);
1414 if (z90crypt.terminating)
1415 return REC_NO_RESPONSE;
1416 if (memcmp(we_p->caller_id, NULL_psmid, 8) == 0) {
1417 PRINTK("psmid zeroes\n");
1418 return SEN_FATAL_ERROR;
1420 if (!we_p->buffer) {
1421 PRINTK("buffer pointer NULL\n");
1422 return SEN_USER_ERROR;
1424 if (!we_p->requestptr) {
1425 PRINTK("caller pointer NULL\n");
1426 return SEN_USER_ERROR;
1429 if ((we_p->devtype != PCICA) && (we_p->devtype != PCICC) &&
1430 (we_p->devtype != PCIXCC_MCL2) && (we_p->devtype != PCIXCC_MCL3) &&
1431 (we_p->devtype != CEX2C) && (we_p->devtype != ANYDEV)) {
1432 PRINTK("invalid device type\n");
1433 return SEN_USER_ERROR;
1436 if ((mex_p->inputdatalength < 1) ||
1437 (mex_p->inputdatalength > MAX_MOD_SIZE)) {
1438 PRINTK("inputdatalength[%d] is not valid\n",
1439 mex_p->inputdatalength);
1440 return SEN_USER_ERROR;
1443 if (mex_p->outputdatalength < mex_p->inputdatalength) {
1444 PRINTK("outputdatalength[%d] < inputdatalength[%d]\n",
1445 mex_p->outputdatalength, mex_p->inputdatalength);
1446 return SEN_USER_ERROR;
1449 if (!mex_p->inputdata || !mex_p->outputdata) {
1450 PRINTK("inputdata[%p] or outputdata[%p] is NULL\n",
1451 mex_p->outputdata, mex_p->inputdata);
1452 return SEN_USER_ERROR;
1456 * As long as outputdatalength is big enough, we can set the
1457 * outputdatalength equal to the inputdatalength, since that is the
1458 * number of bytes we will copy in any case
1460 mex_p->outputdatalength = mex_p->inputdatalength;
1462 rv = 0;
1463 switch (we_p->funccode) {
1464 case ICARSAMODEXPO:
1465 if (!mex_p->b_key || !mex_p->n_modulus)
1466 rv = SEN_USER_ERROR;
1467 break;
1468 case ICARSACRT:
1469 if (!IS_EVEN(crt_p->inputdatalength)) {
1470 PRINTK("inputdatalength[%d] is odd, CRT form\n",
1471 crt_p->inputdatalength);
1472 rv = SEN_USER_ERROR;
1473 break;
1475 if (!crt_p->bp_key ||
1476 !crt_p->bq_key ||
1477 !crt_p->np_prime ||
1478 !crt_p->nq_prime ||
1479 !crt_p->u_mult_inv) {
1480 PRINTK("CRT form, bad data: %p/%p/%p/%p/%p\n",
1481 crt_p->bp_key, crt_p->bq_key,
1482 crt_p->np_prime, crt_p->nq_prime,
1483 crt_p->u_mult_inv);
1484 rv = SEN_USER_ERROR;
1486 break;
1487 default:
1488 PRINTK("bad func = %d\n", we_p->funccode);
1489 rv = SEN_USER_ERROR;
1490 break;
1492 if (rv != 0)
1493 return rv;
1495 if (select_device_type(&we_p->devtype, mex_p->inputdatalength) < 0)
1496 return SEN_NOT_AVAIL;
1498 temp_buffer = (unsigned char *)we_p + sizeof(struct work_element) +
1499 sizeof(struct caller);
1500 if (copy_from_user(temp_buffer, mex_p->inputdata,
1501 mex_p->inputdatalength) != 0)
1502 return SEN_RELEASED;
1504 function = PCI_FUNC_KEY_ENCRYPT;
1505 switch (we_p->devtype) {
1506 /* PCICA does everything with a simple RSA mod-expo operation */
1507 case PCICA:
1508 function = PCI_FUNC_KEY_ENCRYPT;
1509 break;
1511 * PCIXCC_MCL2 does all Mod-Expo forms with a simple RSA mod-expo
1512 * operation, and all CRT forms with a PKCS-1.2 format decrypt.
1513 * PCIXCC_MCL3 and CEX2C do all Mod-Expo and CRT forms with a simple RSA
1514 * mod-expo operation
1516 case PCIXCC_MCL2:
1517 if (we_p->funccode == ICARSAMODEXPO)
1518 function = PCI_FUNC_KEY_ENCRYPT;
1519 else
1520 function = PCI_FUNC_KEY_DECRYPT;
1521 break;
1522 case PCIXCC_MCL3:
1523 case CEX2C:
1524 if (we_p->funccode == ICARSAMODEXPO)
1525 function = PCI_FUNC_KEY_ENCRYPT;
1526 else
1527 function = PCI_FUNC_KEY_DECRYPT;
1528 break;
1530 * PCICC does everything as a PKCS-1.2 format request
1532 case PCICC:
1533 /* PCICC cannot handle input that is PKCS#1.1 padded */
1534 if (is_PKCS11_padded(temp_buffer, mex_p->inputdatalength)) {
1535 return SEN_NOT_AVAIL;
1537 if (we_p->funccode == ICARSAMODEXPO) {
1538 if (is_PKCS12_padded(temp_buffer,
1539 mex_p->inputdatalength))
1540 function = PCI_FUNC_KEY_ENCRYPT;
1541 else
1542 function = PCI_FUNC_KEY_DECRYPT;
1543 } else
1544 /* all CRT forms are decrypts */
1545 function = PCI_FUNC_KEY_DECRYPT;
1546 break;
1548 PDEBUG("function: %04x\n", function);
1549 rv = build_caller(we_p, function);
1550 PDEBUG("rv from build_caller = %d\n", rv);
1551 return rv;
1554 static inline int
1555 z90crypt_prepare(struct work_element *we_p, unsigned int funccode,
1556 const char __user *buffer)
1558 int rv;
1560 we_p->devindex = -1;
1561 if (funccode == ICARSAMODEXPO)
1562 we_p->buff_size = sizeof(struct ica_rsa_modexpo);
1563 else
1564 we_p->buff_size = sizeof(struct ica_rsa_modexpo_crt);
1566 if (copy_from_user(we_p->buffer, buffer, we_p->buff_size))
1567 return -EFAULT;
1569 we_p->audit[0] |= FP_COPYFROM;
1570 SET_RDWRMASK(we_p->status[0], STAT_WRITTEN);
1571 we_p->funccode = funccode;
1572 we_p->devtype = -1;
1573 we_p->audit[0] |= FP_BUFFREQ;
1574 rv = get_crypto_request_buffer(we_p);
1575 switch (rv) {
1576 case 0:
1577 we_p->audit[0] |= FP_BUFFGOT;
1578 break;
1579 case SEN_USER_ERROR:
1580 rv = -EINVAL;
1581 break;
1582 case SEN_QUEUE_FULL:
1583 rv = 0;
1584 break;
1585 case SEN_RELEASED:
1586 rv = -EFAULT;
1587 break;
1588 case REC_NO_RESPONSE:
1589 rv = -ENODEV;
1590 break;
1591 case SEN_NOT_AVAIL:
1592 case EGETBUFF:
1593 rv = -EGETBUFF;
1594 break;
1595 default:
1596 PRINTK("rv = %d\n", rv);
1597 rv = -EGETBUFF;
1598 break;
1600 if (CHK_RDWRMASK(we_p->status[0]) == STAT_WRITTEN)
1601 SET_RDWRMASK(we_p->status[0], STAT_DEFAULT);
1602 return rv;
1605 static inline void
1606 purge_work_element(struct work_element *we_p)
1608 struct list_head *lptr;
1610 spin_lock_irq(&queuespinlock);
1611 list_for_each(lptr, &request_list) {
1612 if (lptr == &we_p->liste) {
1613 list_del_init(lptr);
1614 requestq_count--;
1615 break;
1618 list_for_each(lptr, &pending_list) {
1619 if (lptr == &we_p->liste) {
1620 list_del_init(lptr);
1621 pendingq_count--;
1622 break;
1625 spin_unlock_irq(&queuespinlock);
1629 * Build the request and send it.
1631 static inline int
1632 z90crypt_rsa(struct priv_data *private_data_p, pid_t pid,
1633 unsigned int cmd, unsigned long arg)
1635 struct work_element *we_p;
1636 int rv;
1638 if ((rv = allocate_work_element(&we_p, private_data_p, pid))) {
1639 PDEBUG("PID %d: allocate_work_element returned ENOMEM\n", pid);
1640 return rv;
1642 if ((rv = z90crypt_prepare(we_p, cmd, (const char __user *)arg)))
1643 PDEBUG("PID %d: rv = %d from z90crypt_prepare\n", pid, rv);
1644 if (!rv)
1645 if ((rv = z90crypt_send(we_p, (const char *)arg)))
1646 PDEBUG("PID %d: rv %d from z90crypt_send.\n", pid, rv);
1647 if (!rv) {
1648 we_p->audit[0] |= FP_ASLEEP;
1649 wait_event(we_p->waitq, atomic_read(&we_p->alarmrung));
1650 we_p->audit[0] |= FP_AWAKE;
1651 rv = we_p->retcode;
1653 if (!rv)
1654 rv = z90crypt_process_results(we_p, (char __user *)arg);
1656 if ((we_p->status[0] & STAT_FAILED)) {
1657 switch (rv) {
1659 * EINVAL *after* receive is almost always a padding error or
1660 * length error issued by a coprocessor (not an accelerator).
1661 * We convert this return value to -EGETBUFF which should
1662 * trigger a fallback to software.
1664 case -EINVAL:
1665 if (we_p->devtype != PCICA)
1666 rv = -EGETBUFF;
1667 break;
1668 case -ETIMEOUT:
1669 if (z90crypt.mask.st_count > 0)
1670 rv = -ERESTARTSYS; // retry with another
1671 else
1672 rv = -ENODEV; // no cards left
1673 /* fall through to clean up request queue */
1674 case -ERESTARTSYS:
1675 case -ERELEASED:
1676 switch (CHK_RDWRMASK(we_p->status[0])) {
1677 case STAT_WRITTEN:
1678 purge_work_element(we_p);
1679 break;
1680 case STAT_READPEND:
1681 case STAT_NOWORK:
1682 default:
1683 break;
1685 break;
1686 default:
1687 we_p->status[0] ^= STAT_FAILED;
1688 break;
1691 free_page((long)we_p);
1692 return rv;
1696 * This function is a little long, but it's really just one large switch
1697 * statement.
1699 static long
1700 z90crypt_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1702 struct priv_data *private_data_p = filp->private_data;
1703 unsigned char *status;
1704 unsigned char *qdepth;
1705 unsigned int *reqcnt;
1706 struct ica_z90_status *pstat;
1707 int ret, i, loopLim, tempstat;
1708 static int deprecated_msg_count1 = 0;
1709 static int deprecated_msg_count2 = 0;
1711 PDEBUG("filp %p (PID %d), cmd 0x%08X\n", filp, PID(), cmd);
1712 PDEBUG("cmd 0x%08X: dir %s, size 0x%04X, type 0x%02X, nr 0x%02X\n",
1713 cmd,
1714 !_IOC_DIR(cmd) ? "NO"
1715 : ((_IOC_DIR(cmd) == (_IOC_READ|_IOC_WRITE)) ? "RW"
1716 : ((_IOC_DIR(cmd) == _IOC_READ) ? "RD"
1717 : "WR")),
1718 _IOC_SIZE(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd));
1720 if (_IOC_TYPE(cmd) != Z90_IOCTL_MAGIC) {
1721 PRINTK("cmd 0x%08X contains bad magic\n", cmd);
1722 return -ENOTTY;
1725 ret = 0;
1726 switch (cmd) {
1727 case ICARSAMODEXPO:
1728 case ICARSACRT:
1729 if (quiesce_z90crypt) {
1730 ret = -EQUIESCE;
1731 break;
1733 ret = -ENODEV; // Default if no devices
1734 loopLim = z90crypt.hdware_info->hdware_mask.st_count -
1735 (z90crypt.hdware_info->hdware_mask.disabled_count +
1736 z90crypt.hdware_info->hdware_mask.user_disabled_count);
1737 for (i = 0; i < loopLim; i++) {
1738 ret = z90crypt_rsa(private_data_p, PID(), cmd, arg);
1739 if (ret != -ERESTARTSYS)
1740 break;
1742 if (ret == -ERESTARTSYS)
1743 ret = -ENODEV;
1744 break;
1746 case Z90STAT_TOTALCOUNT:
1747 tempstat = get_status_totalcount();
1748 if (copy_to_user((int __user *)arg, &tempstat,sizeof(int)) != 0)
1749 ret = -EFAULT;
1750 break;
1752 case Z90STAT_PCICACOUNT:
1753 tempstat = get_status_PCICAcount();
1754 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1755 ret = -EFAULT;
1756 break;
1758 case Z90STAT_PCICCCOUNT:
1759 tempstat = get_status_PCICCcount();
1760 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1761 ret = -EFAULT;
1762 break;
1764 case Z90STAT_PCIXCCMCL2COUNT:
1765 tempstat = get_status_PCIXCCMCL2count();
1766 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1767 ret = -EFAULT;
1768 break;
1770 case Z90STAT_PCIXCCMCL3COUNT:
1771 tempstat = get_status_PCIXCCMCL3count();
1772 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1773 ret = -EFAULT;
1774 break;
1776 case Z90STAT_CEX2CCOUNT:
1777 tempstat = get_status_CEX2Ccount();
1778 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1779 ret = -EFAULT;
1780 break;
1782 case Z90STAT_REQUESTQ_COUNT:
1783 tempstat = get_status_requestq_count();
1784 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1785 ret = -EFAULT;
1786 break;
1788 case Z90STAT_PENDINGQ_COUNT:
1789 tempstat = get_status_pendingq_count();
1790 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1791 ret = -EFAULT;
1792 break;
1794 case Z90STAT_TOTALOPEN_COUNT:
1795 tempstat = get_status_totalopen_count();
1796 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1797 ret = -EFAULT;
1798 break;
1800 case Z90STAT_DOMAIN_INDEX:
1801 tempstat = get_status_domain_index();
1802 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1803 ret = -EFAULT;
1804 break;
1806 case Z90STAT_STATUS_MASK:
1807 status = kmalloc(Z90CRYPT_NUM_APS, GFP_KERNEL);
1808 if (!status) {
1809 PRINTK("kmalloc for status failed!\n");
1810 ret = -ENOMEM;
1811 break;
1813 get_status_status_mask(status);
1814 if (copy_to_user((char __user *) arg, status, Z90CRYPT_NUM_APS)
1815 != 0)
1816 ret = -EFAULT;
1817 kfree(status);
1818 break;
1820 case Z90STAT_QDEPTH_MASK:
1821 qdepth = kmalloc(Z90CRYPT_NUM_APS, GFP_KERNEL);
1822 if (!qdepth) {
1823 PRINTK("kmalloc for qdepth failed!\n");
1824 ret = -ENOMEM;
1825 break;
1827 get_status_qdepth_mask(qdepth);
1828 if (copy_to_user((char __user *) arg, qdepth, Z90CRYPT_NUM_APS) != 0)
1829 ret = -EFAULT;
1830 kfree(qdepth);
1831 break;
1833 case Z90STAT_PERDEV_REQCNT:
1834 reqcnt = kmalloc(sizeof(int) * Z90CRYPT_NUM_APS, GFP_KERNEL);
1835 if (!reqcnt) {
1836 PRINTK("kmalloc for reqcnt failed!\n");
1837 ret = -ENOMEM;
1838 break;
1840 get_status_perdevice_reqcnt(reqcnt);
1841 if (copy_to_user((char __user *) arg, reqcnt,
1842 Z90CRYPT_NUM_APS * sizeof(int)) != 0)
1843 ret = -EFAULT;
1844 kfree(reqcnt);
1845 break;
1847 /* THIS IS DEPRECATED. USE THE NEW STATUS CALLS */
1848 case ICAZ90STATUS:
1849 if (deprecated_msg_count1 < 20) {
1850 PRINTK("deprecated call to ioctl (ICAZ90STATUS)!\n");
1851 deprecated_msg_count1++;
1852 if (deprecated_msg_count1 == 20)
1853 PRINTK("No longer issuing messages related to "
1854 "deprecated call to ICAZ90STATUS.\n");
1857 pstat = kmalloc(sizeof(struct ica_z90_status), GFP_KERNEL);
1858 if (!pstat) {
1859 PRINTK("kmalloc for pstat failed!\n");
1860 ret = -ENOMEM;
1861 break;
1864 pstat->totalcount = get_status_totalcount();
1865 pstat->leedslitecount = get_status_PCICAcount();
1866 pstat->leeds2count = get_status_PCICCcount();
1867 pstat->requestqWaitCount = get_status_requestq_count();
1868 pstat->pendingqWaitCount = get_status_pendingq_count();
1869 pstat->totalOpenCount = get_status_totalopen_count();
1870 pstat->cryptoDomain = get_status_domain_index();
1871 get_status_status_mask(pstat->status);
1872 get_status_qdepth_mask(pstat->qdepth);
1874 if (copy_to_user((struct ica_z90_status __user *) arg, pstat,
1875 sizeof(struct ica_z90_status)) != 0)
1876 ret = -EFAULT;
1877 kfree(pstat);
1878 break;
1880 /* THIS IS DEPRECATED. USE THE NEW STATUS CALLS */
1881 case Z90STAT_PCIXCCCOUNT:
1882 if (deprecated_msg_count2 < 20) {
1883 PRINTK("deprecated ioctl (Z90STAT_PCIXCCCOUNT)!\n");
1884 deprecated_msg_count2++;
1885 if (deprecated_msg_count2 == 20)
1886 PRINTK("No longer issuing messages about depre"
1887 "cated ioctl Z90STAT_PCIXCCCOUNT.\n");
1890 tempstat = get_status_PCIXCCcount();
1891 if (copy_to_user((int *)arg, &tempstat, sizeof(int)) != 0)
1892 ret = -EFAULT;
1893 break;
1895 case Z90QUIESCE:
1896 if (current->euid != 0) {
1897 PRINTK("QUIESCE fails: euid %d\n",
1898 current->euid);
1899 ret = -EACCES;
1900 } else {
1901 PRINTK("QUIESCE device from PID %d\n", PID());
1902 quiesce_z90crypt = 1;
1904 break;
1906 default:
1907 /* user passed an invalid IOCTL number */
1908 PDEBUG("cmd 0x%08X contains invalid ioctl code\n", cmd);
1909 ret = -ENOTTY;
1910 break;
1913 return ret;
1916 static inline int
1917 sprintcl(unsigned char *outaddr, unsigned char *addr, unsigned int len)
1919 int hl, i;
1921 hl = 0;
1922 for (i = 0; i < len; i++)
1923 hl += sprintf(outaddr+hl, "%01x", (unsigned int) addr[i]);
1924 hl += sprintf(outaddr+hl, " ");
1926 return hl;
1929 static inline int
1930 sprintrw(unsigned char *outaddr, unsigned char *addr, unsigned int len)
1932 int hl, inl, c, cx;
1934 hl = sprintf(outaddr, " ");
1935 inl = 0;
1936 for (c = 0; c < (len / 16); c++) {
1937 hl += sprintcl(outaddr+hl, addr+inl, 16);
1938 inl += 16;
1941 cx = len%16;
1942 if (cx) {
1943 hl += sprintcl(outaddr+hl, addr+inl, cx);
1944 inl += cx;
1947 hl += sprintf(outaddr+hl, "\n");
1949 return hl;
1952 static inline int
1953 sprinthx(unsigned char *title, unsigned char *outaddr,
1954 unsigned char *addr, unsigned int len)
1956 int hl, inl, r, rx;
1958 hl = sprintf(outaddr, "\n%s\n", title);
1959 inl = 0;
1960 for (r = 0; r < (len / 64); r++) {
1961 hl += sprintrw(outaddr+hl, addr+inl, 64);
1962 inl += 64;
1964 rx = len % 64;
1965 if (rx) {
1966 hl += sprintrw(outaddr+hl, addr+inl, rx);
1967 inl += rx;
1970 hl += sprintf(outaddr+hl, "\n");
1972 return hl;
1975 static inline int
1976 sprinthx4(unsigned char *title, unsigned char *outaddr,
1977 unsigned int *array, unsigned int len)
1979 int hl, r;
1981 hl = sprintf(outaddr, "\n%s\n", title);
1983 for (r = 0; r < len; r++) {
1984 if ((r % 8) == 0)
1985 hl += sprintf(outaddr+hl, " ");
1986 hl += sprintf(outaddr+hl, "%08X ", array[r]);
1987 if ((r % 8) == 7)
1988 hl += sprintf(outaddr+hl, "\n");
1991 hl += sprintf(outaddr+hl, "\n");
1993 return hl;
1996 static int
1997 z90crypt_status(char *resp_buff, char **start, off_t offset,
1998 int count, int *eof, void *data)
2000 unsigned char *workarea;
2001 int len;
2003 /* resp_buff is a page. Use the right half for a work area */
2004 workarea = resp_buff+2000;
2005 len = 0;
2006 len += sprintf(resp_buff+len, "\nz90crypt version: %d.%d.%d\n",
2007 z90crypt_VERSION, z90crypt_RELEASE, z90crypt_VARIANT);
2008 len += sprintf(resp_buff+len, "Cryptographic domain: %d\n",
2009 get_status_domain_index());
2010 len += sprintf(resp_buff+len, "Total device count: %d\n",
2011 get_status_totalcount());
2012 len += sprintf(resp_buff+len, "PCICA count: %d\n",
2013 get_status_PCICAcount());
2014 len += sprintf(resp_buff+len, "PCICC count: %d\n",
2015 get_status_PCICCcount());
2016 len += sprintf(resp_buff+len, "PCIXCC MCL2 count: %d\n",
2017 get_status_PCIXCCMCL2count());
2018 len += sprintf(resp_buff+len, "PCIXCC MCL3 count: %d\n",
2019 get_status_PCIXCCMCL3count());
2020 len += sprintf(resp_buff+len, "CEX2C count: %d\n",
2021 get_status_CEX2Ccount());
2022 len += sprintf(resp_buff+len, "requestq count: %d\n",
2023 get_status_requestq_count());
2024 len += sprintf(resp_buff+len, "pendingq count: %d\n",
2025 get_status_pendingq_count());
2026 len += sprintf(resp_buff+len, "Total open handles: %d\n\n",
2027 get_status_totalopen_count());
2028 len += sprinthx(
2029 "Online devices: 1: PCICA, 2: PCICC, 3: PCIXCC (MCL2), "
2030 "4: PCIXCC (MCL3), 5: CEX2C",
2031 resp_buff+len,
2032 get_status_status_mask(workarea),
2033 Z90CRYPT_NUM_APS);
2034 len += sprinthx("Waiting work element counts",
2035 resp_buff+len,
2036 get_status_qdepth_mask(workarea),
2037 Z90CRYPT_NUM_APS);
2038 len += sprinthx4(
2039 "Per-device successfully completed request counts",
2040 resp_buff+len,
2041 get_status_perdevice_reqcnt((unsigned int *)workarea),
2042 Z90CRYPT_NUM_APS);
2043 *eof = 1;
2044 memset(workarea, 0, Z90CRYPT_NUM_APS * sizeof(unsigned int));
2045 return len;
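/*
 * Registration sketch (illustrative; the driver's init code is outside this
 * excerpt).  With the 2.6 procfs API a read_proc/write_proc pair like
 * z90crypt_status/z90crypt_status_write is typically wired up as:
 *
 *	struct proc_dir_entry *ent;
 *
 *	ent = create_proc_entry("driver/z90crypt", 0644, 0);
 *	if (ent) {
 *		ent->read_proc = z90crypt_status;
 *		ent->write_proc = z90crypt_status_write;
 *	}
 */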
2048 static inline void
2049 disable_card(int card_index)
2051 struct device *devp;
2053 devp = LONG2DEVPTR(card_index);
2054 if (!devp || devp->user_disabled)
2055 return;
2056 devp->user_disabled = 1;
2057 z90crypt.hdware_info->hdware_mask.user_disabled_count++;
2058 if (devp->dev_type == -1)
2059 return;
2060 z90crypt.hdware_info->type_mask[devp->dev_type].user_disabled_count++;
2063 static inline void
2064 enable_card(int card_index)
2066 struct device *devp;
2068 devp = LONG2DEVPTR(card_index);
2069 if (!devp || !devp->user_disabled)
2070 return;
2071 devp->user_disabled = 0;
2072 z90crypt.hdware_info->hdware_mask.user_disabled_count--;
2073 if (devp->dev_type == -1)
2074 return;
2075 z90crypt.hdware_info->type_mask[devp->dev_type].user_disabled_count--;
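/*
 * Added note: disable_card()/enable_card() only toggle the user_disabled
 * flag of one device and the user_disabled_count bookkeeping in the hardware
 * and per-type masks; they are driven by the /proc write handler below.
 */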
2078 static int
2079 z90crypt_status_write(struct file *file, const char __user *buffer,
2080 unsigned long count, void *data)
2082 int j, eol;
2083 unsigned char *lbuf, *ptr;
2084 unsigned int local_count;
2086 #define LBUFSIZE 1200
2087 lbuf = kmalloc(LBUFSIZE, GFP_KERNEL);
2088 if (!lbuf) {
2089 PRINTK("kmalloc failed!\n");
2090 return 0;
2093 if (count <= 0) {
kfree(lbuf);
return 0;
}
2096 local_count = UMIN((unsigned int)count, LBUFSIZE-1);
2098 if (copy_from_user(lbuf, buffer, local_count) != 0) {
2099 kfree(lbuf);
2100 return -EFAULT;
2103 lbuf[local_count] = '\0';
2105 ptr = strstr(lbuf, "Online devices");
2106 if (ptr == 0) {
2107 PRINTK("Unable to parse data (missing \"Online devices\")\n");
2108 kfree(lbuf);
2109 return count;
2112 ptr = strstr(ptr, "\n");
2113 if (ptr == 0) {
2114 PRINTK("Unable to parse data (missing newline after \"Online devices\")\n");
2115 kfree(lbuf);
2116 return count;
2118 ptr++;
2120 if (strstr(ptr, "Waiting work element counts") == NULL) {
2121 PRINTK("Unable to parse data (missing \"Waiting work element counts\")\n");
2122 kfree(lbuf);
2123 return count;
2126 j = 0;
2127 eol = 0;
2128 while ((j < 64) && (*ptr != '\0')) {
2129 switch (*ptr) {
2130 case '\t':
2131 case ' ':
2132 break;
2133 case '\n':
2134 default:
2135 eol = 1;
2136 break;
2137 case '0': // no device
2138 case '1': // PCICA
2139 case '2': // PCICC
2140 case '3': // PCIXCC_MCL2
2141 case '4': // PCIXCC_MCL3
2142 case '5': // CEX2C
2143 j++;
2144 break;
2145 case 'd':
2146 case 'D':
2147 disable_card(j);
2148 j++;
2149 break;
2150 case 'e':
2151 case 'E':
2152 enable_card(j);
2153 j++;
2154 break;
2156 if (eol)
2157 break;
2158 ptr++;
2161 kfree(lbuf);
2162 return count;
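/*
 * Usage sketch (illustrative): the parser above expects roughly the text
 * that z90crypt_status() produces.  An administrator can read the status
 * file (presumably /proc/driver/z90crypt), replace a card's digit in the
 * "Online devices" block with 'd' to disable it or 'e' to re-enable it, and
 * write the edited text back to the same file.  Parsing stops at the first
 * character that is not whitespace or one of 0-5/d/e, or after 64 positions.
 */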
2166 * Functions that run under a timer, with no process id
2168 * The task functions:
2169 * z90crypt_reader_task
2170 * helper_send_work
2171 * helper_handle_work_element
2172 * helper_receive_rc
2173 * z90crypt_config_task
2174 * z90crypt_cleanup_task
2176 * Helper functions:
2177 * z90crypt_schedule_reader_timer
2178 * z90crypt_schedule_reader_task
2179 * z90crypt_schedule_config_task
2180 * z90crypt_schedule_cleanup_task
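/*
 * Setup sketch (illustrative; the timers and the tasklet are initialized in
 * module init code outside this excerpt).  The 2.6-era pattern would be
 * roughly:
 *
 *	tasklet_init(&reader_tasklet, z90crypt_reader_task, 0);
 *	init_timer(&reader_timer);
 *	reader_timer.function = z90crypt_schedule_reader_task;
 *	reader_timer.expires = jiffies + (READERTIME * HZ / 1000);
 *	add_timer(&reader_timer);
 *
 * config_timer and cleanup_timer follow the same pattern, expiring after
 * CONFIGTIME and CLEANUPTIME seconds respectively.
 */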
2182 static inline int
2183 receive_from_crypto_device(int index, unsigned char *psmid, int *buff_len_p,
2184 unsigned char *buff, unsigned char __user **dest_p_p)
2186 int dv, rv;
2187 struct device *dev_ptr;
2188 struct caller *caller_p;
2189 struct ica_rsa_modexpo *icaMsg_p;
2190 struct list_head *ptr, *tptr;
2192 memcpy(psmid, NULL_psmid, sizeof(NULL_psmid));
2194 if (z90crypt.terminating)
2195 return REC_FATAL_ERROR;
2197 caller_p = 0;
2198 dev_ptr = z90crypt.device_p[index];
2199 rv = 0;
2200 do {
2201 if (!dev_ptr || dev_ptr->disabled) {
2202 rv = REC_NO_WORK; // a disabled device can't return work
2203 break;
2205 if (dev_ptr->dev_self_x != index) {
2206 PRINTKC("Corrupt dev ptr\n");
2207 z90crypt.terminating = 1;
2208 rv = REC_FATAL_ERROR;
2209 break;
2211 if (!dev_ptr->dev_resp_l || !dev_ptr->dev_resp_p) {
2212 dv = DEV_REC_EXCEPTION;
2213 PRINTK("dev_resp_l = %d, dev_resp_p = %p\n",
2214 dev_ptr->dev_resp_l, dev_ptr->dev_resp_p);
2215 } else {
2216 PDEBUG("Dequeue called for device %d\n", index);
2217 dv = receive_from_AP(index, z90crypt.cdx,
2218 dev_ptr->dev_resp_l,
2219 dev_ptr->dev_resp_p, psmid);
2221 switch (dv) {
2222 case DEV_REC_EXCEPTION:
2223 rv = REC_FATAL_ERROR;
2224 z90crypt.terminating = 1;
2225 PRINTKC("Exception in receive from device %d\n",
2226 index);
2227 break;
2228 case DEV_ONLINE:
2229 rv = 0;
2230 break;
2231 case DEV_EMPTY:
2232 rv = REC_EMPTY;
2233 break;
2234 case DEV_NO_WORK:
2235 rv = REC_NO_WORK;
2236 break;
2237 case DEV_BAD_MESSAGE:
2238 case DEV_GONE:
2239 case REC_HARDWAR_ERR:
2240 default:
2241 rv = REC_NO_RESPONSE;
2242 break;
2244 if (rv)
2245 break;
2246 if (dev_ptr->dev_caller_count <= 0) {
2247 rv = REC_USER_GONE;
2248 break;
2251 list_for_each_safe(ptr, tptr, &dev_ptr->dev_caller_list) {
2252 caller_p = list_entry(ptr, struct caller, caller_liste);
2253 if (!memcmp(caller_p->caller_id, psmid,
2254 sizeof(caller_p->caller_id))) {
2255 if (!list_empty(&caller_p->caller_liste)) {
2256 list_del_init(ptr);
2257 dev_ptr->dev_caller_count--;
2258 break;
2261 caller_p = 0;
2263 if (!caller_p) {
2264 PRINTKW("Unable to locate PSMID %02X%02X%02X%02X%02X"
2265 "%02X%02X%02X in device list\n",
2266 psmid[0], psmid[1], psmid[2], psmid[3],
2267 psmid[4], psmid[5], psmid[6], psmid[7]);
2268 rv = REC_USER_GONE;
2269 break;
2272 PDEBUG("caller_p after successful receive: %p\n", caller_p);
2273 rv = convert_response(dev_ptr->dev_resp_p,
2274 caller_p->caller_buf_p, buff_len_p, buff);
2275 switch (rv) {
2276 case REC_USE_PCICA:
2277 break;
2278 case REC_OPERAND_INV:
2279 case REC_OPERAND_SIZE:
2280 case REC_EVEN_MOD:
2281 case REC_INVALID_PAD:
2282 PDEBUG("device %d: 'user error' %d\n", index, rv);
2283 break;
2284 case WRONG_DEVICE_TYPE:
2285 case REC_HARDWAR_ERR:
2286 case REC_BAD_MESSAGE:
2287 PRINTKW("device %d: hardware error %d\n", index, rv);
2288 rv = REC_NO_RESPONSE;
2289 break;
2290 default:
2291 PDEBUG("device %d: rv = %d\n", index, rv);
2292 break;
2294 } while (0);
2296 switch (rv) {
2297 case 0:
2298 PDEBUG("Successful receive from device %d\n", index);
2299 icaMsg_p = (struct ica_rsa_modexpo *)caller_p->caller_buf_p;
2300 *dest_p_p = icaMsg_p->outputdata;
2301 if (*buff_len_p == 0)
2302 PRINTK("Zero *buff_len_p\n");
2303 break;
2304 case REC_NO_RESPONSE:
2305 PRINTKW("Removing device %d from availability\n", index);
2306 remove_device(dev_ptr);
2307 break;
2310 if (caller_p)
2311 unbuild_caller(dev_ptr, caller_p);
2313 return rv;
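/*
 * Added note: helper_send_work() below takes the oldest entry off
 * request_list, hands it to the device selected by the short index, and on
 * success moves it to pending_list; any send failure is converted to an
 * errno and the sleeping caller is woken immediately.
 */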
2316 static inline void
2317 helper_send_work(int index)
2319 struct work_element *rq_p;
2320 int rv;
2322 if (list_empty(&request_list))
2323 return;
2324 requestq_count--;
2325 rq_p = list_entry(request_list.next, struct work_element, liste);
2326 list_del_init(&rq_p->liste);
2327 rq_p->audit[1] |= FP_REMREQUEST;
2328 if (rq_p->devtype == SHRT2DEVPTR(index)->dev_type) {
2329 rq_p->devindex = SHRT2LONG(index);
2330 rv = send_to_crypto_device(rq_p);
2331 if (rv == 0) {
2332 rq_p->requestsent = jiffies;
2333 rq_p->audit[0] |= FP_SENT;
2334 list_add_tail(&rq_p->liste, &pending_list);
2335 ++pendingq_count;
2336 rq_p->audit[0] |= FP_PENDING;
2337 } else {
2338 switch (rv) {
2339 case REC_OPERAND_INV:
2340 case REC_OPERAND_SIZE:
2341 case REC_EVEN_MOD:
2342 case REC_INVALID_PAD:
2343 rq_p->retcode = -EINVAL;
2344 break;
2345 case SEN_NOT_AVAIL:
2346 case SEN_RETRY:
2347 case REC_NO_RESPONSE:
2348 default:
2349 if (z90crypt.mask.st_count > 1)
2350 rq_p->retcode =
2351 -ERESTARTSYS;
2352 else
2353 rq_p->retcode = -ENODEV;
2354 break;
2356 rq_p->status[0] |= STAT_FAILED;
2357 rq_p->audit[1] |= FP_AWAKENING;
2358 atomic_set(&rq_p->alarmrung, 1);
2359 wake_up(&rq_p->waitq);
2361 } else {
2362 if (z90crypt.mask.st_count > 1)
2363 rq_p->retcode = -ERESTARTSYS;
2364 else
2365 rq_p->retcode = -ENODEV;
2366 rq_p->status[0] |= STAT_FAILED;
2367 rq_p->audit[1] |= FP_AWAKENING;
2368 atomic_set(&rq_p->alarmrung, 1);
2369 wake_up(&rq_p->waitq);
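/*
 * Added note: helper_handle_work_element() below matches the PSMID returned
 * by the hardware against pending_list, copies the response (or an errno)
 * into the work element, and wakes the caller sleeping in the ioctl path.
 */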
2373 static inline void
2374 helper_handle_work_element(int index, unsigned char psmid[8], int rc,
2375 int buff_len, unsigned char *buff,
2376 unsigned char __user *resp_addr)
2378 struct work_element *pq_p;
2379 struct list_head *lptr, *tptr;
2381 pq_p = 0;
2382 list_for_each_safe(lptr, tptr, &pending_list) {
2383 pq_p = list_entry(lptr, struct work_element, liste);
2384 if (!memcmp(pq_p->caller_id, psmid, sizeof(pq_p->caller_id))) {
2385 list_del_init(lptr);
2386 pendingq_count--;
2387 pq_p->audit[1] |= FP_NOTPENDING;
2388 break;
2390 pq_p = 0;
2393 if (!pq_p) {
2394 PRINTK("device %d has work but no caller exists on pending Q\n",
2395 SHRT2LONG(index));
2396 return;
2399 switch (rc) {
2400 case 0:
2401 pq_p->resp_buff_size = buff_len;
2402 pq_p->audit[1] |= FP_RESPSIZESET;
2403 if (buff_len) {
2404 pq_p->resp_addr = resp_addr;
2405 pq_p->audit[1] |= FP_RESPADDRCOPIED;
2406 memcpy(pq_p->resp_buff, buff, buff_len);
2407 pq_p->audit[1] |= FP_RESPBUFFCOPIED;
2409 break;
2410 case REC_OPERAND_INV:
2411 case REC_OPERAND_SIZE:
2412 case REC_EVEN_MOD:
2413 case REC_INVALID_PAD:
2414 PDEBUG("-EINVAL after application error %d\n", rc);
2415 pq_p->retcode = -EINVAL;
2416 pq_p->status[0] |= STAT_FAILED;
2417 break;
2418 case REC_USE_PCICA:
2419 pq_p->retcode = -ERESTARTSYS;
2420 pq_p->status[0] |= STAT_FAILED;
2421 break;
2422 case REC_NO_RESPONSE:
2423 default:
2424 if (z90crypt.mask.st_count > 1)
2425 pq_p->retcode = -ERESTARTSYS;
2426 else
2427 pq_p->retcode = -ENODEV;
2428 pq_p->status[0] |= STAT_FAILED;
2429 break;
2431 if ((pq_p->status[0] != STAT_FAILED) || (pq_p->retcode != -ERELEASED)) {
2432 pq_p->audit[1] |= FP_AWAKENING;
2433 atomic_set(&pq_p->alarmrung, 1);
2434 wake_up(&pq_p->waitq);
2439 * return TRUE if the work element should be removed from the queue
2441 static inline int
2442 helper_receive_rc(int index, int *rc_p)
2444 switch (*rc_p) {
2445 case 0:
2446 case REC_OPERAND_INV:
2447 case REC_OPERAND_SIZE:
2448 case REC_EVEN_MOD:
2449 case REC_INVALID_PAD:
2450 case REC_USE_PCICA:
2451 break;
2453 case REC_BUSY:
2454 case REC_NO_WORK:
2455 case REC_EMPTY:
2456 case REC_RETRY_DEV:
2457 case REC_FATAL_ERROR:
2458 return 0;
2460 case REC_NO_RESPONSE:
2461 break;
2463 default:
2464 PRINTK("rc %d, device %d converted to REC_NO_RESPONSE\n",
2465 *rc_p, SHRT2LONG(index));
2466 *rc_p = REC_NO_RESPONSE;
2467 break;
2469 return 1;
2472 static inline void
2473 z90crypt_schedule_reader_timer(void)
2475 if (timer_pending(&reader_timer))
2476 return;
2477 if (mod_timer(&reader_timer, jiffies+(READERTIME*HZ/1000)) != 0)
2478 PRINTK("Timer pending while modifying reader timer\n");
2481 static void
2482 z90crypt_reader_task(unsigned long ptr)
2484 int workavail, index, rc, buff_len;
2485 unsigned char psmid[8];
2486 unsigned char __user *resp_addr;
2487 static unsigned char buff[1024];
2490 * we use workavail = 2 to ensure 2 passes with nothing dequeued before
2491 * exiting the loop. If (pendingq_count+requestq_count) == 0 after the
2492 * loop, there is no work remaining on the queues.
2494 resp_addr = 0;
2495 workavail = 2;
2496 buff_len = 0;
2497 while (workavail) {
2498 workavail--;
2499 rc = 0;
2500 spin_lock_irq(&queuespinlock);
2501 memset(buff, 0x00, sizeof(buff));
2503 /* Dequeue once from each device in round robin. */
2504 for (index = 0; index < z90crypt.mask.st_count; index++) {
2505 PDEBUG("About to receive.\n");
2506 rc = receive_from_crypto_device(SHRT2LONG(index),
2507 psmid,
2508 &buff_len,
2509 buff,
2510 &resp_addr);
2511 PDEBUG("Dequeued: rc = %d.\n", rc);
2513 if (helper_receive_rc(index, &rc)) {
2514 if (rc != REC_NO_RESPONSE) {
2515 helper_send_work(index);
2516 workavail = 2;
2519 helper_handle_work_element(index, psmid, rc,
2520 buff_len, buff,
2521 resp_addr);
2524 if (rc == REC_FATAL_ERROR)
2525 PRINTKW("REC_FATAL_ERROR from device %d!\n",
2526 SHRT2LONG(index));
2528 spin_unlock_irq(&queuespinlock);
2531 if (pendingq_count + requestq_count)
2532 z90crypt_schedule_reader_timer();
2535 static inline void
2536 z90crypt_schedule_config_task(unsigned int expiration)
2538 if (timer_pending(&config_timer))
2539 return;
2540 if (mod_timer(&config_timer, jiffies+(expiration*HZ)) != 0)
2541 PRINTK("Timer pending while modifying config timer\n");
2544 static void
2545 z90crypt_config_task(unsigned long ptr)
2547 int rc;
2549 PDEBUG("jiffies %ld\n", jiffies);
2551 if ((rc = refresh_z90crypt(&z90crypt.cdx)))
2552 PRINTK("Error %d detected in refresh_z90crypt.\n", rc);
2553 /* If return was fatal, don't bother reconfiguring */
2554 if ((rc != TSQ_FATAL_ERROR) && (rc != RSQ_FATAL_ERROR))
2555 z90crypt_schedule_config_task(CONFIGTIME);
2558 static inline void
2559 z90crypt_schedule_cleanup_task(void)
2561 if (timer_pending(&cleanup_timer))
2562 return;
2563 if (mod_timer(&cleanup_timer, jiffies+(CLEANUPTIME*HZ)) != 0)
2564 PRINTK("Timer pending while modifying cleanup timer\n");
2567 static inline void
2568 helper_drain_queues(void)
2570 struct work_element *pq_p;
2571 struct list_head *lptr, *tptr;
2573 list_for_each_safe(lptr, tptr, &pending_list) {
2574 pq_p = list_entry(lptr, struct work_element, liste);
2575 pq_p->retcode = -ENODEV;
2576 pq_p->status[0] |= STAT_FAILED;
2577 unbuild_caller(LONG2DEVPTR(pq_p->devindex),
2578 (struct caller *)pq_p->requestptr);
2579 list_del_init(lptr);
2580 pendingq_count--;
2581 pq_p->audit[1] |= FP_NOTPENDING;
2582 pq_p->audit[1] |= FP_AWAKENING;
2583 atomic_set(&pq_p->alarmrung, 1);
2584 wake_up(&pq_p->waitq);
2587 list_for_each_safe(lptr, tptr, &request_list) {
2588 pq_p = list_entry(lptr, struct work_element, liste);
2589 pq_p->retcode = -ENODEV;
2590 pq_p->status[0] |= STAT_FAILED;
2591 list_del_init(lptr);
2592 requestq_count--;
2593 pq_p->audit[1] |= FP_REMREQUEST;
2594 pq_p->audit[1] |= FP_AWAKENING;
2595 atomic_set(&pq_p->alarmrung, 1);
2596 wake_up(&pq_p->waitq);
2600 static inline void
2601 helper_timeout_requests(void)
2603 struct work_element *pq_p;
2604 struct list_head *lptr, *tptr;
2605 long timelimit;
2607 timelimit = jiffies - (CLEANUPTIME * HZ);
2608 /* The list is in strict chronological order */
2609 list_for_each_safe(lptr, tptr, &pending_list) {
2610 pq_p = list_entry(lptr, struct work_element, liste);
2611 if (pq_p->requestsent >= timelimit)
2612 break;
2613 PRINTKW("Purging(PQ) PSMID %02X%02X%02X%02X%02X%02X%02X%02X\n",
2614 ((struct caller *)pq_p->requestptr)->caller_id[0],
2615 ((struct caller *)pq_p->requestptr)->caller_id[1],
2616 ((struct caller *)pq_p->requestptr)->caller_id[2],
2617 ((struct caller *)pq_p->requestptr)->caller_id[3],
2618 ((struct caller *)pq_p->requestptr)->caller_id[4],
2619 ((struct caller *)pq_p->requestptr)->caller_id[5],
2620 ((struct caller *)pq_p->requestptr)->caller_id[6],
2621 ((struct caller *)pq_p->requestptr)->caller_id[7]);
2622 pq_p->retcode = -ETIMEOUT;
2623 pq_p->status[0] |= STAT_FAILED;
2624 /* get this off any caller queue it may be on */
2625 unbuild_caller(LONG2DEVPTR(pq_p->devindex),
2626 (struct caller *) pq_p->requestptr);
2627 list_del_init(lptr);
2628 pendingq_count--;
2629 pq_p->audit[1] |= FP_TIMEDOUT;
2630 pq_p->audit[1] |= FP_NOTPENDING;
2631 pq_p->audit[1] |= FP_AWAKENING;
2632 atomic_set(&pq_p->alarmrung, 1);
2633 wake_up(&pq_p->waitq);
2637 * If pending count is zero, items left on the request queue may
2638 * never be processed.
2640 if (pendingq_count <= 0) {
2641 list_for_each_safe(lptr, tptr, &request_list) {
2642 pq_p = list_entry(lptr, struct work_element, liste);
2643 if (pq_p->requestsent >= timelimit)
2644 break;
2645 PRINTKW("Purging(RQ) PSMID %02X%02X%02X%02X%02X%02X%02X%02X\n",
2646 ((struct caller *)pq_p->requestptr)->caller_id[0],
2647 ((struct caller *)pq_p->requestptr)->caller_id[1],
2648 ((struct caller *)pq_p->requestptr)->caller_id[2],
2649 ((struct caller *)pq_p->requestptr)->caller_id[3],
2650 ((struct caller *)pq_p->requestptr)->caller_id[4],
2651 ((struct caller *)pq_p->requestptr)->caller_id[5],
2652 ((struct caller *)pq_p->requestptr)->caller_id[6],
2653 ((struct caller *)pq_p->requestptr)->caller_id[7]);
2654 pq_p->retcode = -ETIMEOUT;
2655 pq_p->status[0] |= STAT_FAILED;
2656 list_del_init(lptr);
2657 requestq_count--;
2658 pq_p->audit[1] |= FP_TIMEDOUT;
2659 pq_p->audit[1] |= FP_REMREQUEST;
2660 pq_p->audit[1] |= FP_AWAKENING;
2661 atomic_set(&pq_p->alarmrung, 1);
2662 wake_up(&pq_p->waitq);
2667 static void
2668 z90crypt_cleanup_task(unsigned long ptr)
2670 PDEBUG("jiffies %ld\n", jiffies);
2671 spin_lock_irq(&queuespinlock);
2672 if (z90crypt.mask.st_count <= 0) // no devices!
2673 helper_drain_queues();
2674 else
2675 helper_timeout_requests();
2676 spin_unlock_irq(&queuespinlock);
2677 z90crypt_schedule_cleanup_task();
2680 static void
2681 z90crypt_schedule_reader_task(unsigned long ptr)
2683 tasklet_schedule(&reader_tasklet);
2687 * Lowlevel Functions:
2689 * create_z90crypt: creates and initializes basic data structures
2690 * refresh_z90crypt: re-initializes basic data structures
2691 * find_crypto_devices: returns a count and mask of hardware status
2692 * create_crypto_device: builds the descriptor for a device
2693 * destroy_crypto_device: unallocates the descriptor for a device
2694 * destroy_z90crypt: drains all work, unallocates structs
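/*
 * Added note on call order (the module init/exit code is outside this
 * excerpt): create_z90crypt() presumably runs once at load time,
 * refresh_z90crypt() is invoked from z90crypt_config_task() every CONFIGTIME
 * seconds, and destroy_z90crypt() tears everything down on module removal.
 */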
2698 * build the z90crypt root structure using the given domain index
2700 static int
2701 create_z90crypt(int *cdx_p)
2703 struct hdware_block *hdware_blk_p;
2705 memset(&z90crypt, 0x00, sizeof(struct z90crypt));
2706 z90crypt.domain_established = 0;
2707 z90crypt.len = sizeof(struct z90crypt);
2708 z90crypt.max_count = Z90CRYPT_NUM_DEVS;
2709 z90crypt.cdx = *cdx_p;
2711 hdware_blk_p = (struct hdware_block *)
2712 kmalloc(sizeof(struct hdware_block), GFP_ATOMIC);
2713 if (!hdware_blk_p) {
2714 PDEBUG("kmalloc for hardware block failed\n");
2715 return ENOMEM;
2717 memset(hdware_blk_p, 0x00, sizeof(struct hdware_block));
2718 z90crypt.hdware_info = hdware_blk_p;
2720 return 0;
2723 static inline int
2724 helper_scan_devices(int cdx_array[16], int *cdx_p, int *correct_cdx_found)
2726 enum hdstat hd_stat;
2727 int q_depth, dev_type;
2728 int indx, chkdom, numdomains;
2730 q_depth = dev_type = numdomains = 0;
2731 for (chkdom = 0; chkdom <= 15; cdx_array[chkdom++] = -1);
2732 for (indx = 0; indx < z90crypt.max_count; indx++) {
2733 hd_stat = HD_NOT_THERE;
2734 numdomains = 0;
2735 for (chkdom = 0; chkdom <= 15; chkdom++) {
2736 hd_stat = query_online(indx, chkdom, MAX_RESET,
2737 &q_depth, &dev_type);
2738 if (hd_stat == HD_TSQ_EXCEPTION) {
2739 z90crypt.terminating = 1;
2740 PRINTKC("exception taken!\n");
2741 break;
2743 if (hd_stat == HD_ONLINE) {
2744 cdx_array[numdomains++] = chkdom;
2745 if (*cdx_p == chkdom) {
2746 *correct_cdx_found = 1;
2747 break;
2751 if ((*correct_cdx_found == 1) || (numdomains != 0))
2752 break;
2753 if (z90crypt.terminating)
2754 break;
2756 return numdomains;
2759 static inline int
2760 probe_crypto_domain(int *cdx_p)
2762 int cdx_array[16];
2763 char cdx_array_text[53], temp[5];
2764 int correct_cdx_found, numdomains;
2766 correct_cdx_found = 0;
2767 numdomains = helper_scan_devices(cdx_array, cdx_p, &correct_cdx_found);
2769 if (z90crypt.terminating)
2770 return TSQ_FATAL_ERROR;
2772 if (correct_cdx_found)
2773 return 0;
2775 if (numdomains == 0) {
2776 PRINTKW("Unable to find crypto domain: No devices found\n");
2777 return Z90C_NO_DEVICES;
2780 if (numdomains == 1) {
2781 if (*cdx_p == -1) {
2782 *cdx_p = cdx_array[0];
2783 return 0;
2785 PRINTKW("incorrect domain: specified = %d, found = %d\n",
2786 *cdx_p, cdx_array[0]);
2787 return Z90C_INCORRECT_DOMAIN;
2790 numdomains--;
2791 sprintf(cdx_array_text, "%d", cdx_array[numdomains]);
2792 while (numdomains) {
2793 numdomains--;
2794 sprintf(temp, ", %d", cdx_array[numdomains]);
2795 strcat(cdx_array_text, temp);
2798 PRINTKW("ambiguous domain detected: specified = %d, found array = %s\n",
2799 *cdx_p, cdx_array_text);
2800 return Z90C_AMBIGUOUS_DOMAIN;
2803 static int
2804 refresh_z90crypt(int *cdx_p)
2806 int i, j, indx, rv;
2807 static struct status local_mask;
2808 struct device *devPtr;
2809 unsigned char oldStat, newStat;
2810 int return_unchanged;
2812 if (z90crypt.len != sizeof(z90crypt))
2813 return ENOTINIT;
2814 if (z90crypt.terminating)
2815 return TSQ_FATAL_ERROR;
2816 rv = 0;
2817 if (!z90crypt.hdware_info->hdware_mask.st_count &&
2818 !z90crypt.domain_established) {
2819 rv = probe_crypto_domain(cdx_p);
2820 if (z90crypt.terminating)
2821 return TSQ_FATAL_ERROR;
2822 if (rv == Z90C_NO_DEVICES)
2823 return 0; // try later
2824 if (rv)
2825 return rv;
2826 z90crypt.cdx = *cdx_p;
2827 z90crypt.domain_established = 1;
2829 rv = find_crypto_devices(&local_mask);
2830 if (rv) {
2831 PRINTK("find crypto devices returned %d\n", rv);
2832 return rv;
2834 if (!memcmp(&local_mask, &z90crypt.hdware_info->hdware_mask,
2835 sizeof(struct status))) {
2836 return_unchanged = 1;
2837 for (i = 0; i < Z90CRYPT_NUM_TYPES; i++) {
2839 * Check for disabled cards. If any device is marked
2840 * disabled, destroy it.
2842 for (j = 0;
2843 j < z90crypt.hdware_info->type_mask[i].st_count;
2844 j++) {
2845 indx = z90crypt.hdware_info->type_x_addr[i].
2846 device_index[j];
2847 devPtr = z90crypt.device_p[indx];
2848 if (devPtr && devPtr->disabled) {
2849 local_mask.st_mask[indx] = HD_NOT_THERE;
2850 return_unchanged = 0;
2854 if (return_unchanged == 1)
2855 return 0;
2858 spin_lock_irq(&queuespinlock);
2859 for (i = 0; i < z90crypt.max_count; i++) {
2860 oldStat = z90crypt.hdware_info->hdware_mask.st_mask[i];
2861 newStat = local_mask.st_mask[i];
2862 if ((oldStat == HD_ONLINE) && (newStat != HD_ONLINE))
2863 destroy_crypto_device(i);
2864 else if ((oldStat != HD_ONLINE) && (newStat == HD_ONLINE)) {
2865 rv = create_crypto_device(i);
2866 if (rv >= REC_FATAL_ERROR) {
spin_unlock_irq(&queuespinlock);
return rv;
}
2868 if (rv != 0) {
2869 local_mask.st_mask[i] = HD_NOT_THERE;
2870 local_mask.st_count--;
2874 memcpy(z90crypt.hdware_info->hdware_mask.st_mask, local_mask.st_mask,
2875 sizeof(local_mask.st_mask));
2876 z90crypt.hdware_info->hdware_mask.st_count = local_mask.st_count;
2877 z90crypt.hdware_info->hdware_mask.disabled_count =
2878 local_mask.disabled_count;
2879 refresh_index_array(&z90crypt.mask, &z90crypt.overall_device_x);
2880 for (i = 0; i < Z90CRYPT_NUM_TYPES; i++)
2881 refresh_index_array(&(z90crypt.hdware_info->type_mask[i]),
2882 &(z90crypt.hdware_info->type_x_addr[i]));
2883 spin_unlock_irq(&queuespinlock);
2885 return rv;
2888 static int
2889 find_crypto_devices(struct status *deviceMask)
2891 int i, q_depth, dev_type;
2892 enum hdstat hd_stat;
2894 deviceMask->st_count = 0;
2895 deviceMask->disabled_count = 0;
2896 deviceMask->user_disabled_count = 0;
2898 for (i = 0; i < z90crypt.max_count; i++) {
2899 hd_stat = query_online(i, z90crypt.cdx, MAX_RESET, &q_depth,
2900 &dev_type);
2901 if (hd_stat == HD_TSQ_EXCEPTION) {
2902 z90crypt.terminating = 1;
2903 PRINTKC("Exception during probe for crypto devices\n");
2904 return TSQ_FATAL_ERROR;
2906 deviceMask->st_mask[i] = hd_stat;
2907 if (hd_stat == HD_ONLINE) {
2908 PDEBUG("Got an online crypto!: %d\n", i);
2909 PDEBUG("Got a queue depth of %d\n", q_depth);
2910 PDEBUG("Got a device type of %d\n", dev_type);
2911 if (q_depth <= 0)
2912 return TSQ_FATAL_ERROR;
2913 deviceMask->st_count++;
2914 z90crypt.q_depth_array[i] = q_depth;
2915 z90crypt.dev_type_array[i] = dev_type;
2919 return 0;
2922 static int
2923 refresh_index_array(struct status *status_str, struct device_x *index_array)
2925 int i, count;
2926 enum devstat stat;
2928 i = -1;
2929 count = 0;
2930 do {
2931 stat = status_str->st_mask[++i];
2932 if (stat == DEV_ONLINE)
2933 index_array->device_index[count++] = i;
2934 } while ((i < Z90CRYPT_NUM_DEVS) && (count < status_str->st_count));
2936 return count;
2939 static int
2940 create_crypto_device(int index)
2942 int rv, devstat, total_size;
2943 struct device *dev_ptr;
2944 struct status *type_str_p;
2945 int deviceType;
2947 dev_ptr = z90crypt.device_p[index];
2948 if (!dev_ptr) {
2949 total_size = sizeof(struct device) +
2950 z90crypt.q_depth_array[index] * sizeof(int);
2952 dev_ptr = (struct device *) kmalloc(total_size, GFP_ATOMIC);
2953 if (!dev_ptr) {
2954 PRINTK("kmalloc device %d failed\n", index);
2955 return ENOMEM;
2957 memset(dev_ptr, 0, total_size);
2958 dev_ptr->dev_resp_p = kmalloc(MAX_RESPONSE_SIZE, GFP_ATOMIC);
2959 if (!dev_ptr->dev_resp_p) {
2960 kfree(dev_ptr);
2961 PRINTK("kmalloc device %d rec buffer failed\n", index);
2962 return ENOMEM;
2964 dev_ptr->dev_resp_l = MAX_RESPONSE_SIZE;
2965 INIT_LIST_HEAD(&(dev_ptr->dev_caller_list));
2968 devstat = reset_device(index, z90crypt.cdx, MAX_RESET);
2969 if (devstat == DEV_RSQ_EXCEPTION) {
2970 PRINTK("exception during reset device %d\n", index);
2971 kfree(dev_ptr->dev_resp_p);
2972 kfree(dev_ptr);
2973 return RSQ_FATAL_ERROR;
2975 if (devstat == DEV_ONLINE) {
2976 dev_ptr->dev_self_x = index;
2977 dev_ptr->dev_type = z90crypt.dev_type_array[index];
2978 if (dev_ptr->dev_type == NILDEV) {
2979 rv = probe_device_type(dev_ptr);
2980 if (rv) {
2981 PRINTK("rv = %d from probe_device_type %d\n",
2982 rv, index);
2983 kfree(dev_ptr->dev_resp_p);
2984 kfree(dev_ptr);
2985 return rv;
2988 if (dev_ptr->dev_type == PCIXCC_UNK) {
2989 rv = probe_PCIXCC_type(dev_ptr);
2990 if (rv) {
2991 PRINTK("rv = %d from probe_PCIXCC_type %d\n",
2992 rv, index);
2993 kfree(dev_ptr->dev_resp_p);
2994 kfree(dev_ptr);
2995 return rv;
2998 deviceType = dev_ptr->dev_type;
2999 z90crypt.dev_type_array[index] = deviceType;
3000 if (deviceType == PCICA)
3001 z90crypt.hdware_info->device_type_array[index] = 1;
3002 else if (deviceType == PCICC)
3003 z90crypt.hdware_info->device_type_array[index] = 2;
3004 else if (deviceType == PCIXCC_MCL2)
3005 z90crypt.hdware_info->device_type_array[index] = 3;
3006 else if (deviceType == PCIXCC_MCL3)
3007 z90crypt.hdware_info->device_type_array[index] = 4;
3008 else if (deviceType == CEX2C)
3009 z90crypt.hdware_info->device_type_array[index] = 5;
3010 else
3011 z90crypt.hdware_info->device_type_array[index] = -1;
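/*
 * Added note: these 1..5 codes mirror the legend printed by
 * z90crypt_status() ("1: PCICA, 2: PCICC, 3: PCIXCC (MCL2), 4: PCIXCC
 * (MCL3), 5: CEX2C"); -1 marks an unrecognized device type.
 */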
3015 * 'q_depth' returned by the hardware is one less than
3016 * the actual depth
3018 dev_ptr->dev_q_depth = z90crypt.q_depth_array[index];
3019 dev_ptr->dev_type = z90crypt.dev_type_array[index];
3020 dev_ptr->dev_stat = devstat;
3021 dev_ptr->disabled = 0;
3022 z90crypt.device_p[index] = dev_ptr;
3024 if (devstat == DEV_ONLINE) {
3025 if (z90crypt.mask.st_mask[index] != DEV_ONLINE) {
3026 z90crypt.mask.st_mask[index] = DEV_ONLINE;
3027 z90crypt.mask.st_count++;
3029 deviceType = dev_ptr->dev_type;
3030 type_str_p = &z90crypt.hdware_info->type_mask[deviceType];
3031 if (type_str_p->st_mask[index] != DEV_ONLINE) {
3032 type_str_p->st_mask[index] = DEV_ONLINE;
3033 type_str_p->st_count++;
3037 return 0;
3040 static int
3041 destroy_crypto_device(int index)
3043 struct device *dev_ptr;
3044 int t, disabledFlag;
3046 dev_ptr = z90crypt.device_p[index];
3048 /* remember device type; get rid of device struct */
3049 if (dev_ptr) {
3050 disabledFlag = dev_ptr->disabled;
3051 t = dev_ptr->dev_type;
3052 kfree(dev_ptr->dev_resp_p);
3053 kfree(dev_ptr);
3054 } else {
3055 disabledFlag = 0;
3056 t = -1;
3058 z90crypt.device_p[index] = 0;
3060 /* if the type is valid, remove the device from the type_mask */
3061 if ((t != -1) && z90crypt.hdware_info->type_mask[t].st_mask[index]) {
3062 z90crypt.hdware_info->type_mask[t].st_mask[index] = 0x00;
3063 z90crypt.hdware_info->type_mask[t].st_count--;
3064 if (disabledFlag == 1)
3065 z90crypt.hdware_info->type_mask[t].disabled_count--;
3067 if (z90crypt.mask.st_mask[index] != DEV_GONE) {
3068 z90crypt.mask.st_mask[index] = DEV_GONE;
3069 z90crypt.mask.st_count--;
3071 z90crypt.hdware_info->device_type_array[index] = 0;
3073 return 0;
3076 static void
3077 destroy_z90crypt(void)
3079 int i;
3081 for (i = 0; i < z90crypt.max_count; i++)
3082 if (z90crypt.device_p[i])
3083 destroy_crypto_device(i);
3084 kfree(z90crypt.hdware_info);
3085 memset((void *)&z90crypt, 0, sizeof(z90crypt));
3088 static unsigned char static_testmsg[384] = {
3089 0x00,0x00,0x00,0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x00,0x06,0x00,0x00,
3090 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x58,
3091 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x43,0x43,
3092 0x41,0x2d,0x41,0x50,0x50,0x4c,0x20,0x20,0x20,0x01,0x01,0x01,0x00,0x00,0x00,0x00,
3093 0x50,0x4b,0x00,0x00,0x00,0x00,0x01,0x1c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
3094 0x00,0x00,0x00,0x00,0x00,0x00,0x05,0xb8,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
3095 0x00,0x00,0x00,0x00,0x70,0x00,0x41,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x54,0x32,
3096 0x01,0x00,0xa0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
3097 0xb8,0x05,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
3098 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
3099 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
3100 0x00,0x00,0x00,0x00,0x00,0x00,0x0a,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
3101 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00,0x49,0x43,0x53,0x46,
3102 0x20,0x20,0x20,0x20,0x50,0x4b,0x0a,0x00,0x50,0x4b,0x43,0x53,0x2d,0x31,0x2e,0x32,
3103 0x37,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,
3104 0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
3105 0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,
3106 0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x5d,0x00,0x5b,0x00,0x77,0x88,0x1e,0x00,0x00,
3107 0x57,0x00,0x00,0x00,0x00,0x04,0x00,0x00,0x4f,0x00,0x00,0x00,0x03,0x02,0x00,0x00,
3108 0x40,0x01,0x00,0x01,0xce,0x02,0x68,0x2d,0x5f,0xa9,0xde,0x0c,0xf6,0xd2,0x7b,0x58,
3109 0x4b,0xf9,0x28,0x68,0x3d,0xb4,0xf4,0xef,0x78,0xd5,0xbe,0x66,0x63,0x42,0xef,0xf8,
3110 0xfd,0xa4,0xf8,0xb0,0x8e,0x29,0xc2,0xc9,0x2e,0xd8,0x45,0xb8,0x53,0x8c,0x6f,0x4e,
3111 0x72,0x8f,0x6c,0x04,0x9c,0x88,0xfc,0x1e,0xc5,0x83,0x55,0x57,0xf7,0xdd,0xfd,0x4f,
3112 0x11,0x36,0x95,0x5d,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
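/*
 * Added note: probe_device_type() below pushes the static test message
 * through the AP queue and classifies the card by the first two response
 * bytes -- 0x00 0x86 is taken to mean PCICC, anything else PCICA.
 */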
3115 static int
3116 probe_device_type(struct device *devPtr)
3118 int rv, dv, i, index, length;
3119 unsigned char psmid[8];
3120 static unsigned char loc_testmsg[sizeof(static_testmsg)];
3122 index = devPtr->dev_self_x;
3123 rv = 0;
3124 do {
3125 memcpy(loc_testmsg, static_testmsg, sizeof(static_testmsg));
3126 length = sizeof(static_testmsg) - 24;
3127 /* the -24 allows for the header */
3128 dv = send_to_AP(index, z90crypt.cdx, length, loc_testmsg);
3129 if (dv) {
3130 PDEBUG("dv returned by send during probe: %d\n", dv);
3131 if (dv == DEV_SEN_EXCEPTION) {
3132 rv = SEN_FATAL_ERROR;
3133 PRINTKC("exception in send to AP %d\n", index);
3134 break;
3136 PDEBUG("return value from send_to_AP: %d\n", rv);
3137 switch (dv) {
3138 case DEV_GONE:
3139 PDEBUG("dev %d not available\n", index);
3140 rv = SEN_NOT_AVAIL;
3141 break;
3142 case DEV_ONLINE:
3143 rv = 0;
3144 break;
3145 case DEV_EMPTY:
3146 rv = SEN_NOT_AVAIL;
3147 break;
3148 case DEV_NO_WORK:
3149 rv = SEN_FATAL_ERROR;
3150 break;
3151 case DEV_BAD_MESSAGE:
3152 rv = SEN_USER_ERROR;
3153 break;
3154 case DEV_QUEUE_FULL:
3155 rv = SEN_QUEUE_FULL;
3156 break;
3157 default:
3158 PRINTK("unknown dv=%d for dev %d\n", dv, index);
3159 rv = SEN_NOT_AVAIL;
3160 break;
3164 if (rv)
3165 break;
3167 for (i = 0; i < 6; i++) {
3168 mdelay(300);
3169 dv = receive_from_AP(index, z90crypt.cdx,
3170 devPtr->dev_resp_l,
3171 devPtr->dev_resp_p, psmid);
3172 PDEBUG("dv returned by DQ = %d\n", dv);
3173 if (dv == DEV_REC_EXCEPTION) {
3174 rv = REC_FATAL_ERROR;
3175 PRINTKC("exception in dequeue %d\n",
3176 index);
3177 break;
3179 switch (dv) {
3180 case DEV_ONLINE:
3181 rv = 0;
3182 break;
3183 case DEV_EMPTY:
3184 rv = REC_EMPTY;
3185 break;
3186 case DEV_NO_WORK:
3187 rv = REC_NO_WORK;
3188 break;
3189 case DEV_BAD_MESSAGE:
3190 case DEV_GONE:
3191 default:
3192 rv = REC_NO_RESPONSE;
3193 break;
3195 if ((rv != 0) && (rv != REC_NO_WORK))
3196 break;
3197 if (rv == 0)
3198 break;
3200 if (rv)
3201 break;
3202 rv = (devPtr->dev_resp_p[0] == 0x00) &&
3203 (devPtr->dev_resp_p[1] == 0x86);
3204 if (rv)
3205 devPtr->dev_type = PCICC;
3206 else
3207 devPtr->dev_type = PCICA;
3208 rv = 0;
3209 } while (0);
3210 /* In a general error case, the card is not marked online */
3211 return rv;
3214 static unsigned char MCL3_testmsg[] = {
3215 0x00,0x00,0x00,0x00,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,
3216 0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
3217 0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
3218 0x43,0x41,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
3219 0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x00,0x00,0x00,0x01,0xC4,0x00,0x00,0x00,0x00,
3220 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x24,0x00,0x00,0x00,0x00,
3221 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xDC,0x02,0x00,0x00,0x00,0x54,0x32,
3222 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xE8,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x24,
3223 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
3224 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
3225 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
3226 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
3227 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
3228 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
3229 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
3230 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
3231 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
3232 0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
3233 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
3234 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
3235 0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x0A,0x4D,0x52,0x50,0x20,0x20,0x20,0x20,0x20,
3236 0x00,0x42,0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x09,0x0A,0x0B,0x0C,0x0D,
3237 0x0E,0x0F,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0xAA,0xBB,0xCC,0xDD,
3238 0xEE,0xFF,0xFF,0xEE,0xDD,0xCC,0xBB,0xAA,0x99,0x88,0x77,0x66,0x55,0x44,0x33,0x22,
3239 0x11,0x00,0x01,0x23,0x45,0x67,0x89,0xAB,0xCD,0xEF,0xFE,0xDC,0xBA,0x98,0x76,0x54,
3240 0x32,0x10,0x00,0x9A,0x00,0x98,0x00,0x00,0x1E,0x00,0x00,0x94,0x00,0x00,0x00,0x00,
3241 0x04,0x00,0x00,0x8C,0x00,0x00,0x00,0x40,0x02,0x00,0x00,0x40,0xBA,0xE8,0x23,0x3C,
3242 0x75,0xF3,0x91,0x61,0xD6,0x73,0x39,0xCF,0x7B,0x6D,0x8E,0x61,0x97,0x63,0x9E,0xD9,
3243 0x60,0x55,0xD6,0xC7,0xEF,0xF8,0x1E,0x63,0x95,0x17,0xCC,0x28,0x45,0x60,0x11,0xC5,
3244 0xC4,0x4E,0x66,0xC6,0xE6,0xC3,0xDE,0x8A,0x19,0x30,0xCF,0x0E,0xD7,0xAA,0xDB,0x01,
3245 0xD8,0x00,0xBB,0x8F,0x39,0x9F,0x64,0x28,0xF5,0x7A,0x77,0x49,0xCC,0x6B,0xA3,0x91,
3246 0x97,0x70,0xE7,0x60,0x1E,0x39,0xE1,0xE5,0x33,0xE1,0x15,0x63,0x69,0x08,0x80,0x4C,
3247 0x67,0xC4,0x41,0x8F,0x48,0xDF,0x26,0x98,0xF1,0xD5,0x8D,0x88,0xD9,0x6A,0xA4,0x96,
3248 0xC5,0x84,0xD9,0x30,0x49,0x67,0x7D,0x19,0xB1,0xB3,0x45,0x4D,0xB2,0x53,0x9A,0x47,
3249 0x3C,0x7C,0x55,0xBF,0xCC,0x85,0x00,0x36,0xF1,0x3D,0x93,0x53
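/*
 * Added note: probe_PCIXCC_type() below sends the MCL3-format test message;
 * a CPRBX return/reason code of 8/33 in the response is taken to identify an
 * MCL2-level card, otherwise the card is treated as MCL3.
 */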
3252 static int
3253 probe_PCIXCC_type(struct device *devPtr)
3255 int rv, dv, i, index, length;
3256 unsigned char psmid[8];
3257 static unsigned char loc_testmsg[sizeof(MCL3_testmsg)];
3258 struct CPRBX *cprbx_p;
3260 index = devPtr->dev_self_x;
3261 rv = 0;
3262 do {
3263 memcpy(loc_testmsg, MCL3_testmsg, sizeof(MCL3_testmsg));
3264 length = sizeof(MCL3_testmsg) - 0x0C;
3265 dv = send_to_AP(index, z90crypt.cdx, length, loc_testmsg);
3266 if (dv) {
3267 PDEBUG("dv returned = %d\n", dv);
3268 if (dv == DEV_SEN_EXCEPTION) {
3269 rv = SEN_FATAL_ERROR;
3270 PRINTKC("exception in send to AP %d\n", index);
3271 break;
3273 PDEBUG("return value from send_to_AP: %d\n", rv);
3274 switch (dv) {
3275 case DEV_GONE:
3276 PDEBUG("dev %d not available\n", index);
3277 rv = SEN_NOT_AVAIL;
3278 break;
3279 case DEV_ONLINE:
3280 rv = 0;
3281 break;
3282 case DEV_EMPTY:
3283 rv = SEN_NOT_AVAIL;
3284 break;
3285 case DEV_NO_WORK:
3286 rv = SEN_FATAL_ERROR;
3287 break;
3288 case DEV_BAD_MESSAGE:
3289 rv = SEN_USER_ERROR;
3290 break;
3291 case DEV_QUEUE_FULL:
3292 rv = SEN_QUEUE_FULL;
3293 break;
3294 default:
3295 PRINTK("unknown dv=%d for dev %d\n", dv, index);
3296 rv = SEN_NOT_AVAIL;
3297 break;
3301 if (rv)
3302 break;
3304 for (i = 0; i < 6; i++) {
3305 mdelay(300);
3306 dv = receive_from_AP(index, z90crypt.cdx,
3307 devPtr->dev_resp_l,
3308 devPtr->dev_resp_p, psmid);
3309 PDEBUG("dv returned by DQ = %d\n", dv);
3310 if (dv == DEV_REC_EXCEPTION) {
3311 rv = REC_FATAL_ERROR;
3312 PRINTKC("exception in dequeue %d\n",
3313 index);
3314 break;
3316 switch (dv) {
3317 case DEV_ONLINE:
3318 rv = 0;
3319 break;
3320 case DEV_EMPTY:
3321 rv = REC_EMPTY;
3322 break;
3323 case DEV_NO_WORK:
3324 rv = REC_NO_WORK;
3325 break;
3326 case DEV_BAD_MESSAGE:
3327 case DEV_GONE:
3328 default:
3329 rv = REC_NO_RESPONSE;
3330 break;
3332 if ((rv != 0) && (rv != REC_NO_WORK))
3333 break;
3334 if (rv == 0)
3335 break;
3337 if (rv)
3338 break;
3339 cprbx_p = (struct CPRBX *) (devPtr->dev_resp_p + 48);
3340 if ((cprbx_p->ccp_rtcode == 8) && (cprbx_p->ccp_rscode == 33)) {
3341 devPtr->dev_type = PCIXCC_MCL2;
3342 PDEBUG("device %d is MCL2\n", index);
3343 } else {
3344 devPtr->dev_type = PCIXCC_MCL3;
3345 PDEBUG("device %d is MCL3\n", index);
3347 } while (0);
3348 /* In a general error case, the card is not marked online */
3349 return rv;
3352 module_init(z90crypt_init_module);
3353 module_exit(z90crypt_cleanup_module);