[PATCH] don't include ioctl32.h in drivers
[linux-2.6/linux-2.6-openrd.git] / drivers / s390 / crypto / z90main.c
blob2f54d033d7cf8925502ba7a3f09c81c7599777d9
1 /*
2 * linux/drivers/s390/crypto/z90main.c
4 * z90crypt 1.3.2
6 * Copyright (C) 2001, 2004 IBM Corporation
7 * Author(s): Robert Burroughs (burrough@us.ibm.com)
8 * Eric Rossman (edrossma@us.ibm.com)
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 #include <asm/uaccess.h> // copy_(from|to)_user
28 #include <linux/compat.h>
29 #include <linux/compiler.h>
30 #include <linux/delay.h> // mdelay
31 #include <linux/init.h>
32 #include <linux/interrupt.h> // for tasklets
33 #include <linux/miscdevice.h>
34 #include <linux/module.h>
35 #include <linux/moduleparam.h>
36 #include <linux/proc_fs.h>
37 #include <linux/syscalls.h>
38 #include "z90crypt.h"
39 #include "z90common.h"
41 #define VERSION_Z90MAIN_C "$Revision: 1.62 $"
43 static char z90main_version[] __initdata =
44 "z90main.o (" VERSION_Z90MAIN_C "/"
45 VERSION_Z90COMMON_H "/" VERSION_Z90CRYPT_H ")";
47 extern char z90hardware_version[];
49 /**
50 * Defaults that may be modified.
53 /**
54 * You can specify a different minor at compile time.
56 #ifndef Z90CRYPT_MINOR
57 #define Z90CRYPT_MINOR MISC_DYNAMIC_MINOR
58 #endif
60 /**
61 * You can specify a different domain at compile time or on the insmod
62 * command line.
64 #ifndef DOMAIN_INDEX
65 #define DOMAIN_INDEX -1
66 #endif
68 /**
69 * This is the name under which the device is registered in /proc/modules.
71 #define REG_NAME "z90crypt"
73 /**
74 * Cleanup should run every CLEANUPTIME seconds and should clean up requests
75 * older than CLEANUPTIME seconds in the past.
77 #ifndef CLEANUPTIME
78 #define CLEANUPTIME 15
79 #endif
81 /**
82 * Config should run every CONFIGTIME seconds
84 #ifndef CONFIGTIME
85 #define CONFIGTIME 30
86 #endif
88 /**
89 * The first execution of the config task should take place
90 * immediately after initialization
92 #ifndef INITIAL_CONFIGTIME
93 #define INITIAL_CONFIGTIME 1
94 #endif
96 /**
97 * Reader should run every READERTIME milliseconds
98 * With the 100Hz patch for s390, z90crypt can lock the system solid while
99 * under heavy load. We'll try to avoid that.
101 #ifndef READERTIME
102 #if HZ > 1000
103 #define READERTIME 2
104 #else
105 #define READERTIME 10
106 #endif
107 #endif
110 * turn long device array index into device pointer
112 #define LONG2DEVPTR(ndx) (z90crypt.device_p[(ndx)])
115 * turn short device array index into long device array index
117 #define SHRT2LONG(ndx) (z90crypt.overall_device_x.device_index[(ndx)])
120 * turn short device array index into device pointer
122 #define SHRT2DEVPTR(ndx) LONG2DEVPTR(SHRT2LONG(ndx))
125 * Status for a work-element
127 #define STAT_DEFAULT 0x00 // request has not been processed
129 #define STAT_ROUTED 0x80 // bit 7: requests get routed to specific device
130 // else, device is determined each write
131 #define STAT_FAILED 0x40 // bit 6: this bit is set if the request failed
132 // before being sent to the hardware.
133 #define STAT_WRITTEN 0x30 // bits 5-4: work to be done, not sent to device
134 // 0x20 // UNUSED state
135 #define STAT_READPEND 0x10 // bits 5-4: work done, we're returning data now
136 #define STAT_NOWORK 0x00 // bits off: no work on any queue
137 #define STAT_RDWRMASK 0x30 // mask for bits 5-4
140 * Macros to check the status RDWRMASK
142 #define CHK_RDWRMASK(statbyte) ((statbyte) & STAT_RDWRMASK)
143 #define SET_RDWRMASK(statbyte, newval) \
144 {(statbyte) &= ~STAT_RDWRMASK; (statbyte) |= newval;}
147 * Audit Trail. Progress of a Work element
148 * audit[0]: Unless noted otherwise, these bits are all set by the process
150 #define FP_COPYFROM 0x80 // Caller's buffer has been copied to work element
151 #define FP_BUFFREQ 0x40 // Low Level buffer requested
152 #define FP_BUFFGOT 0x20 // Low Level buffer obtained
153 #define FP_SENT 0x10 // Work element sent to a crypto device
154 // (may be set by process or by reader task)
155 #define FP_PENDING 0x08 // Work element placed on pending queue
156 // (may be set by process or by reader task)
157 #define FP_REQUEST 0x04 // Work element placed on request queue
158 #define FP_ASLEEP 0x02 // Work element about to sleep
159 #define FP_AWAKE 0x01 // Work element has been awakened
162 * audit[1]: These bits are set by the reader task and/or the cleanup task
164 #define FP_NOTPENDING 0x80 // Work element removed from pending queue
165 #define FP_AWAKENING 0x40 // Caller about to be awakened
166 #define FP_TIMEDOUT 0x20 // Caller timed out
167 #define FP_RESPSIZESET 0x10 // Response size copied to work element
168 #define FP_RESPADDRCOPIED 0x08 // Response address copied to work element
169 #define FP_RESPBUFFCOPIED 0x04 // Response buffer copied to work element
170 #define FP_REMREQUEST 0x02 // Work element removed from request queue
171 #define FP_SIGNALED 0x01 // Work element was awakened by a signal
174 * audit[2]: unused
178 * state of the file handle in private_data.status
180 #define STAT_OPEN 0
181 #define STAT_CLOSED 1
184 * PID() expands to the process ID of the current process
186 #define PID() (current->pid)
189 * Selected Constants. The number of APs and the number of devices
191 #ifndef Z90CRYPT_NUM_APS
192 #define Z90CRYPT_NUM_APS 64
193 #endif
194 #ifndef Z90CRYPT_NUM_DEVS
195 #define Z90CRYPT_NUM_DEVS Z90CRYPT_NUM_APS
196 #endif
199 * Buffer size for receiving responses. The maximum Response Size
200 * is actually the maximum request size, since in an error condition
201 * the request itself may be returned unchanged.
203 #define MAX_RESPONSE_SIZE 0x0000077C
206 * A count and status-byte mask
208 struct status {
209 int st_count; // # of enabled devices
210 int disabled_count; // # of disabled devices
211 int user_disabled_count; // # of devices disabled via proc fs
212 unsigned char st_mask[Z90CRYPT_NUM_APS]; // current status mask
216 * The array of device indexes is a mechanism for fast indexing into
217 * a long (and sparse) array. For instance, if APs 3, 9 and 47 are
218 * installed, z90CDeviceIndex[0] is 3, z90CDeviceIndex[1] is 9, and
219 * z90CDeviceIndex[2] is 47.
221 struct device_x {
222 int device_index[Z90CRYPT_NUM_DEVS];
226 * All devices are arranged in a single array: 64 APs
228 struct device {
229 int dev_type; // PCICA, PCICC, PCIXCC_MCL2,
230 // PCIXCC_MCL3, CEX2C, CEX2A
231 enum devstat dev_stat; // current device status
232 int dev_self_x; // Index in array
233 int disabled; // Set when device is in error
234 int user_disabled; // Set when device is disabled by user
235 int dev_q_depth; // q depth
236 unsigned char * dev_resp_p; // Response buffer address
237 int dev_resp_l; // Response Buffer length
238 int dev_caller_count; // Number of callers
239 int dev_total_req_cnt; // # requests for device since load
240 struct list_head dev_caller_list; // List of callers
244 * There's a struct status and a struct device_x for each device type.
246 struct hdware_block {
247 struct status hdware_mask;
248 struct status type_mask[Z90CRYPT_NUM_TYPES];
249 struct device_x type_x_addr[Z90CRYPT_NUM_TYPES];
250 unsigned char device_type_array[Z90CRYPT_NUM_APS];
254 * z90crypt is the topmost data structure in the hierarchy.
256 struct z90crypt {
257 int max_count; // Nr of possible crypto devices
258 struct status mask;
259 int q_depth_array[Z90CRYPT_NUM_DEVS];
260 int dev_type_array[Z90CRYPT_NUM_DEVS];
261 struct device_x overall_device_x; // array device indexes
262 struct device * device_p[Z90CRYPT_NUM_DEVS];
263 int terminating;
264 int domain_established;// TRUE: domain has been found
265 int cdx; // Crypto Domain Index
266 int len; // Length of this data structure
267 struct hdware_block *hdware_info;
271 * An array of these structures is pointed to from dev_caller
272 * The length of the array depends on the device type. For APs,
273 * there are 8.
275 * The caller buffer is allocated to the user at OPEN. At WRITE,
276 * it contains the request; at READ, the response. The function
277 * send_to_crypto_device converts the request to device-dependent
278 * form and use the caller's OPEN-allocated buffer for the response.
280 * For the contents of caller_dev_dep_req and caller_dev_dep_req_p
281 * because that points to it, see the discussion in z90hardware.c.
282 * Search for "extended request message block".
284 struct caller {
285 int caller_buf_l; // length of original request
286 unsigned char * caller_buf_p; // Original request on WRITE
287 int caller_dev_dep_req_l; // len device dependent request
288 unsigned char * caller_dev_dep_req_p; // Device dependent form
289 unsigned char caller_id[8]; // caller-supplied message id
290 struct list_head caller_liste;
291 unsigned char caller_dev_dep_req[MAX_RESPONSE_SIZE];
295 * Function prototypes from z90hardware.c
297 enum hdstat query_online(int deviceNr, int cdx, int resetNr, int *q_depth,
298 int *dev_type);
299 enum devstat reset_device(int deviceNr, int cdx, int resetNr);
300 enum devstat send_to_AP(int dev_nr, int cdx, int msg_len, unsigned char *msg_ext);
301 enum devstat receive_from_AP(int dev_nr, int cdx, int resplen,
302 unsigned char *resp, unsigned char *psmid);
303 int convert_request(unsigned char *buffer, int func, unsigned short function,
304 int cdx, int dev_type, int *msg_l_p, unsigned char *msg_p);
305 int convert_response(unsigned char *response, unsigned char *buffer,
306 int *respbufflen_p, unsigned char *resp_buff);
309 * Low level function prototypes
311 static int create_z90crypt(int *cdx_p);
312 static int refresh_z90crypt(int *cdx_p);
313 static int find_crypto_devices(struct status *deviceMask);
314 static int create_crypto_device(int index);
315 static int destroy_crypto_device(int index);
316 static void destroy_z90crypt(void);
317 static int refresh_index_array(struct status *status_str,
318 struct device_x *index_array);
319 static int probe_device_type(struct device *devPtr);
320 static int probe_PCIXCC_type(struct device *devPtr);
323 * proc fs definitions
325 static struct proc_dir_entry *z90crypt_entry;
328 * data structures
332 * work_element.opener points back to this structure
334 struct priv_data {
335 pid_t opener_pid;
336 unsigned char status; // 0: open 1: closed
340 * A work element is allocated for each request
342 struct work_element {
343 struct priv_data *priv_data;
344 pid_t pid;
345 int devindex; // index of device processing this w_e
346 // (If request did not specify device,
347 // -1 until placed onto a queue)
348 int devtype;
349 struct list_head liste; // used for requestq and pendingq
350 char buffer[128]; // local copy of user request
351 int buff_size; // size of the buffer for the request
352 char resp_buff[RESPBUFFSIZE];
353 int resp_buff_size;
354 char __user * resp_addr; // address of response in user space
355 unsigned int funccode; // function code of request
356 wait_queue_head_t waitq;
357 unsigned long requestsent; // time at which the request was sent
358 atomic_t alarmrung; // wake-up signal
359 unsigned char caller_id[8]; // pid + counter, for this w_e
360 unsigned char status[1]; // bits to mark status of the request
361 unsigned char audit[3]; // record of work element's progress
362 unsigned char * requestptr; // address of request buffer
363 int retcode; // return code of request
367 * High level function prototypes
369 static int z90crypt_open(struct inode *, struct file *);
370 static int z90crypt_release(struct inode *, struct file *);
371 static ssize_t z90crypt_read(struct file *, char __user *, size_t, loff_t *);
372 static ssize_t z90crypt_write(struct file *, const char __user *,
373 size_t, loff_t *);
374 static long z90crypt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
375 static long z90crypt_compat_ioctl(struct file *, unsigned int, unsigned long);
377 static void z90crypt_reader_task(unsigned long);
378 static void z90crypt_schedule_reader_task(unsigned long);
379 static void z90crypt_config_task(unsigned long);
380 static void z90crypt_cleanup_task(unsigned long);
382 static int z90crypt_status(char *, char **, off_t, int, int *, void *);
383 static int z90crypt_status_write(struct file *, const char __user *,
384 unsigned long, void *);
387 * Storage allocated at initialization and used throughout the life of
388 * this insmod
390 static int domain = DOMAIN_INDEX;
391 static struct z90crypt z90crypt;
392 static int quiesce_z90crypt;
393 static spinlock_t queuespinlock;
394 static struct list_head request_list;
395 static int requestq_count;
396 static struct list_head pending_list;
397 static int pendingq_count;
399 static struct tasklet_struct reader_tasklet;
400 static struct timer_list reader_timer;
401 static struct timer_list config_timer;
402 static struct timer_list cleanup_timer;
403 static atomic_t total_open;
404 static atomic_t z90crypt_step;
406 static struct file_operations z90crypt_fops = {
407 .owner = THIS_MODULE,
408 .read = z90crypt_read,
409 .write = z90crypt_write,
410 .unlocked_ioctl = z90crypt_unlocked_ioctl,
411 #ifdef CONFIG_COMPAT
412 .compat_ioctl = z90crypt_compat_ioctl,
413 #endif
414 .open = z90crypt_open,
415 .release = z90crypt_release
418 static struct miscdevice z90crypt_misc_device = {
419 .minor = Z90CRYPT_MINOR,
420 .name = DEV_NAME,
421 .fops = &z90crypt_fops,
422 .devfs_name = DEV_NAME
426 * Documentation values.
428 MODULE_AUTHOR("zSeries Linux Crypto Team: Robert H. Burroughs, Eric D. Rossman"
429 "and Jochen Roehrig");
430 MODULE_DESCRIPTION("zSeries Linux Cryptographic Coprocessor device driver, "
431 "Copyright 2001, 2005 IBM Corporation");
432 MODULE_LICENSE("GPL");
433 module_param(domain, int, 0);
434 MODULE_PARM_DESC(domain, "domain index for device");
436 #ifdef CONFIG_COMPAT
438 * ioctl32 conversion routines
440 struct ica_rsa_modexpo_32 { // For 32-bit callers
441 compat_uptr_t inputdata;
442 unsigned int inputdatalength;
443 compat_uptr_t outputdata;
444 unsigned int outputdatalength;
445 compat_uptr_t b_key;
446 compat_uptr_t n_modulus;
449 static long
450 trans_modexpo32(struct file *filp, unsigned int cmd, unsigned long arg)
452 struct ica_rsa_modexpo_32 __user *mex32u = compat_ptr(arg);
453 struct ica_rsa_modexpo_32 mex32k;
454 struct ica_rsa_modexpo __user *mex64;
455 long ret = 0;
456 unsigned int i;
458 if (!access_ok(VERIFY_WRITE, mex32u, sizeof(struct ica_rsa_modexpo_32)))
459 return -EFAULT;
460 mex64 = compat_alloc_user_space(sizeof(struct ica_rsa_modexpo));
461 if (!access_ok(VERIFY_WRITE, mex64, sizeof(struct ica_rsa_modexpo)))
462 return -EFAULT;
463 if (copy_from_user(&mex32k, mex32u, sizeof(struct ica_rsa_modexpo_32)))
464 return -EFAULT;
465 if (__put_user(compat_ptr(mex32k.inputdata), &mex64->inputdata) ||
466 __put_user(mex32k.inputdatalength, &mex64->inputdatalength) ||
467 __put_user(compat_ptr(mex32k.outputdata), &mex64->outputdata) ||
468 __put_user(mex32k.outputdatalength, &mex64->outputdatalength) ||
469 __put_user(compat_ptr(mex32k.b_key), &mex64->b_key) ||
470 __put_user(compat_ptr(mex32k.n_modulus), &mex64->n_modulus))
471 return -EFAULT;
472 ret = z90crypt_unlocked_ioctl(filp, cmd, (unsigned long)mex64);
473 if (!ret)
474 if (__get_user(i, &mex64->outputdatalength) ||
475 __put_user(i, &mex32u->outputdatalength))
476 ret = -EFAULT;
477 return ret;
480 struct ica_rsa_modexpo_crt_32 { // For 32-bit callers
481 compat_uptr_t inputdata;
482 unsigned int inputdatalength;
483 compat_uptr_t outputdata;
484 unsigned int outputdatalength;
485 compat_uptr_t bp_key;
486 compat_uptr_t bq_key;
487 compat_uptr_t np_prime;
488 compat_uptr_t nq_prime;
489 compat_uptr_t u_mult_inv;
492 static long
493 trans_modexpo_crt32(struct file *filp, unsigned int cmd, unsigned long arg)
495 struct ica_rsa_modexpo_crt_32 __user *crt32u = compat_ptr(arg);
496 struct ica_rsa_modexpo_crt_32 crt32k;
497 struct ica_rsa_modexpo_crt __user *crt64;
498 long ret = 0;
499 unsigned int i;
501 if (!access_ok(VERIFY_WRITE, crt32u,
502 sizeof(struct ica_rsa_modexpo_crt_32)))
503 return -EFAULT;
504 crt64 = compat_alloc_user_space(sizeof(struct ica_rsa_modexpo_crt));
505 if (!access_ok(VERIFY_WRITE, crt64, sizeof(struct ica_rsa_modexpo_crt)))
506 return -EFAULT;
507 if (copy_from_user(&crt32k, crt32u,
508 sizeof(struct ica_rsa_modexpo_crt_32)))
509 return -EFAULT;
510 if (__put_user(compat_ptr(crt32k.inputdata), &crt64->inputdata) ||
511 __put_user(crt32k.inputdatalength, &crt64->inputdatalength) ||
512 __put_user(compat_ptr(crt32k.outputdata), &crt64->outputdata) ||
513 __put_user(crt32k.outputdatalength, &crt64->outputdatalength) ||
514 __put_user(compat_ptr(crt32k.bp_key), &crt64->bp_key) ||
515 __put_user(compat_ptr(crt32k.bq_key), &crt64->bq_key) ||
516 __put_user(compat_ptr(crt32k.np_prime), &crt64->np_prime) ||
517 __put_user(compat_ptr(crt32k.nq_prime), &crt64->nq_prime) ||
518 __put_user(compat_ptr(crt32k.u_mult_inv), &crt64->u_mult_inv))
519 return -EFAULT;
520 ret = z90crypt_unlocked_ioctl(filp, cmd, (unsigned long)crt64);
521 if (!ret)
522 if (__get_user(i, &crt64->outputdatalength) ||
523 __put_user(i, &crt32u->outputdatalength))
524 ret = -EFAULT;
525 return ret;
528 static long
529 z90crypt_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
531 switch (cmd) {
532 case ICAZ90STATUS:
533 case Z90QUIESCE:
534 case Z90STAT_TOTALCOUNT:
535 case Z90STAT_PCICACOUNT:
536 case Z90STAT_PCICCCOUNT:
537 case Z90STAT_PCIXCCCOUNT:
538 case Z90STAT_PCIXCCMCL2COUNT:
539 case Z90STAT_PCIXCCMCL3COUNT:
540 case Z90STAT_CEX2CCOUNT:
541 case Z90STAT_REQUESTQ_COUNT:
542 case Z90STAT_PENDINGQ_COUNT:
543 case Z90STAT_TOTALOPEN_COUNT:
544 case Z90STAT_DOMAIN_INDEX:
545 case Z90STAT_STATUS_MASK:
546 case Z90STAT_QDEPTH_MASK:
547 case Z90STAT_PERDEV_REQCNT:
548 return z90crypt_unlocked_ioctl(filp, cmd, arg);
549 case ICARSAMODEXPO:
550 return trans_modexpo32(filp, cmd, arg);
551 case ICARSACRT:
552 return trans_modexpo_crt32(filp, cmd, arg);
553 default:
554 return -ENOIOCTLCMD;
557 #endif
560 * The module initialization code.
562 static int __init
563 z90crypt_init_module(void)
565 int result, nresult;
566 struct proc_dir_entry *entry;
568 PDEBUG("PID %d\n", PID());
570 if ((domain < -1) || (domain > 15)) {
571 PRINTKW("Invalid param: domain = %d. Not loading.\n", domain);
572 return -EINVAL;
575 /* Register as misc device with given minor (or get a dynamic one). */
576 result = misc_register(&z90crypt_misc_device);
577 if (result < 0) {
578 PRINTKW(KERN_ERR "misc_register (minor %d) failed with %d\n",
579 z90crypt_misc_device.minor, result);
580 return result;
583 PDEBUG("Registered " DEV_NAME " with result %d\n", result);
585 result = create_z90crypt(&domain);
586 if (result != 0) {
587 PRINTKW("create_z90crypt (domain index %d) failed with %d.\n",
588 domain, result);
589 result = -ENOMEM;
590 goto init_module_cleanup;
593 if (result == 0) {
594 PRINTKN("Version %d.%d.%d loaded, built on %s %s\n",
595 z90crypt_VERSION, z90crypt_RELEASE, z90crypt_VARIANT,
596 __DATE__, __TIME__);
597 PRINTKN("%s\n", z90main_version);
598 PRINTKN("%s\n", z90hardware_version);
599 PDEBUG("create_z90crypt (domain index %d) successful.\n",
600 domain);
601 } else
602 PRINTK("No devices at startup\n");
604 /* Initialize globals. */
605 spin_lock_init(&queuespinlock);
607 INIT_LIST_HEAD(&pending_list);
608 pendingq_count = 0;
610 INIT_LIST_HEAD(&request_list);
611 requestq_count = 0;
613 quiesce_z90crypt = 0;
615 atomic_set(&total_open, 0);
616 atomic_set(&z90crypt_step, 0);
618 /* Set up the cleanup task. */
619 init_timer(&cleanup_timer);
620 cleanup_timer.function = z90crypt_cleanup_task;
621 cleanup_timer.data = 0;
622 cleanup_timer.expires = jiffies + (CLEANUPTIME * HZ);
623 add_timer(&cleanup_timer);
625 /* Set up the proc file system */
626 entry = create_proc_entry("driver/z90crypt", 0644, 0);
627 if (entry) {
628 entry->nlink = 1;
629 entry->data = 0;
630 entry->read_proc = z90crypt_status;
631 entry->write_proc = z90crypt_status_write;
633 else
634 PRINTK("Couldn't create z90crypt proc entry\n");
635 z90crypt_entry = entry;
637 /* Set up the configuration task. */
638 init_timer(&config_timer);
639 config_timer.function = z90crypt_config_task;
640 config_timer.data = 0;
641 config_timer.expires = jiffies + (INITIAL_CONFIGTIME * HZ);
642 add_timer(&config_timer);
644 /* Set up the reader task */
645 tasklet_init(&reader_tasklet, z90crypt_reader_task, 0);
646 init_timer(&reader_timer);
647 reader_timer.function = z90crypt_schedule_reader_task;
648 reader_timer.data = 0;
649 reader_timer.expires = jiffies + (READERTIME * HZ / 1000);
650 add_timer(&reader_timer);
652 return 0; // success
654 init_module_cleanup:
655 if ((nresult = misc_deregister(&z90crypt_misc_device)))
656 PRINTK("misc_deregister failed with %d.\n", nresult);
657 else
658 PDEBUG("misc_deregister successful.\n");
660 return result; // failure
664 * The module termination code
666 static void __exit
667 z90crypt_cleanup_module(void)
669 int nresult;
671 PDEBUG("PID %d\n", PID());
673 remove_proc_entry("driver/z90crypt", 0);
675 if ((nresult = misc_deregister(&z90crypt_misc_device)))
676 PRINTK("misc_deregister failed with %d.\n", nresult);
677 else
678 PDEBUG("misc_deregister successful.\n");
680 /* Remove the tasks */
681 tasklet_kill(&reader_tasklet);
682 del_timer(&reader_timer);
683 del_timer(&config_timer);
684 del_timer(&cleanup_timer);
686 destroy_z90crypt();
688 PRINTKN("Unloaded.\n");
692 * Functions running under a process id
694 * The I/O functions:
695 * z90crypt_open
696 * z90crypt_release
697 * z90crypt_read
698 * z90crypt_write
699 * z90crypt_unlocked_ioctl
700 * z90crypt_status
701 * z90crypt_status_write
702 * disable_card
703 * enable_card
705 * Helper functions:
706 * z90crypt_rsa
707 * z90crypt_prepare
708 * z90crypt_send
709 * z90crypt_process_results
712 static int
713 z90crypt_open(struct inode *inode, struct file *filp)
715 struct priv_data *private_data_p;
717 if (quiesce_z90crypt)
718 return -EQUIESCE;
720 private_data_p = kmalloc(sizeof(struct priv_data), GFP_KERNEL);
721 if (!private_data_p) {
722 PRINTK("Memory allocate failed\n");
723 return -ENOMEM;
726 memset((void *)private_data_p, 0, sizeof(struct priv_data));
727 private_data_p->status = STAT_OPEN;
728 private_data_p->opener_pid = PID();
729 filp->private_data = private_data_p;
730 atomic_inc(&total_open);
732 return 0;
735 static int
736 z90crypt_release(struct inode *inode, struct file *filp)
738 struct priv_data *private_data_p = filp->private_data;
740 PDEBUG("PID %d (filp %p)\n", PID(), filp);
742 private_data_p->status = STAT_CLOSED;
743 memset(private_data_p, 0, sizeof(struct priv_data));
744 kfree(private_data_p);
745 atomic_dec(&total_open);
747 return 0;
751 * there are two read functions, of which compile options will choose one
752 * without USE_GET_RANDOM_BYTES
753 * => read() always returns -EPERM;
754 * otherwise
755 * => read() uses get_random_bytes() kernel function
757 #ifndef USE_GET_RANDOM_BYTES
759 * z90crypt_read will not be supported beyond z90crypt 1.3.1
761 static ssize_t
762 z90crypt_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
764 PDEBUG("filp %p (PID %d)\n", filp, PID());
765 return -EPERM;
767 #else // we want to use get_random_bytes
769 * read() just returns a string of random bytes. Since we have no way
770 * to generate these cryptographically, we just execute get_random_bytes
771 * for the length specified.
773 #include <linux/random.h>
774 static ssize_t
775 z90crypt_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
777 unsigned char *temp_buff;
779 PDEBUG("filp %p (PID %d)\n", filp, PID());
781 if (quiesce_z90crypt)
782 return -EQUIESCE;
783 if (count < 0) {
784 PRINTK("Requested random byte count negative: %ld\n", count);
785 return -EINVAL;
787 if (count > RESPBUFFSIZE) {
788 PDEBUG("count[%d] > RESPBUFFSIZE", count);
789 return -EINVAL;
791 if (count == 0)
792 return 0;
793 temp_buff = kmalloc(RESPBUFFSIZE, GFP_KERNEL);
794 if (!temp_buff) {
795 PRINTK("Memory allocate failed\n");
796 return -ENOMEM;
798 get_random_bytes(temp_buff, count);
800 if (copy_to_user(buf, temp_buff, count) != 0) {
801 kfree(temp_buff);
802 return -EFAULT;
804 kfree(temp_buff);
805 return count;
807 #endif
810 * Write is is not allowed
812 static ssize_t
813 z90crypt_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos)
815 PDEBUG("filp %p (PID %d)\n", filp, PID());
816 return -EPERM;
820 * New status functions
822 static inline int
823 get_status_totalcount(void)
825 return z90crypt.hdware_info->hdware_mask.st_count;
828 static inline int
829 get_status_PCICAcount(void)
831 return z90crypt.hdware_info->type_mask[PCICA].st_count;
834 static inline int
835 get_status_PCICCcount(void)
837 return z90crypt.hdware_info->type_mask[PCICC].st_count;
840 static inline int
841 get_status_PCIXCCcount(void)
843 return z90crypt.hdware_info->type_mask[PCIXCC_MCL2].st_count +
844 z90crypt.hdware_info->type_mask[PCIXCC_MCL3].st_count;
847 static inline int
848 get_status_PCIXCCMCL2count(void)
850 return z90crypt.hdware_info->type_mask[PCIXCC_MCL2].st_count;
853 static inline int
854 get_status_PCIXCCMCL3count(void)
856 return z90crypt.hdware_info->type_mask[PCIXCC_MCL3].st_count;
859 static inline int
860 get_status_CEX2Ccount(void)
862 return z90crypt.hdware_info->type_mask[CEX2C].st_count;
865 static inline int
866 get_status_CEX2Acount(void)
868 return z90crypt.hdware_info->type_mask[CEX2A].st_count;
871 static inline int
872 get_status_requestq_count(void)
874 return requestq_count;
877 static inline int
878 get_status_pendingq_count(void)
880 return pendingq_count;
883 static inline int
884 get_status_totalopen_count(void)
886 return atomic_read(&total_open);
889 static inline int
890 get_status_domain_index(void)
892 return z90crypt.cdx;
895 static inline unsigned char *
896 get_status_status_mask(unsigned char status[Z90CRYPT_NUM_APS])
898 int i, ix;
900 memcpy(status, z90crypt.hdware_info->device_type_array,
901 Z90CRYPT_NUM_APS);
903 for (i = 0; i < get_status_totalcount(); i++) {
904 ix = SHRT2LONG(i);
905 if (LONG2DEVPTR(ix)->user_disabled)
906 status[ix] = 0x0d;
909 return status;
912 static inline unsigned char *
913 get_status_qdepth_mask(unsigned char qdepth[Z90CRYPT_NUM_APS])
915 int i, ix;
917 memset(qdepth, 0, Z90CRYPT_NUM_APS);
919 for (i = 0; i < get_status_totalcount(); i++) {
920 ix = SHRT2LONG(i);
921 qdepth[ix] = LONG2DEVPTR(ix)->dev_caller_count;
924 return qdepth;
927 static inline unsigned int *
928 get_status_perdevice_reqcnt(unsigned int reqcnt[Z90CRYPT_NUM_APS])
930 int i, ix;
932 memset(reqcnt, 0, Z90CRYPT_NUM_APS * sizeof(int));
934 for (i = 0; i < get_status_totalcount(); i++) {
935 ix = SHRT2LONG(i);
936 reqcnt[ix] = LONG2DEVPTR(ix)->dev_total_req_cnt;
939 return reqcnt;
942 static inline void
943 init_work_element(struct work_element *we_p,
944 struct priv_data *priv_data, pid_t pid)
946 int step;
948 we_p->requestptr = (unsigned char *)we_p + sizeof(struct work_element);
949 /* Come up with a unique id for this caller. */
950 step = atomic_inc_return(&z90crypt_step);
951 memcpy(we_p->caller_id+0, (void *) &pid, sizeof(pid));
952 memcpy(we_p->caller_id+4, (void *) &step, sizeof(step));
953 we_p->pid = pid;
954 we_p->priv_data = priv_data;
955 we_p->status[0] = STAT_DEFAULT;
956 we_p->audit[0] = 0x00;
957 we_p->audit[1] = 0x00;
958 we_p->audit[2] = 0x00;
959 we_p->resp_buff_size = 0;
960 we_p->retcode = 0;
961 we_p->devindex = -1;
962 we_p->devtype = -1;
963 atomic_set(&we_p->alarmrung, 0);
964 init_waitqueue_head(&we_p->waitq);
965 INIT_LIST_HEAD(&(we_p->liste));
968 static inline int
969 allocate_work_element(struct work_element **we_pp,
970 struct priv_data *priv_data_p, pid_t pid)
972 struct work_element *we_p;
974 we_p = (struct work_element *) get_zeroed_page(GFP_KERNEL);
975 if (!we_p)
976 return -ENOMEM;
977 init_work_element(we_p, priv_data_p, pid);
978 *we_pp = we_p;
979 return 0;
982 static inline void
983 remove_device(struct device *device_p)
985 if (!device_p || (device_p->disabled != 0))
986 return;
987 device_p->disabled = 1;
988 z90crypt.hdware_info->type_mask[device_p->dev_type].disabled_count++;
989 z90crypt.hdware_info->hdware_mask.disabled_count++;
993 * Bitlength limits for each card
995 * There are new MCLs which allow more bitlengths. See the table for details.
996 * The MCL must be applied and the newer bitlengths enabled for these to work.
998 * Card Type Old limit New limit
999 * PCICA ??-2048 same (the lower limit is less than 128 bit...)
1000 * PCICC 512-1024 512-2048
1001 * PCIXCC_MCL2 512-2048 ----- (applying any GA LIC will make an MCL3 card)
1002 * PCIXCC_MCL3 ----- 128-2048
1003 * CEX2C 512-2048 128-2048
1005 * ext_bitlens (extended bitlengths) is a global, since you should not apply an
1006 * MCL to just one card in a machine. We assume, at first, that all cards have
1007 * these capabilities.
1009 int ext_bitlens = 1; // This is global
1010 #define PCIXCC_MIN_MOD_SIZE 16 // 128 bits
1011 #define OLD_PCIXCC_MIN_MOD_SIZE 64 // 512 bits
1012 #define PCICC_MIN_MOD_SIZE 64 // 512 bits
1013 #define OLD_PCICC_MAX_MOD_SIZE 128 // 1024 bits
1014 #define MAX_MOD_SIZE 256 // 2048 bits
1016 static inline int
1017 select_device_type(int *dev_type_p, int bytelength)
1019 static int count = 0;
1020 int PCICA_avail, PCIXCC_MCL3_avail, CEX2C_avail, CEX2A_avail,
1021 index_to_use;
1022 struct status *stat;
1023 if ((*dev_type_p != PCICC) && (*dev_type_p != PCICA) &&
1024 (*dev_type_p != PCIXCC_MCL2) && (*dev_type_p != PCIXCC_MCL3) &&
1025 (*dev_type_p != CEX2C) && (*dev_type_p != CEX2A) &&
1026 (*dev_type_p != ANYDEV))
1027 return -1;
1028 if (*dev_type_p != ANYDEV) {
1029 stat = &z90crypt.hdware_info->type_mask[*dev_type_p];
1030 if (stat->st_count >
1031 (stat->disabled_count + stat->user_disabled_count))
1032 return 0;
1033 return -1;
1037 * Assumption: PCICA, PCIXCC_MCL3, CEX2C, and CEX2A are all similar in
1038 * speed.
1040 * PCICA and CEX2A do NOT co-exist, so it would be either one or the
1041 * other present.
1043 stat = &z90crypt.hdware_info->type_mask[PCICA];
1044 PCICA_avail = stat->st_count -
1045 (stat->disabled_count + stat->user_disabled_count);
1046 stat = &z90crypt.hdware_info->type_mask[PCIXCC_MCL3];
1047 PCIXCC_MCL3_avail = stat->st_count -
1048 (stat->disabled_count + stat->user_disabled_count);
1049 stat = &z90crypt.hdware_info->type_mask[CEX2C];
1050 CEX2C_avail = stat->st_count -
1051 (stat->disabled_count + stat->user_disabled_count);
1052 stat = &z90crypt.hdware_info->type_mask[CEX2A];
1053 CEX2A_avail = stat->st_count -
1054 (stat->disabled_count + stat->user_disabled_count);
1055 if (PCICA_avail || PCIXCC_MCL3_avail || CEX2C_avail || CEX2A_avail) {
1057 * bitlength is a factor, PCICA or CEX2A are the most capable,
1058 * even with the new MCL for PCIXCC.
1060 if ((bytelength < PCIXCC_MIN_MOD_SIZE) ||
1061 (!ext_bitlens && (bytelength < OLD_PCIXCC_MIN_MOD_SIZE))) {
1062 if (PCICA_avail) {
1063 *dev_type_p = PCICA;
1064 return 0;
1066 if (CEX2A_avail) {
1067 *dev_type_p = CEX2A;
1068 return 0;
1070 return -1;
1073 index_to_use = count % (PCICA_avail + PCIXCC_MCL3_avail +
1074 CEX2C_avail + CEX2A_avail);
1075 if (index_to_use < PCICA_avail)
1076 *dev_type_p = PCICA;
1077 else if (index_to_use < (PCICA_avail + PCIXCC_MCL3_avail))
1078 *dev_type_p = PCIXCC_MCL3;
1079 else if (index_to_use < (PCICA_avail + PCIXCC_MCL3_avail +
1080 CEX2C_avail))
1081 *dev_type_p = CEX2C;
1082 else
1083 *dev_type_p = CEX2A;
1084 count++;
1085 return 0;
1088 /* Less than OLD_PCIXCC_MIN_MOD_SIZE cannot go to a PCIXCC_MCL2 */
1089 if (bytelength < OLD_PCIXCC_MIN_MOD_SIZE)
1090 return -1;
1091 stat = &z90crypt.hdware_info->type_mask[PCIXCC_MCL2];
1092 if (stat->st_count >
1093 (stat->disabled_count + stat->user_disabled_count)) {
1094 *dev_type_p = PCIXCC_MCL2;
1095 return 0;
1099 * Less than PCICC_MIN_MOD_SIZE or more than OLD_PCICC_MAX_MOD_SIZE
1100 * (if we don't have the MCL applied and the newer bitlengths enabled)
1101 * cannot go to a PCICC
1103 if ((bytelength < PCICC_MIN_MOD_SIZE) ||
1104 (!ext_bitlens && (bytelength > OLD_PCICC_MAX_MOD_SIZE))) {
1105 return -1;
1107 stat = &z90crypt.hdware_info->type_mask[PCICC];
1108 if (stat->st_count >
1109 (stat->disabled_count + stat->user_disabled_count)) {
1110 *dev_type_p = PCICC;
1111 return 0;
1114 return -1;
1118 * Try the selected number, then the selected type (can be ANYDEV)
/*
 * select_device - pick the device index a request will be sent to.
 *
 * If *device_nr_p already names a valid slot whose device is present,
 * not DEV_GONE and not (user-)disabled, that device is chosen and
 * *dev_type_p is taken from it.  Otherwise the index is reset to -1,
 * select_device_type() resolves the type, and the device of that type
 * with the fewest queued callers wins.
 *
 * Returns the selected device index, or -1 if no device is usable.
 * Called from send_to_crypto_device() with queuespinlock held.
 */
1120 static inline int
1121 select_device(int *dev_type_p, int *device_nr_p, int bytelength)
1123 int i, indx, devTp, low_count, low_indx;
1124 struct device_x *index_p;
1125 struct device *dev_ptr;
1127 PDEBUG("device type = %d, index = %d\n", *dev_type_p, *device_nr_p);
1128 if ((*device_nr_p >= 0) && (*device_nr_p < Z90CRYPT_NUM_DEVS)) {
1129 PDEBUG("trying index = %d\n", *device_nr_p);
1130 dev_ptr = z90crypt.device_p[*device_nr_p];
/* Selection by explicit number only succeeds on a live, enabled device. */
1132 if (dev_ptr &&
1133 (dev_ptr->dev_stat != DEV_GONE) &&
1134 (dev_ptr->disabled == 0) &&
1135 (dev_ptr->user_disabled == 0)) {
1136 PDEBUG("selected by number, index = %d\n",
1137 *device_nr_p);
1138 *dev_type_p = dev_ptr->dev_type;
1139 return *device_nr_p;
1142 *device_nr_p = -1;
1143 PDEBUG("trying type = %d\n", *dev_type_p);
1144 devTp = *dev_type_p;
1145 if (select_device_type(&devTp, bytelength) == -1) {
1146 PDEBUG("failed to select by type\n");
1147 return -1;
1149 PDEBUG("selected type = %d\n", devTp);
1150 index_p = &z90crypt.hdware_info->type_x_addr[devTp];
/* 0x0000FFFF: sentinel larger than any realistic caller count. */
1151 low_count = 0x0000FFFF;
1152 low_indx = -1;
/* Least-loaded scan across all devices of the selected type. */
1153 for (i = 0; i < z90crypt.hdware_info->type_mask[devTp].st_count; i++) {
1154 indx = index_p->device_index[i];
1155 dev_ptr = z90crypt.device_p[indx];
1156 if (dev_ptr &&
1157 (dev_ptr->dev_stat != DEV_GONE) &&
1158 (dev_ptr->disabled == 0) &&
1159 (dev_ptr->user_disabled == 0) &&
1160 (devTp == dev_ptr->dev_type) &&
1161 (low_count > dev_ptr->dev_caller_count)) {
1162 low_count = dev_ptr->dev_caller_count;
1163 low_indx = indx;
1166 *device_nr_p = low_indx;
1167 return low_indx;
/*
 * send_to_crypto_device - resolve a target device and enqueue the request.
 *
 * Picks a device via select_device(), re-validates the slot (device may
 * have changed or vanished), checks queue depth, and submits the
 * device-dependent request via send_to_AP().  On success the caller is
 * linked onto the device's caller list and 0 is returned; otherwise one
 * of the SEN_* codes is returned.  A DEV_SEN_EXCEPTION from send_to_AP()
 * marks the whole driver as terminating.
 *
 * Runs under queuespinlock (taken by z90crypt_send()).
 */
1170 static inline int
1171 send_to_crypto_device(struct work_element *we_p)
1173 struct caller *caller_p;
1174 struct device *device_p;
1175 int dev_nr;
1176 int bytelen = ((struct ica_rsa_modexpo *)we_p->buffer)->inputdatalength;
1178 if (!we_p->requestptr)
1179 return SEN_FATAL_ERROR;
1180 caller_p = (struct caller *)we_p->requestptr;
1181 dev_nr = we_p->devindex;
1182 if (select_device(&we_p->devtype, &dev_nr, bytelen) == -1) {
/* Hardware exists but none currently usable -> worth retrying. */
1183 if (z90crypt.hdware_info->hdware_mask.st_count != 0)
1184 return SEN_RETRY;
1185 else
1186 return SEN_NOT_AVAIL;
1188 we_p->devindex = dev_nr;
1189 device_p = z90crypt.device_p[dev_nr];
1190 if (!device_p)
1191 return SEN_NOT_AVAIL;
/* Type changed underneath us: retry rather than send to wrong hardware. */
1192 if (device_p->dev_type != we_p->devtype)
1193 return SEN_RETRY;
1194 if (device_p->dev_caller_count >= device_p->dev_q_depth)
1195 return SEN_QUEUE_FULL;
1196 PDEBUG("device number prior to send: %d\n", dev_nr);
1197 switch (send_to_AP(dev_nr, z90crypt.cdx,
1198 caller_p->caller_dev_dep_req_l,
1199 caller_p->caller_dev_dep_req_p)) {
1200 case DEV_SEN_EXCEPTION:
1201 PRINTKC("Exception during send to device %d\n", dev_nr);
1202 z90crypt.terminating = 1;
1203 return SEN_FATAL_ERROR;
1204 case DEV_GONE:
1205 PRINTK("Device %d not available\n", dev_nr);
1206 remove_device(device_p);
1207 return SEN_NOT_AVAIL;
1208 case DEV_EMPTY:
1209 return SEN_NOT_AVAIL;
1210 case DEV_NO_WORK:
1211 return SEN_FATAL_ERROR;
1212 case DEV_BAD_MESSAGE:
1213 return SEN_USER_ERROR;
1214 case DEV_QUEUE_FULL:
1215 return SEN_QUEUE_FULL;
1216 default:
1217 case DEV_ONLINE:
1218 break;
1220 list_add_tail(&(caller_p->caller_liste), &(device_p->dev_caller_list));
1221 device_p->dev_caller_count++;
1222 return 0;
1226 * Send puts the user's work on one of two queues:
1227 * the pending queue if the send was successful
1228 * the request queue if the send failed because device full or busy
/*
 * z90crypt_send - submit a prepared work element to the hardware.
 *
 * Rejects the call with -EWORKPEND if the element already has work
 * outstanding.  Otherwise, under queuespinlock, hands the element to
 * send_to_crypto_device() and files it on the pending list (sent OK) or
 * the request list (device busy/full; will be resent later).  SEN_RETRY
 * becomes -ERESTARTSYS; hard failures set STAT_FAILED and an errno in
 * we_p->retcode.  Schedules the reader tasklet on success.
 */
1230 static inline int
1231 z90crypt_send(struct work_element *we_p, const char *buf)
1233 int rv;
1235 PDEBUG("PID %d\n", PID());
1237 if (CHK_RDWRMASK(we_p->status[0]) != STAT_NOWORK) {
1238 PDEBUG("PID %d tried to send more work but has outstanding "
1239 "work.\n", PID());
1240 return -EWORKPEND;
1242 we_p->devindex = -1; // Reset device number
1243 spin_lock_irq(&queuespinlock);
1244 rv = send_to_crypto_device(we_p);
1245 switch (rv) {
1246 case 0:
1247 we_p->requestsent = jiffies;
1248 we_p->audit[0] |= FP_SENT;
1249 list_add_tail(&we_p->liste, &pending_list);
1250 ++pendingq_count;
1251 we_p->audit[0] |= FP_PENDING;
1252 break;
1253 case SEN_BUSY:
1254 case SEN_QUEUE_FULL:
/* Not an error: park the element on the request queue for a resend. */
1255 rv = 0;
1256 we_p->devindex = -1; // any device will do
1257 we_p->requestsent = jiffies;
1258 list_add_tail(&we_p->liste, &request_list);
1259 ++requestq_count;
1260 we_p->audit[0] |= FP_REQUEST;
1261 break;
1262 case SEN_RETRY:
1263 rv = -ERESTARTSYS;
1264 break;
1265 case SEN_NOT_AVAIL:
1266 PRINTK("*** No devices available.\n");
1267 rv = we_p->retcode = -ENODEV;
1268 we_p->status[0] |= STAT_FAILED;
1269 break;
1270 case REC_OPERAND_INV:
1271 case REC_OPERAND_SIZE:
1272 case REC_EVEN_MOD:
1273 case REC_INVALID_PAD:
1274 rv = we_p->retcode = -EINVAL;
1275 we_p->status[0] |= STAT_FAILED;
1276 break;
1277 default:
1278 we_p->retcode = rv;
1279 we_p->status[0] |= STAT_FAILED;
1280 break;
/* On -ERESTARTSYS the element stays un-"written", presumably so the
 * retried syscall can resubmit it cleanly — NOTE(review): confirm. */
1282 if (rv != -ERESTARTSYS)
1283 SET_RDWRMASK(we_p->status[0], STAT_WRITTEN);
1284 spin_unlock_irq(&queuespinlock);
1285 if (rv == 0)
1286 tasklet_schedule(&reader_tasklet);
1287 return rv;
1291 * process_results copies the user's work from kernel space.
/*
 * z90crypt_process_results - copy a completed request back to user space.
 *
 * Copies the work element's buffer to "buf", then (if retcode is 0 and a
 * response buffer exists) copies the response data to the user address
 * recorded in we_p->resp_addr.  Any copy_to_user() failure maps to
 * -EFAULT; a missing kernel buffer maps to -ENOBUFF.  Bumps the target
 * device's completed-request counter and leaves the element in
 * STAT_NOWORK on exit.
 */
1293 static inline int
1294 z90crypt_process_results(struct work_element *we_p, char __user *buf)
1296 int rv;
1298 PDEBUG("we_p %p (PID %d)\n", we_p, PID());
1300 LONG2DEVPTR(we_p->devindex)->dev_total_req_cnt++;
1301 SET_RDWRMASK(we_p->status[0], STAT_READPEND);
1303 rv = 0;
1304 if (!we_p->buffer) {
1305 PRINTK("we_p %p PID %d in STAT_READPEND: buffer NULL.\n",
1306 we_p, PID());
1307 rv = -ENOBUFF;
1310 if (!rv)
/* copy_to_user() returns bytes NOT copied; any nonzero -> -EFAULT. */
1311 if ((rv = copy_to_user(buf, we_p->buffer, we_p->buff_size))) {
1312 PDEBUG("copy_to_user failed: rv = %d\n", rv);
1313 rv = -EFAULT;
1316 if (!rv)
1317 rv = we_p->retcode;
1318 if (!rv)
1319 if (we_p->resp_buff_size
1320 && copy_to_user(we_p->resp_addr, we_p->resp_buff,
1321 we_p->resp_buff_size))
1322 rv = -EFAULT;
1324 SET_RDWRMASK(we_p->status[0], STAT_NOWORK);
1325 return rv;
1328 static unsigned char NULL_psmid[8] =
1329 {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
1332 * Used in device configuration functions
1334 #define MAX_RESET 90
1337 * This is used only for PCICC support
/*
 * is_PKCS11_padded - check for a PKCS#1 block-type-01 padded buffer.
 *
 * Layout expected: 0x00 0x01, at least eight 0xFF padding bytes, a 0x00
 * separator, then data.  Returns 1 when the buffer matches, 0 otherwise.
 * Used only for PCICC support.
 */
static inline int
is_PKCS11_padded(unsigned char *buffer, int length)
{
	int idx;

	if (buffer[0] != 0x00 || buffer[1] != 0x01)
		return 0;
	/* Skip the 0xFF padding run starting after the two header bytes. */
	idx = 2;
	while (idx < length && buffer[idx] == 0xFF)
		idx++;
	/* Need >= 8 padding bytes and a terminator before end of buffer. */
	if (idx < 10 || idx == length)
		return 0;
	return buffer[idx] == 0x00;
}
1356 * This is used only for PCICC support
/*
 * is_PKCS12_padded - check for a PKCS#1 block-type-02 padded buffer.
 *
 * Layout expected: 0x00 0x02, at least eight nonzero padding bytes, a
 * 0x00 separator, then data.  Returns 1 when the buffer matches, 0
 * otherwise.  Used only for PCICC support.
 */
static inline int
is_PKCS12_padded(unsigned char *buffer, int length)
{
	int idx;

	if (buffer[0] != 0x00 || buffer[1] != 0x02)
		return 0;
	/* Scan forward to the 0x00 separator after the nonzero padding. */
	idx = 2;
	while (idx < length && buffer[idx] != 0x00)
		idx++;
	/* Need >= 8 padding bytes and a separator before end of buffer. */
	if (idx < 10 || idx == length)
		return 0;
	return buffer[idx] == 0x00;
}
1375 * builds struct caller and converts message from generic format to
1376 * device-dependent format
1377 * func is ICARSAMODEXPO or ICARSACRT
1378 * function is PCI_FUNC_KEY_ENCRYPT or PCI_FUNC_KEY_DECRYPT
/*
 * build_caller - fill in the struct caller for a work element and
 * convert the generic request into the device-dependent format.
 *
 * "function" is PCI_FUNC_KEY_ENCRYPT or PCI_FUNC_KEY_DECRYPT.  Rejects
 * unknown device types with SEN_NOT_AVAIL.  On success, the caller id is
 * also copied into bytes 4..11 of the device-dependent request.
 * Returns 0 or an error from convert_request().
 */
1380 static inline int
1381 build_caller(struct work_element *we_p, short function)
1383 int rv;
1384 struct caller *caller_p = (struct caller *)we_p->requestptr;
1386 if ((we_p->devtype != PCICC) && (we_p->devtype != PCICA) &&
1387 (we_p->devtype != PCIXCC_MCL2) && (we_p->devtype != PCIXCC_MCL3) &&
1388 (we_p->devtype != CEX2C) && (we_p->devtype != CEX2A))
1389 return SEN_NOT_AVAIL;
1391 memcpy(caller_p->caller_id, we_p->caller_id,
1392 sizeof(caller_p->caller_id));
1393 caller_p->caller_dev_dep_req_p = caller_p->caller_dev_dep_req;
1394 caller_p->caller_dev_dep_req_l = MAX_RESPONSE_SIZE;
1395 caller_p->caller_buf_p = we_p->buffer;
1396 INIT_LIST_HEAD(&(caller_p->caller_liste));
1398 rv = convert_request(we_p->buffer, we_p->funccode, function,
1399 z90crypt.cdx, we_p->devtype,
1400 &caller_p->caller_dev_dep_req_l,
1401 caller_p->caller_dev_dep_req_p);
1402 if (rv) {
1403 if (rv == SEN_NOT_AVAIL)
1404 PDEBUG("request can't be processed on hdwr avail\n");
1405 else
1406 PRINTK("Error from convert_request: %d\n", rv);
1408 else
/* PSMID lives at offset 4 of the device-dependent request. */
1409 memcpy(&(caller_p->caller_dev_dep_req_p[4]), we_p->caller_id,8);
1410 return rv;
/*
 * unbuild_caller - detach a caller from its device and clear its id.
 *
 * Safe against a NULL caller and against a caller whose list node was
 * never initialized (both next and prev are checked before list_empty).
 * Decrements the device's caller count only if the node was linked.
 */
1413 static inline void
1414 unbuild_caller(struct device *device_p, struct caller *caller_p)
1416 if (!caller_p)
1417 return;
1418 if (caller_p->caller_liste.next && caller_p->caller_liste.prev)
1419 if (!list_empty(&caller_p->caller_liste)) {
1420 list_del_init(&caller_p->caller_liste);
1421 device_p->dev_caller_count--;
1423 memset(caller_p->caller_id, 0, sizeof(caller_p->caller_id));
/*
 * get_crypto_request_buffer - validate a user RSA request and build the
 * device-dependent form of it.
 *
 * Checks the work element (caller id, buffers, device type), validates
 * the modexpo/CRT arguments, selects a device type for the request's
 * byte length, copies the input data from user space into the scratch
 * area following the work element, chooses the PCI function code
 * (encrypt vs. decrypt, depending on device type and padding), and
 * finally calls build_caller().  Returns 0 or a SEN_*/REC_* code.
 */
1426 static inline int
1427 get_crypto_request_buffer(struct work_element *we_p)
1429 struct ica_rsa_modexpo *mex_p;
1430 struct ica_rsa_modexpo_crt *crt_p;
1431 unsigned char *temp_buffer;
1432 short function;
1433 int rv;
/* Both views alias we_p->buffer; funccode decides which one is valid. */
1435 mex_p = (struct ica_rsa_modexpo *) we_p->buffer;
1436 crt_p = (struct ica_rsa_modexpo_crt *) we_p->buffer;
1438 PDEBUG("device type input = %d\n", we_p->devtype);
1440 if (z90crypt.terminating)
1441 return REC_NO_RESPONSE;
1442 if (memcmp(we_p->caller_id, NULL_psmid, 8) == 0) {
1443 PRINTK("psmid zeroes\n");
1444 return SEN_FATAL_ERROR;
1446 if (!we_p->buffer) {
1447 PRINTK("buffer pointer NULL\n");
1448 return SEN_USER_ERROR;
1450 if (!we_p->requestptr) {
1451 PRINTK("caller pointer NULL\n");
1452 return SEN_USER_ERROR;
1455 if ((we_p->devtype != PCICA) && (we_p->devtype != PCICC) &&
1456 (we_p->devtype != PCIXCC_MCL2) && (we_p->devtype != PCIXCC_MCL3) &&
1457 (we_p->devtype != CEX2C) && (we_p->devtype != CEX2A) &&
1458 (we_p->devtype != ANYDEV)) {
1459 PRINTK("invalid device type\n");
1460 return SEN_USER_ERROR;
1463 if ((mex_p->inputdatalength < 1) ||
1464 (mex_p->inputdatalength > MAX_MOD_SIZE)) {
1465 PRINTK("inputdatalength[%d] is not valid\n",
1466 mex_p->inputdatalength);
1467 return SEN_USER_ERROR;
1470 if (mex_p->outputdatalength < mex_p->inputdatalength) {
1471 PRINTK("outputdatalength[%d] < inputdatalength[%d]\n",
1472 mex_p->outputdatalength, mex_p->inputdatalength);
1473 return SEN_USER_ERROR;
1476 if (!mex_p->inputdata || !mex_p->outputdata) {
1477 PRINTK("inputdata[%p] or outputdata[%p] is NULL\n",
1478 mex_p->outputdata, mex_p->inputdata);
1479 return SEN_USER_ERROR;
1483 * As long as outputdatalength is big enough, we can set the
1484 * outputdatalength equal to the inputdatalength, since that is the
1485 * number of bytes we will copy in any case
1487 mex_p->outputdatalength = mex_p->inputdatalength;
1489 rv = 0;
1490 switch (we_p->funccode) {
1491 case ICARSAMODEXPO:
1492 if (!mex_p->b_key || !mex_p->n_modulus)
1493 rv = SEN_USER_ERROR;
1494 break;
1495 case ICARSACRT:
/* CRT form splits the modulus: input length must be even. */
1496 if (!IS_EVEN(crt_p->inputdatalength)) {
1497 PRINTK("inputdatalength[%d] is odd, CRT form\n",
1498 crt_p->inputdatalength);
1499 rv = SEN_USER_ERROR;
1500 break;
1502 if (!crt_p->bp_key ||
1503 !crt_p->bq_key ||
1504 !crt_p->np_prime ||
1505 !crt_p->nq_prime ||
1506 !crt_p->u_mult_inv) {
1507 PRINTK("CRT form, bad data: %p/%p/%p/%p/%p\n",
1508 crt_p->bp_key, crt_p->bq_key,
1509 crt_p->np_prime, crt_p->nq_prime,
1510 crt_p->u_mult_inv);
1511 rv = SEN_USER_ERROR;
1513 break;
1514 default:
1515 PRINTK("bad func = %d\n", we_p->funccode);
1516 rv = SEN_USER_ERROR;
1517 break;
1519 if (rv != 0)
1520 return rv;
1522 if (select_device_type(&we_p->devtype, mex_p->inputdatalength) < 0)
1523 return SEN_NOT_AVAIL;
/* Scratch area for the user input lives right after element + caller. */
1525 temp_buffer = (unsigned char *)we_p + sizeof(struct work_element) +
1526 sizeof(struct caller);
1527 if (copy_from_user(temp_buffer, mex_p->inputdata,
1528 mex_p->inputdatalength) != 0)
1529 return SEN_RELEASED;
1531 function = PCI_FUNC_KEY_ENCRYPT;
1532 switch (we_p->devtype) {
1533 /* PCICA and CEX2A do everything with a simple RSA mod-expo operation */
1534 case PCICA:
1535 case CEX2A:
1536 function = PCI_FUNC_KEY_ENCRYPT;
1537 break;
1539 * PCIXCC_MCL2 does all Mod-Expo form with a simple RSA mod-expo
1540 * operation, and all CRT forms with a PKCS-1.2 format decrypt.
1541 * PCIXCC_MCL3 and CEX2C do all Mod-Expo and CRT forms with a simple RSA
1542 * mod-expo operation
1544 case PCIXCC_MCL2:
1545 if (we_p->funccode == ICARSAMODEXPO)
1546 function = PCI_FUNC_KEY_ENCRYPT;
1547 else
1548 function = PCI_FUNC_KEY_DECRYPT;
1549 break;
1550 case PCIXCC_MCL3:
1551 case CEX2C:
1552 if (we_p->funccode == ICARSAMODEXPO)
1553 function = PCI_FUNC_KEY_ENCRYPT;
1554 else
1555 function = PCI_FUNC_KEY_DECRYPT;
1556 break;
1558 * PCICC does everything as a PKCS-1.2 format request
1560 case PCICC:
1561 /* PCICC cannot handle input that is is PKCS#1.1 padded */
1562 if (is_PKCS11_padded(temp_buffer, mex_p->inputdatalength)) {
1563 return SEN_NOT_AVAIL;
1565 if (we_p->funccode == ICARSAMODEXPO) {
1566 if (is_PKCS12_padded(temp_buffer,
1567 mex_p->inputdatalength))
1568 function = PCI_FUNC_KEY_ENCRYPT;
1569 else
1570 function = PCI_FUNC_KEY_DECRYPT;
1571 } else
1572 /* all CRT forms are decrypts */
1573 function = PCI_FUNC_KEY_DECRYPT;
1574 break;
1576 PDEBUG("function: %04x\n", function);
1577 rv = build_caller(we_p, function);
1578 PDEBUG("rv from build_caller = %d\n", rv);
1579 return rv;
/*
 * z90crypt_prepare - copy the user's ica request struct into the work
 * element and build the device request, mapping internal SEN_*/REC_*
 * codes to errnos.
 *
 * funccode selects which struct is copied (modexpo vs. modexpo_crt).
 * SEN_QUEUE_FULL is deliberately mapped to 0: the element will simply
 * wait on the request queue.  Returns 0 or a negative errno.
 */
1582 static inline int
1583 z90crypt_prepare(struct work_element *we_p, unsigned int funccode,
1584 const char __user *buffer)
1586 int rv;
1588 we_p->devindex = -1;
1589 if (funccode == ICARSAMODEXPO)
1590 we_p->buff_size = sizeof(struct ica_rsa_modexpo);
1591 else
1592 we_p->buff_size = sizeof(struct ica_rsa_modexpo_crt);
1594 if (copy_from_user(we_p->buffer, buffer, we_p->buff_size))
1595 return -EFAULT;
1597 we_p->audit[0] |= FP_COPYFROM;
1598 SET_RDWRMASK(we_p->status[0], STAT_WRITTEN);
1599 we_p->funccode = funccode;
1600 we_p->devtype = -1;
1601 we_p->audit[0] |= FP_BUFFREQ;
1602 rv = get_crypto_request_buffer(we_p);
1603 switch (rv) {
1604 case 0:
1605 we_p->audit[0] |= FP_BUFFGOT;
1606 break;
1607 case SEN_USER_ERROR:
1608 rv = -EINVAL;
1609 break;
1610 case SEN_QUEUE_FULL:
1611 rv = 0;
1612 break;
1613 case SEN_RELEASED:
1614 rv = -EFAULT;
1615 break;
1616 case REC_NO_RESPONSE:
1617 rv = -ENODEV;
1618 break;
1619 case SEN_NOT_AVAIL:
1620 case EGETBUFF:
1621 rv = -EGETBUFF;
1622 break;
1623 default:
1624 PRINTK("rv = %d\n", rv);
1625 rv = -EGETBUFF;
1626 break;
1628 if (CHK_RDWRMASK(we_p->status[0]) == STAT_WRITTEN)
1629 SET_RDWRMASK(we_p->status[0], STAT_DEFAULT);
1630 return rv;
/*
 * purge_work_element - remove a work element from the request and
 * pending queues, if it is on either, adjusting the queue counters.
 * Takes queuespinlock itself; safe to call for an element that is on
 * neither list.
 */
1633 static inline void
1634 purge_work_element(struct work_element *we_p)
1636 struct list_head *lptr;
1638 spin_lock_irq(&queuespinlock);
1639 list_for_each(lptr, &request_list) {
1640 if (lptr == &we_p->liste) {
1641 list_del_init(lptr);
1642 requestq_count--;
1643 break;
1646 list_for_each(lptr, &pending_list) {
1647 if (lptr == &we_p->liste) {
1648 list_del_init(lptr);
1649 pendingq_count--;
1650 break;
1653 spin_unlock_irq(&queuespinlock);
1657 * Build the request and send it.
/*
 * z90crypt_rsa - full lifecycle of one RSA ioctl request.
 *
 * Allocates a work element (one page), prepares and sends the request,
 * sleeps until the reader side rings the element's alarm, then copies
 * results back to user space.  On failure, remaps selected errnos:
 * -EINVAL from a coprocessor becomes -EGETBUFF (caller should fall back
 * to software), -ETIMEOUT becomes -ERESTARTSYS or -ENODEV depending on
 * remaining cards.  Always frees the work element page before return.
 */
1659 static inline int
1660 z90crypt_rsa(struct priv_data *private_data_p, pid_t pid,
1661 unsigned int cmd, unsigned long arg)
1663 struct work_element *we_p;
1664 int rv;
1666 if ((rv = allocate_work_element(&we_p, private_data_p, pid))) {
1667 PDEBUG("PID %d: allocate_work_element returned ENOMEM\n", pid);
1668 return rv;
1670 if ((rv = z90crypt_prepare(we_p, cmd, (const char __user *)arg)))
1671 PDEBUG("PID %d: rv = %d from z90crypt_prepare\n", pid, rv);
1672 if (!rv)
1673 if ((rv = z90crypt_send(we_p, (const char *)arg)))
1674 PDEBUG("PID %d: rv %d from z90crypt_send.\n", pid, rv);
1675 if (!rv) {
1676 we_p->audit[0] |= FP_ASLEEP;
/* Sleep until the reader tasklet completes this element. */
1677 wait_event(we_p->waitq, atomic_read(&we_p->alarmrung));
1678 we_p->audit[0] |= FP_AWAKE;
1679 rv = we_p->retcode;
1681 if (!rv)
1682 rv = z90crypt_process_results(we_p, (char __user *)arg);
1684 if ((we_p->status[0] & STAT_FAILED)) {
1685 switch (rv) {
1687 * EINVAL *after* receive is almost always a padding error or
1688 * length error issued by a coprocessor (not an accelerator).
1689 * We convert this return value to -EGETBUFF which should
1690 * trigger a fallback to software.
1692 case -EINVAL:
1693 if ((we_p->devtype != PCICA) &&
1694 (we_p->devtype != CEX2A))
1695 rv = -EGETBUFF;
1696 break;
1697 case -ETIMEOUT:
1698 if (z90crypt.mask.st_count > 0)
1699 rv = -ERESTARTSYS; // retry with another
1700 else
1701 rv = -ENODEV; // no cards left
1702 /* fall through to clean up request queue */
1703 case -ERESTARTSYS:
1704 case -ERELEASED:
1705 switch (CHK_RDWRMASK(we_p->status[0])) {
1706 case STAT_WRITTEN:
1707 purge_work_element(we_p);
1708 break;
1709 case STAT_READPEND:
1710 case STAT_NOWORK:
1711 default:
1712 break;
1714 break;
1715 default:
1716 we_p->status[0] ^= STAT_FAILED;
1717 break;
1720 free_page((long)we_p);
1721 return rv;
1725 * This function is a little long, but it's really just one large switch
1726 * statement.
/*
 * z90crypt_unlocked_ioctl - main ioctl dispatcher for /dev/z90crypt.
 *
 * ICARSAMODEXPO/ICARSACRT run the crypto path (retrying z90crypt_rsa up
 * to once per enabled device on -ERESTARTSYS); the Z90STAT_* calls copy
 * a single statistic or a mask out to user space; ICAZ90STATUS and
 * Z90STAT_PCIXCCCOUNT are deprecated (rate-limited warnings); Z90QUIESCE
 * (root only) stops acceptance of new work.  Returns 0 or a negative
 * errno; unknown commands get -ENOTTY.
 */
1728 static long
1729 z90crypt_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1731 struct priv_data *private_data_p = filp->private_data;
1732 unsigned char *status;
1733 unsigned char *qdepth;
1734 unsigned int *reqcnt;
1735 struct ica_z90_status *pstat;
1736 int ret, i, loopLim, tempstat;
1737 static int deprecated_msg_count1 = 0;
1738 static int deprecated_msg_count2 = 0;
1740 PDEBUG("filp %p (PID %d), cmd 0x%08X\n", filp, PID(), cmd);
1741 PDEBUG("cmd 0x%08X: dir %s, size 0x%04X, type 0x%02X, nr 0x%02X\n",
1742 cmd,
1743 !_IOC_DIR(cmd) ? "NO"
1744 : ((_IOC_DIR(cmd) == (_IOC_READ|_IOC_WRITE)) ? "RW"
1745 : ((_IOC_DIR(cmd) == _IOC_READ) ? "RD"
1746 : "WR")),
1747 _IOC_SIZE(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd));
1749 if (_IOC_TYPE(cmd) != Z90_IOCTL_MAGIC) {
1750 PRINTK("cmd 0x%08X contains bad magic\n", cmd);
1751 return -ENOTTY;
1754 ret = 0;
1755 switch (cmd) {
1756 case ICARSAMODEXPO:
1757 case ICARSACRT:
1758 if (quiesce_z90crypt) {
1759 ret = -EQUIESCE;
1760 break;
1762 ret = -ENODEV; // Default if no devices
/* One retry attempt per currently enabled device. */
1763 loopLim = z90crypt.hdware_info->hdware_mask.st_count -
1764 (z90crypt.hdware_info->hdware_mask.disabled_count +
1765 z90crypt.hdware_info->hdware_mask.user_disabled_count);
1766 for (i = 0; i < loopLim; i++) {
1767 ret = z90crypt_rsa(private_data_p, PID(), cmd, arg);
1768 if (ret != -ERESTARTSYS)
1769 break;
1771 if (ret == -ERESTARTSYS)
1772 ret = -ENODEV;
1773 break;
1775 case Z90STAT_TOTALCOUNT:
1776 tempstat = get_status_totalcount();
1777 if (copy_to_user((int __user *)arg, &tempstat,sizeof(int)) != 0)
1778 ret = -EFAULT;
1779 break;
1781 case Z90STAT_PCICACOUNT:
1782 tempstat = get_status_PCICAcount();
1783 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1784 ret = -EFAULT;
1785 break;
1787 case Z90STAT_PCICCCOUNT:
1788 tempstat = get_status_PCICCcount();
1789 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1790 ret = -EFAULT;
1791 break;
1793 case Z90STAT_PCIXCCMCL2COUNT:
1794 tempstat = get_status_PCIXCCMCL2count();
1795 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1796 ret = -EFAULT;
1797 break;
1799 case Z90STAT_PCIXCCMCL3COUNT:
1800 tempstat = get_status_PCIXCCMCL3count();
1801 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1802 ret = -EFAULT;
1803 break;
1805 case Z90STAT_CEX2CCOUNT:
1806 tempstat = get_status_CEX2Ccount();
1807 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1808 ret = -EFAULT;
1809 break;
1811 case Z90STAT_CEX2ACOUNT:
1812 tempstat = get_status_CEX2Acount();
1813 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1814 ret = -EFAULT;
1815 break;
1817 case Z90STAT_REQUESTQ_COUNT:
1818 tempstat = get_status_requestq_count();
1819 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1820 ret = -EFAULT;
1821 break;
1823 case Z90STAT_PENDINGQ_COUNT:
1824 tempstat = get_status_pendingq_count();
1825 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1826 ret = -EFAULT;
1827 break;
1829 case Z90STAT_TOTALOPEN_COUNT:
1830 tempstat = get_status_totalopen_count();
1831 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1832 ret = -EFAULT;
1833 break;
1835 case Z90STAT_DOMAIN_INDEX:
1836 tempstat = get_status_domain_index();
1837 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1838 ret = -EFAULT;
1839 break;
1841 case Z90STAT_STATUS_MASK:
1842 status = kmalloc(Z90CRYPT_NUM_APS, GFP_KERNEL);
1843 if (!status) {
1844 PRINTK("kmalloc for status failed!\n");
1845 ret = -ENOMEM;
1846 break;
1848 get_status_status_mask(status);
1849 if (copy_to_user((char __user *) arg, status, Z90CRYPT_NUM_APS)
1850 != 0)
1851 ret = -EFAULT;
1852 kfree(status);
1853 break;
1855 case Z90STAT_QDEPTH_MASK:
1856 qdepth = kmalloc(Z90CRYPT_NUM_APS, GFP_KERNEL);
1857 if (!qdepth) {
1858 PRINTK("kmalloc for qdepth failed!\n");
1859 ret = -ENOMEM;
1860 break;
1862 get_status_qdepth_mask(qdepth);
1863 if (copy_to_user((char __user *) arg, qdepth, Z90CRYPT_NUM_APS) != 0)
1864 ret = -EFAULT;
1865 kfree(qdepth);
1866 break;
1868 case Z90STAT_PERDEV_REQCNT:
1869 reqcnt = kmalloc(sizeof(int) * Z90CRYPT_NUM_APS, GFP_KERNEL);
1870 if (!reqcnt) {
1871 PRINTK("kmalloc for reqcnt failed!\n");
1872 ret = -ENOMEM;
1873 break;
1875 get_status_perdevice_reqcnt(reqcnt);
1876 if (copy_to_user((char __user *) arg, reqcnt,
1877 Z90CRYPT_NUM_APS * sizeof(int)) != 0)
1878 ret = -EFAULT;
1879 kfree(reqcnt);
1880 break;
1882 /* THIS IS DEPRECATED. USE THE NEW STATUS CALLS */
1883 case ICAZ90STATUS:
1884 if (deprecated_msg_count1 < 20) {
1885 PRINTK("deprecated call to ioctl (ICAZ90STATUS)!\n");
1886 deprecated_msg_count1++;
1887 if (deprecated_msg_count1 == 20)
1888 PRINTK("No longer issuing messages related to "
1889 "deprecated call to ICAZ90STATUS.\n");
1892 pstat = kmalloc(sizeof(struct ica_z90_status), GFP_KERNEL);
1893 if (!pstat) {
1894 PRINTK("kmalloc for pstat failed!\n");
1895 ret = -ENOMEM;
1896 break;
1899 pstat->totalcount = get_status_totalcount();
1900 pstat->leedslitecount = get_status_PCICAcount();
1901 pstat->leeds2count = get_status_PCICCcount();
1902 pstat->requestqWaitCount = get_status_requestq_count();
1903 pstat->pendingqWaitCount = get_status_pendingq_count();
1904 pstat->totalOpenCount = get_status_totalopen_count();
1905 pstat->cryptoDomain = get_status_domain_index();
1906 get_status_status_mask(pstat->status);
1907 get_status_qdepth_mask(pstat->qdepth);
1909 if (copy_to_user((struct ica_z90_status __user *) arg, pstat,
1910 sizeof(struct ica_z90_status)) != 0)
1911 ret = -EFAULT;
1912 kfree(pstat);
1913 break;
1915 /* THIS IS DEPRECATED. USE THE NEW STATUS CALLS */
1916 case Z90STAT_PCIXCCCOUNT:
1917 if (deprecated_msg_count2 < 20) {
1918 PRINTK("deprecated ioctl (Z90STAT_PCIXCCCOUNT)!\n");
1919 deprecated_msg_count2++;
1920 if (deprecated_msg_count2 == 20)
1921 PRINTK("No longer issuing messages about depre"
1922 "cated ioctl Z90STAT_PCIXCCCOUNT.\n");
1925 tempstat = get_status_PCIXCCcount();
/* NOTE(review): cast below lacks the __user annotation the other
 * copy_to_user() calls use — sparse will flag this; confirm and fix. */
1926 if (copy_to_user((int *)arg, &tempstat, sizeof(int)) != 0)
1927 ret = -EFAULT;
1928 break;
1930 case Z90QUIESCE:
1931 if (current->euid != 0) {
1932 PRINTK("QUIESCE fails: euid %d\n",
1933 current->euid);
1934 ret = -EACCES;
1935 } else {
1936 PRINTK("QUIESCE device from PID %d\n", PID());
1937 quiesce_z90crypt = 1;
1939 break;
1941 default:
1942 /* user passed an invalid IOCTL number */
1943 PDEBUG("cmd 0x%08X contains invalid ioctl code\n", cmd);
1944 ret = -ENOTTY;
1945 break;
1948 return ret;
/*
 * sprintcl - render "len" bytes as hex digits followed by one space.
 * Returns the number of characters written to outaddr.
 */
static inline int
sprintcl(unsigned char *outaddr, unsigned char *addr, unsigned int len)
{
	unsigned int pos;
	int written = 0;

	/* "%01x": at least one hex digit per byte, no zero padding. */
	for (pos = 0; pos < len; pos++)
		written += sprintf((char *)outaddr + written, "%01x",
				   (unsigned int) addr[pos]);
	written += sprintf((char *)outaddr + written, " ");
	return written;
}
/*
 * sprintrw - render one row of up to "len" bytes: a leading space, the
 * bytes in 16-byte clusters via sprintcl(), and a trailing newline.
 * Returns the number of characters written to outaddr.
 */
static inline int
sprintrw(unsigned char *outaddr, unsigned char *addr, unsigned int len)
{
	unsigned int clusters, leftover, off;
	int written;

	written = sprintf((char *)outaddr, " ");
	off = 0;
	for (clusters = len / 16; clusters > 0; clusters--) {
		written += sprintcl(outaddr + written, addr + off, 16);
		off += 16;
	}
	leftover = len % 16;
	if (leftover) {
		written += sprintcl(outaddr + written, addr + off, leftover);
		off += leftover;
	}
	written += sprintf((char *)outaddr + written, "\n");
	return written;
}
/*
 * sprinthx - render a titled hex dump: the title on its own line, the
 * data in 64-byte rows via sprintrw(), then a blank line.  Returns the
 * number of characters written to outaddr.
 */
static inline int
sprinthx(unsigned char *title, unsigned char *outaddr,
	 unsigned char *addr, unsigned int len)
{
	unsigned int rows, tail, off;
	int written;

	written = sprintf((char *)outaddr, "\n%s\n", title);
	off = 0;
	for (rows = len / 64; rows > 0; rows--) {
		written += sprintrw(outaddr + written, addr + off, 64);
		off += 64;
	}
	tail = len % 64;
	if (tail) {
		written += sprintrw(outaddr + written, addr + off, tail);
		off += tail;
	}
	written += sprintf((char *)outaddr + written, "\n");
	return written;
}
/*
 * sprinthx4 - render a titled dump of 32-bit words, eight "%08X " words
 * per line.  Returns the number of characters written to outaddr.
 */
static inline int
sprinthx4(unsigned char *title, unsigned char *outaddr,
	  unsigned int *array, unsigned int len)
{
	unsigned int idx;
	int written;

	written = sprintf((char *)outaddr, "\n%s\n", title);
	for (idx = 0; idx < len; idx++) {
		if (idx % 8 == 0)
			written += sprintf((char *)outaddr + written, " ");
		written += sprintf((char *)outaddr + written, "%08X ",
				   array[idx]);
		if (idx % 8 == 7)
			written += sprintf((char *)outaddr + written, "\n");
	}
	written += sprintf((char *)outaddr + written, "\n");
	return written;
}
/*
 * z90crypt_status - procfs read handler producing the driver's status
 * report: version, domain, per-type device counts, queue depths, and
 * three hex-dump sections (online mask, work element counts, per-device
 * request counts).  resp_buff is one page; its upper half (offset 2000)
 * is used as scratch for the mask/count helpers.  Returns the number of
 * bytes written and sets *eof.
 */
2031 static int
2032 z90crypt_status(char *resp_buff, char **start, off_t offset,
2033 int count, int *eof, void *data)
2035 unsigned char *workarea;
2036 int len;
2038 /* resp_buff is a page. Use the right half for a work area */
2039 workarea = resp_buff+2000;
2040 len = 0;
2041 len += sprintf(resp_buff+len, "\nz90crypt version: %d.%d.%d\n",
2042 z90crypt_VERSION, z90crypt_RELEASE, z90crypt_VARIANT);
2043 len += sprintf(resp_buff+len, "Cryptographic domain: %d\n",
2044 get_status_domain_index());
2045 len += sprintf(resp_buff+len, "Total device count: %d\n",
2046 get_status_totalcount());
2047 len += sprintf(resp_buff+len, "PCICA count: %d\n",
2048 get_status_PCICAcount());
2049 len += sprintf(resp_buff+len, "PCICC count: %d\n",
2050 get_status_PCICCcount());
2051 len += sprintf(resp_buff+len, "PCIXCC MCL2 count: %d\n",
2052 get_status_PCIXCCMCL2count());
2053 len += sprintf(resp_buff+len, "PCIXCC MCL3 count: %d\n",
2054 get_status_PCIXCCMCL3count());
2055 len += sprintf(resp_buff+len, "CEX2C count: %d\n",
2056 get_status_CEX2Ccount());
2057 len += sprintf(resp_buff+len, "CEX2A count: %d\n",
2058 get_status_CEX2Acount());
2059 len += sprintf(resp_buff+len, "requestq count: %d\n",
2060 get_status_requestq_count());
2061 len += sprintf(resp_buff+len, "pendingq count: %d\n",
2062 get_status_pendingq_count());
2063 len += sprintf(resp_buff+len, "Total open handles: %d\n\n",
2064 get_status_totalopen_count());
2065 len += sprinthx(
2066 "Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) "
2067 "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A",
2068 resp_buff+len,
2069 get_status_status_mask(workarea),
2070 Z90CRYPT_NUM_APS);
2071 len += sprinthx("Waiting work element counts",
2072 resp_buff+len,
2073 get_status_qdepth_mask(workarea),
2074 Z90CRYPT_NUM_APS);
2075 len += sprinthx4(
2076 "Per-device successfully completed request counts",
2077 resp_buff+len,
2078 get_status_perdevice_reqcnt((unsigned int *)workarea),
2079 Z90CRYPT_NUM_APS);
2080 *eof = 1;
2081 memset(workarea, 0, Z90CRYPT_NUM_APS * sizeof(unsigned int));
2082 return len;
/*
 * disable_card - mark a card user-disabled and bump the global and
 * per-type disabled counters.  A no-op if the slot is empty or already
 * user-disabled; the per-type counter is skipped for an unknown type.
 */
2085 static inline void
2086 disable_card(int card_index)
2088 struct device *devp;
2090 devp = LONG2DEVPTR(card_index);
2091 if (!devp || devp->user_disabled)
2092 return;
2093 devp->user_disabled = 1;
2094 z90crypt.hdware_info->hdware_mask.user_disabled_count++;
2095 if (devp->dev_type == -1)
2096 return;
2097 z90crypt.hdware_info->type_mask[devp->dev_type].user_disabled_count++;
/*
 * enable_card - clear a card's user-disabled flag and decrement the
 * global and per-type disabled counters.  Mirror image of
 * disable_card(); a no-op unless the slot exists and is user-disabled.
 */
2100 static inline void
2101 enable_card(int card_index)
2103 struct device *devp;
2105 devp = LONG2DEVPTR(card_index);
2106 if (!devp || !devp->user_disabled)
2107 return;
2108 devp->user_disabled = 0;
2109 z90crypt.hdware_info->hdware_mask.user_disabled_count--;
2110 if (devp->dev_type == -1)
2111 return;
2112 z90crypt.hdware_info->type_mask[devp->dev_type].user_disabled_count--;
2115 static int
2116 z90crypt_status_write(struct file *file, const char __user *buffer,
2117 unsigned long count, void *data)
2119 int j, eol;
2120 unsigned char *lbuf, *ptr;
2121 unsigned int local_count;
2123 #define LBUFSIZE 1200
2124 lbuf = kmalloc(LBUFSIZE, GFP_KERNEL);
2125 if (!lbuf) {
2126 PRINTK("kmalloc failed!\n");
2127 return 0;
2130 if (count <= 0)
2131 return 0;
2133 local_count = UMIN((unsigned int)count, LBUFSIZE-1);
2135 if (copy_from_user(lbuf, buffer, local_count) != 0) {
2136 kfree(lbuf);
2137 return -EFAULT;
2140 lbuf[local_count] = '\0';
2142 ptr = strstr(lbuf, "Online devices");
2143 if (ptr == 0) {
2144 PRINTK("Unable to parse data (missing \"Online devices\")\n");
2145 kfree(lbuf);
2146 return count;
2149 ptr = strstr(ptr, "\n");
2150 if (ptr == 0) {
2151 PRINTK("Unable to parse data (missing newline after \"Online devices\")\n");
2152 kfree(lbuf);
2153 return count;
2155 ptr++;
2157 if (strstr(ptr, "Waiting work element counts") == NULL) {
2158 PRINTK("Unable to parse data (missing \"Waiting work element counts\")\n");
2159 kfree(lbuf);
2160 return count;
2163 j = 0;
2164 eol = 0;
2165 while ((j < 64) && (*ptr != '\0')) {
2166 switch (*ptr) {
2167 case '\t':
2168 case ' ':
2169 break;
2170 case '\n':
2171 default:
2172 eol = 1;
2173 break;
2174 case '0': // no device
2175 case '1': // PCICA
2176 case '2': // PCICC
2177 case '3': // PCIXCC_MCL2
2178 case '4': // PCIXCC_MCL3
2179 case '5': // CEX2C
2180 case '6': // CEX2A
2181 j++;
2182 break;
2183 case 'd':
2184 case 'D':
2185 disable_card(j);
2186 j++;
2187 break;
2188 case 'e':
2189 case 'E':
2190 enable_card(j);
2191 j++;
2192 break;
2194 if (eol)
2195 break;
2196 ptr++;
2199 kfree(lbuf);
2200 return count;
2204 * Functions that run under a timer, with no process id
2206 * The task functions:
2207 * z90crypt_reader_task
2208 * helper_send_work
2209 * helper_handle_work_element
2210 * helper_receive_rc
2211 * z90crypt_config_task
2212 * z90crypt_cleanup_task
2214 * Helper functions:
2215 * z90crypt_schedule_reader_timer
2216 * z90crypt_schedule_reader_task
2217 * z90crypt_schedule_config_task
2218 * z90crypt_schedule_cleanup_task
/**
 * Dequeue one reply from the crypto device at @index.
 *
 * On success (return 0): the request's PSMID is copied into @psmid, the
 * converted response is left in @buff with its length in *@buff_len_p,
 * and *@dest_p_p is pointed at the user-space output buffer recorded in
 * the matching caller's ica_rsa_modexpo request.
 *
 * Returns 0 or a REC_* code; REC_NO_RESPONSE additionally removes the
 * device from availability. Caller is expected to hold queuespinlock
 * (called from z90crypt_reader_task under the lock).
 */
static inline int
receive_from_crypto_device(int index, unsigned char *psmid, int *buff_len_p,
			   unsigned char *buff, unsigned char __user **dest_p_p)
	int dv, rv;
	struct device *dev_ptr;
	struct caller *caller_p;
	struct ica_rsa_modexpo *icaMsg_p;
	struct list_head *ptr, *tptr;
	/* Start with the null PSMID in case we bail out early. */
	memcpy(psmid, NULL_psmid, sizeof(NULL_psmid));
	if (z90crypt.terminating)
		return REC_FATAL_ERROR;
	caller_p = 0;
	dev_ptr = z90crypt.device_p[index];
	rv = 0;
	/* do { } while (0) used as a single-exit scope for the error cases */
	do {
		if (!dev_ptr || dev_ptr->disabled) {
			rv = REC_NO_WORK; // a disabled device can't return work
			break;
		if (dev_ptr->dev_self_x != index) {
			/* Device table corruption is unrecoverable. */
			PRINTKC("Corrupt dev ptr\n");
			z90crypt.terminating = 1;
			rv = REC_FATAL_ERROR;
			break;
		if (!dev_ptr->dev_resp_l || !dev_ptr->dev_resp_p) {
			/* No response buffer: treat as a receive exception. */
			dv = DEV_REC_EXCEPTION;
			PRINTK("dev_resp_l = %d, dev_resp_p = %p\n",
			       dev_ptr->dev_resp_l, dev_ptr->dev_resp_p);
		} else {
			PDEBUG("Dequeue called for device %d\n", index);
			dv = receive_from_AP(index, z90crypt.cdx,
					     dev_ptr->dev_resp_l,
					     dev_ptr->dev_resp_p, psmid);
		/* Map the hardware (DEV_*) status onto a REC_* result. */
		switch (dv) {
		case DEV_REC_EXCEPTION:
			rv = REC_FATAL_ERROR;
			z90crypt.terminating = 1;
			PRINTKC("Exception in receive from device %d\n",
				index);
			break;
		case DEV_ONLINE:
			rv = 0;
			break;
		case DEV_EMPTY:
			rv = REC_EMPTY;
			break;
		case DEV_NO_WORK:
			rv = REC_NO_WORK;
			break;
		case DEV_BAD_MESSAGE:
		case DEV_GONE:
		case REC_HARDWAR_ERR:
		default:
			rv = REC_NO_RESPONSE;
			break;
		if (rv)
			break;
		if (dev_ptr->dev_caller_count <= 0) {
			rv = REC_USER_GONE;
			break;
		/* Find the caller whose PSMID matches this reply. */
		list_for_each_safe(ptr, tptr, &dev_ptr->dev_caller_list) {
			caller_p = list_entry(ptr, struct caller, caller_liste);
			if (!memcmp(caller_p->caller_id, psmid,
				    sizeof(caller_p->caller_id))) {
				if (!list_empty(&caller_p->caller_liste)) {
					list_del_init(ptr);
					dev_ptr->dev_caller_count--;
					break;
			caller_p = 0;
		if (!caller_p) {
			/* Reply with no matching request: requester is gone. */
			PRINTKW("Unable to locate PSMID %02X%02X%02X%02X%02X"
				"%02X%02X%02X in device list\n",
				psmid[0], psmid[1], psmid[2], psmid[3],
				psmid[4], psmid[5], psmid[6], psmid[7]);
			rv = REC_USER_GONE;
			break;
		PDEBUG("caller_p after successful receive: %p\n", caller_p);
		rv = convert_response(dev_ptr->dev_resp_p,
				      caller_p->caller_buf_p, buff_len_p, buff);
		switch (rv) {
		case REC_USE_PCICA:
			break;
		case REC_OPERAND_INV:
		case REC_OPERAND_SIZE:
		case REC_EVEN_MOD:
		case REC_INVALID_PAD:
			/* Caller-supplied data was bad; not a device fault. */
			PDEBUG("device %d: 'user error' %d\n", index, rv);
			break;
		case WRONG_DEVICE_TYPE:
		case REC_HARDWAR_ERR:
		case REC_BAD_MESSAGE:
			PRINTKW("device %d: hardware error %d\n", index, rv);
			rv = REC_NO_RESPONSE;
			break;
		default:
			PDEBUG("device %d: rv = %d\n", index, rv);
			break;
	} while (0);
	switch (rv) {
	case 0:
		PDEBUG("Successful receive from device %d\n", index);
		icaMsg_p = (struct ica_rsa_modexpo *)caller_p->caller_buf_p;
		*dest_p_p = icaMsg_p->outputdata;
		if (*buff_len_p == 0)
			PRINTK("Zero *buff_len_p\n");
		break;
	case REC_NO_RESPONSE:
		/* Hardware-level failure: take the device out of service. */
		PRINTKW("Removing device %d from availability\n", index);
		remove_device(dev_ptr);
		break;
	if (caller_p)
		unbuild_caller(dev_ptr, caller_p);
	return rv;
/**
 * Pop the oldest request off request_list and send it to the device at
 * short-index @index.
 *
 * On successful send the element moves to pending_list with its send
 * timestamp recorded. On any failure (wrong device type, send error)
 * the element is failed and its waiter woken: -EINVAL for operand
 * errors, -ERESTARTSYS if other devices remain (so the syscall can
 * retry on one of them), else -ENODEV.
 *
 * Caller holds queuespinlock (called from z90crypt_reader_task).
 */
static inline void
helper_send_work(int index)
	struct work_element *rq_p;
	int rv;
	if (list_empty(&request_list))
		return;
	requestq_count--;
	rq_p = list_entry(request_list.next, struct work_element, liste);
	list_del_init(&rq_p->liste);
	rq_p->audit[1] |= FP_REMREQUEST;
	if (rq_p->devtype == SHRT2DEVPTR(index)->dev_type) {
		rq_p->devindex = SHRT2LONG(index);
		rv = send_to_crypto_device(rq_p);
		if (rv == 0) {
			rq_p->requestsent = jiffies;
			rq_p->audit[0] |= FP_SENT;
			list_add_tail(&rq_p->liste, &pending_list);
			++pendingq_count;
			rq_p->audit[0] |= FP_PENDING;
		} else {
			switch (rv) {
			case REC_OPERAND_INV:
			case REC_OPERAND_SIZE:
			case REC_EVEN_MOD:
			case REC_INVALID_PAD:
				/* bad user data, not a device problem */
				rq_p->retcode = -EINVAL;
				break;
			case SEN_NOT_AVAIL:
			case SEN_RETRY:
			case REC_NO_RESPONSE:
			default:
				if (z90crypt.mask.st_count > 1)
					rq_p->retcode =
						-ERESTARTSYS;
				else
					rq_p->retcode = -ENODEV;
				break;
			rq_p->status[0] |= STAT_FAILED;
			rq_p->audit[1] |= FP_AWAKENING;
			atomic_set(&rq_p->alarmrung, 1);
			wake_up(&rq_p->waitq);
	} else {
		/* Element was meant for a different device type. */
		if (z90crypt.mask.st_count > 1)
			rq_p->retcode = -ERESTARTSYS;
		else
			rq_p->retcode = -ENODEV;
		rq_p->status[0] |= STAT_FAILED;
		rq_p->audit[1] |= FP_AWAKENING;
		atomic_set(&rq_p->alarmrung, 1);
		wake_up(&rq_p->waitq);
/**
 * Complete the pending work element whose caller_id matches @psmid.
 *
 * Looks the element up on pending_list; if found, records the outcome
 * @rc (copying @buff of @buff_len and the user destination @resp_addr
 * on success) and wakes the waiting requester — unless the element was
 * already released (-ERELEASED), in which case nobody is waiting.
 *
 * Caller holds queuespinlock (called from z90crypt_reader_task).
 */
static inline void
helper_handle_work_element(int index, unsigned char psmid[8], int rc,
			   int buff_len, unsigned char *buff,
			   unsigned char __user *resp_addr)
	struct work_element *pq_p;
	struct list_head *lptr, *tptr;
	pq_p = 0;
	/* Find and unlink the matching element; pq_p reset if no match. */
	list_for_each_safe(lptr, tptr, &pending_list) {
		pq_p = list_entry(lptr, struct work_element, liste);
		if (!memcmp(pq_p->caller_id, psmid, sizeof(pq_p->caller_id))) {
			list_del_init(lptr);
			pendingq_count--;
			pq_p->audit[1] |= FP_NOTPENDING;
			break;
		pq_p = 0;
	if (!pq_p) {
		PRINTK("device %d has work but no caller exists on pending Q\n",
		       SHRT2LONG(index));
		return;
	switch (rc) {
	case 0:
		pq_p->resp_buff_size = buff_len;
		pq_p->audit[1] |= FP_RESPSIZESET;
		if (buff_len) {
			pq_p->resp_addr = resp_addr;
			pq_p->audit[1] |= FP_RESPADDRCOPIED;
			memcpy(pq_p->resp_buff, buff, buff_len);
			pq_p->audit[1] |= FP_RESPBUFFCOPIED;
		break;
	case REC_OPERAND_INV:
	case REC_OPERAND_SIZE:
	case REC_EVEN_MOD:
	case REC_INVALID_PAD:
		PDEBUG("-EINVAL after application error %d\n", rc);
		pq_p->retcode = -EINVAL;
		pq_p->status[0] |= STAT_FAILED;
		break;
	case REC_USE_PCICA:
		/* Retry the syscall so it can be routed to a PCICA card. */
		pq_p->retcode = -ERESTARTSYS;
		pq_p->status[0] |= STAT_FAILED;
		break;
	case REC_NO_RESPONSE:
	default:
		if (z90crypt.mask.st_count > 1)
			pq_p->retcode = -ERESTARTSYS;
		else
			pq_p->retcode = -ENODEV;
		pq_p->status[0] |= STAT_FAILED;
		break;
	/* Don't wake a requester that already abandoned the element. */
	if ((pq_p->status[0] != STAT_FAILED) || (pq_p->retcode != -ERELEASED)) {
		pq_p->audit[1] |= FP_AWAKENING;
		atomic_set(&pq_p->alarmrung, 1);
		wake_up(&pq_p->waitq);
2477 * return TRUE if the work element should be removed from the queue
2479 static inline int
2480 helper_receive_rc(int index, int *rc_p)
2482 switch (*rc_p) {
2483 case 0:
2484 case REC_OPERAND_INV:
2485 case REC_OPERAND_SIZE:
2486 case REC_EVEN_MOD:
2487 case REC_INVALID_PAD:
2488 case REC_USE_PCICA:
2489 break;
2491 case REC_BUSY:
2492 case REC_NO_WORK:
2493 case REC_EMPTY:
2494 case REC_RETRY_DEV:
2495 case REC_FATAL_ERROR:
2496 return 0;
2498 case REC_NO_RESPONSE:
2499 break;
2501 default:
2502 PRINTK("rc %d, device %d converted to REC_NO_RESPONSE\n",
2503 *rc_p, SHRT2LONG(index));
2504 *rc_p = REC_NO_RESPONSE;
2505 break;
2507 return 1;
2510 static inline void
2511 z90crypt_schedule_reader_timer(void)
2513 if (timer_pending(&reader_timer))
2514 return;
2515 if (mod_timer(&reader_timer, jiffies+(READERTIME*HZ/1000)) != 0)
2516 PRINTK("Timer pending while modifying reader timer\n");
/**
 * Tasklet body: drain replies from all devices and feed them new work.
 *
 * For each device (round robin) it dequeues one reply, completes the
 * matching work element, and — when the dequeue produced a final
 * result — sends the device the next queued request. Reschedules the
 * reader timer if any work remains queued or pending.
 */
static void
z90crypt_reader_task(unsigned long ptr)
	int workavail, index, rc, buff_len;
	unsigned char psmid[8];
	unsigned char __user *resp_addr;
	static unsigned char buff[1024];
	/*
	 * we use workavail = 2 to ensure 2 passes with nothing dequeued before
	 * exiting the loop. If (pendingq_count+requestq_count) == 0 after the
	 * loop, there is no work remaining on the queues.
	 */
	resp_addr = 0;
	workavail = 2;
	buff_len = 0;
	while (workavail) {
		workavail--;
		rc = 0;
		spin_lock_irq(&queuespinlock);
		memset(buff, 0x00, sizeof(buff));
		/* Dequeue once from each device in round robin. */
		for (index = 0; index < z90crypt.mask.st_count; index++) {
			PDEBUG("About to receive.\n");
			rc = receive_from_crypto_device(SHRT2LONG(index),
							psmid,
							&buff_len,
							buff,
							&resp_addr);
			PDEBUG("Dequeued: rc = %d.\n", rc);
			/* Final result? Complete it and refill the device. */
			if (helper_receive_rc(index, &rc)) {
				if (rc != REC_NO_RESPONSE) {
					helper_send_work(index);
					workavail = 2;
				helper_handle_work_element(index, psmid, rc,
							   buff_len, buff,
							   resp_addr);
			if (rc == REC_FATAL_ERROR)
				PRINTKW("REC_FATAL_ERROR from device %d!\n",
					SHRT2LONG(index));
		spin_unlock_irq(&queuespinlock);
	if (pendingq_count + requestq_count)
		z90crypt_schedule_reader_timer();
2573 static inline void
2574 z90crypt_schedule_config_task(unsigned int expiration)
2576 if (timer_pending(&config_timer))
2577 return;
2578 if (mod_timer(&config_timer, jiffies+(expiration*HZ)) != 0)
2579 PRINTK("Timer pending while modifying config timer\n");
2582 static void
2583 z90crypt_config_task(unsigned long ptr)
2585 int rc;
2587 PDEBUG("jiffies %ld\n", jiffies);
2589 if ((rc = refresh_z90crypt(&z90crypt.cdx)))
2590 PRINTK("Error %d detected in refresh_z90crypt.\n", rc);
2591 /* If return was fatal, don't bother reconfiguring */
2592 if ((rc != TSQ_FATAL_ERROR) && (rc != RSQ_FATAL_ERROR))
2593 z90crypt_schedule_config_task(CONFIGTIME);
2596 static inline void
2597 z90crypt_schedule_cleanup_task(void)
2599 if (timer_pending(&cleanup_timer))
2600 return;
2601 if (mod_timer(&cleanup_timer, jiffies+(CLEANUPTIME*HZ)) != 0)
2602 PRINTK("Timer pending while modifying cleanup timer\n");
/**
 * Fail and flush every element on both queues with -ENODEV.
 *
 * Used when no devices remain: pending elements are detached from
 * their device caller lists (unbuild_caller) and every waiter is
 * woken. Caller holds queuespinlock (see z90crypt_cleanup_task).
 */
static inline void
helper_drain_queues(void)
	struct work_element *pq_p;
	struct list_head *lptr, *tptr;
	list_for_each_safe(lptr, tptr, &pending_list) {
		pq_p = list_entry(lptr, struct work_element, liste);
		pq_p->retcode = -ENODEV;
		pq_p->status[0] |= STAT_FAILED;
		unbuild_caller(LONG2DEVPTR(pq_p->devindex),
			       (struct caller *)pq_p->requestptr);
		list_del_init(lptr);
		pendingq_count--;
		pq_p->audit[1] |= FP_NOTPENDING;
		pq_p->audit[1] |= FP_AWAKENING;
		atomic_set(&pq_p->alarmrung, 1);
		wake_up(&pq_p->waitq);
	/* Request-queue elements were never sent, so no unbuild needed. */
	list_for_each_safe(lptr, tptr, &request_list) {
		pq_p = list_entry(lptr, struct work_element, liste);
		pq_p->retcode = -ENODEV;
		pq_p->status[0] |= STAT_FAILED;
		list_del_init(lptr);
		requestq_count--;
		pq_p->audit[1] |= FP_REMREQUEST;
		pq_p->audit[1] |= FP_AWAKENING;
		atomic_set(&pq_p->alarmrung, 1);
		wake_up(&pq_p->waitq);
/**
 * Expire queued work older than CLEANUPTIME seconds.
 *
 * Both lists are kept in submission order, so scanning stops at the
 * first element younger than the limit. Pending elements are also
 * detached from their device's caller list. Request-queue elements
 * are only purged when nothing is pending — otherwise they may still
 * get their turn. Caller holds queuespinlock.
 */
static inline void
helper_timeout_requests(void)
	struct work_element *pq_p;
	struct list_head *lptr, *tptr;
	long timelimit;
	timelimit = jiffies - (CLEANUPTIME * HZ);
	/* The list is in strict chronological order */
	list_for_each_safe(lptr, tptr, &pending_list) {
		pq_p = list_entry(lptr, struct work_element, liste);
		if (pq_p->requestsent >= timelimit)
			break;
		PRINTKW("Purging(PQ) PSMID %02X%02X%02X%02X%02X%02X%02X%02X\n",
			((struct caller *)pq_p->requestptr)->caller_id[0],
			((struct caller *)pq_p->requestptr)->caller_id[1],
			((struct caller *)pq_p->requestptr)->caller_id[2],
			((struct caller *)pq_p->requestptr)->caller_id[3],
			((struct caller *)pq_p->requestptr)->caller_id[4],
			((struct caller *)pq_p->requestptr)->caller_id[5],
			((struct caller *)pq_p->requestptr)->caller_id[6],
			((struct caller *)pq_p->requestptr)->caller_id[7]);
		/* NOTE(review): -ETIMEOUT is not a standard errno; presumably
		 * defined in the z90crypt headers — verify (vs -ETIMEDOUT). */
		pq_p->retcode = -ETIMEOUT;
		pq_p->status[0] |= STAT_FAILED;
		/* get this off any caller queue it may be on */
		unbuild_caller(LONG2DEVPTR(pq_p->devindex),
			       (struct caller *) pq_p->requestptr);
		list_del_init(lptr);
		pendingq_count--;
		pq_p->audit[1] |= FP_TIMEDOUT;
		pq_p->audit[1] |= FP_NOTPENDING;
		pq_p->audit[1] |= FP_AWAKENING;
		atomic_set(&pq_p->alarmrung, 1);
		wake_up(&pq_p->waitq);
	/*
	 * If pending count is zero, items left on the request queue may
	 * never be processed.
	 */
	if (pendingq_count <= 0) {
		list_for_each_safe(lptr, tptr, &request_list) {
			pq_p = list_entry(lptr, struct work_element, liste);
			if (pq_p->requestsent >= timelimit)
				break;
			PRINTKW("Purging(RQ) PSMID %02X%02X%02X%02X%02X%02X%02X%02X\n",
				((struct caller *)pq_p->requestptr)->caller_id[0],
				((struct caller *)pq_p->requestptr)->caller_id[1],
				((struct caller *)pq_p->requestptr)->caller_id[2],
				((struct caller *)pq_p->requestptr)->caller_id[3],
				((struct caller *)pq_p->requestptr)->caller_id[4],
				((struct caller *)pq_p->requestptr)->caller_id[5],
				((struct caller *)pq_p->requestptr)->caller_id[6],
				((struct caller *)pq_p->requestptr)->caller_id[7]);
			pq_p->retcode = -ETIMEOUT;
			pq_p->status[0] |= STAT_FAILED;
			list_del_init(lptr);
			requestq_count--;
			pq_p->audit[1] |= FP_TIMEDOUT;
			pq_p->audit[1] |= FP_REMREQUEST;
			pq_p->audit[1] |= FP_AWAKENING;
			atomic_set(&pq_p->alarmrung, 1);
			wake_up(&pq_p->waitq);
2705 static void
2706 z90crypt_cleanup_task(unsigned long ptr)
2708 PDEBUG("jiffies %ld\n", jiffies);
2709 spin_lock_irq(&queuespinlock);
2710 if (z90crypt.mask.st_count <= 0) // no devices!
2711 helper_drain_queues();
2712 else
2713 helper_timeout_requests();
2714 spin_unlock_irq(&queuespinlock);
2715 z90crypt_schedule_cleanup_task();
/* Timer callback: defer the actual queue processing to the reader tasklet. */
static void
z90crypt_schedule_reader_task(unsigned long ptr)
	tasklet_schedule(&reader_tasklet);
2725 * Lowlevel Functions:
2727 * create_z90crypt: creates and initializes basic data structures
2728 * refresh_z90crypt: re-initializes basic data structures
2729 * find_crypto_devices: returns a count and mask of hardware status
2730 * create_crypto_device: builds the descriptor for a device
2731 * destroy_crypto_device: unallocates the descriptor for a device
2732 * destroy_z90crypt: drains all work, unallocates structs
2736 * build the z90crypt root structure using the given domain index
2738 static int
2739 create_z90crypt(int *cdx_p)
2741 struct hdware_block *hdware_blk_p;
2743 memset(&z90crypt, 0x00, sizeof(struct z90crypt));
2744 z90crypt.domain_established = 0;
2745 z90crypt.len = sizeof(struct z90crypt);
2746 z90crypt.max_count = Z90CRYPT_NUM_DEVS;
2747 z90crypt.cdx = *cdx_p;
2749 hdware_blk_p = (struct hdware_block *)
2750 kmalloc(sizeof(struct hdware_block), GFP_ATOMIC);
2751 if (!hdware_blk_p) {
2752 PDEBUG("kmalloc for hardware block failed\n");
2753 return ENOMEM;
2755 memset(hdware_blk_p, 0x00, sizeof(struct hdware_block));
2756 z90crypt.hdware_info = hdware_blk_p;
2758 return 0;
/**
 * Scan every device for online domains.
 *
 * Fills cdx_array[] (pre-set to -1) with each online domain found on
 * the first responsive device; sets *correct_cdx_found and stops early
 * if the requested domain *cdx_p is among them. Returns the number of
 * domains recorded.
 */
static inline int
helper_scan_devices(int cdx_array[16], int *cdx_p, int *correct_cdx_found)
	enum hdstat hd_stat;
	int q_depth, dev_type;
	int indx, chkdom, numdomains;
	q_depth = dev_type = numdomains = 0;
	/* Initialize all 16 slots to -1 ("no domain"). */
	for (chkdom = 0; chkdom <= 15; cdx_array[chkdom++] = -1);
	for (indx = 0; indx < z90crypt.max_count; indx++) {
		hd_stat = HD_NOT_THERE;
		numdomains = 0;
		for (chkdom = 0; chkdom <= 15; chkdom++) {
			hd_stat = query_online(indx, chkdom, MAX_RESET,
					       &q_depth, &dev_type);
			if (hd_stat == HD_TSQ_EXCEPTION) {
				z90crypt.terminating = 1;
				PRINTKC("exception taken!\n");
				break;
			if (hd_stat == HD_ONLINE) {
				cdx_array[numdomains++] = chkdom;
				if (*cdx_p == chkdom) {
					*correct_cdx_found = 1;
					break;
		/* First device that answered at all decides the result. */
		if ((*correct_cdx_found == 1) || (numdomains != 0))
			break;
		if (z90crypt.terminating)
			break;
	return numdomains;
2797 static inline int
2798 probe_crypto_domain(int *cdx_p)
2800 int cdx_array[16];
2801 char cdx_array_text[53], temp[5];
2802 int correct_cdx_found, numdomains;
2804 correct_cdx_found = 0;
2805 numdomains = helper_scan_devices(cdx_array, cdx_p, &correct_cdx_found);
2807 if (z90crypt.terminating)
2808 return TSQ_FATAL_ERROR;
2810 if (correct_cdx_found)
2811 return 0;
2813 if (numdomains == 0) {
2814 PRINTKW("Unable to find crypto domain: No devices found\n");
2815 return Z90C_NO_DEVICES;
2818 if (numdomains == 1) {
2819 if (*cdx_p == -1) {
2820 *cdx_p = cdx_array[0];
2821 return 0;
2823 PRINTKW("incorrect domain: specified = %d, found = %d\n",
2824 *cdx_p, cdx_array[0]);
2825 return Z90C_INCORRECT_DOMAIN;
2828 numdomains--;
2829 sprintf(cdx_array_text, "%d", cdx_array[numdomains]);
2830 while (numdomains) {
2831 numdomains--;
2832 sprintf(temp, ", %d", cdx_array[numdomains]);
2833 strcat(cdx_array_text, temp);
2836 PRINTKW("ambiguous domain detected: specified = %d, found array = %s\n",
2837 *cdx_p, cdx_array_text);
2838 return Z90C_AMBIGUOUS_DOMAIN;
2841 static int
2842 refresh_z90crypt(int *cdx_p)
2844 int i, j, indx, rv;
2845 static struct status local_mask;
2846 struct device *devPtr;
2847 unsigned char oldStat, newStat;
2848 int return_unchanged;
2850 if (z90crypt.len != sizeof(z90crypt))
2851 return ENOTINIT;
2852 if (z90crypt.terminating)
2853 return TSQ_FATAL_ERROR;
2854 rv = 0;
2855 if (!z90crypt.hdware_info->hdware_mask.st_count &&
2856 !z90crypt.domain_established) {
2857 rv = probe_crypto_domain(cdx_p);
2858 if (z90crypt.terminating)
2859 return TSQ_FATAL_ERROR;
2860 if (rv == Z90C_NO_DEVICES)
2861 return 0; // try later
2862 if (rv)
2863 return rv;
2864 z90crypt.cdx = *cdx_p;
2865 z90crypt.domain_established = 1;
2867 rv = find_crypto_devices(&local_mask);
2868 if (rv) {
2869 PRINTK("find crypto devices returned %d\n", rv);
2870 return rv;
2872 if (!memcmp(&local_mask, &z90crypt.hdware_info->hdware_mask,
2873 sizeof(struct status))) {
2874 return_unchanged = 1;
2875 for (i = 0; i < Z90CRYPT_NUM_TYPES; i++) {
2877 * Check for disabled cards. If any device is marked
2878 * disabled, destroy it.
2880 for (j = 0;
2881 j < z90crypt.hdware_info->type_mask[i].st_count;
2882 j++) {
2883 indx = z90crypt.hdware_info->type_x_addr[i].
2884 device_index[j];
2885 devPtr = z90crypt.device_p[indx];
2886 if (devPtr && devPtr->disabled) {
2887 local_mask.st_mask[indx] = HD_NOT_THERE;
2888 return_unchanged = 0;
2892 if (return_unchanged == 1)
2893 return 0;
2896 spin_lock_irq(&queuespinlock);
2897 for (i = 0; i < z90crypt.max_count; i++) {
2898 oldStat = z90crypt.hdware_info->hdware_mask.st_mask[i];
2899 newStat = local_mask.st_mask[i];
2900 if ((oldStat == HD_ONLINE) && (newStat != HD_ONLINE))
2901 destroy_crypto_device(i);
2902 else if ((oldStat != HD_ONLINE) && (newStat == HD_ONLINE)) {
2903 rv = create_crypto_device(i);
2904 if (rv >= REC_FATAL_ERROR)
2905 return rv;
2906 if (rv != 0) {
2907 local_mask.st_mask[i] = HD_NOT_THERE;
2908 local_mask.st_count--;
2912 memcpy(z90crypt.hdware_info->hdware_mask.st_mask, local_mask.st_mask,
2913 sizeof(local_mask.st_mask));
2914 z90crypt.hdware_info->hdware_mask.st_count = local_mask.st_count;
2915 z90crypt.hdware_info->hdware_mask.disabled_count =
2916 local_mask.disabled_count;
2917 refresh_index_array(&z90crypt.mask, &z90crypt.overall_device_x);
2918 for (i = 0; i < Z90CRYPT_NUM_TYPES; i++)
2919 refresh_index_array(&(z90crypt.hdware_info->type_mask[i]),
2920 &(z90crypt.hdware_info->type_x_addr[i]));
2921 spin_unlock_irq(&queuespinlock);
2923 return rv;
/**
 * Query every possible device slot and record its status in
 * @deviceMask, caching queue depth and device type for online cards.
 *
 * Returns 0, or TSQ_FATAL_ERROR on a TSQ exception or an online card
 * reporting a non-positive queue depth.
 */
static int
find_crypto_devices(struct status *deviceMask)
	int i, q_depth, dev_type;
	enum hdstat hd_stat;
	deviceMask->st_count = 0;
	deviceMask->disabled_count = 0;
	deviceMask->user_disabled_count = 0;
	for (i = 0; i < z90crypt.max_count; i++) {
		hd_stat = query_online(i, z90crypt.cdx, MAX_RESET, &q_depth,
				       &dev_type);
		if (hd_stat == HD_TSQ_EXCEPTION) {
			z90crypt.terminating = 1;
			PRINTKC("Exception during probe for crypto devices\n");
			return TSQ_FATAL_ERROR;
		deviceMask->st_mask[i] = hd_stat;
		if (hd_stat == HD_ONLINE) {
			PDEBUG("Got an online crypto!: %d\n", i);
			PDEBUG("Got a queue depth of %d\n", q_depth);
			PDEBUG("Got a device type of %d\n", dev_type);
			if (q_depth <= 0)
				return TSQ_FATAL_ERROR;
			deviceMask->st_count++;
			z90crypt.q_depth_array[i] = q_depth;
			z90crypt.dev_type_array[i] = dev_type;
	return 0;
2960 static int
2961 refresh_index_array(struct status *status_str, struct device_x *index_array)
2963 int i, count;
2964 enum devstat stat;
2966 i = -1;
2967 count = 0;
2968 do {
2969 stat = status_str->st_mask[++i];
2970 if (stat == DEV_ONLINE)
2971 index_array->device_index[count++] = i;
2972 } while ((i < Z90CRYPT_NUM_DEVS) && (count < status_str->st_count));
2974 return count;
/**
 * Allocate (if needed), reset, type-probe and register the device at
 * @index, updating the global and per-type online masks.
 *
 * Returns 0 on success; ENOMEM (positive, module convention),
 * RSQ_FATAL_ERROR on a reset exception, or a probe error code. All
 * error paths free both allocations.
 */
static int
create_crypto_device(int index)
	int rv, devstat, total_size;
	struct device *dev_ptr;
	struct status *type_str_p;
	int deviceType;
	dev_ptr = z90crypt.device_p[index];
	if (!dev_ptr) {
		/* struct device carries a per-queue-slot int array tail. */
		total_size = sizeof(struct device) +
			     z90crypt.q_depth_array[index] * sizeof(int);
		dev_ptr = (struct device *) kmalloc(total_size, GFP_ATOMIC);
		if (!dev_ptr) {
			PRINTK("kmalloc device %d failed\n", index);
			return ENOMEM;
		memset(dev_ptr, 0, total_size);
		dev_ptr->dev_resp_p = kmalloc(MAX_RESPONSE_SIZE, GFP_ATOMIC);
		if (!dev_ptr->dev_resp_p) {
			kfree(dev_ptr);
			PRINTK("kmalloc device %d rec buffer failed\n", index);
			return ENOMEM;
		dev_ptr->dev_resp_l = MAX_RESPONSE_SIZE;
		INIT_LIST_HEAD(&(dev_ptr->dev_caller_list));
	devstat = reset_device(index, z90crypt.cdx, MAX_RESET);
	if (devstat == DEV_RSQ_EXCEPTION) {
		PRINTK("exception during reset device %d\n", index);
		kfree(dev_ptr->dev_resp_p);
		kfree(dev_ptr);
		return RSQ_FATAL_ERROR;
	if (devstat == DEV_ONLINE) {
		dev_ptr->dev_self_x = index;
		dev_ptr->dev_type = z90crypt.dev_type_array[index];
		/* Unknown type: probe with a test message. */
		if (dev_ptr->dev_type == NILDEV) {
			rv = probe_device_type(dev_ptr);
			if (rv) {
				PRINTK("rv = %d from probe_device_type %d\n",
				       rv, index);
				kfree(dev_ptr->dev_resp_p);
				kfree(dev_ptr);
				return rv;
		/* PCIXCC needs a second probe to tell MCL2 from MCL3. */
		if (dev_ptr->dev_type == PCIXCC_UNK) {
			rv = probe_PCIXCC_type(dev_ptr);
			if (rv) {
				PRINTK("rv = %d from probe_PCIXCC_type %d\n",
				       rv, index);
				kfree(dev_ptr->dev_resp_p);
				kfree(dev_ptr);
				return rv;
		deviceType = dev_ptr->dev_type;
		z90crypt.dev_type_array[index] = deviceType;
		/* Map the internal type to the /proc-visible code 1..6. */
		if (deviceType == PCICA)
			z90crypt.hdware_info->device_type_array[index] = 1;
		else if (deviceType == PCICC)
			z90crypt.hdware_info->device_type_array[index] = 2;
		else if (deviceType == PCIXCC_MCL2)
			z90crypt.hdware_info->device_type_array[index] = 3;
		else if (deviceType == PCIXCC_MCL3)
			z90crypt.hdware_info->device_type_array[index] = 4;
		else if (deviceType == CEX2C)
			z90crypt.hdware_info->device_type_array[index] = 5;
		else if (deviceType == CEX2A)
			z90crypt.hdware_info->device_type_array[index] = 6;
		else // No idea how this would happen.
			z90crypt.hdware_info->device_type_array[index] = -1;
	/*
	 * 'q_depth' returned by the hardware is one less than
	 * the actual depth
	 */
	dev_ptr->dev_q_depth = z90crypt.q_depth_array[index];
	dev_ptr->dev_type = z90crypt.dev_type_array[index];
	dev_ptr->dev_stat = devstat;
	dev_ptr->disabled = 0;
	z90crypt.device_p[index] = dev_ptr;
	if (devstat == DEV_ONLINE) {
		/* Mark online in both the overall and per-type masks. */
		if (z90crypt.mask.st_mask[index] != DEV_ONLINE) {
			z90crypt.mask.st_mask[index] = DEV_ONLINE;
			z90crypt.mask.st_count++;
		deviceType = dev_ptr->dev_type;
		type_str_p = &z90crypt.hdware_info->type_mask[deviceType];
		if (type_str_p->st_mask[index] != DEV_ONLINE) {
			type_str_p->st_mask[index] = DEV_ONLINE;
			type_str_p->st_count++;
	return 0;
/**
 * Free the device struct at @index and clear it from the overall and
 * per-type online masks. Safe to call when no struct exists (only the
 * masks are cleaned). Always returns 0.
 */
static int
destroy_crypto_device(int index)
	struct device *dev_ptr;
	int t, disabledFlag;
	dev_ptr = z90crypt.device_p[index];
	/* remember device type; get rid of device struct */
	if (dev_ptr) {
		disabledFlag = dev_ptr->disabled;
		t = dev_ptr->dev_type;
		kfree(dev_ptr->dev_resp_p);
		kfree(dev_ptr);
	} else {
		disabledFlag = 0;
		t = -1;
	z90crypt.device_p[index] = 0;
	/* if the type is valid, remove the device from the type_mask */
	if ((t != -1) && z90crypt.hdware_info->type_mask[t].st_mask[index]) {
		z90crypt.hdware_info->type_mask[t].st_mask[index] = 0x00;
		z90crypt.hdware_info->type_mask[t].st_count--;
		if (disabledFlag == 1)
			z90crypt.hdware_info->type_mask[t].disabled_count--;
	if (z90crypt.mask.st_mask[index] != DEV_GONE) {
		z90crypt.mask.st_mask[index] = DEV_GONE;
		z90crypt.mask.st_count--;
	z90crypt.hdware_info->device_type_array[index] = 0;
	return 0;
3116 static void
3117 destroy_z90crypt(void)
3119 int i;
3121 for (i = 0; i < z90crypt.max_count; i++)
3122 if (z90crypt.device_p[i])
3123 destroy_crypto_device(i);
3124 kfree(z90crypt.hdware_info);
3125 memset((void *)&z90crypt, 0, sizeof(z90crypt));
/*
 * Canned test request sent by probe_device_type() to distinguish card
 * types: a PCICC-style reply begins 0x00 0x86, anything else is
 * treated as PCICA (see the check on dev_resp_p in probe_device_type).
 */
static unsigned char static_testmsg[384] = {
0x00,0x00,0x00,0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x00,0x06,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x58,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x43,0x43,
0x41,0x2d,0x41,0x50,0x50,0x4c,0x20,0x20,0x20,0x01,0x01,0x01,0x00,0x00,0x00,0x00,
0x50,0x4b,0x00,0x00,0x00,0x00,0x01,0x1c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x05,0xb8,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x70,0x00,0x41,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x54,0x32,
0x01,0x00,0xa0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0xb8,0x05,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x0a,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00,0x49,0x43,0x53,0x46,
0x20,0x20,0x20,0x20,0x50,0x4b,0x0a,0x00,0x50,0x4b,0x43,0x53,0x2d,0x31,0x2e,0x32,
0x37,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,
0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,
0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x5d,0x00,0x5b,0x00,0x77,0x88,0x1e,0x00,0x00,
0x57,0x00,0x00,0x00,0x00,0x04,0x00,0x00,0x4f,0x00,0x00,0x00,0x03,0x02,0x00,0x00,
0x40,0x01,0x00,0x01,0xce,0x02,0x68,0x2d,0x5f,0xa9,0xde,0x0c,0xf6,0xd2,0x7b,0x58,
0x4b,0xf9,0x28,0x68,0x3d,0xb4,0xf4,0xef,0x78,0xd5,0xbe,0x66,0x63,0x42,0xef,0xf8,
0xfd,0xa4,0xf8,0xb0,0x8e,0x29,0xc2,0xc9,0x2e,0xd8,0x45,0xb8,0x53,0x8c,0x6f,0x4e,
0x72,0x8f,0x6c,0x04,0x9c,0x88,0xfc,0x1e,0xc5,0x83,0x55,0x57,0xf7,0xdd,0xfd,0x4f,
0x11,0x36,0x95,0x5d,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
/**
 * Determine whether the untyped device @devPtr is a PCICC or a PCICA
 * by sending static_testmsg and inspecting the reply: a response
 * starting 0x00 0x86 means PCICC, anything else PCICA.
 *
 * Polls for the reply up to 6 times at 300ms intervals. Returns 0 on
 * success (dev_type set), or a SEN_*/REC_* error code; on error the
 * card is not marked online.
 */
static int
probe_device_type(struct device *devPtr)
	int rv, dv, i, index, length;
	unsigned char psmid[8];
	static unsigned char loc_testmsg[sizeof(static_testmsg)];
	index = devPtr->dev_self_x;
	rv = 0;
	do {
		memcpy(loc_testmsg, static_testmsg, sizeof(static_testmsg));
		length = sizeof(static_testmsg) - 24;
		/* the -24 allows for the header */
		dv = send_to_AP(index, z90crypt.cdx, length, loc_testmsg);
		if (dv) {
			PDEBUG("dv returned by send during probe: %d\n", dv);
			if (dv == DEV_SEN_EXCEPTION) {
				rv = SEN_FATAL_ERROR;
				PRINTKC("exception in send to AP %d\n", index);
				break;
			PDEBUG("return value from send_to_AP: %d\n", rv);
			switch (dv) {
			case DEV_GONE:
				PDEBUG("dev %d not available\n", index);
				rv = SEN_NOT_AVAIL;
				break;
			case DEV_ONLINE:
				rv = 0;
				break;
			case DEV_EMPTY:
				rv = SEN_NOT_AVAIL;
				break;
			case DEV_NO_WORK:
				rv = SEN_FATAL_ERROR;
				break;
			case DEV_BAD_MESSAGE:
				rv = SEN_USER_ERROR;
				break;
			case DEV_QUEUE_FULL:
				rv = SEN_QUEUE_FULL;
				break;
			default:
				PRINTK("unknown dv=%d for dev %d\n", dv, index);
				rv = SEN_NOT_AVAIL;
				break;
		if (rv)
			break;
		/* Poll for the reply: up to 6 tries, 300ms apart. */
		for (i = 0; i < 6; i++) {
			mdelay(300);
			dv = receive_from_AP(index, z90crypt.cdx,
					     devPtr->dev_resp_l,
					     devPtr->dev_resp_p, psmid);
			PDEBUG("dv returned by DQ = %d\n", dv);
			if (dv == DEV_REC_EXCEPTION) {
				rv = REC_FATAL_ERROR;
				PRINTKC("exception in dequeue %d\n",
					index);
				break;
			switch (dv) {
			case DEV_ONLINE:
				rv = 0;
				break;
			case DEV_EMPTY:
				rv = REC_EMPTY;
				break;
			case DEV_NO_WORK:
				rv = REC_NO_WORK;
				break;
			case DEV_BAD_MESSAGE:
			case DEV_GONE:
			default:
				rv = REC_NO_RESPONSE;
				break;
			/* Keep polling only while the reply is not ready. */
			if ((rv != 0) && (rv != REC_NO_WORK))
				break;
			if (rv == 0)
				break;
		if (rv)
			break;
		/* Reply signature 0x00 0x86 identifies a PCICC. */
		rv = (devPtr->dev_resp_p[0] == 0x00) &&
		     (devPtr->dev_resp_p[1] == 0x86);
		if (rv)
			devPtr->dev_type = PCICC;
		else
			devPtr->dev_type = PCICA;
		rv = 0;
	} while (0);
	/* In a general error case, the card is not marked online */
	return rv;
/*
 * Canned CPRBX test request sent by probe_PCIXCC_type() to tell MCL2
 * from MCL3 cards: an MCL2 card answers with ccp_rtcode 8 /
 * ccp_rscode 33 (see the CPRBX check in probe_PCIXCC_type).
 */
static unsigned char MCL3_testmsg[] = {
0x00,0x00,0x00,0x00,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,
0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x43,0x41,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x00,0x00,0x00,0x01,0xC4,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x24,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xDC,0x02,0x00,0x00,0x00,0x54,0x32,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xE8,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x24,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x0A,0x4D,0x52,0x50,0x20,0x20,0x20,0x20,0x20,
0x00,0x42,0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x09,0x0A,0x0B,0x0C,0x0D,
0x0E,0x0F,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0xAA,0xBB,0xCC,0xDD,
0xEE,0xFF,0xFF,0xEE,0xDD,0xCC,0xBB,0xAA,0x99,0x88,0x77,0x66,0x55,0x44,0x33,0x22,
0x11,0x00,0x01,0x23,0x45,0x67,0x89,0xAB,0xCD,0xEF,0xFE,0xDC,0xBA,0x98,0x76,0x54,
0x32,0x10,0x00,0x9A,0x00,0x98,0x00,0x00,0x1E,0x00,0x00,0x94,0x00,0x00,0x00,0x00,
0x04,0x00,0x00,0x8C,0x00,0x00,0x00,0x40,0x02,0x00,0x00,0x40,0xBA,0xE8,0x23,0x3C,
0x75,0xF3,0x91,0x61,0xD6,0x73,0x39,0xCF,0x7B,0x6D,0x8E,0x61,0x97,0x63,0x9E,0xD9,
0x60,0x55,0xD6,0xC7,0xEF,0xF8,0x1E,0x63,0x95,0x17,0xCC,0x28,0x45,0x60,0x11,0xC5,
0xC4,0x4E,0x66,0xC6,0xE6,0xC3,0xDE,0x8A,0x19,0x30,0xCF,0x0E,0xD7,0xAA,0xDB,0x01,
0xD8,0x00,0xBB,0x8F,0x39,0x9F,0x64,0x28,0xF5,0x7A,0x77,0x49,0xCC,0x6B,0xA3,0x91,
0x97,0x70,0xE7,0x60,0x1E,0x39,0xE1,0xE5,0x33,0xE1,0x15,0x63,0x69,0x08,0x80,0x4C,
0x67,0xC4,0x41,0x8F,0x48,0xDF,0x26,0x98,0xF1,0xD5,0x8D,0x88,0xD9,0x6A,0xA4,0x96,
0xC5,0x84,0xD9,0x30,0x49,0x67,0x7D,0x19,0xB1,0xB3,0x45,0x4D,0xB2,0x53,0x9A,0x47,
0x3C,0x7C,0x55,0xBF,0xCC,0x85,0x00,0x36,0xF1,0x3D,0x93,0x53
/**
 * Distinguish an MCL2 from an MCL3 PCIXCC card by sending MCL3_testmsg
 * and inspecting the CPRBX in the reply: rtcode 8 / rscode 33 marks an
 * MCL2, anything else an MCL3.
 *
 * Polls for the reply up to 6 times at 300ms intervals. Returns 0 on
 * success (dev_type set), or a SEN_*/REC_* error code; on error the
 * card is not marked online.
 */
static int
probe_PCIXCC_type(struct device *devPtr)
	int rv, dv, i, index, length;
	unsigned char psmid[8];
	static unsigned char loc_testmsg[548];
	struct CPRBX *cprbx_p;
	index = devPtr->dev_self_x;
	rv = 0;
	do {
		memcpy(loc_testmsg, MCL3_testmsg, sizeof(MCL3_testmsg));
		/* the -0x0C allows for the header */
		length = sizeof(MCL3_testmsg) - 0x0C;
		dv = send_to_AP(index, z90crypt.cdx, length, loc_testmsg);
		if (dv) {
			PDEBUG("dv returned = %d\n", dv);
			if (dv == DEV_SEN_EXCEPTION) {
				rv = SEN_FATAL_ERROR;
				PRINTKC("exception in send to AP %d\n", index);
				break;
			PDEBUG("return value from send_to_AP: %d\n", rv);
			switch (dv) {
			case DEV_GONE:
				PDEBUG("dev %d not available\n", index);
				rv = SEN_NOT_AVAIL;
				break;
			case DEV_ONLINE:
				rv = 0;
				break;
			case DEV_EMPTY:
				rv = SEN_NOT_AVAIL;
				break;
			case DEV_NO_WORK:
				rv = SEN_FATAL_ERROR;
				break;
			case DEV_BAD_MESSAGE:
				rv = SEN_USER_ERROR;
				break;
			case DEV_QUEUE_FULL:
				rv = SEN_QUEUE_FULL;
				break;
			default:
				PRINTK("unknown dv=%d for dev %d\n", dv, index);
				rv = SEN_NOT_AVAIL;
				break;
		if (rv)
			break;
		/* Poll for the reply: up to 6 tries, 300ms apart. */
		for (i = 0; i < 6; i++) {
			mdelay(300);
			dv = receive_from_AP(index, z90crypt.cdx,
					     devPtr->dev_resp_l,
					     devPtr->dev_resp_p, psmid);
			PDEBUG("dv returned by DQ = %d\n", dv);
			if (dv == DEV_REC_EXCEPTION) {
				rv = REC_FATAL_ERROR;
				PRINTKC("exception in dequeue %d\n",
					index);
				break;
			switch (dv) {
			case DEV_ONLINE:
				rv = 0;
				break;
			case DEV_EMPTY:
				rv = REC_EMPTY;
				break;
			case DEV_NO_WORK:
				rv = REC_NO_WORK;
				break;
			case DEV_BAD_MESSAGE:
			case DEV_GONE:
			default:
				rv = REC_NO_RESPONSE;
				break;
			/* Keep polling only while the reply is not ready. */
			if ((rv != 0) && (rv != REC_NO_WORK))
				break;
			if (rv == 0)
				break;
		if (rv)
			break;
		/* The CPRBX starts 48 bytes into the response. */
		cprbx_p = (struct CPRBX *) (devPtr->dev_resp_p + 48);
		if ((cprbx_p->ccp_rtcode == 8) && (cprbx_p->ccp_rscode == 33)) {
			devPtr->dev_type = PCIXCC_MCL2;
			PDEBUG("device %d is MCL2\n", index);
		} else {
			devPtr->dev_type = PCIXCC_MCL3;
			PDEBUG("device %d is MCL3\n", index);
	} while (0);
	/* In a general error case, the card is not marked online */
	return rv;
/* Module entry/exit registration (handlers defined earlier in this file). */
module_init(z90crypt_init_module);
module_exit(z90crypt_cleanup_module);