29 #include <linux/module.h>
37 #include <linux/slab.h>
39 #include <asm/uaccess.h>
42 #include <asm/debug.h>
52 "Copyright IBM Corp. 2001, 2012");
/* Number of currently registered zcrypt devices. */
static int zcrypt_device_count = 0;

/* Forward declarations for the hwrng (de)registration helpers below. */
static int zcrypt_rng_device_add(void);
static void zcrypt_rng_device_remove(void);

/* Root dentry of the zcrypt debugfs hierarchy. */
static struct dentry *debugfs_root;
100 if (
sscanf(buf,
"%d\n", &online) != 1 || online < 0 || online > 1)
110 static DEVICE_ATTR(online, 0644, zcrypt_online_show, zcrypt_online_store);
112 static struct attribute * zcrypt_device_attrs[] = {
114 &dev_attr_online.attr,
119 .attrs = zcrypt_device_attrs,
127 static inline int zcrypt_process_rescan(
void)
148 static void __zcrypt_increase_preference(
struct zcrypt_device *zdev)
155 for (l = zdev->
list.prev; l != &zcrypt_device_list; l = l->
prev) {
162 if (l == zdev->
list.prev)
165 list_move(&zdev->
list, l);
176 static void __zcrypt_decrease_preference(
struct zcrypt_device *zdev)
183 for (l = zdev->
list.next; l != &zcrypt_device_list; l = l->
next) {
190 if (l == zdev->
list.next)
193 list_move_tail(&zdev->
list, l);
196 static void zcrypt_device_release(
struct kref *
kref)
211 return kref_put(&zdev->
refcount, zcrypt_device_release);
223 if (!zdev->
reply.message)
225 zdev->
reply.length = max_response_size;
227 INIT_LIST_HEAD(&zdev->
list);
228 zdev->
dbf_area = zcrypt_dbf_devices;
257 &zcrypt_device_attr_group);
262 spin_lock_bh(&zcrypt_device_lock);
267 __zcrypt_increase_preference(zdev);
268 zcrypt_device_count++;
269 spin_unlock_bh(&zcrypt_device_lock);
270 if (zdev->
ops->rng) {
271 rc = zcrypt_rng_device_add();
278 spin_lock_bh(&zcrypt_device_lock);
279 zcrypt_device_count--;
280 list_del_init(&zdev->
list);
281 spin_unlock_bh(&zcrypt_device_lock);
283 &zcrypt_device_attr_group);
300 zcrypt_rng_device_remove();
301 spin_lock_bh(&zcrypt_device_lock);
302 zcrypt_device_count--;
303 list_del_init(&zdev->
list);
304 spin_unlock_bh(&zcrypt_device_lock);
306 &zcrypt_device_attr_group);
315 spin_lock_bh(&zcrypt_ops_list_lock);
317 spin_unlock_bh(&zcrypt_ops_list_lock);
324 spin_lock_bh(&zcrypt_ops_list_lock);
325 list_del_init(&zops->
list);
326 spin_unlock_bh(&zcrypt_ops_list_lock);
336 spin_lock_bh(&zcrypt_ops_list_lock);
338 if ((zops->
variant == variant) &&
344 spin_unlock_bh(&zcrypt_ops_list_lock);
355 zops = __ops_lookup(name, variant);
357 request_module(name);
358 zops = __ops_lookup(name, variant);
360 if ((!zops) || (!try_module_get(zops->
owner)))
369 module_put(zops->
owner);
378 static ssize_t zcrypt_read(
struct file *filp,
char __user *buf,
379 size_t count, loff_t *f_pos)
389 static ssize_t zcrypt_write(
struct file *filp,
const char __user *buf,
390 size_t count, loff_t *f_pos)
434 spin_lock_bh(&zcrypt_device_lock);
437 !zdev->
ops->rsa_modexpo ||
444 __zcrypt_decrease_preference(zdev);
445 if (try_module_get(zdev->
ap_dev->drv->driver.owner)) {
446 spin_unlock_bh(&zcrypt_device_lock);
447 rc = zdev->
ops->rsa_modexpo(zdev, mex);
448 spin_lock_bh(&zcrypt_device_lock);
449 module_put(zdev->
ap_dev->drv->driver.owner);
454 __zcrypt_increase_preference(zdev);
457 spin_unlock_bh(&zcrypt_device_lock);
460 spin_unlock_bh(&zcrypt_device_lock);
467 unsigned long long z1, z2, z3;
482 spin_lock_bh(&zcrypt_device_lock);
485 !zdev->
ops->rsa_modexpo_crt ||
497 spin_unlock_bh(&zcrypt_device_lock);
507 if (len >
sizeof(z1))
522 if (z1 != 0ULL || z2 != 0ULL || z3 != 0ULL)
529 __zcrypt_decrease_preference(zdev);
530 if (try_module_get(zdev->
ap_dev->drv->driver.owner)) {
531 spin_unlock_bh(&zcrypt_device_lock);
532 rc = zdev->
ops->rsa_modexpo_crt(zdev, crt);
533 spin_lock_bh(&zcrypt_device_lock);
534 module_put(zdev->
ap_dev->drv->driver.owner);
539 __zcrypt_increase_preference(zdev);
542 spin_unlock_bh(&zcrypt_device_lock);
545 spin_unlock_bh(&zcrypt_device_lock);
549 static long zcrypt_send_cprb(
struct ica_xcRB *xcRB)
554 spin_lock_bh(&zcrypt_device_lock);
556 if (!zdev->
online || !zdev->
ops->send_cprb ||
564 __zcrypt_decrease_preference(zdev);
565 if (try_module_get(zdev->
ap_dev->drv->driver.owner)) {
566 spin_unlock_bh(&zcrypt_device_lock);
567 rc = zdev->
ops->send_cprb(zdev, xcRB);
568 spin_lock_bh(&zcrypt_device_lock);
569 module_put(zdev->
ap_dev->drv->driver.owner);
574 __zcrypt_increase_preference(zdev);
577 spin_unlock_bh(&zcrypt_device_lock);
580 spin_unlock_bh(&zcrypt_device_lock);
584 static long zcrypt_rng(
char *
buffer)
589 spin_lock_bh(&zcrypt_device_lock);
596 __zcrypt_decrease_preference(zdev);
597 if (try_module_get(zdev->
ap_dev->drv->driver.owner)) {
598 spin_unlock_bh(&zcrypt_device_lock);
599 rc = zdev->
ops->rng(zdev, buffer);
600 spin_lock_bh(&zcrypt_device_lock);
601 module_put(zdev->
ap_dev->drv->driver.owner);
605 __zcrypt_increase_preference(zdev);
608 spin_unlock_bh(&zcrypt_device_lock);
611 spin_unlock_bh(&zcrypt_device_lock);
620 spin_lock_bh(&zcrypt_device_lock);
624 spin_unlock_bh(&zcrypt_device_lock);
627 static
void zcrypt_qdepth_mask(
char qdepth[AP_DEVICES])
631 memset(qdepth, 0,
sizeof(
char) * AP_DEVICES);
632 spin_lock_bh(&zcrypt_device_lock);
634 spin_lock(&zdev->
ap_dev->lock);
636 zdev->
ap_dev->pendingq_count +
637 zdev->
ap_dev->requestq_count;
638 spin_unlock(&zdev->
ap_dev->lock);
640 spin_unlock_bh(&zcrypt_device_lock);
643 static void zcrypt_perdev_reqcnt(
int reqcnt[AP_DEVICES])
647 memset(reqcnt, 0,
sizeof(
int) * AP_DEVICES);
648 spin_lock_bh(&zcrypt_device_lock);
650 spin_lock(&zdev->
ap_dev->lock);
652 zdev->
ap_dev->total_request_count;
653 spin_unlock(&zdev->
ap_dev->lock);
655 spin_unlock_bh(&zcrypt_device_lock);
658 static int zcrypt_pendingq_count(
void)
661 int pendingq_count = 0;
663 spin_lock_bh(&zcrypt_device_lock);
665 spin_lock(&zdev->
ap_dev->lock);
666 pendingq_count += zdev->
ap_dev->pendingq_count;
667 spin_unlock(&zdev->
ap_dev->lock);
669 spin_unlock_bh(&zcrypt_device_lock);
670 return pendingq_count;
673 static int zcrypt_requestq_count(
void)
676 int requestq_count = 0;
678 spin_lock_bh(&zcrypt_device_lock);
680 spin_lock(&zdev->
ap_dev->lock);
681 requestq_count += zdev->
ap_dev->requestq_count;
682 spin_unlock(&zdev->
ap_dev->lock);
684 spin_unlock_bh(&zcrypt_device_lock);
685 return requestq_count;
688 static int zcrypt_count_type(
int type)
691 int device_count = 0;
693 spin_lock_bh(&zcrypt_device_lock);
697 spin_unlock_bh(&zcrypt_device_lock);
706 static
long zcrypt_ica_status(
struct file *filp,
unsigned long arg)
721 zcrypt_status_mask(pstat->
status);
722 zcrypt_qdepth_mask(pstat->
qdepth);
724 if (
copy_to_user((
void __user *) arg, pstat,
sizeof(*pstat)))
730 static long zcrypt_unlocked_ioctl(
struct file *filp,
unsigned int cmd,
742 rc = zcrypt_rsa_modexpo(&mex);
745 if ((rc == -
ENODEV) && (zcrypt_process_rescan()))
747 rc = zcrypt_rsa_modexpo(&mex);
759 rc = zcrypt_rsa_crt(&crt);
762 if ((rc == -
ENODEV) && (zcrypt_process_rescan()))
764 rc = zcrypt_rsa_crt(&crt);
776 rc = zcrypt_send_cprb(&xcRB);
779 if ((rc == -
ENODEV) && (zcrypt_process_rescan()))
781 rc = zcrypt_send_cprb(&xcRB);
789 zcrypt_status_mask(status);
791 sizeof(
char) * AP_DEVICES))
797 zcrypt_qdepth_mask(qdepth);
799 sizeof(
char) * AP_DEVICES))
805 zcrypt_perdev_reqcnt(reqcnt);
807 sizeof(
int) * AP_DEVICES))
812 return put_user(zcrypt_requestq_count(), (
int __user *) arg);
814 return put_user(zcrypt_pendingq_count(), (
int __user *) arg);
826 return zcrypt_ica_status(filp, arg);
828 return put_user(zcrypt_device_count, (
int __user *) arg);
861 struct compat_ica_rsa_modexpo {
863 unsigned int inputdatalength;
865 unsigned int outputdatalength;
870 static long trans_modexpo32(
struct file *filp,
unsigned int cmd,
873 struct compat_ica_rsa_modexpo
__user *umex32 = compat_ptr(arg);
874 struct compat_ica_rsa_modexpo mex32;
880 mex64.inputdata = compat_ptr(mex32.inputdata);
881 mex64.inputdatalength = mex32.inputdatalength;
882 mex64.outputdata = compat_ptr(mex32.outputdata);
883 mex64.outputdatalength = mex32.outputdatalength;
884 mex64.b_key = compat_ptr(mex32.b_key);
885 mex64.n_modulus = compat_ptr(mex32.n_modulus);
887 rc = zcrypt_rsa_modexpo(&mex64);
890 if ((rc == -
ENODEV) && (zcrypt_process_rescan()))
892 rc = zcrypt_rsa_modexpo(&mex64);
896 return put_user(mex64.outputdatalength,
897 &umex32->outputdatalength);
900 struct compat_ica_rsa_modexpo_crt {
902 unsigned int inputdatalength;
904 unsigned int outputdatalength;
912 static long trans_modexpo_crt32(
struct file *filp,
unsigned int cmd,
915 struct compat_ica_rsa_modexpo_crt
__user *ucrt32 = compat_ptr(arg);
916 struct compat_ica_rsa_modexpo_crt crt32;
922 crt64.inputdata = compat_ptr(crt32.inputdata);
923 crt64.inputdatalength = crt32.inputdatalength;
924 crt64.outputdata= compat_ptr(crt32.outputdata);
925 crt64.outputdatalength = crt32.outputdatalength;
926 crt64.bp_key = compat_ptr(crt32.bp_key);
927 crt64.bq_key = compat_ptr(crt32.bq_key);
928 crt64.np_prime = compat_ptr(crt32.np_prime);
929 crt64.nq_prime = compat_ptr(crt32.nq_prime);
930 crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv);
932 rc = zcrypt_rsa_crt(&crt64);
935 if ((rc == -
ENODEV) && (zcrypt_process_rescan()))
937 rc = zcrypt_rsa_crt(&crt64);
941 return put_user(crt64.outputdatalength,
942 &ucrt32->outputdatalength);
945 struct compat_ica_xcRB {
965 static long trans_xcRB32(
struct file *filp,
unsigned int cmd,
968 struct compat_ica_xcRB
__user *uxcRB32 = compat_ptr(arg);
969 struct compat_ica_xcRB xcRB32;
975 xcRB64.agent_ID = xcRB32.agent_ID;
976 xcRB64.user_defined = xcRB32.user_defined;
977 xcRB64.request_ID = xcRB32.request_ID;
978 xcRB64.request_control_blk_length =
979 xcRB32.request_control_blk_length;
980 xcRB64.request_control_blk_addr =
981 compat_ptr(xcRB32.request_control_blk_addr);
982 xcRB64.request_data_length =
983 xcRB32.request_data_length;
984 xcRB64.request_data_address =
985 compat_ptr(xcRB32.request_data_address);
986 xcRB64.reply_control_blk_length =
987 xcRB32.reply_control_blk_length;
988 xcRB64.reply_control_blk_addr =
989 compat_ptr(xcRB32.reply_control_blk_addr);
990 xcRB64.reply_data_length = xcRB32.reply_data_length;
991 xcRB64.reply_data_addr =
992 compat_ptr(xcRB32.reply_data_addr);
993 xcRB64.priority_window = xcRB32.priority_window;
994 xcRB64.status = xcRB32.status;
996 rc = zcrypt_send_cprb(&xcRB64);
999 if ((rc == -
ENODEV) && (zcrypt_process_rescan()))
1001 rc = zcrypt_send_cprb(&xcRB64);
1003 xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length;
1004 xcRB32.reply_data_length = xcRB64.reply_data_length;
1005 xcRB32.status = xcRB64.status;
1011 static long zcrypt_compat_ioctl(
struct file *filp,
unsigned int cmd,
1015 return trans_modexpo32(filp, cmd, arg);
1017 return trans_modexpo_crt32(filp, cmd, arg);
1019 return trans_xcRB32(filp, cmd, arg);
1020 return zcrypt_unlocked_ioctl(filp, cmd, arg);
1029 .read = zcrypt_read,
1030 .write = zcrypt_write,
1031 .unlocked_ioctl = zcrypt_unlocked_ioctl,
1032 #ifdef CONFIG_COMPAT
1033 .compat_ioctl = zcrypt_compat_ioctl,
1035 .open = zcrypt_open,
1036 .release = zcrypt_release,
1043 static struct miscdevice zcrypt_misc_device = {
1046 .fops = &zcrypt_fops,
/* Emit @len bytes as hex nibbles followed by a single blank. */
static void sprintcl(struct seq_file *m, unsigned char *addr,
		     unsigned int len)
{
	unsigned int i;

	for (i = 0; i < len; i++)
		seq_printf(m, "%01x", (unsigned int) addr[i]);
	seq_putc(m, ' ');
}
/* Emit one row of hex output: groups of 16 bytes plus a remainder. */
static void sprintrw(struct seq_file *m, unsigned char *addr,
		     unsigned int len)
{
	unsigned int inl, c, cx;

	seq_printf(m, "	   ");
	inl = 0;
	for (c = 0; c < (len / 16); c++) {
		sprintcl(m, addr+inl, 16);
		inl += 16;
	}
	cx = len%16;
	if (cx) {
		sprintcl(m, addr+inl, cx);
		inl += cx;
	}
	seq_putc(m, '\n');
}
/* Emit a titled hex dump of @len bytes in rows of up to 64 bytes. */
static void sprinthx(unsigned char *title, struct seq_file *m,
		     unsigned char *addr, unsigned int len)
{
	unsigned int inl, r, rx;

	seq_printf(m, "\n%s\n", title);
	inl = 0;
	for (r = 0; r < (len / 64); r++) {
		sprintrw(m, addr+inl, 64);
		inl += 64;
	}
	rx = len % 64;
	if (rx) {
		sprintrw(m, addr+inl, rx);
		inl += rx;
	}
	seq_putc(m, '\n');
}
/* Emit a titled table of 32-bit counters, eight per output row. */
static void sprinthx4(unsigned char *title, struct seq_file *m,
		      unsigned int *array, unsigned int len)
{
	unsigned int r;

	seq_printf(m, "\n%s\n", title);
	for (r = 0; r < len; r++) {
		if ((r % 8) == 0)
			seq_printf(m, "    ");
		seq_printf(m, "%08X ", array[r]);
		if ((r % 8) == 7)
			seq_putc(m, '\n');
	}
	seq_putc(m, '\n');
}
1116 static int zcrypt_proc_show(
struct seq_file *m,
void *
v)
1118 char workarea[
sizeof(
int) * AP_DEVICES];
1120 seq_printf(m,
"\nzcrypt version: %d.%d.%d\n",
1123 seq_printf(m,
"Total device count: %d\n", zcrypt_device_count);
1134 seq_printf(m,
"requestq count: %d\n", zcrypt_requestq_count());
1135 seq_printf(m,
"pendingq count: %d\n", zcrypt_pendingq_count());
1138 zcrypt_status_mask(workarea);
1139 sprinthx(
"Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) "
1140 "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A 7=CEX3C 8=CEX3A",
1141 m, workarea, AP_DEVICES);
1142 zcrypt_qdepth_mask(workarea);
1143 sprinthx(
"Waiting work element counts", m, workarea, AP_DEVICES);
1144 zcrypt_perdev_reqcnt((
int *) workarea);
1145 sprinthx4(
"Per-device successfully completed request counts",
1146 m, (
unsigned int *) workarea, AP_DEVICES);
1155 static void zcrypt_disable_card(
int index)
1159 spin_lock_bh(&zcrypt_device_lock);
1166 spin_unlock_bh(&zcrypt_device_lock);
1169 static void zcrypt_enable_card(
int index)
1173 spin_lock_bh(&zcrypt_device_lock);
1179 spin_unlock_bh(&zcrypt_device_lock);
1182 static ssize_t zcrypt_proc_write(
struct file *
file,
const char __user *buffer,
1183 size_t count, loff_t *
pos)
1192 #define LBUFSIZE 1200UL
1197 local_count =
min(LBUFSIZE - 1, count);
1202 lbuf[local_count] =
'\0';
1204 ptr =
strstr(lbuf,
"Online devices");
1212 if (
strstr(ptr,
"Waiting work element counts") ==
NULL)
1215 for (j = 0; j < 64 && *
ptr; ptr++) {
1222 if (*ptr >=
'0' && *ptr <=
'8')
1224 else if (*ptr ==
'd' || *ptr ==
'D')
1225 zcrypt_disable_card(j++);
1226 else if (*ptr ==
'e' || *ptr ==
'E')
1227 zcrypt_enable_card(j++);
1228 else if (*ptr !=
' ' && *ptr !=
'\t')
1238 .open = zcrypt_proc_open,
1242 .write = zcrypt_proc_write,
1245 static int zcrypt_rng_device_count;
1246 static u32 *zcrypt_rng_buffer;
1247 static int zcrypt_rng_buffer_index;
1250 static int zcrypt_rng_data_read(
struct hwrng *rng,
u32 *
data)
1258 if (zcrypt_rng_buffer_index == 0) {
1259 rc = zcrypt_rng((
char *) zcrypt_rng_buffer);
1261 if ((rc == -
ENODEV) && (zcrypt_process_rescan()))
1262 rc = zcrypt_rng((
char *) zcrypt_rng_buffer);
1265 zcrypt_rng_buffer_index = rc /
sizeof *
data;
1267 *data = zcrypt_rng_buffer[--zcrypt_rng_buffer_index];
1268 return sizeof *
data;
1271 static struct hwrng zcrypt_rng_dev = {
1273 .data_read = zcrypt_rng_data_read,
1276 static int zcrypt_rng_device_add(
void)
1281 if (zcrypt_rng_device_count == 0) {
1283 if (!zcrypt_rng_buffer) {
1287 zcrypt_rng_buffer_index = 0;
1291 zcrypt_rng_device_count = 1;
1293 zcrypt_rng_device_count++;
1298 free_page((
unsigned long) zcrypt_rng_buffer);
1304 static void zcrypt_rng_device_remove(
void)
1307 zcrypt_rng_device_count--;
1308 if (zcrypt_rng_device_count == 0) {
1310 free_page((
unsigned long) zcrypt_rng_buffer);
1333 if (zcrypt_dbf_common)
1335 if (zcrypt_dbf_devices)
1360 zcrypt_entry = proc_create(
"driver/z90crypt", 0644,
NULL, &zcrypt_proc_fops);
1361 if (!zcrypt_entry) {