36 #define KMSG_COMPONENT "iucv"
37 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
40 #include <linux/module.h>
43 #include <linux/kernel.h>
44 #include <linux/slab.h>
47 #include <linux/list.h>
48 #include <linux/errno.h>
50 #include <linux/device.h>
52 #include <linux/reboot.h>
/*
 * IUCV parameter-list flag bits.  Each is a distinct bit mask OR'd into
 * the iprm flag bytes of a parameter block (exact on-the-wire semantics
 * are defined by the z/VM IUCV interface -- confirm against CP
 * Programming Services before changing).
 */
#define IUCV_IPSRCCLS	0x01
#define IUCV_IPTRGCLS	0x01
#define IUCV_IPFGPID	0x02
#define IUCV_IPFGMID	0x04
#define IUCV_IPNORPY	0x10
#define IUCV_IPALL	0x80
/*
 * Forward declarations of the power-management callbacks; they are
 * wired into the dev_pm_ops initializer that follows.
 */
static int iucv_pm_prepare(struct device *);
static void iucv_pm_complete(struct device *);
static int iucv_pm_freeze(struct device *);
static int iucv_pm_thaw(struct device *);
static int iucv_pm_restore(struct device *);
98 .prepare = iucv_pm_prepare,
99 .complete = iucv_pm_complete,
100 .freeze = iucv_pm_freeze,
101 .thaw = iucv_pm_thaw,
102 .restore = iucv_pm_restore,
107 .match = iucv_bus_match,
/* Non-zero once the IUCV facility is usable -- NOTE(review): the site
 * that sets this is not visible in this excerpt; confirm in full file. */
static int iucv_available;
/* Forward declaration: tasklet handler that drains the IUCV task queue. */
static void iucv_tasklet_fn(unsigned long);
/*
 * Canned 16-byte user-data strings passed to iucv_sever_pathid() when a
 * path must be torn down on error.  Initialized here as ASCII and
 * converted in place to EBCDIC by the ASCEBC() calls in iucv_init().
 */
static char iucv_error_no_listener[16] = "NO LISTENER";
static char iucv_error_no_memory[16] = "NO MEMORY";
static char iucv_error_pathid[16] = "INVALID PATHID";
/* Table of active paths, indexed by path id; allocated in iucv_enable()
 * and released in iucv_disable(). */
static struct iucv_path **iucv_path_table;
/* Highest usable path id, obtained from iucv_query_maxconn(). */
static unsigned long iucv_max_pathid;
/* CPU currently running the tasklet/work handlers, or -1 when none. */
static int iucv_active_cpu = -1;
/* Number of registered handlers that are not SMP capable (see the
 * !smp tests in the register/unregister paths). */
static int iucv_nonsmp_handler;
312 static union iucv_param *iucv_param_irq[
NR_CPUS];
323 static inline int iucv_call_b2f0(
int command,
union iucv_param *
parm)
325 register unsigned long reg0 asm (
"0");
326 register unsigned long reg1 asm (
"1");
332 " .long 0xb2f01000\n"
335 :
"=d" (ccode),
"=m" (*parm),
"+d" (
reg0),
"+a" (
reg1)
336 :
"m" (*parm) :
"cc");
337 return (ccode == 1) ? parm->
ctrl.iprcode : ccode;
348 static int iucv_query_maxconn(
void)
350 register unsigned long reg0 asm (
"0");
351 register unsigned long reg1 asm (
"1");
361 " .long 0xb2f01000\n"
364 :
"=d" (ccode),
"+d" (
reg0),
"+d" (
reg1) : :
"cc");
366 iucv_max_pathid =
reg1;
368 return ccode ? -
EPERM : 0;
377 static void iucv_allow_cpu(
void *
data)
380 union iucv_param *
parm;
391 parm = iucv_param_irq[
cpu];
392 memset(parm, 0,
sizeof(
union iucv_param));
405 memset(parm, 0,
sizeof(
union iucv_param));
409 cpumask_set_cpu(cpu, &iucv_irq_cpumask);
418 static void iucv_block_cpu(
void *data)
421 union iucv_param *
parm;
424 parm = iucv_param_irq[
cpu];
425 memset(parm, 0,
sizeof(
union iucv_param));
429 cpumask_clear_cpu(cpu, &iucv_irq_cpumask);
438 static void iucv_block_cpu_almost(
void *data)
441 union iucv_param *
parm;
444 parm = iucv_param_irq[
cpu];
445 memset(parm, 0,
sizeof(
union iucv_param));
449 memset(parm, 0,
sizeof(
union iucv_param));
454 cpumask_clear_cpu(cpu, &iucv_irq_cpumask);
463 static void iucv_declare_cpu(
void *data)
466 union iucv_param *
parm;
473 parm = iucv_param_irq[
cpu];
474 memset(parm, 0,
sizeof(
union iucv_param));
478 char *
err =
"Unknown";
481 err =
"Directory error";
484 err =
"Invalid length";
487 err =
"Buffer already exists";
490 err =
"Buffer overlap";
493 err =
"Paging or storage error";
496 pr_warning(
"Defining an interrupt buffer on CPU %i"
497 " failed with 0x%02x (%s)\n", cpu, rc, err);
502 cpumask_set_cpu(cpu, &iucv_buffer_cpumask);
504 if (iucv_nonsmp_handler == 0 || cpumask_empty(&iucv_irq_cpumask))
506 iucv_allow_cpu(
NULL);
509 iucv_block_cpu(
NULL);
518 static void iucv_retrieve_cpu(
void *data)
521 union iucv_param *
parm;
527 iucv_block_cpu(
NULL);
530 parm = iucv_param_irq[
cpu];
534 cpumask_clear_cpu(cpu, &iucv_buffer_cpumask);
542 static void iucv_setmask_mp(
void)
561 static
void iucv_setmask_up(
void)
567 cpumask_copy(&cpumask, &iucv_irq_cpumask);
568 cpumask_clear_cpu(cpumask_first(&iucv_irq_cpumask), &cpumask);
581 static
int iucv_enable(
void)
588 alloc_size = iucv_max_pathid *
sizeof(
struct iucv_path);
589 iucv_path_table = kzalloc(alloc_size,
GFP_KERNEL);
590 if (!iucv_path_table)
596 if (cpumask_empty(&iucv_buffer_cpumask))
602 kfree(iucv_path_table);
603 iucv_path_table = NULL;
615 static
void iucv_disable(
void)
619 kfree(iucv_path_table);
620 iucv_path_table =
NULL;
625 unsigned long action,
void *hcpu)
628 long cpu = (
long) hcpu;
633 iucv_irq_data[
cpu] = kmalloc_node(
sizeof(
struct iucv_irq_data),
635 if (!iucv_irq_data[cpu])
636 return notifier_from_errno(-
ENOMEM);
638 iucv_param[
cpu] = kmalloc_node(
sizeof(
union iucv_param),
640 if (!iucv_param[cpu]) {
641 kfree(iucv_irq_data[cpu]);
643 return notifier_from_errno(-
ENOMEM);
645 iucv_param_irq[
cpu] = kmalloc_node(
sizeof(
union iucv_param),
647 if (!iucv_param_irq[cpu]) {
648 kfree(iucv_param[cpu]);
650 kfree(iucv_irq_data[cpu]);
652 return notifier_from_errno(-
ENOMEM);
659 kfree(iucv_param_irq[cpu]);
661 kfree(iucv_param[cpu]);
663 kfree(iucv_irq_data[cpu]);
670 if (!iucv_path_table)
676 if (!iucv_path_table)
678 cpumask_copy(&cpumask, &iucv_buffer_cpumask);
679 cpumask_clear_cpu(cpu, &cpumask);
680 if (cpumask_empty(&cpumask))
682 return notifier_from_errno(-
EINVAL);
684 if (cpumask_empty(&iucv_irq_cpumask))
686 cpumask_first(&iucv_buffer_cpumask),
687 iucv_allow_cpu, NULL, 1);
694 .notifier_call = iucv_cpu_notify,
704 static int iucv_sever_pathid(
u16 pathid,
u8 userdata[16])
706 union iucv_param *
parm;
709 memset(parm, 0,
sizeof(
union iucv_param));
723 static void __iucv_cleanup_queue(
void *
dummy)
734 static void iucv_cleanup_queue(
void)
748 spin_lock_irq(&iucv_queue_lock);
751 if (iucv_path_table[p->
data.ippathid] == NULL) {
756 spin_unlock_irq(&iucv_queue_lock);
777 iucv_nonsmp_handler++;
778 if (list_empty(&iucv_handler_list)) {
782 }
else if (!smp && iucv_nonsmp_handler == 1)
784 INIT_LIST_HEAD(&handler->
paths);
786 spin_lock_bh(&iucv_table_lock);
788 spin_unlock_bh(&iucv_table_lock);
808 spin_lock_bh(&iucv_table_lock);
810 list_del_init(&handler->
list);
813 iucv_sever_pathid(p->
pathid, NULL);
818 spin_unlock_bh(&iucv_table_lock);
820 iucv_nonsmp_handler--;
821 if (list_empty(&iucv_handler_list))
823 else if (!smp && iucv_nonsmp_handler == 0)
837 for (i = 0; i < iucv_max_pathid; i++) {
838 if (iucv_path_table[i])
839 iucv_sever_pathid(i, NULL);
864 u8 userdata[16],
void *
private)
866 union iucv_param *
parm;
870 if (cpumask_empty(&iucv_buffer_cpumask)) {
876 memset(parm, 0,
sizeof(
union iucv_param));
914 union iucv_param *
parm;
917 spin_lock_bh(&iucv_table_lock);
918 iucv_cleanup_queue();
919 if (cpumask_empty(&iucv_buffer_cpumask)) {
924 memset(parm, 0,
sizeof(
union iucv_param));
934 sizeof(parm->
ctrl.iptarget));
943 if (parm->
ctrl.ippathid < iucv_max_pathid) {
952 iucv_sever_pathid(parm->
ctrl.ippathid,
958 spin_unlock_bh(&iucv_table_lock);
975 union iucv_param *
parm;
979 if (cpumask_empty(&iucv_buffer_cpumask)) {
984 memset(parm, 0,
sizeof(
union iucv_param));
1007 union iucv_param *
parm;
1011 if (cpumask_empty(&iucv_buffer_cpumask)) {
1016 memset(parm, 0,
sizeof(
union iucv_param));
1040 if (cpumask_empty(&iucv_buffer_cpumask)) {
1045 spin_lock_bh(&iucv_table_lock);
1046 rc = iucv_sever_pathid(path->
pathid, userdata);
1048 list_del_init(&path->
list);
1050 spin_unlock_bh(&iucv_table_lock);
1070 union iucv_param *
parm;
1074 if (cpumask_empty(&iucv_buffer_cpumask)) {
1079 memset(parm, 0,
sizeof(
union iucv_param));
1081 parm->
purge.ipmsgid = msg->
id;
1082 parm->
purge.ipsrccls = srccls;
1107 static int iucv_message_receive_iprmdata(
struct iucv_path *path,
1121 *residual =
abs(size - 8);
1125 size = (size < 8) ? size : 8;
1126 for (array = buffer; size > 0; array++) {
1158 u8 flags,
void *buffer,
size_t size,
size_t *residual)
1160 union iucv_param *
parm;
1164 return iucv_message_receive_iprmdata(path, msg, flags,
1165 buffer, size, residual);
1166 if (cpumask_empty(&iucv_buffer_cpumask)) {
1171 memset(parm, 0,
sizeof(
union iucv_param));
1173 parm->
db.ipbfln1f = (
u32) size;
1174 parm->
db.ipmsgid = msg->
id;
1176 parm->
db.iptrgcls = msg->
class;
1180 if (!rc || rc == 5) {
1181 msg->
flags = parm->
db.ipflags1;
1183 *residual = parm->
db.ipbfln1f;
1208 u8 flags,
void *buffer,
size_t size,
size_t *residual)
1213 return iucv_message_receive_iprmdata(path, msg, flags,
1214 buffer, size, residual);
1235 union iucv_param *
parm;
1239 if (cpumask_empty(&iucv_buffer_cpumask)) {
1244 memset(parm, 0,
sizeof(
union iucv_param));
1246 parm->
db.ipmsgid = msg->
id;
1247 parm->
db.iptrgcls = msg->
class;
1272 u8 flags,
void *reply,
size_t size)
1274 union iucv_param *
parm;
1278 if (cpumask_empty(&iucv_buffer_cpumask)) {
1283 memset(parm, 0,
sizeof(
union iucv_param));
1287 parm->
dpl.ipmsgid = msg->
id;
1292 parm->
db.ipbfln1f = (
u32) size;
1295 parm->
db.ipmsgid = msg->
id;
1296 parm->
db.iptrgcls = msg->
class;
1323 u8 flags,
u32 srccls,
void *buffer,
size_t size)
1325 union iucv_param *
parm;
1328 if (cpumask_empty(&iucv_buffer_cpumask)) {
1333 memset(parm, 0,
sizeof(
union iucv_param));
1334 if (flags & IUCV_IPRMDATA) {
1339 parm->
dpl.ipsrccls = srccls;
1340 parm->
dpl.ipmsgtag = msg->
tag;
1344 parm->
db.ipbfln1f = (
u32) size;
1347 parm->
db.iptrgcls = msg->
class;
1348 parm->
db.ipsrccls = srccls;
1349 parm->
db.ipmsgtag = msg->
tag;
1353 msg->
id = parm->
db.ipmsgid;
1377 u8 flags,
u32 srccls,
void *buffer,
size_t size)
1408 u8 flags,
u32 srccls,
void *buffer,
size_t size,
1409 void *answer,
size_t asize,
size_t *residual)
1411 union iucv_param *
parm;
1415 if (cpumask_empty(&iucv_buffer_cpumask)) {
1420 memset(parm, 0,
sizeof(
union iucv_param));
1421 if (flags & IUCV_IPRMDATA) {
1425 parm->
dpl.ipsrccls = srccls;
1426 parm->
dpl.ipmsgtag = msg->
tag;
1428 parm->
dpl.ipbfln2f = (
u32) asize;
1432 parm->
db.ipflags1 = path->
flags;
1433 parm->
db.iptrgcls = msg->
class;
1434 parm->
db.ipsrccls = srccls;
1435 parm->
db.ipmsgtag = msg->
tag;
1437 parm->
db.ipbfln1f = (
u32) size;
1439 parm->
db.ipbfln2f = (
u32) asize;
1443 msg->
id = parm->
db.ipmsgid;
1457 struct iucv_path_pending {
1470 static void iucv_path_pending(
struct iucv_irq_data *data)
1472 struct iucv_path_pending *ipp = (
void *) data;
1477 BUG_ON(iucv_path_table[ipp->ippathid]);
1479 error = iucv_error_no_memory;
1480 path = iucv_path_alloc(ipp->ipmsglim, ipp->ipflags1,
GFP_ATOMIC);
1483 path->
pathid = ipp->ippathid;
1498 if (!handler->
path_pending(path, ipp->ipvmid, ipp->ipuser))
1505 iucv_path_free(path);
1506 error = iucv_error_no_listener;
1508 iucv_sever_pathid(ipp->ippathid, error);
1518 struct iucv_path_complete {
1531 static void iucv_path_complete(
struct iucv_irq_data *data)
1533 struct iucv_path_complete *ipc = (
void *) data;
1534 struct iucv_path *path = iucv_path_table[ipc->ippathid];
1537 path->
flags = ipc->ipflags1;
1539 path->
handler->path_complete(path, ipc->ipuser);
1549 struct iucv_path_severed {
1561 static void iucv_path_severed(
struct iucv_irq_data *data)
1563 struct iucv_path_severed *ips = (
void *) data;
1564 struct iucv_path *path = iucv_path_table[ips->ippathid];
1568 if (path->
handler->path_severed)
1569 path->
handler->path_severed(path, ips->ipuser);
1571 iucv_sever_pathid(path->
pathid, NULL);
1574 iucv_path_free(path);
1585 struct iucv_path_quiesced {
1597 static void iucv_path_quiesced(
struct iucv_irq_data *data)
1599 struct iucv_path_quiesced *
ipq = (
void *) data;
1600 struct iucv_path *path = iucv_path_table[ipq->ippathid];
1603 path->
handler->path_quiesced(path, ipq->ipuser);
1613 struct iucv_path_resumed {
1625 static void iucv_path_resumed(
struct iucv_irq_data *data)
1627 struct iucv_path_resumed *ipr = (
void *) data;
1628 struct iucv_path *path = iucv_path_table[ipr->ippathid];
1631 path->
handler->path_resumed(path, ipr->ipuser);
1641 struct iucv_message_complete {
1656 static void iucv_message_complete(
struct iucv_irq_data *data)
1658 struct iucv_message_complete *imc = (
void *) data;
1659 struct iucv_path *path = iucv_path_table[imc->ippathid];
1663 msg.
flags = imc->ipflags1;
1664 msg.
id = imc->ipmsgid;
1665 msg.
audit = imc->ipaudit;
1667 msg.
class = imc->ipsrccls;
1668 msg.
tag = imc->ipmsgtag;
1669 msg.
length = imc->ipbfln2f;
1670 path->
handler->message_complete(path, &msg);
1681 struct iucv_message_pending {
1701 static void iucv_message_pending(
struct iucv_irq_data *data)
1703 struct iucv_message_pending *imp = (
void *) data;
1704 struct iucv_path *path = iucv_path_table[imp->ippathid];
1708 msg.
flags = imp->ipflags1;
1709 msg.
id = imp->ipmsgid;
1710 msg.
class = imp->iptrgcls;
1711 if (imp->ipflags1 & IUCV_IPRMDATA) {
1715 msg.
length = imp->ln1msg2.ipbfln1f;
1717 path->
handler->message_pending(path, &msg);
1728 static void iucv_tasklet_fn(
unsigned long ignored)
1730 typedef void iucv_irq_fn(
struct iucv_irq_data *);
1731 static iucv_irq_fn *irq_fn[] = {
1732 [0x02] = iucv_path_complete,
1733 [0x03] = iucv_path_severed,
1734 [0x04] = iucv_path_quiesced,
1735 [0x05] = iucv_path_resumed,
1736 [0x06] = iucv_message_complete,
1737 [0x07] = iucv_message_complete,
1738 [0x08] = iucv_message_pending,
1739 [0x09] = iucv_message_pending,
1745 if (!spin_trylock(&iucv_table_lock)) {
1746 tasklet_schedule(&iucv_tasklet);
1751 spin_lock_irq(&iucv_queue_lock);
1752 list_splice_init(&iucv_task_queue, &task_queue);
1753 spin_unlock_irq(&iucv_queue_lock);
1756 list_del_init(&p->
list);
1761 iucv_active_cpu = -1;
1762 spin_unlock(&iucv_table_lock);
1778 spin_lock_bh(&iucv_table_lock);
1781 spin_lock_irq(&iucv_queue_lock);
1782 list_splice_init(&iucv_work_queue, &work_queue);
1783 spin_unlock_irq(&iucv_queue_lock);
1785 iucv_cleanup_queue();
1787 list_del_init(&p->
list);
1788 iucv_path_pending(&p->
data);
1792 iucv_active_cpu = -1;
1793 spin_unlock_bh(&iucv_table_lock);
1804 unsigned int param32,
unsigned long param64)
1806 struct iucv_irq_data *
p;
1811 if (p->
ippathid >= iucv_max_pathid) {
1813 iucv_sever_pathid(p->
ippathid, iucv_error_no_listener);
1819 pr_warning(
"iucv_external_interrupt: out of memory\n");
1823 spin_lock(&iucv_queue_lock);
1831 tasklet_schedule(&iucv_tasklet);
1833 spin_unlock(&iucv_queue_lock);
1836 static int iucv_pm_prepare(
struct device *
dev)
1840 #ifdef CONFIG_PM_DEBUG
1844 rc = dev->
driver->pm->prepare(dev);
1848 static void iucv_pm_complete(
struct device *dev)
1850 #ifdef CONFIG_PM_DEBUG
1854 dev->
driver->pm->complete(dev);
1863 int iucv_path_table_empty(
void)
1867 for (i = 0; i < iucv_max_pathid; i++) {
1868 if (iucv_path_table[i])
1882 static int iucv_pm_freeze(
struct device *dev)
1888 #ifdef CONFIG_PM_DEBUG
1897 list_del_init(&p->list);
1898 iucv_sever_pathid(p->data.ippathid,
1899 iucv_error_no_listener);
1905 rc = dev->
driver->pm->freeze(dev);
1906 if (iucv_path_table_empty())
1919 static int iucv_pm_thaw(
struct device *dev)
1923 #ifdef CONFIG_PM_DEBUG
1927 if (!iucv_path_table) {
1932 if (cpumask_empty(&iucv_irq_cpumask)) {
1933 if (iucv_nonsmp_handler)
1935 iucv_allow_cpu(NULL);
1941 rc = dev->
driver->pm->thaw(dev);
1954 static int iucv_pm_restore(
struct device *dev)
1958 #ifdef CONFIG_PM_DEBUG
1962 pr_warning(
"Suspending Linux did not completely close all IUCV "
1965 if (cpumask_empty(&iucv_irq_cpumask)) {
1966 rc = iucv_query_maxconn();
1972 rc = dev->
driver->pm->restore(dev);
1986 .path_accept = iucv_path_accept,
1987 .path_connect = iucv_path_connect,
2003 static int __init iucv_init(
void)
2013 rc = iucv_query_maxconn();
2020 if (IS_ERR(iucv_root)) {
2021 rc = PTR_ERR(iucv_root);
2027 iucv_irq_data[
cpu] = kmalloc_node(
sizeof(
struct iucv_irq_data),
2029 if (!iucv_irq_data[cpu]) {
2035 iucv_param[
cpu] = kmalloc_node(
sizeof(
union iucv_param),
2037 if (!iucv_param[cpu]) {
2041 iucv_param_irq[
cpu] = kmalloc_node(
sizeof(
union iucv_param),
2043 if (!iucv_param_irq[cpu]) {
2055 ASCEBC(iucv_error_no_listener, 16);
2056 ASCEBC(iucv_error_no_memory, 16);
2057 ASCEBC(iucv_error_pathid, 16);
2072 kfree(iucv_param_irq[cpu]);
2074 kfree(iucv_param[cpu]);
2076 kfree(iucv_irq_data[cpu]);
2093 static void __exit iucv_exit(
void)
2098 spin_lock_irq(&iucv_queue_lock);
2103 spin_unlock_irq(&iucv_queue_lock);
2107 kfree(iucv_param_irq[cpu]);
2109 kfree(iucv_param[cpu]);
2111 kfree(iucv_irq_data[cpu]);