#define KMSG_COMPONENT "af_iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <net/sock.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>

#include <net/iucv/af_iucv.h>

static char iucv_userid[80];

static const struct proto_ops iucv_sock_ops;

static struct proto iucv_proto = {
	.name		= "AF_IUCV",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct iucv_sock),
};

/* handle to the IUCV base layer, obtained via symbol_get(iucv_if) */
static struct iucv_interface *pr_iucv;
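
/* special AF_IUCV IPRM messages */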
static const u8 iprm_shutdown[8] =
	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};

#define TRGCLS_SIZE	(sizeof(((struct iucv_message *)0)->class))
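
/* macros to set/get socket control buffer at correct offset */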
#define CB_TAG(skb)	((skb)->cb)		/* iucv message tag */
#define CB_TAG_LEN	(sizeof(((struct iucv_message *) 0)->tag))
#define CB_TRGCLS(skb)	((skb)->cb + CB_TAG_LEN) /* iucv msg target class */
#define CB_TRGCLS_LEN	(TRGCLS_SIZE)
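
/*
 * Wait until "condition" becomes true or the timeout expires; the sock
 * lock is released while sleeping so IUCV callbacks can make progress.
 */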
#define __iucv_sock_wait(sk, condition, timeo, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
	long __timeo = timeo;						\
	ret = 0;							\
	prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE);	\
	while (!(condition)) {						\
		if (!__timeo) {						\
			ret = -EAGAIN;					\
			break;						\
		}							\
		if (signal_pending(current)) {				\
			ret = sock_intr_errno(__timeo);			\
			break;						\
		}							\
		release_sock(sk);					\
		__timeo = schedule_timeout(__timeo);			\
		lock_sock(sk);						\
		ret = sock_error(sk);					\
		if (ret)						\
			break;						\
	}								\
	finish_wait(sk_sleep(sk), &__wait);				\
} while (0)

#define iucv_sock_wait(sk, condition, timeo)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__iucv_sock_wait(sk, condition, timeo, __ret);		\
	__ret;								\
})

static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);
static void iucv_sever_path(struct sock *, int);
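
/* callbacks registered with the IUCV base layer for the native transport */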
static struct iucv_handler af_iucv_handler = {
	.path_pending	  = iucv_callback_connreq,
	.path_complete	  = iucv_callback_connack,
	.path_severed	  = iucv_callback_connrej,
	.message_pending  = iucv_callback_rx,
	.message_complete = iucv_callback_txdone,
	.path_quiesced	  = iucv_callback_shutdown,
};

/* helpers to copy the two 8-byte halves of a 16-byte IUCV user data field */
static inline void high_nmcpy(unsigned char *dst, char *src)
{
	memcpy(dst, src, 8);
}

static inline void low_nmcpy(unsigned char *dst, char *src)
{
	memcpy(&dst[8], src, 8);
}
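
/*
 * Power-management callbacks: AF_IUCV registers a dummy device so the
 * PM core invokes these hooks around suspend/resume.
 */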
static int afiucv_pm_prepare(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_prepare\n");
#endif
	return 0;
}

static void afiucv_pm_complete(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_complete\n");
#endif
}

/**
 * afiucv_pm_freeze() - Freeze PM callback
 * @dev:	AFIUCV dummy device
 *
 * Sever all established IUCV communication paths.
 */
static int afiucv_pm_freeze(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_freeze\n");
#endif
	/* ... for each socket sk on iucv_sk_list: */
	switch (sk->sk_state) {
	case IUCV_DISCONN:
	case IUCV_CLOSING:
	case IUCV_CONNECTED:
		iucv_sever_path(sk, 0);
		break;
	default:
		break;
	}
	/* ... purge the socket's send and backlog queues ... */
}

/**
 * afiucv_pm_restore_thaw() - Thaw and restore PM callback
 * @dev:	AFIUCV dummy device
 *
 * Socket clean up after freeze.
 */
static int afiucv_pm_restore_thaw(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_restore_thaw\n");
#endif
	/* ... for each socket sk on iucv_sk_list: */
	switch (sk->sk_state) {
	case IUCV_CONNECTED:
		sk->sk_err = EPIPE;
		sk->sk_state = IUCV_DISCONN;
		sk->sk_state_change(sk);
		break;
	default:
		break;
	}
	/* ... */
}

static const struct dev_pm_ops afiucv_pm_ops = {
	.prepare = afiucv_pm_prepare,
	.complete = afiucv_pm_complete,
	.freeze = afiucv_pm_freeze,
	.thaw = afiucv_pm_restore_thaw,
	.restore = afiucv_pm_restore_thaw,
};

static struct device_driver af_iucv_driver = {
	.owner = THIS_MODULE,
	.name = "afiucv",
	.bus  = NULL,
	.pm   = &afiucv_pm_ops,
};
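
/* dummy device used as trigger for PM functions */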
static struct device *af_iucv_dev;
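
/**
 * iucv_msg_length() - Returns the length of an iucv message.
 * @msg:	Pointer to struct iucv_message, MUST NOT be NULL
 *
 * For IPRM messages the payload travels in the parameter list itself;
 * the last byte stores 0xff minus the true data length (at most 7 data
 * bytes plus the length byte).
 */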
static inline size_t iucv_msg_length(struct iucv_message *msg)
{
	size_t datalen;

	if (msg->flags & IUCV_IPRMDATA) {
		datalen = 0xff - msg->rmmsg[7];
		return (datalen < 8) ? datalen : 8;
	}
	return msg->length;
}

/* helper for wait conditions: test for one of two socket states */
static inline int iucv_sock_in_state(struct sock *sk, int state, int state2)
{
	return (sk->sk_state == state || sk->sk_state == state2);
}
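
/* may another message be sent without exceeding the peer's message limit? */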
static inline int iucv_below_msglim(struct sock *sk)
{
	/* ... */
}

/* wake up anyone blocked in sendmsg waiting for the message limit */
static void iucv_sock_wake_msglim(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}
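
/*
 * afiucv_hs_send() - send a message through the HiperSockets transport;
 * builds an af_iucv_trans_hdr plus an ethernet header in front of the data.
 */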
static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
			  struct sk_buff *skb, u8 flags)
{
	struct iucv_sock *iucv = iucv_sk(sock);
	struct af_iucv_trans_hdr *phs_hdr;
	int err, confirm_recv = 0;

	/* ... push and zero the transport header: */
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	/* ... fill phs_hdr; a WIN or data frame also confirms received msgs: */
	phs_hdr->window = confirm_recv;
	/* ... */
	skb->dev = iucv->hs_dev;
	if (!skb->dev)
		return -ENODEV;
	if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev))
		return -ENETDOWN;
	if (skb->len > skb->dev->mtu) {
		if (sock->sk_type == SOCK_SEQPACKET)
			return -EMSGSIZE;
		else
			skb_trim(skb, skb->dev->mtu);
	}
	/* ... transmit via dev_queue_xmit() and track it on send_skb_q ... */
}

static struct sock *__iucv_get_sock_by_name(char *nm)
{
	/* ... */
}

static void iucv_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	/* ... */
	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive iucv socket %p\n", sk);
		return;
	}
	/* ... */
}

/* Cleanup Listen */
static void iucv_sock_cleanup_listen(struct sock *parent)
{
	/* ... close and kill all pending, not-yet-accepted children ... */
}

static void iucv_sock_kill(struct sock *sk)
{
	/* ... free the socket if it is zapped and orphaned ... */
}

/* Terminate an IUCV path */
static void iucv_sever_path(struct sock *sk, int with_user_data)
{
	unsigned char user_data[16];
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_path *path = iucv->path;

	if (iucv->path) {
		iucv->path = NULL;
		if (with_user_data) {
			low_nmcpy(user_data, iucv->src_name);
			high_nmcpy(user_data, iucv->dst_name);
			ASCEBC(user_data, sizeof(user_data));
			pr_iucv->path_sever(path, user_data);
		} else
			pr_iucv->path_sever(path, NULL);
		iucv_path_free(path);
	}
}
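
/* Send controlling flags through an IUCV socket for HIPER transport */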
static int iucv_send_ctrl(struct sock *sk, u8 flags)
{
	int err = 0;
	int blen;
	struct sk_buff *skb;

	blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
	skb = sock_alloc_send_skb(sk, blen, 1, &err);
	if (skb) {
		skb_reserve(skb, blen);
		err = afiucv_hs_send(NULL, sk, skb, flags);
	}
	return err;
}
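
/* Close an IUCV socket */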
static void iucv_sock_close(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	int err = 0;

	lock_sock(sk);
	switch (sk->sk_state) {
	case IUCV_LISTEN:
		iucv_sock_cleanup_listen(sk);
		break;
	case IUCV_CONNECTED:
		/* ... send FIN (HiperSockets), enter IUCV_CLOSING: */
		if (!err && !skb_queue_empty(&iucv->send_skb_q)) {
			/* ... iucv_sock_wait() for IUCV_CLOSED or timeout ... */
		}
		/* ... fall through, purge queues ... */
	default:
		iucv_sever_path(sk, 1);
	}
	/* ... drop a held HiperSockets device reference: */
	sk->sk_bound_dev_if = 0;
	/* ... mark the socket zapped for iucv_sock_kill() ... */
	release_sock(sk);
}

static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
	/* ... inherit sk_type (and security label) from parent, if any ... */
}

static int iucv_sock_create(struct net *net, struct socket *sock, int protocol,
			    int kern)
{
	struct sock *sk;

	if (protocol && protocol != PF_IUCV)
		return -EPROTONOSUPPORT;
	/* ... */
	switch (sock->type) {
	case SOCK_STREAM:
		sock->ops = &iucv_sock_ops;
		break;
	case SOCK_SEQPACKET:
		/* currently, proto ops can handle both sk types */
		sock->ops = &iucv_sock_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL);
	if (!sk)
		return -ENOMEM;

	iucv_sock_init(sk, NULL);
	return 0;
}

void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_add_node(sk, &l->head);
	write_unlock_bh(&l->lock);
}

void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_del_node_init(sk);
	write_unlock_bh(&l->lock);
}

void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
	/* ... hold sk and add it to parent's accept queue: */
	sk_acceptq_added(parent);
}

void iucv_accept_unlink(struct sock *sk)
{
	/* ... remove sk from its parent's accept queue: */
	sk_acceptq_removed(iucv_sk(sk)->parent);
	/* ... */
}

struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
{
	/* ... for each child isk on parent's accept queue: */
	sk = (struct sock *) isk;
	/* ... graft the first connected child onto newsock: */
	sock_graft(sk, newsock);
	/* ... */
}
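
/* Bind an unbound socket */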
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
			  int addr_len)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	int err;

	/* ... validate the address and take iucv_sk_list.lock: */
	if (__iucv_get_sock_by_name(sa->siucv_name)) {
		err = -EADDRINUSE;
		goto done_unlock;
	}
	/* ... when binding to a HiperSockets interface: */
	sk->sk_bound_dev_if = dev->ifindex;
	/* ... */
}

/* Automatically bind an unbound socket */
static int iucv_sock_autobind(struct sock *sk)
{
	char name[12];

	/* ... use the local z/VM user ID and generate names until free: */
	while (__iucv_get_sock_by_name(name)) {
		/* ... generate the next candidate name ... */
	}
	/* ... */
}

static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned char user_data[16];
	int err;

	high_nmcpy(user_data, sa->siucv_name);
	low_nmcpy(user_data, iucv->src_name);
	ASCEBC(user_data, sizeof(user_data));
	/* ... create and connect the IUCV path; on failure: */
	iucv_path_free(iucv->path);
	iucv->path = NULL;
	/* ... */
}
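
/* Connect an unconnected socket */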
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
			     int alen, int flags)
{
	struct sock *sk = sock->sk;
	int err;

	/* ... validate the address; bind automatically if still unbound: */
	err = iucv_sock_autobind(sk);
	if (unlikely(err))
		return err;
	/* ... native IUCV transport: */
	err = afiucv_path_connect(sock, addr);
	/* ... wait for IUCV_CONNECTED; on error: */
	iucv_sever_path(sk, 0);
	/* ... */
}

/* Move a socket into listening state. */
static int iucv_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err;

	lock_sock(sk);
	/* ... require IUCV_BOUND and a stream/seqpacket socket: */
	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = IUCV_LISTEN;
	err = 0;
	/* ... */
	release_sock(sk);
	return err;
}

/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
			    int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	/* ... */
	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
	/* ... wait for an incoming connection; if a signal arrives: */
	err = sock_intr_errno(timeo);
	/* ... */
}

static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
			     int *len, int peer)
{
	struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;

	/* ... fill in either the peer's or the local name and user ID ... */
}
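
/**
 * iucv_send_iprm() - Send socket data in parameter list of an iucv message.
 * @path:	IUCV path
 * @msg:	Pointer to a struct iucv_message
 * @skb:	The socket data to send, skb->len MUST BE <= 7
 *
 * Send the socket data in the parameter list in the iucv message
 * (IUCV_IPRMDATA). The socket data is stored at index 0 to 6 in the
 * parameter list, and the data length at index 7 (encoded as 0xff minus
 * the data length).
 */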
static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
			  struct sk_buff *skb)
{
	u8 prmdata[8];

	memcpy(prmdata, (void *) skb->data, skb->len);
	prmdata[7] = 0xff - (u8) skb->len;
	return pr_iucv->message_send(path, msg, IUCV_IPRMDATA, 0,
				     (void *) prmdata, 8);
}

static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_message txmsg;
	struct sk_buff *skb;
	int noblock = msg->msg_flags & MSG_DONTWAIT;
	long timeo;
	int err;

	/* ... */
	err = sock_error(sk);
	if (err)
		goto out;
	/* ... wait until the message limit permits another send: */
	timeo = sock_sndtimeo(sk, noblock);
	/* ... HiperSockets transport: */
	err = afiucv_hs_send(&txmsg, sk, skb, 0);
	/* ... native IUCV, payload fits the parameter list: */
	err = iucv_send_iprm(iucv->path, &txmsg, skb);
	/* ... native IUCV, regular message: */
	err = pr_iucv->message_send(iucv->path, &txmsg, 0, 0,
				    (void *) skb->data, skb->len);
	/* ... the peer rejected with "message limit exceeded" (IPR code 3): */
	pr_err("Application %s on z/VM guest %s exceeds message limit\n",
	       appl_id, user_id);
	/* ... */
}

/* iucv_fragment_skb() - copy a large message into a chain of smaller skbs */
static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
{
	int dataleft, size, copied = 0;
	struct sk_buff *nskb;

	dataleft = len;
	while (dataleft) {
		/* ... allocate nskb and copy the next fragment into it: */
		skb_reset_transport_header(nskb);
		skb_reset_network_header(nskb);
		/* ... queue nskb on the backlog queue ... */
	}
	/* ... */
}

/* iucv_process_message() - Receive a single outstanding IUCV message */
static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
				 struct iucv_path *path,
				 struct iucv_message *msg)
{
	int rc;
	unsigned int len;

	len = iucv_msg_length(msg);
	/* ... receive the payload (IPRM data comes from the parameter list): */
	rc = pr_iucv->message_receive(path, msg, msg->flags & IUCV_IPRMDATA,
				      skb->data, len, NULL);
	/* ... if the flat skb is too small, split the message: */
	rc = iucv_fragment_skb(sk, skb, len);
	/* ... */
	skb_reset_transport_header(skb);
	skb_reset_network_header(skb);
	/* ... hand the skb to the socket's receive queue ... */
}

/* iucv_process_message_q() - Process outstanding IUCV messages */
static void iucv_process_message_q(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sock_msg_q *p, *n;

	list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
		/* ... allocate an skb, then: */
		iucv_process_message(sk, skb, p->path, &p->msg);
		/* ... stop early if the backlog queue fills up ... */
	}
}

static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len, int flags)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned int copied, rlen;
	struct sk_buff *skb;
	int err = 0;

	/* ... dequeue an skb and copy at most len bytes to user space: */
	copied = min_t(unsigned int, rlen, len);
	/* ... peer has closed and all data has been consumed: */
	iucv_sock_close(sk);
	/* ... afterwards drain the backlog and any queued messages: */
	iucv_process_message_q(sk);
	/* ... */
}

static inline unsigned int iucv_accept_poll(struct sock *parent)
{
	struct iucv_sock *isk;
	struct sock *sk;

	list_for_each_entry(isk, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;
		if (sk->sk_state == IUCV_CONNECTED)
			return POLLIN | POLLRDNORM;
	}
	return 0;
}

static unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
				   poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask = 0;

	sock_poll_wait(file, sk_sleep(sk), wait);

	if (sk->sk_state == IUCV_LISTEN)
		return iucv_accept_poll(sk);
	/* ... readable, error and hangup conditions ... */
	if (sock_writeable(sk) && iucv_below_msglim(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	/* ... */
	return mask;
}

static int iucv_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int err = 0;

	how++;
	if ((how & ~SHUTDOWN_MASK) || !how)
		return -EINVAL;

	lock_sock(sk);
	switch (sk->sk_state) {
	/* ... refuse shutdown unless connected or closing ... */
	}
	/* ... for SEND_SHUTDOWN, send iprm_shutdown (native IUCV) or a
	 * SHT control frame (HiperSockets), then wake up readers ... */
	release_sock(sk);
	return err;
}

static int iucv_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	if (!sk)
		return 0;

	iucv_sock_close(sk);

	sock_orphan(sk);
	iucv_sock_kill(sk);
	return err;
}

/* getsockopt and setsockopt */
static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
				char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int val;
	int rc;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *) optval))
		return -EFAULT;
	/* ... SO_MSGLIMIT may only be changed before bind/connect: */
	switch (sk->sk_state) {
	case IUCV_OPEN:
	case IUCV_BOUND:
		if (val < 1 || val > (u16)(~0))
			rc = -EINVAL;
		else
			iucv->msglimit = val;
		break;
	default:
		rc = -EINVAL;
		break;
	}
	/* ... */
}

static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
				char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int val, len;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	len = min_t(unsigned int, len, sizeof(int));
	/* ... fetch the option value, then copy val and len to user space ... */
}
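
/* Callback wrappers - called from iucv base support */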
static int iucv_callback_connreq(struct iucv_path *path,
				 u8 ipvmid[8], u8 ipuser[16])
{
	unsigned char user_data[16];
	unsigned char nuser_data[16];
	unsigned char src_name[8];
	struct sock *sk, *nsk;
	struct iucv_sock *iucv, *niucv;
	int err;

	memcpy(src_name, ipuser, 8);
	EBCASC(src_name, 8);
	/* ... find a listening socket bound to the requested name ... */
	/* check if parent socket is listening */
	low_nmcpy(user_data, iucv->src_name);
	high_nmcpy(user_data, iucv->dst_name);
	ASCEBC(user_data, sizeof(user_data));
	if (sk->sk_state != IUCV_LISTEN) {
		err = pr_iucv->path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}
	/* check for backlog size */
	if (sk_acceptq_is_full(sk)) {
		err = pr_iucv->path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}
	/* ... allocate nsk; if that fails, sever and free the path: */
	iucv_path_free(path);
	/* ... */
	iucv_sock_init(nsk, sk);
	/* ... assemble the reply user data: */
	high_nmcpy(nuser_data, ipuser + 8);	/* originator's name */
	/* ... */
	ASCEBC(nuser_data + 8, 8);
	/* ... */
	err = pr_iucv->path_accept(path, &af_iucv_handler, nuser_data, nsk);
	if (err) {
		iucv_sever_path(nsk, 1);
		iucv_sock_kill(nsk);
		goto fail;
	}
	/* ... enqueue nsk on sk's accept queue and wake up accept() ... */
}

static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	sk->sk_state = IUCV_CONNECTED;
	sk->sk_state_change(sk);
}

static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;

	/* ... reject the message if the socket is shut down for receive: */
	spin_lock(&iucv->message_q.lock);

	if (!list_empty(&iucv->message_q.list) ||
	    !skb_queue_empty(&iucv->backlog_skb_q))
		goto save_message;

	/* ... allocate an skb of iucv_msg_length(msg) bytes, then: */
	iucv_process_message(sk, skb, path, msg);
	goto out_unlock;

save_message:
	/* ... remember the message on iucv->message_q for later processing ... */
out_unlock:
	spin_unlock(&iucv->message_q.lock);
}

static void iucv_callback_txdone(struct iucv_path *path,
				 struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct sk_buff *this = NULL;
	struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
	struct sk_buff *list_skb = list->next;
	unsigned long flags;

	bh_lock_sock(sk);
	if (!skb_queue_empty(list)) {
		spin_lock_irqsave(&list->lock, flags);
		while (list_skb != (struct sk_buff *)list) {
			if (!memcmp(&msg->tag, CB_TAG(list_skb), CB_TAG_LEN)) {
				this = list_skb;
				break;
			}
			list_skb = list_skb->next;
		}
		if (this)
			__skb_unlink(this, list);
		spin_unlock_irqrestore(&list->lock, flags);

		if (this) {
			kfree_skb(this);
			/* wake up any process waiting for sending */
			iucv_sock_wake_msglim(sk);
		}
	}

	if (sk->sk_state == IUCV_CLOSING) {
		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
			sk->sk_state = IUCV_CLOSED;
			sk->sk_state_change(sk);
		}
	}
	bh_unlock_sock(sk);
}

static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	if (sk->sk_state == IUCV_CLOSED)
		return;

	bh_lock_sock(sk);
	iucv_sever_path(sk, 1);
	sk->sk_state = IUCV_DISCONN;

	sk->sk_state_change(sk);
	bh_unlock_sock(sk);
}

/* called if the other communication side shuts down its RECV direction;
 * in turn, the callback sets SEND_SHUTDOWN to disable sending of data.
 */
static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	bh_lock_sock(sk);
	if (sk->sk_state != IUCV_CLOSED) {
		sk->sk_shutdown |= SEND_SHUTDOWN;
		sk->sk_state_change(sk);
	}
	bh_unlock_sock(sk);
}

/***************** HiperSockets transport callbacks ********************/
static void afiucv_swap_src_dest(struct sk_buff *skb)
{
	/* ... swap source and destination user ID and application name in
	 * the transport header so the frame can be bounced back as a reply ... */
}

/* afiucv_hs_callback_syn - react on received SYN */
static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
{
	struct sock *nsk;
	struct iucv_sock *iucv, *niucv;
	int err;

	iucv = iucv_sk(sk);
	if (!iucv) {
		/* no sock - connection refused */
		afiucv_swap_src_dest(skb);
		/* ... reply with SYN|FIN ... */
	}

	nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
	bh_lock_sock(sk);
	if ((sk->sk_state != IUCV_LISTEN) ||
	    sk_acceptq_is_full(sk) ||
	    !nsk) {
		/* error on server socket - connection refused */
		afiucv_swap_src_dest(skb);
		/* ... reply with SYN|FIN and kill nsk ... */
	}

	niucv = iucv_sk(nsk);
	iucv_sock_init(nsk, sk);
	/* ... copy addresses and message limit from the transport header: */
	nsk->sk_bound_dev_if = sk->sk_bound_dev_if;
	/* ... reply with SYN|ACK: */
	afiucv_swap_src_dest(skb);
	/* ... if sending the reply fails: */
	iucv_sock_kill(nsk);
	/* ... */
}

/* afiucv_hs_callback_synack() - react on received SYN-ACK */
static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb)
{
	/* ... mark the connection established and wake the connecting task ... */
}

/* afiucv_hs_callback_synfin() - react on received SYN_FIN */
static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb)
{
	/* ... the peer refused the connection ... */
}

/* afiucv_hs_callback_fin() - react on received FIN */
static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
{
	/* ... close the socket ... */
}

/* afiucv_hs_callback_win() - react on received WIN */
static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	struct af_iucv_trans_hdr *trans_hdr =
		(struct af_iucv_trans_hdr *) skb->data;

	if (!iucv)
		return NET_RX_SUCCESS;

	if (sk->sk_state != IUCV_CONNECTED)
		return NET_RX_SUCCESS;

	atomic_sub(trans_hdr->window, &iucv->msg_sent);
	iucv_sock_wake_msglim(sk);
	return NET_RX_SUCCESS;
}

/* afiucv_hs_callback_rx() - react on received data */
static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	/* ... drop the frame unless the socket is connected: */
	/* ... strip the transport header: */
	skb_pull(skb, sizeof(struct af_iucv_trans_hdr));
	skb_reset_transport_header(skb);
	skb_reset_network_header(skb);
	/* ... queue the skb to sk_receive_queue or the backlog queue ... */
}
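
/*
 * afiucv_hs_rcv() - base function for arriving data through HiperSockets
 * transport; called from netif RX softirq.
 */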
static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
			 struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct af_iucv_trans_hdr *trans_hdr;
	char nullstring[8];
	int err = 0;

	memset(nullstring, 0, sizeof(nullstring));
	/* ... pull the transport header and look up the target socket: */
	switch (trans_hdr->flags) {
	case (AF_IUCV_FLAG_SYN):	/* connect request */
		err = afiucv_hs_callback_syn(sk, skb);
		break;
	case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK):	/* request confirmed */
		err = afiucv_hs_callback_synack(sk, skb);
		break;
	case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN):	/* request refused */
		err = afiucv_hs_callback_synfin(sk, skb);
		break;
	case (AF_IUCV_FLAG_FIN):	/* close request */
		err = afiucv_hs_callback_fin(sk, skb);
		break;
	case (AF_IUCV_FLAG_WIN):	/* window update */
		err = afiucv_hs_callback_win(sk, skb);
		/* ... a WIN frame may also carry data; fall through: */
	case 0:				/* plain data frame */
		err = afiucv_hs_callback_rx(sk, skb);
		break;
	default:
		/* ... */
		break;
	}
	return err;
}

/* afiucv_hs_callback_txnotify() - handle send notifications from
 * HiperSockets transport
 */
static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
					enum iucv_tx_notify n)
{
	struct sock *isk = skb->sk;
	struct sock *sk = NULL;
	struct iucv_sock *iucv = NULL;
	struct sk_buff_head *list;
	struct sk_buff *list_skb;
	struct sk_buff *nskb;
	unsigned long flags;

	/* ... find the iucv socket that owns skb on iucv_sk_list: */
	list = &iucv->send_skb_q;
	spin_lock_irqsave(&list->lock, flags);
	if (skb_queue_empty(list))
		goto out_unlock;
	list_skb = list->next;
	nskb = list_skb->next;
	while (list_skb != (struct sk_buff *)list) {
		if (skb_shinfo(list_skb) == skb_shinfo(skb)) {
			switch (n) {
			case TX_NOTIFY_OK:
				__skb_unlink(list_skb, list);
				kfree_skb(list_skb);
				iucv_sock_wake_msglim(sk);
				break;
			case TX_NOTIFY_PENDING:
				atomic_inc(&iucv->pendings);
				break;
			case TX_NOTIFY_DELAYED_OK:
				__skb_unlink(list_skb, list);
				atomic_dec(&iucv->pendings);
				if (atomic_read(&iucv->pendings) <= 0)
					iucv_sock_wake_msglim(sk);
				kfree_skb(list_skb);
				break;
			default:	/* unreachable or general error */
				__skb_unlink(list_skb, list);
				kfree_skb(list_skb);
				/* ... flag the socket disconnected ... */
				break;
			}
			break;
		}
		list_skb = nskb;
		nskb = nskb->next;
	}
out_unlock:
	spin_unlock_irqrestore(&list->lock, flags);

	if (sk->sk_state == IUCV_CLOSING) {
		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
			sk->sk_state = IUCV_CLOSED;
			sk->sk_state_change(sk);
		}
	}
}

static int afiucv_netdev_event(struct notifier_block *this,
			       unsigned long event, void *ptr)
{
	/* ... for each iucv socket bound to the affected device: */
	if ((iucv->hs_dev == event_dev) &&
	    (sk->sk_state == IUCV_CONNECTED)) {
		if (event == NETDEV_GOING_DOWN)
			iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
		sk->sk_state = IUCV_DISCONN;
		sk->sk_state_change(sk);
	}
	/* ... */
}

static struct notifier_block afiucv_netdev_notifier = {
	.notifier_call = afiucv_netdev_event,
};

static const struct proto_ops iucv_sock_ops = {
	.family		= PF_IUCV,
	.owner		= THIS_MODULE,
	.release	= iucv_sock_release,
	.bind		= iucv_sock_bind,
	.connect	= iucv_sock_connect,
	.listen		= iucv_sock_listen,
	.accept		= iucv_sock_accept,
	.getname	= iucv_sock_getname,
	.sendmsg	= iucv_sock_sendmsg,
	.recvmsg	= iucv_sock_recvmsg,
	.poll		= iucv_sock_poll,
	.ioctl		= sock_no_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= iucv_sock_shutdown,
	.setsockopt	= iucv_sock_setsockopt,
	.getsockopt	= iucv_sock_getsockopt,
};

static const struct net_proto_family iucv_sock_family_ops = {
	.family	= AF_IUCV,
	.owner	= THIS_MODULE,
	.create	= iucv_sock_create,
};

static struct packet_type iucv_packet_type = {
	.type = cpu_to_be16(ETH_P_AF_IUCV),
	.func = afiucv_hs_rcv,
};

static int afiucv_iucv_init(void)
{
	int err;

	err = pr_iucv->iucv_register(&af_iucv_handler, 0);
	if (err)
		goto out;
	/* establish dummy device for PM callbacks */
	af_iucv_driver.bus = pr_iucv->bus;
	err = driver_register(&af_iucv_driver);
	if (err)
		goto out_iucv;
	af_iucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
	/* ... */
	dev_set_name(af_iucv_dev, "af_iucv");
	af_iucv_dev->bus = pr_iucv->bus;
	af_iucv_dev->parent = iucv_root;
	af_iucv_dev->release = (void (*)(struct device *))kfree;
	af_iucv_dev->driver = &af_iucv_driver;
	err = device_register(af_iucv_dev);
	/* ... error unwinding ... */
	return err;
}

static int __init afiucv_init(void)
{
	int err;

	if (MACHINE_IS_VM) {
		cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
		if (unlikely(err)) {
			WARN_ON(err);
			err = -EPROTONOSUPPORT;
			goto out;
		}

		pr_iucv = try_then_request_module(symbol_get(iucv_if), "iucv");
		if (!pr_iucv) {
			printk(KERN_WARNING "iucv_if lookup failed\n");
			memset(&iucv_userid, 0, sizeof(iucv_userid));
		}
	} else {
		memset(&iucv_userid, 0, sizeof(iucv_userid));
		pr_iucv = NULL;
	}
	/* ... register proto, socket family and netdev notifier: */
	if (pr_iucv) {
		err = afiucv_iucv_init();
		if (err)
			goto out_sock;
	}
	/* ... */
out:
	return err;
}

static void __exit afiucv_exit(void)
{
	/* ... tear down in reverse order of afiucv_init() ... */
}

module_init(afiucv_init);
module_exit(afiucv_exit);