Linux Kernel  3.7.1
af_iucv.c
1 /*
2  * IUCV protocol stack for Linux on zSeries
3  *
4  * Copyright IBM Corp. 2006, 2009
5  *
6  * Author(s): Jennifer Hunt <[email protected]>
7  * Hendrik Brueckner <[email protected]>
8  * PM functions:
9  * Ursula Braun <[email protected]>
10  */
11 
12 #define KMSG_COMPONENT "af_iucv"
13 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
14 
15 #include <linux/module.h>
16 #include <linux/types.h>
17 #include <linux/list.h>
18 #include <linux/errno.h>
19 #include <linux/kernel.h>
20 #include <linux/sched.h>
21 #include <linux/slab.h>
22 #include <linux/skbuff.h>
23 #include <linux/init.h>
24 #include <linux/poll.h>
25 #include <net/sock.h>
26 #include <asm/ebcdic.h>
27 #include <asm/cpcmd.h>
28 #include <linux/kmod.h>
29 
30 #include <net/iucv/af_iucv.h>
31 
32 #define VERSION "1.2"
33 
34 static char iucv_userid[80];
35 
36 static const struct proto_ops iucv_sock_ops;
37 
38 static struct proto iucv_proto = {
39  .name = "AF_IUCV",
40  .owner = THIS_MODULE,
41  .obj_size = sizeof(struct iucv_sock),
42 };
43 
44 static struct iucv_interface *pr_iucv;
45 
46 /* special AF_IUCV IPRM messages */
47 static const u8 iprm_shutdown[8] =
48  {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};
49 
50 #define TRGCLS_SIZE (sizeof(((struct iucv_message *)0)->class))
51 
52 /* macros to set/get socket control buffer at correct offset */
53 #define CB_TAG(skb) ((skb)->cb) /* iucv message tag */
54 #define CB_TAG_LEN (sizeof(((struct iucv_message *) 0)->tag))
55 #define CB_TRGCLS(skb) ((skb)->cb + CB_TAG_LEN) /* iucv msg target class */
56 #define CB_TRGCLS_LEN (TRGCLS_SIZE)
57 
58 #define __iucv_sock_wait(sk, condition, timeo, ret) \
59 do { \
60  DEFINE_WAIT(__wait); \
61  long __timeo = timeo; \
62  ret = 0; \
63  prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE); \
64  while (!(condition)) { \
65  if (!__timeo) { \
66  ret = -EAGAIN; \
67  break; \
68  } \
69  if (signal_pending(current)) { \
70  ret = sock_intr_errno(__timeo); \
71  break; \
72  } \
73  release_sock(sk); \
74  __timeo = schedule_timeout(__timeo); \
75  lock_sock(sk); \
76  ret = sock_error(sk); \
77  if (ret) \
78  break; \
79  } \
80  finish_wait(sk_sleep(sk), &__wait); \
81 } while (0)
82 
83 #define iucv_sock_wait(sk, condition, timeo) \
84 ({ \
85  int __ret = 0; \
86  if (!(condition)) \
87  __iucv_sock_wait(sk, condition, timeo, __ret); \
88  __ret; \
89 })
90 
91 static void iucv_sock_kill(struct sock *sk);
92 static void iucv_sock_close(struct sock *sk);
93 static void iucv_sever_path(struct sock *, int);
94 
95 static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
96  struct packet_type *pt, struct net_device *orig_dev);
97 static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
98  struct sk_buff *skb, u8 flags);
99 static void afiucv_hs_callback_txnotify(struct sk_buff *, enum iucv_tx_notify);
100 
101 /* Call Back functions */
102 static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
103 static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
104 static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
105 static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8],
106  u8 ipuser[16]);
107 static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
108 static void iucv_callback_shutdown(struct iucv_path *, u8 ipuser[16]);
109 
110 static struct iucv_sock_list iucv_sk_list = {
111  .lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
112  .autobind_name = ATOMIC_INIT(0)
113 };
114 
115 static struct iucv_handler af_iucv_handler = {
116  .path_pending = iucv_callback_connreq,
117  .path_complete = iucv_callback_connack,
118  .path_severed = iucv_callback_connrej,
119  .message_pending = iucv_callback_rx,
120  .message_complete = iucv_callback_txdone,
121  .path_quiesced = iucv_callback_shutdown,
122 };
123 
124 static inline void high_nmcpy(unsigned char *dst, char *src)
125 {
126  memcpy(dst, src, 8);
127 }
128 
129 static inline void low_nmcpy(unsigned char *dst, char *src)
130 {
131  memcpy(&dst[8], src, 8);
132 }
133 
134 static int afiucv_pm_prepare(struct device *dev)
135 {
136 #ifdef CONFIG_PM_DEBUG
137  printk(KERN_WARNING "afiucv_pm_prepare\n");
138 #endif
139  return 0;
140 }
141 
142 static void afiucv_pm_complete(struct device *dev)
143 {
144 #ifdef CONFIG_PM_DEBUG
145  printk(KERN_WARNING "afiucv_pm_complete\n");
146 #endif
147 }
148 
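149 /**
150  * afiucv_pm_freeze() - Freeze PM callback
151  * @dev: AFIUCV dummy device
152  *
153  * Sever all established IUCV communication paths
154  */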
155 static int afiucv_pm_freeze(struct device *dev)
156 {
157  struct iucv_sock *iucv;
158  struct sock *sk;
159  struct hlist_node *node;
160  int err = 0;
161 
162 #ifdef CONFIG_PM_DEBUG
163  printk(KERN_WARNING "afiucv_pm_freeze\n");
164 #endif
165  read_lock(&iucv_sk_list.lock);
166  sk_for_each(sk, node, &iucv_sk_list.head) {
167  iucv = iucv_sk(sk);
168  switch (sk->sk_state) {
169  case IUCV_DISCONN:
170  case IUCV_CLOSING:
171  case IUCV_CONNECTED:
172  iucv_sever_path(sk, 0);
173  break;
174  case IUCV_OPEN:
175  case IUCV_BOUND:
176  case IUCV_LISTEN:
177  case IUCV_CLOSED:
178  default:
179  break;
180  }
181  skb_queue_purge(&iucv->send_skb_q);
182  skb_queue_purge(&iucv->backlog_skb_q);
183  }
184  read_unlock(&iucv_sk_list.lock);
185  return err;
186 }
187 
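188 /**
189  * afiucv_pm_restore_thaw() - Thaw and restore PM callback
190  * @dev: AFIUCV dummy device
191  *
192  * Socket clean up after freeze
193  */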
194 static int afiucv_pm_restore_thaw(struct device *dev)
195 {
196  struct sock *sk;
197  struct hlist_node *node;
198 
199 #ifdef CONFIG_PM_DEBUG
200  printk(KERN_WARNING "afiucv_pm_restore_thaw\n");
201 #endif
202  read_lock(&iucv_sk_list.lock);
203  sk_for_each(sk, node, &iucv_sk_list.head) {
204  switch (sk->sk_state) {
205  case IUCV_CONNECTED:
206  sk->sk_err = EPIPE;
207  sk->sk_state = IUCV_DISCONN;
208  sk->sk_state_change(sk);
209  break;
210  case IUCV_DISCONN:
211  case IUCV_CLOSING:
212  case IUCV_LISTEN:
213  case IUCV_BOUND:
214  case IUCV_OPEN:
215  default:
216  break;
217  }
218  }
219  read_unlock(&iucv_sk_list.lock);
220  return 0;
221 }
222 
223 static const struct dev_pm_ops afiucv_pm_ops = {
224  .prepare = afiucv_pm_prepare,
225  .complete = afiucv_pm_complete,
226  .freeze = afiucv_pm_freeze,
227  .thaw = afiucv_pm_restore_thaw,
228  .restore = afiucv_pm_restore_thaw,
229 };
230 
231 static struct device_driver af_iucv_driver = {
232  .owner = THIS_MODULE,
233  .name = "afiucv",
234  .bus = NULL,
235  .pm = &afiucv_pm_ops,
236 };
237 
238 /* dummy device used as trigger for PM functions */
239 static struct device *af_iucv_dev;
240 
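241 /**
242  * iucv_msg_length() - Returns the length of an iucv message.
243  * @msg: Pointer to struct iucv_message, MUST NOT be NULL
244  *
245  * The function returns the length of the specified iucv message. For
246  * IPRMDATA messages, the data is stored in the message parameter list and
247  * its length is derived from byte 7 of the rmmsg array (at most 8 bytes);
248  * otherwise the length field of the message is returned.
249  *
250  * Use this function to allocate socket buffers to store iucv message data.
251  */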
261 static inline size_t iucv_msg_length(struct iucv_message *msg)
262 {
263  size_t datalen;
264 
265  if (msg->flags & IUCV_IPRMDATA) {
266  datalen = 0xff - msg->rmmsg[7];
267  return (datalen < 8) ? datalen : 8;
268  }
269  return msg->length;
270 }
271 
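272 /**
273  * iucv_sock_in_state() - check for specific states
274  * @sk: sock structure
275  * @state: first iucv sk state
276  * @state2: second iucv sk state
277  *
278  * Returns true if the socket is in either of the two given states.
279  */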
280 static int iucv_sock_in_state(struct sock *sk, int state, int state2)
281 {
282  return (sk->sk_state == state || sk->sk_state == state2);
283 }
284 
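285 /**
286  * iucv_below_msglim() - function to check if messages can be sent to the peer
287  * @sk: sock structure
288  *
289  * Returns true if another message can be sent without exceeding the message
290  * limit of the underlying transport. Always returns true if the socket is
291  * not connected (no iucv path for checking the message limit).
292  */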
293 static inline int iucv_below_msglim(struct sock *sk)
294 {
295  struct iucv_sock *iucv = iucv_sk(sk);
296 
297  if (sk->sk_state != IUCV_CONNECTED)
298  return 1;
299  if (iucv->transport == AF_IUCV_TRANS_IUCV)
300  return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
301  else
302  return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) &&
303  (atomic_read(&iucv->pendings) <= 0));
304 }
305 
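306 /**
307  * iucv_sock_wake_msglim() - Wake up thread waiting on msg limit
308  */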
309 static void iucv_sock_wake_msglim(struct sock *sk)
310 {
311  struct socket_wq *wq;
312 
313  rcu_read_lock();
314  wq = rcu_dereference(sk->sk_wq);
315  if (wq_has_sleeper(wq))
316  wake_up_interruptible_all(&wq->wait);
317  sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
318  rcu_read_unlock();
319 }
320 
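321 /**
322  * afiucv_hs_send() - send a message through HiperSockets transport
323  */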
324 static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
325  struct sk_buff *skb, u8 flags)
326 {
327  struct iucv_sock *iucv = iucv_sk(sock);
328  struct af_iucv_trans_hdr *phs_hdr;
329  struct sk_buff *nskb;
330  int err, confirm_recv = 0;
331 
332  memset(skb->head, 0, ETH_HLEN);
333  phs_hdr = (struct af_iucv_trans_hdr *)skb_push(skb,
334  sizeof(struct af_iucv_trans_hdr));
335  skb_reset_mac_header(skb);
336  skb_reset_network_header(skb);
337  skb_push(skb, ETH_HLEN);
338  skb_reset_mac_header(skb);
339  memset(phs_hdr, 0, sizeof(struct af_iucv_trans_hdr));
340 
341  phs_hdr->magic = ETH_P_AF_IUCV;
342  phs_hdr->version = 1;
343  phs_hdr->flags = flags;
344  if (flags == AF_IUCV_FLAG_SYN)
345  phs_hdr->window = iucv->msglimit;
346  else if ((flags == AF_IUCV_FLAG_WIN) || !flags) {
347  confirm_recv = atomic_read(&iucv->msg_recv);
348  phs_hdr->window = confirm_recv;
349  if (confirm_recv)
350  phs_hdr->flags = phs_hdr->flags | AF_IUCV_FLAG_WIN;
351  }
352  memcpy(phs_hdr->destUserID, iucv->dst_user_id, 8);
353  memcpy(phs_hdr->destAppName, iucv->dst_name, 8);
354  memcpy(phs_hdr->srcUserID, iucv->src_user_id, 8);
355  memcpy(phs_hdr->srcAppName, iucv->src_name, 8);
356  ASCEBC(phs_hdr->destUserID, sizeof(phs_hdr->destUserID));
357  ASCEBC(phs_hdr->destAppName, sizeof(phs_hdr->destAppName));
358  ASCEBC(phs_hdr->srcUserID, sizeof(phs_hdr->srcUserID));
359  ASCEBC(phs_hdr->srcAppName, sizeof(phs_hdr->srcAppName));
360  if (imsg)
361  memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));
362 
363  skb->dev = iucv->hs_dev;
364  if (!skb->dev)
365  return -ENODEV;
366  if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev))
367  return -ENETDOWN;
368  if (skb->len > skb->dev->mtu) {
369  if (sock->sk_type == SOCK_SEQPACKET)
370  return -EMSGSIZE;
371  else
372  skb_trim(skb, skb->dev->mtu);
373  }
374  skb->protocol = ETH_P_AF_IUCV;
375  nskb = skb_clone(skb, GFP_ATOMIC);
376  if (!nskb)
377  return -ENOMEM;
378  skb_queue_tail(&iucv->send_skb_q, nskb);
379  err = dev_queue_xmit(skb);
380  if (net_xmit_eval(err)) {
381  skb_unlink(nskb, &iucv->send_skb_q);
382  kfree_skb(nskb);
383  } else {
384  atomic_sub(confirm_recv, &iucv->msg_recv);
385  WARN_ON(atomic_read(&iucv->msg_recv) < 0);
386  }
387  return net_xmit_eval(err);
388 }
389 
390 static struct sock *__iucv_get_sock_by_name(char *nm)
391 {
392  struct sock *sk;
393  struct hlist_node *node;
394 
395  sk_for_each(sk, node, &iucv_sk_list.head)
396  if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
397  return sk;
398 
399  return NULL;
400 }
401 
402 static void iucv_sock_destruct(struct sock *sk)
403 {
404  skb_queue_purge(&sk->sk_receive_queue);
405  skb_queue_purge(&sk->sk_error_queue);
406 
407  sk_mem_reclaim(sk);
408 
409  if (!sock_flag(sk, SOCK_DEAD)) {
410  pr_err("Attempt to release alive iucv socket %p\n", sk);
411  return;
412  }
413 
414  WARN_ON(atomic_read(&sk->sk_rmem_alloc));
415  WARN_ON(atomic_read(&sk->sk_wmem_alloc));
416  WARN_ON(sk->sk_wmem_queued);
417  WARN_ON(sk->sk_forward_alloc);
418 }
419 
420 /* Cleanup Listen */
421 static void iucv_sock_cleanup_listen(struct sock *parent)
422 {
423  struct sock *sk;
424 
425  /* Close non-accepted connections */
426  while ((sk = iucv_accept_dequeue(parent, NULL))) {
427  iucv_sock_close(sk);
428  iucv_sock_kill(sk);
429  }
430 
431  parent->sk_state = IUCV_CLOSED;
432 }
433 
434 /* Kill socket (only if zapped and orphaned) */
435 static void iucv_sock_kill(struct sock *sk)
436 {
437  if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
438  return;
439 
440  iucv_sock_unlink(&iucv_sk_list, sk);
441  sock_set_flag(sk, SOCK_DEAD);
442  sock_put(sk);
443 }
444 
445 /* Terminate an IUCV path */
446 static void iucv_sever_path(struct sock *sk, int with_user_data)
447 {
448  unsigned char user_data[16];
449  struct iucv_sock *iucv = iucv_sk(sk);
450  struct iucv_path *path = iucv->path;
451 
452  if (iucv->path) {
453  iucv->path = NULL;
454  if (with_user_data) {
455  low_nmcpy(user_data, iucv->src_name);
456  high_nmcpy(user_data, iucv->dst_name);
457  ASCEBC(user_data, sizeof(user_data));
458  pr_iucv->path_sever(path, user_data);
459  } else
460  pr_iucv->path_sever(path, NULL);
461  iucv_path_free(path);
462  }
463 }
464 
465 /* Send FIN through an IUCV socket for HIPER transport */
466 static int iucv_send_ctrl(struct sock *sk, u8 flags)
467 {
468  int err = 0;
469  int blen;
470  struct sk_buff *skb;
471 
472  blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
473  skb = sock_alloc_send_skb(sk, blen, 1, &err);
474  if (skb) {
475  skb_reserve(skb, blen);
476  err = afiucv_hs_send(NULL, sk, skb, flags);
477  }
478  return err;
479 }
480 
481 /* Close an IUCV socket */
482 static void iucv_sock_close(struct sock *sk)
483 {
484  struct iucv_sock *iucv = iucv_sk(sk);
485  unsigned long timeo;
486  int err = 0;
487 
488  lock_sock(sk);
489 
490  switch (sk->sk_state) {
491  case IUCV_LISTEN:
492  iucv_sock_cleanup_listen(sk);
493  break;
494 
495  case IUCV_CONNECTED:
496  if (iucv->transport == AF_IUCV_TRANS_HIPER) {
497  err = iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
498  sk->sk_state = IUCV_DISCONN;
499  sk->sk_state_change(sk);
500  }
501  case IUCV_DISCONN: /* fall through */
502  sk->sk_state = IUCV_CLOSING;
503  sk->sk_state_change(sk);
504 
505  if (!err && !skb_queue_empty(&iucv->send_skb_q)) {
506  if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
507  timeo = sk->sk_lingertime;
508  else
509  timeo = IUCV_DISCONN_TIMEOUT;
510  iucv_sock_wait(sk,
511  iucv_sock_in_state(sk, IUCV_CLOSED, 0),
512  timeo);
513  }
514 
515  case IUCV_CLOSING: /* fall through */
516  sk->sk_state = IUCV_CLOSED;
517  sk->sk_state_change(sk);
518 
519  sk->sk_err = ECONNRESET;
520  sk->sk_state_change(sk);
521 
522  skb_queue_purge(&iucv->send_skb_q);
523  skb_queue_purge(&iucv->backlog_skb_q);
524 
525  default: /* fall through */
526  iucv_sever_path(sk, 1);
527  }
528 
529  if (iucv->hs_dev) {
530  dev_put(iucv->hs_dev);
531  iucv->hs_dev = NULL;
532  sk->sk_bound_dev_if = 0;
533  }
534 
535  /* mark socket for deletion by iucv_sock_kill() */
536  sock_set_flag(sk, SOCK_ZAPPED);
537 
538  release_sock(sk);
539 }
540 
541 static void iucv_sock_init(struct sock *sk, struct sock *parent)
542 {
543  if (parent)
544  sk->sk_type = parent->sk_type;
545 }
546 
547 static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
548 {
549  struct sock *sk;
550  struct iucv_sock *iucv;
551 
552  sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto);
553  if (!sk)
554  return NULL;
555  iucv = iucv_sk(sk);
556 
557  sock_init_data(sock, sk);
558  INIT_LIST_HEAD(&iucv->accept_q);
559  spin_lock_init(&iucv->accept_q_lock);
560  skb_queue_head_init(&iucv->send_skb_q);
561  INIT_LIST_HEAD(&iucv->message_q.list);
562  spin_lock_init(&iucv->message_q.lock);
563  skb_queue_head_init(&iucv->backlog_skb_q);
564  iucv->send_tag = 0;
565  atomic_set(&iucv->pendings, 0);
566  iucv->flags = 0;
567  iucv->msglimit = 0;
568  atomic_set(&iucv->msg_sent, 0);
569  atomic_set(&iucv->msg_recv, 0);
570  iucv->path = NULL;
571  iucv->sk_txnotify = afiucv_hs_callback_txnotify;
572  memset(&iucv->src_user_id , 0, 32);
573  if (pr_iucv)
574  iucv->transport = AF_IUCV_TRANS_IUCV;
575  else
576  iucv->transport = AF_IUCV_TRANS_HIPER;
577 
578  sk->sk_destruct = iucv_sock_destruct;
579  sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
580  sk->sk_allocation = GFP_DMA;
581 
582  sock_reset_flag(sk, SOCK_ZAPPED);
583 
584  sk->sk_protocol = proto;
585  sk->sk_state = IUCV_OPEN;
586 
587  iucv_sock_link(&iucv_sk_list, sk);
588  return sk;
589 }
590 
591 /* Create an IUCV socket */
592 static int iucv_sock_create(struct net *net, struct socket *sock, int protocol,
593  int kern)
594 {
595  struct sock *sk;
596 
597  if (protocol && protocol != PF_IUCV)
598  return -EPROTONOSUPPORT;
599 
600  sock->state = SS_UNCONNECTED;
601 
602  switch (sock->type) {
603  case SOCK_STREAM:
604  sock->ops = &iucv_sock_ops;
605  break;
606  case SOCK_SEQPACKET:
607  /* currently, proto ops can handle both sk types */
608  sock->ops = &iucv_sock_ops;
609  break;
610  default:
611  return -ESOCKTNOSUPPORT;
612  }
613 
614  sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL);
615  if (!sk)
616  return -ENOMEM;
617 
618  iucv_sock_init(sk, NULL);
619 
620  return 0;
621 }
622 
623 void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
624 {
625  write_lock_bh(&l->lock);
626  sk_add_node(sk, &l->head);
627  write_unlock_bh(&l->lock);
628 }
629 
630 void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
631 {
632  write_lock_bh(&l->lock);
633  sk_del_node_init(sk);
634  write_unlock_bh(&l->lock);
635 }
636 
637 void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
638 {
639  unsigned long flags;
640  struct iucv_sock *par = iucv_sk(parent);
641 
642  sock_hold(sk);
643  spin_lock_irqsave(&par->accept_q_lock, flags);
644  list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
645  spin_unlock_irqrestore(&par->accept_q_lock, flags);
646  iucv_sk(sk)->parent = parent;
647  sk_acceptq_added(parent);
648 }
649 
650 void iucv_accept_unlink(struct sock *sk)
651 {
652  unsigned long flags;
653  struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);
654 
655  spin_lock_irqsave(&par->accept_q_lock, flags);
656  list_del_init(&iucv_sk(sk)->accept_q);
657  spin_unlock_irqrestore(&par->accept_q_lock, flags);
658  sk_acceptq_removed(iucv_sk(sk)->parent);
659  iucv_sk(sk)->parent = NULL;
660  sock_put(sk);
661 }
662 
663 struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
664 {
665  struct iucv_sock *isk, *n;
666  struct sock *sk;
667 
668  list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
669  sk = (struct sock *) isk;
670  lock_sock(sk);
671 
672  if (sk->sk_state == IUCV_CLOSED) {
673  iucv_accept_unlink(sk);
674  release_sock(sk);
675  continue;
676  }
677 
678  if (sk->sk_state == IUCV_CONNECTED ||
679  sk->sk_state == IUCV_DISCONN ||
680  !newsock) {
681  iucv_accept_unlink(sk);
682  if (newsock)
683  sock_graft(sk, newsock);
684 
685  release_sock(sk);
686  return sk;
687  }
688 
689  release_sock(sk);
690  }
691  return NULL;
692 }
693 
694 /* Bind an unbound socket */
695 static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
696  int addr_len)
697 {
698  struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
699  struct sock *sk = sock->sk;
700  struct iucv_sock *iucv;
701  int err = 0;
702  struct net_device *dev;
703  char uid[9];
704 
705  /* Verify the input sockaddr */
706  if (!addr || addr->sa_family != AF_IUCV)
707  return -EINVAL;
708 
709  lock_sock(sk);
710  if (sk->sk_state != IUCV_OPEN) {
711  err = -EBADFD;
712  goto done;
713  }
714 
715  write_lock_bh(&iucv_sk_list.lock);
716 
717  iucv = iucv_sk(sk);
718  if (__iucv_get_sock_by_name(sa->siucv_name)) {
719  err = -EADDRINUSE;
720  goto done_unlock;
721  }
722  if (iucv->path)
723  goto done_unlock;
724 
725  /* Bind the socket */
726  if (pr_iucv)
727  if (!memcmp(sa->siucv_user_id, iucv_userid, 8))
728  goto vm_bind; /* VM IUCV transport */
729 
730  /* try hiper transport */
731  memcpy(uid, sa->siucv_user_id, sizeof(uid));
732  ASCEBC(uid, 8);
733  rcu_read_lock();
734  for_each_netdev_rcu(&init_net, dev) {
735  if (!memcmp(dev->perm_addr, uid, 8)) {
736  memcpy(iucv->src_name, sa->siucv_name, 8);
737  memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
738  sk->sk_bound_dev_if = dev->ifindex;
739  iucv->hs_dev = dev;
740  dev_hold(dev);
741  sk->sk_state = IUCV_BOUND;
742  iucv->transport = AF_IUCV_TRANS_HIPER;
743  if (!iucv->msglimit)
744  iucv->msglimit = IUCV_HIPER_MSGLIM_DEFAULT;
745  rcu_read_unlock();
746  goto done_unlock;
747  }
748  }
749  rcu_read_unlock();
750 vm_bind:
751  if (pr_iucv) {
752  /* use local userid for backward compat */
753  memcpy(iucv->src_name, sa->siucv_name, 8);
754  memcpy(iucv->src_user_id, iucv_userid, 8);
755  sk->sk_state = IUCV_BOUND;
756  iucv->transport = AF_IUCV_TRANS_IUCV;
757  if (!iucv->msglimit)
758  iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
759  goto done_unlock;
760  }
761  /* found no dev to bind */
762  err = -ENODEV;
763 done_unlock:
764  /* Release the socket list lock */
765  write_unlock_bh(&iucv_sk_list.lock);
766 done:
767  release_sock(sk);
768  return err;
769 }
770 
771 /* Automatically bind an unbound socket */
772 static int iucv_sock_autobind(struct sock *sk)
773 {
774  struct iucv_sock *iucv = iucv_sk(sk);
775  char name[12];
776  int err = 0;
777 
778  if (unlikely(!pr_iucv))
779  return -EPROTO;
780 
781  memcpy(iucv->src_user_id, iucv_userid, 8);
782 
783  write_lock_bh(&iucv_sk_list.lock);
784 
785  sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
786  while (__iucv_get_sock_by_name(name)) {
787  sprintf(name, "%08x",
788  atomic_inc_return(&iucv_sk_list.autobind_name));
789  }
790 
791  write_unlock_bh(&iucv_sk_list.lock);
792 
793  memcpy(&iucv->src_name, name, 8);
794 
795  if (!iucv->msglimit)
796  iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
797 
798  return err;
799 }
800 
801 static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr)
802 {
803  struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
804  struct sock *sk = sock->sk;
805  struct iucv_sock *iucv = iucv_sk(sk);
806  unsigned char user_data[16];
807  int err;
808 
809  high_nmcpy(user_data, sa->siucv_name);
810  low_nmcpy(user_data, iucv->src_name);
811  ASCEBC(user_data, sizeof(user_data));
812 
813  /* Create path. */
814  iucv->path = iucv_path_alloc(iucv->msglimit,
815  IUCV_IPRMDATA, GFP_KERNEL);
816  if (!iucv->path) {
817  err = -ENOMEM;
818  goto done;
819  }
820  err = pr_iucv->path_connect(iucv->path, &af_iucv_handler,
821  sa->siucv_user_id, NULL, user_data,
822  sk);
823  if (err) {
824  iucv_path_free(iucv->path);
825  iucv->path = NULL;
826  switch (err) {
827  case 0x0b: /* Target communicator is not logged on */
828  err = -ENETUNREACH;
829  break;
830  case 0x0d: /* Max connections for this guest exceeded */
831  case 0x0e: /* Max connections for target guest exceeded */
832  err = -EAGAIN;
833  break;
834  case 0x0f: /* Missing IUCV authorization */
835  err = -EACCES;
836  break;
837  default:
838  err = -ECONNREFUSED;
839  break;
840  }
841  }
842 done:
843  return err;
844 }
845 
846 /* Connect an unconnected socket */
847 static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
848  int alen, int flags)
849 {
850  struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
851  struct sock *sk = sock->sk;
852  struct iucv_sock *iucv = iucv_sk(sk);
853  int err;
854 
855  if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
856  return -EINVAL;
857 
858  if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
859  return -EBADFD;
860 
861  if (sk->sk_state == IUCV_OPEN &&
862  iucv->transport == AF_IUCV_TRANS_HIPER)
863  return -EBADFD; /* explicit bind required */
864 
865  if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
866  return -EINVAL;
867 
868  if (sk->sk_state == IUCV_OPEN) {
869  err = iucv_sock_autobind(sk);
870  if (unlikely(err))
871  return err;
872  }
873 
874  lock_sock(sk);
875 
876  /* Set the destination information */
877  memcpy(iucv->dst_user_id, sa->siucv_user_id, 8);
878  memcpy(iucv->dst_name, sa->siucv_name, 8);
879 
880  if (iucv->transport == AF_IUCV_TRANS_HIPER)
881  err = iucv_send_ctrl(sock->sk, AF_IUCV_FLAG_SYN);
882  else
883  err = afiucv_path_connect(sock, addr);
884  if (err)
885  goto done;
886 
887  if (sk->sk_state != IUCV_CONNECTED)
888  err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
889  IUCV_DISCONN),
890  sock_sndtimeo(sk, flags & O_NONBLOCK));
891 
892  if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED)
893  err = -ECONNREFUSED;
894 
895  if (err && iucv->transport == AF_IUCV_TRANS_IUCV)
896  iucv_sever_path(sk, 0);
897 
898 done:
899  release_sock(sk);
900  return err;
901 }
902 
903 /* Move a socket into listening state. */
904 static int iucv_sock_listen(struct socket *sock, int backlog)
905 {
906  struct sock *sk = sock->sk;
907  int err;
908 
909  lock_sock(sk);
910 
911  err = -EINVAL;
912  if (sk->sk_state != IUCV_BOUND)
913  goto done;
914 
915  if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
916  goto done;
917 
918  sk->sk_max_ack_backlog = backlog;
919  sk->sk_ack_backlog = 0;
920  sk->sk_state = IUCV_LISTEN;
921  err = 0;
922 
923 done:
924  release_sock(sk);
925  return err;
926 }
927 
928 /* Accept a pending connection */
929 static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
930  int flags)
931 {
932  DECLARE_WAITQUEUE(wait, current);
933  struct sock *sk = sock->sk, *nsk;
934  long timeo;
935  int err = 0;
936 
937  lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
938 
939  if (sk->sk_state != IUCV_LISTEN) {
940  err = -EBADFD;
941  goto done;
942  }
943 
944  timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
945 
946  /* Wait for an incoming connection */
947  add_wait_queue_exclusive(sk_sleep(sk), &wait);
948  while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
949  set_current_state(TASK_INTERRUPTIBLE);
950  if (!timeo) {
951  err = -EAGAIN;
952  break;
953  }
954 
955  release_sock(sk);
956  timeo = schedule_timeout(timeo);
957  lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
958 
959  if (sk->sk_state != IUCV_LISTEN) {
960  err = -EBADFD;
961  break;
962  }
963 
964  if (signal_pending(current)) {
965  err = sock_intr_errno(timeo);
966  break;
967  }
968  }
969 
970  set_current_state(TASK_RUNNING);
971  remove_wait_queue(sk_sleep(sk), &wait);
972 
973  if (err)
974  goto done;
975 
976  newsock->state = SS_CONNECTED;
977 
978 done:
979  release_sock(sk);
980  return err;
981 }
982 
983 static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
984  int *len, int peer)
985 {
986  struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
987  struct sock *sk = sock->sk;
988  struct iucv_sock *iucv = iucv_sk(sk);
989 
990  addr->sa_family = AF_IUCV;
991  *len = sizeof(struct sockaddr_iucv);
992 
993  if (peer) {
994  memcpy(siucv->siucv_user_id, iucv->dst_user_id, 8);
995  memcpy(siucv->siucv_name, iucv->dst_name, 8);
996  } else {
997  memcpy(siucv->siucv_user_id, iucv->src_user_id, 8);
998  memcpy(siucv->siucv_name, iucv->src_name, 8);
999  }
1000  memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
1001  memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
1002  memset(&siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));
1003 
1004  return 0;
1005 }
1006 
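1007 /**
1008  * iucv_send_iprm() - Send socket data in parameter list of an iucv message.
1009  * @path: IUCV path
1010  * @msg: Pointer to a struct iucv_message
1011  * @skb: The socket data to send, skb->len MUST BE <= 7
1012  *
1013  * Send the socket data in the parameter list of the iucv message
1014  * (IUCV_IPRMDATA). The socket data is stored at index 0 to 6 in the
1015  * parameter list and byte 7 holds 0xff minus the data length
1016  * (see also iucv_msg_length()).
1017  *
1018  * Returns the error code from the pr_iucv->message_send() call.
1019  */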
1020 static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
1021  struct sk_buff *skb)
1022 {
1023  u8 prmdata[8];
1024 
1025  memcpy(prmdata, (void *) skb->data, skb->len);
1026  prmdata[7] = 0xff - (u8) skb->len;
1027  return pr_iucv->message_send(path, msg, IUCV_IPRMDATA, 0,
1028  (void *) prmdata, 8);
1029 }
1030 
1031 static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
1032  struct msghdr *msg, size_t len)
1033 {
1034  struct sock *sk = sock->sk;
1035  struct iucv_sock *iucv = iucv_sk(sk);
1036  struct sk_buff *skb;
1037  struct iucv_message txmsg;
1038  struct cmsghdr *cmsg;
1039  int cmsg_done;
1040  long timeo;
1041  char user_id[9];
1042  char appl_id[9];
1043  int err;
1044  int noblock = msg->msg_flags & MSG_DONTWAIT;
1045 
1046  err = sock_error(sk);
1047  if (err)
1048  return err;
1049 
1050  if (msg->msg_flags & MSG_OOB)
1051  return -EOPNOTSUPP;
1052 
1053  /* SOCK_SEQPACKET: we do not support segmented records */
1054  if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR))
1055  return -EOPNOTSUPP;
1056 
1057  lock_sock(sk);
1058 
1059  if (sk->sk_shutdown & SEND_SHUTDOWN) {
1060  err = -EPIPE;
1061  goto out;
1062  }
1063 
1064  /* Return if the socket is not in connected state */
1065  if (sk->sk_state != IUCV_CONNECTED) {
1066  err = -ENOTCONN;
1067  goto out;
1068  }
1069 
1070  /* initialize defaults */
1071  cmsg_done = 0; /* check for duplicate headers */
1072  txmsg.class = 0;
1073 
1074  /* iterate over control messages */
1075  for (cmsg = CMSG_FIRSTHDR(msg); cmsg;
1076  cmsg = CMSG_NXTHDR(msg, cmsg)) {
1077 
1078  if (!CMSG_OK(msg, cmsg)) {
1079  err = -EINVAL;
1080  goto out;
1081  }
1082 
1083  if (cmsg->cmsg_level != SOL_IUCV)
1084  continue;
1085 
1086  if (cmsg->cmsg_type & cmsg_done) {
1087  err = -EINVAL;
1088  goto out;
1089  }
1090  cmsg_done |= cmsg->cmsg_type;
1091 
1092  switch (cmsg->cmsg_type) {
1093  case SCM_IUCV_TRGCLS:
1094  if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
1095  err = -EINVAL;
1096  goto out;
1097  }
1098 
1099  /* set iucv message target class */
1100  memcpy(&txmsg.class,
1101  (void *) CMSG_DATA(cmsg), TRGCLS_SIZE);
1102 
1103  break;
1104 
1105  default:
1106  err = -EINVAL;
1107  goto out;
1108  break;
1109  }
1110  }
1111 
1112  /* allocate one skb for each iucv message:
1113  * this is fine for SOCK_SEQPACKET (unless we want to support
1114  * segmented records using the MSG_EOR flag), but
1115  * for SOCK_STREAM we might want to improve it in future */
1116  if (iucv->transport == AF_IUCV_TRANS_HIPER)
1117  skb = sock_alloc_send_skb(sk,
1118  len + sizeof(struct af_iucv_trans_hdr) + ETH_HLEN,
1119  noblock, &err);
1120  else
1121  skb = sock_alloc_send_skb(sk, len, noblock, &err);
1122  if (!skb) {
1123  err = -ENOMEM;
1124  goto out;
1125  }
1126  if (iucv->transport == AF_IUCV_TRANS_HIPER)
1127  skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN);
1128  if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
1129  err = -EFAULT;
1130  goto fail;
1131  }
1132 
1133  /* wait if the limit of outstanding messages for the iucv path has been reached */
1134  timeo = sock_sndtimeo(sk, noblock);
1135  err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
1136  if (err)
1137  goto fail;
1138 
1139  /* return -ECONNRESET if the socket is no longer connected */
1140  if (sk->sk_state != IUCV_CONNECTED) {
1141  err = -ECONNRESET;
1142  goto fail;
1143  }
1144 
1145  /* increment and save iucv message tag for msg_completion cbk */
1146  txmsg.tag = iucv->send_tag++;
1147  memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
1148 
1149  if (iucv->transport == AF_IUCV_TRANS_HIPER) {
1150  atomic_inc(&iucv->msg_sent);
1151  err = afiucv_hs_send(&txmsg, sk, skb, 0);
1152  if (err) {
1153  atomic_dec(&iucv->msg_sent);
1154  goto fail;
1155  }
1156  goto release;
1157  }
1158  skb_queue_tail(&iucv->send_skb_q, skb);
1159 
1160  if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
1161  && skb->len <= 7) {
1162  err = iucv_send_iprm(iucv->path, &txmsg, skb);
1163 
1164  /* on success: there is no message_complete callback
1165  * for an IPRMDATA msg; remove skb from send queue */
1166  if (err == 0) {
1167  skb_unlink(skb, &iucv->send_skb_q);
1168  kfree_skb(skb);
1169  }
1170 
1171  /* this error should never happen since the
1172  * IUCV_IPRMDATA path flag is set... sever path */
1173  if (err == 0x15) {
1174  pr_iucv->path_sever(iucv->path, NULL);
1175  skb_unlink(skb, &iucv->send_skb_q);
1176  err = -EPIPE;
1177  goto fail;
1178  }
1179  } else
1180  err = pr_iucv->message_send(iucv->path, &txmsg, 0, 0,
1181  (void *) skb->data, skb->len);
1182  if (err) {
1183  if (err == 3) {
1184  user_id[8] = 0;
1185  memcpy(user_id, iucv->dst_user_id, 8);
1186  appl_id[8] = 0;
1187  memcpy(appl_id, iucv->dst_name, 8);
1188  pr_err("Application %s on z/VM guest %s"
1189  " exceeds message limit\n",
1190  appl_id, user_id);
1191  err = -EAGAIN;
1192  } else
1193  err = -EPIPE;
1194  skb_unlink(skb, &iucv->send_skb_q);
1195  goto fail;
1196  }
1197 
1198 release:
1199  release_sock(sk);
1200  return len;
1201 
1202 fail:
1203  kfree_skb(skb);
1204 out:
1205  release_sock(sk);
1206  return err;
1207 }
1208 
1209 /* iucv_fragment_skb() - Fragment a single IUCV message into multiple skb's
1210  *
1211  * Locking: must be called with message_q.lock held
1212  */
1213 static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
1214 {
1215  int dataleft, size, copied = 0;
1216  struct sk_buff *nskb;
1217 
1218  dataleft = len;
1219  while (dataleft) {
1220  if (dataleft >= sk->sk_rcvbuf / 4)
1221  size = sk->sk_rcvbuf / 4;
1222  else
1223  size = dataleft;
1224 
1225  nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
1226  if (!nskb)
1227  return -ENOMEM;
1228 
1229  /* copy target class to control buffer of new skb */
1230  memcpy(CB_TRGCLS(nskb), CB_TRGCLS(skb), CB_TRGCLS_LEN);
1231 
1232  /* copy data fragment */
1233  memcpy(nskb->data, skb->data + copied, size);
1234  copied += size;
1235  dataleft -= size;
1236 
1237  skb_reset_transport_header(nskb);
1238  skb_reset_network_header(nskb);
1239  nskb->len = size;
1240 
1241  skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, nskb);
1242  }
1243 
1244  return 0;
1245 }
1246 
1247 /* iucv_process_message() - Receive a single outstanding IUCV message
1248  *
1249  * Locking: must be called with message_q.lock held
1250  */
1251 static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
1252  struct iucv_path *path,
1253  struct iucv_message *msg)
1254 {
1255  int rc;
1256  unsigned int len;
1257 
1258  len = iucv_msg_length(msg);
1259 
1260  /* store msg target class in the second 4 bytes of skb ctrl buffer */
1261  /* Note: the first 4 bytes are reserved for msg tag */
1262  memcpy(CB_TRGCLS(skb), &msg->class, CB_TRGCLS_LEN);
1263 
1264  /* check for special IPRM messages (e.g. iucv_sock_shutdown) */
1265  if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
1266  if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) {
1267  skb->data = NULL;
1268  skb->len = 0;
1269  }
1270  } else {
1271  rc = pr_iucv->message_receive(path, msg,
1272  msg->flags & IUCV_IPRMDATA,
1273  skb->data, len, NULL);
1274  if (rc) {
1275  kfree_skb(skb);
1276  return;
1277  }
1278  /* we need to fragment iucv messages for SOCK_STREAM only;
1279  * for SOCK_SEQPACKET, it is only relevant if we support
1280  * record segmentation using MSG_EOR (see also recvmsg()) */
1281  if (sk->sk_type == SOCK_STREAM &&
1282  skb->truesize >= sk->sk_rcvbuf / 4) {
1283  rc = iucv_fragment_skb(sk, skb, len);
1284  kfree_skb(skb);
1285  skb = NULL;
1286  if (rc) {
1287  pr_iucv->path_sever(path, NULL);
1288  return;
1289  }
1290  skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
1291  } else {
1292  skb_reset_transport_header(skb);
1293  skb_reset_network_header(skb);
1294  skb->len = len;
1295  }
1296  }
1297 
1298  if (sock_queue_rcv_skb(sk, skb))
1299  skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);
1300 }
1301 
1302 /* iucv_process_message_q() - Process outstanding IUCV messages
1303  *
1304  * Locking: must be called with message_q.lock held
1305  */
1306 static void iucv_process_message_q(struct sock *sk)
1307 {
1308  struct iucv_sock *iucv = iucv_sk(sk);
1309  struct sk_buff *skb;
1310  struct sock_msg_q *p, *n;
1311 
1312  list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
1313  skb = alloc_skb(iucv_msg_length(&p->msg), GFP_ATOMIC | GFP_DMA);
1314  if (!skb)
1315  break;
1316  iucv_process_message(sk, skb, p->path, &p->msg);
1317  list_del(&p->list);
1318  kfree(p);
1319  if (!skb_queue_empty(&iucv->backlog_skb_q))
1320  break;
1321  }
1322 }
1323 
1324 static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
1325  struct msghdr *msg, size_t len, int flags)
1326 {
1327  int noblock = flags & MSG_DONTWAIT;
1328  struct sock *sk = sock->sk;
1329  struct iucv_sock *iucv = iucv_sk(sk);
1330  unsigned int copied, rlen;
1331  struct sk_buff *skb, *rskb, *cskb;
1332  int err = 0;
1333 
1334  if ((sk->sk_state == IUCV_DISCONN) &&
1335  skb_queue_empty(&iucv->backlog_skb_q) &&
1336  skb_queue_empty(&sk->sk_receive_queue) &&
1337  list_empty(&iucv->message_q.list))
1338  return 0;
1339 
1340  if (flags & (MSG_OOB))
1341  return -EOPNOTSUPP;
1342 
1343  /* receive/dequeue next skb:
1344  * the function understands MSG_PEEK and, thus, does not dequeue skb */
1345  skb = skb_recv_datagram(sk, flags, noblock, &err);
1346  if (!skb) {
1347  if (sk->sk_shutdown & RCV_SHUTDOWN)
1348  return 0;
1349  return err;
1350  }
1351 
1352  rlen = skb->len; /* real length of skb */
1353  copied = min_t(unsigned int, rlen, len);
1354  if (!rlen)
1355  sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN;
1356 
1357  cskb = skb;
1358  if (skb_copy_datagram_iovec(cskb, 0, msg->msg_iov, copied)) {
1359  if (!(flags & MSG_PEEK))
1360  skb_queue_head(&sk->sk_receive_queue, skb);
1361  return -EFAULT;
1362  }
1363 
1364  /* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */
1365  if (sk->sk_type == SOCK_SEQPACKET) {
1366  if (copied < rlen)
1367  msg->msg_flags |= MSG_TRUNC;
1368  /* each iucv message contains a complete record */
1369  msg->msg_flags |= MSG_EOR;
1370  }
1371 
1372  /* create control message to store iucv msg target class:
1373  * get the trgcls from the control buffer of the skb due to
1374  * fragmentation of original iucv message. */
1375  err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
1376  CB_TRGCLS_LEN, CB_TRGCLS(skb));
1377  if (err) {
1378  if (!(flags & MSG_PEEK))
1379  skb_queue_head(&sk->sk_receive_queue, skb);
1380  return err;
1381  }
1382 
1383  /* Mark read part of skb as used */
1384  if (!(flags & MSG_PEEK)) {
1385 
1386  /* SOCK_STREAM: re-queue skb if it contains unreceived data */
1387  if (sk->sk_type == SOCK_STREAM) {
1388  skb_pull(skb, copied);
1389  if (skb->len) {
1390  skb_queue_head(&sk->sk_receive_queue, skb);
1391  goto done;
1392  }
1393  }
1394 
1395  kfree_skb(skb);
1396  if (iucv->transport == AF_IUCV_TRANS_HIPER) {
1397  atomic_inc(&iucv->msg_recv);
1398  if (atomic_read(&iucv->msg_recv) > iucv->msglimit) {
1399  WARN_ON(1);
1400  iucv_sock_close(sk);
1401  return -EFAULT;
1402  }
1403  }
1404 
1405  /* Queue backlog skbs */
1406  spin_lock_bh(&iucv->message_q.lock);
1407  rskb = skb_dequeue(&iucv->backlog_skb_q);
1408  while (rskb) {
1409  if (sock_queue_rcv_skb(sk, rskb)) {
1410  skb_queue_head(&iucv->backlog_skb_q,
1411  rskb);
1412  break;
1413  } else {
1414  rskb = skb_dequeue(&iucv->backlog_skb_q);
1415  }
1416  }
1417  if (skb_queue_empty(&iucv->backlog_skb_q)) {
1418  if (!list_empty(&iucv->message_q.list))
1419  iucv_process_message_q(sk);
1420  if (atomic_read(&iucv->msg_recv) >=
1421  iucv->msglimit / 2) {
1422  err = iucv_send_ctrl(sk, AF_IUCV_FLAG_WIN);
1423  if (err) {
1424  sk->sk_state = IUCV_DISCONN;
1425  sk->sk_state_change(sk);
1426  }
1427  }
1428  }
1429  spin_unlock_bh(&iucv->message_q.lock);
1430  }
1431 
1432 done:
1433  /* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
1434  if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
1435  copied = rlen;
1436 
1437  return copied;
1438 }
1439 
1440 static inline unsigned int iucv_accept_poll(struct sock *parent)
1441 {
1442  struct iucv_sock *isk, *n;
1443  struct sock *sk;
1444 
1445  list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
1446  sk = (struct sock *) isk;
1447 
1448  if (sk->sk_state == IUCV_CONNECTED)
1449  return POLLIN | POLLRDNORM;
1450  }
1451 
1452  return 0;
1453 }
1454 
1455 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
1456  poll_table *wait)
1457 {
1458  struct sock *sk = sock->sk;
1459  unsigned int mask = 0;
1460 
1461  sock_poll_wait(file, sk_sleep(sk), wait);
1462 
1463  if (sk->sk_state == IUCV_LISTEN)
1464  return iucv_accept_poll(sk);
1465 
1466  if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
1467  mask |= POLLERR;
1468 
1469  if (sk->sk_shutdown & RCV_SHUTDOWN)
1470  mask |= POLLRDHUP;
1471 
1472  if (sk->sk_shutdown == SHUTDOWN_MASK)
1473  mask |= POLLHUP;
1474 
1475  if (!skb_queue_empty(&sk->sk_receive_queue) ||
1476  (sk->sk_shutdown & RCV_SHUTDOWN))
1477  mask |= POLLIN | POLLRDNORM;
1478 
1479  if (sk->sk_state == IUCV_CLOSED)
1480  mask |= POLLHUP;
1481 
1482  if (sk->sk_state == IUCV_DISCONN)
1483  mask |= POLLIN;
1484 
1485  if (sock_writeable(sk) && iucv_below_msglim(sk))
1486  mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
1487  else
1488  set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1489 
1490  return mask;
1491 }
1492 
1493 static int iucv_sock_shutdown(struct socket *sock, int how)
1494 {
1495  struct sock *sk = sock->sk;
1496  struct iucv_sock *iucv = iucv_sk(sk);
1497  struct iucv_message txmsg;
1498  int err = 0;
1499 
1500  how++;
1501 
1502  if ((how & ~SHUTDOWN_MASK) || !how)
1503  return -EINVAL;
1504 
1505  lock_sock(sk);
1506  switch (sk->sk_state) {
1507  case IUCV_LISTEN:
1508  case IUCV_DISCONN:
1509  case IUCV_CLOSING:
1510  case IUCV_CLOSED:
1511  err = -ENOTCONN;
1512  goto fail;
1513  default:
1514  break;
1515  }
1516 
1517  if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
1518  if (iucv->transport == AF_IUCV_TRANS_IUCV) {
1519  txmsg.class = 0;
1520  txmsg.tag = 0;
1521  err = pr_iucv->message_send(iucv->path, &txmsg,
1522  IUCV_IPRMDATA, 0, (void *) iprm_shutdown, 8);
1523  if (err) {
1524  switch (err) {
1525  case 1:
1526  err = -ENOTCONN;
1527  break;
1528  case 2:
1529  err = -ECONNRESET;
1530  break;
1531  default:
1532  err = -ENOTCONN;
1533  break;
1534  }
1535  }
1536  } else
1537  iucv_send_ctrl(sk, AF_IUCV_FLAG_SHT);
1538  }
1539 
1540  sk->sk_shutdown |= how;
1541  if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
1542  if (iucv->transport == AF_IUCV_TRANS_IUCV) {
1543  err = pr_iucv->path_quiesce(iucv->path, NULL);
1544  if (err)
1545  err = -ENOTCONN;
1546 /* skb_queue_purge(&sk->sk_receive_queue); */
1547  }
1548  skb_queue_purge(&sk->sk_receive_queue);
1549  }
1550 
1551  /* Wake up anyone sleeping in poll */
1552  sk->sk_state_change(sk);
1553 
1554 fail:
1555  release_sock(sk);
1556  return err;
1557 }
1558 
1559 static int iucv_sock_release(struct socket *sock)
1560 {
1561  struct sock *sk = sock->sk;
1562  int err = 0;
1563 
1564  if (!sk)
1565  return 0;
1566 
1567  iucv_sock_close(sk);
1568 
1569  sock_orphan(sk);
1570  iucv_sock_kill(sk);
1571  return err;
1572 }
1573 
1574 /* getsockopt and setsockopt */
1575 static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
1576  char __user *optval, unsigned int optlen)
1577 {
1578  struct sock *sk = sock->sk;
1579  struct iucv_sock *iucv = iucv_sk(sk);
1580  int val;
1581  int rc;
1582 
1583  if (level != SOL_IUCV)
1584  return -ENOPROTOOPT;
1585 
1586  if (optlen < sizeof(int))
1587  return -EINVAL;
1588 
1589  if (get_user(val, (int __user *) optval))
1590  return -EFAULT;
1591 
1592  rc = 0;
1593 
1594  lock_sock(sk);
1595  switch (optname) {
1596  case SO_IPRMDATA_MSG:
1597  if (val)
1598  iucv->flags |= IUCV_IPRMDATA;
1599  else
1600  iucv->flags &= ~IUCV_IPRMDATA;
1601  break;
1602  case SO_MSGLIMIT:
1603  switch (sk->sk_state) {
1604  case IUCV_OPEN:
1605  case IUCV_BOUND:
1606  if (val < 1 || val > (u16)(~0))
1607  rc = -EINVAL;
1608  else
1609  iucv->msglimit = val;
1610  break;
1611  default:
1612  rc = -EINVAL;
1613  break;
1614  }
1615  break;
1616  default:
1617  rc = -ENOPROTOOPT;
1618  break;
1619  }
1620  release_sock(sk);
1621 
1622  return rc;
1623 }
1624 
1625 static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
1626  char __user *optval, int __user *optlen)
1627 {
1628  struct sock *sk = sock->sk;
1629  struct iucv_sock *iucv = iucv_sk(sk);
1630  unsigned int val;
1631  int len;
1632 
1633  if (level != SOL_IUCV)
1634  return -ENOPROTOOPT;
1635 
1636  if (get_user(len, optlen))
1637  return -EFAULT;
1638 
1639  if (len < 0)
1640  return -EINVAL;
1641 
1642  len = min_t(unsigned int, len, sizeof(int));
1643 
1644  switch (optname) {
1645  case SO_IPRMDATA_MSG:
1646  val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0;
1647  break;
1648  case SO_MSGLIMIT:
1649  lock_sock(sk);
1650  val = (iucv->path != NULL) ? iucv->path->msglim /* connected */
1651  : iucv->msglimit; /* default */
1652  release_sock(sk);
1653  break;
1654  case SO_MSGSIZE:
1655  if (sk->sk_state == IUCV_OPEN)
1656  return -EBADFD;
1657  val = (iucv->hs_dev) ? iucv->hs_dev->mtu -
1658  sizeof(struct af_iucv_trans_hdr) - ETH_HLEN :
1659  0x7fffffff;
1660  break;
1661  default:
1662  return -ENOPROTOOPT;
1663  }
1664 
1665  if (put_user(len, optlen))
1666  return -EFAULT;
1667  if (copy_to_user(optval, &val, len))
1668  return -EFAULT;
1669 
1670  return 0;
1671 }
1672 
1673 
1674 /* Callback wrappers - called from iucv base support */
1675 static int iucv_callback_connreq(struct iucv_path *path,
1676  u8 ipvmid[8], u8 ipuser[16])
1677 {
1678  unsigned char user_data[16];
1679  unsigned char nuser_data[16];
1680  unsigned char src_name[8];
1681  struct hlist_node *node;
1682  struct sock *sk, *nsk;
1683  struct iucv_sock *iucv, *niucv;
1684  int err;
1685 
1686  memcpy(src_name, ipuser, 8);
1687  EBCASC(src_name, 8);
1688  /* Find out if this path belongs to af_iucv. */
1689  read_lock(&iucv_sk_list.lock);
1690  iucv = NULL;
1691  sk = NULL;
1692  sk_for_each(sk, node, &iucv_sk_list.head)
1693  if (sk->sk_state == IUCV_LISTEN &&
1694  !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
1695  /*
1696  * Found a listening socket with
1697  * src_name == ipuser[0-7].
1698  */
1699  iucv = iucv_sk(sk);
1700  break;
1701  }
1702  read_unlock(&iucv_sk_list.lock);
1703  if (!iucv)
1704  /* No socket found, not one of our paths. */
1705  return -EINVAL;
1706 
1707  bh_lock_sock(sk);
1708 
1709  /* Check if parent socket is listening */
1710  low_nmcpy(user_data, iucv->src_name);
1711  high_nmcpy(user_data, iucv->dst_name);
1712  ASCEBC(user_data, sizeof(user_data));
1713  if (sk->sk_state != IUCV_LISTEN) {
1714  err = pr_iucv->path_sever(path, user_data);
1715  iucv_path_free(path);
1716  goto fail;
1717  }
1718 
1719  /* Check for backlog size */
1720  if (sk_acceptq_is_full(sk)) {
1721  err = pr_iucv->path_sever(path, user_data);
1722  iucv_path_free(path);
1723  goto fail;
1724  }
1725 
1726  /* Create the new socket */
1727  nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
1728  if (!nsk) {
1729  err = pr_iucv->path_sever(path, user_data);
1730  iucv_path_free(path);
1731  goto fail;
1732  }
1733 
1734  niucv = iucv_sk(nsk);
1735  iucv_sock_init(nsk, sk);
1736 
1737  /* Set the new iucv_sock */
1738  memcpy(niucv->dst_name, ipuser + 8, 8);
1739  EBCASC(niucv->dst_name, 8);
1740  memcpy(niucv->dst_user_id, ipvmid, 8);
1741  memcpy(niucv->src_name, iucv->src_name, 8);
1742  memcpy(niucv->src_user_id, iucv->src_user_id, 8);
1743  niucv->path = path;
1744 
1745  /* Call iucv_accept */
1746  high_nmcpy(nuser_data, ipuser + 8);
1747  memcpy(nuser_data + 8, niucv->src_name, 8);
1748  ASCEBC(nuser_data + 8, 8);
1749 
1750  /* set message limit for path based on msglimit of accepting socket */
1751  niucv->msglimit = iucv->msglimit;
1752  path->msglim = iucv->msglimit;
1753  err = pr_iucv->path_accept(path, &af_iucv_handler, nuser_data, nsk);
1754  if (err) {
1755  iucv_sever_path(nsk, 1);
1756  iucv_sock_kill(nsk);
1757  goto fail;
1758  }
1759 
1760  iucv_accept_enqueue(sk, nsk);
1761 
1762  /* Wake up accept */
1763  nsk->sk_state = IUCV_CONNECTED;
1764  sk->sk_data_ready(sk, 1);
1765  err = 0;
1766 fail:
1767  bh_unlock_sock(sk);
1768  return 0;
1769 }
1770 
1771 static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
1772 {
1773  struct sock *sk = path->private;
1774 
1775  sk->sk_state = IUCV_CONNECTED;
1776  sk->sk_state_change(sk);
1777 }
1778 
1779 static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
1780 {
1781  struct sock *sk = path->private;
1782  struct iucv_sock *iucv = iucv_sk(sk);
1783  struct sk_buff *skb;
1784  struct sock_msg_q *save_msg;
1785  int len;
1786 
1787  if (sk->sk_shutdown & RCV_SHUTDOWN) {
1788  pr_iucv->message_reject(path, msg);
1789  return;
1790  }
1791 
1792  spin_lock(&iucv->message_q.lock);
1793 
1794  if (!list_empty(&iucv->message_q.list) ||
1795  !skb_queue_empty(&iucv->backlog_skb_q))
1796  goto save_message;
1797 
1798  len = atomic_read(&sk->sk_rmem_alloc);
1799  len += SKB_TRUESIZE(iucv_msg_length(msg));
1800  if (len > sk->sk_rcvbuf)
1801  goto save_message;
1802 
1803  skb = alloc_skb(iucv_msg_length(msg), GFP_ATOMIC | GFP_DMA);
1804  if (!skb)
1805  goto save_message;
1806 
1807  iucv_process_message(sk, skb, path, msg);
1808  goto out_unlock;
1809 
1810 save_message:
1811  save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
1812  if (!save_msg)
1813  goto out_unlock;
1814  save_msg->path = path;
1815  save_msg->msg = *msg;
1816 
1817  list_add_tail(&save_msg->list, &iucv->message_q.list);
1818 
1819 out_unlock:
1820  spin_unlock(&iucv->message_q.lock);
1821 }
1822 
1823 static void iucv_callback_txdone(struct iucv_path *path,
1824  struct iucv_message *msg)
1825 {
1826  struct sock *sk = path->private;
1827  struct sk_buff *this = NULL;
1828  struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
1829  struct sk_buff *list_skb = list->next;
1830  unsigned long flags;
1831 
1832  bh_lock_sock(sk);
1833  if (!skb_queue_empty(list)) {
1834  spin_lock_irqsave(&list->lock, flags);
1835 
1836  while (list_skb != (struct sk_buff *)list) {
1837  if (!memcmp(&msg->tag, CB_TAG(list_skb), CB_TAG_LEN)) {
1838  this = list_skb;
1839  break;
1840  }
1841  list_skb = list_skb->next;
1842  }
1843  if (this)
1844  __skb_unlink(this, list);
1845 
1846  spin_unlock_irqrestore(&list->lock, flags);
1847 
1848  if (this) {
1849  kfree_skb(this);
1850  /* wake up any process waiting for sending */
1851  iucv_sock_wake_msglim(sk);
1852  }
1853  }
1854 
1855  if (sk->sk_state == IUCV_CLOSING) {
1856  if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
1857  sk->sk_state = IUCV_CLOSED;
1858  sk->sk_state_change(sk);
1859  }
1860  }
1861  bh_unlock_sock(sk);
1862 
1863 }
1864 
1865 static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
1866 {
1867  struct sock *sk = path->private;
1868 
1869  if (sk->sk_state == IUCV_CLOSED)
1870  return;
1871 
1872  bh_lock_sock(sk);
1873  iucv_sever_path(sk, 1);
1874  sk->sk_state = IUCV_DISCONN;
1875 
1876  sk->sk_state_change(sk);
1877  bh_unlock_sock(sk);
1878 }
1879 
1880 /* called if the other communication side shuts down its RECV direction;
1881  * in turn, the callback sets SEND_SHUTDOWN to disable sending of data.
1882  */
1883 static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
1884 {
1885  struct sock *sk = path->private;
1886 
1887  bh_lock_sock(sk);
1888  if (sk->sk_state != IUCV_CLOSED) {
1889  sk->sk_shutdown |= SEND_SHUTDOWN;
1890  sk->sk_state_change(sk);
1891  }
1892  bh_unlock_sock(sk);
1893 }
1894 
1895 /***************** HiperSockets transport callbacks ********************/
1896 static void afiucv_swap_src_dest(struct sk_buff *skb)
1897 {
1898  struct af_iucv_trans_hdr *trans_hdr =
1899  (struct af_iucv_trans_hdr *)skb->data;
1900  char tmpID[8];
1901  char tmpName[8];
1902 
1903  ASCEBC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
1904  ASCEBC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
1905  ASCEBC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
1906  ASCEBC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
1907  memcpy(tmpID, trans_hdr->srcUserID, 8);
1908  memcpy(tmpName, trans_hdr->srcAppName, 8);
1909  memcpy(trans_hdr->srcUserID, trans_hdr->destUserID, 8);
1910  memcpy(trans_hdr->srcAppName, trans_hdr->destAppName, 8);
1911  memcpy(trans_hdr->destUserID, tmpID, 8);
1912  memcpy(trans_hdr->destAppName, tmpName, 8);
1913  skb_push(skb, ETH_HLEN);
1914  memset(skb->data, 0, ETH_HLEN);
1915 }
1916 
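1917 /**
1918  * afiucv_hs_callback_syn() - react on received SYN
1919  **/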
1920 static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
1921 {
1922  struct sock *nsk;
1923  struct iucv_sock *iucv, *niucv;
1924  struct af_iucv_trans_hdr *trans_hdr;
1925  int err;
1926 
1927  iucv = iucv_sk(sk);
1928  trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
1929  if (!iucv) {
1930  /* no sock - connection refused */
1931  afiucv_swap_src_dest(skb);
1932  trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
1933  err = dev_queue_xmit(skb);
1934  goto out;
1935  }
1936 
1937  nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
1938  bh_lock_sock(sk);
1939  if ((sk->sk_state != IUCV_LISTEN) ||
1940  sk_acceptq_is_full(sk) ||
1941  !nsk) {
1942  /* error on server socket - connection refused */
1943  if (nsk)
1944  sk_free(nsk);
1945  afiucv_swap_src_dest(skb);
1946  trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
1947  err = dev_queue_xmit(skb);
1948  bh_unlock_sock(sk);
1949  goto out;
1950  }
1951 
1952  niucv = iucv_sk(nsk);
1953  iucv_sock_init(nsk, sk);
1954  niucv->transport = AF_IUCV_TRANS_HIPER;
1955  niucv->msglimit = iucv->msglimit;
1956  if (!trans_hdr->window)
1957  niucv->msglimit_peer = IUCV_HIPER_MSGLIM_DEFAULT;
1958  else
1959  niucv->msglimit_peer = trans_hdr->window;
1960  memcpy(niucv->dst_name, trans_hdr->srcAppName, 8);
1961  memcpy(niucv->dst_user_id, trans_hdr->srcUserID, 8);
1962  memcpy(niucv->src_name, iucv->src_name, 8);
1963  memcpy(niucv->src_user_id, iucv->src_user_id, 8);
1964  nsk->sk_bound_dev_if = sk->sk_bound_dev_if;
1965  niucv->hs_dev = iucv->hs_dev;
1966  dev_hold(niucv->hs_dev);
1967  afiucv_swap_src_dest(skb);
1968  trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK;
1969  trans_hdr->window = niucv->msglimit;
1970  /* if receiver acks the xmit connection is established */
1971  err = dev_queue_xmit(skb);
1972  if (!err) {
1973  iucv_accept_enqueue(sk, nsk);
1974  nsk->sk_state = IUCV_CONNECTED;
1975  sk->sk_data_ready(sk, 1);
1976  } else
1977  iucv_sock_kill(nsk);
1978  bh_unlock_sock(sk);
1979 
1980 out:
1981  return NET_RX_SUCCESS;
1982 }
1983 
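1984 /**
1985  * afiucv_hs_callback_synack() - react on received SYN-ACK
1986  **/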
1987 static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb)
1988 {
1989  struct iucv_sock *iucv = iucv_sk(sk);
1990  struct af_iucv_trans_hdr *trans_hdr =
1991  (struct af_iucv_trans_hdr *)skb->data;
1992 
1993  if (!iucv)
1994  goto out;
1995  if (sk->sk_state != IUCV_BOUND)
1996  goto out;
1997  bh_lock_sock(sk);
1998  iucv->msglimit_peer = trans_hdr->window;
1999  sk->sk_state = IUCV_CONNECTED;
2000  sk->sk_state_change(sk);
2001  bh_unlock_sock(sk);
2002 out:
2003  kfree_skb(skb);
2004  return NET_RX_SUCCESS;
2005 }
2006 
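2007 /**
2008  * afiucv_hs_callback_synfin() - react on received SYN_FIN
2009  **/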
2010 static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb)
2011 {
2012  struct iucv_sock *iucv = iucv_sk(sk);
2013 
2014  if (!iucv)
2015  goto out;
2016  if (sk->sk_state != IUCV_BOUND)
2017  goto out;
2018  bh_lock_sock(sk);
2019  sk->sk_state = IUCV_DISCONN;
2020  sk->sk_state_change(sk);
2021  bh_unlock_sock(sk);
2022 out:
2023  kfree_skb(skb);
2024  return NET_RX_SUCCESS;
2025 }
2026 
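2027 /**
2028  * afiucv_hs_callback_fin() - react on received FIN
2029  **/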
2030 static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
2031 {
2032  struct iucv_sock *iucv = iucv_sk(sk);
2033 
2034  /* other end of connection closed */
2035  if (!iucv)
2036  goto out;
2037  bh_lock_sock(sk);
2038  if (sk->sk_state == IUCV_CONNECTED) {
2039  sk->sk_state = IUCV_DISCONN;
2040  sk->sk_state_change(sk);
2041  }
2042  bh_unlock_sock(sk);
2043 out:
2044  kfree_skb(skb);
2045  return NET_RX_SUCCESS;
2046 }
2047 
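2048 /**
2049  * afiucv_hs_callback_win() - react on received WIN (flow-control window update)
2050  **/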
2051 static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
2052 {
2053  struct iucv_sock *iucv = iucv_sk(sk);
2054  struct af_iucv_trans_hdr *trans_hdr =
2055  (struct af_iucv_trans_hdr *)skb->data;
2056 
2057  if (!iucv)
2058  return NET_RX_SUCCESS;
2059 
2060  if (sk->sk_state != IUCV_CONNECTED)
2061  return NET_RX_SUCCESS;
2062 
2063  atomic_sub(trans_hdr->window, &iucv->msg_sent);
2064  iucv_sock_wake_msglim(sk);
2065  return NET_RX_SUCCESS;
2066 }
2067 
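2068 /**
2069  * afiucv_hs_callback_rx() - react on received data
2070  **/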
2071 static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
2072 {
2073  struct iucv_sock *iucv = iucv_sk(sk);
2074 
2075  if (!iucv) {
2076  kfree_skb(skb);
2077  return NET_RX_SUCCESS;
2078  }
2079 
2080  if (sk->sk_state != IUCV_CONNECTED) {
2081  kfree_skb(skb);
2082  return NET_RX_SUCCESS;
2083  }
2084 
2085  if (sk->sk_shutdown & RCV_SHUTDOWN) {
2086  kfree_skb(skb);
2087  return NET_RX_SUCCESS;
2088  }
2089 
2090  /* write stuff from iucv_msg to skb cb */
2091  if (skb->len < sizeof(struct af_iucv_trans_hdr)) {
2092  kfree_skb(skb);
2093  return NET_RX_SUCCESS;
2094  }
2095  skb_pull(skb, sizeof(struct af_iucv_trans_hdr));
2096  skb_reset_transport_header(skb);
2097  skb_reset_network_header(skb);
2098  spin_lock(&iucv->message_q.lock);
2099  if (skb_queue_empty(&iucv->backlog_skb_q)) {
2100  if (sock_queue_rcv_skb(sk, skb)) {
2101  /* handle rcv queue full */
2102  skb_queue_tail(&iucv->backlog_skb_q, skb);
2103  }
2104  } else
2105  skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
2106  spin_unlock(&iucv->message_q.lock);
2107  return NET_RX_SUCCESS;
2108 }
2109 
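2110 /**
2111  * afiucv_hs_rcv() - base function for arriving data through HiperSockets
2112  * transport
2113  * called from netif RX softirq
2114  **/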
2115 static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
2116  struct packet_type *pt, struct net_device *orig_dev)
2117 {
2118  struct hlist_node *node;
2119  struct sock *sk;
2120  struct iucv_sock *iucv;
2121  struct af_iucv_trans_hdr *trans_hdr;
2122  char nullstring[8];
2123  int err = 0;
2124 
2125  skb_pull(skb, ETH_HLEN);
2126  trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
2127  EBCASC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
2128  EBCASC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
2129  EBCASC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
2130  EBCASC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
2131  memset(nullstring, 0, sizeof(nullstring));
2132  iucv = NULL;
2133  sk = NULL;
2134  read_lock(&iucv_sk_list.lock);
2135  sk_for_each(sk, node, &iucv_sk_list.head) {
2136  if (trans_hdr->flags == AF_IUCV_FLAG_SYN) {
2137  if ((!memcmp(&iucv_sk(sk)->src_name,
2138  trans_hdr->destAppName, 8)) &&
2139  (!memcmp(&iucv_sk(sk)->src_user_id,
2140  trans_hdr->destUserID, 8)) &&
2141  (!memcmp(&iucv_sk(sk)->dst_name, nullstring, 8)) &&
2142  (!memcmp(&iucv_sk(sk)->dst_user_id,
2143  nullstring, 8))) {
2144  iucv = iucv_sk(sk);
2145  break;
2146  }
2147  } else {
2148  if ((!memcmp(&iucv_sk(sk)->src_name,
2149  trans_hdr->destAppName, 8)) &&
2150  (!memcmp(&iucv_sk(sk)->src_user_id,
2151  trans_hdr->destUserID, 8)) &&
2152  (!memcmp(&iucv_sk(sk)->dst_name,
2153  trans_hdr->srcAppName, 8)) &&
2154  (!memcmp(&iucv_sk(sk)->dst_user_id,
2155  trans_hdr->srcUserID, 8))) {
2156  iucv = iucv_sk(sk);
2157  break;
2158  }
2159  }
2160  }
2161  read_unlock(&iucv_sk_list.lock);
2162  if (!iucv)
2163  sk = NULL;
2164 
2165  /* no sock
2166  how should we send with no sock?
2167  1) send without sock, no send rc checking?
2168  2) introduce a default sock to handle these cases
2169 
2170  SYN -> send SYN|ACK in good case, send SYN|FIN in bad case
2171  data -> send FIN
2172  SYN|ACK, SYN|FIN, FIN -> no action? */
2173 
2174  switch (trans_hdr->flags) {
2175  case AF_IUCV_FLAG_SYN:
2176  /* connect request */
2177  err = afiucv_hs_callback_syn(sk, skb);
2178  break;
2179  case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK):
2180  /* connect request confirmed */
2181  err = afiucv_hs_callback_synack(sk, skb);
2182  break;
2183  case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN):
2184  /* connect request refused */
2185  err = afiucv_hs_callback_synfin(sk, skb);
2186  break;
2187  case (AF_IUCV_FLAG_FIN):
2188  /* close request */
2189  err = afiucv_hs_callback_fin(sk, skb);
2190  break;
2191  case (AF_IUCV_FLAG_WIN):
2192  err = afiucv_hs_callback_win(sk, skb);
2193  if (skb->len == sizeof(struct af_iucv_trans_hdr)) {
2194  kfree_skb(skb);
2195  break;
2196  }
2197  /* fall through and receive non-zero length data */
2198  case (AF_IUCV_FLAG_SHT):
2199  /* shutdown request */
2200  /* fall through and receive zero length data */
2201  case 0:
2202  /* plain data frame */
2203  memcpy(CB_TRGCLS(skb), &trans_hdr->iucv_hdr.class,
2204  CB_TRGCLS_LEN);
2205  err = afiucv_hs_callback_rx(sk, skb);
2206  break;
2207  default:
2208  ;
2209  }
2210 
2211  return err;
2212 }
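The switch above dispatches each HiperSockets frame on the transport-header flag bits. The stand-alone sketch below restates that decision table outside the kernel; the numeric flag values are placeholders chosen for this example only, and the authoritative AF_IUCV_FLAG_* definitions live in <net/iucv/af_iucv.h>.

#include <stdio.h>

/* placeholder bit values for this sketch; real ones are in af_iucv.h */
enum {
	FLAG_SYN = 0x01,
	FLAG_ACK = 0x02,
	FLAG_FIN = 0x04,
	FLAG_WIN = 0x08,
	FLAG_SHT = 0x10,
};

/* same classification as the switch in afiucv_hs_rcv() */
static const char *classify(unsigned int flags)
{
	switch (flags) {
	case FLAG_SYN:            return "connect request (SYN)";
	case FLAG_SYN | FLAG_ACK: return "connect confirmed (SYN|ACK)";
	case FLAG_SYN | FLAG_FIN: return "connect refused (SYN|FIN)";
	case FLAG_FIN:            return "close request (FIN)";
	case FLAG_WIN:            return "window update, may carry data (WIN)";
	case FLAG_SHT:            return "shutdown request (SHT)";
	case 0:                   return "plain data frame";
	default:                  return "ignored";
	}
}

int main(void)
{
	printf("%s\n", classify(FLAG_SYN | FLAG_ACK));
	printf("%s\n", classify(0));
	return 0;
}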
2213 
2214 /**
2215  * afiucv_hs_callback_txnotify() - handle send notifications from
2216  *                                 HiperSockets transport
2217  **/
2218 static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
2219  enum iucv_tx_notify n)
2220 {
2221  struct sock *isk = skb->sk;
2222  struct sock *sk = NULL;
2223  struct iucv_sock *iucv = NULL;
2224  struct sk_buff_head *list;
2225  struct sk_buff *list_skb;
2226  struct sk_buff *nskb;
2227  unsigned long flags;
2228  struct hlist_node *node;
2229 
2230  read_lock_irqsave(&iucv_sk_list.lock, flags);
2231  sk_for_each(sk, node, &iucv_sk_list.head)
2232  if (sk == isk) {
2233  iucv = iucv_sk(sk);
2234  break;
2235  }
2236  read_unlock_irqrestore(&iucv_sk_list.lock, flags);
2237 
2238  if (!iucv || sock_flag(sk, SOCK_ZAPPED))
2239  return;
2240 
2241  list = &iucv->send_skb_q;
2242  spin_lock_irqsave(&list->lock, flags);
2243  if (skb_queue_empty(list))
2244  goto out_unlock;
2245  list_skb = list->next;
2246  nskb = list_skb->next;
2247  while (list_skb != (struct sk_buff *)list) {
2248  if (skb_shinfo(list_skb) == skb_shinfo(skb)) {
2249  switch (n) {
2250  case TX_NOTIFY_OK:
2251  __skb_unlink(list_skb, list);
2252  kfree_skb(list_skb);
2253  iucv_sock_wake_msglim(sk);
2254  break;
2255  case TX_NOTIFY_PENDING:
2256  atomic_inc(&iucv->pendings);
2257  break;
2258  case TX_NOTIFY_DELAYED_OK:
2259  __skb_unlink(list_skb, list);
2260  atomic_dec(&iucv->pendings);
2261  if (atomic_read(&iucv->pendings) <= 0)
2262  iucv_sock_wake_msglim(sk);
2263  kfree_skb(list_skb);
2264  break;
2265  case TX_NOTIFY_UNREACHABLE:
2266  case TX_NOTIFY_DELAYED_UNREACHABLE:
2267  case TX_NOTIFY_TPQFULL: /* not yet used */
2268  case TX_NOTIFY_GENERALERROR:
2269  case TX_NOTIFY_DELAYED_GENERALERROR:
2270  __skb_unlink(list_skb, list);
2271  kfree_skb(list_skb);
2272  if (sk->sk_state == IUCV_CONNECTED) {
2273  sk->sk_state = IUCV_DISCONN;
2274  sk->sk_state_change(sk);
2275  }
2276  break;
2277  }
2278  break;
2279  }
2280  list_skb = nskb;
2281  nskb = nskb->next;
2282  }
2283 out_unlock:
2284  spin_unlock_irqrestore(&list->lock, flags);
2285 
2286  if (sk->sk_state == IUCV_CLOSING) {
2287  if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
2288  sk->sk_state = IUCV_CLOSED;
2289  sk->sk_state_change(sk);
2290  }
2291  }
2292 
2293 }
2294 
2295 /*
2296  * afiucv_netdev_event: handle netdev notifier chain events
2297  */
2298 static int afiucv_netdev_event(struct notifier_block *this,
2299  unsigned long event, void *ptr)
2300 {
2301  struct net_device *event_dev = (struct net_device *)ptr;
2302  struct hlist_node *node;
2303  struct sock *sk;
2304  struct iucv_sock *iucv;
2305 
2306  switch (event) {
2307  case NETDEV_REBOOT:
2308  case NETDEV_GOING_DOWN:
2309  sk_for_each(sk, node, &iucv_sk_list.head) {
2310  iucv = iucv_sk(sk);
2311  if ((iucv->hs_dev == event_dev) &&
2312  (sk->sk_state == IUCV_CONNECTED)) {
2313  if (event == NETDEV_GOING_DOWN)
2314  iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
2315  sk->sk_state = IUCV_DISCONN;
2316  sk->sk_state_change(sk);
2317  }
2318  }
2319  break;
2320  case NETDEV_DOWN:
2321  case NETDEV_UNREGISTER:
2322  default:
2323  break;
2324  }
2325  return NOTIFY_DONE;
2326 }
2327 
2328 static struct notifier_block afiucv_netdev_notifier = {
2329  .notifier_call = afiucv_netdev_event,
2330 };
2331 
2332 static const struct proto_ops iucv_sock_ops = {
2333  .family = PF_IUCV,
2334  .owner = THIS_MODULE,
2335  .release = iucv_sock_release,
2336  .bind = iucv_sock_bind,
2337  .connect = iucv_sock_connect,
2338  .listen = iucv_sock_listen,
2339  .accept = iucv_sock_accept,
2340  .getname = iucv_sock_getname,
2341  .sendmsg = iucv_sock_sendmsg,
2342  .recvmsg = iucv_sock_recvmsg,
2343  .poll = iucv_sock_poll,
2344  .ioctl = sock_no_ioctl,
2345  .mmap = sock_no_mmap,
2346  .socketpair = sock_no_socketpair,
2347  .shutdown = iucv_sock_shutdown,
2348  .setsockopt = iucv_sock_setsockopt,
2349  .getsockopt = iucv_sock_getsockopt,
2350 };
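For context, this is roughly how a user-space program would exercise the operations wired up in iucv_sock_ops above. The sketch re-declares struct sockaddr_iucv for illustration (see <net/iucv/af_iucv.h> for the authoritative layout); the peer user ID "PEERUSER" and application name "APP1" are placeholders, and error handling is abbreviated.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>

#ifndef AF_IUCV
#define AF_IUCV 32              /* address family number of AF_IUCV */
#endif

/* re-declared for this example only; mirrors the kernel definition */
struct sockaddr_iucv {
	sa_family_t    siucv_family;
	unsigned short siucv_port;       /* reserved */
	unsigned int   siucv_addr;       /* reserved */
	char           siucv_nodeid[8];  /* reserved */
	char           siucv_user_id[8]; /* z/VM user ID, blank padded */
	char           siucv_name[8];    /* application name, blank padded */
};

int main(void)
{
	struct sockaddr_iucv addr;
	int fd = socket(AF_IUCV, SOCK_STREAM, 0);

	if (fd < 0) {
		perror("socket(AF_IUCV)");
		return 1;
	}

	memset(&addr, 0, sizeof(addr));
	addr.siucv_family = AF_IUCV;
	memcpy(addr.siucv_user_id, "PEERUSER", 8);  /* placeholder peer VM guest */
	memcpy(addr.siucv_name,    "APP1    ", 8);  /* placeholder service name  */

	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		perror("connect");
	else if (write(fd, "hello\n", 6) < 0)
		perror("write");

	close(fd);
	return 0;
}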
2351 
2352 static const struct net_proto_family iucv_sock_family_ops = {
2353  .family = AF_IUCV,
2354  .owner = THIS_MODULE,
2355  .create = iucv_sock_create,
2356 };
2357 
2358 static struct packet_type iucv_packet_type = {
2359  .type = cpu_to_be16(ETH_P_AF_IUCV),
2360  .func = afiucv_hs_rcv,
2361 };
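The packet_type above is what hooks afiucv_hs_rcv() into the receive path once dev_add_pack() runs in afiucv_init() further below. As a minimal illustration of that pattern, the skeleton module here registers and removes a handler of its own; every *_demo identifier is invented for the example, and the ethertype is arbitrary.

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>

/* toy receive handler; a real one would parse skb->data */
static int demo_rcv(struct sk_buff *skb, struct net_device *dev,
		    struct packet_type *pt, struct net_device *orig_dev)
{
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static struct packet_type demo_packet_type = {
	.type = cpu_to_be16(ETH_P_802_2),   /* pick the ethertype of interest */
	.func = demo_rcv,
};

static int __init demo_init(void)
{
	dev_add_pack(&demo_packet_type);    /* start receiving matching frames */
	return 0;
}

static void __exit demo_exit(void)
{
	dev_remove_pack(&demo_packet_type);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");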
2362 
2363 static int afiucv_iucv_init(void)
2364 {
2365  int err;
2366 
2367  err = pr_iucv->iucv_register(&af_iucv_handler, 0);
2368  if (err)
2369  goto out;
2370  /* establish dummy device */
2371  af_iucv_driver.bus = pr_iucv->bus;
2372  err = driver_register(&af_iucv_driver);
2373  if (err)
2374  goto out_iucv;
2375  af_iucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
2376  if (!af_iucv_dev) {
2377  err = -ENOMEM;
2378  goto out_driver;
2379  }
2380  dev_set_name(af_iucv_dev, "af_iucv");
2381  af_iucv_dev->bus = pr_iucv->bus;
2382  af_iucv_dev->parent = pr_iucv->root;
2383  af_iucv_dev->release = (void (*)(struct device *))kfree;
2384  af_iucv_dev->driver = &af_iucv_driver;
2385  err = device_register(af_iucv_dev);
2386  if (err)
2387  goto out_driver;
2388  return 0;
2389 
2390 out_driver:
2391  driver_unregister(&af_iucv_driver);
2392 out_iucv:
2393  pr_iucv->iucv_unregister(&af_iucv_handler, 0);
2394 out:
2395  return err;
2396 }
2397 
2398 static int __init afiucv_init(void)
2399 {
2400  int err;
2401 
2402  if (MACHINE_IS_VM) {
2403  cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
2404  if (unlikely(err)) {
2405  WARN_ON(err);
2406  err = -EPROTONOSUPPORT;
2407  goto out;
2408  }
2409 
2410  pr_iucv = try_then_request_module(symbol_get(iucv_if), "iucv");
2411  if (!pr_iucv) {
2412  printk(KERN_WARNING "iucv_if lookup failed\n");
2413  memset(&iucv_userid, 0, sizeof(iucv_userid));
2414  }
2415  } else {
2416  memset(&iucv_userid, 0, sizeof(iucv_userid));
2417  pr_iucv = NULL;
2418  }
2419 
2420  err = proto_register(&iucv_proto, 0);
2421  if (err)
2422  goto out;
2423  err = sock_register(&iucv_sock_family_ops);
2424  if (err)
2425  goto out_proto;
2426 
2427  if (pr_iucv) {
2428  err = afiucv_iucv_init();
2429  if (err)
2430  goto out_sock;
2431  } else
2432  register_netdevice_notifier(&afiucv_netdev_notifier);
2433  dev_add_pack(&iucv_packet_type);
2434  return 0;
2435 
2436 out_sock:
2437  sock_unregister(PF_IUCV);
2438 out_proto:
2439  proto_unregister(&iucv_proto);
2440 out:
2441  if (pr_iucv)
2442  symbol_put(iucv_if);
2443  return err;
2444 }
2445 
2446 static void __exit afiucv_exit(void)
2447 {
2448  if (pr_iucv) {
2449  device_unregister(af_iucv_dev);
2450  driver_unregister(&af_iucv_driver);
2451  pr_iucv->iucv_unregister(&af_iucv_handler, 0);
2452  symbol_put(iucv_if);
2453  } else
2454  unregister_netdevice_notifier(&afiucv_netdev_notifier);
2455  dev_remove_pack(&iucv_packet_type);
2456  sock_unregister(PF_IUCV);
2457  proto_unregister(&iucv_proto);
2458 }
2459 
2460 module_init(afiucv_init);
2461 module_exit(afiucv_exit);
2462 
2463 MODULE_AUTHOR("Jennifer Hunt <[email protected]>");
2464 MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
2465 MODULE_VERSION(VERSION);
2466 MODULE_LICENSE("GPL");
2467 MODULE_ALIAS_NETPROTO(PF_IUCV);
2468