/*
 * Source extracted from the Linux kernel 3.7.1 documentation pages
 * (file: hvc_iucv.c).
 */
1 /*
2  * hvc_iucv.c - z/VM IUCV hypervisor console (HVC) device driver
3  *
4  * This HVC device driver provides terminal access using
5  * z/VM IUCV communication paths.
6  *
7  * Copyright IBM Corp. 2008, 2009
8  *
9  * Author(s): Hendrik Brueckner <[email protected]>
10  */
11 #define KMSG_COMPONENT "hvc_iucv"
12 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
13 
14 #include <linux/types.h>
15 #include <linux/slab.h>
16 #include <asm/ebcdic.h>
17 #include <linux/ctype.h>
18 #include <linux/delay.h>
19 #include <linux/device.h>
20 #include <linux/init.h>
21 #include <linux/mempool.h>
22 #include <linux/moduleparam.h>
23 #include <linux/tty.h>
24 #include <linux/wait.h>
25 #include <net/iucv/iucv.h>
26 
27 #include "hvc_console.h"
28 
29 
30 /* General device driver settings */
31 #define HVC_IUCV_MAGIC 0xc9e4c3e5
32 #define MAX_HVC_IUCV_LINES HVC_ALLOC_TTY_ADAPTERS
33 #define MEMPOOL_MIN_NR (PAGE_SIZE / sizeof(struct iucv_tty_buffer)/4)
34 
35 /* IUCV TTY message */
36 #define MSG_VERSION 0x02 /* Message version */
37 #define MSG_TYPE_ERROR 0x01 /* Error message */
38 #define MSG_TYPE_TERMENV 0x02 /* Terminal environment variable */
39 #define MSG_TYPE_TERMIOS 0x04 /* Terminal IO struct update */
40 #define MSG_TYPE_WINSIZE 0x08 /* Terminal window size update */
41 #define MSG_TYPE_DATA 0x10 /* Terminal data */
42 
/* IUCV TTY message: header prepended to every payload exchanged on a path */
struct iucv_tty_msg {
	u8	version;		/* Message version */
	u8	type;			/* Message type (MSG_TYPE_* above) */
#define MSG_MAX_DATALEN		((u16)(~0))
	u16	datalen;		/* Payload length */
	u8	data[];			/* Payload buffer (flexible array member) */
} __attribute__((packed));
/* Total wire size of a message carrying a payload of s bytes */
#define MSG_SIZE(s)	((s) + offsetof(struct iucv_tty_msg, data))
51 
56 };
57 
61 };
62 
64  struct hvc_struct *hvc; /* HVC struct reference */
65  u8 srv_name[8]; /* IUCV service name (ebcdic) */
66  unsigned char is_console; /* Linux console usage flag */
67  enum iucv_state_t iucv_state; /* IUCV connection status */
68  enum tty_state_t tty_state; /* TTY status */
69  struct iucv_path *path; /* IUCV path pointer */
70  spinlock_t lock; /* hvc_iucv_private lock */
71 #define SNDBUF_SIZE (PAGE_SIZE) /* must be < MSG_MAX_DATALEN */
72  void *sndbuf; /* send buffer */
73  size_t sndbuf_len; /* length of send buffer */
74 #define QUEUE_SNDBUF_DELAY (HZ / 25)
75  struct delayed_work sndbuf_work; /* work: send iucv msg(s) */
76  wait_queue_head_t sndbuf_waitq; /* wait for send completion */
77  struct list_head tty_outqueue; /* outgoing IUCV messages */
78  struct list_head tty_inqueue; /* incoming IUCV messages */
79  struct device *dev; /* device structure */
80 };
81 
83  struct list_head list; /* list pointer */
84  struct iucv_message msg; /* store an IUCV message */
85  size_t offset; /* data buffer offset */
86  struct iucv_tty_msg *mbuf; /* buffer to store input/output data */
87 };
88 
89 /* IUCV callback handler */
90 static int hvc_iucv_path_pending(struct iucv_path *, u8[8], u8[16]);
91 static void hvc_iucv_path_severed(struct iucv_path *, u8[16]);
92 static void hvc_iucv_msg_pending(struct iucv_path *, struct iucv_message *);
93 static void hvc_iucv_msg_complete(struct iucv_path *, struct iucv_message *);
94 
95 
/* Kernel module parameter: use one terminal device as default */
static unsigned long hvc_iucv_devices = 1;

/* Array of allocated hvc iucv tty lines... */
static struct hvc_iucv_private *hvc_iucv_table[MAX_HVC_IUCV_LINES];
#define IUCV_HVC_CON_IDX (0)	/* index of the terminal used as Linux console */
/* List of z/VM user ID filter entries (struct iucv_vmid_filter) */
#define MAX_VMID_FILTER (500)	/* upper bound on allowed z/VM user IDs */
static size_t hvc_iucv_filter_size;	/* number of 8-byte entries in filter */
static void *hvc_iucv_filter;		/* filter table (filter_size * 8 bytes) */
static const char *hvc_iucv_filter_string;	/* deferred hvc_iucv_allow= value */
static DEFINE_RWLOCK(hvc_iucv_filter_lock);	/* protects filter and its size */

/* Kmem cache and mempool for iucv_tty_buffer elements */
static struct kmem_cache *hvc_iucv_buffer_cache;
static mempool_t *hvc_iucv_mempool;

/* IUCV handler callback functions */
static struct iucv_handler hvc_iucv_handler = {
	.path_pending = hvc_iucv_path_pending,
	.path_severed = hvc_iucv_path_severed,
	.message_complete = hvc_iucv_msg_complete,
	.message_pending = hvc_iucv_msg_pending,
};
120 
121 
130 {
131  if ((num < HVC_IUCV_MAGIC) || (num - HVC_IUCV_MAGIC > hvc_iucv_devices))
132  return NULL;
133  return hvc_iucv_table[num - HVC_IUCV_MAGIC];
134 }
135 
149 static struct iucv_tty_buffer *alloc_tty_buffer(size_t size, gfp_t flags)
150 {
151  struct iucv_tty_buffer *bufp;
152 
153  bufp = mempool_alloc(hvc_iucv_mempool, flags);
154  if (!bufp)
155  return NULL;
156  memset(bufp, 0, sizeof(*bufp));
157 
158  if (size > 0) {
159  bufp->msg.length = MSG_SIZE(size);
160  bufp->mbuf = kmalloc(bufp->msg.length, flags | GFP_DMA);
161  if (!bufp->mbuf) {
162  mempool_free(bufp, hvc_iucv_mempool);
163  return NULL;
164  }
165  bufp->mbuf->version = MSG_VERSION;
166  bufp->mbuf->type = MSG_TYPE_DATA;
167  bufp->mbuf->datalen = (u16) size;
168  }
169  return bufp;
170 }
171 
176 static void destroy_tty_buffer(struct iucv_tty_buffer *bufp)
177 {
178  kfree(bufp->mbuf);
179  mempool_free(bufp, hvc_iucv_mempool);
180 }
181 
186 static void destroy_tty_buffer_list(struct list_head *list)
187 {
188  struct iucv_tty_buffer *ent, *next;
189 
190  list_for_each_entry_safe(ent, next, list, list) {
191  list_del(&ent->list);
192  destroy_tty_buffer(ent);
193  }
194 }
195 
/**
 * hvc_iucv_write() - Deliver received IUCV terminal data to the HVC layer.
 * @priv:		Per-terminal private data.
 * @buf:		Destination buffer provided by the HVC layer.
 * @count:		Capacity of @buf in bytes.
 * @has_more_data:	Output flag; set to 1 when further queued data requires
 *			another hvc_iucv_write() run.
 *
 * The caller must hold priv->lock (see hvc_iucv_get_chars()).
 *
 * Returns the number of bytes copied into @buf; 0 if no connection exists or
 * nothing is pending; -EPIPE if the IUCV path has been severed; -ENOMEM if
 * no memory is available for the receive buffer; -EIO on receive failure.
 */
static int hvc_iucv_write(struct hvc_iucv_private *priv,
			  char *buf, int count, int *has_more_data)
{
	struct iucv_tty_buffer *rb;
	int written;
	int rc;

	/* immediately return if there is no IUCV connection */
	if (priv->iucv_state == IUCV_DISCONN)
		return 0;

	/* if the IUCV path has been severed, return -EPIPE to inform the
	 * HVC layer to hang up the tty device. */
	if (priv->iucv_state == IUCV_SEVERED)
		return -EPIPE;

	/* check if there are pending messages */
	if (list_empty(&priv->tty_inqueue))
		return 0;

	/* receive an iucv message and flip data to the tty (ldisc) */
	rb = list_first_entry(&priv->tty_inqueue, struct iucv_tty_buffer, list);

	written = 0;
	if (!rb->mbuf) { /* message not yet received ... */
		/* allocate mem to store msg data; if no memory is available
		 * then leave the buffer on the list and re-try later */
		rb->mbuf = kmalloc(rb->msg.length, GFP_ATOMIC | GFP_DMA);
		if (!rb->mbuf)
			return -ENOMEM;

		rc = __iucv_message_receive(priv->path, &rb->msg, 0,
					    rb->mbuf, rb->msg.length, NULL);
		switch (rc) {
		case 0: /* Successful */
			break;
		case 2: /* No message found */
		case 9: /* Message purged */
			break;
		default:
			written = -EIO;
		}
		/* remove buffer if an error has occurred or received data
		 * is not correct; rc != 0 short-circuits before the mbuf
		 * fields (which are then uninitialized) are read */
		if (rc || (rb->mbuf->version != MSG_VERSION) ||
			  (rb->msg.length != MSG_SIZE(rb->mbuf->datalen)))
			goto out_remove_buffer;
	}

	switch (rb->mbuf->type) {
	case MSG_TYPE_DATA:
		/* copy as much payload as fits; if the message does not fit
		 * at once, remember the offset and keep the buffer queued */
		written = min_t(int, rb->mbuf->datalen - rb->offset, count);
		memcpy(buf, rb->mbuf->data + rb->offset, written);
		if (written < (rb->mbuf->datalen - rb->offset)) {
			rb->offset += written;
			*has_more_data = 1;
			goto out_written;
		}
		break;

	case MSG_TYPE_WINSIZE:
		if (rb->mbuf->datalen != sizeof(struct winsize))
			break;
		/* The caller must ensure that the hvc is locked, which
		 * is the case when called from hvc_iucv_get_chars() */
		__hvc_resize(priv->hvc, *((struct winsize *) rb->mbuf->data));
		break;

	case MSG_TYPE_ERROR:	/* ignored ... */
	case MSG_TYPE_TERMENV:	/* ignored ... */
	case MSG_TYPE_TERMIOS:	/* ignored ... */
		break;
	}

out_remove_buffer:
	list_del(&rb->list);
	destroy_tty_buffer(rb);
	*has_more_data = !list_empty(&priv->tty_inqueue);

out_written:
	return written;
}
298 
313 static int hvc_iucv_get_chars(uint32_t vtermno, char *buf, int count)
314 {
315  struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
316  int written;
317  int has_more_data;
318 
319  if (count <= 0)
320  return 0;
321 
322  if (!priv)
323  return -ENODEV;
324 
325  spin_lock(&priv->lock);
326  has_more_data = 0;
327  written = hvc_iucv_write(priv, buf, count, &has_more_data);
328  spin_unlock(&priv->lock);
329 
330  /* if there are still messages on the queue... schedule another run */
331  if (has_more_data)
332  hvc_kick();
333 
334  return written;
335 }
336 
353 static int hvc_iucv_queue(struct hvc_iucv_private *priv, const char *buf,
354  int count)
355 {
356  size_t len;
357 
358  if (priv->iucv_state == IUCV_DISCONN)
359  return count; /* ignore data */
360 
361  if (priv->iucv_state == IUCV_SEVERED)
362  return -EPIPE;
363 
364  len = min_t(size_t, count, SNDBUF_SIZE - priv->sndbuf_len);
365  if (!len)
366  return 0;
367 
368  memcpy(priv->sndbuf + priv->sndbuf_len, buf, len);
369  priv->sndbuf_len += len;
370 
371  if (priv->iucv_state == IUCV_CONNECTED)
373 
374  return len;
375 }
376 
/**
 * hvc_iucv_send() - Flush the send buffer as a single IUCV message.
 * @priv:	Per-terminal private data.
 *
 * Copies the send buffer into a freshly allocated iucv_tty_buffer, queues it
 * on tty_outqueue and starts an asynchronous IUCV send. The send buffer is
 * reset regardless of the send outcome. Caller must hold priv->lock.
 *
 * Returns the number of buffered bytes handed to IUCV (0 if the buffer was
 * empty), -EPIPE if the path is severed, -EIO if disconnected, or -ENOMEM
 * if no message buffer could be allocated.
 */
static int hvc_iucv_send(struct hvc_iucv_private *priv)
{
	struct iucv_tty_buffer *sb;
	int rc, len;

	if (priv->iucv_state == IUCV_SEVERED)
		return -EPIPE;

	if (priv->iucv_state == IUCV_DISCONN)
		return -EIO;

	if (!priv->sndbuf_len)
		return 0;

	/* allocate internal buffer to store msg data and also compute total
	 * message length */
	sb = alloc_tty_buffer(priv->sndbuf_len, GFP_ATOMIC);
	if (!sb)
		return -ENOMEM;

	memcpy(sb->mbuf->data, priv->sndbuf, priv->sndbuf_len);
	sb->mbuf->datalen = (u16) priv->sndbuf_len;
	sb->msg.length = MSG_SIZE(sb->mbuf->datalen);

	/* queue before sending: hvc_iucv_msg_complete() removes it again */
	list_add_tail(&sb->list, &priv->tty_outqueue);

	rc = __iucv_message_send(priv->path, &sb->msg, 0, 0,
				 (void *) sb->mbuf, sb->msg.length);
	if (rc) {
		/* drop the message here; however we might want to handle
		 * 0x03 (msg limit reached) by trying again... */
		list_del(&sb->list);
		destroy_tty_buffer(sb);
	}
	len = priv->sndbuf_len;
	priv->sndbuf_len = 0;

	return len;
}
425 
433 static void hvc_iucv_sndbuf_work(struct work_struct *work)
434 {
435  struct hvc_iucv_private *priv;
436 
437  priv = container_of(work, struct hvc_iucv_private, sndbuf_work.work);
438  if (!priv)
439  return;
440 
441  spin_lock_bh(&priv->lock);
442  hvc_iucv_send(priv);
443  spin_unlock_bh(&priv->lock);
444 }
445 
458 static int hvc_iucv_put_chars(uint32_t vtermno, const char *buf, int count)
459 {
460  struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
461  int queued;
462 
463  if (count <= 0)
464  return 0;
465 
466  if (!priv)
467  return -ENODEV;
468 
469  spin_lock(&priv->lock);
470  queued = hvc_iucv_queue(priv, buf, count);
471  spin_unlock(&priv->lock);
472 
473  return queued;
474 }
475 
487 static int hvc_iucv_notifier_add(struct hvc_struct *hp, int id)
488 {
489  struct hvc_iucv_private *priv;
490 
491  priv = hvc_iucv_get_private(id);
492  if (!priv)
493  return 0;
494 
495  spin_lock_bh(&priv->lock);
496  priv->tty_state = TTY_OPENED;
497  spin_unlock_bh(&priv->lock);
498 
499  return 0;
500 }
501 
506 static void hvc_iucv_cleanup(struct hvc_iucv_private *priv)
507 {
508  destroy_tty_buffer_list(&priv->tty_outqueue);
509  destroy_tty_buffer_list(&priv->tty_inqueue);
510 
511  priv->tty_state = TTY_CLOSED;
512  priv->iucv_state = IUCV_DISCONN;
513 
514  priv->sndbuf_len = 0;
515 }
516 
521 static inline int tty_outqueue_empty(struct hvc_iucv_private *priv)
522 {
523  int rc;
524 
525  spin_lock_bh(&priv->lock);
526  rc = list_empty(&priv->tty_outqueue);
527  spin_unlock_bh(&priv->lock);
528 
529  return rc;
530 }
531 
539 static void flush_sndbuf_sync(struct hvc_iucv_private *priv)
540 {
541  int sync_wait;
542 
544 
545  spin_lock_bh(&priv->lock);
546  hvc_iucv_send(priv); /* force sending buffered data */
547  sync_wait = !list_empty(&priv->tty_outqueue); /* anything queued ? */
548  spin_unlock_bh(&priv->lock);
549 
550  if (sync_wait)
552  tty_outqueue_empty(priv), HZ/10);
553 }
554 
/**
 * hvc_iucv_hangup() - Sever the IUCV path of a terminal and reset its state.
 * @priv:	Per-terminal private data.
 *
 * Marks a connected terminal as IUCV_SEVERED and, depending on the tty
 * state, either cleans up immediately (tty already closed), cleans up but
 * keeps the console usable (console terminals stay TTY_OPENED so the Linux
 * console keeps working), or kicks the HVC thread so the -EPIPE return of
 * hvc_iucv_write() triggers a tty hangup.
 *
 * The path is severed and freed outside priv->lock because of the lock
 * ordering priv->lock <--> iucv_table_lock.
 */
static void hvc_iucv_hangup(struct hvc_iucv_private *priv)
{
	struct iucv_path *path;

	path = NULL;
	spin_lock(&priv->lock);
	if (priv->iucv_state == IUCV_CONNECTED) {
		/* take ownership of the path; sever it after unlocking */
		path = priv->path;
		priv->path = NULL;
		priv->iucv_state = IUCV_SEVERED;
		if (priv->tty_state == TTY_CLOSED)
			hvc_iucv_cleanup(priv);
		else
			/* console is special (see above) */
			if (priv->is_console) {
				hvc_iucv_cleanup(priv);
				priv->tty_state = TTY_OPENED;
			} else
				hvc_kick();
	}
	spin_unlock(&priv->lock);

	/* finally sever path (outside of priv->lock due to lock ordering) */
	if (path) {
		iucv_path_sever(path, NULL);
		iucv_path_free(path);
	}
}
615 
633 static void hvc_iucv_notifier_hangup(struct hvc_struct *hp, int id)
634 {
635  struct hvc_iucv_private *priv;
636 
637  priv = hvc_iucv_get_private(id);
638  if (!priv)
639  return;
640 
641  flush_sndbuf_sync(priv);
642 
643  spin_lock_bh(&priv->lock);
644  /* NOTE: If the hangup was scheduled by ourself (from the iucv
645  * path_servered callback [IUCV_SEVERED]), we have to clean up
646  * our structure and to set state to TTY_CLOSED.
647  * If the tty was hung up otherwise (e.g. vhangup()), then we
648  * ignore this hangup and keep an established IUCV path open...
649  * (...the reason is that we are not able to connect back to the
650  * client if we disconnect on hang up) */
651  priv->tty_state = TTY_CLOSED;
652 
653  if (priv->iucv_state == IUCV_SEVERED)
654  hvc_iucv_cleanup(priv);
655  spin_unlock_bh(&priv->lock);
656 }
657 
670 static void hvc_iucv_notifier_del(struct hvc_struct *hp, int id)
671 {
672  struct hvc_iucv_private *priv;
673  struct iucv_path *path;
674 
675  priv = hvc_iucv_get_private(id);
676  if (!priv)
677  return;
678 
679  flush_sndbuf_sync(priv);
680 
681  spin_lock_bh(&priv->lock);
682  path = priv->path; /* save reference to IUCV path */
683  priv->path = NULL;
684  hvc_iucv_cleanup(priv);
685  spin_unlock_bh(&priv->lock);
686 
687  /* sever IUCV path outside of priv->lock due to lock ordering of:
688  * priv->lock <--> iucv_table_lock */
689  if (path) {
690  iucv_path_sever(path, NULL);
691  iucv_path_free(path);
692  }
693 }
694 
702 static int hvc_iucv_filter_connreq(u8 ipvmid[8])
703 {
704  size_t i;
705 
706  /* Note: default policy is ACCEPT if no filter is set */
707  if (!hvc_iucv_filter_size)
708  return 0;
709 
710  for (i = 0; i < hvc_iucv_filter_size; i++)
711  if (0 == memcmp(ipvmid, hvc_iucv_filter + (8 * i), 8))
712  return 0;
713  return 1;
714 }
715 
735 static int hvc_iucv_path_pending(struct iucv_path *path,
736  u8 ipvmid[8], u8 ipuser[16])
737 {
738  struct hvc_iucv_private *priv;
739  u8 nuser_data[16];
740  u8 vm_user_id[9];
741  int i, rc;
742 
743  priv = NULL;
744  for (i = 0; i < hvc_iucv_devices; i++)
745  if (hvc_iucv_table[i] &&
746  (0 == memcmp(hvc_iucv_table[i]->srv_name, ipuser, 8))) {
747  priv = hvc_iucv_table[i];
748  break;
749  }
750  if (!priv)
751  return -ENODEV;
752 
753  /* Enforce that ipvmid is allowed to connect to us */
754  read_lock(&hvc_iucv_filter_lock);
755  rc = hvc_iucv_filter_connreq(ipvmid);
756  read_unlock(&hvc_iucv_filter_lock);
757  if (rc) {
758  iucv_path_sever(path, ipuser);
759  iucv_path_free(path);
760  memcpy(vm_user_id, ipvmid, 8);
761  vm_user_id[8] = 0;
762  pr_info("A connection request from z/VM user ID %s "
763  "was refused\n", vm_user_id);
764  return 0;
765  }
766 
767  spin_lock(&priv->lock);
768 
769  /* If the terminal is already connected or being severed, then sever
770  * this path to enforce that there is only ONE established communication
771  * path per terminal. */
772  if (priv->iucv_state != IUCV_DISCONN) {
773  iucv_path_sever(path, ipuser);
774  iucv_path_free(path);
775  goto out_path_handled;
776  }
777 
778  /* accept path */
779  memcpy(nuser_data, ipuser + 8, 8); /* remote service (for af_iucv) */
780  memcpy(nuser_data + 8, ipuser, 8); /* local service (for af_iucv) */
781  path->msglim = 0xffff; /* IUCV MSGLIMIT */
782  path->flags &= ~IUCV_IPRMDATA; /* TODO: use IUCV_IPRMDATA */
783  rc = iucv_path_accept(path, &hvc_iucv_handler, nuser_data, priv);
784  if (rc) {
785  iucv_path_sever(path, ipuser);
786  iucv_path_free(path);
787  goto out_path_handled;
788  }
789  priv->path = path;
790  priv->iucv_state = IUCV_CONNECTED;
791 
792  /* flush buffered output data... */
794 
795 out_path_handled:
796  spin_unlock(&priv->lock);
797  return 0;
798 }
799 
/**
 * hvc_iucv_path_severed() - IUCV handler to process a path sever.
 * @path:	Severed path (with our priv pointer attached at accept time)
 * @ipuser:	16 bytes of user data (unused here)
 *
 * The communication peer severed the path: hang up the terminal. Runs in
 * IUCV interrupt context.
 */
static void hvc_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
{
	struct hvc_iucv_private *priv = path->private;

	hvc_iucv_hangup(priv);
}
817 
/**
 * hvc_iucv_msg_pending() - IUCV handler for incoming messages.
 * @path:	Established IUCV path (our priv pointer attached)
 * @msg:	Pending IUCV message descriptor
 *
 * Queues the message descriptor on tty_inqueue for later receipt by
 * hvc_iucv_write(); the payload itself is NOT received here (runs in IUCV
 * interrupt context, hence GFP_ATOMIC and the deferred receive). Messages
 * are rejected when too large, when the tty is not open, or when no buffer
 * descriptor can be allocated.
 */
static void hvc_iucv_msg_pending(struct iucv_path *path,
				 struct iucv_message *msg)
{
	struct hvc_iucv_private *priv = path->private;
	struct iucv_tty_buffer *rb;

	/* reject messages that exceed max size of iucv_tty_msg->datalen */
	if (msg->length > MSG_SIZE(MSG_MAX_DATALEN)) {
		iucv_message_reject(path, msg);
		return;
	}

	spin_lock(&priv->lock);

	/* reject messages if tty has not yet been opened */
	if (priv->tty_state == TTY_CLOSED) {
		iucv_message_reject(path, msg);
		goto unlock_return;
	}

	/* allocate tty buffer to save iucv msg only */
	rb = alloc_tty_buffer(0, GFP_ATOMIC);
	if (!rb) {
		iucv_message_reject(path, msg);
		goto unlock_return;	/* -ENOMEM */
	}
	rb->msg = *msg;

	list_add_tail(&rb->list, &priv->tty_inqueue);

	hvc_kick();	/* wake up hvc thread */

unlock_return:
	spin_unlock(&priv->lock);
}
864 
877 static void hvc_iucv_msg_complete(struct iucv_path *path,
878  struct iucv_message *msg)
879 {
880  struct hvc_iucv_private *priv = path->private;
881  struct iucv_tty_buffer *ent, *next;
882  LIST_HEAD(list_remove);
883 
884  spin_lock(&priv->lock);
885  list_for_each_entry_safe(ent, next, &priv->tty_outqueue, list)
886  if (ent->msg.id == msg->id) {
887  list_move(&ent->list, &list_remove);
888  break;
889  }
890  wake_up(&priv->sndbuf_waitq);
891  spin_unlock(&priv->lock);
892  destroy_tty_buffer_list(&list_remove);
893 }
894 
/**
 * hvc_iucv_pm_freeze() - Device freeze callback (suspend/hibernate).
 * @dev:	The terminal's device.
 *
 * Sever the terminal's IUCV path before suspending.
 *
 * NOTE(review): the local_bh_disable() call was lost during extraction —
 * the bare local_bh_enable() proves it must have existed; restored so the
 * enable/disable pair is balanced again.
 */
static int hvc_iucv_pm_freeze(struct device *dev)
{
	struct hvc_iucv_private *priv = dev_get_drvdata(dev);

	local_bh_disable();
	hvc_iucv_hangup(priv);
	local_bh_enable();

	return 0;
}
912 
/**
 * hvc_iucv_pm_restore_thaw() - Device thaw/restore callback (resume).
 * @dev:	The terminal's device (unused; one kick serves all terminals).
 *
 * Wake the HVC thread so buffered terminal I/O resumes after suspend.
 */
static int hvc_iucv_pm_restore_thaw(struct device *dev)
{
	hvc_kick();
	return 0;
}
925 
926 
/* HVC operations: terminal I/O and tty lifecycle callbacks for the HVC layer */
static const struct hv_ops hvc_iucv_ops = {
	.get_chars = hvc_iucv_get_chars,
	.put_chars = hvc_iucv_put_chars,
	.notifier_add = hvc_iucv_notifier_add,
	.notifier_del = hvc_iucv_notifier_del,
	.notifier_hangup = hvc_iucv_notifier_hangup,
};

/* Suspend / resume device operations */
static const struct dev_pm_ops hvc_iucv_pm_ops = {
	.freeze	  = hvc_iucv_pm_freeze,
	.thaw	  = hvc_iucv_pm_restore_thaw,
	.restore  = hvc_iucv_pm_restore_thaw,
};

/* IUCV HVC device driver (one driver shared by all terminal devices) */
static struct device_driver hvc_iucv_driver = {
	.name = KMSG_COMPONENT,
	.bus  = &iucv_bus,
	.pm   = &hvc_iucv_pm_ops,
};
949 
959 static int __init hvc_iucv_alloc(int id, unsigned int is_console)
960 {
961  struct hvc_iucv_private *priv;
962  char name[9];
963  int rc;
964 
965  priv = kzalloc(sizeof(struct hvc_iucv_private), GFP_KERNEL);
966  if (!priv)
967  return -ENOMEM;
968 
969  spin_lock_init(&priv->lock);
970  INIT_LIST_HEAD(&priv->tty_outqueue);
971  INIT_LIST_HEAD(&priv->tty_inqueue);
972  INIT_DELAYED_WORK(&priv->sndbuf_work, hvc_iucv_sndbuf_work);
974 
975  priv->sndbuf = (void *) get_zeroed_page(GFP_KERNEL);
976  if (!priv->sndbuf) {
977  kfree(priv);
978  return -ENOMEM;
979  }
980 
981  /* set console flag */
982  priv->is_console = is_console;
983 
984  /* allocate hvc device */
985  priv->hvc = hvc_alloc(HVC_IUCV_MAGIC + id, /* PAGE_SIZE */
986  HVC_IUCV_MAGIC + id, &hvc_iucv_ops, 256);
987  if (IS_ERR(priv->hvc)) {
988  rc = PTR_ERR(priv->hvc);
989  goto out_error_hvc;
990  }
991 
992  /* notify HVC thread instead of using polling */
993  priv->hvc->irq_requested = 1;
994 
995  /* setup iucv related information */
996  snprintf(name, 9, "lnxhvc%-2d", id);
997  memcpy(priv->srv_name, name, 8);
998  ASCEBC(priv->srv_name, 8);
999 
1000  /* create and setup device */
1001  priv->dev = kzalloc(sizeof(*priv->dev), GFP_KERNEL);
1002  if (!priv->dev) {
1003  rc = -ENOMEM;
1004  goto out_error_dev;
1005  }
1006  dev_set_name(priv->dev, "hvc_iucv%d", id);
1007  dev_set_drvdata(priv->dev, priv);
1008  priv->dev->bus = &iucv_bus;
1009  priv->dev->parent = iucv_root;
1010  priv->dev->driver = &hvc_iucv_driver;
1011  priv->dev->release = (void (*)(struct device *)) kfree;
1012  rc = device_register(priv->dev);
1013  if (rc) {
1014  put_device(priv->dev);
1015  goto out_error_dev;
1016  }
1017 
1018  hvc_iucv_table[id] = priv;
1019  return 0;
1020 
1021 out_error_dev:
1022  hvc_remove(priv->hvc);
1023 out_error_hvc:
1024  free_page((unsigned long) priv->sndbuf);
1025  kfree(priv);
1026 
1027  return rc;
1028 }
1029 
/**
 * hvc_iucv_destroy() - Destroy and free hvc_iucv_private instances.
 * @priv:	Pointer to the struct hvc_iucv_private instance.
 *
 * Tears down a terminal in reverse order of hvc_iucv_alloc(); used on the
 * hvc_iucv_init() error path.
 */
static void __init hvc_iucv_destroy(struct hvc_iucv_private *priv)
{
	hvc_remove(priv->hvc);
	device_unregister(priv->dev);	/* release callback kfrees priv->dev */
	free_page((unsigned long) priv->sndbuf);
	kfree(priv);
}
1040 
1045 static const char *hvc_iucv_parse_filter(const char *filter, char *dest)
1046 {
1047  const char *nextdelim, *residual;
1048  size_t len;
1049 
1050  nextdelim = strchr(filter, ',');
1051  if (nextdelim) {
1052  len = nextdelim - filter;
1053  residual = nextdelim + 1;
1054  } else {
1055  len = strlen(filter);
1056  residual = filter + len;
1057  }
1058 
1059  if (len == 0)
1060  return ERR_PTR(-EINVAL);
1061 
1062  /* check for '\n' (if called from sysfs) */
1063  if (filter[len - 1] == '\n')
1064  len--;
1065 
1066  if (len > 8)
1067  return ERR_PTR(-EINVAL);
1068 
1069  /* pad with blanks and save upper case version of user ID */
1070  memset(dest, ' ', 8);
1071  while (len--)
1072  dest[len] = toupper(filter[len]);
1073  return residual;
1074 }
1075 
/**
 * hvc_iucv_setup_filter() - Set up a new z/VM user ID filter list.
 * @val:	Comma-separated list of z/VM user IDs (may end with '\n');
 *		an empty string (or "\n" alone) clears the filter.
 *
 * Parses @val into a freshly allocated table of 8-byte, blank-padded,
 * upper-cased user IDs and atomically swaps it in under
 * hvc_iucv_filter_lock. The old table is freed after the swap.
 *
 * Returns 0 on success; -EINVAL for a malformed entry, -ENOSPC when more
 * than MAX_VMID_FILTER entries are given, or -ENOMEM on allocation failure.
 */
static int hvc_iucv_setup_filter(const char *val)
{
	const char *residual;
	int err;
	size_t size, count;
	void *array, *old_filter;

	count = strlen(val);
	if (count == 0 || (count == 1 && val[0] == '\n')) {
		size = 0;
		array = NULL;
		goto out_replace_filter;	/* clear filter */
	}

	/* count user IDs in order to allocate sufficient memory */
	size = 1;
	residual = val;
	while ((residual = strchr(residual, ',')) != NULL) {
		residual++;
		size++;
	}

	/* check if the specified list exceeds the filter limit */
	if (size > MAX_VMID_FILTER)
		return -ENOSPC;

	/* size is bounded by MAX_VMID_FILTER, so size * 8 cannot overflow */
	array = kzalloc(size * 8, GFP_KERNEL);
	if (!array)
		return -ENOMEM;

	count = size;
	residual = val;
	while (*residual && count) {
		residual = hvc_iucv_parse_filter(residual,
						 array + ((size - count) * 8));
		if (IS_ERR(residual)) {
			err = PTR_ERR(residual);
			kfree(array);
			goto out_err;
		}
		count--;
	}

out_replace_filter:
	/* swap in the new table atomically; free the old one after unlock */
	write_lock_bh(&hvc_iucv_filter_lock);
	old_filter = hvc_iucv_filter;
	hvc_iucv_filter_size = size;
	hvc_iucv_filter = array;
	write_unlock_bh(&hvc_iucv_filter_lock);
	kfree(old_filter);

	err = 0;
out_err:
	return err;
}
1141 
1152 static int param_set_vmidfilter(const char *val, const struct kernel_param *kp)
1153 {
1154  int rc;
1155 
1156  if (!MACHINE_IS_VM || !hvc_iucv_devices)
1157  return -ENODEV;
1158 
1159  if (!val)
1160  return -EINVAL;
1161 
1162  rc = 0;
1163  if (slab_is_available())
1164  rc = hvc_iucv_setup_filter(val);
1165  else
1166  hvc_iucv_filter_string = val; /* defer... */
1167  return rc;
1168 }
1169 
/**
 * param_get_vmidfilter() - Kernel parameter get handler for hvc_iucv_allow=.
 * @buffer:	Output buffer provided by the kernel parameter code.
 * @kp:		Kernel parameter descriptor (unused).
 *
 * Writes the current filter as a comma-separated list of user IDs (trailing
 * blanks stripped) into @buffer. Returns the number of characters written.
 *
 * NOTE(review): @buffer's capacity is not checked here — with
 * MAX_VMID_FILTER (500) entries of up to 9 characters each the output can
 * exceed a 4 KiB parameter buffer; verify against the caller's buffer size.
 */
static int param_get_vmidfilter(char *buffer, const struct kernel_param *kp)
{
	int rc;
	size_t index, len;
	void *start, *end;

	if (!MACHINE_IS_VM || !hvc_iucv_devices)
		return -ENODEV;

	rc = 0;
	read_lock_bh(&hvc_iucv_filter_lock);
	for (index = 0; index < hvc_iucv_filter_size; index++) {
		start = hvc_iucv_filter + (8 * index);
		end = memchr(start, ' ', 8);	/* strip blank padding */
		len = (end) ? end - start : 8;
		memcpy(buffer + rc, start, len);
		rc += len;
		buffer[rc++] = ',';
	}
	read_unlock_bh(&hvc_iucv_filter_lock);
	if (rc)
		buffer[--rc] = '\0';	/* replace last comma and update rc */
	return rc;
}
1203 
/* No compile-time type check is possible for the 'void *' filter parameter */
#define param_check_vmidfilter(name, p) __param_check(name, p, void)

/* Custom get/set operations for the hvc_iucv_allow= kernel parameter */
static struct kernel_param_ops param_ops_vmidfilter = {
	.set = param_set_vmidfilter,
	.get = param_get_vmidfilter,
};
1210 
1214 static int __init hvc_iucv_init(void)
1215 {
1216  int rc;
1217  unsigned int i;
1218 
1219  if (!hvc_iucv_devices)
1220  return -ENODEV;
1221 
1222  if (!MACHINE_IS_VM) {
1223  pr_notice("The z/VM IUCV HVC device driver cannot "
1224  "be used without z/VM\n");
1225  rc = -ENODEV;
1226  goto out_error;
1227  }
1228 
1229  if (hvc_iucv_devices > MAX_HVC_IUCV_LINES) {
1230  pr_err("%lu is not a valid value for the hvc_iucv= "
1231  "kernel parameter\n", hvc_iucv_devices);
1232  rc = -EINVAL;
1233  goto out_error;
1234  }
1235 
1236  /* register IUCV HVC device driver */
1237  rc = driver_register(&hvc_iucv_driver);
1238  if (rc)
1239  goto out_error;
1240 
1241  /* parse hvc_iucv_allow string and create z/VM user ID filter list */
1242  if (hvc_iucv_filter_string) {
1243  rc = hvc_iucv_setup_filter(hvc_iucv_filter_string);
1244  switch (rc) {
1245  case 0:
1246  break;
1247  case -ENOMEM:
1248  pr_err("Allocating memory failed with "
1249  "reason code=%d\n", 3);
1250  goto out_error;
1251  case -EINVAL:
1252  pr_err("hvc_iucv_allow= does not specify a valid "
1253  "z/VM user ID list\n");
1254  goto out_error;
1255  case -ENOSPC:
1256  pr_err("hvc_iucv_allow= specifies too many "
1257  "z/VM user IDs\n");
1258  goto out_error;
1259  default:
1260  goto out_error;
1261  }
1262  }
1263 
1264  hvc_iucv_buffer_cache = kmem_cache_create(KMSG_COMPONENT,
1265  sizeof(struct iucv_tty_buffer),
1266  0, 0, NULL);
1267  if (!hvc_iucv_buffer_cache) {
1268  pr_err("Allocating memory failed with reason code=%d\n", 1);
1269  rc = -ENOMEM;
1270  goto out_error;
1271  }
1272 
1273  hvc_iucv_mempool = mempool_create_slab_pool(MEMPOOL_MIN_NR,
1274  hvc_iucv_buffer_cache);
1275  if (!hvc_iucv_mempool) {
1276  pr_err("Allocating memory failed with reason code=%d\n", 2);
1277  kmem_cache_destroy(hvc_iucv_buffer_cache);
1278  rc = -ENOMEM;
1279  goto out_error;
1280  }
1281 
1282  /* register the first terminal device as console
1283  * (must be done before allocating hvc terminal devices) */
1284  rc = hvc_instantiate(HVC_IUCV_MAGIC, IUCV_HVC_CON_IDX, &hvc_iucv_ops);
1285  if (rc) {
1286  pr_err("Registering HVC terminal device as "
1287  "Linux console failed\n");
1288  goto out_error_memory;
1289  }
1290 
1291  /* allocate hvc_iucv_private structs */
1292  for (i = 0; i < hvc_iucv_devices; i++) {
1293  rc = hvc_iucv_alloc(i, (i == IUCV_HVC_CON_IDX) ? 1 : 0);
1294  if (rc) {
1295  pr_err("Creating a new HVC terminal device "
1296  "failed with error code=%d\n", rc);
1297  goto out_error_hvc;
1298  }
1299  }
1300 
1301  /* register IUCV callback handler */
1302  rc = iucv_register(&hvc_iucv_handler, 0);
1303  if (rc) {
1304  pr_err("Registering IUCV handlers failed with error code=%d\n",
1305  rc);
1306  goto out_error_hvc;
1307  }
1308 
1309  return 0;
1310 
1311 out_error_hvc:
1312  for (i = 0; i < hvc_iucv_devices; i++)
1313  if (hvc_iucv_table[i])
1314  hvc_iucv_destroy(hvc_iucv_table[i]);
1315 out_error_memory:
1316  mempool_destroy(hvc_iucv_mempool);
1317  kmem_cache_destroy(hvc_iucv_buffer_cache);
1318 out_error:
1319  if (hvc_iucv_filter)
1320  kfree(hvc_iucv_filter);
1321  hvc_iucv_devices = 0; /* ensure that we do not provide any device */
1322  return rc;
1323 }
1324 
1329 static int __init hvc_iucv_config(char *val)
1330 {
1331  return strict_strtoul(val, 10, &hvc_iucv_devices);
1332 }
1333 
1334 
1335 device_initcall(hvc_iucv_init);
1336 __setup("hvc_iucv=", hvc_iucv_config);
1337 core_param(hvc_iucv_allow, hvc_iucv_filter, vmidfilter, 0640);