Linux Kernel 3.7.1
iucv.c
/*
 * IUCV base infrastructure.
 *
 * Copyright IBM Corp. 2001, 2009
 *
 * Author(s):
 *	Original source:
 *		Alan Altmark (Alan_Altmark@us.ibm.com)  Sept. 2000
 *		Xenia Tkatschow (xenia@us.ibm.com)
 *	2Gb awareness and general cleanup:
 *		Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
 *	Rewritten for af_iucv:
 *		Martin Schwidefsky <schwidefsky@de.ibm.com>
 *	PM functions:
 *		Ursula Braun (ursula.braun@de.ibm.com)
 *
 * Documentation used:
 *	The original source
 *	CP Programming Service, IBM document # SC24-5760
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define KMSG_COMPONENT "iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/reboot.h>
#include <net/iucv/iucv.h>
#include <linux/atomic.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/smp.h>

/*
 * FLAGS:
 * All flags are defined in the field IPFLAGS1 of each function
 * and can be found in CP Programming Services.
 * IPSRCCLS - Indicates you have specified a source class.
 * IPTRGCLS - Indicates you have specified a target class.
 * IPFGPID  - Indicates you have specified a pathid.
 * IPFGMID  - Indicates you have specified a message ID.
 * IPNORPY  - Indicates a one-way message. No reply expected.
 * IPALL    - Indicates that all paths are affected.
 */
#define IUCV_IPSRCCLS	0x01
#define IUCV_IPTRGCLS	0x01
#define IUCV_IPFGPID	0x02
#define IUCV_IPFGMID	0x04
#define IUCV_IPNORPY	0x10
#define IUCV_IPALL	0x80

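/*
 * Illustrative sketch, not part of the original file: the bits above are
 * OR'ed into the IPFLAGS1 field of a parameter block. A one-way send, for
 * instance, always adds IUCV_IPNORPY to whatever flags the caller asked for:
 */
static inline u8 iucv_example_oneway_flags(u8 flags)
{
	return flags | IUCV_IPNORPY;	/* no reply expected */
}
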
static int iucv_bus_match(struct device *dev, struct device_driver *drv)
{
	return 0;
}

enum iucv_pm_states {
	IUCV_PM_INITIAL = 0,
	IUCV_PM_FREEZING = 1,
	IUCV_PM_THAWING = 2,
	IUCV_PM_RESTORING = 3,
};
static enum iucv_pm_states iucv_pm_state;

static int iucv_pm_prepare(struct device *);
static void iucv_pm_complete(struct device *);
static int iucv_pm_freeze(struct device *);
static int iucv_pm_thaw(struct device *);
static int iucv_pm_restore(struct device *);

static const struct dev_pm_ops iucv_pm_ops = {
	.prepare = iucv_pm_prepare,
	.complete = iucv_pm_complete,
	.freeze = iucv_pm_freeze,
	.thaw = iucv_pm_thaw,
	.restore = iucv_pm_restore,
};

struct bus_type iucv_bus = {
	.name = "iucv",
	.match = iucv_bus_match,
	.pm = &iucv_pm_ops,
};
EXPORT_SYMBOL(iucv_bus);

struct device *iucv_root;
EXPORT_SYMBOL(iucv_root);

static int iucv_available;

/* General IUCV interrupt structure */
struct iucv_irq_data {
	u16 ippathid;
	u8  ipflags1;
	u8  iptype;
	u32 res2[8];
};

struct iucv_irq_list {
	struct list_head list;
	struct iucv_irq_data data;
};

static struct iucv_irq_data *iucv_irq_data[NR_CPUS];
static cpumask_t iucv_buffer_cpumask = { CPU_BITS_NONE };
static cpumask_t iucv_irq_cpumask = { CPU_BITS_NONE };

/*
 * Queue of interrupt buffers lock for delivery via the tasklet
 * (fast but can't call smp_call_function).
 */
static LIST_HEAD(iucv_task_queue);

/*
 * The tasklet for fast delivery of iucv interrupts.
 */
static void iucv_tasklet_fn(unsigned long);
static DECLARE_TASKLET(iucv_tasklet, iucv_tasklet_fn, 0);

/*
 * Queue of interrupt buffers for delivery via a work queue
 * (slower but can call smp_call_function).
 */
static LIST_HEAD(iucv_work_queue);

/*
 * The work element to deliver path pending interrupts.
 */
static void iucv_work_fn(struct work_struct *work);
static DECLARE_WORK(iucv_work, iucv_work_fn);

/*
 * Spinlock protecting task and work queue.
 */
static DEFINE_SPINLOCK(iucv_queue_lock);

/*
 * Identifier for Command Codes.
 */
enum iucv_command_codes {
	IUCV_QUERY = 0,
	IUCV_RETRIEVE_BUFFER = 2,
	IUCV_SEND = 4,
	IUCV_RECEIVE = 5,
	IUCV_REPLY = 6,
	IUCV_REJECT = 8,
	IUCV_PURGE = 9,
	IUCV_ACCEPT = 10,
	IUCV_CONNECT = 11,
	IUCV_DECLARE_BUFFER = 12,
	IUCV_QUIESCE = 13,
	IUCV_RESUME = 14,
	IUCV_SEVER = 15,
	IUCV_SETMASK = 16,
	IUCV_SETCONTROLMASK = 17,
};

/*
 * Error messages that are used with the iucv_sever function. They get
 * converted to EBCDIC.
 */
static char iucv_error_no_listener[16] = "NO LISTENER";
static char iucv_error_no_memory[16] = "NO MEMORY";
static char iucv_error_pathid[16] = "INVALID PATHID";

/*
 * iucv_handler_list: List of registered handlers.
 */
static LIST_HEAD(iucv_handler_list);

/*
 * iucv_path_table: an array of iucv_path structures.
 */
static struct iucv_path **iucv_path_table;
static unsigned long iucv_max_pathid;

/*
 * iucv_table_lock: spinlock protecting iucv_handler_list and iucv_path_table
 */
static DEFINE_SPINLOCK(iucv_table_lock);

/*
 * iucv_active_cpu: contains the number of the cpu executing the tasklet
 * or the work handler. Needed for iucv_path_sever called from tasklet.
 */
static int iucv_active_cpu = -1;

/*
 * Mutex and wait queue for iucv_register/iucv_unregister.
 */
static DEFINE_MUTEX(iucv_register_mutex);

/*
 * Counter for number of non-smp capable handlers.
 */
static int iucv_nonsmp_handler;

222  * IUCV control data structure. Used by iucv_path_accept, iucv_path_connect,
223  * iucv_path_quiesce and iucv_path_sever.
224  */
231  u8 ipvmid[8];
232  u8 ipuser[16];
234 } __attribute__ ((packed,aligned(8)));

/*
 * Data in parameter list iucv structure. Used by iucv_message_send,
 * iucv_message_send2way and iucv_message_reply.
 */
struct iucv_cmd_dpl {
	u16 ippathid;
	u8  ipflags1;
	u8  iprcode;
	u32 ipmsgid;
	u32 iptrgcls;
	u8  iprmmsg[8];
	u32 ipsrccls;
	u32 ipmsgtag;
	u32 ipbfadr2;
	u32 ipbfln2f;
	u32 res;
} __attribute__ ((packed,aligned(8)));

/*
 * Data in buffer iucv structure. Used by iucv_message_receive,
 * iucv_message_reject, iucv_message_send, iucv_message_send2way
 * and iucv_declare_cpu.
 */
struct iucv_cmd_db {
	u16 ippathid;
	u8  ipflags1;
	u8  iprcode;
	u32 ipmsgid;
	u32 iptrgcls;
	u32 ipbfadr1;
	u32 ipbfln1f;
	u32 ipsrccls;
	u32 ipmsgtag;
	u32 ipbfadr2;
	u32 ipbfln2f;
	u32 res;
} __attribute__ ((packed,aligned(8)));

/*
 * Purge message iucv structure. Used by iucv_message_purge.
 */
struct iucv_cmd_purge {
	u16 ippathid;
	u8  ipflags1;
	u8  iprcode;
	u32 ipmsgid;
	u32 ipsrccls;
	u8  ipaudit[3];
	u8  res1[5];
	u32 res2;
	u32 ipmsgtag;
	u32 res3[3];
} __attribute__ ((packed,aligned(8)));

/*
 * Set mask iucv structure. Used by iucv_allow_cpu.
 */
struct iucv_cmd_set_mask {
	u8  ipmask;
	u8  res1[2];
	u8  iprcode;
	u32 res2[9];
} __attribute__ ((packed,aligned(8)));

union iucv_param {
	struct iucv_cmd_control ctrl;
	struct iucv_cmd_dpl dpl;
	struct iucv_cmd_db db;
	struct iucv_cmd_purge purge;
	struct iucv_cmd_set_mask set_mask;
};

/*
 * Anchor for per-cpu IUCV command parameter block.
 */
static union iucv_param *iucv_param[NR_CPUS];
static union iucv_param *iucv_param_irq[NR_CPUS];

/*
 * iucv_call_b2f0: call CP to execute an IUCV command. Returns the
 * condition code of the B2F0 instruction or, for condition code 1,
 * the IUCV return code stored in iprcode.
 */
static inline int iucv_call_b2f0(int command, union iucv_param *parm)
{
	register unsigned long reg0 asm ("0");
	register unsigned long reg1 asm ("1");
	int ccode;

	reg0 = command;
	reg1 = virt_to_phys(parm);
	asm volatile(
		"	.long 0xb2f01000\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (ccode), "=m" (*parm), "+d" (reg0), "+a" (reg1)
		:  "m" (*parm) : "cc");
	return (ccode == 1) ? parm->ctrl.iprcode : ccode;
}
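
/*
 * Usage sketch (illustrative, not in the original file): every IUCV
 * primitive below follows the same pattern - pick the per-cpu parameter
 * block, clear it, fill in the request and hand it to iucv_call_b2f0().
 * The caller must keep preemption disabled so the cpu cannot change
 * underneath the parameter block.
 */
static inline int iucv_example_setmask(u8 mask)
{
	union iucv_param *parm = iucv_param_irq[smp_processor_id()];

	memset(parm, 0, sizeof(union iucv_param));
	parm->set_mask.ipmask = mask;
	return iucv_call_b2f0(IUCV_SETMASK, parm);
}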

/*
 * iucv_query_maxconn: query the maximum number of connections that may
 * be established and store it in iucv_max_pathid. Returns 0 on success,
 * -ENOMEM if the parameter block cannot be allocated and -EPERM if IUCV
 * is not available.
 */
static int iucv_query_maxconn(void)
{
	register unsigned long reg0 asm ("0");
	register unsigned long reg1 asm ("1");
	void *param;
	int ccode;

	param = kzalloc(sizeof(union iucv_param), GFP_KERNEL|GFP_DMA);
	if (!param)
		return -ENOMEM;
	reg0 = IUCV_QUERY;
	reg1 = (unsigned long) param;
	asm volatile (
		"	.long 0xb2f01000\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (ccode), "+d" (reg0), "+d" (reg1) : : "cc");
	if (ccode == 0)
		iucv_max_pathid = reg1;
	kfree(param);
	return ccode ? -EPERM : 0;
}

/*
 * iucv_allow_cpu: allow iucv interrupts on the local cpu.
 */
static void iucv_allow_cpu(void *data)
{
	int cpu = smp_processor_id();
	union iucv_param *parm;

	/*
	 * Enable all iucv interrupts.
	 * ipmask contains bits for the different interrupts
	 * 0x80 - Flag to allow nonpriority message pending interrupts
	 * 0x40 - Flag to allow priority message pending interrupts
	 * 0x20 - Flag to allow nonpriority message completion interrupts
	 * 0x10 - Flag to allow priority message completion interrupts
	 * 0x08 - Flag to allow IUCV control interrupts
	 */
	parm = iucv_param_irq[cpu];
	memset(parm, 0, sizeof(union iucv_param));
	parm->set_mask.ipmask = 0xf8;
	iucv_call_b2f0(IUCV_SETMASK, parm);

	/*
	 * Enable all iucv control interrupts.
	 * ipmask contains bits for the different interrupts
	 * 0x80 - Flag to allow pending connections interrupts
	 * 0x40 - Flag to allow connection complete interrupts
	 * 0x20 - Flag to allow connection severed interrupts
	 * 0x10 - Flag to allow connection quiesced interrupts
	 * 0x08 - Flag to allow connection resumed interrupts
	 */
	memset(parm, 0, sizeof(union iucv_param));
	parm->set_mask.ipmask = 0xf8;
	iucv_call_b2f0(IUCV_SETCONTROLMASK, parm);
	/* Set indication that iucv interrupts are allowed for this cpu. */
	cpumask_set_cpu(cpu, &iucv_irq_cpumask);
}

/*
 * iucv_block_cpu: block iucv interrupts on the local cpu.
 */
static void iucv_block_cpu(void *data)
{
	int cpu = smp_processor_id();
	union iucv_param *parm;

	/* Disable all iucv interrupts. */
	parm = iucv_param_irq[cpu];
	memset(parm, 0, sizeof(union iucv_param));
	iucv_call_b2f0(IUCV_SETMASK, parm);

	/* Clear indication that iucv interrupts are allowed for this cpu. */
	cpumask_clear_cpu(cpu, &iucv_irq_cpumask);
}

/*
 * iucv_block_cpu_almost: allow connection-severed interrupts only on
 * the local cpu.
 */
static void iucv_block_cpu_almost(void *data)
{
	int cpu = smp_processor_id();
	union iucv_param *parm;

	/* Allow iucv control interrupts only */
	parm = iucv_param_irq[cpu];
	memset(parm, 0, sizeof(union iucv_param));
	parm->set_mask.ipmask = 0x08;
	iucv_call_b2f0(IUCV_SETMASK, parm);
	/* Allow iucv-severed interrupt only */
	memset(parm, 0, sizeof(union iucv_param));
	parm->set_mask.ipmask = 0x20;
	iucv_call_b2f0(IUCV_SETCONTROLMASK, parm);

	/* Clear indication that iucv interrupts are allowed for this cpu. */
	cpumask_clear_cpu(cpu, &iucv_irq_cpumask);
}

/*
 * iucv_declare_cpu: declare a per-cpu interrupt buffer to CP.
 */
static void iucv_declare_cpu(void *data)
{
	int cpu = smp_processor_id();
	union iucv_param *parm;
	int rc;

	if (cpumask_test_cpu(cpu, &iucv_buffer_cpumask))
		return;

	/* Declare interrupt buffer. */
	parm = iucv_param_irq[cpu];
	memset(parm, 0, sizeof(union iucv_param));
	parm->db.ipbfadr1 = virt_to_phys(iucv_irq_data[cpu]);
	rc = iucv_call_b2f0(IUCV_DECLARE_BUFFER, parm);
	if (rc) {
		char *err = "Unknown";
		switch (rc) {
		case 0x03:
			err = "Directory error";
			break;
		case 0x0a:
			err = "Invalid length";
			break;
		case 0x13:
			err = "Buffer already exists";
			break;
		case 0x3e:
			err = "Buffer overlap";
			break;
		case 0x5c:
			err = "Paging or storage error";
			break;
		}
		pr_warning("Defining an interrupt buffer on CPU %i"
			   " failed with 0x%02x (%s)\n", cpu, rc, err);
		return;
	}

	/* Set indication that an iucv buffer exists for this cpu. */
	cpumask_set_cpu(cpu, &iucv_buffer_cpumask);

	if (iucv_nonsmp_handler == 0 || cpumask_empty(&iucv_irq_cpumask))
		/* Enable iucv interrupts on this cpu. */
		iucv_allow_cpu(NULL);
	else
		/* Disable iucv interrupts on this cpu. */
		iucv_block_cpu(NULL);
}

/*
 * iucv_retrieve_cpu: block iucv interrupts and retrieve the interrupt
 * buffer on the local cpu.
 */
static void iucv_retrieve_cpu(void *data)
{
	int cpu = smp_processor_id();
	union iucv_param *parm;

	if (!cpumask_test_cpu(cpu, &iucv_buffer_cpumask))
		return;

	/* Block iucv interrupts. */
	iucv_block_cpu(NULL);

	/* Retrieve interrupt buffer. */
	parm = iucv_param_irq[cpu];
	iucv_call_b2f0(IUCV_RETRIEVE_BUFFER, parm);

	/* Clear indication that an iucv buffer exists for this cpu. */
	cpumask_clear_cpu(cpu, &iucv_buffer_cpumask);
}

/*
 * iucv_setmask_mp: allow iucv interrupts on all cpus that have a
 * declared interrupt buffer.
 */
static void iucv_setmask_mp(void)
{
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		/* Enable all cpus with a declared buffer. */
		if (cpumask_test_cpu(cpu, &iucv_buffer_cpumask) &&
		    !cpumask_test_cpu(cpu, &iucv_irq_cpumask))
			smp_call_function_single(cpu, iucv_allow_cpu,
						 NULL, 1);
	put_online_cpus();
}

/*
 * iucv_setmask_up: allow iucv interrupts on a single cpu only.
 */
static void iucv_setmask_up(void)
{
	cpumask_t cpumask;
	int cpu;

	/* Disable all cpus but the first in iucv_irq_cpumask. */
	cpumask_copy(&cpumask, &iucv_irq_cpumask);
	cpumask_clear_cpu(cpumask_first(&iucv_irq_cpumask), &cpumask);
	for_each_cpu(cpu, &cpumask)
		smp_call_function_single(cpu, iucv_block_cpu, NULL, 1);
}

/*
 * iucv_enable: allocate the path table and declare an interrupt buffer
 * on each online cpu. Returns 0 on success.
 */
static int iucv_enable(void)
{
	size_t alloc_size;
	int cpu, rc;

	get_online_cpus();
	rc = -ENOMEM;
	alloc_size = iucv_max_pathid * sizeof(struct iucv_path);
	iucv_path_table = kzalloc(alloc_size, GFP_KERNEL);
	if (!iucv_path_table)
		goto out;
	/* Declare per cpu buffers. */
	rc = -EIO;
	for_each_online_cpu(cpu)
		smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1);
	if (cpumask_empty(&iucv_buffer_cpumask))
		/* No cpu could declare an iucv buffer. */
		goto out;
	put_online_cpus();
	return 0;
out:
	kfree(iucv_path_table);
	iucv_path_table = NULL;
	put_online_cpus();
	return rc;
}

/*
 * iucv_disable: retrieve the interrupt buffers and free the path table.
 */
static void iucv_disable(void)
{
	get_online_cpus();
	on_each_cpu(iucv_retrieve_cpu, NULL, 1);
	kfree(iucv_path_table);
	iucv_path_table = NULL;
	put_online_cpus();
}

static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
				     unsigned long action, void *hcpu)
{
	cpumask_t cpumask;
	long cpu = (long) hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data),
					GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
		if (!iucv_irq_data[cpu])
			return notifier_from_errno(-ENOMEM);

		iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param),
					GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
		if (!iucv_param[cpu]) {
			kfree(iucv_irq_data[cpu]);
			iucv_irq_data[cpu] = NULL;
			return notifier_from_errno(-ENOMEM);
		}
		iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param),
					GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
		if (!iucv_param_irq[cpu]) {
			kfree(iucv_param[cpu]);
			iucv_param[cpu] = NULL;
			kfree(iucv_irq_data[cpu]);
			iucv_irq_data[cpu] = NULL;
			return notifier_from_errno(-ENOMEM);
		}
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		kfree(iucv_param_irq[cpu]);
		iucv_param_irq[cpu] = NULL;
		kfree(iucv_param[cpu]);
		iucv_param[cpu] = NULL;
		kfree(iucv_irq_data[cpu]);
		iucv_irq_data[cpu] = NULL;
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		if (!iucv_path_table)
			break;
		smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		if (!iucv_path_table)
			break;
		cpumask_copy(&cpumask, &iucv_buffer_cpumask);
		cpumask_clear_cpu(cpu, &cpumask);
		if (cpumask_empty(&cpumask))
			/* Can't offline last IUCV enabled cpu. */
			return notifier_from_errno(-EINVAL);
		smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 1);
		if (cpumask_empty(&iucv_irq_cpumask))
			smp_call_function_single(
				cpumask_first(&iucv_buffer_cpumask),
				iucv_allow_cpu, NULL, 1);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __refdata iucv_cpu_notifier = {
	.notifier_call = iucv_cpu_notify,
};

/*
 * iucv_sever_pathid: sever an iucv path to free up the pathid. Used
 * internally; @userdata is the 16-byte diagnostic string sent to the
 * communication partner.
 */
static int iucv_sever_pathid(u16 pathid, u8 userdata[16])
{
	union iucv_param *parm;

	parm = iucv_param_irq[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	if (userdata)
		memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
	parm->ctrl.ippathid = pathid;
	return iucv_call_b2f0(IUCV_SEVER, parm);
}

/*
 * __iucv_cleanup_queue: nop function run via smp_call_function to force
 * work items from pending external interrupts onto the queues.
 */
static void __iucv_cleanup_queue(void *dummy)
{
}

/*
 * iucv_cleanup_queue: remove queued work items that refer to pathids
 * that are no longer valid.
 */
static void iucv_cleanup_queue(void)
{
	struct iucv_irq_list *p, *n;

	/*
	 * When a path is severed, the pathid can be reused immediately
	 * on an iucv connect or a connection pending interrupt. Remove
	 * all entries from the task queue that refer to a stale pathid
	 * (iucv_path_table[ix] == NULL). Only then do the iucv connect
	 * or deliver the connection pending interrupt. To get all the
	 * pending interrupts force them to the work queue by calling
	 * an empty function on all cpus.
	 */
	smp_call_function(__iucv_cleanup_queue, NULL, 1);
	spin_lock_irq(&iucv_queue_lock);
	list_for_each_entry_safe(p, n, &iucv_task_queue, list) {
		/* Remove stale work items from the task queue. */
		if (iucv_path_table[p->data.ippathid] == NULL) {
			list_del(&p->list);
			kfree(p);
		}
	}
	spin_unlock_irq(&iucv_queue_lock);
}

/*
 * iucv_register: register a new iucv handler. @smp != 0 indicates that
 * the handler can cope with interrupts on any cpu; otherwise interrupts
 * are restricted to a single cpu. Returns 0 on success.
 */
int iucv_register(struct iucv_handler *handler, int smp)
{
	int rc;

	if (!iucv_available)
		return -ENOSYS;
	mutex_lock(&iucv_register_mutex);
	if (!smp)
		iucv_nonsmp_handler++;
	if (list_empty(&iucv_handler_list)) {
		rc = iucv_enable();
		if (rc)
			goto out_mutex;
	} else if (!smp && iucv_nonsmp_handler == 1)
		iucv_setmask_up();
	INIT_LIST_HEAD(&handler->paths);

	spin_lock_bh(&iucv_table_lock);
	list_add_tail(&handler->list, &iucv_handler_list);
	spin_unlock_bh(&iucv_table_lock);
	rc = 0;
out_mutex:
	mutex_unlock(&iucv_register_mutex);
	return rc;
}
EXPORT_SYMBOL(iucv_register);

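/*
 * Usage sketch (illustrative only, all "example_" names are hypothetical):
 * a minimal handler as a client like af_iucv would register it.
 */
static void example_path_complete(struct iucv_path *path, u8 ipuser[16])
{
	/* A path connected on behalf of this handler is now active. */
}

static struct iucv_handler example_handler = {
	.path_complete	= example_path_complete,
};

static int example_setup(void)
{
	/* smp == 1: this handler copes with interrupts on any cpu. */
	return iucv_register(&example_handler, 1);
}
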
/*
 * iucv_unregister: unregister an iucv handler and sever all of its
 * remaining paths.
 */
void iucv_unregister(struct iucv_handler *handler, int smp)
{
	struct iucv_path *p, *n;

	mutex_lock(&iucv_register_mutex);
	spin_lock_bh(&iucv_table_lock);
	/* Remove handler from the iucv_handler_list. */
	list_del_init(&handler->list);
	/* Sever all pathids still referring to the handler. */
	list_for_each_entry_safe(p, n, &handler->paths, list) {
		iucv_sever_pathid(p->pathid, NULL);
		iucv_path_table[p->pathid] = NULL;
		list_del(&p->list);
		iucv_path_free(p);
	}
	spin_unlock_bh(&iucv_table_lock);
	if (!smp)
		iucv_nonsmp_handler--;
	if (list_empty(&iucv_handler_list))
		iucv_disable();
	else if (!smp && iucv_nonsmp_handler == 0)
		iucv_setmask_mp();
	mutex_unlock(&iucv_register_mutex);
}
EXPORT_SYMBOL(iucv_unregister);

static int iucv_reboot_event(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
	int i;

	get_online_cpus();
	on_each_cpu(iucv_block_cpu, NULL, 1);
	preempt_disable();
	for (i = 0; i < iucv_max_pathid; i++) {
		if (iucv_path_table[i])
			iucv_sever_pathid(i, NULL);
	}
	preempt_enable();
	put_online_cpus();
	iucv_disable();
	return NOTIFY_DONE;
}

static struct notifier_block iucv_reboot_notifier = {
	.notifier_call = iucv_reboot_event,
};

/*
 * iucv_path_accept: accept a pending path (from a path_pending callback).
 * Returns 0 on success, -EIO if no cpu has a declared buffer, or the
 * IUCV return code otherwise.
 */
int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler,
		     u8 userdata[16], void *private)
{
	union iucv_param *parm;
	int rc;

	local_bh_disable();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	/* Prepare parameter block. */
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	parm->ctrl.ippathid = path->pathid;
	parm->ctrl.ipmsglim = path->msglim;
	if (userdata)
		memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
	parm->ctrl.ipflags1 = path->flags;

	rc = iucv_call_b2f0(IUCV_ACCEPT, parm);
	if (!rc) {
		path->private = private;
		path->msglim = parm->ctrl.ipmsglim;
		path->flags = parm->ctrl.ipflags1;
	}
out:
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_path_accept);

/*
 * iucv_path_connect: establish a new path to the communicator @userid on
 * system @system. On success the path structure is filled in with the
 * assigned pathid, the negotiated message limit and the path flags.
 */
int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler,
		      u8 userid[8], u8 system[8], u8 userdata[16],
		      void *private)
{
	union iucv_param *parm;
	int rc;

	spin_lock_bh(&iucv_table_lock);
	iucv_cleanup_queue();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	parm->ctrl.ipmsglim = path->msglim;
	parm->ctrl.ipflags1 = path->flags;
	if (userid) {
		memcpy(parm->ctrl.ipvmid, userid, sizeof(parm->ctrl.ipvmid));
		ASCEBC(parm->ctrl.ipvmid, sizeof(parm->ctrl.ipvmid));
		EBC_TOUPPER(parm->ctrl.ipvmid, sizeof(parm->ctrl.ipvmid));
	}
	if (system) {
		memcpy(parm->ctrl.iptarget, system,
		       sizeof(parm->ctrl.iptarget));
		ASCEBC(parm->ctrl.iptarget, sizeof(parm->ctrl.iptarget));
		EBC_TOUPPER(parm->ctrl.iptarget, sizeof(parm->ctrl.iptarget));
	}
	if (userdata)
		memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));

	rc = iucv_call_b2f0(IUCV_CONNECT, parm);
	if (!rc) {
		if (parm->ctrl.ippathid < iucv_max_pathid) {
			path->pathid = parm->ctrl.ippathid;
			path->msglim = parm->ctrl.ipmsglim;
			path->flags = parm->ctrl.ipflags1;
			path->handler = handler;
			path->private = private;
			list_add_tail(&path->list, &handler->paths);
			iucv_path_table[path->pathid] = path;
		} else {
			iucv_sever_pathid(parm->ctrl.ippathid,
					  iucv_error_pathid);
			rc = -EIO;
		}
	}
out:
	spin_unlock_bh(&iucv_table_lock);
	return rc;
}
EXPORT_SYMBOL(iucv_path_connect);

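/*
 * Connection sketch (illustrative): allocate a path and connect it to the
 * hypothetical guest "TESTVM" on the local system, reusing example_handler
 * from the sketch after iucv_register() above.
 */
static int example_connect(void)
{
	struct iucv_path *path;
	int rc;

	path = iucv_path_alloc(10, IUCV_IPRMDATA, GFP_KERNEL);
	if (!path)
		return -ENOMEM;
	rc = iucv_path_connect(path, &example_handler, (u8 *)"TESTVM  ",
			       NULL, NULL, NULL);
	if (rc)
		iucv_path_free(path);	/* no pathid was ever assigned */
	return rc;
}
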
/*
 * iucv_path_quiesce: temporarily suspend incoming messages on an
 * established path.
 */
int iucv_path_quiesce(struct iucv_path *path, u8 userdata[16])
{
	union iucv_param *parm;
	int rc;

	local_bh_disable();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	if (userdata)
		memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
	parm->ctrl.ippathid = path->pathid;
	rc = iucv_call_b2f0(IUCV_QUIESCE, parm);
out:
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_path_quiesce);

/*
 * iucv_path_resume: resume delivery of messages on a quiesced path.
 */
int iucv_path_resume(struct iucv_path *path, u8 userdata[16])
{
	union iucv_param *parm;
	int rc;

	local_bh_disable();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	if (userdata)
		memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
	parm->ctrl.ippathid = path->pathid;
	rc = iucv_call_b2f0(IUCV_RESUME, parm);
out:
	local_bh_enable();
	return rc;
}

/*
 * iucv_path_sever: terminate an iucv path and remove it from the path
 * table.
 */
int iucv_path_sever(struct iucv_path *path, u8 userdata[16])
{
	int rc;

	preempt_disable();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	if (iucv_active_cpu != smp_processor_id())
		spin_lock_bh(&iucv_table_lock);
	rc = iucv_sever_pathid(path->pathid, userdata);
	iucv_path_table[path->pathid] = NULL;
	list_del_init(&path->list);
	if (iucv_active_cpu != smp_processor_id())
		spin_unlock_bh(&iucv_table_lock);
out:
	preempt_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_path_sever);

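/*
 * Teardown sketch (illustrative): quiesce a path before severing it, then
 * release the path structure once no interrupt can reference it anymore.
 */
static void example_shutdown(struct iucv_path *path)
{
	iucv_path_quiesce(path, NULL);
	iucv_path_sever(path, NULL);
	iucv_path_free(path);
}
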
/*
 * iucv_message_purge: cancel a pending message identified by @msg and
 * @srccls. On success the audit information and the message tag are
 * copied back to @msg.
 */
int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg,
		       u32 srccls)
{
	union iucv_param *parm;
	int rc;

	local_bh_disable();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	parm->purge.ippathid = path->pathid;
	parm->purge.ipmsgid = msg->id;
	parm->purge.ipsrccls = srccls;
	parm->purge.ipflags1 = IUCV_IPSRCCLS | IUCV_IPFGMID | IUCV_IPFGPID;
	rc = iucv_call_b2f0(IUCV_PURGE, parm);
	if (!rc) {
		msg->audit = (*(u32 *) &parm->purge.ipaudit) >> 8;
		msg->tag = parm->purge.ipmsgtag;
	}
out:
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_message_purge);

/*
 * iucv_message_receive_iprmdata: copy a message that was stored in the
 * 8-byte parameter list (IPRMDATA) to the receive buffer or buffer array.
 */
static int iucv_message_receive_iprmdata(struct iucv_path *path,
					 struct iucv_message *msg,
					 u8 flags, void *buffer,
					 size_t size, size_t *residual)
{
	struct iucv_array *array;
	u8 *rmmsg;
	size_t copy;

	/*
	 * Message is 8 bytes long and has been stored to the
	 * message descriptor itself.
	 */
	if (residual)
		*residual = abs(size - 8);
	rmmsg = msg->rmmsg;
	if (flags & IUCV_IPBUFLST) {
		/* Copy to struct iucv_array. */
		size = (size < 8) ? size : 8;
		for (array = buffer; size > 0; array++) {
			copy = min_t(size_t, size, array->length);
			memcpy((u8 *)(addr_t) array->address,
			       rmmsg, copy);
			rmmsg += copy;
			size -= copy;
		}
	} else {
		/* Copy to direct buffer. */
		memcpy(buffer, rmmsg, min_t(size_t, size, 8));
	}
	return 0;
}

/*
 * __iucv_message_receive: receive a message; lockless variant for callers
 * that already run with bottom halves disabled. rc == 5 means the buffer
 * was smaller than the message; flags and residual are still valid then.
 */
int __iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
			   u8 flags, void *buffer, size_t size, size_t *residual)
{
	union iucv_param *parm;
	int rc;

	if (msg->flags & IUCV_IPRMDATA)
		return iucv_message_receive_iprmdata(path, msg, flags,
						     buffer, size, residual);
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	parm->db.ipbfadr1 = (u32)(addr_t) buffer;
	parm->db.ipbfln1f = (u32) size;
	parm->db.ipmsgid = msg->id;
	parm->db.ippathid = path->pathid;
	parm->db.iptrgcls = msg->class;
	parm->db.ipflags1 = (flags | IUCV_IPFGPID |
			     IUCV_IPFGMID | IUCV_IPTRGCLS);
	rc = iucv_call_b2f0(IUCV_RECEIVE, parm);
	if (!rc || rc == 5) {
		msg->flags = parm->db.ipflags1;
		if (residual)
			*residual = parm->db.ipbfln1f;
	}
out:
	return rc;
}
EXPORT_SYMBOL(__iucv_message_receive);

/*
 * iucv_message_receive: receive a message. Wrapper around
 * __iucv_message_receive that disables bottom halves.
 */
int iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
			 u8 flags, void *buffer, size_t size, size_t *residual)
{
	int rc;

	if (msg->flags & IUCV_IPRMDATA)
		return iucv_message_receive_iprmdata(path, msg, flags,
						     buffer, size, residual);
	local_bh_disable();
	rc = __iucv_message_receive(path, msg, flags, buffer, size, residual);
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_message_receive);

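/*
 * Receive sketch (illustrative): a message_pending callback that pulls
 * small messages synchronously and rejects anything larger than its
 * stack buffer.
 */
static void example_message_pending(struct iucv_path *path,
				    struct iucv_message *msg)
{
	u8 data[32];
	size_t residual;

	if (msg->length <= sizeof(data))
		iucv_message_receive(path, msg, 0, data, msg->length,
				     &residual);
	else
		iucv_message_reject(path, msg);
}
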
/*
 * iucv_message_reject: refuse a pending message on @path.
 */
int iucv_message_reject(struct iucv_path *path, struct iucv_message *msg)
{
	union iucv_param *parm;
	int rc;

	local_bh_disable();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	parm->db.ippathid = path->pathid;
	parm->db.ipmsgid = msg->id;
	parm->db.iptrgcls = msg->class;
	parm->db.ipflags1 = (IUCV_IPTRGCLS | IUCV_IPFGMID | IUCV_IPFGPID);
	rc = iucv_call_b2f0(IUCV_REJECT, parm);
out:
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_message_reject);

/*
 * iucv_message_reply: answer a received two-way message. With
 * IUCV_IPRMDATA a reply of up to 8 bytes is passed in the parameter
 * list, otherwise a reply buffer is used.
 */
int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg,
		       u8 flags, void *reply, size_t size)
{
	union iucv_param *parm;
	int rc;

	local_bh_disable();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	if (flags & IUCV_IPRMDATA) {
		parm->dpl.ippathid = path->pathid;
		parm->dpl.ipflags1 = flags;
		parm->dpl.ipmsgid = msg->id;
		parm->dpl.iptrgcls = msg->class;
		memcpy(parm->dpl.iprmmsg, reply, min_t(size_t, size, 8));
	} else {
		parm->db.ipbfadr1 = (u32)(addr_t) reply;
		parm->db.ipbfln1f = (u32) size;
		parm->db.ippathid = path->pathid;
		parm->db.ipflags1 = flags;
		parm->db.ipmsgid = msg->id;
		parm->db.iptrgcls = msg->class;
	}
	rc = iucv_call_b2f0(IUCV_REPLY, parm);
out:
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_message_reply);

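/*
 * Reply sketch (illustrative): answer a two-way message from a
 * message_pending callback with a short in-parameter-list reply.
 */
static int example_reply(struct iucv_path *path, struct iucv_message *msg)
{
	static char answer[8] = "ok";

	return iucv_message_reply(path, msg, IUCV_IPRMDATA, answer,
				  sizeof(answer));
}
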
/*
 * __iucv_message_send: send a one-way message; lockless variant for
 * callers that already run with bottom halves disabled. On success the
 * message id assigned by CP is stored in @msg.
 */
int __iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
			u8 flags, u32 srccls, void *buffer, size_t size)
{
	union iucv_param *parm;
	int rc;

	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	if (flags & IUCV_IPRMDATA) {
		/* Message of 8 bytes can be placed into the parameter list. */
		parm->dpl.ippathid = path->pathid;
		parm->dpl.ipflags1 = flags | IUCV_IPNORPY;
		parm->dpl.iptrgcls = msg->class;
		parm->dpl.ipsrccls = srccls;
		parm->dpl.ipmsgtag = msg->tag;
		memcpy(parm->dpl.iprmmsg, buffer, 8);
	} else {
		parm->db.ipbfadr1 = (u32)(addr_t) buffer;
		parm->db.ipbfln1f = (u32) size;
		parm->db.ippathid = path->pathid;
		parm->db.ipflags1 = flags | IUCV_IPNORPY;
		parm->db.iptrgcls = msg->class;
		parm->db.ipsrccls = srccls;
		parm->db.ipmsgtag = msg->tag;
	}
	rc = iucv_call_b2f0(IUCV_SEND, parm);
	if (!rc)
		msg->id = parm->db.ipmsgid;
out:
	return rc;
}
EXPORT_SYMBOL(__iucv_message_send);

/*
 * iucv_message_send: send a one-way message. Wrapper around
 * __iucv_message_send that disables bottom halves.
 */
int iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
		      u8 flags, u32 srccls, void *buffer, size_t size)
{
	int rc;

	local_bh_disable();
	rc = __iucv_message_send(path, msg, flags, srccls, buffer, size);
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_message_send);

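/*
 * Send sketch (illustrative): post a one-way message on an established
 * path; msg.tag is an arbitrary caller cookie echoed back in the
 * message-complete interrupt.
 */
static int example_send(struct iucv_path *path, void *buf, size_t len)
{
	struct iucv_message msg = { .class = 0, .tag = 0x42 };

	return iucv_message_send(path, &msg, 0, 0, buf, len);
}
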
/*
 * iucv_message_send2way: send a two-way message and describe the answer
 * buffer the reply is to be received into.
 */
int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg,
			  u8 flags, u32 srccls, void *buffer, size_t size,
			  void *answer, size_t asize, size_t *residual)
{
	union iucv_param *parm;
	int rc;

	local_bh_disable();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	if (flags & IUCV_IPRMDATA) {
		parm->dpl.ippathid = path->pathid;
		parm->dpl.ipflags1 = path->flags;	/* priority message */
		parm->dpl.iptrgcls = msg->class;
		parm->dpl.ipsrccls = srccls;
		parm->dpl.ipmsgtag = msg->tag;
		parm->dpl.ipbfadr2 = (u32)(addr_t) answer;
		parm->dpl.ipbfln2f = (u32) asize;
		memcpy(parm->dpl.iprmmsg, buffer, 8);
	} else {
		parm->db.ippathid = path->pathid;
		parm->db.ipflags1 = path->flags;	/* priority message */
		parm->db.iptrgcls = msg->class;
		parm->db.ipsrccls = srccls;
		parm->db.ipmsgtag = msg->tag;
		parm->db.ipbfadr1 = (u32)(addr_t) buffer;
		parm->db.ipbfln1f = (u32) size;
		parm->db.ipbfadr2 = (u32)(addr_t) answer;
		parm->db.ipbfln2f = (u32) asize;
	}
	rc = iucv_call_b2f0(IUCV_SEND, parm);
	if (!rc)
		msg->id = parm->db.ipmsgid;
out:
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_message_send2way);

/*
 * Interrupt buffer layout for a connection pending interrupt
 * (iptype 0x01).
 */
struct iucv_path_pending {
	u16 ippathid;
	u8  ipflags1;
	u8  iptype;
	u16 ipmsglim;
	u16 res1;
	u8  ipvmid[8];
	u8  ipuser[16];
	u32 res3;
	u8  ippollfg;
	u8  res4[3];
} __packed;

static void iucv_path_pending(struct iucv_irq_data *data)
{
	struct iucv_path_pending *ipp = (void *) data;
	struct iucv_handler *handler;
	struct iucv_path *path;
	char *error;

	BUG_ON(iucv_path_table[ipp->ippathid]);
	/* New pathid, handler found. Create a new path struct. */
	error = iucv_error_no_memory;
	path = iucv_path_alloc(ipp->ipmsglim, ipp->ipflags1, GFP_ATOMIC);
	if (!path)
		goto out_sever;
	path->pathid = ipp->ippathid;
	iucv_path_table[path->pathid] = path;
	EBCASC(ipp->ipvmid, 8);

	/* Call registered handlers until one is found that wants the path. */
	list_for_each_entry(handler, &iucv_handler_list, list) {
		if (!handler->path_pending)
			continue;
		/*
		 * Add path to handler to allow a call to iucv_path_sever
		 * inside the path_pending function. If the handler returns
		 * an error remove the path from the handler again.
		 */
		list_add(&path->list, &handler->paths);
		path->handler = handler;
		if (!handler->path_pending(path, ipp->ipvmid, ipp->ipuser))
			return;
		list_del(&path->list);
		path->handler = NULL;
	}
	/* No handler wanted the path. */
	iucv_path_table[path->pathid] = NULL;
	iucv_path_free(path);
	error = iucv_error_no_listener;
out_sever:
	iucv_sever_pathid(ipp->ippathid, error);
}

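/*
 * Accept sketch (illustrative): a path_pending callback that takes every
 * incoming connection, reusing example_handler from the sketch after
 * iucv_register() above. Returning 0 claims the path.
 */
static int example_path_pending(struct iucv_path *path, u8 ipvmid[8],
				u8 ipuser[16])
{
	return iucv_path_accept(path, &example_handler, NULL, NULL);
}
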
/*
 * Interrupt buffer layout for a connection complete interrupt
 * (iptype 0x02).
 */
struct iucv_path_complete {
	u16 ippathid;
	u8  ipflags1;
	u8  iptype;
	u16 ipmsglim;
	u16 res1;
	u8  res2[8];
	u8  ipuser[16];
	u32 res3;
	u8  ippollfg;
	u8  res4[3];
} __packed;

static void iucv_path_complete(struct iucv_irq_data *data)
{
	struct iucv_path_complete *ipc = (void *) data;
	struct iucv_path *path = iucv_path_table[ipc->ippathid];

	if (path)
		path->flags = ipc->ipflags1;
	if (path && path->handler && path->handler->path_complete)
		path->handler->path_complete(path, ipc->ipuser);
}

/*
 * Interrupt buffer layout for a connection severed interrupt
 * (iptype 0x03).
 */
struct iucv_path_severed {
	u16 ippathid;
	u8  res1;
	u8  iptype;
	u32 res2;
	u8  res3[8];
	u8  ipuser[16];
	u32 res4;
	u8  ippollfg;
	u8  res5[3];
} __packed;

static void iucv_path_severed(struct iucv_irq_data *data)
{
	struct iucv_path_severed *ips = (void *) data;
	struct iucv_path *path = iucv_path_table[ips->ippathid];

	if (!path || !path->handler)	/* Already severed */
		return;
	if (path->handler->path_severed)
		path->handler->path_severed(path, ips->ipuser);
	else {
		iucv_sever_pathid(path->pathid, NULL);
		iucv_path_table[path->pathid] = NULL;
		list_del(&path->list);
		iucv_path_free(path);
	}
}

/*
 * Interrupt buffer layout for a connection quiesced interrupt
 * (iptype 0x04).
 */
struct iucv_path_quiesced {
	u16 ippathid;
	u8  res1;
	u8  iptype;
	u32 res2;
	u8  res3[8];
	u8  ipuser[16];
	u32 res4;
	u8  ippollfg;
	u8  res5[3];
} __packed;

static void iucv_path_quiesced(struct iucv_irq_data *data)
{
	struct iucv_path_quiesced *ipq = (void *) data;
	struct iucv_path *path = iucv_path_table[ipq->ippathid];

	if (path && path->handler && path->handler->path_quiesced)
		path->handler->path_quiesced(path, ipq->ipuser);
}

/*
 * Interrupt buffer layout for a connection resumed interrupt
 * (iptype 0x05).
 */
struct iucv_path_resumed {
	u16 ippathid;
	u8  res1;
	u8  iptype;
	u32 res2;
	u8  res3[8];
	u8  ipuser[16];
	u32 res4;
	u8  ippollfg;
	u8  res5[3];
} __packed;

static void iucv_path_resumed(struct iucv_irq_data *data)
{
	struct iucv_path_resumed *ipr = (void *) data;
	struct iucv_path *path = iucv_path_table[ipr->ippathid];

	if (path && path->handler && path->handler->path_resumed)
		path->handler->path_resumed(path, ipr->ipuser);
}

/*
 * Interrupt buffer layout for a message complete interrupt
 * (iptype 0x06 and 0x07).
 */
struct iucv_message_complete {
	u16 ippathid;
	u8  ipflags1;
	u8  iptype;
	u32 ipmsgid;
	u32 ipaudit;
	u8  iprmmsg[8];
	u32 ipsrccls;
	u32 ipmsgtag;
	u32 res;
	u32 ipbfln2f;
	u8  ippollfg;
	u8  res2[3];
} __packed;

static void iucv_message_complete(struct iucv_irq_data *data)
{
	struct iucv_message_complete *imc = (void *) data;
	struct iucv_path *path = iucv_path_table[imc->ippathid];
	struct iucv_message msg;

	if (path && path->handler && path->handler->message_complete) {
		msg.flags = imc->ipflags1;
		msg.id = imc->ipmsgid;
		msg.audit = imc->ipaudit;
		memcpy(msg.rmmsg, imc->iprmmsg, 8);
		msg.class = imc->ipsrccls;
		msg.tag = imc->ipmsgtag;
		msg.length = imc->ipbfln2f;
		path->handler->message_complete(path, &msg);
	}
}

/*
 * Interrupt buffer layout for a message pending interrupt
 * (iptype 0x08 and 0x09).
 */
struct iucv_message_pending {
	u16 ippathid;
	u8  ipflags1;
	u8  iptype;
	u32 ipmsgid;
	u32 iptrgcls;
	union {
		u32 iprmmsg1_u32;
		u8  iprmmsg1[4];
	} ln1msg1;
	union {
		u32 ipbfln1f;
		u8  iprmmsg2[4];
	} ln1msg2;
	u32 res1[3];
	u32 ipbfln2f;
	u8  ippollfg;
	u8  res2[3];
} __packed;

static void iucv_message_pending(struct iucv_irq_data *data)
{
	struct iucv_message_pending *imp = (void *) data;
	struct iucv_path *path = iucv_path_table[imp->ippathid];
	struct iucv_message msg;

	if (path && path->handler && path->handler->message_pending) {
		msg.flags = imp->ipflags1;
		msg.id = imp->ipmsgid;
		msg.class = imp->iptrgcls;
		if (imp->ipflags1 & IUCV_IPRMDATA) {
			memcpy(msg.rmmsg, imp->ln1msg1.iprmmsg1, 8);
			msg.length = 8;
		} else
			msg.length = imp->ln1msg2.ipbfln1f;
		msg.reply_size = imp->ipbfln2f;
		path->handler->message_pending(path, &msg);
	}
}

/*
 * iucv_tasklet_fn: dispatch queued interrupt buffers to the per-type
 * handlers above. Path pending interrupts (iptype 0x01) are handled by
 * the work queue instead.
 */
static void iucv_tasklet_fn(unsigned long ignored)
{
	typedef void iucv_irq_fn(struct iucv_irq_data *);
	static iucv_irq_fn *irq_fn[] = {
		[0x02] = iucv_path_complete,
		[0x03] = iucv_path_severed,
		[0x04] = iucv_path_quiesced,
		[0x05] = iucv_path_resumed,
		[0x06] = iucv_message_complete,
		[0x07] = iucv_message_complete,
		[0x08] = iucv_message_pending,
		[0x09] = iucv_message_pending,
	};
	LIST_HEAD(task_queue);
	struct iucv_irq_list *p, *n;

	/* Serialize tasklet, iucv_path_sever and iucv_path_connect. */
	if (!spin_trylock(&iucv_table_lock)) {
		tasklet_schedule(&iucv_tasklet);
		return;
	}
	iucv_active_cpu = smp_processor_id();

	spin_lock_irq(&iucv_queue_lock);
	list_splice_init(&iucv_task_queue, &task_queue);
	spin_unlock_irq(&iucv_queue_lock);

	list_for_each_entry_safe(p, n, &task_queue, list) {
		list_del_init(&p->list);
		irq_fn[p->data.iptype](&p->data);
		kfree(p);
	}

	iucv_active_cpu = -1;
	spin_unlock(&iucv_table_lock);
}

/*
 * iucv_work_fn: deliver path pending interrupts from the work queue.
 */
static void iucv_work_fn(struct work_struct *work)
{
	LIST_HEAD(work_queue);
	struct iucv_irq_list *p, *n;

	/* Serialize tasklet, iucv_path_sever and iucv_path_connect. */
	spin_lock_bh(&iucv_table_lock);
	iucv_active_cpu = smp_processor_id();

	spin_lock_irq(&iucv_queue_lock);
	list_splice_init(&iucv_work_queue, &work_queue);
	spin_unlock_irq(&iucv_queue_lock);

	iucv_cleanup_queue();
	list_for_each_entry_safe(p, n, &work_queue, list) {
		list_del_init(&p->list);
		iucv_path_pending(&p->data);
		kfree(p);
	}

	iucv_active_cpu = -1;
	spin_unlock_bh(&iucv_table_lock);
}

/*
 * iucv_external_interrupt: handler for the 0x4000 external interrupt;
 * copies the interrupt buffer to a work item and queues it either on
 * the work queue (path pending) or on the tasklet queue.
 */
static void iucv_external_interrupt(struct ext_code ext_code,
				    unsigned int param32, unsigned long param64)
{
	struct iucv_irq_data *p;
	struct iucv_irq_list *work;

	p = iucv_irq_data[smp_processor_id()];
	if (p->ippathid >= iucv_max_pathid) {
		WARN_ON(p->ippathid >= iucv_max_pathid);
		iucv_sever_pathid(p->ippathid, iucv_error_no_listener);
		return;
	}
	BUG_ON(p->iptype < 0x01 || p->iptype > 0x09);
	work = kmalloc(sizeof(struct iucv_irq_list), GFP_ATOMIC);
	if (!work) {
		pr_warning("iucv_external_interrupt: out of memory\n");
		return;
	}
	memcpy(&work->data, p, sizeof(work->data));
	spin_lock(&iucv_queue_lock);
	if (p->iptype == 0x01) {
		/* Path pending interrupt. */
		list_add_tail(&work->list, &iucv_work_queue);
		schedule_work(&iucv_work);
	} else {
		/* The other interrupts. */
		list_add_tail(&work->list, &iucv_task_queue);
		tasklet_schedule(&iucv_tasklet);
	}
	spin_unlock(&iucv_queue_lock);
}

static int iucv_pm_prepare(struct device *dev)
{
	int rc = 0;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_INFO "iucv_pm_prepare\n");
#endif
	if (dev->driver && dev->driver->pm && dev->driver->pm->prepare)
		rc = dev->driver->pm->prepare(dev);
	return rc;
}

static void iucv_pm_complete(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
	printk(KERN_INFO "iucv_pm_complete\n");
#endif
	if (dev->driver && dev->driver->pm && dev->driver->pm->complete)
		dev->driver->pm->complete(dev);
}

/*
 * iucv_path_table_empty: determine if the path table still contains any
 * active connections. Returns 1 if it is empty.
 */
int iucv_path_table_empty(void)
{
	int i;

	for (i = 0; i < iucv_max_pathid; i++) {
		if (iucv_path_table[i])
			return 0;
	}
	return 1;
}

/*
 * iucv_pm_freeze: block iucv interrupts, sever all pending connection
 * requests and invoke the driver's own freeze callback.
 */
static int iucv_pm_freeze(struct device *dev)
{
	int cpu;
	struct iucv_irq_list *p, *n;
	int rc = 0;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "iucv_pm_freeze\n");
#endif
	if (iucv_pm_state != IUCV_PM_FREEZING) {
		for_each_cpu(cpu, &iucv_irq_cpumask)
			smp_call_function_single(cpu, iucv_block_cpu_almost,
						 NULL, 1);
		cancel_work_sync(&iucv_work);
		list_for_each_entry_safe(p, n, &iucv_work_queue, list) {
			list_del_init(&p->list);
			iucv_sever_pathid(p->data.ippathid,
					  iucv_error_no_listener);
			kfree(p);
		}
	}
	iucv_pm_state = IUCV_PM_FREEZING;
	if (dev->driver && dev->driver->pm && dev->driver->pm->freeze)
		rc = dev->driver->pm->freeze(dev);
	if (iucv_path_table_empty())
		iucv_disable();
	return rc;
}

/*
 * iucv_pm_thaw: re-enable iucv interrupts after a failed freeze and
 * invoke the driver's own thaw callback.
 */
static int iucv_pm_thaw(struct device *dev)
{
	int rc = 0;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "iucv_pm_thaw\n");
#endif
	iucv_pm_state = IUCV_PM_THAWING;
	if (!iucv_path_table) {
		rc = iucv_enable();
		if (rc)
			goto out;
	}
	if (cpumask_empty(&iucv_irq_cpumask)) {
		if (iucv_nonsmp_handler)
			/* enable interrupts on one cpu */
			iucv_allow_cpu(NULL);
		else
			/* enable interrupts on all cpus */
			iucv_setmask_mp();
	}
	if (dev->driver && dev->driver->pm && dev->driver->pm->thaw)
		rc = dev->driver->pm->thaw(dev);
out:
	return rc;
}

/*
 * iucv_pm_restore: re-enable iucv after hibernation and invoke the
 * driver's own restore callback.
 */
static int iucv_pm_restore(struct device *dev)
{
	int rc = 0;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "iucv_pm_restore %p\n", iucv_path_table);
#endif
	if ((iucv_pm_state != IUCV_PM_RESTORING) && iucv_path_table)
		pr_warning("Suspending Linux did not completely close all IUCV "
			   "connections\n");
	iucv_pm_state = IUCV_PM_RESTORING;
	if (cpumask_empty(&iucv_irq_cpumask)) {
		rc = iucv_query_maxconn();
		rc = iucv_enable();
		if (rc)
			goto out;
	}
	if (dev->driver && dev->driver->pm && dev->driver->pm->restore)
		rc = dev->driver->pm->restore(dev);
out:
	return rc;
}

/*
 * Indirect-call interface used by af_iucv; the bus and root device
 * pointers are filled in by iucv_init().
 */
struct iucv_interface iucv_if = {
	.message_receive = iucv_message_receive,
	.__message_receive = __iucv_message_receive,
	.message_reply = iucv_message_reply,
	.message_reject = iucv_message_reject,
	.message_send = iucv_message_send,
	.__message_send = __iucv_message_send,
	.message_send2way = iucv_message_send2way,
	.message_purge = iucv_message_purge,
	.path_accept = iucv_path_accept,
	.path_connect = iucv_path_connect,
	.path_quiesce = iucv_path_quiesce,
	.path_resume = iucv_path_resume,
	.path_sever = iucv_path_sever,
	.iucv_register = iucv_register,
	.iucv_unregister = iucv_unregister,
	.bus = NULL,
	.root = NULL,
};
EXPORT_SYMBOL(iucv_if);

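/*
 * Indirection sketch (illustrative): a module that must not link directly
 * against the base code can go through iucv_if instead, which is how
 * af_iucv calls into this file.
 */
static int example_send_via_if(struct iucv_path *path,
			       struct iucv_message *msg,
			       void *buf, size_t len)
{
	return iucv_if.message_send(path, msg, 0, 0, buf, len);
}
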
/*
 * iucv_init: allocate and initialize the base infrastructure data
 * structures.
 */
static int __init iucv_init(void)
{
	int rc;
	int cpu;

	if (!MACHINE_IS_VM) {
		rc = -EPROTONOSUPPORT;
		goto out;
	}
	ctl_set_bit(0, 1);
	rc = iucv_query_maxconn();
	if (rc)
		goto out_ctl;
	rc = register_external_interrupt(0x4000, iucv_external_interrupt);
	if (rc)
		goto out_ctl;
	iucv_root = root_device_register("iucv");
	if (IS_ERR(iucv_root)) {
		rc = PTR_ERR(iucv_root);
		goto out_int;
	}

	for_each_online_cpu(cpu) {
		/* Note: GFP_DMA used to get memory below 2G */
		iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data),
					GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
		if (!iucv_irq_data[cpu]) {
			rc = -ENOMEM;
			goto out_free;
		}

		/* Allocate parameter blocks. */
		iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param),
					GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
		if (!iucv_param[cpu]) {
			rc = -ENOMEM;
			goto out_free;
		}
		iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param),
					GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
		if (!iucv_param_irq[cpu]) {
			rc = -ENOMEM;
			goto out_free;
		}
	}
	rc = register_hotcpu_notifier(&iucv_cpu_notifier);
	if (rc)
		goto out_free;
	rc = register_reboot_notifier(&iucv_reboot_notifier);
	if (rc)
		goto out_cpu;
	ASCEBC(iucv_error_no_listener, 16);
	ASCEBC(iucv_error_no_memory, 16);
	ASCEBC(iucv_error_pathid, 16);
	iucv_available = 1;
	rc = bus_register(&iucv_bus);
	if (rc)
		goto out_reboot;
	iucv_if.root = iucv_root;
	iucv_if.bus = &iucv_bus;
	return 0;

out_reboot:
	unregister_reboot_notifier(&iucv_reboot_notifier);
out_cpu:
	unregister_hotcpu_notifier(&iucv_cpu_notifier);
out_free:
	for_each_possible_cpu(cpu) {
		kfree(iucv_param_irq[cpu]);
		iucv_param_irq[cpu] = NULL;
		kfree(iucv_param[cpu]);
		iucv_param[cpu] = NULL;
		kfree(iucv_irq_data[cpu]);
		iucv_irq_data[cpu] = NULL;
	}
	root_device_unregister(iucv_root);
out_int:
	unregister_external_interrupt(0x4000, iucv_external_interrupt);
out_ctl:
	ctl_clear_bit(0, 1);
out:
	return rc;
}

/*
 * iucv_exit: free everything allocated by iucv_init.
 */
static void __exit iucv_exit(void)
{
	struct iucv_irq_list *p, *n;
	int cpu;

	spin_lock_irq(&iucv_queue_lock);
	list_for_each_entry_safe(p, n, &iucv_task_queue, list)
		kfree(p);
	list_for_each_entry_safe(p, n, &iucv_work_queue, list)
		kfree(p);
	spin_unlock_irq(&iucv_queue_lock);
	unregister_reboot_notifier(&iucv_reboot_notifier);
	unregister_hotcpu_notifier(&iucv_cpu_notifier);
	for_each_possible_cpu(cpu) {
		kfree(iucv_param_irq[cpu]);
		iucv_param_irq[cpu] = NULL;
		kfree(iucv_param[cpu]);
		iucv_param[cpu] = NULL;
		kfree(iucv_irq_data[cpu]);
		iucv_irq_data[cpu] = NULL;
	}
	root_device_unregister(iucv_root);
	bus_unregister(&iucv_bus);
	unregister_external_interrupt(0x4000, iucv_external_interrupt);
}

subsys_initcall(iucv_init);
module_exit(iucv_exit);

MODULE_AUTHOR("(C) 2001 IBM Corp. by Fritz Elfert (felfert@millenux.com)");
MODULE_DESCRIPTION("Linux for S/390 IUCV lowlevel driver");
MODULE_LICENSE("GPL");