Linux Kernel 3.7.1
ap_bus.c
1 /*
2  * Copyright IBM Corp. 2006, 2012
3  * Author(s): Cornelia Huck <[email protected]>
4  * Martin Schwidefsky <[email protected]>
5  * Ralph Wuerthner <[email protected]>
6  * Felix Beck <[email protected]>
7  * Holger Dengler <[email protected]>
8  *
9  * Adjunct processor bus.
10  *
11  * This program is free software; you can redistribute it and/or modify
12  * it under the terms of the GNU General Public License as published by
13  * the Free Software Foundation; either version 2, or (at your option)
14  * any later version.
15  *
16  * This program is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19  * GNU General Public License for more details.
20  *
21  * You should have received a copy of the GNU General Public License
22  * along with this program; if not, write to the Free Software
23  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24  */
25 
26 #define KMSG_COMPONENT "ap"
27 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
28 
29 #include <linux/kernel_stat.h>
30 #include <linux/module.h>
31 #include <linux/init.h>
32 #include <linux/delay.h>
33 #include <linux/err.h>
34 #include <linux/interrupt.h>
35 #include <linux/workqueue.h>
36 #include <linux/slab.h>
37 #include <linux/notifier.h>
38 #include <linux/kthread.h>
39 #include <linux/mutex.h>
40 #include <asm/reset.h>
41 #include <asm/airq.h>
42 #include <linux/atomic.h>
43 #include <asm/isc.h>
44 #include <linux/hrtimer.h>
45 #include <linux/ktime.h>
46 #include <asm/facility.h>
47 
48 #include "ap_bus.h"
49 
50 /* Some prototypes. */
51 static void ap_scan_bus(struct work_struct *);
52 static void ap_poll_all(unsigned long);
53 static enum hrtimer_restart ap_poll_timeout(struct hrtimer *);
54 static int ap_poll_thread_start(void);
55 static void ap_poll_thread_stop(void);
56 static void ap_request_timeout(unsigned long);
57 static inline void ap_schedule_poll_timer(void);
58 static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags);
59 static int ap_device_remove(struct device *dev);
60 static int ap_device_probe(struct device *dev);
61 static void ap_interrupt_handler(void *unused1, void *unused2);
62 static void ap_reset(struct ap_device *ap_dev);
63 static void ap_config_timeout(unsigned long ptr);
64 static int ap_select_domain(void);
65 static void ap_query_configuration(void);
66 
67 /*
68  * Module description.
69  */
70 MODULE_AUTHOR("IBM Corporation");
71 MODULE_DESCRIPTION("Adjunct Processor Bus driver, " \
72  "Copyright IBM Corp. 2006, 2012");
73 MODULE_LICENSE("GPL");
74 
75 /*
76  * Module parameter
77  */
78 int ap_domain_index = -1; /* Adjunct Processor Domain Index */
79 module_param_named(domain, ap_domain_index, int, 0000);
80 MODULE_PARM_DESC(domain, "domain index for ap devices");
82 
83 static int ap_thread_flag = 0;
84 module_param_named(poll_thread, ap_thread_flag, int, 0000);
85 MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off).");
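As a usage note (hedged: the "ap." prefix assumes the usual module name built from ap_bus.o), the domain and the poll thread can be chosen at load time, e.g. domain=7 poll_thread=1 as module options or ap.domain=7 ap.poll_thread=1 on the kernel command line when the bus code is built in; both settings can also be influenced later through the bus attributes defined further down in this file.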
86 
87 static struct device *ap_root_device = NULL;
88 static struct ap_config_info *ap_configuration;
89 static DEFINE_SPINLOCK(ap_device_list_lock);
90 static LIST_HEAD(ap_device_list);
91 
92 /*
93  * Workqueue & timer for bus rescan.
94  */
95 static struct workqueue_struct *ap_work_queue;
96 static struct timer_list ap_config_timer;
97 static int ap_config_time = AP_CONFIG_TIME;
98 static DECLARE_WORK(ap_config_work, ap_scan_bus);
99 
100 /*
101  * Tasklet & timer for AP request polling and interrupts
102  */
103 static DECLARE_TASKLET(ap_tasklet, ap_poll_all, 0);
104 static atomic_t ap_poll_requests = ATOMIC_INIT(0);
105 static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
106 static struct task_struct *ap_poll_kthread = NULL;
107 static DEFINE_MUTEX(ap_poll_thread_mutex);
108 static DEFINE_SPINLOCK(ap_poll_timer_lock);
109 static void *ap_interrupt_indicator;
110 static struct hrtimer ap_poll_timer;
111 /* In an LPAR, poll with 4 kHz frequency, i.e. every 250000 nanoseconds.
112  * Under z/VM, change to 1500000 nanoseconds to match the z/VM polling rate. */
113 static unsigned long long poll_timeout = 250000;
114 
115 /* Suspend flag */
116 static int ap_suspend_flag;
117 /* Flag to check if domain was set through module parameter domain=. This is
118  * important when suspend and resume are done in a z/VM environment where the
119  * domain might change. */
120 static int user_set_domain = 0;
121 static struct bus_type ap_bus_type;
122 
127 static inline int ap_using_interrupts(void)
128 {
129  return ap_interrupt_indicator != NULL;
130 }
131 
137 static inline int ap_instructions_available(void)
138 {
139  register unsigned long reg0 asm ("0") = AP_MKQID(0,0);
140  register unsigned long reg1 asm ("1") = -ENODEV;
141  register unsigned long reg2 asm ("2") = 0UL;
142 
143  asm volatile(
144  " .long 0xb2af0000\n" /* PQAP(TAPQ) */
145  "0: la %1,0\n"
146  "1:\n"
147  EX_TABLE(0b, 1b)
148  : "+d" (reg0), "+d" (reg1), "+d" (reg2) : : "cc" );
149  return reg1;
150 }
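The probe above builds a queue ID with AP_MKQID(0,0), and the resume path later splits one apart again with AP_QID_DEVICE()/AP_QID_QUEUE(). The following is a minimal, userspace-compilable sketch of that packing; the authoritative macros live in ap_bus.h, and the 6-bit card / 8-bit domain split used here is an assumption for illustration only.

/* Illustrative sketch only; the real AP_MKQID/AP_QID_DEVICE/AP_QID_QUEUE
 * macros are defined in ap_bus.h. The 6-bit/8-bit split is an assumption. */
#include <stdio.h>

typedef unsigned int ap_qid_t;

static ap_qid_t mk_qid(unsigned int card, unsigned int domain)
{
	return ((card & 63) << 8) | (domain & 255);
}

int main(void)
{
	ap_qid_t qid = mk_qid(3, 7);	/* card 3, usage domain 7 */

	printf("qid=0x%04x card=%u domain=%u\n",
	       qid, (qid >> 8) & 63, qid & 255);
	return 0;
}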
151 
157 static int ap_interrupts_available(void)
158 {
159  return test_facility(2) && test_facility(65);
160 }
161 
168 #ifdef CONFIG_64BIT
169 static int ap_configuration_available(void)
170 {
171  return test_facility(2) && test_facility(12);
172 }
173 #endif
174 
183 static inline struct ap_queue_status
184 ap_test_queue(ap_qid_t qid, int *queue_depth, int *device_type)
185 {
186  register unsigned long reg0 asm ("0") = qid;
187  register struct ap_queue_status reg1 asm ("1");
188  register unsigned long reg2 asm ("2") = 0UL;
189 
190  asm volatile(".long 0xb2af0000" /* PQAP(TAPQ) */
191  : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
192  *device_type = (int) (reg2 >> 24);
193  *queue_depth = (int) (reg2 & 0xff);
194  return reg1;
195 }
196 
203 static inline struct ap_queue_status ap_reset_queue(ap_qid_t qid)
204 {
205  register unsigned long reg0 asm ("0") = qid | 0x01000000UL;
206  register struct ap_queue_status reg1 asm ("1");
207  register unsigned long reg2 asm ("2") = 0UL;
208 
209  asm volatile(
210  ".long 0xb2af0000" /* PQAP(RAPQ) */
211  : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
212  return reg1;
213 }
214 
215 #ifdef CONFIG_64BIT
216 
223 static inline struct ap_queue_status
224 ap_queue_interruption_control(ap_qid_t qid, void *ind)
225 {
226  register unsigned long reg0 asm ("0") = qid | 0x03000000UL;
227  register unsigned long reg1_in asm ("1") = 0x0000800000000000UL | AP_ISC;
228  register struct ap_queue_status reg1_out asm ("1");
229  register void *reg2 asm ("2") = ind;
230  asm volatile(
231  ".long 0xb2af0000" /* PQAP(AQIC) */
232  : "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2)
233  :
234  : "cc" );
235  return reg1_out;
236 }
237 #endif
238 
239 #ifdef CONFIG_64BIT
240 static inline struct ap_queue_status
241 __ap_query_functions(ap_qid_t qid, unsigned int *functions)
242 {
243  register unsigned long reg0 asm ("0") = 0UL | qid | (1UL << 23);
244  register struct ap_queue_status reg1 asm ("1") = AP_QUEUE_STATUS_INVALID;
245  register unsigned long reg2 asm ("2");
246 
247  asm volatile(
248  ".long 0xb2af0000\n" /* PQAP(TAPQ) */
249  "0:\n"
250  EX_TABLE(0b, 0b)
251  : "+d" (reg0), "+d" (reg1), "=d" (reg2)
252  :
253  : "cc");
254 
255  *functions = (unsigned int)(reg2 >> 32);
256  return reg1;
257 }
258 #endif
259 
260 #ifdef CONFIG_64BIT
261 static inline int __ap_query_configuration(struct ap_config_info *config)
262 {
263  register unsigned long reg0 asm ("0") = 0x04000000UL;
264  register unsigned long reg1 asm ("1") = -EINVAL;
265  register unsigned char *reg2 asm ("2") = (unsigned char *)config;
266 
267  asm volatile(
268  ".long 0xb2af0000\n" /* PQAP(QCI) */
269  "0: la %1,0\n"
270  "1:\n"
271  EX_TABLE(0b, 1b)
272  : "+d" (reg0), "+d" (reg1), "+d" (reg2)
273  :
274  : "cc");
275 
276  return reg1;
277 }
278 #endif
279 
291 static int ap_query_functions(ap_qid_t qid, unsigned int *functions)
292 {
293 #ifdef CONFIG_64BIT
294  struct ap_queue_status status;
295  int i;
296  status = __ap_query_functions(qid, functions);
297 
298  for (i = 0; i < AP_MAX_RESET; i++) {
299  if (ap_queue_status_invalid_test(&status))
300  return -ENODEV;
301 
302  switch (status.response_code) {
303  case AP_RESPONSE_NORMAL:
304  return 0;
305  case AP_RESPONSE_RESET_IN_PROGRESS:
306  case AP_RESPONSE_BUSY:
307  break;
308  case AP_RESPONSE_Q_NOT_AVAIL:
309  case AP_RESPONSE_DECONFIGURED:
310  case AP_RESPONSE_CHECKSTOPPED:
311  case AP_RESPONSE_INVALID_ADDRESS:
312  return -ENODEV;
313  case AP_RESPONSE_OTHERWISE_CHANGED:
314  break;
315  default:
316  break;
317  }
318  if (i < AP_MAX_RESET - 1) {
319  udelay(5);
320  status = __ap_query_functions(qid, functions);
321  }
322  }
323  return -EBUSY;
324 #else
325  return -EINVAL;
326 #endif
327 }
328 
338 static int ap_queue_enable_interruption(ap_qid_t qid, void *ind)
339 {
340 #ifdef CONFIG_64BIT
341  struct ap_queue_status status;
342  int t_depth, t_device_type, rc, i;
343 
344  rc = -EBUSY;
345  status = ap_queue_interruption_control(qid, ind);
346 
347  for (i = 0; i < AP_MAX_RESET; i++) {
348  switch (status.response_code) {
349  case AP_RESPONSE_NORMAL:
350  if (status.int_enabled)
351  return 0;
352  break;
353  case AP_RESPONSE_RESET_IN_PROGRESS:
354  case AP_RESPONSE_BUSY:
355  if (i < AP_MAX_RESET - 1) {
356  udelay(5);
357  status = ap_queue_interruption_control(qid,
358  ind);
359  continue;
360  }
361  break;
362  case AP_RESPONSE_Q_NOT_AVAIL:
363  case AP_RESPONSE_DECONFIGURED:
364  case AP_RESPONSE_CHECKSTOPPED:
365  case AP_RESPONSE_INVALID_ADDRESS:
366  return -ENODEV;
367  case AP_RESPONSE_OTHERWISE_CHANGED:
368  if (status.int_enabled)
369  return 0;
370  break;
371  default:
372  break;
373  }
374  if (i < AP_MAX_RESET - 1) {
375  udelay(5);
376  status = ap_test_queue(qid, &t_depth, &t_device_type);
377  }
378  }
379  return rc;
380 #else
381  return -EINVAL;
382 #endif
383 }
384 
398 static inline struct ap_queue_status
399 __ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length,
400  unsigned int special)
401 {
402  typedef struct { char _[length]; } msgblock;
403  register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
404  register struct ap_queue_status reg1 asm ("1");
405  register unsigned long reg2 asm ("2") = (unsigned long) msg;
406  register unsigned long reg3 asm ("3") = (unsigned long) length;
407  register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
408  register unsigned long reg5 asm ("5") = (unsigned int) psmid;
409 
410  if (special == 1)
411  reg0 |= 0x400000UL;
412 
413  asm volatile (
414  "0: .long 0xb2ad0042\n" /* NQAP */
415  " brc 2,0b"
416  : "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3)
417  : "d" (reg4), "d" (reg5), "m" (*(msgblock *) msg)
418  : "cc" );
419  return reg1;
420 }
421 
422 int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
423 {
424  struct ap_queue_status status;
425 
426  status = __ap_send(qid, psmid, msg, length, 0);
427  switch (status.response_code) {
428  case AP_RESPONSE_NORMAL:
429  return 0;
430  case AP_RESPONSE_Q_FULL:
431  case AP_RESPONSE_RESET_IN_PROGRESS:
432  return -EBUSY;
433  case AP_RESPONSE_REQUEST_TOO_BIG:
434  return -EINVAL;
435  default: /* Device is gone. */
436  return -ENODEV;
437  }
438 }
440 
459 static inline struct ap_queue_status
460 __ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
461 {
462  typedef struct { char _[length]; } msgblock;
463  register unsigned long reg0 asm("0") = qid | 0x80000000UL;
464  register struct ap_queue_status reg1 asm ("1");
465  register unsigned long reg2 asm("2") = 0UL;
466  register unsigned long reg4 asm("4") = (unsigned long) msg;
467  register unsigned long reg5 asm("5") = (unsigned long) length;
468  register unsigned long reg6 asm("6") = 0UL;
469  register unsigned long reg7 asm("7") = 0UL;
470 
471 
472  asm volatile(
473  "0: .long 0xb2ae0064\n" /* DQAP */
474  " brc 6,0b\n"
475  : "+d" (reg0), "=d" (reg1), "+d" (reg2),
476  "+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7),
477  "=m" (*(msgblock *) msg) : : "cc" );
478  *psmid = (((unsigned long long) reg6) << 32) + reg7;
479  return reg1;
480 }
481 
482 int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
483 {
484  struct ap_queue_status status;
485 
486  status = __ap_recv(qid, psmid, msg, length);
487  switch (status.response_code) {
488  case AP_RESPONSE_NORMAL:
489  return 0;
490  case AP_RESPONSE_NO_PENDING_REPLY:
491  if (status.queue_empty)
492  return -ENOENT;
493  return -EBUSY;
494  case AP_RESPONSE_RESET_IN_PROGRESS:
495  return -EBUSY;
496  default:
497  return -ENODEV;
498  }
499 }
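A minimal sketch of how a caller might pair ap_send() with ap_recv() for a synchronous request, modeled on the probe message sent by ap_probe_device_type() further down; the my_* names are hypothetical and error handling is reduced to the essentials.

/* Hedged sketch, not part of ap_bus.c: synchronous send/receive against
 * one AP queue, mirroring the retry pattern of ap_probe_device_type(). */
static int my_send_and_wait(ap_qid_t qid, void *req, size_t req_len,
			    void *rep, size_t rep_len)
{
	unsigned long long psmid = 0x0102030405060708ULL;
	unsigned long long reply_psmid;
	int rc, i;

	rc = ap_send(qid, psmid, req, req_len);
	if (rc)
		return rc;		/* -EBUSY, -EINVAL or -ENODEV */
	for (i = 0; i < 6; i++) {
		mdelay(300);		/* crude wait, as in the probe path */
		rc = ap_recv(qid, &reply_psmid, rep, rep_len);
		if (rc == 0 && reply_psmid == psmid)
			return 0;	/* our reply arrived */
		if (rc == -ENODEV)
			return rc;
	}
	return -ETIMEDOUT;
}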
501 
510 static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type)
511 {
512  struct ap_queue_status status;
513  int t_depth, t_device_type, rc, i;
514 
515  rc = -EBUSY;
516  for (i = 0; i < AP_MAX_RESET; i++) {
517  status = ap_test_queue(qid, &t_depth, &t_device_type);
518  switch (status.response_code) {
519  case AP_RESPONSE_NORMAL:
520  *queue_depth = t_depth + 1;
521  *device_type = t_device_type;
522  rc = 0;
523  break;
524  case AP_RESPONSE_Q_NOT_AVAIL:
525  rc = -ENODEV;
526  break;
527  case AP_RESPONSE_RESET_IN_PROGRESS:
528  break;
529  case AP_RESPONSE_DECONFIGURED:
530  rc = -ENODEV;
531  break;
532  case AP_RESPONSE_CHECKSTOPPED:
533  rc = -ENODEV;
534  break;
535  case AP_RESPONSE_INVALID_ADDRESS:
536  rc = -ENODEV;
537  break;
538  case AP_RESPONSE_OTHERWISE_CHANGED:
539  break;
540  case AP_RESPONSE_BUSY:
541  break;
542  default:
543  BUG();
544  }
545  if (rc != -EBUSY)
546  break;
547  if (i < AP_MAX_RESET - 1)
548  udelay(5);
549  }
550  return rc;
551 }
552 
559 static int ap_init_queue(ap_qid_t qid)
560 {
561  struct ap_queue_status status;
562  int rc, dummy, i;
563 
564  rc = -ENODEV;
565  status = ap_reset_queue(qid);
566  for (i = 0; i < AP_MAX_RESET; i++) {
567  switch (status.response_code) {
568  case AP_RESPONSE_NORMAL:
569  if (status.queue_empty)
570  rc = 0;
571  break;
572  case AP_RESPONSE_Q_NOT_AVAIL:
573  case AP_RESPONSE_DECONFIGURED:
574  case AP_RESPONSE_CHECKSTOPPED:
575  i = AP_MAX_RESET; /* return with -ENODEV */
576  break;
577  case AP_RESPONSE_RESET_IN_PROGRESS:
578  rc = -EBUSY;
579  case AP_RESPONSE_BUSY:
580  default:
581  break;
582  }
583  if (rc != -ENODEV && rc != -EBUSY)
584  break;
585  if (i < AP_MAX_RESET - 1) {
586  udelay(5);
587  status = ap_test_queue(qid, &dummy, &dummy);
588  }
589  }
590  if (rc == 0 && ap_using_interrupts()) {
591  rc = ap_queue_enable_interruption(qid, ap_interrupt_indicator);
592  /* If interruption mode is supported by the machine,
593  * but an AP cannot be enabled for interruption, then
594  * the AP will be discarded. */
595  if (rc)
596  pr_err("Registering adapter interrupts for "
597  "AP %d failed\n", AP_QID_DEVICE(qid));
598  }
599  return rc;
600 }
601 
608 static void ap_increase_queue_count(struct ap_device *ap_dev)
609 {
610  int timeout = ap_dev->drv->request_timeout;
611 
612  ap_dev->queue_count++;
613  if (ap_dev->queue_count == 1) {
614  mod_timer(&ap_dev->timeout, jiffies + timeout);
615  ap_dev->reset = AP_RESET_ARMED;
616  }
617 }
618 
626 static void ap_decrease_queue_count(struct ap_device *ap_dev)
627 {
628  int timeout = ap_dev->drv->request_timeout;
629 
630  ap_dev->queue_count--;
631  if (ap_dev->queue_count > 0)
632  mod_timer(&ap_dev->timeout, jiffies + timeout);
633  else
634  /*
635  * The timeout timer should be disabled now - since
636  * del_timer_sync() is very expensive, we just tell via the
637  * reset flag to ignore the pending timeout timer.
638  */
639  ap_dev->reset = AP_RESET_IGNORE;
640 }
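Taken together, these two helpers and ap_request_timeout()/__ap_poll_device() below form a small per-device watchdog: queuing the first request arms the timeout timer and sets the reset state to AP_RESET_ARMED; if the timer fires while still armed, ap_request_timeout() flips it to AP_RESET_DO and the next poll pass resets the queue via ap_reset(); draining the last reply sets AP_RESET_IGNORE so that a late timer tick is simply ignored instead of paying for del_timer_sync().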
641 
642 /*
643  * AP device related attributes.
644  */
645 static ssize_t ap_hwtype_show(struct device *dev,
646  struct device_attribute *attr, char *buf)
647 {
648  struct ap_device *ap_dev = to_ap_dev(dev);
649  return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->device_type);
650 }
651 
652 static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL);
653 static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr,
654  char *buf)
655 {
656  struct ap_device *ap_dev = to_ap_dev(dev);
657  return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->queue_depth);
658 }
659 
660 static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL);
661 static ssize_t ap_request_count_show(struct device *dev,
662  struct device_attribute *attr,
663  char *buf)
664 {
665  struct ap_device *ap_dev = to_ap_dev(dev);
666  int rc;
667 
668  spin_lock_bh(&ap_dev->lock);
669  rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->total_request_count);
670  spin_unlock_bh(&ap_dev->lock);
671  return rc;
672 }
673 
674 static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL);
675 
676 static ssize_t ap_requestq_count_show(struct device *dev,
677  struct device_attribute *attr, char *buf)
678 {
679  struct ap_device *ap_dev = to_ap_dev(dev);
680  int rc;
681 
682  spin_lock_bh(&ap_dev->lock);
683  rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->requestq_count);
684  spin_unlock_bh(&ap_dev->lock);
685  return rc;
686 }
687 
688 static DEVICE_ATTR(requestq_count, 0444, ap_requestq_count_show, NULL);
689 
690 static ssize_t ap_pendingq_count_show(struct device *dev,
691  struct device_attribute *attr, char *buf)
692 {
693  struct ap_device *ap_dev = to_ap_dev(dev);
694  int rc;
695 
696  spin_lock_bh(&ap_dev->lock);
697  rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->pendingq_count);
698  spin_unlock_bh(&ap_dev->lock);
699  return rc;
700 }
701 
702 static DEVICE_ATTR(pendingq_count, 0444, ap_pendingq_count_show, NULL);
703 
704 static ssize_t ap_modalias_show(struct device *dev,
705  struct device_attribute *attr, char *buf)
706 {
707  return sprintf(buf, "ap:t%02X", to_ap_dev(dev)->device_type);
708 }
709 
710 static DEVICE_ATTR(modalias, 0444, ap_modalias_show, NULL);
711 
712 static ssize_t ap_functions_show(struct device *dev,
713  struct device_attribute *attr, char *buf)
714 {
715  struct ap_device *ap_dev = to_ap_dev(dev);
716  return snprintf(buf, PAGE_SIZE, "0x%08X\n", ap_dev->functions);
717 }
718 
719 static DEVICE_ATTR(ap_functions, 0444, ap_functions_show, NULL);
720 
721 static struct attribute *ap_dev_attrs[] = {
722  &dev_attr_hwtype.attr,
723  &dev_attr_depth.attr,
724  &dev_attr_request_count.attr,
725  &dev_attr_requestq_count.attr,
726  &dev_attr_pendingq_count.attr,
727  &dev_attr_modalias.attr,
728  &dev_attr_ap_functions.attr,
729  NULL
730 };
731 static struct attribute_group ap_dev_attr_group = {
732  .attrs = ap_dev_attrs
733 };
734 
742 static int ap_bus_match(struct device *dev, struct device_driver *drv)
743 {
744  struct ap_device *ap_dev = to_ap_dev(dev);
745  struct ap_driver *ap_drv = to_ap_drv(drv);
746  struct ap_device_id *id;
747 
748  /*
749  * Compare device type of the device with the list of
750  * supported types of the device_driver.
751  */
752  for (id = ap_drv->ids; id->match_flags; id++) {
753  if ((id->match_flags & AP_DEVICE_ID_MATCH_DEVICE_TYPE) &&
754  (id->dev_type != ap_dev->device_type))
755  continue;
756  return 1;
757  }
758  return 0;
759 }
760 
769 static int ap_uevent (struct device *dev, struct kobj_uevent_env *env)
770 {
771  struct ap_device *ap_dev = to_ap_dev(dev);
772  int retval = 0;
773 
774  if (!ap_dev)
775  return -ENODEV;
776 
777  /* Set up DEV_TYPE environment variable. */
778  retval = add_uevent_var(env, "DEV_TYPE=%04X", ap_dev->device_type);
779  if (retval)
780  return retval;
781 
782  /* Add MODALIAS= */
783  retval = add_uevent_var(env, "MODALIAS=ap:t%02X", ap_dev->device_type);
784 
785  return retval;
786 }
787 
788 static int ap_bus_suspend(struct device *dev, pm_message_t state)
789 {
790  struct ap_device *ap_dev = to_ap_dev(dev);
791  unsigned long flags;
792 
793  if (!ap_suspend_flag) {
794  ap_suspend_flag = 1;
795 
796  /* Disable scanning for devices; we do not want to scan
797  * for them again after they have been removed.
798  */
799  del_timer_sync(&ap_config_timer);
800  if (ap_work_queue != NULL) {
801  destroy_workqueue(ap_work_queue);
802  ap_work_queue = NULL;
803  }
804 
805  tasklet_disable(&ap_tasklet);
806  }
807  /* Poll on the device until all requests are finished. */
808  do {
809  flags = 0;
810  spin_lock_bh(&ap_dev->lock);
811  __ap_poll_device(ap_dev, &flags);
812  spin_unlock_bh(&ap_dev->lock);
813  } while ((flags & 1) || (flags & 2));
814 
815  spin_lock_bh(&ap_dev->lock);
816  ap_dev->unregistered = 1;
817  spin_unlock_bh(&ap_dev->lock);
818 
819  return 0;
820 }
821 
822 static int ap_bus_resume(struct device *dev)
823 {
824  int rc = 0;
825  struct ap_device *ap_dev = to_ap_dev(dev);
826 
827  if (ap_suspend_flag) {
828  ap_suspend_flag = 0;
829  if (!ap_interrupts_available())
830  ap_interrupt_indicator = NULL;
831  ap_query_configuration();
832  if (!user_set_domain) {
833  ap_domain_index = -1;
834  ap_select_domain();
835  }
836  init_timer(&ap_config_timer);
837  ap_config_timer.function = ap_config_timeout;
838  ap_config_timer.data = 0;
839  ap_config_timer.expires = jiffies + ap_config_time * HZ;
840  add_timer(&ap_config_timer);
841  ap_work_queue = create_singlethread_workqueue("kapwork");
842  if (!ap_work_queue)
843  return -ENOMEM;
844  tasklet_enable(&ap_tasklet);
845  if (!ap_using_interrupts())
846  ap_schedule_poll_timer();
847  else
848  tasklet_schedule(&ap_tasklet);
849  if (ap_thread_flag)
850  rc = ap_poll_thread_start();
851  }
852  if (AP_QID_QUEUE(ap_dev->qid) != ap_domain_index) {
853  spin_lock_bh(&ap_dev->lock);
854  ap_dev->qid = AP_MKQID(AP_QID_DEVICE(ap_dev->qid),
855  ap_domain_index);
856  spin_unlock_bh(&ap_dev->lock);
857  }
858  queue_work(ap_work_queue, &ap_config_work);
859 
860  return rc;
861 }
862 
863 static struct bus_type ap_bus_type = {
864  .name = "ap",
865  .match = &ap_bus_match,
866  .uevent = &ap_uevent,
867  .suspend = ap_bus_suspend,
868  .resume = ap_bus_resume
869 };
870 
871 static int ap_device_probe(struct device *dev)
872 {
873  struct ap_device *ap_dev = to_ap_dev(dev);
874  struct ap_driver *ap_drv = to_ap_drv(dev->driver);
875  int rc;
876 
877  ap_dev->drv = ap_drv;
878  rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
879  if (!rc) {
880  spin_lock_bh(&ap_device_list_lock);
881  list_add(&ap_dev->list, &ap_device_list);
882  spin_unlock_bh(&ap_device_list_lock);
883  }
884  return rc;
885 }
886 
893 static void __ap_flush_queue(struct ap_device *ap_dev)
894 {
895  struct ap_message *ap_msg, *next;
896 
897  list_for_each_entry_safe(ap_msg, next, &ap_dev->pendingq, list) {
898  list_del_init(&ap_msg->list);
899  ap_dev->pendingq_count--;
900  ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
901  }
902  list_for_each_entry_safe(ap_msg, next, &ap_dev->requestq, list) {
903  list_del_init(&ap_msg->list);
904  ap_dev->requestq_count--;
905  ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
906  }
907 }
908 
909 void ap_flush_queue(struct ap_device *ap_dev)
910 {
911  spin_lock_bh(&ap_dev->lock);
912  __ap_flush_queue(ap_dev);
913  spin_unlock_bh(&ap_dev->lock);
914 }
916 
917 static int ap_device_remove(struct device *dev)
918 {
919  struct ap_device *ap_dev = to_ap_dev(dev);
920  struct ap_driver *ap_drv = ap_dev->drv;
921 
922  ap_flush_queue(ap_dev);
923  del_timer_sync(&ap_dev->timeout);
924  spin_lock_bh(&ap_device_list_lock);
925  list_del_init(&ap_dev->list);
926  spin_unlock_bh(&ap_device_list_lock);
927  if (ap_drv->remove)
928  ap_drv->remove(ap_dev);
929  spin_lock_bh(&ap_dev->lock);
930  atomic_sub(ap_dev->queue_count, &ap_poll_requests);
931  spin_unlock_bh(&ap_dev->lock);
932  return 0;
933 }
934 
935 int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
936  char *name)
937 {
938  struct device_driver *drv = &ap_drv->driver;
939 
940  drv->bus = &ap_bus_type;
941  drv->probe = ap_device_probe;
942  drv->remove = ap_device_remove;
943  drv->owner = owner;
944  drv->name = name;
945  return driver_register(drv);
946 }
948 
949 void ap_driver_unregister(struct ap_driver *ap_drv)
950 {
951  driver_unregister(&ap_drv->driver);
952 }
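A hedged sketch of how a crypto card driver typically hooks into this bus through ap_driver_register(); the real users are the zcrypt drivers, and the my_* names, the device-type value and the timeout are hypothetical. The fields used here (ids, probe, remove, request_timeout, driver) are the ones this file dereferences on struct ap_driver.

/* Sketch only, not part of ap_bus.c. */
static struct ap_device_id my_ids[] = {
	{ .dev_type = 10, .match_flags = AP_DEVICE_ID_MATCH_DEVICE_TYPE },
	{ /* end of list */ },
};

static int my_probe(struct ap_device *ap_dev)
{
	/* allocate per-device state, set up ap_dev->reply, return 0 on success */
	return 0;
}

static void my_remove(struct ap_device *ap_dev)
{
	/* release per-device state */
}

static struct ap_driver my_driver = {
	.ids = my_ids,
	.probe = my_probe,
	.remove = my_remove,
	.request_timeout = 60 * HZ,	/* used by ap_increase_queue_count() */
};

static int __init my_init(void)
{
	return ap_driver_register(&my_driver, THIS_MODULE, "my_ap_drv");
}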
954 
955 void ap_bus_force_rescan(void)
956 {
957  /* Delete the AP bus rescan timer. */
958  del_timer(&ap_config_timer);
959 
960  /* process a synchronous bus rescan */
961  ap_scan_bus(NULL);
962 
963  /* Setup the AP bus rescan timer again. */
964  ap_config_timer.expires = jiffies + ap_config_time * HZ;
965  add_timer(&ap_config_timer);
966 }
968 
969 /*
970  * AP bus attributes.
971  */
972 static ssize_t ap_domain_show(struct bus_type *bus, char *buf)
973 {
974  return snprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index);
975 }
976 
977 static BUS_ATTR(ap_domain, 0444, ap_domain_show, NULL);
978 
979 static ssize_t ap_config_time_show(struct bus_type *bus, char *buf)
980 {
981  return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time);
982 }
983 
984 static ssize_t ap_interrupts_show(struct bus_type *bus, char *buf)
985 {
986  return snprintf(buf, PAGE_SIZE, "%d\n",
987  ap_using_interrupts() ? 1 : 0);
988 }
989 
990 static BUS_ATTR(ap_interrupts, 0444, ap_interrupts_show, NULL);
991 
992 static ssize_t ap_config_time_store(struct bus_type *bus,
993  const char *buf, size_t count)
994 {
995  int time;
996 
997  if (sscanf(buf, "%d\n", &time) != 1 || time < 5 || time > 120)
998  return -EINVAL;
999  ap_config_time = time;
1000  if (!timer_pending(&ap_config_timer) ||
1001  !mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ)) {
1002  ap_config_timer.expires = jiffies + ap_config_time * HZ;
1003  add_timer(&ap_config_timer);
1004  }
1005  return count;
1006 }
1007 
1008 static BUS_ATTR(config_time, 0644, ap_config_time_show, ap_config_time_store);
1009 
1010 static ssize_t ap_poll_thread_show(struct bus_type *bus, char *buf)
1011 {
1012  return snprintf(buf, PAGE_SIZE, "%d\n", ap_poll_kthread ? 1 : 0);
1013 }
1014 
1015 static ssize_t ap_poll_thread_store(struct bus_type *bus,
1016  const char *buf, size_t count)
1017 {
1018  int flag, rc;
1019 
1020  if (sscanf(buf, "%d\n", &flag) != 1)
1021  return -EINVAL;
1022  if (flag) {
1023  rc = ap_poll_thread_start();
1024  if (rc)
1025  return rc;
1026  }
1027  else
1028  ap_poll_thread_stop();
1029  return count;
1030 }
1031 
1032 static BUS_ATTR(poll_thread, 0644, ap_poll_thread_show, ap_poll_thread_store);
1033 
1034 static ssize_t poll_timeout_show(struct bus_type *bus, char *buf)
1035 {
1036  return snprintf(buf, PAGE_SIZE, "%llu\n", poll_timeout);
1037 }
1038 
1039 static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf,
1040  size_t count)
1041 {
1042  unsigned long long time;
1043  ktime_t hr_time;
1044 
1045  /* 120 seconds = maximum poll interval */
1046  if (sscanf(buf, "%llu\n", &time) != 1 || time < 1 ||
1047  time > 120000000000ULL)
1048  return -EINVAL;
1049  poll_timeout = time;
1050  hr_time = ktime_set(0, poll_timeout);
1051 
1052  if (!hrtimer_is_queued(&ap_poll_timer) ||
1053  !hrtimer_forward(&ap_poll_timer, hrtimer_get_expires(&ap_poll_timer), hr_time)) {
1054  hrtimer_set_expires(&ap_poll_timer, hr_time);
1055  hrtimer_start_expires(&ap_poll_timer, HRTIMER_MODE_ABS);
1056  }
1057  return count;
1058 }
1059 
1060 static BUS_ATTR(poll_timeout, 0644, poll_timeout_show, poll_timeout_store);
1061 
1062 static struct bus_attribute *const ap_bus_attrs[] = {
1063  &bus_attr_ap_domain,
1064  &bus_attr_config_time,
1065  &bus_attr_poll_thread,
1066  &bus_attr_ap_interrupts,
1067  &bus_attr_poll_timeout,
1068  NULL,
1069 };
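ap_module_init() below creates one file under /sys/bus/ap/ for each entry in this array, so the bus exports ap_domain, config_time, poll_thread, ap_interrupts and poll_timeout as its tunables; poll_timeout is given in nanoseconds and accepts values from 1 to 120000000000 (120 seconds).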
1070 
1071 static inline int ap_test_config(unsigned int *field, unsigned int nr)
1072 {
1073  if (nr > 0xFFu)
1074  return 0;
1075  return ap_test_bit((field + (nr >> 5)), (nr & 0x1f));
1076 }
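For example, for nr = 69 the function checks bit 69 & 0x1f = 5 of the 32-bit word field[69 >> 5] = field[2]; how the bit is counted within that word is decided by ap_test_bit() in ap_bus.h.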
1077 
1078 /*
1079  * ap_test_config_card_id(): Test, whether an AP card ID is configured.
1080  * @id AP card ID
1081  *
1082  * Returns 0 if the card is not configured
1083  * 1 if the card is configured or
1084  * if the configuration information is not available
1085  */
1086 static inline int ap_test_config_card_id(unsigned int id)
1087 {
1088  if (!ap_configuration)
1089  return 1;
1090  return ap_test_config(ap_configuration->apm, id);
1091 }
1092 
1093 /*
1094  * ap_test_config_domain(): Test, whether an AP usage domain is configured.
1095  * @domain AP usage domain ID
1096  *
1097  * Returns 0 if the usage domain is not configured
1098  * 1 if the usage domain is configured or
1099  * if the configuration information is not available
1100  */
1101 static inline int ap_test_config_domain(unsigned int domain)
1102 {
1103  if (!ap_configuration)
1104  return 1;
1105  return ap_test_config(ap_configuration->aqm, domain);
1106 }
1107 
1113 static void ap_query_configuration(void)
1114 {
1115 #ifdef CONFIG_64BIT
1116  if (ap_configuration_available()) {
1117  if (!ap_configuration)
1118  ap_configuration =
1119  kzalloc(sizeof(struct ap_config_info),
1120  GFP_KERNEL);
1121  if (ap_configuration)
1122  __ap_query_configuration(ap_configuration);
1123  } else
1124  ap_configuration = NULL;
1125 #else
1126  ap_configuration = NULL;
1127 #endif
1128 }
1129 
1135 static int ap_select_domain(void)
1136 {
1137  int queue_depth, device_type, count, max_count, best_domain;
1138  ap_qid_t qid;
1139  int rc, i, j;
1140 
1141  /*
1142  * We want to use a single domain. Either the one specified with
1143  * the "domain=" parameter or the domain with the maximum number
1144  * of devices.
1145  */
1146  if (ap_domain_index >= 0)
1147  /* Domain has already been selected. */
1148  return 0;
1149  best_domain = -1;
1150  max_count = 0;
1151  for (i = 0; i < AP_DOMAINS; i++) {
1152  if (!ap_test_config_domain(i))
1153  continue;
1154  count = 0;
1155  for (j = 0; j < AP_DEVICES; j++) {
1156  if (!ap_test_config_card_id(j))
1157  continue;
1158  qid = AP_MKQID(j, i);
1159  rc = ap_query_queue(qid, &queue_depth, &device_type);
1160  if (rc)
1161  continue;
1162  count++;
1163  }
1164  if (count > max_count) {
1165  max_count = count;
1166  best_domain = i;
1167  }
1168  }
1169  if (best_domain >= 0){
1170  ap_domain_index = best_domain;
1171  return 0;
1172  }
1173  return -ENODEV;
1174 }
1175 
1182 static int ap_probe_device_type(struct ap_device *ap_dev)
1183 {
1184  static unsigned char msg[] = {
1185  0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,
1186  0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1187  0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,
1188  0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1189  0x01,0x00,0x43,0x43,0x41,0x2d,0x41,0x50,
1190  0x50,0x4c,0x20,0x20,0x20,0x01,0x01,0x01,
1191  0x00,0x00,0x00,0x00,0x50,0x4b,0x00,0x00,
1192  0x00,0x00,0x01,0x1c,0x00,0x00,0x00,0x00,
1193  0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1194  0x00,0x00,0x05,0xb8,0x00,0x00,0x00,0x00,
1195  0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1196  0x70,0x00,0x41,0x00,0x00,0x00,0x00,0x00,
1197  0x00,0x00,0x54,0x32,0x01,0x00,0xa0,0x00,
1198  0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1199  0x00,0x00,0x00,0x00,0xb8,0x05,0x00,0x00,
1200  0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1201  0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1202  0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1203  0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1204  0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1205  0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1206  0x00,0x00,0x0a,0x00,0x00,0x00,0x00,0x00,
1207  0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1208  0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00,
1209  0x49,0x43,0x53,0x46,0x20,0x20,0x20,0x20,
1210  0x50,0x4b,0x0a,0x00,0x50,0x4b,0x43,0x53,
1211  0x2d,0x31,0x2e,0x32,0x37,0x00,0x11,0x22,
1212  0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
1213  0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,
1214  0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,
1215  0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,
1216  0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,
1217  0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
1218  0x11,0x22,0x33,0x5d,0x00,0x5b,0x00,0x77,
1219  0x88,0x1e,0x00,0x00,0x57,0x00,0x00,0x00,
1220  0x00,0x04,0x00,0x00,0x4f,0x00,0x00,0x00,
1221  0x03,0x02,0x00,0x00,0x40,0x01,0x00,0x01,
1222  0xce,0x02,0x68,0x2d,0x5f,0xa9,0xde,0x0c,
1223  0xf6,0xd2,0x7b,0x58,0x4b,0xf9,0x28,0x68,
1224  0x3d,0xb4,0xf4,0xef,0x78,0xd5,0xbe,0x66,
1225  0x63,0x42,0xef,0xf8,0xfd,0xa4,0xf8,0xb0,
1226  0x8e,0x29,0xc2,0xc9,0x2e,0xd8,0x45,0xb8,
1227  0x53,0x8c,0x6f,0x4e,0x72,0x8f,0x6c,0x04,
1228  0x9c,0x88,0xfc,0x1e,0xc5,0x83,0x55,0x57,
1229  0xf7,0xdd,0xfd,0x4f,0x11,0x36,0x95,0x5d,
1230  };
1231  struct ap_queue_status status;
1232  unsigned long long psmid;
1233  char *reply;
1234  int rc, i;
1235 
1236  reply = (void *) get_zeroed_page(GFP_KERNEL);
1237  if (!reply) {
1238  rc = -ENOMEM;
1239  goto out;
1240  }
1241 
1242  status = __ap_send(ap_dev->qid, 0x0102030405060708ULL,
1243  msg, sizeof(msg), 0);
1244  if (status.response_code != AP_RESPONSE_NORMAL) {
1245  rc = -ENODEV;
1246  goto out_free;
1247  }
1248 
1249  /* Wait for the test message to complete. */
1250  for (i = 0; i < 6; i++) {
1251  mdelay(300);
1252  status = __ap_recv(ap_dev->qid, &psmid, reply, 4096);
1253  if (status.response_code == AP_RESPONSE_NORMAL &&
1254  psmid == 0x0102030405060708ULL)
1255  break;
1256  }
1257  if (i < 6) {
1258  /* Got an answer. */
1259  if (reply[0] == 0x00 && reply[1] == 0x86)
1260  ap_dev->device_type = AP_DEVICE_TYPE_PCICC;
1261  else
1262  ap_dev->device_type = AP_DEVICE_TYPE_PCICA;
1263  rc = 0;
1264  } else
1265  rc = -ENODEV;
1266 
1267 out_free:
1268  free_page((unsigned long) reply);
1269 out:
1270  return rc;
1271 }
1272 
1273 static void ap_interrupt_handler(void *unused1, void *unused2)
1274 {
1276  tasklet_schedule(&ap_tasklet);
1277 }
1278 
1286 static int __ap_scan_bus(struct device *dev, void *data)
1287 {
1288  return to_ap_dev(dev)->qid == (ap_qid_t)(unsigned long) data;
1289 }
1290 
1291 static void ap_device_release(struct device *dev)
1292 {
1293  struct ap_device *ap_dev = to_ap_dev(dev);
1294 
1295  kfree(ap_dev);
1296 }
1297 
1298 static void ap_scan_bus(struct work_struct *unused)
1299 {
1300  struct ap_device *ap_dev;
1301  struct device *dev;
1302  ap_qid_t qid;
1303  int queue_depth, device_type;
1304  unsigned int device_functions;
1305  int rc, i;
1306 
1307  ap_query_configuration();
1308  if (ap_select_domain() != 0)
1309  return;
1310  for (i = 0; i < AP_DEVICES; i++) {
1311  qid = AP_MKQID(i, ap_domain_index);
1312  dev = bus_find_device(&ap_bus_type, NULL,
1313  (void *)(unsigned long)qid,
1314  __ap_scan_bus);
1315  if (ap_test_config_card_id(i))
1316  rc = ap_query_queue(qid, &queue_depth, &device_type);
1317  else
1318  rc = -ENODEV;
1319  if (dev) {
1320  if (rc == -EBUSY) {
1321  set_current_state(TASK_UNINTERRUPTIBLE);
1322  schedule_timeout(AP_RESET_TIMEOUT);
1323  rc = ap_query_queue(qid, &queue_depth,
1324  &device_type);
1325  }
1326  ap_dev = to_ap_dev(dev);
1327  spin_lock_bh(&ap_dev->lock);
1328  if (rc || ap_dev->unregistered) {
1329  spin_unlock_bh(&ap_dev->lock);
1330  if (ap_dev->unregistered)
1331  i--;
1332  device_unregister(dev);
1333  put_device(dev);
1334  continue;
1335  }
1336  spin_unlock_bh(&ap_dev->lock);
1337  put_device(dev);
1338  continue;
1339  }
1340  if (rc)
1341  continue;
1342  rc = ap_init_queue(qid);
1343  if (rc)
1344  continue;
1345  ap_dev = kzalloc(sizeof(*ap_dev), GFP_KERNEL);
1346  if (!ap_dev)
1347  break;
1348  ap_dev->qid = qid;
1349  ap_dev->queue_depth = queue_depth;
1350  ap_dev->unregistered = 1;
1351  spin_lock_init(&ap_dev->lock);
1352  INIT_LIST_HEAD(&ap_dev->pendingq);
1353  INIT_LIST_HEAD(&ap_dev->requestq);
1354  INIT_LIST_HEAD(&ap_dev->list);
1355  setup_timer(&ap_dev->timeout, ap_request_timeout,
1356  (unsigned long) ap_dev);
1357  switch (device_type) {
1358  case 0:
1359  /* device type probing for old cards */
1360  if (ap_probe_device_type(ap_dev)) {
1361  kfree(ap_dev);
1362  continue;
1363  }
1364  break;
1365  default:
1366  ap_dev->device_type = device_type;
1367  }
1368 
1369  rc = ap_query_functions(qid, &device_functions);
1370  if (!rc)
1371  ap_dev->functions = device_functions;
1372  else
1373  ap_dev->functions = 0u;
1374 
1375  ap_dev->device.bus = &ap_bus_type;
1376  ap_dev->device.parent = ap_root_device;
1377  if (dev_set_name(&ap_dev->device, "card%02x",
1378  AP_QID_DEVICE(ap_dev->qid))) {
1379  kfree(ap_dev);
1380  continue;
1381  }
1382  ap_dev->device.release = ap_device_release;
1383  rc = device_register(&ap_dev->device);
1384  if (rc) {
1385  put_device(&ap_dev->device);
1386  continue;
1387  }
1388  /* Add device attributes. */
1389  rc = sysfs_create_group(&ap_dev->device.kobj,
1390  &ap_dev_attr_group);
1391  if (!rc) {
1392  spin_lock_bh(&ap_dev->lock);
1393  ap_dev->unregistered = 0;
1394  spin_unlock_bh(&ap_dev->lock);
1395  }
1396  else
1397  device_unregister(&ap_dev->device);
1398  }
1399 }
1400 
1401 static void
1402 ap_config_timeout(unsigned long ptr)
1403 {
1404  queue_work(ap_work_queue, &ap_config_work);
1405  ap_config_timer.expires = jiffies + ap_config_time * HZ;
1406  add_timer(&ap_config_timer);
1407 }
1408 
1414 static inline void __ap_schedule_poll_timer(void)
1415 {
1416  ktime_t hr_time;
1417 
1418  spin_lock_bh(&ap_poll_timer_lock);
1419  if (hrtimer_is_queued(&ap_poll_timer) || ap_suspend_flag)
1420  goto out;
1421  if (ktime_to_ns(hrtimer_expires_remaining(&ap_poll_timer)) <= 0) {
1422  hr_time = ktime_set(0, poll_timeout);
1423  hrtimer_forward_now(&ap_poll_timer, hr_time);
1424  hrtimer_restart(&ap_poll_timer);
1425  }
1426 out:
1427  spin_unlock_bh(&ap_poll_timer_lock);
1428 }
1429 
1435 static inline void ap_schedule_poll_timer(void)
1436 {
1437  if (ap_using_interrupts())
1438  return;
1439  __ap_schedule_poll_timer();
1440 }
1441 
1450 static int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags)
1451 {
1452  struct ap_queue_status status;
1453  struct ap_message *ap_msg;
1454 
1455  if (ap_dev->queue_count <= 0)
1456  return 0;
1457  status = __ap_recv(ap_dev->qid, &ap_dev->reply->psmid,
1458  ap_dev->reply->message, ap_dev->reply->length);
1459  switch (status.response_code) {
1460  case AP_RESPONSE_NORMAL:
1461  atomic_dec(&ap_poll_requests);
1462  ap_decrease_queue_count(ap_dev);
1463  list_for_each_entry(ap_msg, &ap_dev->pendingq, list) {
1464  if (ap_msg->psmid != ap_dev->reply->psmid)
1465  continue;
1466  list_del_init(&ap_msg->list);
1467  ap_dev->pendingq_count--;
1468  ap_msg->receive(ap_dev, ap_msg, ap_dev->reply);
1469  break;
1470  }
1471  if (ap_dev->queue_count > 0)
1472  *flags |= 1;
1473  break;
1474  case AP_RESPONSE_NO_PENDING_REPLY:
1475  if (status.queue_empty) {
1476  /* The card shouldn't forget requests but who knows. */
1477  atomic_sub(ap_dev->queue_count, &ap_poll_requests);
1478  ap_dev->queue_count = 0;
1479  list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
1480  ap_dev->requestq_count += ap_dev->pendingq_count;
1481  ap_dev->pendingq_count = 0;
1482  } else
1483  *flags |= 2;
1484  break;
1485  default:
1486  return -ENODEV;
1487  }
1488  return 0;
1489 }
1490 
1499 static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
1500 {
1501  struct ap_queue_status status;
1502  struct ap_message *ap_msg;
1503 
1504  if (ap_dev->requestq_count <= 0 ||
1505  ap_dev->queue_count >= ap_dev->queue_depth)
1506  return 0;
1507  /* Start the next request on the queue. */
1508  ap_msg = list_entry(ap_dev->requestq.next, struct ap_message, list);
1509  status = __ap_send(ap_dev->qid, ap_msg->psmid,
1510  ap_msg->message, ap_msg->length, ap_msg->special);
1511  switch (status.response_code) {
1512  case AP_RESPONSE_NORMAL:
1513  atomic_inc(&ap_poll_requests);
1514  ap_increase_queue_count(ap_dev);
1515  list_move_tail(&ap_msg->list, &ap_dev->pendingq);
1516  ap_dev->requestq_count--;
1517  ap_dev->pendingq_count++;
1518  if (ap_dev->queue_count < ap_dev->queue_depth &&
1519  ap_dev->requestq_count > 0)
1520  *flags |= 1;
1521  *flags |= 2;
1522  break;
1523  case AP_RESPONSE_RESET_IN_PROGRESS:
1524  __ap_schedule_poll_timer();
1525  case AP_RESPONSE_Q_FULL:
1526  *flags |= 2;
1527  break;
1528  case AP_RESPONSE_MESSAGE_TOO_BIG:
1529  case AP_RESPONSE_REQUEST_TOO_BIG:
1530  return -EINVAL;
1531  default:
1532  return -ENODEV;
1533  }
1534  return 0;
1535 }
1536 
1547 static inline int ap_poll_queue(struct ap_device *ap_dev, unsigned long *flags)
1548 {
1549  int rc;
1550 
1551  rc = ap_poll_read(ap_dev, flags);
1552  if (rc)
1553  return rc;
1554  return ap_poll_write(ap_dev, flags);
1555 }
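The flags word filled in by ap_poll_read()/ap_poll_write() is a small contract with the callers: bit 0 (value 1) means another poll pass is worthwhile right away because more replies or sendable requests are expected, and bit 1 (value 2) means work is still outstanding and the poll timer should be (re)armed. ap_poll_all() loops while bit 0 is set and schedules the timer when bit 1 is set, and ap_bus_suspend() spins until both are clear.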
1556 
1564 static int __ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
1565 {
1566  struct ap_queue_status status;
1567 
1568  if (list_empty(&ap_dev->requestq) &&
1569  ap_dev->queue_count < ap_dev->queue_depth) {
1570  status = __ap_send(ap_dev->qid, ap_msg->psmid,
1571  ap_msg->message, ap_msg->length,
1572  ap_msg->special);
1573  switch (status.response_code) {
1574  case AP_RESPONSE_NORMAL:
1575  list_add_tail(&ap_msg->list, &ap_dev->pendingq);
1576  atomic_inc(&ap_poll_requests);
1577  ap_dev->pendingq_count++;
1578  ap_increase_queue_count(ap_dev);
1579  ap_dev->total_request_count++;
1580  break;
1581  case AP_RESPONSE_Q_FULL:
1582  case AP_RESPONSE_RESET_IN_PROGRESS:
1583  list_add_tail(&ap_msg->list, &ap_dev->requestq);
1584  ap_dev->requestq_count++;
1585  ap_dev->total_request_count++;
1586  return -EBUSY;
1587  case AP_RESPONSE_REQUEST_TOO_BIG:
1588  case AP_RESPONSE_MESSAGE_TOO_BIG:
1589  ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-EINVAL));
1590  return -EINVAL;
1591  default: /* Device is gone. */
1592  ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
1593  return -ENODEV;
1594  }
1595  } else {
1596  list_add_tail(&ap_msg->list, &ap_dev->requestq);
1597  ap_dev->requestq_count++;
1598  ap_dev->total_request_count++;
1599  return -EBUSY;
1600  }
1601  ap_schedule_poll_timer();
1602  return 0;
1603 }
1604 
1605 void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
1606 {
1607  unsigned long flags;
1608  int rc;
1609 
1610  /* For asynchronous message handling a valid receive-callback
1611  * is required. */
1612  BUG_ON(!ap_msg->receive);
1613 
1614  spin_lock_bh(&ap_dev->lock);
1615  if (!ap_dev->unregistered) {
1616  /* Make room on the queue by polling for finished requests. */
1617  rc = ap_poll_queue(ap_dev, &flags);
1618  if (!rc)
1619  rc = __ap_queue_message(ap_dev, ap_msg);
1620  if (!rc)
1621  wake_up(&ap_poll_wait);
1622  if (rc == -ENODEV)
1623  ap_dev->unregistered = 1;
1624  } else {
1625  ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
1626  rc = -ENODEV;
1627  }
1628  spin_unlock_bh(&ap_dev->lock);
1629  if (rc == -ENODEV)
1630  device_unregister(&ap_dev->device);
1631 }
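A minimal sketch, assuming the struct ap_message layout from ap_bus.h, of what a caller has to set up before ap_queue_message(); my_receive and my_submit are hypothetical names, and ap_init_message() is assumed to be the small initializer helper from ap_bus.h.

/* Hedged sketch, not part of ap_bus.c: the caller-side contract of
 * ap_queue_message(). The receive callback is mandatory (see the BUG_ON
 * above) and may be invoked with an ERR_PTR() instead of a real reply. */
static void my_receive(struct ap_device *ap_dev, struct ap_message *msg,
		       struct ap_message *reply)
{
	if (IS_ERR(reply))
		return;			/* device vanished or bad request */
	/* copy reply->message / reply->length into caller state here */
}

static void my_submit(struct ap_device *ap_dev, void *buf, size_t len)
{
	struct ap_message ap_msg;

	ap_init_message(&ap_msg);
	ap_msg.message = buf;
	ap_msg.length = len;
	ap_msg.psmid = 0x0102030405060708ULL;
	ap_msg.receive = my_receive;
	/* NB: the message must stay alive until my_receive has run; real
	 * callers wait for completion before leaving this stack frame. */
	ap_queue_message(ap_dev, &ap_msg);
}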
1633 
1644 void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
1645 {
1646  struct ap_message *tmp;
1647 
1648  spin_lock_bh(&ap_dev->lock);
1649  if (!list_empty(&ap_msg->list)) {
1650  list_for_each_entry(tmp, &ap_dev->pendingq, list)
1651  if (tmp->psmid == ap_msg->psmid) {
1652  ap_dev->pendingq_count--;
1653  goto found;
1654  }
1655  ap_dev->requestq_count--;
1656  found:
1657  list_del_init(&ap_msg->list);
1658  }
1659  spin_unlock_bh(&ap_dev->lock);
1660 }
1662 
1669 static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused)
1670 {
1671  tasklet_schedule(&ap_tasklet);
1672  return HRTIMER_NORESTART;
1673 }
1674 
1682 static void ap_reset(struct ap_device *ap_dev)
1683 {
1684  int rc;
1685 
1686  ap_dev->reset = AP_RESET_IGNORE;
1687  atomic_sub(ap_dev->queue_count, &ap_poll_requests);
1688  ap_dev->queue_count = 0;
1689  list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
1690  ap_dev->requestq_count += ap_dev->pendingq_count;
1691  ap_dev->pendingq_count = 0;
1692  rc = ap_init_queue(ap_dev->qid);
1693  if (rc == -ENODEV)
1694  ap_dev->unregistered = 1;
1695  else
1696  __ap_schedule_poll_timer();
1697 }
1698 
1699 static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags)
1700 {
1701  if (!ap_dev->unregistered) {
1702  if (ap_poll_queue(ap_dev, flags))
1703  ap_dev->unregistered = 1;
1704  if (ap_dev->reset == AP_RESET_DO)
1705  ap_reset(ap_dev);
1706  }
1707  return 0;
1708 }
1709 
1718 static void ap_poll_all(unsigned long dummy)
1719 {
1720  unsigned long flags;
1721  struct ap_device *ap_dev;
1722 
1723  /* Reset the indicator if interrupts are used. Thus new interrupts can
1724  * be received. Doing this at the beginning of the tasklet is therefore
1725  * important so that no requests on any AP get lost.
1726  */
1727  if (ap_using_interrupts())
1728  xchg((u8 *)ap_interrupt_indicator, 0);
1729  do {
1730  flags = 0;
1731  spin_lock(&ap_device_list_lock);
1732  list_for_each_entry(ap_dev, &ap_device_list, list) {
1733  spin_lock(&ap_dev->lock);
1734  __ap_poll_device(ap_dev, &flags);
1735  spin_unlock(&ap_dev->lock);
1736  }
1737  spin_unlock(&ap_device_list_lock);
1738  } while (flags & 1);
1739  if (flags & 2)
1740  ap_schedule_poll_timer();
1741 }
1742 
1753 static int ap_poll_thread(void *data)
1754 {
1755  DECLARE_WAITQUEUE(wait, current);
1756  unsigned long flags;
1757  int requests;
1758  struct ap_device *ap_dev;
1759 
1760  set_user_nice(current, 19);
1761  while (1) {
1762  if (ap_suspend_flag)
1763  return 0;
1764  if (need_resched()) {
1765  schedule();
1766  continue;
1767  }
1768  add_wait_queue(&ap_poll_wait, &wait);
1769  set_current_state(TASK_INTERRUPTIBLE);
1770  if (kthread_should_stop())
1771  break;
1772  requests = atomic_read(&ap_poll_requests);
1773  if (requests <= 0)
1774  schedule();
1775  set_current_state(TASK_RUNNING);
1776  remove_wait_queue(&ap_poll_wait, &wait);
1777 
1778  flags = 0;
1779  spin_lock_bh(&ap_device_list_lock);
1780  list_for_each_entry(ap_dev, &ap_device_list, list) {
1781  spin_lock(&ap_dev->lock);
1782  __ap_poll_device(ap_dev, &flags);
1783  spin_unlock(&ap_dev->lock);
1784  }
1785  spin_unlock_bh(&ap_device_list_lock);
1786  }
1787  set_current_state(TASK_RUNNING);
1788  remove_wait_queue(&ap_poll_wait, &wait);
1789  return 0;
1790 }
1791 
1792 static int ap_poll_thread_start(void)
1793 {
1794  int rc;
1795 
1796  if (ap_using_interrupts() || ap_suspend_flag)
1797  return 0;
1798  mutex_lock(&ap_poll_thread_mutex);
1799  if (!ap_poll_kthread) {
1800  ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll");
1801  rc = IS_ERR(ap_poll_kthread) ? PTR_ERR(ap_poll_kthread) : 0;
1802  if (rc)
1803  ap_poll_kthread = NULL;
1804  }
1805  else
1806  rc = 0;
1807  mutex_unlock(&ap_poll_thread_mutex);
1808  return rc;
1809 }
1810 
1811 static void ap_poll_thread_stop(void)
1812 {
1813  mutex_lock(&ap_poll_thread_mutex);
1814  if (ap_poll_kthread) {
1815  kthread_stop(ap_poll_kthread);
1816  ap_poll_kthread = NULL;
1817  }
1818  mutex_unlock(&ap_poll_thread_mutex);
1819 }
1820 
1827 static void ap_request_timeout(unsigned long data)
1828 {
1829  struct ap_device *ap_dev = (struct ap_device *) data;
1830 
1831  if (ap_dev->reset == AP_RESET_ARMED) {
1832  ap_dev->reset = AP_RESET_DO;
1833 
1834  if (ap_using_interrupts())
1835  tasklet_schedule(&ap_tasklet);
1836  }
1837 }
1838 
1839 static void ap_reset_domain(void)
1840 {
1841  int i;
1842 
1843  if (ap_domain_index != -1)
1844  for (i = 0; i < AP_DEVICES; i++)
1845  ap_reset_queue(AP_MKQID(i, ap_domain_index));
1846 }
1847 
1848 static void ap_reset_all(void)
1849 {
1850  int i, j;
1851 
1852  for (i = 0; i < AP_DOMAINS; i++)
1853  for (j = 0; j < AP_DEVICES; j++)
1854  ap_reset_queue(AP_MKQID(j, i));
1855 }
1856 
1857 static struct reset_call ap_reset_call = {
1858  .fn = ap_reset_all,
1859 };
1860 
1866 int __init ap_module_init(void)
1867 {
1868  int rc, i;
1869 
1870  if (ap_domain_index < -1 || ap_domain_index >= AP_DOMAINS) {
1871  pr_warning("%d is not a valid cryptographic domain\n",
1872  ap_domain_index);
1873  return -EINVAL;
1874  }
1875  /* In resume callback we need to know if the user had set the domain.
1876  * If so, we can not just reset it.
1877  */
1878  if (ap_domain_index >= 0)
1879  user_set_domain = 1;
1880 
1881  if (ap_instructions_available() != 0) {
1882  pr_warning("The hardware system does not support "
1883  "AP instructions\n");
1884  return -ENODEV;
1885  }
1886  if (ap_interrupts_available()) {
1887  isc_register(AP_ISC);
1888  ap_interrupt_indicator = s390_register_adapter_interrupt(
1889  &ap_interrupt_handler, NULL, AP_ISC);
1890  if (IS_ERR(ap_interrupt_indicator)) {
1891  ap_interrupt_indicator = NULL;
1892  isc_unregister(AP_ISC);
1893  }
1894  }
1895 
1896  register_reset_call(&ap_reset_call);
1897 
1898  /* Create /sys/bus/ap. */
1899  rc = bus_register(&ap_bus_type);
1900  if (rc)
1901  goto out;
1902  for (i = 0; ap_bus_attrs[i]; i++) {
1903  rc = bus_create_file(&ap_bus_type, ap_bus_attrs[i]);
1904  if (rc)
1905  goto out_bus;
1906  }
1907 
1908  /* Create /sys/devices/ap. */
1909  ap_root_device = root_device_register("ap");
1910  rc = IS_ERR(ap_root_device) ? PTR_ERR(ap_root_device) : 0;
1911  if (rc)
1912  goto out_bus;
1913 
1914  ap_work_queue = create_singlethread_workqueue("kapwork");
1915  if (!ap_work_queue) {
1916  rc = -ENOMEM;
1917  goto out_root;
1918  }
1919 
1920  ap_query_configuration();
1921  if (ap_select_domain() == 0)
1922  ap_scan_bus(NULL);
1923 
1924  /* Setup the AP bus rescan timer. */
1925  init_timer(&ap_config_timer);
1926  ap_config_timer.function = ap_config_timeout;
1927  ap_config_timer.data = 0;
1928  ap_config_timer.expires = jiffies + ap_config_time * HZ;
1929  add_timer(&ap_config_timer);
1930 
1931  /* Setup the high resolution poll timer.
1932  * If we are running under z/VM adjust polling to z/VM polling rate.
1933  */
1934  if (MACHINE_IS_VM)
1935  poll_timeout = 1500000;
1936  spin_lock_init(&ap_poll_timer_lock);
1937  hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
1938  ap_poll_timer.function = ap_poll_timeout;
1939 
1940  /* Start the low priority AP bus poll thread. */
1941  if (ap_thread_flag) {
1942  rc = ap_poll_thread_start();
1943  if (rc)
1944  goto out_work;
1945  }
1946 
1947  return 0;
1948 
1949 out_work:
1950  del_timer_sync(&ap_config_timer);
1951  hrtimer_cancel(&ap_poll_timer);
1952  destroy_workqueue(ap_work_queue);
1953 out_root:
1954  root_device_unregister(ap_root_device);
1955 out_bus:
1956  while (i--)
1957  bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
1958  bus_unregister(&ap_bus_type);
1959 out:
1960  unregister_reset_call(&ap_reset_call);
1961  if (ap_using_interrupts()) {
1962  s390_unregister_adapter_interrupt(ap_interrupt_indicator, AP_ISC);
1963  isc_unregister(AP_ISC);
1964  }
1965  return rc;
1966 }
1967 
1968 static int __ap_match_all(struct device *dev, void *data)
1969 {
1970  return 1;
1971 }
1972 
1978 void ap_module_exit(void)
1979 {
1980  int i;
1981  struct device *dev;
1982 
1983  ap_reset_domain();
1984  ap_poll_thread_stop();
1985  del_timer_sync(&ap_config_timer);
1986  hrtimer_cancel(&ap_poll_timer);
1987  destroy_workqueue(ap_work_queue);
1988  tasklet_kill(&ap_tasklet);
1989  root_device_unregister(ap_root_device);
1990  while ((dev = bus_find_device(&ap_bus_type, NULL, NULL,
1991  __ap_match_all)))
1992  {
1993  device_unregister(dev);
1994  put_device(dev);
1995  }
1996  for (i = 0; ap_bus_attrs[i]; i++)
1997  bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
1998  bus_unregister(&ap_bus_type);
1999  unregister_reset_call(&ap_reset_call);
2000  if (ap_using_interrupts()) {
2001  s390_unregister_adapter_interrupt(ap_interrupt_indicator, AP_ISC);
2002  isc_unregister(AP_ISC);
2003  }
2004 }
2005