Linux Kernel  3.7.1
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
vmlogrdr.c
Go to the documentation of this file.
1 /*
2  * character device driver for reading z/VM system service records
3  *
4  *
5  * Copyright IBM Corp. 2004, 2009
6  * character device driver for reading z/VM system service records,
7  * Version 1.0
8  * Author(s): Xenia Tkatschow <[email protected]>
9  * Stefan Weinhuber <[email protected]>
10  *
11  */
12 
/* Prefix every pr_*() message in this file with "vmlogrdr: ". */
#define KMSG_COMPONENT "vmlogrdr"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
15 
16 #include <linux/module.h>
17 #include <linux/init.h>
18 #include <linux/slab.h>
19 #include <linux/errno.h>
20 #include <linux/types.h>
21 #include <linux/interrupt.h>
22 #include <linux/spinlock.h>
23 #include <linux/atomic.h>
24 #include <asm/uaccess.h>
25 #include <asm/cpcmd.h>
26 #include <asm/debug.h>
27 #include <asm/ebcdic.h>
28 #include <net/iucv/iucv.h>
29 #include <linux/kmod.h>
30 #include <linux/cdev.h>
31 #include <linux/device.h>
32 #include <linux/string.h>
33 
35  ("(C) 2004 IBM Corporation by Xenia Tkatschow ([email protected])\n"
36  " Stefan Weinhuber ([email protected])");
37 MODULE_DESCRIPTION ("Character device driver for reading z/VM "
38  "system service records.");
39 MODULE_LICENSE("GPL");
40 
41 
/*
 * The size of the buffer for iucv data transfer is one page,
 * but in addition to the data we read from iucv we also
 * place an integer (the record length) and some characters (the FENCE
 * marker) into that buffer, so the maximum size for record data is a
 * little less than one page.
 */
#define NET_BUFFER_SIZE (PAGE_SIZE - sizeof(int) - sizeof(FENCE))
50 
51 /*
52  * The elements that are concurrently accessed by bottom halves are
53  * connection_established, iucv_path_severed, local_interrupt_buffer
54  * and receive_ready. The first three can be protected by
55  * priv_lock. receive_ready is atomic, so it can be incremented and
56  * decremented without holding a lock.
57  * The variable dev_in_use needs to be protected by the lock, since
58  * it's a flag used by open to make sure that the device is opened only
59  * by one user at the same time.
60  */
62  char system_service[8];
63  char internal_name[8];
64  char recording_name[8];
65  struct iucv_path *path;
70  int minor_num;
71  char * buffer;
73  int remaining;
76  int dev_in_use; /* 1: already opened, 0: not opened*/
78  struct device *device;
81  int autopurge;
82 };
83 
84 
85 /*
86  * File operation structure for vmlogrdr devices
87  */
88 static int vmlogrdr_open(struct inode *, struct file *);
89 static int vmlogrdr_release(struct inode *, struct file *);
90 static ssize_t vmlogrdr_read (struct file *filp, char __user *data,
91  size_t count, loff_t * ppos);
92 
93 static const struct file_operations vmlogrdr_fops = {
94  .owner = THIS_MODULE,
95  .open = vmlogrdr_open,
96  .release = vmlogrdr_release,
97  .read = vmlogrdr_read,
98  .llseek = no_llseek,
99 };
100 
101 
102 static void vmlogrdr_iucv_path_complete(struct iucv_path *, u8 ipuser[16]);
103 static void vmlogrdr_iucv_path_severed(struct iucv_path *, u8 ipuser[16]);
104 static void vmlogrdr_iucv_message_pending(struct iucv_path *,
105  struct iucv_message *);
106 
107 
108 static struct iucv_handler vmlogrdr_iucv_handler = {
109  .path_complete = vmlogrdr_iucv_path_complete,
110  .path_severed = vmlogrdr_iucv_path_severed,
111  .message_pending = vmlogrdr_iucv_message_pending,
112 };
113 
114 
115 static DECLARE_WAIT_QUEUE_HEAD(conn_wait_queue);
116 static DECLARE_WAIT_QUEUE_HEAD(read_wait_queue);
117 
118 /*
119  * pointer to system service private structure
120  * minor number 0 --> logrec
121  * minor number 1 --> account
122  * minor number 2 --> symptom
123  */
124 
125 static struct vmlogrdr_priv_t sys_ser[] = {
126  { .system_service = "*LOGREC ",
127  .internal_name = "logrec",
128  .recording_name = "EREP",
129  .minor_num = 0,
130  .buffer_free = 1,
131  .priv_lock = __SPIN_LOCK_UNLOCKED(sys_ser[0].priv_lock),
132  .autorecording = 1,
133  .autopurge = 1,
134  },
135  { .system_service = "*ACCOUNT",
136  .internal_name = "account",
137  .recording_name = "ACCOUNT",
138  .minor_num = 1,
139  .buffer_free = 1,
140  .priv_lock = __SPIN_LOCK_UNLOCKED(sys_ser[1].priv_lock),
141  .autorecording = 1,
142  .autopurge = 1,
143  },
144  { .system_service = "*SYMPTOM",
145  .internal_name = "symptom",
146  .recording_name = "SYMPTOM",
147  .minor_num = 2,
148  .buffer_free = 1,
149  .priv_lock = __SPIN_LOCK_UNLOCKED(sys_ser[2].priv_lock),
150  .autorecording = 1,
151  .autopurge = 1,
152  }
153 };
154 
#define MAXMINOR (sizeof(sys_ser)/sizeof(struct vmlogrdr_priv_t))

/* terminator appended after each complete record: "EOR" plus NUL */
static char FENCE[] = "EOR";
static int vmlogrdr_major;		/* dynamic major, 0 until allocated */
static struct cdev *vmlogrdr_cdev;
static int recording_class_AB;		/* guest has privilege class A or B */
161 
162 
163 static void vmlogrdr_iucv_path_complete(struct iucv_path *path, u8 ipuser[16])
164 {
165  struct vmlogrdr_priv_t * logptr = path->private;
166 
167  spin_lock(&logptr->priv_lock);
168  logptr->connection_established = 1;
169  spin_unlock(&logptr->priv_lock);
170  wake_up(&conn_wait_queue);
171 }
172 
173 
174 static void vmlogrdr_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
175 {
176  struct vmlogrdr_priv_t * logptr = path->private;
177  u8 reason = (u8) ipuser[8];
178 
179  pr_err("vmlogrdr: connection severed with reason %i\n", reason);
180 
181  iucv_path_sever(path, NULL);
182  kfree(path);
183  logptr->path = NULL;
184 
185  spin_lock(&logptr->priv_lock);
186  logptr->connection_established = 0;
187  logptr->iucv_path_severed = 1;
188  spin_unlock(&logptr->priv_lock);
189 
190  wake_up(&conn_wait_queue);
191  /* just in case we're sleeping waiting for a record */
192  wake_up_interruptible(&read_wait_queue);
193 }
194 
195 
196 static void vmlogrdr_iucv_message_pending(struct iucv_path *path,
197  struct iucv_message *msg)
198 {
199  struct vmlogrdr_priv_t * logptr = path->private;
200 
201  /*
202  * This function is the bottom half so it should be quick.
203  * Copy the external interrupt data into our local eib and increment
204  * the usage count
205  */
206  spin_lock(&logptr->priv_lock);
207  memcpy(&logptr->local_interrupt_buffer, msg, sizeof(*msg));
208  atomic_inc(&logptr->receive_ready);
209  spin_unlock(&logptr->priv_lock);
210  wake_up_interruptible(&read_wait_queue);
211 }
212 
213 
214 static int vmlogrdr_get_recording_class_AB(void)
215 {
216  static const char cp_command[] = "QUERY COMMAND RECORDING ";
217  char cp_response[80];
218  char *tail;
219  int len,i;
220 
221  cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
222  len = strnlen(cp_response,sizeof(cp_response));
223  // now the parsing
224  tail=strnchr(cp_response,len,'=');
225  if (!tail)
226  return 0;
227  tail++;
228  if (!strncmp("ANY",tail,3))
229  return 1;
230  if (!strncmp("NONE",tail,4))
231  return 0;
232  /*
233  * expect comma separated list of classes here, if one of them
234  * is A or B return 1 otherwise 0
235  */
236  for (i=tail-cp_response; i<len; i++)
237  if ( cp_response[i]=='A' || cp_response[i]=='B' )
238  return 1;
239  return 0;
240 }
241 
242 
243 static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr,
244  int action, int purge)
245 {
246 
247  char cp_command[80];
248  char cp_response[160];
249  char *onoff, *qid_string;
250  int rc;
251 
252  onoff = ((action == 1) ? "ON" : "OFF");
253  qid_string = ((recording_class_AB == 1) ? " QID * " : "");
254 
255  /*
256  * The recording commands needs to be called with option QID
257  * for guests that have previlege classes A or B.
258  * Purging has to be done as separate step, because recording
259  * can't be switched on as long as records are on the queue.
260  * Doing both at the same time doesn't work.
261  */
262  if (purge && (action == 1)) {
263  memset(cp_command, 0x00, sizeof(cp_command));
264  memset(cp_response, 0x00, sizeof(cp_response));
265  snprintf(cp_command, sizeof(cp_command),
266  "RECORDING %s PURGE %s",
267  logptr->recording_name,
268  qid_string);
269  cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
270  }
271 
272  memset(cp_command, 0x00, sizeof(cp_command));
273  memset(cp_response, 0x00, sizeof(cp_response));
274  snprintf(cp_command, sizeof(cp_command), "RECORDING %s %s %s",
275  logptr->recording_name,
276  onoff,
277  qid_string);
278  cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
279  /* The recording command will usually answer with 'Command complete'
280  * on success, but when the specific service was never connected
281  * before then there might be an additional informational message
282  * 'HCPCRC8072I Recording entry not found' before the
283  * 'Command complete'. So I use strstr rather then the strncmp.
284  */
285  if (strstr(cp_response,"Command complete"))
286  rc = 0;
287  else
288  rc = -EIO;
289  /*
290  * If we turn recording off, we have to purge any remaining records
291  * afterwards, as a large number of queued records may impact z/VM
292  * performance.
293  */
294  if (purge && (action == 0)) {
295  memset(cp_command, 0x00, sizeof(cp_command));
296  memset(cp_response, 0x00, sizeof(cp_response));
297  snprintf(cp_command, sizeof(cp_command),
298  "RECORDING %s PURGE %s",
299  logptr->recording_name,
300  qid_string);
301  cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
302  }
303 
304  return rc;
305 }
306 
307 
308 static int vmlogrdr_open (struct inode *inode, struct file *filp)
309 {
310  int dev_num = 0;
311  struct vmlogrdr_priv_t * logptr = NULL;
312  int connect_rc = 0;
313  int ret;
314 
315  dev_num = iminor(inode);
316  if (dev_num > MAXMINOR)
317  return -ENODEV;
318  logptr = &sys_ser[dev_num];
319 
320  /*
321  * only allow for blocking reads to be open
322  */
323  if (filp->f_flags & O_NONBLOCK)
324  return -EOPNOTSUPP;
325 
326  /* Besure this device hasn't already been opened */
327  spin_lock_bh(&logptr->priv_lock);
328  if (logptr->dev_in_use) {
329  spin_unlock_bh(&logptr->priv_lock);
330  return -EBUSY;
331  }
332  logptr->dev_in_use = 1;
333  logptr->connection_established = 0;
334  logptr->iucv_path_severed = 0;
335  atomic_set(&logptr->receive_ready, 0);
336  logptr->buffer_free = 1;
337  spin_unlock_bh(&logptr->priv_lock);
338 
339  /* set the file options */
340  filp->private_data = logptr;
341  filp->f_op = &vmlogrdr_fops;
342 
343  /* start recording for this service*/
344  if (logptr->autorecording) {
345  ret = vmlogrdr_recording(logptr,1,logptr->autopurge);
346  if (ret)
347  pr_warning("vmlogrdr: failed to start "
348  "recording automatically\n");
349  }
350 
351  /* create connection to the system service */
352  logptr->path = iucv_path_alloc(10, 0, GFP_KERNEL);
353  if (!logptr->path)
354  goto out_dev;
355  connect_rc = iucv_path_connect(logptr->path, &vmlogrdr_iucv_handler,
356  logptr->system_service, NULL, NULL,
357  logptr);
358  if (connect_rc) {
359  pr_err("vmlogrdr: iucv connection to %s "
360  "failed with rc %i \n",
361  logptr->system_service, connect_rc);
362  goto out_path;
363  }
364 
365  /* We've issued the connect and now we must wait for a
366  * ConnectionComplete or ConnectinSevered Interrupt
367  * before we can continue to process.
368  */
369  wait_event(conn_wait_queue, (logptr->connection_established)
370  || (logptr->iucv_path_severed));
371  if (logptr->iucv_path_severed)
372  goto out_record;
373  nonseekable_open(inode, filp);
374  return 0;
375 
376 out_record:
377  if (logptr->autorecording)
378  vmlogrdr_recording(logptr,0,logptr->autopurge);
379 out_path:
380  kfree(logptr->path); /* kfree(NULL) is ok. */
381  logptr->path = NULL;
382 out_dev:
383  logptr->dev_in_use = 0;
384  return -EIO;
385 }
386 
387 
388 static int vmlogrdr_release (struct inode *inode, struct file *filp)
389 {
390  int ret;
391 
392  struct vmlogrdr_priv_t * logptr = filp->private_data;
393 
394  iucv_path_sever(logptr->path, NULL);
395  kfree(logptr->path);
396  logptr->path = NULL;
397  if (logptr->autorecording) {
398  ret = vmlogrdr_recording(logptr,0,logptr->autopurge);
399  if (ret)
400  pr_warning("vmlogrdr: failed to stop "
401  "recording automatically\n");
402  }
403  logptr->dev_in_use = 0;
404 
405  return 0;
406 }
407 
408 
409 static int vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv)
410 {
411  int rc, *temp;
412  /* we need to keep track of two data sizes here:
413  * The number of bytes we need to receive from iucv and
414  * the total number of bytes we actually write into the buffer.
415  */
416  int user_data_count, iucv_data_count;
417  char * buffer;
418 
419  if (atomic_read(&priv->receive_ready)) {
420  spin_lock_bh(&priv->priv_lock);
421  if (priv->residual_length){
422  /* receive second half of a record */
423  iucv_data_count = priv->residual_length;
424  user_data_count = 0;
425  buffer = priv->buffer;
426  } else {
427  /* receive a new record:
428  * We need to return the total length of the record
429  * + size of FENCE in the first 4 bytes of the buffer.
430  */
431  iucv_data_count = priv->local_interrupt_buffer.length;
432  user_data_count = sizeof(int);
433  temp = (int*)priv->buffer;
434  *temp= iucv_data_count + sizeof(FENCE);
435  buffer = priv->buffer + sizeof(int);
436  }
437  /*
438  * If the record is bigger than our buffer, we receive only
439  * a part of it. We can get the rest later.
440  */
441  if (iucv_data_count > NET_BUFFER_SIZE)
442  iucv_data_count = NET_BUFFER_SIZE;
443  rc = iucv_message_receive(priv->path,
444  &priv->local_interrupt_buffer,
445  0, buffer, iucv_data_count,
446  &priv->residual_length);
447  spin_unlock_bh(&priv->priv_lock);
448  /* An rc of 5 indicates that the record was bigger than
449  * the buffer, which is OK for us. A 9 indicates that the
450  * record was purged befor we could receive it.
451  */
452  if (rc == 5)
453  rc = 0;
454  if (rc == 9)
455  atomic_set(&priv->receive_ready, 0);
456  } else {
457  rc = 1;
458  }
459  if (!rc) {
460  priv->buffer_free = 0;
461  user_data_count += iucv_data_count;
462  priv->current_position = priv->buffer;
463  if (priv->residual_length == 0){
464  /* the whole record has been captured,
465  * now add the fence */
466  atomic_dec(&priv->receive_ready);
467  buffer = priv->buffer + user_data_count;
468  memcpy(buffer, FENCE, sizeof(FENCE));
469  user_data_count += sizeof(FENCE);
470  }
471  priv->remaining = user_data_count;
472  }
473 
474  return rc;
475 }
476 
477 
478 static ssize_t vmlogrdr_read(struct file *filp, char __user *data,
479  size_t count, loff_t * ppos)
480 {
481  int rc;
482  struct vmlogrdr_priv_t * priv = filp->private_data;
483 
484  while (priv->buffer_free) {
485  rc = vmlogrdr_receive_data(priv);
486  if (rc) {
487  rc = wait_event_interruptible(read_wait_queue,
488  atomic_read(&priv->receive_ready));
489  if (rc)
490  return rc;
491  }
492  }
493  /* copy only up to end of record */
494  if (count > priv->remaining)
495  count = priv->remaining;
496 
497  if (copy_to_user(data, priv->current_position, count))
498  return -EFAULT;
499 
500  *ppos += count;
501  priv->current_position += count;
502  priv->remaining -= count;
503 
504  /* if all data has been transferred, set buffer free */
505  if (priv->remaining == 0)
506  priv->buffer_free = 1;
507 
508  return count;
509 }
510 
511 static ssize_t vmlogrdr_autopurge_store(struct device * dev,
512  struct device_attribute *attr,
513  const char * buf, size_t count)
514 {
515  struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
516  ssize_t ret = count;
517 
518  switch (buf[0]) {
519  case '0':
520  priv->autopurge=0;
521  break;
522  case '1':
523  priv->autopurge=1;
524  break;
525  default:
526  ret = -EINVAL;
527  }
528  return ret;
529 }
530 
531 
532 static ssize_t vmlogrdr_autopurge_show(struct device *dev,
533  struct device_attribute *attr,
534  char *buf)
535 {
536  struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
537  return sprintf(buf, "%u\n", priv->autopurge);
538 }
539 
540 
541 static DEVICE_ATTR(autopurge, 0644, vmlogrdr_autopurge_show,
542  vmlogrdr_autopurge_store);
543 
544 
545 static ssize_t vmlogrdr_purge_store(struct device * dev,
546  struct device_attribute *attr,
547  const char * buf, size_t count)
548 {
549 
550  char cp_command[80];
551  char cp_response[80];
552  struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
553 
554  if (buf[0] != '1')
555  return -EINVAL;
556 
557  memset(cp_command, 0x00, sizeof(cp_command));
558  memset(cp_response, 0x00, sizeof(cp_response));
559 
560  /*
561  * The recording command needs to be called with option QID
562  * for guests that have previlege classes A or B.
563  * Other guests will not recognize the command and we have to
564  * issue the same command without the QID parameter.
565  */
566 
567  if (recording_class_AB)
568  snprintf(cp_command, sizeof(cp_command),
569  "RECORDING %s PURGE QID * ",
570  priv->recording_name);
571  else
572  snprintf(cp_command, sizeof(cp_command),
573  "RECORDING %s PURGE ",
574  priv->recording_name);
575 
576  cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
577 
578  return count;
579 }
580 
581 
582 static DEVICE_ATTR(purge, 0200, NULL, vmlogrdr_purge_store);
583 
584 
585 static ssize_t vmlogrdr_autorecording_store(struct device *dev,
586  struct device_attribute *attr,
587  const char *buf, size_t count)
588 {
589  struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
590  ssize_t ret = count;
591 
592  switch (buf[0]) {
593  case '0':
594  priv->autorecording=0;
595  break;
596  case '1':
597  priv->autorecording=1;
598  break;
599  default:
600  ret = -EINVAL;
601  }
602  return ret;
603 }
604 
605 
606 static ssize_t vmlogrdr_autorecording_show(struct device *dev,
607  struct device_attribute *attr,
608  char *buf)
609 {
610  struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
611  return sprintf(buf, "%u\n", priv->autorecording);
612 }
613 
614 
615 static DEVICE_ATTR(autorecording, 0644, vmlogrdr_autorecording_show,
616  vmlogrdr_autorecording_store);
617 
618 
619 static ssize_t vmlogrdr_recording_store(struct device * dev,
620  struct device_attribute *attr,
621  const char * buf, size_t count)
622 {
623  struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
624  ssize_t ret;
625 
626  switch (buf[0]) {
627  case '0':
628  ret = vmlogrdr_recording(priv,0,0);
629  break;
630  case '1':
631  ret = vmlogrdr_recording(priv,1,0);
632  break;
633  default:
634  ret = -EINVAL;
635  }
636  if (ret)
637  return ret;
638  else
639  return count;
640 
641 }
642 
643 
644 static DEVICE_ATTR(recording, 0200, NULL, vmlogrdr_recording_store);
645 
646 
647 static ssize_t vmlogrdr_recording_status_show(struct device_driver *driver,
648  char *buf)
649 {
650 
651  static const char cp_command[] = "QUERY RECORDING ";
652  int len;
653 
654  cpcmd(cp_command, buf, 4096, NULL);
655  len = strlen(buf);
656  return len;
657 }
658 static DRIVER_ATTR(recording_status, 0444, vmlogrdr_recording_status_show,
659  NULL);
660 static struct attribute *vmlogrdr_drv_attrs[] = {
661  &driver_attr_recording_status.attr,
662  NULL,
663 };
664 static struct attribute_group vmlogrdr_drv_attr_group = {
665  .attrs = vmlogrdr_drv_attrs,
666 };
667 static const struct attribute_group *vmlogrdr_drv_attr_groups[] = {
668  &vmlogrdr_drv_attr_group,
669  NULL,
670 };
671 
672 static struct attribute *vmlogrdr_attrs[] = {
673  &dev_attr_autopurge.attr,
674  &dev_attr_purge.attr,
675  &dev_attr_autorecording.attr,
676  &dev_attr_recording.attr,
677  NULL,
678 };
679 static struct attribute_group vmlogrdr_attr_group = {
680  .attrs = vmlogrdr_attrs,
681 };
682 static const struct attribute_group *vmlogrdr_attr_groups[] = {
683  &vmlogrdr_attr_group,
684  NULL,
685 };
686 
687 static int vmlogrdr_pm_prepare(struct device *dev)
688 {
689  int rc;
690  struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
691 
692  rc = 0;
693  if (priv) {
694  spin_lock_bh(&priv->priv_lock);
695  if (priv->dev_in_use)
696  rc = -EBUSY;
697  spin_unlock_bh(&priv->priv_lock);
698  }
699  if (rc)
700  pr_err("vmlogrdr: device %s is busy. Refuse to suspend.\n",
701  dev_name(dev));
702  return rc;
703 }
704 
705 
706 static const struct dev_pm_ops vmlogrdr_pm_ops = {
707  .prepare = vmlogrdr_pm_prepare,
708 };
709 
710 static struct class *vmlogrdr_class;
711 static struct device_driver vmlogrdr_driver = {
712  .name = "vmlogrdr",
713  .bus = &iucv_bus,
714  .pm = &vmlogrdr_pm_ops,
715  .groups = vmlogrdr_drv_attr_groups,
716 };
717 
718 static int vmlogrdr_register_driver(void)
719 {
720  int ret;
721 
722  /* Register with iucv driver */
723  ret = iucv_register(&vmlogrdr_iucv_handler, 1);
724  if (ret)
725  goto out;
726 
727  ret = driver_register(&vmlogrdr_driver);
728  if (ret)
729  goto out_iucv;
730 
731  vmlogrdr_class = class_create(THIS_MODULE, "vmlogrdr");
732  if (IS_ERR(vmlogrdr_class)) {
733  ret = PTR_ERR(vmlogrdr_class);
734  vmlogrdr_class = NULL;
735  goto out_driver;
736  }
737  return 0;
738 
739 out_driver:
740  driver_unregister(&vmlogrdr_driver);
741 out_iucv:
742  iucv_unregister(&vmlogrdr_iucv_handler, 1);
743 out:
744  return ret;
745 }
746 
747 
748 static void vmlogrdr_unregister_driver(void)
749 {
750  class_destroy(vmlogrdr_class);
751  vmlogrdr_class = NULL;
752  driver_unregister(&vmlogrdr_driver);
753  iucv_unregister(&vmlogrdr_iucv_handler, 1);
754 }
755 
756 
757 static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
758 {
759  struct device *dev;
760  int ret;
761 
762  dev = kzalloc(sizeof(struct device), GFP_KERNEL);
763  if (dev) {
764  dev_set_name(dev, priv->internal_name);
765  dev->bus = &iucv_bus;
766  dev->parent = iucv_root;
767  dev->driver = &vmlogrdr_driver;
768  dev->groups = vmlogrdr_attr_groups;
769  dev_set_drvdata(dev, priv);
770  /*
771  * The release function could be called after the
772  * module has been unloaded. It's _only_ task is to
773  * free the struct. Therefore, we specify kfree()
774  * directly here. (Probably a little bit obfuscating
775  * but legitime ...).
776  */
777  dev->release = (void (*)(struct device *))kfree;
778  } else
779  return -ENOMEM;
780  ret = device_register(dev);
781  if (ret) {
782  put_device(dev);
783  return ret;
784  }
785 
786  priv->class_device = device_create(vmlogrdr_class, dev,
787  MKDEV(vmlogrdr_major,
788  priv->minor_num),
789  priv, "%s", dev_name(dev));
790  if (IS_ERR(priv->class_device)) {
791  ret = PTR_ERR(priv->class_device);
792  priv->class_device=NULL;
793  device_unregister(dev);
794  return ret;
795  }
796  priv->device = dev;
797  return 0;
798 }
799 
800 
801 static int vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv)
802 {
803  device_destroy(vmlogrdr_class, MKDEV(vmlogrdr_major, priv->minor_num));
804  if (priv->device != NULL) {
805  device_unregister(priv->device);
806  priv->device=NULL;
807  }
808  return 0;
809 }
810 
811 
812 static int vmlogrdr_register_cdev(dev_t dev)
813 {
814  int rc = 0;
815  vmlogrdr_cdev = cdev_alloc();
816  if (!vmlogrdr_cdev) {
817  return -ENOMEM;
818  }
819  vmlogrdr_cdev->owner = THIS_MODULE;
820  vmlogrdr_cdev->ops = &vmlogrdr_fops;
821  vmlogrdr_cdev->dev = dev;
822  rc = cdev_add(vmlogrdr_cdev, vmlogrdr_cdev->dev, MAXMINOR);
823  if (!rc)
824  return 0;
825 
826  // cleanup: cdev is not fully registered, no cdev_del here!
827  kobject_put(&vmlogrdr_cdev->kobj);
828  vmlogrdr_cdev=NULL;
829  return rc;
830 }
831 
832 
833 static void vmlogrdr_cleanup(void)
834 {
835  int i;
836 
837  if (vmlogrdr_cdev) {
838  cdev_del(vmlogrdr_cdev);
839  vmlogrdr_cdev=NULL;
840  }
841  for (i=0; i < MAXMINOR; ++i ) {
842  vmlogrdr_unregister_device(&sys_ser[i]);
843  free_page((unsigned long)sys_ser[i].buffer);
844  }
845  vmlogrdr_unregister_driver();
846  if (vmlogrdr_major) {
847  unregister_chrdev_region(MKDEV(vmlogrdr_major, 0), MAXMINOR);
848  vmlogrdr_major=0;
849  }
850 }
851 
852 
853 static int __init vmlogrdr_init(void)
854 {
855  int rc;
856  int i;
857  dev_t dev;
858 
859  if (! MACHINE_IS_VM) {
860  pr_err("not running under VM, driver not loaded.\n");
861  return -ENODEV;
862  }
863 
864  recording_class_AB = vmlogrdr_get_recording_class_AB();
865 
866  rc = alloc_chrdev_region(&dev, 0, MAXMINOR, "vmlogrdr");
867  if (rc)
868  return rc;
869  vmlogrdr_major = MAJOR(dev);
870 
871  rc=vmlogrdr_register_driver();
872  if (rc)
873  goto cleanup;
874 
875  for (i=0; i < MAXMINOR; ++i ) {
876  sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL);
877  if (!sys_ser[i].buffer) {
878  rc = -ENOMEM;
879  break;
880  }
881  sys_ser[i].current_position = sys_ser[i].buffer;
882  rc=vmlogrdr_register_device(&sys_ser[i]);
883  if (rc)
884  break;
885  }
886  if (rc)
887  goto cleanup;
888 
889  rc = vmlogrdr_register_cdev(dev);
890  if (rc)
891  goto cleanup;
892  return 0;
893 
894 cleanup:
895  vmlogrdr_cleanup();
896  return rc;
897 }
898 
899 
900 static void __exit vmlogrdr_exit(void)
901 {
902  vmlogrdr_cleanup();
903  return;
904 }
905 
906 
907 module_init(vmlogrdr_init);
908 module_exit(vmlogrdr_exit);