Linux Kernel 3.7.1
share.c
1 /*
2  * Parallel-port resource manager code.
3  *
4  * Authors: David Campbell <[email protected]>
5  * Tim Waugh <[email protected]>
6  * Jose Renau <[email protected]>
7  * Philip Blundell <[email protected]>
8  * Andrea Arcangeli
9  *
10  * based on work by Grant Guenther <[email protected]>
11  * and Philip Blundell
12  *
13  * Any part of this program may be used in documents licensed under
14  * the GNU Free Documentation License, Version 1.1 or any later version
15  * published by the Free Software Foundation.
16  */
17 
18 #undef PARPORT_DEBUG_SHARING /* undef for production */
19 
20 #include <linux/module.h>
21 #include <linux/string.h>
22 #include <linux/threads.h>
23 #include <linux/parport.h>
24 #include <linux/delay.h>
25 #include <linux/errno.h>
26 #include <linux/interrupt.h>
27 #include <linux/ioport.h>
28 #include <linux/kernel.h>
29 #include <linux/slab.h>
30 #include <linux/sched.h>
31 #include <linux/kmod.h>
32 
33 #include <linux/spinlock.h>
34 #include <linux/mutex.h>
35 #include <asm/irq.h>
36 
37 #undef PARPORT_PARANOID
38 
39 #define PARPORT_DEFAULT_TIMESLICE (HZ/5)
40 
41 unsigned long parport_default_timeslice = PARPORT_DEFAULT_TIMESLICE;
42 int parport_default_spintime =  DEFAULT_SPIN_TIME;
43 
44 static LIST_HEAD(portlist);
45 static DEFINE_SPINLOCK(parportlist_lock);
46 
47 /* list of all allocated ports, sorted by ->number */
48 static LIST_HEAD(all_ports);
49 static DEFINE_SPINLOCK(full_list_lock);
50 
51 static LIST_HEAD(drivers);
52 
53 static DEFINE_MUTEX(registration_lock);
54 
55 /* What you can do to a port that's gone away.. */
56 static void dead_write_lines (struct parport *p, unsigned char b){}
57 static unsigned char dead_read_lines (struct parport *p) { return 0; }
58 static unsigned char dead_frob_lines (struct parport *p, unsigned char b,
59  unsigned char c) { return 0; }
60 static void dead_onearg (struct parport *p){}
61 static void dead_initstate (struct pardevice *d, struct parport_state *s) { }
62 static void dead_state (struct parport *p, struct parport_state *s) { }
63 static size_t dead_write (struct parport *p, const void *b, size_t l, int f)
64 { return 0; }
65 static size_t dead_read (struct parport *p, void *b, size_t l, int f)
66 { return 0; }
67 static struct parport_operations dead_ops = {
68  .write_data = dead_write_lines, /* data */
69  .read_data = dead_read_lines,
70 
71  .write_control = dead_write_lines, /* control */
72  .read_control = dead_read_lines,
73  .frob_control = dead_frob_lines,
74 
75  .read_status = dead_read_lines, /* status */
76 
77  .enable_irq = dead_onearg, /* enable_irq */
78  .disable_irq = dead_onearg, /* disable_irq */
79 
80  .data_forward = dead_onearg, /* data_forward */
81  .data_reverse = dead_onearg, /* data_reverse */
82 
83  .init_state = dead_initstate, /* init_state */
84  .save_state = dead_state,
85  .restore_state = dead_state,
86 
87  .epp_write_data = dead_write, /* epp */
88  .epp_read_data = dead_read,
89  .epp_write_addr = dead_write,
90  .epp_read_addr = dead_read,
91 
92  .ecp_write_data = dead_write, /* ecp */
93  .ecp_read_data = dead_read,
94  .ecp_write_addr = dead_write,
95 
96  .compat_write_data = dead_write, /* compat */
97  .nibble_read_data = dead_read, /* nibble */
98  .byte_read_data = dead_read, /* byte */
99 
100  .owner = NULL,
101 };
102 
103 /* Call attach(port) for each registered driver. */
104 static void attach_driver_chain(struct parport *port)
105 {
106  /* caller has exclusive registration_lock */
107  struct parport_driver *drv;
108  list_for_each_entry(drv, &drivers, list)
109  drv->attach(port);
110 }
111 
112 /* Call detach(port) for each registered driver. */
113 static void detach_driver_chain(struct parport *port)
114 {
115  struct parport_driver *drv;
116  /* caller has exclusive registration_lock */
117  list_for_each_entry(drv, &drivers, list)
118  drv->detach (port);
119 }
120 
121 /* Ask kmod for some lowlevel drivers. */
122 static void get_lowlevel_driver (void)
123 {
124  /* There is no actual module called this: you should set
125  * up an alias for modutils. */
126  request_module ("parport_lowlevel");
127 }
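
/*
 * Illustration (an assumption about a typical setup, not part of share.c):
 * "parport_lowlevel" is only a module alias.  Distributions usually supply
 * it through modprobe configuration, for example a line like
 *
 *     alias parport_lowlevel parport_pc
 *
 * in a file under /etc/modprobe.d/, so that the request_module() call above
 * loads whichever low-level port driver the machine actually uses.
 */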
128 
154 int parport_register_driver (struct parport_driver *drv)
155 {
156  struct parport *port;
157 
158  if (list_empty(&portlist))
159  get_lowlevel_driver ();
160 
161  mutex_lock(&registration_lock);
162  list_for_each_entry(port, &portlist, list)
163  drv->attach(port);
164  list_add(&drv->list, &drivers);
165  mutex_unlock(&registration_lock);
166 
167  return 0;
168 }
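
/*
 * Illustrative sketch (not part of share.c): how a client module might use
 * parport_register_driver()/parport_unregister_driver().  The names
 * "example_attach", "example_detach" and "example_driver" are made up for
 * this example.
 */

#include <linux/module.h>
#include <linux/parport.h>

static void example_attach(struct parport *port)
{
	/* Called (under the registration lock) for every known port. */
	pr_info("example: attached to %s\n", port->name);
}

static void example_detach(struct parport *port)
{
	/* Called when the port goes away; drop anything tied to it. */
	pr_info("example: detached from %s\n", port->name);
}

static struct parport_driver example_driver = {
	.name   = "example",
	.attach = example_attach,
	.detach = example_detach,
};

static int __init example_init(void)
{
	return parport_register_driver(&example_driver);
}

static void __exit example_exit(void)
{
	parport_unregister_driver(&example_driver);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");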
169 
187 void parport_unregister_driver (struct parport_driver *drv)
188 {
189  struct parport *port;
190 
191  mutex_lock(&registration_lock);
192  list_del_init(&drv->list);
193  list_for_each_entry(port, &portlist, list)
194  drv->detach(port);
195  mutex_unlock(&registration_lock);
196 }
197 
198 static void free_port (struct parport *port)
199 {
200  int d;
201  spin_lock(&full_list_lock);
202  list_del(&port->full_list);
203  spin_unlock(&full_list_lock);
204  for (d = 0; d < 5; d++) {
205  kfree(port->probe_info[d].class_name);
206  kfree(port->probe_info[d].mfr);
207  kfree(port->probe_info[d].model);
208  kfree(port->probe_info[d].cmdset);
209  kfree(port->probe_info[d].description);
210  }
211 
212  kfree(port->name);
213  kfree(port);
214 }
215 
224 struct parport *parport_get_port (struct parport *port)
225 {
226  atomic_inc (&port->ref_count);
227  return port;
228 }
229 
238 void parport_put_port (struct parport *port)
239 {
240  if (atomic_dec_and_test (&port->ref_count))
241  /* Can destroy it now. */
242  free_port (port);
243 
244  return;
245 }
246 
276 struct parport *parport_register_port(unsigned long base, int irq, int dma,
277  struct parport_operations *ops)
278 {
279  struct list_head *l;
280  struct parport *tmp;
281  int num;
282  int device;
283  char *name;
284 
285  tmp = kmalloc(sizeof(struct parport), GFP_KERNEL);
286  if (!tmp) {
287  printk(KERN_WARNING "parport: memory squeeze\n");
288  return NULL;
289  }
290 
291  /* Init our structure */
292  memset(tmp, 0, sizeof(struct parport));
293  tmp->base = base;
294  tmp->irq = irq;
295  tmp->dma = dma;
296  tmp->muxport = tmp->daisy = tmp->muxsel = -1;
297  tmp->modes = 0;
298  INIT_LIST_HEAD(&tmp->list);
299  tmp->devices = tmp->cad = NULL;
300  tmp->flags = 0;
301  tmp->ops = ops;
302  tmp->physport = tmp;
303  memset (tmp->probe_info, 0, 5 * sizeof (struct parport_device_info));
304  rwlock_init(&tmp->cad_lock);
307  tmp->ieee1284.mode = IEEE1284_MODE_COMPAT;
308  tmp->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
309  sema_init(&tmp->ieee1284.irq, 0);
310  tmp->spintime = parport_default_spintime;
311  atomic_set (&tmp->ref_count, 1);
312  INIT_LIST_HEAD(&tmp->full_list);
313 
314  name = kmalloc(15, GFP_KERNEL);
315  if (!name) {
316  printk(KERN_ERR "parport: memory squeeze\n");
317  kfree(tmp);
318  return NULL;
319  }
320  /* Search for the lowest free parport number. */
321 
322  spin_lock(&full_list_lock);
323  for (l = all_ports.next, num = 0; l != &all_ports; l = l->next, num++) {
324  struct parport *p = list_entry(l, struct parport, full_list);
325  if (p->number != num)
326  break;
327  }
328  tmp->portnum = tmp->number = num;
329  list_add_tail(&tmp->full_list, l);
330  spin_unlock(&full_list_lock);
331 
332  /*
333  * Now that the portnum is known finish doing the Init.
334  */
335  sprintf(name, "parport%d", tmp->portnum = tmp->number);
336  tmp->name = name;
337 
338  for (device = 0; device < 5; device++)
339  /* assume the worst */
340  tmp->probe_info[device].class = PARPORT_CLASS_LEGACY;
341 
342  tmp->waithead = tmp->waittail = NULL;
343 
344  return tmp;
345 }
346 
359 void parport_announce_port (struct parport *port)
360 {
361  int i;
362 
363 #ifdef CONFIG_PARPORT_1284
364  /* Analyse the IEEE1284.3 topology of the port. */
365  parport_daisy_init(port);
366 #endif
367 
368  if (!port->dev)
369  printk(KERN_WARNING "%s: fix this legacy "
370  "no-device port driver!\n",
371  port->name);
372 
373  parport_proc_register(port);
374  mutex_lock(&registration_lock);
375  spin_lock_irq(&parportlist_lock);
376  list_add_tail(&port->list, &portlist);
377  for (i = 1; i < 3; i++) {
378  struct parport *slave = port->slaves[i-1];
379  if (slave)
380  list_add_tail(&slave->list, &portlist);
381  }
382  spin_unlock_irq(&parportlist_lock);
383 
384  /* Let drivers know that new port(s) has arrived. */
385  attach_driver_chain (port);
386  for (i = 1; i < 3; i++) {
387  struct parport *slave = port->slaves[i-1];
388  if (slave)
389  attach_driver_chain(slave);
390  }
391  mutex_unlock(&registration_lock);
392 }
393 
413 void parport_remove_port(struct parport *port)
414 {
415  int i;
416 
417  mutex_lock(&registration_lock);
418 
419  /* Spread the word. */
420  detach_driver_chain (port);
421 
422 #ifdef CONFIG_PARPORT_1284
423  /* Forget the IEEE1284.3 topology of the port. */
424  parport_daisy_fini(port);
425  for (i = 1; i < 3; i++) {
426  struct parport *slave = port->slaves[i-1];
427  if (!slave)
428  continue;
429  detach_driver_chain(slave);
430  parport_daisy_fini(slave);
431  }
432 #endif
433 
434  port->ops = &dead_ops;
435  spin_lock(&parportlist_lock);
436  list_del_init(&port->list);
437  for (i = 1; i < 3; i++) {
438  struct parport *slave = port->slaves[i-1];
439  if (slave)
440  list_del_init(&slave->list);
441  }
442  spin_unlock(&parportlist_lock);
443 
444  mutex_unlock(&registration_lock);
445 
446  parport_proc_unregister(port);
447 
448  for (i = 1; i < 3; i++) {
449  struct parport *slave = port->slaves[i-1];
450  if (slave)
451  parport_put_port(slave);
452  }
453 }
454 
524 struct pardevice *
525 parport_register_device(struct parport *port, const char *name,
526  int (*pf)(void *), void (*kf)(void *),
527  void (*irq_func)(void *),
528  int flags, void *handle)
529 {
530  struct pardevice *tmp;
531 
532  if (port->physport->flags & PARPORT_FLAG_EXCL) {
533  /* An exclusive device is registered. */
534  printk (KERN_DEBUG "%s: no more devices allowed\n",
535  port->name);
536  return NULL;
537  }
538 
539  if (flags & PARPORT_DEV_LURK) {
540  if (!pf || !kf) {
541  printk(KERN_INFO "%s: refused to register lurking device (%s) without callbacks\n", port->name, name);
542  return NULL;
543  }
544  }
545 
546  /* We up our own module reference count, and that of the port
547  on which a device is to be registered, to ensure that
548  neither of us gets unloaded while we sleep in (e.g.)
549  kmalloc.
550  */
551  if (!try_module_get(port->ops->owner)) {
552  return NULL;
553  }
554 
555  parport_get_port (port);
556 
557  tmp = kmalloc(sizeof(struct pardevice), GFP_KERNEL);
558  if (tmp == NULL) {
559  printk(KERN_WARNING "%s: memory squeeze, couldn't register %s.\n", port->name, name);
560  goto out;
561  }
562 
563  tmp->state = kmalloc(sizeof(struct parport_state), GFP_KERNEL);
564  if (tmp->state == NULL) {
565  printk(KERN_WARNING "%s: memory squeeze, couldn't register %s.\n", port->name, name);
566  goto out_free_pardevice;
567  }
568 
569  tmp->name = name;
570  tmp->port = port;
571  tmp->daisy = -1;
572  tmp->preempt = pf;
573  tmp->wakeup = kf;
574  tmp->private = handle;
575  tmp->flags = flags;
576  tmp->irq_func = irq_func;
577  tmp->waiting = 0;
578  tmp->timeout = 5 * HZ;
579 
580  /* Chain this onto the list */
581  tmp->prev = NULL;
582  /*
583  * This function must not run from an irq handler so we don't need
584  * to clear irq on the local CPU. -arca
585  */
586  spin_lock(&port->physport->pardevice_lock);
587 
588  if (flags & PARPORT_DEV_EXCL) {
589  if (port->physport->devices) {
590  spin_unlock (&port->physport->pardevice_lock);
591  printk (KERN_DEBUG
592  "%s: cannot grant exclusive access for "
593  "device %s\n", port->name, name);
594  goto out_free_all;
595  }
596  port->flags |= PARPORT_FLAG_EXCL;
597  }
598 
599  tmp->next = port->physport->devices;
600  wmb(); /* Make sure that tmp->next is written before it's
601  added to the list; see comments marked 'no locking
602  required' */
603  if (port->physport->devices)
604  port->physport->devices->prev = tmp;
605  port->physport->devices = tmp;
606  spin_unlock(&port->physport->pardevice_lock);
607 
608  init_waitqueue_head(&tmp->wait_q);
609  tmp->timeslice = parport_default_timeslice;
610  tmp->waitnext = tmp->waitprev = NULL;
611 
612  /*
613  * This has to be run as last thing since init_state may need other
614  * pardevice fields. -arca
615  */
616  port->ops->init_state(tmp, tmp->state);
617  if (!test_and_set_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags)) {
618  port->proc_device = tmp;
619  parport_device_proc_register(tmp);
620  }
621  return tmp;
622 
623  out_free_all:
624  kfree(tmp->state);
625  out_free_pardevice:
626  kfree(tmp);
627  out:
628  parport_put_port (port);
629  module_put(port->ops->owner);
630 
631  return NULL;
632 }
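
/*
 * Illustrative sketch (not part of share.c): registering a device on a port,
 * typically from a driver's attach() callback.  "example_preempt",
 * "example_wakeup" and "example_state" are made-up names; the callback
 * contract (return non-zero from preempt to refuse giving the port up)
 * follows the code above.
 */

#include <linux/parport.h>

struct example_state {
	struct pardevice *pdev;
	int busy;
};

static int example_preempt(void *handle)
{
	struct example_state *st = handle;

	/* Non-zero return refuses preemption while mid-transfer. */
	return st->busy;
}

static void example_wakeup(void *handle)
{
	/* The port has become free; we could claim it again here. */
}

static struct pardevice *example_register(struct parport *port,
					  struct example_state *st)
{
	st->pdev = parport_register_device(port, "example",
					   example_preempt, example_wakeup,
					   NULL /* no IRQ handler */,
					   0, st);
	return st->pdev;
}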
633 
641 void parport_unregister_device(struct pardevice *dev)
642 {
643  struct parport *port;
644 
645 #ifdef PARPORT_PARANOID
646  if (dev == NULL) {
647  printk(KERN_ERR "parport_unregister_device: passed NULL\n");
648  return;
649  }
650 #endif
651 
652  port = dev->port->physport;
653 
654  if (port->proc_device == dev) {
655  port->proc_device = NULL;
656  clear_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags);
657  parport_device_proc_unregister(dev);
658  }
659 
660  if (port->cad == dev) {
661  printk(KERN_DEBUG "%s: %s forgot to release port\n",
662  port->name, dev->name);
663  parport_release (dev);
664  }
665 
666  spin_lock(&port->pardevice_lock);
667  if (dev->next)
668  dev->next->prev = dev->prev;
669  if (dev->prev)
670  dev->prev->next = dev->next;
671  else
672  port->devices = dev->next;
673 
674  if (dev->flags & PARPORT_DEV_EXCL)
675  port->flags &= ~PARPORT_FLAG_EXCL;
676 
677  spin_unlock(&port->pardevice_lock);
678 
679  /* Make sure we haven't left any pointers around in the wait
680  * list. */
681  spin_lock_irq(&port->waitlist_lock);
682  if (dev->waitprev || dev->waitnext || port->waithead == dev) {
683  if (dev->waitprev)
684  dev->waitprev->waitnext = dev->waitnext;
685  else
686  port->waithead = dev->waitnext;
687  if (dev->waitnext)
688  dev->waitnext->waitprev = dev->waitprev;
689  else
690  port->waittail = dev->waitprev;
691  }
692  spin_unlock_irq(&port->waitlist_lock);
693 
694  kfree(dev->state);
695  kfree(dev);
696 
697  module_put(port->ops->owner);
698  parport_put_port (port);
699 }
700 
713 struct parport *parport_find_number (int number)
714 {
715  struct parport *port, *result = NULL;
716 
717  if (list_empty(&portlist))
718  get_lowlevel_driver ();
719 
720  spin_lock (&parportlist_lock);
721  list_for_each_entry(port, &portlist, list) {
722  if (port->number == number) {
723  result = parport_get_port (port);
724  break;
725  }
726  }
727  spin_unlock (&parportlist_lock);
728  return result;
729 }
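
/*
 * Illustrative sketch (not part of share.c): looking a port up by number.
 * parport_find_number() returns the port with its reference count raised,
 * so the caller must balance it with parport_put_port().
 * "example_probe_port0" is a made-up name.
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/parport.h>

static int example_probe_port0(void)
{
	struct parport *port = parport_find_number(0);

	if (!port)
		return -ENODEV;

	pr_info("found %s at base 0x%lx\n", port->name, port->base);

	parport_put_port(port);	/* drop the reference taken by the lookup */
	return 0;
}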
730 
743 struct parport *parport_find_base (unsigned long base)
744 {
745  struct parport *port, *result = NULL;
746 
747  if (list_empty(&portlist))
748  get_lowlevel_driver ();
749 
750  spin_lock (&parportlist_lock);
751  list_for_each_entry(port, &portlist, list) {
752  if (port->base == base) {
753  result = parport_get_port (port);
754  break;
755  }
756  }
757  spin_unlock (&parportlist_lock);
758  return result;
759 }
760 
773 int parport_claim (struct pardevice *dev)
774 {
775  struct pardevice *oldcad;
776  struct parport *port = dev->port->physport;
777  unsigned long flags;
778 
779  if (port->cad == dev) {
780  printk(KERN_INFO "%s: %s already owner\n",
781  dev->port->name,dev->name);
782  return 0;
783  }
784 
785  /* Preempt any current device */
786  write_lock_irqsave (&port->cad_lock, flags);
787  if ((oldcad = port->cad) != NULL) {
788  if (oldcad->preempt) {
789  if (oldcad->preempt(oldcad->private))
790  goto blocked;
791  port->ops->save_state(port, dev->state);
792  } else
793  goto blocked;
794 
795  if (port->cad != oldcad) {
796  /* I think we'll actually deadlock rather than
797  get here, but just in case.. */
798  printk(KERN_WARNING
799  "%s: %s released port when preempted!\n",
800  port->name, oldcad->name);
801  if (port->cad)
802  goto blocked;
803  }
804  }
805 
806  /* Can't fail from now on, so mark ourselves as no longer waiting. */
807  if (dev->waiting & 1) {
808  dev->waiting = 0;
809 
810  /* Take ourselves out of the wait list again. */
811  spin_lock_irq (&port->waitlist_lock);
812  if (dev->waitprev)
813  dev->waitprev->waitnext = dev->waitnext;
814  else
815  port->waithead = dev->waitnext;
816  if (dev->waitnext)
817  dev->waitnext->waitprev = dev->waitprev;
818  else
819  port->waittail = dev->waitprev;
820  spin_unlock_irq (&port->waitlist_lock);
821  dev->waitprev = dev->waitnext = NULL;
822  }
823 
824  /* Now we do the change of devices */
825  port->cad = dev;
826 
827 #ifdef CONFIG_PARPORT_1284
828  /* If it's a mux port, select it. */
829  if (dev->port->muxport >= 0) {
830  /* FIXME */
831  port->muxsel = dev->port->muxport;
832  }
833 
834  /* If it's a daisy chain device, select it. */
835  if (dev->daisy >= 0) {
836  /* This could be lazier. */
837  if (!parport_daisy_select (port, dev->daisy,
838  IEEE1284_MODE_COMPAT))
839  port->daisy = dev->daisy;
840  }
841 #endif /* IEEE1284.3 support */
842 
843  /* Restore control registers */
844  port->ops->restore_state(port, dev->state);
845  write_unlock_irqrestore(&port->cad_lock, flags);
846  dev->time = jiffies;
847  return 0;
848 
849 blocked:
850  /* If this is the first time we tried to claim the port, register an
851  interest. This is only allowed for devices sleeping in
852  parport_claim_or_block(), or those with a wakeup function. */
853 
854  /* The cad_lock is still held for writing here */
855  if (dev->waiting & 2 || dev->wakeup) {
856  spin_lock (&port->waitlist_lock);
857  if (test_and_set_bit(0, &dev->waiting) == 0) {
858  /* First add ourselves to the end of the wait list. */
859  dev->waitnext = NULL;
860  dev->waitprev = port->waittail;
861  if (port->waittail) {
862  port->waittail->waitnext = dev;
863  port->waittail = dev;
864  } else
865  port->waithead = port->waittail = dev;
866  }
867  spin_unlock (&port->waitlist_lock);
868  }
869  write_unlock_irqrestore (&port->cad_lock, flags);
870  return -EAGAIN;
871 }
872 
883 int parport_claim_or_block(struct pardevice *dev)
884 {
885  int r;
886 
887  /* Signal to parport_claim() that we can wait even without a
888  wakeup function. */
889  dev->waiting = 2;
890 
891  /* Try to claim the port. If this fails, we need to sleep. */
892  r = parport_claim(dev);
893  if (r == -EAGAIN) {
894 #ifdef PARPORT_DEBUG_SHARING
895  printk(KERN_DEBUG "%s: parport_claim() returned -EAGAIN\n", dev->name);
896 #endif
897  /*
898  * FIXME!!! Use the proper locking for dev->waiting,
899  * and make this use the "wait_event_interruptible()"
900  * interfaces. The cli/sti that used to be here
901  * did nothing.
902  *
903  * See also parport_release()
904  */
905 
906  /* If dev->waiting is clear now, an interrupt
907  gave us the port and we would deadlock if we slept. */
908  if (dev->waiting) {
909  wait_event_interruptible(dev->wait_q, !dev->waiting);
910  if (signal_pending (current)) {
911  return -EINTR;
912  }
913  r = 1;
914  } else {
915  r = 0;
916 #ifdef PARPORT_DEBUG_SHARING
917  printk(KERN_DEBUG "%s: didn't sleep in parport_claim_or_block()\n",
918  dev->name);
919 #endif
920  }
921 
922 #ifdef PARPORT_DEBUG_SHARING
923  if (dev->port->physport->cad != dev)
924  printk(KERN_DEBUG "%s: exiting parport_claim_or_block "
925  "but %s owns port!\n", dev->name,
926  dev->port->physport->cad ?
927  dev->port->physport->cad->name:"nobody");
928 #endif
929  }
930  dev->waiting = 0;
931  return r;
932 }
933 
943 void parport_release(struct pardevice *dev)
944 {
945  struct parport *port = dev->port->physport;
946  struct pardevice *pd;
947  unsigned long flags;
948 
949  /* Make sure that dev is the current device */
950  write_lock_irqsave(&port->cad_lock, flags);
951  if (port->cad != dev) {
952  write_unlock_irqrestore (&port->cad_lock, flags);
953  printk(KERN_WARNING "%s: %s tried to release parport "
954  "when not owner\n", port->name, dev->name);
955  return;
956  }
957 
958 #ifdef CONFIG_PARPORT_1284
959  /* If this is on a mux port, deselect it. */
960  if (dev->port->muxport >= 0) {
961  /* FIXME */
962  port->muxsel = -1;
963  }
964 
965  /* If this is a daisy device, deselect it. */
966  if (dev->daisy >= 0) {
967  parport_daisy_deselect_all (port);
968  port->daisy = -1;
969  }
970 #endif
971 
972  port->cad = NULL;
973  write_unlock_irqrestore(&port->cad_lock, flags);
974 
975  /* Save control registers */
976  port->ops->save_state(port, dev->state);
977 
978  /* If anybody is waiting, find out who's been there longest and
979  then wake them up. (Note: no locking required) */
980  /* !!! LOCKING IS NEEDED HERE */
981  for (pd = port->waithead; pd; pd = pd->waitnext) {
982  if (pd->waiting & 2) { /* sleeping in claim_or_block */
983  parport_claim(pd);
984  if (waitqueue_active(&pd->wait_q))
985  wake_up_interruptible(&pd->wait_q);
986  return;
987  } else if (pd->wakeup) {
988  pd->wakeup(pd->private);
989  if (dev->port->cad) /* racy but no matter */
990  return;
991  } else {
992  printk(KERN_ERR "%s: don't know how to wake %s\n", port->name, pd->name);
993  }
994  }
995 
996  /* Nobody was waiting, so walk the list to see if anyone is
997  interested in being woken up. (Note: no locking required) */
998  /* !!! LOCKING IS NEEDED HERE */
999  for (pd = port->devices; (port->cad == NULL) && pd; pd = pd->next) {
1000  if (pd->wakeup && pd != dev)
1001  pd->wakeup(pd->private);
1002  }
1003 }
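
/*
 * Illustrative sketch (not part of share.c): the usual claim/use/release
 * cycle around port I/O.  "example_send_byte" is a made-up name, "pdev" is
 * a pardevice registered as in the earlier sketch, and the data-register
 * write through port->ops is just one possible use of the claimed port.
 */

#include <linux/parport.h>

static int example_send_byte(struct pardevice *pdev, unsigned char b)
{
	int ret;

	/* May sleep until the port is free (or a signal arrives). */
	ret = parport_claim_or_block(pdev);
	if (ret < 0)
		return ret;

	pdev->port->ops->write_data(pdev->port, b);

	parport_release(pdev);
	return 0;
}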
1004 
1005 irqreturn_t parport_irq_handler(int irq, void *dev_id)
1006 {
1007  struct parport *port = dev_id;
1008 
1009  parport_generic_irq(port);
1010 
1011  return IRQ_HANDLED;
1012 }
1013 
1014 /* Exported symbols for modules. */
1015 
1016 EXPORT_SYMBOL(parport_claim);
1017 EXPORT_SYMBOL(parport_claim_or_block);
1018 EXPORT_SYMBOL(parport_release);
1019 EXPORT_SYMBOL(parport_register_port);
1020 EXPORT_SYMBOL(parport_announce_port);
1021 EXPORT_SYMBOL(parport_find_number);
1022 EXPORT_SYMBOL(parport_find_base);
1023 EXPORT_SYMBOL(parport_register_driver);
1024 EXPORT_SYMBOL(parport_unregister_driver);
1025 EXPORT_SYMBOL(parport_register_device);
1026 EXPORT_SYMBOL(parport_unregister_device);
1027 EXPORT_SYMBOL(parport_remove_port);
1028 EXPORT_SYMBOL(parport_get_port);
1029 EXPORT_SYMBOL(parport_put_port);
1030 
1031 
1032 MODULE_LICENSE("GPL");