Linux Kernel 3.7.1
platform.c
1 /*
2  * platform.c - platform 'pseudo' bus for legacy devices
3  *
4  * Copyright (c) 2002-3 Patrick Mochel
5  * Copyright (c) 2002-3 Open Source Development Labs
6  *
7  * This file is released under the GPLv2
8  *
9  * Please see Documentation/driver-model/platform.txt for more
10  * information.
11  */
12 
13 #include <linux/string.h>
14 #include <linux/platform_device.h>
15 #include <linux/of_device.h>
16 #include <linux/module.h>
17 #include <linux/init.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/bootmem.h>
20 #include <linux/err.h>
21 #include <linux/slab.h>
22 #include <linux/pm_runtime.h>
23 #include <linux/idr.h>
24 
25 #include "base.h"
26 #include "power/power.h"
27 
28 /* For automatically allocated device IDs */
29 static DEFINE_IDA(platform_devid_ida);
30 
31 #define to_platform_driver(drv) (container_of((drv), struct platform_driver, \
32  driver))
33 
34 struct device platform_bus = {
35  .init_name = "platform",
36 };
37 EXPORT_SYMBOL_GPL(platform_bus);
38 
54 void __weak arch_setup_pdev_archdata(struct platform_device *pdev)
55 {
56 }
57 
64 struct resource *platform_get_resource(struct platform_device *dev,
65  unsigned int type, unsigned int num)
66 {
67  int i;
68 
69  for (i = 0; i < dev->num_resources; i++) {
70  struct resource *r = &dev->resource[i];
71 
72  if (type == resource_type(r) && num-- == 0)
73  return r;
74  }
75  return NULL;
76 }
78 
84 int platform_get_irq(struct platform_device *dev, unsigned int num)
85 {
86 #ifdef CONFIG_SPARC
87  /* sparc does not have irqs represented as IORESOURCE_IRQ resources */
88  if (!dev || num >= dev->archdata.num_irqs)
89  return -ENXIO;
90  return dev->archdata.irqs[num];
91 #else
92  struct resource *r = platform_get_resource(dev, IORESOURCE_IRQ, num);
93 
94  return r ? r->start : -ENXIO;
95 #endif
96 }
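Illustrative usage (editorial sketch, not part of platform.c): a minimal probe() routine showing how a driver typically consumes platform_get_resource() and platform_get_irq(). The foo_* names are hypothetical.

#include <linux/platform_device.h>
#include <linux/ioport.h>

/* Hypothetical driver probe(): fetch the first MMIO window and IRQ. */
static int foo_probe(struct platform_device *pdev)
{
	struct resource *mem;
	int irq;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem)
		return -ENODEV;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;	/* -ENXIO when the IRQ is missing */

	dev_info(&pdev->dev, "MMIO at %pR, IRQ %d\n", mem, irq);
	return 0;
}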
98 
105 struct resource *platform_get_resource_byname(struct platform_device *dev,
106  unsigned int type,
107  const char *name)
108 {
109  int i;
110 
111  for (i = 0; i < dev->num_resources; i++) {
112  struct resource *r = &dev->resource[i];
113 
114  if (unlikely(!r->name))
115  continue;
116 
117  if (type == resource_type(r) && !strcmp(r->name, name))
118  return r;
119  }
120  return NULL;
121 }
123 
129 int platform_get_irq_byname(struct platform_device *dev, const char *name)
130 {
131  struct resource *r = platform_get_resource_byname(dev, IORESOURCE_IRQ,
132  name);
133 
134  return r ? r->start : -ENXIO;
135 }
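Illustrative usage (editorial sketch, not part of platform.c): named resources let a driver look up its resources independent of their position in the array. The foo_* identifiers, addresses and resource names are hypothetical.

#include <linux/platform_device.h>
#include <linux/ioport.h>

/* Hypothetical board code: name the resources. */
static struct resource foo_resources[] = {
	DEFINE_RES_MEM_NAMED(0x10000000, 0x1000, "regs"),
	DEFINE_RES_IRQ_NAMED(23, "tx-done"),
};

/* Hypothetical driver side: look them up by name. */
static int foo_probe(struct platform_device *pdev)
{
	struct resource *regs;
	int irq;

	regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
	irq = platform_get_irq_byname(pdev, "tx-done");
	if (!regs || irq < 0)
		return -ENODEV;
	return 0;
}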
137 
143 int platform_add_devices(struct platform_device **devs, int num)
144 {
145  int i, ret = 0;
146 
147  for (i = 0; i < num; i++) {
148  ret = platform_device_register(devs[i]);
149  if (ret) {
150  while (--i >= 0)
151  platform_device_unregister(devs[i]);
152  break;
153  }
154  }
155 
156  return ret;
157 }
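Illustrative usage (editorial sketch, not part of platform.c): a board file registering several statically defined devices in one call; on failure, platform_add_devices() unwinds whatever it had already registered. The foo_* names are hypothetical.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>

static struct platform_device foo_uart_device = {
	.name	= "foo-uart",
	.id	= 0,
};

static struct platform_device foo_eth_device = {
	.name	= "foo-eth",
	.id	= -1,
};

static struct platform_device *foo_devices[] __initdata = {
	&foo_uart_device,
	&foo_eth_device,
};

static int __init foo_board_init(void)
{
	return platform_add_devices(foo_devices, ARRAY_SIZE(foo_devices));
}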
159 
160 struct platform_object {
161  struct platform_device pdev;
162  char name[1];
163 };
164 
172 void platform_device_put(struct platform_device *pdev)
173 {
174  if (pdev)
175  put_device(&pdev->dev);
176 }
178 
179 static void platform_device_release(struct device *dev)
180 {
181  struct platform_object *pa = container_of(dev, struct platform_object,
182  pdev.dev);
183 
184  of_device_node_put(&pa->pdev.dev);
185  kfree(pa->pdev.dev.platform_data);
186  kfree(pa->pdev.mfd_cell);
187  kfree(pa->pdev.resource);
188  kfree(pa);
189 }
190 
199 struct platform_device *platform_device_alloc(const char *name, int id)
200 {
201  struct platform_object *pa;
202 
203  pa = kzalloc(sizeof(struct platform_object) + strlen(name), GFP_KERNEL);
204  if (pa) {
205  strcpy(pa->name, name);
206  pa->pdev.name = pa->name;
207  pa->pdev.id = id;
208  device_initialize(&pa->pdev.dev);
209  pa->pdev.dev.release = platform_device_release;
210  arch_setup_pdev_archdata(&pa->pdev);
211  }
212 
213  return pa ? &pa->pdev : NULL;
214 }
216 
227 int platform_device_add_resources(struct platform_device *pdev,
228  const struct resource *res, unsigned int num)
229 {
230  struct resource *r = NULL;
231 
232  if (res) {
233  r = kmemdup(res, sizeof(struct resource) * num, GFP_KERNEL);
234  if (!r)
235  return -ENOMEM;
236  }
237 
238  kfree(pdev->resource);
239  pdev->resource = r;
240  pdev->num_resources = num;
241  return 0;
242 }
244 
255 int platform_device_add_data(struct platform_device *pdev, const void *data,
256  size_t size)
257 {
258  void *d = NULL;
259 
260  if (data) {
261  d = kmemdup(data, size, GFP_KERNEL);
262  if (!d)
263  return -ENOMEM;
264  }
265 
266  kfree(pdev->dev.platform_data);
267  pdev->dev.platform_data = d;
268  return 0;
269 }
271 
279 int platform_device_add(struct platform_device *pdev)
280 {
281  int i, ret;
282 
283  if (!pdev)
284  return -EINVAL;
285 
286  if (!pdev->dev.parent)
287  pdev->dev.parent = &platform_bus;
288 
289  pdev->dev.bus = &platform_bus_type;
290 
291  switch (pdev->id) {
292  default:
293  dev_set_name(&pdev->dev, "%s.%d", pdev->name, pdev->id);
294  break;
295  case PLATFORM_DEVID_NONE:
296  dev_set_name(&pdev->dev, "%s", pdev->name);
297  break;
298  case PLATFORM_DEVID_AUTO:
299  /*
300  * Automatically allocated device ID. We mark it as such so
301  * that we remember it must be freed, and we append a suffix
302  * to avoid namespace collision with explicit IDs.
303  */
304  ret = ida_simple_get(&platform_devid_ida, 0, 0, GFP_KERNEL);
305  if (ret < 0)
306  goto err_out;
307  pdev->id = ret;
308  pdev->id_auto = true;
309  dev_set_name(&pdev->dev, "%s.%d.auto", pdev->name, pdev->id);
310  break;
311  }
312 
313  for (i = 0; i < pdev->num_resources; i++) {
314  struct resource *p, *r = &pdev->resource[i];
315 
316  if (r->name == NULL)
317  r->name = dev_name(&pdev->dev);
318 
319  p = r->parent;
320  if (!p) {
321  if (resource_type(r) == IORESOURCE_MEM)
322  p = &iomem_resource;
323  else if (resource_type(r) == IORESOURCE_IO)
324  p = &ioport_resource;
325  }
326 
327  if (p && insert_resource(p, r)) {
328  printk(KERN_ERR
329  "%s: failed to claim resource %d\n",
330  dev_name(&pdev->dev), i);
331  ret = -EBUSY;
332  goto failed;
333  }
334  }
335 
336  pr_debug("Registering platform device '%s'. Parent at %s\n",
337  dev_name(&pdev->dev), dev_name(pdev->dev.parent));
338 
339  ret = device_add(&pdev->dev);
340  if (ret == 0)
341  return ret;
342 
343  failed:
344  if (pdev->id_auto) {
345  ida_simple_remove(&platform_devid_ida, pdev->id);
346  pdev->id = PLATFORM_DEVID_AUTO;
347  }
348 
349  while (--i >= 0) {
350  struct resource *r = &pdev->resource[i];
351  unsigned long type = resource_type(r);
352 
353  if (type == IORESOURCE_MEM || type == IORESOURCE_IO)
354  release_resource(r);
355  }
356 
357  err_out:
358  return ret;
359 }
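Illustrative usage (editorial sketch, not part of platform.c): the step-by-step sequence platform_device_alloc() → platform_device_add_resources() → platform_device_add_data() → platform_device_add(), with platform_device_put() as the single error path. The foo_* names, addresses and data are hypothetical.

#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>

struct foo_pdata {
	int	clock_hz;
};

static struct resource foo_res[] = {
	DEFINE_RES_MEM(0x10000000, 0x1000),
	DEFINE_RES_IRQ(23),
};

static struct foo_pdata foo_pdata = {
	.clock_hz = 48000000,
};

static struct platform_device *foo_create_device(void)
{
	struct platform_device *pdev;
	int ret;

	pdev = platform_device_alloc("foo", PLATFORM_DEVID_AUTO);
	if (!pdev)
		return ERR_PTR(-ENOMEM);

	ret = platform_device_add_resources(pdev, foo_res, ARRAY_SIZE(foo_res));
	if (!ret)
		ret = platform_device_add_data(pdev, &foo_pdata, sizeof(foo_pdata));
	if (!ret)
		ret = platform_device_add(pdev);
	if (ret) {
		platform_device_put(pdev);	/* also frees the copied resources/data */
		return ERR_PTR(ret);
	}
	return pdev;
}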
361 
370 void platform_device_del(struct platform_device *pdev)
371 {
372  int i;
373 
374  if (pdev) {
375  device_del(&pdev->dev);
376 
377  if (pdev->id_auto) {
378  ida_simple_remove(&platform_devid_ida, pdev->id);
379  pdev->id = PLATFORM_DEVID_AUTO;
380  }
381 
382  for (i = 0; i < pdev->num_resources; i++) {
383  struct resource *r = &pdev->resource[i];
384  unsigned long type = resource_type(r);
385 
386  if (type == IORESOURCE_MEM || type == IORESOURCE_IO)
387  release_resource(r);
388  }
389  }
390 }
392 
397 int platform_device_register(struct platform_device *pdev)
398 {
399  device_initialize(&pdev->dev);
400  arch_setup_pdev_archdata(pdev);
401  return platform_device_add(pdev);
402 }
404 
413 void platform_device_unregister(struct platform_device *pdev)
414 {
415  platform_device_del(pdev);
416  platform_device_put(pdev);
417 }
419 
428 struct platform_device *platform_device_register_full(
429  const struct platform_device_info *pdevinfo)
430 {
431  int ret = -ENOMEM;
432  struct platform_device *pdev;
433 
434  pdev = platform_device_alloc(pdevinfo->name, pdevinfo->id);
435  if (!pdev)
436  goto err_alloc;
437 
438  pdev->dev.parent = pdevinfo->parent;
439 
440  if (pdevinfo->dma_mask) {
441  /*
442  * This memory isn't freed when the device is put,
443  * I don't have a nice idea for that though. Conceptually
444  * dma_mask in struct device should not be a pointer.
445  * See http://thread.gmane.org/gmane.linux.kernel.pci/9081
446  */
447  pdev->dev.dma_mask =
448  kmalloc(sizeof(*pdev->dev.dma_mask), GFP_KERNEL);
449  if (!pdev->dev.dma_mask)
450  goto err;
451 
452  *pdev->dev.dma_mask = pdevinfo->dma_mask;
453  pdev->dev.coherent_dma_mask = pdevinfo->dma_mask;
454  }
455 
456  ret = platform_device_add_resources(pdev,
457  pdevinfo->res, pdevinfo->num_res);
458  if (ret)
459  goto err;
460 
461  ret = platform_device_add_data(pdev,
462  pdevinfo->data, pdevinfo->size_data);
463  if (ret)
464  goto err;
465 
466  ret = platform_device_add(pdev);
467  if (ret) {
468 err:
469  kfree(pdev->dev.dma_mask);
470 
471 err_alloc:
472  platform_device_put(pdev);
473  return ERR_PTR(ret);
474  }
475 
476  return pdev;
477 }
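Illustrative usage (editorial sketch, not part of platform.c): platform_device_register_full() folds the allocation steps above into one call driven by a struct platform_device_info. The foo_* names and addresses are hypothetical.

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>

static const struct resource foo_res[] = {
	DEFINE_RES_MEM(0x10000000, 0x1000),
	DEFINE_RES_IRQ(23),
};

static const struct platform_device_info foo_devinfo __initconst = {
	.name		= "foo",
	.id		= PLATFORM_DEVID_NONE,
	.res		= foo_res,
	.num_res	= ARRAY_SIZE(foo_res),
	.dma_mask	= DMA_BIT_MASK(32),
};

static int __init foo_board_init(void)
{
	struct platform_device *pdev;

	pdev = platform_device_register_full(&foo_devinfo);
	return IS_ERR(pdev) ? PTR_ERR(pdev) : 0;
}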
479 
480 static int platform_drv_probe(struct device *_dev)
481 {
482  struct platform_driver *drv = to_platform_driver(_dev->driver);
483  struct platform_device *dev = to_platform_device(_dev);
484 
485  return drv->probe(dev);
486 }
487 
488 static int platform_drv_probe_fail(struct device *_dev)
489 {
490  return -ENXIO;
491 }
492 
493 static int platform_drv_remove(struct device *_dev)
494 {
495  struct platform_driver *drv = to_platform_driver(_dev->driver);
496  struct platform_device *dev = to_platform_device(_dev);
497 
498  return drv->remove(dev);
499 }
500 
501 static void platform_drv_shutdown(struct device *_dev)
502 {
503  struct platform_driver *drv = to_platform_driver(_dev->driver);
504  struct platform_device *dev = to_platform_device(_dev);
505 
506  drv->shutdown(dev);
507 }
508 
513 int platform_driver_register(struct platform_driver *drv)
514 {
515  drv->driver.bus = &platform_bus_type;
516  if (drv->probe)
517  drv->driver.probe = platform_drv_probe;
518  if (drv->remove)
519  drv->driver.remove = platform_drv_remove;
520  if (drv->shutdown)
521  drv->driver.shutdown = platform_drv_shutdown;
522 
523  return driver_register(&drv->driver);
524 }
526 
531 void platform_driver_unregister(struct platform_driver *drv)
532 {
533  driver_unregister(&drv->driver);
534 }
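Illustrative usage (editorial sketch, not part of platform.c): the usual shape of a hotplug-capable platform driver, registered with the module_platform_driver() helper, which expands to platform_driver_register()/platform_driver_unregister() in module init/exit. The foo_* names are hypothetical.

#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	dev_info(&pdev->dev, "probed\n");
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver foo_driver = {
	.probe	= foo_probe,
	.remove	= foo_remove,
	.driver	= {
		.name	= "foo",
		.owner	= THIS_MODULE,
	},
};
module_platform_driver(foo_driver);

MODULE_LICENSE("GPL");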
536 
554 int __init_or_module platform_driver_probe(struct platform_driver *drv,
555  int (*probe)(struct platform_device *))
556 {
557  int retval, code;
558 
559  /* make sure driver won't have bind/unbind attributes */
560  drv->driver.suppress_bind_attrs = true;
561 
562  /* temporary section violation during probe() */
563  drv->probe = probe;
564  retval = code = platform_driver_register(drv);
565 
566  /*
567  * Fixup that section violation, being paranoid about code scanning
568  * the list of drivers in order to probe new devices. Check to see
569  * if the probe was successful, and make sure any forced probes of
570  * new devices fail.
571  */
572  spin_lock(&drv->driver.bus->p->klist_drivers.k_lock);
573  drv->probe = NULL;
574  if (code == 0 && list_empty(&drv->driver.p->klist_devices.k_list))
575  retval = -ENODEV;
576  drv->driver.probe = platform_drv_probe_fail;
577  spin_unlock(&drv->driver.bus->p->klist_drivers.k_lock);
578 
579  if (code != retval)
580  platform_driver_unregister(drv);
581  return retval;
582 }
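Illustrative usage (editorial sketch, not part of platform.c): platform_driver_probe() for devices that cannot be hotplugged, so probe() can live in __init memory and be discarded after boot. The foo_* names are hypothetical.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

/* probe() may be __init because platform_driver_probe() guarantees it
 * will never be called again once the initcall has run. */
static int __init foo_probe(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver foo_driver = {
	.driver	= {
		.name	= "foo",
		.owner	= THIS_MODULE,
	},
};

static int __init foo_init(void)
{
	return platform_driver_probe(&foo_driver, foo_probe);
}
device_initcall(foo_init);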
584 
599 struct platform_device * __init_or_module platform_create_bundle(
600  struct platform_driver *driver,
601  int (*probe)(struct platform_device *),
602  struct resource *res, unsigned int n_res,
603  const void *data, size_t size)
604 {
605  struct platform_device *pdev;
606  int error;
607 
608  pdev = platform_device_alloc(driver->driver.name, -1);
609  if (!pdev) {
610  error = -ENOMEM;
611  goto err_out;
612  }
613 
614  error = platform_device_add_resources(pdev, res, n_res);
615  if (error)
616  goto err_pdev_put;
617 
618  error = platform_device_add_data(pdev, data, size);
619  if (error)
620  goto err_pdev_put;
621 
622  error = platform_device_add(pdev);
623  if (error)
624  goto err_pdev_put;
625 
626  error = platform_driver_probe(driver, probe);
627  if (error)
628  goto err_pdev_del;
629 
630  return pdev;
631 
632 err_pdev_del:
633  platform_device_del(pdev);
634 err_pdev_put:
635  platform_device_put(pdev);
636 err_out:
637  return ERR_PTR(error);
638 }
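Illustrative usage (editorial sketch, not part of platform.c): platform_create_bundle() combines the device creation of the earlier examples with the one-shot platform_driver_probe() call. The foo_* names and the address are hypothetical.

#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static struct resource foo_res[] = {
	DEFINE_RES_MEM(0x10000000, 0x1000),
};

static int __init foo_probe(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver foo_driver = {
	.driver	= {
		.name	= "foo",
		.owner	= THIS_MODULE,
	},
};

static int __init foo_init(void)
{
	struct platform_device *pdev;

	pdev = platform_create_bundle(&foo_driver, foo_probe,
				      foo_res, ARRAY_SIZE(foo_res), NULL, 0);
	return IS_ERR(pdev) ? PTR_ERR(pdev) : 0;
}
device_initcall(foo_init);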
640 
641 /* modalias support enables more hands-off userspace setup:
642  * (a) environment variable lets new-style hotplug events work once system is
643  * fully running: "modprobe $MODALIAS"
644  * (b) sysfs attribute lets new-style coldplug recover from hotplug events
645  * mishandled before system is fully running: "modprobe $(cat modalias)"
646  */
647 static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
648  char *buf)
649 {
650  struct platform_device *pdev = to_platform_device(dev);
651  int len = snprintf(buf, PAGE_SIZE, "platform:%s\n", pdev->name);
652 
653  return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
654 }
655 
656 static struct device_attribute platform_dev_attrs[] = {
657  __ATTR_RO(modalias),
658  __ATTR_NULL,
659 };
660 
661 static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)
662 {
663  struct platform_device *pdev = to_platform_device(dev);
664  int rc;
665 
666  /* Some devices have extra OF data and an OF-style MODALIAS */
667  rc = of_device_uevent_modalias(dev,env);
668  if (rc != -ENODEV)
669  return rc;
670 
671  add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX,
672  pdev->name);
673  return 0;
674 }
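Illustrative usage (editorial sketch, not part of platform.c): on the driver side, an id_table both feeds platform_match() below and, via MODULE_DEVICE_TABLE, emits "platform:foo"-style aliases so that "modprobe $MODALIAS" triggered by the uevent above loads the right module. The foo_* names are hypothetical.

#include <linux/module.h>
#include <linux/platform_device.h>

static const struct platform_device_id foo_id_table[] = {
	{ "foo",    0 },
	{ "foo-v2", 1 },
	{ }	/* sentinel */
};
MODULE_DEVICE_TABLE(platform, foo_id_table);

static struct platform_driver foo_driver = {
	.id_table	= foo_id_table,
	.driver		= {
		.name	= "foo",
		.owner	= THIS_MODULE,
	},
};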
675 
676 static const struct platform_device_id *platform_match_id(
677  const struct platform_device_id *id,
678  struct platform_device *pdev)
679 {
680  while (id->name[0]) {
681  if (strcmp(pdev->name, id->name) == 0) {
682  pdev->id_entry = id;
683  return id;
684  }
685  id++;
686  }
687  return NULL;
688 }
689 
703 static int platform_match(struct device *dev, struct device_driver *drv)
704 {
705  struct platform_device *pdev = to_platform_device(dev);
706  struct platform_driver *pdrv = to_platform_driver(drv);
707 
708  /* Attempt an OF style match first */
709  if (of_driver_match_device(dev, drv))
710  return 1;
711 
712  /* Then try to match against the id table */
713  if (pdrv->id_table)
714  return platform_match_id(pdrv->id_table, pdev) != NULL;
715 
716  /* fall-back to driver name match */
717  return (strcmp(pdev->name, drv->name) == 0);
718 }
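Illustrative usage (editorial sketch, not part of platform.c): the OF-style match attempted first in platform_match() is driven by an of_match_table in the driver. The "acme,foo" compatible string and foo_* names are hypothetical.

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static const struct of_device_id foo_of_match[] = {
	{ .compatible = "acme,foo" },
	{ }
};
MODULE_DEVICE_TABLE(of, foo_of_match);

static struct platform_driver foo_driver = {
	.driver = {
		.name		= "foo",
		.owner		= THIS_MODULE,
		.of_match_table	= foo_of_match,
	},
};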
719 
720 #ifdef CONFIG_PM_SLEEP
721 
722 static int platform_legacy_suspend(struct device *dev, pm_message_t mesg)
723 {
724  struct platform_driver *pdrv = to_platform_driver(dev->driver);
725  struct platform_device *pdev = to_platform_device(dev);
726  int ret = 0;
727 
728  if (dev->driver && pdrv->suspend)
729  ret = pdrv->suspend(pdev, mesg);
730 
731  return ret;
732 }
733 
734 static int platform_legacy_resume(struct device *dev)
735 {
736  struct platform_driver *pdrv = to_platform_driver(dev->driver);
737  struct platform_device *pdev = to_platform_device(dev);
738  int ret = 0;
739 
740  if (dev->driver && pdrv->resume)
741  ret = pdrv->resume(pdev);
742 
743  return ret;
744 }
745 
746 #endif /* CONFIG_PM_SLEEP */
747 
748 #ifdef CONFIG_SUSPEND
749 
750 int platform_pm_suspend(struct device *dev)
751 {
752  struct device_driver *drv = dev->driver;
753  int ret = 0;
754 
755  if (!drv)
756  return 0;
757 
758  if (drv->pm) {
759  if (drv->pm->suspend)
760  ret = drv->pm->suspend(dev);
761  } else {
762  ret = platform_legacy_suspend(dev, PMSG_SUSPEND);
763  }
764 
765  return ret;
766 }
767 
768 int platform_pm_resume(struct device *dev)
769 {
770  struct device_driver *drv = dev->driver;
771  int ret = 0;
772 
773  if (!drv)
774  return 0;
775 
776  if (drv->pm) {
777  if (drv->pm->resume)
778  ret = drv->pm->resume(dev);
779  } else {
780  ret = platform_legacy_resume(dev);
781  }
782 
783  return ret;
784 }
785 
786 #endif /* CONFIG_SUSPEND */
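Illustrative usage (editorial sketch, not part of platform.c): a driver that supplies dev_pm_ops takes the drv->pm branch in the callbacks above; only drivers without it fall back to the legacy suspend/resume members of struct platform_driver. The foo_* names are hypothetical.

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev)
{
	return 0;
}

static int foo_resume(struct device *dev)
{
	return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct platform_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.owner	= THIS_MODULE,
		.pm	= &foo_pm_ops,
	},
};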
787 
788 #ifdef CONFIG_HIBERNATE_CALLBACKS
789 
790 int platform_pm_freeze(struct device *dev)
791 {
792  struct device_driver *drv = dev->driver;
793  int ret = 0;
794 
795  if (!drv)
796  return 0;
797 
798  if (drv->pm) {
799  if (drv->pm->freeze)
800  ret = drv->pm->freeze(dev);
801  } else {
802  ret = platform_legacy_suspend(dev, PMSG_FREEZE);
803  }
804 
805  return ret;
806 }
807 
808 int platform_pm_thaw(struct device *dev)
809 {
810  struct device_driver *drv = dev->driver;
811  int ret = 0;
812 
813  if (!drv)
814  return 0;
815 
816  if (drv->pm) {
817  if (drv->pm->thaw)
818  ret = drv->pm->thaw(dev);
819  } else {
820  ret = platform_legacy_resume(dev);
821  }
822 
823  return ret;
824 }
825 
826 int platform_pm_poweroff(struct device *dev)
827 {
828  struct device_driver *drv = dev->driver;
829  int ret = 0;
830 
831  if (!drv)
832  return 0;
833 
834  if (drv->pm) {
835  if (drv->pm->poweroff)
836  ret = drv->pm->poweroff(dev);
837  } else {
838  ret = platform_legacy_suspend(dev, PMSG_HIBERNATE);
839  }
840 
841  return ret;
842 }
843 
844 int platform_pm_restore(struct device *dev)
845 {
846  struct device_driver *drv = dev->driver;
847  int ret = 0;
848 
849  if (!drv)
850  return 0;
851 
852  if (drv->pm) {
853  if (drv->pm->restore)
854  ret = drv->pm->restore(dev);
855  } else {
856  ret = platform_legacy_resume(dev);
857  }
858 
859  return ret;
860 }
861 
862 #endif /* CONFIG_HIBERNATE_CALLBACKS */
863 
864 static const struct dev_pm_ops platform_dev_pm_ops = {
865  .runtime_suspend = pm_generic_runtime_suspend,
866  .runtime_resume = pm_generic_runtime_resume,
867  .runtime_idle = pm_generic_runtime_idle,
868  USE_PLATFORM_PM_SLEEP_OPS
869 };
870 
871 struct bus_type platform_bus_type = {
872  .name = "platform",
873  .dev_attrs = platform_dev_attrs,
874  .match = platform_match,
875  .uevent = platform_uevent,
876  .pm = &platform_dev_pm_ops,
877 };
878 EXPORT_SYMBOL_GPL(platform_bus_type);
879 
880 int __init platform_bus_init(void)
881 {
882  int error;
883 
884  early_platform_cleanup();
885 
886  error = device_register(&platform_bus);
887  if (error)
888  return error;
889  error = bus_register(&platform_bus_type);
890  if (error)
891  device_unregister(&platform_bus);
892  return error;
893 }
894 
895 #ifndef ARCH_HAS_DMA_GET_REQUIRED_MASK
896 u64 dma_get_required_mask(struct device *dev)
897 {
898  u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT);
899  u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT));
900  u64 mask;
901 
902  if (!high_totalram) {
903  /* convert to mask just covering totalram */
904  low_totalram = (1 << (fls(low_totalram) - 1));
905  low_totalram += low_totalram - 1;
906  mask = low_totalram;
907  } else {
908  high_totalram = (1 << (fls(high_totalram) - 1));
909  high_totalram += high_totalram - 1;
910  mask = (((u64)high_totalram) << 32) + 0xffffffff;
911  }
912  return mask;
913 }
914 EXPORT_SYMBOL_GPL(dma_get_required_mask);
915 #endif
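Worked example (editorial note): with 4 KiB pages (PAGE_SHIFT = 12) and 512 MiB of RAM, max_pfn = 0x20000, so low_totalram = 0x1ffff000 and high_totalram = 0. fls(0x1ffff000) is 29, so low_totalram is first rounded down to the top power of two, 0x10000000, and then widened to the all-ones value 0x1fffffff, which is returned: the smallest 2^n - 1 mask that still covers the last byte of RAM.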
916 
917 static __initdata LIST_HEAD(early_platform_driver_list);
918 static __initdata LIST_HEAD(early_platform_device_list);
919 
927 int __init early_platform_driver_register(struct early_platform_driver *epdrv,
928  char *buf)
929 {
930  char *tmp;
931  int n;
932 
933  /* Simply add the driver to the end of the global list.
934  * Drivers will by default be put on the list in compiled-in order.
935  */
936  if (!epdrv->list.next) {
937  INIT_LIST_HEAD(&epdrv->list);
938  list_add_tail(&epdrv->list, &early_platform_driver_list);
939  }
940 
941  /* If the user has specified device then make sure the driver
942  * gets prioritized. The driver of the last device specified on
943  * command line will be put first on the list.
944  */
945  n = strlen(epdrv->pdrv->driver.name);
946  if (buf && !strncmp(buf, epdrv->pdrv->driver.name, n)) {
947  list_move(&epdrv->list, &early_platform_driver_list);
948 
949  /* Allow passing parameters after device name */
950  if (buf[n] == '\0' || buf[n] == ',')
951  epdrv->requested_id = -1;
952  else {
953  epdrv->requested_id = simple_strtoul(&buf[n + 1],
954  &tmp, 10);
955 
956  if (buf[n] != '.' || (tmp == &buf[n + 1])) {
957  epdrv->requested_id = EARLY_PLATFORM_ID_ERROR;
958  n = 0;
959  } else
960  n += strcspn(&buf[n + 1], ",") + 1;
961  }
962 
963  if (buf[n] == ',')
964  n++;
965 
966  if (epdrv->bufsize) {
967  memcpy(epdrv->buffer, &buf[n],
968  min_t(int, epdrv->bufsize, strlen(&buf[n]) + 1));
969  epdrv->buffer[epdrv->bufsize - 1] = '\0';
970  }
971  }
972 
973  return 0;
974 }
975 
984 void __init early_platform_add_devices(struct platform_device **devs, int num)
985 {
986  struct device *dev;
987  int i;
988 
989  /* simply add the devices to list */
990  for (i = 0; i < num; i++) {
991  dev = &devs[i]->dev;
992 
993  if (!dev->devres_head.next) {
994  pm_runtime_early_init(dev);
995  INIT_LIST_HEAD(&dev->devres_head);
996  list_add_tail(&dev->devres_head,
997  &early_platform_device_list);
998  }
999  }
1000 }
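Illustrative usage (editorial sketch, not part of platform.c): architecture setup code makes its timer devices visible to early platform drivers long before the driver core is initialized. The foo_* names are hypothetical.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>

static struct platform_device foo_timer0_device = {
	.name	= "foo_timer",
	.id	= 0,
};

static struct platform_device *foo_early_devices[] __initdata = {
	&foo_timer0_device,
};

void __init foo_board_setup(void)
{
	early_platform_add_devices(foo_early_devices,
				   ARRAY_SIZE(foo_early_devices));
}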
1001 
1010 void __init early_platform_driver_register_all(char *class_str)
1011 {
1012  /* The "class_str" parameter may or may not be present on the kernel
1013  * command line. If it is present then there may be more than one
1014  * matching parameter.
1015  *
1016  * Since we register our early platform drivers using early_param()
1017  * we need to make sure that they also get registered in the case
1018  * when the parameter is missing from the kernel command line.
1019  *
1020  * We use parse_early_options() to make sure the early_param() gets
1021  * called at least once. The early_param() may be called more than
1022  * once since the name of the preferred device may be specified on
1023  * the kernel command line. early_platform_driver_register() handles
1024  * this case for us.
1025  */
1026  parse_early_options(class_str);
1027 }
1028 
1034 static __init struct platform_device *
1035 early_platform_match(struct early_platform_driver *epdrv, int id)
1036 {
1037  struct platform_device *pd;
1038 
1039  list_for_each_entry(pd, &early_platform_device_list, dev.devres_head)
1040  if (platform_match(&pd->dev, &epdrv->pdrv->driver))
1041  if (pd->id == id)
1042  return pd;
1043 
1044  return NULL;
1045 }
1046 
1052 static __init int early_platform_left(struct early_platform_driver *epdrv,
1053  int id)
1054 {
1055  struct platform_device *pd;
1056 
1057  list_for_each_entry(pd, &early_platform_device_list, dev.devres_head)
1058  if (platform_match(&pd->dev, &epdrv->pdrv->driver))
1059  if (pd->id >= id)
1060  return 1;
1061 
1062  return 0;
1063 }
1064 
1071 static int __init early_platform_driver_probe_id(char *class_str,
1072  int id,
1073  int nr_probe)
1074 {
1075  struct early_platform_driver *epdrv;
1076  struct platform_device *match;
1077  int match_id;
1078  int n = 0;
1079  int left = 0;
1080 
1081  list_for_each_entry(epdrv, &early_platform_driver_list, list) {
1082  /* only use drivers matching our class_str */
1083  if (strcmp(class_str, epdrv->class_str))
1084  continue;
1085 
1086  if (id == -2) {
1087  match_id = epdrv->requested_id;
1088  left = 1;
1089 
1090  } else {
1091  match_id = id;
1092  left += early_platform_left(epdrv, id);
1093 
1094  /* skip requested id */
1095  switch (epdrv->requested_id) {
1096  case EARLY_PLATFORM_ID_ERROR:
1097  case EARLY_PLATFORM_ID_UNSET:
1098  break;
1099  default:
1100  if (epdrv->requested_id == id)
1101  match_id = EARLY_PLATFORM_ID_UNSET;
1102  }
1103  }
1104 
1105  switch (match_id) {
1106  case EARLY_PLATFORM_ID_ERROR:
1107  pr_warning("%s: unable to parse %s parameter\n",
1108  class_str, epdrv->pdrv->driver.name);
1109  /* fall-through */
1110  case EARLY_PLATFORM_ID_UNSET:
1111  match = NULL;
1112  break;
1113  default:
1114  match = early_platform_match(epdrv, match_id);
1115  }
1116 
1117  if (match) {
1118  /*
1119  * Set up a sensible init_name to enable
1120  * dev_name() and others to be used before the
1121  * rest of the driver core is initialized.
1122  */
1123  if (!match->dev.init_name && slab_is_available()) {
1124  if (match->id != -1)
1125  match->dev.init_name =
1126  kasprintf(GFP_KERNEL, "%s.%d",
1127  match->name,
1128  match->id);
1129  else
1130  match->dev.init_name =
1131  kasprintf(GFP_KERNEL, "%s",
1132  match->name);
1133 
1134  if (!match->dev.init_name)
1135  return -ENOMEM;
1136  }
1137 
1138  if (epdrv->pdrv->probe(match))
1139  pr_warning("%s: unable to probe %s early.\n",
1140  class_str, match->name);
1141  else
1142  n++;
1143  }
1144 
1145  if (n >= nr_probe)
1146  break;
1147  }
1148 
1149  if (left)
1150  return n;
1151  else
1152  return -ENODEV;
1153 }
1154 
1165 int __init early_platform_driver_probe(char *class_str,
1166  int nr_probe,
1167  int user_only)
1168 {
1169  int k, n, i;
1170 
1171  n = 0;
1172  for (i = -2; n < nr_probe; i++) {
1173  k = early_platform_driver_probe_id(class_str, i, nr_probe - n);
1174 
1175  if (k < 0)
1176  break;
1177 
1178  n += k;
1179 
1180  if (user_only)
1181  break;
1182  }
1183 
1184  return n;
1185 }
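Illustrative usage (editorial sketch, not part of platform.c): a driver hooks itself into an early class with the early_platform_init() helper from <linux/platform_device.h>, which also registers an early_param() of the same name so that, for example, "earlytimer=foo_timer.0" on the command line can prioritize one device. The architecture then probes the class during early setup. The foo_* names are hypothetical; "earlytimer" follows the convention used by existing architectures.

#include <linux/init.h>
#include <linux/platform_device.h>

static int foo_timer_probe(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver foo_timer_driver = {
	.probe	= foo_timer_probe,
	.driver	= {
		.name = "foo_timer",
	},
};
early_platform_init("earlytimer", &foo_timer_driver);

/* Hypothetical arch time_init(): probe at most one early timer driver. */
void __init foo_time_init(void)
{
	early_platform_driver_probe("earlytimer", 1, 0);
}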
1186 
1190 void __init early_platform_cleanup(void)
1191 {
1192  struct platform_device *pd, *pd2;
1193 
1194  /* clean up the devres list used to chain devices */
1195  list_for_each_entry_safe(pd, pd2, &early_platform_device_list,
1196  dev.devres_head) {
1197  list_del(&pd->dev.devres_head);
1198  memset(&pd->dev.devres_head, 0, sizeof(pd->dev.devres_head));
1199  }
1200 }
1201