Linux Kernel  3.7.1
firmware_class.c
1 /*
2  * firmware_class.c - Multi purpose firmware loading support
3  *
4  * Copyright (c) 2003 Manuel Estrada Sainz
5  *
6  * Please see Documentation/firmware_class/ for more information.
7  *
8  */
9 
10 #include <linux/capability.h>
11 #include <linux/device.h>
12 #include <linux/module.h>
13 #include <linux/init.h>
14 #include <linux/timer.h>
15 #include <linux/vmalloc.h>
16 #include <linux/interrupt.h>
17 #include <linux/bitops.h>
18 #include <linux/mutex.h>
19 #include <linux/workqueue.h>
20 #include <linux/highmem.h>
21 #include <linux/firmware.h>
22 #include <linux/slab.h>
23 #include <linux/sched.h>
24 #include <linux/file.h>
25 #include <linux/list.h>
26 #include <linux/async.h>
27 #include <linux/pm.h>
28 #include <linux/suspend.h>
29 #include <linux/syscore_ops.h>
30 
31 #include <generated/utsrelease.h>
32 
33 #include "base.h"
34 
35 MODULE_AUTHOR("Manuel Estrada Sainz");
36 MODULE_DESCRIPTION("Multi purpose firmware loading support");
37 MODULE_LICENSE("GPL");
38 
39 /* Builtin firmware support */
40 
41 #ifdef CONFIG_FW_LOADER
42 
43 extern struct builtin_fw __start_builtin_fw[];
44 extern struct builtin_fw __end_builtin_fw[];
45 
46 static bool fw_get_builtin_firmware(struct firmware *fw, const char *name)
47 {
48  struct builtin_fw *b_fw;
49 
50  for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) {
51  if (strcmp(name, b_fw->name) == 0) {
52  fw->size = b_fw->size;
53  fw->data = b_fw->data;
54  return true;
55  }
56  }
57 
58  return false;
59 }
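The __start_builtin_fw[]/__end_builtin_fw[] markers bracket the table of firmware images linked directly into the kernel (for example via CONFIG_EXTRA_FIRMWARE). For orientation, the record walked by the two helpers above is essentially the following sketch, mirroring the name/data/size fields they use (per include/linux/firmware.h):

struct builtin_fw {
	char *name;		/* the name a driver passes to request_firmware() */
	void *data;		/* image bytes linked into the kernel image */
	unsigned long size;
};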
60 
61 static bool fw_is_builtin_firmware(const struct firmware *fw)
62 {
63  struct builtin_fw *b_fw;
64 
65  for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++)
66  if (fw->data == b_fw->data)
67  return true;
68 
69  return false;
70 }
71 
72 #else /* Module case - no builtin firmware support */
73 
74 static inline bool fw_get_builtin_firmware(struct firmware *fw, const char *name)
75 {
76  return false;
77 }
78 
79 static inline bool fw_is_builtin_firmware(const struct firmware *fw)
80 {
81  return false;
82 }
83 #endif
84 
85 enum {
86  FW_STATUS_LOADING,
87  FW_STATUS_DONE,
88  FW_STATUS_ABORT,
89 };
90 
91 enum fw_buf_fmt {
92  VMALLOC_BUF, /* used in direct loading */
93  PAGE_BUF, /* used in loading via userspace */
94 };
95 
96 static int loading_timeout = 60; /* In seconds */
97 
98 static inline long firmware_loading_timeout(void)
99 {
100  return loading_timeout > 0 ? loading_timeout * HZ : MAX_SCHEDULE_TIMEOUT;
101 }
102 
103 struct firmware_cache {
104  /* firmware_buf instances are added to the list below */
105  spinlock_t lock;
106  struct list_head head;
107  int state;
108 
109 #ifdef CONFIG_PM_SLEEP
110  /*
111  * Names of firmware images which have been cached successfully
112  * will be added to the list below so that the device uncache
113  * helper can track which firmware images have been cached
114  * before.
115  */
116  spinlock_t name_lock;
117  struct list_head fw_names;
118 
119  struct delayed_work work;
120 
121  struct notifier_block pm_notify;
122 #endif
123 };
124 
125 struct firmware_buf {
126  struct kref ref;
127  struct list_head list;
128  struct completion completion;
129  struct firmware_cache *fwc;
130  unsigned long status;
131  enum fw_buf_fmt fmt;
132  void *data;
133  size_t size;
134  struct page **pages;
135  int nr_pages;
136  int page_array_size;
137  char fw_id[];
138 };
139 
140 struct fw_cache_entry {
141  struct list_head list;
142  char name[];
143 };
144 
145 struct firmware_priv {
146  struct timer_list timeout;
147  bool nowait;
148  struct device dev;
149  struct firmware_buf *buf;
150  struct firmware *fw;
151 };
152 
153 struct fw_name_devm {
154  unsigned long magic;
155  char name[];
156 };
157 
158 #define to_fwbuf(d) container_of(d, struct firmware_buf, ref)
159 
160 #define FW_LOADER_NO_CACHE 0
161 #define FW_LOADER_START_CACHE 1
162 
163 static int fw_cache_piggyback_on_request(const char *name);
164 
165 /* fw_lock could be moved into 'struct firmware_priv', but since it only
166  * guards corner cases a global lock should be OK */
167 static DEFINE_MUTEX(fw_lock);
168 
169 static struct firmware_cache fw_cache;
170 
171 static struct firmware_buf *__allocate_fw_buf(const char *fw_name,
172  struct firmware_cache *fwc)
173 {
174  struct firmware_buf *buf;
175 
176  buf = kzalloc(sizeof(*buf) + strlen(fw_name) + 1, GFP_ATOMIC);
177 
178  if (!buf)
179  return buf;
180 
181  kref_init(&buf->ref);
182  strcpy(buf->fw_id, fw_name);
183  buf->fwc = fwc;
184  init_completion(&buf->completion);
185  buf->fmt = VMALLOC_BUF;
186 
187  pr_debug("%s: fw-%s buf=%p\n", __func__, fw_name, buf);
188 
189  return buf;
190 }
191 
192 static struct firmware_buf *__fw_lookup_buf(const char *fw_name)
193 {
194  struct firmware_buf *tmp;
195  struct firmware_cache *fwc = &fw_cache;
196 
197  list_for_each_entry(tmp, &fwc->head, list)
198  if (!strcmp(tmp->fw_id, fw_name))
199  return tmp;
200  return NULL;
201 }
202 
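/*
 * Returns 1 (taking an extra reference) if a buf for @fw_name already
 * exists in the cache, 0 if a fresh buf was allocated and added, or
 * -ENOMEM on allocation failure; *buf is set in the first two cases.
 */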
203 static int fw_lookup_and_allocate_buf(const char *fw_name,
204  struct firmware_cache *fwc,
205  struct firmware_buf **buf)
206 {
207  struct firmware_buf *tmp;
208 
209  spin_lock(&fwc->lock);
210  tmp = __fw_lookup_buf(fw_name);
211  if (tmp) {
212  kref_get(&tmp->ref);
213  spin_unlock(&fwc->lock);
214  *buf = tmp;
215  return 1;
216  }
217  tmp = __allocate_fw_buf(fw_name, fwc);
218  if (tmp)
219  list_add(&tmp->list, &fwc->head);
220  spin_unlock(&fwc->lock);
221 
222  *buf = tmp;
223 
224  return tmp ? 0 : -ENOMEM;
225 }
226 
227 static struct firmware_buf *fw_lookup_buf(const char *fw_name)
228 {
229  struct firmware_buf *tmp;
230  struct firmware_cache *fwc = &fw_cache;
231 
232  spin_lock(&fwc->lock);
233  tmp = __fw_lookup_buf(fw_name);
234  spin_unlock(&fwc->lock);
235 
236  return tmp;
237 }
238 
239 static void __fw_free_buf(struct kref *ref)
240 {
241  struct firmware_buf *buf = to_fwbuf(ref);
242  struct firmware_cache *fwc = buf->fwc;
243  int i;
244 
245  pr_debug("%s: fw-%s buf=%p data=%p size=%u\n",
246  __func__, buf->fw_id, buf, buf->data,
247  (unsigned int)buf->size);
248 
249  spin_lock(&fwc->lock);
250  list_del(&buf->list);
251  spin_unlock(&fwc->lock);
252 
253 
254  if (buf->fmt == PAGE_BUF) {
255  vunmap(buf->data);
256  for (i = 0; i < buf->nr_pages; i++)
257  __free_page(buf->pages[i]);
258  kfree(buf->pages);
259  } else
260  vfree(buf->data);
261  kfree(buf);
262 }
263 
264 static void fw_free_buf(struct firmware_buf *buf)
265 {
266  kref_put(&buf->ref, __fw_free_buf);
267 }
268 
269 /* direct firmware loading support */
270 static const char *fw_path[] = {
271  "/lib/firmware/updates/" UTS_RELEASE,
272  "/lib/firmware/updates",
273  "/lib/firmware/" UTS_RELEASE,
274  "/lib/firmware"
275 };
276 
277 /* Don't inline this: 'struct kstat' is biggish */
278 static noinline long fw_file_size(struct file *file)
279 {
280  struct kstat st;
281  if (vfs_getattr(file->f_path.mnt, file->f_path.dentry, &st))
282  return -1;
283  if (!S_ISREG(st.mode))
284  return -1;
285  if (st.size != (long)st.size)
286  return -1;
287  return st.size;
288 }
289 
290 static bool fw_read_file_contents(struct file *file, struct firmware_buf *fw_buf)
291 {
292  long size;
293  char *buf;
294 
295  size = fw_file_size(file);
296  if (size < 0)
297  return false;
298  buf = vmalloc(size);
299  if (!buf)
300  return false;
301  if (kernel_read(file, 0, buf, size) != size) {
302  vfree(buf);
303  return false;
304  }
305  fw_buf->data = buf;
306  fw_buf->size = size;
307  return true;
308 }
309 
310 static bool fw_get_filesystem_firmware(struct firmware_buf *buf)
311 {
312  int i;
313  bool success = false;
314  char *path = __getname();
315 
316  for (i = 0; i < ARRAY_SIZE(fw_path); i++) {
317  struct file *file;
318  snprintf(path, PATH_MAX, "%s/%s", fw_path[i], buf->fw_id);
319 
320  file = filp_open(path, O_RDONLY, 0);
321  if (IS_ERR(file))
322  continue;
323  success = fw_read_file_contents(file, buf);
324  fput(file);
325  if (success)
326  break;
327  }
328  __putname(path);
329  return success;
330 }
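For a hypothetical request of "foo.bin" on a 3.7.1 kernel (UTS_RELEASE expanding to "3.7.1"), the loop above probes these candidates in order and stops at the first regular file it can read completely:

	/lib/firmware/updates/3.7.1/foo.bin
	/lib/firmware/updates/foo.bin
	/lib/firmware/3.7.1/foo.bin
	/lib/firmware/foo.bin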
331 
332 static struct firmware_priv *to_firmware_priv(struct device *dev)
333 {
334  return container_of(dev, struct firmware_priv, dev);
335 }
336 
337 static void fw_load_abort(struct firmware_priv *fw_priv)
338 {
339  struct firmware_buf *buf = fw_priv->buf;
340 
341  set_bit(FW_STATUS_ABORT, &buf->status);
342  complete_all(&buf->completion);
343 }
344 
345 static ssize_t firmware_timeout_show(struct class *class,
346  struct class_attribute *attr,
347  char *buf)
348 {
349  return sprintf(buf, "%d\n", loading_timeout);
350 }
351 
365 static ssize_t firmware_timeout_store(struct class *class,
366  struct class_attribute *attr,
367  const char *buf, size_t count)
368 {
369  loading_timeout = simple_strtol(buf, NULL, 10);
370  if (loading_timeout < 0)
371  loading_timeout = 0;
372 
373  return count;
374 }
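The timeout class attribute is exported as /sys/class/firmware/timeout. A minimal userspace sketch (the 10-second value is only an example) that shortens the fallback loading timeout; per firmware_loading_timeout() above, writing 0 makes the wait unbounded:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/firmware/timeout", "w");

	if (!f)
		return 1;
	fprintf(f, "10\n");	/* seconds; negative values are clamped to 0 (no timeout) */
	fclose(f);
	return 0;
}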
375 
376 static struct class_attribute firmware_class_attrs[] = {
377  __ATTR(timeout, S_IWUSR | S_IRUGO,
378  firmware_timeout_show, firmware_timeout_store),
379  __ATTR_NULL
380 };
381 
382 static void fw_dev_release(struct device *dev)
383 {
384  struct firmware_priv *fw_priv = to_firmware_priv(dev);
385 
386  kfree(fw_priv);
387 
388  module_put(THIS_MODULE);
389 }
390 
391 static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
392 {
393  struct firmware_priv *fw_priv = to_firmware_priv(dev);
394 
395  if (add_uevent_var(env, "FIRMWARE=%s", fw_priv->buf->fw_id))
396  return -ENOMEM;
397  if (add_uevent_var(env, "TIMEOUT=%i", loading_timeout))
398  return -ENOMEM;
399  if (add_uevent_var(env, "ASYNC=%d", fw_priv->nowait))
400  return -ENOMEM;
401 
402  return 0;
403 }
404 
405 static struct class firmware_class = {
406  .name = "firmware",
407  .class_attrs = firmware_class_attrs,
408  .dev_uevent = firmware_uevent,
409  .dev_release = fw_dev_release,
410 };
411 
412 static ssize_t firmware_loading_show(struct device *dev,
413  struct device_attribute *attr, char *buf)
414 {
415  struct firmware_priv *fw_priv = to_firmware_priv(dev);
416  int loading = test_bit(FW_STATUS_LOADING, &fw_priv->buf->status);
417 
418  return sprintf(buf, "%d\n", loading);
419 }
420 
421 /* firmware holds the ownership of pages */
422 static void firmware_free_data(const struct firmware *fw)
423 {
424  /* Loaded directly? */
425  if (!fw->priv) {
426  vfree(fw->data);
427  return;
428  }
429  fw_free_buf(fw->priv);
430 }
431 
432 /* Some architectures don't have PAGE_KERNEL_RO */
433 #ifndef PAGE_KERNEL_RO
434 #define PAGE_KERNEL_RO PAGE_KERNEL
435 #endif
436 
437 /* a pages buffer should be mapped/unmapped only once */
438 static int fw_map_pages_buf(struct firmware_buf *buf)
439 {
440  if (buf->fmt != PAGE_BUF)
441  return 0;
442 
443  if (buf->data)
444  vunmap(buf->data);
445  buf->data = vmap(buf->pages, buf->nr_pages, 0, PAGE_KERNEL_RO);
446  if (!buf->data)
447  return -ENOMEM;
448  return 0;
449 }
450 
464 static ssize_t firmware_loading_store(struct device *dev,
465  struct device_attribute *attr,
466  const char *buf, size_t count)
467 {
468  struct firmware_priv *fw_priv = to_firmware_priv(dev);
469  struct firmware_buf *fw_buf = fw_priv->buf;
470  int loading = simple_strtol(buf, NULL, 10);
471  int i;
472 
473  mutex_lock(&fw_lock);
474 
475  if (!fw_buf)
476  goto out;
477 
478  switch (loading) {
479  case 1:
480  /* discarding any previous partial load */
481  if (!test_bit(FW_STATUS_DONE, &fw_buf->status)) {
482  for (i = 0; i < fw_buf->nr_pages; i++)
483  __free_page(fw_buf->pages[i]);
484  kfree(fw_buf->pages);
485  fw_buf->pages = NULL;
486  fw_buf->page_array_size = 0;
487  fw_buf->nr_pages = 0;
488  set_bit(FW_STATUS_LOADING, &fw_buf->status);
489  }
490  break;
491  case 0:
492  if (test_bit(FW_STATUS_LOADING, &fw_buf->status)) {
493  set_bit(FW_STATUS_DONE, &fw_buf->status);
494  clear_bit(FW_STATUS_LOADING, &fw_buf->status);
495 
496  /*
497  * Several loading requests may be pending on
498  * the same firmware buf, so let all requests
499  * see the mapped 'buf->data' once the loading
500  * is completed.
501  */
502  fw_map_pages_buf(fw_buf);
503  complete_all(&fw_buf->completion);
504  break;
505  }
506  /* fallthrough */
507  default:
508  dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading);
509  /* fallthrough */
510  case -1:
511  fw_load_abort(fw_priv);
512  break;
513  }
514 out:
515  mutex_unlock(&fw_lock);
516  return count;
517 }
518 
519 static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store);
520 
521 static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
522  struct bin_attribute *bin_attr,
523  char *buffer, loff_t offset, size_t count)
524 {
525  struct device *dev = kobj_to_dev(kobj);
526  struct firmware_priv *fw_priv = to_firmware_priv(dev);
527  struct firmware_buf *buf;
528  ssize_t ret_count;
529 
530  mutex_lock(&fw_lock);
531  buf = fw_priv->buf;
532  if (!buf || test_bit(FW_STATUS_DONE, &buf->status)) {
533  ret_count = -ENODEV;
534  goto out;
535  }
536  if (offset > buf->size) {
537  ret_count = 0;
538  goto out;
539  }
540  if (count > buf->size - offset)
541  count = buf->size - offset;
542 
543  ret_count = count;
544 
545  while (count) {
546  void *page_data;
547  int page_nr = offset >> PAGE_SHIFT;
548  int page_ofs = offset & (PAGE_SIZE-1);
549  int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);
550 
551  page_data = kmap(buf->pages[page_nr]);
552 
553  memcpy(buffer, page_data + page_ofs, page_cnt);
554 
555  kunmap(buf->pages[page_nr]);
556  buffer += page_cnt;
557  offset += page_cnt;
558  count -= page_cnt;
559  }
560 out:
561  mutex_unlock(&fw_lock);
562  return ret_count;
563 }
564 
565 static int fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size)
566 {
567  struct firmware_buf *buf = fw_priv->buf;
568  int pages_needed = ALIGN(min_size, PAGE_SIZE) >> PAGE_SHIFT;
569 
570  /* If the array of pages is too small, grow it... */
571  if (buf->page_array_size < pages_needed) {
572  int new_array_size = max(pages_needed,
573  buf->page_array_size * 2);
574  struct page **new_pages;
575 
576  new_pages = kmalloc(new_array_size * sizeof(void *),
577  GFP_KERNEL);
578  if (!new_pages) {
579  fw_load_abort(fw_priv);
580  return -ENOMEM;
581  }
582  memcpy(new_pages, buf->pages,
583  buf->page_array_size * sizeof(void *));
584  memset(&new_pages[buf->page_array_size], 0, sizeof(void *) *
585  (new_array_size - buf->page_array_size));
586  kfree(buf->pages);
587  buf->pages = new_pages;
588  buf->page_array_size = new_array_size;
589  }
590 
591  while (buf->nr_pages < pages_needed) {
592  buf->pages[buf->nr_pages] =
593  alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
594 
595  if (!buf->pages[buf->nr_pages]) {
596  fw_load_abort(fw_priv);
597  return -ENOMEM;
598  }
599  buf->nr_pages++;
600  }
601  return 0;
602 }
603 
616 static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
617  struct bin_attribute *bin_attr,
618  char *buffer, loff_t offset, size_t count)
619 {
620  struct device *dev = kobj_to_dev(kobj);
621  struct firmware_priv *fw_priv = to_firmware_priv(dev);
622  struct firmware_buf *buf;
623  ssize_t retval;
624 
625  if (!capable(CAP_SYS_RAWIO))
626  return -EPERM;
627 
628  mutex_lock(&fw_lock);
629  buf = fw_priv->buf;
630  if (!buf || test_bit(FW_STATUS_DONE, &buf->status)) {
631  retval = -ENODEV;
632  goto out;
633  }
634 
635  retval = fw_realloc_buffer(fw_priv, offset + count);
636  if (retval)
637  goto out;
638 
639  retval = count;
640 
641  while (count) {
642  void *page_data;
643  int page_nr = offset >> PAGE_SHIFT;
644  int page_ofs = offset & (PAGE_SIZE - 1);
645  int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);
646 
647  page_data = kmap(buf->pages[page_nr]);
648 
649  memcpy(page_data + page_ofs, buffer, page_cnt);
650 
651  kunmap(buf->pages[page_nr]);
652  buffer += page_cnt;
653  offset += page_cnt;
654  count -= page_cnt;
655  }
656 
657  buf->size = max_t(size_t, offset, buf->size);
658 out:
659  mutex_unlock(&fw_lock);
660  return retval;
661 }
662 
663 static struct bin_attribute firmware_attr_data = {
664  .attr = { .name = "data", .mode = 0644 },
665  .size = 0,
666  .read = firmware_data_read,
667  .write = firmware_data_write,
668 };
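Taken together, the loading and data attributes implement the userspace fallback protocol: write 1 to loading, stream the image into data (the writer needs CAP_SYS_RAWIO), then write 0 to loading, or -1 to abort. A minimal sketch of a helper doing this by hand; the sysfs directory comes from the firmware uevent, and the paths and names here are only illustrative:

#include <stdio.h>

static int echo(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(int argc, char **argv)
{
	/* argv[1]: sysfs dir of the firmware device, argv[2]: image file */
	char loading[512], data[512], chunk[4096];
	FILE *in, *out;
	size_t n;

	if (argc != 3)
		return 1;
	snprintf(loading, sizeof(loading), "%s/loading", argv[1]);
	snprintf(data, sizeof(data), "%s/data", argv[1]);

	if (echo(loading, "1"))			/* start; drops any partial load */
		return 1;
	in = fopen(argv[2], "rb");
	out = fopen(data, "wb");
	if (!in || !out) {
		echo(loading, "-1");		/* abort the pending request */
		return 1;
	}
	while ((n = fread(chunk, 1, sizeof(chunk), in)) > 0)
		fwrite(chunk, 1, n, out);
	fclose(in);
	fclose(out);
	return echo(loading, "0") ? 1 : 0;	/* conclude the load */
}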
669 
670 static void firmware_class_timeout(u_long data)
671 {
672  struct firmware_priv *fw_priv = (struct firmware_priv *) data;
673 
674  fw_load_abort(fw_priv);
675 }
676 
677 static struct firmware_priv *
678 fw_create_instance(struct firmware *firmware, const char *fw_name,
679  struct device *device, bool uevent, bool nowait)
680 {
681  struct firmware_priv *fw_priv;
682  struct device *f_dev;
683 
684  fw_priv = kzalloc(sizeof(*fw_priv), GFP_KERNEL);
685  if (!fw_priv) {
686  dev_err(device, "%s: kmalloc failed\n", __func__);
687  fw_priv = ERR_PTR(-ENOMEM);
688  goto exit;
689  }
690 
691  fw_priv->nowait = nowait;
692  fw_priv->fw = firmware;
693  setup_timer(&fw_priv->timeout,
694  firmware_class_timeout, (u_long) fw_priv);
695 
696  f_dev = &fw_priv->dev;
697 
698  device_initialize(f_dev);
699  dev_set_name(f_dev, "%s", fw_name);
700  f_dev->parent = device;
701  f_dev->class = &firmware_class;
702 exit:
703  return fw_priv;
704 }
705 
706 /* copy the buffer info (pages, data, size) from buf into the firmware struct */
707 static void fw_set_page_data(struct firmware_buf *buf, struct firmware *fw)
708 {
709  fw->priv = buf;
710  fw->pages = buf->pages;
711  fw->size = buf->size;
712  fw->data = buf->data;
713 
714  pr_debug("%s: fw-%s buf=%p data=%p size=%u\n",
715  __func__, buf->fw_id, buf, buf->data,
716  (unsigned int)buf->size);
717 }
718 
719 #ifdef CONFIG_PM_SLEEP
720 static void fw_name_devm_release(struct device *dev, void *res)
721 {
722  struct fw_name_devm *fwn = res;
723 
724  if (fwn->magic == (unsigned long)&fw_cache)
725  pr_debug("%s: fw_name-%s devm-%p released\n",
726  __func__, fwn->name, res);
727 }
728 
729 static int fw_devm_match(struct device *dev, void *res,
730  void *match_data)
731 {
732  struct fw_name_devm *fwn = res;
733 
734  return (fwn->magic == (unsigned long)&fw_cache) &&
735  !strcmp(fwn->name, match_data);
736 }
737 
738 static struct fw_name_devm *fw_find_devm_name(struct device *dev,
739  const char *name)
740 {
741  struct fw_name_devm *fwn;
742 
743  fwn = devres_find(dev, fw_name_devm_release,
744  fw_devm_match, (void *)name);
745  return fwn;
746 }
747 
748 /* add firmware name into devres list */
749 static int fw_add_devm_name(struct device *dev, const char *name)
750 {
751  struct fw_name_devm *fwn;
752 
753  fwn = fw_find_devm_name(dev, name);
754  if (fwn)
755  return 1;
756 
757  fwn = devres_alloc(fw_name_devm_release, sizeof(struct fw_name_devm) +
758  strlen(name) + 1, GFP_KERNEL);
759  if (!fwn)
760  return -ENOMEM;
761 
762  fwn->magic = (unsigned long)&fw_cache;
763  strcpy(fwn->name, name);
764  devres_add(dev, fwn);
765 
766  return 0;
767 }
768 #else
769 static int fw_add_devm_name(struct device *dev, const char *name)
770 {
771  return 0;
772 }
773 #endif
774 
775 static void _request_firmware_cleanup(const struct firmware **firmware_p)
776 {
777  release_firmware(*firmware_p);
778  *firmware_p = NULL;
779 }
780 
781 static struct firmware_priv *
782 _request_firmware_prepare(const struct firmware **firmware_p, const char *name,
783  struct device *device, bool uevent, bool nowait)
784 {
785  struct firmware *firmware;
786  struct firmware_priv *fw_priv = NULL;
787  struct firmware_buf *buf;
788  int ret;
789 
790  if (!firmware_p)
791  return ERR_PTR(-EINVAL);
792 
793  *firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
794  if (!firmware) {
795  dev_err(device, "%s: kmalloc(struct firmware) failed\n",
796  __func__);
797  return ERR_PTR(-ENOMEM);
798  }
799 
800  if (fw_get_builtin_firmware(firmware, name)) {
801  dev_dbg(device, "firmware: using built-in firmware %s\n", name);
802  return NULL;
803  }
804 
805  ret = fw_lookup_and_allocate_buf(name, &fw_cache, &buf);
806  if (!ret)
807  fw_priv = fw_create_instance(firmware, name, device,
808  uevent, nowait);
809 
810  if (IS_ERR(fw_priv) || ret < 0) {
811  kfree(firmware);
812  *firmware_p = NULL;
813  return ERR_PTR(-ENOMEM);
814  } else if (fw_priv) {
815  fw_priv->buf = buf;
816 
817  /*
818  * bind with 'buf' now to avoid warning in failure path
819  * of requesting firmware.
820  */
821  firmware->priv = buf;
822  return fw_priv;
823  }
824 
825  /* share the cached buf, which is in progress or completed */
826  check_status:
827  mutex_lock(&fw_lock);
828  if (test_bit(FW_STATUS_ABORT, &buf->status)) {
829  fw_priv = ERR_PTR(-ENOENT);
830  firmware->priv = buf;
831  _request_firmware_cleanup(firmware_p);
832  goto exit;
833  } else if (test_bit(FW_STATUS_DONE, &buf->status)) {
834  fw_priv = NULL;
835  fw_set_page_data(buf, firmware);
836  goto exit;
837  }
838  mutex_unlock(&fw_lock);
839  wait_for_completion(&buf->completion);
840  goto check_status;
841 
842 exit:
843  mutex_unlock(&fw_lock);
844  return fw_priv;
845 }
846 
847 static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
848  long timeout)
849 {
850  int retval = 0;
851  struct device *f_dev = &fw_priv->dev;
852  struct firmware_buf *buf = fw_priv->buf;
853  struct firmware_cache *fwc = &fw_cache;
854  int direct_load = 0;
855 
856  /* try direct loading from fs first */
857  if (fw_get_filesystem_firmware(buf)) {
858  dev_dbg(f_dev->parent, "firmware: direct-loading"
859  " firmware %s\n", buf->fw_id);
860 
861  set_bit(FW_STATUS_DONE, &buf->status);
862  complete_all(&buf->completion);
863  direct_load = 1;
864  goto handle_fw;
865  }
866 
867  /* fall back on userspace loading */
868  buf->fmt = PAGE_BUF;
869 
870  dev_set_uevent_suppress(f_dev, true);
871 
872  /* Need to pin this module until class device is destroyed */
873  __module_get(THIS_MODULE);
874 
875  retval = device_add(f_dev);
876  if (retval) {
877  dev_err(f_dev, "%s: device_register failed\n", __func__);
878  goto err_put_dev;
879  }
880 
881  retval = device_create_bin_file(f_dev, &firmware_attr_data);
882  if (retval) {
883  dev_err(f_dev, "%s: sysfs_create_bin_file failed\n", __func__);
884  goto err_del_dev;
885  }
886 
887  retval = device_create_file(f_dev, &dev_attr_loading);
888  if (retval) {
889  dev_err(f_dev, "%s: device_create_file failed\n", __func__);
890  goto err_del_bin_attr;
891  }
892 
893  if (uevent) {
894  dev_set_uevent_suppress(f_dev, false);
895  dev_dbg(f_dev, "firmware: requesting %s\n", buf->fw_id);
896  if (timeout != MAX_SCHEDULE_TIMEOUT)
897  mod_timer(&fw_priv->timeout,
898  round_jiffies_up(jiffies + timeout));
899 
900  kobject_uevent(&fw_priv->dev.kobj, KOBJ_ADD);
901  }
902 
903  wait_for_completion(&buf->completion);
904 
905  del_timer_sync(&fw_priv->timeout);
906 
907 handle_fw:
908  mutex_lock(&fw_lock);
909  if (!buf->size || test_bit(FW_STATUS_ABORT, &buf->status))
910  retval = -ENOENT;
911 
912  /*
913  * add firmware name into devres list so that we can auto cache
914  * and uncache firmware for device.
915  *
916  * f_dev->parent may have been deleted already, but the problem
917  * should be fixed in devres or driver core.
918  */
919  if (!retval && f_dev->parent)
920  fw_add_devm_name(f_dev->parent, buf->fw_id);
921 
922  /*
923  * After firmware image caching has started, let this request
924  * piggyback on it.
925  */
926  if (!retval && fwc->state == FW_LOADER_START_CACHE) {
927  if (fw_cache_piggyback_on_request(buf->fw_id))
928  kref_get(&buf->ref);
929  }
930 
931  /* pass the pages buffer to driver at the last minute */
932  fw_set_page_data(buf, fw_priv->fw);
933 
934  fw_priv->buf = NULL;
935  mutex_unlock(&fw_lock);
936 
937  if (direct_load)
938  goto err_put_dev;
939 
940  device_remove_file(f_dev, &dev_attr_loading);
941 err_del_bin_attr:
942  device_remove_bin_file(f_dev, &firmware_attr_data);
943 err_del_dev:
944  device_del(f_dev);
945 err_put_dev:
946  put_device(f_dev);
947  return retval;
948 }
949 
967 int
968 request_firmware(const struct firmware **firmware_p, const char *name,
969  struct device *device)
970 {
971  struct firmware_priv *fw_priv;
972  int ret;
973 
974  fw_priv = _request_firmware_prepare(firmware_p, name, device, true,
975  false);
976  if (IS_ERR_OR_NULL(fw_priv))
977  return PTR_RET(fw_priv);
978 
979  ret = usermodehelper_read_trylock();
980  if (WARN_ON(ret)) {
981  dev_err(device, "firmware: %s will not be loaded\n", name);
982  } else {
983  ret = _request_firmware_load(fw_priv, true,
984  firmware_loading_timeout());
985  usermodehelper_read_unlock();
986  }
987  if (ret)
988  _request_firmware_cleanup(firmware_p);
989 
990  return ret;
991 }
992 
997 void release_firmware(const struct firmware *fw)
998 {
999  if (fw) {
1000  if (!fw_is_builtin_firmware(fw))
1001  firmware_free_data(fw);
1002  kfree(fw);
1003  }
1004 }
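For reference, a driver normally pairs request_firmware() with release_firmware() around programming its device; a minimal sketch in which the helper and the firmware name "example/fw.bin" are placeholders:

#include <linux/device.h>
#include <linux/firmware.h>

/* illustrative only; "example/fw.bin" is a hypothetical name */
static int example_load_fw(struct device *dev)
{
	const struct firmware *fw;
	int err;

	err = request_firmware(&fw, "example/fw.bin", dev);
	if (err)
		return err;		/* not found, aborted, or out of memory */

	/* fw->data and fw->size stay valid until release_firmware() */
	dev_info(dev, "loaded %zu bytes of firmware\n", fw->size);

	release_firmware(fw);
	return 0;
}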
1005 
1006 /* Async support */
1007 struct firmware_work {
1008  struct work_struct work;
1009  struct module *module;
1010  const char *name;
1011  struct device *device;
1012  void *context;
1013  void (*cont)(const struct firmware *fw, void *context);
1014  bool uevent;
1015 };
1016 
1017 static void request_firmware_work_func(struct work_struct *work)
1018 {
1019  struct firmware_work *fw_work;
1020  const struct firmware *fw;
1021  struct firmware_priv *fw_priv;
1022  long timeout;
1023  int ret;
1024 
1025  fw_work = container_of(work, struct firmware_work, work);
1026  fw_priv = _request_firmware_prepare(&fw, fw_work->name, fw_work->device,
1027  fw_work->uevent, true);
1028  if (IS_ERR_OR_NULL(fw_priv)) {
1029  ret = PTR_RET(fw_priv);
1030  goto out;
1031  }
1032 
1033  timeout = usermodehelper_read_lock_wait(firmware_loading_timeout());
1034  if (timeout) {
1035  ret = _request_firmware_load(fw_priv, fw_work->uevent, timeout);
1036  usermodehelper_read_unlock();
1037  } else {
1038  dev_dbg(fw_work->device, "firmware: %s loading timed out\n",
1039  fw_work->name);
1040  ret = -EAGAIN;
1041  }
1042  if (ret)
1043  _request_firmware_cleanup(&fw);
1044 
1045  out:
1046  fw_work->cont(fw, fw_work->context);
1047  put_device(fw_work->device);
1048 
1049  module_put(fw_work->module);
1050  kfree(fw_work);
1051 }
1052 
1076 int
1077 request_firmware_nowait(
1078  struct module *module, bool uevent,
1079  const char *name, struct device *device, gfp_t gfp, void *context,
1080  void (*cont)(const struct firmware *fw, void *context))
1081 {
1082  struct firmware_work *fw_work;
1083 
1084  fw_work = kzalloc(sizeof (struct firmware_work), gfp);
1085  if (!fw_work)
1086  return -ENOMEM;
1087 
1088  fw_work->module = module;
1089  fw_work->name = name;
1090  fw_work->device = device;
1091  fw_work->context = context;
1092  fw_work->cont = cont;
1093  fw_work->uevent = uevent;
1094 
1095  if (!try_module_get(module)) {
1096  kfree(fw_work);
1097  return -EFAULT;
1098  }
1099 
1100  get_device(fw_work->device);
1101  INIT_WORK(&fw_work->work, request_firmware_work_func);
1102  schedule_work(&fw_work->work);
1103  return 0;
1104 }
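The asynchronous variant returns immediately and invokes the continuation from a workqueue once loading finishes; on failure the callback receives fw == NULL. A minimal sketch of a caller, with hypothetical names:

#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/module.h>

/* illustrative only; the callback and firmware name are placeholders */
static void example_fw_ready(const struct firmware *fw, void *context)
{
	struct device *dev = context;

	if (!fw) {
		dev_err(dev, "firmware load failed\n");
		return;
	}
	/* consume fw->data / fw->size, then drop the reference */
	release_firmware(fw);
}

static int example_start_load(struct device *dev)
{
	return request_firmware_nowait(THIS_MODULE, true, "example/fw.bin",
				       dev, GFP_KERNEL, dev, example_fw_ready);
}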
1105 
1120 int cache_firmware(const char *fw_name)
1121 {
1122  int ret;
1123  const struct firmware *fw;
1124 
1125  pr_debug("%s: %s\n", __func__, fw_name);
1126 
1127  ret = request_firmware(&fw, fw_name, NULL);
1128  if (!ret)
1129  kfree(fw);
1130 
1131  pr_debug("%s: %s ret=%d\n", __func__, fw_name, ret);
1132 
1133  return ret;
1134 }
1135 
1147 int uncache_firmware(const char *fw_name)
1148 {
1149  struct firmware_buf *buf;
1150  struct firmware fw;
1151 
1152  pr_debug("%s: %s\n", __func__, fw_name);
1153 
1154  if (fw_get_builtin_firmware(&fw, fw_name))
1155  return 0;
1156 
1157  buf = fw_lookup_buf(fw_name);
1158  if (buf) {
1159  fw_free_buf(buf);
1160  return 0;
1161  }
1162 
1163  return -EINVAL;
1164 }
1165 
1166 #ifdef CONFIG_PM_SLEEP
1167 static ASYNC_DOMAIN_EXCLUSIVE(fw_cache_domain);
1168 
1169 static struct fw_cache_entry *alloc_fw_cache_entry(const char *name)
1170 {
1171  struct fw_cache_entry *fce;
1172 
1173  fce = kzalloc(sizeof(*fce) + strlen(name) + 1, GFP_ATOMIC);
1174  if (!fce)
1175  goto exit;
1176 
1177  strcpy(fce->name, name);
1178 exit:
1179  return fce;
1180 }
1181 
1182 static int __fw_entry_found(const char *name)
1183 {
1184  struct firmware_cache *fwc = &fw_cache;
1185  struct fw_cache_entry *fce;
1186 
1187  list_for_each_entry(fce, &fwc->fw_names, list) {
1188  if (!strcmp(fce->name, name))
1189  return 1;
1190  }
1191  return 0;
1192 }
1193 
1194 static int fw_cache_piggyback_on_request(const char *name)
1195 {
1196  struct firmware_cache *fwc = &fw_cache;
1197  struct fw_cache_entry *fce;
1198  int ret = 0;
1199 
1200  spin_lock(&fwc->name_lock);
1201  if (__fw_entry_found(name))
1202  goto found;
1203 
1204  fce = alloc_fw_cache_entry(name);
1205  if (fce) {
1206  ret = 1;
1207  list_add(&fce->list, &fwc->fw_names);
1208  pr_debug("%s: fw: %s\n", __func__, name);
1209  }
1210 found:
1211  spin_unlock(&fwc->name_lock);
1212  return ret;
1213 }
1214 
1215 static void free_fw_cache_entry(struct fw_cache_entry *fce)
1216 {
1217  kfree(fce);
1218 }
1219 
1220 static void __async_dev_cache_fw_image(void *fw_entry,
1221  async_cookie_t cookie)
1222 {
1223  struct fw_cache_entry *fce = fw_entry;
1224  struct firmware_cache *fwc = &fw_cache;
1225  int ret;
1226 
1227  ret = cache_firmware(fce->name);
1228  if (ret) {
1229  spin_lock(&fwc->name_lock);
1230  list_del(&fce->list);
1231  spin_unlock(&fwc->name_lock);
1232 
1233  free_fw_cache_entry(fce);
1234  }
1235 }
1236 
1237 /* called with dev->devres_lock held */
1238 static void dev_create_fw_entry(struct device *dev, void *res,
1239  void *data)
1240 {
1241  struct fw_name_devm *fwn = res;
1242  const char *fw_name = fwn->name;
1243  struct list_head *head = data;
1244  struct fw_cache_entry *fce;
1245 
1246  fce = alloc_fw_cache_entry(fw_name);
1247  if (fce)
1248  list_add(&fce->list, head);
1249 }
1250 
1251 static int devm_name_match(struct device *dev, void *res,
1252  void *match_data)
1253 {
1254  struct fw_name_devm *fwn = res;
1255  return (fwn->magic == (unsigned long)match_data);
1256 }
1257 
1258 static void dev_cache_fw_image(struct device *dev, void *data)
1259 {
1260  LIST_HEAD(todo);
1261  struct fw_cache_entry *fce;
1262  struct fw_cache_entry *fce_next;
1263  struct firmware_cache *fwc = &fw_cache;
1264 
1265  devres_for_each_res(dev, fw_name_devm_release,
1266  devm_name_match, &fw_cache,
1267  dev_create_fw_entry, &todo);
1268 
1269  list_for_each_entry_safe(fce, fce_next, &todo, list) {
1270  list_del(&fce->list);
1271 
1272  spin_lock(&fwc->name_lock);
1273  /* only one cache entry for one firmware */
1274  if (!__fw_entry_found(fce->name)) {
1275  list_add(&fce->list, &fwc->fw_names);
1276  } else {
1277  free_fw_cache_entry(fce);
1278  fce = NULL;
1279  }
1280  spin_unlock(&fwc->name_lock);
1281 
1282  if (fce)
1283  async_schedule_domain(__async_dev_cache_fw_image,
1284  (void *)fce,
1285  &fw_cache_domain);
1286  }
1287 }
1288 
1289 static void __device_uncache_fw_images(void)
1290 {
1291  struct firmware_cache *fwc = &fw_cache;
1292  struct fw_cache_entry *fce;
1293 
1294  spin_lock(&fwc->name_lock);
1295  while (!list_empty(&fwc->fw_names)) {
1296  fce = list_entry(fwc->fw_names.next,
1297  struct fw_cache_entry, list);
1298  list_del(&fce->list);
1299  spin_unlock(&fwc->name_lock);
1300 
1301  uncache_firmware(fce->name);
1302  free_fw_cache_entry(fce);
1303 
1304  spin_lock(&fwc->name_lock);
1305  }
1306  spin_unlock(&fwc->name_lock);
1307 }
1308 
1319 static void device_cache_fw_images(void)
1320 {
1321  struct firmware_cache *fwc = &fw_cache;
1322  int old_timeout;
1323  DEFINE_WAIT(wait);
1324 
1325  pr_debug("%s\n", __func__);
1326 
1327  /* cancel uncache work */
1328  cancel_delayed_work_sync(&fwc->work);
1329 
1330  /*
1331  * use a small loading timeout for caching devices' firmware
1332  * because all these firmware images have been loaded
1333  * successfully at least once, and the system is ready for
1334  * completing firmware loading now. The maximum size of
1335  * firmware in current distributions is about 2M bytes,
1336  * so 10 secs should be enough.
1337  */
1338  old_timeout = loading_timeout;
1339  loading_timeout = 10;
1340 
1341  mutex_lock(&fw_lock);
1342  fwc->state = FW_LOADER_START_CACHE;
1343  dpm_for_each_dev(NULL, dev_cache_fw_image);
1344  mutex_unlock(&fw_lock);
1345 
1346  /* wait for completion of caching firmware for all devices */
1347  async_synchronize_full_domain(&fw_cache_domain);
1348 
1349  loading_timeout = old_timeout;
1350 }
1351 
1358 static void device_uncache_fw_images(void)
1359 {
1360  pr_debug("%s\n", __func__);
1361  __device_uncache_fw_images();
1362 }
1363 
1364 static void device_uncache_fw_images_work(struct work_struct *work)
1365 {
1366  device_uncache_fw_images();
1367 }
1368 
1376 static void device_uncache_fw_images_delay(unsigned long delay)
1377 {
1378  schedule_delayed_work(&fw_cache.work,
1379  msecs_to_jiffies(delay));
1380 }
1381 
1382 static int fw_pm_notify(struct notifier_block *notify_block,
1383  unsigned long mode, void *unused)
1384 {
1385  switch (mode) {
1386  case PM_HIBERNATION_PREPARE:
1387  case PM_SUSPEND_PREPARE:
1388  device_cache_fw_images();
1389  break;
1390 
1391  case PM_POST_SUSPEND:
1392  case PM_POST_HIBERNATION:
1393  case PM_POST_RESTORE:
1394  /*
1395  * In case system sleep failed and syscore_suspend was
1396  * not called.
1397  */
1398  mutex_lock(&fw_lock);
1399  fw_cache.state = FW_LOADER_NO_CACHE;
1400  mutex_unlock(&fw_lock);
1401 
1402  device_uncache_fw_images_delay(10 * MSEC_PER_SEC);
1403  break;
1404  }
1405 
1406  return 0;
1407 }
1408 
1409 /* stop caching firmware once syscore_suspend is reached */
1410 static int fw_suspend(void)
1411 {
1412  fw_cache.state = FW_LOADER_NO_CACHE;
1413  return 0;
1414 }
1415 
1416 static struct syscore_ops fw_syscore_ops = {
1417  .suspend = fw_suspend,
1418 };
1419 #else
1420 static int fw_cache_piggyback_on_request(const char *name)
1421 {
1422  return 0;
1423 }
1424 #endif
1425 
1426 static void __init fw_cache_init(void)
1427 {
1428  spin_lock_init(&fw_cache.lock);
1429  INIT_LIST_HEAD(&fw_cache.head);
1430  fw_cache.state = FW_LOADER_NO_CACHE;
1431 
1432 #ifdef CONFIG_PM_SLEEP
1433  spin_lock_init(&fw_cache.name_lock);
1434  INIT_LIST_HEAD(&fw_cache.fw_names);
1435 
1436  INIT_DELAYED_WORK(&fw_cache.work,
1437  device_uncache_fw_images_work);
1438 
1439  fw_cache.pm_notify.notifier_call = fw_pm_notify;
1440  register_pm_notifier(&fw_cache.pm_notify);
1441 
1442  register_syscore_ops(&fw_syscore_ops);
1443 #endif
1444 }
1445 
1446 static int __init firmware_class_init(void)
1447 {
1448  fw_cache_init();
1449  return class_register(&firmware_class);
1450 }
1451 
1452 static void __exit firmware_class_exit(void)
1453 {
1454 #ifdef CONFIG_PM_SLEEP
1455  unregister_syscore_ops(&fw_syscore_ops);
1456  unregister_pm_notifier(&fw_cache.pm_notify);
1457 #endif
1458  class_unregister(&firmware_class);
1459 }
1460 
1461 fs_initcall(firmware_class_init);
1462 module_exit(firmware_class_exit);
1463