/*
 * linux/kernel/resource.c
 *
 * Copyright (C) 1999 Linus Torvalds
 * Copyright (C) 1999 Martin Mares <[email protected]>
 *
 * Arbitrary resource management.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/pfn.h>
#include <asm/io.h>

struct resource ioport_resource = {
        .name = "PCI IO",
        .start = 0,
        .end = IO_SPACE_LIMIT,
        .flags = IORESOURCE_IO,
};
EXPORT_SYMBOL(ioport_resource);

struct resource iomem_resource = {
        .name = "PCI mem",
        .start = 0,
        .end = -1,
        .flags = IORESOURCE_MEM,
};
EXPORT_SYMBOL(iomem_resource);

/* constraints to be met while allocating resources */
struct resource_constraint {
        resource_size_t min, max, align;
        resource_size_t (*alignf)(void *, const struct resource *,
                        resource_size_t, resource_size_t);
        void *alignf_data;
};

static DEFINE_RWLOCK(resource_lock);

static void *r_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct resource *p = v;
        (*pos)++;
        if (p->child)
                return p->child;
        while (!p->sibling && p->parent)
                p = p->parent;
        return p->sibling;
}
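
/*
 * Illustration (not from the original source): given the tree
 *
 *      root
 *        +-- A
 *        |     +-- A1
 *        +-- B
 *
 * repeated r_next() calls starting at A visit A1, then B: descend to a
 * child first, otherwise take the next sibling, climbing back toward the
 * root until a sibling is found.
 */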

#ifdef CONFIG_PROC_FS

enum { MAX_IORES_LEVEL = 5 };

static void *r_start(struct seq_file *m, loff_t *pos)
        __acquires(resource_lock)
{
        struct resource *p = m->private;
        loff_t l = 0;
        read_lock(&resource_lock);
        for (p = p->child; p && l < *pos; p = r_next(m, p, &l))
                ;
        return p;
}

static void r_stop(struct seq_file *m, void *v)
        __releases(resource_lock)
{
        read_unlock(&resource_lock);
}

static int r_show(struct seq_file *m, void *v)
{
        struct resource *root = m->private;
        struct resource *r = v, *p;
        int width = root->end < 0x10000 ? 4 : 8;
        int depth;

        for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
                if (p->parent == root)
                        break;
        seq_printf(m, "%*s%0*llx-%0*llx : %s\n",
                        depth * 2, "",
                        width, (unsigned long long) r->start,
                        width, (unsigned long long) r->end,
                        r->name ? r->name : "<BAD>");
        return 0;
}

static const struct seq_operations resource_op = {
        .start = r_start,
        .next = r_next,
        .stop = r_stop,
        .show = r_show,
};

static int ioports_open(struct inode *inode, struct file *file)
{
        int res = seq_open(file, &resource_op);
        if (!res) {
                struct seq_file *m = file->private_data;
                m->private = &ioport_resource;
        }
        return res;
}

static int iomem_open(struct inode *inode, struct file *file)
{
        int res = seq_open(file, &resource_op);
        if (!res) {
                struct seq_file *m = file->private_data;
                m->private = &iomem_resource;
        }
        return res;
}

static const struct file_operations proc_ioports_operations = {
        .open = ioports_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
};

static const struct file_operations proc_iomem_operations = {
        .open = iomem_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
};

static int __init ioresources_init(void)
{
        proc_create("ioports", 0, NULL, &proc_ioports_operations);
        proc_create("iomem", 0, NULL, &proc_iomem_operations);
        return 0;
}
__initcall(ioresources_init);

#endif /* CONFIG_PROC_FS */

/* Return the conflict entry if you can't request it */
static struct resource * __request_resource(struct resource *root, struct resource *new)
{
        resource_size_t start = new->start;
        resource_size_t end = new->end;
        struct resource *tmp, **p;

        if (end < start)
                return root;
        if (start < root->start)
                return root;
        if (end > root->end)
                return root;
        p = &root->child;
        for (;;) {
                tmp = *p;
                if (!tmp || tmp->start > end) {
                        new->sibling = tmp;
                        *p = new;
                        new->parent = root;
                        return NULL;
                }
                p = &tmp->sibling;
                if (tmp->end < start)
                        continue;
                return tmp;
        }
}
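
/*
 * Note (not in the original source): the child list is kept sorted by
 * address, so the loop above can stop at the first sibling starting
 * beyond 'end'; anything overlapping [start, end] before that point is
 * returned as the conflict.
 */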

static int __release_resource(struct resource *old)
{
        struct resource *tmp, **p;

        p = &old->parent->child;
        for (;;) {
                tmp = *p;
                if (!tmp)
                        break;
                if (tmp == old) {
                        *p = tmp->sibling;
                        old->parent = NULL;
                        return 0;
                }
                p = &tmp->sibling;
        }
        return -EINVAL;
}

static void __release_child_resources(struct resource *r)
{
        struct resource *tmp, *p;
        resource_size_t size;

        p = r->child;
        r->child = NULL;
        while (p) {
                tmp = p;
                p = p->sibling;

                tmp->parent = NULL;
                tmp->sibling = NULL;
                __release_child_resources(tmp);

                printk(KERN_DEBUG "release child resource %pR\n", tmp);
                /* need to restore size, and keep flags */
                size = resource_size(tmp);
                tmp->start = 0;
                tmp->end = size - 1;
        }
}

void release_child_resources(struct resource *r)
{
        write_lock(&resource_lock);
        __release_child_resources(r);
        write_unlock(&resource_lock);
}

/**
 * request_resource_conflict - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns 0 for success, conflict resource on error.
 */
struct resource *request_resource_conflict(struct resource *root, struct resource *new)
{
        struct resource *conflict;

        write_lock(&resource_lock);
        conflict = __request_resource(root, new);
        write_unlock(&resource_lock);
        return conflict;
}

/**
 * request_resource - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns 0 for success, negative error code on error.
 */
int request_resource(struct resource *root, struct resource *new)
{
        struct resource *conflict;

        conflict = request_resource_conflict(root, new);
        return conflict ? -EBUSY : 0;
}

EXPORT_SYMBOL(request_resource);

/**
 * release_resource - release a previously reserved resource
 * @old: resource pointer
 */
int release_resource(struct resource *old)
{
        int retval;

        write_lock(&resource_lock);
        retval = __release_resource(old);
        write_unlock(&resource_lock);
        return retval;
}

EXPORT_SYMBOL(release_resource);

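/*
 * Usage sketch (illustrative, not part of this file): a driver claiming a
 * fixed MMIO window with request_resource() and dropping it again.  The
 * "foo" name and addresses are hypothetical.
 */
static struct resource foo_mmio = {
        .name = "foo-mmio",
        .start = 0xfed00000,
        .end = 0xfed00fff,
        .flags = IORESOURCE_MEM,
};

static int __init foo_init(void)
{
        /* -EBUSY here means somebody else already owns part of the range */
        return request_resource(&iomem_resource, &foo_mmio);
}

static void __exit foo_exit(void)
{
        release_resource(&foo_mmio);
}
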
#if !defined(CONFIG_ARCH_HAS_WALK_MEMORY)
/*
 * Finds the lowest memory resource that exists within [res->start, res->end).
 * The caller must specify res->start, res->end, res->flags and "name".
 * If found, returns 0 and res is overwritten; if not found, returns -1.
 */
static int find_next_system_ram(struct resource *res, char *name)
{
        resource_size_t start, end;
        struct resource *p;

        BUG_ON(!res);

        start = res->start;
        end = res->end;
        BUG_ON(start >= end);

        read_lock(&resource_lock);
        for (p = iomem_resource.child; p; p = p->sibling) {
                /* system ram is just marked as IORESOURCE_MEM */
                if (p->flags != res->flags)
                        continue;
                if (name && strcmp(p->name, name))
                        continue;
                if (p->start > end) {
                        p = NULL;
                        break;
                }
                if ((p->end >= start) && (p->start < end))
                        break;
        }
        read_unlock(&resource_lock);
        if (!p)
                return -1;
        /* copy data */
        if (res->start < p->start)
                res->start = p->start;
        if (res->end > p->end)
                res->end = p->end;
        return 0;
}

/*
 * This function calls the callback against all memory ranges of "System RAM"
 * which are marked as IORESOURCE_MEM and IORESOURCE_BUSY.
 * Now, this function is only for "System RAM".
 */
int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
                void *arg, int (*func)(unsigned long, unsigned long, void *))
{
        struct resource res;
        unsigned long pfn, end_pfn;
        u64 orig_end;
        int ret = -1;

        res.start = (u64) start_pfn << PAGE_SHIFT;
        res.end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
        res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
        orig_end = res.end;
        while ((res.start < res.end) &&
                (find_next_system_ram(&res, "System RAM") >= 0)) {
                pfn = (res.start + PAGE_SIZE - 1) >> PAGE_SHIFT;
                end_pfn = (res.end + 1) >> PAGE_SHIFT;
                if (end_pfn > pfn)
                        ret = (*func)(pfn, end_pfn - pfn, arg);
                if (ret)
                        break;
                res.start = res.end + 1;
                res.end = orig_end;
        }
        return ret;
}

#endif

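/*
 * Usage sketch (illustrative, not part of this file): counting "System RAM"
 * pages in a PFN range via walk_system_ram_range().  The helper names are
 * hypothetical; the callback signature matches the one above.
 */
static int foo_count_ram(unsigned long pfn, unsigned long nr_pages, void *arg)
{
        unsigned long *total = arg;

        *total += nr_pages;
        return 0;       /* returning non-zero would abort the walk */
}

static unsigned long foo_ram_pages(unsigned long start_pfn, unsigned long nr_pages)
{
        unsigned long total = 0;

        walk_system_ram_range(start_pfn, nr_pages, &total, foo_count_ram);
        return total;
}
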
static int __is_ram(unsigned long pfn, unsigned long nr_pages, void *arg)
{
        return 1;
}
/*
 * This generic page_is_ram() returns true if the specified address is
 * registered as "System RAM" in the iomem_resource list.
 */
int __weak page_is_ram(unsigned long pfn)
{
        return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1;
}

void __weak arch_remove_reservations(struct resource *avail)
{
}

static resource_size_t simple_align_resource(void *data,
                                             const struct resource *avail,
                                             resource_size_t size,
                                             resource_size_t align)
{
        return avail->start;
}

static void resource_clip(struct resource *res, resource_size_t min,
                          resource_size_t max)
{
        if (res->start < min)
                res->start = min;
        if (res->end > max)
                res->end = max;
}

static bool resource_contains(struct resource *res1, struct resource *res2)
{
        return res1->start <= res2->start && res1->end >= res2->end;
}

/*
 * Find an empty slot in the resource tree with the given range and
 * alignment constraints.
 */
static int __find_resource(struct resource *root, struct resource *old,
                         struct resource *new,
                         resource_size_t size,
                         struct resource_constraint *constraint)
{
        struct resource *this = root->child;
        struct resource tmp = *new, avail, alloc;

        tmp.flags = new->flags;
        tmp.start = root->start;
        /*
         * Skip past an allocated resource that starts at 0, since the assignment
         * of this->start - 1 to tmp->end below would cause an underflow.
         */
        if (this && this->start == root->start) {
                tmp.start = (this == old) ? old->start : this->end + 1;
                this = this->sibling;
        }
        for (;;) {
                if (this)
                        tmp.end = (this == old) ? this->end : this->start - 1;
                else
                        tmp.end = root->end;

                if (tmp.end < tmp.start)
                        goto next;

                resource_clip(&tmp, constraint->min, constraint->max);
                arch_remove_reservations(&tmp);

                /* Check for overflow after ALIGN() */
                avail = *new;
                avail.start = ALIGN(tmp.start, constraint->align);
                avail.end = tmp.end;
                if (avail.start >= tmp.start) {
                        alloc.start = constraint->alignf(constraint->alignf_data, &avail,
                                        size, constraint->align);
                        alloc.end = alloc.start + size - 1;
                        if (resource_contains(&avail, &alloc)) {
                                new->start = alloc.start;
                                new->end = alloc.end;
                                return 0;
                        }
                }

next:           if (!this || this->end == root->end)
                        break;

                if (this != old)
                        tmp.start = this->end + 1;
                this = this->sibling;
        }
        return -EBUSY;
}

/*
 * Find an empty slot in the resource tree given range and alignment.
 */
static int find_resource(struct resource *root, struct resource *new,
                        resource_size_t size,
                        struct resource_constraint *constraint)
{
        return __find_resource(root, NULL, new, size, constraint);
}

/**
 * reallocate_resource - allocate a slot in the resource tree given range & alignment.
 *	The resource will be relocated if the new size cannot be reallocated in the
 *	current location.
 *
 * @root: root resource descriptor
 * @old: resource descriptor desired by caller
 * @newsize: new size of the resource descriptor
 * @constraint: the size and alignment constraints to be met.
 */
int reallocate_resource(struct resource *root, struct resource *old,
                        resource_size_t newsize,
                        struct resource_constraint *constraint)
{
        int err = 0;
        struct resource new = *old;
        struct resource *conflict;

        write_lock(&resource_lock);

        if ((err = __find_resource(root, old, &new, newsize, constraint)))
                goto out;

        if (resource_contains(&new, old)) {
                old->start = new.start;
                old->end = new.end;
                goto out;
        }

        if (old->child) {
                err = -EBUSY;
                goto out;
        }

        if (resource_contains(old, &new)) {
                old->start = new.start;
                old->end = new.end;
        } else {
                __release_resource(old);
                *old = new;
                conflict = __request_resource(root, old);
                BUG_ON(conflict);
        }
out:
        write_unlock(&resource_lock);
        return err;
}

/**
 * allocate_resource - allocate empty slot in the resource tree given range & alignment.
 *	The resource will be reallocated with a new size if it was already allocated
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 * @size: requested resource region size
 * @min: minimum boundary to allocate
 * @max: maximum boundary to allocate
 * @align: alignment requested, in bytes
 * @alignf: alignment function, optional, called if not NULL
 * @alignf_data: arbitrary data to pass to the @alignf function
 */
int allocate_resource(struct resource *root, struct resource *new,
                      resource_size_t size, resource_size_t min,
                      resource_size_t max, resource_size_t align,
                      resource_size_t (*alignf)(void *,
                                                const struct resource *,
                                                resource_size_t,
                                                resource_size_t),
                      void *alignf_data)
{
        int err;
        struct resource_constraint constraint;

        if (!alignf)
                alignf = simple_align_resource;

        constraint.min = min;
        constraint.max = max;
        constraint.align = align;
        constraint.alignf = alignf;
        constraint.alignf_data = alignf_data;

        if (new->parent) {
                /* resource is already allocated, try reallocating with
                   the new constraints */
                return reallocate_resource(root, new, size, &constraint);
        }

        write_lock(&resource_lock);
        err = find_resource(root, new, size, &constraint);
        if (err >= 0 && __request_resource(root, new))
                err = -EBUSY;
        write_unlock(&resource_lock);
        return err;
}

EXPORT_SYMBOL(allocate_resource);

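/*
 * Usage sketch (illustrative, not part of this file): asking the allocator
 * for any free, 4 KiB-aligned, 4 KiB window of I/O memory below 4 GiB.
 * The "foo" resource is hypothetical; passing a NULL alignf falls back to
 * simple_align_resource() above.
 */
static struct resource foo_window = {
        .name = "foo-window",
        .flags = IORESOURCE_MEM,
};

static int foo_alloc_window(void)
{
        return allocate_resource(&iomem_resource, &foo_window,
                                 0x1000,                /* size */
                                 0, 0xffffffff,         /* min, max */
                                 0x1000,                /* align */
                                 NULL, NULL);           /* alignf, alignf_data */
}
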
/**
 * lookup_resource - find an existing resource by a resource start address
 * @root: root resource descriptor
 * @start: resource start address
 *
 * Returns a pointer to the resource if found, NULL otherwise
 */
struct resource *lookup_resource(struct resource *root, resource_size_t start)
{
        struct resource *res;

        read_lock(&resource_lock);
        for (res = root->child; res; res = res->sibling) {
                if (res->start == start)
                        break;
        }
        read_unlock(&resource_lock);

        return res;
}

/*
 * Insert a resource into the resource tree. If successful, return NULL,
 * otherwise return the conflicting resource (compare to __request_resource())
 */
static struct resource * __insert_resource(struct resource *parent, struct resource *new)
{
        struct resource *first, *next;

        for (;; parent = first) {
                first = __request_resource(parent, new);
                if (!first)
                        return first;

                if (first == parent)
                        return first;
                if (WARN_ON(first == new))      /* duplicated insertion */
                        return first;

                if ((first->start > new->start) || (first->end < new->end))
                        break;
                if ((first->start == new->start) && (first->end == new->end))
                        break;
        }

        for (next = first; ; next = next->sibling) {
                /* Partial overlap? Bad, and unfixable */
                if (next->start < new->start || next->end > new->end)
                        return next;
                if (!next->sibling)
                        break;
                if (next->sibling->start > new->end)
                        break;
        }

        new->parent = parent;
        new->sibling = next->sibling;
        new->child = first;

        next->sibling = NULL;
        for (next = first; next; next = next->sibling)
                next->parent = new;

        if (parent->child == first) {
                parent->child = new;
        } else {
                next = parent->child;
                while (next->sibling != first)
                        next = next->sibling;
                next->sibling = new;
        }
        return NULL;
}

/**
 * insert_resource_conflict - Inserts resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns 0 on success, conflict resource if the resource can't be inserted.
 *
 * This function is equivalent to request_resource_conflict when no conflict
 * happens. If a conflict happens, and the conflicting resources
 * entirely fit within the range of the new resource, then the new
 * resource is inserted and the conflicting resources become children of
 * the new resource.
 */
struct resource *insert_resource_conflict(struct resource *parent, struct resource *new)
{
        struct resource *conflict;

        write_lock(&resource_lock);
        conflict = __insert_resource(parent, new);
        write_unlock(&resource_lock);
        return conflict;
}

/**
 * insert_resource - Inserts a resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns 0 on success, -EBUSY if the resource can't be inserted.
 */
int insert_resource(struct resource *parent, struct resource *new)
{
        struct resource *conflict;

        conflict = insert_resource_conflict(parent, new);
        return conflict ? -EBUSY : 0;
}

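/*
 * Usage sketch (illustrative, not part of this file): platform code slotting
 * a firmware-described region into the tree with insert_resource(), which,
 * unlike request_resource(), adopts fully-contained conflicts as children.
 * The region below is hypothetical.
 */
static struct resource foo_fw_region = {
        .name = "foo-firmware",
        .start = 0xe0000000,
        .end = 0xefffffff,
        .flags = IORESOURCE_MEM,
};

static int __init foo_register_fw_region(void)
{
        return insert_resource(&iomem_resource, &foo_fw_region);
}
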
/**
 * insert_resource_expand_to_fit - Insert a resource into the resource tree
 * @root: root resource descriptor
 * @new: new resource to insert
 *
 * Insert a resource into the resource tree, possibly expanding it in order
 * to make it encompass any conflicting resources.
 */
void insert_resource_expand_to_fit(struct resource *root, struct resource *new)
{
        if (new->parent)
                return;

        write_lock(&resource_lock);
        for (;;) {
                struct resource *conflict;

                conflict = __insert_resource(root, new);
                if (!conflict)
                        break;
                if (conflict == root)
                        break;

                /* Ok, expand resource to cover the conflict, then try again .. */
                if (conflict->start < new->start)
                        new->start = conflict->start;
                if (conflict->end > new->end)
                        new->end = conflict->end;

                printk("Expanded resource %s due to conflict with %s\n", new->name, conflict->name);
        }
        write_unlock(&resource_lock);
}

/**
 * adjust_resource - modify a resource's start and size
 * @res: resource to modify
 * @start: new start value
 * @size: new size
 *
 * Given an existing resource, change its start and size to match the
 * arguments.  Returns 0 on success, -EBUSY if it can't fit.
 * Existing children of the resource are assumed to be immutable.
 */
int adjust_resource(struct resource *res, resource_size_t start,
                        resource_size_t size)
{
        struct resource *tmp, *parent = res->parent;
        resource_size_t end = start + size - 1;
        int result = -EBUSY;

        write_lock(&resource_lock);

        if (!parent)
                goto skip;

        if ((start < parent->start) || (end > parent->end))
                goto out;

        if (res->sibling && (res->sibling->start <= end))
                goto out;

        tmp = parent->child;
        if (tmp != res) {
                while (tmp->sibling != res)
                        tmp = tmp->sibling;
                if (start <= tmp->end)
                        goto out;
        }

skip:
        for (tmp = res->child; tmp; tmp = tmp->sibling)
                if ((tmp->start < start) || (tmp->end > end))
                        goto out;

        res->start = start;
        res->end = end;
        result = 0;

 out:
        write_unlock(&resource_lock);
        return result;
}
EXPORT_SYMBOL(adjust_resource);

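/*
 * Usage sketch (illustrative, not part of this file): growing an already
 * inserted resource in place by 'extra' bytes; adjust_resource() returns
 * -EBUSY if the parent or a sibling is in the way.
 */
static int foo_grow(struct resource *res, resource_size_t extra)
{
        return adjust_resource(res, res->start, resource_size(res) + extra);
}
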
static void __init __reserve_region_with_split(struct resource *root,
                resource_size_t start, resource_size_t end,
                const char *name)
{
        struct resource *parent = root;
        struct resource *conflict;
        struct resource *res = kzalloc(sizeof(*res), GFP_ATOMIC);
        struct resource *next_res = NULL;

        if (!res)
                return;

        res->name = name;
        res->start = start;
        res->end = end;
        res->flags = IORESOURCE_BUSY;

        while (1) {

                conflict = __request_resource(parent, res);
                if (!conflict) {
                        if (!next_res)
                                break;
                        res = next_res;
                        next_res = NULL;
                        continue;
                }

                /* conflict covered whole area */
                if (conflict->start <= res->start &&
                                conflict->end >= res->end) {
                        kfree(res);
                        WARN_ON(next_res);
                        break;
                }

                /* failed, split and try again */
                if (conflict->start > res->start) {
                        end = res->end;
                        res->end = conflict->start - 1;
                        if (conflict->end < end) {
                                next_res = kzalloc(sizeof(*next_res),
                                                GFP_ATOMIC);
                                if (!next_res) {
                                        kfree(res);
                                        break;
                                }
                                next_res->name = name;
                                next_res->start = conflict->end + 1;
                                next_res->end = end;
                                next_res->flags = IORESOURCE_BUSY;
                        }
                } else {
                        res->start = conflict->end + 1;
                }
        }

}

void __init reserve_region_with_split(struct resource *root,
                resource_size_t start, resource_size_t end,
                const char *name)
{
        int abort = 0;

        write_lock(&resource_lock);
        if (root->start > start || root->end < end) {
                pr_err("requested range [0x%llx-0x%llx] not in root %pr\n",
                       (unsigned long long)start, (unsigned long long)end,
                       root);
                if (start > root->end || end < root->start)
                        abort = 1;
                else {
                        if (end > root->end)
                                end = root->end;
                        if (start < root->start)
                                start = root->start;
                        pr_err("fixing request to [0x%llx-0x%llx]\n",
                               (unsigned long long)start,
                               (unsigned long long)end);
                }
                dump_stack();
        }
        if (!abort)
                __reserve_region_with_split(root, start, end, name);
        write_unlock(&resource_lock);
}

/**
 * resource_alignment - calculate resource's alignment
 * @res: resource pointer
 *
 * Returns alignment on success, 0 (invalid alignment) on failure.
 */
resource_size_t resource_alignment(struct resource *res)
{
        switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) {
        case IORESOURCE_SIZEALIGN:
                return resource_size(res);
        case IORESOURCE_STARTALIGN:
                return res->start;
        default:
                return 0;
        }
}

/*
 * This is compatibility stuff for IO resources.
 *
 * Note how this, unlike the above, knows about
 * the IO flag meanings (busy etc).
 *
 * request_region creates a new busy region.
 *
 * check_region returns non-zero if the area is already busy.
 *
 * release_region releases a matching busy region.
 */

static DECLARE_WAIT_QUEUE_HEAD(muxed_resource_wait);

/**
 * __request_region - create a new busy resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 * @name: reserving caller's ID string
 * @flags: IO resource flags
 */
struct resource * __request_region(struct resource *parent,
                                   resource_size_t start, resource_size_t n,
                                   const char *name, int flags)
{
        DECLARE_WAITQUEUE(wait, current);
        struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL);

        if (!res)
                return NULL;

        res->name = name;
        res->start = start;
        res->end = start + n - 1;
        res->flags = IORESOURCE_BUSY;
        res->flags |= flags;

        write_lock(&resource_lock);

        for (;;) {
                struct resource *conflict;

                conflict = __request_resource(parent, res);
                if (!conflict)
                        break;
                if (conflict != parent) {
                        parent = conflict;
                        if (!(conflict->flags & IORESOURCE_BUSY))
                                continue;
                }
                if (conflict->flags & flags & IORESOURCE_MUXED) {
                        add_wait_queue(&muxed_resource_wait, &wait);
                        write_unlock(&resource_lock);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        schedule();
                        remove_wait_queue(&muxed_resource_wait, &wait);
                        write_lock(&resource_lock);
                        continue;
                }
                /* Uhhuh, that didn't work out.. */
                kfree(res);
                res = NULL;
                break;
        }
        write_unlock(&resource_lock);
        return res;
}
EXPORT_SYMBOL(__request_region);

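/*
 * Usage sketch (illustrative, not part of this file): the classic driver
 * pattern built on __request_region() via the request_region() macro from
 * <linux/ioport.h>.  The port base and count are hypothetical.
 */
static int foo_probe_ports(void)
{
        if (!request_region(0x320, 8, "foo"))
                return -EBUSY;          /* ports already busy */
        /* ... program the device through ports 0x320-0x327 ... */
        release_region(0x320, 8);
        return 0;
}
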
/**
 * __check_region - check if a resource region is busy or free
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 *
 * Returns 0 if the region is free at the moment it is checked,
 * returns %-EBUSY if the region is busy.
 *
 * NOTE:
 * This function is deprecated because its use is racy.
 * Even if it returns 0, a subsequent call to request_region()
 * may fail because another driver etc. just allocated the region.
 * Do NOT use it.  It will be removed from the kernel.
 */
int __check_region(struct resource *parent, resource_size_t start,
                        resource_size_t n)
{
        struct resource *res;

        res = __request_region(parent, start, n, "check-region", 0);
        if (!res)
                return -EBUSY;

        release_resource(res);
        kfree(res);
        return 0;
}
EXPORT_SYMBOL(__check_region);

/**
 * __release_region - release a previously reserved resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 *
 * The described resource region must match a currently busy region.
 */
void __release_region(struct resource *parent, resource_size_t start,
                        resource_size_t n)
{
        struct resource **p;
        resource_size_t end;

        p = &parent->child;
        end = start + n - 1;

        write_lock(&resource_lock);

        for (;;) {
                struct resource *res = *p;

                if (!res)
                        break;
                if (res->start <= start && res->end >= end) {
                        if (!(res->flags & IORESOURCE_BUSY)) {
                                p = &res->child;
                                continue;
                        }
                        if (res->start != start || res->end != end)
                                break;
                        *p = res->sibling;
                        write_unlock(&resource_lock);
                        if (res->flags & IORESOURCE_MUXED)
                                wake_up(&muxed_resource_wait);
                        kfree(res);
                        return;
                }
                p = &res->sibling;
        }

        write_unlock(&resource_lock);

        printk(KERN_WARNING "Trying to free nonexistent resource "
                "<%016llx-%016llx>\n", (unsigned long long)start,
                (unsigned long long)end);
}
EXPORT_SYMBOL(__release_region);

/*
 * Managed region resource
 */
struct region_devres {
        struct resource *parent;
        resource_size_t start;
        resource_size_t n;
};

static void devm_region_release(struct device *dev, void *res)
{
        struct region_devres *this = res;

        __release_region(this->parent, this->start, this->n);
}

static int devm_region_match(struct device *dev, void *res, void *match_data)
{
        struct region_devres *this = res, *match = match_data;

        return this->parent == match->parent &&
                this->start == match->start && this->n == match->n;
}

struct resource * __devm_request_region(struct device *dev,
                        struct resource *parent, resource_size_t start,
                        resource_size_t n, const char *name)
{
        struct region_devres *dr = NULL;
        struct resource *res;

        dr = devres_alloc(devm_region_release, sizeof(struct region_devres),
                          GFP_KERNEL);
        if (!dr)
                return NULL;

        dr->parent = parent;
        dr->start = start;
        dr->n = n;

        res = __request_region(parent, start, n, name, 0);
        if (res)
                devres_add(dev, dr);
        else
                devres_free(dr);

        return res;
}
EXPORT_SYMBOL(__devm_request_region);

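/*
 * Usage sketch (illustrative, not part of this file): the managed variant
 * via the devm_request_mem_region() macro; the region is released for us
 * when 'dev' is unbound.  The device pointer, base, and size are
 * hypothetical.
 */
static int foo_probe(struct device *dev)
{
        if (!devm_request_mem_region(dev, 0xfed00000, 0x1000, "foo-regs"))
                return -EBUSY;
        return 0;
}
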
void __devm_release_region(struct device *dev, struct resource *parent,
                           resource_size_t start, resource_size_t n)
{
        struct region_devres match_data = { parent, start, n };

        __release_region(parent, start, n);
        WARN_ON(devres_destroy(dev, devm_region_release, devm_region_match,
                               &match_data));
}
EXPORT_SYMBOL(__devm_release_region);

/*
 * Called from init/main.c to reserve IO ports.
 */
#define MAXRESERVE 4
static int __init reserve_setup(char *str)
{
        static int reserved;
        static struct resource reserve[MAXRESERVE];

        for (;;) {
                unsigned int io_start, io_num;
                int x = reserved;

                if (get_option(&str, &io_start) != 2)
                        break;
                if (get_option(&str, &io_num) == 0)
                        break;
                if (x < MAXRESERVE) {
                        struct resource *res = reserve + x;
                        res->name = "reserved";
                        res->start = io_start;
                        res->end = io_start + io_num - 1;
                        res->flags = IORESOURCE_BUSY;
                        res->child = NULL;
                        if (request_resource(res->start >= 0x10000 ? &iomem_resource : &ioport_resource, res) == 0)
                                reserved = x + 1;
                }
        }
        return 1;
}

__setup("reserve=", reserve_setup);

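/*
 * Example: booting with "reserve=0x320,8" marks eight ports at 0x320 busy
 * before any driver can claim them; a start value of 0x10000 or above is
 * reserved from iomem_resource instead of ioport_resource, per the test
 * above.
 */
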
/*
 * Check if the requested addr and size spans more than any single slot in the
 * iomem resource tree.
 */
int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
{
        struct resource *p = &iomem_resource;
        int err = 0;
        loff_t l;

        read_lock(&resource_lock);
        for (p = p->child; p; p = r_next(NULL, p, &l)) {
                /*
                 * We can probably skip the resources without
                 * IORESOURCE_IO attribute?
                 */
                if (p->start >= addr + size)
                        continue;
                if (p->end < addr)
                        continue;
                if (PFN_DOWN(p->start) <= PFN_DOWN(addr) &&
                    PFN_DOWN(p->end) >= PFN_DOWN(addr + size - 1))
                        continue;
                /*
                 * if a resource is "BUSY", it's not a hardware resource
                 * but a driver mapping of such a resource; we don't want
                 * to warn for those; some drivers legitimately map only
                 * partial hardware resources. (example: vesafb)
                 */
                if (p->flags & IORESOURCE_BUSY)
                        continue;

                printk(KERN_WARNING "resource map sanity check conflict: "
                       "0x%llx 0x%llx 0x%llx 0x%llx %s\n",
                       (unsigned long long)addr,
                       (unsigned long long)(addr + size - 1),
                       (unsigned long long)p->start,
                       (unsigned long long)p->end,
                       p->name);
                err = -1;
                break;
        }
        read_unlock(&resource_lock);

        return err;
}

#ifdef CONFIG_STRICT_DEVMEM
static int strict_iomem_checks = 1;
#else
static int strict_iomem_checks;
#endif

/*
 * Check if an address is reserved in the iomem resource tree.
 * Returns 1 if reserved, 0 if not reserved.
 */
int iomem_is_exclusive(u64 addr)
{
        struct resource *p = &iomem_resource;
        int err = 0;
        loff_t l;
        int size = PAGE_SIZE;

        if (!strict_iomem_checks)
                return 0;

        addr = addr & PAGE_MASK;

        read_lock(&resource_lock);
        for (p = p->child; p; p = r_next(NULL, p, &l)) {
                /*
                 * We can probably skip the resources without
                 * IORESOURCE_IO attribute?
                 */
                if (p->start >= addr + size)
                        break;
                if (p->end < addr)
                        continue;
                if (p->flags & IORESOURCE_BUSY &&
                    p->flags & IORESOURCE_EXCLUSIVE) {
                        err = 1;
                        break;
                }
        }
        read_unlock(&resource_lock);

        return err;
}

static int __init strict_iomem(char *str)
{
        if (strstr(str, "relaxed"))
                strict_iomem_checks = 0;
        if (strstr(str, "strict"))
                strict_iomem_checks = 1;
        return 1;
}

__setup("iomem=", strict_iomem);