Linux Kernel 3.7.1
pagewalk.c
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>

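/*
 * Walk the pte slots spanned by one pmd entry, calling walk->pte_entry
 * on each pte in [addr, end). The caller guarantees that the pmd maps
 * a pte table (any transparent huge pmd has already been split).
 */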
static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        pte_t *pte;
        int err = 0;

        pte = pte_offset_map(pmd, addr);
        for (;;) {
                err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
                if (err)
                        break;
                addr += PAGE_SIZE;
                if (addr == end)
                        break;
                pte++;
        }

        pte_unmap(pte);
        return err;
}

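/*
 * Walk the pmd entries under one pud in [addr, end): report unmapped
 * ranges through walk->pte_hole, hand populated entries to
 * walk->pmd_entry, and descend into walk_pte_range() when a ->pte_entry
 * callback is registered.
 */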
static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        pmd_t *pmd;
        unsigned long next;
        int err = 0;

        pmd = pmd_offset(pud, addr);
        do {
again:
                next = pmd_addr_end(addr, end);
                if (pmd_none(*pmd)) {
                        if (walk->pte_hole)
                                err = walk->pte_hole(addr, next, walk);
                        if (err)
                                break;
                        continue;
                }
                /*
                 * This implies that each ->pmd_entry() handler
                 * needs to know about pmd_trans_huge() pmds
                 */
                if (walk->pmd_entry)
                        err = walk->pmd_entry(pmd, addr, next, walk);
                if (err)
                        break;

                /*
                 * Check this here so we only break down trans_huge
                 * pages when we _need_ to
                 */
                if (!walk->pte_entry)
                        continue;

                split_huge_page_pmd(walk->mm, pmd);
                if (pmd_none_or_trans_huge_or_clear_bad(pmd))
                        goto again;
                err = walk_pte_range(pmd, addr, next, walk);
                if (err)
                        break;
        } while (pmd++, addr = next, addr != end);

        return err;
}

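/*
 * Walk the pud entries under one pgd in [addr, end): report holes via
 * walk->pte_hole, call walk->pud_entry on populated entries, and
 * descend into walk_pmd_range() when a lower-level callback is set.
 */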
static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        pud_t *pud;
        unsigned long next;
        int err = 0;

        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_none_or_clear_bad(pud)) {
                        if (walk->pte_hole)
                                err = walk->pte_hole(addr, next, walk);
                        if (err)
                                break;
                        continue;
                }
                if (walk->pud_entry)
                        err = walk->pud_entry(pud, addr, next, walk);
                if (!err && (walk->pmd_entry || walk->pte_entry))
                        err = walk_pmd_range(pud, addr, next, walk);
                if (err)
                        break;
        } while (pud++, addr = next, addr != end);

        return err;
}

#ifdef CONFIG_HUGETLB_PAGE
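/* Clamp the walk to the end of the current huge page, or to 'end'. */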
static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,
                                       unsigned long end)
{
        unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h);
        return boundary < end ? boundary : end;
}

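/*
 * Walk a hugetlb vma one huge page at a time, calling
 * walk->hugetlb_entry on each huge pte that exists in [addr, end).
 */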
static int walk_hugetlb_range(struct vm_area_struct *vma,
                              unsigned long addr, unsigned long end,
                              struct mm_walk *walk)
{
        struct hstate *h = hstate_vma(vma);
        unsigned long next;
        unsigned long hmask = huge_page_mask(h);
        pte_t *pte;
        int err = 0;

        do {
                next = hugetlb_entry_end(h, addr, end);
                pte = huge_pte_offset(walk->mm, addr & hmask);
                if (pte && walk->hugetlb_entry)
                        err = walk->hugetlb_entry(pte, hmask, addr, next, walk);
                if (err)
                        return err;
        } while (addr = next, addr != end);

        return 0;
}

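/*
 * Return the hugetlb vma covering addr, or NULL if the walk has no
 * ->hugetlb_entry callback or addr does not fall in a hugetlb vma.
 */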
static struct vm_area_struct *hugetlb_vma(unsigned long addr, struct mm_walk *walk)
{
        struct vm_area_struct *vma;

        /* We don't need vma lookup at all. */
        if (!walk->hugetlb_entry)
                return NULL;

        VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
        vma = find_vma(walk->mm, addr);
        if (vma && vma->vm_start <= addr && is_vm_hugetlb_page(vma))
                return vma;

        return NULL;
}

#else /* CONFIG_HUGETLB_PAGE */

/* Stubs used when hugetlb support is not configured. */
static struct vm_area_struct *hugetlb_vma(unsigned long addr, struct mm_walk *walk)
{
        return NULL;
}

static int walk_hugetlb_range(struct vm_area_struct *vma,
                              unsigned long addr, unsigned long end,
                              struct mm_walk *walk)
{
        return 0;
}

#endif /* CONFIG_HUGETLB_PAGE */

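/*
 * walk_page_range - walk the page tables of walk->mm in [addr, end),
 * calling the callbacks set in *walk at each level: pgd_entry,
 * pud_entry, pmd_entry and pte_entry for populated entries, pte_hole
 * for unmapped gaps, and hugetlb_entry for hugetlb vmas. A non-zero
 * return from any callback aborts the walk and is propagated to the
 * caller; otherwise 0 is returned.
 */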
int walk_page_range(unsigned long addr, unsigned long end,
                    struct mm_walk *walk)
{
        pgd_t *pgd;
        unsigned long next;
        int err = 0;

        if (addr >= end)
                return err;

        if (!walk->mm)
                return -EINVAL;

        pgd = pgd_offset(walk->mm, addr);
        do {
                struct vm_area_struct *vma;

                next = pgd_addr_end(addr, end);

                /*
                 * Handle hugetlb vmas individually, because the page
                 * table walk for hugetlb pages is architecture-dependent
                 * and can't be handled in the same manner as normal pages.
                 */
                vma = hugetlb_vma(addr, walk);
                if (vma) {
                        if (vma->vm_end < next)
                                next = vma->vm_end;
                        /*
                         * Hugepages are tightly coupled with their vma,
                         * so walk the hugetlb entries within this vma.
                         */
                        err = walk_hugetlb_range(vma, addr, next, walk);
                        if (err)
                                break;
                        pgd = pgd_offset(walk->mm, next);
                        continue;
                }

                if (pgd_none_or_clear_bad(pgd)) {
                        if (walk->pte_hole)
                                err = walk->pte_hole(addr, next, walk);
                        if (err)
                                break;
                        pgd++;
                        continue;
                }
                if (walk->pgd_entry)
                        err = walk->pgd_entry(pgd, addr, next, walk);
                if (!err &&
                    (walk->pud_entry || walk->pmd_entry || walk->pte_entry))
                        err = walk_pud_range(pgd, addr, next, walk);
                if (err)
                        break;
                pgd++;
        } while (addr = next, addr != end);

        return err;
}
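
A minimal usage sketch, not part of pagewalk.c: assuming the 3.7-era struct mm_walk declared in include/linux/mm.h, a caller fills in only the callbacks it needs, leaves the rest NULL, and invokes walk_page_range() with mmap_sem held for read. The names count_pte and count_present_ptes below are hypothetical.

#include <linux/mm.h>
#include <linux/rwsem.h>

/* Hypothetical ->pte_entry callback: count ptes that map a present page. */
static int count_pte(pte_t *pte, unsigned long addr,
                     unsigned long next, struct mm_walk *walk)
{
        unsigned long *count = walk->private;

        if (pte_present(*pte))
                (*count)++;
        return 0;       /* a non-zero return would abort the walk */
}

/* Hypothetical helper: count present ptes in [start, end) of mm. */
static unsigned long count_present_ptes(struct mm_struct *mm,
                                        unsigned long start, unsigned long end)
{
        unsigned long count = 0;
        struct mm_walk walk = {
                .pte_entry      = count_pte,
                .mm             = mm,
                .private        = &count,
        };

        down_read(&mm->mmap_sem);
        walk_page_range(start, end, &walk);
        up_read(&mm->mmap_sem);
        return count;
}

Because only ->pte_entry is set here, walk_pmd_range() will split any transparent huge pmd before descending, as the "break down trans_huge pages when we _need_ to" comment in that function notes.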