Linux Kernel 3.7.1
hibernate_32.c
/*
 * Hibernation support specific for i386 - temporary page tables
 *
 * Distribute under GPLv2
 *
 * Copyright (c) 2006 Rafael J. Wysocki <[email protected]>
 */

#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/bootmem.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmzone.h>

/* Defined in hibernate_asm_32.S */
extern int restore_image(void);

/* References to section boundaries */
extern const void __nosave_begin, __nosave_end;

/* Pointer to the temporary resume page tables */
pgd_t *resume_pg_dir;

/* The following three functions are based on the analogous code in
 * arch/x86/mm/init_32.c
 */

/*
 * Create a middle page table on a resume-safe page and put a pointer to it in
 * the given global directory entry.  This only returns the gd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t *resume_one_md_table_init(pgd_t *pgd)
{
        pud_t *pud;
        pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
        pmd_table = (pmd_t *)get_safe_page(GFP_ATOMIC);
        if (!pmd_table)
                return NULL;

        set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
        pud = pud_offset(pgd, 0);

        BUG_ON(pmd_table != pmd_offset(pud, 0));
#else
        pud = pud_offset(pgd, 0);
        pmd_table = pmd_offset(pud, 0);
#endif

        return pmd_table;
}
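
/*
 * Why the #ifdef above: with PAE enabled, the pgd level on 32-bit x86 is the
 * four-entry page-directory-pointer table, so the pmd is a real 4 KB table
 * that has to come from a resume-safe page.  Without PAE the kernel uses
 * two-level paging and the pud/pmd levels are folded at compile time, so the
 * "pmd table" is simply the pgd entry itself and nothing needs to be
 * allocated.
 */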

/*
 * Create a page table on a resume-safe page and place a pointer to it in
 * a middle page directory entry.
 */
static pte_t *resume_one_page_table_init(pmd_t *pmd)
{
        if (pmd_none(*pmd)) {
                pte_t *page_table = (pte_t *)get_safe_page(GFP_ATOMIC);
                if (!page_table)
                        return NULL;

                set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));

                BUG_ON(page_table != pte_offset_kernel(pmd, 0));

                return page_table;
        }

        return pte_offset_kernel(pmd, 0);
}
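
/*
 * Illustrative sketch (not from the original file): how the two helpers above
 * chain together for a single lowmem pfn.  resume_physical_mapping_init()
 * below performs the same pgd -> pmd -> pte walk for all of lowmem; this
 * hypothetical helper only makes the ordering explicit.  Error handling and
 * the PSE large-page case are elided.
 */
static int __maybe_unused resume_map_one_pfn_sketch(pgd_t *pgd_base,
                                                    unsigned long pfn)
{
        unsigned long addr = PAGE_OFFSET + (pfn << PAGE_SHIFT);
        pmd_t *pmd;
        pte_t *pte;

        /* Locate (and, under PAE, allocate) the pmd table for this address. */
        pmd = resume_one_md_table_init(pgd_base + pgd_index(addr));
        if (!pmd)
                return -ENOMEM;
        pmd += pmd_index(addr);

        /* Allocate or reuse the pte table hanging off that pmd entry. */
        pte = resume_one_page_table_init(pmd);
        if (!pte)
                return -ENOMEM;

        /* Finally map the page frame at its PAGE_OFFSET-based address. */
        set_pte(pte + pte_index(addr), pfn_pte(pfn, PAGE_KERNEL_EXEC));
        return 0;
}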

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET.  The page tables are allocated out of resume-safe pages.
 */
static int resume_physical_mapping_init(pgd_t *pgd_base)
{
        unsigned long pfn;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        int pgd_idx, pmd_idx;

        pgd_idx = pgd_index(PAGE_OFFSET);
        pgd = pgd_base + pgd_idx;
        pfn = 0;

        for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
                pmd = resume_one_md_table_init(pgd);
                if (!pmd)
                        return -ENOMEM;

                if (pfn >= max_low_pfn)
                        continue;

                for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD; pmd++, pmd_idx++) {
                        if (pfn >= max_low_pfn)
                                break;

                        /* Map with big pages if possible, otherwise create
                         * normal page tables.
                         * NOTE: We can mark everything as executable here
                         */
                        if (cpu_has_pse) {
                                set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
                                pfn += PTRS_PER_PTE;
                        } else {
                                pte_t *max_pte;

                                pte = resume_one_page_table_init(pmd);
                                if (!pte)
                                        return -ENOMEM;

                                max_pte = pte + PTRS_PER_PTE;
                                for (; pte < max_pte; pte++, pfn++) {
                                        if (pfn >= max_low_pfn)
                                                break;

                                        set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
                                }
                        }
                }
        }

        resume_map_numa_kva(pgd_base);

        return 0;
}
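
/*
 * Back-of-the-envelope sizing for the mapping built above (illustrative
 * numbers, assuming the common 896 MB lowmem split and 4 KB pages):
 *
 *   max_low_pfn                  = 896 MB / 4 KB   = 229376 pfns
 *   pte entries per table page   = PTRS_PER_PTE    = 1024 (non-PAE)
 *   pte pages needed without PSE = 229376 / 1024   = 224 pages
 *   pte pages needed with PSE    = 0 (lowmem mapped with large pages only)
 *
 * plus, under PAE, at most one pmd page per covered pgd entry, so the whole
 * temporary mapping comes out of a small number of resume-safe pages.
 */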

static inline void resume_init_first_level_page_table(pgd_t *pg_dir)
{
#ifdef CONFIG_X86_PAE
        int i;

        /* Init entries of the first-level page table to the zero page */
        for (i = 0; i < PTRS_PER_PGD; i++)
                set_pgd(pg_dir + i,
                        __pgd(__pa(empty_zero_page) | _PAGE_PRESENT));
#endif
}

int swsusp_arch_resume(void)
{
        int error;

        resume_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
        if (!resume_pg_dir)
                return -ENOMEM;

        resume_init_first_level_page_table(resume_pg_dir);
        error = resume_physical_mapping_init(resume_pg_dir);
        if (error)
                return error;

        /* We have got enough memory and from now on we cannot recover */
        restore_image();
        return 0;
}
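
/*
 * For context, a heavily simplified sketch of the caller side (hypothetical
 * helper name; the real call sits in the hibernation core and also handles
 * device freezing, nonboot CPUs and error recovery):
 */
static int __maybe_unused resume_caller_sketch(void)
{
        int error;

        /* At this point the image has been read into resume-safe pages. */
        error = swsusp_arch_resume();   /* does not return on success */

        /* Reached only if building the temporary page tables failed. */
        return error;
}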

/*
 * pfn_is_nosave - check if given pfn is in the 'nosave' section
 */

int pfn_is_nosave(unsigned long pfn)
{
        unsigned long nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT;
        unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT;
        return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}
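
/*
 * Worked example with made-up addresses: if __nosave_begin resolves to
 * physical 0x01c03240 and __nosave_end to 0x01c05120, then with 4 KB pages
 *
 *   nosave_begin_pfn = 0x01c03240 >> 12             = 0x1c03
 *   nosave_end_pfn   = PAGE_ALIGN(0x01c05120) >> 12 = 0x1c06
 *
 * so pfns 0x1c03, 0x1c04 and 0x1c05 are reported as nosave and their
 * contents are left out of the hibernation image.
 */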