7 #include <linux/module.h>
8 #include <linux/sched.h>
16 #include <linux/pci.h>
19 #include <asm/processor.h>
20 #include <asm/tlbflush.h>
21 #include <asm/sections.h>
22 #include <asm/setup.h>
23 #include <asm/uaccess.h>
24 #include <asm/pgalloc.h>
/*
 * Bit flags for cpa_data.flags (tested below as cpa->flags & CPA_PAGES_ARRAY).
 * CPA_PAGES_ARRAY: the request carries an array of struct page pointers
 * rather than a contiguous virtual range.
 * NOTE(review): CPA_ARRAY (used alongside these in cpa.flags tests) is
 * defined outside this excerpt — presumably bit 2; confirm values are
 * distinct bits. CPA_FLUSHTLB's consumer is also not visible here.
 */
#define CPA_FLUSHTLB 1
#define CPA_PAGES_ARRAY 4
58 void update_page_count(
int level,
unsigned long pages)
66 static void split_page_count(
int level)
68 direct_pages_count[
level]--;
76 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
/*
 * No-op stub for configurations excluded by the preceding
 * CONFIG_X86_64/CONFIG_X86_PAE guard: large-page split accounting
 * (direct_pages_count bookkeeping) is compiled out, so callers such as
 * split_large_page() can call this unconditionally.
 */
static inline void split_page_count(int level) { }
95 static inline unsigned long highmap_start_pfn(
void)
100 static inline unsigned long highmap_end_pfn(
void)
107 #ifdef CONFIG_DEBUG_PAGEALLOC
108 # define debug_pagealloc 1
110 # define debug_pagealloc 0
114 within(
unsigned long addr,
unsigned long start,
unsigned long end)
116 return addr >= start && addr <
end;
133 void *
vend = vaddr + size - 1;
148 static void __cpa_flush_all(
void *
arg)
150 unsigned long cache = (
unsigned long)arg;
162 static void cpa_flush_all(
unsigned long cache)
169 static void __cpa_flush_range(
void *arg)
179 static void cpa_flush_range(
unsigned long start,
int numpages,
int cache)
198 for (i = 0, addr = start; i < numpages; i++, addr +=
PAGE_SIZE) {
209 static void cpa_flush_array(
unsigned long *start,
int numpages,
int cache,
213 unsigned long do_wbinvd = cache && numpages >= 1024;
217 on_each_cpu(__cpa_flush_all, (
void *) do_wbinvd, 1);
219 if (!cache || do_wbinvd)
228 for (i = 0; i < numpages; i++) {
262 #ifdef CONFIG_PCI_BIOS
272 if (within(address, (
unsigned long)_text, (
unsigned long)_etext))
279 if (within(pfn,
__pa((
unsigned long)__start_rodata) >>
PAGE_SHIFT,
283 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
293 if (kernel_set_to_readonly &&
294 within(address, (
unsigned long)_text,
295 (
unsigned long)__end_rodata_hpage_align)) {
369 static void __set_pmd_pte(
pte_t *kpte,
unsigned long address,
pte_t pte)
392 try_preserve_large_page(
pte_t *kpte,
unsigned long address,
395 unsigned long nextpage_addr, numpages, pmask, psize,
addr, pfn;
397 pgprot_t old_prot, new_prot, req_prot;
404 spin_lock(&pgd_lock);
433 nextpage_addr = (address + psize) & pmask;
435 if (numpages < cpa->numpages)
442 old_prot = new_prot = req_prot =
pte_pgprot(old_pte);
454 new_prot = static_protections(req_prot, address, pfn);
461 addr = address & pmask;
464 pgprot_t chk_prot = static_protections(req_prot, addr, pfn);
493 __set_pmd_pte(kpte, address, new_pte);
499 spin_unlock(&pgd_lock);
504 static int split_large_page(
pte_t *kpte,
unsigned long address)
506 unsigned long pfn, pfninc = 1;
513 spin_unlock(&cpa_lock);
516 spin_lock(&cpa_lock);
520 spin_lock(&pgd_lock);
554 if (address >= (
unsigned long)
__va(0) &&
556 split_page_count(level);
559 if (address >= (
unsigned long)
__va(1
UL<<32) &&
561 split_page_count(level);
592 spin_unlock(&pgd_lock);
597 static int __cpa_process_fault(
struct cpa_data *cpa,
unsigned long vaddr,
620 "vaddr = %lx cpa->vaddr = %lx\n", vaddr,
627 static int __change_page_attr(
struct cpa_data *cpa,
int primary)
632 pte_t *kpte, old_pte;
634 if (cpa->
flags & CPA_PAGES_ARRAY) {
642 address = *cpa->
vaddr;
646 return __cpa_process_fault(cpa, address, primary);
650 return __cpa_process_fault(cpa, address, primary);
655 unsigned long pfn =
pte_pfn(old_pte);
660 new_prot = static_protections(new_prot, address, pfn);
684 do_split = try_preserve_large_page(kpte, address, cpa);
696 err = split_large_page(kpte, address);
723 static int __change_page_attr_set_clr(
struct cpa_data *cpa,
int checkalias);
725 static int cpa_process_alias(
struct cpa_data *cpa)
743 if (cpa->
flags & CPA_PAGES_ARRAY) {
757 alias_cpa.vaddr = &
laddr;
758 alias_cpa.flags &= ~(CPA_PAGES_ARRAY |
CPA_ARRAY);
760 ret = __change_page_attr_set_clr(&alias_cpa, 0);
771 if (!within(vaddr, (
unsigned long)_text,
_brk_end) &&
772 within(cpa->
pfn, highmap_start_pfn(), highmap_end_pfn())) {
776 alias_cpa.vaddr = &temp_cpa_vaddr;
777 alias_cpa.flags &= ~(CPA_PAGES_ARRAY |
CPA_ARRAY);
783 __change_page_attr_set_clr(&alias_cpa, 0);
790 static int __change_page_attr_set_clr(
struct cpa_data *cpa,
int checkalias)
805 spin_lock(&cpa_lock);
806 ret = __change_page_attr(cpa, checkalias);
808 spin_unlock(&cpa_lock);
813 ret = cpa_process_alias(cpa);
840 static int change_page_attr_set_clr(
unsigned long *addr,
int numpages,
842 int force_split,
int in_flag,
847 unsigned long baddr = 0;
867 }
else if (!(in_flag & CPA_PAGES_ARRAY)) {
900 if (in_flag & (CPA_ARRAY | CPA_PAGES_ARRAY))
901 cpa.
flags |= in_flag;
906 ret = __change_page_attr_set_clr(&cpa, checkalias);
926 if (!ret && cpu_has_clflush) {
927 if (cpa.
flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) {
928 cpa_flush_array(addr, numpages, cache,
931 cpa_flush_range(baddr, numpages, cache);
933 cpa_flush_all(cache);
939 static inline int change_page_attr_set(
unsigned long *addr,
int numpages,
942 return change_page_attr_set_clr(addr, numpages, mask,
__pgprot(0), 0,
943 (array ? CPA_ARRAY : 0),
NULL);
946 static inline int change_page_attr_clear(
unsigned long *addr,
int numpages,
949 return change_page_attr_set_clr(addr, numpages,
__pgprot(0), mask, 0,
950 (array ? CPA_ARRAY : 0),
NULL);
953 static inline int cpa_set_pages_array(
struct page **pages,
int numpages,
956 return change_page_attr_set_clr(
NULL, numpages, mask,
__pgprot(0), 0,
957 CPA_PAGES_ARRAY, pages);
960 static inline int cpa_clear_pages_array(
struct page **pages,
int numpages,
963 return change_page_attr_set_clr(
NULL, numpages,
__pgprot(0), mask, 0,
964 CPA_PAGES_ARRAY, pages);
972 return change_page_attr_set(&addr, numpages,
1001 static int _set_memory_array(
unsigned long *addr,
int addrinarray,
1002 unsigned long new_type)
1010 for (i = 0; i < addrinarray; i++) {
1017 ret = change_page_attr_set(addr, addrinarray,
1021 ret = change_page_attr_set_clr(addr, addrinarray,
1024 0, CPA_ARRAY,
NULL);
1031 for (j = 0; j <
i; j++)
1052 unsigned long addr_copy =
addr;
1054 ret = change_page_attr_set(&addr, numpages,
1057 ret = change_page_attr_set_clr(&addr_copy, numpages,
1092 return change_page_attr_clear(&addr, numpages,
1114 ret = change_page_attr_clear(addr, addrinarray,
1119 for (i = 0; i < addrinarray; i++)
1131 return change_page_attr_clear(&addr, numpages,
__pgprot(_PAGE_NX), 0);
1140 return change_page_attr_set(&addr, numpages,
__pgprot(_PAGE_NX), 0);
1163 return change_page_attr_set_clr(&addr, numpages,
__pgprot(0),
1175 static int _set_pages_array(
struct page **pages,
int addrinarray,
1176 unsigned long new_type)
1178 unsigned long start;
1184 for (i = 0; i < addrinarray; i++) {
1185 if (PageHighMem(pages[i]))
1193 ret = cpa_set_pages_array(pages, addrinarray,
1196 ret = change_page_attr_set_clr(
NULL, addrinarray,
1199 0, CPA_PAGES_ARRAY, pages);
1205 for (i = 0; i < free_idx; i++) {
1206 if (PageHighMem(pages[i]))
1238 unsigned long start;
1242 retval = cpa_clear_pages_array(pages, addrinarray,
1247 for (i = 0; i < addrinarray; i++) {
1248 if (PageHighMem(pages[i]))
1289 #ifdef CONFIG_DEBUG_PAGEALLOC
1291 static int __set_pages_p(
struct page *page,
int numpages)
1306 return __change_page_attr_set_clr(&cpa, 0);
1309 static int __set_pages_np(
struct page *page,
int numpages)
1324 return __change_page_attr_set_clr(&cpa, 0);
1329 if (PageHighMem(page))
1333 numpages * PAGE_SIZE);
1342 __set_pages_p(page, numpages);
1344 __set_pages_np(page, numpages);
1353 #ifdef CONFIG_HIBERNATION
1355 bool kernel_page_present(
struct page *page)
1360 if (PageHighMem(page))
1375 #ifdef CONFIG_CPA_DEBUG