11 #ifndef _ASM_S390_PGTABLE_H
12 #define _ASM_S390_PGTABLE_H
30 #include <linux/sched.h>
/*
 * No-ops: the hardware walks the page tables directly, so there is no
 * separate MMU cache to refresh after a PTE/PMD update.
 */
#define update_mmu_cache(vma, address, ptep)     do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep) do { } while (0)
/*
 * ZERO_PAGE returns the globally shared page of zeroes. The low bits of
 * the faulting virtual address, masked by zero_page_mask, select among
 * multiple zero pages (mask is 0 when only one exists).
 */
#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
59 #define is_zero_pfn is_zero_pfn
62 extern unsigned long zero_pfn;
63 unsigned long offset_from_zero_pfn = pfn - zero_pfn;
64 return offset_from_zero_pfn <= (zero_page_mask >>
PAGE_SHIFT);
67 #define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr))
/*
 * PGDIR_SHIFT differs between 31-bit and 64-bit kernels; the two
 * conflicting definitions must be separated by a config guard.
 */
#ifndef CONFIG_64BIT
# define PGDIR_SHIFT	20
#else /* CONFIG_64BIT */
# define PGDIR_SHIFT	42
#endif /* CONFIG_64BIT */
/* Bytes mapped by one entry at each level, and the matching address masks. */
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
/*
 * Entries per table at each level. In 31-bit mode the pmd/pud levels
 * are folded (one entry each); the conflicting 64-bit values must sit
 * behind a config guard.
 */
#define PTRS_PER_PTE	256
#ifndef CONFIG_64BIT
#define PTRS_PER_PMD	1
#define PTRS_PER_PUD	1
#else /* CONFIG_64BIT */
#define PTRS_PER_PMD	2048
#define PTRS_PER_PUD	2048
#endif /* CONFIG_64BIT */
#define PTRS_PER_PGD	2048

/* Lowest virtual address available to user space. */
#define FIRST_USER_ADDRESS	0
/* Report a corrupt/unexpected entry at each page-table level. */
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))
/* Upper bound of directly-mapped physical memory (vmemmap starts above). */
#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

/*
 * NOTE(review): MODULES_VADDR/MODULES_END are presumably runtime
 * variables declared elsewhere; the self-referencing defines keep the
 * names visible to "#ifdef MODULES_VADDR" style checks in generic code.
 */
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)
/* Hardware bits in the page table entry. */
#define _PAGE_CO	0x100		/* HW Change-bit override */
#define _PAGE_RO	0x200		/* HW read-only bit */
#define _PAGE_INVALID	0x400		/* HW invalid bit */

/* Software bits in the page table entry. */
#define _PAGE_SWT	0x001		/* SW pte type bit t */
#define _PAGE_SWX	0x002		/* SW pte type bit x */
#define _PAGE_SWC	0x004		/* SW pte changed bit */
#define _PAGE_SWR	0x008		/* SW pte referenced bit */
#define _PAGE_SPECIAL	0x010		/* SW "special" page marker */
#define __HAVE_ARCH_PTE_SPECIAL

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_SPECIAL | _PAGE_SWC | _PAGE_SWR)
/* pte types: combinations of the invalid, read-only and SW type bits. */
#define _PAGE_TYPE_EMPTY	0x400
#define _PAGE_TYPE_NONE		0x401
#define _PAGE_TYPE_SWAP		0x403
#define _PAGE_TYPE_FILE		0x601
#define _PAGE_TYPE_RO		0x200
#define _PAGE_TYPE_RW		0x000

/*
 * Huge-page (segment level) pte types; the segment entry uses a
 * different bit layout, so its "empty"/"none" encodings differ.
 */
#define _HPAGE_TYPE_EMPTY	0x020
#define _HPAGE_TYPE_NONE	0x220
#define _HPAGE_TYPE_RO		0x200
#define _HPAGE_TYPE_RW		0x000
#ifndef CONFIG_64BIT

/* Bits in the 31-bit address-space-control element */
#define _ASCE_SPACE_SWITCH	0x80000000UL
#define _ASCE_ORIGIN_MASK	0x7ffff000UL
#define _ASCE_PRIVATE_SPACE	0x100
#define _ASCE_ALT_EVENT		0x80
#define _ASCE_TABLE_LENGTH	0x7f

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN	0x7fffffc0UL
#define _SEGMENT_ENTRY_RO	0x200
#define _SEGMENT_ENTRY_INV	0x20
#define _SEGMENT_ENTRY_COMMON	0x10
#define _SEGMENT_ENTRY_PTL	0x0f

#define _SEGMENT_ENTRY		(_SEGMENT_ENTRY_PTL)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)

/* Page status table bits for virtualization */
#define RCP_ACC_BITS	0xf0000000UL
#define RCP_FP_BIT	0x08000000UL
#define RCP_PCL_BIT	0x00800000UL
#define RCP_HR_BIT	0x00400000UL
#define RCP_HC_BIT	0x00200000UL
#define RCP_GR_BIT	0x00040000UL
#define RCP_GC_BIT	0x00020000UL

/* User dirty / referenced bits (KVM guest tracking) */
#define KVM_UR_BIT	0x00008000UL
#define KVM_UC_BIT	0x00004000UL

#endif /* !CONFIG_64BIT */
#ifdef CONFIG_64BIT

/* Bits in the 64-bit address-space-control element */
#define _ASCE_ORIGIN		~0xfffUL
#define _ASCE_PRIVATE_SPACE	0x100
#define _ASCE_ALT_EVENT		0x80
#define _ASCE_SPACE_SWITCH	0x40
#define _ASCE_REAL_SPACE	0x20
#define _ASCE_TYPE_MASK		0x0c
#define _ASCE_TYPE_REGION1	0x0c
#define _ASCE_TYPE_REGION2	0x08
#define _ASCE_TYPE_REGION3	0x04
#define _ASCE_TYPE_SEGMENT	0x00
#define _ASCE_TABLE_LENGTH	0x03

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL
#define _REGION_ENTRY_INV	0x20
#define _REGION_ENTRY_TYPE_MASK	0x0c
#define _REGION_ENTRY_TYPE_R1	0x0c
#define _REGION_ENTRY_TYPE_R2	0x08
#define _REGION_ENTRY_TYPE_R3	0x04
#define _REGION_ENTRY_LENGTH	0x03

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INV)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INV)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV)

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL
#define _SEGMENT_ENTRY_RO	0x200
#define _SEGMENT_ENTRY_INV	0x20

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)

/* Large-page (1 MB segment) and THP-split support bits */
#define _SEGMENT_ENTRY_LARGE	0x400
#define _SEGMENT_ENTRY_CO	0x100
#define _SEGMENT_ENTRY_SPLIT_BIT 0
#define _SEGMENT_ENTRY_SPLIT	(1UL << _SEGMENT_ENTRY_SPLIT_BIT)

/* Set of bits not changed in pmd_modify */
#define _SEGMENT_CHG_MASK	(_SEGMENT_ENTRY_ORIGIN | _SEGMENT_ENTRY_LARGE \
				 | _SEGMENT_ENTRY_SPLIT | _SEGMENT_ENTRY_CO)

/* Page status table bits for virtualization */
#define RCP_ACC_BITS	0xf000000000000000UL
#define RCP_FP_BIT	0x0800000000000000UL
#define RCP_PCL_BIT	0x0080000000000000UL
#define RCP_HR_BIT	0x0040000000000000UL
#define RCP_HC_BIT	0x0020000000000000UL
#define RCP_GR_BIT	0x0004000000000000UL
#define RCP_GC_BIT	0x0002000000000000UL

/* User dirty / referenced bits (KVM guest tracking) */
#define KVM_UR_BIT	0x0000800000000000UL
#define KVM_UC_BIT	0x0000400000000000UL

#endif /* CONFIG_64BIT */
/*
 * asce bits set for user address spaces. NOTE(review): the original
 * line ended in a dangling "\" continuation; the standard second half
 * (_ASCE_ALT_EVENT) is restored here.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)
/* Page protections built from the pte type encodings above. */
#define PAGE_NONE	__pgprot(_PAGE_TYPE_NONE)
#define PAGE_RO		__pgprot(_PAGE_TYPE_RO)
#define PAGE_RW		__pgprot(_PAGE_TYPE_RW)

#define PAGE_KERNEL	PAGE_RW
#define PAGE_COPY	PAGE_RO
/*
 * mmap() protection maps (bits: read/write/execute). Private writable
 * mappings (__P table) fall back to PAGE_RO so writes fault and trigger
 * copy-on-write; shared writable mappings (__S table) get PAGE_RW.
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_RO
#define __P101	PAGE_RO
#define __P110	PAGE_RO
#define __P111	PAGE_RO

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_RO
#define __S101	PAGE_RO
#define __S110	PAGE_RW
#define __S111	PAGE_RW
422 static inline int mm_exclusive(
struct mm_struct *mm)
428 static inline int mm_has_pgste(
struct mm_struct *mm)
453 if ((
pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
455 return (
pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0
UL;
460 if ((
pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
462 return (
pgd_val(pgd) & _REGION_ENTRY_INV) != 0
UL;
474 ~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
475 return (
pgd_val(pgd) & mask) != 0;
480 if ((
pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
482 return (
pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0
UL;
487 if ((
pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
489 return (
pud_val(pud) & _REGION_ENTRY_INV) != 0
UL;
501 ~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
502 return (
pud_val(pud) & mask) != 0;
523 return !!(
pmd_val(pmd) & _SEGMENT_ENTRY_LARGE);
/* Architecture-provided implementations of generic pmd/pte helpers. */
#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
#define __HAVE_ARCH_PMD_WRITE
#define __HAVE_ARCH_PTE_SAME
591 unsigned long new = 0;
609 static inline void pgste_set_unlock(
pte_t *ptep,
pgste_t pgste)
630 skey = page_get_storage_key(address);
634 page_set_storage_key(address, skey ^ bits, 1);
636 page_reset_referenced(address);
680 unsigned long okey, nkey;
685 okey = nkey = page_get_storage_key(address);
690 page_set_storage_key(address, nkey, 1);
734 unsigned long to,
unsigned long length);
750 if (mm_has_pgste(mm)) {
751 pgste = pgste_get_lock(ptep);
752 pgste_set_pte(ptep, pgste, entry);
754 pgste_set_unlock(ptep, pgste);
793 if ((
pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
794 pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
801 if ((
pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
802 pud_val(*pud) = _REGION3_ENTRY_EMPTY;
873 #ifdef CONFIG_HUGETLB_PAGE
896 pte_val(pte) |= (_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO);
904 static inline int ptep_test_and_clear_user_dirty(
struct mm_struct *mm,
910 if (mm_has_pgste(mm)) {
911 pgste = pgste_get_lock(ptep);
912 pgste = pgste_update_all(ptep, pgste);
915 pgste_set_unlock(ptep, pgste);
924 static inline int ptep_test_and_clear_user_young(
struct mm_struct *mm,
930 if (mm_has_pgste(mm)) {
931 pgste = pgste_get_lock(ptep);
932 pgste = pgste_update_young(ptep, pgste);
935 pgste_set_unlock(ptep, pgste);
940 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
942 unsigned long addr,
pte_t *ptep)
947 if (mm_has_pgste(vma->
vm_mm)) {
948 pgste = pgste_get_lock(ptep);
949 pgste = pgste_update_young(ptep, pgste);
952 pgste_set_unlock(ptep, pgste);
958 #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
960 unsigned long address,
pte_t *ptep)
969 static inline void __ptep_ipte(
unsigned long address,
pte_t *ptep)
971 if (!(
pte_val(*ptep) & _PAGE_INVALID)) {
974 pte_t *pto = (
pte_t *) (((
unsigned long) ptep) & 0x7ffffc00);
981 :
"=m" (*ptep) :
"m" (*ptep),
982 "a" (pto),
"a" (address));
999 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
1001 unsigned long address,
pte_t *ptep)
1007 if (mm_has_pgste(mm))
1008 pgste = pgste_get_lock(ptep);
1011 if (!mm_exclusive(mm))
1012 __ptep_ipte(address, ptep);
1015 if (mm_has_pgste(mm)) {
1016 pgste = pgste_update_all(&pte, pgste);
1017 pgste_set_unlock(ptep, pgste);
1022 #define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
1024 unsigned long address,
1030 if (mm_has_pgste(mm))
1031 pgste_get_lock(ptep);
1034 if (!mm_exclusive(mm))
1035 __ptep_ipte(address, ptep);
1039 static inline void ptep_modify_prot_commit(
struct mm_struct *mm,
1040 unsigned long address,
1044 if (mm_has_pgste(mm))
1048 #define __HAVE_ARCH_PTEP_CLEAR_FLUSH
1050 unsigned long address,
pte_t *ptep)
1055 if (mm_has_pgste(vma->
vm_mm))
1056 pgste = pgste_get_lock(ptep);
1059 __ptep_ipte(address, ptep);
1062 if (mm_has_pgste(vma->
vm_mm)) {
1063 pgste = pgste_update_all(&pte, pgste);
1064 pgste_set_unlock(ptep, pgste);
1076 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
1077 static inline pte_t ptep_get_and_clear_full(
struct mm_struct *mm,
1078 unsigned long address,
1084 if (mm_has_pgste(mm))
1085 pgste = pgste_get_lock(ptep);
1089 __ptep_ipte(address, ptep);
1092 if (mm_has_pgste(mm)) {
1093 pgste = pgste_update_all(&pte, pgste);
1094 pgste_set_unlock(ptep, pgste);
1099 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
1101 unsigned long address,
pte_t *ptep)
1108 if (mm_has_pgste(mm))
1109 pgste = pgste_get_lock(ptep);
1111 if (!mm_exclusive(mm))
1112 __ptep_ipte(address, ptep);
1115 if (mm_has_pgste(mm))
1116 pgste_set_unlock(ptep, pgste);
1121 #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
1123 unsigned long address,
pte_t *ptep,
1124 pte_t entry,
int dirty)
1130 if (mm_has_pgste(vma->
vm_mm))
1131 pgste = pgste_get_lock(ptep);
1133 __ptep_ipte(address, ptep);
1136 if (mm_has_pgste(vma->
vm_mm))
1137 pgste_set_unlock(ptep, pgste);
/* Extract the table index at each level from a virtual address. */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

/* Locate the pgd entry for an address in a given mm (or the kernel's). */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
1167 #ifndef CONFIG_64BIT
1169 #define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
1170 #define pud_deref(pmd) ({ BUG(); 0UL; })
1171 #define pgd_deref(pmd) ({ BUG(); 0UL; })
1173 #define pud_offset(pgd, address) ((pud_t *) pgd)
1174 #define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address))
1178 #define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
1179 #define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
1180 #define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)
1185 if ((
pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
1193 if ((
pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
/* pte <-> pfn/page conversions. */
#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)

/* Locate a pte within the page table a pmd entry points at. */
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)	/* no highmem mapping to undo */
1212 static inline void __pmd_idte(
unsigned long address,
pmd_t *pmdp)
1214 unsigned long sto = (
unsigned long) pmdp -
1219 " .insn rrf,0xb98e0000,%2,%3,0,0"
1221 :
"m" (*pmdp),
"a" (sto),
1228 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1230 #define SEGMENT_NONE __pgprot(_HPAGE_TYPE_NONE)
1231 #define SEGMENT_RO __pgprot(_HPAGE_TYPE_RO)
1232 #define SEGMENT_RW __pgprot(_HPAGE_TYPE_RW)
1234 #define __HAVE_ARCH_PGTABLE_DEPOSIT
1237 #define __HAVE_ARCH_PGTABLE_WITHDRAW
1240 static inline int pmd_trans_splitting(
pmd_t pmd)
1242 return pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT;
1266 pmd_val(pmd) &= _SEGMENT_CHG_MASK;
1267 pmd_val(pmd) |= massage_pgprot_pmd(newprot);
1273 pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
1309 #define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
1311 unsigned long address,
pmd_t *pmdp)
1321 "0: .insn rre,0xb9ae0000,%0,%3\n"
1327 :
"a" (64 * 4096
UL) :
"cc");
1337 :
"+d" (
rc),
"+d" (counter),
"+a" (pmd_addr)
1338 :
"a" (4096
UL) :
"cc");
1343 #define __HAVE_ARCH_PMDP_GET_AND_CLEAR
1345 unsigned long address,
pmd_t *pmdp)
1349 __pmd_idte(address, pmdp);
1354 #define __HAVE_ARCH_PMDP_CLEAR_FLUSH
1356 unsigned long address,
pmd_t *pmdp)
1358 return pmdp_get_and_clear(vma->
vm_mm, address, pmdp);
1361 #define __HAVE_ARCH_PMDP_INVALIDATE
1363 unsigned long address,
pmd_t *pmdp)
1365 __pmd_idte(address, pmdp);
1368 static inline pmd_t mk_pmd_phys(
unsigned long physpage,
pgprot_t pgprot)
1371 pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
/* Build a huge-page pmd from a pfn or a struct page plus protections. */
#define pfn_pmd(pfn, pgprot) mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot))
1378 static inline int pmd_trans_huge(
pmd_t pmd)
1380 return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
1383 static inline int has_transparent_hugepage(
void)
1390 if (pmd_trans_huge(pmd))
/* Maximum encodable swap offset; one more usable bit in 64-bit ptes. */
#ifndef CONFIG_64BIT
#define __SWP_OFFSET_MASK (~0UL >> 12)
#else
#define __SWP_OFFSET_MASK (~0UL >> 11)
#endif
1442 ((offset & 1
UL) << 7) | ((offset & ~1
UL) << 11);
/* Encode/decode the type and offset fields of a swap pte. */
#define __swp_type(entry)	(((entry).val >> 2) & 0x1f)
#define __swp_offset(entry)	(((entry).val >> 11) | (((entry).val >> 7) & 1))
#define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
/* Bits available to store a file offset in a nonlinear file pte. */
#ifndef CONFIG_64BIT
# define PTE_FILE_MAX_BITS	26
#else /* CONFIG_64BIT */
# define PTE_FILE_MAX_BITS	59
#endif /* CONFIG_64BIT */

/*
 * Pack/unpack a file page offset into a pte: low 7 offset bits go to
 * pte bits 1..7, the rest start at bit 12 (above the type bits).
 */
#define pte_to_pgoff(__pte) \
	((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f))

#define pgoff_to_pte(__off) \
	((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \
			| _PAGE_TYPE_FILE })
/* Every kernel virtual address is considered valid on this arch. */
#define kern_addr_valid(addr) (1)

/* No arch-specific page-table cache to initialize. */
#define pgtable_cache_init() do { } while (0)