#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/mempolicy.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;
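	/*
	 * mm only updates hiwater_vm/hiwater_rss when it is about to
	 * lower total_vm or rss, so report the larger of the cached
	 * high-water mark and the current value.
	 */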
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = get_mm_rss(mm);
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;
			 unsigned long *data, unsigned long *resident)
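/*
 * pad_len_spaces() pads a maps line out to a fixed column so that the
 * path name field lines up; len is the number of characters the line
 * has used so far.
 */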
static void pad_len_spaces(struct seq_file *m, int len)
{
	len = 25 + sizeof(void*) * 6 - len;
	if (len < 1)
		len = 1;
	seq_printf(m, "%*c", len, ' ');
}
	priv->task_mempolicy = task->mempolicy;
	mpol_get(priv->task_mempolicy);

	mpol_put(priv->task_mempolicy);
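/*
 * hold_task_mempolicy()/release_task_mempolicy() take and drop a
 * reference on the task's NUMA policy (CONFIG_NUMA only) so it cannot
 * be freed while a seq_file walk over the VMAs is in progress.
 */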
	if (vma && vma != priv->tail_vma) {
		release_task_mempolicy(priv);
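/*
 * m_start()/m_next()/m_stop() implement the seq_file iterator over a
 * task's VMAs.  m->version caches the address of the last VMA shown,
 * so a subsequent read() can resume without rescanning from the top;
 * -1UL marks the end of the iteration.
 */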
static void *m_start(struct seq_file *m, loff_t *pos)
{
	unsigned long last_addr = m->version;

	priv->tail_vma = NULL;

	if (last_addr == -1UL)
		return NULL;

	if (!priv->task)
		return ERR_PTR(-ESRCH);

	if (!mm || IS_ERR(mm))
		return mm;

	priv->tail_vma = tail_vma;
	hold_task_mempolicy(priv);

	if (last_addr && vma) {

	if ((unsigned long)l < mm->map_count) {

	release_task_mempolicy(priv);
static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	if (vma && (vma != tail_vma) && vma->vm_next)
		return vma->vm_next;

	return (vma != tail_vma) ? tail_vma : NULL;
}
static void m_stop(struct seq_file *m, void *v)
{
	if (priv->task)
		put_task_struct(priv->task);
}
		priv->pid = proc_pid(inode);
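/*
 * show_map_vma() emits one /proc/<pid>/maps line: address range,
 * permissions, file offset, device, inode and, where known, the
 * backing file or a [heap]/[stack] style annotation.
 */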
	struct file *file = vma->vm_file;

	unsigned long ino = 0;
	unsigned long long pgoff = 0;

	if (file) {
		struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
		dev = inode->i_sb->s_dev;
	if (stack_guard_page_start(vma, start))
		start += PAGE_SIZE;
	if (stack_guard_page_end(vma, end))
		end -= PAGE_SIZE;
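	/*
	 * The trailing %n conversion stores the number of characters
	 * printed so far; pad_len_spaces() uses it below to align the
	 * path name column.
	 */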
	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
			start,
			end,
			flags & VM_READ ? 'r' : '-',
			flags & VM_WRITE ? 'w' : '-',
			flags & VM_EXEC ? 'x' : '-',
			flags & VM_MAYSHARE ? 's' : 'p',
		pad_len_spaces(m, len);

		pad_len_spaces(m, len);

		pad_len_spaces(m, len);
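/*
 * show_map() prints a single VMA; show_pid_map()/show_tid_map() bind
 * it to the /proc/<pid>/maps and /proc/<pid>/task/<tid>/maps views,
 * whose only difference is how a thread's [stack] is attributed.
 */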
static int show_map(struct seq_file *m, void *v, int is_pid)
{
	struct vm_area_struct *vma = v;

	show_map_vma(m, vma, is_pid);

static int show_pid_map(struct seq_file *m, void *v)
{
	return show_map(m, v, 1);
}

static int show_tid_map(struct seq_file *m, void *v)
{
	return show_map(m, v, 0);
}
static int pid_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_maps_op);
}

static int tid_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_tid_maps_op);
}
	.open		= pid_maps_open,

	.open		= tid_maps_open,
#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats {
	unsigned long resident;
	unsigned long shared_clean;
	unsigned long shared_dirty;
	unsigned long private_clean;
	unsigned long private_dirty;
	unsigned long referenced;
	unsigned long anonymous;
	unsigned long anonymous_thp;
	unsigned long swap;
	unsigned long nonlinear;
	u64 pss;
};
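/*
 * smaps_pte_entry() accumulates statistics for one mapped range of
 * ptent_size bytes.  Pss is kept as a fixed-point value shifted left
 * by PSS_SHIFT: each shared page contributes size/mapcount, and the
 * sum is only divided down (by 2^(10 + PSS_SHIFT), to kB) when it is
 * finally printed in show_smap().
 */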
static void smaps_pte_entry(pte_t ptent, unsigned long addr,
		unsigned long ptent_size, struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;

	pgoff_t pgoff = linear_page_index(vma, addr);

	} else if (is_swap_pte(ptent)) {
		swp_entry_t swpent = pte_to_swp_entry(ptent);

		if (!non_swap_entry(swpent))
			mss->swap += ptent_size;
		else if (is_migration_entry(swpent))
			page = migration_entry_to_page(swpent);

			mss->nonlinear += ptent_size;

	if (PageAnon(page))
		mss->anonymous += ptent_size;

	if (page->index != pgoff)
		mss->nonlinear += ptent_size;

	mss->resident += ptent_size;

	if (pte_young(ptent) || PageReferenced(page))
		mss->referenced += ptent_size;
	mapcount = page_mapcount(page);
	if (mapcount >= 2) {
		if (pte_dirty(ptent) || PageDirty(page))
			mss->shared_dirty += ptent_size;
		else
			mss->shared_clean += ptent_size;
		mss->pss += (ptent_size << PSS_SHIFT) / mapcount;
	} else {
		if (pte_dirty(ptent) || PageDirty(page))
			mss->private_dirty += ptent_size;
		else
			mss->private_clean += ptent_size;
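/*
 * smaps_pte_range() is the pmd_entry callback for walk_page_range():
 * a transparent huge pmd is accounted as one HPAGE_PMD_SIZE chunk
 * under the page-table lock, otherwise the individual ptes are
 * visited one by one.
 */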
static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			   struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;

	if (pmd_trans_huge_lock(pmd, vma) == 1) {
		smaps_pte_entry(*(pte_t *)pmd, addr, HPAGE_PMD_SIZE, walk);
		spin_unlock(&walk->mm->page_table_lock);
		mss->anonymous_thp += HPAGE_PMD_SIZE;
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE)
		smaps_pte_entry(*pte, addr, PAGE_SIZE, walk);
	pte_unmap_unlock(pte - 1, ptl);
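/*
 * show_smap() prints one /proc/<pid>/smaps record: the maps header
 * line via show_map_vma(), then the totals gathered by walking the
 * VMA with smaps_walk.
 */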
static int show_smap(struct seq_file *m, void *v, int is_pid)
{
	struct mem_size_stats mss;
	struct mm_walk smaps_walk = {
		.pmd_entry = smaps_pte_range,
		.mm = vma->vm_mm,
		.private = &mss,
	};

	memset(&mss, 0, sizeof mss);

	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
		walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);

	show_map_vma(m, vma, is_pid);
553 "Shared_Clean: %8lu kB\n"
554 "Shared_Dirty: %8lu kB\n"
555 "Private_Clean: %8lu kB\n"
556 "Private_Dirty: %8lu kB\n"
557 "Referenced: %8lu kB\n"
558 "Anonymous: %8lu kB\n"
559 "AnonHugePages: %8lu kB\n"
561 "KernelPageSize: %8lu kB\n"
562 "MMUPageSize: %8lu kB\n"
566 (
unsigned long)(mss.pss >> (10 +
PSS_SHIFT)),
567 mss.shared_clean >> 10,
568 mss.shared_dirty >> 10,
569 mss.private_clean >> 10,
570 mss.private_dirty >> 10,
571 mss.referenced >> 10,
573 mss.anonymous_thp >> 10,
578 (
unsigned long)(mss.pss >> (10 +
PSS_SHIFT)) : 0);
582 mss.nonlinear >> 10);
static int show_pid_smap(struct seq_file *m, void *v)
{
	return show_smap(m, v, 1);
}

static int show_tid_smap(struct seq_file *m, void *v)
{
	return show_smap(m, v, 0);
}

	.show	= show_pid_smap

	.show	= show_tid_smap
static int pid_smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_smaps_op);
}

static int tid_smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_tid_smaps_op);
}
	.open		= pid_smaps_open,

	.open		= tid_smaps_open,
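/*
 * Writing 1, 2 or 3 to /proc/<pid>/clear_refs clears the referenced
 * bits (and the young bit in the ptes) for all, anonymous-only, or
 * file-backed-only VMAs respectively, so working-set measurements can
 * be restarted; see the CLEAR_REFS_* values below.
 */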
static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	if (pmd_trans_unstable(pmd))
		return 0;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);

		ptep_test_and_clear_young(vma, addr, pte);
		ClearPageReferenced(page);

	pte_unmap_unlock(pte - 1, ptl);
#define CLEAR_REFS_ALL		1
#define CLEAR_REFS_ANON		2
#define CLEAR_REFS_MAPPED	3
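/* e.g. "echo 1 > /proc/<pid>/clear_refs" resets all referenced bits. */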
static ssize_t clear_refs_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;

	rv = kstrtoint(strstrip(buffer), 10, &type);

	task = get_proc_task(file->f_path.dentry->d_inode);

		struct mm_walk clear_refs_walk = {
			.pmd_entry = clear_refs_pte_range,
			.mm = mm,
		};

			clear_refs_walk.private = vma;
			if (is_vm_hugetlb_page(vma))
				continue;

			if (type == CLEAR_REFS_ANON && vma->vm_file)
				continue;
			if (type == CLEAR_REFS_MAPPED && !vma->vm_file)
				continue;

	put_task_struct(task);
	.write		= clear_refs_write,
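/*
 * /proc/<pid>/pagemap exports one 64-bit word per virtual page.  The
 * PM_* macros below carve that word into a 3-bit status field, a
 * 6-bit page-shift field and a page-frame number.
 */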
#define PAGEMAP_WALK_SIZE	(PMD_SIZE)
#define PAGEMAP_WALK_MASK	(PMD_MASK)

#define PM_ENTRY_BYTES		sizeof(u64)
#define PM_STATUS_BITS		3
#define PM_STATUS_OFFSET	(64 - PM_STATUS_BITS)
#define PM_STATUS_MASK		(((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
#define PM_STATUS(nr)		(((nr) << PM_STATUS_OFFSET) & PM_STATUS_MASK)
#define PM_PSHIFT_BITS		6
#define PM_PSHIFT_OFFSET	(PM_STATUS_OFFSET - PM_PSHIFT_BITS)
#define PM_PSHIFT_MASK		(((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET)
#define PM_PSHIFT(x)		(((u64) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK)
#define PM_PFRAME_MASK		((1LL << PM_PSHIFT_OFFSET) - 1)
#define PM_PFRAME(x)		((x) & PM_PFRAME_MASK)

#define PM_PRESENT		PM_STATUS(4LL)
#define PM_SWAP			PM_STATUS(2LL)
#define PM_FILE			PM_STATUS(1LL)
#define PM_NOT_PRESENT		PM_PSHIFT(PAGE_SHIFT)
#define PM_END_OF_BUFFER	1
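/*
 * Resulting layout: bits 63-61 hold the status (present/swap/file),
 * bits 60-55 the page shift, bits 54-0 the frame number.  A present
 * 4 KB page in frame 0x1234 would thus be encoded as
 * PM_PRESENT | PM_PSHIFT(12) | PM_PFRAME(0x1234).
 */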
static inline pagemap_entry_t make_pme(u64 val)
{
	return (pagemap_entry_t) { .pme = val };
}

static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
			  struct pagemapread *pm)
{
	pm->buffer[pm->pos++] = *pme;
	if (pm->pos >= pm->len)
		return PM_END_OF_BUFFER;
	return 0;
}
static int pagemap_pte_hole(unsigned long start, unsigned long end,
			    struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;

	pagemap_entry_t pme = make_pme(PM_NOT_PRESENT);

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		err = add_to_pagemap(addr, &pme, pm);
static void pte_to_pagemap_entry(pagemap_entry_t *pme,
		struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
	struct page *page = NULL;

	} else if (is_swap_pte(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		frame = swp_type(entry) |
			(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
		if (is_migration_entry(entry))
			page = migration_entry_to_page(entry);
	} else {
		*pme = make_pme(PM_NOT_PRESENT);
		return;
	}

	if (page && !PageAnon(page))
		flags |= PM_FILE;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme,
		pmd_t pmd, int offset)
{
	if (pmd_present(pmd))
		*pme = make_pme(PM_PFRAME(pmd_pfn(pmd) + offset)
				| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT);
	else
		*pme = make_pme(PM_NOT_PRESENT);
}
#else
static inline void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme,
		pmd_t pmd, int offset)
{
}
#endif
static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;

	pagemap_entry_t pme = make_pme(PM_NOT_PRESENT);

	if (vma && pmd_trans_huge_lock(pmd, vma) == 1) {
		for (; addr != end; addr += PAGE_SIZE) {
			unsigned long offset;

			offset = (addr & ~PAGEMAP_WALK_MASK) >>
					PAGE_SHIFT;
			thp_pmd_to_pagemap_entry(&pme, *pmd, offset);
			err = add_to_pagemap(addr, &pme, pm);
		}
		spin_unlock(&walk->mm->page_table_lock);
	}

	if (pmd_trans_unstable(pmd))
		return 0;

		if (vma && (addr >= vma->vm_end)) {
			vma = find_vma(walk->mm, addr);
			pme = make_pme(PM_NOT_PRESENT);
		}

		if (vma && (vma->vm_start <= addr) &&
		    !is_vm_hugetlb_page(vma)) {
			pte = pte_offset_map(pmd, addr);
			pte_to_pagemap_entry(&pme, vma, addr, *pte);
		}
		err = add_to_pagemap(addr, &pme, pm);
#ifdef CONFIG_HUGETLB_PAGE
static void huge_pte_to_pagemap_entry(pagemap_entry_t *pme,
					pte_t pte, int offset)
{
	if (pte_present(pte))
		*pme = make_pme(PM_PFRAME(pte_pfn(pte) + offset)
				| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT);
	else
		*pme = make_pme(PM_NOT_PRESENT);
}

static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
				 unsigned long addr, unsigned long end,
				 struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;

	for (; addr != end; addr += PAGE_SIZE) {
		int offset = (addr & ~hmask) >> PAGE_SHIFT;
		huge_pte_to_pagemap_entry(&pme, *pte, offset);
		err = add_to_pagemap(addr, &pme, pm);
static ssize_t pagemap_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct pagemapread pm;

	struct mm_walk pagemap_walk = {};

	unsigned long start_vaddr;
	unsigned long end_vaddr;

	pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);

	if (!mm || IS_ERR(mm))
		goto out_free;

	pagemap_walk.pmd_entry = pagemap_pte_range;
	pagemap_walk.pte_hole = pagemap_pte_hole;
#ifdef CONFIG_HUGETLB_PAGE
	pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
#endif
	pagemap_walk.mm = mm;
	pagemap_walk.private = &pm;

	if (svpfn > TASK_SIZE_OF(task) >> PAGE_SHIFT)
		start_vaddr = end_vaddr;

	while (count && (start_vaddr < end_vaddr)) {

		end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;

		if (end < start_vaddr || end > end_vaddr)
			end = end_vaddr;

		len = min(count, PM_ENTRY_BYTES * pm.pos);

	if (!ret || ret == PM_END_OF_BUFFER)
		ret = copied;

	put_task_struct(task);
	.read		= pagemap_read,
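/*
 * Userspace reads pagemap by seeking to (vaddr / PAGE_SIZE) *
 * PM_ENTRY_BYTES and reading one 8-byte entry per page, e.g.:
 *
 *	pread(fd, &entry, 8, (vaddr / 4096) * 8);
 */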
struct numa_maps {
	unsigned long pages;
	unsigned long anon;
	unsigned long active;
	unsigned long writeback;
	unsigned long mapcount_max;
	unsigned long dirty;
	unsigned long swapcache;
	unsigned long node[MAX_NUMNODES];
};

struct numa_maps_private {
	struct proc_maps_private proc_maps;
	struct numa_maps md;
};
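/*
 * gather_stats() accumulates per-node and per-state page counts for
 * one VMA; show_numa_map() below turns the totals into a
 * /proc/<pid>/numa_maps line.
 */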
static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
			unsigned long nr_pages)
{
	int count = page_mapcount(page);

	md->pages += nr_pages;
	if (pte_dirty || PageDirty(page))
		md->dirty += nr_pages;

	if (PageSwapCache(page))
		md->swapcache += nr_pages;

	if (PageActive(page) || PageUnevictable(page))
		md->active += nr_pages;

	if (PageWriteback(page))
		md->writeback += nr_pages;

	if (PageAnon(page))
		md->anon += nr_pages;

	if (count > md->mapcount_max)
		md->mapcount_max = count;

	md->node[page_to_nid(page)] += nr_pages;
	if (PageReserved(page))
		return NULL;

	nid = page_to_nid(page);
static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
		unsigned long end, struct mm_walk *walk)
{
	struct numa_maps *md;

	if (pmd_trans_huge_lock(pmd, md->vma) == 1) {

		page = can_gather_numa_stats(huge_pte, md->vma, addr);
		if (page)
			gather_stats(page, md, pte_dirty(huge_pte),
				     HPAGE_PMD_SIZE/PAGE_SIZE);
		spin_unlock(&walk->mm->page_table_lock);

	if (pmd_trans_unstable(pmd))
		return 0;
	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	do {
		struct page *page = can_gather_numa_stats(*pte, md->vma, addr);
		if (!page)
			continue;
		gather_stats(page, md, pte_dirty(*pte), 1);

	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
#ifdef CONFIG_HUGETLB_PAGE
static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
		unsigned long addr, unsigned long end, struct mm_walk *walk)
{
	struct numa_maps *md;

	gather_stats(page, md, pte_dirty(*pte), 1);
	return 0;
}
#else
static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
		unsigned long addr, unsigned long end, struct mm_walk *walk)
{
	return 0;
}
#endif
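/*
 * show_numa_map() prints one /proc/<pid>/numa_maps record: the VMA's
 * start address and mempolicy, followed by the counters gathered
 * above and an N<node>=<pages> entry per node.
 */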
static int show_numa_map(struct seq_file *m, void *v, int is_pid)
{
	struct numa_maps_private *numa_priv = m->private;

	struct numa_maps *md = &numa_priv->md;
	struct file *file = vma->vm_file;

	struct mm_walk walk = {};

	memset(md, 0, sizeof(*md));

	walk.hugetlb_entry = gather_hugetlb_stats;
	walk.pmd_entry = gather_pte_stats;
	walk.private = md;
	walk.mm = mm;
	if (is_vm_hugetlb_page(vma))
		seq_printf(m, " huge");

	if (md->pages != md->anon && md->pages != md->dirty)
		seq_printf(m, " mapped=%lu", md->pages);

	if (md->mapcount_max > 1)
		seq_printf(m, " mapmax=%lu", md->mapcount_max);

	if (md->swapcache)
		seq_printf(m, " swapcache=%lu", md->swapcache);

	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
		seq_printf(m, " active=%lu", md->active);

	if (md->writeback)
		seq_printf(m, " writeback=%lu", md->writeback);

	m->version = (vma != proc_priv->tail_vma) ? vma->vm_start : 0;
static int show_pid_numa_map(struct seq_file *m, void *v)
{
	return show_numa_map(m, v, 1);
}

static int show_tid_numa_map(struct seq_file *m, void *v)
{
	return show_numa_map(m, v, 0);
}
	.show	= show_pid_numa_map,

	.show	= show_tid_numa_map,
static int numa_maps_open(struct inode *inode, struct file *file,
			  const struct seq_operations *ops)
{
	struct numa_maps_private *priv;

		priv->proc_maps.pid = proc_pid(inode);
static int pid_numa_maps_open(struct inode *inode, struct file *file)
{
	return numa_maps_open(inode, file, &proc_pid_numa_maps_op);
}

static int tid_numa_maps_open(struct inode *inode, struct file *file)
{
	return numa_maps_open(inode, file, &proc_tid_numa_maps_op);
}
	.open		= pid_numa_maps_open,

	.open		= tid_numa_maps_open,