proc pid_statm - ceragon/LinuxDoc GitHub Wiki
int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *task) {
unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
struct mm_struct *mm = get_task_mm(task);
if (mm) {
size = task_statm(mm, &shared, &text, &data, &resident);
mmput(mm);
}
seq_printf(m, "%lu %lu %lu %lu 0 %lu 0\n",
size, resident, shared, text, data);
return 0;
}
/*
 * get_task_mm() - fetch the task's address-space descriptor.
 *
 * NOTE(review): this is a simplified excerpt. The real kernel version
 * takes task_lock(), skips kernel threads (PF_KTHREAD), and bumps
 * mm->mm_users before returning, which is what makes the caller's
 * mmput() pairing in proc_pid_statm() balanced — confirm against
 * kernel/fork.c before relying on this form.
 */
struct mm_struct *get_task_mm(struct task_struct *task) {
struct mm_struct *mm;
mm = task->mm;
return mm;
}
unsigned long task_statm(struct mm_struct *mm,
unsigned long *shared, unsigned long *text,
unsigned long *data, unsigned long *resident) {
// 所谓的共享内存,就是内存和文件产生映射的部分。
*shared = get_mm_counter(mm, MM_FILEPAGES);
// text 是代码段
*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
>> PAGE_SHIFT;
// data 是数据段. 总的虚拟内存去掉共享的部分,剩下的就是数据部分了。
*data = mm->total_vm - mm->shared_vm;
// 物理内存占用就是:共享内存+匿名内存页的部分
*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
return mm->total_vm;
}
/* Per-mm RSS counter indices, used with get_mm_counter(). */
enum {
MM_FILEPAGES,	/* resident pages backed by a file mapping */
MM_ANONPAGES,	/* resident anonymous pages */
MM_SWAPENTS,	/* swap entries (pages swapped out) */
NR_MM_COUNTERS	/* number of counters; array size sentinel */
};
/*
 * get_mm_counter() - read one per-mm RSS counter.
 * @member selects an index from the MM_* enum above.
 */
static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
{
	unsigned long count = mm->rss_stat.count[member];

	return count;
}
/*
 * acct_stack_growth() - account @grow extra pages when a stack VMA expands.
 *
 * NOTE(review): abridged excerpt — the upstream function also validates
 * rlimits and can fail with -ENOMEM; only the total_vm accounting is
 * shown here. Confirm against mm/mmap.c.
 *
 * Fix: the function is declared int but fell off the end without a
 * return statement, which is undefined behavior when the caller uses
 * the value; report success explicitly.
 */
static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size,
		unsigned long grow)
{
	struct mm_struct *mm = vma->vm_mm;

	mm->total_vm += grow;
	return 0;	/* success */
}
/*
 * do_brk() - grow the heap with an anonymous mapping of @len bytes at @addr.
 *
 * NOTE(review): abridged excerpt — only the total_vm accounting is kept.
 *
 * Fix: the function is declared unsigned long but had no return
 * statement (undefined behavior for callers that use the result);
 * upstream returns the start address on success.
 */
unsigned long do_brk(unsigned long addr, unsigned long len)
{
	struct mm_struct *mm = current->mm;

	/* Heap growth is accounted in whole pages. */
	mm->total_vm += len >> PAGE_SHIFT;
	return addr;
}
/*
 * elf_map() - mmap one ELF program segment described by @eppnt.
 *
 * The file offset and target address are rounded down to page
 * boundaries so the kernel maps whole pages covering the segment.
 *
 * Fixes vs. the original excerpt: 'size' and 'map_addr' were used but
 * never declared, and map_addr was left unset when total_size == 0.
 * Both are restored to match upstream fs/binfmt_elf.c.
 */
static unsigned long elf_map(struct file *filep, unsigned long addr,
		struct elf_phdr *eppnt, int prot, int type,
		unsigned long total_size)
{
	/* Bytes to map: the segment's file data plus the sub-page offset
	 * of its start address, so the mapping begins page-aligned. */
	unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
	unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);
	unsigned long map_addr;

	addr = ELF_PAGESTART(addr);
	size = ELF_PAGEALIGN(size);

	if (total_size) {
		/* Reserve the whole image extent in one mapping. */
		total_size = ELF_PAGEALIGN(total_size);
		map_addr = do_mmap(filep, addr, total_size, prot, type, off);
	} else {
		/* Map just this segment. */
		map_addr = do_mmap(filep, addr, size, prot, type, off);
	}
	return(map_addr);
}
static inline unsigned long do_mmap(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot,
unsigned long flag, unsigned long offset) {
unsigned long ret = -EINVAL;
ret = do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
return ret;
}
unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot,
unsigned long flags, unsigned long pgoff) {
unsigned int vm_flags;
vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
return mmap_region(file, addr, len, flags, vm_flags, pgoff);
}
unsigned long mmap_region(struct file *file, unsigned long addr,
unsigned long len, unsigned long flags,
unsigned int vm_flags, unsigned long pgoff) {
mm->total_vm += len >> PAGE_SHIFT;
}
共享虚拟内存
/*
 * vm_stat_account() - update per-mm VM statistics for @pages new pages.
 *
 * NOTE(review): abridged excerpt — upstream also accounts exec_vm and
 * stack_vm depending on @flags; only the shared_vm path is shown here.
 */
void vm_stat_account(struct mm_struct *mm, unsigned long flags,
struct file *file, long pages) {
// Every file-backed mapping is treated as shared memory.
if (file) {
mm->shared_vm += pages;
}
}
/*
 * mmap_region() - revised excerpt: same entry point as above, shown
 * again to highlight where the shared_vm accounting hooks in.
 * NOTE(review): abridged — the return path is omitted in this excerpt.
 */
unsigned long mmap_region(struct file *file, unsigned long addr,
unsigned long len, unsigned long flags,
unsigned int vm_flags, unsigned long pgoff) {
struct mm_struct *mm = current->mm;
// mm->total_vm += len >> PAGE_SHIFT;
// Invoke the logic that increments shared_vm for file-backed mappings.
vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
}
物理内存的统计,由于系统采用了懒加载的模式,所以只有虚拟地址对应的物理地址发生缺页异常的时候才会真正被用到。所以大部分物理内存的使用增加,都发生在缺页异常的时候。
/*
 * do_anonymous_page() - fault handler for a page with no backing file.
 *
 * NOTE(review): abridged excerpt — only the RSS accounting is shown.
 * Faulting in an anonymous page bumps the MM_ANONPAGES counter, which
 * is exactly what task_statm() adds into the "resident" figure.
 */
static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *page_table, pmd_t *pmd,
unsigned int flags) {
inc_mm_counter_fast(mm, MM_ANONPAGES);
}
/*
 * do_nonlinear_fault() - fault on a non-linear file mapping.
 *
 * For remap_file_pages()-style mappings the file offset cannot be
 * derived from the VMA layout, so it is recovered from the saved PTE
 * before delegating to the common fault path.
 */
static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pte_t *page_table, pmd_t *pmd,
		unsigned int flags, pte_t orig_pte)
{
	pgoff_t file_pgoff = pte_to_pgoff(orig_pte);

	flags |= FAULT_FLAG_NONLINEAR;
	return __do_fault(mm, vma, address, pmd, file_pgoff, flags, orig_pte);
}
/*
 * __do_fault() - common file-fault path; decides which RSS counter the
 * newly faulted page belongs to.
 *
 * A write fault on a non-shared (private) file mapping produces a
 * copy-on-write copy, which is an anonymous page; everything else
 * stays a file page. NOTE(review): abridged excerpt — the unlock,
 * page installation, and return value are omitted.
 */
static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pmd_t *pmd,
pgoff_t pgoff, unsigned int flags, pte_t orig_pte) {
pte_t *page_table;
spinlock_t *ptl;
int anon = 0;
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
if (flags & FAULT_FLAG_WRITE) {
if (!(vma->vm_flags & VM_SHARED)) {
/* Private write fault: the COW copy will be anonymous. */
anon = 1;
}
}
/* Only account if the PTE is unchanged — guards against a racing fault. */
if (pte_same(*page_table, orig_pte)) {
if (anon) {
inc_mm_counter_fast(mm, MM_ANONPAGES);
} else {
inc_mm_counter_fast(mm, MM_FILEPAGES);
}
}
}
/*
 * do_linear_fault() - fault on an ordinary (linear) file mapping.
 *
 * The file page offset follows directly from the VMA layout: how far
 * the faulting page sits past the VMA start, plus the VMA's own file
 * offset. Delegates to the common fault path.
 */
static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pte_t *page_table, pmd_t *pmd,
		unsigned int flags, pte_t orig_pte)
{
	unsigned long delta = (address & PAGE_MASK) - vma->vm_start;
	pgoff_t pgoff = (delta >> PAGE_SHIFT) + vma->vm_pgoff;

	return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
}
/*
 * handle_pte_fault() - dispatch a page fault by PTE state.
 *
 * Not-present PTE cases:
 *   - empty PTE, VMA has a fault op  -> linear file fault
 *   - empty PTE, no fault op         -> anonymous page
 *   - file PTE (non-linear mapping)  -> non-linear file fault
 *   - otherwise                      -> swapped-out page
 * NOTE(review): abridged excerpt — the present-PTE path (protection /
 * write faults) is omitted, so this non-void function has no terminal
 * return in this excerpt.
 */
int handle_pte_fault(struct mm_struct *mm,
struct vm_area_struct *vma, unsigned long address,
pte_t *pte, pmd_t *pmd, unsigned int flags) {
pte_t entry;
entry = *pte;
if (!pte_present(entry)) {
if (pte_none(entry)) {
if (vma->vm_ops) {
if (likely(vma->vm_ops->fault))
return do_linear_fault(mm, vma, address,
pte, pmd, flags, entry);
}
/* No backing file: allocate an anonymous page. */
return do_anonymous_page(mm, vma, address,
pte, pmd, flags);
}
if (pte_file(entry))
return do_nonlinear_fault(mm, vma, address,
pte, pmd, flags, entry);
/* Non-empty, non-file, not present: the page is in swap. */
return do_swap_page(mm, vma, address,
pte, pmd, flags, entry);
}
}
/*
 * pte_present() - nonzero when the PTE maps a page that is in memory
 * (either truly present or marked PROT_NONE).
 */
static inline int pte_present(pte_t a)
{
	unsigned long present_bits = _PAGE_PRESENT | _PAGE_PROTNONE;

	return pte_flags(a) & present_bits;
}
/* pte_none() - true when the PTE is completely empty (no bits set). */
static inline int pte_none(pte_t pte)
{
	return pte.pte == 0;
}
/*
 * pte_file() - nonzero when the not-present PTE encodes a non-linear
 * file offset (_PAGE_FILE set) rather than a swap entry.
 */
static inline int pte_file(pte_t pte)
{
	unsigned long flag_bits = pte_flags(pte);

	return flag_bits & _PAGE_FILE;
}
/*
 * load_elf_binary() - abridged excerpt: how start_code/end_code (later
 * consumed by task_statm()'s "text" computation) are derived from the
 * ELF program headers.
 *
 * Fix: the original excerpt compared end_code against the segment's
 * START vaddr, yielding an empty/incorrect text range; upstream
 * fs/binfmt_elf.c tracks the segment END (p_vaddr + p_filesz) for
 * executable segments. Also declares the loop index 'i'.
 * NOTE(review): 'size' and 'loc' come from omitted parts of the real
 * function and are left as-is in this excerpt.
 */
static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
{
	struct elf_phdr *elf_ppnt, *elf_phdata;
	unsigned long start_code, end_code, start_data, end_data;
	int i;

	elf_phdata = kmalloc(size, GFP_KERNEL);
	start_code = ~0UL;
	end_code = 0;
	for (i = 0, elf_ppnt = elf_phdata; i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
		unsigned long k;

		/* Lowest segment start becomes the start of the code range. */
		k = elf_ppnt->p_vaddr;
		if (k < start_code)
			start_code = k;
		/* Highest END of an executable segment becomes end_code. */
		k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
		if ((elf_ppnt->p_flags & PF_X) && end_code < k)
			end_code = k;
	}
	current->mm->end_code = end_code;
	current->mm->start_code = start_code;
	return 0;	/* success path of this abridged excerpt */
}