Add counters for allocated and mapped pages

Mathieu Maret 2018-11-14 14:28:06 +01:00
parent 56a16b9ea5
commit 4bbe08d8f5
4 changed files with 64 additions and 41 deletions

mem.c

@@ -9,7 +9,9 @@ static struct mem_desc *used_page;
 static unsigned long bottom_mem;
 static unsigned long top_mem;
 
-int memSetup(paddr_t upperMem, paddr_t * lastUsedOut)
+static unsigned long allocatedPage = 0;
+
+int memSetup(paddr_t upperMem, paddr_t *lastUsedOut)
 {
     // Align upper mem (in kB) on the page size, even if that loses a page
     upperMem = ALIGN_DOWN(upperMem, PAGE_SIZE / 1024);
@@ -56,6 +58,7 @@ paddr_t allocPhyPage(void)
     struct mem_desc *mem = list_pop_head(free_page);
     mem->ref = 1;
     list_add_tail(used_page, mem);
+    allocatedPage++;
 
     return mem->phy_addr;
 }
@@ -67,6 +70,7 @@ int unrefPhyPage(paddr_t addr)
     }
     mem->ref--;
     if (mem->ref == 0) {
+        allocatedPage--;
         list_delete(used_page, mem);
         list_add_tail(free_page, mem);
     }
@@ -82,9 +86,15 @@ int refPhyPage(paddr_t addr)
     }
     mem->ref++;
     if (mem->ref == 1) {
+        allocatedPage++;
         list_add_tail(used_page, mem);
         list_delete(free_page, mem);
     }
 
     return 0;
 }
+
+unsigned long getNbAllocatedPage(void)
+{
+    return allocatedPage;
+}
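
Taken together, allocPhyPage() and refPhyPage() increment allocatedPage exactly when a page's refcount goes from 0 to 1, and unrefPhyPage() decrements it when the refcount falls back to 0, so getNbAllocatedPage() reports the number of physical pages currently referenced at least once. A minimal sanity check built on top of this (hypothetical test code, not part of the commit; kassert is an assumed helper):

    // Hypothetical check of the counter invariant (kassert is assumed).
    static void checkAllocCounter(void)
    {
        unsigned long before = getNbAllocatedPage();

        paddr_t page = allocPhyPage(); // ref: 0 -> 1, counter++
        kassert(getNbAllocatedPage() == before + 1);

        refPhyPage(page);              // ref: 1 -> 2, counter unchanged
        kassert(getNbAllocatedPage() == before + 1);

        unrefPhyPage(page);            // ref: 2 -> 1, still counted
        unrefPhyPage(page);            // ref: 1 -> 0, counter--
        kassert(getNbAllocatedPage() == before);
    }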

mem.h

@@ -21,3 +21,4 @@ int memSetup(paddr_t upperMem, paddr_t *lastUsed);
 paddr_t allocPhyPage(void);
 int unrefPhyPage(paddr_t addr);
 int refPhyPage(paddr_t addr);
+unsigned long getNbAllocatedPage(void);

paging.c

@@ -1,26 +1,31 @@
-#include "klibc.h"
-#include "errno.h"
-#include "mem.h"
 #include "paging.h"
+#include "errno.h"
+#include "klibc.h"
+#include "mem.h"
 #include "stdarg.h"
 #include "vga.h"
 
-// In a Vaddr, 10 first bit (MSB) are the index in the Page Directory. A Page Directory Entry point to a Page Table.
-// The 10 next bits are then an index in this Page Table. A Page Table Entry then point to a physical address at which is added the remaining 12 bits.
-// So they are 1024 entry in the PD, each of them pointing to a PT of 1024 entry. Each PTE pointing to 4K page.
-// First address (up to page_desc from mem.c) are mapped such as Paddr == Vaddr.
-// To make PD always accessible a (x86?) trick is used : The mirroring. A given entry N in the PD point to the PD (this is possible because PDE very looks like PTE in x86).
-// So N << (10 + 12 = 4Mo) point to the Paddr of PD. Then, accessing N * 4Mo + I * 4Ko is accessing the PT of the Ieme entry in the PD (as MMU take the PD pointed by the PDE number N like a PT).
-// More particularly, accessing N * 4Mo + N * 4ko is accessing the PD.
+// In a vaddr, the 10 first bits (MSB) are an index into the Page Directory. A Page Directory
+// Entry points to a Page Table. The next 10 bits are then an index into this Page Table. A
+// Page Table Entry points to a physical address, to which the remaining 12 bits are added.
+// So there are 1024 entries in the PD, each pointing to a PT of 1024 entries, each PTE
+// pointing to a 4K page. The first addresses (up to page_desc from mem.c) are mapped such
+// that Paddr == Vaddr. To keep the PD always accessible, an (x86?) trick is used: mirroring.
+// A given entry N in the PD points to the PD itself (possible because a PDE looks very much
+// like a PTE on x86). So N << (10 + 12) (= N * 4MB) points to the Paddr of the PD. Then,
+// accessing N * 4MB + I * 4KB accesses the PT of the I-th entry in the PD (the MMU takes the
+// PD pointed to by PDE number N for a PT). In particular, N * 4MB + N * 4KB accesses the PD.
 //
 // The PD is at Vaddr N * 4MB and takes 4KB. Each PT is allocated dynamically.
 // Just make sure that N has not been used by the identity mapping.
 
 #define PT_SHIFT 12
-#define PTE_MASK 0x3ff //10bits
+#define PTE_MASK 0x3ff // 10bits
 #define PD_SHIFT 22
 #define PD_MIRROR_PAGE_IDX 1023U
 
+static unsigned long mappedPage = 0;
+
 struct pde {
     uint32_t present : 1;
     uint32_t write : 1; // 0 read - 1 RW
@@ -61,7 +66,6 @@ struct pdbr {
     uint32_t pd_paddr : 20;
 } __attribute__((packed));
 
-
 // invalidate the TLB entry for the page located at the given virtual address
 static inline void __native_flush_tlb_single(unsigned long addr)
 {
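
(The body of __native_flush_tlb_single falls outside this hunk; on x86 such a helper is conventionally a single invlpg, along the lines of the sketch below, which is an assumption about this file rather than a quoted excerpt.)

    asm volatile("invlpg (%0)" ::"r"(addr) : "memory");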
@@ -84,9 +88,9 @@ int pagingSetup(paddr_t upperKernelAddr)
     // Identity mapping up to upperKernelAddr
     for (paddr_t i = 0; i < upperKernelAddr; i += PAGE_SIZE) {
         uint pdEntry = i >> (PD_SHIFT);
-        uint ptEntry = (i >> PT_SHIFT ) & PTE_MASK;
+        uint ptEntry = (i >> PT_SHIFT) & PTE_MASK;
         struct pte *pt;
-        if (pd[pdEntry].present){
+        if (pd[pdEntry].present) {
             pt = (struct pte *)(pd[pdEntry].pt_addr << PT_SHIFT);
             refPhyPage((paddr_t)pt);
         } else {
@@ -99,7 +103,7 @@ int pagingSetup(paddr_t upperKernelAddr)
         }
 
         pt[ptEntry].present = 1;
-        pt[ptEntry].write = 1; //TODO set Kernel code as RO
+        pt[ptEntry].write = 1; // TODO set Kernel code as RO
         pt[ptEntry].paddr = i >> PAGE_SHIFT;
     }
 
@@ -108,9 +112,8 @@ int pagingSetup(paddr_t upperKernelAddr)
     pd[PD_MIRROR_PAGE_IDX].write = 1;
     pd[PD_MIRROR_PAGE_IDX].pt_addr = ((paddr_t)pd >> PT_SHIFT);
 
-
     // Loading of the PDBR into the MMU:
-    asm volatile ("movl %0,%%cr3\n\t"
+    asm volatile("movl %0,%%cr3\n\t"
                  "movl %%cr0,%%eax\n\t"
                  "orl $0x80010000, %%eax\n\t" /* bit 31 | bit 16 */
                  "movl %%eax,%%cr0\n\t"
@@ -118,7 +121,8 @@ int pagingSetup(paddr_t upperKernelAddr)
                 "1:\n\t"
                 "movl $2f, %%eax\n\t"
                 "jmp *%%eax\n\t"
-                "2:\n\t" ::"r"(cr3):"memory","eax");
+                "2:\n\t" ::"r"(cr3)
+                : "memory", "eax");
 
     return 0;
 }
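
For reference, the constant 0x80010000 or-ed into %cr0 above sets bit 31 (CR0.PG, enable paging) and bit 16 (CR0.WP, write protect: ring 0 honors read-only pages). A named-constant spelling (the names are assumptions, not part of this commit) would be:

    #define CR0_WP (1U << 16) // ring 0 honors read-only pages
    #define CR0_PG (1U << 31) // enable paging
    // CR0_PG | CR0_WP == 0x80010000, the literal used in the asm above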
@@ -128,17 +132,18 @@ int pageMap(vaddr_t vaddr, paddr_t paddr, int flags)
     uint ptEntry = (vaddr >> PT_SHIFT) & PTE_MASK;
 
     // Thanks to mirroring, we can access the PD
-    struct pde *pd =
-        (struct pde *)((PD_MIRROR_PAGE_IDX << 22) + (PD_MIRROR_PAGE_IDX << 12));
-    struct pte *pt = (struct pte *)((PD_MIRROR_PAGE_IDX << 22) + (pdEntry << 12));
+    struct pde *pd = (struct pde *)((PD_MIRROR_PAGE_IDX << PD_SHIFT) +
+                                    (PD_MIRROR_PAGE_IDX << PT_SHIFT));
+    struct pte *pt =
+        (struct pte *)((PD_MIRROR_PAGE_IDX << PD_SHIFT) + (pdEntry << PT_SHIFT));
 
     if (!pd[pdEntry].present) {
         paddr_t ptPhy = allocPhyPage();
         if (ptPhy == (vaddr_t)NULL)
             return ENOMEM;
 
-        pd[pdEntry].user = (flags & PAGING_MEM_USER) ? 1:0;
+        pd[pdEntry].user = (flags & PAGING_MEM_USER) ? 1 : 0;
         pd[pdEntry].present = 1;
         pd[pdEntry].write = 1;
         pd[pdEntry].pt_addr = (ptPhy >> PT_SHIFT);
 
@@ -162,6 +167,7 @@ int pageMap(vaddr_t vaddr, paddr_t paddr, int flags)
     refPhyPage(paddr);
 
     __native_flush_tlb_single(vaddr);
+    mappedPage++;
 
     return 0;
 }
@@ -171,11 +177,11 @@ int pageUnmap(vaddr_t vaddr)
     uint ptEntry = (vaddr >> PT_SHIFT) & PTE_MASK;
 
     // Thanks to mirroring, we can access the PD
-    struct pde *pd =
-        (struct pde *)(PD_MIRROR_PAGE_IDX * (1U << 22) + PD_MIRROR_PAGE_IDX * (1U << 12));
-    struct pte *pt =
-        (struct pte *)(PD_MIRROR_PAGE_IDX * (1U << 22) + pdEntry * (1U << 12));
+    struct pde *pd = (struct pde *)((PD_MIRROR_PAGE_IDX << PD_SHIFT) +
+                                    (PD_MIRROR_PAGE_IDX << PT_SHIFT));
+    struct pte *pt =
+        (struct pte *)((PD_MIRROR_PAGE_IDX << PD_SHIFT) + (pdEntry << PT_SHIFT));
 
     if (!pd[pdEntry].present)
         return -EINVAL;
     if (!pt[ptEntry].present)
@@ -185,11 +191,16 @@ int pageUnmap(vaddr_t vaddr)
     pt[ptEntry].present = 0;
     // PTE not used. Decrease the refcount on the PT. Is the PT still in use?
-    if(unrefPhyPage(pd[pdEntry].pt_addr<< PT_SHIFT) == 0){
+    if (unrefPhyPage(pd[pdEntry].pt_addr << PT_SHIFT) == 0) {
         pd[pdEntry].present = 0;
         __native_flush_tlb_single((vaddr_t)pt);
     }
 
     __native_flush_tlb_single(vaddr);
+    mappedPage--;
 
     return 0;
+}
+
+unsigned long getNbMappedPage(void)
+{
+    return mappedPage;
 }
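
To make the mirroring arithmetic used by pageMap() and pageUnmap() concrete: with PD_MIRROR_PAGE_IDX = 1023, PD_SHIFT = 22 and PT_SHIFT = 12, the PD is reachable at 1023 * 4MB + 1023 * 4KB = 0xFFFFF000, and the PT covering a given vaddr at 1023 * 4MB + pdEntry * 4KB. The same computation as helpers (a sketch with hypothetical names; the constants are the ones defined in this file):

    // Vaddr at which the mirrored PD is visible: 0xFFFFF000.
    static inline vaddr_t pdMirrorVaddr(void)
    {
        return (PD_MIRROR_PAGE_IDX << PD_SHIFT) + (PD_MIRROR_PAGE_IDX << PT_SHIFT);
    }

    // Vaddr at which the PT mapping `vaddr` is visible.
    static inline vaddr_t ptMirrorVaddr(vaddr_t vaddr)
    {
        uint pdEntry = vaddr >> PD_SHIFT; // top 10 bits select the PDE
        return (PD_MIRROR_PAGE_IDX << PD_SHIFT) + (pdEntry << PT_SHIFT);
    }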

paging.h

@@ -9,3 +9,4 @@ int pagingSetup(paddr_t upperKernelAddr);
 int pageMap(vaddr_t vaddr, paddr_t paddr, int flags);
 int pageUnmap(vaddr_t vaddr);
+unsigned long getNbMappedPage(void);
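
A quick way to exercise both new counters end to end (hypothetical test code, not part of the commit; assumes a page-aligned, unmapped vaddr and a kernel printf):

    // Hypothetical smoke test for the counters added by this commit.
    void checkPageCounters(vaddr_t freeVaddr)
    {
        unsigned long mappedBefore = getNbMappedPage();
        unsigned long allocBefore = getNbAllocatedPage();

        paddr_t page = allocPhyPage();
        pageMap(freeVaddr, page, 0); // may also allocate a PT on the way
        unrefPhyPage(page);          // drop our ref; the mapping keeps its own

        pageUnmap(freeVaddr);

        // Ideally both counters are back to their starting values here.
        printf("mapped %lu -> %lu, allocated %lu -> %lu\n",
               mappedBefore, getNbMappedPage(), allocBefore, getNbAllocatedPage());
    }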