Add count for allocated and mapped pages

parent 56a16b9ea5
commit 4bbe08d8f5

core/mem.c
@@ -9,6 +9,8 @@ static struct mem_desc *used_page;
 static unsigned long bottom_mem;
 static unsigned long top_mem;
 
+static unsigned long allocatedPage = 0;
+
 int memSetup(paddr_t upperMem, paddr_t *lastUsedOut)
 {
     // Align upper mem (in kB) on page size even if it does loose a page
@@ -56,6 +58,7 @@ paddr_t allocPhyPage(void)
     struct mem_desc *mem = list_pop_head(free_page);
     mem->ref = 1;
     list_add_tail(used_page, mem);
+    allocatedPage++;
     return mem->phy_addr;
 }
 
@@ -67,6 +70,7 @@ int unrefPhyPage(paddr_t addr)
     }
     mem->ref--;
     if (mem->ref == 0) {
+        allocatedPage--;
         list_delete(used_page, mem);
         list_add_tail(free_page, mem);
     }
@@ -82,9 +86,15 @@ int refPhyPage(paddr_t addr)
     }
     mem->ref++;
     if (mem->ref == 1) {
+        allocatedPage++;
         list_add_tail(used_page, mem);
         list_delete(free_page, mem);
     }
 
     return 0;
 }
+
+unsigned long getNbAllocatedPage(void)
+{
+    return allocatedPage;
+}

core/mem.h

@@ -21,3 +21,4 @@ int memSetup(paddr_t upperMem, paddr_t *lastUsed);
 paddr_t allocPhyPage(void);
 int unrefPhyPage(paddr_t addr);
 int refPhyPage(paddr_t addr);
+unsigned long getNbAllocatedPage(void);
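
Note (not part of the commit): the new counter makes it easy to check that physical-page allocation stays balanced. A minimal usage sketch follows, relying only on the mem.h declarations above; the testAllocBalance name and the 0-on-failure convention for allocPhyPage are illustrative assumptions.

    /* Hypothetical self-test built on the accessor added above. */
    #include "mem.h"

    int testAllocBalance(void)
    {
        unsigned long before = getNbAllocatedPage();

        paddr_t page = allocPhyPage(); /* allocatedPage goes up by one           */
        if (page == 0)                 /* failure value is an assumed convention */
            return -1;
        unrefPhyPage(page);            /* ref drops back to 0, counter goes down */

        /* A leak would leave the counter above its starting value. */
        return getNbAllocatedPage() == before ? 0 : -1;
    }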

core/paging.c

@@ -1,17 +1,20 @@
-#include "klibc.h"
-#include "errno.h"
-#include "mem.h"
 #include "paging.h"
+#include "errno.h"
+#include "klibc.h"
+#include "mem.h"
 #include "stdarg.h"
 #include "vga.h"
 
-// In a Vaddr, 10 first bit (MSB) are the index in the Page Directory. A Page Directory Entry point to a Page Table.
-// The 10 next bits are then an index in this Page Table. A Page Table Entry then point to a physical address at which is added the remaining 12 bits.
-// So they are 1024 entry in the PD, each of them pointing to a PT of 1024 entry. Each PTE pointing to 4K page.
-// First address (up to page_desc from mem.c) are mapped such as Paddr == Vaddr.
-// To make PD always accessible a (x86?) trick is used : The mirroring. A given entry N in the PD point to the PD (this is possible because PDE very looks like PTE in x86).
-// So N << (10 + 12 = 4Mo) point to the Paddr of PD. Then, accessing N * 4Mo + I * 4Ko is accessing the PT of the Ieme entry in the PD (as MMU take the PD pointed by the PDE number N like a PT).
-// More particularly, accessing N * 4Mo + N * 4ko is accessing the PD.
+// In a Vaddr, 10 first bit (MSB) are the index in the Page Directory. A Page Directory Entry
+// point to a Page Table. The 10 next bits are then an index in this Page Table. A Page Table
+// Entry then point to a physical address at which is added the remaining 12 bits. So they are
+// 1024 entry in the PD, each of them pointing to a PT of 1024 entry. Each PTE pointing to 4K
+// page. First address (up to page_desc from mem.c) are mapped such as Paddr == Vaddr. To make
+// PD always accessible a (x86?) trick is used : The mirroring. A given entry N in the PD point
+// to the PD (this is possible because PDE very looks like PTE in x86). So N << (10 + 12 = 4Mo)
+// point to the Paddr of PD. Then, accessing N * 4Mo + I * 4Ko is accessing the PT of the Ieme
+// entry in the PD (as MMU take the PD pointed by the PDE number N like a PT). More
+// particularly, accessing N * 4Mo + N * 4ko is accessing the PD.
 //
 // PD is at Vaddr N * 4Mo and take 4ko. Each PT are allocated dynamically.
 // Just make sure that N have not been used by identity mapping
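
For readers new to the mirroring trick described in the comment above, here is a small standalone sketch of the arithmetic (not part of the commit). PD_SHIFT and PD_MIRROR_PAGE_IDX match the constants in core/paging.c; the value 12 for PT_SHIFT is an assumption, since its definition is outside this diff.

    #include <stdint.h>
    #include <stdio.h>

    #define PD_SHIFT 22              /* one PDE covers 4 MiB of virtual space */
    #define PT_SHIFT 12              /* one PTE covers a 4 KiB page (assumed) */
    #define PD_MIRROR_PAGE_IDX 1023U /* entry N that points back at the PD    */

    int main(void)
    {
        /* Vaddr of the PD itself: N * 4 MiB + N * 4 KiB */
        uint32_t pd_vaddr = (PD_MIRROR_PAGE_IDX << PD_SHIFT) +
                            (PD_MIRROR_PAGE_IDX << PT_SHIFT);

        /* Vaddr of the page table behind PD entry I: N * 4 MiB + I * 4 KiB */
        uint32_t pd_entry = 42; /* arbitrary example index */
        uint32_t pt_vaddr = (PD_MIRROR_PAGE_IDX << PD_SHIFT) + (pd_entry << PT_SHIFT);

        printf("PD visible at    0x%08x\n", (unsigned)pd_vaddr); /* 0xfffff000 */
        printf("PT 42 visible at 0x%08x\n", (unsigned)pt_vaddr); /* 0xffc2a000 */
        return 0;
    }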
@@ -21,6 +24,8 @@
 #define PD_SHIFT 22
 #define PD_MIRROR_PAGE_IDX 1023U
 
+static unsigned long mappedPage = 0;
+
 struct pde {
     uint32_t present : 1;
     uint32_t write : 1; // 0 read - 1 RW
@@ -61,7 +66,6 @@ struct pdbr {
     uint32_t pd_paddr : 20;
 } __attribute__((packed));
 
-
 // invalidate the TLB entry for the page located at the given virtual address
 static inline void __native_flush_tlb_single(unsigned long addr)
 {
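
The body of __native_flush_tlb_single is outside this hunk; for context, a typical definition of such a helper (the form used by the Linux function of the same name, assumed to match here) looks like this.

    // Assumed implementation, not shown in this diff: drop the single TLB
    // entry covering addr with the invlpg instruction.
    static inline void __native_flush_tlb_single(unsigned long addr)
    {
        asm volatile("invlpg (%0)" ::"r"(addr) : "memory");
    }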
@@ -108,7 +112,6 @@ int pagingSetup(paddr_t upperKernelAddr)
     pd[PD_MIRROR_PAGE_IDX].write = 1;
     pd[PD_MIRROR_PAGE_IDX].pt_addr = ((paddr_t)pd >> PT_SHIFT);
 
-
     // Loading of the PDBR in the MMU:
     asm volatile("movl %0,%%cr3\n\t"
                  "movl %%cr0,%%eax\n\t"
@@ -118,7 +121,8 @@ int pagingSetup(paddr_t upperKernelAddr)
                  "1:\n\t"
                  "movl $2f, %%eax\n\t"
                  "jmp *%%eax\n\t"
-                 "2:\n\t" ::"r"(cr3):"memory","eax");
+                 "2:\n\t" ::"r"(cr3)
+                 : "memory", "eax");
     return 0;
 }
 
@@ -128,10 +132,11 @@ int pageMap(vaddr_t vaddr, paddr_t paddr, int flags)
     uint ptEntry = (vaddr >> PT_SHIFT) & PTE_MASK;
 
     // Thank to mirroring, we can access the PD
-    struct pde *pd =
-        (struct pde *)((PD_MIRROR_PAGE_IDX << 22) + (PD_MIRROR_PAGE_IDX << 12));
+    struct pde *pd = (struct pde *)((PD_MIRROR_PAGE_IDX << PD_SHIFT) +
+                                    (PD_MIRROR_PAGE_IDX << PT_SHIFT));
 
-    struct pte *pt = (struct pte *)((PD_MIRROR_PAGE_IDX << 22) + (pdEntry << 12));
+    struct pte *pt =
+        (struct pte *)((PD_MIRROR_PAGE_IDX << PD_SHIFT) + (pdEntry << PT_SHIFT));
 
     if (!pd[pdEntry].present) {
         paddr_t ptPhy = allocPhyPage();
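
As a worked example of the index extraction used in pageMap (illustrative only, not in the commit); the PTE_MASK value 0x3ff is assumed, since that macro is defined outside this diff.

    #include <stdint.h>
    #include <stdio.h>

    #define PD_SHIFT 22
    #define PT_SHIFT 12
    #define PTE_MASK 0x3ffU /* assumed: keep the low 10 bits of the PT index */

    int main(void)
    {
        uint32_t vaddr = 0x00402123; /* arbitrary example address */

        unsigned pdEntry = vaddr >> PD_SHIFT;              /* 1 -> second PDE     */
        unsigned ptEntry = (vaddr >> PT_SHIFT) & PTE_MASK; /* 2 -> third PTE      */
        unsigned offset  = vaddr & 0xfffU;                 /* 0x123 into the page */

        printf("PDE %u, PTE %u, offset 0x%x\n", pdEntry, ptEntry, offset);
        return 0;
    }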
@@ -162,6 +167,7 @@ int pageMap(vaddr_t vaddr, paddr_t paddr, int flags)
     refPhyPage(paddr);
 
     __native_flush_tlb_single(vaddr);
+    mappedPage++;
     return 0;
 }
 
@@ -171,11 +177,11 @@ int pageUnmap(vaddr_t vaddr)
     uint ptEntry = (vaddr >> PT_SHIFT) & PTE_MASK;
 
     // Thank to mirroring, we can access the PD
-    struct pde *pd =
-        (struct pde *)(PD_MIRROR_PAGE_IDX * (1U << 22) + PD_MIRROR_PAGE_IDX * (1U << 12));
+    struct pde *pd = (struct pde *)((PD_MIRROR_PAGE_IDX << PD_SHIFT) +
+                                    (PD_MIRROR_PAGE_IDX << PT_SHIFT));
 
     struct pte *pt =
-        (struct pte *)(PD_MIRROR_PAGE_IDX * (1U << 22) + pdEntry * (1U << 12));
+        (struct pte *)((PD_MIRROR_PAGE_IDX << PD_SHIFT) + (pdEntry << PT_SHIFT));
     if (!pd[pdEntry].present)
         return -EINVAL;
     if (!pt[ptEntry].present)
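
This hunk only replaces the literal 4 MiB/4 KiB multiplies with the named shifts; the computed addresses are unchanged. An illustrative compile-time check (not in the commit; it assumes PT_SHIFT is defined as 12) would make the equivalence explicit.

    /* Constants as in core/paging.c; the PT_SHIFT value is assumed. */
    #define PD_SHIFT 22
    #define PT_SHIFT 12
    #define PD_MIRROR_PAGE_IDX 1023U

    _Static_assert((PD_MIRROR_PAGE_IDX << PD_SHIFT) ==
                       PD_MIRROR_PAGE_IDX * (1U << 22),
                   "PD index shift matches the old 4 MiB multiply");
    _Static_assert((PD_MIRROR_PAGE_IDX << PT_SHIFT) ==
                       PD_MIRROR_PAGE_IDX * (1U << 12),
                   "PT index shift matches the old 4 KiB multiply");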
@@ -190,6 +196,11 @@ int pageUnmap(vaddr_t vaddr)
         __native_flush_tlb_single((vaddr_t)pt);
     }
     __native_flush_tlb_single(vaddr);
+    mappedPage--;
     return 0;
+}
 
+unsigned long getNbMappedPage(void)
+{
+    return mappedPage;
 }

core/paging.h

@@ -9,3 +9,4 @@ int pagingSetup(paddr_t upperKernelAddr);
 
 int pageMap(vaddr_t vaddr, paddr_t paddr, int flags);
 int pageUnmap(vaddr_t vaddr);
+unsigned long getNbMappedPage(void);
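
Closing note (not part of the commit): the two counters answer different questions, and a hedged sketch of a diagnostic helper shows how they could be read together. The struct and function names here are made up for illustration; only getNbAllocatedPage() and getNbMappedPage() come from this change.

    #include "mem.h"
    #include "paging.h"

    struct pageCounters {
        unsigned long allocated; /* physical pages with ref > 0        */
        unsigned long mapped;    /* mappings created through pageMap() */
    };

    struct pageCounters getPageCounters(void)
    {
        struct pageCounters c;

        c.allocated = getNbAllocatedPage();
        c.mapped = getNbMappedPage();
        /* The numbers need not match: identity-mapped boot pages never go
         * through pageMap, and one physical page can back several mappings,
         * so treat them as two independent gauges. */
        return c;
    }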