Sync PDs on modifications

Mathieu Maret 2021-10-28 00:41:02 +02:00
parent 7b9ceba6b2
commit 07d173a9c1
4 changed files with 90 additions and 12 deletions

mmuContext.c

@@ -77,3 +77,24 @@ struct mmu_context *mmuContextCreate()
 	return ctx;
 }
+
+int mmuContextSyncKernelPDE(int pdEntry, void *pde, size_t pdeSize)
+{
+	uint32_t flags;
+	struct mmu_context *destContext;
+	int nbContexts;
+
+	disable_IRQs(flags);
+	list_foreach_forward(listContext, destContext, nbContexts)
+	{
+		void *dest_pd;
+
+		assert(destContext->ref > 0);
+		dest_pd = (void *)destContext->vaddr_PD;
+		memcpy(dest_pd + pdEntry * pdeSize, pde, pdeSize);
+	}
+	restore_IRQs(flags);
+
+	return 0;
+}
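This helper is the heart of the commit: whenever a kernel-space PDE changes, it is copied into the page directory of every live context, with IRQs masked so the list walk cannot be preempted. One portability note: `dest_pd + pdEntry * pdeSize` does arithmetic on a `void *`, which is a GNU C extension. A standard-C equivalent would cast to a byte pointer first — a sketch, not part of the commit:

	/* Equivalent byte-offset arithmetic without GNU void* math */
	memcpy((char *)dest_pd + (size_t)pdEntry * pdeSize, pde, pdeSize);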

paging.c

@@ -1,7 +1,9 @@
 #include "paging.h"
 #include "errno.h"
+#include "kernel.h"
 #include "klibc.h"
 #include "mem.h"
+#include "mmuContext.h"
 #include "stdarg.h"
 
 // In a Vaddr, the first 10 bits (MSB) are the index in the Page Directory. A Page Directory Entry
@@ -23,6 +25,20 @@
 #define PD_SHIFT 22
 #define PD_MIRROR_PAGE_IDX 1023U
 
+/** Frontier between kernel and user space virtual addresses */
+#define PAGING_BASE_USER_ADDRESS (0x40000000) /* 1GB (must be 4MB-aligned) */
+#define PAGING_TOP_USER_ADDRESS (0xFFFFFFFF) /* 4GB - 1B */
+#define PAGING_USER_SPACE_SIZE (0xc0000000) /* 3GB */
+
+/** Length of the space reserved for the mirroring in the kernel
+    virtual space */
+#define PAGING_MIRROR_SIZE (PAGE_SIZE << 10) /* 1 PD = 1024 Page Tables = 4MB */
+
+/** Virtual address where the mirroring takes place */
+#define PAGING_MIRROR_VADDR \
+	(PAGING_BASE_USER_ADDRESS - PAGING_MIRROR_SIZE)
+
 static unsigned long mappedPage = 0;
 
 struct pde {
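For concreteness, with the usual x86 PAGE_SIZE of 4096 (assumed here, not shown in this hunk) these constants work out as follows — a compile-time sketch:

	#include <assert.h> /* C11 static_assert */

	static_assert((4096u << 10) == 0x00400000u, "mirror spans 1024 PTs = 4 MB");
	static_assert(0x40000000u - 0x00400000u == 0x3FC00000u, "PAGING_MIRROR_VADDR");
	static_assert((0x3FC00000u >> 22) == 255u, "mirror occupies PD entry 255");

So the mirror window is the last 4 MB slot of kernel space, just below the user frontier.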
@@ -107,9 +123,10 @@ int pagingSetup(paddr_t lowerKernelAddr, paddr_t upperKernelAddr)
 	}
 
 	// Setup mirroring
-	pd[PD_MIRROR_PAGE_IDX].present = 1;
-	pd[PD_MIRROR_PAGE_IDX].write = 1;
-	pd[PD_MIRROR_PAGE_IDX].pt_addr = ((paddr_t)pd >> PT_SHIFT);
+	pd[PAGING_MIRROR_VADDR >> PD_SHIFT].present = 1;
+	pd[PAGING_MIRROR_VADDR >> PD_SHIFT].write = 1;
+	pd[PAGING_MIRROR_VADDR >> PD_SHIFT].pt_addr = ((paddr_t)pd >> PT_SHIFT);
+	pd[PAGING_MIRROR_VADDR >> PD_SHIFT].user = 0;
 
 	// Loading of the PDBR in the MMU:
 	asm volatile("movl %0,%%cr3\n\t"
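Why that one self-referencing entry is enough: the mirror PDE points at the PD's own physical page, so for any virtual address inside the mirror window the MMU uses the PD itself as the page table. Sketched walk, with the values worked out above:

	/* Translating PAGING_MIRROR_VADDR + i*PAGE_SIZE:
	 *   step 1: PD index = 255 -> mirror PDE -> phys(PD)   (PD plays the PT role)
	 *   step 2: PT index = i   -> PD entry i -> phys(page table i)
	 * Result: page table i is visible at PAGING_MIRROR_VADDR + i*PAGE_SIZE,
	 * and the PD itself appears at i == 255, i.e. 0x3FCFF000. */

This is exactly the layout the pd and pt expressions in the pageMap, pageUnmap and pagingGetPaddr hunks below rely on.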
@@ -130,22 +147,34 @@ int pageMap(vaddr_t vaddr, paddr_t paddr, int flags)
 	uint pdEntry = vaddr >> (PD_SHIFT);
 	uint ptEntry = (vaddr >> PT_SHIFT) & PTE_MASK;
 
+	if ((vaddr >= PAGING_MIRROR_VADDR) &&
+	    (vaddr < PAGING_MIRROR_VADDR + PAGING_MIRROR_SIZE))
+		return -EINVAL;
+
 	// Thanks to mirroring, we can access the PD
-	struct pde *pd =
-	    (struct pde *)((PD_MIRROR_PAGE_IDX << PD_SHIFT) + (PD_MIRROR_PAGE_IDX << PT_SHIFT));
-	struct pte *pt = (struct pte *)((PD_MIRROR_PAGE_IDX << PD_SHIFT) + (pdEntry << PT_SHIFT));
+	struct pde *pd =
+	    (struct pde *)(PAGING_MIRROR_VADDR + PAGE_SIZE * (PAGING_MIRROR_VADDR >> PD_SHIFT));
+	struct pte *pt = (struct pte *)((PAGING_MIRROR_VADDR) + (pdEntry * PAGE_SIZE));
 
 	if (!pd[pdEntry].present) {
 		paddr_t ptPhy = allocPhyPage(1);
 		if (ptPhy == (vaddr_t)NULL)
 			return -ENOMEM;
 
-		pd[pdEntry].user = (flags & PAGING_MEM_USER) ? 1 : 0;
 		pd[pdEntry].present = 1;
 		pd[pdEntry].write = 1;
 		pd[pdEntry].pt_addr = (ptPhy >> PT_SHIFT);
+
+		if (vaddr < PAGING_BASE_USER_ADDRESS) {
+			pd[pdEntry].user = 0;
+			mmuContextSyncKernelPDE(pdEntry, &pd[pdEntry], sizeof(struct pde));
+		} else {
+			assert(flags & PAGING_MEM_USER);
+			pd[pdEntry].user = 1;
+		}
+
 		__native_flush_tlb_single((vaddr_t)pt);
 		memset((void *)pt, 0, PAGE_SIZE);
 	} else {
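The net effect on callers: mappings below PAGING_BASE_USER_ADDRESS become supervisor-only and their PDE is propagated to every context, while user mappings stay private to the current PD. A hypothetical pair of calls (addresses and paddr variables invented for illustration; the 0 flag value is assumed):

	pageMap(0x00200000, kernelPagePaddr, 0);             /* kernel space: user=0,
	                                                        PDE synced to all contexts */
	pageMap(0x50000000, userPagePaddr, PAGING_MEM_USER); /* user space: user=1,
	                                                        local to this context */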
@@ -175,11 +204,15 @@ int pageUnmap(vaddr_t vaddr)
 	uint pdEntry = vaddr >> (PD_SHIFT);
 	uint ptEntry = (vaddr >> PT_SHIFT) & PTE_MASK;
 
+	if ((vaddr >= PAGING_MIRROR_VADDR) &&
+	    (vaddr < PAGING_MIRROR_VADDR + PAGING_MIRROR_SIZE))
+		return -EINVAL;
+
 	// Thanks to mirroring, we can access the PD
-	struct pde *pd =
-	    (struct pde *)((PD_MIRROR_PAGE_IDX << PD_SHIFT) + (PD_MIRROR_PAGE_IDX << PT_SHIFT));
-	struct pte *pt = (struct pte *)((PD_MIRROR_PAGE_IDX << PD_SHIFT) + (pdEntry << PT_SHIFT));
+	struct pde *pd =
+	    (struct pde *)(PAGING_MIRROR_VADDR + PAGE_SIZE * (PAGING_MIRROR_VADDR >> PD_SHIFT));
+	struct pte *pt = (struct pte *)((PAGING_MIRROR_VADDR) + (pdEntry * PAGE_SIZE));
 
 	if (!pd[pdEntry].present)
 		return -EINVAL;
 	if (!pt[ptEntry].present)
@@ -191,6 +224,9 @@ int pageUnmap(vaddr_t vaddr)
 	// PTE not used. Decrease refcount on it. Is PT not used anymore?
 	if (unrefPhyPage(pd[pdEntry].pt_addr << PT_SHIFT) == 0) {
 		pd[pdEntry].present = 0;
+		if (vaddr < PAGING_BASE_USER_ADDRESS) {
+			mmuContextSyncKernelPDE(pdEntry, &pd[pdEntry], sizeof(struct pde));
+		}
 		__native_flush_tlb_single((vaddr_t)pt);
 	}
 	__native_flush_tlb_single(vaddr);
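Teardown is symmetric: when an unmap empties a kernel page table, the now-absent PDE is broadcast before the local TLB flush, so no other context is left pointing at a freed PT. Sketched with the same invented address as above:

	pageMap(0x00200000, kernelPagePaddr, 0); /* first page in this 4 MB slot:
	                                            PT allocated, new PDE synced */
	pageUnmap(0x00200000);                   /* last page in that PT: PT freed,
	                                            cleared PDE synced again */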
@@ -206,12 +242,11 @@ paddr_t pagingGetPaddr(vaddr_t vaddr)
 	unsigned ptEntry = vaddr >> PT_SHIFT;
 	unsigned pageOffset = vaddr & PAGE_MASK;
 
-	/* Get the PD of the current context */
+	// Thanks to mirroring, we can access the PD
 	struct pde *pd =
-	    (struct pde *)((PD_MIRROR_PAGE_IDX << PD_SHIFT) + (PD_MIRROR_PAGE_IDX << PT_SHIFT));
-	/* Address of the PT in the mirroring */
-	struct pte *pt = (struct pte *)((PD_MIRROR_PAGE_IDX << PD_SHIFT) + (pdEntry << PT_SHIFT));
+	    (struct pde *)(PAGING_MIRROR_VADDR + PAGE_SIZE * (PAGING_MIRROR_VADDR >> PD_SHIFT));
+	struct pte *pt = (struct pte *)((PAGING_MIRROR_VADDR) + (pdEntry * PAGE_SIZE));
 
 	/* No page mapped at this address ? */
 	if (!pd[pdEntry].present)
@@ -233,3 +268,21 @@ paddr_t pagingGetCurrentPDPaddr()
 	asm volatile("movl %%cr3, %0\n": "=r"(pdbr));
 	return (pdbr.pd_paddr << 12);
 }
+
+int pagingSetCurrentPDPaddr(paddr_t paddrPD)
+{
+	struct pdbr pdbr;
+
+	assert(paddrPD != 0);
+	assert(IS_ALIGNED(paddrPD, PAGE_SIZE));
+
+	/* Setup the value of the PDBR */
+	memset(&pdbr, 0x0, sizeof(struct pdbr)); /* Reset the PDBR */
+	pdbr.pd_paddr = (paddrPD >> 12);
+
+	/* Configure the MMU according to the PDBR */
+	asm volatile("movl %0,%%cr3\n" ::"r"(pdbr));
+
+	return 0;
+}
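With pagingSetCurrentPDPaddr in place, switching address spaces becomes a single CR3 reload. A hypothetical switch path — the function and the paddr_PD field are assumptions, since only vaddr_PD appears in this commit:

	int mmuContextSwitch(struct mmu_context *ctx)
	{
		assert(ctx->ref > 0);
		/* Point the MMU at this context's page directory */
		return pagingSetCurrentPDPaddr(ctx->paddr_PD);
	}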

paging.h

@@ -11,5 +11,6 @@ int pageMap(vaddr_t vaddr, paddr_t paddr, int flags);
 int pageUnmap(vaddr_t vaddr);
 
 unsigned long getNbMappedPage(void);
+int pagingSetCurrentPDPaddr(paddr_t paddrPD);
 paddr_t pagingGetPaddr(vaddr_t vaddr);
 paddr_t pagingGetCurrentPDPaddr();

mmuContext.h

@@ -1,5 +1,8 @@
 #pragma once
 
+#include "stdarg.h"
+
 struct mmu_context;
 
 int mmuContextSetup();
 struct mmu_context *mmuContextCreate();
+int mmuContextSyncKernelPDE(int pdEntry, void *pde, size_t pdeSize);
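Taken together, the changes maintain one invariant: every live PD agrees with the current one on all kernel-space entries, except the mirror slot, which legitimately points at each PD's own page. A debug walk one could imagine against these structures (hypothetical helper; listContext is private to mmuContext.c, so it would live there):

	static void assertKernelPDsInSync(const struct pde *refPd)
	{
		uint32_t flags;
		struct mmu_context *ctx;
		int nb;

		disable_IRQs(flags);
		list_foreach_forward(listContext, ctx, nb)
		{
			const struct pde *pd = (const struct pde *)ctx->vaddr_PD;
			/* Kernel space covers PD entries [0, PAGING_BASE_USER_ADDRESS >> PD_SHIFT) */
			for (unsigned i = 0; i < (PAGING_BASE_USER_ADDRESS >> PD_SHIFT); i++) {
				if (i == (PAGING_MIRROR_VADDR >> PD_SHIFT))
					continue; /* each PD mirrors itself: this entry differs */
				assert(memcmp(&pd[i], &refPd[i], sizeof(struct pde)) == 0);
			}
		}
		restore_IRQs(flags);
	}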