diff --git a/arch/x86/mmuContext.c b/arch/x86/mmuContext.c
index 278bfb9..ae778d2 100644
--- a/arch/x86/mmuContext.c
+++ b/arch/x86/mmuContext.c
@@ -2,8 +2,8 @@
 #include "alloc.h"
 #include "allocArea.h"
 #include "errno.h"
-#include "klibc.h"
 #include "irq.h"
+#include "klibc.h"
 #include "list.h"
 #include "paging.h"
 #include "stdarg.h"
@@ -70,7 +70,13 @@ struct mmu_context *mmuContextCreate()
 
 	ctx->ref = 1;
 
-	//TODO copy kernel space
+	if (pagingCopyKernelSpace(ctx->vaddr_PD, ctx->paddr_PD, currentContext->vaddr_PD)) {
+		pr_err("Fail to copy Kernel space\n");
+		free(ctx);
+
+		return NULL;
+	}
+
 	disable_IRQs(flags);
 	list_add_tail(listContext, ctx);
 	restore_IRQs(flags);
@@ -78,7 +84,7 @@ struct mmu_context *mmuContextCreate()
 	return ctx;
 }
 
-int mmuContextSyncKernelPDE(int pdEntry, void * pde, size_t pdeSize)
+int mmuContextSyncKernelPDE(int pdEntry, void *pde, size_t pdeSize)
 {
 	uint32_t flags;
 	struct mmu_context *destContext;
@@ -91,8 +97,8 @@ int mmuContextSyncKernelPDE(int pdEntry, void * pde, size_t pdeSize)
 
 		assert(destContext->ref > 0);
 
-        dest_pd = (void *)destContext->vaddr_PD;
-        memcpy(dest_pd + pdEntry *pdeSize, pde, pdeSize);
+		dest_pd = (void *)destContext->vaddr_PD;
+		memcpy(dest_pd + pdEntry * pdeSize, pde, pdeSize);
 	}
 	restore_IRQs(flags);
 
diff --git a/arch/x86/paging.c b/arch/x86/paging.c
index 666e0c3..19bc60b 100644
--- a/arch/x86/paging.c
+++ b/arch/x86/paging.c
@@ -133,17 +133,15 @@ int pageMap(vaddr_t vaddr, paddr_t paddr, int flags)
 	uint pdEntry = vaddr >> (PD_SHIFT);
 	uint ptEntry = (vaddr >> PT_SHIFT) & PTE_MASK;
 
-	if ((vaddr >= PAGING_MIRROR_VADDR) &&
-	    (vaddr < PAGING_MIRROR_VADDR + PAGING_MIRROR_SIZE))
+	if ((vaddr >= PAGING_MIRROR_VADDR) && (vaddr < PAGING_MIRROR_VADDR + PAGING_MIRROR_SIZE))
 		return -EINVAL;
 
 	// Thank to mirroring, we can access the PD
 	struct pde *pd =
-		(struct pde *)(PAGING_MIRROR_VADDR + PAGE_SIZE*(PAGING_MIRROR_VADDR>>PD_SHIFT));
+		(struct pde *)(PAGING_MIRROR_VADDR + PAGE_SIZE * (PAGING_MIRROR_VADDR >> PD_SHIFT));
 	struct pte *pt = (struct pte *)((PAGING_MIRROR_VADDR) + (pdEntry * PAGE_SIZE));
 
 
-
 	if (!pd[pdEntry].present) {
 		paddr_t ptPhy = allocPhyPage(1);
 		if (ptPhy == (vaddr_t)NULL)
@@ -153,12 +151,12 @@ int pageMap(vaddr_t vaddr, paddr_t paddr, int flags)
 		pd[pdEntry].write = 1;
 		pd[pdEntry].pt_addr = (ptPhy >> PT_SHIFT);
 
-		if(vaddr < PAGING_BASE_USER_ADDRESS){
-            pd[pdEntry].user = 0;
+		if (vaddr < PAGING_BASE_USER_ADDRESS) {
+			pd[pdEntry].user = 0;
 			mmuContextSyncKernelPDE(pdEntry, &pd[pdEntry], sizeof(struct pde));
-		}else{
+		} else {
 			assert(flags & PAGING_MEM_USER);
-            pd[pdEntry].user = 1;
+			pd[pdEntry].user = 1;
 		}
 
 		__native_flush_tlb_single((vaddr_t)pt);
@@ -190,13 +188,12 @@ int pageUnmap(vaddr_t vaddr)
 	uint pdEntry = vaddr >> (PD_SHIFT);
 	uint ptEntry = (vaddr >> PT_SHIFT) & PTE_MASK;
 
-	if ((vaddr >= PAGING_MIRROR_VADDR) &&
-	    (vaddr < PAGING_MIRROR_VADDR + PAGING_MIRROR_SIZE))
+	if ((vaddr >= PAGING_MIRROR_VADDR) && (vaddr < PAGING_MIRROR_VADDR + PAGING_MIRROR_SIZE))
 		return -EINVAL;
 
 	// Thank to mirroring, we can access the PD
 	struct pde *pd =
-		(struct pde *)(PAGING_MIRROR_VADDR + PAGE_SIZE*(PAGING_MIRROR_VADDR>>PD_SHIFT));
+		(struct pde *)(PAGING_MIRROR_VADDR + PAGE_SIZE * (PAGING_MIRROR_VADDR >> PD_SHIFT));
 	struct pte *pt = (struct pte *)((PAGING_MIRROR_VADDR) + (pdEntry * PAGE_SIZE));
 
 	if (!pd[pdEntry].present)
@@ -230,7 +227,7 @@ paddr_t pagingGetPaddr(vaddr_t vaddr)
 
 	// Thank to mirroring, we can access the PD
 	struct pde *pd =
-		(struct pde *)(PAGING_MIRROR_VADDR + PAGE_SIZE*(PAGING_MIRROR_VADDR>>PD_SHIFT));
+		(struct pde *)(PAGING_MIRROR_VADDR + PAGE_SIZE * (PAGING_MIRROR_VADDR >> PD_SHIFT));
 	struct pte *pt = (struct pte *)((PAGING_MIRROR_VADDR) + (pdEntry * PAGE_SIZE));
 
 
@@ -250,25 +247,64 @@ unsigned long getNbMappedPage(void)
 
 paddr_t pagingGetCurrentPDPaddr()
 {
-    struct pdbr pdbr;
-    asm volatile("movl %%cr3, %0\n": "=r"(pdbr));
-    return (pdbr.pd_paddr << 12);
+	struct pdbr pdbr;
+	asm volatile("movl %%cr3, %0\n" : "=r"(pdbr));
+	return (pdbr.pd_paddr << 12);
 }
 
 int pagingSetCurrentPDPaddr(paddr_t paddrPD)
 {
-    struct pdbr pdbr;
+	struct pdbr pdbr;
 
-    assert(paddrPD != 0);
-    assert(IS_ALIGNED(paddrPD, PAGE_SIZE));
+	assert(paddrPD != 0);
+	assert(IS_ALIGNED(paddrPD, PAGE_SIZE));
 
-    /* Setup the value of the PDBR */
-    memset(& pdbr, 0x0, sizeof(struct pdbr)); /* Reset the PDBR */
-    pdbr.pd_paddr = (paddrPD >> 12);
+	/* Setup the value of the PDBR */
+	memset(&pdbr, 0x0, sizeof(struct pdbr)); /* Reset the PDBR */
+	pdbr.pd_paddr = (paddrPD >> 12);
 
-    /* Configure the MMU according to the PDBR */
-    asm volatile ("movl %0,%%cr3\n" ::"r"(pdbr));
+	/* Configure the MMU according to the PDBR */
+	asm volatile("movl %0,%%cr3\n" ::"r"(pdbr));
 
-    return 0;
+	return 0;
+}
+
+int pagingCopyKernelSpace(vaddr_t destVaddrPD, paddr_t destPaddrPD, vaddr_t srcVaddrPD)
+{
+	struct pde *src_pd = (struct pde *)srcVaddrPD;
+	struct pde *dest_pd = (struct pde *)destVaddrPD;
+	struct pde mirror_pde;
+	uint index_in_pd;
+
+	/* Fill destination PD with zeros */
+	memset((void *)destVaddrPD, 0x0, PAGE_SIZE);
+
+	/* Synchronize it with the master Kernel MMU context. Stop just
+	   before the mirroring ! */
+	for (index_in_pd = 0; index_in_pd < (PAGING_MIRROR_VADDR >> 22); /* 1 PDE = 1 PT
+									    = 1024 Pages
+									    = 4MB */
+	     index_in_pd++) {
+		/* Copy the master's configuration */
+		dest_pd[index_in_pd] = src_pd[index_in_pd];
+
+		/* We DON'T mark the underlying PT and pages as referenced
+		   because all the PD are equivalent in the kernel space: as
+		   soon as a page is mapped in the kernel, it is mapped by X
+		   address spaces, and as soon as it is unmapped by 1 address
+		   space, it is unmapped in all the others. So that for X
+		   address spaces, the reference counter will be either 0 or X,
+		   and not something else: using the reference counter correctly
+		   won't be of any use and would consume some time in updating it. */
+	}
+
+	/* Setup the mirroring for the new address space */
+	mirror_pde.present = TRUE;
+	mirror_pde.write = 1;
+	mirror_pde.user = 0; /* This is a KERNEL PDE */
+	mirror_pde.pt_addr = (destPaddrPD >> 12);
+	dest_pd[PAGING_MIRROR_VADDR >> 22] = mirror_pde;
+
+	return 0;
 }
 
diff --git a/arch/x86/paging.h b/arch/x86/paging.h
index 717a046..b676b39 100644
--- a/arch/x86/paging.h
+++ b/arch/x86/paging.h
@@ -27,3 +27,4 @@ unsigned long getNbMappedPage(void);
 int pagingSetCurrentPDPaddr(paddr_t paddrPD);
 paddr_t pagingGetPaddr(vaddr_t vaddr);
 paddr_t pagingGetCurrentPDPaddr();
+int pagingCopyKernelSpace(vaddr_t destVaddrPD, paddr_t destPaddrPD, vaddr_t srcVaddrPD);
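
Note, not part of the patch itself: a minimal sketch of how the new code path is expected to be exercised, i.e. creating a fresh MMU context (which now gets the kernel space copied into its PD and its own mirroring PDE installed) and then loading that PD into CR3. mmuContextCreate(), pagingSetCurrentPDPaddr() and the paddr_PD field are taken from the diff above; the "mmuContext.h" header name, the switchToNewContext() helper, the use of ENOMEM and direct access to ctx->paddr_PD from outside mmuContext.c are assumptions made purely for illustration.

#include "errno.h"
#include "mmuContext.h" /* assumed header exposing struct mmu_context and mmuContextCreate() */
#include "paging.h"

/* Hypothetical helper: create a new address space and make it current. */
static int switchToNewContext(void)
{
	struct mmu_context *ctx = mmuContextCreate();

	if (ctx == NULL)
		return -ENOMEM; /* pagingCopyKernelSpace() or an allocation failed */

	/* Kernel mappings stay valid after the CR3 reload because every page
	   directory shares the kernel PDEs copied by pagingCopyKernelSpace()
	   and kept coherent by mmuContextSyncKernelPDE(). */
	return pagingSetCurrentPDPaddr(ctx->paddr_PD);
}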