context: add copy kernel context

Also fix some indentation
Mathieu Maret 2021-10-28 19:18:03 +02:00
parent ca399ee782
commit f1039b7fe4
3 changed files with 72 additions and 29 deletions


@@ -2,8 +2,8 @@
 #include "alloc.h"
 #include "allocArea.h"
 #include "errno.h"
-#include "klibc.h"
 #include "irq.h"
+#include "klibc.h"
 #include "list.h"
 #include "paging.h"
 #include "stdarg.h"
@@ -70,7 +70,13 @@ struct mmu_context *mmuContextCreate()
     ctx->ref = 1;

-    //TODO copy kernel space
+    if (pagingCopyKernelSpace(ctx->vaddr_PD, ctx->paddr_PD, currentContext->vaddr_PD)) {
+        pr_err("Fail to copy Kernel space\n");
+        free(ctx);
+        return NULL;
+    }

     disable_IRQs(flags);
     list_add_tail(listContext, ctx);
     restore_IRQs(flags);
@@ -78,7 +84,7 @@ struct mmu_context *mmuContextCreate()
     return ctx;
 }

-int mmuContextSyncKernelPDE(int pdEntry, void * pde, size_t pdeSize)
+int mmuContextSyncKernelPDE(int pdEntry, void *pde, size_t pdeSize)
 {
     uint32_t flags;
     struct mmu_context *destContext;
@@ -91,8 +97,8 @@ int mmuContextSyncKernelPDE(int pdEntry, void * pde, size_t pdeSize)
         assert(destContext->ref > 0);
         dest_pd = (void *)destContext->vaddr_PD;
-        memcpy(dest_pd + pdEntry *pdeSize, pde, pdeSize);
+        memcpy(dest_pd + pdEntry * pdeSize, pde, pdeSize);
     }
     restore_IRQs(flags);
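Note: the hunks above only show fragments of mmuContextSyncKernelPDE(). The idea it implements is that every MMU context keeps its own page directory, and a page-directory entry that belongs to kernel space must be identical in all of them, so a freshly created kernel PDE is copied into every registered context at the same index. The stand-alone user-space model below illustrates that propagation and the byte-offset arithmetic of the memcpy() call; the pde_t type, the sizes and the values are illustrative stand-ins, not the kernel's real structures.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative stand-ins: a real x86 PDE is a 32-bit descriptor and a
 * page directory holds 1024 of them. */
typedef uint32_t pde_t;
#define PD_ENTRIES 1024

int main(void)
{
    /* Two "MMU contexts", each with its own page directory. */
    pde_t context_a[PD_ENTRIES] = {0};
    pde_t context_b[PD_ENTRIES] = {0};
    pde_t *contexts[] = {context_a, context_b};

    /* A new kernel page table gets installed in slot 3 of the current
     * context; the same descriptor must land in slot 3 everywhere. */
    int pdEntry = 3;
    pde_t newKernelPde = 0x00042003; /* arbitrary "present | write" value */

    for (size_t i = 0; i < sizeof(contexts) / sizeof(contexts[0]); i++) {
        pde_t *dest_pd = contexts[i];
        /* Same byte-offset arithmetic as the kernel's
         * memcpy(dest_pd + pdEntry * pdeSize, pde, pdeSize). */
        memcpy((char *)dest_pd + pdEntry * sizeof(pde_t), &newKernelPde,
               sizeof(pde_t));
    }

    assert(context_a[pdEntry] == context_b[pdEntry]);
    printf("slot %d identical in every context: 0x%08x\n", pdEntry,
           (unsigned)context_a[pdEntry]);
    return 0;
}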


@@ -133,17 +133,15 @@ int pageMap(vaddr_t vaddr, paddr_t paddr, int flags)
     uint pdEntry = vaddr >> (PD_SHIFT);
     uint ptEntry = (vaddr >> PT_SHIFT) & PTE_MASK;

-    if ((vaddr >= PAGING_MIRROR_VADDR) &&
-        (vaddr < PAGING_MIRROR_VADDR + PAGING_MIRROR_SIZE))
+    if ((vaddr >= PAGING_MIRROR_VADDR) && (vaddr < PAGING_MIRROR_VADDR + PAGING_MIRROR_SIZE))
         return -EINVAL;

     // Thank to mirroring, we can access the PD
     struct pde *pd =
-        (struct pde *)(PAGING_MIRROR_VADDR + PAGE_SIZE*(PAGING_MIRROR_VADDR>>PD_SHIFT));
+        (struct pde *)(PAGING_MIRROR_VADDR + PAGE_SIZE * (PAGING_MIRROR_VADDR >> PD_SHIFT));
     struct pte *pt = (struct pte *)((PAGING_MIRROR_VADDR) + (pdEntry * PAGE_SIZE));

     if (!pd[pdEntry].present) {
         paddr_t ptPhy = allocPhyPage(1);
         if (ptPhy == (vaddr_t)NULL)
@@ -153,12 +151,12 @@ int pageMap(vaddr_t vaddr, paddr_t paddr, int flags)
         pd[pdEntry].write = 1;
         pd[pdEntry].pt_addr = (ptPhy >> PT_SHIFT);
-        if(vaddr < PAGING_BASE_USER_ADDRESS){
+        if (vaddr < PAGING_BASE_USER_ADDRESS) {
             pd[pdEntry].user = 0;
             mmuContextSyncKernelPDE(pdEntry, &pd[pdEntry], sizeof(struct pde));
-        }else{
+        } else {
             assert(flags & PAGING_MEM_USER);
             pd[pdEntry].user = 1;
         }
         __native_flush_tlb_single((vaddr_t)pt);
@@ -190,13 +188,12 @@ int pageUnmap(vaddr_t vaddr)
     uint pdEntry = vaddr >> (PD_SHIFT);
     uint ptEntry = (vaddr >> PT_SHIFT) & PTE_MASK;

-    if ((vaddr >= PAGING_MIRROR_VADDR) &&
-        (vaddr < PAGING_MIRROR_VADDR + PAGING_MIRROR_SIZE))
+    if ((vaddr >= PAGING_MIRROR_VADDR) && (vaddr < PAGING_MIRROR_VADDR + PAGING_MIRROR_SIZE))
         return -EINVAL;

     // Thank to mirroring, we can access the PD
     struct pde *pd =
-        (struct pde *)(PAGING_MIRROR_VADDR + PAGE_SIZE*(PAGING_MIRROR_VADDR>>PD_SHIFT));
+        (struct pde *)(PAGING_MIRROR_VADDR + PAGE_SIZE * (PAGING_MIRROR_VADDR >> PD_SHIFT));
     struct pte *pt = (struct pte *)((PAGING_MIRROR_VADDR) + (pdEntry * PAGE_SIZE));

     if (!pd[pdEntry].present)
@@ -230,7 +227,7 @@ paddr_t pagingGetPaddr(vaddr_t vaddr)
     // Thank to mirroring, we can access the PD
     struct pde *pd =
-        (struct pde *)(PAGING_MIRROR_VADDR + PAGE_SIZE*(PAGING_MIRROR_VADDR>>PD_SHIFT));
+        (struct pde *)(PAGING_MIRROR_VADDR + PAGE_SIZE * (PAGING_MIRROR_VADDR >> PD_SHIFT));
     struct pte *pt = (struct pte *)((PAGING_MIRROR_VADDR) + (pdEntry * PAGE_SIZE));
@@ -250,25 +247,64 @@ unsigned long getNbMappedPage(void)
 paddr_t pagingGetCurrentPDPaddr()
 {
     struct pdbr pdbr;
-    asm volatile("movl %%cr3, %0\n": "=r"(pdbr));
+    asm volatile("movl %%cr3, %0\n" : "=r"(pdbr));
     return (pdbr.pd_paddr << 12);
 }

 int pagingSetCurrentPDPaddr(paddr_t paddrPD)
 {
     struct pdbr pdbr;

     assert(paddrPD != 0);
     assert(IS_ALIGNED(paddrPD, PAGE_SIZE));

     /* Setup the value of the PDBR */
-    memset(& pdbr, 0x0, sizeof(struct pdbr)); /* Reset the PDBR */
+    memset(&pdbr, 0x0, sizeof(struct pdbr)); /* Reset the PDBR */
     pdbr.pd_paddr = (paddrPD >> 12);

     /* Configure the MMU according to the PDBR */
-    asm volatile ("movl %0,%%cr3\n" ::"r"(pdbr));
+    asm volatile("movl %0,%%cr3\n" ::"r"(pdbr));

     return 0;
+}
+
+int pagingCopyKernelSpace(vaddr_t destVaddrPD, paddr_t destPaddrPD, vaddr_t srcVaddrPD)
+{
+    struct pde *src_pd = (struct pde *)srcVaddrPD;
+    struct pde *dest_pd = (struct pde *)destVaddrPD;
+    struct pde mirror_pde;
+    uint index_in_pd;
+
+    /* Fill destination PD with zeros */
+    memset((void *)destVaddrPD, 0x0, PAGE_SIZE);
+
+    /* Synchronize it with the master Kernel MMU context. Stop just
+       before the mirroring ! */
+    for (index_in_pd = 0; index_in_pd < (PAGING_MIRROR_VADDR >> 22); /* 1 PDE = 1 PT
+                                                                        = 1024 Pages
+                                                                        = 4MB */
+         index_in_pd++) {
+        /* Copy the master's configuration */
+        dest_pd[index_in_pd] = src_pd[index_in_pd];
+
+        /* We DON'T mark the underlying PT and pages as referenced
+           because all the PD are equivalent in the kernel space: as
+           soon as a page is mapped in the kernel, it is mapped by X
+           address spaces, and as soon as it is unmapped by 1 address
+           space, it is unmapped in all the others. So that for X
+           address spaces, the reference counter will be either 0 or X,
+           and not something else: using the reference counter correctly
+           won't be of any use and would consume some time in updating it. */
+    }
+
+    /* Setup the mirroring for the new address space */
+    mirror_pde.present = TRUE;
+    mirror_pde.write = 1;
+    mirror_pde.user = 0; /* This is a KERNEL PDE */
+    mirror_pde.pt_addr = (destPaddrPD >> 12);
+    dest_pd[PAGING_MIRROR_VADDR >> 22] = mirror_pde;
+
+    return 0;
 }
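A note on the arithmetic used by pagingCopyKernelSpace(): on 32-bit x86 with 4 KiB pages, one PDE covers one page table, i.e. 1024 pages or 4 MiB of virtual space, so the page-directory slot that owns a virtual address is simply vaddr >> 22, and pt_addr stores a physical frame number, i.e. a physical address shifted right by 12. The small stand-alone program below checks exactly that arithmetic; the mirror base value used here is a placeholder for the example, not necessarily the PAGING_MIRROR_VADDR this kernel defines.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE       4096u
#define PT_ENTRIES      1024u
/* Placeholder mirror base, for the sake of the example only. */
#define MIRROR_VADDR_EX 0xFFC00000u

int main(void)
{
    /* One PDE maps PT_ENTRIES pages of PAGE_SIZE bytes = 4 MiB. */
    uint32_t bytes_per_pde = PT_ENTRIES * PAGE_SIZE;
    assert(bytes_per_pde == (1u << 22));

    /* So the PD slot owning a virtual address is vaddr >> 22. */
    uint32_t mirror_slot = MIRROR_VADDR_EX >> 22;
    printf("kernel PDEs copied for slots 0 .. %u, mirror goes in slot %u\n",
           (unsigned)(mirror_slot - 1), (unsigned)mirror_slot);

    /* pt_addr holds a frame number: a 4 KiB-aligned physical address >> 12. */
    uint32_t destPaddrPD = 0x00123000u; /* example 4 KiB-aligned PD frame */
    uint32_t pt_addr = destPaddrPD >> 12;
    assert((pt_addr << 12) == destPaddrPD);
    printf("PD frame 0x%08x stored as pt_addr 0x%05x\n",
           (unsigned)destPaddrPD, (unsigned)pt_addr);
    return 0;
}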


@@ -27,3 +27,4 @@ unsigned long getNbMappedPage(void);
 int pagingSetCurrentPDPaddr(paddr_t paddrPD);
 paddr_t pagingGetPaddr(vaddr_t vaddr);
 paddr_t pagingGetCurrentPDPaddr();
+int pagingCopyKernelSpace(vaddr_t destVaddrPD, paddr_t destPaddrPD, vaddr_t srcVaddrPD);
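The mirror PDE that pagingCopyKernelSpace() installs is what the "Thank to mirroring" comments rely on: by pointing one page-directory entry at the page directory's own physical frame, the PD doubles as a page table, so the PD and every page table become visible at fixed virtual addresses inside the mirror window. The stand-alone simulation below walks a miniature two-level structure the way the MMU would and checks that both the pd and pt pointers computed in pageMap()/pageUnmap() resolve to the right frames; all sizes, frame numbers and the mirror base are illustrative, not the kernel's actual layout.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Miniature model of 32-bit x86 two-level paging, for illustration only. */
#define PAGE_SIZE    4096u
#define ENTRIES      1024u
#define PRESENT      0x1u
#define MIRROR_VADDR 0xFFC00000u /* placeholder mirror base */

static uint32_t frames[4][ENTRIES]; /* fake physical memory: 4 frames */

/* Walk PD -> PT exactly like the MMU would and return the frame index
 * that a virtual address resolves to (or -1 if not mapped). */
static int translate_frame(uint32_t pd_frame, uint32_t vaddr)
{
    uint32_t pde = frames[pd_frame][vaddr >> 22];
    if (!(pde & PRESENT))
        return -1;
    uint32_t pte = frames[pde >> 12][(vaddr >> 12) & 0x3FFu];
    if (!(pte & PRESENT))
        return -1;
    return (int)(pte >> 12);
}

int main(void)
{
    uint32_t pd_frame = 0; /* frame 0 holds the page directory */
    uint32_t pt_frame = 2; /* frame 2 holds the page table for PD slot 1 */

    /* Regular mapping: PD slot 1 points at the page table in frame 2. */
    frames[pd_frame][1] = (pt_frame << 12) | PRESENT;

    /* Mirroring: the mirror PDE points back at the PD's own frame,
     * as pagingCopyKernelSpace() does with pt_addr = destPaddrPD >> 12. */
    frames[pd_frame][MIRROR_VADDR >> 22] = (pd_frame << 12) | PRESENT;

    /* The PD itself is now reachable at
     * MIRROR_VADDR + PAGE_SIZE * (MIRROR_VADDR >> 22) ... */
    uint32_t pd_vaddr = MIRROR_VADDR + PAGE_SIZE * (MIRROR_VADDR >> 22);
    assert(translate_frame(pd_frame, pd_vaddr) == (int)pd_frame);

    /* ... and the PT of PD slot 1 at MIRROR_VADDR + 1 * PAGE_SIZE,
     * matching the pt pointer computed in pageMap()/pageUnmap(). */
    uint32_t pt_vaddr = MIRROR_VADDR + 1 * PAGE_SIZE;
    assert(translate_frame(pd_frame, pt_vaddr) == (int)pt_frame);

    printf("PD visible at 0x%08x, PT of slot 1 visible at 0x%08x\n",
           (unsigned)pd_vaddr, (unsigned)pt_vaddr);
    return 0;
}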