#include "alloc.h"
|
|
#include "allocArea.h"
|
|
#include "errno.h"
|
|
#include "irq.h"
|
|
#include "klibc.h"
|
|
#include "list.h"
|
|
#include "mem.h"
|
|
#include "mmuContext.h"
|
|
#include "paging.h"
|
|
#include "stdarg.h"
|
|
#include "types.h"
|
|
|
|
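/*
 * One MMU context describes one address space: the physical address of its
 * page directory (handed to the paging layer when the context is switched
 * in), a kernel-side virtual mapping of that directory so its entries can
 * be edited, a reference count, and the links chaining all live contexts
 * together.
 */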
struct mmu_context {
    paddr_t paddr_PD;
    vaddr_t vaddr_PD;
    uint32_t ref;

    struct mmu_context *next, *prev;
};

static struct mmu_context *listContext = NULL;
static struct mmu_context *currentContext = NULL;

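/* Return the context whose page directory is currently active. */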
struct mmu_context *mmuContextGetCurrent()
{
    return currentContext;
}

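/*
 * Register the address space the kernel is currently running on as the
 * first MMU context: pre-book slab space (presumably for future context
 * allocations), then wrap the current page directory in a struct
 * mmu_context and map it into kernel virtual memory so it can be edited
 * later.
 */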
int mmuContextSetup()
{
    struct mmu_context *initialCtx;
    int ret = 0;

    allocBookSlab(sizeof(struct mmu_context), PAGE_SIZE * 3, 0, 0);

    initialCtx = malloc(sizeof(struct mmu_context));

    if (initialCtx == NULL)
        return -ENOMEM;

    initialCtx->paddr_PD = pagingGetCurrentPDPaddr();
    initialCtx->vaddr_PD = areaAlloc(1, 0);

    if (initialCtx->vaddr_PD == (vaddr_t)NULL) {
        free(initialCtx);
        return -ENOMEM;
    }

    ret = pageMap(initialCtx->vaddr_PD, initialCtx->paddr_PD,
                  PAGING_MEM_WRITE | PAGING_MEM_READ);

    if (ret) {
        free(initialCtx);
        return ret;
    }

    list_singleton(listContext, initialCtx);
    currentContext = initialCtx;

    // One reference because the context exists, one because it is in use
    initialCtx->ref = 2;
    return 0;
}

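/*
 * Create a new address space: allocate one page for its page directory
 * (AREA_PHY_MAP, so it is backed by a physical page), copy the kernel
 * portion of the current page directory into it, then add the context to
 * the global list.
 */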
struct mmu_context *mmuContextCreate()
{
    struct mmu_context *ctx;
    uint32_t flags;

    ctx = malloc(sizeof(struct mmu_context));

    if (ctx == NULL)
        return NULL;

    ctx->vaddr_PD = areaAlloc(1, AREA_PHY_MAP);

    if (ctx->vaddr_PD == (vaddr_t)NULL) {
        pr_info("Failed to allocate MMU context\n");
        free(ctx);

        return NULL;
    }

    ctx->paddr_PD = pagingGetPaddr(ctx->vaddr_PD);

    ctx->ref = 1;

    if (pagingCopyKernelSpace(ctx->vaddr_PD, ctx->paddr_PD, currentContext->vaddr_PD)) {
        pr_err("Failed to copy kernel space\n");
        areaFree(ctx->vaddr_PD); // also release the page-directory area
        free(ctx);

        return NULL;
    }

    disable_IRQs(flags);
    list_add_tail(listContext, ctx);
    restore_IRQs(flags);

    return ctx;
}

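/* Take an extra reference on a context that is still alive. */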
int mmuContextRef(struct mmu_context *ctx)
{
    uint32_t flags;

    disable_IRQs(flags);

    // ref == 0 would mean the context is already being destroyed
    assert(ctx->ref > 0);
    ctx->ref++;

    restore_IRQs(flags);

    return 0;
}

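/*
 * Drop a reference; when the last one goes away, the context is removed
 * from the list, its user mappings are torn down and its page directory
 * area is released.
 */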
int mmuContextUnref(struct mmu_context *ctx)
{
    uint32_t flags;

    disable_IRQs(flags);

    assert(ctx->ref > 0);
    ctx->ref--;

    if (ctx->ref == 0) {
        list_delete(listContext, ctx);
        pagingClearUserContext(ctx->vaddr_PD);
        areaFree(ctx->vaddr_PD);
        free(ctx);
    }

    restore_IRQs(flags);

    return 0;
}

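/*
 * Make ctx the active address space: take a reference on it, load its page
 * directory, then drop the reference held by the previously active context.
 */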
int mmuContextSwitch(struct mmu_context *ctx)
{
    uint32_t flags;

    disable_IRQs(flags);
    assert(ctx->ref > 0);
    assert(currentContext->ref > 0);

    if (ctx != currentContext) {
        struct mmu_context *prev = currentContext;

        ctx->ref++;
        currentContext = ctx;
        pagingSetCurrentPDPaddr(ctx->paddr_PD);
        mmuContextUnref(prev);
    }

    restore_IRQs(flags);
    return 0;
}

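/*
 * Copy a kernel page-directory entry into every live context's page
 * directory, so that kernel mappings stay identical across address spaces.
 */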
int mmuContextSyncKernelPDE(int pdEntry, void *pde, size_t pdeSize)
{
    uint32_t flags;
    struct mmu_context *destContext;
    int nbContexts;

    disable_IRQs(flags);
    list_foreach_forward(listContext, destContext, nbContexts)
    {
        vaddr_t dest_pd;

        assert(destContext->ref > 0);

        dest_pd = destContext->vaddr_PD;
        memcpy((void *)(dest_pd + pdEntry * pdeSize), pde, pdeSize);
    }
    restore_IRQs(flags);

    return 0;
}