2021-10-26 22:52:24 +02:00
|
|
|
#include "alloc.h"
|
|
|
|
#include "allocArea.h"
|
|
|
|
#include "errno.h"
|
|
|
|
#include "irq.h"
|
2021-10-28 19:18:03 +02:00
|
|
|
#include "klibc.h"
|
2021-10-26 22:52:24 +02:00
|
|
|
#include "list.h"
|
2021-10-30 15:30:19 +02:00
|
|
|
#include "mem.h"
|
|
|
|
#include "mmuContext.h"
|
2021-10-26 22:52:24 +02:00
|
|
|
#include "paging.h"
|
|
|
|
#include "stdarg.h"
|
|
|
|
#include "types.h"
|
|
|
|
|
|
|
|
/*
 * One MMU context: a page directory plus its bookkeeping.
 * Contexts are kept in the doubly-linked global list `listContext`
 * and reclaimed when their reference count drops to zero
 * (see mmuContextUnref).
 */
struct mmu_context {
	paddr_t paddr_PD; // physical address of the page directory (what the MMU is pointed at)
	vaddr_t vaddr_PD; // kernel-virtual mapping of the same PD, used to read/write its entries

	uint32_t ref; // reference count; 0 means the context is being destroyed

	struct mmu_context *next, *prev; // linkage in the global listContext
};
|
|
|
|
|
|
|
|
// All live MMU contexts; traversed by mmuContextSyncKernelPDE and
// mutated under disabled IRQs (disable_IRQs/restore_IRQs).
static struct mmu_context *listContext = NULL;

// The context whose page directory is currently loaded in the MMU.
static struct mmu_context *currentContext = NULL;
|
|
|
|
|
2021-10-30 00:28:31 +02:00
|
|
|
struct mmu_context * mmuContextGetCurrent(){
|
|
|
|
return currentContext;
|
|
|
|
}
|
|
|
|
|
2021-10-26 22:52:24 +02:00
|
|
|
int mmuContextSetup()
|
|
|
|
{
|
|
|
|
struct mmu_context *initialCtx;
|
|
|
|
int ret = 0;
|
|
|
|
|
2021-10-30 15:30:19 +02:00
|
|
|
allocBookSlab(sizeof(struct mmu_context), PAGE_SIZE * 3, 0, 0);
|
|
|
|
|
2021-10-26 22:52:24 +02:00
|
|
|
initialCtx = malloc(sizeof(struct mmu_context));
|
|
|
|
|
|
|
|
if (initialCtx == NULL)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
initialCtx->paddr_PD = pagingGetCurrentPDPaddr();
|
|
|
|
initialCtx->vaddr_PD = areaAlloc(1, 0);
|
|
|
|
|
|
|
|
ret = pageMap(initialCtx->vaddr_PD, initialCtx->paddr_PD,
|
|
|
|
PAGING_MEM_WRITE | PAGING_MEM_READ);
|
|
|
|
|
2021-11-12 10:19:43 +01:00
|
|
|
if (ret){
|
|
|
|
free(initialCtx);
|
2021-10-26 22:52:24 +02:00
|
|
|
return ret;
|
2021-11-12 10:19:43 +01:00
|
|
|
}
|
2021-10-26 22:52:24 +02:00
|
|
|
|
|
|
|
list_singleton(listContext, initialCtx);
|
|
|
|
currentContext = initialCtx;
|
|
|
|
|
|
|
|
// We create the context and we are using it
|
|
|
|
initialCtx->ref = 2;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
struct mmu_context *mmuContextCreate()
|
|
|
|
{
|
|
|
|
struct mmu_context *ctx;
|
|
|
|
uint32_t flags;
|
|
|
|
|
|
|
|
ctx = malloc(sizeof(struct mmu_context));
|
|
|
|
|
|
|
|
if (ctx == NULL)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
ctx->vaddr_PD = areaAlloc(1, AREA_PHY_MAP);
|
|
|
|
|
|
|
|
if (ctx->vaddr_PD == (vaddr_t)NULL) {
|
|
|
|
pr_info("Fail to allocate MMU Context\n");
|
|
|
|
free(ctx);
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
ctx->paddr_PD = pagingGetPaddr(ctx->vaddr_PD);
|
|
|
|
|
|
|
|
ctx->ref = 1;
|
|
|
|
|
2021-10-28 19:18:03 +02:00
|
|
|
if (pagingCopyKernelSpace(ctx->vaddr_PD, ctx->paddr_PD, currentContext->vaddr_PD)) {
|
|
|
|
pr_err("Fail to copy Kernel space\n");
|
|
|
|
free(ctx);
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2021-10-26 22:52:24 +02:00
|
|
|
disable_IRQs(flags);
|
|
|
|
list_add_tail(listContext, ctx);
|
|
|
|
restore_IRQs(flags);
|
|
|
|
|
|
|
|
return ctx;
|
|
|
|
}
|
2021-10-28 00:41:02 +02:00
|
|
|
|
2021-10-30 00:28:31 +02:00
|
|
|
int mmuContextRef(struct mmu_context *ctx)
|
|
|
|
{
|
|
|
|
uint32_t flags;
|
|
|
|
|
|
|
|
disable_IRQs(flags);
|
|
|
|
|
|
|
|
// ref == 0 => suppression
|
|
|
|
assert(ctx->ref > 0);
|
|
|
|
ctx->ref++;
|
|
|
|
|
|
|
|
restore_IRQs(flags);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int mmuContextUnref(struct mmu_context *ctx)
|
|
|
|
{
|
|
|
|
uint32_t flags;
|
|
|
|
|
|
|
|
disable_IRQs(flags);
|
|
|
|
|
|
|
|
assert(ctx->ref > 0);
|
|
|
|
ctx->ref--;
|
|
|
|
|
|
|
|
if (ctx->ref == 0) {
|
|
|
|
list_delete(listContext, ctx);
|
|
|
|
pagingClearUserContext(ctx->vaddr_PD);
|
|
|
|
areaFree(ctx->vaddr_PD);
|
|
|
|
free(ctx);
|
|
|
|
}
|
|
|
|
|
|
|
|
restore_IRQs(flags);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int mmuContextSwitch(struct mmu_context *ctx)
|
|
|
|
{
|
|
|
|
uint32_t flags;
|
|
|
|
|
|
|
|
disable_IRQs(flags);
|
|
|
|
assert(ctx->ref > 0);
|
|
|
|
assert(currentContext->ref > 0);
|
|
|
|
|
|
|
|
if (ctx != currentContext) {
|
|
|
|
struct mmu_context *prev = currentContext;
|
|
|
|
|
|
|
|
ctx->ref++;
|
|
|
|
currentContext = ctx;
|
|
|
|
pagingSetCurrentPDPaddr(ctx->paddr_PD);
|
|
|
|
mmuContextUnref(prev);
|
|
|
|
}
|
|
|
|
|
|
|
|
restore_IRQs(flags);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2021-10-28 19:18:03 +02:00
|
|
|
int mmuContextSyncKernelPDE(int pdEntry, void *pde, size_t pdeSize)
|
2021-10-28 00:41:02 +02:00
|
|
|
{
|
|
|
|
uint32_t flags;
|
|
|
|
struct mmu_context *destContext;
|
|
|
|
int nbContexts;
|
|
|
|
|
|
|
|
disable_IRQs(flags);
|
|
|
|
list_foreach_forward(listContext, destContext, nbContexts)
|
|
|
|
{
|
2021-11-13 08:31:05 +01:00
|
|
|
vaddr_t dest_pd;
|
2021-10-28 00:41:02 +02:00
|
|
|
|
|
|
|
assert(destContext->ref > 0);
|
|
|
|
|
2021-11-13 08:31:05 +01:00
|
|
|
dest_pd = destContext->vaddr_PD;
|
|
|
|
memcpy((void *)(dest_pd + pdEntry * pdeSize), pde, pdeSize);
|
2021-10-28 00:41:02 +02:00
|
|
|
}
|
|
|
|
restore_IRQs(flags);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|