sos-code-article7/hwcore/mm_context.c

/* Copyright (C) 2005 David Decotigny

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License
   as published by the Free Software Foundation; either version 2
   of the License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
   USA.
*/
#include <hwcore/paging.h>
#include <hwcore/irq.h>
#include <sos/assert.h>
#include <sos/list.h>
#include <sos/klibc.h>
#include <sos/physmem.h>
#include <sos/kmem_slab.h>
#include <sos/kmem_vmm.h>
#include "mm_context.h"

/**
 * Definition of an MMU context.
 */
struct sos_mm_context
{
  /** Physical address of the PD for this MMU context */
  sos_paddr_t           paddr_PD;

  /** Virtual address where it is mapped into the Kernel space */
  sos_vaddr_t           vaddr_PD;

  /** Reference counter for this mm_context */
  sos_ui32_t            ref_cnt;

  /** List of MMU contexts in the system */
  struct sos_mm_context *prev, *next;
};

/**
 * The cache of mm_context structures
 */
struct sos_kslab_cache * cache_struct_mm_context;

/**
 * The current MMU context corresponding to the current configuration
 * of the MMU.
 */
static struct sos_mm_context *current_mm_context = NULL;

/**
 * System-wide list of all the mm_contexts in the system
 */
static struct sos_mm_context *list_mm_context = NULL;
/* The "= NULL" here is FUNDAMENTAL, because paging.c must work
   correctly, i.e. sos_mm_context_synch_kernel_PDE() below must behave
   reasonably (e.g. do nothing), until the mm_context subsystem has
   been initialized. */

sos_ret_t sos_mm_context_subsystem_setup()
{
  struct sos_mm_context * initial_mm_context;
  sos_ret_t retval;

  /* Create the new mm_context cache */
  cache_struct_mm_context = sos_kmem_cache_create("struct mm_context",
                                                  sizeof(struct sos_mm_context),
                                                  1, 0,
                                                  SOS_KSLAB_CREATE_MAP);
  if (NULL == cache_struct_mm_context)
    return -SOS_ENOMEM;

  /*
   * Allocate the initial mm_context structure
   */
  initial_mm_context
    = (struct sos_mm_context*) sos_kmem_cache_alloc(cache_struct_mm_context,
                                                    SOS_KSLAB_ALLOC_ATOMIC);
  if (NULL == initial_mm_context)
    return -SOS_ENOMEM;

  /* Retrieve the physical address of the PD currently used by the MMU */
  initial_mm_context->paddr_PD = sos_paging_get_current_PD_paddr();

  /*
   * Map it somewhere in kernel virtual memory
   */

  /* Allocate 1 page of kernel Virtual memory */
  initial_mm_context->vaddr_PD = sos_kmem_vmm_alloc(1, 0);
  if (initial_mm_context->vaddr_PD == 0)
    return -SOS_ENOMEM;

  /* Map the PD at this virtual address: it will thus be mapped 2
     times (1 time for the mirroring, 1 time for mm_context) ! */
  retval = sos_paging_map(initial_mm_context->paddr_PD,
                          initial_mm_context->vaddr_PD,
                          FALSE,
                          SOS_VM_MAP_PROT_READ
                          | SOS_VM_MAP_PROT_WRITE);
  if (SOS_OK != retval)
    return retval;

  /* Initialize the initial list of mm_contexts */
  list_singleton(list_mm_context, initial_mm_context);

  /* We just created this mm_context: mark it as "referenced" (do not
     rely on the allocator having zeroed the structure) */
  initial_mm_context->ref_cnt = 1;

  /* We are actually already using it ! */
  initial_mm_context->ref_cnt ++; /* ie reference it a 2nd time ! */
  current_mm_context = initial_mm_context;

  return SOS_OK;
}
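
/*
 * Illustrative sketch, not part of the original file: the subsystem is
 * expected to be set up exactly once during kernel startup, after the
 * paging subsystem and the kernel memory allocators are operational,
 * since the code above relies on sos_kmem_cache_create() and
 * sos_kmem_vmm_alloc(). The startup helper below is hypothetical; only
 * sos_mm_context_subsystem_setup() comes from this file.
 */
void example_mm_context_startup_step(void)
{
  /* A failure here leaves the kernel without any MMU context
     bookkeeping, so treat it as fatal */
  SOS_ASSERT_FATAL(SOS_OK == sos_mm_context_subsystem_setup());
}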

struct sos_mm_context * sos_mm_context_create(void)
{
  sos_ui32_t flags;
  struct sos_mm_context *mmctxt;

  /*
   * Allocate the new mm_context structure
   */
  mmctxt = (struct sos_mm_context*)
    sos_kmem_cache_alloc(cache_struct_mm_context, 0);
  if (NULL == mmctxt)
    return NULL;

  /* Allocate a new page for the new PD and map it into the kernel */
  mmctxt->vaddr_PD = sos_kmem_vmm_alloc(1, SOS_KMEM_VMM_MAP);
  if (mmctxt->vaddr_PD == 0)
    {
      sos_kmem_cache_free((sos_vaddr_t) mmctxt);
      return NULL;
    }

  /* Retrieve its physical address */
  mmctxt->paddr_PD = sos_paging_get_paddr(mmctxt->vaddr_PD);
  if (mmctxt->paddr_PD == 0)
    {
      /* Release the PD page and the structure before bailing out */
      sos_kmem_vmm_free(mmctxt->vaddr_PD);
      sos_kmem_cache_free((sos_vaddr_t) mmctxt);
      return NULL;
    }

  /* Copy the kernel-space part of the current address translation
     tables into the new PD */
  if (SOS_OK != sos_paging_copy_kernel_space(mmctxt->vaddr_PD,
                                             current_mm_context->vaddr_PD))
    {
      sos_kmem_vmm_free(mmctxt->vaddr_PD);
      sos_kmem_cache_free((sos_vaddr_t) mmctxt);
      return NULL;
    }

  /* Mark the mm_context as "referenced" */
  mmctxt->ref_cnt = 1;

  /* Add it to the list of MMU contexts */
  sos_disable_IRQs(flags);
  list_add_tail(list_mm_context, mmctxt);
  sos_restore_IRQs(flags);

  return mmctxt;
}
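
/*
 * Illustrative sketch, not part of the original file: the typical
 * lifecycle of an mm_context as a hypothetical caller (eg. a process
 * creation path) might drive it. Only the functions defined in this
 * file are assumed; the caller and its error handling are invented
 * for the example.
 */
sos_ret_t example_mm_context_lifecycle(void)
{
  /* Create a new address space: kernel space is shared with all the
     other contexts, user space starts empty. ref_cnt is 1 on return */
  struct sos_mm_context * mmctxt = sos_mm_context_create();
  if (NULL == mmctxt)
    return -SOS_ENOMEM;

  /* A second user (eg. another thread of the same process) takes its
     own reference before keeping a pointer to the context */
  sos_mm_context_ref(mmctxt);

  /* ... the address space is used here ... */

  /* Each user drops its reference when done; the PD and the structure
     are only released when the last reference goes away */
  sos_mm_context_unref(mmctxt);
  sos_mm_context_unref(mmctxt);

  return SOS_OK;
}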

sos_ret_t sos_mm_context_unref(struct sos_mm_context *mmctxt)
{
  sos_ui32_t flags;

  sos_disable_IRQs(flags);

  /* A valid mmctxt is one which is not yet unreferenced */
  SOS_ASSERT_FATAL(mmctxt->ref_cnt > 0);

  /* Unreference it */
  mmctxt->ref_cnt --;

  /* If somebody is still using it, don't release it now */
  if (mmctxt->ref_cnt > 0)
    {
      sos_restore_IRQs(flags);
      return SOS_OK;
    }

  /* If nobody uses it, then it cannot be the current mm_context ! */
  SOS_ASSERT_FATAL(mmctxt != current_mm_context);

  /* Remove it from the list of mm_contexts */
  list_delete(list_mm_context, mmctxt);

  sos_restore_IRQs(flags);

  /* Remove all user mappings (if any) */
  sos_paging_dispose(mmctxt->vaddr_PD);

  /* Unmap the PD from the kernel */
  sos_kmem_vmm_free(mmctxt->vaddr_PD);

  /* Poison the structure to catch dangling uses, then return it to
     its slab cache */
  memset(mmctxt, 0x0, sizeof(*mmctxt));
  sos_kmem_cache_free((sos_vaddr_t) mmctxt);

  return SOS_OK;
}

sos_ret_t sos_mm_context_ref(struct sos_mm_context *mmctxt)
{
  sos_ui32_t flags;

  sos_disable_IRQs(flags);

  /* A valid mmctxt is one which is not yet unreferenced */
  SOS_ASSERT_FATAL(mmctxt->ref_cnt > 0);

  /* Reference it once again */
  mmctxt->ref_cnt ++;

  sos_restore_IRQs(flags);

  return SOS_OK;
}

sos_ret_t sos_mm_context_switch_to(struct sos_mm_context *mmctxt)
{
  SOS_ASSERT_FATAL(NULL != mmctxt);
  SOS_ASSERT_FATAL(mmctxt->ref_cnt > 0);
  SOS_ASSERT_FATAL(current_mm_context->ref_cnt > 0);

  if (mmctxt != current_mm_context)
    {
      sos_ui32_t flags;
      struct sos_mm_context * prev_mm_context = current_mm_context;

      /* This is the most dangerous part of the whole thing. If we set
         the wrong MMU configuration in mmctxt, this will hang or
         reboot the machine... */
      sos_paging_set_current_PD_paddr(mmctxt->paddr_PD);

      /* Exchange the mm_contexts */
      current_mm_context = mmctxt;

      /* Update the reference counts */
      sos_disable_IRQs(flags);
      mmctxt->ref_cnt ++;
      sos_mm_context_unref(prev_mm_context);
      sos_restore_IRQs(flags);
    }

  return SOS_OK;
}
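
/*
 * Illustrative sketch, not part of the original file: how a
 * hypothetical scheduling path might install the address space of the
 * next thread. 'next_mmctxt' stands for the mm_context recorded in the
 * destination thread/process structure, which is an assumption here;
 * only the two functions used below come from this file.
 */
void example_install_address_space(struct sos_mm_context * next_mmctxt)
{
  /* Nothing to do when the destination already lives in the current
     address space (eg. another thread of the same process) */
  if (next_mmctxt == get_current_mm_context())
    return;

  /* Load the new PD into the MMU; switch_to() takes a reference on
     the new context and drops the one held on the previous context */
  sos_mm_context_switch_to(next_mmctxt);
}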

struct sos_mm_context * get_current_mm_context()
{
  SOS_ASSERT_FATAL(current_mm_context->ref_cnt > 0);

  return current_mm_context;
}

/* ******************************************************
 * Reserved functions
 */

sos_ret_t sos_mm_context_synch_kernel_PDE(unsigned int index_in_pd,
                                          sos_ui32_t pde)
{
  sos_ui32_t flags;
  struct sos_mm_context * dest_mm_context;
  int nb_mm_contexts;

  sos_disable_IRQs(flags);

  list_foreach_forward(list_mm_context, dest_mm_context, nb_mm_contexts)
    {
      sos_ui32_t * dest_pd;

      SOS_ASSERT_FATAL(dest_mm_context->ref_cnt > 0);

      dest_pd = (sos_ui32_t*) dest_mm_context->vaddr_PD;
      dest_pd[index_in_pd] = pde;
    }

  sos_restore_IRQs(flags);

  return SOS_OK;
}
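
/*
 * Illustrative sketch, not part of the original file: the calling
 * pattern expected from the paging code for the reserved function
 * above. Whenever a kernel-area PDE of the current PD is modified
 * (eg. because a new kernel page table had to be allocated), the new
 * PDE value is expected to be propagated to every PD in the system.
 * The parameter names below are placeholders, not the actual
 * variables used in paging.c.
 */
void example_propagate_kernel_pde(unsigned int index_in_pd,
                                  sos_ui32_t new_pde_value)
{
  /* Before sos_mm_context_subsystem_setup() has run, list_mm_context
     is still NULL and the call below iterates over nothing: this is
     why the "= NULL" initializer above is fundamental */
  sos_mm_context_synch_kernel_PDE(index_in_pd, new_pde_value);
}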