sos-code-article10/sos/umem_vmm.c
/* Copyright (C) 2005,2006 David Decotigny
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
USA.
*/
#include <sos/assert.h>
#include <sos/list.h>
#include <sos/physmem.h>
#include <sos/kmem_slab.h>
#include <drivers/bochs.h>
#include <hwcore/mm_context.h>
#include <hwcore/paging.h>
#include <hwcore/irq.h>
#include <drivers/zero.h>
#include "umem_vmm.h"
struct sos_umem_vmm_as
{
/** The process that owns this address space */
struct sos_process * process;
/** The MMU configuration of this address space */
struct sos_mm_context * mm_context;
/** The list of VRs in this address space */
struct sos_umem_vmm_vr * list_vr;
/** Heap location */
sos_uaddr_t heap_start;
sos_size_t heap_size; /**< Updated by sos_umem_vmm_brk() */
/* Memory usage statistics */
sos_size_t phys_total; /* shared + private */
struct vm_usage
{
sos_size_t overall;
sos_size_t ro, rw, code; /**< read-only, read/write, executable ('overall' also counts non-readable mappings) */
} vm_total, vm_shrd;
/* Page fault counters */
sos_size_t pgflt_cow;
sos_size_t pgflt_page_in;
sos_size_t pgflt_invalid;
};
struct sos_umem_vmm_vr
{
/** The address space owning this VR */
struct sos_umem_vmm_as *address_space;
/** The location of the mapping in user space */
sos_uaddr_t start;
sos_size_t size;
/** What accesses are allowed (read, write, exec): @see
SOS_VM_MAP_PROT_* flags in hwcore/paging.h */
sos_ui32_t access_rights;
/** Flags of the VR. Allowed flags:
* - SOS_VR_MAP_SHARED
*/
sos_ui32_t flags;
/**
* The callbacks for the VR called along map/unmapping of the
* resource
*/
struct sos_umem_vmm_vr_ops *ops;
/** Description of the resource being mapped, if any */
struct sos_umem_vmm_mapped_resource *mapped_resource;
sos_luoffset_t offset_in_resource;
/** The VRs of an AS are linked together and are accessible by way
of as->list_vr */
struct sos_umem_vmm_vr *prev_in_as, *next_in_as;
/** The VRs mapping a given resource are linked together and are
accessible by way of mapped_resource->list_vr */
struct sos_umem_vmm_vr *prev_in_mapped_resource, *next_in_mapped_resource;
};
/*
* We use special slab caches to allocate AS and VR data structures
*/
static struct sos_kslab_cache * cache_of_as;
static struct sos_kslab_cache * cache_of_vr;
/** Temporary debugging helper: list the VRs of the given AS */
void sos_dump_as(const struct sos_umem_vmm_as * as, const char *str)
{
struct sos_umem_vmm_vr *vr;
int nb_vr;
sos_bochs_printf("AS %p - %s:\n", as, str);
sos_bochs_printf(" physical mem: %x\n",
as->phys_total);
sos_bochs_printf(" VM (all/ro+rw/exec) tot:%x/%x+%x/%x shrd:%x/%x+%x/%x\n",
as->vm_total.overall,
as->vm_total.ro, as->vm_total.rw, as->vm_total.code,
as->vm_shrd.overall,
as->vm_shrd.ro, as->vm_shrd.rw, as->vm_shrd.code);
sos_bochs_printf(" pgflt cow=%d pgin=%d inv=%d\n",
as->pgflt_cow, as->pgflt_page_in, as->pgflt_invalid);
list_foreach_named(as->list_vr, vr, nb_vr, prev_in_as, next_in_as)
{
sos_bochs_printf(" VR[%d]=%x: [%x,%x[ (sz=%x) mr=(%x)+%llx %c%c%c fl=%x\n",
nb_vr, (unsigned)vr,
vr->start, vr->start + vr->size, vr->size,
(unsigned)vr->mapped_resource,
vr->offset_in_resource,
(vr->access_rights & SOS_VM_MAP_PROT_READ)?'r':'-',
(vr->access_rights & SOS_VM_MAP_PROT_WRITE)?'w':'-',
(vr->access_rights & SOS_VM_MAP_PROT_EXEC)?'x':'-',
(unsigned)vr->flags);
}
sos_bochs_printf("FIN (%s)\n", str);
}
/**
* Physical address of THE page (full of 0s) used for anonymous
* mappings
*/
sos_paddr_t sos_zero_physpage = 0 /* Initial value prior to allocation */;
sos_vaddr_t sos_zero_kernelpage = 0 /* Initial value prior to allocation */;
/*
* Helper functions defined at the bottom of the file
*/
/**
* Helper function to retrieve the first VR that ends after uaddr, ie
* the VR enclosing uaddr (if any) or the next one otherwise
*/
static struct sos_umem_vmm_vr *
find_enclosing_or_next_vr(struct sos_umem_vmm_as * as,
sos_uaddr_t uaddr);
/**
* Helper function to retrieve the first VR that overlaps the given
* interval, if any
*/
static struct sos_umem_vmm_vr *
find_first_intersecting_vr(struct sos_umem_vmm_as * as,
sos_uaddr_t start_uaddr, sos_size_t size);
/**
* Helper function to find first address where there is enough
* space. Begin to look for such an interval at or after the given
* address
*
* @param hint_uaddr The address where to begin the scan, or NULL
*/
static sos_uaddr_t
find_first_free_interval(struct sos_umem_vmm_as * as,
sos_uaddr_t hint_uaddr, sos_size_t size);
/** Called each time a VR of the AS changes. Does not deal with the
underlying physical mapping/unmapping, COW, etc. */
static void
as_account_change_of_vr_protection(struct sos_umem_vmm_as * as,
sos_bool_t is_shared,
sos_size_t size,
sos_ui32_t prev_access_rights,
sos_ui32_t new_access_rights);
sos_ret_t sos_umem_vmm_subsystem_setup()
{
/* Allocate a new kernel physical page mapped into kernel space and
fill it with 0s */
sos_zero_kernelpage = sos_kmem_vmm_alloc(1, SOS_KMEM_VMM_MAP);
if (sos_zero_kernelpage == (sos_vaddr_t)NULL)
return -SOS_ENOMEM;
memset((void*)sos_zero_kernelpage, 0x0, SOS_PAGE_SIZE);
/* Keep a reference to the underlying physical page... */
sos_zero_physpage = sos_paging_get_paddr(sos_zero_kernelpage);
SOS_ASSERT_FATAL(NULL != (void*)sos_zero_physpage);
sos_physmem_ref_physpage_at(sos_zero_physpage);
/* Allocate the VR/AS caches */
cache_of_as
= sos_kmem_cache_create("Address space structures",
sizeof(struct sos_umem_vmm_as),
1, 0,
SOS_KSLAB_CREATE_MAP
| SOS_KSLAB_CREATE_ZERO);
if (! cache_of_as)
{
sos_physmem_unref_physpage(sos_zero_physpage);
return -SOS_ENOMEM;
}
cache_of_vr
= sos_kmem_cache_create("Virtual Region structures",
sizeof(struct sos_umem_vmm_vr),
1, 0,
SOS_KSLAB_CREATE_MAP
| SOS_KSLAB_CREATE_ZERO);
if (! cache_of_vr)
{
sos_physmem_unref_physpage(sos_zero_physpage);
sos_kmem_cache_destroy(cache_of_as);
return -SOS_ENOMEM;
}
return SOS_OK;
}
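/*
 * Illustrative sketch (editor's example, not part of the original
 * code): the expected boot-time call order. sos_umem_vmm_subsystem_setup()
 * allocates the zero page and the AS/VR caches, so it can only run once
 * the lower-level allocators are up.
 */
#if 0 /* example only */
static void example_umem_vmm_boot(void)
{
  /* The physical-memory and kernel slab allocators are assumed to be
     initialized at this point */
  SOS_ASSERT_FATAL(SOS_OK == sos_umem_vmm_subsystem_setup());
}
#endif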
static struct sos_umem_vmm_as * current_address_space = NULL;
struct sos_umem_vmm_as * sos_umem_vmm_get_current_as(void)
{
return current_address_space;
}
sos_ret_t sos_umem_vmm_set_current_as(struct sos_umem_vmm_as * as)
{
sos_ui32_t flags;
struct sos_umem_vmm_as *prev_as = current_address_space;
if (current_address_space == as)
return SOS_OK;
if (NULL != as)
{
sos_disable_IRQs(flags);
sos_process_ref(sos_umem_vmm_get_process(as));
sos_mm_context_switch_to(sos_umem_vmm_get_mm_context(as));
current_address_space = as;
sos_restore_IRQs(flags);
}
else
current_address_space = as;
if (prev_as)
sos_process_unref(sos_umem_vmm_get_process(prev_as));
return SOS_OK;
}
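/*
 * Illustrative sketch (editor's example): the expected call site in the
 * scheduler when switching to a thread of process P. The helper
 * sos_process_get_address_space() is an assumption about the process
 * API, not something defined in this file.
 */
#if 0 /* example only */
static void example_switch_to_process(struct sos_process * P)
{
  /* Takes a reference on P and switches the MMU to its mm_context */
  sos_umem_vmm_set_current_as(sos_process_get_address_space(P));
}
#endif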
struct sos_umem_vmm_as *
sos_umem_vmm_create_empty_as(struct sos_process *owner)
{
struct sos_umem_vmm_as * as
= (struct sos_umem_vmm_as *) sos_kmem_cache_alloc(cache_of_as, 0);
if (! as)
return NULL;
as->mm_context = sos_mm_context_create();
if (NULL == as->mm_context)
{
/* Error */
sos_kmem_cache_free((sos_vaddr_t)as);
return NULL;
}
as->process = owner;
return as;
}
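/*
 * Illustrative sketch (editor's example): setting up the address space
 * of a fresh process. The heap base used here is a made-up value; real
 * code derives it from the end of the loaded program image.
 */
#if 0 /* example only */
static struct sos_umem_vmm_as *
example_new_process_as(struct sos_process * proc)
{
  struct sos_umem_vmm_as * as = sos_umem_vmm_create_empty_as(proc);
  if (! as)
    return NULL;
  /* Hypothetical heap base address */
  sos_umem_vmm_init_heap(as, 0x40000000);
  return as;
}
#endif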
struct sos_umem_vmm_as *
sos_umem_vmm_duplicate_as(struct sos_umem_vmm_as * model_as,
struct sos_process *for_owner)
{
__label__ undo_creation;
struct sos_umem_vmm_vr * model_vr;
int nb_vr;
struct sos_umem_vmm_as * new_as
= (struct sos_umem_vmm_as *) sos_kmem_cache_alloc(cache_of_as, 0);
if (! new_as)
return NULL;
new_as->process = for_owner;
list_init_named(new_as->list_vr, prev_in_as, next_in_as);
/*
* Switch to the model AS's mm_context: duplicating it requires
* configuring some of its mappings as read-only (to prepare for
* COW)
*/
SOS_ASSERT_FATAL(SOS_OK
== sos_thread_prepare_user_space_access(model_as,
(sos_vaddr_t)
NULL));
/* Copy the virtual regions */
list_foreach_named(model_as->list_vr, model_vr, nb_vr, prev_in_as, next_in_as)
{
struct sos_umem_vmm_vr * vr;
/* Prepare COW on the read/write private mappings */
if ( !(model_vr->flags & SOS_VR_MAP_SHARED)
&& (model_vr->access_rights & SOS_VM_MAP_PROT_WRITE) )
{
/* Mark the underlying physical pages (if any) as
read-only */
SOS_ASSERT_FATAL(SOS_OK
== sos_paging_prepare_COW(model_vr->start,
model_vr->size));
}
/* Allocate a new virtual region and copy the 'model' into it */
vr = (struct sos_umem_vmm_vr *) sos_kmem_cache_alloc(cache_of_vr, 0);
if (! vr)
goto undo_creation;
memcpy(vr, model_vr, sizeof(*vr));
vr->address_space = new_as;
/* Signal the "new" mapping to the underlying VR mapper */
if (vr->ops && vr->ops->ref)
vr->ops->ref(vr);
/* Insert the new VR into the new AS */
list_add_tail_named(new_as->list_vr, vr, prev_in_as, next_in_as);
/* Insert the new VR into the list of mappings of the resource */
list_add_tail_named(model_vr->mapped_resource->list_vr, vr,
prev_in_mapped_resource,
next_in_mapped_resource);
}
/* Now copy the current MMU configuration */
new_as->mm_context = sos_mm_context_duplicate(model_as->mm_context);
if (NULL == new_as->mm_context)
goto undo_creation;
/* Success: copy the heap attributes and usage statistics */
new_as->heap_start = model_as->heap_start;
new_as->heap_size = model_as->heap_size;
new_as->phys_total = model_as->phys_total;
memcpy(& new_as->vm_total, & model_as->vm_total, sizeof(struct vm_usage));
memcpy(& new_as->vm_shrd, & model_as->vm_shrd, sizeof(struct vm_usage));
SOS_ASSERT_FATAL(SOS_OK == sos_thread_end_user_space_access());
return new_as;
/* Error handling */
undo_creation:
SOS_ASSERT_FATAL(SOS_OK == sos_thread_end_user_space_access());
sos_umem_vmm_delete_as(new_as);
return NULL;
}
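/*
 * Illustrative sketch (editor's example): the fork()-style path. The
 * loop above has already armed COW on every private writable region of
 * the model AS, so parent and child physically share their pages until
 * one of them writes. The call site shown here is an assumption; the
 * real one lives in the process-management code.
 */
#if 0 /* example only */
static struct sos_umem_vmm_as *
example_fork_as(struct sos_umem_vmm_as * parent_as,
                struct sos_process * child)
{
  /* NULL means the fork must fail with -SOS_ENOMEM */
  return sos_umem_vmm_duplicate_as(parent_as, child);
}
#endif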
sos_ret_t
sos_umem_vmm_delete_as(struct sos_umem_vmm_as * as)
{
while(! list_is_empty_named(as->list_vr, prev_in_as, next_in_as))
{
struct sos_umem_vmm_vr * vr;
vr = list_get_head_named(as->list_vr, prev_in_as, next_in_as);
/* Remove the vr from the lists */
list_pop_head_named(as->list_vr, prev_in_as, next_in_as);
list_delete_named(vr->mapped_resource->list_vr, vr,
prev_in_mapped_resource,
next_in_mapped_resource);
/* Signal to the underlying VR mapper that the mapping is
suppressed */
if (vr->ops)
{
if (vr->ops->unmap)
vr->ops->unmap(vr, vr->start, vr->size);
if (vr->ops->unref)
vr->ops->unref(vr);
}
sos_kmem_cache_free((sos_vaddr_t)vr);
}
/* Release MMU configuration */
if (as->mm_context)
sos_mm_context_unref(as->mm_context);
/* Now deallocate the main address space structure */
sos_kmem_cache_free((sos_vaddr_t)as);
return SOS_OK;
}
struct sos_process *
sos_umem_vmm_get_process(struct sos_umem_vmm_as * as)
{
return as->process;
}
struct sos_mm_context *
sos_umem_vmm_get_mm_context(struct sos_umem_vmm_as * as)
{
return as->mm_context;
}
struct sos_umem_vmm_vr *
sos_umem_vmm_get_vr_at_address(struct sos_umem_vmm_as * as,
sos_uaddr_t uaddr)
{
struct sos_umem_vmm_vr * vr;
vr = find_enclosing_or_next_vr(as, uaddr);
if (! vr)
return NULL;
/* Ok, uaddr < vr->start + vr->size, but do we also have uaddr >= vr->start ? */
if (uaddr < vr->start)
return NULL;
return vr;
}
struct sos_umem_vmm_as *
sos_umem_vmm_get_as_of_vr(struct sos_umem_vmm_vr * vr)
{
return vr->address_space;
}
struct sos_umem_vmm_vr_ops *
sos_umem_vmm_get_ops_of_vr(struct sos_umem_vmm_vr * vr)
{
return vr->ops;
}
sos_ui32_t sos_umem_vmm_get_prot_of_vr(struct sos_umem_vmm_vr * vr)
{
return vr->access_rights;
}
sos_ui32_t sos_umem_vmm_get_flags_of_vr(struct sos_umem_vmm_vr * vr)
{
return vr->flags;
}
struct sos_umem_vmm_mapped_resource *
sos_umem_vmm_get_mapped_resource_of_vr(struct sos_umem_vmm_vr * vr)
{
return vr->mapped_resource;
}
sos_uaddr_t sos_umem_vmm_get_start_of_vr(struct sos_umem_vmm_vr * vr)
{
return vr->start;
}
sos_size_t sos_umem_vmm_get_size_of_vr(struct sos_umem_vmm_vr * vr)
{
return vr->size;
}
sos_luoffset_t sos_umem_vmm_get_offset_in_resource(struct sos_umem_vmm_vr * vr)
{
return vr->offset_in_resource;
}
sos_ret_t
sos_umem_vmm_set_ops_of_vr(struct sos_umem_vmm_vr * vr,
struct sos_umem_vmm_vr_ops * ops)
{
/* Don't allow overwriting any previously-set VR ops */
SOS_ASSERT_FATAL(NULL == vr->ops);
vr->ops = ops;
return SOS_OK;
}
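/*
 * Illustrative sketch (editor's example): how a mapped resource's
 * mmap() callback is expected to install its VR operations, in the
 * spirit of what drivers/zero.c does. The ops table and callback names
 * are hypothetical.
 */
#if 0 /* example only */
static struct sos_umem_vmm_vr_ops example_vr_ops; /* .page_in at least
                                                     must be filled in */

static sos_ret_t example_resource_mmap(struct sos_umem_vmm_vr * vr)
{
  /* May only be called once per VR: set_ops asserts vr->ops is NULL */
  return sos_umem_vmm_set_ops_of_vr(vr, & example_vr_ops);
}
#endif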
/**
* When resize asks to map the resource elsewhere, make sure not to
* overwrite the offset_in_resource field
*/
#define INTERNAL_MAP_CALLED_FROM_MREMAP (1 << 8)
sos_ret_t
sos_umem_vmm_map(struct sos_umem_vmm_as * as,
sos_uaddr_t * /*in/out*/uaddr, sos_size_t size,
sos_ui32_t access_rights,
sos_ui32_t flags,
struct sos_umem_vmm_mapped_resource * resource,
sos_luoffset_t offset_in_resource)
{
__label__ return_mmap;
sos_uaddr_t hint_uaddr;
struct sos_umem_vmm_vr *prev_vr, *next_vr, *vr, *preallocated_vr;
sos_bool_t merge_with_preceding, merge_with_next, used_preallocated_vr;
sos_bool_t internal_map_called_from_mremap
= (flags & INTERNAL_MAP_CALLED_FROM_MREMAP);
sos_ret_t retval = SOS_OK;
used_preallocated_vr = FALSE;
hint_uaddr = *uaddr;
/* Default mapping address is NULL */
*uaddr = (sos_vaddr_t)NULL;
if (! resource)
return -SOS_EINVAL;
if (! resource->mmap)
return -SOS_EPERM;
if (! SOS_IS_PAGE_ALIGNED(hint_uaddr))
return -SOS_EINVAL;
if (size <= 0)
return -SOS_EINVAL;
size = SOS_PAGE_ALIGN_SUP(size);
if (flags & SOS_VR_MAP_SHARED)
{
/* Make sure the mapped resource allows the required protection flags */
if ( ( (access_rights & SOS_VM_MAP_PROT_READ)
&& !(resource->allowed_access_rights & SOS_VM_MAP_PROT_READ) )
|| ( (access_rights & SOS_VM_MAP_PROT_WRITE)
&& !(resource->allowed_access_rights & SOS_VM_MAP_PROT_WRITE) )
|| ( (access_rights & SOS_VM_MAP_PROT_EXEC)
&& !(resource->allowed_access_rights & SOS_VM_MAP_PROT_EXEC)) )
return -SOS_EPERM;
}
/* Sanity checks over the offset_in_resource parameter */
if ( !internal_map_called_from_mremap
&& ( resource->flags & SOS_MAPPED_RESOURCE_ANONYMOUS ) )
/* Initial offset ignored for anonymous mappings */
{
/* Nothing to check */
}
/* Make sure that the offset in resource won't overflow */
else if (offset_in_resource + size <= offset_in_resource)
return -SOS_EINVAL;
/* Filter out unsupported flags */
access_rights &= (SOS_VM_MAP_PROT_READ
| SOS_VM_MAP_PROT_WRITE
| SOS_VM_MAP_PROT_EXEC);
flags &= (SOS_VR_MAP_SHARED
| SOS_VR_MAP_FIXED);
/* Pre-allocate a new VR now: once we have found a valid slot in the
VR list, we must not block (and thus allow another thread to alter
the list) before the insertion is complete */
preallocated_vr
= (struct sos_umem_vmm_vr *)sos_kmem_cache_alloc(cache_of_vr, 0);
if (! preallocated_vr)
return -SOS_ENOMEM;
/* Compute the user address of the new mapping */
if (flags & SOS_VR_MAP_FIXED)
{
/*
* The address is imposed
*/
/* Make sure the hint_uaddr hint is valid */
if (! SOS_PAGING_IS_USER_AREA(hint_uaddr, size) )
{ retval = -SOS_EINVAL; goto return_mmap; }
/* Unmap any overlapped VR */
retval = sos_umem_vmm_unmap(as, hint_uaddr, size);
if (SOS_OK != retval)
{ goto return_mmap; }
}
else
{
/*
* A free range has to be determined
*/
/* Find a suitable free VR */
hint_uaddr = find_first_free_interval(as, hint_uaddr, size);
if (! hint_uaddr)
{ retval = -SOS_ENOMEM; goto return_mmap; }
}
/* For anonymous resource mappings, set the initial
offset_in_resource to the initial virtual start address in user
space */
if ( !internal_map_called_from_mremap
&& (resource->flags & SOS_MAPPED_RESOURCE_ANONYMOUS ) )
offset_in_resource = hint_uaddr;
/* Lookup next and previous VR, if any. This will allow us to merge
the regions, when possible */
next_vr = find_enclosing_or_next_vr(as, hint_uaddr);
if (next_vr)
{
/* Find previous VR, if any */
prev_vr = next_vr->prev_in_as;
/* The list is circular: we may have looped back over the tail
of the list (eg when the list is a singleton) */
if (prev_vr->start > hint_uaddr)
prev_vr = NULL; /* No preceding VR */
}
else
{
/* Otherwise we went beyond the last VR */
prev_vr = list_get_tail_named(as->list_vr, prev_in_as, next_in_as);
}
/* Merge with preceding VR ? */
merge_with_preceding
= ( (NULL != prev_vr)
&& (prev_vr->mapped_resource == resource)
&& (prev_vr->offset_in_resource + prev_vr->size == offset_in_resource)
&& (prev_vr->start + prev_vr->size == hint_uaddr)
&& (prev_vr->flags == flags)
&& (prev_vr->access_rights == access_rights) );
/* Merge with next VR ? */
merge_with_next
= ( (NULL != next_vr)
&& (next_vr->mapped_resource == resource)
&& (offset_in_resource + size == next_vr->offset_in_resource)
&& (hint_uaddr + size == next_vr->start)
&& (next_vr->flags == flags)
&& (next_vr->access_rights == access_rights) );
if (merge_with_preceding && merge_with_next)
{
/* Widen the prev_vr VR to encompass both the new VR and the next_vr */
vr = prev_vr;
vr->size += size + next_vr->size;
/* Remove the next_vr VR */
list_delete_named(as->list_vr, next_vr, prev_in_as, next_in_as);
list_delete_named(next_vr->mapped_resource->list_vr, next_vr,
prev_in_mapped_resource, next_in_mapped_resource);
if (next_vr->ops && next_vr->ops->unref)
next_vr->ops->unref(next_vr);
sos_kmem_cache_free((sos_vaddr_t) next_vr);
}
else if (merge_with_preceding)
{
/* Widen the prev_vr VR to encompass the new VR */
vr = prev_vr;
vr->size += size;
}
else if (merge_with_next)
{
/* Widen the next_vr VR to encompass the new VR */
vr = next_vr;
vr->start -= size;
vr->size += size;
}
else
{
/* Allocate a brand new VR and insert it into the list */
vr = preallocated_vr;
used_preallocated_vr = TRUE;
vr->start = hint_uaddr;
vr->size = size;
vr->access_rights = access_rights;
vr->flags = flags;
vr->mapped_resource = resource;
vr->offset_in_resource = offset_in_resource;
/* Insert VR in address space */
vr->address_space = as;
if (prev_vr)
list_insert_after_named(as->list_vr, prev_vr, vr,
prev_in_as, next_in_as);
else
list_add_head_named(as->list_vr, vr, prev_in_as, next_in_as);
list_add_tail_named(vr->mapped_resource->list_vr, vr,
prev_in_mapped_resource,
next_in_mapped_resource);
/* Signal the resource we are mapping it */
if (resource && resource->mmap)
{
retval = resource->mmap(vr);
if (SOS_OK != retval)
{
retval = sos_umem_vmm_unmap(as, vr->start, vr->size);
goto return_mmap;
}
/* The page_in method is MANDATORY for mapped resources */
SOS_ASSERT_FATAL(vr->ops && vr->ops->page_in);
}
if (vr->ops && vr->ops->ref)
vr->ops->ref(vr);
}
/* Ok, fine, we got it right ! Return the address to the caller */
*uaddr = hint_uaddr;
as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
size, 0, vr->access_rights);
retval = SOS_OK;
return_mmap:
if (! used_preallocated_vr)
sos_kmem_cache_free((sos_vaddr_t)preallocated_vr);
return retval;
}
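/*
 * Illustrative sketch (editor's example): mapping one page of private,
 * zero-filled memory through the /dev/zero resource, letting the
 * kernel choose the address (hint = NULL). sos_dev_zero_map() is the
 * same wrapper used by sos_umem_vmm_brk() below.
 */
#if 0 /* example only */
static sos_uaddr_t example_map_anonymous_page(struct sos_umem_vmm_as * as)
{
  sos_uaddr_t uaddr = (sos_uaddr_t)NULL; /* no placement hint */
  if (SOS_OK != sos_dev_zero_map(as, & uaddr, SOS_PAGE_SIZE,
                                 SOS_VM_MAP_PROT_READ
                                 | SOS_VM_MAP_PROT_WRITE,
                                 0 /* private, non-fixed */))
    return (sos_uaddr_t)NULL;
  return uaddr; /* page-aligned start of the new VR */
}
#endif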
sos_ret_t
sos_umem_vmm_unmap(struct sos_umem_vmm_as * as,
sos_uaddr_t uaddr, sos_size_t size)
{
struct sos_umem_vmm_vr *vr, *preallocated_vr;
sos_bool_t used_preallocated_vr;
sos_bool_t need_to_change_address_space = FALSE;
if (! SOS_IS_PAGE_ALIGNED(uaddr))
return -SOS_EINVAL;
if (size <= 0)
return -SOS_EINVAL;
size = SOS_PAGE_ALIGN_SUP(size);
/* Make sure the uaddr is valid */
if (! SOS_PAGING_IS_USER_AREA(uaddr, size) )
return -SOS_EINVAL;
/* In some cases, the unmapping might imply splitting a VR in
two. Allocating a new VR can be a blocking operation, but blocking
now is harmless. We must however be careful not to block later,
while altering the VR lists: that's why we pre-allocate now. */
used_preallocated_vr = FALSE;
preallocated_vr
= (struct sos_umem_vmm_vr *)sos_kmem_cache_alloc(cache_of_vr, 0);
if (! preallocated_vr)
return -SOS_ENOMEM;
/* Find any VR intersecting with the given interval */
vr = find_first_intersecting_vr(as, uaddr, size);
/* Unmap (part of) the VR covered by [uaddr .. uaddr+size[ */
while (NULL != vr)
{
/* Went past the end of the *circular* list => back at the
beginning ? */
if (vr->start + vr->size <= uaddr)
/* Yes, stop now */
break;
/* Went beyond the region to unmap ? */
if (uaddr + size <= vr->start)
/* Yes, stop now */
break;
/* VR totally unmapped ? */
if ((vr->start >= uaddr)
&& (vr->start + vr->size <= uaddr + size))
{
struct sos_umem_vmm_vr *next_vr;
/* Yes: signal we remove it completely */
if (vr->ops && vr->ops->unmap)
vr->ops->unmap(vr, vr->start, vr->size);
/* Remove it from the AS list now */
next_vr = vr->next_in_as;
if (next_vr == vr) /* singleton ? */
next_vr = NULL;
list_delete_named(as->list_vr, vr, prev_in_as, next_in_as);
/* Remove from the list of VRs mapping the resource */
list_delete_named(vr->mapped_resource->list_vr, vr,
prev_in_mapped_resource,
next_in_mapped_resource);
if (vr->ops && vr->ops->unref)
vr->ops->unref(vr);
as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
vr->size, vr->access_rights, 0);
sos_kmem_cache_free((sos_vaddr_t)vr);
/* Prepare next iteration */
vr = next_vr;
continue;
}
/* Unmapped region lies completely INSIDE the VR */
else if ( (vr->start < uaddr)
&& (vr->start + vr->size > uaddr + size) )
{
/* VR has to be split into 2 */
/* Use the preallocated VR and copy the VR into it */
used_preallocated_vr = TRUE;
memcpy(preallocated_vr, vr, sizeof(*vr));
/* Adjust the start/size of both VRs */
preallocated_vr->start = uaddr + size;
preallocated_vr->size = vr->start + vr->size - (uaddr + size);
preallocated_vr->offset_in_resource += uaddr + size - vr->start;
vr->size = uaddr - vr->start;
/* Insert the new VR into the list */
list_insert_after_named(as->list_vr, vr, preallocated_vr,
prev_in_as, next_in_as);
list_add_tail_named(vr->mapped_resource->list_vr, preallocated_vr,
prev_in_mapped_resource,
next_in_mapped_resource);
/* Signal the changes to the underlying resource */
if (vr->ops && vr->ops->unmap)
vr->ops->unmap(vr, uaddr, size);
if (preallocated_vr->ops && preallocated_vr->ops->ref)
preallocated_vr->ops->ref(preallocated_vr);
/* Account for change in VRs */
as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
size, vr->access_rights, 0);
/* No need to go further */
break;
}
/* Unmapped region only affects the START address of the VR */
else if (uaddr <= vr->start)
{
sos_size_t translation = uaddr + size - vr->start;
/* Shift the VR */
vr->size -= translation;
vr->offset_in_resource += translation;
vr->start += translation;
/* Signal unmapping */
if (vr->ops && vr->ops->unmap)
vr->ops->unmap(vr, uaddr + size,
translation);
/* Account for change in VRs */
as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
translation,
vr->access_rights, 0);
/* No need to go further, we reached the last VR that
overlaps the unmapped region */
break;
}
/* Unmapped region only affects the ENDING address of the VR */
else if (uaddr + size >= vr->start + vr->size)
{
sos_size_t unmapped_size = vr->start + vr->size - uaddr;
/* Resize VR */
vr->size = uaddr - vr->start;
/* Signal unmapping */
if (vr->ops && vr->ops->unmap)
vr->ops->unmap(vr, uaddr, unmapped_size);
/* Account for change in VRs */
as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
unmapped_size,
vr->access_rights, 0);
vr = vr->next_in_as;
continue;
}
sos_display_fatal_error("BUG uaddr=%x sz=%x vr_start=%x, vr_sz=%x",
uaddr, size, vr->start, vr->size);
}
/* When called from sos_umem_vmm_resize(), the address space is
already squatted, so we don't have to switch to it again */
need_to_change_address_space
= (as != sos_thread_get_current()->squatted_address_space);
if (need_to_change_address_space)
SOS_ASSERT_FATAL(SOS_OK
== sos_thread_prepare_user_space_access(as,
(sos_vaddr_t)
NULL));
/* Begin independent sub-block */
{
sos_ret_t sz_unmapped = sos_paging_unmap_interval(uaddr, size);
SOS_ASSERT_FATAL(sz_unmapped >= 0);
as->phys_total -= sz_unmapped;
}
/* End independent sub-block */
if (need_to_change_address_space)
SOS_ASSERT_FATAL(SOS_OK == sos_thread_end_user_space_access());
if (! used_preallocated_vr)
sos_kmem_cache_free((sos_vaddr_t)preallocated_vr);
return SOS_OK;
}
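/*
 * Illustrative sketch (editor's example): punching a one-page hole in
 * the middle of an existing mapping. The enclosing VR is split in two
 * around the hole by the code above; this is the case that consumes
 * the preallocated VR.
 */
#if 0 /* example only */
static sos_ret_t example_punch_hole(struct sos_umem_vmm_as * as,
                                    sos_uaddr_t hole_uaddr)
{
  return sos_umem_vmm_unmap(as, SOS_PAGE_ALIGN_INF(hole_uaddr),
                            SOS_PAGE_SIZE);
}
#endif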
sos_ret_t
sos_umem_vmm_chprot(struct sos_umem_vmm_as * as,
sos_uaddr_t uaddr, sos_size_t size,
sos_ui32_t new_access_rights)
{
struct sos_umem_vmm_vr *start_vr, *vr,
*preallocated_middle_vr, *preallocated_right_vr;
sos_bool_t used_preallocated_middle_vr, used_preallocated_right_vr;
if (! SOS_IS_PAGE_ALIGNED(uaddr))
return -SOS_EINVAL;
if (size <= 0)
return -SOS_EINVAL;
size = SOS_PAGE_ALIGN_SUP(size);
/* Make sure the uaddr is valid */
if (! SOS_PAGING_IS_USER_AREA(uaddr, size) )
return -SOS_EINVAL;
/* Pre-allocate 2 new VRs (for the same reason as in unmap): a
chprot may require splitting at most 2 regions */
used_preallocated_middle_vr = FALSE;
used_preallocated_right_vr = FALSE;
preallocated_middle_vr
= (struct sos_umem_vmm_vr *)sos_kmem_cache_alloc(cache_of_vr, 0);
if (! preallocated_middle_vr)
return -SOS_ENOMEM;
preallocated_right_vr
= (struct sos_umem_vmm_vr *)sos_kmem_cache_alloc(cache_of_vr, 0);
if (! preallocated_right_vr)
{
sos_kmem_cache_free((sos_vaddr_t)preallocated_middle_vr);
return -SOS_ENOMEM;
}
/* Find any VR intersecting with the given interval */
start_vr = find_first_intersecting_vr(as, uaddr, size);
if (NULL == start_vr)
{
/* Nothing to chprot: release the preallocated VRs */
sos_kmem_cache_free((sos_vaddr_t)preallocated_middle_vr);
sos_kmem_cache_free((sos_vaddr_t)preallocated_right_vr);
return SOS_OK;
}
/* First of all: make sure we are allowed to change the access
rights of all the VRs concerned by the chprot */
vr = start_vr;
while (TRUE)
{
/* Went past the end of the *circular* list => back at the
beginning ? */
if (vr->start + vr->size <= uaddr)
/* Yes, stop now */
break;
/* Went beyond the region to chprot ? */
if (uaddr + size < vr->start)
/* Yes, stop now */
break;
if (vr->flags & SOS_VR_MAP_SHARED)
{
/* Make sure the mapped resource allows the required
protection flags */
if ( ( (new_access_rights & SOS_VM_MAP_PROT_READ)
&& !(vr->mapped_resource->allowed_access_rights
& SOS_VM_MAP_PROT_READ) )
|| ( (new_access_rights & SOS_VM_MAP_PROT_WRITE)
&& !(vr->mapped_resource->allowed_access_rights
& SOS_VM_MAP_PROT_WRITE) )
|| ( (new_access_rights & SOS_VM_MAP_PROT_EXEC)
&& !(vr->mapped_resource->allowed_access_rights
& SOS_VM_MAP_PROT_EXEC) ) )
{
/* Release the preallocated VRs before bailing out */
sos_kmem_cache_free((sos_vaddr_t)preallocated_middle_vr);
sos_kmem_cache_free((sos_vaddr_t)preallocated_right_vr);
return -SOS_EPERM;
}
}
vr = vr->next_in_as;
}
/* Change the access rights of the VRs covered by [uaddr
.. uaddr+size[ */
vr = start_vr;
while (TRUE)
{
/* Went past the end of the *circular* list => back at the
beginning ? */
if (vr->start + vr->size <= uaddr)
/* Yes, stop now */
break;
/* Went beyond the region to chprot ? */
if (uaddr + size <= vr->start)
/* Yes, stop now */
break;
/* Access rights unchanged ? */
if (vr->access_rights == new_access_rights)
/* nop */
{
vr = vr->next_in_as;
continue;
}
/* VR totally chprot ? */
if ((vr->start >= uaddr)
&& (vr->start + vr->size <= uaddr + size))
{
/* Account for change in VRs */
as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
vr->size, vr->access_rights,
new_access_rights);
vr->access_rights = new_access_rights;
if (vr->flags & SOS_VR_MAP_SHARED)
/* For shared mappings: effectively change the access
rights of the physical pages */
sos_paging_set_prot_of_interval(vr->start, vr->size,
new_access_rights);
else
/* Private mapping */
{
/* For private mappings, we push the new access rights
down to the PTEs only when write access is removed.
For private mappings that become writable, we leave
the PTEs unchanged to preserve the COW
semantics */
if (! (new_access_rights & SOS_VM_MAP_PROT_WRITE))
sos_paging_set_prot_of_interval(vr->start, vr->size,
new_access_rights);
}
vr = vr->next_in_as;
continue;
}
/* chprot region lies completely INSIDE the VR */
else if ( (vr->start < uaddr)
&& (vr->start + vr->size > uaddr + size) )
{
/* VR has to be split into 3 */
/* Use the preallocated VRs and copy the VR into them */
SOS_ASSERT_FATAL(! used_preallocated_middle_vr);
SOS_ASSERT_FATAL(! used_preallocated_right_vr);
used_preallocated_middle_vr = TRUE;
memcpy(preallocated_middle_vr, vr, sizeof(*vr));
used_preallocated_right_vr = TRUE;
memcpy(preallocated_right_vr, vr, sizeof(*vr));
/* Adjust the start/size of the VRs */
preallocated_middle_vr->start = uaddr;
preallocated_middle_vr->size = size;
preallocated_right_vr->start = uaddr + size;
preallocated_right_vr->size = vr->start + vr->size
- (uaddr + size);
preallocated_middle_vr->offset_in_resource
+= uaddr - vr->start;
preallocated_right_vr->offset_in_resource
+= uaddr + size - vr->start;
vr->size = uaddr - vr->start;
/* Account for change in VRs */
preallocated_middle_vr->access_rights = new_access_rights;
as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
size, vr->access_rights,
new_access_rights);
/* Insert the new VRs into the lists */
list_insert_after_named(as->list_vr, vr, preallocated_middle_vr,
prev_in_as, next_in_as);
list_insert_after_named(as->list_vr, preallocated_middle_vr,
preallocated_right_vr,
prev_in_as, next_in_as);
list_add_tail_named(vr->mapped_resource->list_vr,
preallocated_middle_vr,
prev_in_mapped_resource,
next_in_mapped_resource);
list_add_tail_named(vr->mapped_resource->list_vr,
preallocated_right_vr,
prev_in_mapped_resource,
next_in_mapped_resource);
/* Effectively change the access rights of the physical pages */
if (!(preallocated_middle_vr->flags & SOS_VR_MAP_SHARED)
&& (new_access_rights & SOS_VM_MAP_PROT_WRITE))
/* For private mappings with write access, prepare for COW */
sos_paging_prepare_COW(preallocated_middle_vr->start,
preallocated_middle_vr->size);
else
sos_paging_set_prot_of_interval(preallocated_middle_vr->start,
preallocated_middle_vr->size,
new_access_rights);
if (preallocated_right_vr->ops && preallocated_right_vr->ops->ref)
preallocated_right_vr->ops->ref(preallocated_right_vr);
if (preallocated_middle_vr->ops && preallocated_middle_vr->ops->ref)
preallocated_middle_vr->ops->ref(preallocated_middle_vr);
/* No need to go further */
break;
}
/* Chprot region only affects the START address of the VR */
else if (uaddr <= vr->start)
{
/* Split the region into 2 */
sos_uoffset_t offset_in_region = uaddr + size - vr->start;
/* Use the preallocated VRs and copy the VR into them */
SOS_ASSERT_FATAL(! used_preallocated_middle_vr);
used_preallocated_middle_vr = TRUE;
memcpy(preallocated_middle_vr, vr, sizeof(*vr));
/* Adjust the start/size of the VRs */
preallocated_middle_vr->start += offset_in_region;
preallocated_middle_vr->size -= offset_in_region;
vr->size = offset_in_region;
preallocated_middle_vr->offset_in_resource += offset_in_region;
/* Account for change in VRs */
as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
vr->size,
vr->access_rights,
new_access_rights);
vr->access_rights = new_access_rights;
/* Insert the new VR into the lists */
list_insert_after_named(as->list_vr, vr,
preallocated_middle_vr,
prev_in_as, next_in_as);
list_add_tail_named(vr->mapped_resource->list_vr,
preallocated_middle_vr,
prev_in_mapped_resource,
next_in_mapped_resource);
/* Effectively change the access rights of the physical pages */
if (!(vr->flags & SOS_VR_MAP_SHARED)
&& (new_access_rights & SOS_VM_MAP_PROT_WRITE))
/* For private mappings with write access, prepare for COW */
sos_paging_prepare_COW(vr->start, vr->size);
else
sos_paging_set_prot_of_interval(vr->start, vr->size,
new_access_rights);
if (preallocated_middle_vr->ops && preallocated_middle_vr->ops->ref)
preallocated_middle_vr->ops->ref(preallocated_middle_vr);
/* No need to go further (we reached the last VR that
overlaps the given interval to chprot) */
break;
}
/* Chprot region only affects the ENDING address of the VR */
else if (uaddr + size >= vr->start + vr->size)
{
/* Split the region into 2 */
sos_uoffset_t offset_in_region = uaddr - vr->start;
/* Use the preallocated VRs and copy the VR into them */
SOS_ASSERT_FATAL(! used_preallocated_right_vr);
used_preallocated_right_vr = TRUE;
memcpy(preallocated_right_vr, vr, sizeof(*vr));
/* Adjust the start/size of the VRs */
preallocated_right_vr->start += offset_in_region;
preallocated_right_vr->size -= offset_in_region;
vr->size = offset_in_region;
preallocated_right_vr->offset_in_resource += offset_in_region;
/* Account for change in VRs */
as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
preallocated_right_vr->size,
vr->access_rights,
new_access_rights);
preallocated_right_vr->access_rights = new_access_rights;
/* Insert the new VR into the lists */
list_insert_after_named(as->list_vr, vr,
preallocated_right_vr,
prev_in_as, next_in_as);
list_add_tail_named(vr->mapped_resource->list_vr,
preallocated_right_vr,
prev_in_mapped_resource,
next_in_mapped_resource);
/* Effectively change the access rights of the physical pages */
if (!(preallocated_right_vr->flags & SOS_VR_MAP_SHARED)
&& (new_access_rights & SOS_VM_MAP_PROT_WRITE))
/* For private mappings with write access, prepare for COW */
sos_paging_prepare_COW(preallocated_right_vr->start,
preallocated_right_vr->size);
else
sos_paging_set_prot_of_interval(preallocated_right_vr->start,
preallocated_right_vr->size,
new_access_rights);
if (preallocated_right_vr->ops && preallocated_right_vr->ops->ref)
preallocated_right_vr->ops->ref(preallocated_right_vr);
vr = vr->next_in_as;
continue;
}
sos_display_fatal_error("BUG");
}
if (! used_preallocated_middle_vr)
sos_kmem_cache_free((sos_vaddr_t)preallocated_middle_vr);
if (! used_preallocated_right_vr)
sos_kmem_cache_free((sos_vaddr_t)preallocated_right_vr);
return SOS_OK;
}
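/*
 * Illustrative sketch (editor's example): an mprotect()-style call
 * making one page read-only. Note that when a private mapping becomes
 * writable, the code above deliberately leaves the PTEs read-only so
 * that the page-fault handler can perform COW on demand.
 */
#if 0 /* example only */
static sos_ret_t example_make_page_readonly(struct sos_umem_vmm_as * as,
                                            sos_uaddr_t uaddr)
{
  return sos_umem_vmm_chprot(as, SOS_PAGE_ALIGN_INF(uaddr),
                             SOS_PAGE_SIZE, SOS_VM_MAP_PROT_READ);
}
#endif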
sos_ret_t
sos_umem_vmm_sync(struct sos_umem_vmm_as * as,
sos_uaddr_t uaddr, sos_size_t size,
sos_ui32_t flags)
{
if (! SOS_IS_PAGE_ALIGNED(uaddr))
return -SOS_EINVAL;
if (size <= 0)
return -SOS_EINVAL;
size = SOS_PAGE_ALIGN_SUP(size);
/* Make sure the uaddr is valid */
if (! SOS_PAGING_IS_USER_AREA(uaddr, size) )
return -SOS_EINVAL;
/* Go from page to page, and for each dirty page in the region, call
the sync_page method */
while (TRUE)
{
struct sos_umem_vmm_vr *vr;
if (size <= 0)
break;
/* Find any VR intersecting with the given interval */
vr = find_first_intersecting_vr(as, uaddr, size);
if (NULL == vr)
break;
/* For private or anonymous mappings => no backing store */
if ( !(vr->flags & SOS_VR_MAP_SHARED)
|| (vr->mapped_resource->flags & SOS_MAPPED_RESOURCE_ANONYMOUS)
/* Likewise for non msync-able regions */
|| ! vr->ops->sync_page )
{
if (size <= vr->size)
break;
uaddr += vr->size;
size -= vr->size;
}
/* Find the next dirty page in this VR */
for ( ; (size > 0)
&& (uaddr - vr->start < vr->size) ;
uaddr += SOS_PAGE_SIZE,
size -= SOS_PAGE_SIZE)
if (sos_paging_is_dirty(uaddr))
{
/* Synchronize it with its backing store */
vr->ops->sync_page(vr, uaddr, flags);
uaddr += SOS_PAGE_SIZE;
size -= SOS_PAGE_SIZE;
break;
}
}
return SOS_OK;
}
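/*
 * Illustrative sketch (editor's example): an msync()-style writeback of
 * a whole shared mapping. The flags value is handed to the resource's
 * sync_page method and its interpretation is resource-defined, so the
 * 0 below is only a placeholder.
 */
#if 0 /* example only */
static sos_ret_t example_msync(struct sos_umem_vmm_as * as,
                               sos_uaddr_t map_uaddr,
                               sos_size_t map_size)
{
  return sos_umem_vmm_sync(as, map_uaddr, map_size, 0 /* placeholder */);
}
#endif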
sos_ret_t
sos_umem_vmm_resize(struct sos_umem_vmm_as * as,
sos_uaddr_t old_uaddr, sos_size_t old_size,
sos_uaddr_t *new_uaddr, sos_size_t new_size,
sos_ui32_t flags)
{
sos_luoffset_t new_offset_in_resource;
sos_bool_t must_move_vr = FALSE;
struct sos_umem_vmm_vr *vr, *prev_vr, *next_vr;
/* Make sure the new uaddr is valid */
if (! SOS_PAGING_IS_USER_AREA(*new_uaddr, new_size) )
return -SOS_EINVAL;
old_uaddr = SOS_PAGE_ALIGN_INF(old_uaddr);
old_size = SOS_PAGE_ALIGN_SUP(old_size);
if (! SOS_IS_PAGE_ALIGNED(*new_uaddr))
return -SOS_EINVAL;
if (new_size <= 0)
return -SOS_EINVAL;
new_size = SOS_PAGE_ALIGN_SUP(new_size);
/* Lookup a VR overlapping the address range */
vr = find_first_intersecting_vr(as, old_uaddr, old_size);
if (! vr)
return -SOS_EINVAL;
/* Make sure there is exactly ONE VR overlapping the area */
if ( (vr->start > old_uaddr)
|| (vr->start + vr->size < old_uaddr + old_size) )
return -SOS_EINVAL;
/* Retrieve the prev/next VR if they exist (the VR are on circular
list) */
prev_vr = vr->prev_in_as;
if (prev_vr->start >= vr->start)
prev_vr = NULL;
next_vr = vr->next_in_as;
if (next_vr->start <= vr->start)
next_vr = NULL;
/*
* Compute new offset inside the mapped resource, if any
*/
/* Don't allow resizing if the new uaddr would reach back beyond
'offset 0' of the resource */
if ( (*new_uaddr < vr->start)
&& (vr->start - *new_uaddr > vr->offset_in_resource) )
return -SOS_EINVAL;
/* Compute new offset in the resource (overflow-safe) */
if (vr->start > *new_uaddr)
new_offset_in_resource
= vr->offset_in_resource
- (vr->start - *new_uaddr);
else
new_offset_in_resource
= vr->offset_in_resource
+ (*new_uaddr - vr->start);
/* If other VRs would be affected by this resizing, then the VR must
be moved */
if (prev_vr && (prev_vr->start + prev_vr->size > *new_uaddr))
must_move_vr |= TRUE;
if (next_vr && (next_vr->start < *new_uaddr + new_size))
must_move_vr |= TRUE;
/* If VR would be out-of-user-space, it must be moved */
if (! SOS_PAGING_IS_USER_AREA(*new_uaddr, new_size) )
must_move_vr |= TRUE;
/* The VR must be moved but the user forbids it */
if ( must_move_vr && !(flags & SOS_VR_REMAP_MAYMOVE) )
return -SOS_EINVAL;
/* If the VR must be moved, we simply map the resource elsewhere and
unmap the current VR */
if (must_move_vr)
{
sos_uaddr_t uaddr, result_uaddr;
sos_ret_t retval;
result_uaddr = *new_uaddr;
retval = sos_umem_vmm_map(as, & result_uaddr, new_size,
vr->access_rights,
vr->flags | INTERNAL_MAP_CALLED_FROM_MREMAP,
vr->mapped_resource,
new_offset_in_resource);
if (SOS_OK != retval)
return retval;
/* Remap the physical pages at their new address */
for (uaddr = vr->start ;
uaddr < vr->start + vr->size ;
uaddr += SOS_PAGE_SIZE)
{
sos_paddr_t paddr;
sos_ui32_t prot;
sos_uaddr_t vaddr;
if (uaddr < *new_uaddr)
continue;
if (uaddr >= *new_uaddr + new_size)
continue;
/* Compute destination virtual address (should be
overflow-safe) */
if (vr->start >= *new_uaddr)
vaddr = result_uaddr
+ (uaddr - vr->start)
+ (vr->start - *new_uaddr);
else
vaddr = result_uaddr
+ (uaddr - vr->start)
- (*new_uaddr - vr->start);
paddr = sos_paging_get_paddr(uaddr);
if (! paddr)
/* No physical page mapped at this address yet */
continue;
prot = sos_paging_get_prot(uaddr);
SOS_ASSERT_FATAL(prot);
/* Remap it at its destination address */
retval = sos_paging_map(paddr, vaddr, TRUE, prot);
if (SOS_OK != retval)
{
sos_umem_vmm_unmap(as, result_uaddr, new_size);
return retval;
}
}
retval = sos_umem_vmm_unmap(as, vr->start, vr->size);
if (SOS_OK != retval)
{
sos_umem_vmm_unmap(as, result_uaddr, new_size);
return retval;
}
*new_uaddr = result_uaddr;
return retval;
}
/* Otherwise we simply resize the VR in place, taking care to unmap
whatever falls outside the new interval */
if (*new_uaddr + new_size < vr->start + vr->size)
sos_umem_vmm_unmap(as, *new_uaddr + new_size,
vr->start + vr->size - (*new_uaddr + new_size));
else
{
as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
*new_uaddr + new_size
- (vr->start + vr->size),
0, vr->access_rights);
vr->size += *new_uaddr + new_size - (vr->start + vr->size);
}
if (*new_uaddr > vr->start)
sos_umem_vmm_unmap(as, vr->start, *new_uaddr - vr->start);
else
{
as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
vr->start - *new_uaddr,
0, vr->access_rights);
vr->size += vr->start - *new_uaddr;
vr->start = *new_uaddr;
vr->offset_in_resource = new_offset_in_resource;
}
SOS_ASSERT_FATAL(vr->start == *new_uaddr);
SOS_ASSERT_FATAL(vr->size == new_size);
SOS_ASSERT_FATAL(vr->offset_in_resource == new_offset_in_resource);
return SOS_OK;
}
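/*
 * Illustrative sketch (editor's example): an mremap()-style growth of a
 * mapping, allowing the kernel to move it if growing in place would
 * collide with a neighbouring VR or leave user space.
 */
#if 0 /* example only */
static sos_ret_t example_mremap_grow(struct sos_umem_vmm_as * as,
                                     sos_uaddr_t old_uaddr,
                                     sos_size_t old_size,
                                     sos_uaddr_t * /*in/out*/new_uaddr,
                                     sos_size_t new_size)
{
  *new_uaddr = old_uaddr; /* hint: try to keep the mapping in place */
  return sos_umem_vmm_resize(as, old_uaddr, old_size,
                             new_uaddr, new_size,
                             SOS_VR_REMAP_MAYMOVE);
}
#endif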
sos_ret_t sos_umem_vmm_try_resolve_page_fault(sos_uaddr_t uaddr,
sos_bool_t write_access,
sos_bool_t user_access)
{
struct sos_umem_vmm_as *as;
struct sos_umem_vmm_vr *vr;
as = current_address_space;
if (! as)
return -SOS_EFAULT;
vr = find_first_intersecting_vr(as, uaddr, 1);
if (! vr)
return -SOS_EFAULT;
/* Write on a read-only VR */
if (write_access && !(vr->access_rights & SOS_VM_MAP_PROT_WRITE))
return -SOS_EFAULT;
/* Write on a COW VR */
if (write_access && !(vr->flags & SOS_VR_MAP_SHARED))
{
if (SOS_OK == sos_paging_try_resolve_COW(uaddr))
{
as->pgflt_cow ++;
return SOS_OK;
}
}
/* Ask the underlying resource to resolve the page fault */
if (SOS_OK != vr->ops->page_in(vr, uaddr, write_access))
{
as->pgflt_invalid ++;
return -SOS_EFAULT;
}
as->phys_total += SOS_PAGE_SIZE;
as->pgflt_page_in ++;
/* For a private mapping, keep the mapping read-only */
if (!(vr->flags & SOS_VR_MAP_SHARED))
{
sos_paging_prepare_COW(SOS_PAGE_ALIGN_INF(uaddr),
SOS_PAGE_SIZE);
}
return SOS_OK;
}
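/*
 * Illustrative sketch (editor's example): the expected contract at the
 * page-fault exception handler. The handler shown is hypothetical; the
 * real one lives with the exception/paging code and must decide what
 * to do with the faulting thread when -SOS_EFAULT is returned (eg kill
 * it).
 */
#if 0 /* example only */
static void example_page_fault_handler(sos_uaddr_t faulting_uaddr,
                                       sos_bool_t is_write,
                                       sos_bool_t is_user)
{
  if (SOS_OK != sos_umem_vmm_try_resolve_page_fault(faulting_uaddr,
                                                    is_write, is_user))
    {
      /* Genuine fault: terminate the offending thread/process here */
    }
}
#endif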
sos_ret_t
sos_umem_vmm_init_heap(struct sos_umem_vmm_as * as,
sos_uaddr_t heap_start)
{
SOS_ASSERT_FATAL(! as->heap_start);
as->heap_start = heap_start;
as->heap_size = 0;
return SOS_OK;
}
sos_uaddr_t
sos_umem_vmm_brk(struct sos_umem_vmm_as * as,
sos_uaddr_t new_top_uaddr)
{
sos_uaddr_t new_start;
sos_size_t new_size;
SOS_ASSERT_FATAL(as->heap_start);
if (! new_top_uaddr)
return as->heap_start + as->heap_size;
if (new_top_uaddr == as->heap_start + as->heap_size)
return as->heap_start + as->heap_size;
if (new_top_uaddr < as->heap_start)
return (sos_uaddr_t)NULL;
new_top_uaddr = SOS_PAGE_ALIGN_SUP(new_top_uaddr);
new_start = as->heap_start;
new_size = new_top_uaddr - as->heap_start;
/* First call to brk: we must map /dev/zero */
if (! as->heap_size)
{
if (SOS_OK != sos_dev_zero_map(as, & as->heap_start,
new_size,
SOS_VM_MAP_PROT_READ
| SOS_VM_MAP_PROT_WRITE,
0 /* private non-fixed */))
return (sos_uaddr_t)NULL;
as->heap_size = new_size;
return as->heap_start + as->heap_size;
}
/* Otherwise we just have to unmap or resize the region */
if (new_size <= 0)
{
if (SOS_OK != sos_umem_vmm_unmap(as,
as->heap_start, as->heap_size))
return (sos_uaddr_t)NULL;
}
else
{
if (SOS_OK != sos_umem_vmm_resize(as,
as->heap_start, as->heap_size,
& new_start, new_size,
0))
return (sos_uaddr_t)NULL;
}
SOS_ASSERT_FATAL(new_start == as->heap_start);
as->heap_size = new_size;
return new_top_uaddr;
}
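/*
 * Illustrative sketch (editor's example): sbrk()-style heap growth by
 * one page, as a user-level allocator would request it through the brk
 * system call.
 */
#if 0 /* example only */
static sos_uaddr_t example_sbrk_one_page(struct sos_umem_vmm_as * as)
{
  /* A NULL argument queries the current top of the heap */
  sos_uaddr_t old_top = sos_umem_vmm_brk(as, (sos_uaddr_t)NULL);
  /* Returns the new top on success, NULL on failure (heap unchanged) */
  return sos_umem_vmm_brk(as, old_top + SOS_PAGE_SIZE);
}
#endif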
static struct sos_umem_vmm_vr *
find_enclosing_or_next_vr(struct sos_umem_vmm_as * as,
sos_uaddr_t uaddr)
{
struct sos_umem_vmm_vr *vr;
int nb_vr;
if (! SOS_PAGING_IS_USER_AREA(uaddr, 1) )
return NULL;
list_foreach_named(as->list_vr, vr, nb_vr, prev_in_as, next_in_as)
{
/* Equivalent to "if (uaddr < vr->start + vr->size)" but more
robust (resilient to integer overflows) */
if (uaddr <= vr->start + (vr->size - 1))
return vr;
}
return NULL;
}
static struct sos_umem_vmm_vr *
find_first_intersecting_vr(struct sos_umem_vmm_as * as,
sos_uaddr_t start_uaddr, sos_size_t size)
{
struct sos_umem_vmm_vr * vr;
vr = find_enclosing_or_next_vr(as, start_uaddr);
if (! vr)
return NULL;
if (start_uaddr + size <= vr->start)
return NULL;
return vr;
}
static sos_uaddr_t
find_first_free_interval(struct sos_umem_vmm_as * as,
sos_uaddr_t hint_uaddr, sos_size_t size)
{
struct sos_umem_vmm_vr * initial_vr, * vr;
if (hint_uaddr < SOS_PAGING_BASE_USER_ADDRESS)
hint_uaddr = SOS_PAGING_BASE_USER_ADDRESS;
if (hint_uaddr > SOS_PAGING_UPPER_USER_ADDRESS - size + 1)
return (sos_uaddr_t)NULL;
initial_vr = vr = find_enclosing_or_next_vr(as, hint_uaddr);
if (! vr)
/* Great, there is nothing after ! */
return hint_uaddr;
/* Scan the remaining VRs in the list */
do
{
/* Is there enough space /before/ that VR ? */
if (hint_uaddr + size <= vr->start)
/* Great ! */
return hint_uaddr;
/* Is there any VR /after/ this one, or do we have to wrap back
at the beginning of the user space ? */
if (vr->next_in_as->start >= hint_uaddr)
/* Ok, the next VR is really after us */
hint_uaddr = vr->start + vr->size;
else
{
/* No: wrapping up */
/* Is there any space before the end of user space ? */
if (hint_uaddr <= SOS_PAGING_UPPER_USER_ADDRESS - size)
return hint_uaddr;
hint_uaddr = SOS_PAGING_BASE_USER_ADDRESS;
}
/* Prepare to look after this VR */
vr = vr->next_in_as;
}
while (vr != initial_vr);
/* We scanned the whole circular list without finding a large
enough free interval */
return (sos_uaddr_t)NULL;
}
static void
as_account_change_of_vr_protection(struct sos_umem_vmm_as * as,
sos_bool_t is_shared,
sos_size_t size,
sos_ui32_t prev_access_rights,
sos_ui32_t new_access_rights)
{
if (prev_access_rights == new_access_rights)
return;
#define _UPDATE_VMSTAT(field,is_increment) \
({ if (is_increment > 0) \
as->field += size; \
else \
{ SOS_ASSERT_FATAL(as->field >= size); as->field -= size; } })
#define UPDATE_VMSTAT(field,is_increment) \
({ if (is_shared) _UPDATE_VMSTAT(vm_shrd.field, is_increment); \
_UPDATE_VMSTAT(vm_total.field, is_increment); \
SOS_ASSERT_FATAL(as->vm_total.field >= as->vm_shrd.field); })
if ( (new_access_rights & SOS_VM_MAP_PROT_WRITE)
&& !(prev_access_rights & SOS_VM_MAP_PROT_WRITE))
{
UPDATE_VMSTAT(rw, +1);
if (prev_access_rights & SOS_VM_MAP_PROT_READ)
UPDATE_VMSTAT(ro, -1);
}
else if ( !(new_access_rights & SOS_VM_MAP_PROT_WRITE)
&& (prev_access_rights & SOS_VM_MAP_PROT_WRITE))
{
if (new_access_rights & SOS_VM_MAP_PROT_READ)
UPDATE_VMSTAT(ro, +1);
UPDATE_VMSTAT(rw, -1);
}
else if (new_access_rights & SOS_VM_MAP_PROT_READ)
UPDATE_VMSTAT(ro, +1);
else if (!(new_access_rights & SOS_VM_MAP_PROT_READ))
UPDATE_VMSTAT(ro, -1);
if ( (new_access_rights & SOS_VM_MAP_PROT_EXEC)
&& !(prev_access_rights & SOS_VM_MAP_PROT_EXEC))
{
UPDATE_VMSTAT(code, +1);
}
else if ( !(new_access_rights & SOS_VM_MAP_PROT_EXEC)
&& (prev_access_rights & SOS_VM_MAP_PROT_EXEC))
{
UPDATE_VMSTAT(code, -1);
}
if (new_access_rights && !prev_access_rights)
UPDATE_VMSTAT(overall, +1);
else if (!new_access_rights && prev_access_rights)
UPDATE_VMSTAT(overall, -1);
}
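/*
 * Worked example (editor's addition): mapping a 2-page private
 * read/write region reaches this function with is_shared = FALSE,
 * size = 2 * SOS_PAGE_SIZE and prev_access_rights = 0. This triggers
 * UPDATE_VMSTAT(rw, +1) and UPDATE_VMSTAT(overall, +1), ie vm_total.rw
 * and vm_total.overall each grow by 2 * SOS_PAGE_SIZE while the
 * vm_shrd counters are left untouched.
 */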