sos-code-article10/drivers/mem.c

/* Copyright (C) 2005  David Decotigny

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License
   as published by the Free Software Foundation; either version 2
   of the License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
   USA.
*/

#include <sos/assert.h>
#include <sos/kmalloc.h>
#include <sos/physmem.h>
#include <hwcore/paging.h>
#include <sos/kmem_slab.h>
#include <sos/list.h>
#include <drivers/devices.h>
#include <sos/kmem_vmm.h>
#include <sos/uaccess.h>
#include <sos/chardev.h>
#include "mem.h"

/**
 * A mapped mem/kmem resource
 */
struct kernel_remapped_resource
{
  int ref_cnt;

  struct sos_umem_vmm_mapped_resource mr;
};
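
/* Lifecycle note: one such descriptor is allocated per mmap() request
   (see sos_dev_kmem_map() and sos_dev_physmem_map() below). Its ref_cnt
   is incremented each time a virtual region referencing it is inserted
   into an address space (resource_ref) and decremented when the region
   is removed (resource_unref); the descriptor is freed once the counter
   drops back to 0. */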

/** Called after the virtual region has been inserted inside its
    address space */
static void resource_ref(struct sos_umem_vmm_vr * vr)
{
  /* Retrieve the mem/kmem structure associated with the mapped resource */
  struct kernel_remapped_resource * resource;
  resource
    = (struct kernel_remapped_resource*)
      sos_umem_vmm_get_mapped_resource_of_vr(vr)->custom_data;

  /* Increment the reference counter */
  resource->ref_cnt ++;
}

/** Called when the virtual region is removed from its address
    space */
static void resource_unref(struct sos_umem_vmm_vr * vr)
{
  /* Retrieve the mem/kmem structure associated with the mapped resource */
  struct kernel_remapped_resource * resource;
  resource
    = (struct kernel_remapped_resource*)
      sos_umem_vmm_get_mapped_resource_of_vr(vr)->custom_data;

  /* Decrement the reference counter */
  SOS_ASSERT_FATAL(resource->ref_cnt > 0);
  resource->ref_cnt --;

  /* Free the resource if it becomes unused */
  if (resource->ref_cnt == 0)
    sos_kfree((sos_vaddr_t)resource);
}

/** MOST IMPORTANT callback ! Called when a thread page faults on the
    resource's mapping */
static sos_ret_t kmem_page_in(struct sos_umem_vmm_vr * vr,
                              sos_uaddr_t uaddr,
                              sos_bool_t write_access)
{
  sos_vaddr_t vaddr;
  sos_ret_t   retval = SOS_OK;
  sos_paddr_t ppage_paddr;

  /* Compute the address of the kernel page backing this user address */
  vaddr = uaddr - sos_umem_vmm_get_start_of_vr(vr)
          + sos_umem_vmm_get_offset_in_resource(vr);

  /* Don't allow demand paging of non kernel pages */
  if (! SOS_PAGING_IS_KERNEL_AREA(vaddr, 1))
    return -SOS_EFAULT;

  /* Lookup the physical kernel page */
  ppage_paddr = sos_paging_get_paddr(SOS_PAGE_ALIGN_INF(vaddr));

  /* Cannot access unmapped kernel pages */
  if (! ppage_paddr)
    return -SOS_EFAULT;

  /* Remap it in user space */
  retval = sos_paging_map(ppage_paddr,
                          SOS_PAGE_ALIGN_INF(uaddr),
                          TRUE,
                          sos_umem_vmm_get_prot_of_vr(vr));

  return retval;
}

/** The callbacks for a mapped kmem resource */
static struct sos_umem_vmm_vr_ops kmem_ops = (struct sos_umem_vmm_vr_ops)
{
  .ref     = resource_ref,
  .unref   = resource_unref,
  .page_in = kmem_page_in,
};


/** The callback that gets called when the resource gets mapped */
static sos_ret_t kmem_mmap(struct sos_umem_vmm_vr *vr)
{
  return sos_umem_vmm_set_ops_of_vr(vr, &kmem_ops);
}

/** The function responsible for mapping the /dev/kmem resource in
    user space */
static
sos_ret_t sos_dev_kmem_map(struct sos_umem_vmm_as * dest_as,
                           sos_uaddr_t *uaddr,
                           sos_size_t size,
                           sos_vaddr_t offset,
                           sos_ui32_t access_rights,
                           sos_ui32_t flags)
{
  sos_ret_t retval;
  struct kernel_remapped_resource * kmem_resource;

  /* Allocate a new "descriptor" for the resource */
  kmem_resource
    = (struct kernel_remapped_resource*) sos_kmalloc(sizeof(*kmem_resource),
                                                     0);
  if (! kmem_resource)
    return -SOS_ENOMEM;

  memset(kmem_resource, 0x0, sizeof(*kmem_resource));
  kmem_resource->mr.allowed_access_rights
    = SOS_VM_MAP_PROT_READ
    | SOS_VM_MAP_PROT_WRITE
    | SOS_VM_MAP_PROT_EXEC;
  kmem_resource->mr.custom_data = kmem_resource;
  kmem_resource->mr.mmap        = kmem_mmap;

  /* Map it in user space */
  retval = sos_umem_vmm_map(dest_as, uaddr, size,
                            access_rights, flags,
                            & kmem_resource->mr, offset);
  if (SOS_OK != retval)
    {
      sos_kfree((sos_vaddr_t)kmem_resource);
      return retval;
    }

  return SOS_OK;
}

/** MOST IMPORTANT callback ! Called when a thread page faults on the
    resource's mapping */
static sos_ret_t physmem_page_in(struct sos_umem_vmm_vr * vr,
                                 sos_uaddr_t uaddr,
                                 sos_bool_t write_access)
{
  sos_ret_t   retval = SOS_OK;
  sos_paddr_t ppage_paddr;

  /* Compute the physical address of the page backing this user address */
  ppage_paddr = uaddr - sos_umem_vmm_get_start_of_vr(vr)
                + sos_umem_vmm_get_offset_in_resource(vr);

  /* Remap the page in user space */
  retval = sos_paging_map(SOS_PAGE_ALIGN_INF(ppage_paddr),
                          SOS_PAGE_ALIGN_INF(uaddr),
                          TRUE,
                          sos_umem_vmm_get_prot_of_vr(vr));

  return retval;
}
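
/* Note the contrast with kmem_page_in() above: for /dev/mem the offset
   inside the resource is already a physical address, so no kernel-space
   check or virtual-to-physical lookup is needed here; the computed frame
   address is handed directly to sos_paging_map(), whose return value is
   what reports a failed mapping. */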

/** The callbacks for a mapped physmem resource */
static struct sos_umem_vmm_vr_ops physmem_ops = (struct sos_umem_vmm_vr_ops)
{
  .ref     = resource_ref,
  .unref   = resource_unref,
  .page_in = physmem_page_in,
};


/** The callback that gets called when the resource gets mapped */
static sos_ret_t physmem_mmap(struct sos_umem_vmm_vr *vr)
{
  return sos_umem_vmm_set_ops_of_vr(vr, &physmem_ops);
}

/** The function responsible for mapping the /dev/mem resource in
    user space */
static
sos_ret_t sos_dev_physmem_map(struct sos_umem_vmm_as * dest_as,
                              sos_uaddr_t *uaddr,
                              sos_size_t size,
                              sos_paddr_t offset,
                              sos_ui32_t access_rights,
                              sos_ui32_t flags)
{
  sos_ret_t retval;
  struct kernel_remapped_resource * physmem_resource;

  /* Allocate a new "descriptor" for the resource */
  physmem_resource
    = (struct kernel_remapped_resource*) sos_kmalloc(sizeof(*physmem_resource),
                                                     0);
  if (! physmem_resource)
    return -SOS_ENOMEM;

  memset(physmem_resource, 0x0, sizeof(*physmem_resource));
  physmem_resource->mr.allowed_access_rights
    = SOS_VM_MAP_PROT_READ
    | SOS_VM_MAP_PROT_WRITE
    | SOS_VM_MAP_PROT_EXEC;
  physmem_resource->mr.custom_data = physmem_resource;
  physmem_resource->mr.mmap        = physmem_mmap;

  /* Map it in user space */
  retval = sos_umem_vmm_map(dest_as, uaddr, size,
                            access_rights, flags,
                            & physmem_resource->mr, offset);
  if (SOS_OK != retval)
    {
      sos_kfree((sos_vaddr_t)physmem_resource);
      return retval;
    }

  return SOS_OK;
}

/*
 * /dev/mem and /dev/kmem character device operations
 *
 * The "custom_data" field of the FS node is used to store the size of
 * the device, in bytes (see dev_mem_fs_open below)
 */

#define GET_DEV_SIZE(fsnode) \
  ((sos_size_t)(fsnode)->custom_data)

static sos_ret_t dev_mem_fs_open(struct sos_fs_node * fsnode,
                                 struct sos_fs_opened_file * of,
                                 void * chardev_class_custom_data)
{
  /* Make sure the device is supported by this driver and compute its
     "size" (use the custom_data field to store it) */
  switch (fsnode->dev_id.device_instance)
    {
      /* For /dev/kmem, the device extends up to the end of the kernel
         mapping */
    case SOS_CHARDEV_KMEM_MINOR:
      fsnode->custom_data = (void*)SOS_PAGING_BASE_USER_ADDRESS;
      return SOS_OK;
      break;

      /* For /dev/mem, the device extends up to the end of physical
         memory */
    case SOS_CHARDEV_PHYSMEM_MINOR:
      {
        sos_size_t ram_pages = 0;
        sos_physmem_get_state(& ram_pages, NULL);
        fsnode->custom_data = (void*)(ram_pages << SOS_PAGE_SHIFT);
      }
      return SOS_OK;
      break;

    default:
      break;
    }

  return -SOS_ENODEV;
}
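
/* Worked example (assuming the usual x86 4 kB pages, i.e.
   SOS_PAGE_SHIFT == 12): on a machine with 32 MB of RAM,
   sos_physmem_get_state() reports ram_pages == 8192, so the size stored
   for /dev/mem is 8192 << 12 == 0x02000000 bytes; for /dev/kmem the size
   is simply SOS_PAGING_BASE_USER_ADDRESS, the first address that is no
   longer part of the kernel mapping. */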

static sos_ret_t dev_mem_fs_seek(struct sos_fs_opened_file *this,
                                 sos_lsoffset_t offset,
                                 sos_seek_whence_t whence,
                                 /* out */ sos_lsoffset_t * result_position)
{
  /* Make sure the device is supported by this driver */
  struct sos_fs_node * fsnode = sos_fs_nscache_get_fs_node(this->direntry);

  /* Artificially update the position in the "file" */
  sos_lsoffset_t ref_offs;
  sos_lsoffset_t dev_size = GET_DEV_SIZE(fsnode);

  *result_position = this->position;
  switch (whence)
    {
    case SOS_SEEK_SET:
      ref_offs = 0;
      break;

    case SOS_SEEK_CUR:
      ref_offs = this->position;
      break;

    case SOS_SEEK_END:
      ref_offs = dev_size;
      break;

    default:
      return -SOS_EINVAL;
    }

  /* Forbid accesses "before" the start of the device */
  if (offset < -ref_offs)
    return -SOS_EINVAL;

  /* Forbid accesses "after" the end of the device */
  else if (ref_offs + offset > dev_size)
    return -SOS_EINVAL;

  this->position = ref_offs + offset;
  *result_position = this->position;

  return SOS_OK;
}
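
/* Example of the arithmetic above: with whence == SOS_SEEK_END and
   offset == -16, ref_offs == dev_size and the new position becomes
   dev_size - 16; an offset of +1 with the same whence is rejected with
   -SOS_EINVAL because ref_offs + offset > dev_size. */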

typedef enum { DO_READ, DO_WRITE } dev_mem_access_type_t;

static sos_ret_t dev_mem_fs_access(struct sos_fs_opened_file *this,
                                   sos_uaddr_t user_buf,
                                   sos_size_t * /* in/out */len,
                                   dev_mem_access_type_t access_type)
{
  struct sos_fs_node * fsnode = sos_fs_nscache_get_fs_node(this->direntry);
  sos_vaddr_t physmem_transfer_kernel_page = 0; /* Used for /dev/mem only */
  sos_uoffset_t offs;
  sos_size_t accesslen = 0;

  /* Readjust the copy length to match the size of the device */
  if (this->position + *len >= GET_DEV_SIZE(fsnode))
    *len = GET_DEV_SIZE(fsnode) - this->position;

  /* Ignore zero-size requests */
  if (*len <= 0)
    return SOS_OK;

  /* For the /dev/mem device, prepare a kernel page used to copy the
     physical pages before transferring them to user space */
  if (SOS_CHARDEV_PHYSMEM_MINOR == fsnode->dev_id.device_instance)
    {
      physmem_transfer_kernel_page = sos_kmem_vmm_alloc(1, 0);
      if (! physmem_transfer_kernel_page)
        return -SOS_ENOMEM;
    }

  /* Try to copy the data in page-size chunks */
  offs = this->position;
  while (offs < this->position + *len)
    {
      /* Retrieve the page address of the data in kernel memory */
      sos_uoffset_t page_boundary = SOS_PAGE_ALIGN_INF(offs);
      sos_vaddr_t   page_vaddr;
      sos_uoffset_t offset_in_page;
      sos_uoffset_t accesslen_in_page;
      sos_ret_t     retval;

      /* For the /dev/mem device, we need to map the page in kernel
         memory first */
      if (SOS_CHARDEV_PHYSMEM_MINOR == fsnode->dev_id.device_instance)
        {
          retval = sos_paging_map(page_boundary,
                                  physmem_transfer_kernel_page,
                                  FALSE,
                                  (access_type == DO_WRITE)?
                                  SOS_VM_MAP_PROT_WRITE
                                  :SOS_VM_MAP_PROT_READ);
          if (SOS_OK != retval)
            break;

          page_vaddr = physmem_transfer_kernel_page;
        }

      /* For the /dev/kmem device, the page should already be in kernel
         space */
      else if (! sos_kmem_vmm_is_valid_vaddr(page_boundary))
        break; /* No: page is not mapped in kernel space ! */
      else
        page_vaddr = page_boundary; /* Yes, the page is mapped */

      /* Now copy the data between kernel and user space */
      offset_in_page    = offs - page_boundary;
      accesslen_in_page = SOS_PAGE_SIZE - offset_in_page;
      if (accesslen + accesslen_in_page > *len)
        accesslen_in_page = *len - accesslen;

      if (access_type == DO_WRITE)
        retval = sos_memcpy_from_user(page_vaddr + offset_in_page,
                                      user_buf + accesslen,
                                      accesslen_in_page);
      else
        retval = sos_memcpy_to_user(user_buf + accesslen,
                                    page_vaddr + offset_in_page,
                                    accesslen_in_page);

      /* Now, for /dev/mem, unmap the page from the kernel */
      if (SOS_CHARDEV_PHYSMEM_MINOR == fsnode->dev_id.device_instance)
        sos_paging_unmap(physmem_transfer_kernel_page);

      /* Stop here if the copy failed */
      if (retval < 0)
        break;

      accesslen += retval;

      /* If the transfer was interrupted, stop here */
      if (retval < (sos_ret_t)accesslen_in_page)
        break;

      /* Go on to the next page */
      offs = page_boundary + SOS_PAGE_SIZE;
    }

  /* Release the temporary page used for physical mem transfers */
  if (SOS_CHARDEV_PHYSMEM_MINOR == fsnode->dev_id.device_instance)
    sos_kmem_vmm_free(physmem_transfer_kernel_page);

  /* Update the position in the "file" */
  *len = accesslen;
  this->position += accesslen;

  return SOS_OK;
}
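
/* Note: dev_mem_fs_access() reports partial transfers through *len
   rather than through its return value: when a page cannot be mapped or
   a user copy stops short, the loop exits, *len is set to the number of
   bytes actually transferred so far and SOS_OK is still returned. */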

static sos_ret_t dev_mem_fs_read(struct sos_fs_opened_file *this,
                                 sos_uaddr_t dest_buf,
                                 sos_size_t * /* in/out */len)
{
  return dev_mem_fs_access(this, dest_buf, len, DO_READ);
}


static sos_ret_t dev_mem_fs_write(struct sos_fs_opened_file *this,
                                  sos_uaddr_t src_buf,
                                  sos_size_t * /* in/out */len)
{
  return dev_mem_fs_access(this, src_buf, len, DO_WRITE);
}

static sos_ret_t dev_mem_fs_mmap(struct sos_fs_opened_file *this,
                                 sos_uaddr_t *uaddr, sos_size_t size,
                                 sos_ui32_t access_rights,
                                 sos_ui32_t flags,
                                 sos_luoffset_t offset)
{
  struct sos_fs_node * fsnode = sos_fs_nscache_get_fs_node(this->direntry);

  if (SOS_CHARDEV_PHYSMEM_MINOR == fsnode->dev_id.device_instance)
    return sos_dev_physmem_map(sos_process_get_address_space(this->owner),
                               uaddr, size, offset, access_rights, flags);

  return sos_dev_kmem_map(sos_process_get_address_space(this->owner),
                          uaddr, size, offset, access_rights, flags);
}

static struct sos_chardev_ops dev_mem_fs_ops
  = (struct sos_chardev_ops) {
    .open  = dev_mem_fs_open,
    .close = NULL,
    .seek  = dev_mem_fs_seek,
    .read  = dev_mem_fs_read,
    .write = dev_mem_fs_write,
    .mmap  = dev_mem_fs_mmap,
    .fcntl = NULL,
    .ioctl = NULL
  };


sos_ret_t sos_dev_mem_chardev_setup()
{
  return sos_chardev_register_class(SOS_CHARDEV_MEM_MAJOR,
                                    & dev_mem_fs_ops,
                                    NULL);
}
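
/* Both /dev/mem and /dev/kmem are instances of the same character
   device class registered here under SOS_CHARDEV_MEM_MAJOR; the two are
   told apart at open() time by their minor number,
   SOS_CHARDEV_PHYSMEM_MINOR vs. SOS_CHARDEV_KMEM_MINOR (see
   dev_mem_fs_open() above). */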