sos-code-article10/drivers/zero.c

/* Copyright (C) 2005  David Decotigny

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License
   as published by the Free Software Foundation; either version 2
   of the License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
   USA.
*/

#include <sos/assert.h>
#include <sos/kmalloc.h>
#include <sos/physmem.h>
#include <hwcore/paging.h>
#include <sos/kmem_slab.h>
#include <sos/list.h>
#include <sos/uaccess.h>
#include <sos/chardev.h>
#include <drivers/devices.h>

#include "zero.h"

/**
 * A mapped page for a shared mapping of /dev/zero
 */
struct zero_mapped_page
{
  sos_uoffset_t  page_id;
  sos_paddr_t    ppage_paddr;

  struct zero_mapped_page *prev, *next;
};

/** The Slab cache of shared mapped pages */
struct sos_kslab_cache * cache_of_zero_mapped_pages;

/**
 * A mapped /dev/zero resource
 */
struct zero_mapped_resource
{
  int ref_cnt;

  /**
   * For shared mappings: the list of shared pages mapped inside one
   * or multiple VRs
   */
  struct zero_mapped_page *list_mapped_pages;

  struct sos_umem_vmm_mapped_resource mr;
};

/** Forward declaration: the FS operations for the /dev/zero character
    device */
static struct sos_chardev_ops dev_zero_fs_ops;

/** Helper function to insert the given physical page in the list of
    physical pages used for shared anonymous mappings */
static sos_ret_t insert_anonymous_physpage(struct zero_mapped_resource *mr,
                                           sos_paddr_t ppage_paddr,
                                           sos_uoffset_t page_id);

/** Helper function to retrieve the physical page, if any, already
    used for the given page of a shared anonymous mapping */
static sos_paddr_t lookup_anonymous_physpage(struct zero_mapped_resource *mr,
                                             sos_uoffset_t page_id);

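/** Initialize the /dev/zero driver: create the slab cache used to
    track the physical pages of shared anonymous mappings, then
    register the character device class */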
sos_ret_t sos_dev_zero_subsystem_setup()
{
  sos_ret_t retval;

  cache_of_zero_mapped_pages =
    sos_kmem_cache_create("shared anonymous mappings",
                          sizeof(struct zero_mapped_page),
                          1, 0,
                          SOS_KSLAB_CREATE_MAP | SOS_KSLAB_CREATE_ZERO);
  if (! cache_of_zero_mapped_pages)
    return -SOS_ENOMEM;

  retval = sos_chardev_register_class(SOS_CHARDEV_ZERO_MAJOR,
                                      & dev_zero_fs_ops,
                                      NULL);
  if (SOS_OK != retval)
    {
      sos_kmem_cache_destroy(cache_of_zero_mapped_pages);
      return retval;
    }

  return SOS_OK;
}

/** Called after the virtual region has been inserted inside its
    address space */
static void zero_ref(struct sos_umem_vmm_vr * vr)
{
  /* Retrieve the 'zero' structure associated with the mapped resource */
  struct zero_mapped_resource * zero_resource;
  zero_resource
    = (struct zero_mapped_resource*)
      sos_umem_vmm_get_mapped_resource_of_vr(vr)->custom_data;

  /* Increment ref counter */
  zero_resource->ref_cnt ++;
}

/** Called when the virtual region is removed from its address
    space */
static void zero_unref(struct sos_umem_vmm_vr * vr)
{
  /* Retrieve the 'zero' structure associated with the mapped resource */
  struct zero_mapped_resource * zero_resource;
  zero_resource
    = (struct zero_mapped_resource*)
      sos_umem_vmm_get_mapped_resource_of_vr(vr)->custom_data;

  /* Decrement ref counter */
  SOS_ASSERT_FATAL(zero_resource->ref_cnt > 0);
  zero_resource->ref_cnt --;

  /* Free the resource if it becomes unused */
  if (zero_resource->ref_cnt == 0)
    {
      /* Delete the list of anonymous shared mappings */
      struct zero_mapped_page *zmp;
      list_collapse(zero_resource->list_mapped_pages, zmp)
        {
          /* Unreference the underlying physical page */
          sos_physmem_unref_physpage(zmp->ppage_paddr);
          sos_kfree((sos_vaddr_t)zmp);
        }

      sos_kfree((sos_vaddr_t)zero_resource);
    }
}

/** MOST IMPORTANT callback ! Called when a thread page faults on the
    resource's mapping */
static sos_ret_t zero_page_in(struct sos_umem_vmm_vr * vr,
                              sos_uaddr_t uaddr,
                              sos_bool_t write_access)
{
  sos_ret_t retval = SOS_OK;
  sos_paddr_t ppage_paddr;
  sos_uoffset_t required_page_id;
  struct zero_mapped_resource * zero_resource;
  sos_ui32_t vr_prot, vr_flags;

  /* Retrieve the 'zero' structure associated with the mapped resource */
  zero_resource
    = (struct zero_mapped_resource*)
      sos_umem_vmm_get_mapped_resource_of_vr(vr)->custom_data;

  /* Retrieve access rights/flags of the VR */
  vr_prot  = sos_umem_vmm_get_prot_of_vr(vr);
  vr_flags = sos_umem_vmm_get_flags_of_vr(vr);

  /* Identifies the page in the mapping that's being paged-in */
  required_page_id = SOS_PAGE_ALIGN_INF(uaddr)
                     - sos_umem_vmm_get_start_of_vr(vr)
                     + sos_umem_vmm_get_offset_in_resource(vr);
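
  /* NB: required_page_id is the page-aligned offset of the faulting
     address inside the resource; it is the key used to look pages up
     in the shared-pages list below */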

  /* For shared mappings, check if there is a page already mapping the
     required address */
  if (vr_flags & SOS_VR_MAP_SHARED)
    {
      ppage_paddr = lookup_anonymous_physpage(zero_resource, required_page_id);
      if (NULL != (void*)ppage_paddr)
        {
          retval = sos_paging_map(ppage_paddr,
                                  SOS_PAGE_ALIGN_INF(uaddr),
                                  TRUE,
                                  vr_prot);
          return retval;
        }
    }

  /* For write accesses, directly maps a new page. For read accesses,
     simply map in the zero_page (and wait for COW to handle the next
     write accesses) */
  if (write_access)
    {
      /* Allocate a new page for the virtual address */
      ppage_paddr = sos_physmem_ref_physpage_new(FALSE);
      if (! ppage_paddr)
        return -SOS_ENOMEM;

      retval = sos_paging_map(ppage_paddr,
                              SOS_PAGE_ALIGN_INF(uaddr),
                              TRUE,
                              vr_prot);
      if (SOS_OK != retval)
        {
          sos_physmem_unref_physpage(ppage_paddr);
          return retval;
        }

      memset((void*)SOS_PAGE_ALIGN_INF(uaddr), 0x0, SOS_PAGE_SIZE);

      /* For shared mappings, add the page in the list of shared
         mapped pages */
      if (vr_flags & SOS_VR_MAP_SHARED)
        insert_anonymous_physpage(zero_resource, ppage_paddr,
                                  required_page_id);
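
      /* The new page is now referenced by the page-table mapping just
         installed (and, for shared mappings, by the shared-pages
         list), so the initial reference taken by
         sos_physmem_ref_physpage_new() can be released */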
      sos_physmem_unref_physpage(ppage_paddr);
    }
  else
    {
      /* Map-in the zero page in READ ONLY whatever the access_rights
         or the type (shared/private) of the VR to activate COW */
      retval = sos_paging_map(sos_zero_physpage,
                              SOS_PAGE_ALIGN_INF(uaddr),
                              TRUE,
                              SOS_VM_MAP_PROT_READ);
    }

  return retval;
}

/** The callbacks for a mapped /dev/zero resource */
static struct sos_umem_vmm_vr_ops zero_ops = (struct sos_umem_vmm_vr_ops)
{
  .ref     = zero_ref,
  .unref   = zero_unref,
  .page_in = zero_page_in,
  .unmap   = NULL
};

/** The callback that gets called when the resource gets mapped */
static sos_ret_t zero_mmap(struct sos_umem_vmm_vr *vr)
{
  return sos_umem_vmm_set_ops_of_vr(vr, &zero_ops);
}

/** The function responsible for mapping the /dev/zero resource in
    user space */
sos_ret_t sos_dev_zero_map(struct sos_umem_vmm_as * dest_as,
                           sos_uaddr_t *uaddr,
                           sos_size_t size,
                           sos_ui32_t access_rights,
                           sos_ui32_t flags)
{
  sos_ret_t retval;
  struct zero_mapped_resource * zero_resource;

  zero_resource
    = (struct zero_mapped_resource*) sos_kmalloc(sizeof(*zero_resource), 0);
  if (! zero_resource)
    return -SOS_ENOMEM;
  memset(zero_resource, 0x0, sizeof(*zero_resource));
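
  /* ref_cnt is left at 0 here: the zero_ref() callback increments it
     once the new VR has actually been inserted in the address space
     by sos_umem_vmm_map() below */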

  zero_resource->mr.allowed_access_rights
    = SOS_VM_MAP_PROT_READ
      | SOS_VM_MAP_PROT_WRITE
      | SOS_VM_MAP_PROT_EXEC;
  zero_resource->mr.flags |= SOS_MAPPED_RESOURCE_ANONYMOUS;
  zero_resource->mr.custom_data = zero_resource;
  zero_resource->mr.mmap = zero_mmap;

  retval = sos_umem_vmm_map(dest_as, uaddr, size,
                            access_rights, flags,
                            &zero_resource->mr, 0);
  if (SOS_OK != retval)
    {
      sos_kfree((sos_vaddr_t)zero_resource);
      return retval;
    }

  return SOS_OK;
}

static sos_ret_t insert_anonymous_physpage(struct zero_mapped_resource *mr,
                                           sos_paddr_t ppage_paddr,
                                           sos_uoffset_t page_id)
{
  struct zero_mapped_page * zmp
    = (struct zero_mapped_page*)sos_kmem_cache_alloc(cache_of_zero_mapped_pages,
                                                     0);
  if (! zmp)
    return -SOS_ENOMEM;

  zmp->page_id     = page_id;
  zmp->ppage_paddr = ppage_paddr;
  list_add_head(mr->list_mapped_pages, zmp);
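
  /* Take an additional reference on the physical page: it must stay
     allocated as long as it is recorded in the shared-pages list,
     even after every process has unmapped it from its page tables */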
  sos_physmem_ref_physpage_at(ppage_paddr);

  return SOS_OK;
}

static sos_paddr_t lookup_anonymous_physpage(struct zero_mapped_resource *mr,
                                             sos_uoffset_t page_id)
{
  struct zero_mapped_page * zmp;
  int nb_elts;
  list_foreach_forward(mr->list_mapped_pages, zmp, nb_elts)
    {
      if (zmp->page_id == page_id)
        return zmp->ppage_paddr;
    }

  return (sos_paddr_t)NULL;
}

/*
 * /dev/zero character device FS operations
 */

static sos_ret_t dev_zero_fs_open(struct sos_fs_node * fsnode,
                                  struct sos_fs_opened_file * of,
                                  void * chardev_class_custom_data)
{
  /* Make sure the device instance is known to the driver */
  if ( (SOS_CHARDEV_NULL_MINOR != fsnode->dev_id.device_instance)
       && (SOS_CHARDEV_ZERO_MINOR != fsnode->dev_id.device_instance) )
    return -SOS_ENODEV;

  return SOS_OK;
}

static sos_ret_t dev_zero_fs_seek(struct sos_fs_opened_file *this,
                                  sos_lsoffset_t offset,
                                  sos_seek_whence_t whence,
                                  /* out */ sos_lsoffset_t * result_position)
{
  /* Artificially update the position in the "file" */
  sos_lsoffset_t ref_offs;
  *result_position = this->position;

  switch (whence)
    {
    case SOS_SEEK_SET:
      ref_offs = 0;
      break;

    case SOS_SEEK_CUR:
      ref_offs = this->position;
      break;

    case SOS_SEEK_END:
      return -SOS_ENOSUP;
      break;

    default:
      return -SOS_EINVAL;
    }
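
  /* Reject seeks that would move the position before the beginning
     of the "file", i.e. to a negative absolute offset */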
  if (offset < -ref_offs)
    return -SOS_EINVAL;

  this->position = ref_offs + offset;
  *result_position = this->position;
  return SOS_OK;
}

static sos_ret_t dev_zero_fs_read(struct sos_fs_opened_file *this,
                                  sos_uaddr_t dest_buf,
                                  sos_size_t * /* in/out */len)
{
  struct sos_fs_node * fsnode = sos_fs_nscache_get_fs_node(this->direntry);
  sos_size_t offs, rdlen;

  /* Reading /dev/null returns immediately */
  if (SOS_CHARDEV_NULL_MINOR == fsnode->dev_id.device_instance)
    {
      *len = 0;
      return SOS_OK;
    }

  /* ZERO the destination buffer using the zero page (by page_size
     increments) */
  for (rdlen = offs = 0 ; offs < *len ; offs += SOS_PAGE_SIZE)
    {
      sos_ret_t retval;
      sos_size_t memcpy_len = SOS_PAGE_SIZE;

      if (offs + memcpy_len > *len)
        memcpy_len = *len - offs;

      retval = sos_memcpy_to_user(dest_buf + offs, sos_zero_kernelpage,
                                  memcpy_len);
      if (retval < 0)
        break;

      rdlen += retval;
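
      /* Stop on a short copy: the remainder of the user buffer is
         not accessible */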
      if (retval != (sos_ret_t)memcpy_len)
        break;
    }

  /* Artificially update the position in the "file" */
  *len = rdlen;
  this->position += rdlen;
  return SOS_OK;
}

static sos_ret_t dev_zero_fs_write(struct sos_fs_opened_file *this,
                                   sos_uaddr_t src_buf,
                                   sos_size_t * /* in/out */len)
{
  /* Data written to /dev/zero or /dev/null is discarded: just
     artificially update the position in the "file" */
  this->position += *len;
  return SOS_OK;
}

static sos_ret_t dev_zero_fs_mmap(struct sos_fs_opened_file *this,
                                  sos_uaddr_t *uaddr, sos_size_t size,
                                  sos_ui32_t access_rights,
                                  sos_ui32_t flags,
                                  sos_luoffset_t offset)
{
  return sos_dev_zero_map(sos_process_get_address_space(this->owner),
                          uaddr, size, access_rights, flags);
}

static struct sos_chardev_ops dev_zero_fs_ops
  = (struct sos_chardev_ops) {
    .open  = dev_zero_fs_open,
    .close = NULL,
    .seek  = dev_zero_fs_seek,
    .read  = dev_zero_fs_read,
    .write = dev_zero_fs_write,
    .mmap  = dev_zero_fs_mmap,
    .fcntl = NULL,
    .ioctl = NULL
  };
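
/*
 * Overall flow (summary): user code opens /dev/zero (or /dev/null)
 * and the FS layer dispatches to dev_zero_fs_ops registered above.
 * An mmap() of the opened file goes through dev_zero_fs_mmap() ->
 * sos_dev_zero_map(), which allocates a zero_mapped_resource and maps
 * it in the destination address space; zero_mmap() then attaches the
 * zero_ops VR callbacks. Every page fault inside the mapping ends up
 * in zero_page_in(): read faults map the global zero page read-only
 * (deferring to copy-on-write), while write faults allocate and map a
 * zeroed physical page, which is also recorded in the resource's
 * shared-pages list for shared mappings.
 */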