/* Copyright (C) 2004,2005 David Decotigny

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License
   as published by the Free Software Foundation; either version 2
   of the License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
   USA.
*/

#include <sos/physmem.h>
#include <sos/kmem_slab.h>
#include <sos/kmalloc.h>
#include <sos/klibc.h>
#include <sos/list.h>
#include <sos/assert.h>
#include <hwcore/mm_context.h>
#include <sos/process.h>

#include <drivers/bochs.h>
#include <drivers/x86_videomem.h>

#include <hwcore/irq.h>

#include "thread.h"


/**
 * The size of the stack of a kernel thread
 */
#define SOS_THREAD_KERNEL_STACK_SIZE (1*SOS_PAGE_SIZE)


/**
 * The identifier of the thread currently running on the CPU.
 *
 * We only support a SINGLE processor, i.e. a SINGLE thread running at
 * any time in the system. This greatly simplifies the implementation,
 * since we don't have to complicate things in order to retrieve the
 * identifier of the thread running on each CPU. On multiprocessor
 * systems, the current_thread below would be an array indexed by the
 * id of the CPU, so that the challenge becomes retrieving the
 * identifier of the CPU itself. This is usually done based on the
 * stack address (Linux implementation) or on some form of TLS
 * ("Thread Local Storage": can be implemented by way of LDTs for the
 * processes, accessed through the fs or gs registers).
 */
static volatile struct sos_thread *current_thread = NULL;

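/*
 * Illustrative sketch (kept out of the build with #if 0): on a
 * multiprocessor port, current_thread would become a per-CPU array,
 * and each CPU could locate its own slot Linux-style by masking its
 * stack pointer, assuming every kernel stack is aligned on a
 * SOS_THREAD_KERNEL_STACK_SIZE boundary and stores the id of its CPU
 * at its bottom. SOS_MAX_CPUS and that stack layout are hypothetical,
 * not part of SOS.
 */
#if 0
static volatile struct sos_thread *current_thread_of_cpu[SOS_MAX_CPUS];

inline static struct sos_thread * smp_thread_get_current(void)
{
  sos_ui32_t esp, cpu_id;
  asm volatile("movl %%esp, %0" : "=r"(esp));
  /* The word at the bottom of the aligned stack holds the CPU id */
  cpu_id = *(sos_ui32_t*)(esp & ~(SOS_THREAD_KERNEL_STACK_SIZE - 1));
  return (struct sos_thread*) current_thread_of_cpu[cpu_id];
}
#endif
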
/*
 * The list of threads currently in the system.
 *
 * @note We could have used current_thread for that...
 */
static struct sos_thread *thread_list = NULL;


/**
 * The cache of thread structures
 */
static struct sos_kslab_cache *cache_thread;


struct sos_thread *sos_thread_get_current()
{
  SOS_ASSERT_FATAL(current_thread->state == SOS_THR_RUNNING);
  return (struct sos_thread*)current_thread;
}


inline static sos_ret_t _set_current(struct sos_thread *thr)
{
  SOS_ASSERT_FATAL(thr->state == SOS_THR_READY);
  current_thread = thr;
  current_thread->state = SOS_THR_RUNNING;
  return SOS_OK;
}


sos_ret_t sos_thread_subsystem_setup(sos_vaddr_t init_thread_stack_base_addr,
                                     sos_size_t init_thread_stack_size)
{
  struct sos_thread *myself;

  /* Allocate the cache of threads */
  cache_thread = sos_kmem_cache_create("thread",
                                       sizeof(struct sos_thread),
                                       2,
                                       0,
                                       SOS_KSLAB_CREATE_MAP
                                       | SOS_KSLAB_CREATE_ZERO);
  if (! cache_thread)
    return -SOS_ENOMEM;

  /* Allocate a new thread structure for the current running thread */
  myself = (struct sos_thread*) sos_kmem_cache_alloc(cache_thread,
                                                     SOS_KSLAB_ALLOC_ATOMIC);
  if (! myself)
    return -SOS_ENOMEM;

  /* Initialize the thread attributes */
  strzcpy(myself->name, "[kinit]", SOS_THR_MAX_NAMELEN);
  myself->state                  = SOS_THR_CREATED;
  myself->priority               = SOS_SCHED_PRIO_LOWEST;
  myself->kernel_stack_base_addr = init_thread_stack_base_addr;
  myself->kernel_stack_size      = init_thread_stack_size;

  /* Do some stack poisoning on the bottom of the stack, if needed */
  sos_cpu_state_prepare_detect_kernel_stack_overflow(myself->cpu_state,
                                                     myself->kernel_stack_base_addr,
                                                     myself->kernel_stack_size);

  /* Add the thread in the global list */
  list_singleton_named(thread_list, myself, gbl_prev, gbl_next);

  /* Ok, now pretend that the running thread is ourselves */
  myself->state = SOS_THR_READY;
  _set_current(myself);

  return SOS_OK;
}


struct sos_thread *
sos_create_kernel_thread(const char *name,
                         sos_kernel_thread_start_routine_t start_func,
                         void *start_arg,
                         sos_sched_priority_t priority)
{
  __label__ undo_creation;
  sos_ui32_t flags;
  struct sos_thread *new_thread;

  if (! start_func)
    return NULL;
  if (! SOS_SCHED_PRIO_IS_VALID(priority))
    return NULL;

  /* Allocate a new thread structure for the new thread */
  new_thread
    = (struct sos_thread*) sos_kmem_cache_alloc(cache_thread,
                                                SOS_KSLAB_ALLOC_ATOMIC);
  if (! new_thread)
    return NULL;

  /* Initialize the thread attributes */
  strzcpy(new_thread->name, ((name)?name:"[NONAME]"), SOS_THR_MAX_NAMELEN);
  new_thread->state    = SOS_THR_CREATED;
  new_thread->priority = priority;

  /* Allocate the stack for the new thread */
  new_thread->kernel_stack_base_addr = sos_kmalloc(SOS_THREAD_KERNEL_STACK_SIZE, 0);
  new_thread->kernel_stack_size      = SOS_THREAD_KERNEL_STACK_SIZE;
  if (! new_thread->kernel_stack_base_addr)
    goto undo_creation;

  /* Initialize the CPU context of the new thread */
  if (SOS_OK
      != sos_cpu_kstate_init(& new_thread->cpu_state,
                             (sos_cpu_kstate_function_arg1_t*) start_func,
                             (sos_ui32_t) start_arg,
                             new_thread->kernel_stack_base_addr,
                             new_thread->kernel_stack_size,
                             (sos_cpu_kstate_function_arg1_t*) sos_thread_exit,
                             (sos_ui32_t) NULL))
    goto undo_creation;

  /* Add the thread in the global list */
  sos_disable_IRQs(flags);
  list_add_tail_named(thread_list, new_thread, gbl_prev, gbl_next);
  sos_restore_IRQs(flags);

  /* Mark the thread ready */
  if (SOS_OK != sos_sched_set_ready(new_thread))
    goto undo_creation;

  /* Normal non-erroneous end of function */
  return new_thread;

 undo_creation:
  if (new_thread->kernel_stack_base_addr)
    sos_kfree((sos_vaddr_t) new_thread->kernel_stack_base_addr);
  sos_kmem_cache_free((sos_vaddr_t) new_thread);
  return NULL;
}

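/*
 * Usage sketch (illustrative only): spawn a kernel thread running the
 * hypothetical function demo_func("hello") at the lowest priority.
 * A NULL return value is the only failure indication.
 */
#if 0
  struct sos_thread *t
    = sos_create_kernel_thread("demo", demo_func, (void*)"hello",
                               SOS_SCHED_PRIO_LOWEST);
  if (! t)
    sos_bochs_printf("demo thread creation failed\n");
#endif
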
/**
 * Helper function to create a new user thread. If model_thread is
 * given, then the new thread will be a copy of this thread. Otherwise
 * the thread will have its initial SP/PC correctly initialized with
 * the user_initial_PC/SP arguments
 */
static struct sos_thread *
create_user_thread(const char *name,
                   struct sos_process *process,
                   const struct sos_thread * model_thread,
                   const struct sos_cpu_state * model_uctxt,
                   sos_uaddr_t user_initial_PC,
                   sos_ui32_t user_start_arg1,
                   sos_ui32_t user_start_arg2,
                   sos_uaddr_t user_initial_SP,
                   sos_sched_priority_t priority)
{
  __label__ undo_creation;
  sos_ui32_t flags;
  struct sos_thread *new_thread;

  if (model_thread)
    {
      SOS_ASSERT_FATAL(model_uctxt);
    }
  else
    {
      if (! SOS_SCHED_PRIO_IS_VALID(priority))
        return NULL;
    }

  /* For a user thread, the process must be given */
  if (! process)
    return NULL;

  /* Allocate a new thread structure for the new thread */
  new_thread
    = (struct sos_thread*) sos_kmem_cache_alloc(cache_thread,
                                                SOS_KSLAB_ALLOC_ATOMIC);
  if (! new_thread)
    return NULL;

  /* Initialize the thread attributes */
  strzcpy(new_thread->name, ((name)?name:"[NONAME]"), SOS_THR_MAX_NAMELEN);
  new_thread->state = SOS_THR_CREATED;
  if (model_thread)
    new_thread->priority = model_thread->priority;
  else
    new_thread->priority = priority;

  /* Allocate the stack for the new thread */
  new_thread->kernel_stack_base_addr = sos_kmalloc(SOS_THREAD_KERNEL_STACK_SIZE, 0);
  new_thread->kernel_stack_size      = SOS_THREAD_KERNEL_STACK_SIZE;
  if (! new_thread->kernel_stack_base_addr)
    goto undo_creation;

  /* Initialize the CPU context of the new thread */
  if (model_thread)
    {
      if (SOS_OK
          != sos_cpu_ustate_duplicate(& new_thread->cpu_state,
                                      model_uctxt,
                                      user_start_arg1,
                                      new_thread->kernel_stack_base_addr,
                                      new_thread->kernel_stack_size))
        goto undo_creation;
    }
  else
    {
      if (SOS_OK
          != sos_cpu_ustate_init(& new_thread->cpu_state,
                                 user_initial_PC,
                                 user_start_arg1,
                                 user_start_arg2,
                                 user_initial_SP,
                                 new_thread->kernel_stack_base_addr,
                                 new_thread->kernel_stack_size))
        goto undo_creation;
    }

  /* Attach the new thread to the process */
  if (SOS_OK != sos_process_register_thread(process, new_thread))
    goto undo_creation;

  /* Add the thread in the global list */
  sos_disable_IRQs(flags);
  list_add_tail_named(thread_list, new_thread, gbl_prev, gbl_next);
  sos_restore_IRQs(flags);

  /* Mark the thread ready */
  if (SOS_OK != sos_sched_set_ready(new_thread))
    goto undo_creation;

  /* Normal non-erroneous end of function */
  return new_thread;

 undo_creation:
  if (new_thread->kernel_stack_base_addr)
    sos_kfree((sos_vaddr_t) new_thread->kernel_stack_base_addr);
  sos_kmem_cache_free((sos_vaddr_t) new_thread);
  return NULL;
}


struct sos_thread *
sos_create_user_thread(const char *name,
                       struct sos_process *process,
                       sos_uaddr_t user_initial_PC,
                       sos_ui32_t user_start_arg1,
                       sos_ui32_t user_start_arg2,
                       sos_uaddr_t user_initial_SP,
                       sos_sched_priority_t priority)
{
  return create_user_thread(name, process, NULL, NULL,
                            user_initial_PC,
                            user_start_arg1,
                            user_start_arg2,
                            user_initial_SP,
                            priority);
}


/**
 * Create a new user thread, copy of the given user thread with the
 * given user context
 */
struct sos_thread *
sos_duplicate_user_thread(const char *name,
                          struct sos_process *process,
                          const struct sos_thread * model_thread,
                          const struct sos_cpu_state * model_uctxt,
                          sos_ui32_t retval)
{
  return create_user_thread(name, process, model_thread, model_uctxt,
                            0, retval, 0, 0, 0);
}

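/*
 * Usage sketch (illustrative only): inside a hypothetical fork()-style
 * syscall handler, duplicate the calling thread into the new process,
 * with retval=0 as the value the child side gets back from the
 * syscall. parent_thread, child_process and user_ctxt are hypothetical
 * names, not SOS symbols.
 */
#if 0
  struct sos_thread *child
    = sos_duplicate_user_thread("fork", child_process,
                                parent_thread, user_ctxt,
                                0 /* child's syscall return value */);
#endif
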
/**
 * Helper function to switch to the correct MMU configuration to suit
 * the_thread's needs:
 * - When switching to a user-mode thread, force the reconfiguration
 *   of the MMU
 * - When switching to a kernel-mode thread, only change the MMU
 *   configuration if the thread was squatting someone else's space
 */
static void _prepare_mm_context(struct sos_thread *the_thread)
{
  /* Going to restore a thread in user mode ? */
  if (sos_cpu_context_is_in_user_mode(the_thread->cpu_state)
      == TRUE)
    {
      /* Yes: force the MMU to be correctly setup with the correct
         user's address space */

      /* The thread should be a user thread */
      SOS_ASSERT_FATAL(the_thread->process != NULL);

      /* It should not squat any other's address space */
      SOS_ASSERT_FATAL(the_thread->squatted_address_space == NULL);

      /* Perform an MMU context switch if needed */
      sos_umem_vmm_set_current_as(sos_process_get_address_space(the_thread->process));
    }

  /* Restore the address space currently in use */
  else
    sos_umem_vmm_set_current_as(the_thread->squatted_address_space);
}


/** Function called after thr has terminated. Called from inside the context
    of another thread, interrupts disabled */
static void delete_thread(struct sos_thread *thr)
{
  sos_ui32_t flags;

  sos_disable_IRQs(flags);
  list_delete_named(thread_list, thr, gbl_prev, gbl_next);
  sos_restore_IRQs(flags);

  sos_kfree((sos_vaddr_t) thr->kernel_stack_base_addr);

  /* Not allowed to squat any user space at deletion time */
  SOS_ASSERT_FATAL(NULL == thr->squatted_address_space);

  /* For a user thread: remove the thread from the process threads' list */
  if (thr->process)
    SOS_ASSERT_FATAL(SOS_OK == sos_process_unregister_thread(thr));

  memset(thr, 0x0, sizeof(struct sos_thread));
  sos_kmem_cache_free((sos_vaddr_t) thr);
}


void sos_thread_exit()
{
  sos_ui32_t flags;
  struct sos_thread *myself, *next_thread;

  /* Interrupt handlers are NOT allowed to exit the current thread ! */
  SOS_ASSERT_FATAL(! sos_servicing_irq());

  myself = sos_thread_get_current();

  /* Refuse to end the current executing thread if it still holds a
     resource ! */
  SOS_ASSERT_FATAL(list_is_empty_named(myself->kwaitq_list,
                                       prev_entry_for_thread,
                                       next_entry_for_thread));

  /* Prepare to run the next thread */
  sos_disable_IRQs(flags);
  myself->state = SOS_THR_ZOMBIE;
  next_thread = sos_reschedule(myself, FALSE);

  /* Make sure that the next_thread is valid */
  sos_cpu_state_detect_kernel_stack_overflow(next_thread->cpu_state,
                                             next_thread->kernel_stack_base_addr,
                                             next_thread->kernel_stack_size);

  /* Perform an MMU context switch if needed */
  _prepare_mm_context(next_thread);

  /* No need for sos_restore_IRQs() here because the IRQ flag will be
     restored to that of the next thread upon context switch */

  /* Immediate switch to next thread */
  _set_current(next_thread);
  sos_cpu_context_exit_to(next_thread->cpu_state,
                          (sos_cpu_kstate_function_arg1_t*) delete_thread,
                          (sos_ui32_t) myself);
}


sos_sched_priority_t sos_thread_get_priority(struct sos_thread *thr)
{
  if (! thr)
    thr = (struct sos_thread*)current_thread;

  return thr->priority;
}


sos_thread_state_t sos_thread_get_state(struct sos_thread *thr)
{
  if (! thr)
    thr = (struct sos_thread*)current_thread;

  return thr->state;
}


typedef enum { YIELD_MYSELF, BLOCK_MYSELF } switch_type_t;

/**
 * Helper function to initiate a context switch in case the current
 * thread becomes blocked, waiting for a timeout, or calls yield.
 */
static sos_ret_t _switch_to_next_thread(switch_type_t operation)
{
  struct sos_thread *myself, *next_thread;

  SOS_ASSERT_FATAL(current_thread->state == SOS_THR_RUNNING);

  /* Interrupt handlers are NOT allowed to block ! */
  SOS_ASSERT_FATAL(! sos_servicing_irq());

  myself = (struct sos_thread*)current_thread;

  /* Make sure that, if we are to be marked "BLOCKED", there is a
     reason for us to be effectively blocked */
  if (BLOCK_MYSELF == operation)
    {
      myself->state = SOS_THR_BLOCKED;
    }

  /* Identify the next thread */
  next_thread = sos_reschedule(myself, YIELD_MYSELF == operation);

  /* Avoid context switch if the context does not change */
  if (myself != next_thread)
    {
      /* Sanity checks for the next thread */
      sos_cpu_state_detect_kernel_stack_overflow(next_thread->cpu_state,
                                                 next_thread->kernel_stack_base_addr,
                                                 next_thread->kernel_stack_size);

      /* Perform an MMU context switch if needed */
      _prepare_mm_context(next_thread);

      /* Actual CPU context switch */
      _set_current(next_thread);
      sos_cpu_context_switch(& myself->cpu_state, next_thread->cpu_state);

      /* Back here ! */
      SOS_ASSERT_FATAL(current_thread == myself);
      SOS_ASSERT_FATAL(current_thread->state == SOS_THR_RUNNING);
    }
  else
    {
      /* No context switch but still update ID of current thread */
      _set_current(next_thread);
    }

  return SOS_OK;
}


/**
 * Helper function to change the thread's priority in all the
 * waitqueues associated with the thread.
 */
static sos_ret_t _change_waitq_priorities(struct sos_thread *thr,
                                          sos_sched_priority_t priority)
{
  struct sos_kwaitq_entry *kwq_entry;
  int nb_waitqs;

  list_foreach_forward_named(thr->kwaitq_list, kwq_entry, nb_waitqs,
                             prev_entry_for_thread, next_entry_for_thread)
    {
      SOS_ASSERT_FATAL(SOS_OK == sos_kwaitq_change_priority(kwq_entry->kwaitq,
                                                            kwq_entry,
                                                            priority));
    }

  return SOS_OK;
}


sos_ret_t sos_thread_set_priority(struct sos_thread *thr,
                                  sos_sched_priority_t priority)
{
  __label__ exit_set_prio;
  sos_ui32_t flags;
  sos_ret_t retval;

  if (! SOS_SCHED_PRIO_IS_VALID(priority))
    return -SOS_EINVAL;

  if (! thr)
    thr = (struct sos_thread*)current_thread;

  sos_disable_IRQs(flags);

  /* Signal the kwaitq subsystem that the priority of the thread should
     be updated in all the waitqs it is waiting in */
  retval = _change_waitq_priorities(thr, priority);
  if (SOS_OK != retval)
    goto exit_set_prio;

  /* Signal the scheduler that the thread, currently in its ready list,
     should take the change of priority into account */
  if (SOS_THR_READY == thr->state)
    retval = sos_sched_change_priority(thr, priority);

  /* Update priority */
  thr->priority = priority;

 exit_set_prio:
  sos_restore_IRQs(flags);
  return retval;
}


sos_ret_t sos_thread_yield()
{
  sos_ui32_t flags;
  sos_ret_t retval;

  sos_disable_IRQs(flags);

  retval = _switch_to_next_thread(YIELD_MYSELF);

  sos_restore_IRQs(flags);
  return retval;
}


/**
 * Internal sleep timeout management
 */
struct sleep_timeout_params
{
  struct sos_thread *thread_to_wakeup;
  sos_bool_t timeout_triggered;
};


/**
 * Callback called when a timeout happened
 */
static void sleep_timeout(struct sos_timeout_action *act)
{
  struct sleep_timeout_params *sleep_timeout_params
    = (struct sleep_timeout_params*) act->routine_data;

  /* Signal that we have been woken up by the timeout */
  sleep_timeout_params->timeout_triggered = TRUE;

  /* Mark the thread ready */
  SOS_ASSERT_FATAL(SOS_OK ==
                   sos_thread_force_unblock(sleep_timeout_params
                                            ->thread_to_wakeup));
}


sos_ret_t sos_thread_sleep(struct sos_time *timeout)
{
  sos_ui32_t flags;
  struct sleep_timeout_params sleep_timeout_params;
  struct sos_timeout_action timeout_action;
  sos_ret_t retval;

  /* Block forever if no timeout is given */
  if (NULL == timeout)
    {
      sos_disable_IRQs(flags);
      retval = _switch_to_next_thread(BLOCK_MYSELF);
      sos_restore_IRQs(flags);

      return retval;
    }

  /* Initialize the timeout action */
  sos_time_init_action(& timeout_action);

  /* Prepare parameters used by the sleep timeout callback */
  sleep_timeout_params.thread_to_wakeup
    = (struct sos_thread*)current_thread;
  sleep_timeout_params.timeout_triggered = FALSE;

  sos_disable_IRQs(flags);

  /* Now program the timeout ! */
  SOS_ASSERT_FATAL(SOS_OK ==
                   sos_time_register_action_relative(& timeout_action,
                                                     timeout,
                                                     sleep_timeout,
                                                     & sleep_timeout_params));

  /* Prepare to block: wait for sleep_timeout() to wake us up in the
     timeout kwaitq, or for someone to wake us up in any other
     waitq */
  retval = _switch_to_next_thread(BLOCK_MYSELF);
  /* Unblocked by something ! */

  /* Unblocked by timeout ? */
  if (sleep_timeout_params.timeout_triggered)
    {
      /* Yes */
      SOS_ASSERT_FATAL(sos_time_is_zero(& timeout_action.timeout));
      retval = SOS_OK;
    }
  else
    {
      /* No: We have probably been woken up while in some other
         kwaitq */
      SOS_ASSERT_FATAL(SOS_OK == sos_time_unregister_action(& timeout_action));
      retval = -SOS_EINTR;
    }

  sos_restore_IRQs(flags);

  /* Update the remaining timeout */
  memcpy(timeout, & timeout_action.timeout, sizeof(struct sos_time));

  return retval;
}

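/*
 * Usage sketch (illustrative only, assuming the sec/nanosec fields of
 * struct sos_time as used by the time subsystem): sleep for half a
 * second, going back to sleep for the remaining time whenever some
 * other waitq woke us up early (-SOS_EINTR).
 */
#if 0
  struct sos_time delay = { .sec = 0, .nanosec = 500000000UL };
  while (-SOS_EINTR == sos_thread_sleep(& delay))
    continue; /* "delay" was updated with the time still to wait */
#endif
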
sos_ret_t sos_thread_force_unblock(struct sos_thread *thread)
{
  sos_ret_t retval;
  sos_ui32_t flags;

  if (! thread)
    return -SOS_EINVAL;

  sos_disable_IRQs(flags);

  /* Thread already woken up ? */
  retval = SOS_OK;
  switch(sos_thread_get_state(thread))
    {
    case SOS_THR_RUNNING:
    case SOS_THR_READY:
      /* Do nothing */
      break;

    case SOS_THR_ZOMBIE:
      retval = -SOS_EFATAL;
      break;

    default:
      retval = sos_sched_set_ready(thread);
      break;
    }

  sos_restore_IRQs(flags);

  return retval;
}


void sos_thread_dump_backtrace(sos_bool_t on_console,
                               sos_bool_t on_bochs)
{
  sos_vaddr_t stack_bottom = current_thread->kernel_stack_base_addr;
  sos_size_t stack_size    = current_thread->kernel_stack_size;

  void backtracer(sos_vaddr_t PC,
                  sos_vaddr_t params,
                  sos_ui32_t depth,
                  void *custom_arg)
    {
      sos_ui32_t invalid = 0xffffffff, *arg1, *arg2, *arg3, *arg4;

      /* Get the address of the first 4 arguments from the
         frame. Among these arguments, 0, 1, 2, 3 or 4 of them might
         be meaningful (depending on how many arguments the function
         may take). */
      arg1 = (sos_ui32_t*)params;
      arg2 = (sos_ui32_t*)(params+4);
      arg3 = (sos_ui32_t*)(params+8);
      arg4 = (sos_ui32_t*)(params+12);

      /* Make sure the addresses of these arguments fit inside the
         stack boundaries */
#define INTERVAL_OK(b,v,u) ( ((b) <= (sos_vaddr_t)(v)) \
                             && ((sos_vaddr_t)(v) < (u)) )
      if (!INTERVAL_OK(stack_bottom, arg1, stack_bottom + stack_size))
        arg1 = &invalid;
      if (!INTERVAL_OK(stack_bottom, arg2, stack_bottom + stack_size))
        arg2 = &invalid;
      if (!INTERVAL_OK(stack_bottom, arg3, stack_bottom + stack_size))
        arg3 = &invalid;
      if (!INTERVAL_OK(stack_bottom, arg4, stack_bottom + stack_size))
        arg4 = &invalid;

      /* Print the function context for this frame */
      if (on_bochs)
        sos_bochs_printf("[%d] PC=0x%x arg1=0x%x arg2=0x%x arg3=0x%x\n",
                         (unsigned)depth, (unsigned)PC,
                         (unsigned)*arg1, (unsigned)*arg2,
                         (unsigned)*arg3);

      if (on_console)
        sos_x86_videomem_printf(22-depth, 3,
                                SOS_X86_VIDEO_BG_BLUE
                                | SOS_X86_VIDEO_FG_LTGREEN,
                                "[%d] PC=0x%x arg1=0x%x arg2=0x%x arg3=0x%x arg4=0x%x",
                                (unsigned)depth, (unsigned)PC,
                                (unsigned)*arg1, (unsigned)*arg2,
                                (unsigned)*arg3, (unsigned)*arg4);
    }

  sos_backtrace(NULL, 15, stack_bottom, stack_size,
                backtracer, NULL);
}


/* **********************************************
 * Restricted functions
 */


sos_ret_t
sos_thread_prepare_user_space_access(struct sos_umem_vmm_as * dest_as,
                                     sos_vaddr_t fixup_retvaddr)
{
  sos_ret_t retval;
  sos_ui32_t flags;

  if (! dest_as)
    {
      /* Thread is not a user thread: do nothing */
      if (! current_thread->process)
        return -SOS_EINVAL;

      dest_as = sos_process_get_address_space(current_thread->process);
    }

  sos_disable_IRQs(flags);
  SOS_ASSERT_FATAL(NULL == current_thread->squatted_address_space);
  SOS_ASSERT_FATAL(0 == current_thread->fixup_uaccess.return_vaddr);

  /* Change the MMU configuration and init the fixup return address */
  retval = sos_umem_vmm_set_current_as(dest_as);
  if (SOS_OK == retval)
    {
      current_thread->squatted_address_space      = dest_as;
      current_thread->fixup_uaccess.return_vaddr  = fixup_retvaddr;
      current_thread->fixup_uaccess.faulted_uaddr = 0;
    }

  sos_restore_IRQs(flags);
  return retval;
}


sos_ret_t
sos_thread_end_user_space_access(void)
{
  sos_ret_t retval;
  sos_ui32_t flags;

  sos_disable_IRQs(flags);
  SOS_ASSERT_FATAL(NULL != current_thread->squatted_address_space);

  /* Don't impose anything regarding the current MMU configuration anymore */
  current_thread->fixup_uaccess.return_vaddr  = 0;
  current_thread->fixup_uaccess.faulted_uaddr = 0;

  retval = sos_umem_vmm_set_current_as(NULL);
  current_thread->squatted_address_space = NULL;

  sos_restore_IRQs(flags);
  return retval;
}

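/*
 * Usage sketch (illustrative only): the usual bracketing pattern
 * around an access to user-space memory. uaddr, kbuf and len are
 * hypothetical; passing a NULL dest_as selects the current process'
 * address space, and the 0 fixup return address means "no page-fault
 * fixup" in this sketch.
 */
#if 0
  if (SOS_OK == sos_thread_prepare_user_space_access(NULL, (sos_vaddr_t)0))
    {
      memcpy(kbuf, (const void*)uaddr, len); /* touch the user space */
      sos_thread_end_user_space_access();
    }
#endif
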
void sos_thread_prepare_syscall_switch_back(struct sos_cpu_state *cpu_state)
{
  /* Don't preempt the current thread */

  /*
   * Save the state of the interrupted context to make sure that:
   *  - The list of threads correctly reflects that the thread is back
   *    in user mode
   *  - _prepare_mm_context() deals with the correct mm_context
   */
  current_thread->cpu_state = cpu_state;

  /* Perform an MMU context switch if needed */
  _prepare_mm_context((struct sos_thread*) current_thread);
}


void sos_thread_prepare_exception_switch_back(struct sos_cpu_state *cpu_state)
{
  /* Don't preempt the current thread */

  /*
   * Save the state of the interrupted context to make sure that:
   *  - The list of threads correctly reflects that the thread is
   *    running in user or kernel mode
   *  - _prepare_mm_context() deals with the correct mm_context
   */
  current_thread->cpu_state = cpu_state;

  /* Perform an MMU context switch if needed */
  _prepare_mm_context((struct sos_thread*) current_thread);
}


void
sos_thread_prepare_irq_servicing(struct sos_cpu_state *interrupted_state)
{
  current_thread->cpu_state = interrupted_state;
}


struct sos_cpu_state *
sos_thread_prepare_irq_switch_back(void)
{
  struct sos_thread *myself, *next_thread;

  /* In SOS, threads in kernel mode are NEVER preempted from the
     interrupt handlers ! */
  if (! sos_cpu_context_is_in_user_mode(current_thread->cpu_state))
    return current_thread->cpu_state;

  /*
   * Here we are dealing only with possible preemption of user threads
   * in user context !
   */

  /* Make sure the thread actually is a user thread */
  SOS_ASSERT_FATAL(current_thread->process != NULL);

  /* Save the state of the interrupted context */
  myself = (struct sos_thread*)current_thread;

  /* Select the next thread to run */
  next_thread = sos_reschedule(myself, FALSE);

  /* Perform an MMU context switch if needed */
  _prepare_mm_context(next_thread);

  /* Setup the next_thread's context into the CPU */
  _set_current(next_thread);
  return next_thread->cpu_state;
}