/* sos-code-article10/hwcore/cpu_context.c */
/* Copyright (C) 2005 David Decotigny
Copyright (C) 2000-2004, The KOS team
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
USA.
*/
#include <sos/assert.h>
#include <sos/klibc.h>
#include <drivers/bochs.h>
#include <drivers/x86_videomem.h>
#include <hwcore/segment.h>
#include <hwcore/gdt.h>
#include <sos/uaccess.h>
#include "cpu_context.h"
/**
* Here is the definition of a CPU context for IA32 processors. This
* is a SOS convention, not a specification given by the IA32
* spec. However there is a strong constraint related to the x86
* interrupt handling specification: the top of the stack MUST be
* compatible with the 'iret' instruction, ie there must be the
* err_code (might be 0), eip, cs and eflags of the destination
* context in that order (see Intel x86 specs vol 3, figure 5-4).
*
* @note IMPORTANT: This definition MUST be consistent with the way
* the registers are stored on the stack in
* irq_wrappers.S/exception_wrappers.S !!! Hence the constraint above.
*/
struct sos_cpu_state {
/* (Lower addresses) */
/* These are SOS convention */
sos_ui16_t gs;
sos_ui16_t fs;
sos_ui16_t es;
sos_ui16_t ds;
sos_ui16_t cpl0_ss; /* This is ALWAYS the Stack Segment of the
Kernel context (CPL0) of the interrupted
thread, even for a user thread */
sos_ui16_t alignment_padding; /* unused */
sos_ui32_t eax;
sos_ui32_t ebx;
sos_ui32_t ecx;
sos_ui32_t edx;
sos_ui32_t esi;
sos_ui32_t edi;
sos_ui32_t ebp;
/* MUST NEVER CHANGE (dependent on the IA32 iret instruction) */
sos_ui32_t error_code;
sos_vaddr_t eip;
sos_ui32_t cs; /* 32bits according to the specs ! However, the CS
register is really 16bits long */
sos_ui32_t eflags;
/* (Higher addresses) */
} __attribute__((packed));
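/*
* A compile-time sketch of how the layout constraint above could be
* checked (illustrative only, hence the #if 0: it assumes that
* offsetof() is available, which is not guaranteed by the SOS
* klibc). The classical negative-array-size trick makes the build
* fail if the iret-related fields are not contiguous and in order.
*/
#if 0
#define CTXT_LAYOUT_CHECK(cond) \
extern char ctxt_layout_check[(cond) ? 1 : -1]
CTXT_LAYOUT_CHECK(offsetof(struct sos_cpu_state, eip)
== offsetof(struct sos_cpu_state, error_code) + 4);
CTXT_LAYOUT_CHECK(offsetof(struct sos_cpu_state, cs)
== offsetof(struct sos_cpu_state, eip) + 4);
CTXT_LAYOUT_CHECK(offsetof(struct sos_cpu_state, eflags)
== offsetof(struct sos_cpu_state, cs) + 4);
#endif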
/**
* The CS value pushed on the stack by the CPU upon interrupt, and
* needed by the iret instruction, is 32bits long while the real CPU
* CS register is 16bits only: this macro simply retrieves the CPU
* "CS" register value from the CS value pushed on the stack by the
* CPU upon interrupt.
*
* The remaining 16bits pushed by the CPU should be considered
* "reserved" and architecture dependent. IMHO, the specs don't say
* anything about them. Considering that some architectures generate
* non-zero values for these 16bits (at least Cyrix), we'd better
* ignore them.
*/
#define GET_CPU_CS_REGISTER_VALUE(pushed_ui32_cs_value) \
( (pushed_ui32_cs_value) & 0xffff )
/**
* Structure of an interrupted Kernel thread's context
*/
struct sos_cpu_kstate
{
struct sos_cpu_state regs;
} __attribute__((packed));
/**
* Structure of an interrupted User thread's context. This is almost
* the same as a kernel context, except that 2 additional values are
* pushed on the stack before the eflags/cs/eip of the interrupted
* context: the stack configuration of the interrupted user context.
*
* @see Section 6.4.1 of Intel x86 vol 1
*/
struct sos_cpu_ustate
{
struct sos_cpu_state regs;
struct
{
sos_ui32_t cpl3_esp;
sos_ui16_t cpl3_ss;
};
} __attribute__((packed));
/*
* Structure of a Task State Segment on the x86 Architecture.
*
* @see Intel x86 spec vol 3, figure 6-2
*
* @note Such a data structure should not cross any page boundary (see
* end of section 6.2.1 of Intel spec vol 3). This is the reason why
* we tell gcc to align it on a 128B boundary (its size is 104B, which
* is <= 128).
*/
struct x86_tss {
/**
* Intel provides a way for a task to switch to another
* automatically (call gates). In this case, the back_link field
* stores the source TSS of the context switch. This makes it easy
* to implement coroutines, task backtracking, ... In SOS we don't
* use the TSS for context switching, so we always ignore this
* field.
* (+0)
*/
sos_ui16_t back_link;
sos_ui16_t reserved1;
/* CPL0 saved context. (+4) */
sos_vaddr_t esp0;
sos_ui16_t ss0;
sos_ui16_t reserved2;
/* CPL1 saved context. (+12) */
sos_vaddr_t esp1;
sos_ui16_t ss1;
sos_ui16_t reserved3;
/* CPL2 saved context. (+20) */
sos_vaddr_t esp2;
sos_ui16_t ss2;
sos_ui16_t reserved4;
/* Interrupted context's saved registers. (+28) */
sos_vaddr_t cr3;
sos_vaddr_t eip;
sos_ui32_t eflags;
sos_ui32_t eax;
sos_ui32_t ecx;
sos_ui32_t edx;
sos_ui32_t ebx;
sos_ui32_t esp;
sos_ui32_t ebp;
sos_ui32_t esi;
sos_ui32_t edi;
/* +72 */
sos_ui16_t es;
sos_ui16_t reserved5;
/* +76 */
sos_ui16_t cs;
sos_ui16_t reserved6;
/* +80 */
sos_ui16_t ss;
sos_ui16_t reserved7;
/* +84 */
sos_ui16_t ds;
sos_ui16_t reserved8;
/* +88 */
sos_ui16_t fs;
sos_ui16_t reserved9;
/* +92 */
sos_ui16_t gs;
sos_ui16_t reserved10;
/* +96 */
sos_ui16_t ldtr;
sos_ui16_t reserved11;
/* +100 */
sos_ui16_t debug_trap_flag :1;
sos_ui16_t reserved12 :15;
sos_ui16_t iomap_base_addr;
/* 104 */
} __attribute__((packed, aligned(128)));
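/*
* Layout sanity checks in the same illustrative spirit as above
* (under #if 0, assuming offsetof() is available). Note that
* aligned(128) rounds sizeof(struct x86_tss) up to 128 bytes, even
* though only the first 104 bytes are architecturally defined.
*/
#if 0
#define TSS_LAYOUT_CHECK(cond) \
extern char tss_layout_check[(cond) ? 1 : -1]
TSS_LAYOUT_CHECK(offsetof(struct x86_tss, esp0) == 4);
TSS_LAYOUT_CHECK(offsetof(struct x86_tss, cr3) == 28);
TSS_LAYOUT_CHECK(offsetof(struct x86_tss, ldtr) == 96);
TSS_LAYOUT_CHECK(sizeof(struct x86_tss) == 128);
#endif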
static struct x86_tss kernel_tss;
sos_ret_t sos_cpu_context_subsystem_setup()
{
/* Reset the kernel TSS */
memset(&kernel_tss, 0x0, sizeof(kernel_tss));
/**
* Now setup the kernel TSS.
*
* Considering the privilege change method we chose (cpl3 -> cpl0
* through a software interrupt), we don't need to initialize a
* full-fledged TSS. See section 6.4.1 of Intel x86 vol 1. Actually,
* only correct values for the kernel esp and ss are required (aka
* the "ss0" and "esp0" fields). Since esp0 will have to be updated
* at each privilege change, we don't have to set it up now.
*/
kernel_tss.ss0 = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KDATA);
/* Register this TSS into the gdt */
sos_gdt_register_kernel_tss((sos_vaddr_t) &kernel_tss);
return SOS_OK;
}
/**
* THE main operation of a kernel thread. This routine calls the
* kernel thread function start_func and calls exit_func when
* start_func returns.
*/
static void core_routine (sos_cpu_kstate_function_arg1_t *start_func,
sos_ui32_t start_arg,
sos_cpu_kstate_function_arg1_t *exit_func,
sos_ui32_t exit_arg)
__attribute__((noreturn));
static void core_routine (sos_cpu_kstate_function_arg1_t *start_func,
sos_ui32_t start_arg,
sos_cpu_kstate_function_arg1_t *exit_func,
sos_ui32_t exit_arg)
{
start_func(start_arg);
exit_func(exit_arg);
SOS_ASSERT_FATAL(! "The exit function of the thread should NOT return !");
for(;;);
}
sos_ret_t sos_cpu_kstate_init(struct sos_cpu_state **ctxt,
sos_cpu_kstate_function_arg1_t *start_func,
sos_ui32_t start_arg,
sos_vaddr_t stack_bottom,
sos_size_t stack_size,
sos_cpu_kstate_function_arg1_t *exit_func,
sos_ui32_t exit_arg)
{
/* We are initializing a Kernel thread's context */
struct sos_cpu_kstate *kctxt;
/* This is a critical internal function: it is assumed that the
caller knows what they are doing, so we legitimately assume that
the values for ctxt, start_func, stack_* and exit_func are always
VALID ! */
/* Setup the stack.
*
* On x86, the stack goes downward. Each frame is configured this
* way (higher addresses first):
*
* - (optional unused space. As of gcc 3.3, this space is 24 bytes)
* - arg n
* - arg n-1
* - ...
* - arg 1
* - return instruction address: The address the function returns to
* once finished
* - local variables
*
* The remainder of the code below should be read from the end
* upward to understand how the processor will handle it.
*/
sos_vaddr_t tmp_vaddr = stack_bottom + stack_size;
sos_ui32_t *stack = (sos_ui32_t*)tmp_vaddr;
/* If needed, poison the stack */
#ifdef SOS_CPU_STATE_DETECT_UNINIT_KERNEL_VARS
memset((void*)stack_bottom, SOS_CPU_STATE_STACK_POISON, stack_size);
#elif defined(SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW)
sos_cpu_state_prepare_detect_kernel_stack_overflow(stack_bottom, stack_size);
#endif
/* Simulate a call to the core_routine() function: prepare its
arguments */
*(--stack) = exit_arg;
*(--stack) = (sos_ui32_t)exit_func;
*(--stack) = start_arg;
*(--stack) = (sos_ui32_t)start_func;
*(--stack) = 0; /* Return address of core_routine => force page fault */
/*
* Setup the initial context structure, so that the CPU will execute
* the function core_routine() once this new context has been
* restored on CPU
*/
/* Compute the base address of the structure, which must be located
below the previous elements */
tmp_vaddr = ((sos_vaddr_t)stack) - sizeof(struct sos_cpu_kstate);
kctxt = (struct sos_cpu_kstate*)tmp_vaddr;
/* Initialize the CPU context structure */
memset(kctxt, 0x0, sizeof(struct sos_cpu_kstate));
/* Tell the CPU context structure that the first instruction to
execute will be that of the core_routine() function */
kctxt->regs.eip = (sos_ui32_t)core_routine;
/* Setup the segment registers */
kctxt->regs.cs
= SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KCODE); /* Code */
kctxt->regs.ds
= SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KDATA); /* Data */
kctxt->regs.es
= SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KDATA); /* Data */
kctxt->regs.cpl0_ss
= SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KDATA); /* Stack */
/* fs and gs unused for the moment. */
/* The newly created context is initially interruptible */
kctxt->regs.eflags = (1 << 9); /* set IF bit */
/* Finally, update the generic kernel/user thread context */
*ctxt = (struct sos_cpu_state*) kctxt;
return SOS_OK;
}
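/*
* Usage sketch for sos_cpu_kstate_init(). Illustrative only (hence
* the #if 0): the demo_* names are hypothetical, and the stack is
* assumed to come from some kernel allocator, one SOS_PAGE_SIZE
* large.
*/
#if 0
static void demo_body(sos_ui32_t arg)
{ /* ... the thread's work ... */ }

static void demo_exit(sos_ui32_t arg)
{ /* reclaim the thread's resources, then reschedule: must not return */ }

static void demo_spawn(sos_vaddr_t stack_bottom /* hypothetical allocation */)
{
struct sos_cpu_state *demo_ctxt;
sos_cpu_kstate_init(&demo_ctxt,
demo_body, 42,
stack_bottom, SOS_PAGE_SIZE,
demo_exit, 0);
/* demo_ctxt can now be handed over to the context switch routines
(see cpu_context_switch.S) */
}
#endif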
/**
* Helper function to create a new user thread context. When
* model_uctxt is NON NULL, the new user context is a copy of
* model_uctxt; otherwise the SP/PC registers are initialized to the
* user_initial_SP/PC arguments
*/
static sos_ret_t cpu_ustate_init(struct sos_cpu_state **ctxt,
const struct sos_cpu_state *model_uctxt,
sos_uaddr_t user_start_PC,
sos_ui32_t user_start_arg1,
sos_ui32_t user_start_arg2,
sos_uaddr_t user_initial_SP,
sos_vaddr_t kernel_stack_bottom,
sos_size_t kernel_stack_size)
{
/* We are initializing a User thread's context */
struct sos_cpu_ustate *uctxt;
/* This is a critical internal function: it is assumed that the
caller knows what they are doing, so we legitimately assume that
the values for ctxt, etc. are always VALID ! */
/* Compute the address of the CPU state to restore on CPU when
switching to this new user thread */
sos_vaddr_t uctxt_vaddr = kernel_stack_bottom
+ kernel_stack_size
- sizeof(struct sos_cpu_ustate);
uctxt = (struct sos_cpu_ustate*)uctxt_vaddr;
if (model_uctxt && !sos_cpu_context_is_in_user_mode(model_uctxt))
return -SOS_EINVAL;
/* If needed, poison the kernel stack */
#ifdef SOS_CPU_STATE_DETECT_UNINIT_KERNEL_VARS
memset((void*)kernel_stack_bottom,
SOS_CPU_STATE_STACK_POISON,
kernel_stack_size);
#elif defined(SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW)
sos_cpu_state_prepare_detect_kernel_stack_overflow(kernel_stack_bottom,
kernel_stack_size);
#endif
/*
* Setup the initial context structure, so that the CPU will restore
* the initial registers' values for the user thread. The
* user thread argument is passed in the EAX register.
*/
/* Initialize the CPU context structure */
if (! model_uctxt)
{
memset(uctxt, 0x0, sizeof(struct sos_cpu_ustate));
/* Tell the CPU context structure that the first instruction to
execute will be located at user_start_PC (in user space) */
uctxt->regs.eip = (sos_ui32_t)user_start_PC;
/* Tell the CPU where will be the user stack */
uctxt->cpl3_esp = user_initial_SP;
}
else
memcpy(uctxt, model_uctxt, sizeof(struct sos_cpu_ustate));
/* The parameter to the start function is not passed on the stack,
to avoid a possible page fault */
uctxt->regs.eax = user_start_arg1;
/* Optional additional argument for non-duplicated threads */
if (! model_uctxt)
uctxt->regs.ebx = user_start_arg2;
/* Setup the segment registers */
uctxt->regs.cs
= SOS_BUILD_SEGMENT_REG_VALUE(3, FALSE, SOS_SEG_UCODE); /* Code */
uctxt->regs.ds
= SOS_BUILD_SEGMENT_REG_VALUE(3, FALSE, SOS_SEG_UDATA); /* Data */
uctxt->regs.es
= SOS_BUILD_SEGMENT_REG_VALUE(3, FALSE, SOS_SEG_UDATA); /* Data */
uctxt->cpl3_ss
= SOS_BUILD_SEGMENT_REG_VALUE(3, FALSE, SOS_SEG_UDATA); /* User Stack */
/* We also need to set up the kernel stack segment. It will be
used when this context is restored on the CPU: the CPU initially
executes in kernel mode and immediately switches to user mode */
uctxt->regs.cpl0_ss
= SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KDATA); /* Kernel Stack */
/* fs and gs unused for the moment. */
/* The newly created context is initially interruptible */
uctxt->regs.eflags = (1 << 9); /* set IF bit */
/* Finally, update the generic kernel/user thread context */
*ctxt = (struct sos_cpu_state*) uctxt;
return SOS_OK;
}
sos_ret_t sos_cpu_ustate_init(struct sos_cpu_state **ctxt,
sos_uaddr_t user_start_PC,
sos_ui32_t user_start_arg1,
sos_ui32_t user_start_arg2,
sos_uaddr_t user_initial_SP,
sos_vaddr_t kernel_stack_bottom,
sos_size_t kernel_stack_size)
{
return cpu_ustate_init(ctxt, NULL,
user_start_PC,
user_start_arg1, user_start_arg2,
user_initial_SP,
kernel_stack_bottom, kernel_stack_size);
}
sos_ret_t sos_cpu_ustate_duplicate(struct sos_cpu_state **ctxt,
const struct sos_cpu_state *model_uctxt,
sos_ui32_t user_retval,
sos_vaddr_t kernel_stack_bottom,
sos_size_t kernel_stack_size)
{
return cpu_ustate_init(ctxt, model_uctxt,
/* ignored */0,
user_retval, /* ignored */0,
/* ignored */0,
kernel_stack_bottom, kernel_stack_size);
}
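/*
* Usage sketch for sos_cpu_ustate_duplicate(), as in a fork-like
* path (illustrative only, hypothetical names): the child gets a
* byte-for-byte copy of the parent's interrupted user context, and
* only its apparent syscall return value (eax) differs.
*/
#if 0
static void demo_fork_path(const struct sos_cpu_state *parent_ctxt,
sos_vaddr_t child_kstack_bottom,
sos_size_t child_kstack_size)
{
struct sos_cpu_state *child_ctxt;
sos_cpu_ustate_duplicate(&child_ctxt,
parent_ctxt, /* interrupted parent user context */
0, /* the child sees syscall retval 0 */
child_kstack_bottom,
child_kstack_size);
}
#endif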
sos_ret_t
sos_cpu_context_is_in_user_mode(const struct sos_cpu_state *ctxt)
{
/* An interrupted user thread has its CS register set to that of the
User code segment */
switch (GET_CPU_CS_REGISTER_VALUE(ctxt->cs))
{
case SOS_BUILD_SEGMENT_REG_VALUE(3, FALSE, SOS_SEG_UCODE):
return TRUE;
break;
case SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KCODE):
return FALSE;
break;
default:
SOS_FATAL_ERROR("Invalid saved context Code segment register: 0x%x (k=%x, u=%x) !",
(unsigned) GET_CPU_CS_REGISTER_VALUE(ctxt->cs),
SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KCODE),
SOS_BUILD_SEGMENT_REG_VALUE(3, FALSE, SOS_SEG_UCODE));
break;
}
/* Should never get here */
return -SOS_EFATAL;
}
#if defined(SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW)
void
sos_cpu_state_prepare_detect_kernel_stack_overflow(sos_vaddr_t stack_bottom,
sos_size_t stack_size)
{
sos_size_t poison_size = SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW;
if (poison_size > stack_size)
poison_size = stack_size;
memset((void*)stack_bottom, SOS_CPU_STATE_STACK_POISON, poison_size);
}
void
sos_cpu_state_detect_kernel_stack_overflow(const struct sos_cpu_state *ctxt,
sos_vaddr_t stack_bottom,
sos_size_t stack_size)
{
unsigned char *c;
unsigned int i;
/* On SOS, "ctxt" corresponds to the address of the esp register of
the saved context in Kernel mode (always, even for the interrupted
context of a user thread). Here we make sure that this stack
pointer is within the allowed stack area */
SOS_ASSERT_FATAL(((sos_vaddr_t)ctxt) >= stack_bottom);
SOS_ASSERT_FATAL(((sos_vaddr_t)ctxt) + sizeof(struct sos_cpu_kstate)
<= stack_bottom + stack_size);
/* Check that the bottom of the stack has not been altered */
for (c = (unsigned char*) stack_bottom, i = 0 ;
(i < SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW) && (i < stack_size) ;
c++, i++)
{
SOS_ASSERT_FATAL(SOS_CPU_STATE_STACK_POISON == *c);
}
}
#endif
/* =======================================================================
* Public Accessor functions
*/
sos_vaddr_t sos_cpu_context_get_PC(const struct sos_cpu_state *ctxt)
{
SOS_ASSERT_FATAL(NULL != ctxt);
/* This is the PC of the interrupted context (ie kernel or user
context). */
return ctxt->eip;
}
sos_vaddr_t sos_cpu_context_get_SP(const struct sos_cpu_state *ctxt)
{
SOS_ASSERT_FATAL(NULL != ctxt);
/* 'ctxt' corresponds to the SP of the interrupted context, in Kernel
mode. We have to test whether the original interrupted context
was that of a kernel or user thread */
if (TRUE == sos_cpu_context_is_in_user_mode(ctxt))
{
struct sos_cpu_ustate const* uctxt = (struct sos_cpu_ustate const*)ctxt;
return uctxt->cpl3_esp;
}
/* On SOS, "ctxt" corresponds to the address of the esp register of
the saved context in Kernel mode (always, even for the interrupted
context of a user thread). */
return (sos_vaddr_t)ctxt;
}
sos_ret_t
sos_cpu_context_set_EX_return_address(struct sos_cpu_state *ctxt,
sos_vaddr_t ret_vaddr)
{
ctxt->eip = ret_vaddr;
return SOS_OK;
}
void sos_cpu_context_dump(const struct sos_cpu_state *ctxt)
{
char buf[128];
char suffix[64];
/* Build the mode-dependent suffix in a separate buffer: passing buf
both as the destination and as a "%s" argument of the same
snprintf() call would be undefined behavior */
if (TRUE == sos_cpu_context_is_in_user_mode(ctxt))
{
struct sos_cpu_ustate const* uctxt = (struct sos_cpu_ustate const*)ctxt;
snprintf(suffix, sizeof(suffix), " esp3=%x ss3=%x",
(unsigned)uctxt->cpl3_esp, (unsigned)uctxt->cpl3_ss);
}
else
snprintf(suffix, sizeof(suffix), " [KERNEL MODE]");
snprintf(buf, sizeof(buf),
"CPU: eip=%x esp0=%x eflags=%x cs=%x ds=%x ss0=%x err=%x%s",
(unsigned)ctxt->eip, (unsigned)ctxt, (unsigned)ctxt->eflags,
(unsigned)GET_CPU_CS_REGISTER_VALUE(ctxt->cs), (unsigned)ctxt->ds,
(unsigned)ctxt->cpl0_ss,
(unsigned)ctxt->error_code, suffix);
sos_bochs_putstring(buf); sos_bochs_putstring("\n");
sos_x86_videomem_putstring(23, 0,
SOS_X86_VIDEO_FG_BLACK | SOS_X86_VIDEO_BG_LTGRAY,
buf);
}
/* =======================================================================
* Public Accessor functions TO BE USED ONLY BY Exception handlers
*/
sos_ui32_t sos_cpu_context_get_EX_info(const struct sos_cpu_state *ctxt)
{
SOS_ASSERT_FATAL(NULL != ctxt);
return ctxt->error_code;
}
sos_vaddr_t
sos_cpu_context_get_EX_faulting_vaddr(const struct sos_cpu_state *ctxt)
{
sos_ui32_t cr2;
/*
* See Intel Vol 3 (section 5.14): the faulting virtual address
* of a page fault is stored in the cr2 register.
*
* Actually, we do not store the cr2 register in a saved
* kernel thread's context. So we retrieve the cr2's value directly
* from the processor. The value we retrieve in an exception handler
* is actually the correct one because an exception is synchronous
* with the code causing the fault, and cannot be interrupted since
* the IDT entries in SOS are "interrupt gates" (ie IRQs are
* disabled).
*/
asm volatile ("movl %%cr2, %0"
:"=r"(cr2)
: );
return cr2;
}
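/*
* Usage sketch (a hypothetical page fault handler, not part of this
* file): retrieve both the faulting address and the error code to
* decide whether the fault can be resolved.
*/
#if 0
static void demo_pgflt_handler(int exid, struct sos_cpu_state *ctxt)
{
sos_vaddr_t fault_vaddr = sos_cpu_context_get_EX_faulting_vaddr(ctxt);
sos_ui32_t errcode = sos_cpu_context_get_EX_info(ctxt);
/* Bit 1 of the page fault error code is set for a write access
(see Intel x86 vol 3, section 5.14) */
if (errcode & (1 << 1))
{ /* ... try to resolve a write fault at fault_vaddr ... */ }
}
#endif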
/* =======================================================================
* Public Accessor functions TO BE USED ONLY BY the SYSCALL handler
*/
/*
* By convention, USER SOS programs always pass 4 arguments to the
* kernel syscall handler, in eax/../edx. For fewer arguments, the
* unused registers are filled with 0s. For more arguments, the 4th
* syscall parameter gives the address of the array containing the
* remaining arguments. In any case, eax corresponds to the syscall
* IDentifier.
*/
inline
sos_ret_t sos_syscall_get3args(const struct sos_cpu_state *user_ctxt,
/* out */unsigned int *arg1,
/* out */unsigned int *arg2,
/* out */unsigned int *arg3)
{
*arg1 = user_ctxt->ebx;
*arg2 = user_ctxt->ecx;
*arg3 = user_ctxt->edx;
return SOS_OK;
}
sos_ret_t sos_syscall_get1arg(const struct sos_cpu_state *user_ctxt,
/* out */unsigned int *arg1)
{
unsigned int unused;
return sos_syscall_get3args(user_ctxt, arg1, & unused, & unused);
}
sos_ret_t sos_syscall_get2args(const struct sos_cpu_state *user_ctxt,
/* out */unsigned int *arg1,
/* out */unsigned int *arg2)
{
unsigned int unused;
return sos_syscall_get3args(user_ctxt, arg1, arg2, & unused);
}
/*
* sos_syscall_get3args() is defined here, in cpu_context.c, because
* it needs to know the layout of a struct sos_cpu_state
*/
sos_ret_t sos_syscall_get4args(const struct sos_cpu_state *user_ctxt,
/* out */unsigned int *arg1,
/* out */unsigned int *arg2,
/* out */unsigned int *arg3,
/* out */unsigned int *arg4)
{
sos_uaddr_t uaddr_other_args;
unsigned int other_args[2];
sos_ret_t retval;
/* Retrieve the 3 register arguments. The last one is the user
address of the array containing the remaining arguments */
retval = sos_syscall_get3args(user_ctxt, arg1, arg2,
(unsigned int *)& uaddr_other_args);
if (SOS_OK != retval)
return retval;
/* Copy the array containing the remaining arguments from user
space */
retval = sos_memcpy_from_user((sos_vaddr_t)other_args,
(sos_uaddr_t)uaddr_other_args,
sizeof(other_args));
if (sizeof(other_args) != retval)
return -SOS_EFAULT;
*arg3 = other_args[0];
*arg4 = other_args[1];
return SOS_OK;
}
sos_ret_t sos_syscall_get5args(const struct sos_cpu_state *user_ctxt,
/* out */unsigned int *arg1,
/* out */unsigned int *arg2,
/* out */unsigned int *arg3,
/* out */unsigned int *arg4,
/* out */unsigned int *arg5)
{
sos_uaddr_t uaddr_other_args;
unsigned int other_args[3];
sos_ret_t retval;
/* Retrieve the 3 register arguments. The last one is the user
address of the array containing the remaining arguments */
retval = sos_syscall_get3args(user_ctxt, arg1, arg2,
(unsigned int *)& uaddr_other_args);
if (SOS_OK != retval)
return retval;
/* Copy the array containing the remaining arguments from user
space */
retval = sos_memcpy_from_user((sos_vaddr_t)other_args,
(sos_uaddr_t)uaddr_other_args,
sizeof(other_args));
if (sizeof(other_args) != retval)
return -SOS_EFAULT;
*arg3 = other_args[0];
*arg4 = other_args[1];
*arg5 = other_args[2];
return SOS_OK;
}
sos_ret_t sos_syscall_get6args(const struct sos_cpu_state *user_ctxt,
/* out */unsigned int *arg1,
/* out */unsigned int *arg2,
/* out */unsigned int *arg3,
/* out */unsigned int *arg4,
/* out */unsigned int *arg5,
/* out */unsigned int *arg6)
{
sos_uaddr_t uaddr_other_args;
unsigned int other_args[4];
sos_ret_t retval;
/* Retrieve the 3 register arguments. The last one is the user
address of the array containing the remaining arguments */
retval = sos_syscall_get3args(user_ctxt, arg1, arg2,
(unsigned int *)& uaddr_other_args);
if (SOS_OK != retval)
return retval;
/* Copy the array containing the remaining arguments from user
space */
retval = sos_memcpy_from_user((sos_vaddr_t)other_args,
(sos_uaddr_t)uaddr_other_args,
sizeof(other_args));
if (sizeof(other_args) != retval)
return -SOS_EFAULT;
*arg3 = other_args[0];
*arg4 = other_args[1];
*arg5 = other_args[2];
*arg6 = other_args[3];
return SOS_OK;
}
sos_ret_t sos_syscall_get7args(const struct sos_cpu_state *user_ctxt,
/* out */unsigned int *arg1,
/* out */unsigned int *arg2,
/* out */unsigned int *arg3,
/* out */unsigned int *arg4,
/* out */unsigned int *arg5,
/* out */unsigned int *arg6,
/* out */unsigned int *arg7)
{
sos_uaddr_t uaddr_other_args;
unsigned int other_args[5];
sos_ret_t retval;
/* Retrieve the 3 register arguments. The last one is the user
address of the array containing the remaining arguments */
retval = sos_syscall_get3args(user_ctxt, arg1, arg2,
(unsigned int *)& uaddr_other_args);
if (SOS_OK != retval)
return retval;
/* Copy the array containing the remaining arguments from user
space */
retval = sos_memcpy_from_user((sos_vaddr_t)other_args,
(sos_uaddr_t)uaddr_other_args,
sizeof(other_args));
if (sizeof(other_args) != retval)
return -SOS_EFAULT;
*arg3 = other_args[0];
*arg4 = other_args[1];
*arg5 = other_args[2];
*arg6 = other_args[3];
*arg7 = other_args[4];
return SOS_OK;
}
sos_ret_t sos_syscall_get8args(const struct sos_cpu_state *user_ctxt,
/* out */unsigned int *arg1,
/* out */unsigned int *arg2,
/* out */unsigned int *arg3,
/* out */unsigned int *arg4,
/* out */unsigned int *arg5,
/* out */unsigned int *arg6,
/* out */unsigned int *arg7,
/* out */unsigned int *arg8)
{
sos_uaddr_t uaddr_other_args;
unsigned int other_args[6];
sos_ret_t retval;
/* Retrieve the 3 register arguments. The last one is the user
address of the array containing the remaining arguments */
retval = sos_syscall_get3args(user_ctxt, arg1, arg2,
(unsigned int *)& uaddr_other_args);
if (SOS_OK != retval)
return retval;
/* Copy the array containing the remaining arguments from user
space */
retval = sos_memcpy_from_user((sos_vaddr_t)other_args,
(sos_uaddr_t)uaddr_other_args,
sizeof(other_args));
if (sizeof(other_args) != retval)
return -SOS_EFAULT;
*arg3 = other_args[0];
*arg4 = other_args[1];
*arg5 = other_args[2];
*arg6 = other_args[3];
*arg7 = other_args[4];
*arg8 = other_args[5];
return SOS_OK;
}
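/*
* For reference, a sketch of the matching USER-side stubs. This code
* belongs to the user-space system library, not to this file, and
* the 0x42 software interrupt vector is an assumption made for the
* illustration.
*/
#if 0
static inline int demo_syscall3(int id,
unsigned arg1, unsigned arg2, unsigned arg3)
{
int ret;
/* eax = syscall ID, ebx/ecx/edx = the (up to) 3 arguments */
asm volatile ("int $0x42"
: "=a" (ret)
: "a" (id), "b" (arg1), "c" (arg2), "d" (arg3)
: "memory");
return ret;
}

static inline int demo_syscall5(int id,
unsigned arg1, unsigned arg2, unsigned arg3,
unsigned arg4, unsigned arg5)
{
/* Arguments 3..5 are passed through an array whose user-space
address goes into the 3rd register (edx) */
unsigned other_args[3] = { arg3, arg4, arg5 };
return demo_syscall3(id, arg1, arg2, (unsigned)other_args);
}
#endif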
/* =======================================================================
* Backtrace facility. To be used for DEBUGging purposes ONLY.
*/
sos_ui32_t sos_backtrace(const struct sos_cpu_state *cpu_state,
sos_ui32_t max_depth,
sos_vaddr_t stack_bottom,
sos_size_t stack_size,
sos_backtrace_callback_t * backtracer,
void *custom_arg)
{
unsigned int depth;
sos_vaddr_t callee_PC, caller_frame;
/* Cannot backtrace an interrupted user thread ! */
if ((NULL != cpu_state)
&&
(TRUE == sos_cpu_context_is_in_user_mode(cpu_state)))
{
return 0;
}
/*
* Layout of a frame on the x86 (compiler=gcc):
*
* funcA calls funcB calls funcC
*
* ....
* funcB Argument 2
* funcB Argument 1
* funcA Return eip
* frameB: funcA ebp (ie previous stack frame)
* ....
* (funcB local variables)
* ....
* funcC Argument 2
* funcC Argument 1
* funcB Return eip
* frameC: funcB ebp (ie previous stack frame == frameB) <---- a frame address
* ....
* (funcC local variables)
* ....
*
* The presence of "ebp" on the stack depends on 2 things:
* + the compiler is gcc
* + the source is compiled WITHOUT the -fomit-frame-pointer option
* In the absence of "ebp", chances are high that the value read
* at that address falls outside the stack boundaries, in which case
* the backtrace stops prematurely (see the boundary check in the
* loop below).
*/
if (cpu_state)
{
callee_PC = cpu_state->eip;
caller_frame = cpu_state->ebp;
}
else
{
/* Skip the sos_backtrace() frame */
callee_PC = (sos_vaddr_t)__builtin_return_address(0);
caller_frame = (sos_vaddr_t)__builtin_frame_address(1);
}
for(depth=0 ; depth < max_depth ; depth ++)
{
/* Call the callback */
backtracer(callee_PC, caller_frame + 8, depth, custom_arg);
/* If the frame address is funky, don't go further */
if ( (caller_frame < stack_bottom)
|| (caller_frame + 4 >= stack_bottom + stack_size) )
return depth;
/* Go to caller frame */
callee_PC = *((sos_vaddr_t*) (caller_frame + 4));
caller_frame = *((sos_vaddr_t*) caller_frame);
}
return depth;
}
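/*
* Usage sketch (illustrative only, hypothetical stack bounds): print
* the current kernel call chain to the Bochs console. The callback
* signature follows the backtracer invocation above: PC of the
* callee, address of its parameters, current depth, custom argument.
*/
#if 0
static void demo_backtracer(sos_vaddr_t PC, sos_vaddr_t params,
sos_ui32_t depth, void *custom_arg)
{
char buf[64];
snprintf(buf, sizeof(buf), " #%u: PC=0x%x\n",
(unsigned)depth, (unsigned)PC);
sos_bochs_putstring(buf);
}

static void demo_dump_current_stack(sos_vaddr_t stack_bottom,
sos_size_t stack_size)
{
/* cpu_state == NULL means: backtrace the currently running context */
sos_backtrace(NULL, 16, stack_bottom, stack_size,
demo_backtracer, NULL);
}
#endif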
/* *************************************************************
* Function to manage the TSS. This function is not really "public":
* it is reserved for the assembler routines defined in
* cpu_context_switch.S
*
* Update the kernel stack address so that IRQs, syscalls and
* exceptions return to the correct stack location when coming back
* into kernel mode.
*/
void
sos_cpu_context_update_kernel_tss(struct sos_cpu_state *next_ctxt)
{
/* Does next_ctxt correspond to an interrupted user thread ? */
if (sos_cpu_context_is_in_user_mode(next_ctxt))
{
/*
* Yes: "next_ctxt" is an interrupted user thread => we are
* going to switch to user mode ! Setup the stack address so
* that the user thread "next_ctxt" can come back to the correct
* stack location when returning in kernel mode.
*
* This stack location corresponds to the SP of the next user
* thread once its context has been transferred on the CPU, ie
* once the CPU has executed all the pop/iret instruction of the
* context switch with privilege change.
*/
kernel_tss.esp0 = ((sos_vaddr_t)next_ctxt)
+ sizeof(struct sos_cpu_ustate);
/* Note: no need to protect this against IRQs, because IRQs are
not allowed to update it by themselves, and they are not allowed
to block */
}
else
{
/* No: No need to update kernel TSS when we stay in kernel
mode */
}
}