Add context switch

This is taken from SOS

parent 03c74c68d6
commit 3bca737990
Makefile (6 changed lines)

@@ -15,7 +15,7 @@ CPPFLAGS += $(foreach dir, $(SUBDIRS), -I$(dir))
 asmsrc=$(wildcard *.asm)
 asmobj=$(asmsrc:%.asm=%.o)
 csrc=$(shell find $(SUBDIRS) -type f -name "*.c")# $(wildcard *.c)
-cobj=$(csrc:%.c=%.o)
+cobj=$(csrc:%.c=%.o) core/cpu_context_switch.o
 deps = $(csrc:%.c=%.d)
 
 kernel:$(asmobj) $(cobj) linker.ld
@@ -39,6 +39,10 @@ core/irq_handler.o:core/irq_handler.c
 %.o:%.asm
 	$(AS) $(ASFLAGS) -o $@ $<
 
+%.o: %.S
+	$(CC) "-I$(PWD)" -c "$<" $(CFLAGS) -o "$@"
+
+
 self_test: CFLAGS += -DRUN_TEST -DDEBUG
 self_test: clean kernel
 	qemu-system-x86_64 -kernel kernel -serial stdio
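Aside: cobj is derived from the *.c sources only, so the assembly-implemented context-switch object must be listed by hand; the new %.o: %.S pattern rule then builds it through $(CC) rather than $(AS), presumably so that the C preprocessor runs over the .S file first.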
core/cpu_context.c (new file, 240 lines)
@@ -0,0 +1,240 @@
/* Copyright (C) 2005  David Decotigny
   Copyright (C) 2000-2004, The KOS team

   Initially taken from SOS
*/

#include "assert.h"
#include "klibc.h"
#include "segment.h"

#include "cpu_context.h"

/**
 * Here is the definition of a CPU context for IA32 processors. This
 * is a Matos/SOS convention, not a specification given by the IA32
 * spec. However, there is a strong constraint related to the x86
 * interrupt handling specification: the top of the stack MUST be
 * compatible with the 'iret' instruction, i.e. there must be the
 * err_code (might be 0), eip, cs and eflags of the destination
 * context, in that order (see Intel x86 specs vol 3, figure 5-4).
 *
 * @note IMPORTANT: This definition MUST be consistent with the way
 * the registers are stored on the stack in
 * irq_wrappers.S/exception_wrappers.S! Hence the constraint above.
 */
struct cpu_state {
    /* (Lower addresses) */

    /* These are Matos/SOS convention */
    uint16_t gs;
    uint16_t fs;
    uint16_t es;
    uint16_t ds;
    uint16_t cpl0_ss; /* This is ALWAYS the Stack Segment of the
                         Kernel context (CPL0) of the interrupted
                         thread, even for a user thread */
    uint16_t alignment_padding; /* unused */
    uint32_t eax;
    uint32_t ebx;
    uint32_t ecx;
    uint32_t edx;
    uint32_t esi;
    uint32_t edi;
    uint32_t ebp;

    /* MUST NEVER CHANGE (dependent on the IA32 iret instruction) */
    uint32_t error_code;
    vaddr_t eip;
    uint32_t cs; /* 32 bits according to the specs! However, the CS
                    register is really only 16 bits long */
    uint32_t eflags;

    /* (Higher addresses) */
} __attribute__((packed));
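As an illustrative aside (not part of this commit): with the packed attribute, the six 16-bit segment fields occupy bytes 0-11 and the seven 32-bit general registers bytes 12-39, so the iret-compatible frame starts at offset 40 and the whole structure spans 56 bytes, assuming vaddr_t is a 32-bit integer. A hypothetical C11 compile-time guard for this invariant could look like:

    #include <stddef.h> /* offsetof -- available even freestanding */

    /* Hypothetical guards: catch any field reordering that would
       break the 'iret' constraint documented above. */
    _Static_assert(offsetof(struct cpu_state, error_code) == 40,
                   "iret frame must start right after the GP registers");
    _Static_assert(sizeof(struct cpu_state) == 56,
                   "unexpected size for packed struct cpu_state");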
/**
 * The CS value pushed on the stack by the CPU upon interrupt, and
 * needed by the iret instruction, is 32 bits long while the real CPU
 * CS register is only 16 bits: this macro simply retrieves the CPU
 * "CS" register value from the CS value pushed on the stack by the
 * CPU upon interrupt.
 *
 * The remaining 16 bits pushed by the CPU should be considered
 * "reserved" and architecture dependent. IMHO, the specs don't say
 * anything about them. Considering that some architectures generate
 * non-zero values for these 16 bits (at least Cyrix), we'd better
 * ignore them.
 */
#define GET_CPU_CS_REGISTER_VALUE(pushed_ui32_cs_value) ((pushed_ui32_cs_value) & 0xffff)

/**
 * Structure of an interrupted Kernel thread's context
 */
struct cpu_kstate {
    struct cpu_state regs;
} __attribute__((packed));

/**
 * THE main operation of a kernel thread. This routine calls the
 * kernel thread function start_func and calls exit_func when
 * start_func returns.
 */
static void core_routine(cpu_kstate_function_arg1_t *start_func, void *start_arg,
                         cpu_kstate_function_arg1_t *exit_func, void *exit_arg)
    __attribute__((noreturn));

static void core_routine(cpu_kstate_function_arg1_t *start_func, void *start_arg,
                         cpu_kstate_function_arg1_t *exit_func, void *exit_arg)
{
    start_func(start_arg);
    exit_func(exit_arg);

    assert(!"The exit function of the thread should NOT return!");
    for (;;)
        ;
}
int cpu_kstate_init(struct cpu_state **ctxt, cpu_kstate_function_arg1_t *start_func,
                    uint32_t start_arg, vaddr_t stack_bottom, size_t stack_size,
                    cpu_kstate_function_arg1_t *exit_func, uint32_t exit_arg)
{
    /* We are initializing a Kernel thread's context */
    struct cpu_kstate *kctxt;

    /* This is a critical internal function, so it is assumed that the
       caller knows what he does: we legitimately assume that values
       for ctxt, start_func, stack_* and exit_func are always VALID! */

    /* Setup the stack.
     *
     * On x86, the stack grows downward. Each frame is laid out this
     * way (higher addresses first):
     *
     *  - (optional unused space; as of gcc 3.3, this space is 24 bytes)
     *  - arg n
     *  - arg n-1
     *  - ...
     *  - arg 1
     *  - return instruction address: the address the function returns
     *    to once finished
     *  - local variables
     *
     * The rest of the code should be read from the end upward to
     * understand how the processor will handle it.
     */

    vaddr_t tmp_vaddr = stack_bottom + stack_size;
    uint32_t *stack = (uint32_t *)tmp_vaddr;

    /* If needed, poison the stack */
#ifdef CPU_STATE_DETECT_UNINIT_KERNEL_VARS
    memset((void *)stack_bottom, CPU_STATE_STACK_POISON, stack_size);
#elif defined(CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW)
    /* No saved context exists yet, hence NULL for the ctxt argument */
    cpu_state_prepare_detect_kernel_stack_overflow(NULL, stack_bottom, stack_size);
#endif

    /* Simulate a call to the core_routine() function: prepare its
       arguments */
    *(--stack) = exit_arg;
    *(--stack) = (uint32_t)exit_func;
    *(--stack) = start_arg;
    *(--stack) = (uint32_t)start_func;
    *(--stack) = 0; /* Return address of core_routine => force page fault */

    /*
     * Setup the initial context structure, so that the CPU will execute
     * the function core_routine() once this new context has been
     * restored on CPU
     */

    /* Compute the base address of the structure, which must be located
       below the previous elements */
    tmp_vaddr = ((vaddr_t)stack) - sizeof(struct cpu_kstate);
    kctxt = (struct cpu_kstate *)tmp_vaddr;

    /* Initialize the CPU context structure */
    memset(kctxt, 0x0, sizeof(struct cpu_kstate));

    /* Tell the CPU context structure that the first instruction to
       execute will be that of the core_routine() function */
    kctxt->regs.eip = (uint32_t)core_routine;

    /* Setup the segment registers */
    kctxt->regs.cs = BUILD_SEGMENT_REG_VALUE(0, FALSE, SEG_KCODE); /* Code */
    kctxt->regs.ds = BUILD_SEGMENT_REG_VALUE(0, FALSE, SEG_KDATA); /* Data */
    kctxt->regs.es = BUILD_SEGMENT_REG_VALUE(0, FALSE, SEG_KDATA); /* Data */
    kctxt->regs.cpl0_ss = BUILD_SEGMENT_REG_VALUE(0, FALSE, SEG_KDATA); /* Stack */
    /* fs and gs unused for the moment. */

    /* The newly created context is initially interruptible */
    kctxt->regs.eflags = (1 << 9); /* set IF bit */

    /* Finally, update the generic kernel/user thread context */
    *ctxt = (struct cpu_state *)kctxt;

    return 0;
}
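For concreteness (an illustrative sketch, not in the sources): once cpu_kstate_init() returns, the top of the new stack looks like this, highest address first:

    stack_bottom + stack_size ->
        exit_arg
        exit_func
        start_arg
        start_func
        0                     <- fake return address for core_routine()
        struct cpu_kstate     <- saved context; *ctxt points here (lowest)

When this context is later restored, iret jumps to core_routine(), which finds its four cdecl arguments in place above the fake return address; the zero return address guarantees a page fault if core_routine() ever returned.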
#if defined(CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW)
void cpu_state_prepare_detect_kernel_stack_overflow(const struct cpu_state *ctxt,
                                                    vaddr_t stack_bottom, size_t stack_size)
{
    (void)ctxt;
    size_t poison_size = CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW;
    if (poison_size > stack_size)
        poison_size = stack_size;

    memset((void *)stack_bottom, CPU_STATE_STACK_POISON, poison_size);
}

void cpu_state_detect_kernel_stack_overflow(const struct cpu_state *ctxt, vaddr_t stack_bottom,
                                            size_t stack_size)
{
    unsigned char *c;
    size_t i;

    /* On Matos/SOS, "ctxt" corresponds to the address of the esp register of
       the saved context in Kernel mode (always, even for the interrupted
       context of a user thread). Here we make sure that this stack
       pointer is within the allowed stack area */
    assert(((vaddr_t)ctxt) >= stack_bottom);
    assert(((vaddr_t)ctxt) + sizeof(struct cpu_kstate) <= stack_bottom + stack_size);

    /* Check that the bottom of the stack has not been altered */
    for (c = (unsigned char *)stack_bottom, i = 0;
         (i < CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW) && (i < stack_size); c++, i++) {
        assert(CPU_STATE_STACK_POISON == *c);
    }
}
#endif
/* =======================================================================
 * Public Accessor functions
 */

vaddr_t cpu_context_get_PC(const struct cpu_state *ctxt)
{
    assert(NULL != ctxt);

    /* This is the PC of the interrupted context (i.e. kernel or user
       context). */
    return ctxt->eip;
}

vaddr_t cpu_context_get_SP(const struct cpu_state *ctxt)
{
    assert(NULL != ctxt);

    /* On Matos/SOS, "ctxt" corresponds to the address of the esp register of
       the saved context in Kernel mode (always, even for the interrupted
       context of a user thread). */
    return (vaddr_t)ctxt;
}

void cpu_context_dump(const struct cpu_state *ctxt)
{
    printf("CPU: eip=%x esp=%x eflags=%x cs=%x ds=%x ss=%x err=%x", (unsigned)ctxt->eip,
           (unsigned)ctxt, (unsigned)ctxt->eflags, (unsigned)GET_CPU_CS_REGISTER_VALUE(ctxt->cs),
           (unsigned)ctxt->ds, (unsigned)ctxt->cpl0_ss, (unsigned)ctxt->error_code);
}
core/cpu_context.h (new file, 207 lines)
@@ -0,0 +1,207 @@
/* Copyright (C) 2005  David Decotigny
   Copyright (C) 2000-2004, The KOS team

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License
   as published by the Free Software Foundation; either version 2
   of the License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
   USA.
*/
#pragma once

/**
 * @file cpu_context.h
 *
 * Low level API to manage kernel and user thread CPU contexts. Should
 * be more or less architecture-independent.
 */

#include "types.h"
#include "errno.h"

/**
 * Opaque structure storing the CPU context of an inactive kernel or
 * user thread, as saved by the low level primitives below or by the
 * interrupt/exception handlers.
 *
 * @note This is an (architecture-independent) forward declaration:
 * see cpu_context.c and the *.S files for its
 * (architecture-dependent) definition.
 */
struct cpu_state;

/**
 * The type of the functions passed as arguments to the Kernel thread
 * related functions.
 */
typedef void (cpu_kstate_function_arg1_t(void *arg1));
/**
 * Function to create an initial context for a kernel thread starting
 * its execution at function start_func with the argument start_arg,
 * and having the stack defined by stack_bottom/stack_size. When the
 * start_func function returns, the function exit_func is called with
 * argument exit_arg.
 *
 * @param kctxt The kernel thread CPU context to initialize. The
 * address of the newly-initialized struct cpu_state will be
 * stored in this variable. The contents of this struct cpu_state
 * are actually located /inside/ the stack.
 *
 * @param start_func The address of the first instruction that will be
 * executed when this context is first transferred onto the
 * CPU. Practically speaking, this is the address of a function that
 * is assumed to take 1 argument.
 *
 * @param start_arg The value that will be passed as the argument to
 * start_func when the thread starts. The stack will be set up
 * accordingly to simulate a real call to the function and really
 * pass this argument.
 *
 * @param stack_bottom The lowest address of the stack.
 *
 * @param stack_size The size of the stack.
 *
 * @param exit_func The address of the instruction executed after the
 * function start_func has returned. This function takes 1 parameter
 * as argument: exit_arg.
 *
 * @param exit_arg The argument passed to the function exit_func.
 *
 * @note the newly created context is INTERRUPTIBLE by default!
 */
int cpu_kstate_init(struct cpu_state **kctxt,
                    cpu_kstate_function_arg1_t *start_func,
                    uint32_t start_arg,
                    vaddr_t stack_bottom,
                    size_t stack_size,
                    cpu_kstate_function_arg1_t *exit_func,
                    uint32_t exit_arg);
/**
|
||||||
|
* Function that performs an immediate context-switch from one
|
||||||
|
* kernel/user thread to another one. It stores the current executing
|
||||||
|
* context in from_ctxt, and restores to_context on CPU.
|
||||||
|
*
|
||||||
|
* @param from_ctxt The address of the struct cpu_state will be
|
||||||
|
* stored in this variable. Must NOT be NULL.
|
||||||
|
*
|
||||||
|
* @param to_ctxt The CPU will resume its execution with the struct
|
||||||
|
* cpu_state located at this address. Must NOT be NULL.
|
||||||
|
*/
|
||||||
|
void cpu_context_switch(struct cpu_state **from_ctxt,
|
||||||
|
struct cpu_state *to_ctxt);
|
||||||
|
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Switch to the new given context (of a kernel/user thread) without
|
||||||
|
* saving the old context (of another kernel/user thread), and call
|
||||||
|
* the function reclaiming_func passing it the recalining_arg
|
||||||
|
* argument. The reclaining function is called from within the stack
|
||||||
|
* of the new context, so that it can (among other things) safely
|
||||||
|
* destroy the stack of the former context.
|
||||||
|
*
|
||||||
|
* @param switch_to_ctxt The context that will be restored on the CPU
|
||||||
|
*
|
||||||
|
* @param reclaiming_func The address of the function that will be
|
||||||
|
* called after having changed the stack, but before restoring the CPU
|
||||||
|
* context to switch_to_ctxt.
|
||||||
|
*/
|
||||||
|
void
|
||||||
|
cpu_context_exit_to(struct cpu_state *switch_to_ctxt,
|
||||||
|
cpu_kstate_function_arg1_t *reclaiming_func,
|
||||||
|
uint32_t reclaiming_arg) __attribute__((noreturn));
|
||||||
|
|
||||||
|
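(The tests/test.c hunk at the end of this commit exercises exactly this pair of primitives: two coroutines ping-pong via cpu_context_switch() and terminate via cpu_context_exit_to().)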
/* =======================================================================
 * Public Accessor functions
 */

/**
 * Return the Program Counter stored in the saved kernel/user context
 */
vaddr_t cpu_context_get_PC(const struct cpu_state *ctxt);

/**
 * Return the Stack Pointer stored in the saved kernel/user context
 */
vaddr_t cpu_context_get_SP(const struct cpu_state *ctxt);

/**
 * Dump the contents of the CPU context (bochs + x86_videomem)
 */
void cpu_context_dump(const struct cpu_state *ctxt);
/* =======================================================================
 * Public Accessor functions TO BE USED ONLY BY Exception handlers
 */

/**
 * Return the argument passed by the CPU upon exception, as stored in the
 * saved context
 */
uint32_t cpu_context_get_EX_info(const struct cpu_state *ctxt);

/**
 * Return the faulting address of the exception
 */
vaddr_t
cpu_context_get_EX_faulting_vaddr(const struct cpu_state *ctxt);
/* =======================================================================
 * Macros controlling stack poisoning.
 * Stack poisoning can be used to detect:
 *  - uninitialized local variables
 *  - when the thread might have gone too deep in the stack
 */
/** The signature of the poison */
#define CPU_STATE_STACK_POISON 0xa5

/**
 * When set, means that the whole stack is poisoned to detect use of
 * uninitialized variables
 */
#define CPU_STATE_DETECT_UNINIT_KERNEL_VARS
/* #undef CPU_STATE_DETECT_UNINIT_KERNEL_VARS */

/**
 * When set, means that the bottom of the stack is poisoned to detect
 * probable stack overflow. Its value indicates the number of bytes
 * used for this detection.
 */
#define CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW 64
/* #undef CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW */

#if defined(CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW)
void
cpu_state_prepare_detect_kernel_stack_overflow(const struct cpu_state *ctxt,
                                               vaddr_t kernel_stack_bottom,
                                               size_t kernel_stack_size);
void cpu_state_detect_kernel_stack_overflow(const struct cpu_state *ctxt,
                                            vaddr_t kernel_stack_bottom,
                                            size_t kernel_stack_size);
#else
# define cpu_state_prepare_detect_kernel_stack_overflow(ctxt,stkbottom,stksize) \
    ({ /* nop */ })
# define cpu_state_detect_kernel_stack_overflow(ctxt,stkbottom,stksize) \
    ({ /* nop */ })
#endif
core/cpu_context_switch.S (new file, 107 lines)
@@ -0,0 +1,107 @@
.file "cpu_context_switch.S"

.text

.globl cpu_context_switch
.type cpu_context_switch, @function
cpu_context_switch:
        // arg2 = to_context   -- esp+64
        // arg1 = from_context -- esp+60
        // caller ip           -- esp+56
        pushf            // (eflags)     esp+52
        pushl %cs        // (cs)         esp+48
        pushl $resume_pc // (ip)         esp+44
        pushl $0         // (error code) esp+40
        pushl %ebp       //              esp+36
        pushl %edi       //              esp+32
        pushl %esi       //              esp+28
        pushl %edx       //              esp+24
        pushl %ecx       //              esp+20
        pushl %ebx       //              esp+16
        pushl %eax       //              esp+12
        subl $2, %esp    // (alignment)  esp+10
        pushw %ss        //              esp+8
        pushw %ds        //              esp+6
        pushw %es        //              esp+4
        pushw %fs        //              esp+2
        pushw %gs        //              esp

        /*
         * Now that the original eax/ebx are stored, we can use them safely
         */

        /* Store the address of the saved context */
        movl 60(%esp), %ebx
        movl %esp, (%ebx)

        /* This is the proper context switch ! We change the stack here */
        movl 64(%esp), %esp

        /* Restore the CPU context */
        popw %gs
        popw %fs
        popw %es
        popw %ds
        popw %ss
        addl $2, %esp
        popl %eax
        popl %ebx
        popl %ecx
        popl %edx
        popl %esi
        popl %edi
        popl %ebp
        addl $4, %esp /* Ignore "error code" */

        /* This restores the eflags, the cs and the eip registers */
        iret /* equivalent to: popfl ; ret */

resume_pc:
        // Same context as when cpu_context_switch was called
        // arg2 = to_context   -- esp+8
        // arg1 = from_context -- esp+4
        // caller ip           -- esp
        ret
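Note: the push sequence above lays out, from the final %esp upward, exactly the fields of struct cpu_state in cpu_context.c (gs at the lowest address, then fs/es/ds/ss, the alignment padding, the general registers, and finally the iret frame), which is why the saved %esp can simply be stored into *from_ctxt and later dereferenced as a struct cpu_state *.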
/* ------------------------- */
.globl cpu_context_exit_to
.type cpu_context_exit_to, @function
cpu_context_exit_to:
        // arg3 = reclaiming_arg  -- esp+12
        // arg2 = reclaiming_func -- esp+8
        // arg1 = to_context      -- esp+4
        // caller ip              -- esp

        /* Store the current SP in a temporary register */
        movl %esp, %eax

        /* This is the proper context switch ! We change the stack here */
        movl 4(%eax), %esp

        /* Call the reclaiming function (remember: the old frame address
           is stored in eax) */
        pushl 12(%eax)
        call *8(%eax)
        addl $4, %esp

        /* Restore the CPU context */
        popw %gs
        popw %fs
        popw %es
        popw %ds
        popw %ss
        addl $2, %esp
        popl %eax
        popl %ebx
        popl %ecx
        popl %edx
        popl %esi
        popl %edi
        popl %ebp
        addl $4, %esp /* Ignore "error code" */

        /* This restores the eflags, the cs and the eip registers */
        iret /* equivalent to: popfl ; ret */
tests/test.c (78 changed lines)
@@ -1,5 +1,6 @@
 #include "alloc.h"
 #include "assert.h"
+#include "cpu_context.h"
 #include "klibc.h"
 #include "list.h"
 #include "mem.h"
@@ -141,6 +142,82 @@ void test_backtrace()
     test_backtrace_1(2);
 }
+
+/* ======================================================================
+ * Demonstrate the use of the CPU kernel context management API:
+ * - A coroutine prints "Hlowrd" and switches to the other after each
+ *   letter
+ * - A coroutine prints "el ol\n" and switches back to the other after
+ *   each letter.
+ * The first to reach the '\n' returns back to main.
+ */
+struct cpu_state *ctxt_hello1;
+struct cpu_state *ctxt_hello2;
+struct cpu_state *ctxt_main;
+vaddr_t hello1_stack, hello2_stack;
+
+static void reclaim_stack(void *stack_vaddr)
+{
+    free(stack_vaddr);
+}
+
+static void exit_hello12(void *stack_vaddr)
+{
+    cpu_context_exit_to(ctxt_main, (cpu_kstate_function_arg1_t *)reclaim_stack,
+                        (vaddr_t)stack_vaddr);
+}
+
+static void hello1(void *strIn)
+{
+    char *str = (char *)strIn;
+    for (; *str != '\n'; str++) {
+        printf("hello1: %c\n", *str);
+        cpu_context_switch(&ctxt_hello1, ctxt_hello2);
+    }
+
+    /* You can uncomment this in case you explicitly want to exit
+       now. But returning from the function will do the same. */
+    /* cpu_context_exit_to(ctxt_main,
+                           (cpu_kstate_function_arg1_t *)reclaim_stack,
+                           hello1_stack); */
+}
+
+static void hello2(void *strIn)
+{
+    char *str = (char *)strIn;
+    for (; *str != '\n'; str++) {
+        printf("hello2: %c\n", *str);
+        cpu_context_switch(&ctxt_hello2, ctxt_hello1);
+    }
+
+    /* You can uncomment this in case you explicitly want to exit
+       now. But returning from the function will do the same. */
+    /* cpu_context_exit_to(ctxt_main,
+                           (cpu_kstate_function_arg1_t *)reclaim_stack,
+                           hello2_stack); */
+}
+
+void testCoroutine()
+{
+#define DEMO_STACK_SIZE 1024
+    /* Allocate the stacks */
+    hello1_stack = (vaddr_t)malloc(DEMO_STACK_SIZE);
+    hello2_stack = (vaddr_t)malloc(DEMO_STACK_SIZE);
+
+    /* Initialize the coroutines' contexts */
+    cpu_kstate_init(&ctxt_hello1, (cpu_kstate_function_arg1_t *)hello1, (uint32_t)"Hlowrd",
+                    (vaddr_t)hello1_stack, DEMO_STACK_SIZE,
+                    (cpu_kstate_function_arg1_t *)exit_hello12, (uint32_t)hello1_stack);
+    cpu_kstate_init(&ctxt_hello2, (cpu_kstate_function_arg1_t *)hello2, (uint32_t)"el ol\n",
+                    (vaddr_t)hello2_stack, DEMO_STACK_SIZE,
+                    (cpu_kstate_function_arg1_t *)exit_hello12, (uint32_t)hello2_stack);
+
+    /* Go to first coroutine */
+    printf("Printing Hello World\\n...\n");
+    cpu_context_switch(&ctxt_main, ctxt_hello1);
+
+    /* The first coroutine to reach the '\n' switched back to us */
+    printf("Back in main !\n");
+}
+
 void run_test(void)
 {
     testPaging();
@@ -153,4 +230,5 @@ void run_test(void)
     testAlloc();
     printf("Testing backtrace\n");
     test_backtrace();
+    testCoroutine();
 }
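Tracing the two loops by hand, the letters interleave to spell "Hello world"; the serial console should show something like:

    Printing Hello World\n...
    hello1: H
    hello2: e
    hello1: l
    hello2: l
    hello1: o
    hello2:  
    hello1: w
    hello2: o
    hello1: r
    hello2: l
    hello1: d
    Back in main !

hello2 is the first to reach its '\n' and therefore the one that exits back to main; note that in that case hello1's stack is never reclaimed.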