/* Copyright (C) 2021 Mathieu Maret
   Copyright (C) 2005 David Decotigny
   Copyright (C) 2000-2004, The KOS team

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License
   as published by the Free Software Foundation; either version 2
   of the License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
   USA.
*/
#pragma once

/**
 * @file cpu_context.h
 *
 * Low level API to manage kernel and user thread CPU contexts. Meant
 * to be architecture-independent.
 */

#include "errno.h"
#include "stdarg.h"
#include "types.h"

/**
 * Opaque structure storing the CPU context of an inactive kernel or
 * user thread, as saved by the low level primitives below or by the
 * interrupt/exception handlers.
 *
 * @note This is an (architecture-independent) forward declaration:
 * see cpu_context.c and the *.S files for its
 * (architecture-dependent) definition.
 */
struct cpu_state;

/**
 * The type of the functions passed as arguments to the kernel
 * thread-related functions below.
 */
typedef void(cpu_kstate_function_arg1_t(void *arg1));

/**
 * Create the initial context for a kernel thread starting its
 * execution at function start_func with the argument start_arg, and
 * having the stack defined by stack_bottom/stack_size. When start_func
 * returns, the function exit_func is called with argument exit_arg.
 *
 * @param kctxt The kernel thread CPU context to initialize. The
 * address of the newly-initialized struct cpu_state will be
 * stored in this variable. The contents of this struct cpu_state
 * are actually located /inside/ the stack.
 *
 * @param start_func The address of the first instruction that will be
 * executed when this context is first transferred to the CPU.
 * Practically speaking, this is the address of a function that is
 * assumed to take 1 argument.
 *
 * @param start_arg The value that will be passed as the argument to
 * start_func when the thread starts. The stack will be set up
 * accordingly to simulate a real call to the function, really passing
 * this argument.
 *
 * @param stack_bottom The lowest address of the stack.
 *
 * @param stack_size The size of the stack.
 *
 * @param exit_func The address of the instruction executed after the
 * function start_func has returned. This function takes 1 parameter
 * as argument: exit_arg.
 *
 * @param exit_arg The argument passed to the function exit_func.
 *
 * @note The newly created context is INTERRUPTIBLE by default!
 */
int cpu_kstate_init(struct cpu_state **kctxt, cpu_kstate_function_arg1_t *start_func,
                    vaddr_t start_arg, vaddr_t stack_bottom, size_t stack_size,
                    cpu_kstate_function_arg1_t *exit_func, vaddr_t exit_arg);
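
/*
 * Usage sketch (illustrative, not part of this API): building the context of
 * a kernel thread on a dedicated stack. The names demo_stack, demo_ctxt,
 * demo_entry and demo_exit, the 4096-byte stack size and the 0-on-success
 * return convention are assumptions for this example only.
 *
 *   static char demo_stack[4096];
 *   static struct cpu_state *demo_ctxt;
 *
 *   static void demo_entry(void *arg) { (void)arg; } // thread body goes here
 *   static void demo_exit(void *arg)  { (void)arg; } // thread teardown goes here
 *
 *   if (cpu_kstate_init(&demo_ctxt, demo_entry, (vaddr_t)0,
 *                       (vaddr_t)demo_stack, sizeof(demo_stack),
 *                       demo_exit, (vaddr_t)0) != 0) {
 *       // handle the error (e.g. stack too small)
 *   }
 */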

/**
 * Prepare the system to deal with multiple CPU execution contexts.
 */
int cpu_context_subsystem_setup(void);
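
/*
 * Usage sketch (illustrative): presumably called once during kernel
 * initialization, before any CPU context is created or switched to.
 * panic() is a hypothetical fatal-error helper.
 *
 *   if (cpu_context_subsystem_setup() != 0)
 *       panic("could not set up the CPU context subsystem");
 */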

/**
 * Perform an immediate context switch from one kernel/user thread to
 * another one. It stores the current execution context in from_ctxt,
 * and restores to_ctxt on the CPU.
 *
 * @param from_ctxt The address of the current struct cpu_state will be
 * stored in this variable. Must NOT be NULL.
 *
 * @param to_ctxt The CPU will resume its execution with the struct
 * cpu_state located at this address. Must NOT be NULL.
 */
void cpu_context_switch(struct cpu_state **from_ctxt, struct cpu_state *to_ctxt);
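
/*
 * Usage sketch (illustrative): a minimal "yield" between two kernel threads
 * whose contexts were created with cpu_kstate_init(). The variables
 * current_ctxt and next_ctxt are hypothetical scheduler state.
 *
 *   struct cpu_state *current_ctxt; // context of the running thread
 *   struct cpu_state *next_ctxt;    // context chosen by the scheduler
 *
 *   // Save the running thread into current_ctxt and resume next_ctxt.
 *   // Execution comes back here only when another thread later switches
 *   // back to current_ctxt.
 *   cpu_context_switch(&current_ctxt, next_ctxt);
 */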

/**
 * Switch to the new given context (of a kernel/user thread) without
 * saving the old context (of another kernel/user thread), and call
 * the function reclaiming_func, passing it the reclaiming_arg
 * argument. The reclaiming function is called from within the stack
 * of the new context, so that it can (among other things) safely
 * destroy the stack of the former context.
 *
 * @param switch_to_ctxt The context that will be restored on the CPU.
 *
 * @param reclaiming_func The address of the function that will be
 * called after having changed the stack, but before restoring the CPU
 * context to switch_to_ctxt.
 *
 * @param reclaiming_arg The argument passed to reclaiming_func.
 */
void cpu_context_exit_to(struct cpu_state *switch_to_ctxt,
                         cpu_kstate_function_arg1_t *reclaiming_func, uint32_t reclaiming_arg)
    __attribute__((noreturn));
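
/*
 * Usage sketch (illustrative): terminating the current thread while letting
 * the reclaiming function free its resources from the next thread's stack.
 * struct thread, thread_free(), next_ctxt and current_thread are
 * hypothetical; the cast assumes a 32-bit address space.
 *
 *   static void reclaim_thread(void *arg) {
 *       struct thread *dying = arg;
 *       // Runs on the stack of next_ctxt: the dying thread's stack is no
 *       // longer in use, so it can be freed safely here.
 *       thread_free(dying);
 *   }
 *
 *   cpu_context_exit_to(next_ctxt, reclaim_thread, (uint32_t)current_thread);
 *   // never reached
 */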

/* =======================================================================
 * Public Accessor functions
 */

/**
 * Return whether the saved context was executing in kernel or user mode.
 *
 * @return TRUE when the context was interrupted while in user mode, FALSE
 * when in kernel mode, < 0 on error.
 */
int cpu_context_is_in_user_mode(const struct cpu_state *ctxt);

/**
 * Return the Program Counter stored in the saved kernel/user context.
 */
vaddr_t cpu_context_get_PC(const struct cpu_state *ctxt);

/**
 * Return the Stack Pointer stored in the saved kernel/user context.
 */
vaddr_t cpu_context_get_SP(const struct cpu_state *ctxt);

/**
 * Dump the contents of the CPU context (bochs + x86_videomem).
 */
void cpu_context_dump(const struct cpu_state *ctxt);
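
/*
 * Usage sketch (illustrative): reporting where an interrupted thread was
 * executing, e.g. from an interrupt handler. printk() is a hypothetical
 * printf-like helper and vaddr_t is assumed to be 32 bits wide.
 *
 *   void report_interrupted_context(const struct cpu_state *ctxt) {
 *       printk("interrupted in %s mode at PC=0x%x, SP=0x%x\n",
 *              cpu_context_is_in_user_mode(ctxt) > 0 ? "user" : "kernel",
 *              cpu_context_get_PC(ctxt), cpu_context_get_SP(ctxt));
 *       cpu_context_dump(ctxt);
 *   }
 */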

/* =======================================================================
 * Public Accessor functions TO BE USED ONLY BY Exception handlers
 */

/**
 * Return the error code passed by the CPU upon exception, as stored in
 * the saved context.
 */
uint32_t cpu_context_get_EX_err(const struct cpu_state *ctxt);

/**
 * Return the faulting address of the exception.
 */
vaddr_t cpu_context_get_EX_faulting_vaddr(const struct cpu_state *ctxt);
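
/*
 * Usage sketch (illustrative): a page-fault handler built on these
 * accessors. The handler signature and do_page_fault() are hypothetical;
 * on x86, bit 1 of the page-fault error code is set for write accesses.
 *
 *   void pagefault_handler(struct cpu_state *ctxt) {
 *       vaddr_t faulting_vaddr = cpu_context_get_EX_faulting_vaddr(ctxt);
 *       uint32_t errcode = cpu_context_get_EX_err(ctxt);
 *       int is_write = (errcode & (1u << 1)) != 0;
 *       // e.g. map the missing page, or kill the faulting thread
 *       do_page_fault(faulting_vaddr, is_write, cpu_context_is_in_user_mode(ctxt));
 *   }
 */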

/* =======================================================================
 * Macros controlling stack poisoning.
 * Stack poisoning can be used to detect:
 *  - uninitialized local variables
 *  - when the thread might have gone too deep in the stack
 */

/** The signature of the poison */
#define CPU_STATE_STACK_POISON 0xa5

/**
 * When set, means that the whole stack is poisoned to detect the use of
 * uninitialized variables.
 */
#define CPU_STATE_DETECT_UNINIT_KERNEL_VARS
/* #undef CPU_STATE_DETECT_UNINIT_KERNEL_VARS */

/**
 * When set, means that the bottom of the stack is poisoned to detect
 * probable stack overflow. Its value indicates the number of bytes
 * used for this detection.
 */
#define CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW 64
/* #undef CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW */

#if defined(CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW)
void cpu_state_prepare_detect_kernel_stack_overflow(const struct cpu_state *ctxt,
                                                    vaddr_t kernel_stack_bottom,
                                                    size_t kernel_stack_size);
void cpu_state_detect_kernel_stack_overflow(const struct cpu_state *ctxt,
                                            vaddr_t kernel_stack_bottom,
                                            size_t kernel_stack_size);
#else
#define cpu_state_prepare_detect_kernel_stack_overflow(ctxt, stkbottom, stksize) ({/* nop */})
#define cpu_state_detect_kernel_stack_overflow(ctxt, stkbottom, stksize) ({/* nop */})
#endif
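
/*
 * Usage sketch (illustrative): arming and checking the stack-overflow canary
 * of a kernel thread. demo_ctxt and demo_stack are the hypothetical names
 * from the cpu_kstate_init() example above.
 *
 *   // After building the context, poison the lowest bytes of the stack:
 *   cpu_state_prepare_detect_kernel_stack_overflow(demo_ctxt, (vaddr_t)demo_stack,
 *                                                  sizeof(demo_stack));
 *
 *   // Later, e.g. each time the thread is scheduled out, check that the
 *   // poison at the bottom of the stack is still intact:
 *   cpu_state_detect_kernel_stack_overflow(demo_ctxt, (vaddr_t)demo_stack,
 *                                          sizeof(demo_stack));
 */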