user_space #4
@@ -310,6 +310,43 @@ int cpu_kstate_init(struct cpu_state **ctxt, cpu_kstate_function_arg1_t *start_f
    return 0;
}

int cpu_ustate_init(struct cpu_state **ctx, uaddr_t startPC, uint32_t arg1, uint32_t arg2,
                    uaddr_t startSP, vaddr_t kernelStackBottom, size_t kernelStackSize)
{
    // The user context is stacked above the usual cpu state: the CPU itself pushes the
    // extra esp/ss pair when user mode is interrupted. So store the context where the
    // CPU expects it (see cpu_kstate_init for more details).
    struct cpu_ustate *uctx =
        (struct cpu_ustate *)(kernelStackBottom + kernelStackSize - sizeof(struct cpu_ustate));

    /* If needed, poison the stack */
#ifdef CPU_STATE_DETECT_UNINIT_KERNEL_VARS
    memset((void *)kernelStackBottom, CPU_STATE_STACK_POISON, kernelStackSize);
#elif defined(CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW)
    cpu_state_prepare_detect_kernel_stack_overflow((struct cpu_state *)uctx, kernelStackBottom, kernelStackSize);
#endif

    memset(uctx, 0, sizeof(struct cpu_ustate));

    uctx->regs.eip = startPC;
    uctx->regs.eax = arg1;
    uctx->regs.ebx = arg2;

    uctx->regs.cs = BUILD_SEGMENT_REG_VALUE(3, FALSE, SEG_UCODE); // Code
    uctx->regs.ds = BUILD_SEGMENT_REG_VALUE(3, FALSE, SEG_UDATA); // Data
    uctx->regs.es = BUILD_SEGMENT_REG_VALUE(3, FALSE, SEG_UDATA); // Data
    uctx->regs.cpl0_ss = BUILD_SEGMENT_REG_VALUE(3, FALSE, SEG_UDATA); // Kernel Stack
    uctx->cpl3_ss = BUILD_SEGMENT_REG_VALUE(3, FALSE, SEG_UDATA); // User Stack

    uctx->cpl3_esp = startSP;

    /* The newly created context is initially interruptible */
    uctx->regs.eflags = (1 << 9); /* set IF bit */

    *ctx = (struct cpu_state *)uctx;
    return 0;
}
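
Note: the layout assumed by cpu_ustate_init works because the final iret of the first kernel-to-user return pops eip/cs/eflags and, since the privilege level changes, an extra esp/ss pair. A minimal sketch of the structure this implies (illustrative only; the authoritative struct cpu_ustate lives in the cpu_context headers):

struct cpu_ustate {            /* sketch, not the real definition */
    struct cpu_state regs;     /* segment registers, GPRs, eip/cs/eflags, ... */
    /* Popped by iret only on a ring 0 -> ring 3 return: they become
     * the user stack pointer and stack segment. */
    uint32_t cpl3_esp;
    uint16_t cpl3_ss;
} __attribute__((packed));

Placing this at the very top of the kernel stack means the CPU finds cpl3_esp/cpl3_ss exactly where it expects them when the context is first restored.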

#if defined(CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW)
void cpu_state_prepare_detect_kernel_stack_overflow(const struct cpu_state *ctxt,
                                                    vaddr_t stack_bottom, size_t stack_size)

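The diff cuts the function body off here. For context, the usual SOS-style overflow detection writes a magic marker at the lowest address of the stack and checks it later; a hedged sketch (marker value and details are assumptions, not this patch's code):

/* Hypothetical sketch: an overflow descending past the stack bottom
 * clobbers this marker first, which a later check can notice. */
#define STACK_OVERFLOW_MAGIC 0xdeadbeef /* assumed marker value */

static void prepare_detect_overflow_sketch(vaddr_t stack_bottom, size_t stack_size)
{
    (void)stack_size;
    *(uint32_t *)stack_bottom = STACK_OVERFLOW_MAGIC;
}
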
@@ -1,3 +1,5 @@
#define ASM_SOURCE 1
#include "segment.h"
.file "irq_pit.S"

.text
@@ -22,6 +24,13 @@ pit_handler: // already got eflags, cs and eip on stack thanks to CPU
    pushw %fs // esp+2
    pushw %gs // esp

    /* Load the kernel data segment selector into the data segment registers */
    movw $BUILD_SEGMENT_REG_VALUE(0, 0, SEG_KDATA), %di
    pushw %di ; popw %ds
    pushw %di ; popw %es
    pushw %di ; popw %fs
    pushw %di ; popw %gs

    /* Send EOI to PIC */
    movb $0x20, %al
    outb %al, $0x20
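
These two instructions are the classic end-of-interrupt handshake with the legacy 8259A PIC: port 0x20 is the master PIC's command port, and 0x20 is also the EOI command byte. A C rendering, in case the AT&T syntax is unfamiliar (the outb helper below is illustrative; the kernel may already provide one, possibly with another argument order):

#include <stdint.h>

#define PIC1_CMD 0x20 /* master 8259A command port */
#define PIC_EOI  0x20 /* end-of-interrupt command byte */

static inline void outb(uint16_t port, uint8_t val)
{
    /* "a" pins val to %al; "Nd" allows an 8-bit immediate or %dx port */
    asm volatile("outb %0, %1" : : "a"(val), "Nd"(port));
}

static inline void pic_send_eoi_master(void)
{
    /* Without the EOI, the PIC holds back further interrupts of the
     * same or lower priority. */
    outb(PIC1_CMD, PIC_EOI);
}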

@@ -161,3 +161,8 @@ int processSetName(struct process *proc, char *name)

    return 0;
}

struct mmu_context *processGetMMUContext(struct process *proc)
{
    return proc->context;
}

@@ -14,3 +14,4 @@ int processUnref(struct process *proc);
int processSetName(struct process *proc, char *name);
int processAddThread(struct process *proc, struct thread *th);
int processRemoveThread(struct thread *th);
struct mmu_context *processGetMMUContext(struct process *proc);

@@ -4,12 +4,15 @@
#include "irq.h"
#include "klibc.h"
#include "list.h"
#include "mmuContext.h"
#include "time.h"
#include "vga.h"

static struct thread *currentThread;
static struct thread *threadWithTimeout;

static void threadPrepareContext(struct thread *th);

void threadExit()
{
    uint32_t flags;
@@ -55,7 +58,7 @@ struct thread *threadCreate(const char *name, cpu_kstate_function_arg1_t func, v
        return NULL;

    thread->stackAddr = (vaddr_t)malloc(THREAD_DEFAULT_STACK_SIZE);
-   if(!thread->stackAddr)
    if (!thread->stackAddr)
        return NULL;
#ifdef DEBUG
    printf("Alloc stack at 0x%x struct at 0x%x\n", thread->stackAddr, thread);
@@ -92,13 +95,19 @@ void threadDelete(struct thread *thread)
    uint32_t flags;
    disable_IRQs(flags);
    list_delete(currentThread, thread);
    restore_IRQs(flags);

    if (thread->squattedContext) {
        threadChangeCurrentContext(NULL);
    }
    if (thread->process)
        processRemoveThread(thread);

#ifdef DEBUG
    printf("Free stack at 0x%x struct at 0x%x\n", thread->stackAddr, thread);
#endif
    free((void *)thread->stackAddr);
    free((void *)thread);
    restore_IRQs(flags);
}

struct thread *threadSelectNext()
@@ -187,6 +196,7 @@ int threadWait(struct thread *current, struct thread *next, unsigned long msec)

    currentThread = next;
    currentThread->state = RUNNING;
    threadPrepareContext(next);
    cpu_context_switch(&current->cpuState, next->cpuState);

    return current->sleepHaveTimeouted;
@@ -213,6 +223,7 @@ int threadYield()

    currentThread = next;
    currentThread->state = RUNNING;
    threadPrepareContext(next);
    cpu_context_switch(&current->cpuState, next->cpuState);
    restore_IRQs(flags);

@@ -240,6 +251,7 @@ int threadMsleep(unsigned long msec)

    currentThread = next;
    currentThread->state = RUNNING;
    threadPrepareContext(next);
    cpu_context_switch(&current->cpuState, next->cpuState);
    restore_IRQs(flags);
    return current->sleepHaveTimeouted == 1;
@@ -261,17 +273,61 @@ int threadAddThread(struct thread *th)
    return 0;
}

-void threadPrepareSyscallSwitchBack(struct cpu_state *cpu_state){
-   (void)cpu_state;
-   return;
static void threadPrepareContext(struct thread *th)
{
    if (cpu_context_is_in_user_mode(th->cpuState)) {
        assert(th->process != NULL);
        assert(th->squattedContext == NULL);
        mmuContextSwitch(processGetMMUContext(th->process));
    } else if (th->squattedContext) {
        mmuContextSwitch(th->squattedContext);
    }
}

-void threadPrepareExceptionSwitchBack(struct cpu_state *cpu_state){
-   (void)cpu_state;
-   return;
int threadChangeCurrentContext(struct mmu_context *ctx)
{
    uint32_t flags;
    struct mmu_context *prev = currentThread->squattedContext;

    if (ctx != NULL) {
        assert(prev == NULL);
    } else {
        assert(prev != NULL);
    }

    disable_IRQs(flags);
    currentThread->squattedContext = ctx;

    if (ctx != NULL) {
        mmuContextRef(ctx);
        mmuContextSwitch(ctx);
    } else {
        mmuContextUnref(prev);
    }
    restore_IRQs(flags);

    return 0;
}

-void threadPrepareIrqSwitchBack(struct cpu_state *cpu_state){
-   (void)cpu_state;
-   return;
void threadPrepareSyscallSwitchBack(struct cpu_state *cpuState)
{
    currentThread->cpuState = cpuState;
    threadPrepareContext(currentThread);
}

void threadPrepareExceptionSwitchBack(struct cpu_state *cpuState)
{
    currentThread->cpuState = cpuState;
    threadPrepareContext(currentThread);
}

void threadPrepareIrqServicing(struct cpu_state *cpuState)
{
    currentThread->cpuState = cpuState;
}

void threadPrepareIrqSwitchBack(struct cpu_state *cpuState)
{
    currentThread->cpuState = cpuState;
    threadPrepareContext(currentThread);
}
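
Taken together, the four hooks above form the contract between the asm entry stubs and the scheduler: record where the interrupted cpu_state was spilled on entry, and re-establish the MMU context that the state to be resumed expects before switching back. A hypothetical sketch of the expected call order for the IRQ path (the real caller is the asm wrapper; this C version is illustrative only):

void irqWrapperSketch(struct cpu_state *savedState)
{
    /* entry: remember where this thread's registers were saved */
    threadPrepareIrqServicing(savedState);

    /* ... dispatch to the actual IRQ handler here ... */

    /* exit: reload the MMU context the resumed state expects
     * (process space for user mode, squatted context if any) */
    threadPrepareIrqSwitchBack(savedState);
}

The Servicing variant only records the state, presumably because the handler may still reschedule before the final switch back; the SwitchBack variants additionally run threadPrepareContext().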

@@ -32,6 +32,40 @@ struct thread {
    // For User thread only
    struct thread *nextInProcess, *prevInProcess;
    struct process *process;

    /**
     * Address space currently "squatted" by the thread, or the one that
     * was active when the thread was interrupted/preempted. This is the
     * MMU configuration expected before the cpu_state of the thread is
     * restored on CPU.
     * - For kernel threads: should normally be NULL, meaning that the
     *   thread will squat the mm_context currently set in the MMU.
     *   Might be NON NULL when a kernel thread squats a given process
     *   to manipulate its address space.
     * - For user threads: should normally be NULL. More precisely:
     *   - in user mode: the thread->process.mm_context is ALWAYS
     *     set on MMU. squatted_mm_context is ALWAYS NULL in this
     *     situation, meaning that the thread in user mode uses its
     *     process-space as expected.
     *   - in kernel mode: NULL means that we keep on using the
     *     mm_context currently set on MMU, which might be the
     *     mm_context of another process. This is natural since a
     *     thread in kernel mode normally only uses data in kernel
     *     space. BTW, this limits the number of TLB flushes. However,
     *     there are exceptions where this squatted_mm_context will
     *     NOT be NULL. One is the copy_from/to_user API, which can
     *     force the effective mm_context so that the MMU will be
     *     (re)configured upon every context switch to the thread to
     *     match the squatted_mm_context. Another exception is when a
     *     parent thread creates the address space of a child process,
     *     in which case the parent thread might temporarily decide to
     *     switch to the child's process space.
     *
     * This is the SOS/matos implementation of the Linux "Lazy TLB" and
     * address-space loaning.
     */
    struct mmu_context *squattedContext;

};

int threadSetup(vaddr_t mainStack, size_t mainStackSize);
@@ -50,3 +84,4 @@ int threadMsleep(unsigned long msec);
int threadOnJieffiesTick();
struct thread *getCurrentThread();
int threadAddThread(struct thread *th);
int threadChangeCurrentContext(struct mmu_context *ctx);
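
The squattedContext comment in thread.h names copy_from/to_user as the main client of this new call. A hedged usage sketch (the helper below is hypothetical and assumes the kernel's klibc provides memcpy; it is not part of this patch):

/* Loan the destination process's address space to the current
 * (kernel) thread for the duration of the copy. */
int copyToProcessSketch(struct process *dst, uaddr_t to, const void *from, size_t len)
{
    threadChangeCurrentContext(processGetMMUContext(dst));

    memcpy((void *)to, from, len); /* dst's user space is now mapped */

    /* Release the squat. Per the lazy-TLB design above, nothing is
     * switched back here: the MMU keeps this context loaded until
     * some thread actually needs another one. */
    threadChangeCurrentContext(NULL);
    return 0;
}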

@@ -33,3 +33,6 @@ typedef unsigned long vaddr_t;

// Physical address
typedef unsigned long paddr_t;

// Userspace vaddr
typedef unsigned long uaddr_t;