user_space #4
@@ -310,6 +310,43 @@ int cpu_kstate_init(struct cpu_state **ctxt, cpu_kstate_function_arg1_t *start_f
 	return 0;
 }
 
+int cpu_ustate_init(struct cpu_state **ctx, uaddr_t startPC, uint32_t arg1, uint32_t arg2,
+                    uaddr_t startSP, vaddr_t kernelStackBottom, size_t kernelStackSize)
+{
+	// The user context is stacked above the usual CPU state by the CPU on
+	// context switch, so store it where the CPU expects it (see
+	// cpu_kstate_init for details).
+	struct cpu_ustate *uctx =
+	    (struct cpu_ustate *)(kernelStackBottom + kernelStackSize - sizeof(struct cpu_ustate));
+
+	/* If needed, poison the stack */
+#ifdef CPU_STATE_DETECT_UNINIT_KERNEL_VARS
+	memset((void *)kernelStackBottom, CPU_STATE_STACK_POISON, kernelStackSize);
+#elif defined(CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW)
+	cpu_state_prepare_detect_kernel_stack_overflow((struct cpu_state *)uctx,
+	                                               kernelStackBottom, kernelStackSize);
+#endif
+
+	memset(uctx, 0, sizeof(struct cpu_ustate));
+
+	uctx->regs.eip = startPC;
+	uctx->regs.eax = arg1;
+	uctx->regs.ebx = arg2;
+
+	uctx->regs.cs = BUILD_SEGMENT_REG_VALUE(3, FALSE, SEG_UCODE);      // Code
+	uctx->regs.ds = BUILD_SEGMENT_REG_VALUE(3, FALSE, SEG_UDATA);      // Data
+	uctx->regs.es = BUILD_SEGMENT_REG_VALUE(3, FALSE, SEG_UDATA);      // Data
+	uctx->regs.cpl0_ss = BUILD_SEGMENT_REG_VALUE(3, FALSE, SEG_UDATA); // Kernel stack
+	uctx->cpl3_ss = BUILD_SEGMENT_REG_VALUE(3, FALSE, SEG_UDATA);      // User stack
+
+	uctx->cpl3_esp = startSP;
+
+	/* The newly created context is initially interruptible */
+	uctx->regs.eflags = (1 << 9); /* set IF bit */
+
+	*ctx = (struct cpu_state *)uctx;
+	return 0;
+}
+
 #if defined(CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW)
 void cpu_state_prepare_detect_kernel_stack_overflow(const struct cpu_state *ctxt,
                                                     vaddr_t stack_bottom, size_t stack_size)
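Reviewer note: for context, this is how a caller could drive `cpu_ustate_init` when spawning a user thread. A minimal sketch, assuming a `malloc`-backed kernel stack; the helper name and `USER_THREAD_KSTACK_SIZE` constant are hypothetical and not part of this patch.

```c
// Sketch only: helper name and stack-size constant are hypothetical.
#define USER_THREAD_KSTACK_SIZE (4 * 1024)

int userThreadContextCreate(struct cpu_state **ctx, uaddr_t entry, uaddr_t userStackTop)
{
	// One kernel stack per thread; cpu_ustate_init stores the initial
	// cpu_ustate at its top, where the CPU expects it on privilege change.
	vaddr_t kstack = (vaddr_t)malloc(USER_THREAD_KSTACK_SIZE);
	if (!kstack)
		return -1;

	// arg1/arg2 arrive in the new thread's eax/ebx, per the patch above.
	return cpu_ustate_init(ctx, entry, /* arg1 */ 0, /* arg2 */ 0,
	                       userStackTop, kstack, USER_THREAD_KSTACK_SIZE);
}
```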
@@ -1,3 +1,5 @@
+#define ASM_SOURCE 1
+#include "segment.h"
 .file "irq_pit.S"
 
 .text
@@ -22,6 +24,13 @@ pit_handler: // already got eflags, cs and eip on stack thanks to CPU
 	pushw %fs // esp+2
 	pushw %gs // esp
+
+	/* Reload the data segment registers with the kernel data selector */
+	movw $BUILD_SEGMENT_REG_VALUE(0, 0, SEG_KDATA), %di
+	pushw %di ; popw %ds
+	pushw %di ; popw %es
+	pushw %di ; popw %fs
+	pushw %di ; popw %gs
 
 	/* Send EOI to PIC */
 	movb $0x20, %al
 	outb %al, $0x20
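Reviewer note: this reload is needed because the CPU only switches `cs` and `ss` on the ring 3 to ring 0 transition; `ds`/`es`/`fs`/`gs` still hold user selectors when the handler starts. `BUILD_SEGMENT_REG_VALUE` presumably builds a standard x86 segment selector (index in bits 3-15, table indicator in bit 2, RPL in bits 0-1); a C rendering of that assumed layout:

```c
// Assumed selector layout; the project's actual macro lives in segment.h
// and may differ in detail.
#define BUILD_SEGMENT_REG_VALUE(rpl, in_ldt, seg_index) \
	(((seg_index) << 3) | (((in_ldt) ? 1 : 0) << 2) | ((rpl) & 0x3))

// BUILD_SEGMENT_REG_VALUE(0, 0, SEG_KDATA) -> kernel data segment,
// taken from the GDT, requested privilege level 0.
```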
@@ -161,3 +161,8 @@ int processSetName(struct process *proc, char *name)
 
 	return 0;
 }
+
+struct mmu_context *processGetMMUContext(struct process *proc)
+{
+	return proc->context;
+}
@@ -14,3 +14,4 @@ int processUnref(struct process *proc);
 int processSetName(struct process *proc, char *name);
 int processAddThread(struct process *proc, struct thread *th);
 int processRemoveThread(struct thread *th);
+struct mmu_context *processGetMMUContext(struct process *proc);
@@ -4,12 +4,15 @@
 #include "irq.h"
 #include "klibc.h"
 #include "list.h"
+#include "mmuContext.h"
 #include "time.h"
 #include "vga.h"
 
 static struct thread *currentThread;
 static struct thread *threadWithTimeout;
 
+static void threadPrepareContext(struct thread *th);
+
 void threadExit()
 {
 	uint32_t flags;
@@ -92,13 +95,19 @@ void threadDelete(struct thread *thread)
 	uint32_t flags;
 	disable_IRQs(flags);
 	list_delete(currentThread, thread);
+	restore_IRQs(flags);
+
+	if (thread->squattedContext) {
+		threadChangeCurrentContext(NULL);
+	}
+	if (thread->process)
+		processRemoveThread(thread);
 
 #ifdef DEBUG
 	printf("Free stack at 0x%x struct at 0x%x\n", thread->stackAddr, thread);
 #endif
 	free((void *)thread->stackAddr);
 	free((void *)thread);
-	restore_IRQs(flags);
 }
 
 struct thread *threadSelectNext()
@@ -187,6 +196,7 @@ int threadWait(struct thread *current, struct thread *next, unsigned long msec)
 
 	currentThread = next;
 	currentThread->state = RUNNING;
+	threadPrepareContext(next);
 	cpu_context_switch(&current->cpuState, next->cpuState);
 
 	return current->sleepHaveTimeouted;
@@ -213,6 +223,7 @@ int threadYield()
 
 	currentThread = next;
 	currentThread->state = RUNNING;
+	threadPrepareContext(next);
 	cpu_context_switch(&current->cpuState, next->cpuState);
 	restore_IRQs(flags);
 
@@ -240,6 +251,7 @@ int threadMsleep(unsigned long msec)
 
 	currentThread = next;
 	currentThread->state = RUNNING;
+	threadPrepareContext(next);
 	cpu_context_switch(&current->cpuState, next->cpuState);
 	restore_IRQs(flags);
 	return current->sleepHaveTimeouted == 1;
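Reviewer note: `threadWait`, `threadYield` and `threadMsleep` now repeat the same four-line resume sequence. A possible follow-up (not part of this patch) would be to factor it into a single helper, sketched here with a hypothetical name:

```c
// Hypothetical refactor, not in this patch.
static void threadSwitchTo(struct thread *current, struct thread *next)
{
	currentThread = next;
	currentThread->state = RUNNING;
	threadPrepareContext(next); // install the right MMU context first
	cpu_context_switch(&current->cpuState, next->cpuState);
}
```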
@@ -261,17 +273,61 @@ int threadAddThread(struct thread *th)
 	return 0;
 }
 
-void threadPrepareSyscallSwitchBack(struct cpu_state *cpu_state){
-	(void)cpu_state;
-	return;
-}
-
-void threadPrepareExceptionSwitchBack(struct cpu_state *cpu_state){
-	(void)cpu_state;
-	return;
-}
-
-void threadPrepareIrqSwitchBack(struct cpu_state *cpu_state){
-	(void)cpu_state;
-	return;
+static void threadPrepareContext(struct thread *th)
+{
+	if (cpu_context_is_in_user_mode(th->cpuState)) {
+		assert(th->process != NULL);
+		assert(th->squattedContext == NULL);
+		mmuContextSwitch(processGetMMUContext(th->process));
+	} else if (th->squattedContext) {
+		mmuContextSwitch(th->squattedContext);
+	}
+}
+
+int threadChangeCurrentContext(struct mmu_context *ctx)
+{
+	uint32_t flags;
+	struct mmu_context *prev = currentThread->squattedContext;
+
+	if (ctx != NULL) {
+		assert(prev == NULL);
+	} else {
+		assert(prev != NULL);
+	}
+
+	disable_IRQs(flags);
+	currentThread->squattedContext = ctx;
+
+	if (ctx != NULL) {
+		mmuContextRef(ctx);
+		mmuContextSwitch(ctx);
+	} else {
+		mmuContextUnref(prev);
+	}
+	restore_IRQs(flags);
+
+	return 0;
+}
+
+void threadPrepareSyscallSwitchBack(struct cpu_state *cpuState)
+{
+	currentThread->cpuState = cpuState;
+	threadPrepareContext(currentThread);
+}
+
+void threadPrepareExceptionSwitchBack(struct cpu_state *cpuState)
+{
+	currentThread->cpuState = cpuState;
+	threadPrepareContext(currentThread);
+}
+
+void threadPrepareIrqServicing(struct cpu_state *cpuState)
+{
+	currentThread->cpuState = cpuState;
+}
+
+void threadPrepareIrqSwitchBack(struct cpu_state *cpuState)
+{
+	currentThread->cpuState = cpuState;
+	threadPrepareContext(currentThread);
 }
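Reviewer note: `threadChangeCurrentContext` is the loaning entry point that the `squattedContext` comment below describes: take a reference on a foreign MMU context, switch to it, then release it symmetrically with a `NULL` call. A minimal sketch of a `copy_to_user`-style helper on top of it; the helper name and the bare `memcpy` are illustrative only (a real version would validate the user range):

```c
// Illustrative only: builds on the patch's API, but this helper is
// hypothetical and skips user-pointer validation.
int copyToProcess(struct process *dest, uaddr_t udst, const void *src, size_t len)
{
	// Borrow the destination address space; from here on,
	// threadPrepareContext() re-installs it whenever we are rescheduled.
	threadChangeCurrentContext(processGetMMUContext(dest));

	memcpy((void *)udst, src, len); // dest's user mappings are now active

	// Release the loan: unrefs the context and returns to lazy-TLB mode.
	threadChangeCurrentContext(NULL);
	return 0;
}
```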
@@ -32,6 +32,40 @@ struct thread {
 	// For User thread only
 	struct thread *nextInProcess, *prevInProcess;
 	struct process *process;
+
+	/**
+	 * Address space currently "squatted" by the thread, or the one that
+	 * was active when the thread was interrupted/preempted. This is the
+	 * MMU configuration expected before the cpu_state of the thread is
+	 * restored on the CPU.
+	 *  - For kernel threads: should normally be NULL, meaning that the
+	 *    thread squats whatever mm_context is currently set in the MMU.
+	 *    It may be non-NULL when a kernel thread squats a given process
+	 *    to manipulate its address space.
+	 *  - For user threads: should normally be NULL. More precisely:
+	 *     - in user mode: thread->process.mm_context is ALWAYS set on
+	 *       the MMU and squattedContext is ALWAYS NULL, meaning that a
+	 *       thread in user mode uses its process space, as expected.
+	 *     - in kernel mode: NULL means that we keep on using the
+	 *       mm_context currently set on the MMU, which might belong to
+	 *       another process. This is natural, since a thread in kernel
+	 *       mode normally only uses data in kernel space, and it limits
+	 *       the number of TLB flushes. There are exceptions, however,
+	 *       where squattedContext will NOT be NULL: one is the
+	 *       copy_from/to_user API, which can force the effective
+	 *       mm_context so that the MMU is (re)configured upon every
+	 *       context switch to the thread to match squattedContext;
+	 *       another is a parent thread creating the address space of a
+	 *       child process, in which case the parent thread may
+	 *       temporarily switch to the child's process space.
+	 *
+	 * This is the SOS/matos counterpart of Linux's "lazy TLB" and
+	 * address-space loaning.
+	 */
+	struct mmu_context *squattedContext;
 };
 
 int threadSetup(vaddr_t mainStack, size_t mainStackSize);
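Reviewer note: the last exception mentioned above (a parent thread building a child's address space) maps onto the new API like this; a sketch under the assumption that process creation exposes the child's `struct process` before its first thread runs, with the actual copying elided:

```c
// Hypothetical parent-side use while creating a child process.
void childImageSetup(struct process *child)
{
	// Temporarily squat the child's address space...
	threadChangeCurrentContext(processGetMMUContext(child));

	// ...map and copy the program image here, with the child's
	// mappings live (elided: not part of this patch)...

	// ...then drop the loan and fall back to lazy-TLB behaviour.
	threadChangeCurrentContext(NULL);
}
```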
@@ -50,3 +84,4 @@ int threadMsleep(unsigned long msec);
 int threadOnJieffiesTick();
 struct thread *getCurrentThread();
 int threadAddThread(struct thread *th);
+int threadChangeCurrentContext(struct mmu_context *ctx);
@@ -33,3 +33,6 @@ typedef unsigned long vaddr_t;
 
 // Physical address
 typedef unsigned long paddr_t;
+
+// Userspace vaddr
+typedef unsigned long uaddr_t;