Wrap IRQ, exception, and cpu_context code to be ready for user mode
Fix ASM where %ebp was pushed twice: once by pushl %ebp and once more by pushal later.
parent 8f6f6cf471
commit 75dbbdb53b
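For orientation, the register-save layout that the reworked wrappers and cpu_context_switch.S now share can be written out as a small C sketch. It only mirrors the cpu_state excerpt shown further down; the five segment-register fields are assumed (they are implied by the pushw/popw sequences but are not part of the hunks below), so this is an illustration, not the actual header:

    #include <stddef.h>
    #include <stdint.h>

    /* Sketch of the saved frame built by the fixed entry code: segment
     * registers are pushed last (lowest addresses), the general registers
     * are pushed in the order ebp, eax, ecx, edx, ebx, esi, edi, and %ebp
     * now appears exactly once while %esp is no longer stored. */
    struct cpu_state_sketch {
        uint16_t gs, fs, es, ds, ss;  /* assumed fields, per the popw sequence */
        uint16_t alignment_padding;   /* unused */
        uint32_t edi, esi, ebx, edx, ecx, eax, ebp;
        uint32_t error_code;          /* fake $0 or CPU-provided */
    };

    /* Matches the "esp+12" annotations in cpu_context_switch.S: the general
     * registers start right after the five segment registers plus padding. */
    _Static_assert(offsetof(struct cpu_state_sketch, edi) == 12,
                   "general registers start at offset 12");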
Makefile
@@ -12,12 +12,13 @@ QEMU_OPT += -hda disk.img
ARCH?=x86
SUBDIRS := core drivers tests arch/$(ARCH)

CPPFLAGS += $(foreach dir, $(SUBDIRS), -I$(dir))
INCDIRS += $(foreach dir, $(SUBDIRS), -I$(dir))
CPPFLAGS += $(INCDIRS)

asmsrc=$(wildcard arch/$(ARCH)/boot/*.asm)
asmobj=$(asmsrc:%.asm=%.o)
csrc=$(shell find $(SUBDIRS) -type f -name "*.c")# $(wildcard *.c)
cobj=$(csrc:%.c=%.o) arch/$(ARCH)/cpu_context_switch.o arch/$(ARCH)/irq_pit.o arch/$(ARCH)/irq_wrappers.o arch/$(ARCH)/exception_wrappers.o
cobj=$(csrc:%.c=%.o) arch/$(ARCH)/cpu_context_switch.o arch/$(ARCH)/irq_pit.o arch/$(ARCH)/irq_wrappers.o arch/$(ARCH)/exception_wrappers.o arch/$(ARCH)/syscall_wrappers.o
deps = $(csrc:%.c=%.d)

kernel kernel.sym &: $(asmobj) $(cobj) linker.ld
@@ -38,7 +39,7 @@ disk.img:
$(AS) $(ASFLAGS) -o $@ $<

%.o: %.S
$(CC) "-I$(PWD)" -c "$<" $(CFLAGS) -o "$@"
$(CC) $(INCDIRS) -c "$<" $(CFLAGS) -o "$@"


test: CFLAGS += -DRUN_TEST
@@ -38,12 +38,11 @@ struct cpu_state {
uint16_t alignment_padding; /* unused */
uint32_t edi;
uint32_t esi;
uint32_t esp;
uint32_t ebp;
uint32_t ebx;
uint32_t edx;
uint32_t ecx;
uint32_t eax;
uint32_t ebp;

/* MUST NEVER CHANGE (dependent on the IA32 iret instruction) */
uint32_t error_code;
@@ -24,7 +24,13 @@ cpu_context_switch:
pushl %cs // (cs) esp+52
pushl $resume_pc // (ip) esp+48
pushl $0 // (error code) esp+12+8x4
pushal // (general reg) esp+12
pushl %ebp
pushl %eax
pushl %ecx
pushl %edx
pushl %ebx
pushl %esi
pushl %edi
subl $2, %esp // (alignment) esp+10
pushw %ss // esp+8
pushw %ds // esp+6
@@ -37,11 +43,11 @@ cpu_context_switch:
*/

/* Store the address of the saved context */
movl 64(%esp), %ebx
movl 60(%esp), %ebx
movl %esp, (%ebx)

/* This is the proper context switch ! We change the stack here */
movl 68(%esp), %esp
movl 64(%esp), %esp

/* Prepare kernel TSS in case we are switching to a user thread: we
   make sure that we will come back into the kernel at a correct
@@ -49,9 +55,8 @@ cpu_context_switch:
pushl %esp /* Pass the location of the context we are
              restoring to the function */
call cpu_context_update_kernel_tss


addl $4, %esp

/* Restore the CPU context */
popw %gs
popw %fs
@@ -59,7 +64,13 @@ cpu_context_switch:
popw %ds
popw %ss
addl $2,%esp
popal
popl %edi
popl %esi
popl %ebx
popl %edx
popl %ecx
popl %eax
popl %ebp
addl $4, %esp /* Ignore "error code" */

/* This restores the eflags, the cs and the eip registers */
@@ -95,6 +106,14 @@ cpu_context_exit_to:
call *8(%eax)
addl $4, %esp

/* Prepare kernel TSS in case we are switching to a user thread: we
   make sure that we will come back into the kernel at a correct
   stack location */
pushl %esp /* Pass the location of the context we are
              restoring to the function */
call cpu_context_update_kernel_tss
addl $4, %esp

/* Restore the CPU context */
popw %gs
popw %fs
@@ -102,7 +121,13 @@ cpu_context_exit_to:
popw %ds
popw %ss
addl $2,%esp
popal
popl %edi
popl %esi
popl %ebx
popl %edx
popl %ecx
popl %eax
popl %ebp
addl $4, %esp /* Ignore "error code" */

/* This restores the eflags, the cs and the eip registers */
@@ -1,10 +1,19 @@
#define ASM_SOURCE 1
#include "segment.h"
.file "irq_wrappers.S"
.text

.extern exception_handler_wrap
.globl exception_handler_wrapper_array

/** Update the kernel TSS in case we are switching to a thread in user
    mode in order to come back into the correct kernel stack */
.extern cpu_context_update_kernel_tss

/* The address of the function to call to set back the user thread's
   MMU configuration upon return to user context */
.extern thread_prepare_exception_switch_back

.altmacro


@@ -24,12 +33,12 @@
pushl %ebp
movl %esp, %ebp

pushl %edi
pushl %esi
pushl %edx
pushl %ecx
pushl %ebx
pushl %eax
pushl %ecx
pushl %edx
pushl %ebx
pushl %esi
pushl %edi
subl $2,%esp
pushw %ss
pushw %ds
@@ -37,11 +46,31 @@
pushw %fs
pushw %gs

/* Set correct kernel segment descriptors' value */
movw $BUILD_SEGMENT_REG_VALUE(0, 0, SEG_KDATA), %di
pushw %di ; popw %ds
pushw %di ; popw %es
pushw %di ; popw %fs
pushw %di ; popw %gs

push %esp
pushl $\id
call exception_handler_wrap
addl $8, %esp

/* Reconfigure the MMU if needed */
pushl %esp /* cpu_ctxt */
call thread_prepare_exception_switch_back
addl $4, %esp /* Unallocate the stack */

/* Prepare kernel TSS in case we are switching to a
   user thread: we make sure that we will come back
   into the kernel at a correct stack location */
pushl %esp /* Pass the location of the context we are
              restoring to the function */
call cpu_context_update_kernel_tss
addl $4, %esp

/* Restore the context */
popw %gs
popw %fs
@@ -49,14 +78,13 @@
popw %ds
popw %ss
addl $2,%esp
popl %eax
popl %ebx
popl %ecx
popl %edx
popl %esi
popl %edi
popl %esi
popl %ebx
popl %edx
popl %ecx
popl %eax
popl %ebp

/* Remove fake error code */
addl $4, %esp

@@ -19,6 +19,7 @@
*/
#pragma once
#include "types.h"
#include "stdarg.h"
/**
 * @file gdt.h
 *
@@ -1,9 +1,17 @@
#define ASM_SOURCE 1
#include "segment.h"
.file "irq_wrappers.S"
.text

.extern interrupt_handler_pic
.globl irq_handler_wrapper_array
/** Update the kernel TSS in case we are switching to a thread in user
    mode in order to come back into the correct kernel stack */
.extern cpu_context_update_kernel_tss

/* The address of the function to call to set back the user thread's
   MMU configuration upon return to user context */
.extern thread_prepare_irq_switch_back

.altmacro

@@ -24,12 +32,12 @@
pushl %ebp
movl %esp, %ebp

pushl %edi
pushl %esi
pushl %edx
pushl %ecx
pushl %ebx
pushl %eax
pushl %ecx
pushl %edx
pushl %ebx
pushl %esi
pushl %edi
subl $2,%esp
pushw %ss
pushw %ds
@@ -37,11 +45,31 @@
pushw %fs
pushw %gs

/* Set correct kernel segment descriptors' value */
movw $BUILD_SEGMENT_REG_VALUE(0, 0, SEG_KDATA), %di
pushw %di ; popw %ds
pushw %di ; popw %es
pushw %di ; popw %fs
pushw %di ; popw %gs

push %esp
pushl $\irq
call interrupt_handler_pic
addl $8, %esp

/* Reconfigure the MMU if needed */
pushl %esp /* cpu_ctxt */
call thread_prepare_irq_switch_back
addl $4, %esp /* Unallocate the stack */

/* Prepare kernel TSS in case we are switching to a
   user thread: we make sure that we will come back
   into the kernel at a correct stack location */
pushl %esp /* Pass the location of the context we are
              restoring to the function */
call cpu_context_update_kernel_tss
addl $4, %esp

/* Restore the context */
popw %gs
popw %fs
@@ -49,12 +77,12 @@
popw %ds
popw %ss
addl $2,%esp
popl %eax
popl %ebx
popl %ecx
popl %edx
popl %esi
popl %edi
popl %esi
popl %ebx
popl %edx
popl %ecx
popl %eax
popl %ebp

/* Remove fake error code */
arch/x86/syscall_wrappers.S (new file, 91 lines)
@@ -0,0 +1,91 @@
#define ASM_SOURCE
#include "segment.h"

.file "syscall_wrappers.S"

.text

/* The address of the real "C" syscall function */
.extern syscall_execute

/** Update the kernel TSS in case we are switching to a thread in user
    mode in order to come back into the correct kernel stack */
.extern cpu_context_update_kernel_tss

/* The address of the function to call to set back the user thread's
   MMU configuration upon return to user context */
.extern thread_prepare_syscall_switch_back

.p2align 2, 0x90
.globl syscallHandler
syscallHandler:
.type syscallHandler,@function

/* Fake error code */
pushl $0
/* Backup the context */
pushl %ebp
movl %esp, %ebp

pushl %eax
pushl %ecx
pushl %edx
pushl %ebx
pushl %esi
pushl %edi
subl $2,%esp
pushw %ss
pushw %ds
pushw %es
pushw %fs
pushw %gs

/* Set correct kernel segment descriptors' value */
movw $BUILD_SEGMENT_REG_VALUE(0, 0, SEG_KDATA), %di
pushw %di ; popw %ds
pushw %di ; popw %es
pushw %di ; popw %fs
pushw %di ; popw %gs

/* Prepare the call to do_syscall */
pushl %esp /* user_ctxt */
pushl %eax /* syscall ID */

call syscall_execute
/* Unallocate the stack used by the
   do_syscall arguments */
addl $8, %esp

/* store the do_syscall return value into interrupted context */
movl %eax, 12(%esp)

/* Set the MMU configuration to that of the user thread's process */
pushl %esp /* user_ctxt */
call thread_prepare_syscall_switch_back
addl $4, %esp /* Unallocate the stack */

/* Prepare kernel TSS because we are switching back to a user
   thread: we make sure that we will come back into the kernel at a
   correct stack location */
pushl %esp /* Pass the location of the context we are
              restoring to the function */
call cpu_context_update_kernel_tss
addl $4, %esp

/* Restore the user context */
popw %gs
popw %fs
popw %es
popw %ds
popw %ss
addl $2,%esp
popl %edi
popl %esi
popl %ebx
popl %edx
popl %ecx
popl %eax
popl %ebp
/* Remove fake error code */
addl $4, %esp
iret
@@ -258,3 +258,16 @@ int kthreadAddThread(struct kthread *th)

return 0;
}
void thread_prepare_syscall_switch_back(struct cpu_state *cpu_state){
(void)cpu_state;
return;
}

void thread_prepare_exception_switch_back(struct cpu_state *cpu_state){
(void)cpu_state;
return;
}
void thread_prepare_irq_switch_back(struct cpu_state *cpu_state){
(void)cpu_state;
return;
}
@@ -1,4 +1,5 @@
/* Copyright (C) 2004 The SOS Team
/* Copyright (C) 2021 Mathieu Maret
   Copyright (C) 2004 The SOS Team
   Copyright (C) 1999 Free Software Foundation, Inc.

   This program is free software; you can redistribute it and/or
@@ -29,8 +30,6 @@
 * @see Intel x86 doc, vol 3 chapter 3.
 */

#include "stdarg.h"

/*
 * Global segment selectors (GDT) for SOS/x86.
 *
@@ -46,9 +45,16 @@
/**
 * Helper macro that builds a segment register's value
 */
#ifdef ASM_SOURCE
#define BUILD_SEGMENT_REG_VALUE(desc_privilege,in_ldt,seg_index) \
  ( (((desc_privilege) & 0x3) << 0) \
  | ((in_ldt & 1) << 2) \
  | ((seg_index) << 3) )

#else
#define BUILD_SEGMENT_REG_VALUE(desc_privilege, in_ldt, seg_index) \
  ((((desc_privilege)&0x3) << 0) | (((in_ldt) ? 1 : 0) << 2) | ((seg_index) << 3))

#endif
/*
 * Local segment selectors (LDT) for SOS/x86
 */
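The C variant of the macro is easiest to read with a worked expansion. A minimal example, assuming SEG_KDATA is GDT descriptor index 2 as in the original SOS layout (the real index is defined elsewhere in segment.h and is not visible in this hunk):

    /* RPL = 0, TI = 0 (GDT), index = 2  ->  selector value 0x10 */
    #define SEG_KDATA_ASSUMED 2   /* assumption for this example only */
    #define BUILD_SEGMENT_REG_VALUE(desc_privilege, in_ldt, seg_index) \
        ((((desc_privilege) & 0x3) << 0) | (((in_ldt) ? 1 : 0) << 2) | ((seg_index) << 3))

    _Static_assert(BUILD_SEGMENT_REG_VALUE(0, 0, SEG_KDATA_ASSUMED) == 0x10,
                   "kernel data selector is 0x10 under these assumptions");

This is the value the wrappers move into %di before reloading %ds, %es, %fs and %gs.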
core/syscall.c (new file, 21 lines)
@@ -0,0 +1,21 @@
#include "syscall.h"
#include "irq.h"
#include "idt.h"

extern void syscallHandler();

int syscallSetup(){
uint32_t flags, ret;

disable_IRQs(flags);
ret = idt_set_handler(SYSCALL_INTR_NB, (vaddr_t)syscallHandler, 3);
restore_IRQs(flags);

return ret;
}

int syscall_execute(int syscallId, const struct cpu_state *user_ctx){
(void)syscallId;
(void)user_ctx;
return 0;
}
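syscall_execute() is still a stub, so the dispatch it will eventually perform is only hinted at here; the syscall ids and behaviour below are hypothetical and not part of this commit:

    /* Hypothetical future dispatcher; nothing here is defined by the commit. */
    struct cpu_state;                 /* opaque, declared in the arch headers */
    #define SYSCALL_ID_EXIT  1        /* made-up id */
    #define SYSCALL_ID_WRITE 2        /* made-up id */

    int syscall_execute_sketch(int syscallId, const struct cpu_state *user_ctx)
    {
        (void)user_ctx;               /* would carry the user's argument registers */
        switch (syscallId) {
        case SYSCALL_ID_EXIT:
            return 0;                 /* would terminate the calling thread */
        case SYSCALL_ID_WRITE:
            return 0;                 /* would copy out and print a user buffer */
        default:
            return -1;                /* unknown syscall */
        }
    }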
core/syscall.h (new file, 5 lines)
@@ -0,0 +1,5 @@
#pragma once

#define SYSCALL_INTR_NB 0x42

int syscallSetup();
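Once user threads exist, a program would reach the handler installed by syscallSetup() with a software interrupt on vector SYSCALL_INTR_NB. A hypothetical user-side stub (no such helper is provided by this commit, and syscall_execute() currently ignores its arguments):

    /* Hypothetical user-mode stub: the syscall id goes in %eax, the result is
     * expected back in the saved %eax of the interrupted context, and 0x42 is
     * SYSCALL_INTR_NB. */
    static inline int syscall0(int syscallId)
    {
        int ret;
        asm volatile("int $0x42"
                     : "=a"(ret)
                     : "a"(syscallId)
                     : "memory");
        return ret;
    }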