        .file "cpu_context_switch.S"

        .text

/**
 * C function called by the routines below in order to tell the CPU
 * where the kernel stack will be (needed by the interrupt handlers)
 * when next_ctxt comes back into kernel mode.
 *
 *   void cpu_context_update_kernel_tss(struct cpu_state *next_ctxt)
 *
 * @see end of cpu_context.c
 */
        .extern cpu_context_update_kernel_tss
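
/*
 * For reference, the C-side prototypes of the two routines below, as
 * inferred from their stack-offset comments and from the way the
 * arguments are used (a sketch only: the real declarations presumably
 * live in a companion header, and uint32_t stands for whatever 32-bit
 * type the kernel uses):
 *
 *   void cpu_context_switch(struct cpu_state **from_ctxt,
 *                           struct cpu_state *to_ctxt);
 *   void cpu_context_exit_to(struct cpu_state *to_ctxt,
 *                            void (*reclaiming_func)(uint32_t arg),
 *                            uint32_t reclaiming_arg);
 */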
        .globl cpu_context_switch
        .type cpu_context_switch, @function
cpu_context_switch:
        // arg2= to_context   -- esp+64
        // arg1= from_context -- esp+60
        // caller ip          -- esp+56
        pushf                   // (eflags)     esp+52
        pushl %cs               // (cs)         esp+48
        pushl $resume_pc        // (ip)         esp+44
        pushl $0                // (error code) esp+40
        pushl %ebp              //              esp+36
        pushl %eax              //              esp+32
        pushl %ecx              //              esp+28
        pushl %edx              //              esp+24
        pushl %ebx              //              esp+20
        pushl %esi              //              esp+16
        pushl %edi              //              esp+12
        subl  $2, %esp          // (alignment)  esp+10
        pushw %ss               //              esp+8
        pushw %ds               //              esp+6
        pushw %es               //              esp+4
        pushw %fs               //              esp+2
        pushw %gs               //              esp
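
        /*
         * The pushes above lay the saved context out in memory as
         * follows (a sketch of the struct cpu_state layout this
         * implies, lowest address first; field names are illustrative
         * and the C side presumably mirrors this definition):
         *
         *   struct cpu_state {
         *     uint16_t gs, fs, es, ds, ss;
         *     uint16_t alignment_padding;   // the "subl $2" above
         *     uint32_t edi, esi, ebx, edx, ecx, eax, ebp;
         *     uint32_t error_code;
         *     uint32_t eip, cs, eflags;     // consumed by iret below
         *   } __attribute__((packed));
         */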

        /*
         * Now that the original eax/ebx have been saved, we can use
         * them safely
         */

        /* Store the address of the saved context into *from_context */
        movl  60(%esp), %ebx
        movl  %esp, (%ebx)

        /* This is the actual context switch! We change stacks here */
        movl  64(%esp), %esp
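
        /* From this point on we are running on to_context's stack:
           the registers popped below are the ones that were saved
           when that context was last suspended (or initially crafted,
           see cpu_context.c) */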

        /* Prepare the kernel TSS in case we are switching to a user
           thread: we make sure that we will come back into the kernel
           at a correct stack location */
        pushl %esp              /* Pass the location of the context we
                                   are restoring to the function */
        call  cpu_context_update_kernel_tss
        addl  $4, %esp

        /* Restore the CPU context */
        popw  %gs
        popw  %fs
        popw  %es
        popw  %ds
        popw  %ss
        addl  $2, %esp          /* Skip the alignment padding */
        popl  %edi
        popl  %esi
        popl  %ebx
        popl  %edx
        popl  %ecx
        popl  %eax
        popl  %ebp
        addl  $4, %esp          /* Ignore the "error code" */

        /* This restores the eip, cs and eflags registers in a single
           atomic step (note: NOT equivalent to "popfl ; ret", which
           would pop in the wrong order and leave cs untouched) */
        iret

resume_pc:
        // Same context as when cpu_context_switch was called:
        // arg2= to_context   -- esp+8
        // arg1= from_context -- esp+4
        // caller ip          -- esp
        ret
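
/*
 * Hypothetical usage from C (a sketch, assuming the prototypes given
 * above; 'current' and 'next' are invented names): the call only
 * appears to return -- through resume_pc -- once some other thread
 * switches back to from_ctxt:
 *
 *   cpu_context_switch(&current->cpu_ctxt, next->cpu_ctxt);
 *   // execution resumes here when 'current' is scheduled again
 */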

/* ------------------------- */
        .globl cpu_context_exit_to
        .type cpu_context_exit_to, @function
cpu_context_exit_to:
        // arg3= reclaiming_arg  -- esp+12
        // arg2= reclaiming_func -- esp+8
        // arg1= to_context      -- esp+4
        // caller ip             -- esp

        /* Store the current SP in a temporary register */
        movl  %esp, %eax

        /* This is the actual context switch! We change stacks here */
        movl  4(%eax), %esp

        /* Call the reclaiming function (remember: the old frame address
           is stored in eax) */
        pushl 12(%eax)
        call  *8(%eax)
        addl  $4, %esp
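
        /* reclaiming_func just ran on to_context's stack: this is the
           point where the resources of the exiting context (typically
           its kernel stack) can safely be reclaimed, since we are no
           longer using them */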

        /* Prepare the kernel TSS in case we are switching to a user
           thread: we make sure that we will come back into the kernel
           at a correct stack location */
        pushl %esp              /* Pass the location of the context we
                                   are restoring to the function */
        call  cpu_context_update_kernel_tss
        addl  $4, %esp

        /* Restore the CPU context */
        popw  %gs
        popw  %fs
        popw  %es
        popw  %ds
        popw  %ss
        addl  $2, %esp          /* Skip the alignment padding */
        popl  %edi
        popl  %esi
        popl  %ebx
        popl  %edx
        popl  %ecx
        popl  %eax
        popl  %ebp
        addl  $4, %esp          /* Ignore the "error code" */

        /* This restores the eip, cs and eflags registers in a single
           atomic step; unlike cpu_context_switch, this routine never
           returns to its caller */
        iret
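
/*
 * Hypothetical exit path from C (a sketch, assuming the prototypes
 * given above; delete_thread and free_thread are invented names):
 *
 *   static void delete_thread(uint32_t arg)
 *   {
 *     // runs on next's stack, so the dying thread's stack is unused
 *     free_thread((struct thread *)arg);
 *   }
 *
 *   cpu_context_exit_to(next->cpu_ctxt, delete_thread,
 *                       (uint32_t)current);
 *   // never reached
 */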