.file "cpu_context_switch.S"

.text
/**
 * C function called by the routines below in order to tell the CPU
 * where the kernel stack will be (needed by the interrupt handlers)
 * when next_ctxt comes back into kernel mode.
 *
 * void cpu_context_update_kernel_tss(struct cpu_state *next_ctxt)
 *
 * @see end of cpu_context.c
 */
.extern cpu_context_update_kernel_tss
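/*
 * For illustration only: a minimal C sketch of what such a helper
 * typically does on x86, assuming a global kernel_tss variable (the
 * name and exact types are assumptions, not this kernel's API; the real
 * implementation is at the end of cpu_context.c). The idea is to point
 * the TSS esp0/ss0 fields just past the saved context, so that an
 * interrupt taken while next_ctxt runs in user mode re-enters the
 * kernel on that thread's own kernel stack.
 *
 *   void cpu_context_update_kernel_tss(struct cpu_state *next_ctxt)
 *   {
 *     // esp0/ss0 are the standard x86 TSS fields used on a ring3->ring0
 *     // transition; kernel_tss and KERNEL_DATA_SELECTOR are hypothetical.
 *     kernel_tss.esp0 = (unsigned long)next_ctxt + sizeof(*next_ctxt);
 *     kernel_tss.ss0  = KERNEL_DATA_SELECTOR;
 *   }
 */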
.globl cpu_context_switch
.type cpu_context_switch, @function
cpu_context_switch:
// arg2= to_context -- esp+64
// arg1= from_context -- esp+60
// caller ip -- esp+56
pushf // (eflags) esp+52
pushl %cs // (cs) esp+48
pushl $resume_pc // (ip) esp+44
pushl $0 // (error code) esp+40
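/*
 * The dummy "error code" slot, together with the eflags/cs/eip words
 * pushed above, makes this saved frame look like the frame an interrupt
 * or exception handler builds on the kernel stack, so the same
 * struct cpu_state layout and the same restore sequence at the end of
 * this routine can presumably be shared with those handlers (see the
 * reference to the interrupt handlers in the header comment).
 */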
pushl %ebp
pushl %eax
pushl %ecx
pushl %edx
pushl %ebx
pushl %esi
pushl %edi
subl $2, %esp // (alignment) esp+10
pushw %ss // esp+8
pushw %ds // esp+6
pushw %es // esp+4
pushw %fs // esp+2
pushw %gs // esp
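/*
 * The frame just built corresponds, field for field, to the following C
 * view of struct cpu_state (from lowest to highest address; field names
 * are illustrative, the authoritative definition lives with cpu_context.c):
 *
 *   struct cpu_state {
 *     unsigned short gs, fs, es, ds, ss; // segment selectors (pushw)
 *     unsigned short alignment_padding;  // the "subl $2, %esp" above
 *     unsigned long  edi, esi, ebx, edx, ecx, eax, ebp;
 *     unsigned long  error_code;         // the "pushl $0" above
 *     unsigned long  eip, cs, eflags;    // restored by the final iret
 *   };
 */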
/*
 * Now that the original eax/ebx are stored, we can use them safely
 */
/* Store the address of the saved context into *from_context */
movl 60(%esp), %ebx
movl %esp, (%ebx)
/* This is the proper context switch! We change the stack here */
movl 64(%esp), %esp
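/*
 * In C-like pseudocode, the save-and-switch performed so far is just:
 *
 *   *from_context = old_esp;  // publish where this context was saved
 *   esp = to_context;         // adopt the destination context
 */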
/* Prepare kernel TSS in case we are switching to a user thread: we
   make sure that we will come back into the kernel at a correct
   stack location */
pushl %esp /* Pass the location of the context we are
              restoring to the function */
call cpu_context_update_kernel_tss
addl $4, %esp
/* Restore the CPU context */
popw %gs
popw %fs
popw %es
popw %ds
popw %ss
addl $2,%esp
popl %edi
popl %esi
popl %ebx
popl %edx
popl %ecx
popl %eax
popl %ebp
addl $4, %esp /* Ignore "error code" */
/* This restores the eflags, the cs and the eip registers */
iret /* pops eip, cs, and eflags from the stack */
resume_pc:
// Same context as that when cpu_context_switch got called
// arg2= to_context -- esp+8
// arg1= from_context -- esp+4
// caller ip -- esp
ret
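/*
 * From C, this routine would be declared and used roughly as follows
 * (signature inferred from the argument comments above; the exact
 * declaration lives alongside cpu_context.c):
 *
 *   void cpu_context_switch(struct cpu_state **from_context,
 *                           struct cpu_state *to_context);
 *
 *   // Typical call site (illustrative field names): save the current
 *   // thread's context and resume the next one.
 *   cpu_context_switch(&current->cpu_state, next->cpu_state);
 */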
/* ------------------------- */
.globl cpu_context_exit_to
.type cpu_context_exit_to, @function
cpu_context_exit_to:
// arg3= reclaiming_arg -- esp+12
// arg2= reclaiming_func -- esp+8
// arg1= to_context -- esp+4
// caller ip -- esp
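/*
 * Inferred C-level signature (illustrative; the authoritative prototype
 * lives alongside cpu_context.c). Unlike cpu_context_switch, nothing is
 * saved here, so this routine never returns to its caller:
 *
 *   void cpu_context_exit_to(struct cpu_state *to_context,
 *                            void (*reclaiming_func)(void *arg),
 *                            void *reclaiming_arg);
 */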
/* Store the current SP in a temporary register */
movl %esp, %eax
/* This is the proper context switch! We change the stack here */
movl 4(%eax), %esp
/* Call the reclaiming function (remember: the old frame address
   is stored in eax) */
pushl 12(%eax)
call *8(%eax)
addl $4, %esp
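/*
 * Note: reclaiming_func runs only after we have switched onto
 * to_context's stack, so it can safely release whatever the departing
 * context still owned (typically its kernel stack); reclaiming_arg is
 * passed to it as its single argument, matching the assumed signature
 * void reclaiming_func(void *reclaiming_arg).
 */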
/* Prepare kernel TSS in case we are switching to a user thread: we
   make sure that we will come back into the kernel at a correct
   stack location */
pushl %esp /* Pass the location of the context we are
              restoring to the function */
call cpu_context_update_kernel_tss
addl $4, %esp
/* Restore the CPU context */
popw %gs
popw %fs
popw %es
popw %ds
popw %ss
addl $2,%esp
popl %edi
popl %esi
popl %ebx
popl %edx
popl %ecx
popl %eax
popl %ebp
addl $4, %esp /* Ignore "error code" */
/* This restores the eflags, the cs and the eip registers */
iret /* pops eip, cs, and eflags from the stack */