#include "thread.h"
#include "alloc.h"
#include "assert.h"
#include "irq.h"
#include "klibc.h"
#include "list.h"
#include "mmuContext.h"
#include "process.h"
#include "time.h"
#include "types.h"
#include "vga.h"

static struct thread *currentThread;     // list of all threads; the running one is at the head
static struct thread *threadWithTimeout; // threads waiting with a timeout (timePrev/timeNext links)
static thread_id_t nextTid;              // TID counter for kernel threads ONLY

pid_t threadGetId(struct thread *th)
{
    return th->tid;
}

static void threadPrepareContext(struct thread *th);
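
/*
 * Terminate the calling thread: pick the next READY thread, mark the
 * caller EXITING and exit to the next context, handing the old thread
 * to threadDelete() so its resources are reclaimed.
 */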
void threadExit()
{
    uint32_t flags;

    disable_IRQs(flags);

    struct thread *current = currentThread;
    struct thread *next = threadSelectNext();

    if (next == current)
        assertmsg(0, "cannot exit thread");

    currentThread->state = EXITING;
    currentThread = next;
    currentThread->state = RUNNING;
    threadPrepareContext(next);
    cpu_context_exit_to(next->cpuState, (cpu_kstate_function_arg1_t *)threadDelete,
                        (uint32_t)current);

    /* Never reached: cpu_context_exit_to() does not return. */
    restore_IRQs(flags);

    return;
}
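
/*
 * Turn the boot flow into the first kernel thread ("[KINIT]"), reusing
 * the stack it is already running on, and initialize the scheduler lists.
 */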
int threadSetup(vaddr_t mainStack, size_t mainStackSize)
{
    struct thread *current = (struct thread *)malloc(sizeof(struct thread));

    if (current == NULL)
        return -ENOMEM;

    strzcpy(current->name, "[KINIT]", THREAD_NAME_MAX_LENGTH);
    current->stackAddr = mainStack;
    current->stackSize = mainStackSize;
    current->state = RUNNING;
    // malloc() does not zero the struct: clear the fields read later by
    // threadPrepareContext() and threadDelete().
    current->process = NULL;
    current->squattedContext = NULL;
    current->wqExit = NULL;

    list_singleton(currentThread, current);
    list_init_named(threadWithTimeout, timePrev, timeNext);

    return 0;
}
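
/*
 * Create a kernel thread that runs func(args) on a freshly allocated
 * kernel stack; threadExit() is passed to cpu_kstate_init() as the
 * routine to run when func returns. The thread is appended to the run
 * list in READY state.
 */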
struct thread *threadCreate(const char *name, cpu_kstate_function_arg1_t func, void *args)
{
    struct thread *thread = (struct thread *)malloc(sizeof(struct thread));

    if (!thread)
        return NULL;

    thread->stackAddr = (vaddr_t)malloc(THREAD_DEFAULT_STACK_SIZE);
    if (!thread->stackAddr) {
        free(thread);
        return NULL;
    }
#ifdef DEBUG
    printf("Alloc stack at 0x%p struct at 0x%p\n", (void *)thread->stackAddr, thread);
#endif
    thread->stackSize = THREAD_DEFAULT_STACK_SIZE;

    if (name)
        strzcpy(thread->name, name, THREAD_NAME_MAX_LENGTH);
    else
        strzcpy(thread->name, "[UNKNOWN]", THREAD_NAME_MAX_LENGTH);

    if (cpu_kstate_init(&thread->cpuState, (cpu_kstate_function_arg1_t *)func, (vaddr_t)args,
                        thread->stackAddr, thread->stackSize,
                        (cpu_kstate_function_arg1_t *)threadExit, 0))
        goto free_mem;

    thread->state = READY;
    thread->tid = nextTid++;
    thread->wqExit = NULL;
    // Kernel threads have no owning process and no borrowed MMU context.
    thread->process = NULL;
    thread->squattedContext = NULL;

    uint32_t flags;
    disable_IRQs(flags);
    list_add_tail(currentThread, thread);
    restore_IRQs(flags);

    return thread;

free_mem:
    free((void *)thread->stackAddr);
    free((void *)thread);
    return NULL;
}
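
/*
 * Create a user thread belonging to proc: allocate its kernel stack and
 * exit wait queue, build a user-mode CPU context starting at startPc
 * with arg1/arg2 and user stack startSP, then register the thread with
 * the process and the run list.
 */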
struct thread *threadCreateUser(const char *name, struct process *proc, uaddr_t startPc,
                                uint32_t arg1, uint32_t arg2, uaddr_t startSP)
{
    struct thread *thread = malloc(sizeof(struct thread));

    if (thread == NULL)
        return NULL;

    thread->stackAddr = (vaddr_t)malloc(THREAD_DEFAULT_STACK_SIZE);
    if (!thread->stackAddr) {
        free(thread);
        return NULL;
    }
    thread->stackSize = THREAD_DEFAULT_STACK_SIZE;

    thread->wqExit = (struct wait_queue *)malloc(sizeof(struct wait_queue));
    if (!thread->wqExit) {
        free((void *)thread->stackAddr);
        free(thread);
        return NULL;
    }
    waitQueueInit(thread->wqExit);
    thread->squattedContext = NULL; // not borrowing any MMU context

    if (name)
        strzcpy(thread->name, name, THREAD_NAME_MAX_LENGTH);
    else
        strzcpy(thread->name, "[UNKNOWN]", THREAD_NAME_MAX_LENGTH);

    if (cpu_ustate_init(&thread->cpuState, startPc, arg1, arg2, startSP, thread->stackAddr,
                        thread->stackSize)) {
        goto free_mem;
    }

    if (processAddThread(proc, thread))
        goto free_mem;

    thread->state = READY;
    thread->tid = processGetNextTid(proc);

    uint32_t flags;
    disable_IRQs(flags);
    list_add_tail(currentThread, thread);
    restore_IRQs(flags);

    return thread;

free_mem:
    free((void *)thread->wqExit); // allocated on every path that reaches free_mem
    free((void *)thread->stackAddr);
    free((void *)thread);
    return NULL;
}
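
/*
 * Reclaim an EXITING thread: remove it from the run list, wake any
 * thread waiting on its exit wait queue, release a borrowed MMU
 * context, detach it from its process and free its stack and struct.
 */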
void threadDelete(struct thread *thread)
{
    uint32_t flags;

    disable_IRQs(flags);
    list_delete(currentThread, thread);
    restore_IRQs(flags);

    assert(thread->state == EXITING);

    if (thread->wqExit) {
        waitUp(thread->wqExit);
    }

    if (thread->squattedContext) {
        mmuContextUnref(thread->squattedContext);
    }

    if (thread->process)
        processRemoveThread(thread);

#ifdef DEBUG
    printf("Free stack at 0x%p struct at 0x%p\n", (void *)thread->stackAddr, thread);
#endif
    free((void *)thread->stackAddr);
    free((void *)thread);
}
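
/*
 * Round-robin election: return the first READY thread after the current
 * one, or the current thread itself if no other thread is READY.
 */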
struct thread *threadSelectNext()
{
    struct thread *nextThread;
    int idx;

    list_foreach(currentThread->next, nextThread, idx)
    {
        if (nextThread->state == READY) {
            return nextThread;
        }
    }

    return currentThread;
}
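
/*
 * Save the interrupted CPU state, elect the next READY thread and
 * return the CPU state that should be resumed.
 */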
struct cpu_state *threadSwitch(struct cpu_state *prevCpu)
{
    uint32_t flags;
    struct thread *nextThread;

    disable_IRQs(flags);

    nextThread = threadSelectNext();
    currentThread->cpuState = prevCpu;

    if (nextThread != currentThread) {
        currentThread->state = READY;
        // printf(" Switch from %s to %s\n", currentThread->name, nextThread->name);
        currentThread = nextThread;
        currentThread->state = RUNNING;
        threadPrepareContext(nextThread);
    }

    restore_IRQs(flags);

    return nextThread->cpuState;
}
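
/* Return the number of threads currently linked on the run list. */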
int threadCount()
{
    struct thread *nextThread;
    int idx;
    uint32_t flags;

    disable_IRQs(flags);
    list_foreach(currentThread, nextThread, idx) {
        // Empty body: we only iterate so that idx counts the threads.
    }
    restore_IRQs(flags);

    return idx;
}
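
/*
 * Per-jiffy housekeeping: wake SLEEPING threads whose delay has elapsed
 * and flag/requeue WAITING threads whose timeout has expired.
 */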
int threadOnJieffiesTick()
{
    struct thread *nextThread;
    int idx;
    uint32_t flags;

    disable_IRQs(flags);

    list_foreach(currentThread, nextThread, idx)
    {
        if (nextThread->state == SLEEPING) {
            if (nextThread->jiffiesSleeping)
                nextThread->jiffiesSleeping--;
            if (!nextThread->jiffiesSleeping)
                nextThread->state = READY;
        }
    }

    list_foreach_named(threadWithTimeout, nextThread, idx, timePrev, timeNext)
    {
        if (nextThread->state == WAITING) {
            if (nextThread->jiffiesSleeping)
                nextThread->jiffiesSleeping--;
            if (!nextThread->jiffiesSleeping) {
                nextThread->sleepHaveTimeouted = 1;
                list_delete_named(threadWithTimeout, nextThread, timePrev, timeNext);
                threadAddThread(nextThread);
            }
        }
    }

    restore_IRQs(flags);

    return 0;
}

int threadUnsched(struct thread *th)
{
    list_delete(currentThread, th);

    return 0;
}
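
/*
 * Block `current` in favour of `next`, with an optional timeout of msec
 * milliseconds (0 means wait with no timeout). Returns non-zero if the
 * wait ended because the timeout expired.
 */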
// Must be called with IRQ disabled
int threadWait(struct thread *current, struct thread *next, unsigned long msec)
{
    if (current == next) {
        assertmsg(0, "Cannot yield from %s to %s\n", current->name, next->name);
        return 0;
    }

    assertmsg(next->state == READY, "thread %s is in state %d\n", next->name, next->state);

    current->jiffiesSleeping = msecs_to_jiffies(msec);
    current->sleepHaveTimeouted = 0;

    if (current->jiffiesSleeping)
        list_add_tail_named(threadWithTimeout, current, timePrev, timeNext);

    currentThread = next;
    currentThread->state = RUNNING;
    threadPrepareContext(next);
    cpu_context_switch(&current->cpuState, next->cpuState);

    return current->sleepHaveTimeouted;
}
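
/*
 * Voluntarily give up the CPU: switch to the next READY thread if there
 * is one, otherwise return immediately.
 */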
int threadYield()
{
    uint32_t flags;

    disable_IRQs(flags);

    struct thread *next = threadSelectNext();
    struct thread *current = currentThread;

    if (current == next) {
        restore_IRQs(flags);
        return 0;
    }

    assert(current->state == RUNNING);
    assertmsg(next->state == READY, "thread %s is in state %d\n", next->name, next->state);

    if (current->state == RUNNING)
        current->state = READY;
    // printf(" Yield from %s to %s\n", currentThread->name, next->name);
    currentThread = next;
    currentThread->state = RUNNING;
    threadPrepareContext(next);
    cpu_context_switch(&current->cpuState, next->cpuState);

    restore_IRQs(flags);

    return 0;
}

int threadMsleep(unsigned long msec)
{
    return threadUsleep(msec * 1000);
}
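
/*
 * Put the calling thread to sleep for usec microseconds (rounded to
 * jiffies) and schedule another thread in the meantime.
 */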
int threadUsleep(unsigned long usec)
{
    uint32_t flags;
    struct thread *next, *current;

    disable_IRQs(flags);

    current = currentThread;
    assertmsg(current->state == RUNNING, "thread %s is in state %d for %lu us\n", current->name,
              current->state, usec);

    current->state = SLEEPING;
    current->sleepHaveTimeouted = 0;
    current->jiffiesSleeping = usecs_to_jiffies(usec);
    next = threadSelectNext();

    assert(next != current);
    assert(next->state == READY);

    currentThread = next;
    currentThread->state = RUNNING;
    threadPrepareContext(next);
    cpu_context_switch(&current->cpuState, next->cpuState);
    restore_IRQs(flags);

    return current->sleepHaveTimeouted == 1;
}

struct thread *getCurrentThread()
{
    return currentThread;
}
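
/* Make th READY and put it back on the run list (no-op if already READY). */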
int threadAddThread(struct thread *th)
{
    if (th->state == READY)
        return 0;

    th->state = READY;
    list_add_tail(currentThread, th);

    return 0;
}
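
/*
 * Install the MMU context needed to run th: its process context for
 * user-mode threads, or a context it has explicitly borrowed via
 * threadChangeCurrentContext().
 */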
static void threadPrepareContext(struct thread *th)
{
    if (cpu_context_is_in_user_mode(th->cpuState)) {
        assert(th->process != NULL);
        assert(th->squattedContext == NULL);
        mmuContextSwitch(processGetMMUContext(th->process));
    } else if (th->squattedContext) {
        mmuContextSwitch(th->squattedContext);
    }
}
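
/*
 * Make the current thread temporarily run on the given MMU context;
 * pass NULL to release the previously borrowed context. The borrowed
 * context is reference-counted while it is in use.
 */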
int threadChangeCurrentContext(struct mmu_context *ctx)
{
    uint32_t flags;
    struct mmu_context *prev;

    assert(currentThread != NULL);

    prev = currentThread->squattedContext;

    if (ctx != NULL) {
        assert(prev == NULL);
    } else {
        assert(prev != NULL);
    }

    disable_IRQs(flags);
    currentThread->squattedContext = ctx;

    if (ctx != NULL) {
        mmuContextRef(ctx);
        mmuContextSwitch(ctx);
    } else {
        mmuContextUnref(prev);
    }
    restore_IRQs(flags);

    return 0;
}
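
/*
 * Hooks run when coming back from a syscall, exception or IRQ: they
 * record the saved CPU state of the interrupted thread and, for the
 * switch-back variants, re-install its MMU context before resuming it.
 */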
void threadPrepareSyscallSwitchBack(struct cpu_state *cpuState)
{
    currentThread->cpuState = cpuState;
    threadPrepareContext(currentThread);
}

void threadPrepareExceptionSwitchBack(struct cpu_state *cpuState)
{
    currentThread->cpuState = cpuState;
    threadPrepareContext(currentThread);
}

void threadPrepareIrqServicing(struct cpu_state *cpuState)
{
    currentThread->cpuState = cpuState;
}

void threadPrepareIrqSwitchBack(struct cpu_state *cpuState)
{
    currentThread->cpuState = cpuState;
    threadPrepareContext(currentThread);
}