2020-04-23 00:49:09 +02:00
|
|
|
#include "kthread.h"
|
|
|
|
#include "alloc.h"
|
2020-04-24 23:34:34 +02:00
|
|
|
#include "assert.h"
|
|
|
|
#include "irq.h"
|
2020-04-23 00:49:09 +02:00
|
|
|
#include "klibc.h"
|
2020-04-24 23:34:34 +02:00
|
|
|
#include "list.h"
|
2020-04-27 23:08:36 +02:00
|
|
|
#include "time.h"
|
2020-04-23 23:40:16 +02:00
|
|
|
#include "vga.h"
|
2020-04-23 00:49:09 +02:00
|
|
|
|
|
|
|
// Currently running thread; also the head of the circular run list
// (see list_singleton in kthreadSetup, list_foreach in kthreadSelectNext).
static struct kthread *currentThread;
|
2020-08-18 14:12:45 +02:00
|
|
|
// Threads blocked with a bounded wait, linked through timePrev/timeNext
// (armed in kthreadWait, decremented/expired in kthreadOnJieffiesTick).
static struct kthread *threadWithTimeout;
|
2020-04-23 00:49:09 +02:00
|
|
|
|
2020-04-24 23:34:34 +02:00
|
|
|
void kthreadExit()
|
|
|
|
{
|
2020-04-27 00:14:37 +02:00
|
|
|
uint32_t flags;
|
2021-01-25 20:05:38 +01:00
|
|
|
|
2020-04-27 00:14:37 +02:00
|
|
|
disable_IRQs(flags);
|
2021-01-25 20:05:38 +01:00
|
|
|
|
2020-04-27 00:14:37 +02:00
|
|
|
struct kthread *current = currentThread;
|
|
|
|
struct kthread *next = kthreadSelectNext();
|
2021-01-25 20:05:38 +01:00
|
|
|
|
2020-04-27 00:14:37 +02:00
|
|
|
if (next == current)
|
|
|
|
assert("cannot exit thread");
|
2021-01-25 20:05:38 +01:00
|
|
|
|
2020-05-03 14:45:26 +02:00
|
|
|
currentThread->state = EXITING;
|
|
|
|
currentThread = next;
|
|
|
|
currentThread->state = RUNNING;
|
2020-04-27 00:14:37 +02:00
|
|
|
cpu_context_exit_to(next->cpuState, (cpu_kstate_function_arg1_t *)kthreadDelete,
|
|
|
|
(uint32_t)current);
|
2021-01-25 20:05:38 +01:00
|
|
|
|
2020-04-27 00:14:37 +02:00
|
|
|
restore_IRQs(flags);
|
2021-01-25 20:05:38 +01:00
|
|
|
|
2020-04-27 00:14:37 +02:00
|
|
|
return;
|
2020-04-23 00:49:09 +02:00
|
|
|
}
|
|
|
|
|
2020-04-24 23:40:19 +02:00
|
|
|
// Turn the already-running boot context into the first kthread so the
// scheduler's circular run list has an initial element, and initialize
// the (empty) timeout list.
// Returns 0 on success, -1 on allocation failure.
int kthreadSetup(vaddr_t mainStack, size_t mainStackSize)
{
    struct kthread *current = (struct kthread *)malloc(sizeof(struct kthread));

    // BUGFIX: malloc result was previously used unchecked (NULL deref
    // in strzcpy on out-of-memory).
    if (!current)
        return -1;

    strzcpy(current->name, "[KINIT]", KTHREAD_NAME_MAX_LENGTH);
    current->stackAddr = mainStack;
    current->stackSize = mainStackSize;

    // The boot context is, by definition, the thread running right now.
    current->state = RUNNING;

    list_singleton(currentThread, current);
    list_init_named(threadWithTimeout, timePrev, timeNext);

    return 0;
}
|
|
|
|
|
2020-04-24 23:40:19 +02:00
|
|
|
// Allocate and schedule a new kernel thread that will run func(args) on a
// freshly allocated stack.  When func returns, kthreadExit() tears the
// thread down.  Returns the new thread, or NULL on failure.
struct kthread *kthreadCreate(const char *name, cpu_kstate_function_arg1_t func, void *args)
{
    uint32_t flags;
    struct kthread *newTh = (struct kthread *)malloc(sizeof(struct kthread));

    if (!newTh)
        return NULL;

    newTh->stackAddr = (vaddr_t)malloc(KTHREAD_DEFAULT_STACK_SIZE);
    newTh->stackSize = KTHREAD_DEFAULT_STACK_SIZE;
#ifdef DEBUG
    printf("Alloc stack at 0x%x struct at 0x%x\n", newTh->stackAddr, newTh);
#endif

    if (!newTh->stackAddr)
        goto free_mem;

    // Fall back to a placeholder name when the caller gave none.
    strzcpy(newTh->name, name ? name : "[UNKNOW]", KTHREAD_NAME_MAX_LENGTH);

    // Build the initial CPU context: entry point func(args), returning
    // into kthreadExit.
    if (cpu_kstate_init(&newTh->cpuState, (cpu_kstate_function_arg1_t *)func, (vaddr_t)args,
                        newTh->stackAddr, newTh->stackSize,
                        (cpu_kstate_function_arg1_t *)kthreadExit, 0))
        goto free_mem;

    newTh->state = READY;

    // Insert into the run list with IRQs masked: the tick handler walks it.
    disable_IRQs(flags);
    list_add_tail(currentThread, newTh);
    restore_IRQs(flags);

    return newTh;

free_mem:
    // free(NULL) is a no-op, so this is safe even when the stack
    // allocation is what failed.
    free((void *)newTh->stackAddr);
    free((void *)newTh);
    return NULL;
}
|
|
|
|
|
2020-04-24 23:40:19 +02:00
|
|
|
void kthreadDelete(struct kthread *thread)
|
|
|
|
{
|
2020-05-03 23:13:17 +02:00
|
|
|
uint32_t flags;
|
|
|
|
disable_IRQs(flags);
|
2020-04-27 00:14:37 +02:00
|
|
|
list_delete(currentThread, thread);
|
2020-04-23 00:49:09 +02:00
|
|
|
|
2020-08-28 22:38:05 +02:00
|
|
|
#ifdef DEBUG
|
|
|
|
printf("Free stack at 0x%x struct at 0x%x\n", thread->stackAddr, thread);
|
|
|
|
#endif
|
2020-04-27 00:14:37 +02:00
|
|
|
free((void *)thread->stackAddr);
|
|
|
|
free((void *)thread);
|
2020-05-03 23:13:17 +02:00
|
|
|
restore_IRQs(flags);
|
2020-04-23 00:49:09 +02:00
|
|
|
}
|
|
|
|
|
2020-04-24 23:40:19 +02:00
|
|
|
struct kthread *kthreadSelectNext()
|
|
|
|
{
|
2020-05-03 14:45:26 +02:00
|
|
|
struct kthread *nextThread;
|
|
|
|
int idx;
|
|
|
|
list_foreach(currentThread->next, nextThread, idx)
|
|
|
|
{
|
|
|
|
if (nextThread->state == READY) {
|
|
|
|
return nextThread;
|
|
|
|
}
|
|
|
|
}
|
2020-05-03 23:13:17 +02:00
|
|
|
return currentThread;
|
2020-04-23 23:40:16 +02:00
|
|
|
}
|
|
|
|
|
2020-04-27 23:08:36 +02:00
|
|
|
struct cpu_state *kthreadSwitch(struct cpu_state *prevCpu)
|
2020-04-24 23:34:34 +02:00
|
|
|
{
|
2020-04-27 00:14:37 +02:00
|
|
|
uint32_t flags;
|
2020-08-18 14:12:45 +02:00
|
|
|
struct kthread *nextThread;
|
|
|
|
|
2020-04-27 00:14:37 +02:00
|
|
|
disable_IRQs(flags);
|
2020-08-18 14:12:45 +02:00
|
|
|
|
|
|
|
nextThread = kthreadSelectNext();
|
|
|
|
currentThread->cpuState = prevCpu;
|
|
|
|
currentThread->state = READY;
|
|
|
|
currentThread = nextThread;
|
|
|
|
currentThread->state = RUNNING;
|
|
|
|
|
2020-04-27 00:14:37 +02:00
|
|
|
restore_IRQs(flags);
|
2020-08-18 14:12:45 +02:00
|
|
|
|
2020-04-27 00:14:37 +02:00
|
|
|
return nextThread->cpuState;
|
2020-04-23 00:49:09 +02:00
|
|
|
}
|
2020-04-24 23:34:34 +02:00
|
|
|
|
2020-05-03 14:45:26 +02:00
|
|
|
int kthreadOnJieffiesTick()
|
|
|
|
{
|
|
|
|
struct kthread *nextThread;
|
|
|
|
int idx;
|
|
|
|
uint32_t flags;
|
|
|
|
disable_IRQs(flags);
|
|
|
|
list_foreach(currentThread, nextThread, idx)
|
|
|
|
{
|
2020-07-08 23:08:50 +02:00
|
|
|
if (nextThread->state == SLEEPING && nextThread->jiffiesSleeping) {
|
2020-05-03 14:45:26 +02:00
|
|
|
nextThread->jiffiesSleeping--;
|
|
|
|
if (!nextThread->jiffiesSleeping) {
|
|
|
|
nextThread->state = READY;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2020-08-18 14:12:45 +02:00
|
|
|
list_foreach_named(threadWithTimeout, nextThread, idx, timePrev, timeNext)
|
|
|
|
{
|
|
|
|
if (nextThread->state == WAITING && nextThread->jiffiesSleeping) {
|
|
|
|
nextThread->jiffiesSleeping--;
|
|
|
|
if (!nextThread->jiffiesSleeping) {
|
|
|
|
nextThread->sleepHaveTimeouted = 1;
|
|
|
|
list_delete_named(threadWithTimeout, nextThread, timePrev, timeNext);
|
|
|
|
kthreadAddThread(nextThread);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2020-05-03 14:45:26 +02:00
|
|
|
restore_IRQs(flags);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-07-08 23:08:50 +02:00
|
|
|
int kthreadUnsched(struct kthread *th)
|
|
|
|
{
|
|
|
|
list_delete(currentThread, th);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-08-18 14:12:45 +02:00
|
|
|
// Must be called with IRQ disabled
// Block `current` and hand the CPU to `next`.  The caller is expected to
// have set current's state beforehand (the timeout scan in
// kthreadOnJieffiesTick looks for WAITING — confirm at call sites).
// With msec mapping to at least one jiffy, the wait is bounded: current
// is linked into the timeout list.  Returns non-zero when the wait ended
// by timeout, 0 otherwise.
int kthreadWait(struct kthread *current, struct kthread *next, unsigned long msec)
{
    if (current == next) {
        // Switching to oneself is a caller bug.
        assertmsg(0, "Cannot yield from %s to %s\n", current->name, next->name);
        return 0;
    }

    assertmsg(next->state == READY, "thread %s is in state %d\n", next->name, next->state);

    current->jiffiesSleeping = msecs_to_jiffies(msec);
    current->sleepHaveTimeouted = 0;

    // Only arm the timeout when the delay is non-zero in jiffies.
    if (current->jiffiesSleeping)
        list_add_tail_named(threadWithTimeout, current, timePrev, timeNext);

    currentThread = next;
    currentThread->state = RUNNING;
    cpu_context_switch(&current->cpuState, next->cpuState);

    // Execution resumes here once this thread is scheduled again.
    return current->sleepHaveTimeouted;
}
|
|
|
|
|
2020-04-24 23:34:34 +02:00
|
|
|
int kthreadYield()
|
|
|
|
{
|
2020-04-27 00:14:37 +02:00
|
|
|
uint32_t flags;
|
2020-07-08 23:08:50 +02:00
|
|
|
|
2020-04-27 00:14:37 +02:00
|
|
|
disable_IRQs(flags);
|
|
|
|
struct kthread *next = kthreadSelectNext();
|
|
|
|
struct kthread *current = currentThread;
|
2020-07-08 23:08:50 +02:00
|
|
|
|
2020-05-03 14:45:26 +02:00
|
|
|
if (current == next) {
|
|
|
|
restore_IRQs(flags);
|
|
|
|
return 0;
|
|
|
|
}
|
2020-07-08 23:08:50 +02:00
|
|
|
|
2020-05-03 23:13:17 +02:00
|
|
|
assert(current->state == RUNNING);
|
2020-07-08 23:08:50 +02:00
|
|
|
assertmsg(next->state == READY, "thread %s is in state %d\n", next->name, next->state);
|
|
|
|
|
|
|
|
if (current->state == RUNNING)
|
|
|
|
current->state = READY;
|
|
|
|
|
2020-05-03 14:45:26 +02:00
|
|
|
currentThread = next;
|
|
|
|
currentThread->state = RUNNING;
|
2020-04-27 00:14:37 +02:00
|
|
|
cpu_context_switch(¤t->cpuState, next->cpuState);
|
|
|
|
restore_IRQs(flags);
|
2020-07-08 23:08:50 +02:00
|
|
|
|
2020-04-27 00:14:37 +02:00
|
|
|
return 0;
|
2020-04-24 23:34:34 +02:00
|
|
|
}
|
2020-05-03 14:45:26 +02:00
|
|
|
|
|
|
|
int kthreadMsleep(unsigned long msec)
|
|
|
|
{
|
|
|
|
uint32_t flags;
|
2020-08-16 00:24:59 +02:00
|
|
|
struct kthread *next, *current;
|
|
|
|
|
2020-05-03 14:45:26 +02:00
|
|
|
disable_IRQs(flags);
|
2020-08-16 00:24:59 +02:00
|
|
|
|
|
|
|
current = currentThread;
|
2020-07-08 23:08:50 +02:00
|
|
|
assertmsg(current->state == RUNNING, "thread %s is in state %d for %d\n", current->name,
|
|
|
|
current->state, msec);
|
2020-08-16 00:24:59 +02:00
|
|
|
|
2020-08-18 14:12:45 +02:00
|
|
|
current->state = SLEEPING;
|
|
|
|
current->sleepHaveTimeouted = 0;
|
|
|
|
current->jiffiesSleeping = msecs_to_jiffies(msec);
|
|
|
|
next = kthreadSelectNext();
|
2020-08-16 00:24:59 +02:00
|
|
|
|
|
|
|
assert(next != current);
|
2020-05-03 14:45:26 +02:00
|
|
|
assert(next->state == READY);
|
2020-08-16 00:24:59 +02:00
|
|
|
|
2020-05-03 14:45:26 +02:00
|
|
|
currentThread = next;
|
|
|
|
currentThread->state = RUNNING;
|
|
|
|
cpu_context_switch(¤t->cpuState, next->cpuState);
|
|
|
|
restore_IRQs(flags);
|
2020-08-18 14:12:45 +02:00
|
|
|
return current->sleepHaveTimeouted == 1;
|
2020-05-03 14:45:26 +02:00
|
|
|
}
|
|
|
|
|
2020-08-16 00:24:59 +02:00
|
|
|
struct kthread *getCurrentThread()
|
2020-05-03 14:45:26 +02:00
|
|
|
{
|
|
|
|
return currentThread;
|
|
|
|
}
|
2020-07-08 23:08:50 +02:00
|
|
|
|
|
|
|
int kthreadAddThread(struct kthread *th)
|
|
|
|
{
|
|
|
|
if (th->state == READY)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
th->state = READY;
|
|
|
|
list_add_tail(currentThread, th);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
2021-10-25 23:25:31 +02:00
|
|
|
// Hook invoked before switching back from a syscall context.
// No-op on this port; kept so the low-level switch path has a stable hook.
void thread_prepare_syscall_switch_back(struct cpu_state *cpu_state)
{
    (void)cpu_state; // unused
}
|
|
|
|
|
|
|
|
// Hook invoked before switching back from an exception context.
// No-op on this port; kept so the low-level switch path has a stable hook.
void thread_prepare_exception_switch_back(struct cpu_state *cpu_state)
{
    (void)cpu_state; // unused
}
|
|
|
|
// Hook invoked before switching back from an IRQ context.
// No-op on this port; kept so the low-level switch path has a stable hook.
void thread_prepare_irq_switch_back(struct cpu_state *cpu_state)
{
    (void)cpu_state; // unused
}
|