Style: harmonize formatting

Thanks to: "clang-format -i -style=file **/*.{c,h}"
Mathieu Maret 2020-04-27 00:14:37 +02:00
parent fd6551e90c
commit 3b97d0307d
40 changed files with 1405 additions and 1458 deletions


@@ -18,208 +18,208 @@ static int formatPage(struct slabEntry *desc, size_t size, int selfContained);
int allocInit(void)
{
    uint start = log2(sizeof(void *));
    list_init(slub);
    int ret;
    if ((ret = allocBookSlab(sizeof(struct slabDesc), 1))) {
        pr_devel("Fail to allocBookSlab %d for slabDesc :( \n", ret);
        return ret;
    }
    if ((ret = allocBookSlab(sizeof(struct slabEntry), 1))) {
        pr_devel("Fail to allocBookSlab %d for slabEntry :( \n", ret);
        return ret;
    }
    for (uint i = start; i <= SLUB_SIZE; i++) {
        if ((ret = allocBookSlab(1U << i, 0))) {
            if (ret == -EEXIST)
                continue;
            pr_devel("Fail to allocBookSlab %d for %d \n", ret, (1U << i));
            return ret;
        }
    }
    return 0;
}
int allocBookSlab(size_t size, int selfContained)
{
    pr_devel("%s for size %d is self %d\n", __func__, size, selfContained);
    struct slabDesc *slab = NULL;
    int slabIdx;
    int ret;

    list_foreach(slub, slab, slabIdx)
    {
        if (slab->size == size) {
            return -EEXIST;
        }
        if (slab->size > size) {
            break;
        }
    }

    struct slabDesc *newSlab;
    if ((ret = allocSlab(&newSlab, size, selfContained)))
        return ret;

    if (list_foreach_early_break(slub, slab, slabIdx)) {
        list_insert_before(slub, slab, newSlab);
    } else {
        list_add_tail(slub, newSlab);
    }

    return 0;
}
int allocSlab(struct slabDesc **desc, size_t size, int selfContained)
{
    // pr_devel("%s for size %d is self %d\n", __func__, size, selfContained);
    if (size > PAGE_SIZE)
        return -ENOENT;
    paddr_t alloc = allocPhyPage();
    if (alloc == (paddr_t)NULL)
        return -ENOMEM;
    if (pageMap((vaddr_t)alloc, alloc, PAGING_MEM_WRITE))
        return -ENOMEM;

    if (selfContained) {
        *desc = (struct slabDesc *)alloc;
        ((*desc)->slab).freeEl = (char *)(*desc) + sizeof(struct slabDesc);
    } else {
        *desc = malloc(sizeof(struct slabDesc));
        (*desc)->slab.freeEl = (void *)alloc;
    }
    struct slabEntry *slab = &(*desc)->slab;
    list_singleton(slab, slab);
    slab->page = (vaddr_t)alloc;
    slab->full = 0;
    (*desc)->size = size;
    // pr_devel("got page %d for size %d first %d", alloc, size, (*desc)->slab.freeEl);
    return formatPage(&(*desc)->slab, size, selfContained);
}
int allocSlabEntry(struct slabEntry **desc, size_t size, int selfContained)
{
    pr_devel("%s for size %d is self %d\n", __func__, size, selfContained);
    if (size > PAGE_SIZE)
        return -ENOENT;
    paddr_t alloc = allocPhyPage();
    if (alloc == (paddr_t)NULL)
        return -ENOMEM;
    if (pageMap((vaddr_t)alloc, alloc, PAGING_MEM_WRITE))
        return -ENOMEM;

    if (selfContained) {
        *desc = (struct slabEntry *)alloc;
        (*desc)->freeEl = (char *)(*desc) + sizeof(struct slabEntry);
    } else {
        *desc = malloc(sizeof(struct slabEntry));
        (*desc)->freeEl = (void *)alloc;
    }
    list_singleton(*desc, *desc);
    (*desc)->page = (vaddr_t)alloc;
    (*desc)->full = 0;
    // pr_devel("got page %d for size %d first %d", alloc, size, (*desc)->freeEl);
    return formatPage((*desc), size, selfContained);
}
static int formatPage(struct slabEntry *desc, size_t size, int selfContained)
{
    char *cur = desc->freeEl;
    ulong nbEl = PAGE_SIZE / size - 1;
    if (selfContained)
        nbEl = (PAGE_SIZE - sizeof(struct slabDesc)) / size - 1;
    ulong i;
    for (i = 0; i < nbEl; i++) {
        *((vaddr_t *)cur) = (vaddr_t)cur + size;
        cur += size;
    }
    *((vaddr_t *)cur) = (vaddr_t)NULL;
    // pr_devel("last at %d allocated %d\n", cur, i + 1);
    return 0;
}
static void *allocFromSlab(struct slabEntry *slab)
{
    vaddr_t *next = slab->freeEl;
    if (*next == (vaddr_t)NULL) {
        pr_devel("Slab @%d is now full\n", slab);
        slab->full = 1;
    } else {
        slab->freeEl = (void *)(*next);
    }
    return (void *)next;
}
void *malloc(size_t size)
{
    if (size > (1U << SLUB_SIZE)) {
        printf("implement malloc for big size\n");
        return NULL;
    }

    struct slabDesc *slab;
    uint slubIdx;
    list_foreach(slub, slab, slubIdx)
    {
        if (size <= slab->size)
            break;
    }

    struct slabEntry *slabEntry;
    int slabIdx;
    list_foreach(&slab->slab, slabEntry, slabIdx)
    {
        if (!slabEntry->full) {
            // pr_devel("found place in slub %d at idx %d for size %d\n", slubIdx,
            // slabIdx, size);
            return allocFromSlab(slabEntry);
        }
    }

    // No room found
    struct slabEntry *newSlabEntry;
    struct slabEntry *slabList = &slab->slab;
    int ret;
    if ((ret = allocSlabEntry(&newSlabEntry, slab->size, IS_SELF_CONTAINED(&slab->slab)))) {
        pr_devel("Fail to allocSlabEntry %d\n", ret);
        return NULL;
    }
    pr_devel("Allocate new slab for object of size %d\n", slab->size);
    list_add_tail(slabList, newSlabEntry);
    return allocFromSlab(newSlabEntry);
}
int freeFromSlab(void *ptr, struct slabEntry *slab)
{
    struct slabEntry *slabEntry;
    int slabIdx;
    list_foreach(slab, slabEntry, slabIdx)
    {
        if ((slabEntry->page <= (vaddr_t)ptr) &&
            ((vaddr_t)ptr < (slabEntry->page + PAGE_SIZE))) {
            // pr_devel("free place! was %d is now %d\n", slabEntry->freeEl, ptr);
            if (slabEntry->full) {
                *((vaddr_t *)ptr) = (vaddr_t)NULL;
            } else {
                *((vaddr_t *)ptr) = (vaddr_t)slabEntry->freeEl;
            }
            slabEntry->freeEl = ptr;
            slabEntry->full = 0;
            return 1;
        }
    }
    return 0;
}
void free(void *ptr)
{
    if (!ptr)
        return;

    struct slabDesc *slab;
    int slabIdx;
    list_foreach(slub, slab, slabIdx)
    {
        struct slabEntry *slabEntry;
        int entryIdx;
        list_foreach(&slab->slab, slabEntry, entryIdx)
        {
            if (freeFromSlab(ptr, slabEntry))
                return;
        }
    }
    pr_devel("free: slab not found\n");
}


@@ -3,21 +3,21 @@
#include "stdarg.h"

struct slabEntry {
    vaddr_t page;
    void *freeEl;
    char full;
    struct slabEntry *next;
    struct slabEntry *prev;
};

struct slabDesc {
    struct slabEntry slab;
    size_t size;
    struct slabDesc *next;
    struct slabDesc *prev;
};

int allocInit(void);
int allocBookSlab(size_t size, int selfContained);
void *malloc(size_t size);
void free(void *ptr);
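The allocator above follows the usual slab scheme: allocInit() registers one cache per power-of-two size, malloc() picks the smallest cache that fits and pops the head of that cache's per-page free list, and free() walks the caches to find the page owning the pointer. A minimal usage sketch, assuming these declarations live in a header named alloc.h (the file name and the calling code are illustrative, not part of this commit):

/* Hypothetical caller of the slab allocator; error handling kept minimal on purpose. */
#include "alloc.h" /* assumed header name for the declarations above */

struct foo {
    int id;
    struct foo *next;
};

int allocSmokeTest(void)
{
    if (allocInit()) /* registers the slabDesc/slabEntry caches and the 2^n caches */
        return -1;

    struct foo *f = malloc(sizeof(struct foo));
    if (f == NULL)
        return -1;

    f->id = 42;
    free(f); /* puts the object back on its slab's free list */
    return 0;
}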


@@ -1,22 +1,24 @@
#pragma once
#include "klibc.h"
#include "stack.h"

#define assert(p)                                                            \
    do {                                                                     \
        if (!(p)) {                                                          \
            printf("BUG at %s:%d assert(%s)\n", __FILE__, __LINE__, #p);     \
            printStackTrace(5);                                              \
            while (1) {                                                      \
            }                                                                \
        }                                                                    \
    } while (0)

#define assertmsg(p, ...)                                                    \
    do {                                                                     \
        if (!(p)) {                                                          \
            printf("BUG at %s:%d assert(%s)\n", __FILE__, __LINE__, #p);     \
            printf(__VA_ARGS__);                                             \
            printStackTrace(5);                                              \
            while (1) {                                                      \
            }                                                                \
        }                                                                    \
    } while (0)
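Both macros evaluate the condition, print the failing expression with its file and line, dump a short stack trace via printStackTrace(5) and then spin forever; assertmsg() additionally forwards a printf-style message. A small hypothetical use, guarding a physical page allocation (the helper and variable names are illustrative only):

/* Hypothetical caller: hangs with a stack trace if the physical page allocation failed. */
void exampleAssertUsage(void)
{
    paddr_t page = allocPhyPage();
    assertmsg(page != (paddr_t)NULL, "allocPhyPage failed: out of physical pages?\n");
    assert((page % PAGE_SIZE) == 0);
}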


@@ -24,34 +24,34 @@
 * irq_wrappers.S/exception_wrappers.S !!! Hence the constraint above.
 */
struct cpu_state {
    /* (Lower addresses) */

    /* These are Matos/SOS convention */
    uint16_t gs;
    uint16_t fs;
    uint16_t es;
    uint16_t ds;
    uint16_t cpl0_ss;           /* This is ALWAYS the Stack Segment of the
                                   Kernel context (CPL0) of the interrupted
                                   thread, even for a user thread */
    uint16_t alignment_padding; /* unused */
    uint32_t eax;
    uint32_t ecx;
    uint32_t edx;
    uint32_t ebx;
    uint32_t ebp;
    uint32_t esp;
    uint32_t esi;
    uint32_t edi;

    /* MUST NEVER CHANGE (dependent on the IA32 iret instruction) */
    uint32_t error_code;
    vaddr_t eip;
    uint32_t cs; /* 32bits according to the specs ! However, the CS
                    register is really 16bits long */
    uint32_t eflags;

    /* (Higher addresses) */
} __attribute__((packed));
/**
@@ -73,7 +73,7 @@ struct cpu_state {
 * Structure of an interrupted Kernel thread's context
 */
struct cpu_kstate {
    struct cpu_state regs;
} __attribute__((packed));
/**
@@ -88,125 +88,125 @@ static void core_routine(cpu_kstate_function_arg1_t *start_func, void *start_arg
static void core_routine(cpu_kstate_function_arg1_t *start_func, void *start_arg,
                         cpu_kstate_function_arg1_t *exit_func, void *exit_arg)
{
    start_func(start_arg);
    exit_func(exit_arg);

    assert(!"The exit function of the thread should NOT return !");
    for (;;)
        ;
}
int cpu_kstate_init(struct cpu_state **ctxt, cpu_kstate_function_arg1_t *start_func,
                    vaddr_t start_arg, vaddr_t stack_bottom, size_t stack_size,
                    cpu_kstate_function_arg1_t *exit_func, vaddr_t exit_arg)
{
    /* We are initializing a Kernel thread's context */
    struct cpu_kstate *kctxt;

    /* This is a critical internal function, so that it is assumed that
       the caller knows what he does: we legitimally assume that values
       for ctxt, start_func, stack_* and exit_func are allways VALID ! */

    /* Setup the stack.
     *
     * On x86, the stack goes downward. Each frame is configured this
     * way (higher addresses first):
     *
     * - (optional unused space. As of gcc 3.3, this space is 24 bytes)
     * - arg n
     * - arg n-1
     * - ...
     * - arg 1
     * - return instruction address: The address the function returns to
     *   once finished
     * - local variables
     *
     * The remaining of the code should be read from the end upward to
     * understand how the processor will handle it.
     */

    vaddr_t tmp_vaddr = stack_bottom + stack_size;
    uint32_t *stack = (uint32_t *)tmp_vaddr;

    /* If needed, poison the stack */
#ifdef CPU_STATE_DETECT_UNINIT_KERNEL_VARS
    memset((void *)stack_bottom, CPU_STATE_STACK_POISON, stack_size);
#elif defined(CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW)
    cpu_state_prepare_detect_kernel_stack_overflow(stack_bottom, stack_size);
#endif

    /* Simulate a call to the core_routine() function: prepare its
       arguments */
    *(--stack) = exit_arg;
    *(--stack) = (uint32_t)exit_func;
    *(--stack) = start_arg;
    *(--stack) = (uint32_t)start_func;
    *(--stack) = 0; /* Return address of core_routine => force page fault */

    /*
     * Setup the initial context structure, so that the CPU will execute
     * the function core_routine() once this new context has been
     * restored on CPU
     */

    /* Compute the base address of the structure, which must be located
       below the previous elements */
    tmp_vaddr = ((vaddr_t)stack) - sizeof(struct cpu_kstate);
    kctxt = (struct cpu_kstate *)tmp_vaddr;

    /* Initialize the CPU context structure */
    memset(kctxt, 0x0, sizeof(struct cpu_kstate));

    /* Tell the CPU context structure that the first instruction to
       execute will be that of the core_routine() function */
    kctxt->regs.eip = (uint32_t)core_routine;

    /* Setup the segment registers */
    kctxt->regs.cs = BUILD_SEGMENT_REG_VALUE(0, FALSE, SEG_KCODE);      /* Code */
    kctxt->regs.ds = BUILD_SEGMENT_REG_VALUE(0, FALSE, SEG_KDATA);      /* Data */
    kctxt->regs.es = BUILD_SEGMENT_REG_VALUE(0, FALSE, SEG_KDATA);      /* Data */
    kctxt->regs.cpl0_ss = BUILD_SEGMENT_REG_VALUE(0, FALSE, SEG_KDATA); /* Stack */
    /* fs and gs unused for the moment. */

    /* The newly created context is initially interruptible */
    kctxt->regs.eflags = (1 << 9); /* set IF bit */

    /* Finally, update the generic kernel/user thread context */
    *ctxt = (struct cpu_state *)kctxt;

    return 0;
}
#if defined(CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW)
void cpu_state_prepare_detect_kernel_stack_overflow(const struct cpu_state *ctxt,
                                                    vaddr_t stack_bottom, size_t stack_size)
{
    (void)ctxt;
    size_t poison_size = CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW;

    if (poison_size > stack_size)
        poison_size = stack_size;

    memset((void *)stack_bottom, CPU_STATE_STACK_POISON, poison_size);
}

void cpu_state_detect_kernel_stack_overflow(const struct cpu_state *ctxt, vaddr_t stack_bottom,
                                            size_t stack_size)
{
    unsigned char *c;
    size_t i;

    /* On Matos/SOS, "ctxt" corresponds to the address of the esp register of
       the saved context in Kernel mode (always, even for the interrupted
       context of a user thread). Here we make sure that this stack
       pointer is within the allowed stack area */
    assert(((vaddr_t)ctxt) >= stack_bottom);
    assert(((vaddr_t)ctxt) + sizeof(struct cpu_kstate) <= stack_bottom + stack_size);

    /* Check that the bottom of the stack has not been altered */
    for (c = (unsigned char *)stack_bottom, i = 0;
         (i < CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW) && (i < stack_size); c++, i++) {
        assert(CPU_STATE_STACK_POISON == *c);
    }
}
#endif
@@ -216,26 +216,27 @@ void cpu_state_detect_kernel_stack_overflow(const struct cpu_state *ctxt, vaddr_
vaddr_t cpu_context_get_PC(const struct cpu_state *ctxt)
{
    assert(NULL != ctxt);

    /* This is the PC of the interrupted context (ie kernel or user
       context). */
    return ctxt->eip;
}

vaddr_t cpu_context_get_SP(const struct cpu_state *ctxt)
{
    assert(NULL != ctxt);

    /* On Matos/SOS, "ctxt" corresponds to the address of the esp register of
       the saved context in Kernel mode (always, even for the interrupted
       context of a user thread). */
    return (vaddr_t)ctxt;
}

void cpu_context_dump(const struct cpu_state *ctxt)
{
    printf("CPU: eip=%x esp=%x eflags=%x cs=%x ds=%x ss=%x err=%x", (unsigned)ctxt->eip,
           (unsigned)ctxt, (unsigned)ctxt->eflags,
           (unsigned)GET_CPU_CS_REGISTER_VALUE(ctxt->cs), (unsigned)ctxt->ds,
           (unsigned)ctxt->cpl0_ss, (unsigned)ctxt->error_code);
}


@@ -5,16 +5,16 @@
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
USA.
*/
#pragma once
@@ -25,10 +25,9 @@
* be some kind of architecture-independent.
*/
#include "errno.h"
#include "stdarg.h"
#include "types.h"
#include "errno.h"
/**
* Opaque structure storing the CPU context of an inactive kernel or
@ -41,13 +40,11 @@
*/
struct cpu_state;
/**
* The type of the functions passed as arguments to the Kernel thread
* related functions.
*/
typedef void(cpu_kstate_function_arg1_t(void *arg1));
/**
* Function to create an initial context for a kernel thread starting
@@ -83,14 +80,9 @@ typedef void (cpu_kstate_function_arg1_t(void * arg1));
*
* @note the newly created context is INTERRUPTIBLE by default !
*/
int cpu_kstate_init(struct cpu_state **kctxt, cpu_kstate_function_arg1_t *start_func,
                    vaddr_t start_arg, vaddr_t stack_bottom, size_t stack_size,
                    cpu_kstate_function_arg1_t *exit_func, vaddr_t exit_arg);
/**
* Function that performs an immediate context-switch from one
@@ -103,9 +95,7 @@ int cpu_kstate_init(struct cpu_state **kctxt,
* @param to_ctxt The CPU will resume its execution with the struct
* cpu_state located at this address. Must NOT be NULL.
*/
void cpu_context_switch(struct cpu_state **from_ctxt, struct cpu_state *to_ctxt);
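Together with cpu_kstate_init() this is the pair a scheduler uses to start a kernel thread: build a fresh context on a private stack, then switch to it. A sketch under that assumption; the thread body, the exit routine, the one-page stack and the spawn helper below are hypothetical and not declared anywhere in this commit:

/* Hypothetical helper: create a kernel thread context and switch to it immediately. */
static void threadBody(void *arg)
{
    (void)arg; /* the thread's work goes here */
}

static void threadExit(void *arg)
{
    (void)arg;
    for (;;)
        ; /* a real kernel would reclaim the stack and reschedule here */
}

int spawnAndSwitch(struct cpu_state **newCtxt, struct cpu_state **currentCtxt)
{
    /* One identity-mapped physical page used as the kernel stack (assumption). */
    paddr_t stack = allocPhyPage();
    if (stack == (paddr_t)NULL)
        return -ENOMEM;
    if (pageMap((vaddr_t)stack, stack, PAGING_MEM_WRITE))
        return -ENOMEM;

    int ret = cpu_kstate_init(newCtxt, threadBody, (vaddr_t)0, (vaddr_t)stack, PAGE_SIZE,
                              threadExit, (vaddr_t)0);
    if (ret)
        return ret;

    /* Save the caller's context and resume execution inside threadBody(). */
    cpu_context_switch(currentCtxt, *newCtxt);
    return 0;
}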
/*
* Switch to the new given context (of a kernel/user thread) without
@@ -121,52 +111,43 @@ void cpu_context_switch(struct cpu_state **from_ctxt,
* called after having changed the stack, but before restoring the CPU
* context to switch_to_ctxt.
*/
void cpu_context_exit_to(struct cpu_state *switch_to_ctxt,
                         cpu_kstate_function_arg1_t *reclaiming_func, uint32_t reclaiming_arg)
    __attribute__((noreturn));
/* =======================================================================
* Public Accessor functions
*/
/**
* Return Program Counter stored in the saved kernel/user context
*/
vaddr_t cpu_context_get_PC(const struct cpu_state *ctxt);
/**
* Return Stack Pointer stored in the saved kernel/user context
*/
vaddr_t cpu_context_get_SP(const struct cpu_state *ctxt);
/**
* Dump the contents of the CPU context (bochs + x86_videomem)
*/
void cpu_context_dump(const struct cpu_state *ctxt);
/* =======================================================================
* Public Accessor functions TO BE USED ONLY BY Exception handlers
*/
/**
* Return the argument passed by the CPU upon exception, as stored in the
* saved context
*/
uint32_t cpu_context_get_EX_info(const struct cpu_state *ctxt);
/**
* Return the faulting address of the exception
*/
vaddr_t cpu_context_get_EX_faulting_vaddr(const struct cpu_state *ctxt);
/* =======================================================================
* Macros controlling stack poisoning.
@@ -189,20 +170,18 @@ cpu_context_get_EX_faulting_vaddr(const struct cpu_state *ctxt);
* probable stack overflow. Its value indicates the number of bytes
* used for this detection.
*/
#define CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW 64
/* #undef CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW */
#if defined(CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW)
void cpu_state_prepare_detect_kernel_stack_overflow(const struct cpu_state *ctxt,
                                                    vaddr_t kernel_stack_bottom,
                                                    size_t kernel_stack_size);
void cpu_state_detect_kernel_stack_overflow(const struct cpu_state *ctxt,
                                            vaddr_t kernel_stack_bottom,
                                            size_t kernel_stack_size);
#else
#define cpu_state_prepare_detect_kernel_stack_overflow(ctxt, stkbottom, stksize) ({/* nop \
                                                                                   */})
#define cpu_state_detect_kernel_stack_overflow(ctxt, stkbottom, stksize) ({/* nop */})
#endif


@@ -1,37 +1,36 @@
#pragma once
#define EPERM   1  /* Operation not permitted */
#define ENOENT  2  /* No such file or directory */
#define ESRCH   3  /* No such process */
#define EINTR   4  /* Interrupted system call */
#define EIO     5  /* I/O error */
#define ENXIO   6  /* No such device or address */
#define E2BIG   7  /* Argument list too long */
#define ENOEXEC 8  /* Exec format error */
#define EBADF   9  /* Bad file number */
#define ECHILD  10 /* No child processes */
#define EAGAIN  11 /* Try again */
#define ENOMEM  12 /* Out of memory */
#define EACCES  13 /* Permission denied */
#define EFAULT  14 /* Bad address */
#define ENOTBLK 15 /* Block device required */
#define EBUSY   16 /* Device or resource busy */
#define EEXIST  17 /* File exists */
#define EXDEV   18 /* Cross-device link */
#define ENODEV  19 /* No such device */
#define ENOTDIR 20 /* Not a directory */
#define EISDIR  21 /* Is a directory */
#define EINVAL  22 /* Invalid argument */
#define ENFILE  23 /* File table overflow */
#define EMFILE  24 /* Too many open files */
#define ENOTTY  25 /* Not a typewriter */
#define ETXTBSY 26 /* Text file busy */
#define EFBIG   27 /* File too large */
#define ENOSPC  28 /* No space left on device */
#define ESPIPE  29 /* Illegal seek */
#define EROFS   30 /* Read-only file system */
#define EMLINK  31 /* Too many links */
#define EPIPE   32 /* Broken pipe */
#define EDOM    33 /* Math argument out of domain of func */
#define ERANGE  34 /* Math result not representable */


@@ -9,16 +9,15 @@ exception_handler exception_handler_array[EXCEPTION_NUM] = {
int exceptionSetRoutine(int exception, exception_handler handler)
{
    uint32_t flags;

    if ((exception < 0) || exception >= EXCEPTION_NUM)
        return -1;

    disable_IRQs(flags);
    exception_handler_array[exception] = handler;
    idt_set_handler(EXCEPTION_INTERRUPT_BASE_ADDRESS + exception, (unsigned int)handler, 0);
    restore_IRQs(flags);
    return 0;
}


@@ -41,6 +41,5 @@
#define EXCEPTION_NUM 32
typedef void (*exception_handler)(struct interrupt_frame *frame, ulong error_code);
int exceptionSetRoutine(int exception, exception_handler handler);


@@ -6,16 +6,16 @@
// Need GCC > 6
#define DEFINE_INTERRUPT(int_nb)                                                           \
    __attribute__((interrupt)) void print_handler_##int_nb(struct interrupt_frame *frame, \
                                                           ulong error_code)              \
    {                                                                                      \
        int intNbInt = int_nb;                                                             \
        printStringDetails("EXCEPTION ", RED, BLACK, 0, VGA_HEIGHT - 1);                   \
        printIntDetails(intNbInt, RED, BLACK, 11, VGA_HEIGHT - 1);                         \
        printIntDetails(error_code, RED, BLACK, 14, VGA_HEIGHT - 1);                       \
        printf("Exception %d (Err %d) at 0x%x\n", int_nb, error_code, frame->eip);         \
        asm("hlt");                                                                        \
    }
DEFINE_INTERRUPT(EXCEPTION_DOUBLE_FAULT)
DEFINE_INTERRUPT(EXCEPTION_DIVIDE_ZERO)
@@ -56,14 +56,14 @@ DEFINE_INTERRUPT(EXCEPTION_RESERVED_11)
__attribute__((interrupt)) void pagefault_handler(struct interrupt_frame *frame,
                                                  ulong error_code)
{
    // A page fault has occurred.
    // The faulting address is stored in the CR2 register.
    uint32_t faulting_address;
    asm volatile("mov %%cr2, %0" : "=r"(faulting_address));

    printStringDetails("PAGE FAULT", RED, BLACK, 0, VGA_HEIGHT - 1);
    printIntDetails(error_code, RED, BLACK, 11, VGA_HEIGHT - 1);
    (void)faulting_address;
    (void)frame;
    (void)error_code;
}


@@ -75,33 +75,32 @@ struct x86_gdt_register {
* 0..4GB addresses to be mapped to the linear 0..4GB linear
* addresses.
*/
#define BUILD_GDTE(descr_privilege_level, is_code)                                          \
    ((struct x86_segment_descriptor){                                                       \
        .limit_15_0 = 0xffff,                                                               \
        .base_paged_addr_15_0 = 0,                                                          \
        .base_paged_addr_23_16 = 0,                                                         \
        .segment_type = ((is_code) ? 0xb : 0x3), /* With descriptor_type (below) = 1        \
                                                  * (code/data), see Figure 3-1 of          \
                                                  * section 3.4.3.1 in Intel x86 vol 3:     \
                                                  * - Code (bit 3 = 1):                     \
                                                  *   bit 0: 1=Accessed                     \
                                                  *   bit 1: 1=Readable                     \
                                                  *   bit 2: 0=Non-Conforming               \
                                                  * - Data (bit 3 = 0):                     \
                                                  *   bit 0: 1=Accessed                     \
                                                  *   bit 1: 1=Writable                     \
                                                  *   bit 2: 0=Expand up (stack-related)    \
                                                  * For Conforming/non conforming segments, \
                                                  * see Intel x86 Vol 3 section 4.8.1.1     \
                                                  */                                        \
        .descriptor_type = 1, /* 1=Code/Data */                                             \
        .dpl = ((descr_privilege_level)&0x3),                                               \
        .present = 1,                                                                       \
        .limit_19_16 = 0xf,                                                                 \
        .custom = 0,                                                                        \
        .op_size = 1,    /* 32 bits instr/data */                                           \
        .granularity = 1 /* limit is in 4kB Pages */                                        \
    })
/** The actual GDT */


@@ -5,16 +5,16 @@
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
USA.
*/
#pragma once
@@ -34,4 +34,3 @@
* address space (ie "flat" virtual space).
*/
int gdtSetup(void);


@@ -13,16 +13,16 @@
#define SEGMENT_IDX_DATA 2
struct idtEntry {
    uint16_t offset_low;
    uint16_t seg_sel;
    uint8_t reserved : 5;
    uint8_t flags : 3;
    uint8_t type : 3;
    uint8_t op_size : 1;
    uint8_t zero : 1;
    uint8_t dpl : 2;
    uint8_t present : 1;
    uint16_t offset_high;
} __attribute__((packed));
/**
@@ -32,14 +32,13 @@ struct idtEntry {
* @see Intel x86 doc vol 3, section 2.4, figure 2-4
*/
struct idtRegister {
    uint16_t limit;
    uint32_t base_addr;
} __attribute__((packed, aligned(8)));
/* Build segment http://wiki.osdev.org/Selector*/
#define BUILD_SEGMENT_SELECTOR(desc_privilege, in_ldt, index)                              \
    ((((desc_privilege)&0x3) << 0) | (((in_ldt) ? 1 : 0) << 2) | ((index) << 3))
int idtSetup();
int idt_set_handler(int index, unsigned int addr, int priviledge);
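A quick worked example of the selector layout encoded by this macro (requested privilege level in bits 0-1, table indicator in bit 2, descriptor index from bit 3 up): a ring-0 GDT selector for SEGMENT_IDX_DATA is (0 << 0) | (0 << 2) | (2 << 3) = 0x10. A compile-time check of that value, assuming a C11 toolchain (GCC > 6 is already required elsewhere in this tree):

/* Sanity check of the selector encoding; SEGMENT_IDX_DATA == 2 comes from this header. */
_Static_assert(BUILD_SEGMENT_SELECTOR(0, 0, SEGMENT_IDX_DATA) == 0x10,
               "ring-0 GDT data selector should encode to 0x10");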


@@ -1,20 +1,19 @@
#pragma once
#include "stdarg.h"
// c.f. intel software-developer-vol-1 6.4.1
struct interrupt_frame {
    uint32_t eip;
    uint32_t cs;
    uint32_t eflags;
    uint32_t esp;
    uint32_t ss;
};
// Exception
#define DECLARE_INTERRUPT(int_nb)                                                          \
    void print_handler_##int_nb(struct interrupt_frame *frame, ulong error_code);
#define ACCESS_INTERRUPT(int_nb) print_handler_##int_nb
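These two macros pair with DEFINE_INTERRUPT() from the exception code above: DECLARE_INTERRUPT() emits the prototype of the generated handler and ACCESS_INTERRUPT() names it so it can be registered. Presumably used along these lines (the setup function itself is hypothetical):

/* Hypothetical setup: register the generated double-fault printer as the exception handler. */
DECLARE_INTERRUPT(EXCEPTION_DOUBLE_FAULT)

int examplePrinterSetup(void)
{
    return exceptionSetRoutine(EXCEPTION_DOUBLE_FAULT, ACCESS_INTERRUPT(EXCEPTION_DOUBLE_FAULT));
}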


@@ -4,18 +4,17 @@
// NIH http://wiki.osdev.org/Inline_Assembly/Examples#I.2FO_access
static inline void outb(uint16_t port, uint8_t val)
{
    asm volatile("outb %0, %1" : : "a"(val), "Nd"(port));
    /* There's an outb %al, $imm8 encoding, for compile-time constant port numbers that fit in
     * 8b. (N constraint). Wider immediate constants would be truncated at assemble-time (e.g.
     * "i" constraint). The outb %al, %dx encoding is the only option for all other cases.
     * %1 expands to %dx because port is a uint16_t. %w1 could be used if we had the port
     * number a wider C type */
}

static inline uint8_t inb(uint16_t port)
{
    uint8_t ret;
    asm volatile("inb %1, %0" : "=a"(ret) : "Nd"(port));
    return ret;
}
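A small usage sketch on top of these wrappers: acknowledging an interrupt on the master PIC and polling a keyboard scancode. The 0x20/0x60 ports and the 0x20 EOI command are standard PC constants assumed here, not values defined in this commit.

/* Hedged helpers built on outb()/inb(); port numbers are the usual PC values (assumption). */
static inline void picMasterSendEoi(void)
{
    outb(0x20, 0x20); /* write the EOI command to the master PIC command port */
}

static inline uint8_t keyboardReadScancode(void)
{
    return inb(0x60); /* PS/2 controller data port */
}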


@@ -5,11 +5,13 @@
int irqSetup()
{
    initPic();
    return 0;
}
irq_handler irq_handler_array[IRQ_NUM] = {
    NULL,
};
int irqSetRoutine(int irq, irq_handler handler)
{
@@ -22,8 +24,8 @@ int irqSetRoutine(int irq, irq_handler handler)
    irq_handler_array[irq] = handler;

    if (handler != NULL) {
        int ret = idt_set_handler(IRQ_INTERRUPT_BASE_ADDRESS + irq,
                                  (unsigned int)irq_handler_array[irq], 0);
        if (!ret)
            enableIrq(irq);
    }


@@ -11,29 +11,29 @@
})
#define restore_IRQs(flags) restore_flags(flags)
#define IRQ_TIMER         0 // MASTER IRQ
#define IRQ_KEYBOARD      1
#define IRQ_SLAVE_PIC     2
#define IRQ_COM2          3
#define IRQ_COM1          4
#define IRQ_LPT2          5
#define IRQ_FLOPPY        6
#define IRQ_LPT1          7
#define IRQ_8_NOT_DEFINED 8 // SLAVE
#define IRQ_RESERVED_1    9 // SLAVE IRQ
#define IRQ_RESERVED_2    10
#define IRQ_RESERVED_3    11
#define IRQ_RESERVED_4    12
#define IRQ_COPROCESSOR   13
#define IRQ_HARDDISK      14
#define IRQ_RESERVED_5    15
#define IRQ_INTERRUPT_BASE_ADDRESS 0x20
#define IRQ_NUM 16
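With irqSetRoutine() from the hunk above and the IRQ numbers defined here, hooking a device interrupt is a single call. A sketch, assuming irq_handler has the same __attribute__((interrupt)) shape as the exception handlers but without an error code (that typedef is not visible in this diff), hence the explicit cast:

/* Hypothetical keyboard hookup; the handler signature is an assumption, not taken from this
 * commit (hardware IRQs push no error code, so only the interrupt frame is received). */
__attribute__((interrupt)) static void keyboardHandler(struct interrupt_frame *frame)
{
    (void)frame;
    uint8_t scancode = inb(0x60); /* standard PC keyboard data port (assumption) */
    (void)scancode;
    outb(0x20, 0x20); /* acknowledge the master PIC */
}

int keyboardSetup(void)
{
    return irqSetRoutine(IRQ_KEYBOARD, (irq_handler)keyboardHandler);
}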