Multiple page allocation
commit 9815cc062f
parent ba9e0f1bff
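The core interface change: allocPhyPage() now takes a page count and returns the base of a physically contiguous run of pages. A minimal caller sketch, not part of the commit; the helper is hypothetical and assumes the signatures introduced below (mem.h, errno.h) plus pageMap()/PAGE_SIZE from the paging headers:

#include "errno.h"
#include "mem.h"    /* paddr_t allocPhyPage(uint nbPage) */
#include "paging.h" /* pageMap(), PAGING_MEM_WRITE, PAGE_SIZE */

/* Hypothetical caller: back a 3-page buffer with contiguous physical pages. */
static int exampleGrabThreePages(void)
{
    paddr_t base = allocPhyPage(3); /* one contiguous run of 3 physical pages */
    if (base == (paddr_t)NULL)
        return -ENOMEM;

    /* Each page is still mapped one at a time, exactly as core/alloc.c does below. */
    for (uint i = 0; i < 3; i++) {
        if (pageMap((vaddr_t)base + i * PAGE_SIZE, base + i * PAGE_SIZE, PAGING_MEM_WRITE))
            return -ENOMEM;
    }
    return 0;
}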
@@ -1,5 +1,6 @@
#include "exception.h"
#include "klibc.h"
+#include "kthread.h"
#include "vga.h"
#define STR_HELPER(x) #x
#define STR(x) STR_HELPER(x)
@@ -61,6 +62,8 @@ __attribute__((interrupt)) void pagefault_handler(struct interrupt_frame *frame,
uint32_t faulting_address;
asm volatile("mov %%cr2, %0" : "=r"(faulting_address));

struct kthread *current = getCurrentThread();
printf("page fault while in thread %s\n", current->name);
printStringDetails("PAGE FAULT", RED, BLACK, 0, VGA_HEIGHT - 1);
printIntDetails(error_code, RED, BLACK, 11, VGA_HEIGHT - 1);
(void)faulting_address;

@@ -76,7 +76,7 @@ int pagingSetup(paddr_t upperKernelAddr)
struct pdbr cr3;

// x86 got 1024 of pde for 4Byte each: 4ko !
-struct pde *pd = (struct pde *)allocPhyPage();
+struct pde *pd = (struct pde *)allocPhyPage(1);

memset(pd, 0, PAGE_SIZE);
memset(&cr3, 0x0, sizeof(struct pdbr));
@@ -93,7 +93,7 @@ int pagingSetup(paddr_t upperKernelAddr)
pt = (struct pte *)(pd[pdEntry].pt_addr << PT_SHIFT);
refPhyPage((paddr_t)pt);
} else {
-pt = (struct pte *)allocPhyPage();
+pt = (struct pte *)allocPhyPage(1);

memset(pt, 0, PAGE_SIZE);
pd[pdEntry].present = 1;
@@ -137,7 +137,7 @@ int pageMap(vaddr_t vaddr, paddr_t paddr, int flags)
struct pte *pt = (struct pte *)((PD_MIRROR_PAGE_IDX << PD_SHIFT) + (pdEntry << PT_SHIFT));

if (!pd[pdEntry].present) {
-paddr_t ptPhy = allocPhyPage();
+paddr_t ptPhy = allocPhyPage(1);
if (ptPhy == (vaddr_t)NULL)
return -ENOMEM;
core/alloc.c
@@ -3,35 +3,49 @@
#include "alloc.h"
#include "errno.h"
#include "irq.h"
+#include "kernel.h"
#include "klibc.h"
#include "list.h"
#include "math.h"
#include "mem.h"
#include "paging.h"

#define IS_SELF_CONTAINED(desc) ((vaddr_t)((desc)->page) == (vaddr_t)(desc))
// Slab will contains object from sizeof(void *) to PAGE_SIZE/2 by pow2
#define SLUB_SIZE (PAGE_SHIFT)
static struct slabDesc *slub;

-int allocSlab(struct slabDesc **desc, size_t size, int self_containing);
-int allocSlabEntry(struct slabEntry **desc, size_t size, int selfContained);
-static int formatPage(struct slabEntry *desc, size_t size, int selfContained);
+int allocSlab(struct slabDesc **desc, size_t sizeEl, size_t sizeSlab, int self_containing);
+int allocSlabEntry(struct slabEntry **desc, size_t sizeEl, size_t sizeSlab, int selfContained);
+static int formatPage(struct slabEntry *desc, size_t size, size_t sizeSlab, int selfContained);

+static struct {
+size_t elementSize;
+size_t slabSize;
+unsigned char isSelf;
+} initSlab[] = {{sizeof(struct slabDesc), PAGE_SIZE, 1},
+{sizeof(struct slabEntry), PAGE_SIZE, 1},
+{4, PAGE_SIZE, 0},
+{8, PAGE_SIZE, 0},
+{16, PAGE_SIZE, 0},
+{32, PAGE_SIZE, 0},
+{64, PAGE_SIZE, 0},
+{128, PAGE_SIZE, 0},
+{256, 2 * PAGE_SIZE, 0},
+{1024, 2 * PAGE_SIZE, 0},
+{2048, 3 * PAGE_SIZE, 0},
+{4096, 4 * PAGE_SIZE, 0},
+{8192, 8 * PAGE_SIZE, 0},
+{16384, 12 * PAGE_SIZE, 0},
+{0, 0, 0}};

int allocSetup(void)
{
-uint start = log2(sizeof(void *));
list_init(slub);
-int ret;
-if ((ret = allocBookSlab(sizeof(struct slabDesc), 1))) {
-pr_devel("Fail to allocBookSlab %d for slabDesc :( \n", ret);
-return ret;
-}
-if ((ret = allocBookSlab(sizeof(struct slabEntry), 1))) {
-pr_devel("Fail to allocBookSlab %d for slabEntry :( \n", ret);
-return ret;
-}
-for (uint i = start; i <= SLUB_SIZE; i++) {
-if ((ret = allocBookSlab(1U << i, 0))) {

+for (uint i = 0; initSlab[i].elementSize != 0; i++) {
+int ret;
+if ((ret = allocBookSlab(initSlab[i].elementSize, initSlab[i].slabSize,
+initSlab[i].isSelf))) {
if (ret == -EEXIST)
continue;
pr_devel("Fail to allocBookSlab %d for %d \n", ret, (1U << i));
@@ -41,10 +55,11 @@ int allocSetup(void)
return 0;
}

-int allocBookSlab(size_t size, int selfContained)
+int allocBookSlab(size_t sizeEl, size_t sizeSlab, int selfContained)
{
-pr_devel("%s for size %d is self %d\n", __func__, size, selfContained);
-struct slabDesc *slab = NULL;
+pr_devel("%s for element of size %d is self %d\n", __func__, sizeEl, selfContained);
+struct slabDesc *slab = NULL;
+struct slabDesc *newSlab = NULL;
int slabIdx;
int ret;
int flags;
@@ -52,38 +67,49 @@ int allocBookSlab(size_t size, int selfContained)
disable_IRQs(flags);
list_foreach(slub, slab, slabIdx)
{
-if (slab->size == size) {
+if (slab->size == sizeEl) {
restore_IRQs(flags);
return -EEXIST;
}
-if (slab->size > size) {
+if (slab->size > sizeEl) {
break;
}
}
-struct slabDesc *newSlab;
-if ((ret = allocSlab(&newSlab, size, selfContained))) {

+if ((ret = allocSlab(&newSlab, sizeEl, sizeSlab, selfContained))) {
restore_IRQs(flags);
return ret;
}

if (list_foreach_early_break(slub, slab, slabIdx)) {
list_insert_before(slub, slab, newSlab);
} else {
list_add_tail(slub, newSlab);
}

restore_IRQs(flags);
return 0;
}

-int allocSlab(struct slabDesc **desc, size_t size, int selfContained)
+int allocSlab(struct slabDesc **desc, size_t size, size_t sizeSlab, int selfContained)
{
-// pr_devel("%s for size %d is self %d\n", __func__, size, selfContained);
-if (size > PAGE_SIZE)
+uint nbPage, i;
+
+pr_devel("%s for size %d is self %d\n", __func__, size, selfContained);
+sizeSlab = MAX(sizeSlab, PAGE_SIZE);
+if (size > sizeSlab) {
+pr_devel("%s size of element %d are bigger than slab size %d\n", size, sizeSlab);
return -ENOENT;
-paddr_t alloc = allocPhyPage();
+}
+
+nbPage = DIV_ROUND_UP(sizeSlab, PAGE_SIZE);
+paddr_t alloc = allocPhyPage(nbPage);
if (alloc == (paddr_t)NULL)
return -ENOMEM;
-if (pageMap((vaddr_t)alloc, alloc, PAGING_MEM_WRITE))
-return -ENOMEM;
+for (i = 0; i < nbPage; i++) {
+if (pageMap((vaddr_t)alloc + i * PAGE_SIZE, alloc + i * PAGE_SIZE, PAGING_MEM_WRITE))
+goto free_page;
+}

if (selfContained) {
*desc = (struct slabDesc *)alloc;
@@ -96,21 +122,37 @@ int allocSlab(struct slabDesc **desc, size_t size, int selfContained)
list_singleton(slab, slab);
slab->page = (vaddr_t)alloc;
slab->full = 0;
+slab->size = sizeSlab;
(*desc)->size = size;
// pr_devel("got page %d for size %d first %d", alloc, size, (*desc)->slab.freeEl);
-return formatPage(&(*desc)->slab, size, selfContained);
+return formatPage(&(*desc)->slab, size, sizeSlab, selfContained);
+
+free_page:
+for (uint j = 0; j < i; j++) {
+pageUnmap((vaddr_t)alloc + i * PAGE_SIZE);
+}
+return -ENOMEM;
}

-int allocSlabEntry(struct slabEntry **desc, size_t size, int selfContained)
+int allocSlabEntry(struct slabEntry **desc, size_t size, size_t sizeSlab, int selfContained)
{
+uint nbPage, i;
+
pr_devel("%s for size %d is self %d\n", __func__, size, selfContained);
-if (size > PAGE_SIZE)
+sizeSlab = MAX(sizeSlab, PAGE_SIZE);
+if (size > sizeSlab) {
+pr_devel("%s size of element %d are bigger than slab size %d\n", size, sizeSlab);
return -ENOENT;
-paddr_t alloc = allocPhyPage();
+}
+
+nbPage = DIV_ROUND_UP(sizeSlab, PAGE_SIZE);
+paddr_t alloc = allocPhyPage(nbPage);
if (alloc == (paddr_t)NULL)
return -ENOMEM;
-if (pageMap((vaddr_t)alloc, alloc, PAGING_MEM_WRITE))
-return -ENOMEM;
+for (i = 0; i < nbPage; i++) {
+if (pageMap((vaddr_t)alloc + i * PAGE_SIZE, alloc + i * PAGE_SIZE, PAGING_MEM_WRITE))
+goto free_page;
+}

if (selfContained) {
*desc = (struct slabEntry *)alloc;
@@ -122,16 +164,23 @@ int allocSlabEntry(struct slabEntry **desc, size_t size, int selfContained)
list_singleton(*desc, *desc);
(*desc)->page = (vaddr_t)alloc;
(*desc)->full = 0;
+(*desc)->size = sizeSlab;
// pr_devel("got page %d for size %d first %d", alloc, size, (*desc)->freeEl);
-return formatPage((*desc), size, selfContained);
+return formatPage((*desc), size, sizeSlab, selfContained);
+
+free_page:
+for (uint j = 0; j < i; j++) {
+pageUnmap((vaddr_t)alloc + i * PAGE_SIZE);
+}
+return -ENOMEM;
}

-static int formatPage(struct slabEntry *desc, size_t size, int selfContained)
+static int formatPage(struct slabEntry *desc, size_t size, size_t sizeSlab, int selfContained)
{
char *cur = desc->freeEl;
-ulong nbEl = PAGE_SIZE / size - 1;
+ulong nbEl = sizeSlab / size - 1;
if (selfContained)
-nbEl = (PAGE_SIZE - sizeof(struct slabDesc)) / size - 1;
+nbEl = (sizeSlab - sizeof(struct slabDesc)) / size - 1;
ulong i;
for (i = 0; i < nbEl; i++) {
*((vaddr_t *)cur) = (vaddr_t)cur + size;
@@ -157,16 +206,12 @@ static void *allocFromSlab(struct slabEntry *slab)
void *malloc(size_t size)
{
int flags;
-void *ret;
-
-if (size > (1U << SLUB_SIZE)) {
-printf("implement malloc for big size\n");
-return NULL;
-}
struct slabDesc *slab;
uint slubIdx;
+void *ret;
+
disable_IRQs(flags);

list_foreach(slub, slab, slubIdx)
{
if (size <= slab->size)
@@ -188,9 +233,10 @@ void *malloc(size_t size)
// No room found
struct slabEntry *newSlabEntry;
struct slabEntry *slabList = &slab->slab;
+size_t slabSize = MAX(PAGE_SIZE, size);
int retSlab;
-if ((retSlab =
-allocSlabEntry(&newSlabEntry, slab->size, IS_SELF_CONTAINED(&slab->slab)))) {
+if ((retSlab = allocSlabEntry(&newSlabEntry, slab->size, slabSize,
+IS_SELF_CONTAINED(&slab->slab)))) {
pr_devel("Fail to allocSlabEntry %d\n", retSlab);
restore_IRQs(flags);
return NULL;
@@ -209,7 +255,7 @@ int freeFromSlab(void *ptr, struct slabEntry *slab)
list_foreach(slab, slabEntry, slabIdx)
{
if ((slabEntry->page <= (vaddr_t)ptr) &&
-((vaddr_t)ptr < (slabEntry->page + PAGE_SIZE))) {
+((vaddr_t)ptr < (slabEntry->page + slabEntry->size))) {
// pr_devel("free place! was %d is now %d\n", slabEntry->freeEl, ptr);
if (slabEntry->full) {
*((vaddr_t *)ptr) = (vaddr_t)NULL;
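With the initSlab table above, element sizes beyond one page get slabs that span several contiguous pages, so requests that previously hit the "implement malloc for big size" bail-out are now served. An illustrative caller, not part of the commit (memset and pr_devel are assumed to come from klibc.h, as elsewhere in the tree):

#include "alloc.h"
#include "klibc.h"

/* Illustration: an 8 KiB request lands in the {8192, 8 * PAGE_SIZE} slab,
 * whose backing store is 8 contiguous physical pages mapped by allocSlab(). */
static void exampleBigAlloc(void)
{
    void *buf = malloc(8192);
    if (buf == NULL) {
        pr_devel("malloc(8192) failed\n");
        return;
    }
    memset(buf, 0, 8192); /* the whole object is mapped and writable */
    free(buf);
}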
@@ -4,6 +4,7 @@

struct slabEntry {
vaddr_t page;
+size_t size;
void *freeEl;
char full;
struct slabEntry *next;
@@ -17,7 +18,7 @@ struct slabDesc {
struct slabDesc *prev;
};
int allocSetup(void);
-int allocBookSlab(size_t size, int selfContained);
+int allocBookSlab(size_t size, size_t sizeSlab, int selfContained);

void *malloc(size_t size);
void free(void *ptr);
core/kernel.h (new file)
@@ -0,0 +1,73 @@
#pragma once

#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d)-1) / (d))
#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a)-1)
#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))

/* @a is a power of 2 value */
#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a)-1), (a))
#define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask))
#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))
#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a)-1)) == 0)

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))

/*
 * This looks more complex than it should be. But we need to
 * get the type for the ~ right in round_down (it needs to be
 * as wide as the result!), and we want to evaluate the macro
 * arguments just once each.
 */
#define __round_mask(x, y) ((__typeof__(x))((y)-1))
/**
 * round_up - round up to next specified power of 2
 * @x: the value to round
 * @y: multiple to round up to (must be a power of 2)
 *
 * Rounds @x up to next multiple of @y (which must be a power of 2).
 * To perform arbitrary rounding up, use roundup() below.
 */
#define round_up(x, y) ((((x)-1) | __round_mask(x, y)) + 1)
/**
 * round_down - round down to next specified power of 2
 * @x: the value to round
 * @y: multiple to round down to (must be a power of 2)
 *
 * Rounds @x down to next multiple of @y (which must be a power of 2).
 * To perform arbitrary rounding down, use rounddown() below.
 */
#define round_down(x, y) ((x) & ~__round_mask(x, y))

#define typeof_member(T, m) typeof(((T *)0)->m)

#define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP
/**
 * roundup - round up to the next specified multiple
 * @x: the value to up
 * @y: multiple to round up to
 *
 * Rounds @x up to next multiple of @y. If @y will always be a power
 * of 2, consider using the faster round_up().
 */
#define roundup(x, y) \
({ \
typeof(y) __y = y; \
(((x) + (__y - 1)) / __y) * __y; \
})
/**
 * rounddown - round down to next specified multiple
 * @x: the value to round
 * @y: multiple to round down to
 *
 * Rounds @x down to next multiple of @y. If @y will always be a power
 * of 2, consider using the faster round_down().
 */
#define rounddown(x, y) \
({ \
typeof(x) __x = (x); \
__x - (__x % (y)); \
})

#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
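core/kernel.h mirrors the usual Linux-style helpers; a few worked values and a sketch of how the slab code above uses DIV_ROUND_UP (illustrative only; PAGE_SIZE == 4096 and the uint/size_t typedefs are assumed to come from the existing headers):

#include "kernel.h"

/* Illustrative values, assuming PAGE_SIZE == 4096:
 *   DIV_ROUND_UP(5000, 4096) == 2      pages needed to hold 5000 bytes
 *   round_up(5000, 4096)     == 8192   power-of-2 rounding
 *   round_down(5000, 4096)   == 4096
 *   roundup(10, 3)           == 12     arbitrary multiples
 *   rounddown(10, 3)         == 9
 */
static inline uint slabPageCount(size_t sizeSlab)
{
    /* Same computation allocSlab()/allocSlabEntry() perform above. */
    return DIV_ROUND_UP(sizeSlab, PAGE_SIZE);
}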
@@ -35,6 +35,10 @@ void printf(const char *format, ...);
0; \
})

+#ifndef pr_fmt
+#define pr_fmt(fmt) fmt
+#endif

#ifdef DEBUG
#define pr_devel(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)
#else
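The pr_fmt guard follows the usual kernel idiom: a translation unit may define pr_fmt before including the header (klibc.h, judging by the includes used elsewhere in this diff) to prefix its own debug output, and pr_devel compiles away unless DEBUG is set. A small sketch, not part of the commit:

/* Illustration: prefix this file's debug output; effective only in DEBUG builds. */
#define pr_fmt(fmt) "alloc: " fmt
#include "klibc.h"

static void exampleDebugPrint(void)
{
    pr_devel("booked slab of %d bytes\n", 256); /* -> "alloc: booked slab of 256 bytes" */
}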
@@ -50,7 +50,7 @@ struct kthread *kthreadCreate(const char *name, cpu_kstate_function_arg1_t func,

thread->stackAddr = (vaddr_t)malloc(KTHREAD_DEFAULT_STACK_SIZE);
#ifdef DEBUG
-printf("Alloc stask at 0x%x struct at 0x%x\n", thread->stackAddr, thread);
+printf("Alloc stack at 0x%x struct at 0x%x\n", thread->stackAddr, thread);
#endif
thread->stackSize = KTHREAD_DEFAULT_STACK_SIZE;

@@ -85,6 +85,9 @@ void kthreadDelete(struct kthread *thread)
disable_IRQs(flags);
list_delete(currentThread, thread);

+#ifdef DEBUG
+printf("Free stack at 0x%x struct at 0x%x\n", thread->stackAddr, thread);
+#endif
free((void *)thread->stackAddr);
free((void *)thread);
restore_IRQs(flags);
core/mem.c
@@ -1,4 +1,5 @@
#include "mem.h"
+#include "kernel.h"
#include "klibc.h"
#include "list.h"
#include "types.h"
@@ -50,16 +51,59 @@ struct mem_desc *addr2memDesc(paddr_t addr)
return page_desc + idx;
}

-paddr_t allocPhyPage(void)
+struct mem_desc *memFindConsecutiveFreePage(uint nbPage)
{
+struct mem_desc *mem, *head;
+uint memIdx, count;
+
if (list_is_empty(free_page)) {
return NULL;
}
+count = 1;
+memIdx = 0;
+head = free_page;
+mem = free_page;
+
+while (count < nbPage && (!memIdx || mem != free_page)) {
+memIdx++;
+mem = mem->next;
+if (mem->phy_addr == head->phy_addr + count * PAGE_SIZE) {
+count++;
+} else {
+count = 1;
+head = mem;
+}
+}
+
+if (count < nbPage) {
+return NULL;
+}
+return head;
+}
+
+paddr_t allocPhyPage(uint nbPage)
+{
+struct mem_desc *mem, *head, *next;
+uint count;
+
+head = memFindConsecutiveFreePage(nbPage);
+
+if (head == NULL) {
+pr_devel("Cannot find %d consecutive page\n", nbPage);
+return (unsigned long)NULL;
+}
-struct mem_desc *mem = list_pop_head(free_page);
-mem->ref = 1;
-list_add_tail(used_page, mem);
-allocatedPage++;
-return mem->phy_addr;
+
+mem = head;
+next = head->next;
+for (count = 0; count < nbPage; count++) {
+list_delete(free_page, mem);
+mem->ref = 1;
+list_add_tail(used_page, mem);
+mem = next;
+next = mem->next;
+}
+allocatedPage += nbPage;
+return head->phy_addr;
}

int unrefPhyPage(paddr_t addr)
@@ -72,7 +116,7 @@ int unrefPhyPage(paddr_t addr)
if (mem->ref == 0) {
allocatedPage--;
list_delete(used_page, mem);
-list_add_tail(free_page, mem);
+list_add_tail(free_page, mem); // TODO find the right place to keep free_page sorted;
}

return mem->ref;
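memFindConsecutiveFreePage() relies on free_page staying sorted by physical address (hence the TODO above), and each page keeps its own refcount, so a multi-page run is released page by page. A hedged sketch of the release side, not part of the commit (helper name and PAGE_SIZE availability are assumptions):

#include "mem.h"

/* Illustrative only: release a run previously obtained with allocPhyPage(nbPage). */
static void putPhyPages(paddr_t base, uint nbPage)
{
    for (uint i = 0; i < nbPage; i++)
        unrefPhyPage(base + i * PAGE_SIZE); /* each page has its own refcount */
}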
@@ -16,7 +16,7 @@ struct mem_desc {
};

int memSetup(paddr_t upperMem, paddr_t *lastUsed);
-paddr_t allocPhyPage(void);
+paddr_t allocPhyPage(uint nbPage);
int unrefPhyPage(paddr_t addr);
int refPhyPage(paddr_t addr);
unsigned long getNbAllocatedPage(void);
core/types.h
@@ -28,18 +28,6 @@
#define S64_MAX ((s64)(U64_MAX >> 1))
#define S64_MIN ((s64)(-S64_MAX - 1))

-#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a)-1)
-#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
-
-/* @a is a power of 2 value */
-#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
-#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a)-1), (a))
-#define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask))
-#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))
-#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a)-1)) == 0)
-
-#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
-
// Virtual address
typedef unsigned long vaddr_t;

@@ -17,10 +17,6 @@
#define VGA_WIDTH 80
#define VGA_HEIGHT 25

-#ifndef pr_fmt
-#define pr_fmt(fmt) fmt
-#endif
-
int VGASetup(uint bgColor, uint color);
void VGAputc(const char str);
void clearScreen(uint bgColor);
@@ -21,7 +21,7 @@ void testPhymem(void)
int allocCount = 0;
int freeCount = 0;

-while ((page = (struct mem_desc *)allocPhyPage()) != NULL) {
+while ((page = (struct mem_desc *)allocPhyPage(1)) != NULL) {
page->phy_addr = allocCount;
allocCount++;
list_add_tail(allocated_page_list, page);
@@ -35,7 +35,7 @@ void testPhymem(void)
}
printf("%d pages freed\n", freeCount);

-assertmsg((page = (struct mem_desc *)allocPhyPage()) != NULL, "Cannot allocate memory\n");
+assertmsg((page = (struct mem_desc *)allocPhyPage(1)) != NULL, "Cannot allocate memory\n");
unrefPhyPage((ulong)page);
}

@@ -103,7 +103,7 @@ static void testPaging(void)
int allocCount = 0;
int freeCount = 0;

-while ((page = (struct mem_desc *)allocPhyPage()) != NULL) {
+while ((page = (struct mem_desc *)allocPhyPage(1)) != NULL) {
assertmsg(pageMap((vaddr_t)page, (paddr_t)page, PAGING_MEM_WRITE) == 0,
"Fail to map page %d\n", allocCount);
memset(page, allocCount, PAGE_SIZE);
@@ -121,7 +121,7 @@ static void testPaging(void)
}
printf("%d pages freed\n", freeCount);

-assertmsg((page = (struct mem_desc *)allocPhyPage()) != NULL, "Cannot allocate memory\n");
+assertmsg((page = (struct mem_desc *)allocPhyPage(1)) != NULL, "Cannot allocate memory\n");
unrefPhyPage((ulong)page);
}