#define pr_fmt(fmt) "[alloc]: " fmt
//#define DEBUG

#include "alloc.h"
#include "errno.h"
#include "irq.h"
#include "kernel.h"
#include "klibc.h"
#include "list.h"
#include "math.h"
#include "mem.h"
#include "paging.h"

#define IS_SELF_CONTAINED(desc) ((vaddr_t)((desc)->page) == (vaddr_t)(desc))

// Slab caches hold fixed-size objects, from sizeof(void *) up to 16 KiB,
// grouped by size class.
static struct slabDesc *slub;

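// The allocator keeps a single list of caches (slub), one slabDesc per object
// size, sorted by size. Each slabDesc owns a list of slabEntry, one per mapped
// slab, and each slabEntry threads a free list through its unused slots (see
// formatPage()). A "self-contained" cache stores its descriptor at the start of
// the slab it manages, which is what IS_SELF_CONTAINED() detects. The initSlab
// table below lists the caches created at boot.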
static int allocSlab(struct slabDesc **desc, size_t sizeEl, size_t sizeSlab,
                     int self_containing);
static int allocSlabEntry(struct slabEntry **desc, size_t sizeEl, size_t sizeSlab,
                          int selfContained);
static int formatPage(struct slabEntry *desc, size_t size, size_t sizeSlab, int selfContained);
static int freeFromSlab(void *ptr, struct slabEntry *slab);

static struct {
    size_t elementSize;
    size_t slabSize;
    unsigned char isSelf;
} initSlab[] = {{sizeof(struct slabDesc), PAGE_SIZE, 1},
                {sizeof(struct slabEntry), PAGE_SIZE, 1},
                {4, PAGE_SIZE, 0},
                {8, PAGE_SIZE, 0},
                {16, PAGE_SIZE, 0},
                {32, PAGE_SIZE, 0},
                {64, PAGE_SIZE, 0},
                {128, PAGE_SIZE, 0},
                {256, 2 * PAGE_SIZE, 0},
                {1024, 2 * PAGE_SIZE, 0},
                {2048, 3 * PAGE_SIZE, 0},
                {4096, 4 * PAGE_SIZE, 0},
                {8192, 8 * PAGE_SIZE, 0},
                {16384, 12 * PAGE_SIZE, 0},
                {0, 0, 0}};

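// Create the slab caches listed in initSlab at boot; caches that already exist
// are skipped.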
int allocSetup(void)
{
    list_init(slub);

    for (uint i = 0; initSlab[i].elementSize != 0; i++) {
        int ret;

        if ((ret = allocBookSlab(initSlab[i].elementSize, initSlab[i].slabSize,
                                 initSlab[i].isSelf))) {
            if (ret == -EEXIST)
                continue;
            pr_devel("Failed to allocBookSlab %d for size %d\n", ret,
                     initSlab[i].elementSize);
            return ret;
        }
    }

    return 0;
}

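// Register a new slab cache for elements of sizeEl bytes, keeping the slub
// list sorted by element size. Returns -EEXIST if a cache of that size
// already exists.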
int allocBookSlab(size_t sizeEl, size_t sizeSlab, int selfContained)
{
    struct slabDesc *slab = NULL;
    struct slabDesc *newSlab = NULL;
    int slabIdx;
    int ret;
    int flags;

    pr_devel("%s for element of size %d is self %d\n", __func__, sizeEl, selfContained);

    disable_IRQs(flags);
    list_foreach(slub, slab, slabIdx)
    {
        if (slab->size == sizeEl) {
            restore_IRQs(flags);
            return -EEXIST;
        }
        if (slab->size > sizeEl) {
            break;
        }
    }

    if ((ret = allocSlab(&newSlab, sizeEl, sizeSlab, selfContained))) {
        restore_IRQs(flags);
        return ret;
    }

    // Keep the cache list sorted by element size
    if (list_foreach_early_break(slub, slab, slabIdx)) {
        list_insert_before(slub, slab, newSlab);
    } else {
        list_add_tail(slub, newSlab);
    }

    restore_IRQs(flags);

    return 0;
}

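// Allocate, map, and format the backing pages of a new cache of size-byte
// objects. When selfContained is set, the slabDesc lives at the start of its
// own slab; otherwise it is obtained from malloc().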
static int allocSlab(struct slabDesc **desc, size_t size, size_t sizeSlab, int selfContained)
{
    uint nbPage, i;

    pr_devel("%s for size %d is self %d\n", __func__, size, selfContained);

    sizeSlab = MAX(sizeSlab, PAGE_SIZE);
    if (size > sizeSlab) {
        pr_devel("element size %d is bigger than slab size %d\n", size, sizeSlab);
        return -ENOENT;
    }

    nbPage = DIV_ROUND_UP(sizeSlab, PAGE_SIZE);
    paddr_t alloc = allocPhyPage(nbPage);
    if (alloc == (paddr_t)NULL)
        return -ENOMEM;

    // Identity-map the slab pages so the physical allocation is directly addressable
    for (i = 0; i < nbPage; i++) {
        if (pageMap((vaddr_t)alloc + i * PAGE_SIZE, alloc + i * PAGE_SIZE, PAGING_MEM_WRITE))
            goto free_page;
    }

    if (selfContained) {
        *desc = (struct slabDesc *)alloc;
        ((*desc)->slab).freeEl = (char *)(*desc) + sizeof(struct slabDesc);
    } else {
        *desc = malloc(sizeof(struct slabDesc));
        if (*desc == NULL)
            return -ENOMEM;
        (*desc)->slab.freeEl = (void *)alloc;
    }

    struct slabEntry *slab = &(*desc)->slab;
    list_singleton(slab, slab);
    slab->page = (vaddr_t)alloc;
    slab->full = 0;
    slab->size = sizeSlab;
    (*desc)->size = size;

    return formatPage(&(*desc)->slab, size, sizeSlab, selfContained);

free_page:
    for (uint j = 0; j < i; j++) {
        pageUnmap((vaddr_t)alloc + j * PAGE_SIZE);
    }

    return -ENOMEM;
}

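// Like allocSlab(), but only creates an extra slabEntry for an existing cache;
// malloc() uses it when every slab of a size class is full.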
static int allocSlabEntry(struct slabEntry **desc, size_t size, size_t sizeSlab,
                          int selfContained)
{
    uint nbPage, i;

    pr_devel("%s for size %d is self %d\n", __func__, size, selfContained);

    sizeSlab = MAX(sizeSlab, PAGE_SIZE);
    if (size > sizeSlab) {
        pr_devel("element size %d is bigger than slab size %d\n", size, sizeSlab);
        return -ENOENT;
    }

    nbPage = DIV_ROUND_UP(sizeSlab, PAGE_SIZE);
    paddr_t alloc = allocPhyPage(nbPage);
    if (alloc == (paddr_t)NULL)
        return -ENOMEM;

    for (i = 0; i < nbPage; i++) {
        if (pageMap((vaddr_t)alloc + i * PAGE_SIZE, alloc + i * PAGE_SIZE, PAGING_MEM_WRITE))
            goto free_page;
    }

    if (selfContained) {
        *desc = (struct slabEntry *)alloc;
        (*desc)->freeEl = (char *)(*desc) + sizeof(struct slabEntry);
    } else {
        *desc = malloc(sizeof(struct slabEntry));
        if (*desc == NULL)
            return -ENOMEM;
        (*desc)->freeEl = (void *)alloc;
    }

    list_singleton(*desc, *desc);
    (*desc)->page = (vaddr_t)alloc;
    (*desc)->full = 0;
    (*desc)->size = sizeSlab;

    return formatPage((*desc), size, sizeSlab, selfContained);

free_page:
    for (uint j = 0; j < i; j++) {
        pageUnmap((vaddr_t)alloc + j * PAGE_SIZE);
    }

    return -ENOMEM;
}

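// Thread the free list through a freshly mapped slab: every free slot stores
// the address of the next one, and the last slot stores NULL.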
static int formatPage(struct slabEntry *desc, size_t size, size_t sizeSlab, int selfContained)
{
    char *cur = desc->freeEl;
    ulong nbEl = sizeSlab / size - 1;

    if (selfContained)
        nbEl = (sizeSlab - sizeof(struct slabDesc)) / size - 1;

    for (ulong i = 0; i < nbEl; i++) {
        *((vaddr_t *)cur) = (vaddr_t)cur + size;
        cur += size;
    }

    *((vaddr_t *)cur) = (vaddr_t)NULL;

    return 0;
}

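// Pop the next element off a slab's free list, marking the slab full when the
// last slot is handed out.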
static void *allocFromSlab(struct slabEntry *slab)
{
    vaddr_t *next = slab->freeEl;

    if (*next == (vaddr_t)NULL) {
        pr_devel("Slab @%p is now full\n", slab);
        slab->full = 1;
    }
    slab->freeEl = (void *)(*next);

    return (void *)next;
}

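// Allocate an object from the smallest size class that fits, growing that
// class with a new slabEntry when all of its slabs are full.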
void *malloc(size_t size)
{
    struct slabEntry *slabEntry;
    struct slabDesc *slab = NULL;
    uint slubIdx;
    void *ret;
    int flags;
    int slabIdx;

    disable_IRQs(flags);

    // Find the smallest size class that can hold the request
    list_foreach(slub, slab, slubIdx)
    {
        if (size <= slab->size)
            break;
    }

    if (!list_foreach_early_break(slub, slab, slubIdx)) {
        pr_devel("No slab found for %d\n", size);
        restore_IRQs(flags);
        return NULL;
    }

    // Reuse the first slab of that class that still has a free slot
    list_foreach(&slab->slab, slabEntry, slabIdx)
    {
        if (!slabEntry->full) {
            // pr_devel("found place in slub %d at idx %d for size %d\n", slubIdx,
            //          slabIdx, size);
            ret = allocFromSlab(slabEntry);
            restore_IRQs(flags);
            return ret;
        }
    }

    // No room found: grow the cache with a new slabEntry
    struct slabEntry *newSlabEntry;
    struct slabEntry *slabList = &slab->slab;
    size_t slabSize = MAX(PAGE_SIZE, size);
    int retSlab;

    if ((retSlab = allocSlabEntry(&newSlabEntry, slab->size, slabSize,
                                  IS_SELF_CONTAINED(&slab->slab)))) {
        pr_devel("Failed to allocSlabEntry %d\n", retSlab);
        restore_IRQs(flags);
        return NULL;
    }

    pr_devel("Allocating new slab for object of size %d\n", slab->size);
    list_add_tail(slabList, newSlabEntry);
    ret = allocFromSlab(newSlabEntry);

    restore_IRQs(flags);

    return ret;
}

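// Give the object at ptr back to the slabEntry whose page range contains it:
// the slot is pushed onto the free list and the slab is no longer full.
// Returns 1 when the pointer belongs to this slab list, 0 otherwise.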
static int freeFromSlab(void *ptr, struct slabEntry *slab)
{
    struct slabEntry *slabEntry;
    int slabIdx;

    list_foreach(slab, slabEntry, slabIdx)
    {
        if ((slabEntry->page <= (vaddr_t)ptr) &&
            ((vaddr_t)ptr < (slabEntry->page + slabEntry->size))) {

            *((vaddr_t *)ptr) = (vaddr_t)slabEntry->freeEl;
            slabEntry->freeEl = ptr;
            slabEntry->full = 0;
            return 1;
        }
    }

    return 0;
}

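// Free an object previously returned by malloc(), searching every cache for
// the slab that owns the pointer. An unknown pointer is only reported.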
void free(void *ptr)
{
    if (!ptr)
        return;

    struct slabDesc *slab;
    int slabIdx;
    int flags;

    disable_IRQs(flags);

    list_foreach(slub, slab, slabIdx)
    {
        struct slabEntry *slabEntry;
        int entryIdx;
        list_foreach(&slab->slab, slabEntry, entryIdx)
        {
            if (freeFromSlab(ptr, slabEntry)) {
                restore_IRQs(flags);
                return;
            }
        }
    }

    restore_IRQs(flags);
    pr_devel("free: slab not found\n");
}