diff --git a/core/elf.c b/core/elf.c
index ab9a559..0e481c3 100644
--- a/core/elf.c
+++ b/core/elf.c
@@ -148,7 +148,7 @@ uaddr_t loadElfProg(const char *prog, struct process *proc)
         // Hack: Even if already allocated mark the adresse space as managed by a ressource
         // So this address space is not used by another ressource.
         uaddr = elf_phdrs[i].p_vaddr;
-        zeroMmap(as, &uaddr, elf_phdrs[i].p_memsz, PAGING_MEM_USER | PAGING_MEM_WRITE | PAGING_MEM_READ, 0);
+        assert(zeroMmap(as, &uaddr, elf_phdrs[i].p_memsz, PAGING_MEM_USER | PAGING_MEM_WRITE | PAGING_MEM_READ, 0) == 0);
     }

     processInitHeap(proc, lastUserAddr);
diff --git a/core/uaddrspace.c b/core/uaddrspace.c
index 3d1e6d8..885e462 100644
--- a/core/uaddrspace.c
+++ b/core/uaddrspace.c
@@ -216,7 +216,8 @@ int uAddrSpaceUnmap(struct uAddrSpace *as, uaddr_t uaddr, size_t size)
             reg->res->ops->unmap(reg, uaddr, size);
             break;
             // Only affect the end
-        } else if (uaddr > reg->addr && uaddr + size > reg->addr + reg->size) {
+        } else if (uaddr > reg->addr && uaddr < reg->addr + size &&
+                   uaddr + size > reg->addr + reg->size) {
             size_t unmapSize = reg->addr + reg->size - uaddr;

             reg->size = uaddr - reg->addr;
@@ -306,7 +307,13 @@ int uAddrSpaceHeapCheckNAlloc(struct uAddrSpace *as, vaddr_t addr)
     newReg->size = PAGE_SIZE;
     newReg->right = right;

-    list_add_tail_named(as->listVirtualReg, newReg, nextInAddrSpace, prevInAddrSpace);
+    // keep the AS list sorted
+    struct uAddrVirtualReg *prev = findVirtualRegionBeforeAddr(as, addrAlign);
+    if (prev)
+        list_insert_after_named(as->listVirtualReg, prev, newReg, prevInAddrSpace,
+                                nextInAddrSpace);
+    else
+        list_add_tail_named(as->listVirtualReg, newReg, nextInAddrSpace, prevInAddrSpace);

     unrefPhyPage(ppage);