fix(vmm): make MMU bring-up and kernel mappings reliable
This commit is contained in:
@@ -0,0 +1,101 @@
|
||||
#include <VM/Heap.h>
|
||||
#include <VM/PMM.h>
|
||||
#include <VM/VMM.h>
|
||||
#include <Lib/String.h>
|
||||
#include <OS/Panic.h>
|
||||
|
||||
// Head of the doubly-linked list of heap block headers covering the kernel
// heap region; initialized by HeapInitialize() as one large free block.
static VMHeapBlockHeader* sVMHeapListHead = nullptr;
|
||||
|
||||
static void CombineForward(VMHeapBlockHeader* current) {
|
||||
if (!current->next || !current->next->isFree) return;
|
||||
current->size += sizeof(VMHeapBlockHeader) + current->next->size;
|
||||
current->next = current->next->next;
|
||||
if (current->next) current->next->previous = current; // what the fuck
|
||||
}
|
||||
|
||||
void HeapInitialize() {
|
||||
Address heapStart = kKernelHeapStart;
|
||||
|
||||
for (UInt64 i = 0; i < kHeapSizePages; i++) {
|
||||
Address physical = (Address)PMMAllocatePage();
|
||||
if (!physical) OSPanic("OOM during heap init");
|
||||
|
||||
Address virtual = heapStart + (i * kVMPageSize);
|
||||
VMMMapPage(gVMKernelL0Table, physical, virtual, kPTENormalMem | kPTEAccessRW | kPTEPrivNX | kPTEUserNX);
|
||||
}
|
||||
|
||||
sVMHeapListHead = (VMHeapBlockHeader*)heapStart;
|
||||
sVMHeapListHead->magic = kHeapBlockHeaderMagic;
|
||||
sVMHeapListHead->size = (kHeapSizePages * kVMPageSize) - sizeof(VMHeapBlockHeader);
|
||||
sVMHeapListHead->isFree = true;
|
||||
sVMHeapListHead->next = nullptr;
|
||||
sVMHeapListHead->previous = nullptr;
|
||||
}
|
||||
|
||||
// Allocates `size` bytes from the kernel heap using a first-fit scan.
// Requests are rounded up to a 16-byte granule. Returns nullptr when
// `size` is 0 or no free block is large enough.
Pointer HeapAllocate(Size size) {
    if (size == 0) return nullptr;

    // Round the request up to the next multiple of 16.
    Size needed = (size + 15) & ~15;

    for (VMHeapBlockHeader* block = sVMHeapListHead; block != nullptr; block = block->next) {
        if (!block->isFree || block->size < needed) continue;

        // Split off a tail block when the leftover space can hold a header
        // plus at least a 16-byte payload; otherwise hand out the whole block.
        if (block->size > needed + sizeof(VMHeapBlockHeader) + 16) {
            VMHeapBlockHeader* tail = (VMHeapBlockHeader*)((Address)block + sizeof(VMHeapBlockHeader) + needed);
            tail->magic = kHeapBlockHeaderMagic;
            tail->size = block->size - needed - sizeof(VMHeapBlockHeader);
            tail->isFree = true;
            tail->next = block->next;
            tail->previous = block;

            if (tail->next) tail->next->previous = tail;
            block->next = tail;
            block->size = needed;
        }

        block->isFree = false;
        // The payload starts immediately after the header.
        return (Pointer)((Address)block + sizeof(VMHeapBlockHeader));
    }

    // No free block could satisfy the request.
    return nullptr;
}
|
||||
|
||||
// Returns `pointer` to the heap and coalesces it with any free neighbors.
// nullptr and pointers whose header magic does not match are ignored.
void HeapFree(Pointer pointer) {
    if (pointer == nullptr) return;

    VMHeapBlockHeader* header = (VMHeapBlockHeader*)((Address)pointer - sizeof(VMHeapBlockHeader));
    if (header->magic != kHeapBlockHeaderMagic) return; // not a heap block

    header->isFree = true;

    // Merge forward into the successor first, then let a free predecessor
    // absorb this block as well.
    if (header->next && header->next->isFree) CombineForward(header);
    if (header->previous && header->previous->isFree) CombineForward(header->previous);
}
|
||||
|
||||
// Resizes the allocation at `pointer` to `newSize` bytes.
//
// Semantics follow realloc: a nullptr `pointer` behaves like HeapAllocate,
// a `newSize` of 0 frees the block and returns nullptr. On a moving resize
// the old contents are copied and the old block is freed; if the new
// allocation fails, the original block is left intact and nullptr is
// returned.
Pointer HeapResize(Pointer pointer, Size newSize) {
    if (!pointer) return HeapAllocate(newSize);
    if (newSize == 0) {
        HeapFree(pointer);
        return nullptr;
    }

    Size alignedSize = (newSize + 15) & ~15;

    VMHeapBlockHeader* current = (VMHeapBlockHeader*)((Address)pointer - sizeof(VMHeapBlockHeader));
    // Reject corrupt or foreign pointers before trusting header fields —
    // mirrors the magic check HeapFree performs.
    if (current->magic != kHeapBlockHeaderMagic) return nullptr;

    // Already big enough: nothing to do.
    if (current->size >= alignedSize) {
        return pointer;
    }

    // Grow in place by absorbing a free successor when the combined
    // capacity (payload + reclaimed header + payload) covers the request.
    if (current->next && current->next->isFree &&
        (current->size + sizeof(VMHeapBlockHeader) + current->next->size) >= alignedSize) {
        CombineForward(current);
        return pointer;
    }

    // Moving resize: allocate, copy only the old payload, release the old
    // block. On allocation failure the caller keeps the original block.
    Pointer newPointer = HeapAllocate(newSize);
    if (newPointer) {
        MemoryCopy(newPointer, pointer, current->size);
        HeapFree(pointer);
    }

    return newPointer;
}
|
||||
Reference in New Issue
Block a user