REF: Renamed kernel -> System; userspace -> Runtime; bootloader -> Boot

del: KSH
This commit is contained in:
Karina
2026-04-04 19:44:39 +04:00
parent 2f58f64175
commit 47735bb1bd
117 changed files with 10 additions and 274 deletions
+131
View File
@@ -0,0 +1,131 @@
// SPDX-License-Identifier: GPL-3.0-or-later
// Copyright (c) 2026 0xKarinyash
#include <VM/Heap.h>
#include <VM/PMM.h>
#include <VM/VMM.h>
#include <lib/String.h>
#include <OS/OSPanic.h>
#include <OS/OSSpinlock.h>
#include <types.h>
// Kernel PML4, defined in VMM; needed to map freshly allocated heap pages.
extern UInt64* gVMKernelPML4;
// Head of the intrusive doubly-linked block list covering the heap region.
static VMHeapBlockHeader* sVMHeapListHead = nullptr;
// Guards all block-list mutation; always taken via the *IRQ lock variants.
static OSSpinlock sVMHeapLock = {0};
// Merge `current` with its immediate successor when that successor is free.
// The successor's header is absorbed into `current`'s usable size.
static void sVMHeapCombineForward(VMHeapBlockHeader* current) {
    VMHeapBlockHeader* successor = current->next;
    if (successor == nullptr || !successor->isFree) return;
    current->size += sizeof(VMHeapBlockHeader) + successor->size;
    current->next = successor->next;
    // Re-link the back pointer of whichever block now follows `current`.
    if (current->next != nullptr) {
        current->next->previous = current;
    }
}
// One-time heap setup: back the fixed kernel-heap virtual range with freshly
// allocated physical pages, then seed the allocator with a single free block
// spanning the whole region.
void VMHeapInitialize() {
    UInt64 heapStart = kVMKernelHeapStart;
    for (UInt64 i = 0; i < kVMHeapSizePages; i++) {
        UInt64 physical = (UInt64)VMPhysicalMemoryAllocatePage();
        if (!physical) OSPanic("OOM during heap init");
        UInt64 virtual = heapStart + (i * kVMPageSize);
        // Mapping can itself fail on OOM (intermediate page-table allocation);
        // a silently unmapped heap page would fault much later, so fail loudly.
        if (!VMVirtualMemoryMapPage(gVMKernelPML4, physical, virtual, PTE_PRESENT | PTE_RW)) {
            OSPanic("Failed to map heap page during heap init");
        }
    }
    // The entire region starts as one big free block.
    sVMHeapListHead = (VMHeapBlockHeader*)heapStart;
    sVMHeapListHead->magic = kVMHeapBlockHeaderMagic;
    sVMHeapListHead->size = (kVMHeapSizePages * kVMPageSize) - sizeof(VMHeapBlockHeader);
    sVMHeapListHead->isFree = true;
    sVMHeapListHead->next = nullptr;
    sVMHeapListHead->previous = nullptr;
}
// First-fit allocation over the block list. The caller must hold sVMHeapLock.
// Returns a pointer to the payload (just past the header), or nullptr when no
// free block is large enough.
static void* sVMHeapAllocateInternal(UInt64 size) {
    if (size == 0) return nullptr;
    // Round the request up to a 16-byte multiple.
    UInt64 needed = (size + 15) & ~15;
    for (VMHeapBlockHeader* block = sVMHeapListHead; block != nullptr; block = block->next) {
        if (!block->isFree || block->size < needed) continue;
        UInt64 leftover = block->size - needed;
        // Split only when the remainder can hold a header plus a minimal payload.
        if (leftover > sizeof(VMHeapBlockHeader) + 16) {
            VMHeapBlockHeader* remainder = (VMHeapBlockHeader*)((UInt64)block + sizeof(VMHeapBlockHeader) + needed);
            remainder->magic = kVMHeapBlockHeaderMagic;
            remainder->size = leftover - sizeof(VMHeapBlockHeader);
            remainder->isFree = true;
            remainder->previous = block;
            remainder->next = block->next;
            if (remainder->next) remainder->next->previous = remainder;
            block->next = remainder;
            block->size = needed;
        }
        block->isFree = false;
        return (void*)((UInt64)block + sizeof(VMHeapBlockHeader));
    }
    return nullptr;
}
static void sVMHeapFreeInternal(void* pointer) {
if (!pointer) return;
VMHeapBlockHeader* current = (VMHeapBlockHeader*)((UInt64)pointer - sizeof(VMHeapBlockHeader));
if (current->magic != kVMHeapBlockHeaderMagic) return;
current->isFree = true;
if (current->next && current->next->isFree) sVMHeapCombineForward(current);
if (current->previous && current->previous->isFree) sVMHeapCombineForward(current->previous);
}
// Public allocation entry point: locking wrapper around the internal first-fit.
void* VMHeapAllocate(UInt64 size) {
    OSSpinlockState irqState;
    OSSpinlockLockIRQ(&sVMHeapLock, &irqState);
    void* payload = sVMHeapAllocateInternal(size);
    OSSpinlockUnlockIRQ(&sVMHeapLock, &irqState);
    return payload;
}
// Public free entry point: locking wrapper around the internal free.
void VMHeapFree(void* pointer) {
    OSSpinlockState irqState;
    OSSpinlockLockIRQ(&sVMHeapLock, &irqState);
    sVMHeapFreeInternal(pointer);
    OSSpinlockUnlockIRQ(&sVMHeapLock, &irqState);
}
// realloc-style resize. nullptr behaves like allocate; newSize == 0 behaves
// like free. Tries in order: keep the block, grow in place by absorbing a free
// successor, else allocate-copy-free. On allocation failure returns nullptr
// and leaves the original block valid (standard realloc contract).
void* VMHeapResize(void* pointer, UInt64 newSize) {
    if (!pointer) return VMHeapAllocate(newSize);
    if (newSize == 0) {
        VMHeapFree(pointer);
        return nullptr;
    }
    OSSpinlockState state;
    OSSpinlockLockIRQ(&sVMHeapLock, &state);
    VMHeapBlockHeader* current = (VMHeapBlockHeader*)((UInt64)pointer - sizeof(VMHeapBlockHeader));
    // Validate the header magic, mirroring sVMHeapFreeInternal; previously a
    // corrupt or foreign pointer was dereferenced and trusted unchecked.
    if (current->magic != kVMHeapBlockHeaderMagic) {
        OSSpinlockUnlockIRQ(&sVMHeapLock, &state);
        return nullptr;
    }
    // Already big enough: nothing to do.
    if (current->size >= newSize) {
        OSSpinlockUnlockIRQ(&sVMHeapLock, &state);
        return pointer;
    }
    // Grow in place: merge with a free successor when the combined span
    // (payload + absorbed header + successor payload) satisfies the request.
    if (current->next && current->next->isFree &&
        (current->size + sizeof(VMHeapBlockHeader) + current->next->size) >= newSize) {
        sVMHeapCombineForward(current);
        OSSpinlockUnlockIRQ(&sVMHeapLock, &state);
        return pointer;
    }
    // Fall back to allocate + copy + free of the old block.
    void* newPointer = sVMHeapAllocateInternal(newSize);
    if (newPointer) {
        MemoryCopy(newPointer, pointer, current->size);
        sVMHeapFreeInternal(pointer);
    }
    OSSpinlockUnlockIRQ(&sVMHeapLock, &state);
    return newPointer;
}
+131
View File
@@ -0,0 +1,131 @@
// SPDX-License-Identifier: GPL-3.0-or-later
// Copyright (c) 2026 0xKarinyash
#include <VM/PMM.h>
#include <VM/VMM.h>
#include <lib/String.h>
#include <lib/Math.h>
#include <OS/OSPanic.h>
#include <OS/OSSpinlock.h>
#include <types.h>
#include "bootinfo.h"
// Page-frame bitmap: one bit per physical page; 1 = used/reserved, 0 = free.
// (The "Phycal" spelling is part of the exported names other files extern.)
UInt8* gVMPhycalMemoryBitmap = nullptr;
UInt64 gVMPhycalMemoryBitmapSize = 0; // bitmap length in bytes
UInt64 gVMPhycalMemoryTotalMemorySize = 0; // highest physical address seen in the map
static OSSpinlock sVMPMMLock = {0}; // guards bitmap access in alloc/free
// Kernel image bounds, provided by the linker script.
extern UInt64 _kernel_start;
extern UInt64 _kernel_end;
// Returns the highest physical address discovered during PMM initialization,
// i.e. the extent of physical address space the bitmap covers, in bytes.
UInt64 VMPhysicalMemoryGetTotalMemorySize() {
    return gVMPhycalMemoryTotalMemorySize;
}
// Build the physical-page bitmap from the boot memory map.
// Pass 1: find the highest physical address (this sizes the bitmap).
// Pass 2: pick a conventional-memory region to host the bitmap itself.
// Pass 3: mark everything used, clear bits for conventional memory, then
//         re-reserve low memory, the kernel image, and the bitmap's own pages.
void VMPhysicalMemoryInitialize(BIMemoryMap *memoryMap) {
    UInt64 descriptorCount = memoryMap->mapSize / memoryMap->descriptorSize;
    UInt64 maxPhysicalAddress = 0;
    for (UInt64 i = 0; i < descriptorCount; i++) {
        // Descriptors are strided by descriptorSize, not sizeof(OSMemoryDescriptor).
        OSMemoryDescriptor* descriptor = (OSMemoryDescriptor*)((UInt8*)memoryMap->map + (i * memoryMap->descriptorSize));
        // Non-RAM regions do not extend the managed address range.
        if (descriptor->type == kOSMemoryTypeMappedIO ||
            descriptor->type == kOSMemoryTypeMappedIOPortSpace ||
            descriptor->type == kOSMemoryTypeUnusable ||
            descriptor->type == kOSMemoryTypeReserved ||
            descriptor->type == kOSMemoryTypePalCode) {
            continue;
        }
        UInt64 nominee = descriptor->physicalStart + (descriptor->pageCount * kVMPageSize);
        if (nominee > maxPhysicalAddress) {
            maxPhysicalAddress = nominee;
        }
    }
    gVMPhycalMemoryTotalMemorySize = maxPhysicalAddress;
    UInt64 totalPageCount = maxPhysicalAddress / kVMPageSize;
    // One bit per page, rounded up to whole bytes.
    UInt64 bitmapSize = (totalPageCount + 7) / 8;
    // Find free conventional memory above the reserved low range to host the bitmap.
    OSMemoryDescriptor *bitmapHostDescriptor = nullptr;
    for (UInt64 i = 0; i < descriptorCount; i++) {
        OSMemoryDescriptor *descriptor = (OSMemoryDescriptor*)((UInt8*)memoryMap->map + (i * memoryMap->descriptorSize));
        if ((descriptor->type == kOSMemoryTypeConventional) &&
            ((descriptor->pageCount * kVMPageSize) >= bitmapSize) &&
            (descriptor->physicalStart >= kVMSafeSpaceStartAddress)) {
            bitmapHostDescriptor = descriptor;
            break;
        }
    }
    if (bitmapHostDescriptor == nullptr) {
        OSPanic("PMM: Not enough RAM for memory bitmap!");
    }
    gVMPhycalMemoryBitmap = (UInt8*)bitmapHostDescriptor->physicalStart;
    gVMPhycalMemoryBitmapSize = bitmapSize;
    // Default every page to "used"; only conventional RAM is freed below.
    MemorySet(gVMPhycalMemoryBitmap, 0xFF, bitmapSize);
    for (UInt64 i = 0; i < descriptorCount; i++) {
        OSMemoryDescriptor *descriptor = (OSMemoryDescriptor*)((UInt8*)memoryMap->map + (i * memoryMap->descriptorSize));
        if (descriptor->type != kOSMemoryTypeConventional) {
            continue;
        }
        UInt64 startAddress = descriptor->physicalStart;
        UInt64 endAddress = startAddress + (descriptor->pageCount * kVMPageSize);
        for (UInt64 addr = startAddress; addr < endAddress; addr += kVMPageSize) {
            BITMAP_UNSET(gVMPhycalMemoryBitmap, addr);
        }
    }
    // Re-reserve ranges that must never be handed out by the allocator:
    // the low "safe space" window, the kernel image, and the bitmap itself.
    UInt64 kernelStart = KERNEL_VIRT_TO_PHYS((UInt64)&_kernel_start);
    UInt64 kernelEnd = KERNEL_VIRT_TO_PHYS((UInt64)&_kernel_end);
    UInt64 bitmapStart = (UInt64)gVMPhycalMemoryBitmap;
    UInt64 bitmapEnd = bitmapStart + gVMPhycalMemoryBitmapSize;
    for (UInt64 addr = 0; addr < kVMSafeSpaceStartAddress; addr += kVMPageSize) BITMAP_SET(gVMPhycalMemoryBitmap, addr);
    for (UInt64 addr = kernelStart; addr < kernelEnd; addr += kVMPageSize) BITMAP_SET(gVMPhycalMemoryBitmap, addr);
    for (UInt64 addr = bitmapStart; addr < bitmapEnd; addr += kVMPageSize) BITMAP_SET(gVMPhycalMemoryBitmap, addr);
}
void* VMPhysicalMemoryAllocatePage() {
OSSpinlockState state;
OSSpinlockLockIRQ(&sVMPMMLock, &state);
for (UInt64 i = 0; i < gVMPhycalMemoryBitmapSize; i++) {
if (gVMPhycalMemoryBitmap[i] == 0xFF) continue;
for (UInt64 bit = 0; bit < 8; bit++) {
if ((gVMPhycalMemoryBitmap[i] & (1 << bit)) == 0) {
UInt64 address = (i * 8 + bit) * kVMPageSize;
BITMAP_SET(gVMPhycalMemoryBitmap, address);
OSSpinlockUnlockIRQ(&sVMPMMLock, &state);
return (void*)address;
}
}
}
OSSpinlockUnlockIRQ(&sVMPMMLock, &state);
return nullptr; // Out of memory
}
// Mark the page containing `address` as free again.
// A nullptr argument is now a no-op: previously it would clear bit 0 and
// un-reserve physical page 0, which init deliberately marked as used.
void VMPhysicalMemoryFreePage(void* address) {
    if (!address) return;
    OSSpinlockState state;
    OSSpinlockLockIRQ(&sVMPMMLock, &state);
    BITMAP_UNSET(gVMPhycalMemoryBitmap, (UInt64)address);
    OSSpinlockUnlockIRQ(&sVMPMMLock, &state);
}
+207
View File
@@ -0,0 +1,207 @@
// SPDX-License-Identifier: GPL-3.0-or-later
// Copyright (c) 2026 0xKarinyash
#include <VM/VMM.h>
#include <VM/PMM.h>
#include <OS/OSPanic.h>
#include <OS/OSSpinlock.h>
#include <GDT.h>
#include <IDT.h>
#include <lib/String.h>
#include <types.h>
#include "bootinfo.h"
// Fixed layout of the per-process user stack (top address and size in bytes).
enum {
    kVMUserStackTop = 0x70000000,
    kVMUserStackSize = 0x4000
};
// Kernel page tables: pointer used by the kernel and its physical address
// (the latter is what gets loaded into CR3).
UInt64* gVMKernelPML4 = nullptr;
UInt64 gVMKernelPML4Physical = 0;
static OSSpinlock sVMVMMlock = {0}; // guards page-table walks/mutation
// Flips to true after CR3 switch-over; before that, page tables are accessed
// by their physical addresses, afterwards through the higher-half direct map.
static bool isInitialized = false;
// Kernel image bounds from the linker script.
extern UInt64 _kernel_start;
extern UInt64 _kernel_end;
// PMM bitmap, rebased into the higher half during VMVirtualMemoryInitialize.
extern UInt8* gVMPhycalMemoryBitmap;
extern UInt64 gVMPhycalMemoryBitmapSize;
// HAL structures referenced elsewhere; declared here but unused in this view —
// TODO(review): confirm they are still needed after the rename.
extern HALGlobalDescriptorTable gHALGlobalDescriptorTable[];
extern HALInterruptsDescriptorTableEntry gHALInterruptsDescriptorTable[];
extern UInt8 gHALDoubleFaultStack[];
extern UInt8 stack_guard;
// Translate a page-table physical address into a dereferenceable pointer:
// identity access before the CR3 switch-over, higher-half direct map after.
static UInt64* sVMGetVirtualTable(UInt64 phys) {
    return isInitialized ? (UInt64*)PHYS_TO_HHDM(phys) : (UInt64*)phys;
}
// Walk one paging level: ensure table_virt[idx] holds a present table entry,
// allocating and zeroing a fresh table on demand; on an existing entry only
// the PTE_USER bit from `flags` is OR-ed in (access is widened, never
// narrowed). Returns the next-level table as a dereferenceable pointer, or
// nullptr on OOM.
static UInt64* sVMGetOrCreateNextTable(UInt64* table_virt, UInt64 idx, UInt64 flags) {
    UInt64 table_flags = PTE_PRESENT | PTE_RW | (flags & PTE_USER);
    if (!(table_virt[idx] & PTE_PRESENT)) {
        UInt64* addr = VMPhysicalMemoryAllocatePage();
        if (!addr) return nullptr;
        UInt64* addr_virt = sVMGetVirtualTable((UInt64)addr);
        MemorySet(addr_virt, 0, kVMPageSize);
        table_virt[idx] = (UInt64)addr | table_flags;
    } else {
        table_virt[idx] |= (flags & PTE_USER);
    }
    return sVMGetVirtualTable(PTE_GET_ADDR(table_virt[idx]));
}

// Map one page: phys -> virt in `pml4` with `flags`, creating intermediate
// tables on demand. Returns virt on success, nullptr on OOM. The previous
// version copy-pasted the level walk three times; it is now one helper.
// Note: intermediate tables allocated before a later OOM are not rolled back
// (identical to the original behavior).
static UInt64* sVMVirtualMemoryMapPageInternal(UInt64* pml4, UInt64 phys, UInt64 virt, UInt64 flags) {
    UInt64* pml4_virt = pml4;
    if (isInitialized) pml4_virt = (UInt64*)PHYS_TO_HHDM((UInt64)pml4);
    UInt64* pdpt_virt = sVMGetOrCreateNextTable(pml4_virt, VMM_PML4_INDEX(virt), flags);
    if (!pdpt_virt) return nullptr;
    UInt64* pd_virt = sVMGetOrCreateNextTable(pdpt_virt, VMM_PDPT_INDEX(virt), flags);
    if (!pd_virt) return nullptr;
    UInt64* pt_virt = sVMGetOrCreateNextTable(pd_virt, VMM_PD_INDEX(virt), flags);
    if (!pt_virt) return nullptr;
    pt_virt[VMM_PT_INDEX(virt)] = phys | flags;
    // Drop any stale TLB entry for this virtual address.
    __asm__ volatile("invlpg (%0)" :: "r" (virt) : "memory");
    return (UInt64*)virt;
}
// Clear the leaf PTE for `virt` in `pml4`, if all intermediate levels exist.
// Intermediate tables are left in place even if they become empty.
static void sVMVirtualMemoryUnmapPageInternal(UInt64* pml4, UInt64 virt) {
    UInt64* table = pml4;
    if (isInitialized) table = (UInt64*)PHYS_TO_HHDM((UInt64)pml4);
    // Descend PML4 -> PDPT -> PD, bailing out if any level is absent.
    UInt64 entry = table[VMM_PML4_INDEX(virt)];
    if (!(entry & PTE_PRESENT)) return;
    table = sVMGetVirtualTable(PTE_GET_ADDR(entry));
    entry = table[VMM_PDPT_INDEX(virt)];
    if (!(entry & PTE_PRESENT)) return;
    table = sVMGetVirtualTable(PTE_GET_ADDR(entry));
    entry = table[VMM_PD_INDEX(virt)];
    if (!(entry & PTE_PRESENT)) return;
    table = sVMGetVirtualTable(PTE_GET_ADDR(entry));
    // Clear the leaf entry and flush the stale translation.
    table[VMM_PT_INDEX(virt)] = 0;
    __asm__ volatile("invlpg (%0)" :: "r" (virt) : "memory");
}
// Public mapping entry point: locking wrapper around the internal walker.
UInt64* VMVirtualMemoryMapPage(UInt64* pml4, UInt64 phys, UInt64 virt, UInt64 flags) {
    OSSpinlockState irqState;
    OSSpinlockLockIRQ(&sVMVMMlock, &irqState);
    UInt64* mapped = sVMVirtualMemoryMapPageInternal(pml4, phys, virt, flags);
    OSSpinlockUnlockIRQ(&sVMVMMlock, &irqState);
    return mapped;
}
// Public unmap entry point: locking wrapper around the internal walker.
void VMVirtualMemoryUnmapPage(UInt64* pml4, UInt64 virt) {
    OSSpinlockState irqState;
    OSSpinlockLockIRQ(&sVMVMMlock, &irqState);
    sVMVirtualMemoryUnmapPageInternal(pml4, virt);
    OSSpinlockUnlockIRQ(&sVMVMMlock, &irqState);
}
// Load CR3 with the physical address of a PML4, switching address spaces
// (and implicitly flushing non-global TLB entries).
void VMLoadCR3(UInt64 pml4_addr) {
    __asm__ volatile ("mov %0, %%cr3" :: "r"(pml4_addr) : "memory");
}
// Build the kernel address space: direct-map all physical memory into the
// higher half, map the kernel image at its linked virtual address, map the
// framebuffer, then switch CR3 over to the new tables. Any OOM here is fatal.
void VMVirtualMemoryInitialize(Bootinfo* info) {
    gVMKernelPML4Physical = (UInt64)VMPhysicalMemoryAllocatePage();
    // Previously unchecked: on OOM this would MemorySet physical page 0.
    if (!gVMKernelPML4Physical) OSPanic("VMM: OOM allocating kernel PML4");
    // CR3 switch-over has not happened yet, so the physical address is
    // directly usable as a pointer here.
    gVMKernelPML4 = (UInt64*)gVMKernelPML4Physical;
    MemorySet(gVMKernelPML4, 0, kVMPageSize);
    UInt64 k_virt_start = (UInt64)&_kernel_start;
    UInt64 k_virt_end = (UInt64)&_kernel_end;
    UInt64 fb_start = (UInt64)info->framebuffer.base;
    UInt64 fb_end = fb_start + info->framebuffer.baseSize;
    UInt64 fb_size = fb_end - fb_start;
    UInt64 max_mem = VMPhysicalMemoryGetTotalMemorySize();
    // Higher-half direct map of all physical memory.
    for (UInt64 i = 0; i < max_mem; i += kVMPageSize) {
        if (!VMVirtualMemoryMapPage(gVMKernelPML4, i, PHYS_TO_HHDM(i), PTE_PRESENT | PTE_RW))
            OSPanic("VMM: OOM mapping HHDM");
    }
    // Kernel image at its linked (higher-half) virtual addresses.
    for (UInt64 i = k_virt_start; i < k_virt_end; i += kVMPageSize) {
        if (!VMVirtualMemoryMapPage(gVMKernelPML4, KERNEL_VIRT_TO_PHYS(i), i, PTE_PRESENT | PTE_RW))
            OSPanic("VMM: OOM mapping kernel image");
    }
    // Framebuffer at its fixed virtual base.
    for (UInt64 i = 0; i < fb_size; i += kVMPageSize) {
        if (!VMVirtualMemoryMapPage(gVMKernelPML4, fb_start + i, FB_VIRT_BASE + i, PTE_PRESENT | PTE_RW))
            OSPanic("VMM: OOM mapping framebuffer");
    }
    // Leave the guard page unmapped so a kernel stack overflow faults.
    VMVirtualMemoryUnmapPage(gVMKernelPML4, (UInt64)&stack_guard);
    // Rebase pointers that were physical into the higher-half direct map.
    gVMPhycalMemoryBitmap = (UInt8*)PHYS_TO_HHDM((UInt64)gVMPhycalMemoryBitmap);
    info->framebuffer.base = (UInt32*)FB_VIRT_BASE;
    VMLoadCR3(gVMKernelPML4Physical);
    isInitialized = true;
}
UInt64 VMVirtualMemoryCreateAddressSpace() {
OSSpinlockState state;
OSSpinlockLockIRQ(&sVMVMMlock, &state);
UInt64 phys = (UInt64)VMPhysicalMemoryAllocatePage();
if (!phys) {
OSSpinlockUnlockIRQ(&sVMVMMlock, &state);
return 0;
};
UInt64* virt = (UInt64*)PHYS_TO_HHDM(phys);
MemorySet(virt, 0, kVMPageSize);
UInt64* kernel_pml4_virt = sVMGetVirtualTable((UInt64)gVMKernelPML4);
for (UInt32 i = 256; i < 512; i++) {
virt[i] = kernel_pml4_virt[i];
}
OSSpinlockUnlockIRQ(&sVMVMMlock, &state);
return phys;
}
// Read the currently loaded CR3 (physical address of the active PML4,
// plus any control bits the CPU stores there).
UInt64 VMGetCurrentCR3() {
    UInt64 cr3;
    __asm__ volatile("mov %%cr3, %0" : "=r"(cr3));
    return cr3;
}
// Allocate, zero, and map the fixed user stack range [top - size, top) into
// the given address space with user read/write permissions.
// Uses kVMPageSize consistently; the previous version hard-coded 4096.
void VMVirtualMemorySetupUserStack(UInt64* pml4_phys) {
    UInt64 stack_bottom = kVMUserStackTop - kVMUserStackSize;
    for (UInt64 addr = stack_bottom; addr < kVMUserStackTop; addr += kVMPageSize) {
        void* phys = VMPhysicalMemoryAllocatePage();
        if (!phys) OSPanic("OOM in user stack setup");
        // Zero through the direct map so no stale data leaks into userspace.
        MemorySet((void*)PHYS_TO_HHDM((UInt64)phys), 0, kVMPageSize);
        VMVirtualMemoryMapPage((UInt64*)pml4_phys, (UInt64)phys, addr, PTE_PRESENT | PTE_RW | PTE_USER);
    }
}