diff --git a/System/inc/VM/VMM.h b/System/inc/VM/VMM.h
index 74cb5e7..6d35bcb 100644
--- a/System/inc/VM/VMM.h
+++ b/System/inc/VM/VMM.h
@@ -37,6 +37,7 @@
 
 void VMVirtualMemoryInitialize(Bootinfo* info);
 UInt64* VMVirtualMemoryMapPage(UInt64* PML4, UInt64 physical, UInt64 virtual, UInt64 flags);
+void* VMVirtualMemoryGetOrAllocatePage(UInt64* PML4, UInt64 virtual, UInt64 flags);
 UInt64 VMVirtualMemoryCreateAddressSpace();
 UInt64 VMGetCurrentCR3();
 void VMLoadCR3(UInt64 PML4Address);
diff --git a/System/src/OS/Exec/HOTLoader.c b/System/src/OS/Exec/HOTLoader.c
index 5d8906f..cf0c7a3 100644
--- a/System/src/OS/Exec/HOTLoader.c
+++ b/System/src/OS/Exec/HOTLoader.c
@@ -26,10 +26,10 @@ UInt64 HOTLoad(OSProcess* process, UInt8* data) {
         UInt64 end = (segment->vaddr + segment->memsz + 0xFFF) & ~(0xFFFULL);
 
         for (UInt64 address = start; address < end; address += kVMPageSize) {
-            void* physicalPage = VMPhysicalMemoryAllocatePage();
-            VMVirtualMemoryMapPage((UInt64*)process->physicalPML4, (UInt64)physicalPage, address, PTE_USER | PTE_RW | PTE_PRESENT);
-            void* kernelVirtAddress = (void*)((UInt64)physicalPage + HHDM_OFFSET);
-            MemorySet(kernelVirtAddress, 0, kVMPageSize);
+            void* kernelVirtAddress = VMVirtualMemoryGetOrAllocatePage((UInt64*)process->physicalPML4, address, PTE_USER | PTE_RW | PTE_PRESENT);
+            if (!kernelVirtAddress) {
+                return 0;
+            }
             UInt64 pageOverleapStart = (address > segment->vaddr) ? address : segment->vaddr;
             UInt64 pageOverleapEnd = (address + kVMPageSize < segment->vaddr + segment->filesz)
                 ? (address + kVMPageSize)
diff --git a/System/src/VM/VMM.c b/System/src/VM/VMM.c
index 386fe5d..c9dfb6d 100644
--- a/System/src/VM/VMM.c
+++ b/System/src/VM/VMM.c
@@ -94,6 +94,28 @@ static UInt64* sVMVirtualMemoryMapPageInternal(UInt64* pml4, UInt64 phys, UInt64
     return (UInt64*)virt;
 }
 
+static void* sVMVirtualMemoryGetMappedPageInternal(UInt64* pml4, UInt64 virt) {
+    UInt64 pt_idx = VMM_PT_INDEX(virt);
+    UInt64 pd_idx = VMM_PD_INDEX(virt);
+    UInt64 pdpt_idx = VMM_PDPT_INDEX(virt);
+    UInt64 pml4_idx = VMM_PML4_INDEX(virt);
+
+    UInt64* pml4_virt = pml4;
+    if (isInitialized) pml4_virt = (UInt64*)PHYS_TO_HHDM((UInt64)pml4);
+
+    if (!(pml4_virt[pml4_idx] & PTE_PRESENT)) return nullptr;
+    UInt64* pdpt_virt = sVMGetVirtualTable(PTE_GET_ADDR(pml4_virt[pml4_idx]));
+
+    if (!(pdpt_virt[pdpt_idx] & PTE_PRESENT)) return nullptr;
+    UInt64* pd_virt = sVMGetVirtualTable(PTE_GET_ADDR(pdpt_virt[pdpt_idx]));
+
+    if (!(pd_virt[pd_idx] & PTE_PRESENT)) return nullptr;
+    UInt64* pt_virt = sVMGetVirtualTable(PTE_GET_ADDR(pd_virt[pd_idx]));
+
+    if (!(pt_virt[pt_idx] & PTE_PRESENT)) return nullptr;
+    return (void*)PHYS_TO_HHDM(PTE_GET_ADDR(pt_virt[pt_idx]));
+}
+
 static void sVMVirtualMemoryUnmapPageInternal(UInt64* pml4, UInt64 virt) {
     UInt64 pt_idx = VMM_PT_INDEX(virt);
     UInt64 pd_idx = VMM_PD_INDEX(virt);
@@ -128,6 +150,35 @@ UInt64* VMVirtualMemoryMapPage(UInt64* pml4, UInt64 phys, UInt64 virt, UInt64 fl
     return result;
 }
 
+void* VMVirtualMemoryGetOrAllocatePage(UInt64* pml4, UInt64 virt, UInt64 flags) {
+    OSSpinlockState state;
+    OSSpinlockLockIRQ(&sVMVMMlock, &state);
+
+    void* mappedPage = sVMVirtualMemoryGetMappedPageInternal(pml4, virt);
+    if (mappedPage) {
+        OSSpinlockUnlockIRQ(&sVMVMMlock, &state);
+        return mappedPage;
+    }
+
+    void* physicalPage = VMPhysicalMemoryAllocatePage();
+    if (!physicalPage) {
+        OSSpinlockUnlockIRQ(&sVMVMMlock, &state);
+        return nullptr;
+    }
+
+    UInt64* result = sVMVirtualMemoryMapPageInternal(pml4, (UInt64)physicalPage, virt, flags);
+    if (!result) {
+        OSSpinlockUnlockIRQ(&sVMVMMlock, &state);
+        return nullptr;
+    }
+
+    void* kernelVirtualAddress = (void*)PHYS_TO_HHDM((UInt64)physicalPage);
+    MemorySet(kernelVirtualAddress, 0, kVMPageSize);
+
+    OSSpinlockUnlockIRQ(&sVMVMMlock, &state);
+    return kernelVirtualAddress;
+}
+
 void VMVirtualMemoryUnmapPage(UInt64* pml4, UInt64 virt) {
     OSSpinlockState state;
     OSSpinlockLockIRQ(&sVMVMMlock, &state);
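
One thing to flag in VMVirtualMemoryGetOrAllocatePage: when sVMVirtualMemoryMapPageInternal fails, the function unlocks and returns nullptr but never hands the freshly allocated physical page back to the PMM, so that page leaks on every mapping failure. A minimal sketch of the fix, assuming the PMM exposes a release routine; VMPhysicalMemoryFreePage is a hypothetical name not confirmed by this patch, so substitute whatever the PMM actually provides:

    UInt64* result = sVMVirtualMemoryMapPageInternal(pml4, (UInt64)physicalPage, virt, flags);
    if (!result) {
        // Hypothetical PMM release call; without something like it, physicalPage
        // is lost for good whenever the page-table walk or mapping fails.
        VMPhysicalMemoryFreePage(physicalPage);
        OSSpinlockUnlockIRQ(&sVMVMMlock, &state);
        return nullptr;
    }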
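Two behavioral details of the new function worth keeping in mind: it zeroes a page only when it freshly allocates one, so bytes already copied into an existing mapping (e.g. two ELF segments sharing a page in HOTLoad) survive the second call; and when a mapping already exists, the requested flags are neither checked against nor merged into the existing PTE, so a caller asking for different permissions silently gets the old mapping back.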