fix: hot loader page reuse for overlapping init segments

karina
2026-04-21 00:30:41 +04:00
parent 41578c29d6
commit b7a4a90e63
3 changed files with 56 additions and 4 deletions
+1
@@ -37,6 +37,7 @@
 void VMVirtualMemoryInitialize(Bootinfo* info);
 UInt64* VMVirtualMemoryMapPage(UInt64* PML4, UInt64 physical, UInt64 virtual, UInt64 flags);
+void* VMVirtualMemoryGetOrAllocatePage(UInt64* PML4, UInt64 virtual, UInt64 flags);
 UInt64 VMVirtualMemoryCreateAddressSpace();
 UInt64 VMGetCurrentCR3();
 void VMLoadCR3(UInt64 PML4Address);
+4 -4
@@ -26,10 +26,10 @@ UInt64 HOTLoad(OSProcess* process, UInt8* data) {
         UInt64 end = (segment->vaddr + segment->memsz + 0xFFF) & ~(0xFFFULL);
         for (UInt64 address = start; address < end; address += kVMPageSize) {
-            void* physicalPage = VMPhysicalMemoryAllocatePage();
-            VMVirtualMemoryMapPage((UInt64*)process->physicalPML4, (UInt64)physicalPage, address, PTE_USER | PTE_RW | PTE_PRESENT);
-            void* kernelVirtAddress = (void*)((UInt64)physicalPage + HHDM_OFFSET);
-            MemorySet(kernelVirtAddress, 0, kVMPageSize);
+            void* kernelVirtAddress = VMVirtualMemoryGetOrAllocatePage((UInt64*)process->physicalPML4, address, PTE_USER | PTE_RW | PTE_PRESENT);
+            if (!kernelVirtAddress) {
+                return 0;
+            }
             UInt64 pageOverleapStart = (address > segment->vaddr) ? address : segment->vaddr;
             UInt64 pageOverleapEnd = (address + kVMPageSize < segment->vaddr + segment->filesz)
                 ? (address + kVMPageSize)
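
The hunk above stops before the copy of the segment's file bytes into the page. A minimal sketch of how the rest of the per-page body presumably uses the overlap bounds follows; MemoryCopy and segment->offset are assumed names, not taken from this commit. The substance of the fix is visible either way: the page is no longer allocated and zeroed unconditionally on every pass, so when two init segments share a 4 KiB page the second pass gets the already-mapped page back instead of wiping the bytes the first pass wrote.

// Sketch only: the copy step implied by pageOverleapStart/pageOverleapEnd.
// MemoryCopy and segment->offset are assumed names, analogous to MemorySet.
if (pageOverleapEnd > pageOverleapStart) {
    UInt64 length = pageOverleapEnd - pageOverleapStart;
    UInt64 pageOffset = pageOverleapStart - address;                            // offset inside this 4 KiB page
    UInt64 fileOffset = segment->offset + (pageOverleapStart - segment->vaddr); // assumed ELF file offset field
    MemoryCopy((UInt8*)kernelVirtAddress + pageOffset, data + fileOffset, length);
}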
+51
@@ -94,6 +94,28 @@ static UInt64* sVMVirtualMemoryMapPageInternal(UInt64* pml4, UInt64 phys, UInt64
     return (UInt64*)virt;
 }
 
+static void* sVMVirtualMemoryGetMappedPageInternal(UInt64* pml4, UInt64 virt) {
+    UInt64 pt_idx = VMM_PT_INDEX(virt);
+    UInt64 pd_idx = VMM_PD_INDEX(virt);
+    UInt64 pdpt_idx = VMM_PDPT_INDEX(virt);
+    UInt64 pml4_idx = VMM_PML4_INDEX(virt);
+
+    UInt64* pml4_virt = pml4;
+    if (isInitialized) pml4_virt = (UInt64*)PHYS_TO_HHDM((UInt64)pml4);
+
+    if (!(pml4_virt[pml4_idx] & PTE_PRESENT)) return nullptr;
+    UInt64* pdpt_virt = sVMGetVirtualTable(PTE_GET_ADDR(pml4_virt[pml4_idx]));
+
+    if (!(pdpt_virt[pdpt_idx] & PTE_PRESENT)) return nullptr;
+    UInt64* pd_virt = sVMGetVirtualTable(PTE_GET_ADDR(pdpt_virt[pdpt_idx]));
+
+    if (!(pd_virt[pd_idx] & PTE_PRESENT)) return nullptr;
+    UInt64* pt_virt = sVMGetVirtualTable(PTE_GET_ADDR(pd_virt[pd_idx]));
+
+    if (!(pt_virt[pt_idx] & PTE_PRESENT)) return nullptr;
+    return (void*)PHYS_TO_HHDM(PTE_GET_ADDR(pt_virt[pt_idx]));
+}
+
 static void sVMVirtualMemoryUnmapPageInternal(UInt64* pml4, UInt64 virt) {
     UInt64 pt_idx = VMM_PT_INDEX(virt);
     UInt64 pd_idx = VMM_PD_INDEX(virt);
@@ -128,6 +150,35 @@ UInt64* VMVirtualMemoryMapPage(UInt64* pml4, UInt64 phys, UInt64 virt, UInt64 fl
     return result;
 }
 
+void* VMVirtualMemoryGetOrAllocatePage(UInt64* pml4, UInt64 virt, UInt64 flags) {
+    OSSpinlockState state;
+    OSSpinlockLockIRQ(&sVMVMMlock, &state);
+
+    void* mappedPage = sVMVirtualMemoryGetMappedPageInternal(pml4, virt);
+    if (mappedPage) {
+        OSSpinlockUnlockIRQ(&sVMVMMlock, &state);
+        return mappedPage;
+    }
+
+    void* physicalPage = VMPhysicalMemoryAllocatePage();
+    if (!physicalPage) {
+        OSSpinlockUnlockIRQ(&sVMVMMlock, &state);
+        return nullptr;
+    }
+
+    UInt64* result = sVMVirtualMemoryMapPageInternal(pml4, (UInt64)physicalPage, virt, flags);
+    if (!result) {
+        OSSpinlockUnlockIRQ(&sVMVMMlock, &state);
+        return nullptr;
+    }
+
+    void* kernelVirtualAddress = (void*)PHYS_TO_HHDM((UInt64)physicalPage);
+    MemorySet(kernelVirtualAddress, 0, kVMPageSize);
+
+    OSSpinlockUnlockIRQ(&sVMVMMlock, &state);
+    return kernelVirtualAddress;
+}
+
 void VMVirtualMemoryUnmapPage(UInt64* pml4, UInt64 virt) {
     OSSpinlockState state;
     OSSpinlockLockIRQ(&sVMVMMlock, &state);
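
VMVirtualMemoryGetOrAllocatePage performs the lookup, the allocation, the mapping, and the zeroing under a single acquisition of sVMVMMlock, so no other caller can insert a mapping between the lookup and the map, and a page is zeroed only by the call that actually allocates it. A minimal usage sketch (hypothetical caller and example address, not part of this commit):

// Requesting the same user page twice returns the same kernel-virtual (HHDM) address.
UInt64* pml4 = (UInt64*)process->physicalPML4;  // hypothetical process
void* first  = VMVirtualMemoryGetOrAllocatePage(pml4, 0x400000, PTE_USER | PTE_RW | PTE_PRESENT);
void* second = VMVirtualMemoryGetOrAllocatePage(pml4, 0x400000, PTE_USER | PTE_RW | PTE_PRESENT);
// first == second: the second call returns the mapping found by
// sVMVirtualMemoryGetMappedPageInternal and neither allocates nor zeroes again.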