Merge pull request #15 from 0xKSor/feat/vmm

feat(arm64): higher-half kernel, VMM, and working MMU bring-up
This commit is contained in:
hwachakarter
2026-04-27 21:12:09 +09:00
committed by GitHub
16 changed files with 625 additions and 51 deletions
+16 -8
@@ -129,20 +129,18 @@ static efi_status_t parse_elf_headers(efi_physical_address_t kernel_addr) {
     return EFI_SUCCESS;
 }
 
-static efi_status_t load_elf_segments(efi_physical_address_t kernel_addr, efi_file_handle_t* kernel_file) {
+static efi_status_t load_elf_segments(efi_physical_address_t kernel_addr, efi_file_handle_t* kernel_file, efi_physical_address_t* out_phys_entry) {
     Elf64_Ehdr* elf_header = (Elf64_Ehdr*)kernel_addr;
+    *out_phys_entry = 0;
 
     for (int i = 0; i < elf_header->e_phnum; i++) {
         Elf64_Phdr* phdr = (Elf64_Phdr*)(kernel_addr + elf_header->e_phoff + i * elf_header->e_phentsize);
         if (phdr->p_type != PT_LOAD) continue;
 
-        if (phdr->p_vaddr < 0x40000000) {
-            print(WSTR("Skipping low/weird segment\r\n"));
-            continue;
-        }
-
         uintn_t pages = (phdr->p_memsz + 0xFFF) / 0x1000;
-        efi_physical_address_t segment_addr = phdr->p_vaddr;
+        efi_physical_address_t segment_addr = phdr->p_paddr;
         efi_status_t status = gBS->AllocatePages(AllocateAddress, EfiLoaderData, pages, &segment_addr);
         if (EFI_ERROR(status)) {
@@ -159,6 +157,15 @@ static efi_status_t load_elf_segments(efi_physical_address_t kernel_addr, efi_fi
         if (EFI_ERROR(status) || size_to_read != phdr->p_filesz) {
             return fail(WSTR("File read error\r\n"));
         }
+
+        if (elf_header->e_entry >= phdr->p_vaddr && elf_header->e_entry < phdr->p_vaddr + phdr->p_memsz) {
+            uint64_t entry_offset = elf_header->e_entry - phdr->p_vaddr;
+            *out_phys_entry = segment_addr + entry_offset;
+        }
     }
+
+    if (*out_phys_entry == 0) {
+        return fail(WSTR("Entry point not found in PT_LOAD segments\r\n"));
+    }
 
     return EFI_SUCCESS;
@@ -235,7 +242,8 @@ efi_status_t bootloader_main(void) {
     status = parse_elf_headers(kernel_addr);
     if (EFI_ERROR(status)) return status;
 
-    status = load_elf_segments(kernel_addr, kernel_file);
+    efi_physical_address_t phys_entry = 0;
+    status = load_elf_segments(kernel_addr, kernel_file, &phys_entry);
     if (EFI_ERROR(status)) return status;
 
     Bootinfo* boot_info = NULL;
@@ -261,7 +269,7 @@ efi_status_t bootloader_main(void) {
     }
 
     typedef void (*kernel_entry_t)(Bootinfo*);
-    kernel_entry_t kernel_main = (kernel_entry_t)elf_header->e_entry;
+    kernel_entry_t kernel_main = (kernel_entry_t)phys_entry;
     kernel_main(boot_info);
 
     return EFI_SUCCESS;
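The jump target changed because e_entry now carries the higher-half link address, which is meaningless until the kernel's own MMU setup runs. A worked example of the translation done in load_elf_segments, assuming the segment layout from the linker script at the end of this diff:

// e_entry = 0xFFFFFFFF80100000, p_vaddr = 0xFFFFFFFF80100000, p_paddr = 0x40100000
// entry_offset = e_entry - p_vaddr = 0
// phys_entry   = segment_addr + entry_offset = 0x40100000
// i.e. an address the bootloader can branch to before paging is reconfigured.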
+51
@@ -21,4 +21,55 @@ static inline UInt64 CPUGetFAR() {
     UInt64 far;
     __asm__ volatile ("mrs %0, far_el1" : "=r" (far));
     return far;
 }
+
+static inline void CPUInvalidateTLB(Address virt) {
+    __asm__ volatile(
+        "dsb ishst\n"
+        "tlbi vaae1is, %0\n"
+        "dsb ish\n"
+        "isb\n"
+        :: "r" (virt >> 12) : "memory"
+    );
+}
+
+static inline void CPUEnableMMU(Address l0PhysicalAddress) {
+    // MAIR_EL1 (Memory Attribute Indirection Register):
+    // kPTENormalMem is attribute index 0, kPTEDeviceMem is index 1.
+    // 0xFF = Normal (write-back cacheable), 0x00 = Device.
+    UInt64 mair = (0xFFULL << 0) | (0x00ULL << 8);
+
+    // TCR_EL1 (Translation Control Register):
+    // configures the MMU for 4 KB pages and 48-bit virtual addresses.
+    // T0SZ/T1SZ = 16 (64 - 48 = 16), TG0/TG1 = 4 KB granule.
+    UInt64 tcr = (16ULL << 0)  | // T0SZ (userspace address size)
+                 (16ULL << 16) | // T1SZ (kernelspace address size)
+                 (0ULL << 14)  | // TG0 (user 4 KB granule)
+                 (2ULL << 30)  | // TG1 (kernel 4 KB granule)
+                 (3ULL << 28)  | // SH1 (Inner Shareable)
+                 (3ULL << 12)  | // SH0 (Inner Shareable)
+                 (5ULL << 32);   // IPS (48-bit physical addresses)
+
+    __asm__ volatile (
+        "msr mair_el1, %0\n"
+        "msr tcr_el1, %1\n"
+        "msr ttbr0_el1, %2\n" // userspace root table
+        "msr ttbr1_el1, %2\n" // kernelspace root table
+        "tlbi vmalle1is\n"
+        "isb\n" // Instruction Synchronization Barrier
+        :: "r"(mair), "r"(tcr), "r"(l0PhysicalAddress) : "memory"
+    );
+
+    // Turn on the MMU in SCTLR_EL1 (System Control Register):
+    // bit 0 = M (MMU enable), bit 2 = C (data cache), bit 12 = I (instruction cache).
+    UInt64 sctlr;
+    UInt64 sctlr_flags = 0x1005; // bits 0 (M), 2 (C), and 12 (I)
+    __asm__ volatile (
+        "mrs %0, sctlr_el1\n"
+        "orr %0, %0, %1\n"
+        "msr sctlr_el1, %0\n"
+        "isb\n"
+        : "=r"(sctlr) : "r"(sctlr_flags) : "memory"
+    );
+}
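For reference, the two register values assembled above, computed by hand (not code in the tree):

// tcr = 16 | (16 << 16) | (2 << 30) | (3 << 28) | (3 << 12) | (5ULL << 32) = 0x5B0103010
// sctlr_flags = 0x1005 = (1 << 0) | (1 << 2) | (1 << 12)   // M | C | I
// boot.S below programs TCR_EL1 from the same literal expression; the early
// assembly and CPUEnableMMU must stay in sync, since both configure the walk.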
+2 -2
@@ -2,8 +2,8 @@
 #include <Types.h>
 #include <Lib/VAArgs.h>
 
-void* StringSet(BytePointer destination, ASCII value, Size count);
-void* MemoryCopy(void* destination, const void* source, Size count);
+Pointer MemorySet(Pointer destination, ASCII value, Size count);
+Pointer MemoryCopy(Pointer destination, const Pointer source, Size count);
 Int32 StringCompare(const ASCII* firstString, const ASCII* secondString);
 Int32 StringCompareWithLimit(const ASCII* firstString, const ASCII* secondString, Size limit);
+24
@@ -0,0 +1,24 @@
#pragma once
#include <Types.h>

enum {
    kHeapSizePages = 1024,
    kHeapBlockHeaderMagic = 0x43555445, // "CUTE"
    kKernelHeapStart = 0xFFFFFFFFC0000000
};

static inline Address VMPhysToHeap(Address phys) { return phys + kKernelHeapStart; }
static inline Address VMHeapToPhys(Address heap) { return heap - kKernelHeapStart; }

typedef struct __attribute__((aligned(16))) VMHeapBlockHeader {
    UInt64 magic;
    struct VMHeapBlockHeader* next;
    struct VMHeapBlockHeader* previous;
    UInt64 size;
    bool isFree;
} VMHeapBlockHeader;

void HeapInitialize();
Pointer HeapAllocate(Size size);
void HeapFree(Pointer pointer);
Pointer HeapResize(Pointer pointer, Size newSize);
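A hedged usage sketch of this API, with behavior inferred from the Heap.c implementation later in this diff (the exact rounding is an implementation detail):

// Assumes HeapInitialize() has already run from KernelMain.
UInt8* buffer = (UInt8*)HeapAllocate(100); // internally rounded up to 112 bytes
buffer = (UInt8*)HeapResize(buffer, 200);  // may move the block; contents are copied
HeapFree(buffer);                          // coalesces with free neighbours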
+3
@@ -1,5 +1,6 @@
 #pragma once
 #include <Types.h>
+#include "../Common/bootinfo.h"
 
 enum {
     kVMPageSize = 4096,
@@ -12,10 +13,12 @@ typedef struct {
     Size size;
 } VMMemoryRegion;
 
 typedef struct {
     VMMemoryRegion totalRAM;
     VMMemoryRegion reserved[kVMMaxReservedRegions];
     UInt32 reservedCount;
+    VMMemoryRegion UART;
 } VMBootMemoryMap;
 
 void PMMInitialize(VMBootMemoryMap* bootMap);
+49
@@ -0,0 +1,49 @@
#pragma once
#include <Types.h>
#include <VM/PMM.h>
#include "../Common/bootinfo.h"

enum VMPTEFlags {
    kPTEValid      = (1ULL << 0),  // 1 = present (faults if 0)
    kPTETable      = (1ULL << 1),  // 1 = valid table descriptor at L0/L1/L2
    kPTEPage       = (1ULL << 1),  // 1 = valid page descriptor at L3 (same bit)
    kPTENormalMem  = (0ULL << 2),  // cached, normal RAM (MAIR index 0)
    kPTEDeviceMem  = (1ULL << 2),  // uncached, MMIO device (MAIR index 1)
    kPTEAccessRW   = (0ULL << 6),  // read/write
    kPTEAccessRO   = (1ULL << 6),  // read-only
    kPTEUser       = (1ULL << 7),  // 1 = EL0, 0 = EL1
    kPTEInnerShare = (3ULL << 8),  // Inner Shareable (SMP safe)
    kPTEAccessFlag = (1ULL << 10), // CPU access tracking (MUST be 1 to avoid faults)
    kPTEPrivNX     = (1ULL << 53), // PXN: Privileged eXecute Never
    kPTEUserNX     = (1ULL << 54)  // UXN: Unprivileged eXecute Never
};

enum {
    kVMKernelVMA  = 0xFFFFFFFF80000000,
    kHHDMOffset   = 0xFFFF888000000000,
    kVMFbVirtBase = 0xFFFFFFFFFC000000,
};

static inline Address VMKernelVirtToPhys(Address virt) {
    return virt - 0xFFFFFFFF80100000 + 0x40100000; // TODO: hardcoding these is awful
}

static inline Address VMPhysToHHDM(Address phys) {
    return phys + kHHDMOffset;
}

static inline Address VMHHDMToPhys(Address virt) {
    return virt - kHHDMOffset;
}

extern Address* gVMKernelL0Table;
extern Address gVMKernelL0Physical;

Address* VMMMapPage(Address* l0Table, Address phys, Address virt, UInt64 flags);
void VMMUnmapPage(Address* l0Table, Address virt);
Pointer VMMGetOrAllocatePage(Address* l0Table, Address virt, UInt64 flags);
void VMMInitialize(VMBootMemoryMap* bootMap, Bootinfo* info);
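Splitting the constants above into 4 KB-granule table indices (VA bits [47:39], [38:30], [29:21], [20:12], hand-computed) is a useful cross-check against the early tables in boot.S below:

// kVMKernelVMA  = 0xFFFFFFFF80000000 -> L0 index 511, L1 index 510
//   (boot.S fills early_ttbr1_l0[511] and early_ttbr1_l1[510])
// kHHDMOffset   = 0xFFFF888000000000 -> L0 index 273, L1 index 0
// kVMFbVirtBase = 0xFFFFFFFFFC000000 -> L0 index 511, L1 index 511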
+5 -1
@@ -1,4 +1,3 @@
#include "Types.h"
#include <Arch/DTB.h> #include <Arch/DTB.h>
#include <OS/Panic.h> #include <OS/Panic.h>
#include <OS/Log.h> #include <OS/Log.h>
@@ -73,6 +72,11 @@ void DTBParse(Pointer dtb, VMBootMemoryMap* bootMap) {
             bootMap->reserved[index].size = size;
             bootMap->reservedCount++;
         }
+        else if (StringStartsWith(currentNode, "pl011")) {
+            UInt32* cells = (UInt32*)structs;
+            bootMap->UART.base = Merge32To64(BytesSwap32(cells[1]), BytesSwap32(cells[0]));
+            bootMap->UART.size = Merge32To64(BytesSwap32(cells[3]), BytesSwap32(cells[2]));
+        }
     }
 
     structs += propertyLength;
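A worked decode of the byte-swapping above, assuming QEMU's virt board (DTB cells are big-endian u32s, and judging by the argument order at this call site, Merge32To64 takes the low word first):

// pl011 node: reg = <0x00000000 0x09000000  0x00000000 0x00001000>;
// cells[0] = hi(base), cells[1] = lo(base), cells[2] = hi(size), cells[3] = lo(size)
// -> UART.base = 0x09000000, UART.size = 0x1000,
//    matching the 0x09000000 fallback used later in VMMInitialize.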
+116 -5
@@ -1,10 +1,121 @@
 .section .text.boot, "ax"
 .global _start
 
 _start:
-    sub sp, sp, #16
-    str x0, [sp]
+    // disable interrupts
+    msr daifset, #3
+
+    // stash the physical address of Bootinfo* (arrives in x0) in x20
+    mov x20, x0
+
+    // get the physical addresses of the early tables
+    adrp x0, early_ttbr0_l0
+    adrp x1, early_ttbr0_l1
+
+    // zero the tables (4 tables x 4 KB = 16 KB)
+    mov x2, #16384
+    mov x3, x0
+1:  str xzr, [x3], #8
+    subs x2, x2, #8
+    b.ne 1b
+
+    // set up ttbr0 (identity map of 512 GB)
+    adrp x0, early_ttbr0_l0
+    adrp x1, early_ttbr0_l1
+    // early_ttbr0_l0[0] -> early_ttbr0_l1 (Valid + Table = 0x3)
+    ldr x2, =0x3
+    orr x3, x1, x2
+    str x3, [x0, #0]
+
+    // fill the L1 table with 512 1 GB block entries
+    // flags 0x701 = Valid + AccessFlag + InnerShareable + NormalRAM
+    mov x2, xzr
+    mov x3, #512
+    ldr x4, =0x701
+    mov x6, #(1 << 30)
+2:  orr x5, x2, x4
+    str x5, [x1], #8 // early_ttbr0_l1[i] = base | flags
+    add x2, x2, x6
+    subs x3, x3, #1
+    b.ne 2b
+
+    // set up ttbr1 (higher half: 0xFFFFFFFF80000000)
+    adrp x0, early_ttbr1_l0
+    adrp x1, early_ttbr1_l1
+    // early_ttbr1_l0[511] -> early_ttbr1_l1 (Valid + Table = 0x3)
+    ldr x2, =0x3
+    orr x3, x1, x2
+    mov x4, #(511 * 8)
+    str x3, [x0, x4]
+
+    // determine which 1 GB block the kernel is currently running in
+    adr x2, _start   // current PC as an absolute address
+    lsr x2, x2, #30  // keep only the 1 GB block number
+    lsl x2, x2, #30  // back to an absolute address
+    // map that block at index 510 (0xFFFFFFFF80000000)
+    ldr x3, =0x701   // flags
+    orr x2, x2, x3
+    mov x4, #(510 * 8)
+    str x2, [x1, x4] // early_ttbr1_l1[510] = base | flags
+
+    // enable the MMU (MAIR, TCR, SCTLR)
+    // see Kernel/Include/Arch/CPU.h for an explanation of these values
+    ldr x2, =((0xFF << 0) | (0x00 << 8))
+    msr mair_el1, x2
+    ldr x2, =((16 << 0) | (16 << 16) | (0 << 14) | (2 << 30) | (3 << 28) | (3 << 12) | (5 << 32))
+    msr tcr_el1, x2
+    adrp x0, early_ttbr0_l0
+    adrp x1, early_ttbr1_l0
+    msr ttbr0_el1, x0
+    msr ttbr1_el1, x1
+    dsb ish
+    isb
+    mrs x2, sctlr_el1
+    ldr x3, =0x1005
+    orr x2, x2, x3
+    msr sctlr_el1, x2
+    isb
+
+    // jump to the link-time (higher-half) address of the continuation
+    ldr x2, =higher_half_jump
+    br x2
+
+higher_half_jump:
+    ldr x3, =_boot_stack_top
+    mov sp, x3
+
+    // clear .bss
+    ldr x1, =__bss_start
+    ldr x2, =__bss_end
+    cbz x1, 4f
+    cmp x1, x2
+    b.eq 4f
+3:  str xzr, [x1], #8
+    cmp x1, x2
+    b.lt 3b
+4:
     bl ExceptionsVectorsInit
-    ldr x0, [sp]
-    add sp, sp, #16
+
+    mov x0, x20 // hand the physical address of Bootinfo* to the kernel in x0
     bl KernelMain
-    b .
+
+halt:
+    wfi
+    b halt
+
+.section .data
+.align 12
+early_ttbr0_l0: .fill 4096, 1, 0
+early_ttbr0_l1: .fill 4096, 1, 0
+early_ttbr1_l0: .fill 4096, 1, 0
+early_ttbr1_l1: .fill 4096, 1, 0
+
+.section .bss
+.align 16
+.skip 16384
+_boot_stack_top:
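Cross-checking the magic constants above against the VMM.h flag definitions (hand-computed, not code in the tree):

// 0x701 = kPTEValid (0x1) | kPTEInnerShare (0x300) | kPTEAccessFlag (0x400)
//   with MAIR attr index 0 (normal RAM) and bit 1 clear, i.e. a block entry.
// The TCR expression evaluates to 0x5B0103010, identical to what CPUEnableMMU builds.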
+7 -1
@@ -1,6 +1,8 @@
#include "../Common/bootinfo.h" #include "../Common/bootinfo.h"
#include <VM/PMM.h>
#include <Arch/DTB.h> #include <Arch/DTB.h>
#include <VM/PMM.h>
#include <VM/VMM.h>
#include <VM/Heap.h>
#include <OS/Log.h> #include <OS/Log.h>
#include <OS/Panic.h> #include <OS/Panic.h>
@@ -14,4 +16,8 @@ void KernelMain(Bootinfo* bootinfo) {
     bootMap.reservedCount = 0;
     DTBParse(bootinfo->dtb, &bootMap);
     PMMInitialize(&bootMap);
+    VMMInitialize(&bootMap, bootinfo);
+    HeapInitialize();
+
+    OSLog("Kernel initialized.\n");
 }
+5 -5
@@ -8,15 +8,15 @@ static void BufferAdd(ASCII* buffer, Size bufferSize, Size* written, ASCII chara
     (*written)++;
 }
 
-void* StringSet(BytePointer destination, ASCII value, Size count) {
-    BytePointer savedDestination = destination;
+Pointer MemorySet(Pointer destination, ASCII value, Size count) {
+    BytePointer savedDestination = (BytePointer) destination;
     while (count--) {
-        *destination++ = (UInt8) value;
+        *savedDestination++ = (UInt8) value;
     }
-    return savedDestination;
+    return destination;
 }
 
-void* MemoryCopy(void* destination, const void* source, Size count) {
+Pointer MemoryCopy(Pointer destination, const Pointer source, Size count) {
     BytePointer destinationBuffer = (BytePointer) destination;
     const UInt8* sourceBuffer = (const UInt8*) source;
+1 -1
@@ -2,5 +2,5 @@
 #include <Lib/String.h>
 
 void* memset(void* destination, int value, Size count) {
-    return StringSet(destination, value, count);
+    return MemorySet(destination, value, count);
 }
+1 -1
@@ -21,7 +21,7 @@ static const ASCII* GetExceptionClassString(UInt32 class) {
 }
 
 __attribute__((noreturn)) static void Halt() {
-    while (1) {
+    loop {
         CPUDisableInterrupts();
         CPUWaitForInterrupt();
     }
+101
@@ -0,0 +1,101 @@
#include <VM/Heap.h>
#include <VM/PMM.h>
#include <VM/VMM.h>
#include <Lib/String.h>
#include <OS/Panic.h>

static VMHeapBlockHeader* sVMHeapListHead = nullptr;

static void CombineForward(VMHeapBlockHeader* current) {
    if (!current->next || !current->next->isFree) return;
    current->size += sizeof(VMHeapBlockHeader) + current->next->size;
    current->next = current->next->next;
    if (current->next) current->next->previous = current; // re-link the new neighbour
}

void HeapInitialize() {
    Address heapStart = kKernelHeapStart;
    for (UInt64 i = 0; i < kHeapSizePages; i++) {
        Address physical = (Address)PMMAllocatePage();
        if (!physical) OSPanic("OOM during heap init");
        Address virtual = heapStart + (i * kVMPageSize);
        VMMMapPage(gVMKernelL0Table, physical, virtual, kPTENormalMem | kPTEAccessRW | kPTEPrivNX | kPTEUserNX);
    }

    sVMHeapListHead = (VMHeapBlockHeader*)heapStart;
    sVMHeapListHead->magic = kHeapBlockHeaderMagic;
    sVMHeapListHead->size = (kHeapSizePages * kVMPageSize) - sizeof(VMHeapBlockHeader);
    sVMHeapListHead->isFree = true;
    sVMHeapListHead->next = nullptr;
    sVMHeapListHead->previous = nullptr;
}

Pointer HeapAllocate(Size size) {
    if (size == 0) return nullptr;
    Size alignedSize = (size + 15) & ~15;

    VMHeapBlockHeader* current = sVMHeapListHead;
    while (current) {
        if (current->isFree && current->size >= alignedSize) {
            // Split the block if the remainder can hold a header plus a minimal payload.
            if (current->size > alignedSize + sizeof(VMHeapBlockHeader) + 16) {
                VMHeapBlockHeader* newBlock = (VMHeapBlockHeader*)((Address)current + sizeof(VMHeapBlockHeader) + alignedSize);
                newBlock->size = current->size - alignedSize - sizeof(VMHeapBlockHeader);
                newBlock->isFree = true;
                newBlock->next = current->next;
                newBlock->previous = current;
                newBlock->magic = kHeapBlockHeaderMagic;
                if (current->next) current->next->previous = newBlock;
                current->next = newBlock;
                current->size = alignedSize;
            }
            current->isFree = false;
            return (Pointer)((Address)current + sizeof(VMHeapBlockHeader));
        }
        current = current->next;
    }
    return nullptr;
}

void HeapFree(Pointer pointer) {
    if (!pointer) return;
    VMHeapBlockHeader* current = (VMHeapBlockHeader*)((Address)pointer - sizeof(VMHeapBlockHeader));
    if (current->magic != kHeapBlockHeaderMagic) return;

    current->isFree = true;
    if (current->next && current->next->isFree) CombineForward(current);
    if (current->previous && current->previous->isFree) CombineForward(current->previous);
}

Pointer HeapResize(Pointer pointer, Size newSize) {
    if (!pointer) return HeapAllocate(newSize);
    if (newSize == 0) {
        HeapFree(pointer);
        return nullptr;
    }

    Size alignedSize = (newSize + 15) & ~15;
    VMHeapBlockHeader* current = (VMHeapBlockHeader*)((Address)pointer - sizeof(VMHeapBlockHeader));

    if (current->size >= alignedSize) {
        return pointer;
    }

    if (current->next && current->next->isFree &&
        (current->size + sizeof(VMHeapBlockHeader) + current->next->size) >= alignedSize) {
        CombineForward(current);
        return pointer;
    }

    Pointer newPointer = HeapAllocate(newSize);
    if (newPointer) {
        MemoryCopy(newPointer, pointer, current->size);
        HeapFree(pointer);
    }
    return newPointer;
}
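One guard that may be worth adding here, as a compile-time sketch: the split logic in HeapAllocate silently relies on the header preserving 16-byte payload alignment.

// Sketch (standard C11 _Static_assert, nothing repo-specific):
_Static_assert(sizeof(VMHeapBlockHeader) % 16 == 0,
               "VMHeapBlockHeader must keep payloads 16-byte aligned");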
+21 -22
@@ -4,16 +4,17 @@
 extern char _kernelStart[];
 extern char _kernelEnd[];
 
+static Address sPMMRamBase = 0;
+static MemoryPointer sPMMBitmap;
+static Size sPMMBitmapSize;
+static Size sPMMTotalPages;
+
 static inline Size BitmapGetByteIndex(Address address) {
-    return (address / kVMPageSize) / kVMBlocksPerByte;
+    return ((address - sPMMRamBase) / kVMPageSize) / kVMBlocksPerByte;
 }
 
 static inline UInt8 BitmapGetBitOffset(Address address) {
-    return (UInt8)((address / kVMPageSize) % kVMBlocksPerByte);
+    return (UInt8)(((address - sPMMRamBase) / kVMPageSize) % kVMBlocksPerByte);
 }
 
-static inline Boolean BitmapTest(const MemoryPointer bitmap, Address address) {
-    return (bitmap[BitmapGetByteIndex(address)] & (1U << BitmapGetBitOffset(address))) != 0;
-}
-
 static inline void BitmapSet(MemoryPointer bitmap, Address address) {
@@ -24,27 +25,23 @@ static inline void BitmapUnset(MemoryPointer bitmap, Address address) {
     bitmap[BitmapGetByteIndex(address)] &= ~(1U << BitmapGetBitOffset(address));
 }
 
-static MemoryPointer sPMMBitmap;
-static Size sPMMBitmapSize;
-static Size sPMMTotalPages;
-
 void PMMInitialize(VMBootMemoryMap* bootMap) {
-    UInt32 vIndex = bootMap->reservedCount;
-    bootMap->reserved[vIndex].base = 0x0;
-    bootMap->reserved[vIndex].size = bootMap->totalRAM.base;
+    sPMMRamBase = bootMap->totalRAM.base;
+    sPMMTotalPages = bootMap->totalRAM.size / kVMPageSize;
+    sPMMBitmapSize = sPMMTotalPages / kVMBlocksPerByte;
+    sPMMBitmap = (MemoryPointer)_kernelEnd;
+    MemorySet(sPMMBitmap, 0, sPMMBitmapSize);
+
+    UInt32 safeIndex = bootMap->reservedCount;
+    bootMap->reserved[safeIndex].base = sPMMRamBase;
+    bootMap->reserved[safeIndex].size = 16 * 1024 * 1024; // 16 MB
     bootMap->reservedCount++;
 
     UInt32 kIndex = bootMap->reservedCount;
     bootMap->reserved[kIndex].base = (Address)_kernelStart;
     bootMap->reserved[kIndex].size = (Address)_kernelEnd - (Address)_kernelStart;
     bootMap->reservedCount++;
 
-    sPMMTotalPages = bootMap->totalRAM.size / kVMPageSize;
-    sPMMBitmapSize = sPMMTotalPages / kVMBlocksPerByte;
-    sPMMBitmap = (MemoryPointer)_kernelEnd;
-    StringSet(sPMMBitmap, 0, sPMMBitmapSize);
-
     UInt32 bIndex = bootMap->reservedCount;
     bootMap->reserved[bIndex].base = (Address)sPMMBitmap;
     bootMap->reserved[bIndex].size = sPMMBitmapSize;
@@ -58,7 +55,9 @@ void PMMInitialize(VMBootMemoryMap* bootMap) {
         for (Size p = 0; p < pagesToReserve; p++) {
             Address pageAddress = regionBase + (p * kVMPageSize);
-            BitmapSet(sPMMBitmap, pageAddress);
+            if (pageAddress >= sPMMRamBase && pageAddress < (sPMMRamBase + bootMap->totalRAM.size)) {
+                BitmapSet(sPMMBitmap, pageAddress);
+            }
         }
     }
 }
@@ -68,7 +67,7 @@ Pointer PMMAllocatePage() {
         if (sPMMBitmap[i] == 0xFF) continue;
         for (Size bit = 0; bit < kVMBlocksPerByte; bit++) {
             if ((sPMMBitmap[i] & (1 << bit)) == 0) {
-                Address address = (i * kVMBlocksPerByte + bit) * kVMPageSize;
+                Address address = sPMMRamBase + (i * kVMBlocksPerByte + bit) * kVMPageSize;
                 BitmapSet(sPMMBitmap, address);
                 return (Pointer)address;
             }
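Worked example of the rebased bitmap math, assuming QEMU virt's RAM base of 0x40000000 as used elsewhere in this PR:

// For address 0x40005000 with sPMMRamBase = 0x40000000:
//   page = (0x40005000 - 0x40000000) / 4096 = 5 -> byte 0, bit 5
// Without the rebase, the same address would index byte 32768
// (0x40005000 / 4096 / 8), far beyond the 8 KB bitmap of a 256 MB machine.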
+204
@@ -0,0 +1,204 @@
#include <VM/VMM.h>
#include <VM/PMM.h>
#include <Lib/String.h>
#include <Arch/CPU.h>
#include <OS/Panic.h>
#include <OS/Log.h>
#include "../Common/bootinfo.h"

static const UInt64 kPTEAddressMask = 0x0000FFFFFFFFF000ULL;

static inline Address GetPTEAddress(UInt64 entry) { return entry & kPTEAddressMask; }

static inline UInt16 GetL0Index(Address virt) { return (virt >> 39) & 0x1FF; }
static inline UInt16 GetL1Index(Address virt) { return (virt >> 30) & 0x1FF; }
static inline UInt16 GetL2Index(Address virt) { return (virt >> 21) & 0x1FF; }
static inline UInt16 GetL3Index(Address virt) { return (virt >> 12) & 0x1FF; }

static Boolean isInitialized = false;

Address* gVMKernelL0Table = nullptr;
Address gVMKernelL0Physical = 0;

extern char _kernelStart[];
extern char _kernelEnd[];

// Before VMMInitialize finishes, tables are reached through the early identity
// map; afterwards, through the HHDM.
static Address* GetVirtualTable(Address phys) {
    if (isInitialized) return (Address*)VMPhysToHHDM(phys);
    return (Address*)phys;
}

static inline Address* GetOrAllocateTable(Address* parentTable, Size index, UInt64 flags, UInt64 directoryFlags) {
    if (!(parentTable[index] & kPTEValid)) {
        Pointer newTable = PMMAllocatePage();
        if (!newTable) return nullptr;
        Address* newTableVirt = GetVirtualTable((Address)newTable);
        MemorySet(newTableVirt, 0, kVMPageSize);
        parentTable[index] = (Address)newTable | directoryFlags;
        return newTableVirt;
    }
    // Promote an existing directory to user-accessible if the new mapping needs EL0.
    parentTable[index] |= (flags & kPTEUser);
    Address physAddress = GetPTEAddress(parentTable[index]);
    return GetVirtualTable(physAddress);
}

static Address GetMappedPhysicalAddress(Address* l0Table, Address virt) {
    UInt16 l0Index = GetL0Index(virt);
    UInt16 l1Index = GetL1Index(virt);
    UInt16 l2Index = GetL2Index(virt);
    UInt16 l3Index = GetL3Index(virt);

    // Walk L0 -> L1 -> L2 -> L3, bailing out at the first non-present entry.
    Address* l0Virt = l0Table;
    if (isInitialized) l0Virt = (Address*)VMPhysToHHDM((Address)l0Table);
    if (!(l0Virt[l0Index] & kPTEValid)) return 0;

    Address* l1Virt = GetVirtualTable(GetPTEAddress(l0Virt[l0Index]));
    if (!(l1Virt[l1Index] & kPTEValid)) return 0;

    Address* l2Virt = GetVirtualTable(GetPTEAddress(l1Virt[l1Index]));
    if (!(l2Virt[l2Index] & kPTEValid)) return 0;

    Address* l3Virt = GetVirtualTable(GetPTEAddress(l2Virt[l2Index]));
    if (!(l3Virt[l3Index] & kPTEValid)) return 0;

    return GetPTEAddress(l3Virt[l3Index]);
}

Address* VMMMapPage(Address* l0Table, Address phys, Address virt, UInt64 flags) {
    UInt16 l0Index = GetL0Index(virt);
    UInt16 l1Index = GetL1Index(virt);
    UInt16 l2Index = GetL2Index(virt);
    UInt16 l3Index = GetL3Index(virt);

    Address* l0Virt = l0Table;
    if (isInitialized) l0Virt = (Address*)VMPhysToHHDM((Address)l0Table);

    UInt64 directoryFlags = kPTEValid | kPTETable | (flags & kPTEUser);
    Address* l1Virt = GetOrAllocateTable(l0Virt, l0Index, flags, directoryFlags);
    if (!l1Virt) return nullptr;
    Address* l2Virt = GetOrAllocateTable(l1Virt, l1Index, flags, directoryFlags);
    if (!l2Virt) return nullptr;
    Address* l3Virt = GetOrAllocateTable(l2Virt, l2Index, flags, directoryFlags);
    if (!l3Virt) return nullptr;

    l3Virt[l3Index] = phys | flags | kPTEPage | kPTEAccessFlag | kPTEValid;
    if (isInitialized) CPUInvalidateTLB(virt);
    return l3Virt;
}

void VMMUnmapPage(Address* l0Table, Address virt) {
    UInt16 l0Index = GetL0Index(virt);
    UInt16 l1Index = GetL1Index(virt);
    UInt16 l2Index = GetL2Index(virt);
    UInt16 l3Index = GetL3Index(virt);

    Address* l0Virt = l0Table;
    if (isInitialized) l0Virt = (Address*)VMPhysToHHDM((Address)l0Table);
    if (!(l0Virt[l0Index] & kPTEValid)) return;

    Address* l1Virt = GetVirtualTable(GetPTEAddress(l0Virt[l0Index]));
    if (!(l1Virt[l1Index] & kPTEValid)) return;

    Address* l2Virt = GetVirtualTable(GetPTEAddress(l1Virt[l1Index]));
    if (!(l2Virt[l2Index] & kPTEValid)) return;

    Address* l3Virt = GetVirtualTable(GetPTEAddress(l2Virt[l2Index]));
    l3Virt[l3Index] = 0;
    CPUInvalidateTLB(virt);
}

Pointer VMMGetOrAllocatePage(Address* l0Table, Address virt, UInt64 flags) {
    Address existingPhys = GetMappedPhysicalAddress(l0Table, virt);
    if (existingPhys) return (Pointer)GetVirtualTable(existingPhys);

    Pointer newPhys = PMMAllocatePage();
    if (!newPhys) return nullptr; // OOM
    Address* mappedVirt = VMMMapPage(l0Table, (Address) newPhys, virt, flags);
    if (!mappedVirt) return nullptr;

    Pointer finalVirtAddress = (Pointer)GetVirtualTable((Address)newPhys);
    MemorySet(finalVirtAddress, 0, kVMPageSize);
    return finalVirtAddress;
}

void VMMInitialize(VMBootMemoryMap* bootMap, Bootinfo* info) {
    gVMKernelL0Physical = (Address)PMMAllocatePage();
    gVMKernelL0Table = (Address*)gVMKernelL0Physical;
    if (!gVMKernelL0Physical) OSPanic("Failed to allocate kernel L0 table");
    MemorySet(gVMKernelL0Table, 0, kVMPageSize);

    OSLog("Mapping RAM... this can take a while\n");
    Size totalRAM = bootMap->totalRAM.size;
    Size ramEnd = bootMap->totalRAM.base + totalRAM;
    for (Address phys = bootMap->totalRAM.base; phys < ramEnd; phys += kVMPageSize) {
        VMMMapPage(
            gVMKernelL0Table,
            phys, VMPhysToHHDM(phys),
            kPTENormalMem | kPTEAccessRW | kPTEPrivNX | kPTEUserNX
        );
    }
    OSLog("RAM mapped\n");

    Size pmmBitmapSize = (bootMap->totalRAM.size / kVMPageSize) / 8;
    Size kernelSize = ((Address)_kernelEnd - (Address)_kernelStart) + pmmBitmapSize;
    kernelSize = (kernelSize + kVMPageSize - 1) & ~(kVMPageSize - 1);
    Address kernelPhysStart = 0x40100000; // TODO: hardcoding this is awful

    for (Address offset = 0; offset < kernelSize; offset += kVMPageSize) {
        Address phys = kernelPhysStart + offset;
        Address virt = (Address)_kernelStart + offset;
        VMMMapPage(gVMKernelL0Table, phys, virt, kPTENormalMem | kPTEAccessRW);
    }
    OSLog("Kernel mapped at its link address\n");

    for (Address offset = 0; offset < kernelSize; offset += kVMPageSize) {
        VMMMapPage(gVMKernelL0Table, kernelPhysStart + offset, kernelPhysStart + offset, kPTENormalMem | kPTEAccessRW);
    }
    OSLog("Kernel identity mapped\n");

    Address fbPhys = (Address)info->framebuffer.base;
    Size fbSize = info->framebuffer.baseSize;
    for (Address offset = 0; offset < fbSize; offset += kVMPageSize) {
        VMMMapPage(
            gVMKernelL0Table, fbPhys + offset,
            kVMFbVirtBase + offset,
            kPTEDeviceMem | kPTEAccessRW | kPTEUserNX | kPTEPrivNX
        );
    }
    OSLog("Framebuffer mapped\n");

    Address UARTPhys = bootMap->UART.base;
    if (!UARTPhys) UARTPhys = 0x09000000; // QEMU virt pl011 fallback
    VMMMapPage(
        gVMKernelL0Table, UARTPhys, VMPhysToHHDM(UARTPhys),
        kPTEDeviceMem | kPTEAccessRW | kPTEUserNX | kPTEPrivNX
    );
    VMMMapPage(
        gVMKernelL0Table, UARTPhys, UARTPhys,
        kPTEDeviceMem | kPTEAccessRW | kPTEUserNX | kPTEPrivNX
    );
    OSLog("UART mapped\n");

    info->framebuffer.base = (BIUInt32*)kVMFbVirtBase;

    OSLog("Enabling MMU...\n");
    CPUEnableMMU(gVMKernelL0Physical);
    isInitialized = true;
}
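Round-trip sanity check for the HHDM helpers this file relies on (hand-computed):

// VMPhysToHHDM(0x40100000)         == 0xFFFF888040100000
// VMHHDMToPhys(0xFFFF888040100000) == 0x40100000
// Once isInitialized flips to true, every table walk goes through these
// addresses instead of the early TTBR0 identity window.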
+19 -5
@@ -1,3 +1,8 @@
+ENTRY(_start)
+
+KERNEL_PA = 0x40100000;
+KERNEL_VA = 0xFFFFFFFF80100000;
+
 PHDRS
 {
     text PT_LOAD FLAGS(5); /* Read | Execute */
@@ -6,22 +11,31 @@ PHDRS
 SECTIONS
 {
-    . = 0x40100000;
+    . = KERNEL_VA;
     _kernelStart = .;
 
-    .text : {
+    .text : AT(ADDR(.text) - KERNEL_VA + KERNEL_PA) {
         *(.text.boot)
         *(.text*)
     } :text
 
     . = ALIGN(8);
-    .rodata : { *(.rodata*) } :text
+    .rodata : AT(ADDR(.rodata) - KERNEL_VA + KERNEL_PA) {
+        *(.rodata*)
+    } :text
 
     . = ALIGN(4096);
-    .data : { *(.data*) } :data
+    .data : AT(ADDR(.data) - KERNEL_VA + KERNEL_PA) {
+        *(.data*)
+    } :data
 
     . = ALIGN(8);
-    .bss : { *(.bss*) *(COMMON) } :data
+    .bss : AT(ADDR(.bss) - KERNEL_VA + KERNEL_PA) {
+        __bss_start = .;
+        *(.bss*)
+        *(COMMON)
+        __bss_end = .;
+    } :data
 
     . = ALIGN(4096);
     _kernelEnd = .;
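A quick check of the AT() arithmetic with the constants above (hand-computed):

// ADDR(.text) = KERNEL_VA = 0xFFFFFFFF80100000
// LMA(.text)  = ADDR(.text) - KERNEL_VA + KERNEL_PA = 0x40100000
// so every PT_LOAD gets p_vaddr = link address and p_paddr = load address,
// which is exactly the pair the reworked bootloader now consumes.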