feat(vmm): add virtual memory manager; fix rectest() stack-guard test

This commit is contained in:
Karina
2025-12-26 20:14:28 +04:00
parent 412c3bacc0
commit 0aa7086589
6 changed files with 152 additions and 13 deletions
+1
View File
@@ -16,6 +16,7 @@ add_executable(kernel
src/modules/idt.c
src/modules/interrupts.c
src/modules/pmm.c
src/modules/vmm.c
src/modules/memory.c
src/modules/serial.c
src/modules/console.c
+6 -3
View File
@@ -1,19 +1,22 @@
#ifndef PMM_H
#define PMM_H
#include "types.h"
#include "../common/bootinfo.h"

/* Physical memory manager: tracks 4 KiB page frames in a bitmap,
 * one bit per frame (bit set = frame in use). */
#define PAGE_SIZE 4096
#define BLOCKS_PER_BYTE 8
#define SAFE_SPACE_START_ADDR 0x100000 /* memory below 1 MiB is not safe to hand out on x86 */

/* Macro arguments are fully parenthesized so compound expressions
 * (e.g. base + offset) expand correctly. */
#define BITMAP_BYTE_INDEX(addr) (((addr) / PAGE_SIZE) / BLOCKS_PER_BYTE)
#define BITMAP_BIT_OFFSET(addr) (((addr) / PAGE_SIZE) % BLOCKS_PER_BYTE)
#define BITMAP_TEST(bitmap, addr) ((bitmap)[BITMAP_BYTE_INDEX(addr)] & (1 << BITMAP_BIT_OFFSET(addr)))
#define BITMAP_SET(bitmap, addr) ((bitmap)[BITMAP_BYTE_INDEX(addr)] |= (1 << BITMAP_BIT_OFFSET(addr)))
#define BITMAP_UNSET(bitmap, addr) ((bitmap)[BITMAP_BYTE_INDEX(addr)] &= ~(1 << BITMAP_BIT_OFFSET(addr)))

/* Build the frame bitmap from the bootloader memory map and reserve the
 * low 1 MiB, the kernel image and the bitmap itself. */
void pmm_init(BI_MemoryMap mmap);
/* Accessor for the module-private frame bitmap. */
u8* get_bitmap();
/* Allocate / free one 4 KiB physical frame. */
void* pmm_alloc_page();
void pmm_free_page(void* addr);
#endif
+31
View File
@@ -0,0 +1,31 @@
#ifndef VMM_H
#define VMM_H
#include "../common/bootinfo.h"
#include "types.h"
/* x86-64 4-level paging: page-table entry flag bits. */
#define PTE_PRESENT (1ULL << 0) // 0 -- not present: page fault when trying to access; 1 -- present, can RW
#define PTE_RW (1ULL << 1) // 0 -- RO: page fault trying to write; 1 -- RW
#define PTE_USER (1ULL << 2) // 0 -- Ring 0 only: Ring 3 faults trying to access; 1 -- Ring 3 allowed
#define PTE_PWT (1ULL << 3) // 1 -- write-through: skip L1/L2 cache when writing to RAM
#define PTE_PCD (1ULL << 4) // 1 -- cache disabled for page
#define PTE_ACCESSED (1ULL << 5) // CPU sets it (by itself) to 1 when RW this page
#define PTE_DIRTY (1ULL << 6) // CPU sets it when writing to this page
#define PTE_NX (1ULL << 63) // No execute
#define PTE_ADDR_MASK 0x000FFFFFFFFFF000 // mask to get clean physical address located at bits 12-51
#define PTE_GET_ADDR(entry) ((entry) & PTE_ADDR_MASK) // get physical address
#define PTE_GET_FLAGS(entry) ((entry) & ~PTE_ADDR_MASK) // get flag bits (everything outside the address field)
/* Split a canonical virtual address into its four 9-bit table indices. */
#define VMM_PT_INDEX(virt) (((virt) >> 12) & 0x1FF) // Page Table Index (bits 12-20)
#define VMM_PD_INDEX(virt) (((virt) >> 21) & 0x1FF) // Page Directory Index (bits 21-29)
#define VMM_PDPT_INDEX(virt) (((virt) >> 30) & 0x1FF) // PDPT Index (bits 30-38)
#define VMM_PML4_INDEX(virt) (((virt) >> 39) & 0x1FF) // PML4 Index (bits 39-47): top-level table
/* Build the kernel page tables (identity map) and load them into CR3. */
void vmm_init(Bootinfo* info);
/* Map one 4 KiB page virt -> phys with the given PTE_* flags. */
void vmm_map_page(u64* pml4, u64 phys, u64 virt, u64 flags);
#endif
+23 -2
View File
@@ -9,16 +9,22 @@
#include "gdt.h"
#include "idt.h"
#include "pmm.h"
#include "vmm.h"
#include "../data/logo.h"
#include "vmm.h"
// Intentionally unbounded recursion used as a stack-overflow probe: each
// call adds a frame until the stack runs off its mapped page and faults.
// The volatile local and the kprintf call keep each frame "live" so the
// compiler is less likely to collapse the recursion — NOTE(review):
// tail-call optimization could still defeat this; confirm with -O level.
int rectest(int a) {
volatile int b = a + 1;
kprintf("%d", b);
return rectest(b * 2);
}
extern u64 _kernel_end;
extern u8* bitmap;
extern u64 bitmap_size_g;
extern u64* pml4_kernel;
void kmain(Bootinfo* info) {
u32 *fb = (u32*)info->framebuffer.base;
@@ -53,14 +59,29 @@ void kmain(Bootinfo* info) {
kprintf("MemoryMap located at ^g%x^0 (^r%X^0); \
\nMemory map size is ^g%x^0\
\nKernel ends at ^g%x^0\
\nBITMAP located at ^g%x^0", (u64)info->mem.map, (u64)info->mem.map,(u64)info->mem.map_size, &_kernel_end, get_bitmap());
\nBITMAP located at ^g%x^0 (^r%x^0)", (u64)info->mem.map, (u64)info->mem.map,(u64)info->mem.map_size, &_kernel_end, bitmap, bitmap_size_g);
vmm_init(info);
kprintf("\nIM ALIVE :D");
kprintf("\nSetting up guard page test");
u64* new_stack_phys = pmm_alloc_page();
u64 stack_top = 0x40000000;
vmm_map_page(pml4_kernel, (u64)new_stack_phys, stack_top, PTE_PRESENT | PTE_RW);
__asm__ volatile (
"mov %0, %%rsp \n"
"call *%1"
:: "r"(stack_top + 4096), "r"(rectest), "D"(0) : "memory"
);
// kfetch();
// kprintf("I cant do anything yet lol");
// kprintf("stack overflow protection test");
// rectest(0);
rectest(0);
// __asm__("ud2"); // panic :(
+2 -8
View File
@@ -5,8 +5,6 @@
#include "memory.h" // IWYU pragma: keep // clangd is stupid af
#include "math.h"
#define SAFE_SPACE_START_ADDR 0x100000
extern u64 _kernel_start;
extern u64 _kernel_end;
@@ -59,10 +57,10 @@ void pmm_init(BI_MemoryMap mmap) {
u64 k_end = (u64)&_kernel_end;
u64 bitmap_start = (u64)bitmap;
u64 bitmap_end = bitmap_start + bitmap_size_g;
u64 bitmap_end = bitmap_start + bitmap_size_g;
for (u64 i = 0; i < SAFE_SPACE_START_ADDR; i += PAGE_SIZE) BITMAP_SET(bitmap, i); // mem, that addr < 1MB is NOT safe to use on x86
for (u64 i = k_start; i < k_end; i += PAGE_SIZE) BITMAP_SET(bitmap, i);
for (u64 i = k_start; i < k_end; i += PAGE_SIZE) BITMAP_SET(bitmap, i);
for (u64 i = bitmap_start; i < bitmap_end; i += PAGE_SIZE) BITMAP_SET(bitmap, i);
}
@@ -83,7 +81,3 @@ void* pmm_alloc_page() {
/* Mark the frame containing addr as free in the allocation bitmap.
 * NOTE(review): addr is not validated or checked for alignment here;
 * the BITMAP macros derive the frame index straight from the raw
 * address — confirm callers only pass pmm_alloc_page() results. */
void pmm_free_page(void* addr) {
BITMAP_UNSET(bitmap, (u64)addr);
}
/* Expose the module-private frame bitmap to other subsystems
 * (e.g. so the VMM can identity-map it). */
u8* get_bitmap() {
return bitmap;
}
+89
View File
@@ -0,0 +1,89 @@
#include "../common/bootinfo.h"
#include "vmm.h"
#include "gdt.h"
#include "idt.h"
#include "pmm.h"
#include "memory.h" // IWYU pragma: keep // shut the fuck up I DONT NEED <string.h> CLANGD PLEASE
#include "types.h"
u64* pml4_kernel = nullptr;
extern u64 _kernel_start;
extern u64 _kernel_end;
extern u8* bitmap;
extern u64 bitmap_size_g;
extern GDTDescriptor gdt[];
extern TSS tss[];
extern IDTEntry idt[];
extern u8 double_fault_stack[];
/* Return the next-level table pointed to by table[idx], allocating and
 * zeroing a fresh 4 KiB table when the entry is not yet present.
 * Intermediate entries are created PRESENT|RW (never USER): per-page
 * permissions are applied only at the final PTE level.
 * NOTE(review): pmm_alloc_page() is assumed infallible here — TODO
 * confirm; a NULL return would be dereferenced by memset. */
static u64* vmm_get_or_create_table(u64* table, u64 idx) {
    if (!(table[idx] & PTE_PRESENT)) {
        u64* fresh = pmm_alloc_page();
        memset(fresh, 0, PAGE_SIZE);
        table[idx] = (u64)fresh | PTE_PRESENT | PTE_RW;
    }
    return (u64*)PTE_GET_ADDR(table[idx]);
}

/* Map one 4 KiB page virt -> phys with `flags` in the tree rooted at
 * pml4, walking PML4 -> PDPT -> PD -> PT and creating missing levels.
 * phys is masked to its frame address so stray low bits can never leak
 * into the flag bits of the PTE (an unaligned phys previously would). */
void vmm_map_page(u64* pml4, u64 phys, u64 virt, u64 flags) {
    u64* pdpt = vmm_get_or_create_table(pml4, VMM_PML4_INDEX(virt));
    u64* pd = vmm_get_or_create_table(pdpt, VMM_PDPT_INDEX(virt));
    u64* pt = vmm_get_or_create_table(pd, VMM_PD_INDEX(virt));
    pt[VMM_PT_INDEX(virt)] = (phys & PTE_ADDR_MASK) | flags;
}
/* Switch the active address space: writing CR3 installs the new
 * top-level page table and (on this non-PCID setup) flushes the TLB. */
static inline void load_cr3(u64 pml4_addr) {
__asm__ volatile ("mov %0, %%cr3" :: "r"(pml4_addr) : "memory");
}
/* Build the kernel's identity-mapped page tables and switch to them.
 * Maps: low 1 MiB, kernel image, PMM bitmap, framebuffer, Bootinfo,
 * GDT/IDT/TSS and the double-fault stack — everything that must stay
 * reachable the instant CR3 is reloaded.
 * All ranges are rounded outward to 4 KiB boundaries: linker/bootloader
 * symbols are not guaranteed to be page aligned, and an unaligned
 * address fed to vmm_map_page as phys would previously spill its low
 * bits into the PTE flag field and could leave a range's tail page
 * unmapped. */
void vmm_init(Bootinfo* info) {
    pml4_kernel = pmm_alloc_page(); /* NOTE(review): assumed non-NULL — TODO confirm */
    memset(pml4_kernel, 0, PAGE_SIZE);

    const u64 pg_mask = (u64)PAGE_SIZE - 1;
    u64 k_start = (u64)&_kernel_start & ~pg_mask;
    u64 k_end = ((u64)&_kernel_end + pg_mask) & ~pg_mask;
    u64 bitmap_start = (u64)bitmap & ~pg_mask;
    u64 bitmap_end = ((u64)bitmap + bitmap_size_g + pg_mask) & ~pg_mask;
    u64 fb_start = (u64)info->framebuffer.base & ~pg_mask;
    u64 fb_end = ((u64)info->framebuffer.base + info->framebuffer.base_size + pg_mask) & ~pg_mask;

    /* Low 1 MiB: legacy/BIOS structures the kernel may still touch. */
    for (u64 i = 0; i < SAFE_SPACE_START_ADDR; i += PAGE_SIZE) vmm_map_page(pml4_kernel, i, i, PTE_PRESENT | PTE_RW);
    for (u64 i = k_start; i < k_end; i += PAGE_SIZE) vmm_map_page(pml4_kernel, i, i, PTE_PRESENT | PTE_RW);
    for (u64 i = bitmap_start; i < bitmap_end; i += PAGE_SIZE) vmm_map_page(pml4_kernel, i, i, PTE_PRESENT | PTE_RW);
    for (u64 i = fb_start; i < fb_end; i += PAGE_SIZE) vmm_map_page(pml4_kernel, i, i, PTE_PRESENT | PTE_RW);

    /* Single-page structures, aligned down to their containing frame.
     * NOTE(review): Bootinfo, GDT, IDT and TSS are assumed to each fit
     * in one page (double-fault stack in two) — TODO confirm sizes; a
     * structure straddling an unmapped boundary would fault after the
     * CR3 switch. */
    u64 bi_addr = (u64)info & ~pg_mask;
    u64 gdt_addr = (u64)&gdt & ~pg_mask;
    u64 idt_addr = (u64)&idt & ~pg_mask;
    u64 tss_addr = (u64)&tss & ~pg_mask;
    u64 df_stack_addr = (u64)double_fault_stack & ~pg_mask;
    vmm_map_page(pml4_kernel, bi_addr, bi_addr, PTE_PRESENT | PTE_RW);
    vmm_map_page(pml4_kernel, gdt_addr, gdt_addr, PTE_PRESENT | PTE_RW);
    vmm_map_page(pml4_kernel, idt_addr, idt_addr, PTE_PRESENT | PTE_RW);
    vmm_map_page(pml4_kernel, tss_addr, tss_addr, PTE_PRESENT | PTE_RW);
    vmm_map_page(pml4_kernel, df_stack_addr, df_stack_addr, PTE_PRESENT | PTE_RW);
    vmm_map_page(pml4_kernel, df_stack_addr + 4096, df_stack_addr + 4096, PTE_PRESENT | PTE_RW);

    load_cr3((u64)pml4_kernel);
}