diff --git a/cmake/source.cmake b/cmake/source.cmake index 677c066..f12b391 100644 --- a/cmake/source.cmake +++ b/cmake/source.cmake @@ -43,6 +43,7 @@ set(KERNEL_ETC_SOURCES set(ATOMIC_IPC_SOURCES kernel/atomic/Atomics.c + kernel/atomic/cpp/Spinlock.cpp kernel/ipc/Ipc.c ) @@ -57,11 +58,13 @@ set(EXECF_SOURCES set(MM_SOURCES mm/PMem.c mm/MemOps.c - mm/VMem.c + mm/VMem.cpp mm/StackGuard.c mm/MemPool.c mm/trace/StackTrace.c mm/security/Cerberus.c + mm/dynamic/cpp/BuddyAllocator.cpp + mm/dynamic/cpp/new.cpp mm/dynamic/c/Magazine.c mm/PageFaultHandler.c ) @@ -180,7 +183,9 @@ include_directories( include include/Switch include/Vector + kernel kernel/atomic + kernel/atomic/cpp kernel/core kernel/etc kernel/execf @@ -194,6 +199,7 @@ include_directories( mm/asm mm/dynamic mm/dynamic/c + mm/dynamic/cpp mm/dynamic/rust mm/security mm/trace diff --git a/drivers/virtio/VirtioBlk.c b/drivers/virtio/VirtioBlk.c index 546f154..be7ef7f 100644 --- a/drivers/virtio/VirtioBlk.c +++ b/drivers/virtio/VirtioBlk.c @@ -3,9 +3,7 @@ #include #include #include -#include #include -#include #include #include #include diff --git a/include/Io.h b/include/Io.h index e4bcde5..153cdf6 100644 --- a/include/Io.h +++ b/include/Io.h @@ -3,6 +3,10 @@ #include +#ifdef __cplusplus +extern "C" { +#endif + static inline void outb(uint16_t port, uint8_t val) { __asm__ volatile ("outb %0, %1" : : "a"(val), "Nd"(port)); } @@ -72,5 +76,9 @@ void cpuid(uint32_t leaf, uint32_t* eax, uint32_t* ebx, uint32_t* ecx, uint32_t* uint64_t rdmsr(uint32_t msr); void wrmsr(uint32_t msr, uint64_t value); +#ifdef __cplusplus +} +#endif + #endif diff --git a/include/stdbool.h b/include/stdbool.h index 00f251d..8f5f829 100644 --- a/include/stdbool.h +++ b/include/stdbool.h @@ -1,6 +1,8 @@ #ifndef VOIDFRAME_STDBOOL_H #define VOIDFRAME_STDBOOL_H +#ifndef __cplusplus typedef int bool; #define true 1 #define false 0 #endif +#endif diff --git a/kernel/atomic/Spinlock.h b/kernel/atomic/Spinlock.h deleted file mode 100644 index 02c4536..0000000 --- a/kernel/atomic/Spinlock.h +++ /dev/null @@ -1,159 +0,0 @@ -#ifndef SPINLOCK_H -#define SPINLOCK_H - -#include -#include -#include - -#define DEADLOCK_TIMEOUT_CYCLES 100000000ULL -#define MAX_BACKOFF_CYCLES 1024 - -// Exponential backoff delay -static inline void backoff_delay(uint64_t cycles) { - uint64_t start = rdtsc(); - while (rdtsc() - start < cycles) { - __builtin_ia32_pause(); - } -} - -// Advanced spinlock with multiple anti-race mechanisms -static inline void SpinLock(volatile int* lock) { - uint64_t start = rdtsc(); - uint64_t backoff = 1; - uint32_t attempts = 0; - - while (1) { - // Try to acquire without contention first - if (!*lock && !__atomic_test_and_set(lock, __ATOMIC_ACQUIRE)) { - return; - } - - // Deadlock detection - if (rdtsc() - start > DEADLOCK_TIMEOUT_CYCLES) { - backoff_delay(MAX_BACKOFF_CYCLES); - start = rdtsc(); - attempts = 0; - continue; - } - - attempts++; - - // Adaptive spinning strategy - if (attempts < 100) { - // Initial fast spinning with pause - for (int i = 0; i < 64; i++) { - if (!*lock) break; - __builtin_ia32_pause(); - } - } else { - // Switch to exponential backoff after many attempts - backoff_delay(backoff); - backoff = (backoff * 2) > MAX_BACKOFF_CYCLES ? 
MAX_BACKOFF_CYCLES : (backoff * 2); - } - } -} - -// MCS-style queue lock (more fair, less cache bouncing) -typedef struct mcs_node { - volatile struct mcs_node* next; - volatile int locked; -} mcs_node_t; - -static inline void MCSLock(volatile mcs_node_t** lock, mcs_node_t* node) { - node->next = NULL; - node->locked = 1; - - mcs_node_t* prev = (mcs_node_t*)__sync_lock_test_and_set(lock, node); - if (prev) { - prev->next = node; - while (node->locked) __builtin_ia32_pause(); - } -} - -static inline void MCSUnlock(volatile mcs_node_t** lock, mcs_node_t* node) { - if (!node->next) { - if (__sync_bool_compare_and_swap(lock, node, NULL)) { - return; - } - while (!node->next) __builtin_ia32_pause(); - } - node->next->locked = 0; -} - -// Reader-Writer spinlock (if you need shared/exclusive access) -typedef struct { - volatile int readers; - volatile int writer; - volatile uint32_t owner; - volatile int recursion; -} rwlock_t; - -#define RWLOCK_INIT { .readers = 0, .writer = 0, .owner = 0, .recursion = 0 } - -static inline void ReadLock(rwlock_t* lock, uint32_t owner_id) { - if (lock->writer && lock->owner == owner_id) { - // The current process holds the write lock, so it can "read" - return; - } - while (1) { - while (lock->writer) __builtin_ia32_pause(); - __sync_fetch_and_add(&lock->readers, 1); - if (!lock->writer) break; - __sync_fetch_and_sub(&lock->readers, 1); - } -} - -static inline void ReadUnlock(rwlock_t* lock, uint32_t owner_id) { - if (lock->writer && lock->owner == owner_id) { - __atomic_thread_fence(__ATOMIC_RELEASE); - return; - } - __sync_fetch_and_sub(&lock->readers, 1); -} - -static inline void WriteLock(rwlock_t* lock, uint32_t owner_id) { - if (lock->writer && lock->owner == owner_id) { - lock->recursion++; - return; - } - - while (__sync_lock_test_and_set(&lock->writer, 1)) { - while (lock->writer) __builtin_ia32_pause(); - } - while (lock->readers) __builtin_ia32_pause(); - - lock->owner = owner_id; - lock->recursion = 1; -} - -static inline void WriteUnlock(rwlock_t* lock) { - if (lock->recursion <= 0) { - lock->recursion = 0; - lock->owner = 0; - __sync_lock_release(&lock->writer); - return; - } - if (--lock->recursion == 0) { - lock->owner = 0; - __sync_lock_release(&lock->writer); - } -} - -// Original API preserved -static inline void SpinUnlock(volatile int* lock) { - __sync_lock_release(lock); -} - -static inline irq_flags_t SpinLockIrqSave(volatile int* lock) { - irq_flags_t flags = save_irq_flags(); - cli(); - SpinLock(lock); // Uses the advanced version above - return flags; -} - -static inline void SpinUnlockIrqRestore(volatile int* lock, irq_flags_t flags) { - __atomic_clear(lock, __ATOMIC_RELEASE); - restore_irq_flags(flags); -} - -#endif // SPINLOCK_H \ No newline at end of file diff --git a/kernel/atomic/cpp/Spinlock.cpp b/kernel/atomic/cpp/Spinlock.cpp new file mode 100644 index 0000000..19373c7 --- /dev/null +++ b/kernel/atomic/cpp/Spinlock.cpp @@ -0,0 +1,50 @@ +#include + +#ifdef __cplusplus + +Spinlock::Spinlock() : locked(false) {} + +void Spinlock::lock() { + while (__atomic_test_and_set(&locked, __ATOMIC_ACQUIRE)) { + while (__atomic_load_n(&locked, __ATOMIC_RELAXED)) + __asm__ __volatile__("pause"); + } +} + +void Spinlock::unlock() { + __atomic_clear(&locked, __ATOMIC_RELEASE); +} + +bool Spinlock::try_lock() { + return !__atomic_test_and_set(&locked, __ATOMIC_ACQUIRE); +} + +SpinlockGuard::SpinlockGuard(Spinlock& lock) : lock(lock) { + lock.lock(); +} + +SpinlockGuard::~SpinlockGuard() { + lock.unlock(); +} + +#endif // __cplusplus + +#ifdef 
__cplusplus +extern "C" { +#endif + +void spinlock_lock(Spinlock* lock) { + lock->lock(); +} + +void spinlock_unlock(Spinlock* lock) { + lock->unlock(); +} + +bool spinlock_try_lock(Spinlock* lock) { + return lock->try_lock(); +} + +#ifdef __cplusplus +} +#endif \ No newline at end of file diff --git a/kernel/atomic/cpp/Spinlock.h b/kernel/atomic/cpp/Spinlock.h new file mode 100644 index 0000000..7968909 --- /dev/null +++ b/kernel/atomic/cpp/Spinlock.h @@ -0,0 +1,42 @@ +#pragma once + +#ifdef __cplusplus + +class Spinlock { +public: + Spinlock(); + void lock(); + void unlock(); + bool try_lock(); + +private: + volatile int locked; +}; + +class SpinlockGuard { +public: + explicit SpinlockGuard(Spinlock& lock); + ~SpinlockGuard(); + SpinlockGuard(const SpinlockGuard&) = delete; + SpinlockGuard& operator=(const SpinlockGuard&) = delete; + SpinlockGuard(SpinlockGuard&&) = delete; + SpinlockGuard& operator=(SpinlockGuard&&) = delete; +private: + Spinlock& lock; +}; + +#endif // __cplusplus + +#ifdef __cplusplus +extern "C" { +#endif + +typedef Spinlock Spinlock; + +void spinlock_lock(Spinlock* lock); +void spinlock_unlock(Spinlock* lock); +bool spinlock_try_lock(Spinlock* lock); + +#ifdef __cplusplus +} +#endif diff --git a/kernel/core/Panic.h b/kernel/core/Panic.h index 95fe822..661cf32 100644 --- a/kernel/core/Panic.h +++ b/kernel/core/Panic.h @@ -22,6 +22,10 @@ typedef enum { PANIC_ASSERTION = 0x0008 } PanicCode; +#ifdef __cplusplus +extern "C" { +#endif + // --- Public Panic API --- void __attribute__((noreturn)) Panic(const char* message); void __attribute__((noreturn)) PanicWithCode(const char* message, uint64_t error_code); @@ -48,5 +52,8 @@ PanicWithContext(msg, PANIC_GENERAL, __FUNCTION__, __FILE__, __LINE__) #define PANIC_CODE(msg, code) \ PanicWithContext(msg, code, __FUNCTION__, __FILE__, __LINE__) +#ifdef __cplusplus +} +#endif #endif // PANIC_H \ No newline at end of file diff --git a/mm/KernelHeap.h b/mm/KernelHeap.h index 16fe905..47fe21f 100644 --- a/mm/KernelHeap.h +++ b/mm/KernelHeap.h @@ -16,7 +16,7 @@ #define MAGAZINE_MAX_SIZE 1024 // Tier 2: Rust Allocator for general-purpose medium-sized allocations -#define RUST_MAX_SIZE (128 * 1024) +#define RUST_MAX_SIZE (64 * 1024) // Helper function to wrap large VMem allocations with a header static inline void* LargeBlockAlloc(size_t size) { diff --git a/mm/MemOps.h b/mm/MemOps.h index 5fcc596..ba1fe2a 100644 --- a/mm/MemOps.h +++ b/mm/MemOps.h @@ -3,14 +3,22 @@ #include -void* FastMemset(void* restrict dest, int value, uint64_t size); -void* FastMemcpy(void* restrict dest, const void* restrict src, uint64_t size); -int FastMemcmp(const void* restrict ptr1, const void* restrict ptr2, uint64_t size); -void FastZeroPage(void* restrict page); +#ifdef __cplusplus +extern "C" { +#endif + +void* FastMemset(void* dest, int value, uint64_t size); +void* FastMemcpy(void* dest, const void* src, uint64_t size); +int FastMemcmp(const void* ptr1, const void* ptr2, uint64_t size); +void FastZeroPage(void* page); // Wrapper for host compilers -void* memset(void* restrict dest, int value, unsigned long size); -void* memcpy(void* restrict dest, const void* restrict src, unsigned long size); -int memcmp(const void* restrict s1, const void* restrict s2, unsigned long); +void* memset(void* dest, int value, unsigned long size); +void* memcpy(void* dest, const void* src, unsigned long size); +int memcmp(const void* s1, const void* s2, unsigned long); + +#ifdef __cplusplus +} +#endif #endif \ No newline at end of file diff --git a/mm/PMem.h b/mm/PMem.h 
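// --- Illustrative sketch (hypothetical header, not kernel code): the C/C++ linkage
// pattern this change applies. Io.h, Panic.h, MemOps.h, Magazine.h and PMem.h (below)
// all gain the same guard so the new C++ translation units link against the existing
// C symbols without name mangling. ---

#ifndef EXAMPLE_FOO_H            // hypothetical header name, for illustration only
#define EXAMPLE_FOO_H

#ifdef __cplusplus
extern "C" {                     // seen only by C++ includers: keep C linkage
#endif

void FooInit(void);              // implemented once in a .c file, callable from C and C++

#ifdef __cplusplus
}
#endif

#endif // EXAMPLE_FOO_H

// The new kernel/atomic/cpp/Spinlock.h uses the same guard in the other direction
// (a C++ class plus extern "C" shims such as spinlock_lock()). Its
// `typedef Spinlock Spinlock;` only compiles where the class definition is visible,
// so a C includer would likely also need an opaque struct/handle typedef.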
index 90fa764..6c1300d 100644 --- a/mm/PMem.h +++ b/mm/PMem.h @@ -15,8 +15,12 @@ typedef struct MemoryStats { uint64_t largest_free_block; // Size of largest contiguous free block } MemoryStats; - #define PAGE_SIZE 4096 + +#ifdef __cplusplus +extern "C" { +#endif + extern uint64_t total_pages; int MemoryInit(uint32_t multiboot_info_addr); @@ -31,4 +35,7 @@ void GetDetailedMemoryStats(MemoryStats* stats); int IsPageFree(uint64_t page_idx); uint64_t GetFreeMemory(void); +#ifdef __cplusplus +} +#endif #endif \ No newline at end of file diff --git a/mm/VMem.c b/mm/VMem.c deleted file mode 100644 index 2ae4109..0000000 --- a/mm/VMem.c +++ /dev/null @@ -1,865 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include - -// Global variable for dynamic identity mapping size -uint64_t g_identity_map_size = 4ULL * 1024 * 1024 * 1024; // Default to 4GB, will be updated during init - -static VirtAddrSpace kernel_space; -static volatile int vmem_lock = 0; -static uint64_t vmem_allocations = 0; -static uint64_t vmem_frees = 0; -static uint64_t tlb_flushes = 0; - -// Buddy allocator constants -#define BUDDY_MIN_ORDER 12 // 4KB pages -#define BUDDY_MAX_ORDER 30 // 1GB max allocation -#define BUDDY_NUM_ORDERS (BUDDY_MAX_ORDER - BUDDY_MIN_ORDER + 1) - -// Buddy free lists - one per order -static VMemFreeBlock* buddy_free_lists[BUDDY_NUM_ORDERS]; -static uint64_t buddy_bitmap[(1ULL << (BUDDY_MAX_ORDER - BUDDY_MIN_ORDER)) / 64]; - -// Pre-allocated pool for buddy nodes -#define MAX_BUDDY_NODES 2048 -static VMemFreeBlock buddy_node_pool[MAX_BUDDY_NODES]; -static VMemFreeBlock* buddy_node_head = NULL; - -// Hash table for fast buddy lookup -#define HASH_TABLE_SIZE 4096 // Must be a power of 2 -static VMemFreeBlock* buddy_hash_table[HASH_TABLE_SIZE]; - -static inline uint32_t HashAddress(uint64_t addr) { - // Multiplicative hashing for better distribution - return (uint32_t)(((addr >> BUDDY_MIN_ORDER) * 2654435761) & (HASH_TABLE_SIZE - 1)); -} - -// TLB flush batching for efficiency -#define MAX_TLB_BATCH 64 -static uint64_t tlb_batch[MAX_TLB_BATCH]; -static uint32_t tlb_batch_count = 0; - -// Identity-mapped page table cache for better allocation -#define PT_CACHE_SIZE 16 -static void* pt_cache[PT_CACHE_SIZE]; -static uint32_t pt_cache_count = 0; - -// Buddy allocator regions -static uint64_t buddy_region_start[2]; -static uint64_t buddy_region_size[2]; - -extern uint64_t total_pages; -extern uint8_t _kernel_phys_start[]; -extern uint8_t _kernel_phys_end[]; -extern uint8_t _text_start[]; -extern uint8_t _text_end[]; -extern uint8_t _rodata_start[]; -extern uint8_t _rodata_end[]; -extern uint8_t _data_start[]; -extern uint8_t _data_end[]; -extern uint8_t _bss_start[]; -extern uint8_t _bss_end[]; - -static void InitBuddyNodePool(void) { - buddy_node_head = &buddy_node_pool[0]; - for (int i = 0; i < MAX_BUDDY_NODES - 1; ++i) { - buddy_node_pool[i].next = &buddy_node_pool[i + 1]; - } - buddy_node_pool[MAX_BUDDY_NODES - 1].next = NULL; -} - -static VMemFreeBlock* AllocBuddyNode(void) { - if (!buddy_node_head) return NULL; - VMemFreeBlock* node = buddy_node_head; - buddy_node_head = node->next; - return node; -} - -static void ReleaseBuddyNode(VMemFreeBlock* node) { - node->next = buddy_node_head; - buddy_node_head = node; -} - -static inline uint32_t GetOrder(uint64_t size) { - if (size <= PAGE_SIZE) return 0; - return 64 - __builtin_clzll(size - 1) - BUDDY_MIN_ORDER; -} - -static inline uint64_t OrderToSize(uint32_t order) { - return 1ULL << (order + BUDDY_MIN_ORDER); -} - -static inline uint64_t 
GetBuddyAddr(uint64_t addr, uint32_t order) { - return addr ^ OrderToSize(order); -} - -static void BuddyRemoveFreeBlock(VMemFreeBlock* node, uint32_t order); - -static void BuddyAddFreeBlock(uint64_t addr, uint32_t order) { - if (order >= BUDDY_NUM_ORDERS) return; - - VMemFreeBlock* node = AllocBuddyNode(); - if (!node) return; - - node->base = addr; - node->size = OrderToSize(order); - node->prev = NULL; - node->hnext = NULL; - - // Add to the head of the free list for this order - node->next = buddy_free_lists[order]; - if (buddy_free_lists[order]) { - buddy_free_lists[order]->prev = node; - } - buddy_free_lists[order] = node; - - // Add to the hash table - uint32_t hash = HashAddress(addr); - node->hnext = buddy_hash_table[hash]; - buddy_hash_table[hash] = node; -} - -static VMemFreeBlock* BuddyFindFreeBlock(uint64_t addr, uint32_t order) { - uint32_t hash = HashAddress(addr); - VMemFreeBlock* curr = buddy_hash_table[hash]; - uint64_t size = OrderToSize(order); - - while (curr) { - if (curr->base == addr && curr->size == size) { - return curr; - } - curr = curr->hnext; - } - return NULL; -} - -static void BuddyRemoveFreeBlock(VMemFreeBlock* node, uint32_t order) { - // Remove from the doubly-linked free list - if (node->prev) { - node->prev->next = node->next; - } else { - buddy_free_lists[order] = node->next; - } - if (node->next) { - node->next->prev = node->prev; - } - - // Remove from the hash table - uint32_t hash = HashAddress(node->base); - VMemFreeBlock* prev_h = NULL; - VMemFreeBlock* curr_h = buddy_hash_table[hash]; - while (curr_h) { - if (curr_h == node) { - if (prev_h) { - prev_h->hnext = curr_h->hnext; - } else { - buddy_hash_table[hash] = curr_h->hnext; - } - break; - } - prev_h = curr_h; - curr_h = curr_h->hnext; - } - - // The node is now unlinked, release it back to the pool - ReleaseBuddyNode(node); -} - -static uint64_t BuddyAlloc(uint64_t size) { - uint32_t order = GetOrder(size); - if (order >= BUDDY_NUM_ORDERS) return 0; - - // Find smallest available block - for (uint32_t curr_order = order; curr_order < BUDDY_NUM_ORDERS; curr_order++) { - VMemFreeBlock* block = buddy_free_lists[curr_order]; - if (!block) continue; - - // Unlink the block from the head of the list - if (block->next) { - block->next->prev = NULL; - } - buddy_free_lists[curr_order] = block->next; - - // Remove from hash table - uint32_t hash = HashAddress(block->base); - VMemFreeBlock* prev_h = NULL; - VMemFreeBlock* curr_h = buddy_hash_table[hash]; - while (curr_h) { - if (curr_h == block) { - if (prev_h) { - prev_h->hnext = curr_h->hnext; - } else { - buddy_hash_table[hash] = curr_h->hnext; - } - break; - } - prev_h = curr_h; - curr_h = curr_h->hnext; - } - - uint64_t addr = block->base; - ReleaseBuddyNode(block); - - // Split down to required order - while (curr_order > order) { - curr_order--; - uint64_t buddy_addr = addr + OrderToSize(curr_order); - BuddyAddFreeBlock(buddy_addr, curr_order); - } - - return addr; - } - - return 0; // No free blocks -} - -static void BuddyFree(uint64_t addr, uint64_t size) { - uint32_t order = GetOrder(size); - if (order >= BUDDY_NUM_ORDERS) return; - - // Try to coalesce with buddy - while (order < BUDDY_NUM_ORDERS - 1) { - uint64_t buddy_addr = GetBuddyAddr(addr, order); - VMemFreeBlock* buddy = BuddyFindFreeBlock(buddy_addr, order); - - if (!buddy) break; // Buddy not free - - // Buddy is free, so remove it from the data structures and coalesce - BuddyRemoveFreeBlock(buddy, order); - - if (buddy_addr < addr) addr = buddy_addr; - order++; - } - - 
BuddyAddFreeBlock(addr, order); -} - -static inline int IsValidPhysAddr(uint64_t paddr) { - return (paddr != 0 && paddr < (total_pages * PAGE_SIZE)); -} - -static inline int IsValidVirtAddr(uint64_t vaddr) { - // Check canonical address ranges - return ((vaddr >= VIRT_ADDR_SPACE_LOW_START && vaddr <= VIRT_ADDR_SPACE_LOW_END) || - (vaddr >= VIRT_ADDR_SPACE_HIGH_START && vaddr <= VIRT_ADDR_SPACE_HIGH_END) || - (vaddr >= KERNEL_SPACE_START && vaddr <= KERNEL_SPACE_END)); -} - -static inline uint64_t* GetTableVirt(uint64_t phys_addr) { - return (phys_addr < IDENTITY_MAP_SIZE) ? - (uint64_t*)phys_addr : (uint64_t*)PHYS_TO_VIRT(phys_addr); -} - -static void flush_tlb_batch(void) { - if (tlb_batch_count == 0) return; - - if (tlb_batch_count > 8) { - VMemFlushTLB(); - } else { - for (uint32_t i = 0; i < tlb_batch_count; i++) { - __asm__ volatile("invlpg (%0)" :: "r"(tlb_batch[i]) : "memory"); - } - } - tlb_batch_count = 0; - tlb_flushes++; -} - -static void add_to_tlb_batch(uint64_t vaddr) { - if (tlb_batch_count >= MAX_TLB_BATCH) { - flush_tlb_batch(); - } - tlb_batch[tlb_batch_count++] = vaddr; -} - -static void* alloc_identity_page_table(void) { - if (pt_cache_count > 0) { - return pt_cache[--pt_cache_count]; - } - - for (uint32_t attempt = 0; attempt < 32; attempt++) { - void* candidate = AllocPage(); - if (!candidate) break; - if ((uint64_t)candidate < IDENTITY_MAP_SIZE) { - FastZeroPage(candidate); - return candidate; - } - FreePage(candidate); - } - return NULL; -} - -static void cache_page_table(void* pt) { - if (pt_cache_count < PT_CACHE_SIZE && (uint64_t)pt < IDENTITY_MAP_SIZE) { - pt_cache[pt_cache_count++] = pt; - } else { - FreePage(pt); - } -} - -void VMemInit(void) { - InitBuddyNodePool(); - - // Initialize buddy allocator - for (int i = 0; i < BUDDY_NUM_ORDERS; i++) { - buddy_free_lists[i] = NULL; - } - - // Initialize hash table - for (int i = 0; i < HASH_TABLE_SIZE; i++) { - buddy_hash_table[i] = NULL; - } - - // Set up buddy regions - buddy_region_start[0] = VIRT_ADDR_SPACE_LOW_START; - buddy_region_size[0] = VIRT_ADDR_SPACE_LOW_END - VIRT_ADDR_SPACE_LOW_START + 1; - buddy_region_start[1] = VIRT_ADDR_SPACE_HIGH_START; - buddy_region_size[1] = VIRT_ADDR_SPACE_HIGH_END - VIRT_ADDR_SPACE_HIGH_START + 1; - - // Add initial free blocks (largest possible) - for (int region = 0; region < 2; region++) { - uint64_t addr = buddy_region_start[region]; - uint64_t remaining = buddy_region_size[region]; - - while (remaining >= PAGE_SIZE) { - uint32_t order = BUDDY_NUM_ORDERS - 1; - while (order > 0 && OrderToSize(order) > remaining) { - order--; - } - - BuddyAddFreeBlock(addr, order); - uint64_t block_size = OrderToSize(order); - addr += block_size; - remaining -= block_size; - } - } - - // Get current PML4 from CR3 (set by bootstrap) - uint64_t pml4_phys_addr; - __asm__ volatile("mov %%cr3, %0" : "=r"(pml4_phys_addr)); - pml4_phys_addr &= ~0xFFF; - - kernel_space.pml4 = (uint64_t*)pml4_phys_addr; - kernel_space.used_pages = 0; - kernel_space.total_mapped = IDENTITY_MAP_SIZE; - - // Test identity mapping - if (VMemGetPhysAddr(0x100000) != 0x100000) { - PANIC("Bootstrap identity mapping failed - VALIDATION FAILED"); - } - const uint64_t probe = IDENTITY_MAP_SIZE - PAGE_SIZE; - if (VMemGetPhysAddr(probe) != probe) { - PANIC("Bootstrap identity mapping failed at IDENTITY_MAP_SIZE boundary"); - } - PrintKernelSuccess("VMem: Buddy allocator initialized with PML4: "); - PrintKernelHex(pml4_phys_addr); - PrintKernel("\n"); -} - -static uint64_t VMemGetPageTablePhys(uint64_t pml4_phys, uint64_t 
vaddr, uint32_t level, int create) { - if (!IsValidPhysAddr(pml4_phys)) return 0; - - uint64_t* table_virt = GetTableVirt(pml4_phys); - uint32_t shift = 39U - (level * 9U); - uint32_t index = (vaddr >> shift) & PT_INDEX_MASK; - - if (!(table_virt[index] & PAGE_PRESENT)) { - if (!create) return 0; - - void* new_table_phys = alloc_identity_page_table(); - if (!new_table_phys || !IsValidPhysAddr((uint64_t)new_table_phys)) { - if (new_table_phys) FreePage(new_table_phys); - return 0; - } - - table_virt[index] = (uint64_t)new_table_phys | PAGE_PRESENT | PAGE_WRITABLE; - return (uint64_t)new_table_phys; - } - - return table_virt[index] & PT_ADDR_MASK; -} - -int VMemMap(uint64_t vaddr, uint64_t paddr, uint64_t flags) { - if (!IS_PAGE_ALIGNED(vaddr) || !IS_PAGE_ALIGNED(paddr)) { - return VMEM_ERROR_ALIGN; - } - if (!IsValidPhysAddr(paddr) || !IsValidVirtAddr(vaddr)) { - return VMEM_ERROR_INVALID_ADDR; - } - - irq_flags_t irq_flags = SpinLockIrqSave(&vmem_lock); - - uint64_t pdp_phys = VMemGetPageTablePhys((uint64_t)kernel_space.pml4, vaddr, 0, 1); - if (!pdp_phys) { - SpinUnlockIrqRestore(&vmem_lock, irq_flags); - return VMEM_ERROR_NOMEM; - } - - uint64_t pd_phys = VMemGetPageTablePhys(pdp_phys, vaddr, 1, 1); - if (!pd_phys) { - SpinUnlockIrqRestore(&vmem_lock, irq_flags); - return VMEM_ERROR_NOMEM; - } - - uint64_t pt_phys = VMemGetPageTablePhys(pd_phys, vaddr, 2, 1); - if (!pt_phys) { - SpinUnlockIrqRestore(&vmem_lock, irq_flags); - return VMEM_ERROR_NOMEM; - } - - uint64_t* pt_virt = GetTableVirt(pt_phys); - uint32_t pt_index = (vaddr >> PT_SHIFT) & PT_INDEX_MASK; - - if (pt_virt[pt_index] & PAGE_PRESENT) { - SpinUnlockIrqRestore(&vmem_lock, irq_flags); - return VMEM_ERROR_ALREADY_MAPPED; - } - - pt_virt[pt_index] = paddr | flags | PAGE_PRESENT; - add_to_tlb_batch(vaddr); - - SpinUnlockIrqRestore(&vmem_lock, irq_flags); - return VMEM_SUCCESS; -} - -int VMemMapHuge(uint64_t vaddr, uint64_t paddr, uint64_t flags) { - if (!IS_HUGE_PAGE_ALIGNED(vaddr) || !IS_HUGE_PAGE_ALIGNED(paddr)) { - return VMEM_ERROR_ALIGN; - } - if (!IsValidPhysAddr(paddr) || !IsValidVirtAddr(vaddr)) { - return VMEM_ERROR_INVALID_ADDR; - } - - irq_flags_t irq_flags = SpinLockIrqSave(&vmem_lock); - - uint64_t pdp_phys = VMemGetPageTablePhys((uint64_t)kernel_space.pml4, vaddr, 0, 1); - if (!pdp_phys) { - SpinUnlockIrqRestore(&vmem_lock, irq_flags); - return VMEM_ERROR_NOMEM; - } - - uint64_t pd_phys = VMemGetPageTablePhys(pdp_phys, vaddr, 1, 1); - if (!pd_phys) { - SpinUnlockIrqRestore(&vmem_lock, irq_flags); - return VMEM_ERROR_NOMEM; - } - - uint64_t* pd_virt = GetTableVirt(pd_phys); - uint32_t pd_index = (vaddr >> PD_SHIFT) & PT_INDEX_MASK; - - if (pd_virt[pd_index] & PAGE_PRESENT) { - SpinUnlockIrqRestore(&vmem_lock, irq_flags); - return VMEM_ERROR_ALREADY_MAPPED; - } - - pd_virt[pd_index] = paddr | flags | PAGE_PRESENT | PAGE_LARGE; - add_to_tlb_batch(vaddr); - - SpinUnlockIrqRestore(&vmem_lock, irq_flags); - return VMEM_SUCCESS; -} - -void* VMemAlloc(uint64_t size) { - if (size == 0) return NULL; - size = PAGE_ALIGN_UP(size); - - irq_flags_t flags = SpinLockIrqSave(&vmem_lock); - - uint64_t vaddr = BuddyAlloc(size); - if (!vaddr) { - SpinUnlockIrqRestore(&vmem_lock, flags); - return NULL; - } - - vmem_allocations++; - SpinUnlockIrqRestore(&vmem_lock, flags); - - // Map physical pages - for (uint64_t offset = 0; offset < size; offset += PAGE_SIZE) { - void* paddr = AllocPage(); - if (!paddr) { - VMemFree((void*)vaddr, size); - return NULL; - } - if (VMemMap(vaddr + offset, (uint64_t)paddr, PAGE_WRITABLE) != VMEM_SUCCESS) { 
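// --- Illustrative sketch (hypothetical code, not part of this change): how the walk
// above indexes a virtual address. VMemGetPageTablePhys() derives one 9-bit index per
// level from `shift = 39 - level*9`; this sketch assumes the usual x86-64 constants
// (PT_INDEX_MASK == 0x1FF, PT_SHIFT == 12, PD_SHIFT == 21). ---

typedef struct {
    uint32_t pml4_i, pdpt_i, pd_i, pt_i;
    uint32_t offset;
} VaddrParts;

static VaddrParts SplitVaddr(uint64_t vaddr) {
    VaddrParts p;
    p.pml4_i = (uint32_t)((vaddr >> 39) & 0x1FF);   // level 0 in the walk above
    p.pdpt_i = (uint32_t)((vaddr >> 30) & 0x1FF);   // level 1
    p.pd_i   = (uint32_t)((vaddr >> 21) & 0x1FF);   // level 2 (PD_SHIFT)
    p.pt_i   = (uint32_t)((vaddr >> 12) & 0x1FF);   // leaf index (PT_SHIFT)
    p.offset = (uint32_t)(vaddr & 0xFFF);           // byte offset within the 4 KiB page
    return p;
}
// Example: the identity-map probe 0x100000 used in VMemInit() splits into
// PML4[0] -> PDPT[0] -> PD[0] -> PT[256], offset 0.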
- FreePage(paddr); - VMemFree((void*)vaddr, size); - return NULL; - } - } - - flush_tlb_batch(); - - flags = SpinLockIrqSave(&vmem_lock); - kernel_space.used_pages += size / PAGE_SIZE; - kernel_space.total_mapped += size; - SpinUnlockIrqRestore(&vmem_lock, flags); - - FastMemset((void*)vaddr, 0, size); - return (void*)vaddr; -} - -void VMemFree(void* vaddr, uint64_t size) { - if (!vaddr || size == 0) return; - - uint64_t start_vaddr = PAGE_ALIGN_DOWN((uint64_t)vaddr); - size = PAGE_ALIGN_UP(size); - - // Unmap all pages and free physical frames - for (uint64_t offset = 0; offset < size; offset += PAGE_SIZE) { - uint64_t current_vaddr = start_vaddr + offset; - uint64_t paddr = VMemGetPhysAddr(current_vaddr); - if (paddr != 0) { - VMemUnmap(current_vaddr, PAGE_SIZE); - FreePage((void*)paddr); - } - } - - flush_tlb_batch(); - - irq_flags_t flags = SpinLockIrqSave(&vmem_lock); - BuddyFree(start_vaddr, size); - kernel_space.used_pages -= size / PAGE_SIZE; - kernel_space.total_mapped -= size; - vmem_frees++; - SpinUnlockIrqRestore(&vmem_lock, flags); -} - -void* VMemAllocWithGuards(uint64_t size) { - if (size == 0) return NULL; - size = PAGE_ALIGN_UP(size); - - // Allocate space for the user area plus two guard pages - uint64_t total_size = size + (2 * PAGE_SIZE); - void* base_ptr = VMemAlloc(total_size); - if (!base_ptr) return NULL; - - uint64_t base_addr = (uint64_t)base_ptr; - uint64_t guard1_vaddr = base_addr; - uint64_t guard2_vaddr = base_addr + size + PAGE_SIZE; - - // Get the physical pages backing the guard pages - uint64_t paddr1 = VMemGetPhysAddr(guard1_vaddr); - uint64_t paddr2 = VMemGetPhysAddr(guard2_vaddr); - - // Unmap the guard pages - any access will now cause a page fault - VMemUnmap(guard1_vaddr, PAGE_SIZE); - VMemUnmap(guard2_vaddr, PAGE_SIZE); - - // Return the physical pages to the allocator - if (paddr1) FreePage((void*)paddr1); - if (paddr2) FreePage((void*)paddr2); - - // Return the address of the usable memory region - return (void*)(base_addr + PAGE_SIZE); -} - -void VMemFreeWithGuards(void* ptr, uint64_t size) { - if (!ptr) return; - size = PAGE_ALIGN_UP(size); - - // Calculate the original base address including the first guard page - uint64_t base_addr = (uint64_t)ptr - PAGE_SIZE; - uint64_t total_size = size + (2 * PAGE_SIZE); - - // We don't need to check for corruption; a #PF would have already occurred. 
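// --- Illustrative sketch (hypothetical helper, not part of this change): the guard-page
// layout built by VMemAllocWithGuards() above, assuming PAGE_SIZE == 4096. ---
//
//   base_addr               base_addr + PAGE_SIZE          base_addr + PAGE_SIZE + size
//   | guard page (unmapped) | usable region (returned) ... | guard page (unmapped)    |
//
// VMemFreeWithGuards() only has to reverse the arithmetic:

static void GuardRegionFromUserPtr(uint64_t user_ptr, uint64_t size,
                                   uint64_t* out_base, uint64_t* out_total) {
    const uint64_t page = 4096;
    *out_base  = user_ptr - page;     // first guard page sits just below the user region
    *out_total = size + 2 * page;     // guard + user pages + guard, as allocated above
}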
- VMemFree((void*)base_addr, total_size); -} - -uint64_t VMemGetPhysAddr(uint64_t vaddr) { - uint64_t pml4_phys = (uint64_t)kernel_space.pml4; - uint64_t pdp_phys = VMemGetPageTablePhys(pml4_phys, vaddr, 0, 0); - if (!pdp_phys) return 0; - - uint64_t pd_phys = VMemGetPageTablePhys(pdp_phys, vaddr, 1, 0); - if (!pd_phys) return 0; - - uint64_t* pd_virt = GetTableVirt(pd_phys); - uint32_t pd_index = (vaddr >> PD_SHIFT) & PT_INDEX_MASK; - uint64_t pde = pd_virt[pd_index]; - - if (!(pde & PAGE_PRESENT)) return 0; - - if (pde & PAGE_LARGE) { - uint64_t base = pde & PT_ADDR_MASK; - return (base & ~(HUGE_PAGE_SIZE - 1)) | (vaddr & (HUGE_PAGE_SIZE - 1)); - } - - uint64_t pt_phys = VMemGetPageTablePhys(pd_phys, vaddr, 2, 0); - if (!pt_phys) return 0; - - uint64_t* pt_virt = GetTableVirt(pt_phys); - uint32_t pt_index = (vaddr >> PT_SHIFT) & PT_INDEX_MASK; - uint64_t pte = pt_virt[pt_index]; - - if (!(pte & PAGE_PRESENT)) return 0; - return (pte & PT_ADDR_MASK) | (vaddr & PAGE_MASK); -} - -int VMemIsPageMapped(uint64_t vaddr) { - return VMemGetPhysAddr(vaddr) != 0; -} - -void VMemFlushTLB(void) { - __asm__ volatile( - "mov %%cr3, %%rax\n" - "mov %%rax, %%cr3\n" - ::: "rax", "memory" - ); -} - -void VMemFlushTLBSingle(uint64_t vaddr) { - __asm__ volatile("invlpg (%0)" :: "r"(vaddr) : "memory"); - tlb_flushes++; -} - -int VMemUnmap(uint64_t vaddr, uint64_t size) { - if (size == 0) return VMEM_SUCCESS; - - uint64_t start = PAGE_ALIGN_DOWN(vaddr); - uint64_t end = PAGE_ALIGN_UP(vaddr + size); - uint64_t num_pages = (end - start) / PAGE_SIZE; - - irq_flags_t flags = SpinLockIrqSave(&vmem_lock); - - for (uint64_t i = 0; i < num_pages; i++) { - uint64_t current_vaddr = start + (i * PAGE_SIZE); - - uint64_t pml4_phys = (uint64_t)kernel_space.pml4; - uint64_t pdp_phys = VMemGetPageTablePhys(pml4_phys, current_vaddr, 0, 0); - if (!pdp_phys) continue; - - uint64_t pd_phys = VMemGetPageTablePhys(pdp_phys, current_vaddr, 1, 0); - if (!pd_phys) continue; - - uint64_t* pd_virt = GetTableVirt(pd_phys); - uint32_t pd_index = (current_vaddr >> PD_SHIFT) & PT_INDEX_MASK; - uint64_t pde = pd_virt[pd_index]; - - if ((pde & PAGE_PRESENT) && (pde & PAGE_LARGE)) { - if (IS_HUGE_PAGE_ALIGNED(current_vaddr) && (end - current_vaddr) >= HUGE_PAGE_SIZE) { - pd_virt[pd_index] = 0; - kernel_space.used_pages -= (HUGE_PAGE_SIZE / PAGE_SIZE); - kernel_space.total_mapped -= HUGE_PAGE_SIZE; - add_to_tlb_batch(current_vaddr); - i += (HUGE_PAGE_SIZE / PAGE_SIZE) - 1; - continue; - } - } - - uint64_t pt_phys = VMemGetPageTablePhys(pd_phys, current_vaddr, 2, 0); - if (!pt_phys) continue; - - uint64_t* pt_virt = GetTableVirt(pt_phys); - uint32_t pt_index = (current_vaddr >> PT_SHIFT) & PT_INDEX_MASK; - - if (pt_virt[pt_index] & PAGE_PRESENT) { - pt_virt[pt_index] = 0; - kernel_space.used_pages--; - kernel_space.total_mapped -= PAGE_SIZE; - add_to_tlb_batch(current_vaddr); - } - } - - flush_tlb_batch(); - SpinUnlockIrqRestore(&vmem_lock, flags); - return VMEM_SUCCESS; -} - -/** - * @brief Gets virtual memory statistics - */ -void VMemGetStats(uint64_t* used_pages, uint64_t* total_mapped) { - SpinLock(&vmem_lock); - if (used_pages) *used_pages = kernel_space.used_pages; - if (total_mapped) *total_mapped = kernel_space.total_mapped; - SpinUnlock(&vmem_lock); -} - -void PrintVMemStats(void) { - irq_flags_t flags = SpinLockIrqSave(&vmem_lock); - uint64_t used = kernel_space.used_pages; - uint64_t mapped = kernel_space.total_mapped; - uint64_t allocs = vmem_allocations; - uint64_t frees = vmem_frees; - uint64_t flushes = tlb_flushes; - 
SpinUnlockIrqRestore(&vmem_lock, flags); - - PrintKernel("[VMEM] Stats:\n"); - PrintKernel(" Used pages: "); PrintKernelInt(used); PrintKernel("\n"); - PrintKernel(" Mapped: "); PrintKernelInt(mapped / (1024 * 1024)); PrintKernel("MB\n"); - PrintKernel(" Allocs: "); PrintKernelInt(allocs); PrintKernel(", Frees: "); PrintKernelInt(frees); PrintKernel("\n"); - PrintKernel(" TLB flushes: "); PrintKernelInt(flushes); PrintKernel("\n"); -} - -uint64_t VMemGetPML4PhysAddr(void) { - return (uint64_t)kernel_space.pml4; // This is already physical -} - -int VMemMapMMIO(uint64_t vaddr, uint64_t paddr, uint64_t size, uint64_t flags) { - if (!IS_PAGE_ALIGNED(vaddr) || !IS_PAGE_ALIGNED(paddr) || !IS_PAGE_ALIGNED(size)) { - return VMEM_ERROR_ALIGN; - } - if (!IsValidVirtAddr(vaddr)) { - return VMEM_ERROR_INVALID_ADDR; - } - - uint64_t mmio_flags = flags | PAGE_PRESENT | PAGE_NOCACHE | PAGE_WRITETHROUGH; - uint64_t num_pages = size / PAGE_SIZE; - - irq_flags_t irq_flags = SpinLockIrqSave(&vmem_lock); - - for (uint64_t i = 0; i < num_pages; i++) { - uint64_t current_vaddr = vaddr + (i * PAGE_SIZE); - uint64_t current_paddr = paddr + (i * PAGE_SIZE); - - uint64_t pdp_phys = VMemGetPageTablePhys((uint64_t)kernel_space.pml4, current_vaddr, 0, 1); - if (!pdp_phys) { - SpinUnlockIrqRestore(&vmem_lock, irq_flags); - return VMEM_ERROR_NOMEM; - } - - uint64_t pd_phys = VMemGetPageTablePhys(pdp_phys, current_vaddr, 1, 1); - if (!pd_phys) { - SpinUnlockIrqRestore(&vmem_lock, irq_flags); - return VMEM_ERROR_NOMEM; - } - - uint64_t pt_phys = VMemGetPageTablePhys(pd_phys, current_vaddr, 2, 1); - if (!pt_phys) { - SpinUnlockIrqRestore(&vmem_lock, irq_flags); - return VMEM_ERROR_NOMEM; - } - - uint64_t* pt_virt = GetTableVirt(pt_phys); - uint32_t pt_index = (current_vaddr >> PT_SHIFT) & PT_INDEX_MASK; - - if (pt_virt[pt_index] & PAGE_PRESENT) { - SpinUnlockIrqRestore(&vmem_lock, irq_flags); - return VMEM_ERROR_ALREADY_MAPPED; - } - - pt_virt[pt_index] = current_paddr | mmio_flags; - add_to_tlb_batch(current_vaddr); - } - - flush_tlb_batch(); - SpinUnlockIrqRestore(&vmem_lock, irq_flags); - __asm__ volatile("mfence" ::: "memory"); - return VMEM_SUCCESS; -} - -void VMemUnmapMMIO(uint64_t vaddr, uint64_t size) { - if (!IS_PAGE_ALIGNED(vaddr) || !IS_PAGE_ALIGNED(size) || size == 0) { - return; - } - - uint64_t num_pages = size / PAGE_SIZE; - irq_flags_t irq_flags = SpinLockIrqSave(&vmem_lock); - uint64_t pml4_phys = VMemGetPML4PhysAddr(); - - for (uint64_t i = 0; i < num_pages; i++) { - uint64_t current_vaddr = vaddr + (i * PAGE_SIZE); - - uint64_t pdp_phys = VMemGetPageTablePhys(pml4_phys, current_vaddr, 0, 0); - if (!pdp_phys) continue; - - uint64_t pd_phys = VMemGetPageTablePhys(pdp_phys, current_vaddr, 1, 0); - if (!pd_phys) continue; - - uint64_t pt_phys = VMemGetPageTablePhys(pd_phys, current_vaddr, 2, 0); - if (!pt_phys) continue; - - uint64_t* pt_table = GetTableVirt(pt_phys); - uint32_t pt_index = (current_vaddr >> PT_SHIFT) & PT_INDEX_MASK; - - if (pt_table[pt_index] & PAGE_PRESENT) { - pt_table[pt_index] = 0; - add_to_tlb_batch(current_vaddr); - } - } - - flush_tlb_batch(); - SpinUnlockIrqRestore(&vmem_lock, irq_flags); -} - -void* VMemAllocStack(uint64_t size) { - if (size == 0) return NULL; - - uint64_t stack_size = PAGE_ALIGN_UP(size); - // We need space for the stack itself, plus one guard page at the bottom. 
- uint64_t total_size = stack_size + PAGE_SIZE; - - // Allocate the entire region (guard + stack) - void* base_ptr = VMemAlloc(total_size); - if (!base_ptr) return NULL; - - uint64_t base_addr = (uint64_t)base_ptr; - - // The guard page is the very first page in the allocation. - uint64_t guard_page_vaddr = base_addr; - - // Get the physical page backing the guard page so we can free it. - uint64_t paddr_guard = VMemGetPhysAddr(guard_page_vaddr); - - // Unmap the guard page. Any write to it will now cause a #PF. - VMemUnmap(guard_page_vaddr, PAGE_SIZE); - - // Return the physical page to the system's page allocator. - if (paddr_guard) { - FreePage((void*)paddr_guard); - } - - // IMPORTANT: The stack pointer must start at the TOP of the allocated region. - // The top is the base address + the total size allocated. - uint64_t stack_top = base_addr + total_size; - - return (void*)stack_top; -} - -void VMemFreeStack(void* stack_top, uint64_t size) { - if (!stack_top || size == 0) return; - - uint64_t stack_size = PAGE_ALIGN_UP(size); - uint64_t total_size = stack_size + PAGE_SIZE; - - // Re-calculate the original base address of the allocation. - uint64_t top_addr = (uint64_t)stack_top; - uint64_t base_addr = top_addr - total_size; - - // The guard page is already unmapped. VMemFree will handle the virtual space. - // We just need to free the original allocation block. - VMemFree((void*)base_addr, total_size); -} - -void VMemDumpFreeList(void) { - irq_flags_t flags = SpinLockIrqSave(&vmem_lock); - PrintKernel("[VMEM] Buddy Allocator Free Blocks:\n"); - - uint64_t total_free = 0; - for (uint32_t order = 0; order < BUDDY_NUM_ORDERS; order++) { - uint32_t count = 0; - VMemFreeBlock* current = buddy_free_lists[order]; - while (current) { - count++; - current = current->next; - } - - if (count > 0) { - uint64_t block_size = OrderToSize(order); - uint64_t total_size = count * block_size; - total_free += total_size; - - PrintKernel(" Order "); PrintKernelInt(order); - PrintKernel(" ("); PrintKernelInt(block_size / 1024); PrintKernel("KB): "); - PrintKernelInt(count); PrintKernel(" blocks, "); - PrintKernelInt(total_size / (1024 * 1024)); PrintKernel("MB total\n"); - } - } - - PrintKernel("[VMEM] Total free: "); PrintKernelInt(total_free / (1024 * 1024)); PrintKernel("MB\n"); - SpinUnlockIrqRestore(&vmem_lock, flags); -} \ No newline at end of file diff --git a/mm/VMem.cpp b/mm/VMem.cpp new file mode 100644 index 0000000..05e75f6 --- /dev/null +++ b/mm/VMem.cpp @@ -0,0 +1,651 @@ +#include +#include +#include +#include +#include +#include +#include + +// Global variable for dynamic identity mapping size +uint64_t g_identity_map_size = 4ULL * 1024 * 1024 * 1024; // Default to 4GB, will be updated during init + +extern BuddyAllocator g_buddy_allocator; +extern uint64_t total_pages; +extern uint8_t _kernel_phys_start[]; +extern uint8_t _kernel_phys_end[]; +extern uint8_t _text_start[]; +extern uint8_t _text_end[]; +extern uint8_t _rodata_start[]; +extern uint8_t _rodata_end[]; +extern uint8_t _data_start[]; +extern uint8_t _data_end[]; +extern uint8_t _bss_start[]; +extern uint8_t _bss_end[]; + +#define MAX_TLB_BATCH 64 +#define PT_CACHE_SIZE 16 + +static VirtAddrSpace kernel_space; +static Spinlock vmem_lock; +static uint64_t vmem_allocations; +static uint64_t vmem_frees; +static uint64_t tlb_flushes; + +static uint64_t tlb_batch[MAX_TLB_BATCH]; +static uint32_t tlb_batch_count; + +static void* pt_cache[PT_CACHE_SIZE]; +static uint32_t pt_cache_count; + + +void VMM::init() { + vmem_allocations = 0; + 
vmem_frees = 0; + tlb_flushes = 0; + tlb_batch_count = 0; + pt_cache_count = 0; + // The VMem system is split into two regions, so we create two buddy allocators. + uint64_t low_region_size = VIRT_ADDR_SPACE_LOW_END - VIRT_ADDR_SPACE_LOW_START + 1; + BuddyAllocator_Create(VIRT_ADDR_SPACE_LOW_START, low_region_size); + + // Get current PML4 from CR3 (set by bootstrap) + uint64_t pml4_phys_addr; + __asm__ volatile("mov %%cr3, %0" : "=r"(pml4_phys_addr)); + pml4_phys_addr &= ~0xFFF; + + kernel_space.pml4 = reinterpret_cast(pml4_phys_addr); + kernel_space.used_pages = 0; + kernel_space.total_mapped = IDENTITY_MAP_SIZE; + + // Test identity mapping + if (VMM_VMemGetPhysAddr(0x100000) != 0x100000) { + PANIC("Bootstrap identity mapping failed - VALIDATION FAILED"); + } + if (const uint64_t probe = IDENTITY_MAP_SIZE - PAGE_SIZE; VMM_VMemGetPhysAddr(probe) != probe) { + PANIC("Bootstrap identity mapping failed at IDENTITY_MAP_SIZE boundary"); + } + PrintKernelSuccess("VMem: Buddy allocator initialized with PML4: "); + PrintKernelHex(pml4_phys_addr); + PrintKernel("\n"); +} + +int VMM::VMM_VMemMap(const uint64_t vaddr, const uint64_t paddr, const uint64_t flags) { + if (!IS_PAGE_ALIGNED(vaddr) || !IS_PAGE_ALIGNED(paddr)) { + return VMEM_ERROR_ALIGN; + } + if (!VMM_IsValidPhysAddr(paddr) || !VMM_IsValidVirtAddr(vaddr)) { + return VMEM_ERROR_INVALID_ADDR; + } + + SpinlockGuard lock(vmem_lock); + + const uint64_t pdp_phys = VMM_VMemGetPageTablePhys(reinterpret_cast(kernel_space.pml4), vaddr, 0, 1); + if (!pdp_phys) { + return VMEM_ERROR_NOMEM; + } + + const uint64_t pd_phys = VMM_VMemGetPageTablePhys(pdp_phys, vaddr, 1, 1); + if (!pd_phys) { + return VMEM_ERROR_NOMEM; + } + + const uint64_t pt_phys = VMM_VMemGetPageTablePhys(pd_phys, vaddr, 2, 1); + if (!pt_phys) { + return VMEM_ERROR_NOMEM; + } + + uint64_t* pt_virt = VMM_GetTableVirt(pt_phys); + const uint32_t pt_index = vaddr >> PT_SHIFT & PT_INDEX_MASK; + + if (pt_virt[pt_index] & PAGE_PRESENT) { + return VMEM_ERROR_ALREADY_MAPPED; + } + + pt_virt[pt_index] = paddr | flags | PAGE_PRESENT; + VMM_add_to_tlb_batch(vaddr); + + return VMEM_SUCCESS; +} + +int VMM::VMM_VMemMapHuge(const uint64_t vaddr, const uint64_t paddr, const uint64_t flags) { + if (!IS_HUGE_PAGE_ALIGNED(vaddr) || !IS_HUGE_PAGE_ALIGNED(paddr)) { + return VMEM_ERROR_ALIGN; + } + if (!VMM_IsValidPhysAddr(paddr) || !VMM_IsValidVirtAddr(vaddr)) { + return VMEM_ERROR_INVALID_ADDR; + } + + SpinlockGuard lock(vmem_lock); + + const uint64_t pdp_phys = VMM_VMemGetPageTablePhys(reinterpret_cast(kernel_space.pml4), vaddr, 0, 1); + if (!pdp_phys) { + return VMEM_ERROR_NOMEM; + } + + const uint64_t pd_phys = VMM_VMemGetPageTablePhys(pdp_phys, vaddr, 1, 1); + if (!pd_phys) { + return VMEM_ERROR_NOMEM; + } + + uint64_t* pd_virt = VMM_GetTableVirt(pd_phys); + const uint32_t pd_index = vaddr >> PD_SHIFT & PT_INDEX_MASK; + + if (pd_virt[pd_index] & PAGE_PRESENT) { + return VMEM_ERROR_ALREADY_MAPPED; + } + + pd_virt[pd_index] = paddr | flags | PAGE_PRESENT | PAGE_LARGE; + VMM_add_to_tlb_batch(vaddr); + + return VMEM_SUCCESS; +} + +void* VMM::VMM_VMemAlloc(const uint64_t size_p) { + if (size_p == 0) return nullptr; + const uint64_t size = PAGE_ALIGN_UP(size_p); + + uint64_t vaddr; + { + SpinlockGuard lock(vmem_lock); + vaddr = BuddyAllocator_Allocate(&g_buddy_allocator, size); + if (!vaddr) { + return nullptr; + } + vmem_allocations++; + } + + // Map physical pages + for (uint64_t offset = 0; offset < size; offset += PAGE_SIZE) { + if (void* paddr = AllocPage(); VMM_VMemMap(vaddr + offset, 
reinterpret_cast(paddr), PAGE_WRITABLE) != VMEM_SUCCESS) { + FreePage(paddr); + VMM_VMemFree(reinterpret_cast(vaddr), size); + return nullptr; + } + } + + VMM_flush_tlb_batch(); + + { + SpinlockGuard lock(vmem_lock); + kernel_space.used_pages += size / PAGE_SIZE; + kernel_space.total_mapped += size; + } + + FastMemset(reinterpret_cast(vaddr), 0, size); + return reinterpret_cast(vaddr); +} + +void VMM::VMM_VMemFree(void* vaddr, const uint64_t size_p) { + if (!vaddr || size_p == 0) return; + + const uint64_t start_vaddr = PAGE_ALIGN_DOWN(reinterpret_cast(vaddr)); + const uint64_t size = PAGE_ALIGN_UP(size_p); + + // Unmap all pages and free physical frames + for (uint64_t offset = 0; offset < size; offset += PAGE_SIZE) { + const uint64_t current_vaddr = start_vaddr + offset; + if (const uint64_t paddr = VMM_VMemGetPhysAddr(current_vaddr); paddr != 0) { + VMM_VMemUnmap(current_vaddr, PAGE_SIZE); + FreePage(reinterpret_cast(paddr)); + } + } + + VMM_flush_tlb_batch(); + + SpinlockGuard lock(vmem_lock); + BuddyAllocator_Free(&g_buddy_allocator, start_vaddr, size); + kernel_space.used_pages -= size / PAGE_SIZE; + kernel_space.total_mapped -= size; + vmem_frees++; +} + +void* VMM::VMM_VMemAllocWithGuards(const uint64_t size_p) { + if (size_p == 0) return nullptr; + const uint64_t size = PAGE_ALIGN_UP(size_p); + + const uint64_t total_size = size + 2 * PAGE_SIZE; + void* base_ptr = VMemAlloc(total_size); + if (!base_ptr) return nullptr; + + // Allocate space for the user area plus two guard pages + const auto base_addr = reinterpret_cast(base_ptr); + const uint64_t guard1_vaddr = base_addr; + const uint64_t guard2_vaddr = base_addr + size + PAGE_SIZE; + + // Get the physical pages backing the guard pages + const uint64_t paddr1 = VMM_VMemGetPhysAddr(guard1_vaddr); + const uint64_t paddr2 = VMM_VMemGetPhysAddr(guard2_vaddr); + + // Unmap the guard pages - any access will now cause a page fault + VMM_VMemUnmap(guard1_vaddr, PAGE_SIZE); + VMM_VMemUnmap(guard2_vaddr, PAGE_SIZE); + + // Return the physical pages to the allocator + if (paddr1) FreePage(reinterpret_cast(paddr1)); + if (paddr2) FreePage(reinterpret_cast(paddr2)); + + // Return the address of the usable memory region + return reinterpret_cast(base_addr + PAGE_SIZE); +} + +void VMM::VMM_VMemFreeWithGuards(void* ptr, const uint64_t size_p) { + if (!ptr) return; + const uint64_t size = PAGE_ALIGN_UP(size_p); + + // Calculate the original base address including the first guard page + const uint64_t base_addr = reinterpret_cast(ptr) - PAGE_SIZE; + const uint64_t total_size = size + 2 * PAGE_SIZE; + + // We don't need to check for corruption; a #PF would have already occurred. 
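// --- Illustrative sketch (hypothetical code, not part of this change): the scoped-lock
// idiom used in VMM_VMemAlloc() above. The braced block releases vmem_lock before the
// mapping loop runs, because VMM_VMemMap() takes the same non-recursive lock itself.
// Assumes the Spinlock/SpinlockGuard classes from kernel/atomic/cpp/Spinlock.h. ---

static Spinlock g_example_lock;            // hypothetical; mirrors vmem_lock above
static uint64_t g_example_counter = 0;

static uint64_t BumpCounter() {
    uint64_t snapshot;
    {
        SpinlockGuard guard(g_example_lock);   // locks in the constructor...
        snapshot = ++g_example_counter;
    }                                          // ...unlocks in the destructor, here
    // Work that must not hold the lock (anything that re-acquires it, like the
    // VMM_VMemMap() calls above) goes after the closing brace.
    return snapshot;
}
// C callers drive the same object through the extern "C" shims:
// spinlock_lock(&g_example_lock); ... spinlock_unlock(&g_example_lock);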
+ VMM_VMemFree(reinterpret_cast(base_addr), total_size); +} + +uint64_t VMM::VMM_VMemGetPhysAddr(const uint64_t vaddr) { + const auto pml4_phys = reinterpret_cast(kernel_space.pml4); + const uint64_t pdp_phys = VMM_VMemGetPageTablePhys(pml4_phys, vaddr, 0, 0); + if (!pdp_phys) return 0; + + const uint64_t pd_phys = VMM_VMemGetPageTablePhys(pdp_phys, vaddr, 1, 0); + if (!pd_phys) return 0; + + const uint64_t* pd_virt = VMM_GetTableVirt(pd_phys); + const uint32_t pd_index = vaddr >> PD_SHIFT & PT_INDEX_MASK; + const uint64_t pde = pd_virt[pd_index]; + + if (!(pde & PAGE_PRESENT)) return 0; + + if (pde & PAGE_LARGE) { + const uint64_t base = pde & PT_ADDR_MASK; + return base & ~(HUGE_PAGE_SIZE - 1) | vaddr & HUGE_PAGE_SIZE - 1; + } + + const uint64_t pt_phys = VMM_VMemGetPageTablePhys(pd_phys, vaddr, 2, 0); + if (!pt_phys) return 0; + + const uint64_t* pt_virt = VMM_GetTableVirt(pt_phys); + const uint32_t pt_index = vaddr >> PT_SHIFT & PT_INDEX_MASK; + const uint64_t pte = pt_virt[pt_index]; + + if (!(pte & PAGE_PRESENT)) return 0; + return pte & PT_ADDR_MASK | vaddr & PAGE_MASK; +} + +int VMM::VMM_VMemUnmap(const uint64_t vaddr, const uint64_t size) { + if (size == 0) return VMEM_SUCCESS; + + const uint64_t start = PAGE_ALIGN_DOWN(vaddr); + const uint64_t end = PAGE_ALIGN_UP(vaddr + size); + const uint64_t num_pages = (end - start) / PAGE_SIZE; + + SpinlockGuard lock(vmem_lock); + + for (uint64_t i = 0; i < num_pages; i++) { + const uint64_t current_vaddr = start + i * PAGE_SIZE; + + const auto pml4_phys = reinterpret_cast(kernel_space.pml4); + const uint64_t pdp_phys = VMM_VMemGetPageTablePhys(pml4_phys, current_vaddr, 0, 0); + if (!pdp_phys) continue; + + const uint64_t pd_phys = VMM_VMemGetPageTablePhys(pdp_phys, current_vaddr, 1, 0); + if (!pd_phys) continue; + + uint64_t* pd_virt = VMM_GetTableVirt(pd_phys); + const uint32_t pd_index = current_vaddr >> PD_SHIFT & PT_INDEX_MASK; + + if (const uint64_t pde = pd_virt[pd_index]; pde & PAGE_PRESENT && pde & PAGE_LARGE) { + if (IS_HUGE_PAGE_ALIGNED(current_vaddr) && end - current_vaddr >= HUGE_PAGE_SIZE) { + pd_virt[pd_index] = 0; + kernel_space.used_pages -= HUGE_PAGE_SIZE / PAGE_SIZE; + kernel_space.total_mapped -= HUGE_PAGE_SIZE; + VMM_add_to_tlb_batch(current_vaddr); + i += HUGE_PAGE_SIZE / PAGE_SIZE - 1; + continue; + } + } + + const uint64_t pt_phys = VMM_VMemGetPageTablePhys(pd_phys, current_vaddr, 2, 0); + if (!pt_phys) continue; + + uint64_t* pt_virt = VMM_GetTableVirt(pt_phys); + + if (const uint32_t pt_index = current_vaddr >> PT_SHIFT & PT_INDEX_MASK; pt_virt[pt_index] & PAGE_PRESENT) { + pt_virt[pt_index] = 0; + kernel_space.used_pages--; + kernel_space.total_mapped -= PAGE_SIZE; + VMM_add_to_tlb_batch(current_vaddr); + } + } + + VMM_flush_tlb_batch(); + return VMEM_SUCCESS; +} + +void VMM::VMM_VMemGetStats(uint64_t* used_pages, uint64_t* total_mapped) { + SpinlockGuard lock(vmem_lock); + if (used_pages) *used_pages = kernel_space.used_pages; + if (total_mapped) *total_mapped = kernel_space.total_mapped; +} + +int VMM::VMM_VMemMapMMIO(uint64_t vaddr, uint64_t paddr, uint64_t size, uint64_t flags) { + if (!IS_PAGE_ALIGNED(vaddr) || !IS_PAGE_ALIGNED(paddr) || !IS_PAGE_ALIGNED(size)) { + return VMEM_ERROR_ALIGN; + } + if (!VMM_IsValidVirtAddr(vaddr)) { + return VMEM_ERROR_INVALID_ADDR; + } + + uint64_t mmio_flags = flags | PAGE_PRESENT | PAGE_NOCACHE | PAGE_WRITETHROUGH; + uint64_t num_pages = size / PAGE_SIZE; + + SpinlockGuard lock(vmem_lock); + + for (uint64_t i = 0; i < num_pages; i++) { + uint64_t current_vaddr = vaddr + i * 
PAGE_SIZE; + uint64_t current_paddr = paddr + i * PAGE_SIZE; + + uint64_t pdp_phys = VMM_VMemGetPageTablePhys(reinterpret_cast(kernel_space.pml4), current_vaddr, 0, 1); + if (!pdp_phys) { + return VMEM_ERROR_NOMEM; + } + + uint64_t pd_phys = VMM_VMemGetPageTablePhys(pdp_phys, current_vaddr, 1, 1); + if (!pd_phys) { + return VMEM_ERROR_NOMEM; + } + + uint64_t pt_phys = VMM_VMemGetPageTablePhys(pd_phys, current_vaddr, 2, 1); + if (!pt_phys) { + return VMEM_ERROR_NOMEM; + } + + uint64_t* pt_virt = VMM_GetTableVirt(pt_phys); + uint32_t pt_index = current_vaddr >> PT_SHIFT & PT_INDEX_MASK; + + if (pt_virt[pt_index] & PAGE_PRESENT) { + return VMEM_ERROR_ALREADY_MAPPED; + } + + pt_virt[pt_index] = current_paddr | mmio_flags; + VMM_add_to_tlb_batch(current_vaddr); + } + + VMM_flush_tlb_batch(); + __asm__ volatile("mfence" ::: "memory"); + return VMEM_SUCCESS; +} + +void VMM::VMM_VMemUnmapMMIO(const uint64_t vaddr, const uint64_t size) { + if (!IS_PAGE_ALIGNED(vaddr) || !IS_PAGE_ALIGNED(size) || size == 0) { + return; + } + + const uint64_t num_pages = size / PAGE_SIZE; + SpinlockGuard lock(vmem_lock); + const uint64_t pml4_phys = VMemGetPML4PhysAddr(); + + for (uint64_t i = 0; i < num_pages; i++) { + const uint64_t current_vaddr = vaddr + i * PAGE_SIZE; + + const uint64_t pdp_phys = VMM_VMemGetPageTablePhys(pml4_phys, current_vaddr, 0, 0); + if (!pdp_phys) continue; + + const uint64_t pd_phys = VMM_VMemGetPageTablePhys(pdp_phys, current_vaddr, 1, 0); + if (!pd_phys) continue; + + const uint64_t pt_phys = VMM_VMemGetPageTablePhys(pd_phys, current_vaddr, 2, 0); + if (!pt_phys) continue; + + uint64_t* pt_table = VMM_GetTableVirt(pt_phys); + + if (const uint32_t pt_index = current_vaddr >> PT_SHIFT & PT_INDEX_MASK; + pt_table[pt_index] & PAGE_PRESENT) { + pt_table[pt_index] = 0; + VMM_add_to_tlb_batch(current_vaddr); + } + } + + VMM_flush_tlb_batch(); +} + +void* VMM::VMM_VMemAllocStack(const uint64_t size) { + if (size == 0) return nullptr; + + const uint64_t stack_size = PAGE_ALIGN_UP(size); + // We need space for the stack itself, plus one guard page at the bottom. + const uint64_t total_size = stack_size + PAGE_SIZE; + + // Allocate the entire region (guard + stack) + void* base_ptr = VMM_VMemAlloc(total_size); + if (!base_ptr) return nullptr; + + const auto base_addr = reinterpret_cast(base_ptr); + + // The guard page is the very first page in the allocation. + const uint64_t guard_page_vaddr = base_addr; + + // Get the physical page backing the guard page so we can free it. + const uint64_t paddr_guard = VMM_VMemGetPhysAddr(guard_page_vaddr); + + // Unmap the guard page. Any write to it will now cause a #PF. + VMM_VMemUnmap(guard_page_vaddr, PAGE_SIZE); + + // Return the physical page to the system's page allocator. + if (paddr_guard) { + FreePage(reinterpret_cast(paddr_guard)); + } + + // IMPORTANT: The stack pointer must start at the TOP of the allocated region. + // The top is the base address + the total size allocated. + const uint64_t stack_top = base_addr + total_size; + + return reinterpret_cast(stack_top); +} + +void VMM::VMM_VMemFreeStack(void* stack_top, const uint64_t size) { + if (!stack_top || size == 0) return; + + const uint64_t stack_size = PAGE_ALIGN_UP(size); + const uint64_t total_size = stack_size + PAGE_SIZE; + + // Re-calculate the original base address of the allocation. + const auto top_addr = reinterpret_cast(stack_top); + const uint64_t base_addr = top_addr - total_size; + + // The guard page is already unmapped. VMemFree will handle the virtual space. 
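// --- Illustrative sketch (hypothetical usage, not part of this change): the stack
// layout produced by VMM_VMemAllocStack() above. The returned value is one byte past
// the highest usable byte (x86-64 stacks grow downward), and the same size must be
// passed back on free. Assumes the C wrappers VMemAllocStack()/VMemFreeStack(). ---
//
//   base_addr               base_addr + PAGE_SIZE                     stack_top
//   | guard page (unmapped) |            usable stack pages ...       |
//
static void ExampleStackRoundTrip(void) {
    const uint64_t kStackSize = 4 * 4096;     // caller-chosen size (example value)
    void* top = VMemAllocStack(kStackSize);   // returns base + PAGE_SIZE + kStackSize
    if (!top) return;
    // ... load `top` into RSP for a new task; frames are pushed downward toward the
    // unmapped guard page, so an overflow faults instead of corrupting memory ...
    VMemFreeStack(top, kStackSize);           // must be the same size used on alloc
}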
+ // We just need to free the original allocation block. + VMM_VMemFree(reinterpret_cast(base_addr), total_size); +} + +void VMM::VMM_PrintVMemStats() { + SpinlockGuard lock(vmem_lock); + const uint64_t used = kernel_space.used_pages; + const uint64_t mapped = kernel_space.total_mapped; + const uint64_t allocs = vmem_allocations; + const uint64_t frees = vmem_frees; + const uint64_t flushes = tlb_flushes; + + PrintKernel("[VMEM] Stats:\n"); + PrintKernel(" Used pages: "); PrintKernelInt(static_cast(used)); PrintKernel("\n"); + PrintKernel(" Mapped: "); PrintKernelInt(static_cast(mapped / (1024 * 1024))); PrintKernel("MB\n"); + PrintKernel(" Allocs: "); PrintKernelInt(static_cast(allocs)); PrintKernel(", Frees: "); + PrintKernelInt(static_cast(frees)); PrintKernel("\n"); + PrintKernel(" TLB flushes: "); PrintKernelInt(static_cast(flushes)); PrintKernel("\n"); +} + +uint64_t VMM::VMM_VMemGetPML4PhysAddr() { + return reinterpret_cast(kernel_space.pml4); // This is already physical +} + +int VMM::VMM_IsValidPhysAddr(const uint64_t paddr) { + return paddr != 0 && paddr < total_pages * PAGE_SIZE; +} + +int VMM::VMM_IsValidVirtAddr(const uint64_t vaddr) { + // Check canonical address ranges + return (vaddr >= VIRT_ADDR_SPACE_LOW_START && vaddr <= VIRT_ADDR_SPACE_LOW_END) || + (vaddr >= VIRT_ADDR_SPACE_HIGH_START && vaddr <= VIRT_ADDR_SPACE_HIGH_END) || + (vaddr >= KERNEL_SPACE_START && vaddr <= KERNEL_SPACE_END); +} + +uint64_t* VMM::VMM_GetTableVirt(const uint64_t phys_addr) { + return phys_addr < IDENTITY_MAP_SIZE ? + reinterpret_cast(phys_addr) : static_cast(PHYS_TO_VIRT(phys_addr)); +} + +void VMM::VMM_flush_tlb_batch() { + if (tlb_batch_count == 0) return; + + if (tlb_batch_count > 8) { + VMM_VMemFlushTLB(); + } else { + for (uint32_t i = 0; i < tlb_batch_count; i++) { + __asm__ volatile("invlpg (%0)" :: "r"(tlb_batch[i]) : "memory"); + } + } + tlb_batch_count = 0; + tlb_flushes++; +} + +void VMM::VMM_add_to_tlb_batch(const uint64_t vaddr) { + if (tlb_batch_count >= MAX_TLB_BATCH) { + VMM_flush_tlb_batch(); + } + tlb_batch[tlb_batch_count++] = vaddr; +} + +static void* VMM_alloc_identity_page_table() { + if (pt_cache_count > 0) { + return pt_cache[--pt_cache_count]; + } + + for (uint32_t attempt = 0; attempt < 32; attempt++) { + void* candidate = AllocPage(); + if (!candidate) break; + if (reinterpret_cast(candidate) < IDENTITY_MAP_SIZE) { + FastZeroPage(candidate); + return candidate; + } + FreePage(candidate); + } + return nullptr; +} + +void VMM::VMM_cache_page_table(void* pt) { + if (pt_cache_count < PT_CACHE_SIZE && reinterpret_cast(pt) < IDENTITY_MAP_SIZE) { + pt_cache[pt_cache_count++] = pt; + } else { + FreePage(pt); + } +} + +uint64_t VMM::VMM_VMemGetPageTablePhys(const uint64_t pml4_phys, const uint64_t vaddr, const uint32_t level, const int create) { + if (!VMM_IsValidPhysAddr(pml4_phys)) return 0; + + uint64_t* table_virt = VMM_GetTableVirt(pml4_phys); + const uint32_t shift = 39U - level * 9U; + const uint32_t index = vaddr >> shift & PT_INDEX_MASK; + + if (!(table_virt[index] & PAGE_PRESENT)) { + if (!create) return 0; + + void* new_table_phys = VMM_alloc_identity_page_table(); + if (!new_table_phys || !VMM_IsValidPhysAddr(reinterpret_cast(new_table_phys))) { + if (new_table_phys) FreePage(new_table_phys); + return 0; + } + + table_virt[index] = reinterpret_cast(new_table_phys) | PAGE_PRESENT | PAGE_WRITABLE; + return reinterpret_cast(new_table_phys); + } + + return table_virt[index] & PT_ADDR_MASK; +} + +void VMM::VMM_VMemFlushTLB() { + __asm__ volatile( + "mov %%cr3, %%rax\n" 
+ "mov %%rax, %%cr3\n" + ::: "rax", "memory" + ); +} + +void VMM::VMM_VMemFlushTLBSingle(uint64_t vaddr) { + __asm__ volatile("invlpg (%0)" :: "r"(vaddr) : "memory"); + tlb_flushes++; +} + +/** + *@section C-compatible interface + */ + +void VMemInit() { + VMM::init(); +} + +int VMemMap(const uint64_t vaddr, const uint64_t paddr, const uint64_t flags) { + return VMM::VMM_VMemMap(vaddr, paddr, flags); +} + +int VMemMapHuge(const uint64_t vaddr, const uint64_t paddr, const uint64_t flags) { + return VMM::VMM_VMemMapHuge(vaddr, paddr, flags); +} + +void* VMemAlloc(const uint64_t size) { + return VMM::VMM_VMemAlloc(size); +} + +void VMemFree(void* vaddr, const uint64_t size) { + VMM::VMM_VMemFree(vaddr, size); +} + +void* VMemAllocWithGuards(const uint64_t size) { + return VMM::VMM_VMemAllocWithGuards(size); +} + +void VMemFreeWithGuards(void* ptr, const uint64_t size) { + VMM::VMM_VMemFreeWithGuards(ptr, size); +} + +uint64_t VMemGetPhysAddr(const uint64_t vaddr) { + return VMM::VMM_VMemGetPhysAddr(vaddr); +} + +int VMemIsPageMapped(const uint64_t vaddr) { + return VMM::VMM_VMemGetPhysAddr(vaddr) != 0; +} + +int VMemUnmap(const uint64_t vaddr, const uint64_t size) { + return VMM::VMM_VMemUnmap(vaddr, size); +} + +void VMemGetStats(uint64_t* used_pages, uint64_t* total_mapped) { + VMM::VMM_VMemGetStats(used_pages, total_mapped); +} + +void PrintVMemStats() { + VMM::VMM_PrintVMemStats(); +} + +uint64_t VMemGetPML4PhysAddr() { + return VMM::VMM_VMemGetPML4PhysAddr(); +} + +int VMemMapMMIO(const uint64_t vaddr, const uint64_t paddr, const uint64_t size, const uint64_t flags) { + return VMM::VMM_VMemMapMMIO(vaddr, paddr, size, flags); +} + +void VMemUnmapMMIO(const uint64_t vaddr, const uint64_t size) { + VMM::VMM_VMemUnmapMMIO(vaddr, size); +} + +void* VMemAllocStack(const uint64_t size) { + return VMM::VMM_VMemAllocStack(size); +} + +void VMemFreeStack(void* stack_top, const uint64_t size) { + VMM::VMM_VMemFreeStack(stack_top, size); +} + +void VMemDumpFreeList() { + BuddyAllocator_DumpFreeList(&g_buddy_allocator); +} diff --git a/mm/VMem.h b/mm/VMem.h index 7f56909..38b8def 100644 --- a/mm/VMem.h +++ b/mm/VMem.h @@ -72,20 +72,8 @@ _Static_assert(VIRT_ADDR_SPACE_HIGH_END < KERNEL_VIRTUAL_OFFSET, "High canonical #define PAGE_ALIGN_UP(addr) (((addr) + PAGE_MASK) & ~PAGE_MASK) #define IS_PAGE_ALIGNED(addr) (((addr) & PAGE_MASK) == 0) #define VALIDATE_PTR(ptr) do { if (!(ptr)) return NULL; } while(0) - #define VMEM_GUARD_PAGES 1 // Add guard pages around allocations -/** - * @brief Represents a block of free virtual address space. 
- */ -typedef struct VMemFreeBlock { - uint64_t base; - uint64_t size; - struct VMemFreeBlock* next; - struct VMemFreeBlock* prev; - struct VMemFreeBlock* hnext; // For hash table chaining -} VMemFreeBlock; - /** * @brief Virtual address space structure */ @@ -118,6 +106,10 @@ typedef enum { VMEM_ERROR_NO_VSPACE = -6, } VMem_Result; +#ifdef __cplusplus +extern "C" { +#endif + // Core virtual memory functions void VMemInit(void); void* VMemAlloc(uint64_t size); @@ -141,25 +133,47 @@ int VMemMapMMIO(uint64_t vaddr, uint64_t paddr, uint64_t size, uint64_t flags); void VMemUnmapMMIO(uint64_t vaddr, uint64_t size); // Page table management functions -uint64_t* VMemGetPageTable(uint64_t* pml4, uint64_t vaddr, int level, int create); -int VMemSetPageFlags(uint64_t vaddr, uint64_t flags); uint64_t VMemGetPhysAddr(uint64_t vaddr); int VMemIsPageMapped(uint64_t vaddr); - -// Address space management -VirtAddrSpace* VMemCreateAddressSpace(void); -void VMemDestroyAddressSpace(VirtAddrSpace* space); -int VMemSwitchAddressSpace(VirtAddrSpace* space); - -// Utility functions -void VMemFlushTLB(void); -void VMemFlushTLBSingle(uint64_t vaddr); void VMemGetStats(uint64_t* used_pages, uint64_t* total_mapped); uint64_t VMemGetPML4PhysAddr(void); - -// Debug functions -void VMemDumpPageTable(uint64_t vaddr); -void VMemValidatePageTable(uint64_t* pml4); void VMemDumpFreeList(void); -#endif // VMEM_H \ No newline at end of file +#ifdef __cplusplus +} +#endif + +#ifdef __cplusplus +class VMM { +public: + VMM() = default; + static void init(); + static int VMM_VMemMap(uint64_t vaddr, uint64_t paddr, uint64_t flags); + static int VMM_VMemUnmap(uint64_t vaddr, uint64_t size); + static int VMM_VMemMapHuge(uint64_t vaddr, uint64_t paddr, uint64_t flags); + static void* VMM_VMemAlloc(uint64_t size_p); + static void VMM_VMemFree(void* vaddr, uint64_t size_p); + static void* VMM_VMemAllocWithGuards(uint64_t size_p); + static void VMM_VMemFreeWithGuards(void* ptr, uint64_t size_p); + static void* VMM_VMemAllocStack(uint64_t size); + static void VMM_VMemFreeStack(void* stack_top, uint64_t size); + static int VMM_VMemMapMMIO(uint64_t vaddr, uint64_t paddr, uint64_t size, uint64_t flags); + static void VMM_VMemUnmapMMIO(uint64_t vaddr, uint64_t size); + static void VMM_VMemGetStats(uint64_t* used_pages, uint64_t* total_mapped); + static uint64_t VMM_VMemGetPML4PhysAddr(); + static uint64_t VMM_VMemGetPhysAddr(uint64_t vaddr); + static void VMM_PrintVMemStats(); +private: + static uint64_t VMM_VMemGetPageTablePhys(uint64_t pml4_phys, uint64_t vaddr, uint32_t level, int create); + static void VMM_add_to_tlb_batch(uint64_t vaddr); + static void VMM_cache_page_table(void *pt); + static void VMM_flush_tlb_batch(); + static void VMM_VMemFlushTLBSingle(uint64_t vaddr); + static void VMM_VMemFlushTLB(); + static int VMM_IsValidPhysAddr(uint64_t paddr); + static int VMM_IsValidVirtAddr(uint64_t vaddr); + static uint64_t* VMM_GetTableVirt(uint64_t phys_addr); +}; +#endif + +#endif // VMEM_H diff --git a/mm/dynamic/c/Magazine.h b/mm/dynamic/c/Magazine.h index c7e0dfb..7740cc8 100644 --- a/mm/dynamic/c/Magazine.h +++ b/mm/dynamic/c/Magazine.h @@ -122,6 +122,9 @@ extern PerCpuCache per_cpu_caches[MAX_CPU_CORES]; // The single, global depot. 
diff --git a/mm/dynamic/c/Magazine.h b/mm/dynamic/c/Magazine.h
index c7e0dfb..7740cc8 100644
--- a/mm/dynamic/c/Magazine.h
+++ b/mm/dynamic/c/Magazine.h
@@ -122,6 +122,9 @@ extern PerCpuCache per_cpu_caches[MAX_CPU_CORES];
 // The single, global depot.
 extern Depot depot;
 
+#ifdef __cplusplus
+extern "C" {
+#endif
 void MagazineInit();
 
 void* MagazineAlloc(size_t size);
@@ -133,4 +136,8 @@ void MagazineSetValidationLevel(int level);
 void MagazineFlushCaches(void);
 void MagazineSetPerfMode(int mode);
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif // MAGAZINE_HEAP_H
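The extern "C" guards added above let C++ code link against the magazine heap without name mangling. A small sketch, not part of the diff: SmallNode and NewSmallNode are hypothetical, the include is assumed to resolve through mm/dynamic/c on the include path, and the matching free routine is not visible in this hunk.

#include "Magazine.h"

struct SmallNode {
    uint64_t key;
    SmallNode* next;
};

// Hypothetical C++ caller; the cast is required because MagazineAlloc
// returns void*, which C++ does not convert implicitly.
static SmallNode* NewSmallNode(uint64_t key) {
    SmallNode* n = static_cast<SmallNode*>(MagazineAlloc(sizeof(SmallNode)));
    if (n) {
        n->key = key;
        n->next = nullptr;
    }
    return n;
}
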
diff --git a/mm/dynamic/cpp/BuddyAllocator.cpp b/mm/dynamic/cpp/BuddyAllocator.cpp
new file mode 100644
index 0000000..7d4bb99
--- /dev/null
+++ b/mm/dynamic/cpp/BuddyAllocator.cpp
@@ -0,0 +1,236 @@
+#include
+#include
+#include
+#include <../../../kernel/atomic/cpp/Spinlock.h>
+
+// Buddy allocator constants
+#define BUDDY_MIN_ORDER 12   // 4KB pages
+#define BUDDY_MAX_ORDER 30   // 1GB max allocation
+#define BUDDY_NUM_ORDERS (BUDDY_MAX_ORDER - BUDDY_MIN_ORDER + 1)
+
+// Hash table for fast buddy lookup
+#define HASH_TABLE_SIZE 4096 // Must be a power of 2
+
+// Pre-allocated pool for buddy nodes
+#define MAX_BUDDY_NODES 2048
+
+typedef struct VMemFreeBlock {
+    uint64_t base;
+    uint64_t size;
+    struct VMemFreeBlock* next;
+    struct VMemFreeBlock* prev;
+    struct VMemFreeBlock* hnext; // For hash table chaining
+} VMemFreeBlock;
+
+struct BuddyAllocator {
+    VMemFreeBlock* free_lists[BUDDY_NUM_ORDERS];
+    VMemFreeBlock* hash_table[HASH_TABLE_SIZE];
+    VMemFreeBlock node_pool[MAX_BUDDY_NODES];
+    VMemFreeBlock* node_head;
+    Spinlock lock;
+};
+
+BuddyAllocator g_buddy_allocator;
+
+static uint32_t HashAddress(uint64_t addr) {
+    return static_cast<uint32_t>((addr >> BUDDY_MIN_ORDER) * 2654435761) & (HASH_TABLE_SIZE - 1);
+}
+
+static uint32_t GetOrder(uint64_t size) {
+    if (size <= PAGE_SIZE) return 0;
+    return 64 - __builtin_clzll(size - 1) - BUDDY_MIN_ORDER;
+}
+
+static uint64_t OrderToSize(uint32_t order) {
+    return 1ULL << (order + BUDDY_MIN_ORDER);
+}
+
+static void InitBuddyNodePool(BuddyAllocator* allocator) {
+    allocator->node_head = &allocator->node_pool[0];
+    for (int i = 0; i < MAX_BUDDY_NODES - 1; ++i) {
+        allocator->node_pool[i].next = &allocator->node_pool[i + 1];
+    }
+    allocator->node_pool[MAX_BUDDY_NODES - 1].next = nullptr;
+}
+
+static VMemFreeBlock* AllocBuddyNode(BuddyAllocator* allocator) {
+    if (!allocator->node_head) return nullptr;
+    VMemFreeBlock* node = allocator->node_head;
+    allocator->node_head = node->next;
+    return node;
+}
+
+static void ReleaseBuddyNode(BuddyAllocator* allocator, VMemFreeBlock* node) {
+    node->next = allocator->node_head;
+    allocator->node_head = node;
+}
+
+static void AddFreeBlock(BuddyAllocator* allocator, uint64_t addr, uint32_t order) {
+    if (order >= BUDDY_NUM_ORDERS) return;
+
+    VMemFreeBlock* node = AllocBuddyNode(allocator);
+    if (!node) return;
+
+    node->base = addr;
+    node->size = OrderToSize(order);
+    node->prev = nullptr;
+    node->hnext = nullptr;
+
+    node->next = allocator->free_lists[order];
+    if (allocator->free_lists[order]) {
+        allocator->free_lists[order]->prev = node;
+    }
+    allocator->free_lists[order] = node;
+
+    uint32_t hash = HashAddress(addr);
+    node->hnext = allocator->hash_table[hash];
+    allocator->hash_table[hash] = node;
+}
+
+static VMemFreeBlock* FindFreeBlock(BuddyAllocator* allocator, uint64_t addr, uint32_t order) {
+    uint32_t hash = HashAddress(addr);
+    VMemFreeBlock* curr = allocator->hash_table[hash];
+    uint64_t size = OrderToSize(order);
+
+    while (curr) {
+        if (curr->base == addr && curr->size == size) {
+            return curr;
+        }
+        curr = curr->hnext;
+    }
+    return nullptr;
+}
+
+static void RemoveFreeBlock(BuddyAllocator* allocator, VMemFreeBlock* node, uint32_t order) {
+    if (node->prev) {
+        node->prev->next = node->next;
+    } else {
+        allocator->free_lists[order] = node->next;
+    }
+    if (node->next) {
+        node->next->prev = node->prev;
+    }
+
+    uint32_t hash = HashAddress(node->base);
+    VMemFreeBlock* prev_h = nullptr;
+    VMemFreeBlock* curr_h = allocator->hash_table[hash];
+    while (curr_h) {
+        if (curr_h == node) {
+            if (prev_h) {
+                prev_h->hnext = curr_h->hnext;
+            } else {
+                allocator->hash_table[hash] = curr_h->hnext;
+            }
+            break;
+        }
+        prev_h = curr_h;
+        curr_h = curr_h->hnext;
+    }
+
+    ReleaseBuddyNode(allocator, node);
+}
+
+void BuddyAllocator_Create(uint64_t start, uint64_t size) {
+    for (auto& list : g_buddy_allocator.free_lists) {
+        list = nullptr;
+    }
+    for (auto& entry : g_buddy_allocator.hash_table) {
+        entry = nullptr;
+    }
+
+    InitBuddyNodePool(&g_buddy_allocator);
+
+    uint64_t addr = start;
+    uint64_t remaining = size;
+    while (remaining >= PAGE_SIZE) {
+        uint32_t order = BUDDY_NUM_ORDERS - 1;
+        while (order > 0 && OrderToSize(order) > remaining) {
+            order--;
+        }
+        AddFreeBlock(&g_buddy_allocator, addr, order);
+        uint64_t block_size = OrderToSize(order);
+        addr += block_size;
+        remaining -= block_size;
+    }
+}
+
+uint64_t BuddyAllocator_Allocate(BuddyAllocator* allocator, uint64_t size) {
+    SpinlockGuard lock(allocator->lock);
+
+    uint32_t order = GetOrder(size);
+    if (order >= BUDDY_NUM_ORDERS) {
+        return 0;
+    }
+
+    for (uint32_t curr_order = order; curr_order < BUDDY_NUM_ORDERS; curr_order++) {
+        VMemFreeBlock* block = allocator->free_lists[curr_order];
+        if (!block) continue;
+
+        // Capture the base before the node is recycled back into the pool.
+        uint64_t addr = block->base;
+        RemoveFreeBlock(allocator, block, curr_order);
+
+        while (curr_order > order) {
+            curr_order--;
+            uint64_t buddy_addr = addr + OrderToSize(curr_order);
+            AddFreeBlock(allocator, buddy_addr, curr_order);
+        }
+
+        return addr;
+    }
+
+    return 0;
+}
+
+void BuddyAllocator_Free(BuddyAllocator* allocator, uint64_t address, uint64_t size) {
+    SpinlockGuard lock(allocator->lock);
+
+    uint32_t order = GetOrder(size);
+    if (order >= BUDDY_NUM_ORDERS) {
+        return;
+    }
+
+    while (order < BUDDY_NUM_ORDERS - 1) {
+        uint64_t buddy_addr = address ^ OrderToSize(order);
+        VMemFreeBlock* buddy = FindFreeBlock(allocator, buddy_addr, order);
+
+        if (!buddy) break;
+
+        RemoveFreeBlock(allocator, buddy, order);
+
+        if (buddy_addr < address) {
+            address = buddy_addr;
+        }
+        order++;
+    }
+
+    AddFreeBlock(allocator, address, order);
+}
+
+void BuddyAllocator_DumpFreeList(BuddyAllocator* allocator) {
+    SpinlockGuard lock(allocator->lock);
+    PrintKernel("[VMEM] Buddy Allocator Free Blocks:\n");
+
+    uint64_t total_free = 0;
+    for (uint32_t order = 0; order < BUDDY_NUM_ORDERS; order++) {
+        uint32_t count = 0;
+        VMemFreeBlock* current = allocator->free_lists[order];
+        while (current) {
+            count++;
+            current = current->next;
+        }
+
+        if (count > 0) {
+            uint64_t block_size = OrderToSize(order);
+            uint64_t total_size = count * block_size;
+            total_free += total_size;
+
+            PrintKernel("  Order "); PrintKernelInt(order);
+            PrintKernel(" ("); PrintKernelInt(block_size / 1024); PrintKernel("KB): ");
+            PrintKernelInt(count); PrintKernel(" blocks, ");
+            PrintKernelInt(total_size / (1024 * 1024)); PrintKernel("MB total\n");
+        }
+    }
+
+    PrintKernel("[VMEM] Total free: "); PrintKernelInt(total_free / (1024 * 1024)); PrintKernel("MB\n");
+}
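A usage sketch for the allocator above, illustrative only: the region base is a made-up virtual address, BuddySmokeTest is hypothetical, and g_buddy_allocator is assumed to be declared to the caller (it is defined in BuddyAllocator.cpp but not declared in the header that follows).

#include "BuddyAllocator.h"          // assumed reachable via mm/dynamic/cpp on the include path

extern BuddyAllocator g_buddy_allocator;   // assumption: exposed to callers

static void BuddySmokeTest(void) {
    const uint64_t region_base = 0xFFFFC90000000000ULL;   // hypothetical region
    const uint64_t region_size = 16ULL * 1024 * 1024;     // 16 MiB

    BuddyAllocator_Create(region_base, region_size);

    // Request 64 KiB: GetOrder maps this to order 4, and a larger free
    // block is split down until a block of that order is available.
    uint64_t block = BuddyAllocator_Allocate(&g_buddy_allocator, 64 * 1024);
    if (block) {
        // ... map and use the range ...
        // Freeing with the same size lets the XOR buddy computation
        // (address ^ OrderToSize(order)) re-coalesce the split halves.
        BuddyAllocator_Free(&g_buddy_allocator, block, 64 * 1024);
    }

    BuddyAllocator_DumpFreeList(&g_buddy_allocator);
}
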
diff --git a/mm/dynamic/cpp/BuddyAllocator.h b/mm/dynamic/cpp/BuddyAllocator.h
new file mode 100644
index 0000000..31b5ad3
--- /dev/null
+++ b/mm/dynamic/cpp/BuddyAllocator.h
@@ -0,0 +1,47 @@
+#pragma once
+
+#include
+
+// Forward declaration for the C-style struct
+typedef struct BuddyAllocator BuddyAllocator;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @brief Initializes the buddy allocator with a memory region.
+ *
+ * @param start The start address of the memory region.
+ * @param size The size of the memory region.
+ */
+void BuddyAllocator_Create(uint64_t start, uint64_t size);
+
+/**
+ * @brief Allocates a block of memory of at least `size` bytes.
+ *
+ * @param allocator The allocator instance.
+ * @param size The minimum size of the block to allocate.
+ * @return The address of the allocated block, or 0 if no block is available.
+ */
+uint64_t BuddyAllocator_Allocate(BuddyAllocator* allocator, uint64_t size);
+
+/**
+ * @brief Frees a previously allocated block of memory.
+ *
+ * @param allocator The allocator instance.
+ * @param address The address of the block to free.
+ * @param size The size of the block to free.
+ */
+void BuddyAllocator_Free(BuddyAllocator* allocator, uint64_t address, uint64_t size);
+
+/**
+ * @brief Dumps the state of the free list to the kernel console.
+ *
+ * @param allocator The allocator instance.
+ */
+void BuddyAllocator_DumpFreeList(BuddyAllocator* allocator);
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/mm/dynamic/cpp/new.cpp b/mm/dynamic/cpp/new.cpp
new file mode 100644
index 0000000..fe6af33
--- /dev/null
+++ b/mm/dynamic/cpp/new.cpp
@@ -0,0 +1,9 @@
+#include
+
+void* operator new(unsigned long size) {
+    return KernelMemoryAlloc(size);
+}
+
+void operator delete(void* ptr) {
+    KernelFree(ptr);
+}
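new.cpp above provides the basic scalar forms of operator new and delete. Depending on compiler flags this diff does not show (for example whether sized deallocation is enabled), the array and sized forms can also be referenced by generated code; a minimal companion sketch, assuming the same KernelMemoryAlloc/KernelFree backing declared by the header new.cpp already includes:

// Companion operators (sketch). The array forms mirror the scalar ones, and the
// sized overloads ignore the size argument, since KernelFree only takes a pointer here.
void* operator new[](unsigned long size) {
    return KernelMemoryAlloc(size);
}

void operator delete[](void* ptr) {
    KernelFree(ptr);
}

void operator delete(void* ptr, unsigned long) {
    KernelFree(ptr);
}

void operator delete[](void* ptr, unsigned long) {
    KernelFree(ptr);
}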