From 67fb94e8069bda99da69cc11182ff58684091d42 Mon Sep 17 00:00:00 2001 From: PeterWrighten Date: Tue, 11 Nov 2025 20:37:53 +0900 Subject: [PATCH 1/2] feat(RV64): implement dynamic memory/cpu decider, timer and interrupt - Created FDT parser for dynamic memory/cpu detection from device tree, with fallback mechanisms - Register and implement RISC-V timer - Register and implement interrupt controller and IPI --- awkernel_lib/Cargo.toml | 3 +- awkernel_lib/src/arch/rv64.rs | 144 +++++++++ awkernel_lib/src/arch/rv64/address.rs | 6 +- awkernel_lib/src/arch/rv64/cpu.rs | 4 +- awkernel_lib/src/arch/rv64/delay.rs | 33 +- awkernel_lib/src/arch/rv64/fdt.rs | 163 ++++++++++ awkernel_lib/src/arch/rv64/interrupt.rs | 2 + awkernel_lib/src/arch/rv64/vm.rs | 102 +++++- awkernel_lib/src/interrupt.rs | 51 +++ kernel/ld/rv64-link.lds | 5 +- kernel/src/arch/rv64.rs | 2 + kernel/src/arch/rv64/boot.S | 226 +++++++++++++- kernel/src/arch/rv64/config.rs | 2 - kernel/src/arch/rv64/interrupt_controller.rs | 197 ++++++++++++ kernel/src/arch/rv64/kernel_main.rs | 309 +++++++++++++++---- kernel/src/arch/rv64/timer.rs | 85 +++++ 16 files changed, 1252 insertions(+), 82 deletions(-) create mode 100644 awkernel_lib/src/arch/rv64/fdt.rs create mode 100644 kernel/src/arch/rv64/interrupt_controller.rs create mode 100644 kernel/src/arch/rv64/timer.rs diff --git a/awkernel_lib/Cargo.toml b/awkernel_lib/Cargo.toml index aeef0e19f..873ead8f8 100644 --- a/awkernel_lib/Cargo.toml +++ b/awkernel_lib/Cargo.toml @@ -13,6 +13,7 @@ array-macro = "2.1" embedded-graphics-core = "0.4" embedded-graphics = "0.8" chrono = { version = "0.4", default-features = false, features = ["clock"], optional = true } +fdt = { version = "0.1", optional = true } [dependencies.awkernel_sync] git = "https://github.com/tier4/awkernel_sync.git" @@ -82,7 +83,7 @@ x86 = [ ] aarch64 = ["dep:awkernel_aarch64", "awkernel_sync/aarch64"] rv32 = ["dep:riscv", "awkernel_sync/rv32"] -rv64 = ["awkernel_sync/rv64"] +rv64 = ["awkernel_sync/rv64", 
"dep:fdt"] std = ["dep:libc", "dep:socket2", "awkernel_sync/std"] spinlock = ["awkernel_sync/spinlock"] # LFN (Long File Name) support diff --git a/awkernel_lib/src/arch/rv64.rs b/awkernel_lib/src/arch/rv64.rs index e23965343..9c373e8c8 100644 --- a/awkernel_lib/src/arch/rv64.rs +++ b/awkernel_lib/src/arch/rv64.rs @@ -3,16 +3,35 @@ pub mod barrier; pub(super) mod cpu; pub(super) mod delay; pub(super) mod dvfs; +pub(super) mod fdt; pub(super) mod frame_allocator; pub(super) mod interrupt; pub(super) mod page_table; pub(super) mod paging; pub(super) mod vm; +use core::sync::atomic::{AtomicUsize, Ordering}; + pub struct RV64; impl super::Arch for RV64 {} +/// # Safety +/// +/// This function must be called at initialization, +/// and called by the primary CPU. +pub unsafe fn init_primary() { + delay::init_primary(); +} + +/// # Safety +/// +/// This function must be called at initialization, +/// and called by non-primary CPUs. +pub unsafe fn init_non_primary() { + delay::init_non_primary(); +} + pub fn init_page_allocator() { frame_allocator::init_page_allocator(); } @@ -39,3 +58,128 @@ pub fn translate_kernel_address(vpn: address::VirtPageNum) -> Option usize { + vm::get_heap_size() +} + +static BOOT_DTB_PTR: AtomicUsize = AtomicUsize::new(0); + +/// Record the DTB pointer supplied by firmware/bootloader. 
+pub fn set_boot_dtb_ptr(addr: usize) { + BOOT_DTB_PTR.store(addr, Ordering::Relaxed); +} + +fn boot_dtb_ptr() -> Option { + match BOOT_DTB_PTR.load(Ordering::Relaxed) { + 0 => None, + addr => Some(addr), + } +} + +/// Detect memory size from device tree or by probing +/// +/// Returns the detected memory size in bytes, or None if detection fails +pub fn detect_memory_size() -> Option { + unsafe { + if let Some(dtb_addr) = boot_dtb_ptr() { + if let Some(region) = fdt::detect_memory_from_dtb(dtb_addr) { + unsafe_puts("Memory detected from boot DTB pointer at 0x"); + crate::console::unsafe_print_hex_u32((dtb_addr >> 16) as u32); + crate::console::unsafe_print_hex_u32(dtb_addr as u32); + unsafe_puts("\r\n"); + return Some(region.size); + } else { + unsafe_puts("Boot DTB pointer invalid, falling back to probe\r\n"); + } + } + + // First, try to find DTB at common locations + if let Some(dtb_addr) = fdt::probe_dtb_locations() { + if let Some(region) = fdt::detect_memory_from_dtb(dtb_addr) { + unsafe_puts("Memory detected from DTB at 0x"); + crate::console::unsafe_print_hex_u32((dtb_addr >> 16) as u32); + crate::console::unsafe_print_hex_u32(dtb_addr as u32); + unsafe_puts("\r\n"); + return Some(region.size); + } + } + + // Fallback: Try memory probing + if let Some(size) = fdt::probe_memory_size() { + use crate::console::unsafe_puts; + unsafe_puts("Memory detected by probing\r\n"); + return Some(size); + } + } + None +} + +use crate::console::unsafe_puts; + +/// Get the memory end address based on detected or default memory size +/// +/// This function attempts to detect the actual available RAM and returns +/// an appropriate MEMORY_END value. Falls back to a safe default if detection fails. 
+pub fn get_memory_end() -> u64 { + const DEFAULT_MEMORY_END: u64 = 0xB0000000; // 768MB default + const DRAM_BASE: u64 = 0x80000000; + + if let Some(detected_size) = detect_memory_size() { + // Use detected size, but cap it at reasonable limits + let max_memory_end = DRAM_BASE + detected_size; + + // Don't exceed 2GB to be safe + let memory_end = max_memory_end.min(DRAM_BASE + 0x80000000); + + // Log the detection + unsafe { + use crate::console::unsafe_puts; + unsafe_puts("Detected RAM size: 0x"); + crate::console::unsafe_print_hex_u32((detected_size >> 32) as u32); + crate::console::unsafe_print_hex_u32(detected_size as u32); + unsafe_puts("\r\n"); + } + + memory_end + } else { + // Fall back to default + unsafe { + use crate::console::unsafe_puts; + unsafe_puts("Using default MEMORY_END (DTB not found)\r\n"); + } + DEFAULT_MEMORY_END + } +} + +/// Detect CPU count from device tree or return default +/// +/// Returns the detected number of CPUs, or a safe default if detection fails +pub fn detect_cpu_count() -> usize { + const DEFAULT_CPU_COUNT: usize = 4; + + unsafe { + if let Some(dtb_addr) = boot_dtb_ptr() { + if let Some(count) = fdt::detect_cpu_count_from_dtb(dtb_addr) { + unsafe_puts("Detected "); + crate::console::unsafe_print_hex_u32(count as u32); + unsafe_puts(" CPUs from boot DTB\r\n"); + return count; + } + } + + // Try to find DTB at common locations + if let Some(dtb_addr) = fdt::probe_dtb_locations() { + if let Some(count) = fdt::detect_cpu_count_from_dtb(dtb_addr) { + unsafe_puts("Detected "); + crate::console::unsafe_print_hex_u32(count as u32); + unsafe_puts(" CPUs from probed DTB\r\n"); + return count; + } + } + + // Fall back to default + unsafe_puts("Using default CPU count\r\n"); + DEFAULT_CPU_COUNT + } +} diff --git a/awkernel_lib/src/arch/rv64/address.rs b/awkernel_lib/src/arch/rv64/address.rs index cf896ca1b..2a0e00f1b 100644 --- a/awkernel_lib/src/arch/rv64/address.rs +++ b/awkernel_lib/src/arch/rv64/address.rs @@ -8,7 +8,11 @@ use 
core::{ pub const PAGE_SIZE: usize = 0x1000; // 4 KiB pub const PAGE_OFFSET: usize = 0xc; // 12 bits -pub const MEMORY_END: u64 = 0x8080_0000; +// Memory end for QEMU virt with 1GB RAM (-m 1G) +// QEMU provides 1GB at 0x80000000 - 0xC0000000 +// Using 0xC0000000 directly may be too aggressive, +// so we use 0xB0000000 (768MB from start) for safety +pub const MEMORY_END: u64 = 0xB0000000; /// Design abstraction struct: /// PhysAddr, VirtAddr, PhysPageNum, VirtPageNum diff --git a/awkernel_lib/src/arch/rv64/cpu.rs b/awkernel_lib/src/arch/rv64/cpu.rs index c579ddeab..309cdf978 100644 --- a/awkernel_lib/src/arch/rv64/cpu.rs +++ b/awkernel_lib/src/arch/rv64/cpu.rs @@ -4,7 +4,9 @@ impl CPU for super::RV64 { fn cpu_id() -> usize { let hartid: usize; unsafe { - core::arch::asm!("csrr {}, mhartid", out(reg) hartid); + // In M-mode, read from tp register (set during boot) + // This avoids repeatedly accessing mhartid CSR + core::arch::asm!("mv {}, tp", out(reg) hartid); } hartid } diff --git a/awkernel_lib/src/arch/rv64/delay.rs b/awkernel_lib/src/arch/rv64/delay.rs index 74aa52711..f55c4f3ae 100644 --- a/awkernel_lib/src/arch/rv64/delay.rs +++ b/awkernel_lib/src/arch/rv64/delay.rs @@ -1,9 +1,12 @@ use crate::delay::Delay; +use core::sync::atomic::{AtomicU64, Ordering}; // we should get this info from device tree const ACLINT_MTIME_BASE: u32 = 0x0200_0000 + 0x0000bff8; const RISCV_TIMEBASE_FREQ: u64 = 10_000_000; +static COUNT_START: AtomicU64 = AtomicU64::new(0); + impl Delay for super::RV64 { fn wait_interrupt() { unsafe { core::arch::asm!("wfi") }; @@ -16,19 +19,19 @@ impl Delay for super::RV64 { } fn uptime() -> u64 { - // as microsec - unsafe { - let mtime = ACLINT_MTIME_BASE as *const u64; - *mtime * 1_000_000 / RISCV_TIMEBASE_FREQ - } + let start = COUNT_START.load(Ordering::Acquire); + let mtime = ACLINT_MTIME_BASE as *const u64; + let now = unsafe { core::ptr::read_volatile(mtime) }; + let diff = now - start; + diff * 1_000_000 / RISCV_TIMEBASE_FREQ } fn 
uptime_nano() -> u128 { - // as microsec - unsafe { - let mtime = ACLINT_MTIME_BASE as *const u128; - *mtime * 1_000_000_000 / RISCV_TIMEBASE_FREQ as u128 - } + let start = COUNT_START.load(Ordering::Acquire); + let mtime = ACLINT_MTIME_BASE as *const u64; + let now = unsafe { core::ptr::read_volatile(mtime) }; + let diff = now - start; + diff as u128 * 1_000_000_000 / RISCV_TIMEBASE_FREQ as u128 } fn cpu_counter() -> u64 { @@ -39,3 +42,13 @@ impl Delay for super::RV64 { cycle } } + +pub(super) unsafe fn init_primary() { + let mtime = ACLINT_MTIME_BASE as *const u64; + let count = core::ptr::read_volatile(mtime); + COUNT_START.store(count, Ordering::Release); +} + +pub(super) unsafe fn init_non_primary() { + // No additional initialization needed for non-primary CPUs +} diff --git a/awkernel_lib/src/arch/rv64/fdt.rs b/awkernel_lib/src/arch/rv64/fdt.rs new file mode 100644 index 000000000..a1fc9443f --- /dev/null +++ b/awkernel_lib/src/arch/rv64/fdt.rs @@ -0,0 +1,163 @@ +//! Simple Flattened Device Tree (FDT) helpers for memory detection +//! +//! This module now leverages the `fdt` crate to extract memory information +//! while keeping a small, RV64-centric surface for the rest of the kernel. + +use fdt::Fdt; + +/// FDT Magic number (0xd00dfeed in big-endian) +const FDT_MAGIC: u32 = 0xd00dfeed; + +const RISCV_DRAM_BASE: u64 = 0x8000_0000; +const RISCV_DRAM_MAX_BASE: u64 = 0x9000_0000; +const MAX_REASONABLE_REGION_SIZE: u64 = 0x1_0000_0000; // 4GB + +/// Convert big-endian u32 to native endian +#[inline] +fn be32_to_cpu(val: u32) -> u32 { + u32::from_be(val) +} + +/// Memory region information +#[derive(Debug, Clone, Copy)] +pub struct MemoryRegion { + pub base: u64, + pub size: u64, +} + +/// Detect memory size from device tree blob +/// +/// # Safety +/// +/// This function reads from the DTB address which must point to valid memory. 
+pub unsafe fn detect_memory_from_dtb(dtb_addr: usize) -> Option { + // Check if DTB address looks valid (aligned and non-zero) + if dtb_addr == 0 || dtb_addr & 0x3 != 0 { + return None; + } + + let fdt = unsafe { Fdt::from_ptr(dtb_addr as *const u8).ok()? }; + let memory = fdt.memory(); + let mut regions = memory.regions().filter_map(convert_region); + + regions.find(is_reasonable_region) +} + +fn convert_region(region: fdt::standard_nodes::MemoryRegion) -> Option { + let size = region.size? as u64; + if size == 0 { + return None; + } + + Some(MemoryRegion { + base: region.starting_address as usize as u64, + size, + }) +} + +fn is_reasonable_region(region: &MemoryRegion) -> bool { + region.base >= RISCV_DRAM_BASE + && region.base < RISCV_DRAM_MAX_BASE + && region.size > 0 + && region.size < MAX_REASONABLE_REGION_SIZE +} + +/// Probe common DTB locations used by QEMU and firmware +pub unsafe fn probe_dtb_locations() -> Option { + // Common DTB locations for RISC-V: + // 1. 0x82000000 - QEMU often places DTB here + // 2. 0x88000000 - Alternative location + // 3. 0x87000000 - Another common location + // 4. 0xBFE00000 - End of 1GB RAM (QEMU default for some modes) + + let locations = [ + 0xBFE00000, // Try end of 1GB first (QEMU -bios none) + 0x82000000, 0x88000000, 0x87000000, 0xBFF00000, // Alternative end location + ]; + + for &addr in &locations { + // Check if this looks like a valid DTB + let magic_ptr = addr as *const u32; + if let Some(magic) = unsafe { magic_ptr.as_ref() } { + if be32_to_cpu(*magic) == FDT_MAGIC { + return Some(addr); + } + } + } + + None +} + +/// Probe available memory by checking if addresses are accessible +/// +/// This is a fallback when DTB is not available. We probe memory +/// in chunks to find where RAM ends. 
+pub unsafe fn probe_memory_size() -> Option { + const DRAM_BASE: usize = 0x80000000; + const PROBE_STEP: usize = 64 * 1024 * 1024; // 64MB chunks + const MAX_PROBE: usize = 2 * 1024 * 1024 * 1024; // Don't probe beyond 2GB + + let mut current = DRAM_BASE + PROBE_STEP; + let mut last_valid = DRAM_BASE; + + while current < DRAM_BASE + MAX_PROBE { + // Use read-write-verify test pattern for reliable memory detection + // This is the standard approach to distinguish valid RAM from MMIO/ROM/unmapped regions + let test_addr = current as *mut u32; + + if let Some(ptr) = test_addr.as_mut() { + // Save the original value + let original = core::ptr::read_volatile(ptr); + + // Write alternating bit pattern (industry standard memory test pattern) + core::ptr::write_volatile(ptr, 0xA5A5A5A5); + + // Read back and verify + let test_read = core::ptr::read_volatile(ptr); + + // Restore original value to avoid corrupting memory + core::ptr::write_volatile(ptr, original); + + // If write was successful, this is valid writable RAM + if test_read == 0xA5A5A5A5 { + last_valid = current; + } else { + // Write failed - likely MMIO, ROM, or invalid address + break; + } + } else { + break; + } + + current += PROBE_STEP; + } + + if last_valid > DRAM_BASE { + Some((last_valid - DRAM_BASE) as u64) + } else { + None + } +} + +/// Detect the number of CPUs from the device tree blob +/// +/// # Safety +/// +/// This function reads from the DTB address which must point to valid memory. +pub unsafe fn detect_cpu_count_from_dtb(dtb_addr: usize) -> Option { + // Check if DTB address looks valid (aligned and non-zero) + if dtb_addr == 0 || dtb_addr & 0x3 != 0 { + return None; + } + + let fdt = unsafe { Fdt::from_ptr(dtb_addr as *const u8).ok()? 
}; + + // Count CPUs by iterating through /cpus node + let cpu_count = fdt.cpus().count(); + + if cpu_count > 0 { + Some(cpu_count) + } else { + None + } +} diff --git a/awkernel_lib/src/arch/rv64/interrupt.rs b/awkernel_lib/src/arch/rv64/interrupt.rs index 72ade36d8..f82576645 100644 --- a/awkernel_lib/src/arch/rv64/interrupt.rs +++ b/awkernel_lib/src/arch/rv64/interrupt.rs @@ -1,5 +1,7 @@ use crate::interrupt::Interrupt; +// RV64 runs in M-mode without OpenSBI, so we use mstatus +// MIE (Machine Interrupt Enable) is bit 3 (0x08) in mstatus impl Interrupt for super::RV64 { fn get_flag() -> usize { let x: usize; diff --git a/awkernel_lib/src/arch/rv64/vm.rs b/awkernel_lib/src/arch/rv64/vm.rs index bb330ad66..9e4dd27f9 100644 --- a/awkernel_lib/src/arch/rv64/vm.rs +++ b/awkernel_lib/src/arch/rv64/vm.rs @@ -5,6 +5,7 @@ use crate::addr::{virt_addr::VirtAddr, Addr}; use crate::{console::unsafe_puts, paging::PAGESIZE}; use alloc::vec::Vec; use core::arch::asm; +use core::sync::atomic::{AtomicUsize, Ordering}; extern "C" { fn stext(); @@ -153,6 +154,17 @@ impl MemorySet { } } + #[allow(dead_code)] + pub fn get_heap_size(&self) -> Option { + // Return heap size if calculated during init_kernel_heap + let size = HEAP_SIZE.load(core::sync::atomic::Ordering::Relaxed); + if size == 0 { + None + } else { + Some(size) + } + } + #[allow(dead_code)] pub fn token(&self) -> usize { self.page_table.token() @@ -195,6 +207,9 @@ impl MemorySet { pub fn new_kernel() -> Self { let mut memory_set = Self::new_bare(); + // Detect actual memory end dynamically + let memory_end = super::get_memory_end(); + // Map kernel sections unsafe { unsafe_puts("Mapping kernel text section...\r\n"); @@ -250,11 +265,18 @@ impl MemorySet { unsafe { unsafe_puts("Mapping physical memory...\r\n"); + unsafe_puts(" from ekernel: 0x"); + crate::console::unsafe_print_hex_u64(ekernel as usize as u64); + unsafe_puts("\r\n to MEMORY_END: 0x"); + crate::console::unsafe_print_hex_u64(memory_end as usize as u64); + 
unsafe_puts(" (dynamic)\r\n"); } + // Map physical memory range for kernel use + // This creates an identity mapping for available RAM memory_set.push( MapArea::new( VirtAddr::from_usize(ekernel as usize), - VirtAddr::from_usize(super::address::MEMORY_END as usize), + VirtAddr::from_usize(memory_end as usize), MapType::Identical, MapPermission::R | MapPermission::W, ), @@ -300,16 +322,66 @@ impl MapArea { } } -// Static kernel memory set +// Static kernel memory set and heap size tracking use crate::sync::mcs::MCSNode; use crate::sync::mutex::Mutex; pub static KERNEL_SPACE: Mutex> = Mutex::new(None); +static HEAP_SIZE: AtomicUsize = AtomicUsize::new(0); pub fn init_kernel_space() { let mut node = MCSNode::new(); let mut kernel_space = KERNEL_SPACE.lock(&mut node); - *kernel_space = Some(MemorySet::new_kernel()); + let mut memory_set = MemorySet::new_kernel(); + + // Calculate dynamic heap size after kernel mapping + let heap_size = calculate_dynamic_heap_size(&mut memory_set); + HEAP_SIZE.store(heap_size, Ordering::Relaxed); + + *kernel_space = Some(memory_set); +} + +fn calculate_dynamic_heap_size(_memory_set: &mut MemorySet) -> usize { + extern "C" { + fn ekernel(); + } + use crate::addr::{phy_addr::PhyAddr, Addr}; + + // Get dynamically detected memory end + let memory_end_addr = super::get_memory_end(); + + // Calculate available memory after kernel + let kernel_end = PhyAddr::from_usize(ekernel as usize); + let memory_end = PhyAddr::from_usize(memory_end_addr as usize); + + // Start heap mapping after kernel, aligned to page boundary + let heap_start_phy = + PhyAddr::from_usize((kernel_end.as_usize() + PAGESIZE - 1) & !(PAGESIZE - 1)); + + if memory_end <= heap_start_phy { + unsafe { + unsafe_puts("Warning: No memory available for heap\r\n"); + } + return 0; + } + + // Calculate maximum possible heap size + let max_heap_size = memory_end.as_usize() - heap_start_phy.as_usize(); + + unsafe { + unsafe_puts("Dynamic heap calculation:\r\n"); + unsafe_puts(" Kernel 
end: 0x"); + crate::console::unsafe_print_hex_u64(kernel_end.as_usize() as u64); + unsafe_puts("\r\n Heap start: 0x"); + crate::console::unsafe_print_hex_u64(heap_start_phy.as_usize() as u64); + unsafe_puts("\r\n Memory end: 0x"); + crate::console::unsafe_print_hex_u64(memory_end.as_usize() as u64); + unsafe_puts("\r\n Max heap size: 0x"); + crate::console::unsafe_print_hex_u64(max_heap_size as u64); + unsafe_puts("\r\n"); + } + + max_heap_size } pub fn activate_kernel_space() { @@ -329,3 +401,27 @@ pub fn kernel_token() -> usize { panic!("Kernel space not initialized") } } + +pub fn get_heap_size() -> usize { + let heap_size = HEAP_SIZE.load(Ordering::Relaxed); + if heap_size == 0 { + // Fallback calculation if not initialized yet + extern "C" { + fn ekernel(); + } + use crate::addr::{phy_addr::PhyAddr, Addr}; + + let memory_end_addr = super::get_memory_end(); + + let kernel_end = PhyAddr::from_usize(ekernel as usize); + let memory_end = PhyAddr::from_usize(memory_end_addr as usize); + + if memory_end > kernel_end { + memory_end.as_usize() - kernel_end.as_usize() + } else { + 0 + } + } else { + heap_size + } +} diff --git a/awkernel_lib/src/interrupt.rs b/awkernel_lib/src/interrupt.rs index 12aaf798a..ae2fdce4e 100644 --- a/awkernel_lib/src/interrupt.rs +++ b/awkernel_lib/src/interrupt.rs @@ -403,6 +403,57 @@ pub fn handle_preemption() { preemption(); } +/// Handle all pending interrupt requests for RISC-V. +/// This is called from the M-mode interrupt handler. +#[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))] +pub fn handle_irqs(is_task: bool) { + use crate::{heap, unwind::catch_unwind}; + use core::mem::transmute; + + let handlers = IRQ_HANDLERS.read(); + let mut need_preemption = false; + + let controller = INTERRUPT_CONTROLLER.read(); + if let Some(ctrl) = controller.as_ref() { + let iter = ctrl.pending_irqs(); + drop(controller); // unlock + + // Use the primary allocator. 
+ #[cfg(not(feature = "std"))] + let _guard = { + let g = heap::TALLOC.save(); + unsafe { heap::TALLOC.use_primary() }; + g + }; + + for irq in iter { + if irq == PREEMPT_IRQ.load(Ordering::Relaxed) { + need_preemption = true; + continue; + } + + if let Some((_, handler)) = handlers.get(&irq) { + if let Err(err) = catch_unwind(|| { + handler(irq); + }) { + log::warn!("an interrupt handler has been panicked\n{err:?}"); + } + } + } + } + + if need_preemption && is_task { + let ptr = PREEMPT_FN.load(Ordering::Relaxed); + let preemption = unsafe { transmute::<*mut (), fn()>(ptr) }; + preemption(); + } + + // Because IPIs are edge-trigger, + // IPI sent during interrupt handlers will be ignored. + // To notify the interrupt again, we setup a timer. + crate::cpu::reset_wakeup_timer(); +} + /// Disable interrupts and automatically restored the configuration. /// /// ``` diff --git a/kernel/ld/rv64-link.lds b/kernel/ld/rv64-link.lds index c85dd2016..19d184e13 100644 --- a/kernel/ld/rv64-link.lds +++ b/kernel/ld/rv64-link.lds @@ -3,10 +3,11 @@ ENTRY(_start) SECTIONS { - . = 0x80200000; + /* Boot code at DRAM start for -bios none */ + . 
= 0x80000000; PROVIDE (__executable_start = .); __ram_start = .; - + PROVIDE (stext = .); .init : { KEEP(*(.init)) } .text : { *(.text .text.* .gnu.linkonce.t*) } diff --git a/kernel/src/arch/rv64.rs b/kernel/src/arch/rv64.rs index 58902c929..c2b8f9464 100644 --- a/kernel/src/arch/rv64.rs +++ b/kernel/src/arch/rv64.rs @@ -1,3 +1,5 @@ pub mod config; mod console; +mod interrupt_controller; mod kernel_main; +mod timer; diff --git a/kernel/src/arch/rv64/boot.S b/kernel/src/arch/rv64/boot.S index 8e3095ac4..3740c2482 100644 --- a/kernel/src/arch/rv64/boot.S +++ b/kernel/src/arch/rv64/boot.S @@ -7,13 +7,28 @@ .attribute arch, "rv64gc" _start: + # Read hart ID from mhartid CSR (M-mode) + csrr a0, mhartid + mv tp, a0 # save hartid to thread pointer for later use + + # Preserve DTB pointer that QEMU/firmware passes in a1 + la t0, dtb_ptr + sd a1, 0(t0) + + # set up a simple trap handler for M-mode + la t0, early_trap_handler + csrw mtvec, t0 + + # Initialize mscratch to 0 (will be set properly later if needed) + csrw mscratch, zero + # set up the 8MB initial stack for each cpu. 
li sp, 0x80800000 - li a0, 0x00800000 - csrr a1, mhartid - addi a1, a1, 1 - mul a0, a0, a1 - add sp, sp, a0 + li t0, 0x00800000 + mv t1, a0 # use hartid + addi t1, t1, 1 + mul t0, t0, t1 + add sp, sp, t0 # clear the BSS la t0, __bss_start la t1, __bss_end @@ -24,4 +39,203 @@ _start: # jump to kernel_main jal ra, kernel_main 2: - j 2b \ No newline at end of file + j 2b + +# M-mode trap handler - handles interrupts and exceptions +.align 4 +early_trap_handler: + # Check if this is an interrupt or exception + csrr t0, mcause + blt t0, zero, handle_interrupt # If MSB is set, it's an interrupt + + # This is an exception - print debug info and halt + j unhandled_trap + +handle_interrupt: + # Get interrupt code (lower bits of mcause) + li t1, 0x7FFFFFFFFFFFFFFF + and t0, t0, t1 + + # Check interrupt type + li t1, 7 # M-mode timer interrupt + beq t0, t1, handle_timer_interrupt + + li t1, 3 # M-mode software interrupt (IPI) + beq t0, t1, handle_software_interrupt + + # Unknown interrupt - just return + mret + +handle_timer_interrupt: + # Save all registers that might be clobbered by the Rust handler + addi sp, sp, -256 + sd ra, 0(sp) + sd t0, 8(sp) + sd t1, 16(sp) + sd t2, 24(sp) + sd a0, 32(sp) + sd a1, 40(sp) + sd a2, 48(sp) + sd a3, 56(sp) + sd a4, 64(sp) + sd a5, 72(sp) + sd a6, 80(sp) + sd a7, 88(sp) + sd t3, 96(sp) + sd t4, 104(sp) + sd t5, 112(sp) + sd t6, 120(sp) + + # Disable timer interrupt temporarily by setting mtimecmp to max + # MTIMECMP base is 0x02004000, each hart has 8-byte register + csrr t0, mhartid + slli t0, t0, 3 # Multiply by 8 + li t1, 0x02004000 + add t0, t0, t1 + li t1, -1 + sd t1, 0(t0) + + # Call the Rust timer handler + call riscv_handle_timer + + # Restore registers + ld ra, 0(sp) + ld t0, 8(sp) + ld t1, 16(sp) + ld t2, 24(sp) + ld a0, 32(sp) + ld a1, 40(sp) + ld a2, 48(sp) + ld a3, 56(sp) + ld a4, 64(sp) + ld a5, 72(sp) + ld a6, 80(sp) + ld a7, 88(sp) + ld t3, 96(sp) + ld t4, 104(sp) + ld t5, 112(sp) + ld t6, 120(sp) + addi sp, sp, 256 + + # 
Return from interrupt + mret + +handle_software_interrupt: + # Save all registers that might be clobbered by the Rust handler + addi sp, sp, -256 + sd ra, 0(sp) + sd t0, 8(sp) + sd t1, 16(sp) + sd t2, 24(sp) + sd a0, 32(sp) + sd a1, 40(sp) + sd a2, 48(sp) + sd a3, 56(sp) + sd a4, 64(sp) + sd a5, 72(sp) + sd a6, 80(sp) + sd a7, 88(sp) + sd t3, 96(sp) + sd t4, 104(sp) + sd t5, 112(sp) + sd t6, 120(sp) + + # Clear the software interrupt by writing 0 to MSIP + # MSIP base is 0x02000000, each hart has 4-byte register + csrr t0, mhartid + slli t0, t0, 2 # Multiply by 4 + li t1, 0x02000000 + add t0, t0, t1 + sw zero, 0(t0) # Clear MSIP + + # Call the Rust IPI handler + call riscv_handle_ipi + + # Restore registers + ld ra, 0(sp) + ld t0, 8(sp) + ld t1, 16(sp) + ld t2, 24(sp) + ld a0, 32(sp) + ld a1, 40(sp) + ld a2, 48(sp) + ld a3, 56(sp) + ld a4, 64(sp) + ld a5, 72(sp) + ld a6, 80(sp) + ld a7, 88(sp) + ld t3, 96(sp) + ld t4, 104(sp) + ld t5, 112(sp) + ld t6, 120(sp) + addi sp, sp, 256 + + # Return from interrupt + mret + +unhandled_trap: + # Write 'TRAP!' to UART + li t0, 0x10000000 + li t1, 'T' + sb t1, 0(t0) + li t1, 'R' + sb t1, 0(t0) + li t1, 'A' + sb t1, 0(t0) + li t1, 'P' + sb t1, 0(t0) + li t1, '!' 
+ sb t1, 0(t0) + li t1, '\r' + sb t1, 0(t0) + li t1, '\n' + sb t1, 0(t0) + + # Print mcause + csrr a0, mcause + call print_hex + + # Print mepc + csrr a0, mepc + call print_hex + + # Print mtval + csrr a0, mtval + call print_hex + + # Infinite loop +1: + j 1b + +# Helper function to print a hex value +print_hex: + li t0, 0x10000000 + li t2, 60 # shift amount: 60, 56, 52, ..., 0 + li t3, 16 # counter +1: + srl t1, a0, t2 + andi t1, t1, 0xF + li t4, 10 + blt t1, t4, 2f + addi t1, t1, 'A'-10 + j 3f +2: + addi t1, t1, '0' +3: + sb t1, 0(t0) + addi t2, t2, -4 + addi t3, t3, -1 + bnez t3, 1b + + # Print newline + li t1, '\r' + sb t1, 0(t0) + li t1, '\n' + sb t1, 0(t0) + ret + + .section .data + .align 3 + .global dtb_ptr +dtb_ptr: + .dword 0 diff --git a/kernel/src/arch/rv64/config.rs b/kernel/src/arch/rv64/config.rs index a6333deee..bc9ffe72c 100644 --- a/kernel/src/arch/rv64/config.rs +++ b/kernel/src/arch/rv64/config.rs @@ -1,4 +1,2 @@ -pub const HEAP_START: usize = 0x0008_8000_0000; - pub const PREEMPT_IRQ: u16 = 0; pub const WAKEUP_IRQ: u16 = 1; diff --git a/kernel/src/arch/rv64/interrupt_controller.rs b/kernel/src/arch/rv64/interrupt_controller.rs new file mode 100644 index 000000000..1d7112d2d --- /dev/null +++ b/kernel/src/arch/rv64/interrupt_controller.rs @@ -0,0 +1,197 @@ +use alloc::boxed::Box; +use awkernel_lib::interrupt::InterruptController; +use core::ptr::{read_volatile, write_volatile}; +use core::sync::atomic::Ordering; + +/// RISC-V PLIC (Platform-Level Interrupt Controller) implementation +/// Combined with CLINT/ACLINT for IPI support +pub struct RiscvPlic { + base_address: usize, + max_priority: u32, + num_sources: u16, +} + +// CLINT/ACLINT base address for IPIs +const ACLINT_BASE: usize = 0x0200_0000; +const MSIP_OFFSET: usize = 0x0000; // Machine Software Interrupt Pending + +// Import the detected CPU count from kernel_main +use super::kernel_main::NUM_CPUS; + +impl RiscvPlic { + /// Create a new RISC-V PLIC controller + pub const fn 
new(base_address: usize, num_sources: u16) -> Self { + Self { + base_address, + max_priority: 7, // Typical PLIC priority levels: 0-7 + num_sources, + } + } + + /// Get the priority register address for a given interrupt source + fn priority_reg(&self, source: u16) -> *mut u32 { + (self.base_address + (source as usize * 4)) as *mut u32 + } + + /// Get the enable register address for a given context and interrupt source + fn enable_reg(&self, context: usize, source: u16) -> *mut u32 { + let reg_index = source as usize / 32; + let base = self.base_address + 0x2000 + context * 0x80; + (base + reg_index * 4) as *mut u32 + } + + /// Get the threshold register address for a given context + fn threshold_reg(&self, context: usize) -> *mut u32 { + (self.base_address + 0x200000 + context * 0x1000) as *mut u32 + } + + /// Get the claim register address for a given context + fn claim_reg(&self, context: usize) -> *mut u32 { + (self.base_address + 0x200004 + context * 0x1000) as *mut u32 + } + + /// Set priority for an interrupt source + fn set_priority(&self, source: u16, priority: u32) { + if source > 0 && source <= self.num_sources && priority <= self.max_priority { + let reg = self.priority_reg(source); + unsafe { write_volatile(reg, priority) }; + } + } + + /// Enable interrupt for a specific context + fn enable_interrupt(&self, context: usize, source: u16) { + if source > 0 && source <= self.num_sources { + let reg = self.enable_reg(context, source); + let bit_pos = source as usize % 32; + unsafe { + let current = read_volatile(reg); + write_volatile(reg, current | (1 << bit_pos)); + } + } + } + + /// Disable interrupt for a specific context + fn disable_interrupt(&self, context: usize, source: u16) { + if source > 0 && source <= self.num_sources { + let reg = self.enable_reg(context, source); + let bit_pos = source as usize % 32; + unsafe { + let current = read_volatile(reg); + write_volatile(reg, current & !(1 << bit_pos)); + } + } + } + + /// Get the current hart ID 
(CPU ID) + fn get_hart_id(&self) -> usize { + // Use mhartid CSR to get hart ID + let hart_id: usize; + unsafe { + core::arch::asm!("csrr {}, mhartid", out(reg) hart_id); + } + hart_id + } + + /// Get supervisor mode context for current hart + fn get_supervisor_context(&self) -> usize { + // Typically supervisor mode context = hartid * 2 + 1 + self.get_hart_id() * 2 + 1 + } + + /// Send software interrupt (IPI) to a specific hart + fn send_software_interrupt(&self, hart_id: u32) { + // MSIP register for each hart is at ACLINT_BASE + MSIP_OFFSET + (hart_id * 4) + let msip_addr = (ACLINT_BASE + MSIP_OFFSET + (hart_id as usize * 4)) as *mut u32; + unsafe { + write_volatile(msip_addr, 1); + } + } + + /// Enable machine software interrupts + fn enable_software_interrupts(&self) { + unsafe { + // Set MIE.MSIE (Machine Software Interrupt Enable) bit (bit 3) + core::arch::asm!("csrrs t0, mie, {}", in(reg) 1 << 3); + } + } +} + +impl InterruptController for RiscvPlic { + fn enable_irq(&mut self, irq: u16) { + // Set a reasonable priority for the interrupt + self.set_priority(irq, 1); + + // Enable for supervisor mode (we're running in supervisor mode) + let context = self.get_supervisor_context(); + self.enable_interrupt(context, irq); + } + + fn disable_irq(&mut self, irq: u16) { + let context = self.get_supervisor_context(); + self.disable_interrupt(context, irq); + } + + fn pending_irqs(&self) -> Box> { + // Check pending interrupts by claiming them + let context = self.get_supervisor_context(); + let claim_reg = self.claim_reg(context); + + let mut pending = alloc::vec::Vec::new(); + + unsafe { + let claimed = read_volatile(claim_reg); + if claimed != 0 { + pending.push(claimed as u16); + // Complete the interrupt (write back the claim) + write_volatile(claim_reg, claimed); + } + } + + Box::new(pending.into_iter()) + } + + fn send_ipi(&mut self, _irq: u16, cpu_id: u32) { + // Send machine software interrupt to the target hart + self.send_software_interrupt(cpu_id); + } 
+ + fn send_ipi_broadcast(&mut self, _irq: u16) { + // Send IPI to all CPUs + let num_cpus = NUM_CPUS.load(Ordering::Acquire) as u32; + for cpu_id in 0..num_cpus { + self.send_software_interrupt(cpu_id); + } + } + + fn send_ipi_broadcast_without_self(&mut self, _irq: u16) { + // Send IPI to all CPUs except current + let current_hart = self.get_hart_id() as u32; + let num_cpus = NUM_CPUS.load(Ordering::Acquire) as u32; + for cpu_id in 0..num_cpus { + if cpu_id != current_hart { + self.send_software_interrupt(cpu_id); + } + } + } + + fn init_non_primary(&mut self) { + // Set threshold to 0 to accept all interrupts + let context = self.get_supervisor_context(); + let threshold_reg = self.threshold_reg(context); + unsafe { write_volatile(threshold_reg, 0) }; + + // Enable software interrupts for IPIs + self.enable_software_interrupts(); + } + + fn irq_range(&self) -> (u16, u16) { + // PLIC interrupt sources typically range from 1 to num_sources + (1, self.num_sources + 1) + } + + fn irq_range_for_pnp(&self) -> (u16, u16) { + // Reserve lower IRQs for system use, higher for PnP + let start = self.num_sources / 2; + (start, self.num_sources + 1) + } +} diff --git a/kernel/src/arch/rv64/kernel_main.rs b/kernel/src/arch/rv64/kernel_main.rs index 03955b43f..1506878ea 100644 --- a/kernel/src/arch/rv64/kernel_main.rs +++ b/kernel/src/arch/rv64/kernel_main.rs @@ -1,76 +1,116 @@ use super::console; -use crate::{ - config::{BACKUP_HEAP_SIZE, HEAP_START}, - kernel_info::KernelInfo, -}; +use crate::{config::BACKUP_HEAP_SIZE, kernel_info::KernelInfo}; use awkernel_lib::{cpu, heap}; use core::{ arch::global_asm, - fmt::Write, - // mem::MaybeUninit, sync::atomic::{AtomicBool, Ordering}, }; -use ns16550a::Uart; const UART_BASE: usize = 0x1000_0000; -const HEAP_SIZE: usize = 1024 * 1024 * 512; - -// TODO: set initial stack 4MB for each CPU on 0x8040_0000. 
see boot.S -// const MAX_HARTS: usize = 8; -// const INITIAL_STACK: usize = 0x8040_0000; -// const INITIAL_STACK_SIZE: usize = 0x0040_0000; -// #[repr(align(4096))] -// struct InitialStack([MaybeUninit; INITIAL_STACK_SIZE * MAX_HARTS]); -// #[no_mangle] -// static INITIAL_STACK: InitialStack = unsafe { MaybeUninit::uninit().assume_init() }; +extern "C" { + static dtb_ptr: usize; +} static PRIMARY_INITIALIZED: AtomicBool = AtomicBool::new(false); +pub(super) static NUM_CPUS: core::sync::atomic::AtomicUsize = + core::sync::atomic::AtomicUsize::new(4); global_asm!(include_str!("boot.S")); +/// M-mode software interrupt (IPI) handler called from assembly +/// +/// # Safety +/// +/// This function is called from the M-mode trap handler in boot.S #[no_mangle] -pub unsafe extern "C" fn kernel_main() { - unsafe { crate::config::init() }; +pub unsafe extern "C" fn riscv_handle_ipi() { + // Handle all pending interrupts, including IPI preemption + awkernel_lib::interrupt::handle_irqs(true); +} - let hartid: usize = cpu::cpu_id(); - if hartid == 0 { - primary_hart(hartid); - } else { - non_primary_hart(hartid); - } +/// M-mode timer interrupt handler called from assembly +/// +/// # Safety +/// +/// This function is called from the M-mode trap handler in boot.S +#[no_mangle] +pub unsafe extern "C" fn riscv_handle_timer() { + // Timer interrupt is already disabled in assembly by setting mtimecmp to max + // Handle any pending interrupts (the timer handler will be invoked if registered) + awkernel_lib::interrupt::handle_irqs(true); } -unsafe fn primary_hart(hartid: usize) { - // setup interrupt; TODO; +/// Write to UART during early boot for debugging +/// +/// # Safety +/// +/// UART must be initialized before calling this function +#[inline] +unsafe fn uart_debug_puts(s: &str) { + let uart_base = UART_BASE as *mut u8; + const LSR: usize = 5; + const THR: usize = 0; - super::console::init_port(UART_BASE); + for byte in s.bytes() { + while uart_base.add(LSR).read_volatile() & 
0x20 == 0 { + core::hint::spin_loop(); + } + uart_base.add(THR).write_volatile(byte); + } +} - // Initialize memory management (page allocator) - awkernel_lib::arch::rv64::init_page_allocator(); +fn register_boot_dtb_pointer() { + unsafe { + let addr = dtb_ptr; + if addr != 0 { + awkernel_lib::arch::rv64::set_boot_dtb_ptr(addr); + } + } +} - // Initialize virtual memory system - awkernel_lib::arch::rv64::init_kernel_space(); +/// Initialize UART for early debugging and console output. +/// +/// # Safety +/// +/// Must be called during early boot on the primary hart. +unsafe fn init_uart() { + let uart_base = UART_BASE as *mut u8; - // Activate virtual memory (enable MMU and page tables) - awkernel_lib::arch::rv64::activate_kernel_space(); + // 16550 UART register offsets + const IER: usize = 1; + const FCR: usize = 2; + const LCR: usize = 3; + const MCR: usize = 4; + const LSR: usize = 5; + const THR: usize = 0; - // Verify VM system is working by getting kernel token - let _kernel_token = awkernel_lib::arch::rv64::get_kernel_token(); + // Initialize 16550 UART for early debugging + uart_base.add(IER).write_volatile(0x00); // Disable interrupts + uart_base.add(LCR).write_volatile(0x80); // Enable DLAB + uart_base.add(THR).write_volatile(0x03); // Baud rate divisor low + uart_base.add(IER).write_volatile(0x00); // Baud rate divisor high + uart_base.add(LCR).write_volatile(0x03); // 8N1 + uart_base.add(FCR).write_volatile(0xC7); // Enable FIFO + uart_base.add(MCR).write_volatile(0x0B); // RTS/DTR - // setup the VM - let backup_start = HEAP_START; - let backup_size = BACKUP_HEAP_SIZE; - let primary_start = HEAP_START + BACKUP_HEAP_SIZE; - let primary_size = HEAP_SIZE; + // Helper to write strings during early boot + let uart_puts = |s: &str| { + for byte in s.bytes() { + while uart_base.add(LSR).read_volatile() & 0x20 == 0 { + core::hint::spin_loop(); + } + uart_base.add(THR).write_volatile(byte); + } + }; + + uart_puts("\r\n=== AWkernel RV64 Starting ===\r\n"); - // 
enable heap allocator - heap::init_primary(primary_start, primary_size); - heap::init_backup(backup_start, backup_size); - heap::TALLOC.use_primary_then_backup(); // use backup allocator + super::console::init_port(UART_BASE); + uart_puts("Console port initialized\r\n"); - // initialize serial device and dump booting logo - let mut port = Uart::new(UART_BASE); + // Initialize full UART driver + let mut port = ns16550a::Uart::new(UART_BASE); port.init( ns16550a::WordLength::EIGHT, ns16550a::StopBits::ONE, @@ -82,39 +122,196 @@ unsafe fn primary_hart(hartid: usize) { ns16550a::Divisor::BAUD115200, ); - let _ = port.write_str("\r\nAwkernel is booting\r\n\r\n"); + use core::fmt::Write; + let _ = port.write_str("UART driver initialized\r\n"); +} - // initialize console driver to which log messages are dumped +/// Initialize memory management including page allocator and virtual memory. +/// +/// # Safety +/// +/// Must be called after heap initialization on the primary hart. +unsafe fn init_memory_management() { + uart_debug_puts("Initializing page allocator...\r\n"); + awkernel_lib::arch::rv64::init_page_allocator(); + + uart_debug_puts("Initializing kernel space...\r\n"); + awkernel_lib::arch::rv64::init_kernel_space(); + + uart_debug_puts("Activating kernel space...\r\n"); + awkernel_lib::arch::rv64::activate_kernel_space(); + + uart_debug_puts("Virtual memory system initialized\r\n"); +} + +/// Setup dynamic heap allocation based on available memory. +/// +/// # Safety +/// +/// Must be called before memory management initialization on the primary hart. 
+unsafe fn init_heap_allocation() { + uart_debug_puts("Setting up heap allocation...\r\n"); + + extern "C" { + fn ekernel(); + } + + let heap_start = (ekernel as usize + 0xfff) & !0xfff; // Align to 4K + let backup_size = BACKUP_HEAP_SIZE; + let total_heap_size = awkernel_lib::arch::rv64::get_heap_size(); + + if total_heap_size <= backup_size { + // Use minimal heap if not enough memory + uart_debug_puts("Using minimal heap configuration\r\n"); + let primary_size = 64 * 1024 * 1024; // 64MB minimum + heap::init_primary(heap_start + backup_size, primary_size); + heap::init_backup(heap_start, backup_size); + } else { + // Use dynamic calculation + uart_debug_puts("Using dynamic heap configuration\r\n"); + let primary_size = total_heap_size - backup_size; + heap::init_primary(heap_start + backup_size, primary_size); + heap::init_backup(heap_start, backup_size); + } + + heap::TALLOC.use_primary_then_backup(); + uart_debug_puts("Heap allocation complete\r\n"); +} + +/// Initialize timer and interrupt controller for RV64. +/// +/// # Safety +/// +/// Must be called after heap and memory management initialization on the primary hart. 
+unsafe fn init_timer_and_interrupts() { + use super::interrupt_controller::RiscvPlic; + use super::timer::RiscvTimer; + use alloc::boxed::Box; + + uart_debug_puts("Initializing RISC-V interrupt controller...\r\n"); + + // RISC-V PLIC (Platform-Level Interrupt Controller) base address + // This should match the device tree or platform specification + const PLIC_BASE: usize = 0x0c000000; + const NUM_SOURCES: u16 = 128; // Typical PLIC configuration + const TIMER_IRQ: u16 = 5; // Machine timer interrupt is typically IRQ 5 for PLIC + + // Initialize and register interrupt controller + let plic = Box::new(RiscvPlic::new(PLIC_BASE, NUM_SOURCES)); + awkernel_lib::interrupt::register_interrupt_controller(plic); + + uart_debug_puts("Initializing RISC-V timer...\r\n"); + + // Initialize and register timer + let timer = Box::new(RiscvTimer::new(TIMER_IRQ)); + awkernel_lib::timer::register_timer(timer); + + uart_debug_puts("Timer and interrupt controller initialized\r\n"); +} + +#[no_mangle] +pub unsafe extern "C" fn kernel_main() { + unsafe { crate::config::init() }; + + register_boot_dtb_pointer(); + + let hartid: usize = cpu::cpu_id(); + if hartid == 0 { + primary_hart(hartid); + } else { + non_primary_hart(hartid); + } +} + +/// Primary CPU initialization sequence. +/// 1. Initialize UART for early debugging +/// 2. Setup heap allocation FIRST (required for page tables) +/// 3. Initialize memory management (page allocator and virtual memory) +/// 4. Initialize architecture-specific features +/// 5. Initialize console driver +/// 6. Initialize timer and interrupt controller +/// 7. Detect CPU count from device tree +/// 8. Wake up secondary CPUs +/// 9. Start the kernel main function +/// +/// # Safety +/// +/// Must be called exactly once by the primary hart during boot. +unsafe fn primary_hart(hartid: usize) { + // 1. Initialize UART for early debugging + init_uart(); + + // 2. Setup heap allocation FIRST - page tables need this! + init_heap_allocation(); + + // 3. 
Initialize memory management (now that heap exists) + init_memory_management(); + + // 4. Initialize architecture-specific features + awkernel_lib::arch::rv64::init_primary(); + + // 5. Initialize console driver console::init(UART_BASE); - // switch to S-Mode; TODO; - // * currntly this impl. holds both kernel and userland - // * in M-Mode, which is the highest priority. + // 6. Initialize timer and interrupt controller + init_timer_and_interrupts(); + + // Enable software interrupts for IPIs on primary hart + unsafe { + core::arch::asm!("csrrs t0, mie, {}", in(reg) 1 << 3); + } - // wake up another harts + log::info!("AWkernel RV64 primary CPU initialization complete"); + + // 7. Detect CPU count from device tree and store it + let num_cpu = awkernel_lib::arch::rv64::detect_cpu_count(); + NUM_CPUS.store(num_cpu, Ordering::Release); + + // 8. Wake up secondary CPUs PRIMARY_INITIALIZED.store(true, Ordering::SeqCst); let kernel_info = KernelInfo { info: (), cpu_id: hartid, - num_cpu: 4, // TODO: get the number of CPUs + num_cpu, }; + // 9. Start the kernel main function crate::main::<()>(kernel_info); } +/// Secondary CPU initialization sequence. +/// 1. Wait for primary CPU to complete initialization +/// 2. Initialize architecture-specific features for non-primary CPUs +/// 3. Setup heap allocator usage +/// 4. Get CPU count from primary hart +/// 5. Start the kernel main function +/// +/// # Safety +/// +/// Must be called by non-primary harts during boot. unsafe fn non_primary_hart(hartid: usize) { + // 1. Wait for primary CPU to complete initialization while !PRIMARY_INITIALIZED.load(Ordering::SeqCst) { core::hint::spin_loop(); } - heap::TALLOC.use_primary_then_backup(); // use backup allocator + // 2. Initialize architecture-specific features for non-primary CPUs + awkernel_lib::arch::rv64::init_non_primary(); + awkernel_lib::interrupt::init_non_primary(); + + // 3. Setup heap allocator usage + heap::TALLOC.use_primary_then_backup(); + + // 4. 
Get CPU count detected by primary hart + let num_cpu = NUM_CPUS.load(Ordering::Acquire); let kernel_info = KernelInfo { info: (), cpu_id: hartid, - num_cpu: 4, // TODO: get the number of CPUs + num_cpu, }; + // 5. Start the kernel main function crate::main::<()>(kernel_info); } diff --git a/kernel/src/arch/rv64/timer.rs b/kernel/src/arch/rv64/timer.rs new file mode 100644 index 000000000..631215e09 --- /dev/null +++ b/kernel/src/arch/rv64/timer.rs @@ -0,0 +1,85 @@ +use core::ptr::{read_volatile, write_volatile}; +use core::time::Duration; + +/// RISC-V Timer Implementation +/// Uses MTIME (machine time) and MTIMECMP (machine time compare) registers +pub struct RiscvTimer { + irq: u16, +} + +// RISC-V timer memory-mapped register addresses +// These should match the device tree or platform configuration +const ACLINT_MTIME_BASE: usize = 0x0200_0000 + 0x0000bff8; +const ACLINT_MTIMECMP_BASE: usize = 0x0200_0000 + 0x00004000; +const RISCV_TIMEBASE_FREQ: u64 = 10_000_000; // 10 MHz + +impl RiscvTimer { + pub const fn new(irq: u16) -> Self { + RiscvTimer { irq } + } + + /// Get the current machine time + fn get_mtime() -> u64 { + let mtime = ACLINT_MTIME_BASE as *const u64; + unsafe { read_volatile(mtime) } + } + + /// Get the current hart ID + fn get_hart_id() -> usize { + let hart_id: usize; + unsafe { + core::arch::asm!("csrr {}, mhartid", out(reg) hart_id); + } + hart_id + } + + /// Set the machine time compare value + fn set_mtimecmp(&self, value: u64) { + // Each hart has its own MTIMECMP register at ACLINT_MTIMECMP_BASE + (hartid * 8) + let hart_id = Self::get_hart_id(); + let mtimecmp = (ACLINT_MTIMECMP_BASE + (hart_id * 8)) as *mut u64; + unsafe { write_volatile(mtimecmp, value) }; + } + + /// Enable machine timer interrupt + fn enable_mti(&self) { + unsafe { + // Set MIE.MTIE (Machine Timer Interrupt Enable) bit + core::arch::asm!("csrrs t0, mie, {}", in(reg) 1 << 7); + } + } + + /// Disable machine timer interrupt + fn disable_mti(&self) { + unsafe { + // 
Clear MIE.MTIE (Machine Timer Interrupt Enable) bit + core::arch::asm!("csrrc t0, mie, {}", in(reg) 1 << 7); + } + } +} + +impl awkernel_lib::timer::Timer for RiscvTimer { + fn reset(&self, dur: Duration) { + let current_time = Self::get_mtime(); + let ticks = (dur.as_nanos() as u64 * RISCV_TIMEBASE_FREQ) / 1_000_000_000; + let target_time = current_time.saturating_add(ticks); + + // Set the compare value + self.set_mtimecmp(target_time); + + // Enable machine timer interrupt + self.enable_mti(); + } + + fn irq_id(&self) -> u16 { + self.irq + } + + fn disable(&self) { + // Disable machine timer interrupt + self.disable_mti(); + + // Set MTIMECMP to maximum value to prevent spurious interrupts + self.set_mtimecmp(u64::MAX); + } +} From b52a912bde85540b8871744a08325611d5747455 Mon Sep 17 00:00:00 2001 From: PeterWrighten Date: Wed, 19 Nov 2025 20:08:41 +0900 Subject: [PATCH 2/2] refactor(RV64): refactor Dynamic Decider's memory end detector for explicit implementation --- .github/workflows/rust.yml | 4 +- .vscode/settings.json | 12 +-- Makefile | 2 +- README.md | 4 +- awkernel_async_lib/src/dag/graph.rs | 33 +------ .../src/dag/graph/iter_format.rs | 2 +- awkernel_async_lib/src/dag/visit.rs | 6 +- awkernel_async_lib/src/time_interval.rs | 9 +- awkernel_async_lib_verified/src/ringq.rs | 2 +- awkernel_drivers/src/pcie/intel/igb/igb_hw.rs | 7 +- awkernel_drivers/src/pcie/intel/igc.rs | 4 +- .../src/pcie/intel/ixgbe/ixgbe_operations.rs | 2 +- awkernel_drivers/src/pcie/nvme/nvme_regs.rs | 15 +-- .../src/pcie/virtio/virtio_net.rs | 2 +- awkernel_lib/src/arch/rv32/frame_allocator.rs | 2 +- awkernel_lib/src/arch/rv32/vm.rs | 18 ++-- awkernel_lib/src/arch/rv64.rs | 31 ++---- awkernel_lib/src/arch/rv64/fdt.rs | 8 +- awkernel_lib/src/arch/rv64/frame_allocator.rs | 2 +- awkernel_lib/src/arch/rv64/vm.rs | 98 ++++++++++--------- awkernel_lib/src/arch/x86_64/acpi/dmar.rs | 2 +- .../src/arch/x86_64/acpi/dmar/atsr.rs | 2 +- .../src/arch/x86_64/acpi/dmar/dhrd.rs | 2 +- 
.../src/arch/x86_64/acpi/dmar/rmrr.rs | 2 +- .../src/arch/x86_64/acpi/dmar/satc.rs | 2 +- .../src/arch/x86_64/acpi/dmar/sidp.rs | 2 +- awkernel_lib/src/arch/x86_64/acpi/srat.rs | 2 +- awkernel_lib/src/context/x86_64.rs | 2 +- awkernel_lib/src/device_tree/prop.rs | 2 +- awkernel_lib/src/device_tree/traits.rs | 2 +- awkernel_lib/src/file/fatfs/boot_sector.rs | 4 +- awkernel_lib/src/file/fatfs/file.rs | 4 +- awkernel_lib/src/file/fatfs/table.rs | 5 +- awkernel_lib/src/heap.rs | 2 +- awkernel_lib/src/net/ether.rs | 2 +- awkernel_lib/src/unwind.rs | 4 +- docker/Dockerfile | 4 +- kernel/src/arch/aarch64/types.rs | 2 +- kernel/src/arch/x86_64/kernel_main.rs | 7 +- smoltcp/src/storage/assembler.rs | 2 +- targets/aarch64-kernel.json | 4 +- targets/x86_64-kernel.json | 4 +- 42 files changed, 145 insertions(+), 182 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 182410484..ba86b81c7 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -19,9 +19,9 @@ jobs: - uses: actions/checkout@v3 - name: Install Rust nightly - run: rustup toolchain install nightly-2025-05-22 + run: rustup toolchain install nightly-2025-11-16 - name: Use Rust nightly - run: rustup default nightly-2025-05-22 + run: rustup default nightly-2025-11-16 - name: Add Rust components run: rustup component add rust-src llvm-tools-preview clippy rustfmt - name: Add Rust targets diff --git a/.vscode/settings.json b/.vscode/settings.json index 23d4766b7..bdae02be9 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -5,13 +5,13 @@ // For AArch64 Virt // "rust-analyzer.cargo.buildScripts.overrideCommand": [ // "cargo", - // "+nightly-2025-05-22", + // "+nightly-2025-11-16", // "check_no_std", // "--target=targets/aarch64-kernel.json", // ], // "rust-analyzer.check.overrideCommand": [ // "cargo", - // "+nightly-2025-05-22", + // "+nightly-2025-11-16", // "check_no_std", // "--target=targets/aarch64-kernel.json", // ], @@ -21,13 +21,13 @@ // For 
x86_64 "rust-analyzer.cargo.buildScripts.overrideCommand": [ "cargo", - "+nightly-2025-05-22", + "+nightly-2025-11-16", "check_no_std", "--target=targets/x86_64-kernel.json", ], "rust-analyzer.check.overrideCommand": [ "cargo", - "+nightly-2025-05-22", + "+nightly-2025-11-16", "check_no_std", "--target=targets/x86_64-kernel.json", ], @@ -37,14 +37,14 @@ // For Linux // "rust-analyzer.cargo.buildScripts.overrideCommand": [ // "cargo", - // "+nightly-2025-05-22", + // "+nightly-2025-11-16", // "check_std", // "--quiet", // "--message-format=json" // ], // "rust-analyzer.check.overrideCommand": [ // "cargo", - // "+nightly-2025-05-22", + // "+nightly-2025-11-16", // "check_std", // "--quiet", // "--message-format=json" diff --git a/Makefile b/Makefile index e017042b4..cf0f49e35 100644 --- a/Makefile +++ b/Makefile @@ -55,7 +55,7 @@ X86_64_LD=$(LINKERDIR)/x86_64-link.lds RV32_LD=$(LINKERDIR)/rv32-link.lds RV64_LD=$(LINKERDIR)/rv64-link.lds -RUSTV=nightly-2025-05-22 +RUSTV=nightly-2025-11-16 all: aarch64 x86_64 riscv32 riscv64 std diff --git a/README.md b/README.md index 936ca4401..f74093d7b 100644 --- a/README.md +++ b/README.md @@ -9,8 +9,8 @@ It can execute async/await applications in kernel space safely. ```text $ sudo apt install clang qemu-system-arm qemu-system-x86 qemu-system-misc python3-pyelftools -$ rustup toolchain install nightly-2025-05-22 -$ rustup default nightly-2025-05-22 +$ rustup toolchain install nightly-2025-11-16 +$ rustup default nightly-2025-11-16 $ rustup component add rust-src llvm-tools-preview $ rustup target add x86_64-unknown-none aarch64-unknown-none riscv64gc-unknown-none-elf riscv32imac-unknown-none-elf ``` diff --git a/awkernel_async_lib/src/dag/graph.rs b/awkernel_async_lib/src/dag/graph.rs index ca3be159c..9953b4c8a 100644 --- a/awkernel_async_lib/src/dag/graph.rs +++ b/awkernel_async_lib/src/dag/graph.rs @@ -525,7 +525,7 @@ where /// not borrow from the graph. 
/// /// [1]: struct.Neighbors.html#method.detach - pub fn neighbors(&self, a: NodeIndex) -> Neighbors { + pub fn neighbors(&self, a: NodeIndex) -> Neighbors<'_, E, Ix> { self.neighbors_directed(a, Outgoing) } @@ -546,7 +546,7 @@ where /// not borrow from the graph. /// /// [1]: struct.Neighbors.html#method.detach - pub fn neighbors_directed(&self, a: NodeIndex, dir: Direction) -> Neighbors { + pub fn neighbors_directed(&self, a: NodeIndex, dir: Direction) -> Neighbors<'_, E, Ix> { let mut iter = self.neighbors_undirected(a); let k = dir.index(); iter.next[1 - k] = EdgeIndex::end(); @@ -567,7 +567,7 @@ where /// not borrow from the graph. /// /// [1]: struct.Neighbors.html#method.detach - pub fn neighbors_undirected(&self, a: NodeIndex) -> Neighbors { + pub fn neighbors_undirected(&self, a: NodeIndex) -> Neighbors<'_, E, Ix> { Neighbors { skip_start: a, edges: &self.edges, @@ -589,7 +589,7 @@ where /// just the nodes without edges. /// /// The whole iteration computes in **O(|V|)** time where V is the set of nodes. - pub fn externals(&self, dir: Direction) -> Externals { + pub fn externals(&self, dir: Direction) -> Externals<'_, N, Ix> { Externals { iter: self.nodes.iter().enumerate(), dir, @@ -607,7 +607,7 @@ where /// Create an iterator over all edges, in indexed order. /// /// Iterator element type is `EdgeReference`. - pub fn edge_references(&self) -> EdgeReferences { + pub fn edge_references(&self) -> EdgeReferences<'_, E, Ix> { EdgeReferences { iter: self.edges.iter().enumerate(), } @@ -691,29 +691,6 @@ where clone_fields!(Neighbors, skip_start, edges, next,); } -/// A “walker” object that can be used to step through the edge list of a node. -/// -/// Created with [`.detach()`](struct.Neighbors.html#method.detach). -/// -/// The walker does not borrow from the graph, so it lets you step through -/// neighbors or incident edges while also mutating graph weights. 
-pub struct WalkNeighbors { - skip_start: NodeIndex, - next: [EdgeIndex; 2], -} - -impl Clone for WalkNeighbors -where - Ix: IndexType, -{ - fn clone(&self) -> Self { - WalkNeighbors { - skip_start: self.skip_start, - next: self.next, - } - } -} - /// Iterator over the node indices of a graph. #[derive(Clone, Debug)] pub struct NodeIndices { diff --git a/awkernel_async_lib/src/dag/graph/iter_format.rs b/awkernel_async_lib/src/dag/graph/iter_format.rs index adf6112ae..b2ee4b69a 100644 --- a/awkernel_async_lib/src/dag/graph/iter_format.rs +++ b/awkernel_async_lib/src/dag/graph/iter_format.rs @@ -110,7 +110,7 @@ pub struct Format<'a, I> { } pub trait IterFormatExt: Iterator { - fn format(self, separator: &str) -> Format + fn format(self, separator: &str) -> Format<'_, Self> where Self: Sized, { diff --git a/awkernel_async_lib/src/dag/visit.rs b/awkernel_async_lib/src/dag/visit.rs index 4a853c831..529a5420f 100644 --- a/awkernel_async_lib/src/dag/visit.rs +++ b/awkernel_async_lib/src/dag/visit.rs @@ -133,7 +133,7 @@ IntoEdgeReferences! {delegate_impl [] } trait_template! { /// The graph’s `NodeId`s map to indices - #[allow(clippy::needless_arbitrary_self_type)] + #[allow(clippy::needless_arbitrary_self_type, dead_code)] pub trait NodeIndexable : GraphBase { @section self /// Return an upper bound of the node indices in the graph @@ -150,7 +150,7 @@ NodeIndexable! {delegate_impl []} trait_template! { /// A graph with a known node count. -#[allow(clippy::needless_arbitrary_self_type)] +#[allow(clippy::needless_arbitrary_self_type, dead_code)] pub trait NodeCount : GraphBase { @section self fn node_count(self: &Self) -> usize; @@ -244,7 +244,7 @@ where trait_template! { /// A graph that can create a map that tracks the visited status of its nodes. 
-#[allow(clippy::needless_arbitrary_self_type)] +#[allow(clippy::needless_arbitrary_self_type, dead_code)] pub trait Visitable : GraphBase { @section type /// The associated map type diff --git a/awkernel_async_lib/src/time_interval.rs b/awkernel_async_lib/src/time_interval.rs index 9b8dd09a9..96ce16219 100644 --- a/awkernel_async_lib/src/time_interval.rs +++ b/awkernel_async_lib/src/time_interval.rs @@ -40,7 +40,7 @@ use core::{ use futures::Stream; use futures::StreamExt; -#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[derive(Default, Debug, Clone, Copy, PartialEq, Eq)] #[allow(unused)] pub enum MissedTickBehavior { /// Ticks as fast as possible until caught up. @@ -50,6 +50,7 @@ pub enum MissedTickBehavior { /// Expected ticks: | 1 | 2 | 3 | 4 | 5 | 6 | /// Actual ticks: | work -----| delay | work | work | work -| work -----| /// ``` + #[default] Burst, /// Ticks at the next available interval after the delay. @@ -71,12 +72,6 @@ pub enum MissedTickBehavior { Skip, } -impl Default for MissedTickBehavior { - fn default() -> Self { - Self::Burst - } -} - /// Creates a new interval that ticks at the specified `period`. /// The first tick completes immediately. /// diff --git a/awkernel_async_lib_verified/src/ringq.rs b/awkernel_async_lib_verified/src/ringq.rs index b33c1fe6c..3dd67410e 100644 --- a/awkernel_async_lib_verified/src/ringq.rs +++ b/awkernel_async_lib_verified/src/ringq.rs @@ -87,7 +87,7 @@ impl RingQ { /// Get a iterator. #[inline(always)] - pub fn iter(&self) -> IterRingQ { + pub fn iter(&self) -> IterRingQ<'_, T> { IterRingQ { ringq: self, pos: self.head, diff --git a/awkernel_drivers/src/pcie/intel/igb/igb_hw.rs b/awkernel_drivers/src/pcie/intel/igb/igb_hw.rs index 6a92916e5..661dae9de 100644 --- a/awkernel_drivers/src/pcie/intel/igb/igb_hw.rs +++ b/awkernel_drivers/src/pcie/intel/igb/igb_hw.rs @@ -6005,9 +6005,8 @@ impl IgbHw { // operation, while the smaller eeproms are capable of an // 8-byte PAGE WRITE operation. 
Break the inner loop to pass // new address - if (((offset + widx as u32) * 2) - % self.eeprom.page_size.ok_or(IgbDriverErr::EEPROM)? as u32) - == 0 + if ((offset + widx as u32) * 2) + .is_multiple_of(self.eeprom.page_size.ok_or(IgbDriverErr::EEPROM)? as u32) { self.standby_eeprom(info)?; break; @@ -6215,7 +6214,7 @@ impl IgbHw { let mut i = 0; let mut add; while i < data.len() { - let act_offset = if (offset + i as u32) % 2 != 0 { + let act_offset = if !(offset + i as u32).is_multiple_of(2) { add = 1; bank_offset as u32 + (offset + i as u32 - 1) * 2 } else { diff --git a/awkernel_drivers/src/pcie/intel/igc.rs b/awkernel_drivers/src/pcie/intel/igc.rs index b91be7df4..8103ac530 100644 --- a/awkernel_drivers/src/pcie/intel/igc.rs +++ b/awkernel_drivers/src/pcie/intel/igc.rs @@ -1070,8 +1070,8 @@ fn igc_allocate_queues( info: &PCIeInfo, irqs: &[IRQ], ) -> Result<(Vec, BTreeMap), PCIeDeviceErr> { - assert!(core::mem::size_of::() % PAGESIZE == 0); - assert!(core::mem::size_of::() % PAGESIZE == 0); + assert!(core::mem::size_of::().is_multiple_of(PAGESIZE)); + assert!(core::mem::size_of::().is_multiple_of(PAGESIZE)); let mut irq_to_queue = BTreeMap::new(); let mut que = Vec::with_capacity(irqs.len()); diff --git a/awkernel_drivers/src/pcie/intel/ixgbe/ixgbe_operations.rs b/awkernel_drivers/src/pcie/intel/ixgbe/ixgbe_operations.rs index 57c9998f5..aa436e2db 100644 --- a/awkernel_drivers/src/pcie/intel/ixgbe/ixgbe_operations.rs +++ b/awkernel_drivers/src/pcie/intel/ixgbe/ixgbe_operations.rs @@ -2251,7 +2251,7 @@ pub fn hic_unlocked( } // Calculate length in DWORDs. 
We must be DWORD aligned - if length % (core::mem::size_of::<u32>() as u32) != 0 { + if !length.is_multiple_of(core::mem::size_of::<u32>() as u32) { log::debug!("Buffer length failure, not aligned to dword"); return Err(InvalidArgument); } diff --git a/awkernel_drivers/src/pcie/nvme/nvme_regs.rs b/awkernel_drivers/src/pcie/nvme/nvme_regs.rs index 0e6d907df..3c35a8a79 100644 --- a/awkernel_drivers/src/pcie/nvme/nvme_regs.rs +++ b/awkernel_drivers/src/pcie/nvme/nvme_regs.rs @@ -11,19 +11,19 @@ pub const NVME_VS: usize = 0x0008; /* Version */ pub const NVME_INTMC: usize = 0x0010; /* Interrupt Mask Clear */ pub const NVME_CC: usize = 0x0014; /* Controller Configuration */ -pub const NVME_CC_IOCQES: fn(u32) -> u32 = |_v| (((_v) & 0xf) << 20); +pub const NVME_CC_IOCQES: fn(u32) -> u32 = |_v| ((_v) & 0xf) << 20; pub const NVME_CC_IOCQES_MASK: u32 = 0xf << 20; -pub const NVME_CC_IOSQES: fn(u32) -> u32 = |_v| (((_v) & 0xf) << 16); +pub const NVME_CC_IOSQES: fn(u32) -> u32 = |_v| ((_v) & 0xf) << 16; pub const NVME_CC_IOSQES_MASK: u32 = 0xf << 16; -pub const NVME_CC_SHN: fn(u32) -> u32 = |_v| (((_v) & 0x3) << 14); +pub const NVME_CC_SHN: fn(u32) -> u32 = |_v| ((_v) & 0x3) << 14; pub const NVME_CC_SHN_MASK: u32 = 0x3 << 14; pub const NVME_CC_SHN_NONE: u32 = 0; -pub const NVME_CC_AMS: fn(u32) -> u32 = |_v| (((_v) & 0x7) << 11); +pub const NVME_CC_AMS: fn(u32) -> u32 = |_v| ((_v) & 0x7) << 11; pub const NVME_CC_AMS_MASK: u32 = 0x7 << 11; pub const NVME_CC_AMS_RR: u32 = 0; /* round-robin */ -pub const NVME_CC_MPS: fn(u32) -> u32 = |_v| ((((_v) - 12) & 0xf) << 7); +pub const NVME_CC_MPS: fn(u32) -> u32 = |_v| (((_v) - 12) & 0xf) << 7; pub const NVME_CC_MPS_MASK: u32 = 0xf << 7; -pub const NVME_CC_CSS: fn(u32) -> u32 = |_v| (((_v) & 0x7) << 4); +pub const NVME_CC_CSS: fn(u32) -> u32 = |_v| ((_v) & 0x7) << 4; pub const NVME_CC_CSS_MASK: u32 = 0x7 << 4; pub const NVME_CC_CSS_NVM: u32 = 0; pub const NVME_CC_EN: u32 = 1 << 0; @@ -34,7 +34,7 @@ pub const NVME_CSTS_RDY: u32 = 1 << 0; pub const 
NVME_AQA: usize = 0x0024; /* Admin Queue Attributes */ /* Admin Completion Queue Size */ -pub const NVME_AQA_ACQS: fn(u32) -> u32 = |_v| (((_v) - 1) << 16); +pub const NVME_AQA_ACQS: fn(u32) -> u32 = |_v| ((_v) - 1) << 16; /* Admin Submission Queue Size */ pub const NVME_AQA_ASQS: fn(u32) -> u32 = |_v| (_v) - 1; pub const NVME_ASQ: usize = 0x0028; /* Admin Submission Queue Base Address */ @@ -249,6 +249,7 @@ pub struct ComQueue { pub _phase: u16, } +#[allow(dead_code)] // may be used in the future, remove this line if used #[repr(C)] #[derive(Debug, Clone, Copy, Default)] pub struct SubQueueEntryIo { diff --git a/awkernel_drivers/src/pcie/virtio/virtio_net.rs b/awkernel_drivers/src/pcie/virtio/virtio_net.rs index bf34035e6..b2a26f7bd 100644 --- a/awkernel_drivers/src/pcie/virtio/virtio_net.rs +++ b/awkernel_drivers/src/pcie/virtio/virtio_net.rs @@ -1069,7 +1069,7 @@ impl VirtioNetInner { let should_kick = { let queue_idx = (vq_idx / 2) as usize; let mut node = MCSNode::new(); - let vq = if vq_idx % 2 == 0 { + let vq = if vq_idx.is_multiple_of(2) { &mut self.virtqueues[queue_idx].rx.lock(&mut node) } else { &mut self.virtqueues[queue_idx].tx.lock(&mut node) diff --git a/awkernel_lib/src/arch/rv32/frame_allocator.rs b/awkernel_lib/src/arch/rv32/frame_allocator.rs index e0466bede..09a524862 100644 --- a/awkernel_lib/src/arch/rv32/frame_allocator.rs +++ b/awkernel_lib/src/arch/rv32/frame_allocator.rs @@ -44,7 +44,7 @@ pub fn init_page_allocator() { } if let Some(allocator_ref) = allocator.as_mut() { allocator_ref.init( - PhyAddr::from_usize(ekernel as usize).ceil(), + PhyAddr::from_usize(ekernel as *const () as usize).ceil(), PhyAddr::from_usize(MEMORY_END as usize).floor(), ); } else { diff --git a/awkernel_lib/src/arch/rv32/vm.rs b/awkernel_lib/src/arch/rv32/vm.rs index 36369631c..48de7708a 100644 --- a/awkernel_lib/src/arch/rv32/vm.rs +++ b/awkernel_lib/src/arch/rv32/vm.rs @@ -201,8 +201,8 @@ impl MemorySet { } memory_set.push( MapArea::new( - 
VirtAddr::from_usize(stext as usize), - VirtAddr::from_usize(etext as usize), + VirtAddr::from_usize(stext as *const () as usize), + VirtAddr::from_usize(etext as *const () as usize), MapType::Identical, MapPermission::R | MapPermission::X, ), @@ -214,8 +214,8 @@ impl MemorySet { } memory_set.push( MapArea::new( - VirtAddr::from_usize(srodata as usize), - VirtAddr::from_usize(erodata as usize), + VirtAddr::from_usize(srodata as *const () as usize), + VirtAddr::from_usize(erodata as *const () as usize), MapType::Identical, MapPermission::R, ), @@ -227,8 +227,8 @@ impl MemorySet { } memory_set.push( MapArea::new( - VirtAddr::from_usize(sdata as usize), - VirtAddr::from_usize(edata as usize), + VirtAddr::from_usize(sdata as *const () as usize), + VirtAddr::from_usize(edata as *const () as usize), MapType::Identical, MapPermission::R | MapPermission::W, ), @@ -240,8 +240,8 @@ impl MemorySet { } memory_set.push( MapArea::new( - VirtAddr::from_usize(sbss_with_stack as usize), - VirtAddr::from_usize(ebss as usize), + VirtAddr::from_usize(sbss_with_stack as *const () as usize), + VirtAddr::from_usize(ebss as *const () as usize), MapType::Identical, MapPermission::R | MapPermission::W, ), @@ -253,7 +253,7 @@ impl MemorySet { } memory_set.push( MapArea::new( - VirtAddr::from_usize(ekernel as usize), + VirtAddr::from_usize(ekernel as *const () as usize), VirtAddr::from_usize(super::address::MEMORY_END as usize), MapType::Identical, MapPermission::R | MapPermission::W, diff --git a/awkernel_lib/src/arch/rv64.rs b/awkernel_lib/src/arch/rv64.rs index 9c373e8c8..8476abb98 100644 --- a/awkernel_lib/src/arch/rv64.rs +++ b/awkernel_lib/src/arch/rv64.rs @@ -117,20 +117,16 @@ pub fn detect_memory_size() -> Option { use crate::console::unsafe_puts; -/// Get the memory end address based on detected or default memory size +/// Get the memory end address based on detected memory size /// /// This function attempts to detect the actual available RAM and returns -/// an appropriate 
MEMORY_END value. Falls back to a safe default if detection fails. +/// an appropriate MEMORY_END value. Panics if detection fails. pub fn get_memory_end() -> u64 { - const DEFAULT_MEMORY_END: u64 = 0xB0000000; // 768MB default const DRAM_BASE: u64 = 0x80000000; if let Some(detected_size) = detect_memory_size() { - // Use detected size, but cap it at reasonable limits - let max_memory_end = DRAM_BASE + detected_size; - - // Don't exceed 2GB to be safe - let memory_end = max_memory_end.min(DRAM_BASE + 0x80000000); + // Use detected size directly + let memory_end = DRAM_BASE + detected_size; // Log the detection unsafe { @@ -143,21 +139,15 @@ pub fn get_memory_end() -> u64 { memory_end } else { - // Fall back to default - unsafe { - use crate::console::unsafe_puts; - unsafe_puts("Using default MEMORY_END (DTB not found)\r\n"); - } - DEFAULT_MEMORY_END + // Failed to detect memory - cannot continue boot safely + panic!("Failed to detect memory size from DTB or probing - cannot boot"); } } -/// Detect CPU count from device tree or return default +/// Detect CPU count from device tree /// -/// Returns the detected number of CPUs, or a safe default if detection fails +/// Returns the detected number of CPUs. Panics if detection fails. 
pub fn detect_cpu_count() -> usize { - const DEFAULT_CPU_COUNT: usize = 4; - unsafe { if let Some(dtb_addr) = boot_dtb_ptr() { if let Some(count) = fdt::detect_cpu_count_from_dtb(dtb_addr) { @@ -178,8 +168,7 @@ pub fn detect_cpu_count() -> usize { } } - // Fall back to default - unsafe_puts("Using default CPU count\r\n"); - DEFAULT_CPU_COUNT + // Failed to detect CPU count - cannot continue boot safely + panic!("Failed to detect CPU count from DTB - cannot boot"); } } diff --git a/awkernel_lib/src/arch/rv64/fdt.rs b/awkernel_lib/src/arch/rv64/fdt.rs index a1fc9443f..70e7b4b87 100644 --- a/awkernel_lib/src/arch/rv64/fdt.rs +++ b/awkernel_lib/src/arch/rv64/fdt.rs @@ -12,12 +12,6 @@ const RISCV_DRAM_BASE: u64 = 0x8000_0000; const RISCV_DRAM_MAX_BASE: u64 = 0x9000_0000; const MAX_REASONABLE_REGION_SIZE: u64 = 0x1_0000_0000; // 4GB -/// Convert big-endian u32 to native endian -#[inline] -fn be32_to_cpu(val: u32) -> u32 { - u32::from_be(val) -} - /// Memory region information #[derive(Debug, Clone, Copy)] pub struct MemoryRegion { @@ -79,7 +73,7 @@ pub unsafe fn probe_dtb_locations() -> Option { // Check if this looks like a valid DTB let magic_ptr = addr as *const u32; if let Some(magic) = unsafe { magic_ptr.as_ref() } { - if be32_to_cpu(*magic) == FDT_MAGIC { + if u32::from_be(*magic) == FDT_MAGIC { return Some(addr); } } diff --git a/awkernel_lib/src/arch/rv64/frame_allocator.rs b/awkernel_lib/src/arch/rv64/frame_allocator.rs index e0466bede..09a524862 100644 --- a/awkernel_lib/src/arch/rv64/frame_allocator.rs +++ b/awkernel_lib/src/arch/rv64/frame_allocator.rs @@ -44,7 +44,7 @@ pub fn init_page_allocator() { } if let Some(allocator_ref) = allocator.as_mut() { allocator_ref.init( - PhyAddr::from_usize(ekernel as usize).ceil(), + PhyAddr::from_usize(ekernel as *const () as usize).ceil(), PhyAddr::from_usize(MEMORY_END as usize).floor(), ); } else { diff --git a/awkernel_lib/src/arch/rv64/vm.rs b/awkernel_lib/src/arch/rv64/vm.rs index 9e4dd27f9..724d99946 100644 --- 
a/awkernel_lib/src/arch/rv64/vm.rs +++ b/awkernel_lib/src/arch/rv64/vm.rs @@ -7,6 +7,7 @@ use alloc::vec::Vec; use core::arch::asm; use core::sync::atomic::{AtomicUsize, Ordering}; +#[allow(dead_code)] extern "C" { fn stext(); fn etext(); @@ -154,7 +155,6 @@ impl MemorySet { } } - #[allow(dead_code)] pub fn get_heap_size(&self) -> Option { // Return heap size if calculated during init_kernel_heap let size = HEAP_SIZE.load(core::sync::atomic::Ordering::Relaxed); @@ -207,17 +207,14 @@ impl MemorySet { pub fn new_kernel() -> Self { let mut memory_set = Self::new_bare(); - // Detect actual memory end dynamically - let memory_end = super::get_memory_end(); - // Map kernel sections unsafe { unsafe_puts("Mapping kernel text section...\r\n"); } memory_set.push( MapArea::new( - VirtAddr::from_usize(stext as usize), - VirtAddr::from_usize(etext as usize), + VirtAddr::from_usize(stext as *const () as usize), + VirtAddr::from_usize(etext as *const () as usize), MapType::Identical, MapPermission::R | MapPermission::X, ), @@ -229,8 +226,8 @@ impl MemorySet { } memory_set.push( MapArea::new( - VirtAddr::from_usize(srodata as usize), - VirtAddr::from_usize(erodata as usize), + VirtAddr::from_usize(srodata as *const () as usize), + VirtAddr::from_usize(erodata as *const () as usize), MapType::Identical, MapPermission::R, ), @@ -242,8 +239,8 @@ impl MemorySet { } memory_set.push( MapArea::new( - VirtAddr::from_usize(sdata as usize), - VirtAddr::from_usize(edata as usize), + VirtAddr::from_usize(sdata as *const () as usize), + VirtAddr::from_usize(edata as *const () as usize), MapType::Identical, MapPermission::R | MapPermission::W, ), @@ -255,34 +252,15 @@ impl MemorySet { } memory_set.push( MapArea::new( - VirtAddr::from_usize(sbss_with_stack as usize), - VirtAddr::from_usize(ebss as usize), - MapType::Identical, - MapPermission::R | MapPermission::W, - ), - None, - ); - - unsafe { - unsafe_puts("Mapping physical memory...\r\n"); - unsafe_puts(" from ekernel: 0x"); - 
crate::console::unsafe_print_hex_u64(ekernel as usize as u64); - unsafe_puts("\r\n to MEMORY_END: 0x"); - crate::console::unsafe_print_hex_u64(memory_end as usize as u64); - unsafe_puts(" (dynamic)\r\n"); - } - // Map physical memory range for kernel use - // This creates an identity mapping for available RAM - memory_set.push( - MapArea::new( - VirtAddr::from_usize(ekernel as usize), - VirtAddr::from_usize(memory_end as usize), + VirtAddr::from_usize(sbss_with_stack as *const () as usize), + VirtAddr::from_usize(ebss as *const () as usize), MapType::Identical, MapPermission::R | MapPermission::W, ), None, ); + // Note: Heap mapping is created separately in calculate_dynamic_heap_size() memory_set } @@ -293,7 +271,7 @@ impl MemorySet { asm!("sfence.vma"); } } - #[allow(dead_code)] + pub fn translate(&mut self, vpn: VirtPageNum) -> Option { self.page_table.translate(vpn) } @@ -341,7 +319,7 @@ pub fn init_kernel_space() { *kernel_space = Some(memory_set); } -fn calculate_dynamic_heap_size(_memory_set: &mut MemorySet) -> usize { +fn calculate_dynamic_heap_size(memory_set: &mut MemorySet) -> usize { extern "C" { fn ekernel(); } @@ -351,7 +329,7 @@ fn calculate_dynamic_heap_size(_memory_set: &mut MemorySet) -> usize { let memory_end_addr = super::get_memory_end(); // Calculate available memory after kernel - let kernel_end = PhyAddr::from_usize(ekernel as usize); + let kernel_end = PhyAddr::from_usize(ekernel as *const () as usize); let memory_end = PhyAddr::from_usize(memory_end_addr as usize); // Start heap mapping after kernel, aligned to page boundary @@ -365,23 +343,41 @@ fn calculate_dynamic_heap_size(_memory_set: &mut MemorySet) -> usize { return 0; } - // Calculate maximum possible heap size - let max_heap_size = memory_end.as_usize() - heap_start_phy.as_usize(); + // Calculate heap region + let heap_start = heap_start_phy.as_usize(); + let heap_end = memory_end.as_usize(); + let heap_size = heap_end - heap_start; unsafe { unsafe_puts("Dynamic heap 
calculation:\r\n"); unsafe_puts(" Kernel end: 0x"); crate::console::unsafe_print_hex_u64(kernel_end.as_usize() as u64); unsafe_puts("\r\n Heap start: 0x"); - crate::console::unsafe_print_hex_u64(heap_start_phy.as_usize() as u64); - unsafe_puts("\r\n Memory end: 0x"); - crate::console::unsafe_print_hex_u64(memory_end.as_usize() as u64); - unsafe_puts("\r\n Max heap size: 0x"); - crate::console::unsafe_print_hex_u64(max_heap_size as u64); + crate::console::unsafe_print_hex_u64(heap_start as u64); + unsafe_puts("\r\n Heap end: 0x"); + crate::console::unsafe_print_hex_u64(heap_end as u64); + unsafe_puts("\r\n Heap size: 0x"); + crate::console::unsafe_print_hex_u64(heap_size as u64); unsafe_puts("\r\n"); + unsafe_puts("Creating dedicated heap mapping...\r\n"); } - max_heap_size + // Create dedicated heap mapping using memory_set + memory_set.push( + MapArea::new( + VirtAddr::from_usize(heap_start), + VirtAddr::from_usize(heap_end), + MapType::Identical, + MapPermission::R | MapPermission::W, + ), + None, + ); + + unsafe { + unsafe_puts("Heap mapping created successfully\r\n"); + } + + heap_size } pub fn activate_kernel_space() { @@ -391,7 +387,7 @@ pub fn activate_kernel_space() { kernel_space.activate(); } } -#[allow(dead_code)] + pub fn kernel_token() -> usize { let mut node = MCSNode::new(); let kernel_space = KERNEL_SPACE.lock(&mut node); @@ -403,9 +399,19 @@ pub fn kernel_token() -> usize { } pub fn get_heap_size() -> usize { + // Try to get heap size from kernel space MemorySet first + let mut node = MCSNode::new(); + let kernel_space = KERNEL_SPACE.lock(&mut node); + if let Some(ref space) = *kernel_space { + if let Some(size) = space.get_heap_size() { + return size; + } + } + + // Fallback: read from HEAP_SIZE atomic if not in MemorySet let heap_size = HEAP_SIZE.load(Ordering::Relaxed); if heap_size == 0 { - // Fallback calculation if not initialized yet + // Last resort: calculate from memory end extern "C" { fn ekernel(); } @@ -413,7 +419,7 @@ pub fn 
get_heap_size() -> usize { let memory_end_addr = super::get_memory_end(); - let kernel_end = PhyAddr::from_usize(ekernel as usize); + let kernel_end = PhyAddr::from_usize(ekernel as *const () as usize); let memory_end = PhyAddr::from_usize(memory_end_addr as usize); if memory_end > kernel_end { diff --git a/awkernel_lib/src/arch/x86_64/acpi/dmar.rs b/awkernel_lib/src/arch/x86_64/acpi/dmar.rs index 784258858..6bebb1833 100644 --- a/awkernel_lib/src/arch/x86_64/acpi/dmar.rs +++ b/awkernel_lib/src/arch/x86_64/acpi/dmar.rs @@ -113,7 +113,7 @@ impl<'a> Iterator for DmarEntryIter<'a> { } impl Dmar { - pub fn entries(&self) -> DmarEntryIter { + pub fn entries(&self) -> DmarEntryIter<'_> { DmarEntryIter { pointer: unsafe { (self as *const Dmar as *const u8).add(mem::size_of::()) }, remaining_length: self diff --git a/awkernel_lib/src/arch/x86_64/acpi/dmar/atsr.rs b/awkernel_lib/src/arch/x86_64/acpi/dmar/atsr.rs index 48ac5d22e..2614ff5a1 100644 --- a/awkernel_lib/src/arch/x86_64/acpi/dmar/atsr.rs +++ b/awkernel_lib/src/arch/x86_64/acpi/dmar/atsr.rs @@ -15,7 +15,7 @@ pub struct DmarAtsr { } impl DmarAtsr { - pub fn device_scopes(&self) -> DeviceScopeIter { + pub fn device_scopes(&self) -> DeviceScopeIter<'_> { DeviceScopeIter { pointer: unsafe { (self as *const DmarAtsr as *const u8).add(mem::size_of::()) diff --git a/awkernel_lib/src/arch/x86_64/acpi/dmar/dhrd.rs b/awkernel_lib/src/arch/x86_64/acpi/dmar/dhrd.rs index eefb37b61..3196a7db8 100644 --- a/awkernel_lib/src/arch/x86_64/acpi/dmar/dhrd.rs +++ b/awkernel_lib/src/arch/x86_64/acpi/dmar/dhrd.rs @@ -16,7 +16,7 @@ pub struct DmarDrhd { } impl DmarDrhd { - pub fn device_scopes(&self) -> DeviceScopeIter { + pub fn device_scopes(&self) -> DeviceScopeIter<'_> { DeviceScopeIter { pointer: unsafe { (self as *const DmarDrhd as *const u8).add(mem::size_of::()) diff --git a/awkernel_lib/src/arch/x86_64/acpi/dmar/rmrr.rs b/awkernel_lib/src/arch/x86_64/acpi/dmar/rmrr.rs index 26ef925d1..f2a0e30a9 100644 --- 
a/awkernel_lib/src/arch/x86_64/acpi/dmar/rmrr.rs +++ b/awkernel_lib/src/arch/x86_64/acpi/dmar/rmrr.rs @@ -17,7 +17,7 @@ pub struct DmarRmrr { } impl DmarRmrr { - pub fn device_scopes(&self) -> DeviceScopeIter { + pub fn device_scopes(&self) -> DeviceScopeIter<'_> { DeviceScopeIter { pointer: unsafe { (self as *const DmarRmrr as *const u8).add(mem::size_of::()) diff --git a/awkernel_lib/src/arch/x86_64/acpi/dmar/satc.rs b/awkernel_lib/src/arch/x86_64/acpi/dmar/satc.rs index a1b536272..18ec7ac0e 100644 --- a/awkernel_lib/src/arch/x86_64/acpi/dmar/satc.rs +++ b/awkernel_lib/src/arch/x86_64/acpi/dmar/satc.rs @@ -15,7 +15,7 @@ pub struct DmarSatc { } impl DmarSatc { - pub fn device_scopes(&self) -> DeviceScopeIter { + pub fn device_scopes(&self) -> DeviceScopeIter<'_> { DeviceScopeIter { pointer: unsafe { (self as *const DmarSatc as *const u8).add(mem::size_of::()) diff --git a/awkernel_lib/src/arch/x86_64/acpi/dmar/sidp.rs b/awkernel_lib/src/arch/x86_64/acpi/dmar/sidp.rs index b3544cbc7..620ba9036 100644 --- a/awkernel_lib/src/arch/x86_64/acpi/dmar/sidp.rs +++ b/awkernel_lib/src/arch/x86_64/acpi/dmar/sidp.rs @@ -14,7 +14,7 @@ pub struct DmarSidp { } impl DmarSidp { - pub fn device_scopes(&self) -> DeviceScopeIter { + pub fn device_scopes(&self) -> DeviceScopeIter<'_> { DeviceScopeIter { pointer: unsafe { (self as *const DmarSidp as *const u8).add(mem::size_of::()) diff --git a/awkernel_lib/src/arch/x86_64/acpi/srat.rs b/awkernel_lib/src/arch/x86_64/acpi/srat.rs index c96d46a0f..bd0167544 100644 --- a/awkernel_lib/src/arch/x86_64/acpi/srat.rs +++ b/awkernel_lib/src/arch/x86_64/acpi/srat.rs @@ -143,7 +143,7 @@ unsafe impl AcpiTable for Srat { } impl Srat { - pub fn entries(&self) -> SratEntryIter { + pub fn entries(&self) -> SratEntryIter<'_> { SratEntryIter { pointer: unsafe { (self as *const Srat as *const u8).add(mem::size_of::()) }, remaining_length: self diff --git a/awkernel_lib/src/context/x86_64.rs b/awkernel_lib/src/context/x86_64.rs index b3d8b010a..8d9a44254 
100644 --- a/awkernel_lib/src/context/x86_64.rs +++ b/awkernel_lib/src/context/x86_64.rs @@ -48,7 +48,7 @@ impl crate::context::Context for Context { self.r12 = arg as u64; self.r13 = entry as usize as u64; - let entry_point_addr = entry_point as usize as u64; + let entry_point_addr = entry_point as *const () as u64; unsafe { core::arch::asm!("mov {}, rsp", lateout(reg) self.r15); diff --git a/awkernel_lib/src/device_tree/prop.rs b/awkernel_lib/src/device_tree/prop.rs index 75bbf39de..0651d4433 100644 --- a/awkernel_lib/src/device_tree/prop.rs +++ b/awkernel_lib/src/device_tree/prop.rs @@ -274,7 +274,7 @@ impl<'a, A: Allocator + Clone> NodeProperty<'a, A> { } } _ => { - let a = raw_value.len() % BLOCK_SIZE == 0; + let a = raw_value.len().is_multiple_of(BLOCK_SIZE); let b = *safe_index(raw_value, 0)? != b'\0' && *safe_index(raw_value, raw_value.len() - 1)? == b'\0' && raw_value.is_ascii(); diff --git a/awkernel_lib/src/device_tree/traits.rs b/awkernel_lib/src/device_tree/traits.rs index c10cedb4f..b60827fe4 100644 --- a/awkernel_lib/src/device_tree/traits.rs +++ b/awkernel_lib/src/device_tree/traits.rs @@ -7,5 +7,5 @@ pub trait HasNamedChildNode { fn has_children(&self) -> bool; /// Look for a child by its name - fn find_child(&self, name: &str) -> Option<&DeviceTreeNode>; + fn find_child(&self, name: &str) -> Option<&DeviceTreeNode<'_, A>>; } diff --git a/awkernel_lib/src/file/fatfs/boot_sector.rs b/awkernel_lib/src/file/fatfs/boot_sector.rs index 4b3e0fb8d..976a5fb0a 100644 --- a/awkernel_lib/src/file/fatfs/boot_sector.rs +++ b/awkernel_lib/src/file/fatfs/boot_sector.rs @@ -228,7 +228,9 @@ impl BiosParameterBlock { ); return Err(Error::CorruptedFileSystem); } - if (u32::from(self.root_entries) * DIR_ENTRY_SIZE) % u32::from(self.bytes_per_sector) != 0 { + if !(u32::from(self.root_entries) * DIR_ENTRY_SIZE) + .is_multiple_of(u32::from(self.bytes_per_sector)) + { log::warn!("Root entries should fill sectors fully"); } Ok(()) diff --git 
a/awkernel_lib/src/file/fatfs/file.rs b/awkernel_lib/src/file/fatfs/file.rs index 495b04f6f..e40fa6a70 100644 --- a/awkernel_lib/src/file/fatfs/file.rs +++ b/awkernel_lib/src/file/fatfs/file.rs @@ -305,7 +305,7 @@ impl Read for File { @@ -391,7 +391,7 @@ impl Write for File fat.read_u16_le(), _ => fat.read_u8().map(u16::from), }; - let packed_val = match res { - Err(err) => return Err(err.into()), - Ok(n) => n, - }; + let packed_val = res?; let val = match cluster & 1 { 0 => packed_val & 0x0FFF, _ => (packed_val << 8) | (prev_packed_val >> 12), diff --git a/awkernel_lib/src/heap.rs b/awkernel_lib/src/heap.rs index 513d0e48d..c3b8df9f4 100644 --- a/awkernel_lib/src/heap.rs +++ b/awkernel_lib/src/heap.rs @@ -255,7 +255,7 @@ impl Talloc { /// Save the configuration and it will be restored when dropping `Guard`. #[inline] - pub fn save(&self) -> Guard { + pub fn save(&self) -> Guard<'_> { let (index, cpu_id) = Self::cpu_index(); let mask = 1 << cpu_id; let flag = mask & self.flags[index].load(Ordering::Relaxed); diff --git a/awkernel_lib/src/net/ether.rs b/awkernel_lib/src/net/ether.rs index 49558c15b..33b5bbea1 100644 --- a/awkernel_lib/src/net/ether.rs +++ b/awkernel_lib/src/net/ether.rs @@ -59,7 +59,7 @@ pub struct EtherExtracted<'a> { pub transport: TransportHdr<'a>, } -pub fn extract_headers(buf: &[u8]) -> Result { +pub fn extract_headers(buf: &[u8]) -> Result, &'static str> { let mut remain = buf.len(); if core::mem::size_of::() > remain { diff --git a/awkernel_lib/src/unwind.rs b/awkernel_lib/src/unwind.rs index 027d295a4..5acd92a50 100644 --- a/awkernel_lib/src/unwind.rs +++ b/awkernel_lib/src/unwind.rs @@ -1,7 +1,7 @@ use core::any::Any; #[cfg(feature = "std")] -pub fn catch_unwind(f: F) -> Result> +pub fn catch_unwind(f: F) -> Result> where F: FnOnce() -> R, { @@ -14,7 +14,7 @@ where use alloc::boxed::Box; #[cfg(not(feature = "std"))] -pub fn catch_unwind(f: F) -> Result> +pub fn catch_unwind(f: F) -> Result> where F: FnOnce() -> R, { diff --git 
a/docker/Dockerfile b/docker/Dockerfile index 84115f27e..dc551b7c2 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -16,8 +16,8 @@ RUN . /usr/local/cargo/env && \ cargo install mdbook-mermaid RUN . /usr/local/cargo/env && \ - rustup toolchain install nightly-2025-04-29 && \ - rustup default nightly-2025-04-29 && \ + rustup toolchain install nightly-2025-11-16 && \ + rustup default nightly-2025-11-16 && \ rustup component add rust-src llvm-tools-preview && \ rustup target add x86_64-unknown-none aarch64-unknown-none riscv32imac-unknown-none-elf diff --git a/kernel/src/arch/aarch64/types.rs b/kernel/src/arch/aarch64/types.rs index 476b8ddf2..816944612 100644 --- a/kernel/src/arch/aarch64/types.rs +++ b/kernel/src/arch/aarch64/types.rs @@ -1,7 +1,7 @@ use core::fmt; pub fn ceiling_div(size: usize, units: usize) -> usize { - (size / units) + (size % units != 0) as usize + (size / units) + (!size.is_multiple_of(units) as usize) } pub fn align_up(length: usize, alignment: usize) -> usize { diff --git a/kernel/src/arch/x86_64/kernel_main.rs b/kernel/src/arch/x86_64/kernel_main.rs index 5f03e8bef..17a6803d5 100644 --- a/kernel/src/arch/x86_64/kernel_main.rs +++ b/kernel/src/arch/x86_64/kernel_main.rs @@ -438,9 +438,12 @@ fn write_boot_images(offset: u64, mpboot_start: u64) { // Write non_primary_kernel_main. log::info!( "write the kernel entry of 0x{:08x} to 0x{main_addr:08x}", - non_primary_kernel_main as usize + non_primary_kernel_main as *const () as usize + ); + write_volatile( + main_addr.as_mut_ptr(), + non_primary_kernel_main as *const () as usize, ); - write_volatile(main_addr.as_mut_ptr(), non_primary_kernel_main as usize); // Write CR3. 
log::info!("write CR3 of 0x{cr3:08x} to 0x{cr3_phy_addr:08x}"); diff --git a/smoltcp/src/storage/assembler.rs b/smoltcp/src/storage/assembler.rs index 5a4dff13d..088d21c8d 100644 --- a/smoltcp/src/storage/assembler.rs +++ b/smoltcp/src/storage/assembler.rs @@ -327,7 +327,7 @@ impl Assembler { /// |--- 100 ---|--- 200 ---|--- 100 ---| /// /// An offset of 1500 would return the ranges: ``(1500, 1600), (1800, 1900)`` - pub fn iter_data(&self, first_offset: usize) -> AssemblerIter { + pub fn iter_data(&self, first_offset: usize) -> AssemblerIter<'_> { AssemblerIter::new(self, first_offset) } } diff --git a/targets/aarch64-kernel.json b/targets/aarch64-kernel.json index 7c1b6525f..54a5b0821 100644 --- a/targets/aarch64-kernel.json +++ b/targets/aarch64-kernel.json @@ -24,8 +24,8 @@ "max-atomic-width": 128, "os": "none", "relocation-model": "static", - "target-c-int-width": "32", + "target-c-int-width": 32, "target-endian": "little", - "target-pointer-width": "64", + "target-pointer-width": 64, "vendor": "TIER IV, inc." } \ No newline at end of file diff --git a/targets/x86_64-kernel.json b/targets/x86_64-kernel.json index ede73dbea..03a5aab02 100644 --- a/targets/x86_64-kernel.json +++ b/targets/x86_64-kernel.json @@ -3,8 +3,8 @@ "data-layout": "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", "arch": "x86_64", "target-endian": "little", - "target-pointer-width": "64", - "target-c-int-width": "32", + "target-pointer-width": 64, + "target-c-int-width": 32, "os": "none", "executables": true, "linker-flavor": "ld.lld",