179 changes: 179 additions & 0 deletions include/kernel/arch/i686/cpu.h
@@ -3,9 +3,25 @@

#include <kernel/types.h>

#include <utils/bits.h>
#include <utils/compiler.h>
#include <utils/map.h>

#define CPU_CACHE_ALIGN 64 /* 64B L1 cache lines */

#define X86_FEATURE_WORDS 2 /* Number of 32-bit feature words cached from CPUID. */

struct x86_cpuinfo {
const char *vendor;
u32 features[X86_FEATURE_WORDS];
};

extern struct x86_cpuinfo cpuinfo;

/*
* Register read/write wrappers.
*/

// Read from a 32-bit register
#define READ_REGISTER_OPS(_reg) \
static ALWAYS_INLINE u32 read_##_reg() \
@@ -32,6 +48,20 @@ MAP(WRITE_REGISTER_OPS, CPU_32BIT_REGISTERS)
#undef WRITE_REGISTER_OPS
#undef READ_REGISTER_OPS

/*
* CPU control registers.
*/

#define CR0_PG BIT(31) /* Paging enable */
#define CR0_CD BIT(30) /* Cache disable */
#define CR0_NW BIT(29) /* Not write-through */

#define CR4_PAE BIT(5) /* PAE paging enable */
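
A hedged sketch of how these bits combine with the generated register accessors above; it assumes cr0 is listed in CPU_32BIT_REGISTERS so that read_cr0()/write_cr0() exist, and that a page directory has already been loaded:

/* Illustrative sketch only (not part of this change). */
static ALWAYS_INLINE void paging_enable(void)
{
    u32 cr0 = read_cr0();

    cr0 &= ~(CR0_CD | CR0_NW); /* keep caching enabled, write-back */
    cr0 |= CR0_PG;             /* turn paging on */
    write_cr0(cr0);
}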

/*
* ASM instruction wrappers.
*/

/* Write a single byte at a given I/O port address. */
static ALWAYS_INLINE void outb(uint16_t port, uint8_t val)
{
@@ -109,4 +139,153 @@ static ALWAYS_INLINE void insl(uint16_t port, uint32_t *buffer, size_t size)
: "memory");
}

#include <cpuid.h> /* provided by GCC */

#define cpuid(leaf, eax, ebx, ecx, edx) __get_cpuid(leaf, eax, ebx, ecx, edx)

/*
* Define quick helper functions for CPUID calls that only need to access one
* of the result registers.
*/
#define CPUID_FUNCTION(_reg) \
static inline uint32_t cpuid_##_reg(uint32_t leaf) \
{ \
uint32_t eax; \
uint32_t ebx; \
uint32_t ecx; \
uint32_t edx; \
\
cpuid(leaf, &eax, &ebx, &ecx, &edx); \
return _reg; \
}

CPUID_FUNCTION(eax)
CPUID_FUNCTION(ebx)
CPUID_FUNCTION(ecx)
CPUID_FUNCTION(edx)

#undef CPUID_FUNCTION

#define CPUID_LEAF_GETVENDOR 0
#define CPUID_LEAF_GETFEATURES 1
#define CPUID_LEAF_GETFEATURES_EXT 7

/* Vendor codes used by popular hypervisors. */
#define signature_QEMU_ebx 0x47435443 // [TCGT]CGTCGTCG
#define signature_KVM_ebx 0x4D564B20 // [ KVM]KVMKVM
#define signature_VMWARE_ebx 0x61774D56 // [VMwa]reVMware
#define signature_VIRTUALBOX_ebx 0x786F4256 // [VBox]VBoxVBox
#define signature_XEN_ebx 0x566E6558 // [XenV]MMXenVMM
#define signature_HYPERV_ebx 0x7263694D // [Micr]osoft Hv
#define signature_PARALLELS_ebx 0x6C727020 // [ prl] hyperv
#define signature_PARALLELS_ALT_ebx 0x6570726C // [lrpe]pyh vr
#define signature_BHYVE_ebx 0x76796862 // [bhyv]e bhyve
#define signature_QNX_ebx 0x20584E51 // [ QNX]QVMBSQG
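
For reference, leaf CPUID_LEAF_GETVENDOR returns the 12-character vendor string in EBX, EDX and ECX (in that order). A minimal sketch, assuming a kernel memcpy() is available; this helper is illustrative and not part of the change:

/* Illustrative sketch only: read the vendor string, e.g. "GenuineIntel". */
static inline void cpuid_read_vendor(char vendor[13])
{
    uint32_t eax, ebx, ecx, edx;

    cpuid(CPUID_LEAF_GETVENDOR, &eax, &ebx, &ecx, &edx);
    memcpy(&vendor[0], &ebx, 4);
    memcpy(&vendor[4], &edx, 4);
    memcpy(&vendor[8], &ecx, 4);
    vendor[12] = '\0';
}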

#define X86_FEATURES(F) \
\
/* Features in %ecx for leaf 1 */ \
F(SSE3, 0, 0), \
F(PCLMUL, 0, 1), \
F(DTES64, 0, 2), \
F(MONITOR, 0, 3), \
F(DSCPL, 0, 4), \
F(VMX, 0, 5), \
F(SMX, 0, 6), \
F(EIST, 0, 7), \
F(TM2, 0, 8), \
F(SSSE3, 0, 9), \
F(CNXTID, 0, 10), \
F(FMA, 0, 12), \
F(CMPXCHG16B, 0, 13), \
F(xTPR, 0, 14), \
F(PDCM, 0, 15), \
F(PCID, 0, 17), \
F(DCA, 0, 18), \
F(SSE41, 0, 19), \
F(SSE42, 0, 20), \
F(x2APIC, 0, 21), \
F(MOVBE, 0, 22), \
F(POPCNT, 0, 23), \
F(TSCDeadline, 0, 24), \
F(AES, 0, 25), \
F(XSAVE, 0, 26), \
F(OSXSAVE, 0, 27), \
F(AVX, 0, 28), \
F(F16C, 0, 29), \
F(RDRND, 0, 30), \
\
/* Features in %edx for leaf 1 */ \
F(FPU, 1, 0), \
F(VME, 1, 1), \
F(DE, 1, 2), \
F(PSE, 1, 3), \
F(TSC, 1, 4), \
F(MSR, 1, 5), \
F(PAE, 1, 6), \
F(MCE, 1, 7), \
F(CMPXCHG8B, 1, 8), \
F(APIC, 1, 9), \
F(SEP, 1, 11), \
F(MTRR, 1, 12), \
F(PGE, 1, 13), \
F(MCA, 1, 14), \
F(CMOV, 1, 15), \
F(PAT, 1, 16), \
F(PSE36, 1, 17), \
F(PSN, 1, 18), \
F(CLFSH, 1, 19), \
F(DS, 1, 21), \
F(ACPI, 1, 22), \
F(MMX, 1, 23), \
F(FXSAVE, 1, 24), \
F(SSE, 1, 25), \
F(SSE2, 1, 26), \
F(SS, 1, 27), \
F(HTT, 1, 28), \
F(TM, 1, 29), \
F(PBE, 1, 31), \

#define X86_FEATURE_NAME(_feature) X86_FEATURE_##_feature
#define X86_FEATURE_VAL(_word, _bit) ((_word << X86_FEATURE_WORD_OFF) | (_bit & 0xff))
#define X86_FEATURE_WORD_OFF 8

enum x86_cpu_feature {
#define DEFINE_X86_FEATURE(_name, _word, _bit) \
X86_FEATURE_NAME(_name) = X86_FEATURE_VAL(_word, _bit)
X86_FEATURES(DEFINE_X86_FEATURE)
#undef DEFINE_X86_FEATURE
};

static inline bool cpu_test_feature(enum x86_cpu_feature feature)
{
int word = (feature >> X86_FEATURE_WORD_OFF);
int bit = feature & (BIT(X86_FEATURE_WORD_OFF) - 1);

return BIT_READ(cpuinfo.features[word], bit);
}

#define cpu_has_feature(_feature) cpu_test_feature(X86_FEATURE_NAME(_feature))
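
As a worked example of the encoding above: X86_FEATURE_PAE is X86_FEATURE_VAL(1, 6) == 0x106, so cpu_test_feature() checks bit 6 of cpuinfo.features[1]. A hedged usage sketch (cpu_supports_pat() is a hypothetical helper, not part of this change):

/* Illustrative sketch only: gate PAT programming on detected features. */
static inline bool cpu_supports_pat(void)
{
    /* Leaf 1 %edx: bit 5 is MSR support, bit 16 is PAT support. */
    return cpu_has_feature(MSR) && cpu_has_feature(PAT);
}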

enum x86_msr {
MSR_PAT = 0x277,
};

/* Read from a model-specific register. */
static inline uint64_t rdmsr(uint32_t msr)
{
uint32_t eax;
uint32_t edx;
ASM("rdmsr" : "=a"(eax), "=d"(edx) : "c"(msr));
return (((uint64_t)edx) << 32) | eax;
}

/* Write into a model-specific register. */
static inline void wrmsr(uint32_t msr, uint64_t val)
{
uint32_t eax = val;
uint32_t edx = val >> 32;
ASM("wrmsr" : : "a"(eax), "d"(edx), "c"(msr));
}
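
A hedged sketch of a read-modify-write sequence on the PAT MSR. The helper is hypothetical; it relies on the architectural layout of eight one-byte PAT entries, with 0x01 encoding write-combining:

/* Illustrative sketch only: program PAT entry 1 as write-combining. */
static inline void pat_set_entry1_wc(void)
{
    uint64_t pat = rdmsr(MSR_PAT);

    pat &= ~(0xffULL << 8); /* clear entry 1 (bits 8-15) */
    pat |= 0x01ULL << 8;    /* 0x01 == write-combining */
    wrmsr(MSR_PAT, pat);
}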

#endif /* KERNEL_I686_UTILS_CPU_OPS_H */
36 changes: 33 additions & 3 deletions include/kernel/mmu.h
@@ -29,6 +29,8 @@
#include <kernel/error.h>
#include <kernel/types.h>

#include <utils/bits.h>

#include <stdbool.h>

/**
@@ -44,6 +46,16 @@ typedef enum mmu_prot {
PROT_KERNEL = 0x8, /*!< Pages should be accessible only from the kernel */
} mmu_prot;

/** @enum mmu_caching_policy
* @brief Caching policies.
*/
typedef enum mmu_caching_policy {
POLICY_UC = BIT(6), /*!< Uncacheable memory. */
POLICY_WC = BIT(7), /*!< Write-combining memory. */
POLICY_WT = BIT(8), /*!< Write-through memory. */
POLICY_WB = BIT(9), /*!< Write-back memory. */
} mmu_policy_t;

/** Initialize the MMU's paging system
*
* This function is responsible for setting any required bit inside the CPU's
@@ -87,7 +99,7 @@ void mmu_load(paddr_t mmu);
* @param virt The virtual address
* @param physical Its physical equivalent
* @param prot Protection rule in use for this page.
* A combination of @ref mmu_prot flags.
* A combination of @ref mmu_prot and @ref mmu_caching_policy flags.
*
* @return False if the address was already mapped before
*/
@@ -100,7 +112,7 @@ bool mmu_map(vaddr_t virt, paddr_t physical, int prot);
* @param physical Its physical equivalent
* @param size The size of the region to map
* @param prot Protection rule in use for this page.
* A combination of @ref mmu_prot flags.
* A combination of @ref mmu_prot and @ref mmu_caching_policy flags.
*
* @return False if the address was already mapped before
*/
@@ -136,7 +148,7 @@ void mmu_unmap_range(vaddr_t start, vaddr_t end);
* @param start the starting page of the address range
* @param end the ending address of the address range
* @param prot Protection rule in use for this page.
* A combination of @ref mmu_prot flags.
* A combination of @ref mmu_prot and @ref mmu_caching_policy flags.
*/
void mmu_identity_map(paddr_t start, paddr_t end, int prot);

@@ -151,4 +163,22 @@ static inline bool mmu_is_mapped(vaddr_t addr)
return !IS_ERR(mmu_find_physical(addr));
}

/** Configure the caching policy in effect when accessing a page.
*
* @param vaddr The page's virtual address
* @param policy Caching policy applied to this page.
* A combination of @ref mmu_caching_policy flags.
*/
error_t mmu_set_policy(vaddr_t vaddr, mmu_policy_t policy);

/** Configure the caching policy in effect when accessing a range of pages.
*
* @param range_start The virtual address of the first page in the address range
* @param range_size The size of the address range
* @param policy Caching policy applied to each page in the range.
* A combination of @ref mmu_caching_policy flags.
*/
error_t mmu_set_policy_range(vaddr_t range_start, size_t range_size,
mmu_policy_t policy);
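
A hedged usage sketch; the framebuffer mapping here is a placeholder, not something introduced by this change:

/* Illustrative sketch only: batch pixel writes by marking an already
 * mapped framebuffer as write-combining. */
static inline error_t framebuffer_set_wc(vaddr_t fb_base, size_t fb_size)
{
    return mmu_set_policy_range(fb_base, fb_size, POLICY_WC);
}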

#endif /* KERNEL_MMU_H */
21 changes: 21 additions & 0 deletions include/kernel/vm.h
@@ -62,6 +62,12 @@ typedef enum vm_flags {
VM_KERNEL = BIT(3), /*!< Pages should only be accessible from kernel */
VM_CLEAR = BIT(4), /*!< Page content should be reset when allocating */
VM_FIXED = BIT(5), /*!< Start address in vm_alloc_at() is not a hint */

/* Caching policies. */
VM_CACHE_UC = BIT(6), /*!< Uncacheable. */
VM_CACHE_WC = BIT(7), /*!< Write-combining. */
VM_CACHE_WT = BIT(8), /*!< Write-through. */
VM_CACHE_WB = BIT(9), /*!< Write-back (default). */
} vm_flags_t;

#define VM_KERNEL_RO (VM_KERNEL | VM_READ)
@@ -72,6 +78,14 @@ typedef enum vm_flags {
#define VM_USER_WO (VM_WRITE)
#define VM_USER_RW (VM_READ | VM_WRITE)

/*
* Memory-mapped I/O should be mapped as uncacheable since reads and writes can
* have side effects and should not be combined or delayed.
*/
#define VM_IOMEM (VM_KERNEL_RW | VM_CACHE_UC)

#define VM_CACHE_MASK (VM_CACHE_UC | VM_CACHE_WT | VM_CACHE_WB | VM_CACHE_WC)
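
A hedged sketch of how the mask might be used to pull the caching-policy bits out of a flag word; vm_cache_policy() is a hypothetical helper, not part of this change:

/* Illustrative sketch only: extract the caching policy from vm flags. */
static inline vm_flags_t vm_cache_policy(vm_flags_t flags)
{
    vm_flags_t policy = flags & VM_CACHE_MASK;

    return policy ? policy : VM_CACHE_WB; /* write-back is the default */
}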

/** Segment driver
*
* There exists different types of memory segments. A segment driver defines
@@ -136,6 +150,10 @@ struct vm_segment_driver {

/** Map this segment onto a physical address. */
error_t (*vm_map)(struct address_space *, struct vm_segment *, vm_flags_t);

/** Configure the effective caching policy for a segment. */
error_t (*vm_set_policy)(struct address_space *, struct vm_segment *,
vm_flags_t policy);
};

/** Kernel-only address-space.
@@ -233,4 +251,7 @@ struct vm_segment *vm_find(const struct address_space *, void *);
*/
error_t vm_map(struct address_space *, void *);

/** Change the effective caching policy for an address inside a segment. */
error_t vm_set_policy(struct address_space *, void *, vm_flags_t policy);
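
A hedged usage sketch; the address-space pointer and mapping address are placeholders supplied by the caller:

/* Illustrative sketch only: downgrade one mapping to write-combining
 * after it has been mapped with the default write-back policy. */
static inline error_t vm_make_wc(struct address_space *as, void *addr)
{
    return vm_set_policy(as, addr, VM_CACHE_WC);
}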

#endif /* KERNEL_VM_H */
1 change: 1 addition & 0 deletions kernel/arch/i686/build.mk
@@ -5,6 +5,7 @@ QEMU := qemu-system-i386

KERNEL_ARCH_SRCS := \
crt0.S \
cpu.c \
gdt.S \
gdt.c \
interrupts.c \