Binary file added docs/specs/bonwick_slab.pdf
Binary file not shown.
57 changes: 50 additions & 7 deletions include/kernel/kmalloc.h
@@ -51,6 +51,8 @@

#include <kernel/types.h>

#include <utils/compiler.h>

/**
* @enum kmalloc_flags
* @brief Feature flags passed to the kmalloc function family
@@ -59,17 +61,56 @@ typedef enum kmalloc_flags {
KMALLOC_KERNEL = 0, /* Default allocation flags. */
} kmalloc_flags;

#define KMALLOC_CACHE_MIN_SIZE 16
#define KMALLOC_CACHE_MAX_SIZE 16384
#define KMALLOC_CACHE_COUNT 11

/*
 * @return The index of the smallest cache that can hold an object of @c size
 *         bytes, or -1 if the size exceeds KMALLOC_CACHE_MAX_SIZE.
 *
 * This function is inlined and marked 'const' so that it is optimized out by
 * the compiler when passing a value known at compile time.
 */
static ALWAYS_INLINE __attribute__((const)) int kmalloc_cache_index(size_t size)
{
if (size <= 16) return 0;
if (size <= 32) return 1;
if (size <= 64) return 2;
if (size <= 128) return 3;
if (size <= 256) return 4;
if (size <= 512) return 5;
if (size <= 1024) return 6;
if (size <= 2048) return 7;
if (size <= 4096) return 8;
if (size <= 8192) return 9;
if (size <= 16384) return 10;

return -1;
}

/** Allocate kernel memory from one of the global memory caches.
 *
 * @param cache_index Index of the cache to allocate an object from.
 * @param flags Allocation flags, a combination of @ref kmalloc_flags.
 *
 * @see kmalloc_cache_index() to know which cache_index to use.
 *
 * @return The starting address of the newly allocated object.
 */
void *kmalloc_from_cache(int cache_index, int flags);

/** Allocate @c size bytes from the smallest global cache able to hold them.
 *
 * An area allocated using this function MUST be freed manually.
 *
 * @return The starting address of the newly allocated area, or NULL if
 *         @c size is larger than KMALLOC_CACHE_MAX_SIZE.
 */
static ALWAYS_INLINE void *kmalloc(size_t size, int flags)
{
int cache_index;

cache_index = kmalloc_cache_index(size);
if (cache_index < 0)
return NULL;

return kmalloc_from_cache(cache_index, flags);
}


/**
* Allocate @c nmemb members of @c size bytes and initialize its content to 0.
@@ -127,4 +168,6 @@ void *kmalloc_dma(size_t size);
/** Free a buffer allocated through @ref kmalloc_dma */
void kfree_dma(void *dma_ptr);

/** Initialize the global kmalloc memory caches. */
void kmalloc_api_init(void);

/** @} */
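A usage sketch for the new fast path (the `struct kmalloc_demo` type and its helper are hypothetical, not part of this change): when the requested size is a compile-time constant, `kmalloc_cache_index()` folds to a constant and the inline `kmalloc()` reduces to a direct `kmalloc_from_cache()` call.

```c
#include <kernel/kmalloc.h>

struct kmalloc_demo {
    int id;
    char name[24];
};

static struct kmalloc_demo *kmalloc_demo_new(void)
{
    /* sizeof(struct kmalloc_demo) is 28, so kmalloc_cache_index() folds to 1
     * (the 32-byte cache) and no runtime size lookup is emitted. */
    return kmalloc(sizeof(struct kmalloc_demo), KMALLOC_KERNEL);
}
```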
16 changes: 16 additions & 0 deletions include/kernel/memory.h
@@ -170,4 +170,20 @@ extern u32 _kernel_code_end;
#define PAGE_ALIGN_UP(_ptr) align_up_ptr(_ptr, PAGE_SIZE)
#define PAGE_ALIGNED(_ptr) is_aligned_ptr(_ptr, PAGE_SIZE)

#ifndef __ASSEMBLER__

struct multiboot_info;

/**
* Initialize the memory subsystem.
*
* After this function returns it is safe to call the different memory
* allocation APIs (vm_*, kmalloc).
*
* @param mbt System information structure passed by the bootloader.
*/
void memory_init(struct multiboot_info *);

#endif /* !__ASSEMBLER__ */

#endif /* KERNEL_MEMORY_H */
72 changes: 72 additions & 0 deletions include/kernel/memory/slab.h
@@ -0,0 +1,72 @@
#ifndef KERNEL_MEMORY_SLAB_H
#define KERNEL_MEMORY_SLAB_H

#include <kernel/atomic.h>
#include <kernel/spinlock.h>
#include <kernel/types.h>

#include <libalgo/linked_list.h>
#include <utils/macro.h>

/*
 * A cache of same-sized objects, backed by slabs kept on three lists
 * depending on how full they are (cf. docs/specs/bonwick_slab.pdf).
 */
struct kmem_cache {
llist_t slabs_full;
llist_t slabs_partial;
llist_t slabs_free;
spinlock_t lock;

size_t obj_size;
int obj_align;
size_t obj_real_size;
unsigned int coloring_offset_next;

void (*constructor)(void *data);
void (*destructor)(void *data);

const char *name;
int flags;
};

/*
 * A single slab: a contiguous block of memory carved into objects of the
 * owning cache's size, with a free list of buffer controls (kmem_bufctl)
 * for the unused ones.
 */
struct kmem_slab {
void *page;
struct kmem_bufctl *free;
struct kmem_cache *cache;
atomic_t refcount;
unsigned int coloring_offset;
node_t this;
};

/** Create a new cache. */
struct kmem_cache *kmem_cache_create(const char *name, size_t obj_size,
int obj_align, void (*constructor)(void *),
void (*destructor)(void *));

/** Allocate an object from a cache.
 *
 * @param cache The cache to allocate the object from
 * @param flags Combination of allocation flags
 */
void *kmem_cache_alloc(struct kmem_cache *cache, int flags);

/** Free a cache and all its slabs.
 *
 * NOTE: The caller must ensure that no objects allocated from this cache
 * are still in use when calling this function.
 */
void kmem_cache_destroy(struct kmem_cache *cache);

/** Free an object allocated by a cache.
*
* @param cache The cache the object was allocated from
* @param obj The object to free
*/
void kmem_cache_free(struct kmem_cache *cache, void *obj);

/** Initialize the slab allocator. */
int kmem_cache_api_init(void);

#endif /* KERNEL_MEMORY_SLAB_H */
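A minimal usage sketch of this API (the `inode_demo` cache, its constructor, and the call sites are hypothetical, not taken from this change):

```c
#include <kernel/memory/slab.h>

struct inode_demo {
    u32 ino;
    void *private;
};

/* Put every object into a known initial state. */
static void inode_demo_ctor(void *data)
{
    struct inode_demo *inode = data;

    inode->ino = 0;
    inode->private = NULL;
}

static struct kmem_cache *inode_cache;

static void inode_demo_init(void)
{
    inode_cache = kmem_cache_create("inode_demo", sizeof(struct inode_demo),
                                    sizeof(void *), inode_demo_ctor, NULL);
}

static struct inode_demo *inode_demo_get(void)
{
    return kmem_cache_alloc(inode_cache, 0); /* default allocation flags */
}

static void inode_demo_put(struct inode_demo *inode)
{
    kmem_cache_free(inode_cache, inode);
}
```

Keeping one cache per object type is what lets the constructor/destructor pair amortize object initialization across allocations, the core idea of Bonwick's design (docs/specs/bonwick_slab.pdf).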
10 changes: 10 additions & 0 deletions include/kernel/pmm.h
@@ -63,6 +63,7 @@
enum page_flags {
PAGE_AVAILABLE = BIT(0), ///< This page has not been allocated
PAGE_COW = BIT(1), ///< Currently used in a CoW mapping
PAGE_SLAB = BIT(2), ///< Page allocated by the slab allocator
};

/** Represents a physical pageframe
@@ -71,6 +72,15 @@ enum page_flags {
struct page {
uint8_t flags; ///< Combination of @ref page_flags
uint8_t refcount; ///< How many processes reference that page

union {
/*
* Data for pages allocated by the slab allocator (flags & PAGE_SLAB).
*/
struct {
struct kmem_cache *cache;
} slab;
};
};
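This union is what lets the allocator walk from an object back to its owning cache: a slab page is flagged `PAGE_SLAB` and keeps a pointer to the `kmem_cache` it belongs to. A sketch of that lookup (the `page_from_address()` helper is hypothetical; this diff does not show how a `struct page` is found from an address):

```c
/* Hypothetical helper: resolve the struct page describing a kernel address. */
extern struct page *page_from_address(void *addr);

static struct kmem_cache *owning_cache(void *obj)
{
    struct page *page = page_from_address(obj);

    /* Only slab pages carry a back-pointer to their cache. */
    if (!(page->flags & PAGE_SLAB))
        return NULL;

    return page->slab.cache;
}
```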

/**
6 changes: 6 additions & 0 deletions include/kernel/spinlock.h
@@ -92,6 +92,12 @@ static ALWAYS_INLINE void spinlock_release(spinlock_t *lock)
__atomic_clear(&lock->locked, __ATOMIC_RELEASE);
}

/** Check whether a lock is currently held by someone. */
static ALWAYS_INLINE bool spinlock_is_held(const spinlock_t *lock)
{
return __atomic_load_n(&lock->locked, __ATOMIC_ACQUIRE);
}
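Its main use is sanity-checking code that expects its caller to already hold a lock, for example (hypothetical helper, not part of this change):

```c
#include <kernel/logger.h>
#include <kernel/memory/slab.h>
#include <kernel/spinlock.h>

/* Hypothetical helper: must only be called with the cache lock already held. */
static void kmem_cache_grow_locked(struct kmem_cache *cache)
{
    /* Cheap guard against callers that forgot to take cache->lock. */
    if (!spinlock_is_held(&cache->lock))
        log_err("kmem_cache_grow_locked() called without the cache lock");

    /* ... allocate a new slab and add it to cache->slabs_free ... */
}
```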

typedef struct {
spinlock_t *lock;
bool done;
9 changes: 9 additions & 0 deletions include/kernel/vm.h
@@ -133,6 +133,9 @@ struct vm_segment_driver {
* located.
*/
error_t (*vm_fault)(struct address_space *, struct vm_segment *);

/** Map this segment onto a physical address. */
error_t (*vm_map)(struct address_space *, struct vm_segment *, vm_flags_t);
};

/** Kernel-only address-space.
@@ -224,4 +227,10 @@ void vm_free(struct address_space *, void *);
/** Find the address space's segment that contains the given address */
struct vm_segment *vm_find(const struct address_space *, void *);

/** Map a virtual address onto a physical page.
 *
 * The address must be page-aligned and belong to an existing segment of the
 * address space.
 *
 * @return E_EXIST if the virtual address already had a mapping.
 */
error_t vm_map(struct address_space *, void *);

#endif /* KERNEL_VM_H */
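A sketch of how a caller might use it (hypothetical call site; `vaddr` is assumed to be a page-aligned address inside an already-reserved segment, and `kernel_address_space` is the kernel address-space object referenced elsewhere in this change):

```c
#include <kernel/vm.h>

/* Eagerly back 'vaddr' with a physical page instead of waiting for the first
 * page fault on it. E_EXIST is not treated as an error: it only means the
 * address was already mapped. */
static error_t prefault_kernel_page(void *vaddr)
{
    error_t err = vm_map(&kernel_address_space, vaddr);

    if (err == E_EXIST)
        return E_SUCCESS;

    return err;
}
```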
3 changes: 3 additions & 0 deletions include/utils/math.h
@@ -38,6 +38,9 @@
_tmp < 0 ? -_tmp : _tmp; \
})

/** @return whether a value is a power of two */
#define is_power_of_2(_x) (((_x) != 0) && (((_x) & ((_x)-1)) == 0))

#define __align_mask(_value, _power) ((__typeof__(_value))((_power)-1))

/** @brief Align @c _value to the next multiple of @c _power
6 changes: 6 additions & 0 deletions kernel/arch/i686/mmu.c
@@ -609,10 +609,16 @@ static INTERRUPT_HANDLER_FUNCTION(page_fault)
if (!error.present || is_cow) {
as = IS_KERNEL_ADDRESS(faulty_address) ? &kernel_address_space
: current->process->as;
if (unlikely(!as)) {
log_err("page_fault: address space is NULL");
goto page_fault_panic;
}

if (!address_space_fault(as, faulty_address, is_cow))
return E_SUCCESS;
}

page_fault_panic:
PANIC("PAGE FAULT at " FMT32 ": %s access on a %s page %s", faulty_address,
error.write ? "write" : "read",
error.present ? "protected" : "non-present",
2 changes: 2 additions & 0 deletions kernel/build.mk
@@ -36,10 +36,12 @@ KERNEL_SRCS := \
misc/worker.c \
misc/semaphore.c \
misc/uacpi.c \
memory/memory.c \
memory/pmm.c \
memory/vmm.c \
memory/address_space.c \
memory/vm_normal.c \
memory/slab.c \
net/net.c \
net/packet.c \
net/socket.c \
2 changes: 1 addition & 1 deletion kernel/fs/tar.c
@@ -193,7 +193,7 @@ static void tar_node_free(tree_node_t *node)
/* Do not free vnode, let it be free'd by vnode_release(). */
if (tar_node->vnode)
tar_node->vnode->pdata = NULL;
kfree(node);
kfree(tar_node);
}

/*
14 changes: 4 additions & 10 deletions kernel/main.c
@@ -13,10 +13,9 @@
#include <kernel/interrupts.h>
#include <kernel/kmalloc.h>
#include <kernel/logger.h>
#include <kernel/memory.h>
#include <kernel/net/icmp.h>
#include <kernel/net/ipv4.h>
#include <kernel/process.h>
#include <kernel/sched.h>
#include <kernel/semaphore.h>
@@ -170,16 +169,11 @@ void kernel_main(struct multiboot_info *mbt, unsigned int magic)
/*
 * Now that we have a minimal working setup, we can enable paging
 * and initialize the virtual memory allocation API.
 *
 * After this step, we are able to allocate & free kernel memory.
 */
memory_init(mbt);

address_space_init(&kernel_address_space);
process_init_kernel_process();

/*
Expand Down
26 changes: 26 additions & 0 deletions kernel/memory/address_space.c
@@ -358,6 +358,32 @@ void vm_free(struct address_space *as, void *addr)
}
}

error_t vm_map(struct address_space *as, void *addr)
{
struct vm_segment *segment;
error_t ret;

if (addr == NULL)
return E_SUCCESS;

if ((vaddr_t)addr % PAGE_SIZE) {
log_warn("freeing unaligned virtual address: %p (skipping)", addr);
return E_INVAL;
}

locked_scope (&as->lock) {
segment = vm_find(as, addr);
if (!segment) {
log_dbg("free: no backing segment for %p", addr);
return E_NOENT;
}

ret = segment->driver->vm_map(as, segment, segment->flags);
}

return ret;
}

static int vm_segment_contains(const void *this, const void *addr)
{
const struct vm_segment *segment = to_segment(this);