Commit c406a153 authored by Steffen Vogel's avatar Steffen Vogel 🎅🏼

memory: add new mmap allocator

parent 995e4b5e
......@@ -53,7 +53,8 @@ struct memory_type {
};
extern struct memory_type memory_heap;
extern struct memory_type memory_hugepage;
extern struct memory_type memory_mmap;
extern struct memory_type memory_mmap_hugetlb;
extern struct memory_type *memory_default;
struct memory_type * memory_ib(struct node *n, struct memory_type *parent);
......
......@@ -45,8 +45,8 @@ set(LIB_SRC
mapping.cpp
memory.cpp
memory/heap.cpp
memory/hugepage.cpp
memory/managed.cpp
memory/mmap.cpp
node_direction.cpp
node_type.cpp
node.cpp
......
......@@ -44,10 +44,19 @@ int memory_init(int hugepages)
info("Initialize memory sub-system: #hugepages=%d", hugepages);
if (hugepages > 0) {
ret = memory_hugepage_init(hugepages);
if (ret)
return ret;
memory_default = &memory_mmap_hugetlb;
}
else {
memory_default = &memory_mmap;
warning("Hugepage allocator disabled.");
}
size_t lock = kernel_get_hugepage_size() * hugepages;
ret = memory_lock(lock);
......
/** mmap memory allocator.
*
* @author Steffen Vogel <stvogel@eonerc.rwth-aachen.de>
* @copyright 2014-2019, Institute for Automation of Complex Power Systems, EONERC
......@@ -74,11 +74,9 @@ int memory_hugepage_init(int hugepages)
return 0;
}
/** Allocate memory backed by hugepages with malloc() like interface */
static struct memory_allocation * memory_hugepage_alloc(struct memory_type *m, size_t len, size_t alignment)
/** Allocate memory backed by mmaps with malloc() like interface */
static struct memory_allocation * memory_mmap_alloc(size_t len, size_t alignment, struct memory_type *m)
{
static bool use_huge = true;
int flags, fd;
size_t sz;
......@@ -86,7 +84,7 @@ static struct memory_allocation * memory_hugepage_alloc(struct memory_type *m, s
if (!ma)
return nullptr;
retry: if (use_huge) {
if (m->flags & (int) MemoryFlags::HUGEPAGE) {
#ifdef __linux__
flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB;
#else
......@@ -99,15 +97,19 @@ retry: if (use_huge) {
fd = -1;
#endif
sz = hugepgsz;
info("allocate %#zx bytes mmap_hugetlb memory", len);
}
else {
flags = MAP_PRIVATE | MAP_ANONYMOUS;
fd = -1;
sz = pgsz;
info("allocate %#zx bytes mmap memory", len);
}
/** We must make sure that len is a multiple of the (huge)page size
/** We must make sure that len is a multiple of the page size
*
* See: https://lkml.org/lkml/2014/10/22/925
*/
......@@ -117,21 +119,14 @@ retry: if (use_huge) {
ma->address = mmap(nullptr, ma->length, PROT_READ | PROT_WRITE, flags, fd, 0);
if (ma->address == MAP_FAILED) {
if (use_huge) {
warning("Failed to map hugepages, try with normal pages instead!");
use_huge = false;
goto retry;
}
else {
free(ma);
return nullptr;
}
free(ma);
return nullptr;
}
return ma;
}
static int memory_hugepage_free(struct memory_type *m, struct memory_allocation *ma)
static int memory_mmap_free(struct memory_allocation *ma, struct memory_type *m)
{
int ret;
......@@ -142,10 +137,18 @@ static int memory_hugepage_free(struct memory_type *m, struct memory_allocation
return 0;
}
struct memory_type memory_hugepage = {
.name = "mmap_hugepages",
struct memory_type memory_mmap = {
.name = "mmap",
.flags = (int) MemoryFlags::MMAP,
.alignment = 12, /* 4k page */
.alloc = memory_mmap_alloc,
.free = memory_mmap_free
};
/** Memory type backed by mmap() with hugepages (MAP_HUGETLB on Linux).
 *
 * Shares the alloc/free implementation with memory_mmap; the
 * MemoryFlags::HUGEPAGE bit selects the hugepage code path there.
 *
 * Fix: the name must be unique — "mmap" collides with the plain
 * memory_mmap type above, so any by-name lookup of the allocator
 * could not distinguish the two. Renamed to "mmap_hugetlb" to match
 * the identifier and the extern declaration in the header.
 */
struct memory_type memory_mmap_hugetlb = {
	.name = "mmap_hugetlb",
	.flags = (int) MemoryFlags::MMAP | (int) MemoryFlags::HUGEPAGE,
	.alignment = 21, /* log2 of 2 MiB hugepage size */
	.alloc = memory_mmap_alloc,
	.free = memory_mmap_free
};
......@@ -50,7 +50,7 @@ Theory((size_t len, size_t align, struct memory_type *mt), memory, aligned, .ini
cr_assert(IS_ALIGNED(ptr, align), "Memory at %p is not alligned to %#zx byte bounary", ptr, align);
#ifndef __APPLE__
if (mt == &memory_hugepage) {
if (mt == &memory_mmap_hugetlb) {
cr_assert(IS_ALIGNED(ptr, HUGEPAGESIZE), "Memory at %p is not alligned to %#x byte bounary", ptr, HUGEPAGESIZE);
}
#endif
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment