Single-threaded version of sark-branch memory page management

Maria Matejka 2022-03-09 09:10:44 +01:00
parent 06ece3265e
commit c78247f9b9
5 changed files with 141 additions and 83 deletions
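In short: the page size is no longer computed lazily through get_page_size(); resource_sys_init(), called first thing from resource_init(), determines it once and exports it as the global page_size. Free pages live in a single global cache (struct free_pages) with low/high watermarks KEEP_PAGES_MAIN_MIN/MAX instead of the flat KEEP_PAGES limit, and one cleanup event both pre-fills the cache and releases surplus pages back to the kernel in bounded bulks. The exported pages_kept counter becomes a pointer into that cache, which the show-memory command dereferences.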

@@ -270,6 +270,8 @@ rlookup(unsigned long a)
void
resource_init(void)
{
resource_sys_init();
root_pool.r.class = &pool_class;
root_pool.name = "Root";
init_list(&root_pool.inside);
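resource_init() now brings up the low-level page allocator before building the root pool; a minimal sketch of the resulting call order (hypothetical harness, not BIRD's real main):

#include "lib/resource.h"

int main(void)
{
  resource_init();            /* runs resource_sys_init(): sets page_size,
                                 pre-fills the free-page cache, then builds root_pool */
  void *pg = alloc_page();    /* normally served from the pre-filled cache */
  free_page(pg);              /* goes back to the cache, not straight to munmap() */
  return 0;
}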

@@ -110,10 +110,11 @@ void sl_free(slab *, void *);
void buffer_realloc(void **buf, unsigned *size, unsigned need, unsigned item_size);
/* Allocator of whole pages; for use in slabs and other high-level allocators. */
u64 get_page_size(void);
extern long page_size;
void *alloc_page(void);
void free_page(void *);
extern uint pages_kept;
void resource_sys_init(void);
#ifdef HAVE_LIBDMALLOC
/*

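For allocator users the header change boils down to swapping the accessor for a global that resource_sys_init() fills in once; roughly:

/* before this commit */
u64 sz = get_page_size();

/* after it: valid as soon as resource_sys_init() has run */
long sz = page_size;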
@@ -180,7 +180,7 @@ struct sl_alignment { /* Magic structure for testing of alignment */
int x[0];
};
#define SL_GET_HEAD(x) ((struct sl_head *) (((uintptr_t) (x)) & ~(get_page_size()-1)))
#define SL_GET_HEAD(x) ((struct sl_head *) (((uintptr_t) (x)) & ~(page_size-1)))
/**
* sl_new - create a new Slab
@@ -202,7 +202,6 @@ sl_new(pool *p, uint size)
s->obj_size = size;
s->head_size = sizeof(struct sl_head);
u64 page_size = get_page_size();
do {
s->objs_per_slab = (page_size - s->head_size) / size;
@@ -273,7 +272,7 @@ no_partial:
}
h = alloc_page();
#ifdef POISON
memset(h, 0xba, get_page_size());
memset(h, 0xba, page_size);
#endif
ASSERT_DIE(SL_GET_HEAD(h) == h);
memset(h, 0, s->head_size);
@@ -332,7 +331,7 @@ sl_free(slab *s, void *oo)
if (s->num_empty_heads >= MAX_EMPTY_HEADS)
{
#ifdef POISON
memset(h, 0xde, get_page_size());
memset(h, 0xde, page_size);
#endif
free_page(h);
}
@@ -399,7 +398,7 @@ slab_memsize(resource *r)
return (struct resmem) {
.effective = eff,
.overhead = ALLOC_OVERHEAD + sizeof(struct slab) + heads * get_page_size() - eff,
.overhead = ALLOC_OVERHEAD + sizeof(struct slab) + heads * page_size - eff,
};
}
@@ -410,10 +409,10 @@ slab_lookup(resource *r, unsigned long a)
struct sl_head *h;
WALK_LIST(h, s->partial_heads)
if ((unsigned long) h < a && (unsigned long) h + get_page_size() < a)
if ((unsigned long) h < a && (unsigned long) h + page_size < a)
return r;
WALK_LIST(h, s->full_heads)
if ((unsigned long) h < a && (unsigned long) h + get_page_size() < a)
if ((unsigned long) h < a && (unsigned long) h + page_size < a)
return r;
return NULL;
}
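The slab hunks only replace get_page_size() with the page_size global; the underlying trick is untouched: slab heads are page-aligned, so masking off the low bits of any object pointer (SL_GET_HEAD) recovers its head. A standalone sketch of that masking with a hypothetical 4096-byte page (not BIRD code):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE 4096UL   /* hypothetical page size */

int main(void)
{
  /* page-aligned block, as alloc_page() would return */
  char *head = aligned_alloc(PAGE, PAGE);
  if (!head)
    return 1;

  /* any object placed inside the page maps back to its head by masking */
  char *obj = head + 123;
  char *back = (char *) ((uintptr_t) obj & ~(PAGE - 1));
  printf("%d\n", back == head);   /* prints 1 */

  free(head);
  return 0;
}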

@@ -108,6 +108,7 @@ print_size(char *dsc, struct resmem vals)
extern pool *rt_table_pool;
extern pool *rta_pool;
extern uint *pages_kept;
void
cmd_show_memory(void)
@@ -119,8 +120,8 @@ cmd_show_memory(void)
print_size("Protocols:", rmemsize(proto_pool));
struct resmem total = rmemsize(&root_pool);
#ifdef HAVE_MMAP
print_size("Standby memory:", (struct resmem) { .overhead = get_page_size() * pages_kept });
total.overhead += get_page_size() * pages_kept;
print_size("Standby memory:", (struct resmem) { .overhead = page_size * *pages_kept });
total.overhead += page_size * *pages_kept;
#endif
print_size("Total:", total);
cli_msg(0, "");
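The CLI now reads the counter through the pages_kept pointer exported by the allocator. As a worked example, with a 4096-byte page and 256 cached pages the reported standby overhead is 4096 B * 256 = 1 MiB.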

@@ -11,6 +11,7 @@
#include "lib/lists.h"
#include "lib/event.h"
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
@@ -18,113 +19,167 @@
#include <sys/mman.h>
#endif
#ifdef HAVE_MMAP
#define KEEP_PAGES 512
long page_size = 0;
#ifdef HAVE_MMAP
#define KEEP_PAGES_MAIN_MAX 256
#define KEEP_PAGES_MAIN_MIN 8
#define CLEANUP_PAGES_BULK 256
_Static_assert(KEEP_PAGES_MAIN_MIN * 4 < KEEP_PAGES_MAIN_MAX);
static u64 page_size = 0;
static _Bool use_fake = 0;
uint pages_kept = 0;
static list pages_list;
static void cleanup_pages(void *data);
static event page_cleanup_event = { .hook = cleanup_pages };
#if DEBUGGING
struct free_page {
node unused[42];
node n;
};
#else
static const u64 page_size = 4096; /* Fake page size */
struct free_page {
node n;
};
#endif
u64 get_page_size(void)
struct free_pages {
list pages;
u16 min, max; /* Minimal and maximal number of free pages kept */
uint cnt; /* Number of empty pages */
event cleanup;
};
static void global_free_pages_cleanup_event(void *);
static struct free_pages global_free_pages = {
.min = KEEP_PAGES_MAIN_MIN,
.max = KEEP_PAGES_MAIN_MAX,
.cleanup = { .hook = global_free_pages_cleanup_event },
};
uint *pages_kept = &global_free_pages.cnt;
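/* Note: pages_kept is now a pointer into this cache's counter rather than a
 * standalone uint; the CLI's cmd_show_memory() dereferences it (see the hunk
 * above) to report standby memory. */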
static void *
alloc_sys_page(void)
{
if (page_size)
return page_size;
void *ptr = mmap(NULL, page_size, PROT_WRITE | PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#ifdef HAVE_MMAP
if (page_size = sysconf(_SC_PAGESIZE))
{
if ((u64_popcount(page_size) > 1) || (page_size > 16384))
{
/* Too big or strange page, use the aligned allocator instead */
page_size = 4096;
use_fake = 1;
}
return page_size;
}
if (ptr == MAP_FAILED)
bug("mmap(%lu) failed: %m", page_size);
bug("Page size must be non-zero");
#endif
return ptr;
}
extern int shutting_down; /* Shutdown requested. */
#else // ! HAVE_MMAP
#define use_fake 1
#endif
void *
alloc_page(void)
{
#ifdef HAVE_MMAP
if (pages_kept)
{
node *page = TAIL(pages_list);
rem_node(page);
pages_kept--;
memset(page, 0, get_page_size());
return page;
}
if (!use_fake)
{
void *ret = mmap(NULL, get_page_size(), PROT_WRITE | PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (ret == MAP_FAILED)
bug("mmap(%lu) failed: %m", (long unsigned int) page_size);
return ret;
}
else
#endif
if (use_fake)
{
void *ptr = NULL;
int err = posix_memalign(&ptr, page_size, page_size);
if (err || !ptr)
bug("posix_memalign(%lu) failed", (long unsigned int) page_size);
return ptr;
}
#ifdef HAVE_MMAP
struct free_pages *fps = &global_free_pages;
if (fps->cnt)
{
struct free_page *fp = SKIP_BACK(struct free_page, n, HEAD(fps->pages));
rem_node(&fp->n);
if ((--fps->cnt < fps->min) && !shutting_down)
ev_schedule(&fps->cleanup);
bzero(fp, page_size);
return fp;
}
return alloc_sys_page();
#endif
}
void
free_page(void *ptr)
{
#ifdef HAVE_MMAP
if (!use_fake)
if (use_fake)
{
if (!pages_kept)
init_list(&pages_list);
memset(ptr, 0, sizeof(node));
add_tail(&pages_list, ptr);
if (++pages_kept > KEEP_PAGES)
ev_schedule(&page_cleanup_event);
}
else
#endif
free(ptr);
return;
}
#ifdef HAVE_MMAP
struct free_pages *fps = &global_free_pages;
struct free_page *fp = ptr;
fp->n = (node) {};
add_tail(&fps->pages, &fp->n);
if ((++fps->cnt > fps->max) && !shutting_down)
ev_schedule(&fps->cleanup);
#endif
}
#ifdef HAVE_MMAP
static void
cleanup_pages(void *data UNUSED)
global_free_pages_cleanup_event(void *data UNUSED)
{
for (uint seen = 0; (pages_kept > KEEP_PAGES) && (seen < KEEP_PAGES); seen++)
if (shutting_down)
return;
struct free_pages *fps = &global_free_pages;
while (fps->cnt / 2 < fps->min)
{
void *ptr = HEAD(pages_list);
rem_node(ptr);
if (munmap(ptr, get_page_size()) == 0)
pages_kept--;
#ifdef ENOMEM
else if (errno == ENOMEM)
add_tail(&pages_list, ptr);
#endif
else
bug("munmap(%p) failed: %m", ptr);
struct free_page *fp = alloc_sys_page();
fp->n = (node) {};
add_tail(&fps->pages, &fp->n);
fps->cnt++;
}
if (pages_kept > KEEP_PAGES)
ev_schedule(&page_cleanup_event);
for (uint seen = 0; (seen < CLEANUP_PAGES_BULK) && (fps->cnt > fps->max / 2); seen++)
{
struct free_page *fp = SKIP_BACK(struct free_page, n, TAIL(fps->pages));
rem_node(&fp->n);
if (munmap(fp, page_size) == 0)
fps->cnt--;
else if (errno == ENOMEM)
add_head(&fps->pages, &fp->n);
else
bug("munmap(%p) failed: %m", fp);
}
}
#endif
void
resource_sys_init(void)
{
#ifdef HAVE_MMAP
if (!(page_size = sysconf(_SC_PAGESIZE)))
die("System page size must be non-zero");
if (u64_popcount(page_size) == 1)
{
struct free_pages *fps = &global_free_pages;
init_list(&fps->pages);
global_free_pages_cleanup_event(NULL);
return;
}
/* Too big or strange page, use the aligned allocator instead */
log(L_WARN "Got strange memory page size (%lu), using the aligned allocator instead", page_size);
use_fake = 1;
#endif
page_size = 4096;
}
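The cleanup event keeps the cache between its watermarks: it refills up to twice the minimum when the cache runs low and releases pages down to half the maximum, at most CLEANUP_PAGES_BULK per event. A minimal sketch of just that arithmetic (the constants come from this diff; the loop bodies stand in for alloc_sys_page() and munmap()):

#include <stdio.h>

#define KEEP_PAGES_MAIN_MIN 8
#define KEEP_PAGES_MAIN_MAX 256
#define CLEANUP_PAGES_BULK  256

int main(void)
{
  unsigned cnt = 3;   /* pretend alloc_page() drained the cache to 3 pages */

  /* refill: while cnt/2 < min, grab a fresh page; stops at cnt == 2*min */
  while (cnt / 2 < KEEP_PAGES_MAIN_MIN)
    cnt++;
  printf("after refill: %u\n", cnt);   /* 16 */

  /* trim: release at most CLEANUP_PAGES_BULK pages, down to max/2 */
  cnt = 300;  /* pretend free_page() pushed the cache above the maximum */
  for (unsigned seen = 0; seen < CLEANUP_PAGES_BULK && cnt > KEEP_PAGES_MAIN_MAX / 2; seen++)
    cnt--;
  printf("after trim: %u\n", cnt);     /* 128 */

  return 0;
}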