Revert "Bound allocated pages to resource pools with page caches to avoid unnecessary syscalls"

This reverts commit 7f0e598208.
This commit is contained in:
Maria Matejka 2022-03-09 09:13:31 +01:00
parent c20506dc07
commit 0c59f7ff01
4 changed files with 35 additions and 92 deletions

View file

@@ -31,18 +31,9 @@
struct pool { struct pool {
resource r; resource r;
list inside; list inside;
struct pool_pages *pages;
const char *name; const char *name;
}; };
struct pool_pages {
uint free;
uint used;
void *ptr[0];
};
#define POOL_PAGES_MAX ((page_size - sizeof(struct pool_pages)) / sizeof (void *))
static void pool_dump(resource *); static void pool_dump(resource *);
static void pool_free(resource *); static void pool_free(resource *);
static resource *pool_lookup(resource *, unsigned long); static resource *pool_lookup(resource *, unsigned long);
@@ -59,10 +50,6 @@ static struct resclass pool_class = {
pool root_pool; pool root_pool;
void *alloc_sys_page(void);
void free_sys_page(void *);
void resource_sys_init(void);
static int indent; static int indent;
/** /**
@@ -95,14 +82,6 @@ pool_free(resource *P)
xfree(r); xfree(r);
r = rr; r = rr;
} }
if (p->pages)
{
ASSERT_DIE(!p->pages->used);
for (uint i=0; i<p->pages->free; i++)
free_sys_page(p->pages->ptr[i]);
free_sys_page(p->pages);
}
} }
static void static void
@@ -128,9 +107,6 @@ pool_memsize(resource *P)
WALK_LIST(r, p->inside) WALK_LIST(r, p->inside)
sum += rmemsize(r); sum += rmemsize(r);
if (p->pages)
sum += page_size * (p->pages->used + p->pages->free + 1);
return sum; return sum;
} }
@@ -283,7 +259,6 @@ rlookup(unsigned long a)
void void
resource_init(void) resource_init(void)
{ {
resource_sys_init();
root_pool.r.class = &pool_class; root_pool.r.class = &pool_class;
root_pool.name = "Root"; root_pool.name = "Root";
init_list(&root_pool.inside); init_list(&root_pool.inside);
@@ -450,39 +425,6 @@ mb_free(void *m)
rfree(b); rfree(b);
} }
void *
alloc_page(pool *p)
{
if (!p->pages)
{
p->pages = alloc_sys_page();
p->pages->free = 0;
p->pages->used = 1;
}
else
p->pages->used++;
if (p->pages->free)
{
void *ptr = p->pages->ptr[--p->pages->free];
bzero(ptr, page_size);
return ptr;
}
else
return alloc_sys_page();
}
void
free_page(pool *p, void *ptr)
{
ASSERT_DIE(p->pages);
p->pages->used--;
if (p->pages->free >= POOL_PAGES_MAX)
return free_sys_page(ptr);
else
p->pages->ptr[p->pages->free++] = ptr;
}
#define STEP_UP(x) ((x) + (x)/2 + 4) #define STEP_UP(x) ((x) + (x)/2 + 4)

View file

@@ -94,12 +94,10 @@ void sl_free(slab *, void *);
void buffer_realloc(void **buf, unsigned *size, unsigned need, unsigned item_size); void buffer_realloc(void **buf, unsigned *size, unsigned need, unsigned item_size);
extern long page_size;
/* Allocator of whole pages; for use in slabs and other high-level allocators. */ /* Allocator of whole pages; for use in slabs and other high-level allocators. */
void *alloc_page(pool *); u64 get_page_size(void);
void free_page(pool *, void *); void *alloc_page(void);
#define PAGE_HEAD(x) ((void *) (((intptr_t) (x)) & ~(page_size-1))) void free_page(void *);
#ifdef HAVE_LIBDMALLOC #ifdef HAVE_LIBDMALLOC
/* /*

View file

@@ -152,7 +152,6 @@ slab_memsize(resource *r)
struct slab { struct slab {
resource r; resource r;
pool *p;
uint obj_size, head_size, head_bitfield_len; uint obj_size, head_size, head_bitfield_len;
uint objs_per_slab, num_empty_heads, data_size; uint objs_per_slab, num_empty_heads, data_size;
list empty_heads, partial_heads, full_heads; list empty_heads, partial_heads, full_heads;
@@ -192,7 +191,6 @@ slab *
sl_new(pool *p, uint size) sl_new(pool *p, uint size)
{ {
slab *s = ralloc(p, &sl_class); slab *s = ralloc(p, &sl_class);
s->p = p;
uint align = sizeof(struct sl_alignment); uint align = sizeof(struct sl_alignment);
if (align < sizeof(int)) if (align < sizeof(int))
align = sizeof(int); align = sizeof(int);
@@ -201,6 +199,7 @@ sl_new(pool *p, uint size)
s->obj_size = size; s->obj_size = size;
s->head_size = sizeof(struct sl_head); s->head_size = sizeof(struct sl_head);
u64 page_size = get_page_size();
do { do {
s->objs_per_slab = (page_size - s->head_size) / size; s->objs_per_slab = (page_size - s->head_size) / size;
@@ -269,9 +268,9 @@ no_partial:
s->num_empty_heads--; s->num_empty_heads--;
goto okay; goto okay;
} }
h = alloc_page(s->p); h = alloc_page();
#ifdef POISON #ifdef POISON
memset(h, 0xba, page_size); memset(h, 0xba, get_page_size());
#endif #endif
ASSERT_DIE(SL_GET_HEAD(h) == h); ASSERT_DIE(SL_GET_HEAD(h) == h);
memset(h, 0, s->head_size); memset(h, 0, s->head_size);
@@ -330,9 +329,9 @@ sl_free(slab *s, void *oo)
if (s->num_empty_heads >= MAX_EMPTY_HEADS) if (s->num_empty_heads >= MAX_EMPTY_HEADS)
{ {
#ifdef POISON #ifdef POISON
memset(h, 0xde, page_size); memset(h, 0xde, get_page_size());
#endif #endif
free_page(s->p, h); free_page(h);
} }
else else
{ {
@@ -349,11 +348,11 @@ slab_free(resource *r)
struct sl_head *h, *g; struct sl_head *h, *g;
WALK_LIST_DELSAFE(h, g, s->empty_heads) WALK_LIST_DELSAFE(h, g, s->empty_heads)
free_page(s->p, h); free_page(h);
WALK_LIST_DELSAFE(h, g, s->partial_heads) WALK_LIST_DELSAFE(h, g, s->partial_heads)
free_page(s->p, h); free_page(h);
WALK_LIST_DELSAFE(h, g, s->full_heads) WALK_LIST_DELSAFE(h, g, s->full_heads)
free_page(s->p, h); free_page(h);
} }
static void static void
@@ -386,8 +385,7 @@ slab_memsize(resource *r)
WALK_LIST(h, s->full_heads) WALK_LIST(h, s->full_heads)
heads++; heads++;
// return ALLOC_OVERHEAD + sizeof(struct slab) + heads * (ALLOC_OVERHEAD + page_size); return ALLOC_OVERHEAD + sizeof(struct slab) + heads * (ALLOC_OVERHEAD + get_page_size());
return ALLOC_OVERHEAD + sizeof(struct slab); /* The page sizes are accounted for in the pool */
} }
static resource * static resource *
@@ -397,10 +395,10 @@ slab_lookup(resource *r, unsigned long a)
struct sl_head *h; struct sl_head *h;
WALK_LIST(h, s->partial_heads) WALK_LIST(h, s->partial_heads)
if ((unsigned long) h < a && (unsigned long) h + page_size < a) if ((unsigned long) h < a && (unsigned long) h + get_page_size() < a)
return r; return r;
WALK_LIST(h, s->full_heads) WALK_LIST(h, s->full_heads)
if ((unsigned long) h < a && (unsigned long) h + page_size < a) if ((unsigned long) h < a && (unsigned long) h + get_page_size() < a)
return r; return r;
return NULL; return NULL;
} }

View file

@@ -16,36 +16,41 @@
#include <sys/mman.h> #include <sys/mman.h>
#endif #endif
long page_size = 0;
#ifdef HAVE_MMAP #ifdef HAVE_MMAP
static u64 page_size = 0;
static _Bool use_fake = 0; static _Bool use_fake = 0;
#else #else
static _Bool use_fake = 1; static const u64 page_size = 4096; /* Fake page size */
#endif #endif
void resource_sys_init(void) u64 get_page_size(void)
{ {
#ifdef HAVE_MMAP if (page_size)
if (!(page_size = sysconf(_SC_PAGESIZE))) return page_size;
die("System page size must be non-zero");
if ((u64_popcount(page_size) > 1) || (page_size > 16384)) #ifdef HAVE_MMAP
if (page_size = sysconf(_SC_PAGESIZE))
{ {
#endif if ((u64_popcount(page_size) > 1) || (page_size > 16384))
/* Too big or strange page, use the aligned allocator instead */ {
page_size = 4096; /* Too big or strange page, use the aligned allocator instead */
use_fake = 1; page_size = 4096;
use_fake = 1;
}
return page_size;
} }
bug("Page size must be non-zero");
#endif
} }
void * void *
alloc_sys_page(void) alloc_page(void)
{ {
#ifdef HAVE_MMAP #ifdef HAVE_MMAP
if (!use_fake) if (!use_fake)
{ {
void *ret = mmap(NULL, page_size, PROT_WRITE | PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); void *ret = mmap(NULL, get_page_size(), PROT_WRITE | PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (ret == MAP_FAILED) if (ret == MAP_FAILED)
bug("mmap(%lu) failed: %m", page_size); bug("mmap(%lu) failed: %m", page_size);
return ret; return ret;
@@ -61,12 +66,12 @@ alloc_sys_page(void)
} }
void void
free_sys_page(void *ptr) free_page(void *ptr)
{ {
#ifdef HAVE_MMAP #ifdef HAVE_MMAP
if (!use_fake) if (!use_fake)
{ {
if (munmap(ptr, page_size) < 0) if (munmap(ptr, get_page_size()) < 0)
bug("munmap(%p) failed: %m", ptr); bug("munmap(%p) failed: %m", ptr);
} }
else else