Linpools with pages fixed to the final page allocator version

This commit is contained in:
Maria Matejka 2022-03-09 10:30:33 +01:00
parent eeec9ddbf2
commit b90c9b164f

View file

@@ -37,7 +37,6 @@ const int lp_chunk_size = sizeof(struct lp_chunk);
struct linpool { struct linpool {
resource r; resource r;
byte *ptr, *end; byte *ptr, *end;
pool *p;
struct lp_chunk *first, *current; /* Normal (reusable) chunks */ struct lp_chunk *first, *current; /* Normal (reusable) chunks */
struct lp_chunk *first_large; /* Large chunks */ struct lp_chunk *first_large; /* Large chunks */
uint chunk_size, threshold, total:31, use_pages:1, total_large; uint chunk_size, threshold, total:31, use_pages:1, total_large;
@@ -72,7 +71,6 @@ linpool
*lp_new(pool *p, uint blk) *lp_new(pool *p, uint blk)
{ {
linpool *m = ralloc(p, &lp_class); linpool *m = ralloc(p, &lp_class);
m->p = p;
if (!blk) if (!blk)
{ {
m->use_pages = 1; m->use_pages = 1;
@@ -132,7 +130,7 @@ lp_alloc(linpool *m, uint size)
{ {
/* Need to allocate a new chunk */ /* Need to allocate a new chunk */
if (m->use_pages) if (m->use_pages)
c = alloc_page(m->p); c = alloc_page();
else else
c = xmalloc(sizeof(struct lp_chunk) + m->chunk_size); c = xmalloc(sizeof(struct lp_chunk) + m->chunk_size);
@@ -273,7 +271,7 @@ lp_free(resource *r)
{ {
c = d->next; c = d->next;
if (m->use_pages) if (m->use_pages)
free_page(m->p, d); free_page(d);
else else
xfree(d); xfree(d);
} }
@@ -308,19 +306,24 @@ static struct resmem
/*
 * lp_memsize - report the memory footprint of a linear memory pool.
 *
 * @r: the linpool's resource header (the linpool embeds a resource as
 *     its first member, so the cast below is safe).
 *
 * Returns a struct resmem with:
 *   .effective - bytes usable by callers: the payload of every large
 *                chunk (each carries its own c->size) plus chunk_size
 *                for every regular chunk,
 *   .overhead  - bookkeeping bytes: the linpool header itself plus a
 *                per-chunk lp_chunk header and allocator overhead for
 *                every chunk, regular or large.
 */
static struct resmem
lp_memsize(resource *r)
{
  linpool *m = (linpool *) r;

  /* Fixed overhead: the linpool structure plus its allocator overhead. */
  struct resmem sz = {
    .overhead = sizeof(struct linpool) + ALLOC_OVERHEAD,
  };

  /* Large chunks are individually sized; account for each payload and header. */
  for (struct lp_chunk *c = m->first_large; c; c = c->next)
  {
    sz.effective += c->size;
    sz.overhead += lp_chunk_size + ALLOC_OVERHEAD;
  }

  /* Regular chunks all have the same payload size (m->chunk_size),
   * so counting them is enough. */
  uint regular = 0;
  for (struct lp_chunk *c = m->first; c; c = c->next)
    regular++;

  sz.effective += m->chunk_size * regular;
  sz.overhead += (lp_chunk_size + ALLOC_OVERHEAD) * regular;

  return sz;
}