Implemented real slab allocator. If you suspect it of being buggy,

just #define FAKE_SLAB at the top of lib/slab.c to bypass it.
Martin Mares 2000-03-05 22:48:30 +00:00
parent 9f4929e749
commit 3549667925
2 changed files with 198 additions and 15 deletions
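
The escape hatch described in the commit message is a one-line change; as a sketch (assuming you edit the `#undef` committed near the top of lib/slab.c below), switching back to the debugging allocator looks like:

  #define FAKE_SLAB		/* Turn on if you want to debug memory allocations */

With FAKE_SLAB defined, the preprocessor selects the simple list-backed allocator instead of the real slab code added by this commit.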

TODO

@@ -1,7 +1,5 @@
 Core
 ~~~~
-- better memory allocators
 - static: check validity of route destination?
 - static: allow specifying a per-route filter program for setting route attributes?

lib/slab.c

@@ -1,7 +1,9 @@
 /*
- *	BIRD Resource Manager -- SLABs
+ *	BIRD Resource Manager -- A SLAB-like Memory Allocator
  *
- *	(c) 1998 Martin Mares <mj@ucw.cz>
+ *	Heavily inspired by the original SLAB paper by Jeff Bonwick.
+ *
+ *	(c) 1998--2000 Martin Mares <mj@ucw.cz>
  *
 *	Can be freely distributed and used under the terms of the GNU GPL.
 */
@@ -12,14 +14,16 @@
 #include "nest/bird.h"
 #include "lib/resource.h"
 
-/*
- *	These are only fake, soon to change...
- */
+#undef FAKE_SLAB		/* Turn on if you want to debug memory allocations */
 
-struct sl_obj {
-  node n;
-  byte data[0];
-};
+static void slab_free(resource *r);
+static void slab_dump(resource *r);
+
+#ifdef FAKE_SLAB
+
+/*
+ *	Fake version used for debugging.
+ */
 
 struct slab {
   resource r;
@@ -27,16 +31,18 @@ struct slab {
   list objs;
 };
 
-static void slab_free(resource *r);
-static void slab_dump(resource *r);
-
 static struct resclass sl_class = {
-  "Slab",
+  "FakeSlab",
   sizeof(struct slab),
   slab_free,
   slab_dump
 };
 
+struct sl_obj {
+  node n;
+  byte data[0];
+};
+
 slab *
 sl_new(pool *p, unsigned size)
 {
@@ -85,3 +91,182 @@ slab_dump(resource *r)
     cnt++;
   debug("(%d objects per %d bytes)\n", cnt, s->size);
 }
+
+#else
+
+/*
+ *	Real efficient version.
+ */
+
+#define SLAB_SIZE 4096
+#define MAX_EMPTY_HEADS 1
+
+struct slab {
+  resource r;
+  unsigned obj_size, head_size, objs_per_slab, num_empty_heads;
+  list empty_heads, partial_heads, full_heads;
+};
+
+static struct resclass sl_class = {
+  "Slab",
+  sizeof(struct slab),
+  slab_free,
+  slab_dump
+};
+
+struct sl_head {
+  node n;
+  struct sl_obj *first_free;
+  int num_full;
+};
+
+struct sl_obj {
+  struct sl_head *slab;
+  union {
+    struct sl_obj *next;
+    byte data[0];
+  } u;
+};
+
+struct sl_alignment {		/* Magic structure for testing of alignment */
+  byte data;
+  int x[0];
+};
+
+slab *
+sl_new(pool *p, unsigned size)
+{
+  slab *s = ralloc(p, &sl_class);
+  unsigned int align = sizeof(struct sl_alignment);
+  if (align < sizeof(int))
+    align = sizeof(int);
+  size += OFFSETOF(struct sl_obj, u.data);
+  if (size < sizeof(struct sl_obj))
+    size = sizeof(struct sl_obj);
+  size = (size + align - 1) / align * align;
+  s->obj_size = size;
+  s->head_size = (sizeof(struct sl_head) + align - 1) / align * align;
+  s->objs_per_slab = (SLAB_SIZE - s->head_size) / size;
+  if (!s->objs_per_slab)
+    bug("Slab: object too large");
+  s->num_empty_heads = 0;
+  init_list(&s->empty_heads);
+  init_list(&s->partial_heads);
+  init_list(&s->full_heads);
+  return s;
+}
+
+struct sl_head *
+sl_new_head(slab *s)
+{
+  struct sl_head *h = xmalloc(SLAB_SIZE);
+  struct sl_obj *o = (struct sl_obj *)((byte *)h+s->head_size);
+  struct sl_obj *no;
+  unsigned int n = s->objs_per_slab;
+
+  h->first_free = o;
+  h->num_full = 0;
+  while (n--)
+    {
+      o->slab = h;
+      no = (struct sl_obj *)((char *) o+s->obj_size);
+      o->u.next = n ? no : NULL;
+      o = no;
+    }
+  return h;
+}
+
+void *
+sl_alloc(slab *s)
+{
+  struct sl_head *h;
+  struct sl_obj *o;
+
+redo:
+  h = HEAD(s->partial_heads);
+  if (!h->n.next)
+    goto no_partial;
+okay:
+  o = h->first_free;
+  if (!o)
+    goto full_partial;
+  h->first_free = o->u.next;
+  h->num_full++;
+  return o->u.data;
+
+full_partial:
+  rem_node(&h->n);
+  add_tail(&s->full_heads, &h->n);
+  goto redo;
+
+no_partial:
+  h = HEAD(s->empty_heads);
+  if (h->n.next)
+    {
+      rem_node(&h->n);
+      add_head(&s->partial_heads, &h->n);
+      s->num_empty_heads--;
+      goto okay;
+    }
+  h = sl_new_head(s);
+  add_head(&s->partial_heads, &h->n);
+  goto okay;
+}
+
+void
+sl_free(slab *s, void *oo)
+{
+  struct sl_obj *o = SKIP_BACK(struct sl_obj, u.data, oo);
+  struct sl_head *h = o->slab;
+
+  o->u.next = h->first_free;
+  h->first_free = o;
+  if (!--h->num_full)
+    {
+      rem_node(&h->n);
+      if (s->num_empty_heads >= MAX_EMPTY_HEADS)
+        xfree(h);
+      else
+        {
+          add_head(&s->empty_heads, &h->n);
+          s->num_empty_heads++;
+        }
+    }
+  else if (!o->u.next)
+    {
+      rem_node(&h->n);
+      add_head(&s->partial_heads, &h->n);
+    }
+}
+
+static void
+slab_free(resource *r)
+{
+  slab *s = (slab *) r;
+  struct sl_head *h, *g;
+
+  WALK_LIST_DELSAFE(h, g, s->empty_heads)
+    xfree(h);
+  WALK_LIST_DELSAFE(h, g, s->partial_heads)
+    xfree(h);
+  WALK_LIST_DELSAFE(h, g, s->full_heads)
+    xfree(h);
+}
+
+static void
+slab_dump(resource *r)
+{
+  slab *s = (slab *) r;
+  int ec=0, pc=0, fc=0;
+  struct sl_head *h;
+
+  WALK_LIST(h, s->empty_heads)
+    ec++;
+  WALK_LIST(h, s->partial_heads)
+    pc++;
+  WALK_LIST(h, s->full_heads)
+    fc++;
+  debug("(%de+%dp+%df blocks per %d objs per %d bytes)\n", ec, pc, fc, s->objs_per_slab, s->obj_size);
+}
+
+#endif
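
For reference, a minimal usage sketch of the interface committed above (sl_new, sl_alloc, sl_free). The caller `example`, its pool argument, and `struct my_obj` are hypothetical, and the size figures in the comments assume a 32-bit layout with 4-byte pointers and ints:

  #include "nest/bird.h"
  #include "lib/resource.h"

  struct my_obj {			/* hypothetical 12-byte object (on 32-bit) */
    unsigned int key;
    void *payload;
    void *extra;
  };

  void
  example(pool *p)			/* p: any existing resource pool */
  {
    /* sl_new rounds the request up: 12 bytes + the 4-byte sl_obj header
       give obj_size 16, so with head_size 16 one 4096-byte page holds
       (4096 - 16) / 16 = 255 objects on this layout. */
    slab *s = sl_new(p, sizeof(struct my_obj));

    struct my_obj *o = sl_alloc(s);	/* pops the first free object of a partial page */
    o->key = 1;
    sl_free(s, o);			/* the page is found via the object's hidden slab pointer */

    /* The slab is itself a resource owned by p, so freeing the pool
       releases the slab and all of its pages as well. */
  }

Note that one slab serves exactly one object size; allocation and freeing touch only the page head lists, so both are constant-time apart from the occasional xmalloc of a fresh 4096-byte page.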