Routing table is now a resource allocated from its own pool
This also fixes memory leaks caused by the import/export tables never being cleaned up and freed.
parent a9938b1792
commit ff397df7ed
5 changed files with 131 additions and 51 deletions
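The pattern this commit adopts is that the routing table itself becomes a resource registered in its own pool, so one pool teardown runs the table's destructor and releases everything it owns. The diff below shows the real implementation (rt_class, ralloc(), rp_new(), rt_shutdown() as rfree() of the pool). The following standalone C sketch only illustrates the general idea; it is a toy, not BIRD's resource library, and all toy_* names are invented for this example.

#include <stdio.h>
#include <stdlib.h>

/* Toy resource class: a destructor to run when the owning pool is freed,
 * loosely mirroring BIRD's struct resclass with its .free hook. */
struct toy_class {
  const char *name;
  void (*free)(void *obj);
};

/* One allocated resource: its class, its payload, and a link to the next one. */
struct toy_res {
  const struct toy_class *cls;
  void *obj;
  struct toy_res *next;
};

/* Toy pool: a list of resources that are all freed together. */
struct toy_pool {
  struct toy_res *head;
};

static struct toy_pool *pool_new(void)
{
  return calloc(1, sizeof(struct toy_pool));
}

/* Allocate a zeroed object inside the pool and register its class. */
static void *pool_alloc(struct toy_pool *p, const struct toy_class *cls, size_t sz)
{
  struct toy_res *r = malloc(sizeof(*r));
  r->cls = cls;
  r->obj = calloc(1, sz);
  r->next = p->head;
  p->head = r;
  return r->obj;
}

/* Freeing the pool runs every registered destructor, then releases memory. */
static void pool_free(struct toy_pool *p)
{
  struct toy_res *next;
  for (struct toy_res *r = p->head; r; r = next)
  {
    next = r->next;
    if (r->cls->free)
      r->cls->free(r->obj);
    free(r->obj);
    free(r);
  }
  free(p);
}

/* A "table" whose cleanup hook fires automatically on pool teardown,
 * analogous to rt_free() running when the table's pool is freed. */
struct toy_table {
  const char *name;
};

static void table_free(void *obj)
{
  struct toy_table *t = obj;
  printf("Deleting table %s\n", t->name);
}

static const struct toy_class table_class = { "Toy table", table_free };

int main(void)
{
  struct toy_pool *p = pool_new();

  struct toy_table *t = pool_alloc(p, &table_class, sizeof(*t));
  t->name = "import";

  /* Analogous to rt_shutdown(): one pool free tears everything down. */
  pool_free(p);
  return 0;
}

This is also where the leak fix comes from: the import/export tables created by channel_setup_in_table() and channel_setup_out_table() now live in pools hanging off the protocol pool, so they are released automatically when those pools are freed instead of being leaked.
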
11  nest/proto.c

@@ -518,11 +518,12 @@ void
 channel_setup_in_table(struct channel *c)
 {
   struct rtable_config *cf = mb_allocz(c->proto->pool, sizeof(struct rtable_config));

   cf->name = "import";
   cf->addr_type = c->net_type;
+  cf->internal = 1;

-  c->in_table = mb_allocz(c->proto->pool, sizeof(struct rtable));
-  rt_setup(c->proto->pool, c->in_table, cf);
+  c->in_table = rt_setup(c->proto->pool, cf);

   c->reload_event = ev_new_init(c->proto->pool, channel_reload_loop, c);
 }
@@ -534,9 +535,9 @@ channel_setup_out_table(struct channel *c)
   struct rtable_config *cf = mb_allocz(c->proto->pool, sizeof(struct rtable_config));
   cf->name = "export";
   cf->addr_type = c->net_type;
+  cf->internal = 1;

-  c->out_table = mb_allocz(c->proto->pool, sizeof(struct rtable));
-  rt_setup(c->proto->pool, c->out_table, cf);
+  c->out_table = rt_setup(c->proto->pool, cf);
 }
@@ -609,6 +610,8 @@ channel_do_down(struct channel *c)
   c->reload_event = NULL;
   c->out_table = NULL;

+  /* The in_table and out_table are going to be freed by freeing their resource pools. */
+
   CALL(c->channel->cleanup, c);

   /* Schedule protocol shutddown */
@@ -148,12 +148,15 @@ struct rtable_config {
   int gc_max_ops;           /* Maximum number of operations before GC is run */
   int gc_min_time;          /* Minimum time between two consecutive GC runs */
   byte sorted;              /* Routes of network are sorted according to rte_better() */
+  byte internal;            /* Internal table of a protocol */
   btime min_settle_time;    /* Minimum settle time for notifications */
   btime max_settle_time;    /* Maximum settle time for notifications */
 };

 typedef struct rtable {
+  resource r;
   node n;                   /* Node in list of all tables */
+  pool *rp;                 /* Resource pool to allocate everything from, including itself */
   struct fib fib;
   char *name;               /* Name of this table */
   list channels;            /* List of attached channels (struct channel) */
@@ -311,7 +314,9 @@ void rt_lock_table(rtable *);
 void rt_unlock_table(rtable *);
 void rt_subscribe(rtable *tab, struct rt_subscription *s);
 void rt_unsubscribe(struct rt_subscription *s);
-void rt_setup(pool *, rtable *, struct rtable_config *);
+rtable *rt_setup(pool *, struct rtable_config *);
+static inline void rt_shutdown(rtable *r) { rfree(r->rp); }

 static inline net *net_find(rtable *tab, const net_addr *addr) { return (net *) fib_find(&tab->fib, addr); }
 static inline net *net_find_valid(rtable *tab, const net_addr *addr)
 { net *n = net_find(tab, addr); return (n && rte_is_valid(n->routes)) ? n : NULL; }
151  nest/rt-table.c

@@ -1839,7 +1839,7 @@ rt_kick_settle_timer(rtable *tab)
   tab->base_settle_time = current_time();

   if (!tab->settle_timer)
-    tab->settle_timer = tm_new_init(rt_table_pool, rt_settle_timer, tab, 0, 0);
+    tab->settle_timer = tm_new_init(tab->rp, rt_settle_timer, tab, 0, 0);

   if (!tm_active(tab->settle_timer))
     tm_set(tab->settle_timer, rt_settled_time(tab));
@@ -1872,23 +1872,78 @@ rt_unsubscribe(struct rt_subscription *s)
   rt_unlock_table(s->tab);
 }

-void
-rt_setup(pool *p, rtable *t, struct rtable_config *cf)
+static void
+rt_free(resource *_r)
 {
-  bzero(t, sizeof(*t));
+  rtable *r = (rtable *) _r;
+
+  DBG("Deleting routing table %s\n", r->name);
+  ASSERT_DIE(r->use_count == 0);
+
+  r->config->table = NULL;
+  rem_node(&r->n);
+
+  if (r->hostcache)
+    rt_free_hostcache(r);
+
+  /* Freed automagically by the resource pool
+  fib_free(&r->fib);
+  hmap_free(&r->id_map);
+  rfree(r->rt_event);
+  rfree(r->settle_timer);
+  mb_free(r);
+  */
+}
+
+static void
+rt_res_dump(resource *_r)
+{
+  rtable *r = (rtable *) _r;
+  debug("name \"%s\", addr_type=%s, rt_count=%u, use_count=%d\n",
+      r->name, net_label[r->addr_type], r->rt_count, r->use_count);
+}
+
+static struct resclass rt_class = {
+  .name = "Routing table",
+  .size = sizeof(struct rtable),
+  .free = rt_free,
+  .dump = rt_res_dump,
+  .lookup = NULL,
+  .memsize = NULL,
+};
+
+rtable *
+rt_setup(pool *pp, struct rtable_config *cf)
+{
+  int ns = strlen("Routing table ") + strlen(cf->name) + 1;
+  void *nb = mb_alloc(pp, ns);
+  ASSERT_DIE(ns - 1 == bsnprintf(nb, ns, "Routing table %s", cf->name));
+
+  pool *p = rp_new(pp, nb);
+  mb_move(nb, p);
+
+  rtable *t = ralloc(p, &rt_class);
+  t->rp = p;
+
   t->name = cf->name;
   t->config = cf;
   t->addr_type = cf->addr_type;

   fib_init(&t->fib, p, t->addr_type, sizeof(net), OFFSETOF(net, n), 0, NULL);
-  init_list(&t->channels);

-  hmap_init(&t->id_map, p, 1024);
-  hmap_set(&t->id_map, 0);
+  if (!cf->internal)
+  {
+    init_list(&t->channels);
+    hmap_init(&t->id_map, p, 1024);
+    hmap_set(&t->id_map, 0);

-  t->rt_event = ev_new_init(p, rt_event, t);
-  t->last_rt_change = t->gc_time = current_time();
+    init_list(&t->subscribers);

-  init_list(&t->subscribers);
+    t->rt_event = ev_new_init(p, rt_event, t);
+    t->last_rt_change = t->gc_time = current_time();
+  }
+
+  return t;
 }

 /**
@@ -2326,16 +2381,9 @@ rt_unlock_table(rtable *r)
   if (!--r->use_count && r->deleted)
     {
       struct config *conf = r->deleted;
-      DBG("Deleting routing table %s\n", r->name);
-      r->config->table = NULL;
-      if (r->hostcache)
-        rt_free_hostcache(r);
-      rem_node(&r->n);
-      fib_free(&r->fib);
-      hmap_free(&r->id_map);
-      rfree(r->rt_event);
-      rfree(r->settle_timer);
-      mb_free(r);
+
+      /* Delete the routing table by freeing its pool */
+      rt_shutdown(r);
+
       config_del_obstacle(conf);
     }
 }
@@ -2397,11 +2445,9 @@ rt_commit(struct config *new, struct config *old)
   WALK_LIST(r, new->tables)
     if (!r->table)
       {
-        rtable *t = mb_allocz(rt_table_pool, sizeof(struct rtable));
+        r->table = rt_setup(rt_table_pool, r);
         DBG("\t%s: created\n", r->name);
-        rt_setup(rt_table_pool, t, r);
-        add_tail(&routing_tables, &t->n);
-        r->table = t;
+        add_tail(&routing_tables, &r->table->n);
       }
   DBG("\tdone\n");
 }
@@ -2566,6 +2612,9 @@ rte_update_in(struct channel *c, const net_addr *n, rte *new, struct rte_src *src)
     if (!old)
       goto drop_withdraw;

+    if (!net->routes)
+      fib_delete(&tab->fib, net);
+
     return 1;
   }

@@ -2600,6 +2649,10 @@ drop_update:
   c->stats.imp_updates_received++;
   c->stats.imp_updates_ignored++;
   rte_free(new);
+
+  if (!net->routes)
+    fib_delete(&tab->fib, net);
+
   return 0;

 drop_withdraw:
@@ -2669,9 +2722,15 @@ rt_reload_channel_abort(struct channel *c)
 void
 rt_prune_sync(rtable *t, int all)
 {
-  FIB_WALK(&t->fib, net, n)
+  struct fib_iterator fit;
+
+  FIB_ITERATE_INIT(&fit, &t->fib);
+
+again:
+  FIB_ITERATE_START(&t->fib, &fit, net, n)
   {
     rte *e, **ee = &n->routes;

     while (e = *ee)
     {
       if (all || (e->flags & (REF_STALE | REF_DISCARD)))
@@ -2683,8 +2742,15 @@ rt_prune_sync(rtable *t, int all)
       else
         ee = &e->next;
     }
+
+    if (all || !n->routes)
+    {
+      FIB_ITERATE_PUT(&fit);
+      fib_delete(&t->fib, n);
+      goto again;
+    }
   }
-  FIB_WALK_END;
+  FIB_ITERATE_END;
 }
@@ -2750,6 +2816,9 @@ rte_update_out(struct channel *c, const net_addr *n, rte *new, rte *old0, int refeed)
     if (!old)
       goto drop_withdraw;

+    if (!net->routes)
+      fib_delete(&tab->fib, net);
+
     return 1;
   }

@@ -2809,7 +2878,7 @@ hc_remove(struct hostcache *hc, struct hostentry *he)
 #define HC_LO_ORDER 10

 static void
-hc_alloc_table(struct hostcache *hc, unsigned order)
+hc_alloc_table(struct hostcache *hc, pool *p, unsigned order)
 {
   uint hsize = 1 << order;
   hc->hash_order = order;
@@ -2817,18 +2886,18 @@ hc_alloc_table(struct hostcache *hc, unsigned order)
   hc->hash_max = (order >= HC_HI_ORDER) ? ~0U : (hsize HC_HI_MARK);
   hc->hash_min = (order <= HC_LO_ORDER) ?  0U : (hsize HC_LO_MARK);

-  hc->hash_table = mb_allocz(rt_table_pool, hsize * sizeof(struct hostentry *));
+  hc->hash_table = mb_allocz(p, hsize * sizeof(struct hostentry *));
 }

 static void
-hc_resize(struct hostcache *hc, unsigned new_order)
+hc_resize(struct hostcache *hc, pool *p, unsigned new_order)
 {
   struct hostentry **old_table = hc->hash_table;
   struct hostentry *he, *hen;
   uint old_size = 1 << hc->hash_order;
   uint i;

-  hc_alloc_table(hc, new_order);
+  hc_alloc_table(hc, p, new_order);
   for (i = 0; i < old_size; i++)
     for (he = old_table[i]; he != NULL; he=hen)
       {
@@ -2839,7 +2908,7 @@ hc_resize(struct hostcache *hc, unsigned new_order)
 }

 static struct hostentry *
-hc_new_hostentry(struct hostcache *hc, ip_addr a, ip_addr ll, rtable *dep, unsigned k)
+hc_new_hostentry(struct hostcache *hc, pool *p, ip_addr a, ip_addr ll, rtable *dep, unsigned k)
 {
   struct hostentry *he = sl_alloc(hc->slab);

@@ -2855,13 +2924,13 @@ hc_new_hostentry(struct hostcache *hc, ip_addr a, ip_addr ll, rtable *dep, unsigned k)
   hc->hash_items++;
   if (hc->hash_items > hc->hash_max)
-    hc_resize(hc, hc->hash_order + HC_HI_STEP);
+    hc_resize(hc, p, hc->hash_order + HC_HI_STEP);

   return he;
 }

 static void
-hc_delete_hostentry(struct hostcache *hc, struct hostentry *he)
+hc_delete_hostentry(struct hostcache *hc, pool *p, struct hostentry *he)
 {
   rta_free(he->src);

@@ -2871,20 +2940,20 @@ hc_delete_hostentry(struct hostcache *hc, struct hostentry *he)
   hc->hash_items--;
   if (hc->hash_items < hc->hash_min)
-    hc_resize(hc, hc->hash_order - HC_LO_STEP);
+    hc_resize(hc, p, hc->hash_order - HC_LO_STEP);
 }

 static void
 rt_init_hostcache(rtable *tab)
 {
-  struct hostcache *hc = mb_allocz(rt_table_pool, sizeof(struct hostcache));
+  struct hostcache *hc = mb_allocz(tab->rp, sizeof(struct hostcache));
   init_list(&hc->hostentries);

   hc->hash_items = 0;
-  hc_alloc_table(hc, HC_DEF_ORDER);
-  hc->slab = sl_new(rt_table_pool, sizeof(struct hostentry));
+  hc_alloc_table(hc, tab->rp, HC_DEF_ORDER);
+  hc->slab = sl_new(tab->rp, sizeof(struct hostentry));

-  hc->lp = lp_new(rt_table_pool, LP_GOOD_SIZE(1024));
+  hc->lp = lp_new(tab->rp, LP_GOOD_SIZE(1024));
   hc->trie = f_new_trie(hc->lp, 0);

   tab->hostcache = hc;
@@ -2905,10 +2974,12 @@ rt_free_hostcache(rtable *tab)
       log(L_ERR "Hostcache is not empty in table %s", tab->name);
     }

+  /* Freed automagically by the resource pool
   rfree(hc->slab);
   rfree(hc->lp);
   mb_free(hc->hash_table);
   mb_free(hc);
+  */
 }

 static void
@@ -3051,7 +3122,7 @@ rt_update_hostcache(rtable *tab)
       he = SKIP_BACK(struct hostentry, ln, n);
       if (!he->uc)
         {
-          hc_delete_hostentry(hc, he);
+          hc_delete_hostentry(hc, tab->rp, he);
           continue;
         }

@@ -3076,7 +3147,7 @@ rt_get_hostentry(rtable *tab, ip_addr a, ip_addr ll, rtable *dep)
     if (ipa_equal(he->addr, a) && (he->tab == dep))
       return he;

-  he = hc_new_hostentry(hc, a, ipa_zero(ll) ? a : ll, dep, k);
+  he = hc_new_hostentry(hc, tab->rp, a, ipa_zero(ll) ? a : ll, dep, k);
   rt_update_hostentry(tab, he);
   return he;
 }
@@ -317,7 +317,7 @@ static void
 krt_learn_scan(struct krt_proto *p, rte *e)
 {
   net *n0 = e->net;
-  net *n = net_get(&p->krt_table, n0->n.addr);
+  net *n = net_get(p->krt_table, n0->n.addr);
   rte *m, **mm;

   e->attrs = rta_lookup(e->attrs);
@@ -354,7 +354,7 @@ krt_learn_scan(struct krt_proto *p, rte *e)
 static void
 krt_learn_prune(struct krt_proto *p)
 {
-  struct fib *fib = &p->krt_table.fib;
+  struct fib *fib = &p->krt_table->fib;
   struct fib_iterator fit;

   KRT_TRACE(p, D_EVENTS, "Pruning inherited routes");
@@ -430,7 +430,7 @@ static void
 krt_learn_async(struct krt_proto *p, rte *e, int new)
 {
   net *n0 = e->net;
-  net *n = net_get(&p->krt_table, n0->n.addr);
+  net *n = net_get(p->krt_table, n0->n.addr);
   rte *g, **gg, *best, **bestp, *old_best;

   e->attrs = rta_lookup(e->attrs);
@@ -511,8 +511,9 @@ krt_learn_init(struct krt_proto *p)
       struct rtable_config *cf = mb_allocz(p->p.pool, sizeof(struct rtable_config));
       cf->name = "Inherited";
       cf->addr_type = p->p.net_type;
+      cf->internal = 1;

-      rt_setup(p->p.pool, &p->krt_table, cf);
+      p->krt_table = rt_setup(p->p.pool, cf);
     }
 }
@@ -524,7 +525,7 @@ krt_dump(struct proto *P)
   if (!KRT_CF->learn)
     return;
   debug("KRT: Table of inheritable routes\n");
-  rt_dump(&p->krt_table);
+  rt_dump(p->krt_table);
 }

 static void
@@ -49,7 +49,7 @@ struct krt_proto {
   struct krt_state sys;         /* Sysdep state */

 #ifdef KRT_ALLOW_LEARN
-  struct rtable krt_table;      /* Internal table of inherited routes */
+  struct rtable *krt_table;     /* Internal table of inherited routes */
 #endif

 #ifndef CONFIG_ALL_TABLES_AT_ONCE