1998-05-15 15:54:32 +08:00
|
|
|
/*
|
2000-06-02 01:12:19 +08:00
|
|
|
* BIRD -- Routing Tables
|
1998-05-15 15:54:32 +08:00
|
|
|
*
|
2000-01-17 00:44:50 +08:00
|
|
|
* (c) 1998--2000 Martin Mares <mj@ucw.cz>
|
1998-05-15 15:54:32 +08:00
|
|
|
*
|
|
|
|
* Can be freely distributed and used under the terms of the GNU GPL.
|
|
|
|
*/
|
|
|
|
|
2000-06-02 01:12:19 +08:00
|
|
|
/**
|
|
|
|
* DOC: Routing tables
|
|
|
|
*
|
|
|
|
* Routing tables are probably the most important structures BIRD uses. They
|
|
|
|
* hold all the information about known networks, the associated routes and
|
|
|
|
* their attributes.
|
|
|
|
*
|
2000-06-08 20:37:21 +08:00
|
|
|
* There are multiple routing tables (a primary one together with any
|
2000-06-02 01:12:19 +08:00
|
|
|
* number of secondary ones if requested by the configuration). Each table
|
|
|
|
* is basically a FIB containing entries describing the individual
|
2000-06-07 21:25:53 +08:00
|
|
|
* destination networks. For each network (represented by structure &net),
|
2000-06-08 20:37:21 +08:00
|
|
|
* there is a one-way linked list of route entries (&rte), the first entry
|
|
|
|
* on the list being the best one (i.e., the one we currently use
|
2000-06-02 01:12:19 +08:00
|
|
|
* for routing), the order of the other ones is undetermined.
|
|
|
|
*
|
|
|
|
* The &rte contains information specific to the route (preference, protocol
|
|
|
|
* metrics, time of last modification etc.) and a pointer to a &rta structure
|
|
|
|
* (see the route attribute module for a precise explanation) holding the
|
|
|
|
* remaining route attributes which are expected to be shared by multiple
|
|
|
|
* routes in order to conserve memory.
|
|
|
|
*/
|
|
|
|
|
2000-03-13 05:01:38 +08:00
|
|
|
#undef LOCAL_DEBUG
|
1999-02-14 03:15:28 +08:00
|
|
|
|
1998-05-15 15:54:32 +08:00
|
|
|
#include "nest/bird.h"
|
|
|
|
#include "nest/route.h"
|
1998-05-20 19:54:33 +08:00
|
|
|
#include "nest/protocol.h"
|
1999-12-01 23:10:21 +08:00
|
|
|
#include "nest/iface.h"
|
1998-05-20 19:54:33 +08:00
|
|
|
#include "lib/resource.h"
|
1999-02-14 05:29:01 +08:00
|
|
|
#include "lib/event.h"
|
2021-02-10 10:09:57 +08:00
|
|
|
#include "lib/timer.h"
|
1999-12-01 23:10:21 +08:00
|
|
|
#include "lib/string.h"
|
1999-05-18 04:14:52 +08:00
|
|
|
#include "conf/conf.h"
|
1999-03-17 22:31:26 +08:00
|
|
|
#include "filter/filter.h"
|
2019-02-08 20:38:12 +08:00
|
|
|
#include "filter/data.h"
|
2018-06-27 22:51:53 +08:00
|
|
|
#include "lib/hash.h"
|
2000-04-01 07:30:21 +08:00
|
|
|
#include "lib/string.h"
|
2004-06-01 01:16:47 +08:00
|
|
|
#include "lib/alloca.h"
|
2002-11-13 16:47:06 +08:00
|
|
|
|
2019-09-28 20:17:20 +08:00
|
|
|
#ifdef CONFIG_BGP
|
|
|
|
#include "proto/bgp/bgp.h"
|
|
|
|
#endif
|
|
|
|
|
2010-06-03 04:20:40 +08:00
|
|
|
pool *rt_table_pool;
|
|
|
|
|
1998-05-20 19:54:33 +08:00
|
|
|
static slab *rte_slab;
|
1999-04-06 04:25:03 +08:00
|
|
|
static linpool *rte_update_pool;
|
1998-05-20 19:54:33 +08:00
|
|
|
|
2018-11-21 00:38:19 +08:00
|
|
|
list routing_tables;
|
1999-02-14 05:29:01 +08:00
|
|
|
|
2010-07-05 23:50:19 +08:00
|
|
|
static void rt_free_hostcache(rtable *tab);
|
|
|
|
static void rt_notify_hostcache(rtable *tab, net *net);
|
|
|
|
static void rt_update_hostcache(rtable *tab);
|
|
|
|
static void rt_next_hop_update(rtable *tab);
|
2016-01-26 18:48:58 +08:00
|
|
|
static inline void rt_prune_table(rtable *tab);
|
2021-02-10 10:09:57 +08:00
|
|
|
static inline void rt_schedule_notify(rtable *tab);
|
2014-03-20 21:07:12 +08:00
|
|
|
|
2000-03-13 04:30:53 +08:00
|
|
|
|
2021-11-30 02:23:42 +08:00
|
|
|
/*
 * FIB init hook: if the table has a prefix trie attached, mirror each
 * newly inserted network node into the trie so that the trie-based
 * longest-prefix-match walks (net_route_*_trie) can find it.
 */
static void
net_init_with_trie(struct fib *f, void *N)
{
  /* The fib is embedded inside the rtable; recover the enclosing table */
  rtable *tab = SKIP_BACK(rtable, fib, f);
  net *n = N;

  /* Insert the exact prefix (pxlen used as both low and high bound) */
  if (tab->trie)
    trie_add_prefix(tab->trie, n->n.addr, n->n.addr->pxlen, n->n.addr->pxlen);
}
|
|
|
|
|
|
|
|
static inline net *
|
|
|
|
net_route_ip4_trie(rtable *t, const net_addr_ip4 *n0)
|
|
|
|
{
|
|
|
|
TRIE_WALK_TO_ROOT_IP4(t->trie, n0, n)
|
|
|
|
{
|
|
|
|
net *r;
|
|
|
|
if (r = net_find_valid(t, (net_addr *) &n))
|
|
|
|
return r;
|
|
|
|
}
|
|
|
|
TRIE_WALK_TO_ROOT_END;
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Longest-prefix-match lookup for VPNv4. The trie is keyed on the plain
 * IPv4 part, so each trie step is recombined with the original route
 * distinguisher before probing the fib.
 */
static inline net *
net_route_vpn4_trie(rtable *t, const net_addr_vpn4 *n0)
{
  TRIE_WALK_TO_ROOT_IP4(t->trie, (const net_addr_ip4 *) n0, px)
  {
    /* Rebuild a VPN4 address: walked prefix + RD of the query */
    net_addr_vpn4 n = NET_ADDR_VPN4(px.prefix, px.pxlen, n0->rd);

    net *r;
    if (r = net_find_valid(t, (net_addr *) &n))
      return r;
  }
  TRIE_WALK_TO_ROOT_END;

  return NULL;
}
|
|
|
|
|
|
|
|
static inline net *
|
|
|
|
net_route_ip6_trie(rtable *t, const net_addr_ip6 *n0)
|
|
|
|
{
|
|
|
|
TRIE_WALK_TO_ROOT_IP6(t->trie, n0, n)
|
|
|
|
{
|
|
|
|
net *r;
|
|
|
|
if (r = net_find_valid(t, (net_addr *) &n))
|
|
|
|
return r;
|
|
|
|
}
|
|
|
|
TRIE_WALK_TO_ROOT_END;
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Longest-prefix-match lookup for VPNv6. The trie is keyed on the plain
 * IPv6 part, so each trie step is recombined with the original route
 * distinguisher before probing the fib.
 */
static inline net *
net_route_vpn6_trie(rtable *t, const net_addr_vpn6 *n0)
{
  TRIE_WALK_TO_ROOT_IP6(t->trie, (const net_addr_ip6 *) n0, px)
  {
    /* Rebuild a VPN6 address: walked prefix + RD of the query */
    net_addr_vpn6 n = NET_ADDR_VPN6(px.prefix, px.pxlen, n0->rd);

    net *r;
    if (r = net_find_valid(t, (net_addr *) &n))
      return r;
  }
  TRIE_WALK_TO_ROOT_END;

  return NULL;
}
|
|
|
|
|
2015-12-24 22:52:03 +08:00
|
|
|
/*
 * SADR (source-address-dependent routing) lookup via the trie.
 * The trie walk shortens the dst prefix step by step; at every step the
 * fib hash chain for that dst prefix is scanned for the entry whose src
 * prefix covers the query and is longest. Returns the best match or NULL.
 */
static inline void *
net_route_ip6_sadr_trie(rtable *t, const net_addr_ip6_sadr *n0)
{
  TRIE_WALK_TO_ROOT_IP6(t->trie, (const net_addr_ip6 *) n0, px)
  {
    /* Candidate address: walked dst prefix + src prefix of the query */
    net_addr_ip6_sadr n = NET_ADDR_IP6_SADR(px.prefix, px.pxlen, n0->src_prefix, n0->src_pxlen);
    net *best = NULL;
    int best_pxlen = 0;

    /* We need to do dst first matching. Since sadr addresses are hashed on dst
       prefix only, find the hash table chain and go through it to find the
       match with the longest matching src prefix. */
    for (struct fib_node *fn = fib_get_chain(&t->fib, (net_addr *) &n); fn; fn = fn->next)
    {
      net_addr_ip6_sadr *a = (void *) fn->addr;

      if (net_equal_dst_ip6_sadr(&n, a) &&
	  net_in_net_src_ip6_sadr(&n, a) &&
	  (a->src_pxlen >= best_pxlen))
      {
	best = fib_node_to_user(&t->fib, fn);
	best_pxlen = a->src_pxlen;
      }
    }

    /* A dst match at this level wins over any shorter dst prefix */
    if (best)
      return best;
  }
  TRIE_WALK_TO_ROOT_END;

  return NULL;
}
|
|
|
|
|
|
|
|
/*
 * Longest-prefix-match lookup of an IPv4 prefix without a trie: probe the
 * fib for the exact prefix, then keep clearing the lowest prefix bit and
 * shortening the prefix until a valid route is found or pxlen reaches 0.
 */
static inline net *
net_route_ip4_fib(rtable *t, const net_addr_ip4 *n0)
{
  /* Work on a local copy, the query address is const */
  net_addr_ip4 n;
  net_copy_ip4(&n, n0);

  net *r;
  while (r = net_find_valid(t, (net_addr *) &n), (!r) && (n.pxlen > 0))
  {
    n.pxlen--;
    ip4_clrbit(&n.prefix, n.pxlen);
  }

  return r;
}
|
2010-07-31 07:04:32 +08:00
|
|
|
|
2021-11-30 02:23:42 +08:00
|
|
|
/*
 * Longest-prefix-match lookup for VPNv4 without a trie; same shortening
 * loop as net_route_ip4_fib(), but carried out on a vpn4 address so the
 * route distinguisher stays part of every probe.
 */
static inline net *
net_route_vpn4_fib(rtable *t, const net_addr_vpn4 *n0)
{
  /* Work on a local copy, the query address is const */
  net_addr_vpn4 n;
  net_copy_vpn4(&n, n0);

  net *r;
  while (r = net_find_valid(t, (net_addr *) &n), (!r) && (n.pxlen > 0))
  {
    n.pxlen--;
    ip4_clrbit(&n.prefix, n.pxlen);
  }

  return r;
}
|
|
|
|
|
2021-11-30 02:23:42 +08:00
|
|
|
/*
 * Longest-prefix-match lookup of an IPv6 prefix without a trie: probe the
 * fib for the exact prefix, then keep clearing the lowest prefix bit and
 * shortening the prefix until a valid route is found or pxlen reaches 0.
 */
static inline net *
net_route_ip6_fib(rtable *t, const net_addr_ip6 *n0)
{
  /* Work on a local copy, the query address is const */
  net_addr_ip6 n;
  net_copy_ip6(&n, n0);

  net *r;
  while (r = net_find_valid(t, (net_addr *) &n), (!r) && (n.pxlen > 0))
  {
    n.pxlen--;
    ip6_clrbit(&n.prefix, n.pxlen);
  }

  return r;
}
|
2015-12-24 22:52:03 +08:00
|
|
|
|
2021-11-30 02:23:42 +08:00
|
|
|
/*
 * Longest-prefix-match lookup for VPNv6 without a trie; same shortening
 * loop as net_route_ip6_fib(), but carried out on a vpn6 address so the
 * route distinguisher stays part of every probe.
 */
static inline net *
net_route_vpn6_fib(rtable *t, const net_addr_vpn6 *n0)
{
  /* Work on a local copy, the query address is const */
  net_addr_vpn6 n;
  net_copy_vpn6(&n, n0);

  net *r;
  while (r = net_find_valid(t, (net_addr *) &n), (!r) && (n.pxlen > 0))
  {
    n.pxlen--;
    ip6_clrbit(&n.prefix, n.pxlen);
  }

  return r;
}
|
|
|
|
|
2018-02-13 23:27:57 +08:00
|
|
|
/*
 * SADR lookup without a trie: shorten the dst prefix one bit at a time;
 * at each dst length scan the fib hash chain for the entry whose src
 * prefix covers the query and is longest. Returns the best match or NULL.
 */
static inline void *
net_route_ip6_sadr_fib(rtable *t, const net_addr_ip6_sadr *n0)
{
  /* Work on a local copy, the query address is const */
  net_addr_ip6_sadr n;
  net_copy_ip6_sadr(&n, n0);

  while (1)
  {
    net *best = NULL;
    int best_pxlen = 0;

    /* We need to do dst first matching. Since sadr addresses are hashed on dst
       prefix only, find the hash table chain and go through it to find the
       match with the longest matching src prefix. */
    for (struct fib_node *fn = fib_get_chain(&t->fib, (net_addr *) &n); fn; fn = fn->next)
    {
      net_addr_ip6_sadr *a = (void *) fn->addr;

      if (net_equal_dst_ip6_sadr(&n, a) &&
	  net_in_net_src_ip6_sadr(&n, a) &&
	  (a->src_pxlen >= best_pxlen))
      {
	best = fib_node_to_user(&t->fib, fn);
	best_pxlen = a->src_pxlen;
      }
    }

    /* A dst match at this length wins over any shorter dst prefix */
    if (best)
      return best;

    if (!n.dst_pxlen)
      break;

    /* Shorten the dst prefix by one bit and retry */
    n.dst_pxlen--;
    ip6_clrbit(&n.dst_prefix, n.dst_pxlen);
  }

  return NULL;
}
|
|
|
|
|
2021-11-30 02:23:42 +08:00
|
|
|
/*
 * net_route - longest-prefix-match lookup in a routing table.
 * Dispatches on the table's address type to the trie-based walker when
 * the table has a trie, otherwise to the bit-clearing fib walker.
 * Returns the matching network node or NULL.
 */
net *
net_route(rtable *tab, const net_addr *n)
{
  /* Table and query address types must agree */
  ASSERT(tab->addr_type == n->type);

  switch (n->type)
  {
  case NET_IP4:
    if (tab->trie)
      return net_route_ip4_trie(tab, (net_addr_ip4 *) n);
    else
      return net_route_ip4_fib (tab, (net_addr_ip4 *) n);

  case NET_VPN4:
    if (tab->trie)
      return net_route_vpn4_trie(tab, (net_addr_vpn4 *) n);
    else
      return net_route_vpn4_fib (tab, (net_addr_vpn4 *) n);

  case NET_IP6:
    if (tab->trie)
      return net_route_ip6_trie(tab, (net_addr_ip6 *) n);
    else
      return net_route_ip6_fib (tab, (net_addr_ip6 *) n);

  case NET_VPN6:
    if (tab->trie)
      return net_route_vpn6_trie(tab, (net_addr_vpn6 *) n);
    else
      return net_route_vpn6_fib (tab, (net_addr_vpn6 *) n);

  case NET_IP6_SADR:
    if (tab->trie)
      return net_route_ip6_sadr_trie(tab, (net_addr_ip6_sadr *) n);
    else
      return net_route_ip6_sadr_fib (tab, (net_addr_ip6_sadr *) n);

  /* Other table types (ROA, flowspec, ...) have no LPM semantics here */
  default:
    return NULL;
  }
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
 * ROA check (IPv4) using the table trie: walk all covering prefixes of
 * @px; for each, scan its fib chain for ROA entries with a valid route.
 * Returns ROA_VALID on an ASN + maxlen match, ROA_INVALID if covering
 * ROAs exist but none matches, ROA_UNKNOWN if no covering ROA exists.
 */
static int
net_roa_check_ip4_trie(rtable *tab, const net_addr_ip4 *px, u32 asn)
{
  /* Set once any covering ROA with a valid route is seen */
  int anything = 0;

  TRIE_WALK_TO_ROOT_IP4(tab->trie, px, px0)
  {
    /* ROA entries are hashed by (prefix, pxlen) with zeroed asn/maxlen */
    net_addr_roa4 roa0 = NET_ADDR_ROA4(px0.prefix, px0.pxlen, 0, 0);

    struct fib_node *fn;
    for (fn = fib_get_chain(&tab->fib, (net_addr *) &roa0); fn; fn = fn->next)
    {
      net_addr_roa4 *roa = (void *) fn->addr;
      net *r = fib_node_to_user(&tab->fib, fn);

      if (net_equal_prefix_roa4(roa, &roa0) && rte_is_valid(r->routes))
      {
	anything = 1;
	if (asn && (roa->asn == asn) && (roa->max_pxlen >= px->pxlen))
	  return ROA_VALID;
      }
    }
  }
  TRIE_WALK_TO_ROOT_END;

  return anything ? ROA_INVALID : ROA_UNKNOWN;
}
|
|
|
|
|
|
|
|
/*
 * ROA check (IPv4) without a trie: enumerate covering prefixes of @px by
 * repeatedly shortening it; same VALID/INVALID/UNKNOWN semantics as the
 * trie variant.
 */
static int
net_roa_check_ip4_fib(rtable *tab, const net_addr_ip4 *px, u32 asn)
{
  /* ROA entries are hashed by (prefix, pxlen) with zeroed asn/maxlen */
  struct net_addr_roa4 n = NET_ADDR_ROA4(px->prefix, px->pxlen, 0, 0);
  struct fib_node *fn;
  /* Set once any covering ROA with a valid route is seen */
  int anything = 0;

  while (1)
  {
    for (fn = fib_get_chain(&tab->fib, (net_addr *) &n); fn; fn = fn->next)
    {
      net_addr_roa4 *roa = (void *) fn->addr;
      net *r = fib_node_to_user(&tab->fib, fn);

      if (net_equal_prefix_roa4(roa, &n) && rte_is_valid(r->routes))
      {
	anything = 1;
	if (asn && (roa->asn == asn) && (roa->max_pxlen >= px->pxlen))
	  return ROA_VALID;
      }
    }

    if (n.pxlen == 0)
      break;

    /* Shorten the prefix by one bit and retry */
    n.pxlen--;
    ip4_clrbit(&n.prefix, n.pxlen);
  }

  return anything ? ROA_INVALID : ROA_UNKNOWN;
}
|
|
|
|
|
2016-05-12 22:04:47 +08:00
|
|
|
/*
 * ROA check (IPv6) using the table trie; mirrors net_roa_check_ip4_trie().
 * Returns ROA_VALID on an ASN + maxlen match, ROA_INVALID if covering
 * ROAs exist but none matches, ROA_UNKNOWN if no covering ROA exists.
 */
static int
net_roa_check_ip6_trie(rtable *tab, const net_addr_ip6 *px, u32 asn)
{
  /* Set once any covering ROA with a valid route is seen */
  int anything = 0;

  TRIE_WALK_TO_ROOT_IP6(tab->trie, px, px0)
  {
    /* ROA entries are hashed by (prefix, pxlen) with zeroed asn/maxlen */
    net_addr_roa6 roa0 = NET_ADDR_ROA6(px0.prefix, px0.pxlen, 0, 0);

    struct fib_node *fn;
    for (fn = fib_get_chain(&tab->fib, (net_addr *) &roa0); fn; fn = fn->next)
    {
      net_addr_roa6 *roa = (void *) fn->addr;
      net *r = fib_node_to_user(&tab->fib, fn);

      if (net_equal_prefix_roa6(roa, &roa0) && rte_is_valid(r->routes))
      {
	anything = 1;
	if (asn && (roa->asn == asn) && (roa->max_pxlen >= px->pxlen))
	  return ROA_VALID;
      }
    }
  }
  TRIE_WALK_TO_ROOT_END;

  return anything ? ROA_INVALID : ROA_UNKNOWN;
}
|
|
|
|
|
|
|
|
/*
 * ROA check (IPv6) without a trie; mirrors net_roa_check_ip4_fib() with
 * the prefix shortened one bit per iteration.
 */
static int
net_roa_check_ip6_fib(rtable *tab, const net_addr_ip6 *px, u32 asn)
{
  /* ROA entries are hashed by (prefix, pxlen) with zeroed asn/maxlen */
  struct net_addr_roa6 n = NET_ADDR_ROA6(px->prefix, px->pxlen, 0, 0);
  struct fib_node *fn;
  /* Set once any covering ROA with a valid route is seen */
  int anything = 0;

  while (1)
  {
    for (fn = fib_get_chain(&tab->fib, (net_addr *) &n); fn; fn = fn->next)
    {
      net_addr_roa6 *roa = (void *) fn->addr;
      net *r = fib_node_to_user(&tab->fib, fn);

      if (net_equal_prefix_roa6(roa, &n) && rte_is_valid(r->routes))
      {
	anything = 1;
	if (asn && (roa->asn == asn) && (roa->max_pxlen >= px->pxlen))
	  return ROA_VALID;
      }
    }

    if (n.pxlen == 0)
      break;

    /* Shorten the prefix by one bit and retry */
    n.pxlen--;
    ip6_clrbit(&n.prefix, n.pxlen);
  }

  return anything ? ROA_INVALID : ROA_UNKNOWN;
}
|
|
|
|
|
2016-05-12 22:04:47 +08:00
|
|
|
/**
|
|
|
|
* roa_check - check validity of route origination in a ROA table
|
|
|
|
* @tab: ROA table
|
|
|
|
* @n: network prefix to check
|
|
|
|
* @asn: AS number of network prefix
|
|
|
|
*
|
|
|
|
* Implements RFC 6483 route validation for the given network prefix. The
|
|
|
|
* procedure is to find all candidate ROAs - ROAs whose prefixes cover the given
|
|
|
|
* network prefix. If there is no candidate ROA, return ROA_UNKNOWN. If there is
|
|
|
|
* a candidate ROA with matching ASN and maxlen field greater than or equal to
|
|
|
|
* the given prefix length, return ROA_VALID. Otherwise, return ROA_INVALID. If
|
|
|
|
* caller cannot determine origin AS, 0 could be used (in that case ROA_VALID
|
|
|
|
* cannot happen). Table @tab must have type NET_ROA4 or NET_ROA6, network @n
|
|
|
|
* must have type NET_IP4 or NET_IP6, respectively.
|
|
|
|
*/
|
|
|
|
int
net_roa_check(rtable *tab, const net_addr *n, u32 asn)
{
  /* Dispatch on (table type, prefix type) pair; prefer trie when present */
  if ((tab->addr_type == NET_ROA4) && (n->type == NET_IP4))
  {
    if (tab->trie)
      return net_roa_check_ip4_trie(tab, (const net_addr_ip4 *) n, asn);
    else
      return net_roa_check_ip4_fib (tab, (const net_addr_ip4 *) n, asn);
  }
  else if ((tab->addr_type == NET_ROA6) && (n->type == NET_IP6))
  {
    if (tab->trie)
      return net_roa_check_ip6_trie(tab, (const net_addr_ip6 *) n, asn);
    else
      return net_roa_check_ip6_fib (tab, (const net_addr_ip6 *) n, asn);
  }
  else
    return ROA_UNKNOWN;	/* Should not happen */
}
|
1998-05-20 19:54:33 +08:00
|
|
|
|
2000-06-02 01:12:19 +08:00
|
|
|
/**
|
|
|
|
* rte_find - find a route
|
|
|
|
* @net: network node
|
2012-08-14 22:25:22 +08:00
|
|
|
* @src: route source
|
2000-06-02 01:12:19 +08:00
|
|
|
*
|
|
|
|
* The rte_find() function returns a route for destination @net
|
2012-08-14 22:25:22 +08:00
|
|
|
* which is from route source @src.
|
2000-06-02 01:12:19 +08:00
|
|
|
*/
|
1998-05-20 19:54:33 +08:00
|
|
|
rte *
|
2012-08-14 22:25:22 +08:00
|
|
|
rte_find(net *net, struct rte_src *src)
|
1998-05-20 19:54:33 +08:00
|
|
|
{
|
|
|
|
rte *e = net->routes;
|
|
|
|
|
2012-08-14 22:25:22 +08:00
|
|
|
while (e && e->attrs->src != src)
|
1998-05-20 19:54:33 +08:00
|
|
|
e = e->next;
|
|
|
|
return e;
|
|
|
|
}
|
|
|
|
|
2000-06-02 01:12:19 +08:00
|
|
|
/**
|
|
|
|
* rte_get_temp - get a temporary &rte
|
2000-06-02 20:29:55 +08:00
|
|
|
* @a: attributes to assign to the new route (a &rta; in case it's
|
2000-06-07 20:29:08 +08:00
|
|
|
* un-cached, rte_update() will create a cached copy automatically)
|
2000-06-02 01:12:19 +08:00
|
|
|
*
|
|
|
|
* Create a temporary &rte and bind it with the attributes @a.
|
|
|
|
* Also set route preference to the default preference set for
|
|
|
|
* the protocol.
|
|
|
|
*/
|
1998-05-20 19:54:33 +08:00
|
|
|
rte *
rte_get_temp(rta *a)
{
  /* Route entries come from a dedicated slab allocator */
  rte *e = sl_alloc(rte_slab);

  e->attrs = a;
  e->id = 0;
  e->flags = 0;
  /* Preference 0 here; per the doc comment above, the caller/protocol
     default is applied later */
  e->pref = 0;
  return e;
}
|
|
|
|
|
1999-04-06 04:25:03 +08:00
|
|
|
/*
 * Make a private copy of route @r: shallow struct copy plus a new
 * reference to the (shared) &rta. Flags are cleared, so the copy is
 * not marked REF_COW. Caller owns the returned entry.
 */
rte *
rte_do_cow(rte *r)
{
  rte *e = sl_alloc(rte_slab);

  memcpy(e, r, sizeof(rte));
  /* rta_clone() bumps the refcount of cached attributes */
  e->attrs = rta_clone(r->attrs);
  e->flags = 0;
  return e;
}
|
|
|
|
|
2015-06-08 08:20:43 +08:00
|
|
|
/**
|
|
|
|
* rte_cow_rta - get a private writable copy of &rte with writable &rta
|
|
|
|
* @r: a route entry to be copied
|
|
|
|
* @lp: a linpool from which to allocate &rta
|
|
|
|
*
|
|
|
|
* rte_cow_rta() takes a &rte and prepares it and associated &rta for
|
|
|
|
* modification. There are three possibilities: First, both &rte and &rta are
|
|
|
|
* private copies, in that case they are returned unchanged. Second, &rte is
|
|
|
|
* private copy, but &rta is cached, in that case &rta is duplicated using
|
|
|
|
* rta_do_cow(). Third, both &rte is shared and &rta is cached, in that case
|
|
|
|
* both structures are duplicated by rte_do_cow() and rta_do_cow().
|
|
|
|
*
|
|
|
|
* Note that in the second case, cached &rta loses one reference, while private
|
|
|
|
* copy created by rta_do_cow() is a shallow copy sharing indirect data (eattrs,
|
|
|
|
* nexthops, ...) with it. To work properly, original shared &rta should have
|
|
|
|
* another reference during the life of created private copy.
|
|
|
|
*
|
|
|
|
* Result: a pointer to the new writable &rte with writable &rta.
|
|
|
|
*/
|
|
|
|
rte *
rte_cow_rta(rte *r, linpool *lp)
{
  /* Already private (un-cached) rta: nothing to do */
  if (!rta_is_cached(r->attrs))
    return r;

  /* Ensure the rte itself is writable, then shallow-copy the rta */
  r = rte_cow(r);
  rta *a = rta_do_cow(r->attrs, lp);
  /* Drop the reference to the old cached rta (see doc comment above
     for why the caller may need to hold another ref) */
  rta_free(r->attrs);
  r->attrs = a;
  return r;
}
|
|
|
|
|
2019-03-07 01:14:12 +08:00
|
|
|
|
2019-03-15 00:22:22 +08:00
|
|
|
/**
|
|
|
|
* rte_init_tmp_attrs - initialize temporary ea_list for route
|
|
|
|
* @r: route entry to be modified
|
|
|
|
* @lp: linpool from which to allocate attributes
|
|
|
|
* @max: maximum number of added temporary attribus
|
|
|
|
*
|
|
|
|
* This function is supposed to be called from make_tmp_attrs() and
|
|
|
|
* store_tmp_attrs() hooks before rte_make_tmp_attr() / rte_store_tmp_attr()
|
|
|
|
* functions. It allocates &ea_list with length for @max items for temporary
|
|
|
|
* attributes and puts it on top of eattrs stack.
|
|
|
|
*/
|
|
|
|
void
rte_init_tmp_attrs(rte *r, linpool *lp, uint max)
{
  /* Room for up to @max eattrs; count starts at 0 and grows as
     rte_make_tmp_attr() / rte_store_tmp_attr() fill slots in */
  struct ea_list *e = lp_alloc(lp, sizeof(struct ea_list) + max * sizeof(eattr));

  /* Push the temporary layer on top of the existing eattrs stack */
  e->next = r->attrs->eattrs;
  e->flags = EALF_SORTED | EALF_TEMP;
  e->count = 0;

  r->attrs->eattrs = e;
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* rte_make_tmp_attr - make temporary eattr from private route fields
|
|
|
|
* @r: route entry to be modified
|
|
|
|
* @id: attribute ID
|
|
|
|
* @type: attribute type
|
|
|
|
* @val: attribute value (u32 or adata ptr)
|
|
|
|
*
|
|
|
|
* This function is supposed to be called from make_tmp_attrs() hook for
|
|
|
|
* each temporary attribute, after temporary &ea_list was initialized by
|
|
|
|
* rte_init_tmp_attrs(). It checks whether temporary attribute is supposed to
|
|
|
|
* be defined (based on route pflags) and if so then it fills &eattr field in
|
|
|
|
* preallocated temporary &ea_list on top of route @r eattrs stack.
|
|
|
|
*
|
|
|
|
* Note that it may require free &eattr in temporary &ea_list, so it must not be
|
|
|
|
* called more times than @max argument of rte_init_tmp_attrs().
|
|
|
|
*/
|
2019-03-07 01:14:12 +08:00
|
|
|
void
rte_make_tmp_attr(rte *r, uint id, uint type, uintptr_t val)
{
  /* Only expand the attribute if the route's pflags say it is defined */
  if (r->pflags & EA_ID_FLAG(id))
  {
    /* Fill the next free slot of the temporary ea_list pushed by
       rte_init_tmp_attrs(); must not exceed its @max capacity */
    ea_list *e = r->attrs->eattrs;
    eattr *a = &e->attrs[e->count++];
    a->id = id;
    a->type = type;
    a->flags = 0;

    /* Embedded attributes carry a u32 value, others an adata pointer */
    if (type & EAF_EMBEDDED)
      a->u.data = (u32) val;
    else
      a->u.ptr = (struct adata *) val;
  }
}
|
|
|
|
|
2019-03-15 00:22:22 +08:00
|
|
|
/**
|
|
|
|
* rte_store_tmp_attr - store temporary eattr to private route fields
|
|
|
|
* @r: route entry to be modified
|
|
|
|
* @id: attribute ID
|
|
|
|
*
|
|
|
|
* This function is supposed to be called from store_tmp_attrs() hook for
|
|
|
|
* each temporary attribute, after temporary &ea_list was initialized by
|
|
|
|
* rte_init_tmp_attrs(). It checks whether temporary attribute is defined in
|
|
|
|
* route @r eattrs stack, updates route pflags accordingly, undefines it by
|
|
|
|
* filling &eattr field in preallocated temporary &ea_list on top of the eattrs
|
|
|
|
* stack, and returns the value. Caller is supposed to store it in the
|
|
|
|
* appropriate private field.
|
|
|
|
*
|
|
|
|
* Note that it may require free &eattr in temporary &ea_list, so it must not be
|
|
|
|
* called more times than @max argument of rte_init_tmp_attrs()
|
|
|
|
*/
|
|
|
|
uintptr_t
rte_store_tmp_attr(rte *r, uint id)
{
  ea_list *e = r->attrs->eattrs;
  /* Look for the attribute below the temporary layer (e->next) */
  eattr *a = ea_find(e->next, id);

  if (a)
  {
    /* Shadow it with an UNDEF entry in the temporary layer, mark it
       as privately stored in pflags, and hand the value back */
    e->attrs[e->count++] = (struct eattr) { .id = id, .type = EAF_TYPE_UNDEF };
    r->pflags |= EA_ID_FLAG(id);
    return (a->type & EAF_EMBEDDED) ? a->u.data : (uintptr_t) a->u.ptr;
  }
  else
  {
    /* Attribute absent: clear the pflag and return a zero value */
    r->pflags &= ~EA_ID_FLAG(id);
    return 0;
  }
}
|
|
|
|
|
2019-03-15 00:22:22 +08:00
|
|
|
/**
|
|
|
|
* rte_make_tmp_attrs - prepare route by adding all relevant temporary route attributes
|
|
|
|
* @r: route entry to be modified (may be replaced if COW)
|
|
|
|
* @lp: linpool from which to allocate attributes
|
|
|
|
* @old_attrs: temporary ref to old &rta (may be NULL)
|
|
|
|
*
|
|
|
|
* This function expands privately stored protocol-dependent route attributes
|
|
|
|
* to a uniform &eattr / &ea_list representation. It is essentially a wrapper
|
|
|
|
* around protocol make_tmp_attrs() hook, which does some additional work like
|
|
|
|
* ensuring that route @r is writable.
|
|
|
|
*
|
|
|
|
* The route @r may be read-only (with %REF_COW flag), in that case rw copy is
|
|
|
|
* obtained by rte_cow() and @r is replaced. If @rte is originally rw, it may be
|
|
|
|
* directly modified (and it is never copied).
|
|
|
|
*
|
|
|
|
* If the @old_attrs ptr is supplied, the function obtains another reference of
|
|
|
|
* old cached &rta, that is necessary in some cases (see rte_cow_rta() for
|
|
|
|
* details). It is freed by rte_store_tmp_attrs(), or manually by rta_free().
|
|
|
|
*
|
|
|
|
* Generally, if caller ensures that @r is read-only (e.g. in route export) then
|
|
|
|
* it may ignore @old_attrs (and set it to NULL), but must handle replacement of
|
|
|
|
* @r. If caller ensures that @r is writable (e.g. in route import) then it may
|
|
|
|
* ignore replacement of @r, but it must handle @old_attrs.
|
|
|
|
*/
|
|
|
|
void
rte_make_tmp_attrs(rte **r, linpool *lp, rta **old_attrs)
{
  /* Protocols without a make_tmp_attrs hook have nothing to expand */
  void (*make_tmp_attrs)(rte *r, linpool *lp);
  make_tmp_attrs = (*r)->attrs->src->proto->make_tmp_attrs;

  if (!make_tmp_attrs)
    return;

  /* We may need to keep ref to old attributes, will be freed in rte_store_tmp_attrs() */
  if (old_attrs)
    *old_attrs = rta_is_cached((*r)->attrs) ? rta_clone((*r)->attrs) : NULL;

  /* Make both rte and rta writable, then let the protocol expand its
     private fields into the temporary ea_list */
  *r = rte_cow_rta(*r, lp);
  make_tmp_attrs(*r, lp);
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* rte_store_tmp_attrs - store temporary route attributes back to private route fields
|
|
|
|
* @r: route entry to be modified
|
|
|
|
* @lp: linpool from which to allocate attributes
|
|
|
|
* @old_attrs: temporary ref to old &rta
|
|
|
|
*
|
|
|
|
* This function stores temporary route attributes that were expanded by
|
|
|
|
* rte_make_tmp_attrs() back to private route fields and also undefines them.
|
|
|
|
* It is essentially a wrapper around protocol store_tmp_attrs() hook, which
|
|
|
|
* does some additional work like shortcut if there is no change and cleanup
|
|
|
|
* of @old_attrs reference obtained by rte_make_tmp_attrs().
|
|
|
|
*/
|
|
|
|
static void
rte_store_tmp_attrs(rte *r, linpool *lp, rta *old_attrs)
{
  /* Protocols without a store_tmp_attrs hook have nothing to store back */
  void (*store_tmp_attrs)(rte *rt, linpool *lp);
  store_tmp_attrs = r->attrs->src->proto->store_tmp_attrs;

  if (!store_tmp_attrs)
    return;

  /* rte_make_tmp_attrs() must have made the rta private */
  ASSERT(!rta_is_cached(r->attrs));

  /* If there is no new ea_list, we just skip the temporary ea_list */
  ea_list *ea = r->attrs->eattrs;
  if (ea && (ea->flags & EALF_TEMP))
    r->attrs->eattrs = ea->next;
  else
    store_tmp_attrs(r, lp);

  /* Free ref we got in rte_make_tmp_attrs(), have to do rta_lookup() first */
  r->attrs = rta_lookup(r->attrs);
  rta_free(old_attrs);
}
|
|
|
|
|
2019-03-07 01:14:12 +08:00
|
|
|
|
1998-05-20 19:54:33 +08:00
|
|
|
static int				/* Actually better or at least as good as */
rte_better(rte *new, rte *old)
{
  int (*better)(rte *, rte *);

  /* A valid route beats an invalid one; two invalid routes keep order */
  if (!rte_is_valid(old))
    return 1;
  if (!rte_is_valid(new))
    return 0;

  /* Primary criterion: route preference */
  if (new->pref > old->pref)
    return 1;
  if (new->pref < old->pref)
    return 0;
  if (new->attrs->src->proto->proto != old->attrs->src->proto->proto)
  {
    /*
     *  If the user has configured protocol preferences, so that two different protocols
     *  have the same preference, try to break the tie by comparing addresses. Not too
     *  useful, but keeps the ordering of routes unambiguous.
     */
    return new->attrs->src->proto->proto > old->attrs->src->proto->proto;
  }
  /* Same protocol: delegate to its rte_better hook if it has one */
  if (better = new->attrs->src->proto->rte_better)
    return better(new, old);
  return 0;
}
|
|
|
|
|
2015-06-08 08:20:43 +08:00
|
|
|
static int
|
|
|
|
rte_mergable(rte *pri, rte *sec)
|
|
|
|
{
|
|
|
|
int (*mergable)(rte *, rte *);
|
|
|
|
|
|
|
|
if (!rte_is_valid(pri) || !rte_is_valid(sec))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (pri->pref != sec->pref)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (pri->attrs->src->proto->proto != sec->attrs->src->proto->proto)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (mergable = pri->attrs->src->proto->rte_mergable)
|
|
|
|
return mergable(pri, sec);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2000-03-13 04:30:53 +08:00
|
|
|
/*
 * Log one route event: protocol.channel, direction character
 * ('>' import / '<' export), caller-supplied message, network and
 * destination type.
 */
static void
rte_trace(struct channel *c, rte *e, int dir, char *msg)
{
  log(L_TRACE "%s.%s %c %s %N %s",
      c->proto->name, c->name ?: "?", dir, msg, e->net->n.addr,
      rta_dest_name(e->attrs->dest));
}
|
|
|
|
|
|
|
|
static inline void
|
2020-12-08 05:19:40 +08:00
|
|
|
rte_trace_in(uint flag, struct channel *c, rte *e, char *msg)
|
2000-03-13 04:30:53 +08:00
|
|
|
{
|
2020-12-08 05:19:40 +08:00
|
|
|
if ((c->debug & flag) || (c->proto->debug & flag))
|
|
|
|
rte_trace(c, e, '>', msg);
|
2000-03-13 04:30:53 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline void
|
2020-12-08 05:19:40 +08:00
|
|
|
rte_trace_out(uint flag, struct channel *c, rte *e, char *msg)
|
2000-03-13 04:30:53 +08:00
|
|
|
{
|
2020-12-08 05:19:40 +08:00
|
|
|
if ((c->debug & flag) || (c->proto->debug & flag))
|
|
|
|
rte_trace(c, e, '<', msg);
|
2000-03-13 04:30:53 +08:00
|
|
|
}
|
|
|
|
|
2012-04-15 21:07:58 +08:00
|
|
|
/*
 * export_filter_ - run a route through a channel's export pipeline.
 *
 * Applies (in order) the protocol's preexport() hook and the channel's
 * export filter to @rt0. On acceptance, returns the (possibly rewritten)
 * route; if the filter produced a modified copy, it is also stored in
 * *@rt_free so the caller can release it later. On rejection, any
 * temporary copy is freed and NULL is returned.
 *
 * With @silent set, statistics and trace messages are suppressed
 * (used for speculative evaluation, e.g. from rt_export_merged()).
 */
static rte *
export_filter_(struct channel *c, rte *rt0, rte **rt_free, linpool *pool, int silent)
{
  struct proto *p = c->proto;
  const struct filter *filter = c->out_filter;
  struct proto_stats *stats = &c->stats;
  rte *rt;
  int v;

  rt = rt0;
  *rt_free = NULL;

  /* Protocol veto/force-accept hook; 0 means "no opinion, ask the filter" */
  v = p->preexport ? p->preexport(p, &rt, pool) : 0;
  if (v < 0)
    {
      if (silent)
	goto reject;

      stats->exp_updates_rejected++;
      if (v == RIC_REJECT)
	rte_trace_out(D_FILTERS, c, rt, "rejected by protocol");
      goto reject;
    }
  if (v > 0)
    {
      if (!silent)
	rte_trace_out(D_FILTERS, c, rt, "forced accept by protocol");
      goto accept;
    }

  /* Expand protocol-private attributes so the filter can read them */
  rte_make_tmp_attrs(&rt, pool, NULL);

  /* Filter rejects when it is FILTER_REJECT or returns worse than F_ACCEPT */
  v = filter && ((filter == FILTER_REJECT) ||
		 (f_run(filter, &rt, pool,
			(silent ? FF_SILENT : 0)) > F_ACCEPT));
  if (v)
    {
      if (silent)
	goto reject;

      stats->exp_updates_filtered++;
      rte_trace_out(D_FILTERS, c, rt, "filtered out");
      goto reject;
    }

#ifdef CONFIG_PIPE
  /* Pipes need rte with stored tmpattrs, remaining protocols need expanded tmpattrs */
  if (p->proto == &proto_pipe)
    rte_store_tmp_attrs(rt, pool, NULL);
#endif

 accept:
  /* Hand ownership of a filter-made copy to the caller via *rt_free */
  if (rt != rt0)
    *rt_free = rt;
  return rt;

 reject:
  /* Discard temporary rte */
  if (rt != rt0)
    rte_free(rt);
  return NULL;
}
|
|
|
|
|
2016-09-06 23:08:45 +08:00
|
|
|
/* Convenience wrapper around export_filter_() using the shared update pool. */
static inline rte *
export_filter(struct channel *c, rte *rt0, rte **rt_free, int silent)
{
  rte *result = export_filter_(c, rt0, rt_free, rte_update_pool, silent);
  return result;
}
|
|
|
|
|
2012-04-15 21:07:58 +08:00
|
|
|
/*
 * do_rt_notify - deliver an already-filtered route change to one channel.
 *
 * @new/@old are the post-filter routes (either may be NULL). Handles the
 * export limit, the optional export table, per-channel statistics, the
 * export bitmap bookkeeping, and finally calls the protocol's rt_notify()
 * hook. The export_map bitmap tracks which route ids this channel has
 * currently exported.
 */
static void
do_rt_notify(struct channel *c, net *net, rte *new, rte *old, int refeed)
{
  struct proto *p = c->proto;
  struct proto_stats *stats = &c->stats;

  if (refeed && new)
    c->refeed_count++;

  /* Apply export limit (only checked when a genuinely new route is added) */
  struct channel_limit *l = &c->out_limit;
  if (l->action && !old && new)
    {
      if (stats->exp_routes >= l->limit)
	channel_notify_limit(c, l, PLD_OUT, stats->exp_routes);

      if (l->state == PLS_BLOCKED)
	{
	  stats->exp_updates_rejected++;
	  rte_trace_out(D_FILTERS, c, new, "rejected [limit]");
	  return;
	}
    }

  /* Apply export table; a zero return means the update was absorbed there */
  if (c->out_table && !rte_update_out(c, net->n.addr, new, old, refeed))
    return;

  if (new)
    stats->exp_updates_accepted++;
  else
    stats->exp_withdraws_accepted++;

  /* Keep export bitmap and exported-route counter in sync */
  if (old)
    {
      bmap_clear(&c->export_map, old->id);
      stats->exp_routes--;
    }

  if (new)
    {
      bmap_set(&c->export_map, new->id);
      stats->exp_routes++;
    }

  if (p->debug & D_ROUTES)
    {
      if (new && old)
	rte_trace_out(D_ROUTES, c, new, "replaced");
      else if (new)
	rte_trace_out(D_ROUTES, c, new, "added");
      else if (old)
	rte_trace_out(D_ROUTES, c, old, "removed");
    }

  p->rt_notify(p, c, net, new, old);
}
|
|
|
|
|
|
|
|
/*
 * rt_notify_basic - export preprocessing for RA_OPTIMAL / RA_ANY channels.
 *
 * Runs @new through the export filter and suppresses @old if it was never
 * actually exported on this channel (checked via the export bitmap), then
 * forwards the remaining change to do_rt_notify().
 */
static void
rt_notify_basic(struct channel *c, net *net, rte *new, rte *old, int refeed)
{
  // struct proto *p = c->proto;
  rte *new_free = NULL;

  if (new)
    c->stats.exp_updates_received++;
  else
    c->stats.exp_withdraws_received++;

  /* new may be replaced by a filter-made copy (tracked in new_free) or dropped */
  if (new)
    new = export_filter(c, new, &new_free, 0);

  /* Ignore the withdrawal half if old was never exported on this channel */
  if (old && !bmap_test(&c->export_map, old->id))
    old = NULL;

  if (!new && !old)
    return;

  do_rt_notify(c, net, new, old, refeed);

  /* Discard temporary rte */
  if (new_free)
    rte_free(new_free);
}
|
|
|
|
|
|
|
|
/*
 * rt_notify_accepted - export preprocessing for RA_ACCEPTED channels.
 *
 * An RA_ACCEPTED channel announces the first route in the net's list that
 * passes its export filter. This function computes the old and new such
 * "accepted best" routes incrementally from the single changed pair
 * (@new_changed, @old_changed) and forwards the result to do_rt_notify().
 */
static void
rt_notify_accepted(struct channel *c, net *net, rte *new_changed, rte *old_changed, int refeed)
{
  // struct proto *p = c->proto;
  rte *new_best = NULL;
  rte *old_best = NULL;
  rte *new_free = NULL;
  int new_first = 0;

  /*
   * We assume that there are no changes in net route order except (added)
   * new_changed and (removed) old_changed. Therefore, the function is not
   * compatible with deterministic_med (where nontrivial reordering can happen
   * as a result of a route change) and with recomputation of recursive routes
   * due to next hop update (where many routes can be changed in one step).
   *
   * Note that we need this assumption just for optimizations, we could just
   * run full new_best recomputation otherwise.
   *
   * There are three cases:
   * feed or old_best is old_changed -> we need to recompute new_best
   * old_best is before new_changed -> new_best is old_best, ignore
   * old_best is after new_changed -> try new_changed, otherwise old_best
   */

  if (net->routes)
    c->stats.exp_updates_received++;
  else
    c->stats.exp_withdraws_received++;

  /* Find old_best - either old_changed, or route for net->routes */
  if (old_changed && bmap_test(&c->export_map, old_changed->id))
    old_best = old_changed;
  else
    {
      for (rte *r = net->routes; rte_is_valid(r); r = r->next)
	{
	  if (bmap_test(&c->export_map, r->id))
	    {
	      old_best = r;
	      break;
	    }

	  /* Note if new_changed found before old_best */
	  if (r == new_changed)
	    new_first = 1;
	}
    }

  /* Find new_best */
  if ((new_changed == old_changed) || (old_best == old_changed))
    {
      /* Feed or old_best changed -> find first accepted by filters */
      for (rte *r = net->routes; rte_is_valid(r); r = r->next)
	if (new_best = export_filter(c, r, &new_free, 0))
	  break;
    }
  else
    {
      /* Other cases -> either new_changed, or old_best (and nothing changed) */
      if (new_first && (new_changed = export_filter(c, new_changed, &new_free, 0)))
	new_best = new_changed;
      else
	return;
    }

  if (!new_best && !old_best)
    return;

  do_rt_notify(c, net, new_best, old_best, refeed);

  /* Discard temporary rte */
  if (new_free)
    rte_free(new_free);
}
|
|
|
|
|
2015-06-08 08:20:43 +08:00
|
|
|
|
2016-05-06 21:48:35 +08:00
|
|
|
static struct nexthop *
|
|
|
|
nexthop_merge_rta(struct nexthop *nhs, rta *a, linpool *pool, int max)
|
2015-06-08 08:20:43 +08:00
|
|
|
{
|
2016-05-06 21:48:35 +08:00
|
|
|
return nexthop_merge(nhs, &(a->nh), 1, 0, max, pool);
|
2015-06-08 08:20:43 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * rt_export_merged - build the merged (multipath) export route for @net.
 *
 * Starts from the best route, then folds in the next hops of every other
 * mergable route that also passes the export filter. Returns the resulting
 * route or NULL if the best route does not pass. If a modified copy was
 * created, it is also stored in *@rt_free for later release by the caller.
 * @silent suppresses stats/tracing for the best-route filter run.
 */
rte *
rt_export_merged(struct channel *c, net *net, rte **rt_free, linpool *pool, int silent)
{
  // struct proto *p = c->proto;
  struct nexthop *nhs = NULL;
  rte *best0, *best, *rt0, *rt, *tmp;

  best0 = net->routes;
  *rt_free = NULL;

  if (!rte_is_valid(best0))
    return NULL;

  best = export_filter_(c, best0, rt_free, pool, silent);

  /* Unreachable best route cannot be merged with anything */
  if (!best || !rte_is_reachable(best))
    return best;

  for (rt0 = best0->next; rt0; rt0 = rt0->next)
    {
      if (!rte_mergable(best0, rt0))
	continue;

      /* Secondary routes are filtered silently; tmp tracks a filter-made copy */
      rt = export_filter_(c, rt0, &tmp, pool, 1);

      if (!rt)
	continue;

      if (rte_is_reachable(rt))
	nhs = nexthop_merge_rta(nhs, rt->attrs, pool, c->merge_limit);

      if (tmp)
	rte_free(tmp);
    }

  if (nhs)
    {
      /* Include the best route's own next hop in the merged set */
      nhs = nexthop_merge_rta(nhs, best->attrs, pool, c->merge_limit);

      /* Only rewrite attrs when the merge actually produced multiple hops */
      if (nhs->next)
	{
	  best = rte_cow_rta(best, pool);
	  nexthop_link(best->attrs, nhs);
	}
    }

  if (best != best0)
    *rt_free = best;

  return best;
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
 * rt_notify_merged - export preprocessing for RA_MERGED channels.
 *
 * Rebuilds the merged multipath route when a relevant change occurred and
 * forwards it to do_rt_notify(). Irrelevant changes (neither best route
 * changed nor a mergable sibling was touched) are skipped early.
 */
static void
rt_notify_merged(struct channel *c, net *net, rte *new_changed, rte *old_changed,
		 rte *new_best, rte *old_best, int refeed)
{
  // struct proto *p = c->proto;
  rte *new_free = NULL;

  /* We assume that all rte arguments are either NULL or rte_is_valid() */

  /* This check should be done by the caller */
  if (!new_best && !old_best)
    return;

  /* Check whether the change is relevant to the merged route */
  if ((new_best == old_best) &&
      (new_changed != old_changed) &&
      !rte_mergable(new_best, new_changed) &&
      !rte_mergable(old_best, old_changed))
    return;

  if (new_best)
    c->stats.exp_updates_received++;
  else
    c->stats.exp_withdraws_received++;

  /* Prepare new merged route */
  if (new_best)
    new_best = rt_export_merged(c, net, &new_free, rte_update_pool, 0);

  /* Check old merged route; drop it if it was never exported on this channel */
  if (old_best && !bmap_test(&c->export_map, old_best->id))
    old_best = NULL;

  if (!new_best && !old_best)
    return;

  do_rt_notify(c, net, new_best, old_best, refeed);

  /* Discard temporary rte */
  if (new_free)
    rte_free(new_free);
}
|
|
|
|
|
|
|
|
|
2000-06-02 20:41:25 +08:00
|
|
|
/**
 * rte_announce - announce a routing table change
 * @tab: table the route has been added to
 * @type: type of route announcement (RA_UNDEF or RA_ANY)
 * @net: network in question
 * @new: the new or changed route
 * @old: the previous route replaced by the new one
 * @new_best: the new best route for the same network
 * @old_best: the previous best route for the same network
 *
 * This function gets a routing table update and announces it to all protocols
 * that are connected to the same table by their channels.
 *
 * There are two ways of how routing table changes are announced. First, there
 * is a change of just one route in @net (which may caused a change of the best
 * route of the network). In this case @new and @old describes the changed route
 * and @new_best and @old_best describes best routes. Other routes are not
 * affected, but in sorted table the order of other routes might change.
 *
 * Second, There is a bulk change of multiple routes in @net, with shared best
 * route selection. In such case separate route changes are described using
 * @type of %RA_ANY, with @new and @old specifying the changed route, while
 * @new_best and @old_best are NULL. After that, another notification is done
 * where @new_best and @old_best are filled (may be the same), but @new and @old
 * are NULL.
 *
 * The function announces the change to all associated channels. For each
 * channel, an appropriate preprocessing is done according to channel &ra_mode.
 * For example, %RA_OPTIMAL channels receive just changes of best routes.
 *
 * In general, we first call preexport() hook of a protocol, which performs
 * basic checks on the route (each protocol has a right to veto or force accept
 * of the route before any filter is asked). Then we consult an export filter
 * of the channel and verify the old route in an export map of the channel.
 * Finally, the rt_notify() hook of the protocol gets called.
 *
 * Note that there are also calls of rt_notify() hooks due to feed, but that is
 * done outside of scope of rte_announce().
 */
static void
rte_announce(rtable *tab, uint type, net *net, rte *new, rte *old,
	     rte *new_best, rte *old_best)
{
  /* Normalize: invalid routes are treated as absent */
  if (!rte_is_valid(new))
    new = NULL;

  if (!rte_is_valid(old))
    old = NULL;

  if (!rte_is_valid(new_best))
    new_best = NULL;

  if (!rte_is_valid(old_best))
    old_best = NULL;

  if (!new && !old && !new_best && !old_best)
    return;

  if (new_best != old_best)
    {
      if (new_best)
	new_best->sender->stats.pref_routes++;
      if (old_best)
	old_best->sender->stats.pref_routes--;

      if (tab->hostcache)
	rt_notify_hostcache(tab, net);
    }

  rt_schedule_notify(tab);

  struct channel *c; node *n;
  WALK_LIST2(c, n, tab->channels, table_node)
    {
      if (c->export_state == ES_DOWN)
	continue;

      /* A typed announcement only goes to channels of the matching ra_mode */
      if (type && (type != c->ra_mode))
	continue;

      switch (c->ra_mode)
	{
	case RA_OPTIMAL:
	  if (new_best != old_best)
	    rt_notify_basic(c, net, new_best, old_best, 0);
	  break;

	case RA_ANY:
	  if (new != old)
	    rt_notify_basic(c, net, new, old, 0);
	  break;

	case RA_ACCEPTED:
	  rt_notify_accepted(c, net, new, old, 0);
	  break;

	case RA_MERGED:
	  rt_notify_merged(c, net, new, old, new_best, old_best, 0);
	  break;
	}
    }
}
|
|
|
|
|
1999-03-17 23:01:07 +08:00
|
|
|
/*
 * rte_validate - sanity-check an incoming route before insertion.
 *
 * Rejects bogus prefixes, host/link-scope destinations, routes whose dest
 * field does not match the nettype, and unsorted multipath next hop lists.
 * Returns 1 if the route is acceptable, 0 otherwise (with a warning logged).
 */
static inline int
rte_validate(rte *e)
{
  int c;
  net *n = e->net;

  if (!net_validate(n->n.addr))
    {
      log(L_WARN "Ignoring bogus prefix %N received via %s",
	  n->n.addr, e->sender->proto->name);
      return 0;
    }

  /* FIXME: better handling different nettypes */
  c = !net_is_flow(n->n.addr) ?
    net_classify(n->n.addr): (IADDR_HOST | SCOPE_UNIVERSE);
  if ((c < 0) || !(c & IADDR_HOST) || ((c & IADDR_SCOPE_MASK) <= SCOPE_LINK))
    {
      log(L_WARN "Ignoring bogus route %N received via %s",
	  n->n.addr, e->sender->proto->name);
      return 0;
    }

  /* dest must be nonzero exactly when the nettype carries a destination */
  if (net_type_match(n->n.addr, NB_DEST) == !e->attrs->dest)
    {
      log(L_WARN "Ignoring route %N with invalid dest %d received via %s",
	  n->n.addr, e->attrs->dest, e->sender->proto->name);
      return 0;
    }

  if ((e->attrs->dest == RTD_UNICAST) && !nexthop_is_sorted(&(e->attrs->nh)))
    {
      log(L_WARN "Ignoring unsorted multipath route %N received via %s",
	  n->n.addr, e->sender->proto->name);
      return 0;
    }

  return 1;
}
|
|
|
|
|
2000-06-02 01:12:19 +08:00
|
|
|
/**
|
|
|
|
* rte_free - delete a &rte
|
|
|
|
* @e: &rte to be deleted
|
|
|
|
*
|
|
|
|
* rte_free() deletes the given &rte from the routing table it's linked to.
|
|
|
|
*/
|
1998-12-08 05:59:15 +08:00
|
|
|
void
|
1998-05-20 19:54:33 +08:00
|
|
|
rte_free(rte *e)
|
1998-12-08 05:59:15 +08:00
|
|
|
{
|
2012-08-14 22:25:22 +08:00
|
|
|
if (rta_is_cached(e->attrs))
|
1998-12-08 05:59:15 +08:00
|
|
|
rta_free(e->attrs);
|
|
|
|
sl_free(rte_slab, e);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void
|
|
|
|
rte_free_quick(rte *e)
|
1998-05-20 19:54:33 +08:00
|
|
|
{
|
|
|
|
rta_free(e->attrs);
|
|
|
|
sl_free(rte_slab, e);
|
|
|
|
}
|
|
|
|
|
2000-05-07 05:21:19 +08:00
|
|
|
static int
|
|
|
|
rte_same(rte *x, rte *y)
|
|
|
|
{
|
2019-02-22 09:16:39 +08:00
|
|
|
/* rte.flags are not checked, as they are mostly internal to rtable */
|
2000-05-07 05:21:19 +08:00
|
|
|
return
|
|
|
|
x->attrs == y->attrs &&
|
|
|
|
x->pflags == y->pflags &&
|
|
|
|
x->pref == y->pref &&
|
2019-02-22 09:16:39 +08:00
|
|
|
(!x->attrs->src->proto->rte_same || x->attrs->src->proto->rte_same(x, y)) &&
|
|
|
|
rte_is_filtered(x) == rte_is_filtered(y);
|
2000-05-07 05:21:19 +08:00
|
|
|
}
|
|
|
|
|
2012-11-16 20:29:16 +08:00
|
|
|
/* A route is "ok" when it exists and was not rejected by the import filter. */
static inline int
rte_is_ok(rte *e)
{
  if (!e)
    return 0;
  return !rte_is_filtered(e);
}
|
|
|
|
|
1999-04-06 04:25:03 +08:00
|
|
|
/*
 * rte_recalculate - insert/replace/withdraw a route in a table node.
 *
 * Core import routine: locates and removes any previous route from @src in
 * @net's list, enforces receive and import limits, links @new (if any) at
 * the proper position (sorted tables keep full order; unsorted tables keep
 * only the best route first), maintains table/channel statistics and route
 * id allocation, and finally announces the change via rte_announce() and
 * the protocol's rte_insert()/rte_remove() hooks.
 *
 * NOTE(review): statement order here is load-bearing (limit checks run
 * before list relinking; announcement runs before freeing @old) — keep it.
 */
static void
rte_recalculate(struct channel *c, net *net, rte *new, struct rte_src *src)
{
  struct proto *p = c->proto;
  struct rtable *table = c->table;
  struct proto_stats *stats = &c->stats;
  static struct tbf rl_pipe = TBF_DEFAULT_LOG_LIMITS;
  rte *before_old = NULL;
  rte *old_best = net->routes;
  rte *old = NULL;
  rte **k;

  k = &net->routes;			/* Find and remove original route from the same protocol */
  while (old = *k)
    {
      if (old->attrs->src == src)
	{
	  /* If there is the same route in the routing table but from
	   * a different sender, then there are two paths from the
	   * source protocol to this routing table through transparent
	   * pipes, which is not allowed.
	   *
	   * We log that and ignore the route. If it is withdraw, we
	   * ignore it completely (there might be 'spurious withdraws',
	   * see FIXME in do_rte_announce())
	   */
	  if (old->sender->proto != p)
	    {
	      if (new)
		{
		  log_rl(&rl_pipe, L_ERR "Pipe collision detected when sending %N to table %s",
		      net->n.addr, table->name);
		  rte_free_quick(new);
		}
	      return;
	    }

	  if (new && rte_same(old, new))
	    {
	      /* No changes, ignore the new route and refresh the old one */

	      old->flags &= ~(REF_STALE | REF_DISCARD | REF_MODIFY);

	      if (!rte_is_filtered(new))
		{
		  stats->imp_updates_ignored++;
		  rte_trace_in(D_ROUTES, c, new, "ignored");
		}

	      rte_free_quick(new);
	      return;
	    }
	  *k = old->next;
	  table->rt_count--;
	  break;
	}
      k = &old->next;
      before_old = old;
    }

  /* Save the last accessed position */
  rte **pos = k;

  if (!old)
    before_old = NULL;

  if (!old && !new)
    {
      stats->imp_withdraws_ignored++;
      return;
    }

  int new_ok = rte_is_ok(new);
  int old_ok = rte_is_ok(old);

  /* Receive limit: counts all routes including filtered ones */
  struct channel_limit *l = &c->rx_limit;
  if (l->action && !old && new && !c->in_table)
    {
      u32 all_routes = stats->imp_routes + stats->filt_routes;

      if (all_routes >= l->limit)
	channel_notify_limit(c, l, PLD_RX, all_routes);

      if (l->state == PLS_BLOCKED)
	{
	  /* In receive limit the situation is simple, old is NULL so
	     we just free new and exit like nothing happened */

	  stats->imp_updates_ignored++;
	  rte_trace_in(D_FILTERS, c, new, "ignored [limit]");
	  rte_free_quick(new);
	  return;
	}
    }

  /* Import limit: counts only accepted (non-filtered) routes */
  l = &c->in_limit;
  if (l->action && !old_ok && new_ok)
    {
      if (stats->imp_routes >= l->limit)
	channel_notify_limit(c, l, PLD_IN, stats->imp_routes);

      if (l->state == PLS_BLOCKED)
	{
	  /* In import limit the situation is more complicated. We
	     shouldn't just drop the route, we should handle it like
	     it was filtered. We also have to continue the route
	     processing if old or new is non-NULL, but we should exit
	     if both are NULL as this case is probably assumed to be
	     already handled. */

	  stats->imp_updates_ignored++;
	  rte_trace_in(D_FILTERS, c, new, "ignored [limit]");

	  if (c->in_keep_filtered)
	    new->flags |= REF_FILTERED;
	  else
	    { rte_free_quick(new); new = NULL; }

	  /* Note that old && !new could be possible when
	     c->in_keep_filtered changed in the recent past. */

	  if (!old && !new)
	    return;

	  new_ok = 0;
	  goto skip_stats1;
	}
    }

  if (new_ok)
    stats->imp_updates_accepted++;
  else if (old_ok)
    stats->imp_withdraws_accepted++;
  else
    stats->imp_withdraws_ignored++;

  if (old_ok || new_ok)
    table->last_rt_change = current_time();

 skip_stats1:

  if (new)
    rte_is_filtered(new) ? stats->filt_routes++ : stats->imp_routes++;
  if (old)
    rte_is_filtered(old) ? stats->filt_routes-- : stats->imp_routes--;

  if (table->config->sorted)
    {
      /* If routes are sorted, just insert new route to appropriate position */
      if (new)
	{
	  if (before_old && !rte_better(new, before_old))
	    k = &before_old->next;
	  else
	    k = &net->routes;

	  for (; *k; k=&(*k)->next)
	    if (rte_better(new, *k))
	      break;

	  new->next = *k;
	  *k = new;

	  table->rt_count++;
	}
    }
  else
    {
      /* If routes are not sorted, find the best route and move it on
	 the first position. There are several optimized cases. */

      if (src->proto->rte_recalculate && src->proto->rte_recalculate(table, net, new, old, old_best))
	goto do_recalculate;

      if (new && rte_better(new, old_best))
	{
	  /* The first case - the new route is cleary optimal,
	     we link it at the first position */

	  new->next = net->routes;
	  net->routes = new;

	  table->rt_count++;
	}
      else if (old == old_best)
	{
	  /* The second case - the old best route disappeared, we add the
	     new route (if we have any) to the list (we don't care about
	     position) and then we elect the new optimal route and relink
	     that route at the first position and announce it. New optimal
	     route might be NULL if there is no more routes */

	do_recalculate:
	  /* Add the new route to the list */
	  if (new)
	    {
	      new->next = *pos;
	      *pos = new;

	      table->rt_count++;
	    }

	  /* Find a new optimal route (if there is any) */
	  if (net->routes)
	    {
	      rte **bp = &net->routes;
	      for (k=&(*bp)->next; *k; k=&(*k)->next)
		if (rte_better(*k, *bp))
		  bp = k;

	      /* And relink it */
	      rte *best = *bp;
	      *bp = best->next;
	      best->next = net->routes;
	      net->routes = best;
	    }
	}
      else if (new)
	{
	  /* The third case - the new route is not better than the old
	     best route (therefore old_best != NULL) and the old best
	     route was not removed (therefore old_best == net->routes).
	     We just link the new route to the old/last position. */

	  new->next = *pos;
	  *pos = new;

	  table->rt_count++;
	}
      /* The fourth (empty) case - suboptimal route was removed, nothing to do */
    }

  if (new)
    {
      new->lastmod = current_time();

      /* Allocate a fresh route id, or inherit the replaced route's id */
      if (!old)
	{
	  new->id = hmap_first_zero(&table->id_map);
	  hmap_set(&table->id_map, new->id);
	}
      else
	new->id = old->id;
    }

  /* Log the route change */
  if ((c->debug & D_ROUTES) || (p->debug & D_ROUTES))
    {
      if (new_ok)
	rte_trace(c, new, '>', new == net->routes ? "added [best]" : "added");
      else if (old_ok)
	{
	  if (old != old_best)
	    rte_trace(c, old, '>', "removed");
	  else if (rte_is_ok(net->routes))
	    rte_trace(c, old, '>', "removed [replaced]");
	  else
	    rte_trace(c, old, '>', "removed [sole]");
	}
    }

  /* Propagate the route change */
  rte_announce(table, RA_UNDEF, net, new, old, net->routes, old_best);

  /* Schedule garbage collection when the node became empty and GC is due */
  if (!net->routes &&
      (table->gc_counter++ >= table->config->gc_max_ops) &&
      (table->gc_time + table->config->gc_min_time <= current_time()))
    rt_schedule_prune(table);

  if (old_ok && p->rte_remove)
    p->rte_remove(net, old);
  if (new_ok && p->rte_insert)
    p->rte_insert(net, new);

  if (old)
    {
      /* Release the id only on plain withdrawal; replacement reuses it */
      if (!new)
	hmap_clear(&table->id_map, old->id);

      rte_free_quick(old);
    }
}
|
|
|
|
|
1999-04-06 04:25:03 +08:00
|
|
|
static int rte_update_nest_cnt;		/* Nesting counter to allow recursive updates; pool is flushed when it drops to zero */
|
|
|
|
|
|
|
|
static inline void
|
|
|
|
rte_update_lock(void)
|
|
|
|
{
|
|
|
|
rte_update_nest_cnt++;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void
|
|
|
|
rte_update_unlock(void)
|
|
|
|
{
|
|
|
|
if (!--rte_update_nest_cnt)
|
|
|
|
lp_flush(rte_update_pool);
|
|
|
|
}
|
|
|
|
|
2013-06-13 17:27:14 +08:00
|
|
|
static inline void
|
|
|
|
rte_hide_dummy_routes(net *net, rte **dummy)
|
|
|
|
{
|
|
|
|
if (net->routes && net->routes->attrs->source == RTS_DUMMY)
|
|
|
|
{
|
|
|
|
*dummy = net->routes;
|
|
|
|
net->routes = (*dummy)->next;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void
|
|
|
|
rte_unhide_dummy_routes(net *net, rte **dummy)
|
|
|
|
{
|
|
|
|
if (*dummy)
|
|
|
|
{
|
|
|
|
(*dummy)->next = net->routes;
|
|
|
|
net->routes = *dummy;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2000-06-02 01:12:19 +08:00
|
|
|
/**
 * rte_update - enter a new update to a routing table
 * @table: table to be updated
 * @c: channel doing the update
 * @net: network node
 * @p: protocol submitting the update
 * @src: protocol originating the update
 * @new: a &rte representing the new route or %NULL for route removal.
 *
 * This function is called by the routing protocols whenever they discover
 * a new route or wish to update/remove an existing route. The right announcement
 * sequence is to build route attributes first (either un-cached with @aflags set
 * to zero or a cached one using rta_lookup(); in this case please note that
 * you need to increase the use count of the attributes yourself by calling
 * rta_clone()), call rte_get_temp() to obtain a temporary &rte, fill in all
 * the appropriate data and finally submit the new &rte by calling rte_update().
 *
 * @src specifies the protocol that originally created the route and the meaning
 * of protocol-dependent data of @new. If @new is not %NULL, @src has to be the
 * same value as @new->attrs->proto. @p specifies the protocol that called
 * rte_update(). In most cases it is the same protocol as @src. rte_update()
 * stores @p in @new->sender.
 *
 * When rte_update() gets any route, it automatically validates it (checks
 * whether the network and next hop address are valid IP addresses and also
 * whether a normal routing protocol doesn't try to smuggle a host or link
 * scope route to the table), converts all protocol dependent attributes stored
 * in the &rte to temporary extended attributes, consults import filters of the
 * protocol to see if the route should be accepted and/or its attributes modified,
 * and stores the temporary attributes back to the &rte.
 *
 * Now, having a "public" version of the route, we automatically find any old
 * route defined by the protocol @src for network @n, replace it by the new
 * one (or remove it if @new is %NULL), recalculate the optimal route for this
 * destination and finally broadcast the change (if any) to all routing
 * protocols by calling rte_announce().
 *
 * All memory used for attribute lists and other temporary allocations is taken
 * from a special linear pool @rte_update_pool and freed when rte_update()
 * finishes.
 */
|
2009-05-31 21:24:27 +08:00
|
|
|
|
|
|
|
void
rte_update2(struct channel *c, const net_addr *n, rte *new, struct rte_src *src)
{
  // struct proto *p = c->proto;
  struct proto_stats *stats = &c->stats;
  const struct filter *filter = c->in_filter;
  rte *dummy = NULL;
  net *nn;

  ASSERT(c->channel_state == CS_UP);

  rte_update_lock();
  if (new)
    {
      /* Create a temporary table node, so filters can access new->net
       * before the real FIB entry is looked up / created */
      nn = alloca(sizeof(net) + n->length);
      memset(nn, 0, sizeof(net) + n->length);
      net_copy(nn->n.addr, n);

      new->net = nn;
      new->sender = c;

      /* Default preference comes from the channel unless the protocol set one */
      if (!new->pref)
	new->pref = c->preference;

      stats->imp_updates_received++;
      if (!rte_validate(new))
	{
	  rte_trace_in(D_FILTERS, c, new, "invalid");
	  stats->imp_updates_invalid++;
	  goto drop;
	}

      if (filter == FILTER_REJECT)
	{
	  stats->imp_updates_filtered++;
	  rte_trace_in(D_FILTERS, c, new, "filtered out");

	  if (! c->in_keep_filtered)
	    goto drop;

	  /* new is a private copy, i could modify it */
	  new->flags |= REF_FILTERED;
	}
      else if (filter)
	{
	  /* Run the import filter over a tmp-attrs view of the route */
	  rta *old_attrs = NULL;
	  rte_make_tmp_attrs(&new, rte_update_pool, &old_attrs);

	  int fr = f_run(filter, &new, rte_update_pool, 0);
	  if (fr > F_ACCEPT)
	    {
	      stats->imp_updates_filtered++;
	      rte_trace_in(D_FILTERS, c, new, "filtered out");

	      if (! c->in_keep_filtered)
		{
		  rta_free(old_attrs);
		  goto drop;
		}

	      /* Rejected but kept: mark the route as filtered */
	      new->flags |= REF_FILTERED;
	    }

	  rte_store_tmp_attrs(new, rte_update_pool, old_attrs);
	}
      if (!rta_is_cached(new->attrs)) /* Need to copy attributes */
	new->attrs = rta_lookup(new->attrs);
      new->flags |= REF_COW;

      /* Use the actual struct network, not the dummy one */
      nn = net_get(c->table, n);
      new->net = nn;
    }
  else
    {
      /* Withdraw: nothing to do if the net (or the source) does not exist */
      stats->imp_withdraws_received++;

      if (!(nn = net_find(c->table, n)) || !src)
	{
	  stats->imp_withdraws_ignored++;
	  rte_update_unlock();
	  return;
	}
    }

 recalc:
  /* And recalculate the best route */
  rte_hide_dummy_routes(nn, &dummy);
  rte_recalculate(c, nn, new, src);
  rte_unhide_dummy_routes(nn, &dummy);

  rte_update_unlock();
  return;

 drop:
  rte_free(new);
  new = NULL;
  /* A dropped update may still need to withdraw a previously accepted
   * route for the same net; assignment in condition is intentional */
  if (nn = net_find(c->table, n))
    goto recalc;

  rte_update_unlock();
}
|
|
|
|
|
2010-07-05 23:50:19 +08:00
|
|
|
/* Independent call to rte_announce(), used from next hop
   recalculation, outside of rte_update(). new must be non-NULL.
   Wraps the announcement in a lock/unlock pair so the temporary
   linear pool is flushed afterwards. */
static inline void
rte_announce_i(rtable *tab, uint type, net *net, rte *new, rte *old,
	       rte *new_best, rte *old_best)
{
  rte_update_lock();
  rte_announce(tab, type, net, new, old, new_best, old_best);
  rte_update_unlock();
}
|
|
|
|
|
2016-10-14 21:37:04 +08:00
|
|
|
/* Non-filtered route deletion, used during garbage collection.
 * Withdraws @old from its table by recalculating the net with a
 * NULL replacement, under the update lock. */
static inline void
rte_discard(rte *old)
{
  rte_update_lock();
  rte_recalculate(old->sender, old->net, NULL, old->attrs->src);
  rte_update_unlock();
}
|
|
|
|
|
2018-08-01 00:40:38 +08:00
|
|
|
/* Modify existing route by protocol hook, used for long-lived graceful restart */
static inline void
rte_modify(rte *old)
{
  rte_update_lock();

  /* The protocol hook may return @old (no change), NULL (withdraw),
   * or a replacement route allocated from rte_update_pool */
  rte *new = old->sender->proto->rte_modify(old, rte_update_pool);
  if (new != old)
    {
      if (new)
	{
	  if (!rta_is_cached(new->attrs))
	    new->attrs = rta_lookup(new->attrs);
	  /* Inherit flags from @old, but drop the modify request and
	   * mark the new route copy-on-write */
	  new->flags = (old->flags & ~REF_MODIFY) | REF_COW;
	}

      rte_recalculate(old->sender, old->net, new, old->attrs->src);
    }

  rte_update_unlock();
}
|
|
|
|
|
2013-02-09 06:58:27 +08:00
|
|
|
/* Check rtable for best route to given net whether it would be exported do p.
 * Returns 1 if the best route for @a would pass @p's preexport hook and
 * @filter, 0 otherwise. */
int
rt_examine(rtable *t, net_addr *a, struct proto *p, const struct filter *filter)
{
  net *n = net_find(t, a);
  rte *rt = n ? n->routes : NULL;

  if (!rte_is_valid(rt))
    return 0;

  rte_update_lock();

  /* Rest is stripped down export_filter() */
  int v = p->preexport ? p->preexport(p, &rt, rte_update_pool) : 0;
  if (v == RIC_PROCESS)
    {
      rte_make_tmp_attrs(&rt, rte_update_pool, NULL);
      v = (f_run(filter, &rt, rte_update_pool, FF_SILENT) <= F_ACCEPT);
    }

  /* Discard temporary rte (f_run may have replaced it with a modified copy) */
  if (rt != n->routes)
    rte_free(rt);

  rte_update_unlock();

  return v > 0;
}
|
|
|
|
|
2014-03-23 08:35:33 +08:00
|
|
|
|
|
|
|
/**
|
|
|
|
* rt_refresh_begin - start a refresh cycle
|
|
|
|
* @t: related routing table
|
2016-01-26 18:48:58 +08:00
|
|
|
* @c related channel
|
2014-03-23 08:35:33 +08:00
|
|
|
*
|
|
|
|
* This function starts a refresh cycle for given routing table and announce
|
|
|
|
* hook. The refresh cycle is a sequence where the protocol sends all its valid
|
|
|
|
* routes to the routing table (by rte_update()). After that, all protocol
|
2016-01-26 18:48:58 +08:00
|
|
|
* routes (more precisely routes with @c as @sender) not sent during the
|
2014-03-23 08:35:33 +08:00
|
|
|
* refresh cycle but still in the table from the past are pruned. This is
|
|
|
|
* implemented by marking all related routes as stale by REF_STALE flag in
|
|
|
|
* rt_refresh_begin(), then marking all related stale routes with REF_DISCARD
|
|
|
|
* flag in rt_refresh_end() and then removing such routes in the prune loop.
|
|
|
|
*/
|
2014-03-20 21:07:12 +08:00
|
|
|
void
|
2016-01-26 18:48:58 +08:00
|
|
|
rt_refresh_begin(rtable *t, struct channel *c)
|
2014-03-20 21:07:12 +08:00
|
|
|
{
|
2015-12-22 03:16:05 +08:00
|
|
|
FIB_WALK(&t->fib, net, n)
|
2014-03-20 21:07:12 +08:00
|
|
|
{
|
2015-12-22 03:16:05 +08:00
|
|
|
rte *e;
|
2014-03-20 21:07:12 +08:00
|
|
|
for (e = n->routes; e; e = e->next)
|
2016-01-26 18:48:58 +08:00
|
|
|
if (e->sender == c)
|
2014-03-20 21:07:12 +08:00
|
|
|
e->flags |= REF_STALE;
|
|
|
|
}
|
|
|
|
FIB_WALK_END;
|
|
|
|
}
|
|
|
|
|
2014-03-23 08:35:33 +08:00
|
|
|
/**
|
|
|
|
* rt_refresh_end - end a refresh cycle
|
|
|
|
* @t: related routing table
|
2016-01-26 18:48:58 +08:00
|
|
|
* @c: related channel
|
2014-03-23 08:35:33 +08:00
|
|
|
*
|
2016-01-26 18:48:58 +08:00
|
|
|
* This function ends a refresh cycle for given routing table and announce
|
2014-03-23 08:35:33 +08:00
|
|
|
* hook. See rt_refresh_begin() for description of refresh cycles.
|
|
|
|
*/
|
2014-03-20 21:07:12 +08:00
|
|
|
void
|
2016-01-26 18:48:58 +08:00
|
|
|
rt_refresh_end(rtable *t, struct channel *c)
|
2014-03-20 21:07:12 +08:00
|
|
|
{
|
|
|
|
int prune = 0;
|
|
|
|
|
2015-12-22 03:16:05 +08:00
|
|
|
FIB_WALK(&t->fib, net, n)
|
2014-03-20 21:07:12 +08:00
|
|
|
{
|
2015-12-22 03:16:05 +08:00
|
|
|
rte *e;
|
2014-03-20 21:07:12 +08:00
|
|
|
for (e = n->routes; e; e = e->next)
|
2016-01-26 18:48:58 +08:00
|
|
|
if ((e->sender == c) && (e->flags & REF_STALE))
|
2014-03-20 21:07:12 +08:00
|
|
|
{
|
|
|
|
e->flags |= REF_DISCARD;
|
|
|
|
prune = 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
FIB_WALK_END;
|
|
|
|
|
|
|
|
if (prune)
|
|
|
|
rt_schedule_prune(t);
|
|
|
|
}
|
|
|
|
|
2018-08-01 00:40:38 +08:00
|
|
|
void
|
|
|
|
rt_modify_stale(rtable *t, struct channel *c)
|
|
|
|
{
|
|
|
|
int prune = 0;
|
|
|
|
|
|
|
|
FIB_WALK(&t->fib, net, n)
|
|
|
|
{
|
|
|
|
rte *e;
|
|
|
|
for (e = n->routes; e; e = e->next)
|
|
|
|
if ((e->sender == c) && (e->flags & REF_STALE) && !(e->flags & REF_FILTERED))
|
|
|
|
{
|
|
|
|
e->flags |= REF_MODIFY;
|
|
|
|
prune = 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
FIB_WALK_END;
|
|
|
|
|
|
|
|
if (prune)
|
|
|
|
rt_schedule_prune(t);
|
|
|
|
}
|
2014-03-20 21:07:12 +08:00
|
|
|
|
2000-06-02 01:12:19 +08:00
|
|
|
/**
 * rte_dump - dump a route
 * @e: &rte to be dumped
 *
 * This function dumps contents of a &rte to debug output.
 */
void
rte_dump(rte *e)
{
  net *n = e->net;
  debug("%-1N ", n->n.addr);
  debug("PF=%02x pref=%d ", e->pflags, e->pref);
  rta_dump(e->attrs);
  /* Let the originating protocol dump its private attributes, if it can */
  if (e->attrs->src->proto->proto->dump_attrs)
    e->attrs->src->proto->proto->dump_attrs(e);
  debug("\n");
}
|
1998-05-15 15:54:32 +08:00
|
|
|
|
2000-06-02 01:12:19 +08:00
|
|
|
/**
 * rt_dump - dump a routing table
 * @t: routing table to be dumped
 *
 * This function dumps contents of a given routing table to debug output.
 */
void
rt_dump(rtable *t)
{
  debug("Dump of routing table <%s>\n", t->name);
#ifdef DEBUGGING
  fib_check(&t->fib);
#endif
  FIB_WALK(&t->fib, net, n)
    {
      rte *e;
      for(e=n->routes; e; e=e->next)
	rte_dump(e);
    }
  FIB_WALK_END;
  debug("\n");
}
|
1998-05-15 15:54:32 +08:00
|
|
|
|
2000-06-02 01:12:19 +08:00
|
|
|
/**
|
|
|
|
* rt_dump_all - dump all routing tables
|
|
|
|
*
|
|
|
|
* This function dumps contents of all routing tables to debug output.
|
|
|
|
*/
|
1998-05-24 22:49:14 +08:00
|
|
|
void
|
|
|
|
rt_dump_all(void)
|
|
|
|
{
|
1999-05-18 04:14:52 +08:00
|
|
|
rtable *t;
|
2021-03-30 21:09:53 +08:00
|
|
|
node *n;
|
1999-05-18 04:14:52 +08:00
|
|
|
|
2021-03-30 21:09:53 +08:00
|
|
|
WALK_LIST2(t, n, routing_tables, n)
|
1999-05-18 04:14:52 +08:00
|
|
|
rt_dump(t);
|
1998-05-24 22:49:14 +08:00
|
|
|
}
|
|
|
|
|
2010-07-05 23:50:19 +08:00
|
|
|
static inline void
|
|
|
|
rt_schedule_hcu(rtable *tab)
|
|
|
|
{
|
|
|
|
if (tab->hcu_scheduled)
|
|
|
|
return;
|
|
|
|
|
|
|
|
tab->hcu_scheduled = 1;
|
|
|
|
ev_schedule(tab->rt_event);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Request a next hop update on the table; the event is scheduled only
 * when the state machine is idle, otherwise just the dirty bit is set. */
static inline void
rt_schedule_nhu(rtable *tab)
{
  if (tab->nhu_state == NHU_CLEAN)
    ev_schedule(tab->rt_event);

  /* state change:
   *   NHU_CLEAN   -> NHU_SCHEDULED
   *   NHU_RUNNING -> NHU_DIRTY
   */
  tab->nhu_state |= NHU_SCHEDULED;
}
|
|
|
|
|
2016-01-26 18:48:58 +08:00
|
|
|
/* Request the prune loop to run on the table; the event is scheduled only
 * from the idle state, a running prune just gets the re-run bit set. */
void
rt_schedule_prune(rtable *tab)
{
  if (tab->prune_state == 0)
    ev_schedule(tab->rt_event);

  /* state change 0->1, 2->3 */
  tab->prune_state |= 1;
}
|
|
|
|
|
2016-01-26 18:48:58 +08:00
|
|
|
|
2000-04-28 06:28:49 +08:00
|
|
|
/* Table maintenance event handler: runs whichever of the deferred jobs
 * (hostcache update, next hop update, prune loop) are pending, with the
 * table locked so it cannot be deleted underneath us. */
static void
rt_event(void *ptr)
{
  rtable *tab = ptr;

  rt_lock_table(tab);

  if (tab->hcu_scheduled)
    rt_update_hostcache(tab);

  if (tab->nhu_state)
    rt_next_hop_update(tab);

  if (tab->prune_state)
    rt_prune_table(tab);

  rt_unlock_table(tab);
}
|
|
|
|
|
2021-02-10 10:09:57 +08:00
|
|
|
|
|
|
|
static inline btime
|
|
|
|
rt_settled_time(rtable *tab)
|
|
|
|
{
|
|
|
|
ASSUME(tab->base_settle_time != 0);
|
|
|
|
|
|
|
|
return MIN(tab->last_rt_change + tab->config->min_settle_time,
|
|
|
|
tab->base_settle_time + tab->config->max_settle_time);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Settle timer hook: if the table has settled, notify all subscribers;
 * otherwise re-arm the timer for the recomputed settle deadline. */
static void
rt_settle_timer(timer *t)
{
  rtable *tab = t->data;

  /* Settling was cancelled in the meantime */
  if (!tab->base_settle_time)
    return;

  btime settled_time = rt_settled_time(tab);
  if (current_time() < settled_time)
    {
      /* A route change moved the deadline; try again later */
      tm_set(tab->settle_timer, settled_time);
      return;
    }

  /* Settled */
  tab->base_settle_time = 0;

  struct rt_subscription *s;
  WALK_LIST(s, tab->subscribers)
    s->hook(s);
}
|
|
|
|
|
|
|
|
/* Begin a settling period: record its start and arm the settle timer,
 * lazily creating the timer on first use. */
static void
rt_kick_settle_timer(rtable *tab)
{
  tab->base_settle_time = current_time();

  if (!tab->settle_timer)
    tab->settle_timer = tm_new_init(tab->rp, rt_settle_timer, tab, 0, 0);

  if (!tm_active(tab->settle_timer))
    tm_set(tab->settle_timer, rt_settled_time(tab));
}
|
|
|
|
|
|
|
|
static inline void
|
|
|
|
rt_schedule_notify(rtable *tab)
|
|
|
|
{
|
|
|
|
if (EMPTY_LIST(tab->subscribers))
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (tab->base_settle_time)
|
|
|
|
return;
|
|
|
|
|
|
|
|
rt_kick_settle_timer(tab);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
rt_subscribe(rtable *tab, struct rt_subscription *s)
|
|
|
|
{
|
|
|
|
s->tab = tab;
|
|
|
|
rt_lock_table(tab);
|
|
|
|
add_tail(&tab->subscribers, &s->n);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Remove subscription @s from its table and drop the table lock taken
 * by rt_subscribe(). */
void
rt_unsubscribe(struct rt_subscription *s)
{
  rem_node(&s->n);
  rt_unlock_table(s->tab);
}
|
|
|
|
|
2021-03-31 00:51:31 +08:00
|
|
|
/* Resource-class free hook for routing tables; called when the table's
 * pool is being torn down and the table has no remaining users. */
static void
rt_free(resource *_r)
{
  rtable *r = (rtable *) _r;

  DBG("Deleting routing table %s\n", r->name);
  ASSERT_DIE(r->use_count == 0);

  /* Internal tables are not linked into the global structures below */
  if (r->internal)
    return;

  r->config->table = NULL;
  rem_node(&r->n);

  if (r->hostcache)
    rt_free_hostcache(r);

  /* Freed automagically by the resource pool
  fib_free(&r->fib);
  hmap_free(&r->id_map);
  rfree(r->rt_event);
  rfree(r->settle_timer);
  mb_free(r);
  */
}
|
|
|
|
|
|
|
|
/* Resource-class dump hook: one-line summary of the table for debug output. */
static void
rt_res_dump(resource *_r)
{
  rtable *r = (rtable *) _r;
  debug("name \"%s\", addr_type=%s, rt_count=%u, use_count=%d\n",
      r->name, net_label[r->addr_type], r->rt_count, r->use_count);
}
|
|
|
|
|
|
|
|
/* Resource class describing routing tables to the resource system */
static struct resclass rt_class = {
  .name = "Routing table",
  .size = sizeof(struct rtable),
  .free = rt_free,
  .dump = rt_res_dump,
  .lookup = NULL,
  .memsize = NULL,
};
|
|
|
|
|
|
|
|
/* Allocate and initialize a routing table described by @cf inside a fresh
 * sub-pool of @pp. Internal tables get only the FIB (and optional trie);
 * regular tables also get channels, route-ID map, subscribers and the
 * maintenance event. */
rtable *
rt_setup(pool *pp, struct rtable_config *cf)
{
  /* Build the pool name "Routing table <name>" in the parent pool first */
  int ns = strlen("Routing table ") + strlen(cf->name) + 1;
  void *nb = mb_alloc(pp, ns);
  ASSERT_DIE(ns - 1 == bsnprintf(nb, ns, "Routing table %s", cf->name));

  pool *p = rp_new(pp, nb);
  /* Re-parent the name buffer into the new pool so it lives as long as it */
  mb_move(nb, p);

  rtable *t = ralloc(p, &rt_class);
  t->rp = p;

  t->name = cf->name;
  t->config = cf;
  t->addr_type = cf->addr_type;

  fib_init(&t->fib, p, t->addr_type, sizeof(net), OFFSETOF(net, n), 0, NULL);

  if (cf->trie_used)
    {
      t->trie = f_new_trie(lp_new_default(p), 0);
      t->trie->ipv4 = net_val_match(t->addr_type, NB_IP4 | NB_VPN4 | NB_ROA4);

      t->fib.init = net_init_with_trie;
    }

  if (!(t->internal = cf->internal))
    {
      init_list(&t->channels);
      hmap_init(&t->id_map, p, 1024);
      /* Route ID 0 is reserved */
      hmap_set(&t->id_map, 0);

      init_list(&t->subscribers);

      t->rt_event = ev_new_init(p, rt_event, t);
      t->last_rt_change = t->gc_time = current_time();
    }

  return t;
}
|
|
|
|
|
2000-06-02 01:12:19 +08:00
|
|
|
/**
 * rt_init - initialize routing tables
 *
 * This function is called during BIRD startup. It initializes the
 * routing table module: the route attribute cache, the shared table
 * pool, the temporary update pool, the &rte slab and the global list
 * of routing tables.
 */
void
rt_init(void)
{
  rta_init();
  rt_table_pool = rp_new(&root_pool, "Routing tables");
  rte_update_pool = lp_new_default(rt_table_pool);
  rte_slab = sl_new(rt_table_pool, sizeof(rte));
  init_list(&routing_tables);
}
|
1999-02-14 03:15:28 +08:00
|
|
|
|
2012-03-29 00:40:04 +08:00
|
|
|
|
2016-01-26 18:48:58 +08:00
|
|
|
/**
 * rt_prune_table - prune a routing table
 *
 * The prune loop scans routing tables and removes routes belonging to flushing
 * protocols, discarded routes and also stale network entries. It is called from
 * rt_event(). The event is rescheduled if the current iteration does not finish
 * the table. The pruning is directed by the prune state (@prune_state),
 * specifying whether the prune cycle is scheduled or running, and there
 * is also a persistent pruning iterator (@prune_fit).
 *
 * The prune loop is used also for channel flushing. For this purpose, the
 * channels to flush are marked before the iteration and notified after the
 * iteration.
 */
static void
rt_prune_table(rtable *tab)
{
  struct fib_iterator *fit = &tab->prune_fit;
  int limit = 512;		/* Max work per invocation before yielding */

  struct channel *c;
  node *n, *x;

  DBG("Pruning route table %s\n", tab->name);
#ifdef DEBUGGING
  fib_check(&tab->fib);
#endif

  if (tab->prune_state == 0)
    return;

  if (tab->prune_state == 1)
    {
      /* Mark channels to flush */
      WALK_LIST2(c, n, tab->channels, table_node)
	if (c->channel_state == CS_FLUSHING)
	  c->flush_active = 1;

      FIB_ITERATE_INIT(fit, &tab->fib);
      tab->prune_state = 2;
    }

again:
  /* Note: FIB_ITERATE_START declares its own `n` (a net *), shadowing
   * the node * above */
  FIB_ITERATE_START(&tab->fib, fit, net, n)
    {
      rte *e;

    rescan:
      for (e=n->routes; e; e=e->next)
	{
	  if (e->sender->flush_active || (e->flags & REF_DISCARD))
	    {
	      /* Out of budget: park the iterator and resume via the event */
	      if (limit <= 0)
		{
		  FIB_ITERATE_PUT(fit);
		  ev_schedule(tab->rt_event);
		  return;
		}

	      rte_discard(e);
	      limit--;

	      /* The route list changed under us; restart the scan of this net */
	      goto rescan;
	    }

	  if (e->flags & REF_MODIFY)
	    {
	      if (limit <= 0)
		{
		  FIB_ITERATE_PUT(fit);
		  ev_schedule(tab->rt_event);
		  return;
		}

	      rte_modify(e);
	      limit--;

	      goto rescan;
	    }
	}

      if (!n->routes)		/* Orphaned FIB entry */
	{
	  FIB_ITERATE_PUT(fit);
	  fib_delete(&tab->fib, n);
	  goto again;
	}
    }
  FIB_ITERATE_END;

#ifdef DEBUGGING
  fib_check(&tab->fib);
#endif

  tab->gc_counter = 0;
  tab->gc_time = current_time();

  /* state change 2->0, 3->1 */
  tab->prune_state &= 1;

  /* A new prune was requested while we were running; go again */
  if (tab->prune_state > 0)
    ev_schedule(tab->rt_event);

  /* FIXME: This should be handled in a better way */
  rt_prune_sources();

  /* Close flushed channels */
  WALK_LIST2_DELSAFE(c, n, x, tab->channels, table_node)
    if (c->flush_active)
      {
	c->flush_active = 0;
	channel_set_state(c, CS_DOWN);
      }

  return;
}
|
|
|
|
|
2010-07-05 23:50:19 +08:00
|
|
|
/* Config-parse-time initialization: set up the table list of config @c
 * and create the two default master tables. */
void
rt_preconfig(struct config *c)
{
  init_list(&c->tables);

  rt_new_table(cf_get_symbol("master4"), NET_IP4);
  rt_new_table(cf_get_symbol("master6"), NET_IP6);
}
|
|
|
|
|
|
|
|
|
2016-01-26 18:48:58 +08:00
|
|
|
/*
|
2010-07-05 23:50:19 +08:00
|
|
|
* Some functions for handing internal next hop updates
|
|
|
|
* triggered by rt_schedule_nhu().
|
|
|
|
*/
|
|
|
|
|
|
|
|
static inline int
|
|
|
|
rta_next_hop_outdated(rta *a)
|
|
|
|
{
|
|
|
|
struct hostentry *he = a->hostentry;
|
2010-12-08 06:33:55 +08:00
|
|
|
|
|
|
|
if (!he)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (!he->src)
|
|
|
|
return a->dest != RTD_UNREACHABLE;
|
|
|
|
|
2016-05-06 21:48:35 +08:00
|
|
|
return (a->dest != he->dest) || (a->igp_metric != he->igp_metric) ||
|
2017-02-24 21:05:11 +08:00
|
|
|
(!he->nexthop_linkable) || !nexthop_same(&(a->nh), &(he->src->nh));
|
2010-07-05 23:50:19 +08:00
|
|
|
}
|
|
|
|
|
2017-03-22 22:00:07 +08:00
|
|
|
void
|
2017-03-17 22:48:09 +08:00
|
|
|
rta_apply_hostentry(rta *a, struct hostentry *he, mpls_label_stack *mls)
|
2010-07-05 23:50:19 +08:00
|
|
|
{
|
|
|
|
a->hostentry = he;
|
|
|
|
a->dest = he->dest;
|
2010-07-31 07:04:32 +08:00
|
|
|
a->igp_metric = he->igp_metric;
|
2016-08-09 20:47:51 +08:00
|
|
|
|
2017-03-17 22:48:09 +08:00
|
|
|
if (a->dest != RTD_UNICAST)
|
2016-08-09 20:47:51 +08:00
|
|
|
{
|
2017-03-17 22:48:09 +08:00
|
|
|
/* No nexthop */
|
|
|
|
no_nexthop:
|
|
|
|
a->nh = (struct nexthop) {};
|
|
|
|
if (mls)
|
|
|
|
{ /* Store the label stack for later changes */
|
|
|
|
a->nh.labels_orig = a->nh.labels = mls->len;
|
|
|
|
memcpy(a->nh.label, mls->stack, mls->len * sizeof(u32));
|
|
|
|
}
|
2016-08-09 20:47:51 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2017-03-17 22:48:09 +08:00
|
|
|
if (((!mls) || (!mls->len)) && he->nexthop_linkable)
|
|
|
|
{ /* Just link the nexthop chain, no label append happens. */
|
|
|
|
memcpy(&(a->nh), &(he->src->nh), nexthop_size(&(he->src->nh)));
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
struct nexthop *nhp = NULL, *nhr = NULL;
|
|
|
|
int skip_nexthop = 0;
|
2017-03-22 22:00:07 +08:00
|
|
|
|
2017-03-17 22:48:09 +08:00
|
|
|
for (struct nexthop *nh = &(he->src->nh); nh; nh = nh->next)
|
2016-08-09 20:47:51 +08:00
|
|
|
{
|
2017-03-17 22:48:09 +08:00
|
|
|
if (skip_nexthop)
|
|
|
|
skip_nexthop--;
|
|
|
|
else
|
|
|
|
{
|
|
|
|
nhr = nhp;
|
2019-10-10 20:01:16 +08:00
|
|
|
nhp = (nhp ? (nhp->next = lp_alloc(rte_update_pool, NEXTHOP_MAX_SIZE)) : &(a->nh));
|
2017-03-17 22:48:09 +08:00
|
|
|
}
|
2017-02-24 21:05:11 +08:00
|
|
|
|
2019-10-10 20:01:16 +08:00
|
|
|
memset(nhp, 0, NEXTHOP_MAX_SIZE);
|
2017-03-17 22:48:09 +08:00
|
|
|
nhp->iface = nh->iface;
|
|
|
|
nhp->weight = nh->weight;
|
2019-10-10 21:25:36 +08:00
|
|
|
|
2017-03-17 22:48:09 +08:00
|
|
|
if (mls)
|
2016-08-09 20:47:51 +08:00
|
|
|
{
|
2017-03-17 22:48:09 +08:00
|
|
|
nhp->labels = nh->labels + mls->len;
|
|
|
|
nhp->labels_orig = mls->len;
|
2017-02-24 21:05:11 +08:00
|
|
|
if (nhp->labels <= MPLS_MAX_LABEL_STACK)
|
|
|
|
{
|
|
|
|
memcpy(nhp->label, nh->label, nh->labels * sizeof(u32)); /* First the hostentry labels */
|
2017-03-17 22:48:09 +08:00
|
|
|
memcpy(&(nhp->label[nh->labels]), mls->stack, mls->len * sizeof(u32)); /* Then the bottom labels */
|
2017-02-24 21:05:11 +08:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
log(L_WARN "Sum of label stack sizes %d + %d = %d exceedes allowed maximum (%d)",
|
2017-03-17 22:48:09 +08:00
|
|
|
nh->labels, mls->len, nhp->labels, MPLS_MAX_LABEL_STACK);
|
|
|
|
skip_nexthop++;
|
2017-02-24 21:05:11 +08:00
|
|
|
continue;
|
|
|
|
}
|
2016-08-09 20:47:51 +08:00
|
|
|
}
|
2019-10-10 21:25:36 +08:00
|
|
|
else if (nh->labels)
|
|
|
|
{
|
|
|
|
nhp->labels = nh->labels;
|
|
|
|
nhp->labels_orig = 0;
|
|
|
|
memcpy(nhp->label, nh->label, nh->labels * sizeof(u32));
|
|
|
|
}
|
|
|
|
|
2017-03-17 22:48:09 +08:00
|
|
|
if (ipa_nonzero(nh->gw))
|
2017-07-05 05:36:21 +08:00
|
|
|
{
|
|
|
|
nhp->gw = nh->gw; /* Router nexthop */
|
|
|
|
nhp->flags |= (nh->flags & RNF_ONLINK);
|
|
|
|
}
|
2019-10-10 21:06:32 +08:00
|
|
|
else if (!(nh->iface->flags & IF_MULTIACCESS) || (nh->iface->flags & IF_LOOPBACK))
|
|
|
|
nhp->gw = IPA_NONE; /* PtP link - no need for nexthop */
|
2017-03-17 22:48:09 +08:00
|
|
|
else if (ipa_nonzero(he->link))
|
|
|
|
nhp->gw = he->link; /* Device nexthop with link-local address known */
|
|
|
|
else
|
|
|
|
nhp->gw = he->addr; /* Device nexthop with link-local address unknown */
|
2016-08-09 20:47:51 +08:00
|
|
|
}
|
2017-02-24 21:05:11 +08:00
|
|
|
|
2017-03-17 22:48:09 +08:00
|
|
|
if (skip_nexthop)
|
|
|
|
if (nhr)
|
|
|
|
nhr->next = NULL;
|
|
|
|
else
|
|
|
|
{
|
|
|
|
a->dest = RTD_UNREACHABLE;
|
|
|
|
log(L_WARN "No valid nexthop remaining, setting route unreachable");
|
|
|
|
goto no_nexthop;
|
|
|
|
}
|
2010-07-05 23:50:19 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline rte *
|
2016-10-14 21:37:04 +08:00
|
|
|
rt_next_hop_update_rte(rtable *tab UNUSED, rte *old)
|
2010-07-05 23:50:19 +08:00
|
|
|
{
|
2017-02-20 09:26:45 +08:00
|
|
|
rta *a = alloca(RTA_MAX_SIZE);
|
|
|
|
memcpy(a, old->attrs, rta_size(old->attrs));
|
2017-03-17 22:48:09 +08:00
|
|
|
|
|
|
|
mpls_label_stack mls = { .len = a->nh.labels_orig };
|
|
|
|
memcpy(mls.stack, &a->nh.label[a->nh.labels - mls.len], mls.len * sizeof(u32));
|
|
|
|
|
|
|
|
rta_apply_hostentry(a, old->attrs->hostentry, &mls);
|
2017-02-20 09:26:45 +08:00
|
|
|
a->aflags = 0;
|
2010-07-05 23:50:19 +08:00
|
|
|
|
|
|
|
rte *e = sl_alloc(rte_slab);
|
|
|
|
memcpy(e, old, sizeof(rte));
|
2017-02-20 09:26:45 +08:00
|
|
|
e->attrs = rta_lookup(a);
|
2010-07-05 23:50:19 +08:00
|
|
|
|
|
|
|
return e;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline int
|
|
|
|
rt_next_hop_update_net(rtable *tab, net *n)
|
|
|
|
{
|
|
|
|
rte **k, *e, *new, *old_best, **new_best;
|
|
|
|
int count = 0;
|
|
|
|
int free_old_best = 0;
|
|
|
|
|
|
|
|
old_best = n->routes;
|
|
|
|
if (!old_best)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
for (k = &n->routes; e = *k; k = &e->next)
|
2011-12-22 20:20:29 +08:00
|
|
|
if (rta_next_hop_outdated(e->attrs))
|
|
|
|
{
|
|
|
|
new = rt_next_hop_update_rte(tab, e);
|
|
|
|
*k = new;
|
2010-07-05 23:50:19 +08:00
|
|
|
|
2020-12-08 05:19:40 +08:00
|
|
|
rte_trace_in(D_ROUTES, new->sender, new, "updated");
|
2019-09-09 08:55:32 +08:00
|
|
|
rte_announce_i(tab, RA_ANY, n, new, e, NULL, NULL);
|
2010-07-05 23:50:19 +08:00
|
|
|
|
2011-12-22 20:20:29 +08:00
|
|
|
/* Call a pre-comparison hook */
|
|
|
|
/* Not really an efficient way to compute this */
|
2012-08-14 22:25:22 +08:00
|
|
|
if (e->attrs->src->proto->rte_recalculate)
|
|
|
|
e->attrs->src->proto->rte_recalculate(tab, n, new, e, NULL);
|
2010-07-05 23:50:19 +08:00
|
|
|
|
2011-12-22 20:20:29 +08:00
|
|
|
if (e != old_best)
|
|
|
|
rte_free_quick(e);
|
|
|
|
else /* Freeing of the old best rte is postponed */
|
|
|
|
free_old_best = 1;
|
2010-07-05 23:50:19 +08:00
|
|
|
|
2011-12-22 20:20:29 +08:00
|
|
|
e = new;
|
|
|
|
count++;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!count)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
/* Find the new best route */
|
|
|
|
new_best = NULL;
|
|
|
|
for (k = &n->routes; e = *k; k = &e->next)
|
|
|
|
{
|
2010-07-05 23:50:19 +08:00
|
|
|
if (!new_best || rte_better(e, *new_best))
|
|
|
|
new_best = k;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Relink the new best route to the first position */
|
|
|
|
new = *new_best;
|
|
|
|
if (new != n->routes)
|
|
|
|
{
|
|
|
|
*new_best = new->next;
|
|
|
|
new->next = n->routes;
|
|
|
|
n->routes = new;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Announce the new best route */
|
|
|
|
if (new != old_best)
|
2020-12-08 05:19:40 +08:00
|
|
|
rte_trace_in(D_ROUTES, new->sender, new, "updated [best]");
|
2010-07-05 23:50:19 +08:00
|
|
|
|
2019-09-09 08:55:32 +08:00
|
|
|
/* Propagate changes */
|
|
|
|
rte_announce_i(tab, RA_UNDEF, n, NULL, NULL, n->routes, old_best);
|
2015-06-08 08:20:43 +08:00
|
|
|
|
2016-08-16 19:02:32 +08:00
|
|
|
if (free_old_best)
|
2010-07-05 23:50:19 +08:00
|
|
|
rte_free_quick(old_best);
|
|
|
|
|
|
|
|
return count;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
rt_next_hop_update(rtable *tab)
|
|
|
|
{
|
|
|
|
struct fib_iterator *fit = &tab->nhu_fit;
|
|
|
|
int max_feed = 32;
|
|
|
|
|
2017-02-22 21:02:03 +08:00
|
|
|
if (tab->nhu_state == NHU_CLEAN)
|
2010-07-05 23:50:19 +08:00
|
|
|
return;
|
|
|
|
|
2017-02-22 21:02:03 +08:00
|
|
|
if (tab->nhu_state == NHU_SCHEDULED)
|
2010-07-05 23:50:19 +08:00
|
|
|
{
|
|
|
|
FIB_ITERATE_INIT(fit, &tab->fib);
|
2017-02-22 21:02:03 +08:00
|
|
|
tab->nhu_state = NHU_RUNNING;
|
2010-07-05 23:50:19 +08:00
|
|
|
}
|
|
|
|
|
2015-12-22 03:16:05 +08:00
|
|
|
FIB_ITERATE_START(&tab->fib, fit, net, n)
|
2010-07-05 23:50:19 +08:00
|
|
|
{
|
|
|
|
if (max_feed <= 0)
|
|
|
|
{
|
2015-12-22 03:16:05 +08:00
|
|
|
FIB_ITERATE_PUT(fit);
|
2010-07-05 23:50:19 +08:00
|
|
|
ev_schedule(tab->rt_event);
|
|
|
|
return;
|
|
|
|
}
|
2015-12-22 03:16:05 +08:00
|
|
|
max_feed -= rt_next_hop_update_net(tab, n);
|
2010-07-05 23:50:19 +08:00
|
|
|
}
|
2015-12-22 03:16:05 +08:00
|
|
|
FIB_ITERATE_END;
|
2010-07-05 23:50:19 +08:00
|
|
|
|
2017-02-22 21:02:03 +08:00
|
|
|
/* State change:
|
|
|
|
* NHU_DIRTY -> NHU_SCHEDULED
|
|
|
|
* NHU_RUNNING -> NHU_CLEAN
|
|
|
|
*/
|
2010-07-05 23:50:19 +08:00
|
|
|
tab->nhu_state &= 1;
|
|
|
|
|
2017-02-22 21:02:03 +08:00
|
|
|
if (tab->nhu_state != NHU_CLEAN)
|
2010-07-05 23:50:19 +08:00
|
|
|
ev_schedule(tab->rt_event);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2000-03-05 06:21:06 +08:00
|
|
|
struct rtable_config *
|
2015-11-05 19:48:52 +08:00
|
|
|
rt_new_table(struct symbol *s, uint addr_type)
|
2000-03-05 06:21:06 +08:00
|
|
|
{
|
2012-07-21 01:56:57 +08:00
|
|
|
/* Hack that allows to 'redefine' the master table */
|
2016-01-26 18:48:58 +08:00
|
|
|
if ((s->class == SYM_TABLE) &&
|
2019-02-15 20:53:17 +08:00
|
|
|
(s->table == new_config->def_tables[addr_type]) &&
|
2016-01-26 18:48:58 +08:00
|
|
|
((addr_type == NET_IP4) || (addr_type == NET_IP6)))
|
2019-02-15 20:53:17 +08:00
|
|
|
return s->table;
|
2012-07-21 01:56:57 +08:00
|
|
|
|
2000-03-05 06:21:06 +08:00
|
|
|
struct rtable_config *c = cfg_allocz(sizeof(struct rtable_config));
|
|
|
|
|
2019-02-15 20:53:17 +08:00
|
|
|
cf_define_symbol(s, SYM_TABLE, table, c);
|
2000-03-05 06:21:06 +08:00
|
|
|
c->name = s->name;
|
2015-11-05 19:48:52 +08:00
|
|
|
c->addr_type = addr_type;
|
2000-05-20 03:49:33 +08:00
|
|
|
c->gc_max_ops = 1000;
|
2000-03-05 06:21:06 +08:00
|
|
|
c->gc_min_time = 5;
|
2021-02-10 10:09:57 +08:00
|
|
|
c->min_settle_time = 1 S;
|
|
|
|
c->max_settle_time = 20 S;
|
2021-11-30 02:23:42 +08:00
|
|
|
c->trie_used = net_val_match(addr_type, NB_IP | NB_VPN | NB_ROA | NB_IP6_SADR);
|
2016-01-26 18:48:58 +08:00
|
|
|
|
|
|
|
add_tail(&new_config->tables, &c->n);
|
|
|
|
|
|
|
|
/* First table of each type is kept as default */
|
|
|
|
if (! new_config->def_tables[addr_type])
|
|
|
|
new_config->def_tables[addr_type] = c;
|
|
|
|
|
2000-03-05 06:21:06 +08:00
|
|
|
return c;
|
|
|
|
}
|
|
|
|
|
2000-06-02 01:12:19 +08:00
|
|
|
/**
|
|
|
|
* rt_lock_table - lock a routing table
|
|
|
|
* @r: routing table to be locked
|
|
|
|
*
|
|
|
|
* Lock a routing table, because it's in use by a protocol,
|
|
|
|
* preventing it from being freed when it gets undefined in a new
|
|
|
|
* configuration.
|
|
|
|
*/
|
1999-05-18 04:14:52 +08:00
|
|
|
void
|
2000-01-17 00:44:50 +08:00
|
|
|
rt_lock_table(rtable *r)
|
1999-05-18 04:14:52 +08:00
|
|
|
{
|
2000-01-17 00:44:50 +08:00
|
|
|
r->use_count++;
|
|
|
|
}
|
|
|
|
|
2000-06-02 01:12:19 +08:00
|
|
|
/**
|
|
|
|
* rt_unlock_table - unlock a routing table
|
|
|
|
* @r: routing table to be unlocked
|
|
|
|
*
|
|
|
|
* Unlock a routing table formerly locked by rt_lock_table(),
|
|
|
|
* that is decrease its use count and delete it if it's scheduled
|
|
|
|
* for deletion by configuration changes.
|
|
|
|
*/
|
2000-01-17 00:44:50 +08:00
|
|
|
void
|
|
|
|
rt_unlock_table(rtable *r)
|
|
|
|
{
|
|
|
|
if (!--r->use_count && r->deleted)
|
|
|
|
{
|
|
|
|
struct config *conf = r->deleted;
|
2021-03-31 00:51:31 +08:00
|
|
|
|
|
|
|
/* Delete the routing table by freeing its pool */
|
|
|
|
rt_shutdown(r);
|
2000-01-17 00:44:50 +08:00
|
|
|
config_del_obstacle(conf);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-03-18 20:48:47 +08:00
|
|
|
static struct rtable_config *
|
|
|
|
rt_find_table_config(struct config *cf, char *name)
|
|
|
|
{
|
|
|
|
struct symbol *sym = cf_find_symbol(cf, name);
|
2019-02-15 20:53:17 +08:00
|
|
|
return (sym && (sym->class == SYM_TABLE)) ? sym->table : NULL;
|
2018-03-18 20:48:47 +08:00
|
|
|
}
|
|
|
|
|
2000-06-02 01:12:19 +08:00
|
|
|
/**
|
|
|
|
* rt_commit - commit new routing table configuration
|
|
|
|
* @new: new configuration
|
|
|
|
* @old: original configuration or %NULL if it's boot time config
|
|
|
|
*
|
|
|
|
* Scan differences between @old and @new configuration and modify
|
|
|
|
* the routing tables according to these changes. If @new defines a
|
|
|
|
* previously unknown table, create it, if it omits a table existing
|
|
|
|
* in @old, schedule it for deletion (it gets deleted when all protocols
|
|
|
|
* disconnect from it by calling rt_unlock_table()), if it exists
|
|
|
|
* in both configurations, leave it unchanged.
|
|
|
|
*/
|
2000-01-17 00:44:50 +08:00
|
|
|
void
|
|
|
|
rt_commit(struct config *new, struct config *old)
|
|
|
|
{
|
|
|
|
struct rtable_config *o, *r;
|
1999-05-18 04:14:52 +08:00
|
|
|
|
2000-01-17 00:44:50 +08:00
|
|
|
DBG("rt_commit:\n");
|
|
|
|
if (old)
|
1999-05-18 04:14:52 +08:00
|
|
|
{
|
2000-01-17 00:44:50 +08:00
|
|
|
WALK_LIST(o, old->tables)
|
|
|
|
{
|
|
|
|
rtable *ot = o->table;
|
|
|
|
if (!ot->deleted)
|
|
|
|
{
|
2018-03-18 20:48:47 +08:00
|
|
|
r = rt_find_table_config(new, o->name);
|
|
|
|
if (r && (r->addr_type == o->addr_type) && !new->shutdown)
|
2000-01-17 00:44:50 +08:00
|
|
|
{
|
|
|
|
DBG("\t%s: same\n", o->name);
|
|
|
|
r->table = ot;
|
|
|
|
ot->name = r->name;
|
2000-03-05 06:21:06 +08:00
|
|
|
ot->config = r;
|
2012-07-05 03:31:03 +08:00
|
|
|
if (o->sorted != r->sorted)
|
|
|
|
log(L_WARN "Reconfiguration of rtable sorted flag not implemented");
|
2000-01-17 00:44:50 +08:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2000-01-17 01:40:26 +08:00
|
|
|
DBG("\t%s: deleted\n", o->name);
|
2000-01-17 00:44:50 +08:00
|
|
|
ot->deleted = old;
|
|
|
|
config_add_obstacle(old);
|
|
|
|
rt_lock_table(ot);
|
|
|
|
rt_unlock_table(ot);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
1999-05-18 04:14:52 +08:00
|
|
|
}
|
2000-01-17 00:44:50 +08:00
|
|
|
|
|
|
|
WALK_LIST(r, new->tables)
|
|
|
|
if (!r->table)
|
|
|
|
{
|
2021-03-31 00:51:31 +08:00
|
|
|
r->table = rt_setup(rt_table_pool, r);
|
2000-01-17 00:44:50 +08:00
|
|
|
DBG("\t%s: created\n", r->name);
|
2021-03-31 00:51:31 +08:00
|
|
|
add_tail(&routing_tables, &r->table->n);
|
2000-01-17 00:44:50 +08:00
|
|
|
}
|
|
|
|
DBG("\tdone\n");
|
1999-05-18 04:14:52 +08:00
|
|
|
}
|
1999-12-01 23:10:21 +08:00
|
|
|
|
2009-05-31 21:24:27 +08:00
|
|
|
static inline void
|
2016-01-26 18:48:58 +08:00
|
|
|
do_feed_channel(struct channel *c, net *n, rte *e)
|
2009-05-31 21:24:27 +08:00
|
|
|
{
|
|
|
|
rte_update_lock();
|
2016-01-26 18:48:58 +08:00
|
|
|
if (c->ra_mode == RA_ACCEPTED)
|
2019-09-09 08:55:32 +08:00
|
|
|
rt_notify_accepted(c, n, NULL, NULL, c->refeeding);
|
2016-01-26 18:48:58 +08:00
|
|
|
else if (c->ra_mode == RA_MERGED)
|
2019-09-09 08:55:32 +08:00
|
|
|
rt_notify_merged(c, n, NULL, NULL, e, e, c->refeeding);
|
2016-01-26 18:48:58 +08:00
|
|
|
else /* RA_BASIC */
|
2019-09-09 08:55:32 +08:00
|
|
|
rt_notify_basic(c, n, e, e, c->refeeding);
|
2009-05-31 21:24:27 +08:00
|
|
|
rte_update_unlock();
|
|
|
|
}
|
|
|
|
|
2000-06-02 01:12:19 +08:00
|
|
|
/**
|
2016-01-26 18:48:58 +08:00
|
|
|
* rt_feed_channel - advertise all routes to a channel
|
|
|
|
* @c: channel to be fed
|
2000-06-02 01:12:19 +08:00
|
|
|
*
|
2016-01-26 18:48:58 +08:00
|
|
|
* This function performs one pass of advertisement of routes to a channel that
|
|
|
|
* is in the ES_FEEDING state. It is called by the protocol code as long as it
|
|
|
|
* has something to do. (We avoid transferring all the routes in single pass in
|
|
|
|
* order not to monopolize CPU time.)
|
2000-06-02 01:12:19 +08:00
|
|
|
*/
|
2000-05-19 18:46:26 +08:00
|
|
|
int
|
2016-01-26 18:48:58 +08:00
|
|
|
rt_feed_channel(struct channel *c)
|
2000-05-19 18:46:26 +08:00
|
|
|
{
|
2016-01-26 18:48:58 +08:00
|
|
|
struct fib_iterator *fit = &c->feed_fit;
|
2000-05-19 18:59:47 +08:00
|
|
|
int max_feed = 256;
|
2000-05-19 18:46:26 +08:00
|
|
|
|
2016-01-26 18:48:58 +08:00
|
|
|
ASSERT(c->export_state == ES_FEEDING);
|
|
|
|
|
|
|
|
if (!c->feed_active)
|
2000-05-19 18:46:26 +08:00
|
|
|
{
|
2016-01-26 18:48:58 +08:00
|
|
|
FIB_ITERATE_INIT(fit, &c->table->fib);
|
|
|
|
c->feed_active = 1;
|
2000-05-19 18:46:26 +08:00
|
|
|
}
|
|
|
|
|
2016-01-26 18:48:58 +08:00
|
|
|
FIB_ITERATE_START(&c->table->fib, fit, net, n)
|
2000-05-19 18:46:26 +08:00
|
|
|
{
|
2008-11-15 06:03:15 +08:00
|
|
|
rte *e = n->routes;
|
2000-05-19 18:59:47 +08:00
|
|
|
if (max_feed <= 0)
|
|
|
|
{
|
2015-12-22 03:16:05 +08:00
|
|
|
FIB_ITERATE_PUT(fit);
|
2000-05-19 18:59:47 +08:00
|
|
|
return 0;
|
|
|
|
}
|
2009-05-31 21:24:27 +08:00
|
|
|
|
2016-01-26 18:48:58 +08:00
|
|
|
if ((c->ra_mode == RA_OPTIMAL) ||
|
|
|
|
(c->ra_mode == RA_ACCEPTED) ||
|
|
|
|
(c->ra_mode == RA_MERGED))
|
2012-11-10 21:26:13 +08:00
|
|
|
if (rte_is_valid(e))
|
2009-05-31 21:24:27 +08:00
|
|
|
{
|
2016-01-26 18:48:58 +08:00
|
|
|
/* In the meantime, the protocol may fell down */
|
|
|
|
if (c->export_state != ES_FEEDING)
|
|
|
|
goto done;
|
2015-05-31 17:29:53 +08:00
|
|
|
|
2016-01-26 18:48:58 +08:00
|
|
|
do_feed_channel(c, n, e);
|
2009-05-31 21:24:27 +08:00
|
|
|
max_feed--;
|
|
|
|
}
|
|
|
|
|
2016-01-26 18:48:58 +08:00
|
|
|
if (c->ra_mode == RA_ANY)
|
2015-05-31 17:29:53 +08:00
|
|
|
for(e = n->routes; e; e = e->next)
|
2009-05-31 21:24:27 +08:00
|
|
|
{
|
2016-01-26 18:48:58 +08:00
|
|
|
/* In the meantime, the protocol may fell down */
|
|
|
|
if (c->export_state != ES_FEEDING)
|
|
|
|
goto done;
|
2015-05-31 17:29:53 +08:00
|
|
|
|
|
|
|
if (!rte_is_valid(e))
|
|
|
|
continue;
|
|
|
|
|
2016-01-26 18:48:58 +08:00
|
|
|
do_feed_channel(c, n, e);
|
2009-05-31 21:24:27 +08:00
|
|
|
max_feed--;
|
|
|
|
}
|
2000-05-19 18:46:26 +08:00
|
|
|
}
|
2015-12-22 03:16:05 +08:00
|
|
|
FIB_ITERATE_END;
|
2000-05-19 18:46:26 +08:00
|
|
|
|
2016-01-26 18:48:58 +08:00
|
|
|
done:
|
|
|
|
c->feed_active = 0;
|
|
|
|
return 1;
|
2000-05-19 18:46:26 +08:00
|
|
|
}
|
|
|
|
|
2000-06-02 01:12:19 +08:00
|
|
|
/**
|
|
|
|
* rt_feed_baby_abort - abort protocol feeding
|
2016-01-26 18:48:58 +08:00
|
|
|
* @c: channel
|
2000-06-02 01:12:19 +08:00
|
|
|
*
|
2016-01-26 18:48:58 +08:00
|
|
|
* This function is called by the protocol code when the protocol stops or
|
|
|
|
* ceases to exist during the feeding.
|
2000-06-02 01:12:19 +08:00
|
|
|
*/
|
2000-05-19 18:46:26 +08:00
|
|
|
void
|
2016-01-26 18:48:58 +08:00
|
|
|
rt_feed_channel_abort(struct channel *c)
|
2000-05-19 18:46:26 +08:00
|
|
|
{
|
2016-01-26 18:48:58 +08:00
|
|
|
if (c->feed_active)
|
2000-05-19 18:46:26 +08:00
|
|
|
{
|
2016-01-26 18:48:58 +08:00
|
|
|
/* Unlink the iterator */
|
|
|
|
fit_get(&c->table->fib, &c->feed_fit);
|
|
|
|
c->feed_active = 0;
|
2000-05-19 18:46:26 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-09-28 04:57:55 +08:00
|
|
|
|
2019-08-14 00:22:07 +08:00
|
|
|
/*
|
|
|
|
* Import table
|
|
|
|
*/
|
|
|
|
|
2018-09-28 04:57:55 +08:00
|
|
|
int
|
|
|
|
rte_update_in(struct channel *c, const net_addr *n, rte *new, struct rte_src *src)
|
|
|
|
{
|
2018-12-11 20:52:30 +08:00
|
|
|
struct rtable *tab = c->in_table;
|
2018-09-28 04:57:55 +08:00
|
|
|
rte *old, **pos;
|
|
|
|
net *net;
|
|
|
|
|
|
|
|
if (new)
|
|
|
|
{
|
2018-12-11 20:52:30 +08:00
|
|
|
net = net_get(tab, n);
|
2018-09-28 04:57:55 +08:00
|
|
|
|
|
|
|
if (!new->pref)
|
|
|
|
new->pref = c->preference;
|
|
|
|
|
|
|
|
if (!rta_is_cached(new->attrs))
|
|
|
|
new->attrs = rta_lookup(new->attrs);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2018-12-11 20:52:30 +08:00
|
|
|
net = net_find(tab, n);
|
2018-09-28 04:57:55 +08:00
|
|
|
|
|
|
|
if (!net)
|
2018-12-11 20:52:30 +08:00
|
|
|
goto drop_withdraw;
|
2018-09-28 04:57:55 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Find the old rte */
|
|
|
|
for (pos = &net->routes; old = *pos; pos = &old->next)
|
|
|
|
if (old->attrs->src == src)
|
|
|
|
{
|
|
|
|
if (new && rte_same(old, new))
|
2019-02-22 09:16:39 +08:00
|
|
|
{
|
|
|
|
/* Refresh the old rte, continue with update to main rtable */
|
|
|
|
if (old->flags & (REF_STALE | REF_DISCARD | REF_MODIFY))
|
|
|
|
{
|
|
|
|
old->flags &= ~(REF_STALE | REF_DISCARD | REF_MODIFY);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2018-12-11 20:52:30 +08:00
|
|
|
goto drop_update;
|
2019-02-22 09:16:39 +08:00
|
|
|
}
|
2018-09-28 04:57:55 +08:00
|
|
|
|
2019-08-27 03:53:56 +08:00
|
|
|
/* Move iterator if needed */
|
|
|
|
if (old == c->reload_next_rte)
|
|
|
|
c->reload_next_rte = old->next;
|
|
|
|
|
2018-09-28 04:57:55 +08:00
|
|
|
/* Remove the old rte */
|
|
|
|
*pos = old->next;
|
|
|
|
rte_free_quick(old);
|
2018-12-11 20:52:30 +08:00
|
|
|
tab->rt_count--;
|
2018-09-28 04:57:55 +08:00
|
|
|
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!new)
|
2018-12-11 20:52:30 +08:00
|
|
|
{
|
|
|
|
if (!old)
|
|
|
|
goto drop_withdraw;
|
|
|
|
|
2021-03-31 00:51:31 +08:00
|
|
|
if (!net->routes)
|
|
|
|
fib_delete(&tab->fib, net);
|
|
|
|
|
2018-12-11 20:52:30 +08:00
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
struct channel_limit *l = &c->rx_limit;
|
|
|
|
if (l->action && !old)
|
|
|
|
{
|
|
|
|
if (tab->rt_count >= l->limit)
|
|
|
|
channel_notify_limit(c, l, PLD_RX, tab->rt_count);
|
|
|
|
|
|
|
|
if (l->state == PLS_BLOCKED)
|
|
|
|
{
|
2020-11-15 23:01:19 +08:00
|
|
|
/* Required by rte_trace_in() */
|
|
|
|
new->net = net;
|
|
|
|
|
2020-12-08 05:19:40 +08:00
|
|
|
rte_trace_in(D_FILTERS, c, new, "ignored [limit]");
|
2018-12-11 20:52:30 +08:00
|
|
|
goto drop_update;
|
|
|
|
}
|
|
|
|
}
|
2018-09-28 04:57:55 +08:00
|
|
|
|
|
|
|
/* Insert the new rte */
|
|
|
|
rte *e = rte_do_cow(new);
|
|
|
|
e->flags |= REF_COW;
|
|
|
|
e->net = net;
|
|
|
|
e->sender = c;
|
|
|
|
e->lastmod = current_time();
|
|
|
|
e->next = *pos;
|
|
|
|
*pos = e;
|
2018-12-11 20:52:30 +08:00
|
|
|
tab->rt_count++;
|
2018-09-28 04:57:55 +08:00
|
|
|
return 1;
|
2018-12-11 20:52:30 +08:00
|
|
|
|
|
|
|
drop_update:
|
|
|
|
c->stats.imp_updates_received++;
|
|
|
|
c->stats.imp_updates_ignored++;
|
|
|
|
rte_free(new);
|
2021-03-31 00:51:31 +08:00
|
|
|
|
|
|
|
if (!net->routes)
|
|
|
|
fib_delete(&tab->fib, net);
|
|
|
|
|
2018-12-11 20:52:30 +08:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
drop_withdraw:
|
|
|
|
c->stats.imp_withdraws_received++;
|
|
|
|
c->stats.imp_withdraws_ignored++;
|
|
|
|
return 0;
|
2018-09-28 04:57:55 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
rt_reload_channel(struct channel *c)
|
|
|
|
{
|
|
|
|
struct rtable *tab = c->in_table;
|
|
|
|
struct fib_iterator *fit = &c->reload_fit;
|
|
|
|
int max_feed = 64;
|
|
|
|
|
|
|
|
ASSERT(c->channel_state == CS_UP);
|
|
|
|
|
|
|
|
if (!c->reload_active)
|
|
|
|
{
|
|
|
|
FIB_ITERATE_INIT(fit, &tab->fib);
|
|
|
|
c->reload_active = 1;
|
|
|
|
}
|
|
|
|
|
2019-08-27 03:53:56 +08:00
|
|
|
do {
|
|
|
|
for (rte *e = c->reload_next_rte; e; e = e->next)
|
2018-09-28 04:57:55 +08:00
|
|
|
{
|
2019-08-27 03:53:56 +08:00
|
|
|
if (max_feed-- <= 0)
|
|
|
|
{
|
|
|
|
c->reload_next_rte = e;
|
|
|
|
debug("%s channel reload burst split (max_feed=%d)", c->proto->name, max_feed);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
rte_update2(c, e->net->n.addr, rte_do_cow(e), e->attrs->src);
|
2018-09-28 04:57:55 +08:00
|
|
|
}
|
|
|
|
|
2019-08-27 03:53:56 +08:00
|
|
|
c->reload_next_rte = NULL;
|
|
|
|
|
|
|
|
FIB_ITERATE_START(&tab->fib, fit, net, n)
|
2018-09-28 04:57:55 +08:00
|
|
|
{
|
2019-08-27 03:53:56 +08:00
|
|
|
if (c->reload_next_rte = n->routes)
|
|
|
|
{
|
|
|
|
FIB_ITERATE_PUT_NEXT(fit, &tab->fib);
|
|
|
|
break;
|
|
|
|
}
|
2018-09-28 04:57:55 +08:00
|
|
|
}
|
2019-08-27 03:53:56 +08:00
|
|
|
FIB_ITERATE_END;
|
2018-09-28 04:57:55 +08:00
|
|
|
}
|
2019-08-27 03:53:56 +08:00
|
|
|
while (c->reload_next_rte);
|
2018-09-28 04:57:55 +08:00
|
|
|
|
|
|
|
c->reload_active = 0;
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
rt_reload_channel_abort(struct channel *c)
|
|
|
|
{
|
|
|
|
if (c->reload_active)
|
|
|
|
{
|
|
|
|
/* Unlink the iterator */
|
|
|
|
fit_get(&c->in_table->fib, &c->reload_fit);
|
2019-08-27 03:53:56 +08:00
|
|
|
c->reload_next_rte = NULL;
|
2018-09-28 04:57:55 +08:00
|
|
|
c->reload_active = 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
rt_prune_sync(rtable *t, int all)
|
|
|
|
{
|
2021-03-31 00:51:31 +08:00
|
|
|
struct fib_iterator fit;
|
|
|
|
|
|
|
|
FIB_ITERATE_INIT(&fit, &t->fib);
|
|
|
|
|
|
|
|
again:
|
|
|
|
FIB_ITERATE_START(&t->fib, &fit, net, n)
|
2018-09-28 04:57:55 +08:00
|
|
|
{
|
|
|
|
rte *e, **ee = &n->routes;
|
2021-03-31 00:51:31 +08:00
|
|
|
|
2018-09-28 04:57:55 +08:00
|
|
|
while (e = *ee)
|
|
|
|
{
|
|
|
|
if (all || (e->flags & (REF_STALE | REF_DISCARD)))
|
|
|
|
{
|
|
|
|
*ee = e->next;
|
|
|
|
rte_free_quick(e);
|
2018-12-11 20:52:30 +08:00
|
|
|
t->rt_count--;
|
2018-09-28 04:57:55 +08:00
|
|
|
}
|
|
|
|
else
|
|
|
|
ee = &e->next;
|
|
|
|
}
|
2021-03-31 00:51:31 +08:00
|
|
|
|
|
|
|
if (all || !n->routes)
|
|
|
|
{
|
|
|
|
FIB_ITERATE_PUT(&fit);
|
|
|
|
fib_delete(&t->fib, n);
|
|
|
|
goto again;
|
|
|
|
}
|
2018-09-28 04:57:55 +08:00
|
|
|
}
|
2021-03-31 00:51:31 +08:00
|
|
|
FIB_ITERATE_END;
|
2018-09-28 04:57:55 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2019-08-14 00:22:07 +08:00
|
|
|
/*
|
|
|
|
* Export table
|
|
|
|
*/
|
|
|
|
|
|
|
|
int
|
|
|
|
rte_update_out(struct channel *c, const net_addr *n, rte *new, rte *old0, int refeed)
|
|
|
|
{
|
|
|
|
struct rtable *tab = c->out_table;
|
|
|
|
struct rte_src *src;
|
|
|
|
rte *old, **pos;
|
|
|
|
net *net;
|
|
|
|
|
|
|
|
if (new)
|
|
|
|
{
|
|
|
|
net = net_get(tab, n);
|
|
|
|
src = new->attrs->src;
|
2019-09-24 23:12:15 +08:00
|
|
|
|
|
|
|
rte_store_tmp_attrs(new, rte_update_pool, NULL);
|
|
|
|
|
|
|
|
if (!rta_is_cached(new->attrs))
|
|
|
|
new->attrs = rta_lookup(new->attrs);
|
2019-08-14 00:22:07 +08:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
net = net_find(tab, n);
|
|
|
|
src = old0->attrs->src;
|
|
|
|
|
|
|
|
if (!net)
|
|
|
|
goto drop_withdraw;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Find the old rte */
|
|
|
|
for (pos = &net->routes; old = *pos; pos = &old->next)
|
2019-11-04 03:25:42 +08:00
|
|
|
if ((c->ra_mode != RA_ANY) || (old->attrs->src == src))
|
2019-08-14 00:22:07 +08:00
|
|
|
{
|
|
|
|
if (new && rte_same(old, new))
|
|
|
|
{
|
|
|
|
/* REF_STALE / REF_DISCARD not used in export table */
|
|
|
|
/*
|
|
|
|
if (old->flags & (REF_STALE | REF_DISCARD | REF_MODIFY))
|
|
|
|
{
|
|
|
|
old->flags &= ~(REF_STALE | REF_DISCARD | REF_MODIFY);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
*/
|
|
|
|
|
|
|
|
goto drop_update;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Remove the old rte */
|
|
|
|
*pos = old->next;
|
|
|
|
rte_free_quick(old);
|
|
|
|
tab->rt_count--;
|
|
|
|
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!new)
|
|
|
|
{
|
|
|
|
if (!old)
|
|
|
|
goto drop_withdraw;
|
|
|
|
|
2021-03-31 00:51:31 +08:00
|
|
|
if (!net->routes)
|
|
|
|
fib_delete(&tab->fib, net);
|
|
|
|
|
2019-08-14 00:22:07 +08:00
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Insert the new rte */
|
|
|
|
rte *e = rte_do_cow(new);
|
|
|
|
e->flags |= REF_COW;
|
|
|
|
e->net = net;
|
|
|
|
e->sender = c;
|
|
|
|
e->lastmod = current_time();
|
|
|
|
e->next = *pos;
|
|
|
|
*pos = e;
|
|
|
|
tab->rt_count++;
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
drop_update:
|
|
|
|
return refeed;
|
|
|
|
|
|
|
|
drop_withdraw:
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Hostcache
|
|
|
|
*/
|
|
|
|
|
2015-12-24 22:52:03 +08:00
|
|
|
static inline u32
|
2010-07-26 22:39:27 +08:00
|
|
|
hc_hash(ip_addr a, rtable *dep)
|
|
|
|
{
|
2015-12-24 22:52:03 +08:00
|
|
|
return ipa_hash(a) ^ ptr_hash(dep);
|
2010-07-26 22:39:27 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline void
|
|
|
|
hc_insert(struct hostcache *hc, struct hostentry *he)
|
|
|
|
{
|
2015-05-19 14:53:34 +08:00
|
|
|
uint k = he->hash_key >> hc->hash_shift;
|
2010-07-26 22:39:27 +08:00
|
|
|
he->next = hc->hash_table[k];
|
|
|
|
hc->hash_table[k] = he;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void
|
|
|
|
hc_remove(struct hostcache *hc, struct hostentry *he)
|
|
|
|
{
|
|
|
|
struct hostentry **hep;
|
2015-05-19 14:53:34 +08:00
|
|
|
uint k = he->hash_key >> hc->hash_shift;
|
2010-07-26 22:39:27 +08:00
|
|
|
|
|
|
|
for (hep = &hc->hash_table[k]; *hep != he; hep = &(*hep)->next);
|
|
|
|
*hep = he->next;
|
|
|
|
}
|
|
|
|
|
|
|
|
#define HC_DEF_ORDER 10
|
|
|
|
#define HC_HI_MARK *4
|
|
|
|
#define HC_HI_STEP 2
|
|
|
|
#define HC_HI_ORDER 16 /* Must be at most 16 */
|
|
|
|
#define HC_LO_MARK /5
|
|
|
|
#define HC_LO_STEP 2
|
|
|
|
#define HC_LO_ORDER 10
|
|
|
|
|
|
|
|
static void
|
2021-03-31 00:51:31 +08:00
|
|
|
hc_alloc_table(struct hostcache *hc, pool *p, unsigned order)
|
2010-07-26 22:39:27 +08:00
|
|
|
{
|
2016-10-14 21:37:04 +08:00
|
|
|
uint hsize = 1 << order;
|
2010-07-26 22:39:27 +08:00
|
|
|
hc->hash_order = order;
|
2015-12-24 22:52:03 +08:00
|
|
|
hc->hash_shift = 32 - order;
|
2016-10-14 21:37:04 +08:00
|
|
|
hc->hash_max = (order >= HC_HI_ORDER) ? ~0U : (hsize HC_HI_MARK);
|
|
|
|
hc->hash_min = (order <= HC_LO_ORDER) ? 0U : (hsize HC_LO_MARK);
|
2010-07-26 22:39:27 +08:00
|
|
|
|
2021-03-31 00:51:31 +08:00
|
|
|
hc->hash_table = mb_allocz(p, hsize * sizeof(struct hostentry *));
|
2010-07-26 22:39:27 +08:00
|
|
|
}
|
|
|
|
|
2010-07-05 23:50:19 +08:00
|
|
|
static void
|
2021-03-31 00:51:31 +08:00
|
|
|
hc_resize(struct hostcache *hc, pool *p, unsigned new_order)
|
2010-07-05 23:50:19 +08:00
|
|
|
{
|
2010-07-26 22:39:27 +08:00
|
|
|
struct hostentry **old_table = hc->hash_table;
|
|
|
|
struct hostentry *he, *hen;
|
2016-10-14 21:37:04 +08:00
|
|
|
uint old_size = 1 << hc->hash_order;
|
|
|
|
uint i;
|
2010-07-26 22:39:27 +08:00
|
|
|
|
2021-03-31 00:51:31 +08:00
|
|
|
hc_alloc_table(hc, p, new_order);
|
2010-07-26 22:39:27 +08:00
|
|
|
for (i = 0; i < old_size; i++)
|
|
|
|
for (he = old_table[i]; he != NULL; he=hen)
|
|
|
|
{
|
|
|
|
hen = he->next;
|
|
|
|
hc_insert(hc, he);
|
|
|
|
}
|
|
|
|
mb_free(old_table);
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct hostentry *
|
2021-03-31 00:51:31 +08:00
|
|
|
hc_new_hostentry(struct hostcache *hc, pool *p, ip_addr a, ip_addr ll, rtable *dep, unsigned k)
|
2010-07-26 22:39:27 +08:00
|
|
|
{
|
|
|
|
struct hostentry *he = sl_alloc(hc->slab);
|
|
|
|
|
2017-02-24 21:05:11 +08:00
|
|
|
*he = (struct hostentry) {
|
|
|
|
.addr = a,
|
|
|
|
.link = ll,
|
|
|
|
.tab = dep,
|
|
|
|
.hash_key = k,
|
|
|
|
};
|
2010-07-26 22:39:27 +08:00
|
|
|
|
|
|
|
add_tail(&hc->hostentries, &he->ln);
|
|
|
|
hc_insert(hc, he);
|
|
|
|
|
|
|
|
hc->hash_items++;
|
|
|
|
if (hc->hash_items > hc->hash_max)
|
2021-03-31 00:51:31 +08:00
|
|
|
hc_resize(hc, p, hc->hash_order + HC_HI_STEP);
|
2010-07-26 22:39:27 +08:00
|
|
|
|
|
|
|
return he;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2021-03-31 00:51:31 +08:00
|
|
|
hc_delete_hostentry(struct hostcache *hc, pool *p, struct hostentry *he)
|
2010-07-26 22:39:27 +08:00
|
|
|
{
|
2010-12-08 06:33:55 +08:00
|
|
|
rta_free(he->src);
|
|
|
|
|
2010-07-26 22:39:27 +08:00
|
|
|
rem_node(&he->ln);
|
|
|
|
hc_remove(hc, he);
|
|
|
|
sl_free(hc->slab, he);
|
|
|
|
|
|
|
|
hc->hash_items--;
|
|
|
|
if (hc->hash_items < hc->hash_min)
|
2021-03-31 00:51:31 +08:00
|
|
|
hc_resize(hc, p, hc->hash_order - HC_LO_STEP);
|
2010-07-05 23:50:19 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
rt_init_hostcache(rtable *tab)
|
|
|
|
{
|
2021-03-31 00:51:31 +08:00
|
|
|
struct hostcache *hc = mb_allocz(tab->rp, sizeof(struct hostcache));
|
2010-07-05 23:50:19 +08:00
|
|
|
init_list(&hc->hostentries);
|
2010-07-26 22:39:27 +08:00
|
|
|
|
|
|
|
hc->hash_items = 0;
|
2021-03-31 00:51:31 +08:00
|
|
|
hc_alloc_table(hc, tab->rp, HC_DEF_ORDER);
|
|
|
|
hc->slab = sl_new(tab->rp, sizeof(struct hostentry));
|
2010-07-26 22:39:27 +08:00
|
|
|
|
2021-03-31 00:51:31 +08:00
|
|
|
hc->lp = lp_new(tab->rp, LP_GOOD_SIZE(1024));
|
2020-03-26 10:57:48 +08:00
|
|
|
hc->trie = f_new_trie(hc->lp, 0);
|
2010-07-28 00:20:12 +08:00
|
|
|
|
2010-07-05 23:50:19 +08:00
|
|
|
tab->hostcache = hc;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Tear down the hostcache of table @tab. Only the rta references held by
 * the entries need explicit release; the memory itself belongs to the
 * table's resource pool and is freed with it.
 */
static void
rt_free_hostcache(rtable *tab)
{
  struct hostcache *hc = tab->hostcache;

  node *n;
  WALK_LIST(n, hc->hostentries)
  {
    struct hostentry *he = SKIP_BACK(struct hostentry, ln, n);
    rta_free(he->src);

    /* A non-zero use count means some route still references this entry */
    if (he->uc)
      log(L_ERR "Hostcache is not empty in table %s", tab->name);
  }

  /* Freed automagically by the resource pool
  rfree(hc->slab);
  rfree(hc->lp);
  mb_free(hc->hash_table);
  mb_free(hc);
  */
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
rt_notify_hostcache(rtable *tab, net *net)
|
|
|
|
{
|
|
|
|
if (tab->hcu_scheduled)
|
|
|
|
return;
|
|
|
|
|
2015-12-24 22:52:03 +08:00
|
|
|
if (trie_match_net(tab->hostcache->trie, net->n.addr))
|
|
|
|
rt_schedule_hcu(tab);
|
2010-07-05 23:50:19 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
if_local_addr(ip_addr a, struct iface *i)
|
|
|
|
{
|
|
|
|
struct ifa *b;
|
|
|
|
|
|
|
|
WALK_LIST(b, i->addrs)
|
|
|
|
if (ipa_equal(a, b->ip))
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-09-28 20:17:20 +08:00
|
|
|
/*
 * Extract an IGP metric from route @rt for use in recursive next-hop
 * resolution. An explicit EA_GEN_IGP_METRIC attribute takes precedence;
 * otherwise the metric is taken from the protocol-specific union member
 * according to the route source. Returns IGP_METRIC_UNKNOWN when no
 * usable metric exists.
 */
u32
rt_get_igp_metric(rte *rt)
{
  eattr *ea = ea_find(rt->attrs->eattrs, EA_GEN_IGP_METRIC);

  if (ea)
    return ea->u.data;

  rta *a = rt->attrs;

#ifdef CONFIG_OSPF
  if ((a->source == RTS_OSPF) ||
      (a->source == RTS_OSPF_IA) ||
      (a->source == RTS_OSPF_EXT1))
    return rt->u.ospf.metric1;
#endif

#ifdef CONFIG_RIP
  if (a->source == RTS_RIP)
    return rt->u.rip.metric;
#endif

#ifdef CONFIG_BGP
  if (a->source == RTS_BGP)
  {
    /* AIGP metric is 64-bit; clamp it into the u32 result range */
    u64 metric = bgp_total_aigp_metric(rt);
    return (u32) MIN(metric, (u64) IGP_METRIC_UNKNOWN);
  }
#endif

#ifdef CONFIG_BABEL
  if (a->source == RTS_BABEL)
    return rt->u.babel.metric;
#endif

  /* Directly connected networks have zero cost */
  if (a->source == RTS_DEVICE)
    return 0;

  return IGP_METRIC_UNKNOWN;
}
|
|
|
|
|
2010-07-05 23:50:19 +08:00
|
|
|
/*
 * Re-resolve host entry @he against table @tab: find the best route
 * covering the host address and cache its attributes, destination type
 * and IGP metric in the entry. Also records the dependency prefix range
 * in the hostcache trie. Returns nonzero if the cached attributes
 * changed (so dependent tables need a next-hop update).
 */
static int
rt_update_hostentry(rtable *tab, struct hostentry *he)
{
  rta *old_src = he->src;
  int direct = 0;
  int pxlen = 0;

  /* Reset the hostentry */
  he->src = NULL;
  he->dest = RTD_UNREACHABLE;
  he->nexthop_linkable = 0;
  he->igp_metric = 0;

  net_addr he_addr;
  net_fill_ip_host(&he_addr, he->addr);
  net *n = net_route(tab, &he_addr);
  if (n)
    {
      /* First route on the list is the best one */
      rte *e = n->routes;
      rta *a = e->attrs;
      pxlen = n->n.addr->pxlen;

      if (a->hostentry)
	{
	  /* Recursive route should not depend on another recursive route */
	  log(L_WARN "Next hop address %I resolvable through recursive route for %N",
	      he->addr, n->n.addr);
	  goto done;
	}

      if (a->dest == RTD_UNICAST)
	{
	  /* Count device (gateway-less) next hops and reject local addresses */
	  for (struct nexthop *nh = &(a->nh); nh; nh = nh->next)
	    if (ipa_zero(nh->gw))
	      {
		if (if_local_addr(he->addr, nh->iface))
		  {
		    /* The host address is a local address, this is not valid */
		    log(L_WARN "Next hop address %I is a local address of iface %s",
			he->addr, nh->iface->name);
		    goto done;
		  }

		direct++;
	      }
	}

      he->src = rta_clone(a);
      he->dest = a->dest;
      /* A next hop reached via a device route may change on link events */
      he->nexthop_linkable = !direct;
      he->igp_metric = rt_get_igp_metric(e);
    }

done:
  /* Add a prefix range to the trie */
  trie_add_prefix(tab->hostcache->trie, &he_addr, pxlen, he_addr.pxlen);

  /* Drop the old reference last; rta_clone above may have re-used it */
  rta_free(old_src);
  return old_src != he->src;
}
|
|
|
|
|
|
|
|
/*
 * Process a scheduled hostcache update for table @tab: rebuild the
 * dependency trie from scratch, drop unused entries, re-resolve the
 * rest, and schedule next-hop updates in tables whose entries changed.
 */
static void
rt_update_hostcache(rtable *tab)
{
  struct hostcache *hc = tab->hostcache;
  struct hostentry *he;
  node *n, *x;

  /* Reset the trie */
  lp_flush(hc->lp);
  hc->trie = f_new_trie(hc->lp, 0);

  /* DELSAFE walk: hc_delete_hostentry removes nodes during iteration */
  WALK_LIST_DELSAFE(n, x, hc->hostentries)
    {
      he = SKIP_BACK(struct hostentry, ln, n);
      if (!he->uc)
	{
	  hc_delete_hostentry(hc, tab->rp, he);
	  continue;
	}

      if (rt_update_hostentry(tab, he))
	rt_schedule_nhu(he->tab);
    }

  tab->hcu_scheduled = 0;
}
|
|
|
|
|
2017-03-22 22:00:07 +08:00
|
|
|
struct hostentry *
|
2012-08-14 22:25:22 +08:00
|
|
|
rt_get_hostentry(rtable *tab, ip_addr a, ip_addr ll, rtable *dep)
|
2010-07-05 23:50:19 +08:00
|
|
|
{
|
|
|
|
struct hostentry *he;
|
|
|
|
|
|
|
|
if (!tab->hostcache)
|
|
|
|
rt_init_hostcache(tab);
|
|
|
|
|
2015-12-24 22:52:03 +08:00
|
|
|
u32 k = hc_hash(a, dep);
|
2010-07-26 22:39:27 +08:00
|
|
|
struct hostcache *hc = tab->hostcache;
|
|
|
|
for (he = hc->hash_table[k >> hc->hash_shift]; he != NULL; he = he->next)
|
|
|
|
if (ipa_equal(he->addr, a) && (he->tab == dep))
|
|
|
|
return he;
|
2010-07-05 23:50:19 +08:00
|
|
|
|
2021-03-31 00:51:31 +08:00
|
|
|
he = hc_new_hostentry(hc, tab->rp, a, ipa_zero(ll) ? a : ll, dep, k);
|
2010-07-26 22:39:27 +08:00
|
|
|
rt_update_hostentry(tab, he);
|
2010-07-05 23:50:19 +08:00
|
|
|
return he;
|
|
|
|
}
|
|
|
|
|
2012-08-14 22:25:22 +08:00
|
|
|
|
2000-06-02 20:29:55 +08:00
|
|
|
/*
 * Documentation for functions declared inline in route.h
 * (kept inside #if 0 so the doc extractor sees them without
 * generating any code)
 */
#if 0

/**
 * net_find - find a network entry
 * @tab: a routing table
 * @addr: address of the network
 *
 * net_find() looks up the given network in routing table @tab and
 * returns a pointer to its &net entry or %NULL if no such network
 * exists.
 */
static inline net *net_find(rtable *tab, net_addr *addr)
{ DUMMY; }

/**
 * net_get - obtain a network entry
 * @tab: a routing table
 * @addr: address of the network
 *
 * net_get() looks up the given network in routing table @tab and
 * returns a pointer to its &net entry. If no such entry exists, it's
 * created.
 */
static inline net *net_get(rtable *tab, net_addr *addr)
{ DUMMY; }

/**
 * rte_cow - copy a route for writing
 * @r: a route entry to be copied
 *
 * rte_cow() takes a &rte and prepares it for modification. The exact action
 * taken depends on the flags of the &rte -- if it's a temporary entry, it's
 * just returned unchanged, else a new temporary entry with the same contents
 * is created.
 *
 * The primary use of this function is inside the filter machinery -- when
 * a filter wants to modify &rte contents (to change the preference or to
 * attach another set of attributes), it must ensure that the &rte is not
 * shared with anyone else (and especially that it isn't stored in any routing
 * table).
 *
 * Result: a pointer to the new writable &rte.
 */
static inline rte * rte_cow(rte *r)
{ DUMMY; }

#endif
|