Finishes add-path.
Fixes some bugs and uses generic hash implementation.
parent 283c7dfada
commit e7d2ac4401
10 changed files with 169 additions and 177 deletions

@@ -6,6 +6,9 @@
  * Can be freely distributed and used under the terms of the GNU GPL.
  */
 
+#ifndef _BIRD_BITOPTS_H_
+#define _BIRD_BITOPTS_H_
+
 /*
  * Bit mask operations:
  *
@@ -19,3 +22,8 @@ u32 u32_mkmask(unsigned n);
 int u32_masklen(u32 x);
 
 u32 u32_log2(u32 v);
+
+static inline u32 u32_hash(u32 v) { return v * 2902958171u; }
+
+#endif
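
Note: the new u32_hash() is a multiplicative hash; the reworked lib/hash.h below derives a bucket index from its top bits instead of masking off the low ones. A minimal standalone sketch of that combination (the constant and the shift come from this commit, the harness around them is illustrative):

#include <stdio.h>
#include <stdint.h>

typedef uint32_t u32;

/* Multiplicative hash added to the bit-operations header by this commit */
static inline u32 u32_hash(u32 v) { return v * 2902958171u; }

/* Bucket index: keep the top 'order' bits, the same reduction HASH_FN() uses */
static inline u32 bucket_of(u32 key, unsigned order)
{ return u32_hash(key) >> (32 - order); }

int main(void)
{
  unsigned order = 6;   /* 1 << 6 = 64 buckets */
  for (u32 key = 1; key <= 5; key++)
    printf("key %u -> bucket %u of %u\n",
           (unsigned) key, (unsigned) bucket_of(key, order), 1u << order);
  return 0;
}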

lib/hash.h | 119

@@ -3,7 +3,9 @@
 #define HASH(type) struct { type **data; uint count, order; }
 #define HASH_TYPE(v) typeof(** (v).data)
 #define HASH_SIZE(v) (1 << (v).order)
-#define HASH_MASK(v) ((1 << (v).order)-1)
+
+#define HASH_EQ(v,id,k1,k2...) (id##_EQ(k1, k2))
+#define HASH_FN(v,id,key...) ((u32) (id##_FN(key)) >> (32 - (v).order))
 
 
 #define HASH_INIT(v,pool,init_order) \
@@ -15,16 +17,16 @@
 
 #define HASH_FIND(v,id,key...) \
   ({ \
-    uint _h = id##_FN((key)) & HASH_MASK(v); \
+    u32 _h = HASH_FN(v, id, key); \
     HASH_TYPE(v) *_n = (v).data[_h]; \
-    while (_n && !id##_EQ(id##_KEY(_n), (key))) \
+    while (_n && !HASH_EQ(v, id, id##_KEY(_n), key)) \
      _n = id##_NEXT(_n); \
     _n; \
   })
 
 #define HASH_INSERT(v,id,node) \
   ({ \
-    uint _h = id##_FN(id##_KEY((node))) & HASH_MASK(v); \
+    u32 _h = HASH_FN(v, id, id##_KEY((node))); \
     HASH_TYPE(v) **_nn = (v).data + _h; \
     id##_NEXT(node) = *_nn; \
     *_nn = node; \
@@ -33,72 +35,117 @@
 
 #define HASH_DO_REMOVE(v,id,_nn) \
   ({ \
-    HASH_TYPE(v) *_n = *_nn; \
-    if (_n) \
-    { \
-      *_nn = id##_NEXT(_n); \
-      (v).count--; \
-    } \
-    _n; \
+    *_nn = id##_NEXT((*_nn)); \
+    (v).count--; \
   })
 
 #define HASH_DELETE(v,id,key...) \
   ({ \
-    uint _h = id##_FN((key)) & HASH_MASK(v); \
-    HASH_TYPE(v) **_nn = (v).data + _h; \
+    u32 _h = HASH_FN(v, id, key); \
+    HASH_TYPE(v) *_n, **_nn = (v).data + _h; \
     \
-    while ((*_nn) && !id##_EQ(id##_KEY((*_nn)), (key))) \
+    while ((*_nn) && !HASH_EQ(v, id, id##_KEY((*_nn)), key)) \
      _nn = &(id##_NEXT((*_nn))); \
     \
-    HASH_DO_REMOVE(v,id,_nn); \
+    if (_n = *_nn) \
+      HASH_DO_REMOVE(v,id,_nn); \
+    _n; \
   })
 
 #define HASH_REMOVE(v,id,node) \
   ({ \
-    uint _h = id##_FN(id##_KEY((node))) & HASH_MASK(v); \
-    HASH_TYPE(v) **_nn = (v).data + _h; \
+    u32 _h = HASH_FN(v, id, id##_KEY((node))); \
+    HASH_TYPE(v) *_n, **_nn = (v).data + _h; \
     \
     while ((*_nn) && (*_nn != (node))) \
      _nn = &(id##_NEXT((*_nn))); \
     \
-    HASH_DO_REMOVE(v,id,_nn); \
+    if (_n = *_nn) \
+      HASH_DO_REMOVE(v,id,_nn); \
+    _n; \
  })
 
 
 #define HASH_REHASH(v,id,pool,step) \
   ({ \
     HASH_TYPE(v) *_n, *_n2, **_od; \
-    uint _i, _s; \
+    uint _i, _os; \
     \
-    _s = HASH_SIZE(v); \
+    _os = HASH_SIZE(v); \
     _od = (v).data; \
     (v).count = 0; \
     (v).order += (step); \
     (v).data = mb_allocz(pool, HASH_SIZE(v) * sizeof(* (v).data)); \
     \
-    for (_i = 0; _i < _s; _i++) \
+    for (_i = 0; _i < _os; _i++) \
      for (_n = _od[_i]; _n && (_n2 = id##_NEXT(_n), 1); _n = _n2) \
        HASH_INSERT(v, id, _n); \
     \
     mb_free(_od); \
   })
 
-#define HASH_DEFINE_REHASH_FN(id, type) \
-static void id##_REHASH_FN(void *v, pool *p, int step) \
+#define REHASH_LO_MARK(a,b,c,d,e,f) a
+#define REHASH_HI_MARK(a,b,c,d,e,f) b
+#define REHASH_LO_STEP(a,b,c,d,e,f) c
+#define REHASH_HI_STEP(a,b,c,d,e,f) d
+#define REHASH_LO_BOUND(a,b,c,d,e,f) e
+#define REHASH_HI_BOUND(a,b,c,d,e,f) f
+
+#define HASH_DEFINE_REHASH_FN(id,type) \
+static void id##_REHASH(void *v, pool *p, int step) \
 { HASH_REHASH(* (HASH(type) *) v, id, p, step); }
 
-#define HASH_TRY_REHASH_UP(v,id,pool) \
-  ({ \
-    if (((v).order < id##_REHASH_MAX) && ((v).count > HASH_SIZE(v))) \
-      id##_REHASH_FN(&v, pool, 1); \
+#define HASH_MAY_STEP_UP(v,id,pool) HASH_MAY_STEP_UP_(v,pool, id##_REHASH, id##_PARAMS)
+#define HASH_MAY_STEP_DOWN(v,id,pool) HASH_MAY_STEP_DOWN_(v,pool, id##_REHASH, id##_PARAMS)
+#define HASH_MAY_RESIZE_DOWN(v,id,pool) HASH_MAY_RESIZE_DOWN_(v,pool, id##_REHASH, id##_PARAMS)
+
+#define HASH_MAY_STEP_UP_(v,pool,rehash_fn,args) \
+  ({ \
+    if (((v).count > (HASH_SIZE(v) REHASH_HI_MARK(args))) && \
+        ((v).order < (REHASH_HI_BOUND(args)))) \
+      rehash_fn(&(v), pool, REHASH_HI_STEP(args)); \
   })
 
-#define HASH_TRY_REHASH_DOWN(v,id,pool) \
+#define HASH_MAY_STEP_DOWN_(v,pool,rehash_fn,args) \
   ({ \
-    if (((v).order > id##_REHASH_MIN) && ((v).count < HASH_SIZE(v)/2)) \
-      id##_REHASH_FN(&v, pool, -1); \
+    if (((v).count < (HASH_SIZE(v) REHASH_LO_MARK(args))) && \
+        ((v).order > (REHASH_LO_BOUND(args)))) \
+      rehash_fn(&(v), pool, -(REHASH_LO_STEP(args))); \
   })
 
+#define HASH_MAY_RESIZE_DOWN_(v,pool,rehash_fn,args) \
+  ({ \
+    int _o = (v).order; \
+    while (((v).count < ((1 << _o) REHASH_LO_MARK(args))) && \
+           (_o > (REHASH_LO_BOUND(args)))) \
+      _o -= (REHASH_LO_STEP(args)); \
+    if (_o < (v).order) \
+      rehash_fn(&(v), pool, _o - (int) (v).order); \
  })
 
+
+#define HASH_INSERT2(v,id,pool,node) \
+  ({ \
+    HASH_INSERT(v, id, node); \
+    HASH_MAY_STEP_UP(v, id, pool); \
+  })
+
+#define HASH_DELETE2(v,id,pool,key...) \
+  ({ \
+    HASH_TYPE(v) *_n = HASH_DELETE(v, id, key); \
+    if (_n) HASH_MAY_STEP_DOWN(v, id, pool); \
+    _n; \
  })
+
+#define HASH_REMOVE2(v,id,pool,node) \
+  ({ \
+    HASH_TYPE(v) *_n = HASH_REMOVE(v, id, node); \
+    if (_n) HASH_MAY_STEP_DOWN(v, id, pool); \
+    _n; \
  })
 
 
 #define HASH_WALK(v,next,n) \
   do { \
     HASH_TYPE(v) *n; \
@@ -121,3 +168,13 @@
 #define HASH_WALK_DELSAFE_END } while (0)
 
 
+#define HASH_WALK_FILTER(v,next,n,nn) \
+  do { \
+    HASH_TYPE(v) *n, **nn; \
+    uint _i; \
+    uint _s = HASH_SIZE(v); \
+    for (_i = 0; _i < _s; _i++) \
+      for (nn = (v).data + _i; n = *nn; (*nn == n) ? (nn = &n->next) : NULL)
+
+#define HASH_WALK_FILTER_END } while (0)
+
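
Note: a sketch of what a client of the reworked interface looks like, modeled on the RSH_* and PXH_* users later in this commit. The struct, the MYH_* names, my_pool and the chosen includes are illustrative assumptions rather than part of the patch, and the snippet is meant to be built inside the BIRD tree:

#include "nest/bird.h"    /* assumed to provide u32, uint and pool */
#include "lib/hash.h"
#include "lib/resource.h"

struct mynode {
  struct mynode *next;    /* chain pointer named by MYH_NEXT */
  u32 key;
};

#define MYH_KEY(n) n->key
#define MYH_NEXT(n) n->next
#define MYH_EQ(k1,k2) k1 == k2
#define MYH_FN(k) u32_hash(k)   /* the multiplicative hash from the bit-operations header */

#define MYH_REHASH my_rehash
#define MYH_PARAMS /2, *2, 1, 1, 4, 20   /* low/high fill marks, shrink/grow steps, order bounds */

static HASH(struct mynode) my_hash;

HASH_DEFINE_REHASH_FN(MYH, struct mynode)   /* defines my_rehash() via MYH_REHASH */

void
my_example(pool *my_pool, struct mynode *n)
{
  struct mynode *found;

  HASH_INIT(my_hash, my_pool, 4);           /* 1 << 4 initial buckets */
  HASH_INSERT2(my_hash, MYH, my_pool, n);   /* insert; may grow the table */

  found = HASH_FIND(my_hash, MYH, n->key);
  if (found)
    HASH_DELETE2(my_hash, MYH, my_pool, found->key);   /* delete; may shrink the table */
}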

@@ -52,6 +52,7 @@ typedef u32 ip_addr;
 #define ipa_mkmask(x) _MI(u32_mkmask(x))
 #define ipa_mklen(x) u32_masklen(_I(x))
 #define ipa_hash(x) ipv4_hash(_I(x))
+#define ipa_hash32(x) ipv4_hash32(_I(x))
 #define ipa_hton(x) x = _MI(htonl(_I(x)))
 #define ipa_ntoh(x) x = _MI(ntohl(_I(x)))
 #define ipa_classify(x) ipv4_classify(_I(x))
@@ -86,6 +87,14 @@ static inline unsigned ipv4_hash(u32 a)
   return a & 0xffff;
 }
 
+static inline u32 ipv4_hash32(u32 a)
+{
+  /* Returns a 32-bit value, although low-order bits are not mixed */
+  a ^= a << 16;
+  a ^= a << 12;
+  return a;
+}
+
 static inline int ipv4_compare(u32 x, u32 y)
 {
   return (x > y) - (x < y);

@@ -58,6 +58,7 @@ typedef struct ipv6_addr {
 #define ipa_mkmask(x) ipv6_mkmask(x)
 #define ipa_mklen(x) ipv6_mklen(&(x))
 #define ipa_hash(x) ipv6_hash(&(x))
+#define ipa_hash32(x) ipv6_hash32(&(x))
 #define ipa_hton(x) ipv6_hton(&(x))
 #define ipa_ntoh(x) ipv6_ntoh(&(x))
 #define ipa_classify(x) ipv6_classify(&(x))
@@ -104,6 +105,13 @@ static inline unsigned ipv6_hash(ip_addr *a)
   return (x ^ (x >> 16) ^ (x >> 8)) & 0xffff;
 }
 
+static inline u32 ipv6_hash32(ip_addr *a)
+{
+  /* Returns a 32-bit hash key, although low-order bits are not mixed */
+  u32 x = _I0(*a) ^ _I1(*a) ^ _I2(*a) ^ _I3(*a);
+  return x ^ (x << 16) ^ (x << 24);
+}
+
 static inline u32 ipv6_getbit(ip_addr a, u32 y)
 {
   return a.addr[y / 32] & (0x80000000 >> (y % 32));
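
Note: the ipa_hash32() variants exist because HASH_FN() now keeps the top bits of the hash value; a 16-bit ipa_hash() result shifted right by (32 - order) is zero for every key whenever order is at most 16, so all entries would fall into bucket 0. A standalone sketch of the IPv4 variant (the sample address and the harness are illustrative):

#include <stdio.h>
#include <stdint.h>

typedef uint32_t u32;

/* ipv4_hash32() as added by this commit: a full 32-bit value,
   although the low-order bits are not mixed */
static inline u32 ipv4_hash32(u32 a)
{
  a ^= a << 16;
  a ^= a << 12;
  return a;
}

int main(void)
{
  u32 addr = (192u << 24) | (0u << 16) | (2u << 8) | 1u;   /* 192.0.2.1 */
  unsigned order = 10;
  printf("hash32 = 0x%08x, bucket = %u\n",
         (unsigned) ipv4_hash32(addr), (unsigned) (ipv4_hash32(addr) >> (32 - order)));
  return 0;
}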

nest/rt-attr.c | 111

@@ -51,6 +51,7 @@
 #include "nest/cli.h"
 #include "nest/attrs.h"
 #include "lib/alloca.h"
+#include "lib/hash.h"
 #include "lib/resource.h"
 #include "lib/string.h"
 
@@ -63,14 +64,20 @@ static slab *rte_src_slab;
 /* rte source ID bitmap */
 static u32 *src_ids;
 static u32 src_id_size, src_id_used, src_id_pos;
-#define SRC_ID_SIZE_DEF 4
+#define SRC_ID_INIT_SIZE 4
 
 /* rte source hash */
-static struct rte_src **src_table;
-static u32 src_hash_order, src_hash_size, src_hash_count;
-#define SRC_HASH_ORDER_DEF 6
-#define SRC_HASH_ORDER_MAX 18
-#define SRC_HASH_ORDER_MIN 10
+#define RSH_KEY(n) n->proto, n->private_id
+#define RSH_NEXT(n) n->next
+#define RSH_EQ(p1,n1,p2,n2) p1 == p2 && n1 == n2
+#define RSH_FN(p,n) p->hash_key ^ u32_hash(n)
+
+#define RSH_REHASH rte_src_rehash
+#define RSH_PARAMS /2, *2, 1, 1, 8, 20
+#define RSH_INIT_ORDER 6
+
+static HASH(struct rte_src) src_hash;
 
 struct protocol *attr_class_to_protocol[EAP_MAX];
 
@@ -81,17 +88,14 @@ rte_src_init(void)
   rte_src_slab = sl_new(rta_pool, sizeof(struct rte_src));
 
   src_id_pos = 0;
-  src_id_size = SRC_ID_SIZE_DEF;
+  src_id_size = SRC_ID_INIT_SIZE;
   src_ids = mb_allocz(rta_pool, src_id_size * sizeof(u32));
 
   /* ID 0 is reserved */
   src_ids[0] = 1;
   src_id_used = 1;
 
-  src_hash_count = 0;
-  src_hash_order = SRC_HASH_ORDER_DEF;
-  src_hash_size = 1 << src_hash_order;
-  src_table = mb_allocz(rta_pool, src_hash_size * sizeof(struct rte_src *));
+  HASH_INIT(src_hash, rta_pool, RSH_INIT_ORDER);
 }
 
 static inline int u32_cto(unsigned int x) { return ffs(~x) - 1; }
@@ -141,56 +145,22 @@ rte_src_free_id(u32 id)
   src_id_used--;
 }
 
-static inline u32 rte_src_hash(struct proto *p, u32 x, u32 order)
-{ return (x * 2902958171u) >> (32 - order); }
-
-static void
-rte_src_rehash(int step)
-{
-  struct rte_src **old_tab, *src, *src_next;
-  u32 old_size, hash, i;
-
-  old_tab = src_table;
-  old_size = src_hash_size;
-
-  src_hash_order += step;
-  src_hash_size = 1 << src_hash_order;
-  src_table = mb_allocz(rta_pool, src_hash_size * sizeof(struct rte_src *));
-
-  for (i = 0; i < old_size; i++)
-    for (src = old_tab[i]; src; src = src_next)
-    {
-      src_next = src->next;
-      hash = rte_src_hash(src->proto, src->private_id, src_hash_order);
-      src->next = src_table[hash];
-      src_table[hash] = src;
-    }
-
-  mb_free(old_tab);
-}
+HASH_DEFINE_REHASH_FN(RSH, struct rte_src)
 
 struct rte_src *
 rt_find_source(struct proto *p, u32 id)
 {
-  struct rte_src *src;
-  u32 hash = rte_src_hash(p, id, src_hash_order);
-
-  for (src = src_table[hash]; src; src = src->next)
-    if ((src->proto == p) && (src->private_id == id))
-      return src;
-
-  return NULL;
+  return HASH_FIND(src_hash, RSH, p, id);
 }
 
 struct rte_src *
 rt_get_source(struct proto *p, u32 id)
 {
-  struct rte_src *src;
-  u32 hash = rte_src_hash(p, id, src_hash_order);
-
-  for (src = src_table[hash]; src; src = src->next)
-    if ((src->proto == p) && (src->private_id == id))
-      return src;
+  struct rte_src *src = rt_find_source(p, id);
+
+  if (src)
+    return src;
 
   src = sl_alloc(rte_src_slab);
   src->proto = p;
@@ -198,47 +168,26 @@ rt_get_source(struct proto *p, u32 id)
   src->global_id = rte_src_alloc_id();
   src->uc = 0;
 
-  src->next = src_table[hash];
-  src_table[hash] = src;
-
-  src_hash_count++;
-  if ((src_hash_count > src_hash_size) && (src_hash_order < SRC_HASH_ORDER_MAX))
-    rte_src_rehash(1);
+  HASH_INSERT2(src_hash, RSH, rta_pool, src);
 
   return src;
 }
 
-static inline void
-rt_remove_source(struct rte_src **sp)
-{
-  struct rte_src *src = *sp;
-
-  *sp = src->next;
-  rte_src_free_id(src->global_id);
-  sl_free(rte_src_slab, src);
-  src_hash_count--;
-}
-
 void
 rt_prune_sources(void)
 {
-  struct rte_src **sp;
-  int i;
-
-  for (i = 0; i < src_hash_size; i++)
-  {
-    sp = &src_table[i];
-    while (*sp)
-    {
-      if ((*sp)->uc == 0)
-        rt_remove_source(sp);
-      else
-        sp = &(*sp)->next;
-    }
-  }
+  HASH_WALK_FILTER(src_hash, next, src, sp)
+  {
+    if (src->uc == 0)
+    {
+      HASH_DO_REMOVE(src_hash, RSH, sp);
+      rte_src_free_id(src->global_id);
+      sl_free(rte_src_slab, src);
+    }
+  }
+  HASH_WALK_FILTER_END;
 
-  while ((src_hash_count < (src_hash_size / 4)) && (src_hash_order > SRC_HASH_ORDER_MIN))
-    rte_src_rehash(-1);
+  HASH_MAY_RESIZE_DOWN(src_hash, RSH, rta_pool);
 }
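
Note: the RSH_PARAMS tuple is consumed by the REHASH_* selector macros in lib/hash.h. Its first two entries are the low and high fill marks (shrink when count drops below size/2, grow when count exceeds size*2), the next two are the shrink and grow steps in table order, and the last two are the lower and upper bounds on the order (8 and 20 here). A hand expansion of HASH_MAY_STEP_UP(src_hash, RSH, rta_pool) under these definitions, wrapped in an illustrative helper rather than taken from the tree:

/* What HASH_MAY_STEP_UP(src_hash, RSH, rta_pool) reduces to after preprocessing */
static void
rte_src_may_grow_expanded(void)
{
  if ((src_hash.count > (HASH_SIZE(src_hash) * 2)) &&   /* REHASH_HI_MARK picks "*2" */
      (src_hash.order < 20))                            /* REHASH_HI_BOUND picks 20 */
    rte_src_rehash(&src_hash, rta_pool, 1);             /* RSH_REHASH names the callee, REHASH_HI_STEP picks 1 */
}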

@@ -106,13 +106,13 @@
 
 #define HASH_ID_KEY(n) n->loc_id
 #define HASH_ID_NEXT(n) n->next_id
-#define HASH_ID_EQ(a,b) (a == b)
-#define HASH_ID_FN(k) (k)
+#define HASH_ID_EQ(a,b) a == b
+#define HASH_ID_FN(k) k
 
 #define HASH_IP_KEY(n) n->addr
 #define HASH_IP_NEXT(n) n->next_ip
 #define HASH_IP_EQ(a,b) ipa_equal(a,b)
-#define HASH_IP_FN(k) ipa_hash(k)
+#define HASH_IP_FN(k) ipa_hash32(k)
 
 static list bfd_proto_list;
 static list bfd_wait_list;

@@ -58,6 +58,7 @@
  * bgp_reconstruct_4b_attrs()).
  */
 
+
 static byte bgp_mandatory_attrs[] = { BA_ORIGIN, BA_AS_PATH
 #ifndef IPV6
  ,BA_NEXT_HOP
@@ -875,70 +876,40 @@ bgp_free_bucket(struct bgp_proto *p, struct bgp_bucket *buck)
 
 /* Prefix hash table */
 
-static inline u32 prefix_hash(ip_addr prefix, int pxlen, u32 path_id, u32 order)
-{
-  u32 x = ipa_hash(prefix) + pxlen + path_id;
-  return (x * 2902958171u) >> (32 - order);
-}
-
-static inline u32 px_hash_size(struct bgp_proto *p)
-{ return 1 << p->px_hash_order; }
+#define PXH_KEY(n1) n1->n.prefix, n1->n.pxlen, n1->path_id
+#define PXH_NEXT(n) n->next
+#define PXH_EQ(p1,l1,i1,p2,l2,i2) ipa_equal(p1, p2) && l1 == l2 && i1 == i2
+#define PXH_FN(p,l,i) ipa_hash32(p) ^ u32_hash((l << 16) ^ i)
+
+#define PXH_REHASH bgp_pxh_rehash
+#define PXH_PARAMS /8, *2, 2, 2, 8, 20
+
+
+HASH_DEFINE_REHASH_FN(PXH, struct bgp_prefix)
 
 void
 bgp_init_prefix_table(struct bgp_proto *p, u32 order)
 {
-  p->px_hash_count = 0;
-  p->px_hash_order = order;
-  p->prefix_table = mb_allocz(p->p.pool, px_hash_size(p) * sizeof(struct bgp_prefix *));
+  HASH_INIT(p->prefix_hash, p->p.pool, order);
+
   p->prefix_slab = sl_new(p->p.pool, sizeof(struct bgp_prefix));
 }
 
-static void
-bgp_rehash_prefix_table(struct bgp_proto *p, int step)
-{
-  struct bgp_prefix **old_tab, *px, *px_next;
-  u32 old_size, hash, i;
-
-  old_tab = p->prefix_table;
-  old_size = px_hash_size(p);
-
-  p->px_hash_order += step;
-  p->prefix_table = mb_allocz(p->p.pool, px_hash_size(p) * sizeof(struct bgp_prefix *));
-
-  for (i = 0; i < old_size; i++)
-    for (px = old_tab[i]; px; px = px_next)
-    {
-      px_next = px->next;
-      hash = prefix_hash(px->n.prefix, px->n.pxlen, px->path_id, p->px_hash_order);
-      px->next = p->prefix_table[hash];
-      p->prefix_table[hash] = px;
-    }
-
-  mb_free(old_tab);
-}
-
 static struct bgp_prefix *
 bgp_get_prefix(struct bgp_proto *p, ip_addr prefix, int pxlen, u32 path_id)
 {
-  struct bgp_prefix *bp;
-  u32 hash = prefix_hash(prefix, pxlen, path_id, p->px_hash_order);
-
-  for (bp = p->prefix_table[hash]; bp; bp = bp->next)
-    if (bp->n.pxlen == pxlen && ipa_equal(bp->n.prefix, prefix) && bp->path_id == path_id)
-      return bp;
+  struct bgp_prefix *bp = HASH_FIND(p->prefix_hash, PXH, prefix, pxlen, path_id);
+
+  if (bp)
+    return bp;
 
   bp = sl_alloc(p->prefix_slab);
   bp->n.prefix = prefix;
   bp->n.pxlen = pxlen;
   bp->path_id = path_id;
-  bp->next = p->prefix_table[hash];
-  p->prefix_table[hash] = bp;
-
   bp->bucket_node.next = NULL;
 
-  p->px_hash_count++;
-  if ((p->px_hash_count > px_hash_size(p)) && (p->px_hash_order < 18))
-    bgp_rehash_prefix_table(p, 1);
+  HASH_INSERT2(p->prefix_hash, PXH, p->p.pool, bp);
 
   return bp;
 }
@@ -946,19 +917,8 @@ bgp_get_prefix(struct bgp_proto *p, ip_addr prefix, int pxlen, u32 path_id)
 void
 bgp_free_prefix(struct bgp_proto *p, struct bgp_prefix *bp)
 {
-  struct bgp_prefix **bpp;
-  u32 hash = prefix_hash(bp->n.prefix, bp->n.pxlen, bp->path_id, p->px_hash_order);
-
-  for (bpp = &p->prefix_table[hash]; *bpp; *bpp = (*bpp)->next)
-    if (*bpp == bp)
-      break;
-
-  *bpp = bp->next;
+  HASH_REMOVE2(p->prefix_hash, PXH, p->p.pool, bp);
   sl_free(p->prefix_slab, bp);
-
-  p->px_hash_count--;
-  if ((p->px_hash_count < (px_hash_size(p) / 4)) && (p->px_hash_order > 10))
-    bgp_rehash_prefix_table(p, -1);
 }
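
Note: PXH_FN() folds the prefix, its length and the add-path identifier into one 32-bit key, so two copies of the same prefix announced with different path identifiers normally hash differently. A standalone sketch of the same combination for the IPv4 case (the helper name and the harness are illustrative; u32_hash and ipv4_hash32 are the functions introduced earlier in this commit):

#include <stdio.h>
#include <stdint.h>

typedef uint32_t u32;

static inline u32 u32_hash(u32 v) { return v * 2902958171u; }

static inline u32 ipv4_hash32(u32 a)
{
  a ^= a << 16;
  a ^= a << 12;
  return a;
}

/* Same combination as PXH_FN(p,l,i): prefix hash xored with a hash of
   (pxlen, path_id) packed into one word */
static u32 prefix_hash32(u32 prefix, u32 pxlen, u32 path_id)
{
  return ipv4_hash32(prefix) ^ u32_hash((pxlen << 16) ^ path_id);
}

int main(void)
{
  u32 prefix = (203u << 24) | (0u << 16) | (113u << 8);   /* 203.0.113.0/24 */
  printf("path_id 1: 0x%08x\n", (unsigned) prefix_hash32(prefix, 24, 1));
  printf("path_id 2: 0x%08x\n", (unsigned) prefix_hash32(prefix, 24, 2));
  return 0;
}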

@@ -68,6 +68,7 @@
 
 #include "bgp.h"
 
+
 struct linpool *bgp_linpool;  /* Global temporary pool */
 static sock *bgp_listen_sk;   /* Global listening socket */
 static int bgp_counter;       /* Number of protocol instances using the listening socket */

@@ -12,6 +12,7 @@
 #include <stdint.h>
 #include "nest/route.h"
 #include "nest/bfd.h"
+#include "lib/hash.h"
 
 struct linpool;
 struct eattr;
@@ -118,10 +119,8 @@ struct bgp_proto {
   struct timer *startup_timer;  /* Timer used to delay protocol startup due to previous errors (startup_delay) */
   struct bgp_bucket **bucket_hash;  /* Hash table of attribute buckets */
   unsigned int hash_size, hash_count, hash_limit;
-  // struct fib prefix_fib;  /* Prefixes to be sent */
-  struct bgp_prefix **prefix_table;  /* Prefixes to be sent */
+  HASH(struct bgp_prefix) prefix_hash;  /* Prefixes to be sent */
   slab *prefix_slab;  /* Slab holding prefix nodes */
-  u32 px_hash_order, px_hash_count;
   list bucket_queue;  /* Queue of buckets to send */
   struct bgp_bucket *withdraw_bucket;  /* Withdrawn routes */
   unsigned startup_delay;  /* Time to delay protocol startup by due to errors */

@@ -244,7 +244,7 @@ bgp_encode_prefixes(struct bgp_proto *p, byte *w, struct bgp_bucket *buck, unsig
   ip_addr a;
   int bytes;
 
-  while (!EMPTY_LIST(buck->prefixes) && remains >= (1+sizeof(ip_addr)))
+  while (!EMPTY_LIST(buck->prefixes) && (remains >= (5+sizeof(ip_addr))))
   {
     struct bgp_prefix *px = SKIP_BACK(struct bgp_prefix, bucket_node, HEAD(buck->prefixes));
     DBG("\tDequeued route %I/%d\n", px->n.prefix, px->n.pxlen);
@@ -253,6 +253,7 @@ bgp_encode_prefixes(struct bgp_proto *p, byte *w, struct bgp_bucket *buck, unsig
     {
       put_u32(w, px->path_id);
       w += 4;
+      remains -= 4;
     }
 
     *w++ = px->n.pxlen;
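
Note: with add-path, an encoded NLRI entry carries a 4-byte path identifier before the 1-byte prefix length and the address bytes, so the loop now requires 5+sizeof(ip_addr) bytes of space per entry and also charges the 4 path-id bytes against remains. A small worked check of the per-entry budget for the IPv4 case (the harness is illustrative):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
  size_t ip_addr_size = sizeof(uint32_t);   /* IPv4 build: ip_addr is one 32-bit word */

  size_t path_id = 4;              /* put_u32(w, px->path_id); w += 4; remains -= 4; */
  size_t pxlen   = 1;              /* *w++ = px->n.pxlen; */
  size_t addr    = ip_addr_size;   /* worst case: the full address is appended */

  printf("worst-case entry: %zu bytes, loop requires remains >= %zu\n",
         path_id + pxlen + addr, (size_t) 5 + ip_addr_size);
  return 0;
}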