Workaround for stupid callback scheduler.

There is no real callback scheduler and the previous behavior causes
bad things during heavy congestion (such as BGP hold timeouts).

A smart callback scheduler is still missing, but the main loop was
changed so that it first processes all TX callbacks (which are fast
enough, but at most four per socket) together with RX callbacks for
the CLI, and in a second phase it processes one RX callback per
socket, for at most four sockets per iteration (an RX callback can be
slow when there are many protocols, because route redistribution is
done synchronously inside the RX callback). If an event callback is
ready, the second phase is skipped in 90% of iterations (to keep the
CLI responsive during congestion).
Author: Ondrej Zajicek
Date:   2009-10-11 18:56:16 +02:00
Commit: ea89da381f
Parent: 7ea5b00f42
3 changed files with 82 additions and 7 deletions
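For orientation, here is a minimal, self-contained sketch of the two-phase iteration described in the commit message. It only mirrors the behaviour described above: the sock structure, the ready flags, the fixed array standing in for the socket list, and the SK_MAGIC value are invented here for illustration; the real loop (see the io_loop() hunks below) is driven by select() and BIRD's linked socket list.

/* Sketch of one main-loop iteration, simplified: a fixed array instead of
 * BIRD's socket list, ready flags instead of select() results. */

#define SK_MAGIC       100  /* placeholder; sockets >= SK_MAGIC are internal (CLI etc.) */
#define MAX_STEPS        4  /* max tx (and CLI rx) calls per socket per iteration */
#define MAX_RX_STEPS     4  /* max protocol rx calls per iteration, in total */
#define SHORT_LOOP_MAX  10  /* skip phase 2 at most this many times in a row */

typedef struct sock {
  int type;                          /* >= SK_MAGIC for internal sockets */
  int rx_ready, tx_ready;            /* stand-ins for FD_ISSET() results */
  int (*rx_hook)(struct sock *);     /* returns nonzero if more data may follow */
  int (*tx_hook)(struct sock *);     /* returns nonzero while there is more to send */
} sock;

static sock *socks;                  /* stand-in for BIRD's sock_list */
static int nsocks;
static int stored;                   /* phase-2 round-robin position (stored_sock) */
static int short_loops;

static void
one_iteration(int events_pending)
{
  if (!nsocks)
    return;

  /* Phase 1: cheap work - tx callbacks for every ready socket and rx
     callbacks for internal (CLI) sockets, each bounded by MAX_STEPS. */
  for (int i = 0; i < nsocks; i++)
  {
    sock *s = &socks[i];
    int steps = MAX_STEPS;

    if (s->type >= SK_MAGIC && s->rx_ready && s->rx_hook)
      while (steps-- && s->rx_hook(s))
        ;

    steps = MAX_STEPS;
    if (s->tx_ready && s->tx_hook)
      while (steps-- && s->tx_hook(s))
        ;
  }

  /* While events are queued, skip phase 2 most of the time so the CLI
     and internal work stay responsive during congestion. */
  if (events_pending && ++short_loops < SHORT_LOOP_MAX)
    return;
  short_loops = 0;

  /* Phase 2: at most MAX_RX_STEPS slow protocol rx callbacks in total,
     resuming round-robin where the previous iteration stopped. */
  int count = 0, scanned = 0;
  while (scanned < nsocks && count < MAX_RX_STEPS)
  {
    sock *s = &socks[stored];
    stored = (stored + 1) % nsocks;
    scanned++;

    if (s->type < SK_MAGIC && s->rx_ready && s->rx_hook)
    {
      count++;
      s->rx_hook(s);
    }
  }
}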


@@ -51,6 +51,7 @@ void sk_reallocate(sock *);	/* Free and allocate tbuf & rbuf */
 void sk_dump_all(void);
 int sk_set_ttl(sock *s, int ttl);	/* Set TTL for given socket */
 int sk_set_md5_auth(sock *s, ip_addr a, char *passwd);	/* Add or remove security associations for given passive socket */
+int sk_rx_ready(sock *s);
 
 static inline int
 sk_send_buffer_empty(sock *sk)


@@ -431,8 +431,15 @@ bgp_hold_timeout(timer *t)
 {
   struct bgp_conn *conn = t->data;
 
-  DBG("BGP: Hold timeout, closing connection\n");
-  bgp_error(conn, 4, 0, NULL, 0);
+  DBG("BGP: Hold timeout\n");
+
+  /* If there is something in the input queue, we are probably congested
+     and perhaps just have not processed BGP packets in time. */
+
+  if (sk_rx_ready(conn->sk) > 0)
+    bgp_start_timer(conn->hold_timer, 10);
+  else
+    bgp_error(conn, 4, 0, NULL, 0);
 }
 
 static void


@@ -30,12 +30,17 @@
 #include "lib/unix.h"
 #include "lib/sysio.h"
 
-/* Maximum number of calls of rx/tx handler for one socket in one
+/* Maximum number of calls of tx handler for one socket in one
  * select iteration. Should be small enough to not monopolize CPU by
  * one protocol instance.
  */
 #define MAX_STEPS 4
 
+/* Maximum number of calls of rx handler for all sockets in one select
+   iteration. RX callbacks are often much more costly, so we limit
+   this to keep latencies small. */
+#define MAX_RX_STEPS 4
+
 /*
  *	Tracked Files
  */
@@ -493,6 +498,7 @@ tm_format_reltime(char *x, bird_clock_t t)
 
 static list sock_list;
 static struct birdsock *current_sock;
+static struct birdsock *stored_sock;
 static int sock_recalc_fdsets_p;
 
 static inline sock *
@@ -541,6 +547,8 @@ sk_free(resource *r)
     close(s->fd);
   if (s == current_sock)
     current_sock = sk_next(s);
+  if (s == stored_sock)
+    stored_sock = sk_next(s);
   rem_node(&s->n);
   sock_recalc_fdsets_p = 1;
 }
@@ -1071,6 +1079,29 @@ sk_maybe_write(sock *s)
     }
 }
 
+int
+sk_rx_ready(sock *s)
+{
+  fd_set rd, wr;
+  struct timeval timo;
+  int rv;
+
+  FD_ZERO(&rd);
+  FD_ZERO(&wr);
+  FD_SET(s->fd, &rd);
+
+  timo.tv_sec = 0;
+  timo.tv_usec = 0;
+
+ redo:
+  rv = select(s->fd+1, &rd, &wr, NULL, &timo);
+
+  if ((rv < 0) && (errno == EINTR || errno == EAGAIN))
+    goto redo;
+
+  return rv;
+}
+
 /**
  * sk_send - send data to a socket
  * @s: socket
@@ -1239,6 +1270,9 @@ io_init(void)
   srandom((int) now_real);
 }
 
+static int short_loops = 0;
+#define SHORT_LOOP_MAX 10
+
 void
 io_loop(void)
 {
@@ -1317,8 +1351,8 @@ io_loop(void)
       }
 
       /* And finally enter select() to find active sockets */
       hi = select(hi+1, &rd, &wr, NULL, &timo);
 
      if (hi < 0)
      {
        if (errno == EINTR || errno == EAGAIN)
@@ -1327,13 +1361,17 @@ io_loop(void)
      }
      if (hi)
      {
-        current_sock = SKIP_BACK(sock, n, HEAD(sock_list));  /* guaranteed to be non-empty */
+        /* guaranteed to be non-empty */
+        current_sock = SKIP_BACK(sock, n, HEAD(sock_list));
+
        while (current_sock)
        {
          sock *s = current_sock;
          int e;
-          int steps = MAX_STEPS;
-          if (FD_ISSET(s->fd, &rd) && s->rx_hook)
+          int steps;
+
+          steps = MAX_STEPS;
+          if ((s->type >= SK_MAGIC) && FD_ISSET(s->fd, &rd) && s->rx_hook)
            do
            {
              steps--;
@@ -1356,6 +1394,35 @@ io_loop(void)
          current_sock = sk_next(s);
        next: ;
        }
+
+        short_loops++;
+        if (events && (short_loops < SHORT_LOOP_MAX))
+          continue;
+        short_loops = 0;
+
+        int count = 0;
+        current_sock = stored_sock;
+        if (current_sock == NULL)
+          current_sock = SKIP_BACK(sock, n, HEAD(sock_list));
+
+        while (current_sock && count < MAX_RX_STEPS)
+        {
+          sock *s = current_sock;
+          int e;
+          int steps;
+          if ((s->type < SK_MAGIC) && FD_ISSET(s->fd, &rd) && s->rx_hook)
+          {
+            count++;
+            e = sk_read(s);
+            if (s != current_sock)
+              goto next2;
+          }
+
+          current_sock = sk_next(s);
+        next2: ;
+        }
+
+        stored_sock = current_sock;
      }
    }
 }