Rate-limit scheduling of work-events
In general, events are code handling some condition, which is scheduled when such a condition occurs and executed independently of the I/O loop. Work-events are a subgroup of events that are scheduled repeatedly until some (often significant) work is done (e.g. feeding routes to a protocol). All scheduled events are executed during each I/O loop iteration. Separate work-events from regular events into a separate queue and rate-limit their execution to a fixed number per I/O loop iteration. That should prevent excess latency when many work-events are scheduled at one time (e.g. simultaneous reload of many BGP sessions).
This commit is contained in:
parent
9cf3d53311
commit
7be3af7fa6
4 changed files with 66 additions and 6 deletions
56
lib/event.c
56
lib/event.c
|
@ -23,6 +23,7 @@
|
|||
#include "lib/event.h"
|
||||
|
||||
event_list global_event_list;
|
||||
event_list global_work_list;
|
||||
|
||||
inline void
|
||||
ev_postpone(event *e)
|
||||
|
@ -114,6 +115,22 @@ ev_schedule(event *e)
|
|||
ev_enqueue(&global_event_list, e);
|
||||
}
|
||||
|
||||
/**
|
||||
* ev_schedule_work - schedule a work-event.
|
||||
* @e: an event
|
||||
*
|
||||
* This function schedules an event by enqueueing it to a system-wide work-event
|
||||
* list which is run by the platform dependent code whenever appropriate. This
|
||||
* is designated for work-events instead of regular events. They are executed
|
||||
* less often in order to not clog I/O loop.
|
||||
*/
|
||||
void
|
||||
ev_schedule_work(event *e)
|
||||
{
|
||||
if (!ev_active(e))
|
||||
add_tail(&global_work_list, &e->n);
|
||||
}
|
||||
|
||||
void io_log_event(void *hook, void *data);
|
||||
|
||||
/**
|
||||
|
@ -136,10 +153,47 @@ ev_run_list(event_list *l)
|
|||
event *e = SKIP_BACK(event, n, n);
|
||||
|
||||
/* This is ugly hack, we want to log just events executed from the main I/O loop */
|
||||
if (l == &global_event_list)
|
||||
if ((l == &global_event_list) || (l == &global_work_list))
|
||||
io_log_event(e->hook, e->data);
|
||||
|
||||
ev_run(e);
|
||||
}
|
||||
|
||||
return !EMPTY_LIST(*l);
|
||||
}
|
||||
|
||||
int
|
||||
ev_run_list_limited(event_list *l, uint limit)
|
||||
{
|
||||
node *n;
|
||||
list tmp_list;
|
||||
|
||||
init_list(&tmp_list);
|
||||
add_tail_list(&tmp_list, l);
|
||||
init_list(l);
|
||||
|
||||
WALK_LIST_FIRST(n, tmp_list)
|
||||
{
|
||||
event *e = SKIP_BACK(event, n, n);
|
||||
|
||||
if (!limit)
|
||||
break;
|
||||
|
||||
/* This is ugly hack, we want to log just events executed from the main I/O loop */
|
||||
if ((l == &global_event_list) || (l == &global_work_list))
|
||||
io_log_event(e->hook, e->data);
|
||||
|
||||
ev_run(e);
|
||||
limit--;
|
||||
}
|
||||
|
||||
if (!EMPTY_LIST(tmp_list))
|
||||
{
|
||||
/* Attach new items after the unprocessed old items */
|
||||
add_tail_list(&tmp_list, l);
|
||||
init_list(l);
|
||||
add_tail_list(l, &tmp_list);
|
||||
}
|
||||
|
||||
return !EMPTY_LIST(*l);
|
||||
}
|
||||
|
|
|
@ -21,14 +21,17 @@ typedef struct event {
|
|||
typedef list event_list;
|
||||
|
||||
extern event_list global_event_list;
|
||||
extern event_list global_work_list;
|
||||
|
||||
event *ev_new(pool *);
|
||||
void ev_run(event *);
|
||||
#define ev_init_list(el) init_list(el)
|
||||
void ev_enqueue(event_list *, event *);
|
||||
void ev_schedule(event *);
|
||||
void ev_schedule_work(event *);
|
||||
void ev_postpone(event *);
|
||||
int ev_run_list(event_list *);
|
||||
int ev_run_list_limited(event_list *, uint);
|
||||
|
||||
static inline int
|
||||
ev_active(event *e)
|
||||
|
|
10
nest/proto.c
10
nest/proto.c
|
@ -252,7 +252,7 @@ channel_schedule_feed(struct channel *c, int initial)
|
|||
c->export_state = ES_FEEDING;
|
||||
c->refeeding = !initial;
|
||||
|
||||
ev_schedule(c->feed_event);
|
||||
ev_schedule_work(c->feed_event);
|
||||
}
|
||||
|
||||
static void
|
||||
|
@ -275,7 +275,7 @@ channel_feed_loop(void *ptr)
|
|||
// DBG("Feeding protocol %s continued\n", p->name);
|
||||
if (!rt_feed_channel(c))
|
||||
{
|
||||
ev_schedule(c->feed_event);
|
||||
ev_schedule_work(c->feed_event);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -291,7 +291,7 @@ channel_feed_loop(void *ptr)
|
|||
|
||||
/* Continue in feed - it will process routing table again from beginning */
|
||||
c->refeed_count = 0;
|
||||
ev_schedule(c->feed_event);
|
||||
ev_schedule_work(c->feed_event);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -471,7 +471,7 @@ channel_schedule_reload(struct channel *c)
|
|||
ASSERT(c->channel_state == CS_UP);
|
||||
|
||||
rt_reload_channel_abort(c);
|
||||
ev_schedule(c->reload_event);
|
||||
ev_schedule_work(c->reload_event);
|
||||
}
|
||||
|
||||
static void
|
||||
|
@ -485,7 +485,7 @@ channel_reload_loop(void *ptr)
|
|||
|
||||
if (!rt_reload_channel(c))
|
||||
{
|
||||
ev_schedule(c->reload_event);
|
||||
ev_schedule_work(c->reload_event);
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
|
@ -2161,6 +2161,7 @@ io_init(void)
|
|||
{
|
||||
init_list(&sock_list);
|
||||
init_list(&global_event_list);
|
||||
init_list(&global_work_list);
|
||||
krt_io_init();
|
||||
// XXX init_times();
|
||||
// XXX update_times();
|
||||
|
@ -2172,6 +2173,7 @@ io_init(void)
|
|||
|
||||
static int short_loops = 0;
|
||||
#define SHORT_LOOP_MAX 10
|
||||
#define WORK_EVENTS_MAX 10
|
||||
|
||||
void
|
||||
io_loop(void)
|
||||
|
@ -2189,6 +2191,7 @@ io_loop(void)
|
|||
{
|
||||
times_update(&main_timeloop);
|
||||
events = ev_run_list(&global_event_list);
|
||||
events = ev_run_list_limited(&global_work_list, WORK_EVENTS_MAX) || events;
|
||||
timers_fire(&main_timeloop);
|
||||
io_close_event();
|
||||
|
||||
|
|
Loading…
Reference in a new issue