Wrap receiver state into separate struct

For readability, wrap the state of the receiver in a separate struct
receiver_state.
Romain Vimont 2018-11-11 12:36:08 +01:00
parent e562837c0b
commit 70579dc709
2 changed files with 42 additions and 24 deletions
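
Concretely, the three receiver-related fields (next_pts, pts, remaining) move out of struct decoder into a nested struct receiver_state, and remaining changes from int to size_t. A minimal sketch of the resulting layout, reproduced from the decoder.h hunk below with the members not touched by this commit elided:

// Layout after this commit (see the decoder.h hunk below);
// unchanged struct decoder members (video_socket, thread, mutex,
// recorder, ...) are elided here.
#include <stddef.h>
#include <stdint.h>

struct decoder {
    // ... unchanged members ...
    struct receiver_state {
        uint64_t next_pts;
        uint64_t pts;
        size_t remaining; // remaining bytes to receive for the current frame
    } receiver_state;
};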

decoder.c

@@ -22,36 +22,49 @@
 static int read_packet(void *opaque, uint8_t *buf, int buf_size) {
     struct decoder *decoder = opaque;
+    struct receiver_state *state = &decoder->receiver_state;
+
+    // The video stream contains raw packets, without time information. When we
+    // record, we retrieve the timestamps separately, from a "meta" header
+    // added by the server before each raw packet.
+    //
+    // The "meta" header length is 12 bytes:
+    // [. . . . . . . .|. . . .]. . . . . . . . . . . . . . . ...
+    //  <-------------> <-----> <-----------------------------...
+    //        PTS        packet        raw packet
+    //                    size
+    //
+    // It is followed by <packet_size> bytes containing the packet/frame.
+
+    if (!state->remaining) {
+        // the next PTS is now for the current frame
+        state->pts = state->next_pts;
+
 #define HEADER_SIZE 12
-    uint8_t header[HEADER_SIZE];
-    int remaining;
-    int ret;
-    remaining = decoder->remaining;
-    if (remaining == 0) {
-        // the previous PTS read is now for the current frame
-        decoder->pts = decoder->next_pts;
-        ret = net_recv_all(decoder->video_socket, header, HEADER_SIZE);
-        if (ret <= 0)
+        uint8_t header[HEADER_SIZE];
+        ssize_t ret = net_recv_all(decoder->video_socket, header, HEADER_SIZE);
+        if (ret <= 0) {
             return ret;
+        }
         // no partial read (net_recv_all())
         SDL_assert_release(ret == HEADER_SIZE);
-        // read the PTS for the next frame
-        decoder->next_pts = buffer_read64be(header);
-        remaining = buffer_read32be(&header[8]);
+        state->next_pts = buffer_read64be(header);
+        state->remaining = buffer_read32be(&header[8]);
     }
-    if (buf_size > remaining)
-        buf_size = remaining;
-    ret = net_recv(decoder->video_socket, buf, buf_size);
-    if (ret <= 0)
+
+    SDL_assert(state->remaining);
+
+    if (buf_size > state->remaining)
+        buf_size = state->remaining;
+
+    ssize_t ret = net_recv(decoder->video_socket, buf, buf_size);
+    if (ret <= 0) {
         return ret;
-    remaining -= ret;
-    decoder->remaining = remaining;
+    }
+
+    SDL_assert(state->remaining >= ret);
+    state->remaining -= ret;
+
     return ret;
 }
@@ -107,6 +120,9 @@ static int run_decoder(void *data) {
         goto run_finally_free_format_ctx;
     }
 
+    // initialize the receiver state
+    decoder->receiver_state.remaining = 0;
+
     AVIOContext *avio_ctx = avio_alloc_context(buffer, BUFSIZE, 0, decoder, read_packet, NULL, NULL);
     if (!avio_ctx) {
         LOGC("Could not allocate avio context");
@@ -170,8 +186,8 @@ static int run_decoder(void *data) {
 #endif
         if (decoder->recorder) {
-            packet.pts = decoder->pts;
-            packet.dts = decoder->pts;
+            packet.pts = decoder->receiver_state.pts;
+            packet.dts = decoder->receiver_state.pts;
 
             // no need to rescale with av_packet_rescale_ts(), the timestamps
             // are in microseconds both in input and output

decoder.h

@@ -15,9 +15,11 @@ struct decoder {
     SDL_Thread *thread;
     SDL_mutex *mutex;
     struct recorder *recorder;
-    uint64_t next_pts;
-    uint64_t pts;
-    int remaining;
+    struct receiver_state {
+        uint64_t next_pts;
+        uint64_t pts;
+        size_t remaining; // remaining bytes to receive for the current frame
+    } receiver_state;
 };
 
 void decoder_init(struct decoder *decoder, struct frames *frames,
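
The comment block added to read_packet() documents the framing used when recording: before each raw packet, the server sends a 12-byte "meta" header made of a 64-bit big-endian PTS followed by a 32-bit big-endian packet size. A self-contained sketch of decoding such a header, with local big-endian helpers standing in for the buffer_read64be()/buffer_read32be() utilities used in the diff:

// Sketch: parse the 12-byte "meta" header described in read_packet():
// 8-byte big-endian PTS, then 4-byte big-endian packet size.
// read64be()/read32be() are local stand-ins for scrcpy's buffer helpers.
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t read64be(const uint8_t *buf) {
    uint64_t value = 0;
    for (int i = 0; i < 8; i++) {
        value = (value << 8) | buf[i];
    }
    return value;
}

static uint32_t read32be(const uint8_t *buf) {
    return ((uint32_t) buf[0] << 24) | ((uint32_t) buf[1] << 16)
         | ((uint32_t) buf[2] << 8)  |  (uint32_t) buf[3];
}

int main(void) {
    // Example meta header: PTS = 100000 (microseconds), packet size = 2560.
    const uint8_t header[12] = {
        0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x86, 0xa0, // PTS (64-bit BE)
        0x00, 0x00, 0x0a, 0x00,                         // packet size (32-bit BE)
    };

    uint64_t pts = read64be(header);              // becomes state->next_pts
    uint32_t packet_size = read32be(&header[8]);  // becomes state->remaining

    printf("pts=%" PRIu64 " packet_size=%" PRIu32 "\n", pts, packet_size);
    return 0;
}

In read_packet(), the packet size is stored in state->remaining and then consumed across one or more net_recv() calls until it reaches zero, at which point the next meta header is read.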