#include "decoder.h"

#include <libavformat/avformat.h>
#include <libavutil/time.h>
#include <SDL2/SDL_assert.h>
#include <SDL2/SDL_events.h>
#include <SDL2/SDL_mutex.h>
#include <SDL2/SDL_thread.h>
#include <unistd.h>

#include "config.h"
#include "buffer_util.h"
#include "events.h"
#include "frames.h"
#include "lock_util.h"
#include "log.h"
#include "recorder.h"

#define BUFSIZE 0x10000

#define HEADER_SIZE 12
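
// FIFO of frame metadata: one entry per raw packet, holding the PTS read from
// the "meta" header until the corresponding packet is written by the recorder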
static struct frame_meta *frame_meta_new(uint64_t pts) {
    struct frame_meta *meta = malloc(sizeof(*meta));
    if (!meta) {
        return meta;
    }
    meta->pts = pts;
    meta->next = NULL;
    return meta;
}

static void frame_meta_delete(struct frame_meta *frame_meta) {
    free(frame_meta);
}

static SDL_bool receiver_state_push_meta(struct receiver_state *state,
                                         uint64_t pts) {
    struct frame_meta *frame_meta = frame_meta_new(pts);
    if (!frame_meta) {
        return SDL_FALSE;
    }

    // append to the list
    // (iterate to find the last item, in practice the list should be tiny)
    struct frame_meta **p = &state->frame_meta_queue;
    while (*p) {
        p = &(*p)->next;
    }
    *p = frame_meta;
    return SDL_TRUE;
}

static uint64_t receiver_state_take_meta(struct receiver_state *state) {
    struct frame_meta *frame_meta = state->frame_meta_queue; // first item
    SDL_assert(frame_meta); // must not be empty
    uint64_t pts = frame_meta->pts;
    state->frame_meta_queue = frame_meta->next; // remove the item
    frame_meta_delete(frame_meta);
    return pts;
}

static int read_packet_with_meta(void *opaque, uint8_t *buf, int buf_size) {
    struct decoder *decoder = opaque;
    struct receiver_state *state = &decoder->receiver_state;

    // The video stream contains raw packets, without time information. When we
    // record, we retrieve the timestamps separately, from a "meta" header
    // added by the server before each raw packet.
    //
    // The "meta" header length is 12 bytes:
    // [. . . . . . . .|. . . .]. . . . . . . . . . . . . . . ...
    //  <-------------> <-----> <-----------------------------...
    //        PTS        packet        raw packet
    //                    size
    //
    // It is followed by <packet_size> bytes containing the packet/frame.

    if (!state->remaining) {
        uint8_t header[HEADER_SIZE];
        ssize_t ret = net_recv_all(decoder->video_socket, header, HEADER_SIZE);
        if (ret <= 0) {
            return ret;
        }
        // no partial read (net_recv_all())
        SDL_assert_release(ret == HEADER_SIZE);

        uint64_t pts = buffer_read64be(header);
        state->remaining = buffer_read32be(&header[8]);

        if (!receiver_state_push_meta(state, pts)) {
            LOGE("Could not store PTS for recording");
            // we cannot save the PTS, the recording would be broken
            return -1;
        }
    }

    SDL_assert(state->remaining);

    if (buf_size > state->remaining)
        buf_size = state->remaining;

    ssize_t ret = net_recv(decoder->video_socket, buf, buf_size);
    if (ret <= 0) {
        return ret;
    }

    SDL_assert(state->remaining >= ret);
    state->remaining -= ret;

    return ret;
}
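
// without recording, there is no "meta" header: forward the raw stream as-is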
static int read_raw_packet(void *opaque, uint8_t *buf, int buf_size) {
    struct decoder *decoder = opaque;
    return net_recv(decoder->video_socket, buf, buf_size);
}

// set the decoded frame as ready for rendering, and notify
static void push_frame(struct decoder *decoder) {
    SDL_bool previous_frame_consumed = frames_offer_decoded_frame(decoder->frames);
    if (!previous_frame_consumed) {
        // the previous EVENT_NEW_FRAME will consume this frame
        return;
    }
    static SDL_Event new_frame_event = {
        .type = EVENT_NEW_FRAME,
    };
    SDL_PushEvent(&new_frame_event);
}
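
// notify the event loop that the decoder thread has terminated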
static void notify_stopped(void) {
    SDL_Event stop_event;
    stop_event.type = EVENT_DECODER_STOPPED;
    SDL_PushEvent(&stop_event);
}
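
// decoder thread: read the H.264 stream from the video socket, decode it into
// frames for rendering, and optionally pass each packet to the recorder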
static int run_decoder(void *data) {
    struct decoder *decoder = data;

    AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_H264);
    if (!codec) {
        LOGE("H.264 decoder not found");
        goto run_end;
    }

    AVCodecContext *codec_ctx = avcodec_alloc_context3(codec);
    if (!codec_ctx) {
        LOGC("Could not allocate decoder context");
        goto run_end;
    }

    if (avcodec_open2(codec_ctx, codec, NULL) < 0) {
        LOGE("Could not open H.264 codec");
        goto run_finally_free_codec_ctx;
    }

    AVFormatContext *format_ctx = avformat_alloc_context();
    if (!format_ctx) {
        LOGC("Could not allocate format context");
        goto run_finally_close_codec;
    }

    unsigned char *buffer = av_malloc(BUFSIZE);
    if (!buffer) {
        LOGC("Could not allocate buffer");
        goto run_finally_free_format_ctx;
    }

    // initialize the receiver state
    decoder->receiver_state.frame_meta_queue = NULL;
    decoder->receiver_state.remaining = 0;

    // if recording is enabled, a "header" is sent between raw packets
    int (*read_packet)(void *, uint8_t *, int) =
            decoder->recorder ? read_packet_with_meta : read_raw_packet;
    AVIOContext *avio_ctx = avio_alloc_context(buffer, BUFSIZE, 0, decoder,
                                               read_packet, NULL, NULL);
    if (!avio_ctx) {
        LOGC("Could not allocate avio context");
        // avformat_open_input takes ownership of 'buffer'
        // so only free the buffer before avformat_open_input()
        av_free(buffer);
        goto run_finally_free_format_ctx;
    }

    format_ctx->pb = avio_ctx;

    if (avformat_open_input(&format_ctx, NULL, NULL, NULL) < 0) {
        LOGE("Could not open video stream");
        goto run_finally_free_avio_ctx;
    }

    if (decoder->recorder &&
            !recorder_open(decoder->recorder, codec)) {
        LOGE("Could not open recorder");
        goto run_finally_close_input;
    }

    AVPacket packet;
    av_init_packet(&packet);
    packet.data = NULL;
    packet.size = 0;
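
    // read and decode the stream, packet by packet, until end of stream or
    // error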
    while (!av_read_frame(format_ctx, &packet)) {
        // the new decoding/encoding API has been introduced by:
        // <http://git.videolan.org/?p=ffmpeg.git;a=commitdiff;h=7fc329e2dd6226dfecaa4a1d7adf353bf2773726>
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57, 37, 0)
        int ret;
        if ((ret = avcodec_send_packet(codec_ctx, &packet)) < 0) {
            LOGE("Could not send video packet: %d", ret);
            goto run_quit;
        }
        ret = avcodec_receive_frame(codec_ctx, decoder->frames->decoding_frame);
        if (!ret) {
            // a frame was received
            push_frame(decoder);
        } else if (ret != AVERROR(EAGAIN)) {
            LOGE("Could not receive video frame: %d", ret);
            av_packet_unref(&packet);
            goto run_quit;
        }
#else
        while (packet.size > 0) {
            int got_picture;
            int len = avcodec_decode_video2(codec_ctx,
                                            decoder->frames->decoding_frame,
                                            &got_picture,
                                            &packet);
            if (len < 0) {
                LOGE("Could not decode video packet: %d", len);
                av_packet_unref(&packet);
                goto run_quit;
            }
            if (got_picture) {
                push_frame(decoder);
            }
            packet.size -= len;
            packet.data += len;
        }
#endif

        if (decoder->recorder) {
            // we retrieve the PTS in the order they were received, so they
            // will be assigned to the correct frame
            uint64_t pts = receiver_state_take_meta(&decoder->receiver_state);
            packet.pts = pts;
            packet.dts = pts;

            // no need to rescale with av_packet_rescale_ts(), the timestamps
            // are in microseconds both in input and output
            if (!recorder_write(decoder->recorder, &packet)) {
                LOGE("Could not write frame to output file");
                av_packet_unref(&packet);
                goto run_quit;
            }
        }

        av_packet_unref(&packet);

        if (avio_ctx->eof_reached) {
            break;
        }
    }

    LOGD("End of frames");

run_quit:
    if (decoder->recorder) {
        recorder_close(decoder->recorder);
    }
run_finally_close_input:
    avformat_close_input(&format_ctx);
run_finally_free_avio_ctx:
    av_freep(&avio_ctx);
run_finally_free_format_ctx:
    avformat_free_context(format_ctx);
run_finally_close_codec:
    avcodec_close(codec_ctx);
run_finally_free_codec_ctx:
    avcodec_free_context(&codec_ctx);
    notify_stopped();
run_end:
    return 0;
}
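
// only store the parameters; the decoder thread is started by decoder_start()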
void decoder_init(struct decoder *decoder, struct frames *frames,
                  socket_t video_socket, struct recorder *recorder) {
    decoder->frames = frames;
    decoder->video_socket = video_socket;
    decoder->recorder = recorder;
}

SDL_bool decoder_start(struct decoder *decoder) {
    LOGD("Starting decoder thread");

    decoder->thread = SDL_CreateThread(run_decoder, "video_decoder", decoder);
    if (!decoder->thread) {
        LOGC("Could not start decoder thread");
        return SDL_FALSE;
    }
    return SDL_TRUE;
}
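
// interrupt the frames synchronization, so that the decoder thread can
// terminate if it is blocked waiting for the previous frame to be consumed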
void decoder_stop(struct decoder *decoder) {
    frames_stop(decoder->frames);
}

void decoder_join(struct decoder *decoder) {
    SDL_WaitThread(decoder->thread, NULL);
}