Add option to select audio source

Pass --audio-source=mic to capture the microphone instead of the device
audio output.
Romain Vimont 2023-05-30 21:29:05 +02:00
parent 360f2fea1e
commit ff5ffc892f
14 changed files with 125 additions and 7 deletions
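
For quick reference, the new flag is used as follows — these commands are the ones added to doc/audio.md by this commit, reproduced here as a usage sketch:

```
# capture the device microphone instead of its audio output
scrcpy --audio-source=mic

# use the device as a dictaphone: no mirroring, no local playback, record to a file
scrcpy --audio-source=mic --no-video --no-audio-playback --record=file.opus
```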

@@ -7,6 +7,7 @@ _scrcpy() {
         --audio-codec=
         --audio-codec-options=
         --audio-encoder=
+        --audio-source=
         --audio-output-buffer=
         -b --video-bit-rate=
         --crop=
@@ -86,6 +87,10 @@ _scrcpy() {
             COMPREPLY=($(compgen -W 'opus aac raw' -- "$cur"))
             return
             ;;
+        --audio-source)
+            COMPREPLY=($(compgen -W 'output mic' -- "$cur"))
+            return
+            ;;
         --lock-video-orientation)
            COMPREPLY=($(compgen -W 'unlocked initial 0 1 2 3' -- "$cur"))
            return

@@ -14,6 +14,7 @@ arguments=(
     '--audio-codec=[Select the audio codec]:codec:(opus aac raw)'
     '--audio-codec-options=[Set a list of comma-separated key\:type=value options for the device audio encoder]'
     '--audio-encoder=[Use a specific MediaCodec audio encoder]'
+    '--audio-source=[Select the audio source]:source:(output mic)'
     '--audio-output-buffer=[Configure the size of the SDL audio output buffer (in milliseconds)]'
     {-b,--video-bit-rate=}'[Encode the video at the given bit-rate]'
     '--crop=[\[width\:height\:x\:y\] Crop the device screen on the server]'

@@ -55,6 +55,12 @@ Use a specific MediaCodec audio encoder (depending on the codec provided by \fB\
 The available encoders can be listed by \-\-list\-encoders.
 
+.TP
+.BI "\-\-audio\-source " source
+Select the audio source (output or mic).
+
+Default is output.
+
 .TP
 .BI "\-\-audio\-output\-buffer " ms
 Configure the size of the SDL audio output buffer (in milliseconds).

@@ -76,6 +76,7 @@ enum {
     OPT_NO_VIDEO,
     OPT_NO_AUDIO_PLAYBACK,
     OPT_NO_VIDEO_PLAYBACK,
+    OPT_AUDIO_SOURCE,
 };
 
 struct sc_option {
@@ -161,6 +162,13 @@ static const struct sc_option options[] = {
                 "codec provided by --audio-codec).\n"
                 "The available encoders can be listed by --list-encoders.",
     },
+    {
+        .longopt_id = OPT_AUDIO_SOURCE,
+        .longopt = "audio-source",
+        .argdesc = "source",
+        .text = "Select the audio source (output or mic).\n"
+                "Default is output.",
+    },
     {
         .longopt_id = OPT_AUDIO_OUTPUT_BUFFER,
         .longopt = "audio-output-buffer",
@@ -1588,6 +1596,22 @@ parse_audio_codec(const char *optarg, enum sc_codec *codec) {
     return false;
 }
 
+static bool
+parse_audio_source(const char *optarg, enum sc_audio_source *source) {
+    if (!strcmp(optarg, "mic")) {
+        *source = SC_AUDIO_SOURCE_MIC;
+        return true;
+    }
+
+    if (!strcmp(optarg, "output")) {
+        *source = SC_AUDIO_SOURCE_OUTPUT;
+        return true;
+    }
+
+    LOGE("Unsupported audio source: %s (expected output or mic)", optarg);
+    return false;
+}
+
 static bool
 parse_args_with_getopt(struct scrcpy_cli_args *args, int argc, char *argv[],
                        const char *optstring, const struct option *longopts) {
@@ -1915,6 +1939,11 @@ parse_args_with_getopt(struct scrcpy_cli_args *args, int argc, char *argv[],
                     return false;
                 }
                 break;
+            case OPT_AUDIO_SOURCE:
+                if (!parse_audio_source(optarg, &opts->audio_source)) {
+                    return false;
+                }
+                break;
             default:
                 // getopt prints the error message on stderr
                 return false;

@@ -14,6 +14,7 @@ const struct scrcpy_options scrcpy_options_default = {
     .log_level = SC_LOG_LEVEL_INFO,
     .video_codec = SC_CODEC_H264,
     .audio_codec = SC_CODEC_OPUS,
+    .audio_source = SC_AUDIO_SOURCE_OUTPUT,
     .record_format = SC_RECORD_FORMAT_AUTO,
     .keyboard_input_mode = SC_KEYBOARD_INPUT_MODE_INJECT,
     .mouse_input_mode = SC_MOUSE_INPUT_MODE_INJECT,

@@ -44,6 +44,11 @@ enum sc_codec {
     SC_CODEC_RAW,
 };
 
+enum sc_audio_source {
+    SC_AUDIO_SOURCE_OUTPUT,
+    SC_AUDIO_SOURCE_MIC,
+};
+
 enum sc_lock_video_orientation {
     SC_LOCK_VIDEO_ORIENTATION_UNLOCKED = -1,
     // lock the current orientation when scrcpy starts
@@ -115,6 +120,7 @@ struct scrcpy_options {
     enum sc_log_level log_level;
     enum sc_codec video_codec;
     enum sc_codec audio_codec;
+    enum sc_audio_source audio_source;
     enum sc_record_format record_format;
     enum sc_keyboard_input_mode keyboard_input_mode;
     enum sc_mouse_input_mode mouse_input_mode;

@@ -334,6 +334,7 @@ scrcpy(struct scrcpy_options *options) {
         .log_level = options->log_level,
         .video_codec = options->video_codec,
         .audio_codec = options->audio_codec,
+        .audio_source = options->audio_source,
         .crop = options->crop,
         .port_range = options->port_range,
         .tunnel_host = options->tunnel_host,

@@ -246,6 +246,10 @@ execute_server(struct sc_server *server,
         ADD_PARAM("audio_codec=%s",
                   sc_server_get_codec_name(params->audio_codec));
     }
+    if (params->audio_source != SC_AUDIO_SOURCE_OUTPUT) {
+        assert(params->audio_source == SC_AUDIO_SOURCE_MIC);
+        ADD_PARAM("audio_source=mic");
+    }
     if (params->max_size) {
         ADD_PARAM("max_size=%" PRIu16, params->max_size);
     }

@@ -26,6 +26,7 @@ struct sc_server_params {
     enum sc_log_level log_level;
     enum sc_codec video_codec;
     enum sc_codec audio_codec;
+    enum sc_audio_source audio_source;
     const char *crop;
     const char *video_codec_options;
     const char *audio_codec_options;

@@ -41,6 +41,24 @@ interesting to add [buffering](#buffering) to minimize glitches:
 scrcpy --no-video --audio-buffer=200
 ```
 
+## Source
+
+By default, the device audio output is forwarded.
+
+It is possible to capture the device microphone instead:
+
+```
+scrcpy --audio-source=mic
+```
+
+For example, to use the device as a dictaphone and record a capture directly on
+the computer:
+
+```
+scrcpy --audio-source=mic --no-video --no-audio-playback --record=file.opus
+```
+
 ## Codec
 
 The audio codec can be selected. The possible values are `opus` (default), `aac`

@@ -10,7 +10,6 @@ import android.media.AudioFormat;
 import android.media.AudioRecord;
 import android.media.AudioTimestamp;
 import android.media.MediaCodec;
-import android.media.MediaRecorder;
 import android.os.Build;
 import android.os.SystemClock;
@@ -18,7 +17,6 @@ import java.nio.ByteBuffer;
 
 public final class AudioCapture {
 
-    public static final int SOURCE = MediaRecorder.AudioSource.REMOTE_SUBMIX;
     public static final int SAMPLE_RATE = 48000;
     public static final int CHANNEL_CONFIG = AudioFormat.CHANNEL_IN_STEREO;
     public static final int CHANNELS = 2;
@@ -26,12 +24,18 @@ public final class AudioCapture {
     public static final int ENCODING = AudioFormat.ENCODING_PCM_16BIT;
     public static final int BYTES_PER_SAMPLE = 2;
 
+    private final int audioSource;
+
     private AudioRecord recorder;
     private final AudioTimestamp timestamp = new AudioTimestamp();
     private long previousPts = 0;
     private long nextPts = 0;
 
+    public AudioCapture(AudioSource audioSource) {
+        this.audioSource = audioSource.value();
+    }
+
     public static int millisToBytes(int millis) {
         return SAMPLE_RATE * CHANNELS * BYTES_PER_SAMPLE * millis / 1000;
     }
@@ -46,13 +50,13 @@ public final class AudioCapture {
 
     @TargetApi(Build.VERSION_CODES.M)
     @SuppressLint({"WrongConstant", "MissingPermission"})
-    private static AudioRecord createAudioRecord() {
+    private static AudioRecord createAudioRecord(int audioSource) {
         AudioRecord.Builder builder = new AudioRecord.Builder();
         if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.S) {
             // On older APIs, Workarounds.fillAppInfo() must be called beforehand
             builder.setContext(FakeContext.get());
         }
-        builder.setAudioSource(SOURCE);
+        builder.setAudioSource(audioSource);
         builder.setAudioFormat(createAudioFormat());
         int minBufferSize = AudioRecord.getMinBufferSize(SAMPLE_RATE, CHANNEL_CONFIG, ENCODING);
         // This buffer size does not impact latency
@@ -100,12 +104,12 @@ public final class AudioCapture {
 
     private void startRecording() {
         try {
-            recorder = createAudioRecord();
+            recorder = createAudioRecord(audioSource);
         } catch (NullPointerException e) {
             // Creating an AudioRecord using an AudioRecord.Builder does not work on Vivo phones:
             // - <https://github.com/Genymobile/scrcpy/issues/3805>
             // - <https://github.com/Genymobile/scrcpy/pull/3862>
-            recorder = Workarounds.createAudioRecord(SOURCE, SAMPLE_RATE, CHANNEL_CONFIG, CHANNELS, CHANNEL_MASK, ENCODING);
+            recorder = Workarounds.createAudioRecord(audioSource, SAMPLE_RATE, CHANNEL_CONFIG, CHANNELS, CHANNEL_MASK, ENCODING);
         }
         recorder.startRecording();
     }

@@ -0,0 +1,30 @@
+package com.genymobile.scrcpy;
+
+import android.media.MediaRecorder;
+
+public enum AudioSource {
+    OUTPUT("output", MediaRecorder.AudioSource.REMOTE_SUBMIX),
+    MIC("mic", MediaRecorder.AudioSource.MIC);
+
+    private final String name;
+    private final int value;
+
+    AudioSource(String name, int value) {
+        this.name = name;
+        this.value = value;
+    }
+
+    int value() {
+        return value;
+    }
+
+    static AudioSource findByName(String name) {
+        for (AudioSource audioSource : AudioSource.values()) {
+            if (name.equals(audioSource.name)) {
+                return audioSource;
+            }
+        }
+
+        return null;
+    }
+}
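
For illustration, a minimal sketch of how this new enum maps the `audio_source` value received from the client to an Android framework constant. It is a standalone example, not code from this commit: the wrapper class `AudioSourceSketch` is hypothetical, and in the actual server this logic lives in `Options.parse()` and `AudioCapture`, as shown in the surrounding diffs.

```java
// Standalone sketch; same package as AudioSource, since findByName() and value() are package-private.
package com.genymobile.scrcpy;

import android.media.MediaRecorder;

public final class AudioSourceSketch { // hypothetical class, for illustration only
    public static void main(String... args) {
        // Resolve the name sent by the client as "audio_source=mic".
        AudioSource source = AudioSource.findByName("mic");
        if (source == null) {
            throw new IllegalArgumentException("Audio source mic not supported");
        }
        // source.value() is what AudioCapture passes to AudioRecord.Builder.setAudioSource(...).
        System.out.println(source.value() == MediaRecorder.AudioSource.MIC); // true
    }
}
```

Keeping the string-to-constant mapping inside the enum keeps the option parsing in Options free of Android framework constants.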

@@ -14,6 +14,7 @@ public class Options {
     private int maxSize;
     private VideoCodec videoCodec = VideoCodec.H264;
     private AudioCodec audioCodec = AudioCodec.OPUS;
+    private AudioSource audioSource = AudioSource.OUTPUT;
     private int videoBitRate = 8000000;
     private int audioBitRate = 128000;
     private int maxFps;
@@ -72,6 +73,10 @@ public class Options {
         return audioCodec;
     }
 
+    public AudioSource getAudioSource() {
+        return audioSource;
+    }
+
     public int getVideoBitRate() {
         return videoBitRate;
     }
@@ -225,6 +230,13 @@ public class Options {
                     }
                     options.audioCodec = audioCodec;
                     break;
+                case "audio_source":
+                    AudioSource audioSource = AudioSource.findByName(value);
+                    if (audioSource == null) {
+                        throw new IllegalArgumentException("Audio source " + value + " not supported");
+                    }
+                    options.audioSource = audioSource;
+                    break;
                 case "max_size":
                     options.maxSize = Integer.parseInt(value) & ~7; // multiple of 8
                     break;

@@ -136,7 +136,7 @@ public final class Server {
 
         if (audio) {
             AudioCodec audioCodec = options.getAudioCodec();
-            AudioCapture audioCapture = new AudioCapture();
+            AudioCapture audioCapture = new AudioCapture(options.getAudioSource());
             Streamer audioStreamer = new Streamer(connection.getAudioFd(), audioCodec, options.getSendCodecMeta(),
                     options.getSendFrameMeta());
             AsyncProcessor audioRecorder;