297 changes: 297 additions & 0 deletions mythtv/libs/libmyth/audio/audiooutputaudiotrack.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,297 @@

#include "config.h"

using namespace std;

#include <QAndroidJniObject>
#include <QAndroidJniEnvironment>
#include <android/log.h>

#include "mythlogging.h"
#include "audiooutputaudiotrack.h"

#define CHANNELS_MIN 1
#define CHANNELS_MAX 8

#define ANDROID_EXCEPTION_CHECK \
if (env->ExceptionCheck()) { \
env->ExceptionDescribe(); \
env->ExceptionClear(); \
exception=true; \
} else \
exception=false;
// clear exception without checking
#define ANDROID_EXCEPTION_CLEAR \
if (env->ExceptionCheck()) { \
env->ExceptionDescribe(); \
env->ExceptionClear(); \
}

#define LOC QString("AudioTrack: ")

// Constants from Android Java API
// class android.media.AudioFormat
#define AF_CHANNEL_OUT_MONO 4
#define AF_CHANNEL_OUT_STEREO 12
#define AF_CHANNEL_OUT_SURROUND 1052
#define AF_ENCODING_AC3 5
#define AF_ENCODING_E_AC3 6
#define AF_ENCODING_DTS 7
#define AF_ENCODING_DOLBY_TRUEHD 14
#define AF_ENCODING_PCM_8BIT 3
#define AF_ENCODING_PCM_16BIT 2
#define AF_ENCODING_PCM_FLOAT 4

// for debugging
#include <android/log.h>

// Construct the AudioTrack-based output. Settings are recorded first;
// the device itself is only opened (via Reconfigure) when the caller
// asked for immediate initialisation.
AudioOutputAudioTrack::AudioOutputAudioTrack(const AudioSettings &settings) :
    AudioOutputBase(settings)
{
    InitSettings(settings);
    if (settings.m_init)
    {
        Reconfigure(settings);
    }
}

// Tear down in the usual order: stop the audio thread first, then
// release the Java AudioTrack object.
AudioOutputAudioTrack::~AudioOutputAudioTrack()
{
    KillAudio();
    CloseDevice();
}

// Create the Java org.mythtv.audio.AudioOutputAudioTrack object with an
// encoding, sample rate, buffer size and channel count derived from the
// current audio settings. Returns false if the codec/format is
// unsupported or the Java constructor threw an exception.
bool AudioOutputAudioTrack::OpenDevice()
{
    bool exception=false;
    QAndroidJniEnvironment env;
    jint encoding = 0;
    jint sampleRate = m_samplerate;

    // m_bitsPer10Frames = output bits per 10 frames
    m_bitsPer10Frames = m_output_bytes_per_frame * 80;

    // For passthru/encoded streams use the actual source bitrate instead.
    if ((m_passthru || m_enc) && m_source_bitrate > 0)
        m_bitsPer10Frames = m_source_bitrate * 10 / m_source_samplerate;

    // Fragment sized for 50 milliseconds of audio.
    m_fragment_size = m_bitsPer10Frames * m_source_samplerate * 5 / 8000;

    if (m_fragment_size < 1536)
        m_fragment_size = 1536;

    if (m_passthru || m_enc)
    {
        // Map the ffmpeg codec id onto the matching
        // android.media.AudioFormat encoding constant.
        switch (m_codec)
        {
            case AV_CODEC_ID_AC3:
                encoding = AF_ENCODING_AC3;
                break;
            case AV_CODEC_ID_DTS:
                encoding = AF_ENCODING_DTS;
                break;
            case AV_CODEC_ID_EAC3:
                encoding = AF_ENCODING_E_AC3;
                break;
            case AV_CODEC_ID_TRUEHD:
                encoding = AF_ENCODING_DOLBY_TRUEHD;
                break;
            default:
                LOG(VB_GENERAL, LOG_ERR, LOC + __func__ + QString(" No support for audio passthru encoding %1").arg(m_codec));
                return false;
        }
    }
    else
    {
        switch (m_output_format)
        {
            case FORMAT_U8:
                // This could be used to get the value from java instead
                // of hard-coding these constants in our header file.
                // encoding = QAndroidJniObject::getStaticField<jint>
                //     ("android.media.AudioFormat","ENCODING_PCM_8BIT");
                encoding = AF_ENCODING_PCM_8BIT;
                break;
            case FORMAT_S16:
                encoding = AF_ENCODING_PCM_16BIT;
                break;
            case FORMAT_FLT:
                encoding = AF_ENCODING_PCM_FLOAT;
                break;
            default:
                LOG(VB_GENERAL, LOG_ERR, LOC + __func__ + QString(" No support for audio format %1").arg(m_output_format));
                return false;
        }
    }

    jint minBufferSize = m_fragment_size * 4;
    m_soundcard_buffer_size = minBufferSize;
    jint channels = m_channels;

    m_audioTrack = new QAndroidJniObject("org/mythtv/audio/AudioOutputAudioTrack",
        "(IIII)V", encoding, sampleRate, minBufferSize, channels);
    ANDROID_EXCEPTION_CHECK

    if (exception)
    {
        LOG(VB_GENERAL, LOG_ERR, LOC + __func__ + QString(" Java Exception when creating AudioTrack"));
        // Delete the wrapper before clearing the pointer so the
        // QAndroidJniObject allocated above is not leaked on failure.
        delete m_audioTrack;
        m_audioTrack = nullptr;
        return false;
    }
    if (!m_passthru && !m_enc)
    {
        // Tell the Java side how to convert byte counts to durations.
        jint bitsPer10Frames = m_bitsPer10Frames;
        m_audioTrack->callMethod<void>("setBitsPer10Frames","(I)V",bitsPer10Frames);
    }
    return true;
}

// Release the Java AudioTrack resources and destroy the JNI wrapper.
// Safe to call when the device was never opened.
void AudioOutputAudioTrack::CloseDevice()
{
    QAndroidJniEnvironment env;
    if (!m_audioTrack)
        return;

    // Let the Java side free its AudioTrack before we drop the wrapper.
    m_audioTrack->callMethod<void>("release");
    ANDROID_EXCEPTION_CLEAR
    delete m_audioTrack;
    m_audioTrack = nullptr;
}

// Probe the static android.media.AudioTrack API to discover which
// sample rates and PCM formats the device accepts. Caller owns the
// returned AudioOutputSettings.
AudioOutputSettings* AudioOutputAudioTrack::GetOutputSettings(bool /* digital */)
{
    bool exception=false;
    QAndroidJniEnvironment env;
    jint minBufSize = 0;

    AudioOutputSettings *settings = new AudioOutputSettings();

    int lastGoodRate = 0;
    while (int rate = settings->GetNextRate())
    {
        // A positive getMinBufferSize result means the rate is usable.
        // See https://stackoverflow.com/questions/8043387/android-audiorecord-supported-sampling-rates/22317382
        minBufSize = QAndroidJniObject::callStaticMethod<jint>
            ("android/media/AudioTrack", "getMinBufferSize", "(III)I",
             rate, AF_CHANNEL_OUT_MONO, AF_ENCODING_PCM_16BIT);
        ANDROID_EXCEPTION_CHECK
        if (minBufSize > 0 && !exception)
        {
            settings->AddSupportedRate(rate);
            // keep one known-good rate for the format probes below
            lastGoodRate = rate;
        }
    }

    // The same getMinBufferSize trick detects supported sample formats.
    minBufSize = QAndroidJniObject::callStaticMethod<jint>
        ("android/media/AudioTrack", "getMinBufferSize", "(III)I",
         lastGoodRate, AF_CHANNEL_OUT_MONO, AF_ENCODING_PCM_8BIT);
    ANDROID_EXCEPTION_CHECK
    if (minBufSize > 0 && !exception)
        settings->AddSupportedFormat(FORMAT_U8);

    // 16 bit PCM is always supported.
    settings->AddSupportedFormat(FORMAT_S16);

    minBufSize = QAndroidJniObject::callStaticMethod<jint>
        ("android/media/AudioTrack", "getMinBufferSize", "(III)I",
         lastGoodRate, AF_CHANNEL_OUT_MONO, AF_ENCODING_PCM_FLOAT);
    ANDROID_EXCEPTION_CHECK
    if (minBufSize > 0 && !exception)
        settings->AddSupportedFormat(FORMAT_FLT);

    // Advertise every channel count in the supported range.
    for (uint channels = CHANNELS_MIN; channels <= CHANNELS_MAX; channels++)
        settings->AddSupportedChannels(channels);

    settings->setPassthrough(0);

    return settings;
}

// Hand one block of audio (PCM or passthru bitstream) to the Java
// AudioTrack wrapper for playback. When actually paused, forwards the
// pause state instead of writing.
void AudioOutputAudioTrack::WriteAudio(unsigned char* aubuf, int size)
{
    bool exception=false;
    QAndroidJniEnvironment env;
    if (m_actually_paused)
    {
        // m_audioTrack is null if OpenDevice failed; the write path
        // below already guards against that, so guard here too.
        if (m_audioTrack)
        {
            jboolean param = true;
            m_audioTrack->callMethod<void>("pause","(Z)V",param);
            ANDROID_EXCEPTION_CLEAR
        }
        return;
    }
    // copy the buffer into a java byte array
    jbyteArray arr = env->NewByteArray(size);
    env->SetByteArrayRegion(arr, 0, size, reinterpret_cast<jbyte*>(aubuf));
    jint ret = -99;
    if (m_audioTrack)
    {
        ret = m_audioTrack->callMethod<jint>("write","([BI)I", arr, size);
        ANDROID_EXCEPTION_CHECK
    }
    // always release the local reference to avoid leaking JNI handles
    env->DeleteLocalRef(arr);
    if (ret != size || exception)
        LOG(VB_GENERAL, LOG_ERR, LOC + __func__
            + QString(" Audio Write failed, size %1 return %2 exception %3")
            .arg(size).arg(ret).arg(exception));

    LOG(VB_AUDIO | VB_TIMESTAMP, LOG_INFO, LOC + __func__
        + QString(" WriteAudio size=%1 written=%2")
        .arg(size).arg(ret));
}


// Estimate how many bytes are queued on the "soundcard" side: bytes
// buffered in the Java layer plus the device latency converted to bytes.
int AudioOutputAudioTrack::GetBufferedOnSoundcard(void) const
{
    bool exception=false;
    QAndroidJniEnvironment env;
    if (!m_audioTrack)
        return 0;

    // This may return a negative value, because there is data already
    // played that is still in the "Audio circular buffer".
    int buffered = m_audioTrack->callMethod<jint>("getBufferedBytes");
    ANDROID_EXCEPTION_CHECK
    if (exception)
        buffered = 0;

    // Latency in milliseconds reported by the Java side.
    int latencyMs = m_audioTrack->callMethod<jint>("getLatencyViaHeadPosition");
    ANDROID_EXCEPTION_CHECK
    if (exception)
        latencyMs = 0;

    // ms -> frames (samplerate/1000) -> bytes (bitsPer10Frames/80).
    buffered += latencyMs * m_samplerate / 1000 * m_bitsPer10Frames / 80 ;

    return buffered;
}

// Forward incoming audio straight into the base-class ring buffer;
// no AudioTrack-specific handling is needed at this stage.
bool AudioOutputAudioTrack::AddData(void *in_buffer, int in_len,
                                    int64_t timecode, int in_frames)
{
    return AudioOutputBase::AddData(in_buffer, in_len, timecode, in_frames);
}

void AudioOutputAudioTrack::Pause(bool paused)
{
AudioOutputBase::Pause(paused);
jboolean param = paused;
m_audioTrack->callMethod<void>("pause","(Z)V",param);
}

// Record a new source bitrate and, for passthru/encoded output, update
// the Java side's bits-per-10-frames conversion factor.
void AudioOutputAudioTrack::SetSourceBitrate(int rate)
{
    AudioOutputBase::SetSourceBitrate(rate);
    if (m_source_bitrate > 0 && (m_passthru || m_enc))
    {
        m_bitsPer10Frames = m_source_bitrate * 10 / m_source_samplerate;
        // m_audioTrack is null if OpenDevice failed or the device is
        // closed; guard the JNI call like the other methods do.
        if (m_audioTrack)
        {
            jint bitsPer10Frames = m_bitsPer10Frames;
            m_audioTrack->callMethod<void>("setBitsPer10Frames","(I)V",bitsPer10Frames);
        }
    }
}
42 changes: 42 additions & 0 deletions mythtv/libs/libmyth/audio/audiooutputaudiotrack.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,42 @@
#ifndef _AUDIOOUTPUTAUDIOTRACK_H_
#define _AUDIOOUTPUTAUDIOTRACK_H_

#include "audiooutputbase.h"

class QAndroidJniObject;
/*
Audio output for android based on android.media.AudioTrack.
This uses the java class org.mythtv.audio.AudioOutputAudioTrack
to invoke android media playback methods.
*/

class AudioOutputAudioTrack : public AudioOutputBase
{
public:
explicit AudioOutputAudioTrack(const AudioSettings &settings);
~AudioOutputAudioTrack() override;

// Queue decoded audio into the base-class ring buffer.
bool AddData(void *buffer, int len, int64_t timecode, int frames) override; // AudioOutput

// Volume control
// Volume is not controlled here; always report full volume and
// ignore volume changes (handled elsewhere on Android).
int GetVolumeChannel(int /* channel */) const override // VolumeBase
{ return 100; }
void SetVolumeChannel(int /* channel */, int /* volume */) override // VolumeBase
{}
// Forward pause state to the Java AudioTrack wrapper.
void Pause(bool paused) override; // AudioOutput

protected:
bool OpenDevice(void) override; // AudioOutputBase
void CloseDevice(void) override; // AudioOutputBase
void WriteAudio(unsigned char *aubuf, int size) override; // AudioOutputBase
int GetBufferedOnSoundcard(void) const override; // AudioOutputBase
AudioOutputSettings* GetOutputSettings(bool digital) override; // AudioOutputBase
void SetSourceBitrate(int rate) override; // AudioOutputBase
// JNI wrapper for org.mythtv.audio.AudioOutputAudioTrack;
// null until OpenDevice succeeds.
QAndroidJniObject *m_audioTrack {nullptr};
// Output bits per 10 audio frames, used for byte<->time conversion.
int m_bitsPer10Frames {0};
};

#endif //_AUDIOOUTPUTAUDIOTRACK_H_
57 changes: 40 additions & 17 deletions mythtv/libs/libmyth/audio/audiooutputbase.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,11 @@ using namespace std;
#include "mythlogging.h"
#include "mythconfig.h"

// AC3 encode currently disabled for Android
#if defined(Q_OS_ANDROID)
#define DISABLE_AC3_ENCODE
#endif

#define LOC QString("AOBase: ")

#define WPOS (m_audiobuffer + org_waud)
Expand Down Expand Up @@ -61,6 +66,9 @@ AudioOutputBase::AudioOutputBase(const AudioSettings &settings) :
memset(m_src_in_buf, 0, sizeof(m_src_in_buf));
memset(m_audiobuffer, 0, sizeof(m_audiobuffer));

if (m_main_device.startsWith("OpenMAX:")
|| m_main_device.startsWith("AudioTrack:"))
m_usesSpdif = false;
// Handle override of SRC quality settings
if (gCoreContext->GetBoolSetting("SRCQualityOverride", false))
{
Expand Down Expand Up @@ -313,7 +321,7 @@ void AudioOutputBase::SetStretchFactorLocked(float lstretchfactor)
m_pSoundStretch->setSampleRate(m_samplerate);
m_pSoundStretch->setChannels(channels);
m_pSoundStretch->setTempo(m_stretchfactor);
#if ARCH_ARM
#if ARCH_ARM || defined(Q_OS_ANDROID)
// use less demanding settings for Raspberry Pi and Android devices
m_pSoundStretch->setSetting(SETTING_SEQUENCE_MS, 82);
m_pSoundStretch->setSetting(SETTING_USE_AA_FILTER, 0);
Expand Down Expand Up @@ -415,11 +423,11 @@ bool AudioOutputBase::SetupPassthrough(AVCodecID codec, int codec_profile,

delete m_spdifenc;

// No spdif encoder if using openmax audio
if (m_main_device.startsWith("OpenMAX:"))
m_spdifenc = nullptr;
else
// No spdif encoder needed for certain devices
if (m_usesSpdif)
m_spdifenc = new SPDIFEncoder("spdif", codec);
else
m_spdifenc = nullptr;
if (m_spdifenc && m_spdifenc->Succeeded() && codec == AV_CODEC_ID_DTS)
{
switch(codec_profile)
Expand Down Expand Up @@ -476,10 +484,11 @@ void AudioOutputBase::Reconfigure(const AudioSettings &orig_settings)
m_output_settings->IsSupportedChannels(lconfigured_channels);

// check if the number of channels could be transmitted via AC3 encoding
#ifndef DISABLE_AC3_ENCODE
lenc = m_output_settingsdigital->canFeature(FEATURE_AC3) &&
(!m_output_settings->canFeature(FEATURE_LPCM) &&
lconfigured_channels > 2 && lconfigured_channels <= 6);

#endif
if (!lenc && !cando_channels)
{
// if hardware doesn't support source audio configuration
Expand Down Expand Up @@ -517,11 +526,11 @@ void AudioOutputBase::Reconfigure(const AudioSettings &orig_settings)
and we have more than 2 channels but multichannel PCM is not
supported or if the device just doesn't support the number of
channels */
#ifndef DISABLE_AC3_ENCODE
lenc = m_output_settingsdigital->canFeature(FEATURE_AC3) &&
((!m_output_settings->canFeature(FEATURE_LPCM) &&
lconfigured_channels > 2) ||
!m_output_settings->IsSupportedChannels(lconfigured_channels));

/* Might we reencode a bitstream that's been decoded for timestretch?
If the device doesn't support the number of channels - see below */
if (m_output_settingsdigital->canFeature(FEATURE_AC3) &&
Expand All @@ -530,7 +539,7 @@ void AudioOutputBase::Reconfigure(const AudioSettings &orig_settings)
{
lreenc = true;
}

#endif
// Enough channels? Upmix if not, but only from mono/stereo/5.0 to 5.1
if (IS_VALID_UPMIX_CHANNEL(settings.m_channels) &&
settings.m_channels < lconfigured_channels)
Expand Down Expand Up @@ -968,7 +977,7 @@ void AudioOutputBase::SetEffDsp(int dsprate)
/**
* Get the number of bytes in the audiobuffer
*/
inline int AudioOutputBase::audiolen()
inline int AudioOutputBase::audiolen() const
{
if (m_waud >= m_raud)
return m_waud - m_raud;
Expand All @@ -978,7 +987,7 @@ inline int AudioOutputBase::audiolen()
/**
* Get the free space in the audiobuffer in bytes
*/
int AudioOutputBase::audiofree()
int AudioOutputBase::audiofree() const
{
return kAudioRingBufferSize - audiolen() - 1;
/* There is one wasted byte in the buffer. The case where waud = raud is
Expand All @@ -993,7 +1002,7 @@ int AudioOutputBase::audiofree()
* This value can differ from that returned by audiolen if samples are
* being converted to floats and the output sample format is not 32 bits
*/
int AudioOutputBase::audioready()
int AudioOutputBase::audioready() const
{
if (m_passthru || m_enc || m_bytes_per_frame == m_output_bytes_per_frame)
return audiolen();
Expand All @@ -1008,7 +1017,20 @@ int64_t AudioOutputBase::GetAudiotime(void)
if (m_audbuf_timecode == 0 || !m_configure_succeeded)
return 0;

int obpf = m_output_bytes_per_frame;
// output bits per 10 frames
int64_t obpf;

if (m_passthru && !usesSpdif())
obpf = m_source_bitrate * 10 / m_source_samplerate;
else
if (m_enc && !usesSpdif())
{
// re-encode bitrate is hardcoded at 448000
obpf = 448000 * 10 / m_source_samplerate;
}
else
obpf = m_output_bytes_per_frame * 80;

int64_t oldaudiotime;

/* We want to calculate 'audiotime', which is the timestamp of the audio
Expand All @@ -1029,21 +1051,22 @@ int64_t AudioOutputBase::GetAudiotime(void)

QMutexLocker lockav(&m_avsync_lock);

int soundcard_buffer = GetBufferedOnSoundcard(); // bytes
int64_t soundcard_buffer = GetBufferedOnSoundcard(); // bytes

/* audioready tells us how many bytes are in audiobuffer
scaled appropriately if output format != internal format */
int main_buffer = audioready();
int64_t main_buffer = audioready();

oldaudiotime = m_audiotime;

/* timecode is the stretch adjusted version
of major post-stretched buffer contents
processing latencies are catered for in AddData/SetAudiotime
to eliminate race */
m_audiotime = m_audbuf_timecode - (m_effdsp && obpf ? (
((int64_t)(main_buffer + soundcard_buffer) * m_eff_stretchfactor) /
(m_effdsp * obpf)) : 0);

m_audiotime = m_audbuf_timecode - (m_effdsp && obpf ?
((main_buffer + soundcard_buffer) * int64_t(m_eff_stretchfactor)
* 80 / int64_t(m_effdsp) / obpf) : 0);

/* audiotime should never go backwards, but we might get a negative
value if GetBufferedOnSoundcard() isn't updated by the driver very
Expand Down
9 changes: 6 additions & 3 deletions mythtv/libs/libmyth/audio/audiooutputbase.h
Original file line number Diff line number Diff line change
Expand Up @@ -155,15 +155,17 @@ class AudioOutputBase : public AudioOutput, public MThread

int CheckFreeSpace(int &frames);

inline int audiolen(); // number of valid bytes in audio buffer
int audiofree(); // number of free bytes in audio buffer
int audioready(); // number of bytes ready to be written
inline int audiolen() const; // number of valid bytes in audio buffer
int audiofree() const; // number of free bytes in audio buffer
int audioready() const; // number of bytes ready to be written

void SetStretchFactorLocked(float factor);

// For audiooutputca
int GetBaseAudBufTimeCode() const { return m_audbuf_timecode; }

bool usesSpdif() const { return m_usesSpdif; }

protected:
// Basic details about the audio stream
int m_channels {-1};
Expand Down Expand Up @@ -295,6 +297,7 @@ class AudioOutputBase : public AudioOutput, public MThread
int64_t m_length_last_data {0};

// SPDIF Encoder for digital passthrough
bool m_usesSpdif {true};
SPDIFEncoder *m_spdifenc {nullptr};

// Flag indicating if SetStretchFactor enabled audio float processing
Expand Down
2 changes: 2 additions & 0 deletions mythtv/libs/libmyth/libmyth.pro
Original file line number Diff line number Diff line change
Expand Up @@ -179,7 +179,9 @@ unix:!cygwin {

android {
SOURCES += audio/audiooutputopensles.cpp
SOURCES += audio/audiooutputaudiotrack.cpp
HEADERS += audio/audiooutputopensles.h
HEADERS += audio/audiooutputaudiotrack.h
}

linux:DEFINES += linux
Expand Down