Skip to content
Permalink
Browse files

Fixed a bug when the frame size was different from 40 msec

  • Loading branch information...
bear101 committed Aug 1, 2019
1 parent ab2b1d2 commit 09f2e2908bd9742a55723867a1e2c47ede5de1cc
@@ -44,9 +44,9 @@ protected void setUp() throws Exception {
this.ADMIN_USERNAME = "admin";
this.ADMIN_PASSWORD = "admin";

this.IPADDR = "tt5us.bearware.dk";
this.TCPPORT = 10335;
this.UDPPORT = 10335;
this.IPADDR = "192.168.0.68";
this.TCPPORT = 10333;
this.UDPPORT = 10333;
}

public void test_This() {
@@ -151,42 +151,72 @@ public void test_SharedAudioDevice() {
public void test_MultiClientOnSharedAudioDevice() {
TeamTalkBase ttclient1 = newClientInstance();
TeamTalkBase ttclient2 = newClientInstance();
TeamTalkBase ttclient3 = newClientInstance();
TeamTalkBase ttclient4 = newClientInstance();

int sndinputdevid = SoundDeviceConstants.TT_SOUNDDEVICE_ID_OPENSLES_DEFAULT | SoundDeviceConstants.TT_SOUNDDEVICE_SHARED_FLAG;
int sndoutputdevid = SoundDeviceConstants.TT_SOUNDDEVICE_ID_OPENSLES_DEFAULT;
assertTrue("Init ttclient1 sound input device", ttclient1.initSoundInputDevice(sndinputdevid));
assertTrue("Init ttclient1 sound output device", ttclient1.initSoundOutputDevice(sndoutputdevid));
assertTrue("Init ttclient2 sound input device", ttclient2.initSoundInputDevice(sndinputdevid));
assertTrue("Init ttclient2 sound output device", ttclient2.initSoundOutputDevice(sndoutputdevid));

connect(ttclient1);
login(ttclient1, getCurrentMethod(), "guest", "guest");
joinRoot(ttclient1);
ttclient1.DBG_SetSoundInputTone(StreamType.STREAMTYPE_VOICE, 600);

connect(ttclient2);
login(ttclient2, getCurrentMethod(), "guest", "guest");
joinRoot(ttclient2);
ttclient2.DBG_SetSoundInputTone(StreamType.STREAMTYPE_VOICE, 900);

assertTrue("Transmit audio on ttclient1", ttclient1.enableVoiceTransmission(true));
waitForEvent(ttclient1, ClientEvent.CLIENTEVENT_NONE, 5000);
assertTrue("Stop transmit audio on ttclient1", ttclient1.enableVoiceTransmission(false));
waitForEvent(ttclient1, ClientEvent.CLIENTEVENT_NONE, 1000);

assertTrue("Transmit audio on ttclient2", ttclient2.enableVoiceTransmission(true));
waitForEvent(ttclient2, ClientEvent.CLIENTEVENT_NONE, 5000);
assertTrue("Stop transmit audio on ttclient2", ttclient2.enableVoiceTransmission(false));
waitForEvent(ttclient2, ClientEvent.CLIENTEVENT_NONE, 1000);
for (TeamTalkBase ttclient : this.ttclients) {
assertTrue("Init ttclient sound input device", ttclient.initSoundInputDevice(sndinputdevid));
assertTrue("Init ttclient sound output device", ttclient.initSoundOutputDevice(sndoutputdevid));
}

int freq = 500;
for (TeamTalkBase ttclient : this.ttclients) {
connect(ttclient);
login(ttclient, getCurrentMethod(), "guest", "guest");
joinRoot(ttclient);
ttclient.DBG_SetSoundInputTone(StreamType.STREAMTYPE_VOICE, freq += 100);
}

assertTrue("Transmit audio on ttclient1", ttclient1.enableVoiceTransmission(true));
// now we hear all clients transmitting at the same time
for (TeamTalkBase ttclient : this.ttclients) {
assertTrue("Transmit audio on ttclient", ttclient.enableVoiceTransmission(true));
}
waitForEvent(ttclient1, ClientEvent.CLIENTEVENT_NONE, 5000);
assertTrue("Stop transmit audio on ttclient1", ttclient1.enableVoiceTransmission(false));
for (TeamTalkBase ttclient : this.ttclients) {
assertTrue("Stop transmit audio on ttclient", ttclient.enableVoiceTransmission(false));
}
waitForEvent(ttclient1, ClientEvent.CLIENTEVENT_NONE, 1000);

assertTrue("Transmit audio on ttclient2", ttclient2.enableVoiceTransmission(true));
waitForEvent(ttclient2, ClientEvent.CLIENTEVENT_NONE, 5000);
assertTrue("Stop transmit audio on ttclient2", ttclient2.enableVoiceTransmission(false));
waitForEvent(ttclient2, ClientEvent.CLIENTEVENT_NONE, 1000);
// Create two separate channels, one for ttclient1, ttclient2 and one for ttclient3, ttclient4.
Channel chan1 = buildDefaultChannel(ttclient1, "Opus Mono - 40 msec");
assertEquals("opus default", chan1.audiocodec.nCodec, Codec.OPUS_CODEC);
chan1.audiocodec.opus.nChannels = 1;
chan1.audiocodec.opus.nTxIntervalMSec = 40;
assertTrue("ttclient1 create channel", waitCmdSuccess(ttclient1, ttclient1.doJoinChannel(chan1), DEF_WAIT));
assertTrue("ttclient2 join ttclient1's channel", waitCmdSuccess(ttclient2, ttclient2.doJoinChannelByID(ttclient1.getMyChannelID(), chan1.szPassword), DEF_WAIT));

Channel chan2 = buildDefaultChannel(ttclient3, "Opus Stereo - 60 msec");
assertEquals("opus default", chan2.audiocodec.nCodec, Codec.OPUS_CODEC);
chan2.audiocodec.opus.nChannels = 2;
chan2.audiocodec.opus.nTxIntervalMSec = 60;
assertTrue("ttclient3 create channel", waitCmdSuccess(ttclient3, ttclient3.doJoinChannel(chan2), DEF_WAIT));
assertTrue("ttclient4 join ttclient3's channel", waitCmdSuccess(ttclient4, ttclient4.doJoinChannelByID(ttclient3.getMyChannelID(), chan2.szPassword), DEF_WAIT));

// now we should hear 5 second tone of each client on two different channels
for (TeamTalkBase ttclient : this.ttclients) {
assertTrue("Transmit audio on ttclient", ttclient.enableVoiceTransmission(true));
waitForEvent(ttclient1, ClientEvent.CLIENTEVENT_NONE, 5000);
assertTrue("Stop transmit audio on ttclient", ttclient.enableVoiceTransmission(false));
waitForEvent(ttclient1, ClientEvent.CLIENTEVENT_NONE, 1000);
}

// put ttclient1,ttclient2 in 20 msec channel and redo test
Channel chan3 = buildDefaultChannel(ttclient1, "Opus Stereo - 20 msec");
assertEquals("opus default", chan3.audiocodec.nCodec, Codec.OPUS_CODEC);
chan3.audiocodec.opus.nChannels = 1;
chan3.audiocodec.opus.nTxIntervalMSec = 20;
assertTrue("ttclient1 create channel", waitCmdSuccess(ttclient1, ttclient1.doJoinChannel(chan3), DEF_WAIT));
assertTrue("ttclient2 join ttclient1's channel", waitCmdSuccess(ttclient2, ttclient2.doJoinChannelByID(ttclient1.getMyChannelID(), chan3.szPassword), DEF_WAIT));

// now we should hear 5 second tone of each client on two different channels
for (TeamTalkBase ttclient : this.ttclients) {
assertTrue("Transmit audio on ttclient", ttclient.enableVoiceTransmission(true));
waitForEvent(ttclient1, ClientEvent.CLIENTEVENT_NONE, 5000);
assertTrue("Stop transmit audio on ttclient", ttclient.enableVoiceTransmission(false));
waitForEvent(ttclient1, ClientEvent.CLIENTEVENT_NONE, 1000);
}
}
}
@@ -173,6 +173,8 @@ namespace soundsystem {
#define MAX_SAMPLERATES 16
#define MAX_CHANNELS 2
#define MAX_FRAMESIZE ((1 << 27) - 1)

#define DEBUG_RESAMPLER 0

template < typename INPUTSTREAMER >
class SharedStreamCapture : public StreamCapture
@@ -344,20 +346,24 @@ namespace soundsystem {
assert((streamer.inputdeviceid & SOUND_DEVICE_SHARED_FLAG) == 0);

wguard_t g(m_mutex);

// MYTRACE("Original for %p samplerate %d, framesize %d, channels %d\n",
// streamer.recorder, streamer.samplerate,
// streamer.framesize, streamer.channels);

#if DEBUG_RESAMPLER
MYTRACE("Original for %p samplerate %d, framesize %d, channels %d\n",
streamer.recorder, streamer.samplerate,
streamer.framesize, streamer.channels);
#endif

bool resample = false;
for (auto stream : m_activestreams)
{
if (SameStreamProperties(*stream, streamer))
{
stream->recorder->StreamCaptureCb(*stream, buffer, samples);
// MYTRACE("Shared for %p samplerate %d, framesize %d, channels %d\n",
// stream->recorder, stream->samplerate,
// stream->framesize, stream->channels);
#if DEBUG_RESAMPLER
MYTRACE("Shared for %p samplerate %d, framesize %d, channels %d\n",
stream->recorder, stream->samplerate,
stream->framesize, stream->channels);
#endif
}
else
{
@@ -404,23 +410,26 @@ namespace soundsystem {
assert(rsbuf != m_resample_buffers.end());
short* rsbufptr = &rsbuf->second[0];
assert(cbch);
int rsframesize = rsbuf->second.size() / cbch;
int samples = i.second->Resample(reinterpret_cast<const short*>(mb->rd_ptr()),
m_originalstream->framesize,
rsbufptr, rsbuf->second.size() / cbch);
// MYTRACE("Resampled for samplerate %d, framesize %d, channels %d\n",
// cbsr, cbframesize, cbch);
rsbufptr, rsframesize);
#if DEBUG_RESAMPLER
MYTRACE("Resampled for samplerate %d, framesize %d, channels %d\n",
cbsr, cbframesize, cbch);
#endif

MYTRACE_COND(samples != cbframesize,
MYTRACE_COND(samples != rsframesize,
ACE_TEXT("Resampled output frame for samplerate %d, channels %d doesn't match framesize %d. Was %d\n"),
cbsr, cbch, cbframesize, samples);
cbsr, cbch, rsframesize, samples);

// Now copy samples from m_resample_buffer[key] to
// m_callback_buffer[key], i.e. from original
// capture stream to shared capture stream.
//
// Here we want to use "total" samples where
// channel information is omitted.
int totalsamples = cbframesize * cbch;
// channel information (mono/stereo) is omitted.
int totalsamples = rsframesize * cbch;
int rspos = 0;
while (rspos < totalsamples)
{
@@ -434,8 +443,10 @@ namespace soundsystem {
std::size_t n_samples = std::min(cbbufspace, rsremain);

//where to copy from
// MYTRACE("Copying at cbpos %d, rspos %u for samplerate %d, framesize %d, channels %d\n",
// int(cbpos), rspos, cbsr, cbframesize, cbch);
#if DEBUG_RESAMPLER
MYTRACE("Copying at cbpos %d, rspos %u for samplerate %d, framesize %d, channels %d\n",
int(cbpos), rspos, cbsr, cbframesize, cbch);
#endif
assert(rspos + n_samples <= m_resample_buffers[key].size());
assert(cbpos + n_samples <= m_callback_buffers[key].size());

@@ -453,8 +464,10 @@ namespace soundsystem {
if (MakeKey(*streamer) == key)
{
streamer->recorder->StreamCaptureCb(*streamer, cbbufptr, cbframesize);
// MYTRACE("Callback for %p samplerate %d, framesize %d, channels %d\n",
// streamer->recorder, cbsr, cbframesize, cbch);
#if DEBUG_RESAMPLER
MYTRACE("Callback for %p samplerate %d, framesize %d, channels %d\n",
streamer->recorder, cbsr, cbframesize, cbch);
#endif
}
}
cbpos = 0;

0 comments on commit 09f2e29

Please sign in to comment.
You can’t perform that action at this time.