Skip to content

HTTPS clone URL

Subversion checkout URL

You can clone with HTTPS or Subversion.

Download ZIP
Browse files

Update to libjingle 0.6.15

* Add Md5Digest, Sha1Digest and HMAC that work with any digest.
* Add app/webrtc/test.
* Remove win socket dependency from byteorder.
* Allow use of Thread without a socketserver.
* Bug fixes.
Review URL: https://webrtc-codereview.appspot.com/439002

git-svn-id: http://libjingle.googlecode.com/svn/trunk@120 dd674b97-3498-5ee5-1854-bdd07cd0ff33
  • Loading branch information...
commit 28cdd1a70122d2493a843f996a1ec96f66058e91 1 parent 8b37ec0
ronghuawu@google.com authored
Showing with 4,031 additions and 718 deletions.
  1. +8 −1 CHANGELOG
  2. +26 −0 LICENSE_THIRD_PARTY
  3. +2 −0  talk/app/webrtc/candidateobserver.h
  4. +0 −2  talk/app/webrtc/jsep.h
  5. +6 −3 talk/app/webrtc/jsepicecandidate.cc
  6. +2 −2 talk/app/webrtc/jsepicecandidate.h
  7. +9 −17 talk/app/webrtc/jsepsessiondescription.cc
  8. +3 −12 talk/app/webrtc/jsepsessiondescription.h
  9. +4 −17 talk/app/webrtc/jsepsignaling.cc
  10. +3 −17 talk/app/webrtc/jsepsignaling.h
  11. +20 −38 talk/app/webrtc/jsepsignaling_unittest.cc
  12. +21 −3 talk/app/webrtc/peerconnectionimpl.cc
  13. +3 −2 talk/app/webrtc/peerconnectionimpl.h
  14. +23 −19 talk/app/webrtc/peerconnectionimpl_unittest.cc
  15. +7 −9 talk/app/webrtc/peerconnectionsignaling.cc
  16. +11 −12 talk/app/webrtc/peerconnectionsignaling.h
  17. +7 −7 talk/app/webrtc/peerconnectionsignaling_unittest.cc
  18. +6 −6 talk/app/webrtc/roapmessages_unittest.cc
  19. +692 −0 talk/app/webrtc/test/fakeaudiocapturemodule.cc
  20. +254 −0 talk/app/webrtc/test/fakeaudiocapturemodule.h
  21. +184 −0 talk/app/webrtc/test/fakevideocapturemodule.cc
  22. +205 −0 talk/app/webrtc/test/fakevideocapturemodule.h
  23. +83 −0 talk/app/webrtc/test/fileframesource.cc
  24. +59 −0 talk/app/webrtc/test/fileframesource.h
  25. +35 −0 talk/app/webrtc/test/i420framesource.cc
  26. +57 −0 talk/app/webrtc/test/i420framesource.h
  27. +53 −0 talk/app/webrtc/test/staticframesource.cc
  28. +41 −0 talk/app/webrtc/test/staticframesource.h
  29. +346 −87 talk/app/webrtc/webrtcsdp.cc
  30. +136 −50 talk/app/webrtc/webrtcsdp_unittest.cc
  31. +4 −2 talk/app/webrtc/webrtcsession.cc
  32. +3 −3 talk/app/webrtc/webrtcsession.h
  33. +7 −8 talk/app/webrtc/webrtcsession_unittest.cc
  34. +20 −0 talk/base/byteorder.h
  35. +21 −5 talk/base/fakenetwork.h
  36. +1 −1  talk/base/httpbase.cc
  37. +2 −2 talk/base/ipaddress.cc
  38. +2 −1  talk/base/ipaddress.h
  39. +39 −39 talk/base/macasyncsocket.cc
  40. +6 −5 talk/base/macasyncsocket.h
  41. +3 −3 talk/base/md5.h
  42. +63 −0 talk/base/md5digest.h
  43. +83 −0 talk/base/md5digest_unittest.cc
  44. +169 −0 talk/base/messagedigest.cc
  45. +122 −0 talk/base/messagedigest.h
  46. +8 −5 talk/base/messagequeue.cc
  47. +19 −10 talk/base/messagequeue_unittest.cc
  48. +2 −2 talk/base/natsocketfactory.cc
  49. +275 −105 talk/base/network.cc
  50. +48 −14 talk/base/network.h
  51. +307 −42 talk/base/network_unittest.cc
  52. +67 −0 talk/base/nullsocketserver.h
  53. +62 −0 talk/base/nullsocketserver_unittest.cc
  54. +45 −40 talk/base/physicalsocketserver.cc
  55. +64 −0 talk/base/sha1digest.h
  56. +86 −0 talk/base/sha1digest_unittest.cc
  57. +1 −8 talk/base/sslidentity.cc
  58. +4 −10 talk/base/sslidentity.h
  59. +6 −19 talk/base/stringdigest.h
  60. +2 −1  talk/base/virtualsocketserver.cc
  61. +41 −11 talk/base/win32.cc
  62. +35 −38 talk/base/win32socketserver.cc
  63. +5 −5 talk/base/win32socketserver.h
  64. +4 −1 talk/libjingle.scons
  65. +2 −1  talk/p2p/base/port_unittest.cc
  66. +1 −1  talk/p2p/base/relayport_unittest.cc
  67. +6 −1 talk/p2p/base/session_unittest.cc
  68. +11 −4 talk/p2p/base/sessiondescription.cc
  69. +5 −0 talk/p2p/base/sessiondescription.h
  70. +1 −1  talk/p2p/base/stunport_unittest.cc
  71. +6 −1 talk/session/phone/mediasession.h
  72. +33 −0 talk/session/phone/mediasession_unittest.cc
  73. +3 −0  talk/session/tunnel/securetunnelsessionclient.cc
  74. +3 −0  talk/session/tunnel/tunnelsessionclient.cc
  75. +19 −18 talk/xmpp/chatroommodule.h
  76. +9 −7 talk/xmpp/rostermodule.h
9 CHANGELOG
View
@@ -1,7 +1,14 @@
Libjingle
+0.6.15 - Mar 06, 2012
+ - Add Md5Digest, Sha1Digest and HMAC that works with any digest.
+ - Add app/webrtc/test.
+ - Remove win socket dependency from byteorder.
+ - Allow to use Thread without socketserver.
+ - Bug fixes.
+
0.6.14 - Feb 28, 2012
- - Initial JSEP support in WebRtc
+ - Initial JSEP support in WebRTC.
- Bug fixes.
0.6.13 - Feb 16, 2012
26 LICENSE_THIRD_PARTY
View
@@ -0,0 +1,26 @@
+This source tree contains third party source code which is governed by third
+party licenses. This file contains references to files which are under other
+licenses than the one provided in the COPYING file in the root of the source
+tree.
+
+Files governed by third party licenses:
+
+Governed by license within files (Public domain):
+Exact origin unknown
+base/base64.c
+base/base64.h
+
+Governed by http://www.fourmilab.ch/md5/ (Public domain):
+base/md5.c
+base/md5.h
+
+Governed by http://www.boost.org/LICENSE_1_0.txt (Boost license):
+base/scoped_ptr.h
+
+Governed by license within files (Public domain):
+Originally downloaded from http://svn.ghostscript.com/jbig2dec/tags/release_0_02/sha1.*
+base/sha1.c
+base/sha1.h
+
+Governed by http://sigslot.sourceforge.net/#license (Public domain):
+base/sigslot.h
2  talk/app/webrtc/candidateobserver.h
View
@@ -32,6 +32,8 @@
namespace webrtc {
+// TODO: Remove this file from the build since it is no longer used.
+
// This observer interface provides feedback about ice candidates found by a
// WebRtcSession object.
class CandidateObserver {
2  talk/app/webrtc/jsep.h
View
@@ -80,8 +80,6 @@ class SessionDescriptionInterface {
public:
virtual ~SessionDescriptionInterface() {}
virtual const cricket::SessionDescription* description() const = 0;
- // Release ownership of cricket::SessionDescription.
- virtual cricket::SessionDescription* ReleaseDescription() = 0;
// Adds the specified candidate to the description.
// Ownership is not transferred.
virtual void AddCandidate(const IceCandidateInterface* candidate) = 0;
9 talk/app/webrtc/jsepicecandidate.cc
View
@@ -40,11 +40,14 @@ JsepIceCandidate::JsepIceCandidate(const std::string& label)
: label_(label) {
}
-JsepIceCandidate::~JsepIceCandidate() {
+JsepIceCandidate::JsepIceCandidate(const std::string& label,
+ const cricket::Candidate& candidate)
+ : label_(label),
+ candidate_(candidate) {
}
-void JsepIceCandidate::SetCandidate(const cricket::Candidate& candidates) {
- candidate_ = candidates;
+JsepIceCandidate::~JsepIceCandidate() {
+
}
bool JsepIceCandidate::Initialize(const std::string& sdp) {
4 talk/app/webrtc/jsepicecandidate.h
View
@@ -41,9 +41,9 @@ namespace webrtc {
class JsepIceCandidate : public IceCandidateInterface {
public:
explicit JsepIceCandidate(const std::string& label);
+ explicit JsepIceCandidate(const std::string& label,
+ const cricket::Candidate& candidate);
~JsepIceCandidate();
-
- void SetCandidate(const cricket::Candidate& candidates);
bool Initialize(const std::string& sdp);
virtual std::string label() const { return label_;}
26 talk/app/webrtc/jsepsessiondescription.cc
View
@@ -27,12 +27,16 @@
#include "talk/app/webrtc/jsepsessiondescription.h"
#include "talk/app/webrtc/webrtcsdp.h"
-#include "talk/p2p/base/sessiondescription.h"
+#include "talk/session/phone/mediasession.h"
namespace webrtc {
-JsepSessionDescription::JsepSessionDescription()
- : const_description_(NULL) {
+JsepSessionDescription::JsepSessionDescription() {
+}
+
+JsepSessionDescription::JsepSessionDescription(
+ const cricket::SessionDescription* description) {
+ description_.reset(description->Copy());
}
JsepSessionDescription::~JsepSessionDescription() {
@@ -41,27 +45,15 @@ JsepSessionDescription::~JsepSessionDescription() {
void JsepSessionDescription::SetDescription(
cricket::SessionDescription* description) {
description_.reset(description);
- const_description_ = description_.get();
-}
-
-void JsepSessionDescription::SetConstDescription(
- const cricket::SessionDescription* description) {
- description_.reset(NULL);
- const_description_ = description_.get();
}
bool JsepSessionDescription::Initialize(const std::string& sdp) {
if (description_.get() != NULL)
return false;
description_.reset(new cricket::SessionDescription());
- const_description_ = description_.get();
return SdpDeserialize(sdp, description_.get(), &candidates_);
}
-cricket::SessionDescription* JsepSessionDescription::ReleaseDescription() {
- return description_.release();
-}
-
void JsepSessionDescription::AddCandidate(
const IceCandidateInterface* candidate) {
if (candidate)
@@ -69,9 +61,9 @@ void JsepSessionDescription::AddCandidate(
}
bool JsepSessionDescription::ToString(std::string* out) const {
- if (!const_description_ || !out)
+ if (!description_.get() || !out)
return false;
- *out = SdpSerialize(*const_description_, candidates_);
+ *out = SdpSerialize(*description_.get(), candidates_);
return !out->empty();
}
15 talk/app/webrtc/jsepsessiondescription.h
View
@@ -46,30 +46,21 @@ namespace webrtc {
class JsepSessionDescription : public SessionDescriptionInterface {
public:
JsepSessionDescription();
+ explicit JsepSessionDescription(
+ const cricket::SessionDescription* description);
~JsepSessionDescription();
bool Initialize(const std::string& sdp);
void SetDescription(cricket::SessionDescription* description);
- // Currently there is no way to copy a cricket::SessionDescription.
- // Therefore we need a way to create a JsepSessionDescription with an
- // unmutable pointer to a SessionDescription to be returned in
- // local_description.
- // TODO: This is a pretty bad and ugly way- We should create a
- // way to copy a cricket::SessionDescription.
- // Since this is hidden from the application user I would like to do that
- // later.
- void SetConstDescription(const cricket::SessionDescription* description);
virtual const cricket::SessionDescription* description() const {
- return const_description_;
+ return description_.get();
}
- virtual cricket::SessionDescription* ReleaseDescription();
virtual void AddCandidate(const IceCandidateInterface* candidate);
virtual bool ToString(std::string* out) const;
private:
talk_base::scoped_ptr<cricket::SessionDescription> description_;
- const cricket::SessionDescription* const_description_;
std::vector<cricket::Candidate> candidates_;
DISALLOW_COPY_AND_ASSIGN(JsepSessionDescription);
21 talk/app/webrtc/jsepsignaling.cc
View
@@ -114,11 +114,9 @@ static cricket::ContentAction GetContentAction(JsepInterface::Action action) {
JsepSignaling::JsepSignaling(talk_base::Thread* signaling_thread,
SessionDescriptionProvider* provider,
- IceCandidateObserver* observer,
JsepRemoteMediaStreamObserver* stream_observer)
: signaling_thread_(signaling_thread),
provider_(provider),
- observer_(observer),
stream_observer_(stream_observer),
local_description_(new JsepSessionDescription()),
remote_streams_(StreamCollection::Create()),
@@ -164,18 +162,18 @@ SessionDescriptionInterface* JsepSignaling::CreateAnswer(
bool JsepSignaling::SetLocalDescription(Action action,
SessionDescriptionInterface* desc) {
cricket::ContentAction content_action = GetContentAction(action);
- bool ret = provider_->SetLocalDescription(desc->ReleaseDescription(),
+ bool ret = provider_->SetLocalDescription(desc->description()->Copy(),
content_action);
- local_description_->SetConstDescription(provider_->local_description());
+ local_description_.reset(desc);
return ret;
}
bool JsepSignaling::SetRemoteDescription(Action action,
SessionDescriptionInterface* desc) {
cricket::ContentAction content_action = GetContentAction(action);
- bool ret = provider_->SetRemoteDescription(desc->ReleaseDescription(),
+ bool ret = provider_->SetRemoteDescription(desc->description()->Copy(),
content_action);
- remote_description_->SetConstDescription(provider_->remote_description());
+ remote_description_.reset(desc);
// It is important that we have updated the provider with the
// remote SessionDescription before we update the streams.
@@ -192,17 +190,6 @@ bool JsepSignaling::ProcessIceMessage(const IceCandidateInterface* candidate) {
candidate->candidate());
}
-void JsepSignaling::OnCandidatesReady() {
- observer_->OnIceComplete();
-}
-
-void JsepSignaling::OnCandidateFound(const std::string& content_name,
- const cricket::Candidate& candidate) {
- JsepIceCandidate jsep_candidate(content_name);
- jsep_candidate.SetCandidate(candidate);
- observer_->OnIceCandidate(&jsep_candidate);
-}
-
// Updates or Creates remote MediaStream objects given a
// remote SessionDesription.
// If the remote SessionDesription contain new remote MediaStreams
20 talk/app/webrtc/jsepsignaling.h
View
@@ -34,7 +34,6 @@
#include <string>
#include <vector>
-#include "talk/app/webrtc/candidateobserver.h"
#include "talk/app/webrtc/jsep.h"
#include "talk/app/webrtc/jsepsessiondescription.h"
#include "talk/base/scoped_ptr.h"
@@ -59,9 +58,6 @@ class SessionDescriptionProvider;
// JsepRemoteMediaStreamObserver is triggered when
// JsepSignaling::SetRemoteDescription is called with a new
// SessionDescription with a new set of MediaStreams.
-// TODO: It does not make sense to have two sets of observer functions-
-// JsepObserver and JsepRemoteMediaStreamObserver. It is done this way in order
-// to be able to support both ROAP and JSEP for a while.
class JsepRemoteMediaStreamObserver {
public:
// Triggered when media is received on a new stream from remote peer.
@@ -81,11 +77,10 @@ class JsepRemoteMediaStreamObserver {
//
// JsepSignaling is Thread-compatible and all non-const methods are
// expected to be called on the signaling thread.
-class JsepSignaling : public JsepInterface, public CandidateObserver {
+class JsepSignaling : public JsepInterface {
public:
JsepSignaling(talk_base::Thread* signaling_thread,
SessionDescriptionProvider* provider,
- IceCandidateObserver* observer,
JsepRemoteMediaStreamObserver* stream_observer);
virtual ~JsepSignaling();
@@ -111,14 +106,6 @@ class JsepSignaling : public JsepInterface, public CandidateObserver {
return remote_description_.get();
}
- protected:
- // Implements CandidateObserver interface.
- // OnCandidatesReady is called when all local candidates have been collected.
- virtual void OnCandidatesReady();
- // Implements CandidateObserver interface.
- virtual void OnCandidateFound(const std::string& content_name,
- const cricket::Candidate& candidate);
-
private:
// Creates and destroys remote media streams based on |remote_desc|.
void UpdateRemoteStreams(const cricket::SessionDescription* remote_desc);
@@ -131,12 +118,11 @@ class JsepSignaling : public JsepInterface, public CandidateObserver {
talk_base::Thread* signaling_thread_;
SessionDescriptionProvider* provider_;
- IceCandidateObserver* observer_;
JsepRemoteMediaStreamObserver* stream_observer_;
talk_base::scoped_refptr<StreamCollectionInterface> local_streams_;
- talk_base::scoped_ptr<JsepSessionDescription> local_description_;
+ talk_base::scoped_ptr<SessionDescriptionInterface> local_description_;
talk_base::scoped_refptr<StreamCollection> remote_streams_;
- talk_base::scoped_ptr<JsepSessionDescription> remote_description_;
+ talk_base::scoped_ptr<SessionDescriptionInterface> remote_description_;
};
} // namespace webrtc
58 talk/app/webrtc/jsepsignaling_unittest.cc
View
@@ -57,18 +57,19 @@ static const char kSdpString1[] =
"v=0\r\n"
"o=- 0 0 IN IP4 127.0.0.1\r\n"
"s=\r\n"
- "c=IN IP4 0.0.0.0\r\n"
"t=0 0\r\n"
"m=audio 1 RTP/AVPF 103\r\n"
"a=mid:audio\r\n"
"a=rtpmap:103 ISAC/16000\r\n"
- "a=ssrc:1 cname:stream1 mslabel:stream1 "
- "label:audio_1\r\n"
+ "a=ssrc:1 cname:stream1\r\n"
+ "a=ssrc:1 mslabel:stream1\r\n"
+ "a=ssrc:1 label:audio_1\r\n"
"m=video 1 RTP/AVPF 120\r\n"
"a=mid:video\r\n"
"a=rtpmap:120 VP8/90000\r\n"
- "a=ssrc:2 cname:stream1 mslabel:stream1 "
- "label:video_1\r\n";
+ "a=ssrc:2 cname:stream1\r\n"
+ "a=ssrc:2 mslabel:stream1\r\n"
+ "a=ssrc:2 label:video_1\r\n";
// Reference SDP with two MediaStreams with label "stream1" and "stream2. Each
// MediaStreams have one audio track and one video track.
@@ -76,22 +77,25 @@ static const char kSdpString2[] =
"v=0\r\n"
"o=- 0 0 IN IP4 127.0.0.1\r\n"
"s=\r\n"
- "c=IN IP4 0.0.0.0\r\n"
"t=0 0\r\n"
"m=audio 1 RTP/AVPF 103\r\n"
"a=mid:audio\r\n"
"a=rtpmap:103 ISAC/16000\r\n"
- "a=ssrc:1 cname:stream1 mslabel:stream1 "
- "label:audio_1\r\n"
- "a=ssrc:3 cname:stream2 mslabel:stream2 "
- "label:audio_2\r\n"
+ "a=ssrc:1 cname:stream1\r\n"
+ "a=ssrc:1 mslabel:stream1\r\n"
+ "a=ssrc:1 label:audio_1\r\n"
+ "a=ssrc:3 cname:stream2\r\n"
+ "a=ssrc:3 mslabel:stream2\r\n"
+ "a=ssrc:3 label:audio_2\r\n"
"m=video 1 RTP/AVPF 120\r\n"
"a=mid:video\r\n"
"a=rtpmap:120 VP8/0\r\n"
- "a=ssrc:2 cname:stream1 mslabel:stream1 "
- "label:local_video_1\r\n"
- "a=ssrc:4 cname:stream2 mslabel:stream2 "
- "label:video_2\r\n";
+ "a=ssrc:2 cname:stream1\r\n"
+ "a=ssrc:2 mslabel:stream1\r\n"
+ "a=ssrc:2 label:video_1\r\n"
+ "a=ssrc:4 cname:stream2\r\n"
+ "a=ssrc:4 mslabel:stream2\r\n"
+ "a=ssrc:4 label:video_2\r\n";
static const char kSdpCandidates[] =
"a=candidate:1 1 udp 1 127.0.0.1 1234 typ host name rtp network_name "
@@ -324,8 +328,7 @@ class FakeSessionDescriptionProvider
// MockSignalingObserver implements functions for listening to all signals from
// a JsepSignaling instance.
-class MockSignalingObserver : public webrtc::JsepRemoteMediaStreamObserver,
- public webrtc::IceCandidateObserver {
+class MockSignalingObserver : public webrtc::JsepRemoteMediaStreamObserver {
public:
MockSignalingObserver()
: ice_complete_(true),
@@ -347,16 +350,6 @@ class MockSignalingObserver : public webrtc::JsepRemoteMediaStreamObserver,
remote_media_streams_->RemoveStream(remote_stream);
}
- virtual void OnIceCandidate(const IceCandidateInterface* candidate) {
- candidate_label_ = candidate->label();
- candidate_ = candidate->candidate();
- EXPECT_TRUE(candidate->ToString(&candidate_string_));
- }
-
- virtual void OnIceComplete() {
- ice_complete_ = true;
- }
-
MediaStreamInterface* RemoteStream(const std::string& label) {
return remote_media_streams_->find(label);
}
@@ -391,15 +384,13 @@ class JsepSignalingForTest : public webrtc::JsepSignaling {
explicit JsepSignalingForTest(webrtc::SessionDescriptionProvider* provider,
MockSignalingObserver* observer)
: webrtc::JsepSignaling(talk_base::Thread::Current(), provider,
- observer, observer) {
+ observer) {
};
using webrtc::JsepSignaling::CreateOffer;
using webrtc::JsepSignaling::CreateAnswer;
using webrtc::JsepSignaling::SetLocalDescription;
using webrtc::JsepSignaling::SetRemoteDescription;
using webrtc::JsepSignaling::ProcessIceMessage;
- using webrtc::JsepSignaling::OnCandidatesReady;
- using webrtc::JsepSignaling::OnCandidateFound;
};
class JsepSignalingTest: public testing::Test {
@@ -536,15 +527,6 @@ TEST_F(JsepSignalingTest, SetRemoteDescription) {
reference2.get()));
}
-TEST_F(JsepSignalingTest, OnCandidatesFound) {
- cricket::Candidate candidate = CreateMockCandidate();
- signaling_->OnCandidateFound(cricket::CN_AUDIO, candidate);
- EXPECT_EQ(kSdpCandidates, observer_->candidate_string());
- EXPECT_TRUE(cricket::CN_AUDIO == observer_->candidate_label());
- signaling_->OnCandidatesReady();
- EXPECT_TRUE(observer_->ice_complete());
-}
-
TEST_F(JsepSignalingTest, ProcessIceMessage) {
cricket::Candidate candidate = CreateMockCandidate();
talk_base::scoped_ptr<IceCandidateInterface> ice_candidate(
24 talk/app/webrtc/peerconnectionimpl.cc
View
@@ -287,10 +287,10 @@ bool PeerConnection::Initialize(bool use_roap,
} else {
jsep_signaling_.reset(new JsepSignaling(factory_->signaling_thread(),
session_.get(),
- observer_,
this));
// Register with WebRtcSession to get Ice candidates.
- session_->RegisterObserver(jsep_signaling_.get());
+ session_->RegisterObserver(observer);
+ session_->SignalState.connect(this, &PeerConnection::OnSessionStateChange);
}
return true;
}
@@ -484,7 +484,6 @@ void PeerConnection::OnMessage(talk_base::Message* msg) {
ready_state_ != PeerConnectionInterface::kClosing) {
// TODO: Take IceOptions into consideration.
session_->StartIce();
- ChangeReadyState(PeerConnectionInterface::kNegotiating);
}
break;
}
@@ -528,6 +527,7 @@ void PeerConnection::OnMessage(talk_base::Message* msg) {
static_cast<JsepSessionDescriptionParams*> (data));
param->result = jsep_signaling_->SetLocalDescription(param->action,
param->desc);
+
stream_handler_->CommitLocalStreams(local_media_streams_);
}
break;
@@ -637,6 +637,24 @@ void PeerConnection::OnSignalingStateChange(
}
}
+void PeerConnection::OnSessionStateChange(cricket::BaseSession* /*session*/,
+ cricket::BaseSession::State state) {
+ switch (state) {
+ case cricket::BaseSession::STATE_INIT:
+ ChangeReadyState(PeerConnectionInterface::kNew);
+ case cricket::BaseSession::STATE_SENTINITIATE:
+ case cricket::BaseSession::STATE_RECEIVEDINITIATE:
+ ChangeReadyState(PeerConnectionInterface::kNegotiating);
+ break;
+ case cricket::BaseSession::STATE_SENTACCEPT:
+ case cricket::BaseSession::STATE_RECEIVEDACCEPT:
+ ChangeReadyState(PeerConnectionInterface::kActive);
+ break;
+ default:
+ break;
+ }
+}
+
void PeerConnection::OnAddStream(MediaStreamInterface* stream) {
stream_handler_->AddRemoteStream(stream);
observer_->OnAddStream(stream);
5 talk/app/webrtc/peerconnectionimpl.h
View
@@ -100,11 +100,12 @@ class PeerConnection : public PeerConnectionInterface,
void OnRemoteStreamRemoved(MediaStreamInterface* remote_stream);
void OnSignalingStateChange(PeerConnectionSignaling::State state);
- // Implement JsepRemoteMediaStreamObserver.
- // TODO: Remove these functions when we no longer need to support ROAP.
virtual void OnAddStream(MediaStreamInterface* stream);
virtual void OnRemoveStream(MediaStreamInterface* stream);
+ // Signals from WebRtcSession.
+ void OnSessionStateChange(cricket::BaseSession* session,
+ cricket::BaseSession::State state);
void ChangeReadyState(PeerConnectionInterface::ReadyState ready_state);
void ChangeSdpState(PeerConnectionInterface::SdpState sdp_state);
42 talk/app/webrtc/peerconnectionimpl_unittest.cc
View
@@ -204,7 +204,6 @@ class PeerConnectionImplTest : public testing::Test {
observer_.SetPeerConnectionInterface(pc_.get());
EXPECT_EQ(PeerConnectionInterface::kNew, observer_.state_);
pc_->StartIce(PeerConnectionInterface::kUseAll);
- EXPECT_EQ(PeerConnectionInterface::kNegotiating, observer_.state_);
}
void CreatePeerConnectionWithInvalidConfiguration() {
@@ -213,8 +212,8 @@ class PeerConnectionImplTest : public testing::Test {
EXPECT_EQ(0u, port_allocator_factory_->stun_configs().size());
EXPECT_EQ(0u, port_allocator_factory_->turn_configs().size());
observer_.SetPeerConnectionInterface(pc_.get());
+ EXPECT_EQ(PeerConnectionInterface::kNew, observer_.state_);
pc_->StartIce(PeerConnectionInterface::kUseAll);
- EXPECT_EQ(PeerConnectionInterface::kNegotiating, observer_.state_);
}
void CreatePeerConnectionWithDifferentConfigurations() {
@@ -382,19 +381,22 @@ TEST_F(PeerConnectionImplTest, RoapReceiveCloseWhileExpectingAnswer) {
EXPECT_EQ(PeerConnectionInterface::kClosed, observer_.state_);
}
-TEST_F(PeerConnectionImplTest, InitiateCall) {
+TEST_F(PeerConnectionImplTest, Jsep_InitiateCall) {
CreatePeerConnection();
AddStream(kStreamLabel1);
- talk_base::scoped_ptr<SessionDescriptionInterface> offer(
- pc_->CreateOffer(webrtc::MediaHints()));
- talk_base::scoped_ptr<SessionDescriptionInterface> answer(
- pc_->CreateAnswer(webrtc::MediaHints(), offer.get()));
+ SessionDescriptionInterface* offer(pc_->CreateOffer(webrtc::MediaHints()));
+ SessionDescriptionInterface* answer(
+ pc_->CreateAnswer(webrtc::MediaHints(), offer));
+ // SetLocalDescription takes ownership of offer.
EXPECT_TRUE(pc_->SetLocalDescription(PeerConnectionInterface::kOffer,
- offer.get()));
+ offer));
+ EXPECT_EQ(PeerConnectionInterface::kNegotiating, observer_.state_);
+ // SetRemoteDescription takes ownership of answer.
EXPECT_TRUE(pc_->SetRemoteDescription(PeerConnectionInterface::kAnswer,
- answer.get()));
+ answer));
+ EXPECT_EQ(PeerConnectionInterface::kActive, observer_.state_);
// Since we answer with the same session description as we offer we can
// check if OnAddStream have been called.
@@ -405,15 +407,17 @@ TEST_F(PeerConnectionImplTest, Jsep_ReceiveCall) {
CreatePeerConnection();
AddStream(kStreamLabel1);
- talk_base::scoped_ptr<SessionDescriptionInterface> offer(
- pc_->CreateOffer(webrtc::MediaHints()));
- talk_base::scoped_ptr<SessionDescriptionInterface> answer(
- pc_->CreateAnswer(webrtc::MediaHints(), offer.get()));
-
+ SessionDescriptionInterface* offer(pc_->CreateOffer(webrtc::MediaHints()));
+ SessionDescriptionInterface* answer(pc_->CreateAnswer(webrtc::MediaHints(),
+ offer));
+ // SetRemoteDescription takes ownership of offer.
EXPECT_TRUE(pc_->SetRemoteDescription(PeerConnectionInterface::kOffer,
- offer.get()));
+ offer));
+ EXPECT_EQ(PeerConnectionInterface::kNegotiating, observer_.state_);
+ // SetLocalDescription takes ownership of answer.
EXPECT_TRUE(pc_->SetLocalDescription(PeerConnectionInterface::kAnswer,
- answer.get()));
+ answer));
+ EXPECT_EQ(PeerConnectionInterface::kActive, observer_.state_);
// Since we answer with the same session description as we offer we can
// check if OnAddStream have been called.
@@ -427,10 +431,10 @@ TEST_F(PeerConnectionImplTest, Jsep_IceCandidates) {
EXPECT_TRUE_WAIT(observer_.ice_complete_, kTimeout);
EXPECT_FALSE(pc_->ProcessIceMessage(observer_.last_candidate_.get()));
- talk_base::scoped_ptr<SessionDescriptionInterface> offer(
- pc_->CreateOffer(webrtc::MediaHints()));
+ SessionDescriptionInterface* offer(pc_->CreateOffer(webrtc::MediaHints()));
+ // SetRemoteDescription takes ownership of offer.
EXPECT_TRUE(pc_->SetRemoteDescription(PeerConnectionInterface::kOffer,
- offer.get()));
+ offer));
EXPECT_TRUE(pc_->ProcessIceMessage(observer_.last_candidate_.get()));
}
16 talk/app/webrtc/peerconnectionsignaling.cc
View
@@ -130,7 +130,7 @@ PeerConnectionSignaling::PeerConnectionSignaling(
PeerConnectionSignaling::~PeerConnectionSignaling() {}
-void PeerConnectionSignaling::OnCandidatesReady() {
+void PeerConnectionSignaling::OnIceComplete() {
if (!VERIFY(state_ == kInitializing))
return;
// If we have a queued remote offer we need to handle this first.
@@ -147,15 +147,13 @@ void PeerConnectionSignaling::OnCandidatesReady() {
}
}
-// TODO: OnCandidateFound is called from webrtcsession when a new
-// IceCandidate is found. Here we don't care about the |content_name| since
-// we currently filter the candidates on candidate names when we use ROAP in
-// WebRtcSession::SetRemoteCandidates.
+// TODO: OnIceCandidate is called from webrtcsession when a new
+// IceCandidate is found. Here we don't care about the content name since
+// we can create a valid SDP based on the candidate names.
// This function will be removed if we implement ROAP on top of JSEP.
-void PeerConnectionSignaling::OnCandidateFound(
- const std::string& content_name,
- const cricket::Candidate& candidate) {
- candidates_.push_back(candidate);
+void PeerConnectionSignaling::OnIceCandidate(
+ const IceCandidateInterface* candidate) {
+ candidates_.push_back(candidate->candidate());
}
void PeerConnectionSignaling::ChangeState(State new_state) {
23 talk/app/webrtc/peerconnectionsignaling.h
View
@@ -35,7 +35,7 @@
#include <string>
#include <vector>
-#include "talk/app/webrtc/candidateobserver.h"
+#include "talk/app/webrtc/jsep.h"
#include "talk/app/webrtc/roaperrorcodes.h"
#include "talk/app/webrtc/roapsession.h"
#include "talk/base/messagehandler.h"
@@ -113,7 +113,7 @@ class MediaStreamInterface;
// pc.ProcessSignalingMessage(remote_message, &local_streams);
-class PeerConnectionSignaling : public CandidateObserver,
+class PeerConnectionSignaling : public IceCandidateObserver,
public talk_base::MessageHandler {
public:
enum State {
@@ -160,16 +160,15 @@ class PeerConnectionSignaling : public CandidateObserver,
// After calling this no more offers or answers to offers can be created.
void SendShutDown();
- // Implements CandidateObserver interface.
- // OnCandidatesReady is called when all local candidates have been collected.
+ // Implements IceCandidateObserver interface.
+ // OnIceComplete is called when all local candidates have been collected.
// This tell PeerConnectionSignaling that it is ready to respond to offers
// or create offer messages.
- virtual void OnCandidatesReady();
+ virtual void OnIceComplete();
- // Implements CandidateObserver interface.
- // OnCandidatesFound is called when a local candidate has been collected.
- virtual void OnCandidateFound(const std::string& content_name,
- const cricket::Candidate& candidate);
+ // Implements IceCandidateObserver interface.
+ // OnIceCandidate is called when a local candidate has been collected.
+ virtual void OnIceCandidate(const IceCandidateInterface* candidate);
// Returns all current remote MediaStreams.
StreamCollection* remote_streams() { return remote_streams_.get(); }
@@ -204,12 +203,12 @@ class PeerConnectionSignaling : public CandidateObserver,
void ChangeState(State new_state);
// Creates an offer on the signaling_thread_.
- // This is either initiated by CreateOffer or OnCandidatesReady.
+ // This is either initiated by CreateOffer or OnIceComplete.
void CreateOffer_s();
// Creates an answer on the signaling thread.
// This is either initiated by ProcessSignalingMessage when a remote offer
- // have been received or OnCandidatesReady.
+ // have been received or OnIceComplete.
void CreateAnswer_s();
// Notifies the provider_ and the active remote media streams
@@ -259,7 +258,7 @@ class PeerConnectionSignaling : public CandidateObserver,
talk_base::scoped_refptr<StreamCollection> local_streams_;
// The set of local transport candidates used in negotiation.
- // This is set by OnCandidatesReady.
+ // This is set by OnIceComplete.
cricket::Candidates candidates_;
// roap_session_ holds the ROAP-specific session state and is used for
14 talk/app/webrtc/peerconnectionsignaling_unittest.cc
View
@@ -304,8 +304,8 @@ class PeerConnectionSignalingTest: public testing::Test {
// signaling1_ send stream with label kStreamLabel1 to signaling2_.
void SetUpOneWayCall() {
// Initialize signaling1_ and signaling_2 by providing the candidates.
- signaling1_->OnCandidatesReady();
- signaling2_->OnCandidatesReady();
+ signaling1_->OnIceComplete();
+ signaling2_->OnIceComplete();
// Create a local stream collection to be sent on signaling1_.
talk_base::scoped_refptr<StreamCollection> local_collection1(
@@ -375,7 +375,7 @@ TEST_F(PeerConnectionSignalingTest, SimpleOneWayCall) {
EXPECT_EQ(PeerConnectionSignaling::kInitializing, signaling1_->GetState());
// Initialize signaling1_ by providing the candidates.
- signaling1_->OnCandidatesReady();
+ signaling1_->OnIceComplete();
EXPECT_EQ(PeerConnectionSignaling::kWaitingForAnswer,
signaling1_->GetState());
// Process posted messages to allow signaling_1 to send the offer.
@@ -386,7 +386,7 @@ TEST_F(PeerConnectionSignalingTest, SimpleOneWayCall) {
EXPECT_EQ(PeerConnectionSignaling::kInitializing, signaling2_->GetState());
// Provide the candidates to signaling_2 and let it process the offer.
- signaling2_->OnCandidatesReady();
+ signaling2_->OnIceComplete();
talk_base::Thread::Current()->ProcessMessages(1);
// Verify that the offer/answer have been exchanged and the state is good.
@@ -465,8 +465,8 @@ TEST_F(PeerConnectionSignalingTest, Glare) {
TEST_F(PeerConnectionSignalingTest, AddRemoveStream) {
// Initialize signaling1_ and signaling_2 by providing the candidates.
- signaling1_->OnCandidatesReady();
- signaling2_->OnCandidatesReady();
+ signaling1_->OnIceComplete();
+ signaling2_->OnIceComplete();
// Create a local stream.
std::string label(kStreamLabel1);
talk_base::scoped_refptr<LocalMediaStreamInterface> stream(
@@ -566,7 +566,7 @@ TEST_F(PeerConnectionSignalingTest, ShutDown) {
TEST_F(PeerConnectionSignalingTest, ReceiveError) {
// Initialize signaling1_
- signaling1_->OnCandidatesReady();
+ signaling1_->OnIceComplete();
talk_base::scoped_refptr<StreamCollection> local_collection1(
CreateLocalCollection1());
12 talk/app/webrtc/roapmessages_unittest.cc
View
@@ -56,13 +56,13 @@ static const char kOfferReference[] =
" \"sdp\" : \"v=0\\r\\n"
"o=- 0 0 IN IP4 127.0.0.1\\r\\n"
"s=\\r\\n"
- "c=IN IP4 0.0.0.0\\r\\n"
"t=0 0\\r\\n"
"m=audio 1 RTP/AVPF\\r\\n"
"a=mid:audio\\r\\n"
"a=rtcp-mux\\r\\n"
- "a=ssrc:1 cname:stream_1_cname mslabel:local_stream_1 "
- "label:local_audio_1\\r\\n"
+ "a=ssrc:1 cname:stream_1_cname\\r\\n"
+ "a=ssrc:1 mslabel:local_stream_1\\r\\n"
+ "a=ssrc:1 label:local_audio_1\\r\\n"
"\",\n" // End of sdp.
" \"seq\" : 1,\n"
" \"tieBreaker\" : 0\n"
@@ -76,13 +76,13 @@ static const char kAnswerReference[] =
" \"sdp\" : \"v=0\\r\\n"
"o=- 0 0 IN IP4 127.0.0.1\\r\\n"
"s=\\r\\n"
- "c=IN IP4 0.0.0.0\\r\\n"
"t=0 0\\r\\n"
"m=audio 1 RTP/AVPF\\r\\n"
"a=mid:audio\\r\\n"
"a=rtcp-mux\\r\\n"
- "a=ssrc:1 cname:stream_1_cname mslabel:local_stream_1 "
- "label:local_audio_1\\r\\n"
+ "a=ssrc:1 cname:stream_1_cname\\r\\n"
+ "a=ssrc:1 mslabel:local_stream_1\\r\\n"
+ "a=ssrc:1 label:local_audio_1\\r\\n"
"\",\n" // End of sdp.
" \"seq\" : 1\n"
"}\n";
692 talk/app/webrtc/test/fakeaudiocapturemodule.cc
View
@@ -0,0 +1,692 @@
+/*
+ * libjingle
+ * Copyright 2012, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/app/webrtc/test/fakeaudiocapturemodule.h"
+
+// Assert on all APIs that are not expected to be used.
+#include <assert.h>
+
+#include "talk/base/refcount.h"
+#include "talk/base/thread.h"
+#include "talk/base/timeutils.h"
+
+// Audio sample value that is high enough that it doesn't occur naturally when
+// frames are being faked. E.g. NetEq will not generate this large sample value
+// unless it has received an audio frame containing a sample of this value.
+// Even simpler buffers would likely just contain audio sample values of 0.
+static const int kHighSampleValue = 10000;
+
+// Same value as src/modules/audio_device/main/source/audio_device_config.h in
+// https://code.google.com/p/webrtc/
+static const uint32 kAdmMaxIdleTimeProcess = 1000;
+
+// Constants here are derived by running VoE using a real ADM.
+// The constants correspond to 10ms of mono audio at 44kHz.
+static const int kTimePerFrameMs = 10;
+static const int kNumberOfChannels = 1;
+static const int kSamplesPerSecond = 44000;
+static const int kTotalDelayMs = 0;
+static const int kClockDriftMs = 0;
+static const uint32_t kMaxVolume = 14392;
+
+FakeAudioCaptureModule::FakeAudioCaptureModule(
+ talk_base::Thread* process_thread)
+ : last_process_time_ms_(0),
+ audioCallback_(NULL),
+ recording_(false),
+ playing_(false),
+ play_is_initialized_(false),
+ rec_is_initialized_(false),
+ current_mic_level_(kMaxVolume),
+ started_(false),
+ next_frame_time_(0),
+ process_thread_(process_thread),
+ frames_received_(0) {
+}
+
+FakeAudioCaptureModule* FakeAudioCaptureModule::Create(
+ talk_base::Thread* process_thread) {
+ if (process_thread == NULL) return NULL;
+
+ talk_base::RefCountedObject<FakeAudioCaptureModule>* capture_module =
+ new talk_base::RefCountedObject<FakeAudioCaptureModule>(process_thread);
+ if (!capture_module->Initialize()) {
+ delete capture_module;
+ return NULL;
+ }
+ return capture_module;
+}
+
+int FakeAudioCaptureModule::frames_received() const {
+ return frames_received_;
+}
+
+int32_t FakeAudioCaptureModule::Version(char* /*version*/,
+ uint32_t& /*remaining_buffer_in_bytes*/,
+ uint32_t& /*position*/) const {
+ assert(false);
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::TimeUntilNextProcess() {
+ const uint32 current_time = talk_base::Time();
+ if (current_time < last_process_time_ms_) {
+ // TODO: wraparound could be handled more gracefully.
+ return 0;
+ }
+ const uint32 elapsed_time = current_time - last_process_time_ms_;
+ if (kAdmMaxIdleTimeProcess < elapsed_time) {
+ return 0;
+ }
+ return kAdmMaxIdleTimeProcess - elapsed_time;
+}
+
+int32_t FakeAudioCaptureModule::Process() {
+ last_process_time_ms_ = talk_base::Time();
+ return 0;
+}
+
+WebRtc_Word32 FakeAudioCaptureModule::ChangeUniqueId(
+ const WebRtc_Word32 /*id*/) {
+ assert(false);
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::ActiveAudioLayer(
+ AudioLayer* /*audioLayer*/) const {
+ assert(false);
+ return 0;
+}
+
+webrtc::AudioDeviceModule::ErrorCode FakeAudioCaptureModule::LastError() const {
+ assert(false);
+ return webrtc::AudioDeviceModule::kAdmErrNone;
+}
+
+int32_t FakeAudioCaptureModule::RegisterEventObserver(
+ webrtc::AudioDeviceObserver* /*eventCallback*/) {
+ // Only used to report warnings and errors. This fake implementation won't
+ // generate any so discard this callback.
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::RegisterAudioCallback(
+ webrtc::AudioTransport* audioCallback) {
+ audioCallback_ = audioCallback;
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::Init() {
+ // Initialize is called by the factory method. Safe to ignore this Init call.
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::Terminate() {
+ // Clean up in the destructor. No action here, just success.
+ return 0;
+}
+
+bool FakeAudioCaptureModule::Initialized() const {
+ assert(false);
+ return 0;
+}
+
+int16_t FakeAudioCaptureModule::PlayoutDevices() {
+ assert(false);
+ return 0;
+}
+
+int16_t FakeAudioCaptureModule::RecordingDevices() {
+ assert(false);
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::PlayoutDeviceName(
+ uint16_t /*index*/,
+ char /*name*/[webrtc::kAdmMaxDeviceNameSize],
+ char /*guid*/[webrtc::kAdmMaxGuidSize]) {
+ assert(false);
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::RecordingDeviceName(
+ uint16_t /*index*/,
+ char /*name*/[webrtc::kAdmMaxDeviceNameSize],
+ char /*guid*/[webrtc::kAdmMaxGuidSize]) {
+ assert(false);
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::SetPlayoutDevice(uint16_t /*index*/) {
+ // No playout device, just playing from file. Return success.
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::SetPlayoutDevice(WindowsDeviceType /*device*/) {
+ if (play_is_initialized_) {
+ return -1;
+ }
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::SetRecordingDevice(uint16_t /*index*/) {
+ // No recording device, just dropping audio. Return success.
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::SetRecordingDevice(
+ WindowsDeviceType /*device*/) {
+ if (rec_is_initialized_) {
+ return -1;
+ }
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::PlayoutIsAvailable(bool* /*available*/) {
+ assert(false);
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::InitPlayout() {
+ play_is_initialized_ = true;
+ return 0;
+}
+
+bool FakeAudioCaptureModule::PlayoutIsInitialized() const {
+ return play_is_initialized_;
+}
+
+int32_t FakeAudioCaptureModule::RecordingIsAvailable(bool* /*available*/) {
+ assert(false);
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::InitRecording() {
+ rec_is_initialized_ = true;
+ return 0;
+}
+
+bool FakeAudioCaptureModule::RecordingIsInitialized() const {
+ assert(false);
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::StartPlayout() {
+ if (!play_is_initialized_) {
+ assert(false);
+ return -1;
+ }
+ playing_ = true;
+ UpdateProcessing();
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::StopPlayout() {
+ playing_ = false;
+ UpdateProcessing();
+ return 0;
+}
+
+bool FakeAudioCaptureModule::Playing() const {
+ return playing_;
+}
+
+int32_t FakeAudioCaptureModule::StartRecording() {
+ if (!rec_is_initialized_) {
+ assert(false);
+ return -1;
+ }
+ recording_ = true;
+ UpdateProcessing();
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::StopRecording() {
+ recording_ = false;
+ UpdateProcessing();
+ return 0;
+}
+
+bool FakeAudioCaptureModule::Recording() const {
+ return recording_;
+}
+
+int32_t FakeAudioCaptureModule::SetAGC(bool /*enable*/) {
+ // No AGC but not needed since audio is pregenerated. Return success.
+ return 0;
+}
+
+bool FakeAudioCaptureModule::AGC() const {
+ assert(false);
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::SetWaveOutVolume(uint16_t /*volumeLeft*/,
+ uint16_t /*volumeRight*/) {
+ assert(false);
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::WaveOutVolume(uint16_t* /*volumeLeft*/,
+ uint16_t* /*volumeRight*/) const {
+ assert(false);
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::SpeakerIsAvailable(bool* available) {
+ // No speaker, just dropping audio. Return success.
+ *available = true;
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::InitSpeaker() {
+ // No speaker, just playing from file. Return success.
+ return 0;
+}
+
+bool FakeAudioCaptureModule::SpeakerIsInitialized() const {
+ assert(false);
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::MicrophoneIsAvailable(bool* available) {
+ // No microphone, just playing from file. Return success.
+ *available = true;
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::InitMicrophone() {
+ // No microphone, just playing from file. Return success.
+ return 0;
+}
+
+bool FakeAudioCaptureModule::MicrophoneIsInitialized() const {
+ assert(false);
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::SpeakerVolumeIsAvailable(bool* /*available*/) {
+ assert(false);
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::SetSpeakerVolume(uint32_t /*volume*/) {
+ assert(false);
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::SpeakerVolume(uint32_t* /*volume*/) const {
+ assert(false);
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::MaxSpeakerVolume(
+ uint32_t* /*maxVolume*/) const {
+ assert(false);
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::MinSpeakerVolume(
+ uint32_t* /*minVolume*/) const {
+ assert(false);
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::SpeakerVolumeStepSize(
+ uint16_t* /*stepSize*/) const {
+ assert(false);
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::MicrophoneVolumeIsAvailable(
+ bool* /*available*/) {
+ assert(false);
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::SetMicrophoneVolume(uint32_t /*volume*/) {
+ assert(false);
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::MicrophoneVolume(uint32_t* /*volume*/) const {
+ assert(false);
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::MaxMicrophoneVolume(uint32_t* maxVolume) const {
+ *maxVolume = kMaxVolume;
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::MinMicrophoneVolume(
+ uint32_t* /*minVolume*/) const {
+ assert(false);
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::MicrophoneVolumeStepSize(
+ uint16_t* /*stepSize*/) const {
+ assert(false);
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::SpeakerMuteIsAvailable(bool* /*available*/) {
+ assert(false);
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::SetSpeakerMute(bool /*enable*/) {
+ assert(false);
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::SpeakerMute(bool* /*enabled*/) const {
+ assert(false);
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::MicrophoneMuteIsAvailable(bool* /*available*/) {
+ assert(false);
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::SetMicrophoneMute(bool /*enable*/) {
+ assert(false);
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::MicrophoneMute(bool* /*enabled*/) const {
+ assert(false);
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::MicrophoneBoostIsAvailable(
+ bool* /*available*/) {
+ assert(false);
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::SetMicrophoneBoost(bool /*enable*/) {
+ assert(false);
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::MicrophoneBoost(bool* /*enabled*/) const {
+ assert(false);
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::StereoPlayoutIsAvailable(
+ bool* available) const {
+ // No recording device, just dropping audio. Stereo can be dropped just
+ // as easily as mono.
+ *available = true;
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::SetStereoPlayout(bool /*enable*/) {
+ // No recording device, just dropping audio. Stereo can be dropped just
+ // as easily as mono.
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::StereoPlayout(bool* /*enabled*/) const {
+ assert(false);
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::StereoRecordingIsAvailable(
+ bool* available) const {
  // Keep things simple. No stereo recording.
+ *available = false;
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::SetStereoRecording(bool enable) {
+ if (!enable) {
+ return 0;
+ }
+ return -1;
+}
+
+int32_t FakeAudioCaptureModule::StereoRecording(bool* /*enabled*/) const {
+ assert(false);
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::SetRecordingChannel(
+ const ChannelType channel) {
+ if (channel != AudioDeviceModule::kChannelBoth) {
+ // There is no right or left in mono. I.e. kChannelBoth should be used for
+ // mono.
+ assert(false);
+ return -1;
+ }
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::RecordingChannel(ChannelType* channel) const {
+ // Stereo recording not supported. However, WebRTC ADM returns kChannelBoth
+ // in that case. Do the same here.
+ *channel = AudioDeviceModule::kChannelBoth;
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::SetPlayoutBuffer(const BufferType /*type*/,
+ uint16_t /*sizeMS*/) {
+ assert(false);
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::PlayoutBuffer(BufferType* /*type*/,
+ uint16_t* /*sizeMS*/) const {
+ assert(false);
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::PlayoutDelay(uint16_t* delayMS) const {
+ // No delay since audio frames are dropped.
+ *delayMS = 0;
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::RecordingDelay(uint16_t* /*delayMS*/) const {
+ assert(false);
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::CPULoad(uint16_t* /*load*/) const {
+ assert(false);
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::StartRawOutputFileRecording(
+ const char /*pcmFileNameUTF8*/[webrtc::kAdmMaxFileNameSize]) {
+ assert(false);
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::StopRawOutputFileRecording() {
+ assert(false);
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::StartRawInputFileRecording(
+ const char /*pcmFileNameUTF8*/[webrtc::kAdmMaxFileNameSize]) {
+ assert(false);
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::StopRawInputFileRecording() {
+ assert(false);
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::SetRecordingSampleRate(
+ const uint32_t /*samplesPerSec*/) {
+ assert(false);
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::RecordingSampleRate(
+ uint32_t* /*samplesPerSec*/) const {
+ assert(false);
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::SetPlayoutSampleRate(
+ const uint32_t /*samplesPerSec*/) {
+ assert(false);
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::PlayoutSampleRate(
+ uint32_t* /*samplesPerSec*/) const {
+ assert(false);
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::ResetAudioDevice() {
+ assert(false);
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::SetLoudspeakerStatus(bool /*enable*/) {
+ assert(false);
+ return 0;
+}
+
+int32_t FakeAudioCaptureModule::GetLoudspeakerStatus(bool* /*enabled*/) const {
+ assert(false);
+ return 0;
+}
+
+void FakeAudioCaptureModule::OnMessage(talk_base::Message* /*msg*/) {
+ ProcessFrame();
+}
+
+bool FakeAudioCaptureModule::Initialize() {
+  // Set the send buffer samples high enough that they would not occur on the
+ // remote side unless a packet containing a sample of that magnitude has been
+ // sent to it. Note that the audio processing pipeline will likely distort the
+ // original signal.
+ SetSendBuffer(kHighSampleValue);
+ last_process_time_ms_ = talk_base::Time();
+ return true;
+}
+
+void FakeAudioCaptureModule::SetSendBuffer(int value) {
+ uint16* buffer_ptr = reinterpret_cast<uint16*>(send_buffer_);
+ const int buffer_size_in_samples = sizeof(send_buffer_) /
+ (sizeof(uint16) / sizeof(char));
+ for (int i = 0; i < buffer_size_in_samples; ++i) {
+ buffer_ptr[i] = value;
+ }
+}
+
+void FakeAudioCaptureModule::ResetRecBuffer() {
+ memset(rec_buffer_, 0, sizeof(rec_buffer_));
+}
+
+bool FakeAudioCaptureModule::CheckRecBuffer(int value) {
+ const uint16* buffer_ptr = reinterpret_cast<const uint16*>(rec_buffer_);
+ const int buffer_size_in_samples = sizeof(rec_buffer_) /
+ (sizeof(uint16) / sizeof(char));
+ for (int i = 0; i < buffer_size_in_samples; ++i) {
+ if (buffer_ptr[i] >= value) return true;
+ }
+ return false;
+}
+
+void FakeAudioCaptureModule::UpdateProcessing() {
+ const bool process = recording_ || playing_;
+ if (process) {
+ if (started_) {
+ // Already started.
+ return;
+ }
+ process_thread_->Post(this);
+ } else {
+ if (!started_) {
+ // Already stopped.
+ return;
+ }
+ process_thread_->Clear(this);
+ }
+}
+
+void FakeAudioCaptureModule::ProcessFrame() {
+ if (!started_) {
+ next_frame_time_ = talk_base::Time();
+ started_ = true;
+ }
+ // Receive and send frames every kTimePerFrameMs.
+ if (audioCallback_ != NULL) {
+ if (playing_) {
+ ReceiveFrame();
+ }
+ if (recording_) {
+ SendFrame();
+ }
+ }
+
+ next_frame_time_ += kTimePerFrameMs;
+ const uint32 current_time = talk_base::Time();
+ const uint32 wait_time = (next_frame_time_ > current_time) ?
+ next_frame_time_ - current_time : 0;
+ process_thread_->PostDelayed(wait_time, this);
+}
+
+void FakeAudioCaptureModule::ReceiveFrame() {
+ ResetRecBuffer();
+ uint32_t nSamplesOut = 0;
+ if (audioCallback_->NeedMorePlayData(kNumberSamples, kNumberBytesPerSample,
+ kNumberOfChannels, kSamplesPerSecond,
+ rec_buffer_, nSamplesOut) != 0) {
+ assert(false);
+ }
+ assert(nSamplesOut == kNumberSamples);
+ // The SetBuffer() function ensures that after decoding, the audio buffer
+ // should contain samples of similar magnitude (there is likely to be some
+ // distortion due to the audio pipeline). If one sample is detected to
+ // have the same or greater magnitude somewhere in the frame, an actual frame
+ // has been received from the remote side (i.e. faked frames are not being
+ // pulled).
+ if (CheckRecBuffer(kHighSampleValue)) ++frames_received_;
+}
+
+void FakeAudioCaptureModule::SendFrame() {
+ if (audioCallback_->RecordedDataIsAvailable(send_buffer_, kNumberSamples,
+ kNumberBytesPerSample,
+ kNumberOfChannels,
+ kSamplesPerSecond, kTotalDelayMs,
+ kClockDriftMs, current_mic_level_,
+ current_mic_level_) != 0) {
+ assert(false);
+ }
+}
254 talk/app/webrtc/test/fakeaudiocapturemodule.h
View
@@ -0,0 +1,254 @@
+/*
+ * libjingle
+ * Copyright 2012, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// This class implements an AudioCaptureModule that can be used to detect if
+// audio is being received properly if it is fed by another AudioCaptureModule
+// in some arbitrary audio pipeline where they are connected. It does not play
+// out or record any audio so it does not need access to any hardware and can
+// therefore be used in the gtest testing framework.
+
+#ifndef TALK_APP_WEBRTC_TEST_FAKEAUDIOCAPTUREMODULE_H_
+#define TALK_APP_WEBRTC_TEST_FAKEAUDIOCAPTUREMODULE_H_
+
+#include "talk/base/basictypes.h"
+#include "talk/base/messagehandler.h"
+
+#ifdef WEBRTC_RELATIVE_PATH
+#include "common_types.h"
+#include "modules/audio_device/main/interface/audio_device.h"
+#else
+#include "third_party/webrtc/files/include/audio_device.h"
+#include "third_party/webrtc/files/include/common_types.h"
+#endif // WEBRTC_RELATIVE_PATH
+
+namespace talk_base {
+
+class Thread;
+
+} // namespace talk_base
+
+class FakeAudioCaptureModule
+ : public webrtc::AudioDeviceModule,
+ public talk_base::MessageHandler {
+ public:
+ // Creates a FakeAudioCaptureModule or returns NULL on failure.
+ // |process_thread| is used to push and pull audio frames to and from the
+ // returned instance. Note: ownership of |process_thread| is not handed over.
+ static FakeAudioCaptureModule* Create(talk_base::Thread* process_thread);
+
+ // Returns the number of frames that have been successfully pulled by the
+ // instance. Note that correctly detecting success can only be done if the
+ // pulled frame was generated/pushed from a FakeAudioCaptureModule.
+ int frames_received() const;
+
+  // The following functions are inherited from webrtc::AudioDeviceModule. They
+  // are implemented if they are called in some way by PeerConnection. The
+  // functions that are not called have an empty implementation returning
+  // success. If a function is not expected to be called, an assertion is
+  // triggered if it is called.
+ virtual int32_t Version(char* version,
+ uint32_t& remaining_buffer_in_bytes,
+ uint32_t& position) const;
+ virtual int32_t TimeUntilNextProcess();
+ virtual int32_t Process();
+ virtual WebRtc_Word32 ChangeUniqueId(const WebRtc_Word32 id);
+
+ virtual int32_t ActiveAudioLayer(AudioLayer* audioLayer) const;
+
+ virtual ErrorCode LastError() const;
+ virtual int32_t RegisterEventObserver(
+ webrtc::AudioDeviceObserver* eventCallback);
+
+ virtual int32_t RegisterAudioCallback(webrtc::AudioTransport* audioCallback);
+
+ virtual int32_t Init();
+ virtual int32_t Terminate();
+ virtual bool Initialized() const;
+
+ virtual int16_t PlayoutDevices();
+ virtual int16_t RecordingDevices();
+ virtual int32_t PlayoutDeviceName(uint16_t index,
+ char name[webrtc::kAdmMaxDeviceNameSize],
+ char guid[webrtc::kAdmMaxGuidSize]);
+ virtual int32_t RecordingDeviceName(uint16_t index,
+ char name[webrtc::kAdmMaxDeviceNameSize],
+ char guid[webrtc::kAdmMaxGuidSize]);
+
+ virtual int32_t SetPlayoutDevice(uint16_t index);
+ virtual int32_t SetPlayoutDevice(WindowsDeviceType device);
+ virtual int32_t SetRecordingDevice(uint16_t index);
+ virtual int32_t SetRecordingDevice(WindowsDeviceType device);
+
+ virtual int32_t PlayoutIsAvailable(bool* available);
+ virtual int32_t InitPlayout();
+ virtual bool PlayoutIsInitialized() const;
+ virtual int32_t RecordingIsAvailable(bool* available);
+ virtual int32_t InitRecording();
+ virtual bool RecordingIsInitialized() const;
+
+ virtual int32_t StartPlayout();
+ virtual int32_t StopPlayout();
+ virtual bool Playing() const;
+ virtual int32_t StartRecording();
+ virtual int32_t StopRecording();
+ virtual bool Recording() const;
+
+ virtual int32_t SetAGC(bool enable);
+ virtual bool AGC() const;
+
+ virtual int32_t SetWaveOutVolume(uint16_t volumeLeft,
+ uint16_t volumeRight);
+ virtual int32_t WaveOutVolume(uint16_t* volumeLeft,
+ uint16_t* volumeRight) const;
+
+ virtual int32_t SpeakerIsAvailable(bool* available);
+ virtual int32_t InitSpeaker();
+ virtual bool SpeakerIsInitialized() const;
+ virtual int32_t MicrophoneIsAvailable(bool* available);
+ virtual int32_t InitMicrophone();
+ virtual bool MicrophoneIsInitialized() const;
+
+ virtual int32_t SpeakerVolumeIsAvailable(bool* available);
+ virtual int32_t SetSpeakerVolume(uint32_t volume);
+ virtual int32_t SpeakerVolume(uint32_t* volume) const;
+ virtual int32_t MaxSpeakerVolume(uint32_t* maxVolume) const;
+ virtual int32_t MinSpeakerVolume(uint32_t* minVolume) const;
+ virtual int32_t SpeakerVolumeStepSize(uint16_t* stepSize) const;
+
+ virtual int32_t MicrophoneVolumeIsAvailable(bool* available);
+ virtual int32_t SetMicrophoneVolume(uint32_t volume);
+ virtual int32_t MicrophoneVolume(uint32_t* volume) const;
+ virtual int32_t MaxMicrophoneVolume(uint32_t* maxVolume) const;
+
+ virtual int32_t MinMicrophoneVolume(uint32_t* minVolume) const;
+ virtual int32_t MicrophoneVolumeStepSize(uint16_t* stepSize) const;
+
+ virtual int32_t SpeakerMuteIsAvailable(bool* available);
+ virtual int32_t SetSpeakerMute(bool enable);
+ virtual int32_t SpeakerMute(bool* enabled) const;
+
+ virtual int32_t MicrophoneMuteIsAvailable(bool* available);
+ virtual int32_t SetMicrophoneMute(bool enable);
+ virtual int32_t MicrophoneMute(bool* enabled) const;
+
+ virtual int32_t MicrophoneBoostIsAvailable(bool* available);
+ virtual int32_t SetMicrophoneBoost(bool enable);
+ virtual int32_t MicrophoneBoost(bool* enabled) const;
+
+ virtual int32_t StereoPlayoutIsAvailable(bool* available) const;
+ virtual int32_t SetStereoPlayout(bool enable);
+ virtual int32_t StereoPlayout(bool* enabled) const;
+ virtual int32_t StereoRecordingIsAvailable(bool* available) const;
+ virtual int32_t SetStereoRecording(bool enable);
+ virtual int32_t StereoRecording(bool* enabled) const;
+ virtual int32_t SetRecordingChannel(const ChannelType channel);
+ virtual int32_t RecordingChannel(ChannelType* channel) const;
+
+ virtual int32_t SetPlayoutBuffer(const BufferType type,
+ uint16_t sizeMS = 0);
+ virtual int32_t PlayoutBuffer(BufferType* type,
+ uint16_t* sizeMS) const;
+ virtual int32_t PlayoutDelay(uint16_t* delayMS) const;
+ virtual int32_t RecordingDelay(uint16_t* delayMS) const;
+
+ virtual int32_t CPULoad(uint16_t* load) const;
+
+ virtual int32_t StartRawOutputFileRecording(
+ const char pcmFileNameUTF8[webrtc::kAdmMaxFileNameSize]);
+ virtual int32_t StopRawOutputFileRecording();
+ virtual int32_t StartRawInputFileRecording(
+ const char pcmFileNameUTF8[webrtc::kAdmMaxFileNameSize]);
+ virtual int32_t StopRawInputFileRecording();
+
+ virtual int32_t SetRecordingSampleRate(const uint32_t samplesPerSec);
+ virtual int32_t RecordingSampleRate(uint32_t* samplesPerSec) const;
+ virtual int32_t SetPlayoutSampleRate(const uint32_t samplesPerSec);
+ virtual int32_t PlayoutSampleRate(uint32_t* samplesPerSec) const;
+
+ virtual int32_t ResetAudioDevice();
+ virtual int32_t SetLoudspeakerStatus(bool enable);
+ virtual int32_t GetLoudspeakerStatus(bool* enabled) const;
+ // End of functions inherited from webrtc::AudioDeviceModule.
+
+ // The following function is inherited from talk_base::MessageHandler.
+ virtual void OnMessage(talk_base::Message* msg);
+
+ protected:
+ explicit FakeAudioCaptureModule(talk_base::Thread* process_thread);
+
+ private:
+ // Constants here are derived by running VoE using a real ADM.
+ // The constants correspond to 10ms of mono audio at 44kHz.
+ static const uint32_t kNumberSamples = 440;
+ static const int kNumberBytesPerSample = 2;
+
+ bool Initialize();
+ // SetBuffer() sets all samples in send_buffer_ to |value|.
+ void SetSendBuffer(int value);
+ // Resets rec_buffer_. I.e. sets all rec_buffer_ samples to 0.
+ void ResetRecBuffer();
+  // Returns true if rec_buffer_ contains one or more samples greater than or
+ // equal to |value|.
+ bool CheckRecBuffer(int value);
+
+ // Starts or stops the pushing and pulling of audio frames depending on if
+ // recording or playback has been enabled/started.
+ void UpdateProcessing();
+
+  // Periodically called function that ensures that frames are pulled and pushed
+ // periodically if enabled/started.
+ void ProcessFrame();
+ // Pulls frames from the registered webrtc::AudioTransport.
+ void ReceiveFrame();
+ // Pushes frames to the registered webrtc::AudioTransport.
+ void SendFrame();
+
+ uint32 last_process_time_ms_;
+
+ // Callback for playout and recording.
+ webrtc::AudioTransport* audioCallback_;
+
+ bool recording_;
+ bool playing_;
+
+ bool play_is_initialized_;
+ bool rec_is_initialized_;
+
+ uint32_t current_mic_level_;
+
+ bool started_;
+ uint32 next_frame_time_;
+
+ talk_base::Thread* process_thread_;
+
+ char rec_buffer_[kNumberSamples * kNumberBytesPerSample];
+ char send_buffer_[kNumberSamples * kNumberBytesPerSample];
+
+ int frames_received_;
+};
+
+#endif // TALK_APP_WEBRTC_TEST_FAKEAUDIOCAPTUREMODULE_H_
184 talk/app/webrtc/test/fakevideocapturemodule.cc
View
@@ -0,0 +1,184 @@
+/*
+ * libjingle
+ * Copyright 2011, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/app/webrtc/test/fakevideocapturemodule.h"
+
+#include "talk/app/webrtc/test/fileframesource.h"
+#include "talk/app/webrtc/test/i420framesource.h"
+#include "talk/app/webrtc/test/staticframesource.h"
+#include "talk/base/refcount.h"
+#include "talk/base/stream.h"
+#include "talk/base/thread.h"
+
+#ifdef WEBRTC_RELATIVE_PATH
+#include "modules/video_capture/main/interface/video_capture_defines.h"
+#include "modules/video_capture/main/interface/video_capture_factory.h"
+#else
+#include "third_party/webrtc/files/include/video_capture_defines.h"
+#include "third_party/webrtc/files/include/video_capture_factory.h"
+#endif
+
+// Default capture parameters used until SetFrameRate()/SetSize() are called.
+static const int kStartFrameRate = 30;  // Frames per second.
+static const int kStartWidth = 352;     // CIF width in pixels.
+static const int kStartHeight = 288;    // CIF height in pixels.
+// Timestamp, in ms, assigned to the first generated frame.
+static const uint32 kStartTimeStamp = 2000;
+
+// Initializes all bookkeeping to the "not started" state. Real setup
+// (frame source, frame size, capture implementation, frame rate) happens
+// in Init(), called by the Create() factories.
+FakeVideoCaptureModule::FakeVideoCaptureModule(talk_base::Thread* camera_thread)
+    : frame_source_(NULL),
+      camera_thread_(camera_thread),
+      video_capture_(NULL),
+      started_(false),
+      capture_started_(false),
+      sent_frames_(0),
+      next_frame_time_(0),
+      time_per_frame_ms_(0),
+      fps_(0),
+      width_(0),
+      height_(0) {}
+
+FakeVideoCaptureModule::~FakeVideoCaptureModule() {
+  // Cancel any pending self-posted frame-generation messages first.
+  StopCapturing();
+  // The memory associated with video_capture_ is owned by impl_.
+}
+
+// Creates a module that serves frames from a static (generated) source.
+// Returns NULL on initialization failure; otherwise the caller owns the
+// returned ref-counted module.
+FakeVideoCaptureModule*
+FakeVideoCaptureModule::Create(talk_base::Thread* camera_thread) {
+  talk_base::RefCountedObject<FakeVideoCaptureModule>* module =
+      new talk_base::RefCountedObject<FakeVideoCaptureModule>(camera_thread);
+  if (module->Init(new StaticFrameSource())) {
+    return module;
+  }
+  delete module;
+  return NULL;
+}
+
+// Creates a module that serves frames read (and looped) from |file_name|.
+// Returns NULL if the file cannot be opened or initialization fails.
+FakeVideoCaptureModule*
+FakeVideoCaptureModule::Create(talk_base::Thread* camera_thread,
+                               const std::string& file_name) {
+  talk_base::RefCountedObject<FakeVideoCaptureModule>* module =
+      new talk_base::RefCountedObject<FakeVideoCaptureModule>(camera_thread);
+  if (module->Init(FileFrameSource::Create(file_name))) {
+    return module;
+  }
+  delete module;
+  return NULL;
+}
+
+void FakeVideoCaptureModule::StartCapturing() {
+  // Drop any already-queued frame-generation message so that at most one
+  // message for |this| is ever outstanding.
+  camera_thread_->Clear(this);
+  // Only one post, no need to add any data to post.
+  camera_thread_->Post(this);
+}
+
+void FakeVideoCaptureModule::StopCapturing() {
+  // Each frame is produced by a self-posted message (see GenerateNewFrame),
+  // so clearing all pending messages for |this| halts frame generation.
+  camera_thread_->Clear(this);
+}
+
+// Adopts |frame_source| as the frame provider and propagates the current
+// frame dimensions to it. Returns false when no source is supplied.
+bool FakeVideoCaptureModule::RegisterFrameSource(
+    I420FrameSource* frame_source) {
+  const bool valid = (frame_source != NULL);
+  if (valid) {
+    frame_source_ = frame_source;
+    frame_source_->SetFrameSize(width_, height_);
+  }
+  return valid;
+}
+
+// TODO: deal with the rounding error.
+// Sets the target frame rate. Rejects non-positive rates. The frame period
+// is derived with integer division, so rates that do not divide 1000 evenly
+// are approximated (see TODO above).
+bool FakeVideoCaptureModule::SetFrameRate(int fps) {
+  if (fps < 1) {
+    return false;
+  }
+  fps_ = fps;
+  time_per_frame_ms_ = 1000 / fps_;
+  return true;
+}
+
+// Updates the frame dimensions, reallocates the frame buffer to match, and
+// informs the frame source (when one is registered) of the new size.
+void FakeVideoCaptureModule::SetSize(int width, int height) {
+  width_ = width;
+  height_ = height;
+  image_.reset(new uint8[GetI420FrameLengthInBytes()]);
+  if (frame_source_ == NULL) {
+    return;
+  }
+  frame_source_->SetFrameSize(width_, height_);
+}
+
+// Wires up the frame source, the default geometry, the external-capture
+// implementation, and the default frame rate. Returns false on any failure.
+bool FakeVideoCaptureModule::Init(I420FrameSource* frame_source) {
+  if (!RegisterFrameSource(frame_source)) {
+    return false;
+  }
+  SetSize(kStartWidth, kStartHeight);
+  impl_ = webrtc::VideoCaptureFactory::Create(0,  // id
+                                              video_capture_);
+  if (impl_.get() == NULL || video_capture_ == NULL) {
+    return false;
+  }
+  return SetFrameRate(kStartFrameRate);
+}
+
+// TODO: handle time wraparound.
+// Produces one frame from the frame source, feeds it to |video_capture_|,
+// and schedules the next invocation so frames arrive at roughly fps_.
+void FakeVideoCaptureModule::GenerateNewFrame() {
+  if (!started_) {
+    // First frame: anchor the schedule to "now".
+    next_frame_time_ = talk_base::Time();
+    started_ = true;
+  }
+  size_t read = 0;
+  if (frame_source_->GetFrame(image_.get(), &read)) {
+    ASSERT(read == GetI420FrameLengthInBytes());
+
+    webrtc::VideoCaptureCapability capability;
+    capability.width = width_;
+    capability.height = height_;
+    capability.rawType = webrtc::kVideoI420;
+    video_capture_->IncomingFrame(image_.get(), GetI420FrameLengthInBytes(),
+                                  capability, GetTimestamp());
+    ++sent_frames_;
+  }
+  else {
+    // The source is expected to always deliver (file sources loop forever).
+    ASSERT(false);
+  }
+  // Advance the ideal schedule by one frame period and wait only for the
+  // remainder, so late frames do not accumulate drift.
+  next_frame_time_ += time_per_frame_ms_;
+  const uint32 current_time = talk_base::Time();
+  const uint32 wait_time = (next_frame_time_ > current_time) ?
+      next_frame_time_ - current_time : 0;
+  camera_thread_->PostDelayed(wait_time, this);
+}
+
+// Length in bytes of one I420 frame at the current width/height.
+size_t FakeVideoCaptureModule::GetI420FrameLengthInBytes() {
+  const size_t frame_length =
+      webrtc_testing::GetI420FrameLengthInBytes(width_, height_);
+  return frame_length;
+}
+
+// TODO: handle timestamp wraparound.
+// Synthetic capture timestamp: advances by exactly one frame period per
+// frame already sent.
+uint32 FakeVideoCaptureModule::GetTimestamp() {
+  const uint32 elapsed_ms = sent_frames_ * time_per_frame_ms_;
+  return kStartTimeStamp + elapsed_ms;
+}
205 talk/app/webrtc/test/fakevideocapturemodule.h
View
@@ -0,0 +1,205 @@
+/*
+ * libjingle
+ * Copyright 2011, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// This class implements the VideoCaptureModule interface. Instead of capturing
+// frames from a camera it captures frames from a file or a static frame.
+
+#ifndef TALK_APP_WEBRTC_TEST_FAKEVIDEOCAPTUREMODULE_H_
+#define TALK_APP_WEBRTC_TEST_FAKEVIDEOCAPTUREMODULE_H_
+
+#include <string>
+
+#include "talk/base/basictypes.h"
+#include "talk/base/messagehandler.h"
+#include "talk/base/scoped_ptr.h"
+#include "talk/base/scoped_ref_ptr.h"
+
+#ifdef WEBRTC_RELATIVE_PATH
+#include "common_types.h"
+#include "modules/video_capture/main/interface/video_capture.h"
+#else
+#include "third_party/webrtc/files/include/common_types.h"
+#include "third_party/webrtc/files/include/video_capture.h"
+#endif
+
+namespace talk_base {
+
+class Thread;
+
+} // namespace talk_base
+
+namespace webrtc {
+
+class VideoCaptureExternal;
+
+} // namespace webrtc
+
+class I420FrameSource;
+
+// Fake webrtc::VideoCaptureModule that generates frames from an
+// I420FrameSource (a file or a static image) on |camera_thread_| instead of
+// a real camera. Most VideoCaptureModule calls are forwarded to a real
+// external-capture implementation (|impl_|); frame production is simulated
+// via self-posted messages (see OnMessage()/GenerateNewFrame()).
+class FakeVideoCaptureModule
+    : public webrtc::VideoCaptureModule,
+      public talk_base::MessageHandler {
+ public:
+  virtual ~FakeVideoCaptureModule();
+
+  // Factory methods; return NULL on failure. The first variant serves a
+  // static frame, the second loops over the I420 file |file_name|.
+  static FakeVideoCaptureModule* Create(talk_base::Thread* camera_thread);
+  static FakeVideoCaptureModule* Create(talk_base::Thread* camera_thread,
+                                        const std::string& file_name);
+
+  // Start/stop posting frame-generation messages to |camera_thread_|.
+  void StartCapturing();
+  void StopCapturing();
+
+  bool SetFrameRate(int fps);
+  void SetSize(int width, int height);
+  // Number of frames delivered to |video_capture_| so far.
+  int sent_frames() const { return sent_frames_; }
+
+  // webrtc::Module implementation, forwarded to |impl_|.
+  virtual int32_t ChangeUniqueId(const int32_t id) {
+    return impl_->ChangeUniqueId(id);
+  }
+
+  virtual int32_t TimeUntilNextProcess() {
+    return impl_->TimeUntilNextProcess();
+  }
+
+  virtual int32_t Process() {
+    return impl_->Process();
+  }
+
+  virtual WebRtc_Word32 RegisterCaptureDataCallback(
+      webrtc::VideoCaptureDataCallback& dataCallback) {
+    return impl_->RegisterCaptureDataCallback(dataCallback);
+  }
+
+  virtual WebRtc_Word32 DeRegisterCaptureDataCallback() {
+    return impl_->DeRegisterCaptureDataCallback();
+  }
+
+  virtual WebRtc_Word32 RegisterCaptureCallback(
+      webrtc::VideoCaptureFeedBack& callBack) {
+    return impl_->RegisterCaptureCallback(callBack);
+  }
+
+  virtual WebRtc_Word32 DeRegisterCaptureCallback() {
+    return impl_->DeRegisterCaptureCallback();
+  }
+
+  // StartCapture/StopCapture only toggle |capture_started_|; the actual
+  // frame flow is driven by StartCapturing()/StopCapturing().
+  virtual WebRtc_Word32 StartCapture(
+      const webrtc::VideoCaptureCapability& capability) {
+    capture_started_ = true;
+    return 0;
+  }
+
+  virtual WebRtc_Word32 StopCapture() {
+    capture_started_ = false;
+    return 0;
+  }
+
+  virtual WebRtc_Word32 StartSendImage(const webrtc::VideoFrame& videoFrame,
+                                       WebRtc_Word32 frameRate = 1) {
+    // Forward the caller-supplied frame rate. (Previously this passed
+    // |frameRate = 1|, silently discarding the requested rate.)
+    return impl_->StartSendImage(videoFrame, frameRate);
+  }
+
+  virtual WebRtc_Word32 StopSendImage() {
+    return impl_->StopSendImage();
+  }
+
+  virtual const WebRtc_UWord8* CurrentDeviceName() const {
+    return impl_->CurrentDeviceName();
+  }
+
+  virtual bool CaptureStarted() {
+    return capture_started_;
+  }
+
+  virtual WebRtc_Word32 CaptureSettings(
+      webrtc::VideoCaptureCapability& settings) {
+    return impl_->CaptureSettings(settings);
+  }
+
+  virtual WebRtc_Word32 SetCaptureDelay(WebRtc_Word32 delayMS) {
+    return impl_->SetCaptureDelay(delayMS);
+  }
+
+  virtual WebRtc_Word32 CaptureDelay() {
+    return impl_->CaptureDelay();
+  }
+
+  virtual WebRtc_Word32 SetCaptureRotation(
+      webrtc::VideoCaptureRotation rotation) {
+    return impl_->SetCaptureRotation(rotation);
+  }
+
+  virtual VideoCaptureEncodeInterface* GetEncodeInterface(
+      const webrtc::VideoCodec& codec) {
+    return impl_->GetEncodeInterface(codec);
+  }
+
+  virtual WebRtc_Word32 EnableFrameRateCallback(const bool enable) {
+    return impl_->EnableFrameRateCallback(enable);
+  }
+  virtual WebRtc_Word32 EnableNoPictureAlarm(const bool enable) {
+    return impl_->EnableNoPictureAlarm(enable);
+  }
+
+  // Inherited from MessageHandler. Each message triggers one frame.
+  virtual void OnMessage(talk_base::Message* msg) {
+    GenerateNewFrame();
+  }
+
+ protected:
+  // Use one of the Create() factories instead; explicit prevents accidental
+  // conversions from a Thread*.
+  explicit FakeVideoCaptureModule(talk_base::Thread* camera_thread);
+
+ private:
+  bool Init(I420FrameSource* frame_source);
+  bool RegisterFrameSource(I420FrameSource* frame_source);
+
+  void GenerateNewFrame();
+  size_t GetI420FrameLengthInBytes();
+  uint32 GetTimestamp();
+
+  // Module interface implementation.
+  talk_base::scoped_refptr<VideoCaptureModule> impl_;
+
+  // Class that generates the frames, e.g. from a file or statically.
+  I420FrameSource* frame_source_;
+
+  talk_base::Thread* camera_thread_;
+  // External-capture interface of |impl_|; memory owned by |impl_|.
+  webrtc::VideoCaptureExternal* video_capture_;
+
+  bool started_;          // True once the first frame has been scheduled.
+  bool capture_started_;  // Mirrors StartCapture()/StopCapture().
+  int sent_frames_;
+  uint32 next_frame_time_;    // Ideal delivery time of the next frame (ms).
+  uint32 time_per_frame_ms_;  // Frame period derived from fps_.
+
+  int fps_;
+  int width_;
+  int height_;
+  talk_base::scoped_array<uint8> image_;  // Buffer holding one I420 frame.
+};
+
+#endif // TALK_APP_WEBRTC_TEST_FAKEVIDEOCAPTUREMODULE_H_
83 talk/app/webrtc/test/fileframesource.cc
View
@@ -0,0 +1,83 @@
+/*
+ * libjingle
+ * Copyright 2011, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/app/webrtc/test/fileframesource.h"
+
+#include "talk/base/stream.h"
+
+// Private; use Create(), which also opens the file.
+FileFrameSource::FileFrameSource()
+    : i420_file_(new talk_base::FileStream()) {}
+
+// The FileStream is closed and released by the scoped_ptr.
+FileFrameSource::~FileFrameSource() {}
+
+// Factory: returns a source reading I420 frames from |file_name|, or NULL
+// if the file cannot be opened.
+FileFrameSource* FileFrameSource::Create(const std::string& file_name) {
+  FileFrameSource* source = new FileFrameSource();
+  if (source->Init(file_name)) {
+    return source;
+  }
+  delete source;
+  return NULL;
+}
+
+// Reads one full I420 frame into |frame|. On end-of-file the stream is
+// rewound so playback loops forever. Returns false (with |*size_in_bytes|
+// set to 0) on read or rewind failure.
+bool FileFrameSource::GetFrame(uint8* frame, size_t* size_in_bytes) {
+  int error = 0;
+  *size_in_bytes = 0;
+  talk_base::StreamResult state = i420_file_->Read(
+      frame,
+      GetI420FrameLengthInBytes(),
+      size_in_bytes,
+      &error);
+  if (state == talk_base::SR_EOS) {
+    // Loop file if end is reached.
+    if (!i420_file_->SetPosition(0)) {
+      *size_in_bytes = 0;
+      return false;
+    }
+    // Retry the read from the start of the file.
+    state = i420_file_->Read(frame, GetI420FrameLengthInBytes(), size_in_bytes,
+                             &error);
+  }
+  if (state != talk_base::SR_SUCCESS) {
+    *size_in_bytes = 0;
+    return false;
+  }
+  // A successful read is expected to deliver exactly one whole frame.
+  ASSERT(*size_in_bytes == GetI420FrameLengthInBytes());
+  return true;
+}
+
+// Opens |file_name| for binary reading. Returns false and logs the stream
+// error code on failure.
+bool FileFrameSource::Init(const std::string& file_name) {
+  int error = 0;
+  const bool success = i420_file_->Open(file_name, "rb", &error);
+  if (!success) {
+    // Leading space before "failed" so the file name and message do not run
+    // together in the log output.
+    LOG(LS_ERROR) << "Opening file " << file_name
+                  << " failed with error code: " << error << ".";
+  }
+  return success;
+}
+
+// Frame length follows the current width/height held by I420FrameSource.
+size_t FileFrameSource::GetI420FrameLengthInBytes() {
+  const size_t length =
+      webrtc_testing::GetI420FrameLengthInBytes(width_, height_);
+  return length;
+}
59 talk/app/webrtc/test/fileframesource.h
View
@@ -0,0 +1,59 @@
+/*
+ * libjingle
+ * Copyright 2011, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_APP_WEBRTC_TEST_FILEFRAMESOURCE_H_
+#define TALK_APP_WEBRTC_TEST_FILEFRAMESOURCE_H_
+
+#include <string>
+
+#include "talk/app/webrtc/test/i420framesource.h"
+#include "talk/base/scoped_ptr.h"
+
+namespace talk_base {
+
+class FileStream;
+
+}
+
+// I420FrameSource that reads raw I420 frames from a file, rewinding to the
+// start when the end of the file is reached.
+class FileFrameSource : public I420FrameSource {
+ public:
+  // Returns NULL if |file_name| cannot be opened.
+  static FileFrameSource* Create(const std::string& file_name);
+  virtual ~FileFrameSource();
+
+  // Interface from I420FrameSource
+  virtual bool GetFrame(uint8* frame, size_t* size_in_bytes);
+
+ private:
+  FileFrameSource();
+  // Opens |file_name| for reading; returns false on failure.
+  bool Init(const std::string& file_name);
+
+  // Length in bytes of one frame at the current width/height.
+  size_t GetI420FrameLengthInBytes();
+
+  // Stream owning the underlying I420 file.
+  talk_base::scoped_ptr<talk_base::FileStream> i420_file_;
+};
+
+#endif // TALK_APP_WEBRTC_TEST_FILEFRAMESOURCE_H_
35 talk/app/webrtc/test/i420framesource.cc
View
@@ -0,0 +1,35 @@
+/*
+ * libjingle
+ * Copyright 2011, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/app/webrtc/test/i420framesource.h"
+
+// Frame size starts at 0x0; callers must invoke SetFrameSize() before
+// requesting frames.
+I420FrameSource::I420FrameSource() : width_(0), height_(0) {}
+
+// Records the dimensions used to size subsequently produced frames.
+void I420FrameSource::SetFrameSize(int width, int height) {
+  width_ = width;
+  height_ = height;
+}
57 talk/app/webrtc/test/i420framesource.h
View
@@ -0,0 +1,57 @@
+/*
+ * libjingle
+ * Copyright 2011, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// I420 helper functions and classes.
+
+#ifndef TALK_APP_WEBRTC_TEST_I420FRAMESOURCE_H_
+#define TALK_APP_WEBRTC_TEST_I420FRAMESOURCE_H_
+
+#include "talk/base/basictypes.h"
+
+namespace webrtc_testing {