/*
 * libjingle
 * Copyright 2012, Google Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>

#include <algorithm>
#include <list>
#include <map>
#include <vector>

#include "talk/app/webrtc/dtmfsender.h"
#include "talk/app/webrtc/fakeportallocatorfactory.h"
#include "talk/app/webrtc/localaudiosource.h"
#include "talk/app/webrtc/mediastreaminterface.h"
#include "talk/app/webrtc/peerconnectionfactory.h"
#include "talk/app/webrtc/peerconnectioninterface.h"
#include "talk/app/webrtc/test/fakeaudiocapturemodule.h"
#include "talk/app/webrtc/test/fakeconstraints.h"
#include "talk/app/webrtc/test/fakeperiodicvideocapturer.h"
#include "talk/app/webrtc/test/fakevideotrackrenderer.h"
#include "talk/app/webrtc/test/mockpeerconnectionobservers.h"
#include "talk/app/webrtc/videosourceinterface.h"
#include "talk/base/gunit.h"
#include "talk/base/scoped_ptr.h"
#include "talk/base/ssladapter.h"
#include "talk/base/sslstreamadapter.h"
#include "talk/base/thread.h"
#include "talk/media/webrtc/fakewebrtcvideoengine.h"
#include "talk/p2p/base/constants.h"
#include "talk/p2p/base/sessiondescription.h"
#include "talk/session/media/mediasession.h"

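// Skips the rest of a test body when the required feature (e.g. DTLS-SRTP
// support) is not available in the current build.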
#define MAYBE_SKIP_TEST(feature)                    \
  if (!(feature())) {                               \
    LOG(LS_INFO) << "Feature disabled... skipping"; \
    return;                                         \
  }

using cricket::ContentInfo;
using cricket::FakeWebRtcVideoDecoder;
using cricket::FakeWebRtcVideoDecoderFactory;
using cricket::FakeWebRtcVideoEncoder;
using cricket::FakeWebRtcVideoEncoderFactory;
using cricket::MediaContentDescription;
using webrtc::DataBuffer;
using webrtc::DataChannelInterface;
using webrtc::DtmfSender;
using webrtc::DtmfSenderInterface;
using webrtc::DtmfSenderObserverInterface;
using webrtc::FakeConstraints;
using webrtc::MediaConstraintsInterface;
using webrtc::MediaStreamTrackInterface;
using webrtc::MockCreateSessionDescriptionObserver;
using webrtc::MockDataChannelObserver;
using webrtc::MockSetSessionDescriptionObserver;
using webrtc::MockStatsObserver;
using webrtc::SessionDescriptionInterface;
using webrtc::StreamCollectionInterface;

static const int kMaxWaitMs = 1000;
static const int kMaxWaitForStatsMs = 3000;
static const int kMaxWaitForFramesMs = 5000;
static const int kEndAudioFrameCount = 3;
static const int kEndVideoFrameCount = 3;

static const char kStreamLabelBase[] = "stream_label";
static const char kVideoTrackLabelBase[] = "video_track";
static const char kAudioTrackLabelBase[] = "audio_track";
static const char kDataChannelLabel[] = "data_channel";

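// Removes every SDP line that starts with |line_start| from |sdp|, including
// the trailing "\r\n".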
static void RemoveLinesFromSdp(const std::string& line_start,
                               std::string* sdp) {
  const char kSdpLineEnd[] = "\r\n";
  size_t ssrc_pos = 0;
  while ((ssrc_pos = sdp->find(line_start, ssrc_pos)) !=
      std::string::npos) {
    size_t end_ssrc = sdp->find(kSdpLineEnd, ssrc_pos);
    sdp->erase(ssrc_pos, end_ssrc - ssrc_pos + strlen(kSdpLineEnd));
  }
}

class SignalingMessageReceiver {
 public:
 protected:
  SignalingMessageReceiver() {}
  virtual ~SignalingMessageReceiver() {}
};

class JsepMessageReceiver : public SignalingMessageReceiver {
 public:
  virtual void ReceiveSdpMessage(const std::string& type,
                                 std::string& msg) = 0;
  virtual void ReceiveIceMessage(const std::string& sdp_mid,
                                 int sdp_mline_index,
                                 const std::string& msg) = 0;

 protected:
  JsepMessageReceiver() {}
  virtual ~JsepMessageReceiver() {}
};

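// Drives one endpoint of the test call. Implements PeerConnectionObserver,
// owns the PeerConnection and its factory, adds local fake media, and counts
// received frames through fake renderers and the fake audio capture module.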
template <typename MessageReceiver>
class PeerConnectionTestClientBase
    : public webrtc::PeerConnectionObserver,
      public MessageReceiver {
 public:
  ~PeerConnectionTestClientBase() {
    while (!fake_video_renderers_.empty()) {
      RenderMap::iterator it = fake_video_renderers_.begin();
      delete it->second;
      fake_video_renderers_.erase(it);
    }
  }

  virtual void Negotiate() = 0;

  virtual void Negotiate(bool audio, bool video) = 0;

  virtual void SetVideoConstraints(
      const webrtc::FakeConstraints& video_constraint) {
    video_constraints_ = video_constraint;
  }

  void AddMediaStream(bool audio, bool video) {
    std::string label = kStreamLabelBase +
        talk_base::ToString<int>(
            static_cast<int>(peer_connection_->local_streams()->count()));
    talk_base::scoped_refptr<webrtc::MediaStreamInterface> stream =
        peer_connection_factory_->CreateLocalMediaStream(label);

    if (audio && can_receive_audio()) {
      FakeConstraints constraints;
      // Disable the highpass filter so that we can get all the test audio
      // frames.
      constraints.AddMandatory(
          MediaConstraintsInterface::kHighpassFilter, false);
      talk_base::scoped_refptr<webrtc::LocalAudioSource> source =
          webrtc::LocalAudioSource::Create(&constraints);
      // TODO(perkj): Test the audio source when it is implemented. Currently
      // audio always uses the default input.
      talk_base::scoped_refptr<webrtc::AudioTrackInterface> audio_track(
          peer_connection_factory_->CreateAudioTrack(kAudioTrackLabelBase,
                                                     source));
      stream->AddTrack(audio_track);
    }
    if (video && can_receive_video()) {
      stream->AddTrack(CreateLocalVideoTrack(label));
    }

    EXPECT_TRUE(peer_connection_->AddStream(stream, NULL));
  }

  size_t NumberOfLocalMediaStreams() {
    return peer_connection_->local_streams()->count();
  }

  bool SessionActive() {
    return peer_connection_->signaling_state() ==
        webrtc::PeerConnectionInterface::kStable;
  }

  void set_signaling_message_receiver(
      MessageReceiver* signaling_message_receiver) {
    signaling_message_receiver_ = signaling_message_receiver;
  }

  void EnableVideoDecoderFactory() {
    video_decoder_factory_enabled_ = true;
    fake_video_decoder_factory_->AddSupportedVideoCodecType(
        webrtc::kVideoCodecVP8);
  }

  bool AudioFramesReceivedCheck(int number_of_frames) const {
    return number_of_frames <= fake_audio_capture_module_->frames_received();
  }

  bool VideoFramesReceivedCheck(int number_of_frames) {
    if (video_decoder_factory_enabled_) {
      const std::vector<FakeWebRtcVideoDecoder*>& decoders
          = fake_video_decoder_factory_->decoders();
      if (decoders.empty()) {
        return number_of_frames <= 0;
      }

      for (std::vector<FakeWebRtcVideoDecoder*>::const_iterator
               it = decoders.begin(); it != decoders.end(); ++it) {
        if (number_of_frames > (*it)->GetNumFramesReceived()) {
          return false;
        }
      }
      return true;
    } else {
      if (fake_video_renderers_.empty()) {
        return number_of_frames <= 0;
      }

      for (RenderMap::const_iterator it = fake_video_renderers_.begin();
           it != fake_video_renderers_.end(); ++it) {
        if (number_of_frames > it->second->num_rendered_frames()) {
          return false;
        }
      }
      return true;
    }
  }

  // Verifies the CreateDtmfSender interface.
  void VerifyDtmf() {
    talk_base::scoped_ptr<DummyDtmfObserver> observer(new DummyDtmfObserver());
    talk_base::scoped_refptr<DtmfSenderInterface> dtmf_sender;

    // We can't create a DTMF sender with an invalid audio track or a
    // non-local track.
    EXPECT_TRUE(peer_connection_->CreateDtmfSender(NULL) == NULL);
    talk_base::scoped_refptr<webrtc::AudioTrackInterface> non_localtrack(
        peer_connection_factory_->CreateAudioTrack("dummy_track",
                                                   NULL));
    EXPECT_TRUE(peer_connection_->CreateDtmfSender(non_localtrack) == NULL);

    // We should be able to create a DTMF sender from a local track.
    webrtc::AudioTrackInterface* localtrack =
        peer_connection_->local_streams()->at(0)->GetAudioTracks()[0];
    dtmf_sender = peer_connection_->CreateDtmfSender(localtrack);
    EXPECT_TRUE(dtmf_sender.get() != NULL);
    dtmf_sender->RegisterObserver(observer.get());

    // Test the DtmfSender object just created.
    EXPECT_TRUE(dtmf_sender->CanInsertDtmf());
    EXPECT_TRUE(dtmf_sender->InsertDtmf("1a", 100, 50));

    // We don't need to verify that the DTMF tones are actually sent out
    // because that is already covered by the tests of the lower level
    // components.

    EXPECT_TRUE_WAIT(observer->completed(), kMaxWaitMs);
    std::vector<std::string> tones;
    tones.push_back("1");
    tones.push_back("a");
    tones.push_back("");
    observer->Verify(tones);

    dtmf_sender->UnregisterObserver();
  }

  // Verifies that the local and remote session descriptions have rejected the
  // appropriate media content.
  void VerifyRejectedMediaInSessionDescription() {
    ASSERT_TRUE(peer_connection_->remote_description() != NULL);
    ASSERT_TRUE(peer_connection_->local_description() != NULL);
    const cricket::SessionDescription* remote_desc =
        peer_connection_->remote_description()->description();
    const cricket::SessionDescription* local_desc =
        peer_connection_->local_description()->description();

    const ContentInfo* remote_audio_content = GetFirstAudioContent(remote_desc);
    if (remote_audio_content) {
      const ContentInfo* audio_content =
          GetFirstAudioContent(local_desc);
      EXPECT_EQ(can_receive_audio(), !audio_content->rejected);
    }

    const ContentInfo* remote_video_content = GetFirstVideoContent(remote_desc);
    if (remote_video_content) {
      const ContentInfo* video_content =
          GetFirstVideoContent(local_desc);
      EXPECT_EQ(can_receive_video(), !video_content->rejected);
    }
  }

  void SetExpectIceRestart(bool expect_restart) {
    expect_ice_restart_ = expect_restart;
  }

  bool ExpectIceRestart() const { return expect_ice_restart_; }

  void VerifyLocalIceUfragAndPassword() {
    ASSERT_TRUE(peer_connection_->local_description() != NULL);
    const cricket::SessionDescription* desc =
        peer_connection_->local_description()->description();
    const cricket::ContentInfos& contents = desc->contents();

    for (size_t index = 0; index < contents.size(); ++index) {
      if (contents[index].rejected)
        continue;
      const cricket::TransportDescription* transport_desc =
          desc->GetTransportDescriptionByName(contents[index].name);

      std::map<int, IceUfragPwdPair>::const_iterator ufragpair_it =
          ice_ufrag_pwd_.find(static_cast<int>(index));
      if (ufragpair_it == ice_ufrag_pwd_.end()) {
        // First negotiation: remember the credentials for each content so
        // later renegotiations can be checked against them.
        ASSERT_FALSE(ExpectIceRestart());
        ice_ufrag_pwd_[static_cast<int>(index)] =
            IceUfragPwdPair(transport_desc->ice_ufrag, transport_desc->ice_pwd);
      } else if (ExpectIceRestart()) {
        // An ICE restart must generate new credentials.
        const IceUfragPwdPair& ufrag_pwd = ufragpair_it->second;
        EXPECT_NE(ufrag_pwd.first, transport_desc->ice_ufrag);
        EXPECT_NE(ufrag_pwd.second, transport_desc->ice_pwd);
      } else {
        // Without an ICE restart the credentials must stay the same.
        const IceUfragPwdPair& ufrag_pwd = ufragpair_it->second;
        EXPECT_EQ(ufrag_pwd.first, transport_desc->ice_ufrag);
        EXPECT_EQ(ufrag_pwd.second, transport_desc->ice_pwd);
      }
    }
  }

  int GetAudioOutputLevelStats(webrtc::MediaStreamTrackInterface* track) {
    talk_base::scoped_refptr<MockStatsObserver>
        observer(new talk_base::RefCountedObject<MockStatsObserver>());
    EXPECT_TRUE(peer_connection_->GetStats(observer, track));
    EXPECT_TRUE_WAIT(observer->called(), kMaxWaitMs);
    return observer->AudioOutputLevel();
  }

  int GetAudioInputLevelStats() {
    talk_base::scoped_refptr<MockStatsObserver>
        observer(new talk_base::RefCountedObject<MockStatsObserver>());
    EXPECT_TRUE(peer_connection_->GetStats(observer, NULL));
    EXPECT_TRUE_WAIT(observer->called(), kMaxWaitMs);
    return observer->AudioInputLevel();
  }

  int GetBytesReceivedStats(webrtc::MediaStreamTrackInterface* track) {
    talk_base::scoped_refptr<MockStatsObserver>
        observer(new talk_base::RefCountedObject<MockStatsObserver>());
    EXPECT_TRUE(peer_connection_->GetStats(observer, track));
    EXPECT_TRUE_WAIT(observer->called(), kMaxWaitMs);
    return observer->BytesReceived();
  }

  int GetBytesSentStats(webrtc::MediaStreamTrackInterface* track) {
    talk_base::scoped_refptr<MockStatsObserver>
        observer(new talk_base::RefCountedObject<MockStatsObserver>());
    EXPECT_TRUE(peer_connection_->GetStats(observer, track));
    EXPECT_TRUE_WAIT(observer->called(), kMaxWaitMs);
    return observer->BytesSent();
  }

  int rendered_width() {
    EXPECT_FALSE(fake_video_renderers_.empty());
    return fake_video_renderers_.empty() ? 1 :
        fake_video_renderers_.begin()->second->width();
  }

  int rendered_height() {
    EXPECT_FALSE(fake_video_renderers_.empty());
    return fake_video_renderers_.empty() ? 1 :
        fake_video_renderers_.begin()->second->height();
  }

  size_t number_of_remote_streams() {
    if (!pc())
      return 0;
    return pc()->remote_streams()->count();
  }

  StreamCollectionInterface* remote_streams() {
    if (!pc()) {
      ADD_FAILURE();
      return NULL;
    }
    return pc()->remote_streams();
  }

  StreamCollectionInterface* local_streams() {
    if (!pc()) {
      ADD_FAILURE();
      return NULL;
    }
    return pc()->local_streams();
  }

  webrtc::PeerConnectionInterface::SignalingState signaling_state() {
    return pc()->signaling_state();
  }

  webrtc::PeerConnectionInterface::IceConnectionState ice_connection_state() {
    return pc()->ice_connection_state();
  }

  webrtc::PeerConnectionInterface::IceGatheringState ice_gathering_state() {
    return pc()->ice_gathering_state();
  }

  // PeerConnectionObserver callbacks.
  virtual void OnError() {}
  virtual void OnMessage(const std::string&) {}
  virtual void OnSignalingMessage(const std::string& /*msg*/) {}
  virtual void OnSignalingChange(
      webrtc::PeerConnectionInterface::SignalingState new_state) {
    EXPECT_EQ(peer_connection_->signaling_state(), new_state);
  }
  virtual void OnAddStream(webrtc::MediaStreamInterface* media_stream) {
    for (size_t i = 0; i < media_stream->GetVideoTracks().size(); ++i) {
      const std::string id = media_stream->GetVideoTracks()[i]->id();
      ASSERT_TRUE(fake_video_renderers_.find(id) ==
                  fake_video_renderers_.end());
      fake_video_renderers_[id] = new webrtc::FakeVideoTrackRenderer(
          media_stream->GetVideoTracks()[i]);
    }
  }
  virtual void OnRemoveStream(webrtc::MediaStreamInterface* media_stream) {}
  virtual void OnRenegotiationNeeded() {}
  virtual void OnIceConnectionChange(
      webrtc::PeerConnectionInterface::IceConnectionState new_state) {
    EXPECT_EQ(peer_connection_->ice_connection_state(), new_state);
  }
  virtual void OnIceGatheringChange(
      webrtc::PeerConnectionInterface::IceGatheringState new_state) {
    EXPECT_EQ(peer_connection_->ice_gathering_state(), new_state);
  }
  virtual void OnIceCandidate(
      const webrtc::IceCandidateInterface* /*candidate*/) {}

  webrtc::PeerConnectionInterface* pc() {
    return peer_connection_.get();
  }

 protected:
  explicit PeerConnectionTestClientBase(const std::string& id)
      : id_(id),
        expect_ice_restart_(false),
        fake_video_decoder_factory_(NULL),
        fake_video_encoder_factory_(NULL),
        video_decoder_factory_enabled_(false),
        signaling_message_receiver_(NULL) {
  }
  bool Init(const MediaConstraintsInterface* constraints) {
    EXPECT_TRUE(!peer_connection_);
    EXPECT_TRUE(!peer_connection_factory_);
    allocator_factory_ = webrtc::FakePortAllocatorFactory::Create();
    if (!allocator_factory_) {
      return false;
    }
    audio_thread_.Start();
    fake_audio_capture_module_ = FakeAudioCaptureModule::Create(
        &audio_thread_);

    if (fake_audio_capture_module_ == NULL) {
      return false;
    }
    fake_video_decoder_factory_ = new FakeWebRtcVideoDecoderFactory();
    fake_video_encoder_factory_ = new FakeWebRtcVideoEncoderFactory();
    peer_connection_factory_ = webrtc::CreatePeerConnectionFactory(
        talk_base::Thread::Current(), talk_base::Thread::Current(),
        fake_audio_capture_module_, fake_video_encoder_factory_,
        fake_video_decoder_factory_);
    if (!peer_connection_factory_) {
      return false;
    }
    peer_connection_ = CreatePeerConnection(allocator_factory_.get(),
                                            constraints);
    return peer_connection_.get() != NULL;
  }
  virtual talk_base::scoped_refptr<webrtc::PeerConnectionInterface>
      CreatePeerConnection(webrtc::PortAllocatorFactoryInterface* factory,
                           const MediaConstraintsInterface* constraints) = 0;
  MessageReceiver* signaling_message_receiver() {
    return signaling_message_receiver_;
  }
  webrtc::PeerConnectionFactoryInterface* peer_connection_factory() {
    return peer_connection_factory_.get();
  }

  virtual bool can_receive_audio() = 0;
  virtual bool can_receive_video() = 0;
  const std::string& id() const { return id_; }

 private:
  class DummyDtmfObserver : public DtmfSenderObserverInterface {
   public:
    DummyDtmfObserver() : completed_(false) {}

    // Implements DtmfSenderObserverInterface.
    void OnToneChange(const std::string& tone) {
      tones_.push_back(tone);
      if (tone.empty()) {
        completed_ = true;
      }
    }

    void Verify(const std::vector<std::string>& tones) const {
      ASSERT_TRUE(tones_.size() == tones.size());
      EXPECT_TRUE(std::equal(tones.begin(), tones.end(), tones_.begin()));
    }

    bool completed() const { return completed_; }

   private:
    bool completed_;
    std::vector<std::string> tones_;
  };

  talk_base::scoped_refptr<webrtc::VideoTrackInterface>
  CreateLocalVideoTrack(const std::string stream_label) {
    // Set the max frame rate to 10 fps to reduce the risk of the tests being
    // flaky.
    FakeConstraints source_constraints = video_constraints_;
    source_constraints.SetMandatoryMaxFrameRate(10);

    talk_base::scoped_refptr<webrtc::VideoSourceInterface> source =
        peer_connection_factory_->CreateVideoSource(
            new webrtc::FakePeriodicVideoCapturer(),
            &source_constraints);
    std::string label = stream_label + kVideoTrackLabelBase;
    return peer_connection_factory_->CreateVideoTrack(label, source);
  }

  std::string id_;
  // Separate thread for executing |fake_audio_capture_module_| tasks. Audio
  // processing must not be performed on the same thread as signaling due to
  // signaling time constraints and the relative complexity of the audio
  // pipeline. This is consistent with the video pipeline, which uses a
  // separate thread for encoding and decoding.
  talk_base::Thread audio_thread_;

  talk_base::scoped_refptr<webrtc::PortAllocatorFactoryInterface>
      allocator_factory_;
  talk_base::scoped_refptr<webrtc::PeerConnectionInterface> peer_connection_;
  talk_base::scoped_refptr<webrtc::PeerConnectionFactoryInterface>
      peer_connection_factory_;

  typedef std::pair<std::string, std::string> IceUfragPwdPair;
  std::map<int, IceUfragPwdPair> ice_ufrag_pwd_;
  bool expect_ice_restart_;

  // Needed to keep track of the number of frames sent.
  talk_base::scoped_refptr<FakeAudioCaptureModule> fake_audio_capture_module_;
  // Needed to keep track of the number of frames received.
  typedef std::map<std::string, webrtc::FakeVideoTrackRenderer*> RenderMap;
  RenderMap fake_video_renderers_;
  // Needed to keep track of the number of frames received when an external
  // decoder is used.
  FakeWebRtcVideoDecoderFactory* fake_video_decoder_factory_;
  FakeWebRtcVideoEncoderFactory* fake_video_encoder_factory_;
  bool video_decoder_factory_enabled_;
  webrtc::FakeConstraints video_constraints_;

  // For remote peer communication.
  MessageReceiver* signaling_message_receiver_;
};

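// Test client that exchanges JSEP offers/answers and ICE candidates directly
// with its remote peer through the JsepMessageReceiver interface, i.e. without
// a real signaling channel.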
class JsepTestClient
    : public PeerConnectionTestClientBase<JsepMessageReceiver> {
 public:
  static JsepTestClient* CreateClient(
      const std::string& id,
      const MediaConstraintsInterface* constraints) {
    JsepTestClient* client(new JsepTestClient(id));
    if (!client->Init(constraints)) {
      delete client;
      return NULL;
    }
    return client;
  }
  ~JsepTestClient() {}

  virtual void Negotiate() {
    Negotiate(true, true);
  }
  virtual void Negotiate(bool audio, bool video) {
    talk_base::scoped_ptr<SessionDescriptionInterface> offer;
    EXPECT_TRUE(DoCreateOffer(offer.use()));

    if (offer->description()->GetContentByName("audio")) {
      offer->description()->GetContentByName("audio")->rejected = !audio;
    }
    if (offer->description()->GetContentByName("video")) {
      offer->description()->GetContentByName("video")->rejected = !video;
    }

    std::string sdp;
    EXPECT_TRUE(offer->ToString(&sdp));
    EXPECT_TRUE(DoSetLocalDescription(offer.release()));
    signaling_message_receiver()->ReceiveSdpMessage(
        webrtc::SessionDescriptionInterface::kOffer, sdp);
  }
  // JsepMessageReceiver callback.
  virtual void ReceiveSdpMessage(const std::string& type,
                                 std::string& msg) {
    FilterIncomingSdpMessage(&msg);
    if (type == webrtc::SessionDescriptionInterface::kOffer) {
      HandleIncomingOffer(msg);
    } else {
      HandleIncomingAnswer(msg);
    }
  }
  // JsepMessageReceiver callback.
  virtual void ReceiveIceMessage(const std::string& sdp_mid,
                                 int sdp_mline_index,
                                 const std::string& msg) {
    LOG(INFO) << id() << "ReceiveIceMessage";
    talk_base::scoped_ptr<webrtc::IceCandidateInterface> candidate(
        webrtc::CreateIceCandidate(sdp_mid, sdp_mline_index, msg, NULL));
    EXPECT_TRUE(pc()->AddIceCandidate(candidate.get()));
  }
  // Implements PeerConnectionObserver functions needed by Jsep.
  virtual void OnIceCandidate(const webrtc::IceCandidateInterface* candidate) {
    LOG(INFO) << id() << "OnIceCandidate";

    std::string ice_sdp;
    EXPECT_TRUE(candidate->ToString(&ice_sdp));
    if (signaling_message_receiver() == NULL) {
      // Remote party may be deleted.
      return;
    }
    signaling_message_receiver()->ReceiveIceMessage(candidate->sdp_mid(),
        candidate->sdp_mline_index(), ice_sdp);
  }

  void IceRestart() {
    session_description_constraints_.SetMandatoryIceRestart(true);
    SetExpectIceRestart(true);
  }

  void SetReceiveAudioVideo(bool audio, bool video) {
    SetReceiveAudio(audio);
    SetReceiveVideo(video);
    ASSERT_EQ(audio, can_receive_audio());
    ASSERT_EQ(video, can_receive_video());
  }

  void SetReceiveAudio(bool audio) {
    if (audio && can_receive_audio())
      return;
    session_description_constraints_.SetMandatoryReceiveAudio(audio);
  }

  void SetReceiveVideo(bool video) {
    if (video && can_receive_video())
      return;
    session_description_constraints_.SetMandatoryReceiveVideo(video);
  }

  void RemoveMsidFromReceivedSdp(bool remove) {
    remove_msid_ = remove;
  }

  void RemoveSdesCryptoFromReceivedSdp(bool remove) {
    remove_sdes_ = remove;
  }

  void RemoveBundleFromReceivedSdp(bool remove) {
    remove_bundle_ = remove;
  }

  virtual bool can_receive_audio() {
    bool value;
    if (webrtc::FindConstraint(&session_description_constraints_,
        MediaConstraintsInterface::kOfferToReceiveAudio, &value, NULL)) {
      return value;
    }
    return true;
  }

  virtual bool can_receive_video() {
    bool value;
    if (webrtc::FindConstraint(&session_description_constraints_,
        MediaConstraintsInterface::kOfferToReceiveVideo, &value, NULL)) {
      return value;
    }
    return true;
  }

  virtual void OnIceComplete() {
    LOG(INFO) << id() << "OnIceComplete";
  }

  virtual void OnDataChannel(DataChannelInterface* data_channel) {
    LOG(INFO) << id() << "OnDataChannel";
    data_channel_ = data_channel;
    data_observer_.reset(new MockDataChannelObserver(data_channel));
  }

  void CreateDataChannel() {
    data_channel_ = pc()->CreateDataChannel(kDataChannelLabel,
                                            NULL);
    ASSERT_TRUE(data_channel_.get() != NULL);
    data_observer_.reset(new MockDataChannelObserver(data_channel_));
  }

  DataChannelInterface* data_channel() { return data_channel_; }
  const MockDataChannelObserver* data_observer() const {
    return data_observer_.get();
  }

 protected:
  explicit JsepTestClient(const std::string& id)
      : PeerConnectionTestClientBase<JsepMessageReceiver>(id),
        remove_msid_(false),
        remove_bundle_(false),
        remove_sdes_(false) {
  }

  virtual talk_base::scoped_refptr<webrtc::PeerConnectionInterface>
      CreatePeerConnection(webrtc::PortAllocatorFactoryInterface* factory,
                           const MediaConstraintsInterface* constraints) {
    // CreatePeerConnection with IceServers.
    webrtc::PeerConnectionInterface::IceServers ice_servers;
    webrtc::PeerConnectionInterface::IceServer ice_server;
    ice_server.uri = "stun:stun.l.google.com:19302";
    ice_servers.push_back(ice_server);
    return peer_connection_factory()->CreatePeerConnection(
        ice_servers, constraints, factory, NULL, this);
  }

  void HandleIncomingOffer(const std::string& msg) {
    LOG(INFO) << id() << "HandleIncomingOffer ";
    if (NumberOfLocalMediaStreams() == 0) {
      // If we are not sending any streams ourselves it is time to add some.
      AddMediaStream(true, true);
    }
    talk_base::scoped_ptr<SessionDescriptionInterface> desc(
        webrtc::CreateSessionDescription("offer", msg, NULL));
    EXPECT_TRUE(DoSetRemoteDescription(desc.release()));
    talk_base::scoped_ptr<SessionDescriptionInterface> answer;
    EXPECT_TRUE(DoCreateAnswer(answer.use()));
    std::string sdp;
    EXPECT_TRUE(answer->ToString(&sdp));
    EXPECT_TRUE(DoSetLocalDescription(answer.release()));
    if (signaling_message_receiver()) {
      signaling_message_receiver()->ReceiveSdpMessage(
          webrtc::SessionDescriptionInterface::kAnswer, sdp);
    }
  }

  void HandleIncomingAnswer(const std::string& msg) {
    LOG(INFO) << id() << "HandleIncomingAnswer";
    talk_base::scoped_ptr<SessionDescriptionInterface> desc(
        webrtc::CreateSessionDescription("answer", msg, NULL));
    EXPECT_TRUE(DoSetRemoteDescription(desc.release()));
  }

  bool DoCreateOfferAnswer(SessionDescriptionInterface** desc,
                           bool offer) {
    talk_base::scoped_refptr<MockCreateSessionDescriptionObserver>
        observer(new talk_base::RefCountedObject<
            MockCreateSessionDescriptionObserver>());
    if (offer) {
      pc()->CreateOffer(observer, &session_description_constraints_);
    } else {
      pc()->CreateAnswer(observer, &session_description_constraints_);
    }
    EXPECT_EQ_WAIT(true, observer->called(), kMaxWaitMs);
    *desc = observer->release_desc();
    if (observer->result() && ExpectIceRestart()) {
      EXPECT_EQ(0u, (*desc)->candidates(0)->count());
    }
    return observer->result();
  }

  bool DoCreateOffer(SessionDescriptionInterface** desc) {
    return DoCreateOfferAnswer(desc, true);
  }

  bool DoCreateAnswer(SessionDescriptionInterface** desc) {
    return DoCreateOfferAnswer(desc, false);
  }

  bool DoSetLocalDescription(SessionDescriptionInterface* desc) {
    talk_base::scoped_refptr<MockSetSessionDescriptionObserver>
        observer(new talk_base::RefCountedObject<
            MockSetSessionDescriptionObserver>());
    LOG(INFO) << id() << "SetLocalDescription ";
    pc()->SetLocalDescription(observer, desc);
    // Ignore the observer result. If we wait for the result with
    // EXPECT_TRUE_WAIT, local ICE candidates might be sent to the remote peer
    // before the offer, which is an error.
    // The reason is that EXPECT_TRUE_WAIT uses
    // talk_base::Thread::Current()->ProcessMessages(1);
    // ProcessMessages waits at least 1ms but processes all messages before
    // returning. Since this test is synchronous and sends messages to the
    // remote peer whenever a callback is invoked, this can lead to messages
    // being sent to the remote peer in the wrong order.
    // TODO(perkj): Find a way to check the result without risking that the
    // order of sent messages is changed, e.g. by posting all messages that are
    // sent to the remote peer.
    return true;
  }

  bool DoSetRemoteDescription(SessionDescriptionInterface* desc) {
    talk_base::scoped_refptr<MockSetSessionDescriptionObserver>
        observer(new talk_base::RefCountedObject<
            MockSetSessionDescriptionObserver>());
    LOG(INFO) << id() << "SetRemoteDescription ";
    pc()->SetRemoteDescription(observer, desc);
    EXPECT_TRUE_WAIT(observer->called(), kMaxWaitMs);
    return observer->result();
  }

  // This modifies all received SDP messages before they are processed.
  void FilterIncomingSdpMessage(std::string* sdp) {
    if (remove_msid_) {
      const char kSdpSsrcAttribute[] = "a=ssrc:";
      RemoveLinesFromSdp(kSdpSsrcAttribute, sdp);
      const char kSdpMsidSupportedAttribute[] = "a=msid-semantic:";
      RemoveLinesFromSdp(kSdpMsidSupportedAttribute, sdp);
    }
    if (remove_bundle_) {
      const char kSdpBundleAttribute[] = "a=group:BUNDLE";
      RemoveLinesFromSdp(kSdpBundleAttribute, sdp);
    }
    if (remove_sdes_) {
      const char kSdpSdesCryptoAttribute[] = "a=crypto";
      RemoveLinesFromSdp(kSdpSdesCryptoAttribute, sdp);
    }
  }

 private:
  webrtc::FakeConstraints session_description_constraints_;
  bool remove_msid_;  // True if MSID should be removed from received SDP.
  bool remove_bundle_;  // True if BUNDLE should be removed from received SDP.
  bool remove_sdes_;  // True if a=crypto should be removed from received SDP.

  talk_base::scoped_refptr<DataChannelInterface> data_channel_;
  talk_base::scoped_ptr<MockDataChannelObserver> data_observer_;
};

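// Test fixture that wires an initiating and a receiving test client together
// and drives a complete local peer-to-peer call between them.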
template <typename SignalingClass>
class P2PTestConductor : public testing::Test {
 public:
  bool SessionActive() {
    return initiating_client_->SessionActive() &&
        receiving_client_->SessionActive();
  }
  // Returns true if the number of frames provided has been received, or if it
  // is known that this will never occur (e.g. no frames will be sent or
  // captured).
  bool FramesNotPending(int audio_frames_to_receive,
                        int video_frames_to_receive) {
    return VideoFramesReceivedCheck(video_frames_to_receive) &&
        AudioFramesReceivedCheck(audio_frames_to_receive);
  }
  bool AudioFramesReceivedCheck(int frames_received) {
    return initiating_client_->AudioFramesReceivedCheck(frames_received) &&
        receiving_client_->AudioFramesReceivedCheck(frames_received);
  }
  bool VideoFramesReceivedCheck(int frames_received) {
    return initiating_client_->VideoFramesReceivedCheck(frames_received) &&
        receiving_client_->VideoFramesReceivedCheck(frames_received);
  }
  void VerifyDtmf() {
    initiating_client_->VerifyDtmf();
    receiving_client_->VerifyDtmf();
  }

  void TestUpdateOfferWithRejectedContent() {
    initiating_client_->Negotiate(true, false);
    EXPECT_TRUE_WAIT(
        FramesNotPending(kEndAudioFrameCount * 2, kEndVideoFrameCount),
        kMaxWaitForFramesMs);
    // There shouldn't be any more video frames after the new offer has been
    // negotiated.
    EXPECT_FALSE(VideoFramesReceivedCheck(kEndVideoFrameCount + 1));
  }

  void VerifyRenderedSize(int width, int height) {
    EXPECT_EQ(width, receiving_client()->rendered_width());
    EXPECT_EQ(height, receiving_client()->rendered_height());
    EXPECT_EQ(width, initializing_client()->rendered_width());
    EXPECT_EQ(height, initializing_client()->rendered_height());
  }

  void VerifySessionDescriptions() {
    initiating_client_->VerifyRejectedMediaInSessionDescription();
    receiving_client_->VerifyRejectedMediaInSessionDescription();
    initiating_client_->VerifyLocalIceUfragAndPassword();
    receiving_client_->VerifyLocalIceUfragAndPassword();
  }

  P2PTestConductor() {
    talk_base::InitializeSSL(NULL);
  }
  ~P2PTestConductor() {
    if (initiating_client_) {
      initiating_client_->set_signaling_message_receiver(NULL);
    }
    if (receiving_client_) {
      receiving_client_->set_signaling_message_receiver(NULL);
    }
    talk_base::CleanupSSL();
  }

  bool CreateTestClients() {
    return CreateTestClients(NULL, NULL);
  }

  bool CreateTestClients(MediaConstraintsInterface* init_constraints,
                         MediaConstraintsInterface* recv_constraints) {
    initiating_client_.reset(SignalingClass::CreateClient("Caller: ",
                                                          init_constraints));
    receiving_client_.reset(SignalingClass::CreateClient("Callee: ",
                                                         recv_constraints));
    if (!initiating_client_ || !receiving_client_) {
      return false;
    }
    initiating_client_->set_signaling_message_receiver(receiving_client_.get());
    receiving_client_->set_signaling_message_receiver(initiating_client_.get());
    return true;
  }

  void SetVideoConstraints(const webrtc::FakeConstraints& init_constraints,
                           const webrtc::FakeConstraints& recv_constraints) {
    initiating_client_->SetVideoConstraints(init_constraints);
    receiving_client_->SetVideoConstraints(recv_constraints);
  }

  void EnableVideoDecoderFactory() {
    initiating_client_->EnableVideoDecoderFactory();
    receiving_client_->EnableVideoDecoderFactory();
  }

  // This test sets up a call between two parties. Both parties send static
  // frames to each other. Once the test is finished, the number of sent
  // frames is compared to the number of received frames.
  void LocalP2PTest() {
    if (initiating_client_->NumberOfLocalMediaStreams() == 0) {
      initiating_client_->AddMediaStream(true, true);
    }
    initiating_client_->Negotiate();
    const int kMaxWaitForActivationMs = 5000;
    // ASSERT is used here since the following checks are guaranteed to fail
    // if the session never becomes active, and each would eat up 5 seconds.
    ASSERT_TRUE_WAIT(SessionActive(), kMaxWaitForActivationMs);
    VerifySessionDescriptions();

    int audio_frame_count = kEndAudioFrameCount;
    // TODO(ronghuawu): Add a test to cover the case of sendonly and recvonly.
    if (!initiating_client_->can_receive_audio() ||
        !receiving_client_->can_receive_audio()) {
      audio_frame_count = -1;
    }
    int video_frame_count = kEndVideoFrameCount;
    if (!initiating_client_->can_receive_video() ||
        !receiving_client_->can_receive_video()) {
      video_frame_count = -1;
    }

    if (audio_frame_count != -1 || video_frame_count != -1) {
      // Audio or video is expected to flow, so both sides should get to the
      // Connected state.
      // Note: These tests have been observed to fail under heavy load at
      // shorter timeouts, so they may be flaky.
      EXPECT_EQ_WAIT(
          webrtc::PeerConnectionInterface::kIceConnectionConnected,
          initiating_client_->ice_connection_state(),
          kMaxWaitForFramesMs);
      EXPECT_EQ_WAIT(
          webrtc::PeerConnectionInterface::kIceConnectionConnected,
          receiving_client_->ice_connection_state(),
          kMaxWaitForFramesMs);
    }

    if (initiating_client_->can_receive_audio() ||
        initiating_client_->can_receive_video()) {
      // The initiating client can receive media, so it must produce candidates
      // that will serve as destinations for that media.
      // TODO(bemasc): Understand why the state is not already Complete here, as
      // seems to be the case for the receiving client. This may indicate a bug
      // in the ICE gathering system.
      EXPECT_NE(webrtc::PeerConnectionInterface::kIceGatheringNew,
                initiating_client_->ice_gathering_state());
    }
    if (receiving_client_->can_receive_audio() ||
        receiving_client_->can_receive_video()) {
      EXPECT_EQ_WAIT(webrtc::PeerConnectionInterface::kIceGatheringComplete,
                     receiving_client_->ice_gathering_state(),
                     kMaxWaitForFramesMs);
    }

    EXPECT_TRUE_WAIT(FramesNotPending(audio_frame_count, video_frame_count),
                     kMaxWaitForFramesMs);
  }

  SignalingClass* initializing_client() { return initiating_client_.get(); }
  SignalingClass* receiving_client() { return receiving_client_.get(); }

 private:
  talk_base::scoped_ptr<SignalingClass> initiating_client_;
  talk_base::scoped_ptr<SignalingClass> receiving_client_;
};
typedef P2PTestConductor<JsepTestClient> JsepPeerConnectionP2PTestClient;

// This test sets up a Jsep call between two parties and tests DTMF.
// TODO(holmer): Disabled due to sometimes crashing on buildbots.
// See issue webrtc/2378.
TEST_F(JsepPeerConnectionP2PTestClient, DISABLED_LocalP2PTestDtmf) {
  ASSERT_TRUE(CreateTestClients());
  LocalP2PTest();
  VerifyDtmf();
}

// This test sets up a Jsep call between two parties and tests that we can get
// a video aspect ratio of 16:9.
TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTest16To9) {
  ASSERT_TRUE(CreateTestClients());
  FakeConstraints constraint;
  double requested_ratio = 640.0 / 360;
  constraint.SetMandatoryMinAspectRatio(requested_ratio);
  SetVideoConstraints(constraint, constraint);
  LocalP2PTest();

  ASSERT_LE(0, initializing_client()->rendered_height());
  double initiating_video_ratio =
      static_cast<double>(initializing_client()->rendered_width()) /
          initializing_client()->rendered_height();
  EXPECT_LE(requested_ratio, initiating_video_ratio);

  ASSERT_LE(0, receiving_client()->rendered_height());
  double receiving_video_ratio =
      static_cast<double>(receiving_client()->rendered_width()) /
          receiving_client()->rendered_height();
  EXPECT_LE(requested_ratio, receiving_video_ratio);
}

// This test sets up a Jsep call between two parties and tests that the
// received video has a resolution of 1280x720.
// TODO(mallinath): Enable when
// http://code.google.com/p/webrtc/issues/detail?id=981 is fixed.
TEST_F(JsepPeerConnectionP2PTestClient, DISABLED_LocalP2PTest1280By720) {
  ASSERT_TRUE(CreateTestClients());
  FakeConstraints constraint;
  constraint.SetMandatoryMinWidth(1280);
  constraint.SetMandatoryMinHeight(720);
  SetVideoConstraints(constraint, constraint);
  LocalP2PTest();
  VerifyRenderedSize(1280, 720);
}

// This test sets up a call between two endpoints that are configured to use
// DTLS key agreement. As a result, DTLS is negotiated and used for transport.
TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestDtls) {
  MAYBE_SKIP_TEST(talk_base::SSLStreamAdapter::HaveDtlsSrtp);
  FakeConstraints setup_constraints;
  setup_constraints.AddMandatory(MediaConstraintsInterface::kEnableDtlsSrtp,
                                 true);
  ASSERT_TRUE(CreateTestClients(&setup_constraints, &setup_constraints));
  LocalP2PTest();
  VerifyRenderedSize(640, 480);
}

// This test sets up an audio call initially and then upgrades to audio/video,
// using DTLS.
TEST_F(JsepPeerConnectionP2PTestClient, DISABLED_LocalP2PTestDtlsRenegotiate) {
  MAYBE_SKIP_TEST(talk_base::SSLStreamAdapter::HaveDtlsSrtp);
  FakeConstraints setup_constraints;
  setup_constraints.AddMandatory(MediaConstraintsInterface::kEnableDtlsSrtp,
                                 true);
  ASSERT_TRUE(CreateTestClients(&setup_constraints, &setup_constraints));
  receiving_client()->SetReceiveAudioVideo(true, false);
  LocalP2PTest();
  receiving_client()->SetReceiveAudioVideo(true, true);
  receiving_client()->Negotiate();
}

// This test sets up a call between an endpoint configured to use either SDES
// or DTLS (the offerer) and one that supports just SDES (the answerer). As a
// result, SDES is used instead of DTLS.
TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestOfferDtlsToSdes) {
  MAYBE_SKIP_TEST(talk_base::SSLStreamAdapter::HaveDtlsSrtp);
  FakeConstraints setup_constraints;
  setup_constraints.AddMandatory(MediaConstraintsInterface::kEnableDtlsSrtp,
                                 true);
  ASSERT_TRUE(CreateTestClients(&setup_constraints, NULL));
  LocalP2PTest();
  VerifyRenderedSize(640, 480);
}

// This test sets up a call between an endpoint configured to use SDES
// (the offerer) and one that supports either SDES or DTLS (the answerer). As a
// result, SDES is used instead of DTLS.
TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestOfferSdesToDtls) {
  MAYBE_SKIP_TEST(talk_base::SSLStreamAdapter::HaveDtlsSrtp);
  FakeConstraints setup_constraints;
  setup_constraints.AddMandatory(MediaConstraintsInterface::kEnableDtlsSrtp,
                                 true);
  ASSERT_TRUE(CreateTestClients(NULL, &setup_constraints));
  LocalP2PTest();
  VerifyRenderedSize(640, 480);
}

// This test sets up a call between two endpoints that are configured to use
// DTLS key agreement. The offerer doesn't support SDES. As a result, DTLS is
// negotiated and used for transport.
TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestOfferDtlsButNotSdes) {
  MAYBE_SKIP_TEST(talk_base::SSLStreamAdapter::HaveDtlsSrtp);
  FakeConstraints setup_constraints;
  setup_constraints.AddMandatory(MediaConstraintsInterface::kEnableDtlsSrtp,
                                 true);
  ASSERT_TRUE(CreateTestClients(&setup_constraints, &setup_constraints));
  receiving_client()->RemoveSdesCryptoFromReceivedSdp(true);
  LocalP2PTest();
  VerifyRenderedSize(640, 480);
}

// This test sets up a Jsep call between two parties, and the callee accepts
// receiving only video.
// BUG=https://code.google.com/p/webrtc/issues/detail?id=2288
TEST_F(JsepPeerConnectionP2PTestClient, DISABLED_LocalP2PTestAnswerVideo) {
  ASSERT_TRUE(CreateTestClients());
  receiving_client()->SetReceiveAudioVideo(false, true);
  LocalP2PTest();
}

// This test sets up a Jsep call between two parties, and the callee accepts
// receiving only audio.
TEST_F(JsepPeerConnectionP2PTestClient, DISABLED_LocalP2PTestAnswerAudio) {
  ASSERT_TRUE(CreateTestClients());
  receiving_client()->SetReceiveAudioVideo(true, false);
  LocalP2PTest();
}

// This test sets up a Jsep call between two parties, and the callee rejects
// both audio and video.
TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestAnswerNone) {
  ASSERT_TRUE(CreateTestClients());
  receiving_client()->SetReceiveAudioVideo(false, false);
  LocalP2PTest();
}

// This test sets up an audio and video call between two parties. After the
// call runs for a while (10 frames), the caller sends an updated offer with
// video being rejected. Once the re-negotiation is done, the video flow should
// stop and the audio flow should continue.
TEST_F(JsepPeerConnectionP2PTestClient, UpdateOfferWithRejectedContent) {
  ASSERT_TRUE(CreateTestClients());
  LocalP2PTest();
  TestUpdateOfferWithRejectedContent();
}

// This test sets up a Jsep call between two parties. The MSID is removed from
// the SDP strings from the caller.
TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestWithoutMsid) {
  ASSERT_TRUE(CreateTestClients());
  receiving_client()->RemoveMsidFromReceivedSdp(true);
  // TODO(perkj): Currently there is a bug that causes audio to stop playing if
  // audio and video are muxed when MSID is disabled. Remove
  // RemoveBundleFromReceivedSdp once
  // https://code.google.com/p/webrtc/issues/detail?id=1193 is fixed.
  receiving_client()->RemoveBundleFromReceivedSdp(true);
  LocalP2PTest();
}

// This test sets up a Jsep call between two parties, and the initiating peer
// sends two streams.
// TODO(perkj): Disabled due to
// https://code.google.com/p/webrtc/issues/detail?id=1454
TEST_F(JsepPeerConnectionP2PTestClient, DISABLED_LocalP2PTestTwoStreams) {
  ASSERT_TRUE(CreateTestClients());
  // Set an optional video constraint to a max width of 320 pixels to decrease
  // CPU usage.
  FakeConstraints constraint;
  constraint.SetOptionalMaxWidth(320);
  SetVideoConstraints(constraint, constraint);
  initializing_client()->AddMediaStream(true, true);
  initializing_client()->AddMediaStream(false, true);
  ASSERT_EQ(2u, initializing_client()->NumberOfLocalMediaStreams());
  LocalP2PTest();
  EXPECT_EQ(2u, receiving_client()->number_of_remote_streams());
}

| 1181 | // Test that we can receive the audio output level from a remote audio track. |
| 1182 | TEST_F(JsepPeerConnectionP2PTestClient, GetAudioOutputLevelStats) { |
| 1183 | ASSERT_TRUE(CreateTestClients()); |
| 1184 | LocalP2PTest(); |
| 1185 | |
| 1186 | StreamCollectionInterface* remote_streams = |
| 1187 | initializing_client()->remote_streams(); |
| 1188 | ASSERT_GT(remote_streams->count(), 0u); |
| 1189 | ASSERT_GT(remote_streams->at(0)->GetAudioTracks().size(), 0u); |
| 1190 | MediaStreamTrackInterface* remote_audio_track = |
| 1191 | remote_streams->at(0)->GetAudioTracks()[0]; |
| 1192 | |
| 1193 | // Get the audio output level stats. Note that the level is not available |
| 1194 | // until a RTCP packet has been received. |
| 1195 | EXPECT_TRUE_WAIT( |
| 1196 | initializing_client()->GetAudioOutputLevelStats(remote_audio_track) > 0, |
| 1197 | kMaxWaitForStatsMs); |
| 1198 | } |
| 1199 | |
| 1200 | // Test that an audio input level is reported. |
| 1201 | TEST_F(JsepPeerConnectionP2PTestClient, GetAudioInputLevelStats) { |
| 1202 | ASSERT_TRUE(CreateTestClients()); |
| 1203 | LocalP2PTest(); |
| 1204 | |
| 1205 | // Get the audio input level stats. The level should be available very |
| 1206 | // soon after the test starts. |
| 1207 | EXPECT_TRUE_WAIT(initializing_client()->GetAudioInputLevelStats() > 0, |
| 1208 | kMaxWaitForStatsMs); |
| 1209 | } |
| 1210 | |
| 1211 | // Test that we can get incoming byte counts from both audio and video tracks. |
| 1212 | TEST_F(JsepPeerConnectionP2PTestClient, GetBytesReceivedStats) { |
| 1213 | ASSERT_TRUE(CreateTestClients()); |
| 1214 | LocalP2PTest(); |
| 1215 | |
| 1216 | StreamCollectionInterface* remote_streams = |
| 1217 | initializing_client()->remote_streams(); |
| 1218 | ASSERT_GT(remote_streams->count(), 0u); |
| 1219 | ASSERT_GT(remote_streams->at(0)->GetAudioTracks().size(), 0u); |
| 1220 | MediaStreamTrackInterface* remote_audio_track = |
| 1221 | remote_streams->at(0)->GetAudioTracks()[0]; |
| 1222 | EXPECT_TRUE_WAIT( |
| 1223 | initializing_client()->GetBytesReceivedStats(remote_audio_track) > 0, |
| 1224 | kMaxWaitForStatsMs); |
| 1225 | |
| 1226 | MediaStreamTrackInterface* remote_video_track = |
| 1227 | remote_streams->at(0)->GetVideoTracks()[0]; |
| 1228 | EXPECT_TRUE_WAIT( |
| 1229 | initializing_client()->GetBytesReceivedStats(remote_video_track) > 0, |
| 1230 | kMaxWaitForStatsMs); |
| 1231 | } |
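
// The stats tests above and below repeat the same lookup of the first audio
// track in a stream collection. A helper along these lines could factor that
// out; this is only an illustrative sketch (the name and placement are
// hypothetical, and nothing in this file uses it).
namespace {
webrtc::MediaStreamTrackInterface* FirstAudioTrack(
    webrtc::StreamCollectionInterface* streams) {
  // Mirrors the lookup pattern used by the stats tests: first stream,
  // first audio track, or NULL if either is missing.
  if (streams->count() == 0 || streams->at(0)->GetAudioTracks().empty()) {
    return NULL;
  }
  return streams->at(0)->GetAudioTracks()[0];
}
}  // namespace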
| 1232 | |
| 1233 | // Test that we can get outgoing byte counts from both audio and video tracks. |
| 1234 | TEST_F(JsepPeerConnectionP2PTestClient, GetBytesSentStats) { |
| 1235 | ASSERT_TRUE(CreateTestClients()); |
| 1236 | LocalP2PTest(); |
| 1237 | |
| 1238 | StreamCollectionInterface* local_streams = |
| 1239 | initializing_client()->local_streams(); |
| 1240 | ASSERT_GT(local_streams->count(), 0u); |
| 1241 | ASSERT_GT(local_streams->at(0)->GetAudioTracks().size(), 0u); |
| 1242 | MediaStreamTrackInterface* local_audio_track = |
| 1243 | local_streams->at(0)->GetAudioTracks()[0]; |
| 1244 | EXPECT_TRUE_WAIT( |
| 1245 | initializing_client()->GetBytesSentStats(local_audio_track) > 0, |
| 1246 | kMaxWaitForStatsMs); |
| 1247 | |
| 1248 | MediaStreamTrackInterface* local_video_track = |
| 1249 | local_streams->at(0)->GetVideoTracks()[0]; |
| 1250 | EXPECT_TRUE_WAIT( |
| 1251 | initializing_client()->GetBytesSentStats(local_video_track) > 0, |
| 1252 | kMaxWaitForStatsMs); |
| 1253 | } |
| 1254 | |
| 1255 | // This test sets up a call between two parties with audio, video and data. |
| 1256 | TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestDataChannel) { |
| 1257 | FakeConstraints setup_constraints; |
| 1258 | setup_constraints.SetAllowRtpDataChannels(); |
| 1259 | ASSERT_TRUE(CreateTestClients(&setup_constraints, &setup_constraints)); |
| 1260 | initializing_client()->CreateDataChannel(); |
| 1261 | LocalP2PTest(); |
| 1262 | ASSERT_TRUE(initializing_client()->data_channel() != NULL); |
| 1263 | ASSERT_TRUE(receiving_client()->data_channel() != NULL); |
| 1264 | EXPECT_TRUE_WAIT(initializing_client()->data_observer()->IsOpen(), |
| 1265 | kMaxWaitMs); |
| 1266 | EXPECT_TRUE_WAIT(receiving_client()->data_observer()->IsOpen(), |
| 1267 | kMaxWaitMs); |
| 1268 | |
| 1269 | std::string data = "hello world"; |
| 1270 | initializing_client()->data_channel()->Send(DataBuffer(data)); |
| 1271 | EXPECT_EQ_WAIT(data, receiving_client()->data_observer()->last_message(), |
| 1272 | kMaxWaitMs); |
| 1273 | receiving_client()->data_channel()->Send(DataBuffer(data)); |
| 1274 | EXPECT_EQ_WAIT(data, initializing_client()->data_observer()->last_message(), |
| 1275 | kMaxWaitMs); |
| 1276 | |
| 1277 | receiving_client()->data_channel()->Close(); |
| 1278 | // Send new offer and answer. |
| 1279 | receiving_client()->Negotiate(); |
| 1280 | EXPECT_FALSE(initializing_client()->data_observer()->IsOpen()); |
| 1281 | EXPECT_FALSE(receiving_client()->data_observer()->IsOpen()); |
| 1282 | } |
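
// A minimal sketch (hypothetical, not used by the tests in this file) of
// guarding a send on the channel state; the kOpen check mirrors what the test
// above waits for before calling Send().
namespace {
bool SendIfOpen(webrtc::DataChannelInterface* channel,
                const std::string& text) {
  // Only attempt to send once the channel has reached the open state.
  if (channel->state() != webrtc::DataChannelInterface::kOpen) {
    return false;
  }
  return channel->Send(webrtc::DataBuffer(text));
}
}  // namespace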
| 1283 | |
| 1284 | // This test sets up a call between two parties and creates a data channel. |
| 1285 | // It verifies that received data is buffered until an observer has been |
| 1286 | // registered. |
| 1287 | // RTP data channels can receive data before the underlying transport has |
| 1288 | // detected that a channel is writable, which means data can arrive before the |
| 1289 | // data channel state changes to open. That case is hard to test directly, but |
| 1290 | // the same buffering is used there as well. |
| 1291 | TEST_F(JsepPeerConnectionP2PTestClient, RegisterDataChannelObserver) { |
| 1292 | FakeConstraints setup_constraints; |
| 1293 | setup_constraints.SetAllowRtpDataChannels(); |
| 1294 | ASSERT_TRUE(CreateTestClients(&setup_constraints, &setup_constraints)); |
| 1295 | initializing_client()->CreateDataChannel(); |
| 1296 | initializing_client()->Negotiate(); |
| 1297 | |
| 1298 | ASSERT_TRUE(initializing_client()->data_channel() != NULL); |
| 1299 | ASSERT_TRUE(receiving_client()->data_channel() != NULL); |
| 1300 | EXPECT_TRUE_WAIT(initializing_client()->data_observer()->IsOpen(), |
| 1301 | kMaxWaitMs); |
| 1302 | EXPECT_EQ_WAIT(DataChannelInterface::kOpen, |
| 1303 | receiving_client()->data_channel()->state(), kMaxWaitMs); |
| 1304 | |
| 1305 | // Unregister the existing observer. |
| 1306 | receiving_client()->data_channel()->UnregisterObserver(); |
| 1307 | std::string data = "hello world"; |
| 1308 | initializing_client()->data_channel()->Send(DataBuffer(data)); |
| 1309 | // Wait a while to allow the sent data to arrive before an observer is |
| 1310 | // registered. |
| 1311 | talk_base::Thread::Current()->ProcessMessages(100); |
| 1312 | |
| 1313 | MockDataChannelObserver new_observer(receiving_client()->data_channel()); |
| 1314 | EXPECT_EQ_WAIT(data, new_observer.last_message(), kMaxWaitMs); |
| 1315 | } |
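
// A minimal observer sketch (hypothetical, not used by these tests), written
// against the webrtc::DataChannelObserver interface exercised above.
// Registering it is what releases any messages the channel buffered while no
// observer was attached.
namespace {
class LastMessageObserver : public webrtc::DataChannelObserver {
 public:
  explicit LastMessageObserver(webrtc::DataChannelInterface* channel)
      : channel_(channel) {
    channel_->RegisterObserver(this);
  }
  virtual ~LastMessageObserver() { channel_->UnregisterObserver(); }
  virtual void OnStateChange() {}
  virtual void OnMessage(const webrtc::DataBuffer& buffer) {
    // Record the payload of the most recently received buffer.
    last_message_.assign(buffer.data.data(), buffer.data.length());
  }
  const std::string& last_message() const { return last_message_; }

 private:
  talk_base::scoped_refptr<webrtc::DataChannelInterface> channel_;
  std::string last_message_;
};
}  // namespace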
| 1316 | |
| 1317 | // This test sets up a call between two parties with audio and video, but only |
| 1318 | // the initiating client supports data. |
| 1319 | TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestReceiverDoesntSupportData) { |
| 1320 | FakeConstraints setup_constraints; |
| 1321 | setup_constraints.SetAllowRtpDataChannels(); |
| 1322 | ASSERT_TRUE(CreateTestClients(&setup_constraints, NULL)); |
| 1323 | initializing_client()->CreateDataChannel(); |
| 1324 | LocalP2PTest(); |
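  // The receiver was created without the RTP data channel constraint, so its
  // answer presumably rejects the data content; it never exposes a data
  // channel, and the initiator's channel never reaches the open state.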
| 1325 | EXPECT_TRUE(initializing_client()->data_channel() != NULL); |
| 1326 | EXPECT_FALSE(receiving_client()->data_channel()); |
| 1327 | EXPECT_FALSE(initializing_client()->data_observer()->IsOpen()); |
| 1328 | } |
| 1329 | |
| 1330 | // This test sets up a call between two parties with audio and video. Once |
| 1331 | // audio and video are set up and flowing, a data channel is negotiated. |
| 1332 | TEST_F(JsepPeerConnectionP2PTestClient, AddDataChannelAfterRenegotiation) { |
| 1333 | FakeConstraints setup_constraints; |
| 1334 | setup_constraints.SetAllowRtpDataChannels(); |
| 1335 | ASSERT_TRUE(CreateTestClients(&setup_constraints, &setup_constraints)); |
| 1336 | LocalP2PTest(); |
| 1337 | initializing_client()->CreateDataChannel(); |
| 1338 | // Send new offer and answer. |
| 1339 | initializing_client()->Negotiate(); |
| 1340 | ASSERT_TRUE(initializing_client()->data_channel() != NULL); |
| 1341 | ASSERT_TRUE(receiving_client()->data_channel() != NULL); |
| 1342 | EXPECT_TRUE_WAIT(initializing_client()->data_observer()->IsOpen(), |
| 1343 | kMaxWaitMs); |
| 1344 | EXPECT_TRUE_WAIT(receiving_client()->data_observer()->IsOpen(), |
| 1345 | kMaxWaitMs); |
| 1346 | } |
| 1347 | |
| 1348 | // This test sets up a call between two parties with audio and video. During |
| 1349 | // the call, the initializing side restarts ICE, and the test verifies that |
| 1350 | // new ICE candidates are generated and that audio and video can still flow. |
| 1351 | TEST_F(JsepPeerConnectionP2PTestClient, IceRestart) { |
| 1352 | ASSERT_TRUE(CreateTestClients()); |
| 1353 | |
| 1354 | // Negotiate, wait for ICE completion, and make sure audio and video play. |
| 1355 | LocalP2PTest(); |
| 1356 | |
| 1357 | // Create an SDP string of the first audio candidate for both clients. |
| 1358 | const webrtc::IceCandidateCollection* audio_candidates_initiator = |
| 1359 | initializing_client()->pc()->local_description()->candidates(0); |
| 1360 | const webrtc::IceCandidateCollection* audio_candidates_receiver = |
| 1361 | receiving_client()->pc()->local_description()->candidates(0); |
| 1362 | ASSERT_GT(audio_candidates_initiator->count(), 0u); |
| 1363 | ASSERT_GT(audio_candidates_receiver->count(), 0u); |
| 1364 | std::string initiator_candidate; |
| 1365 | EXPECT_TRUE( |
| 1366 | audio_candidates_initiator->at(0)->ToString(&initiator_candidate)); |
| 1367 | std::string receiver_candidate; |
| 1368 | EXPECT_TRUE(audio_candidates_receiver->at(0)->ToString(&receiver_candidate)); |
| 1369 | |
| 1370 | // Restart ICE on the initializing client. |
| 1371 | receiving_client()->SetExpectIceRestart(true); |
| 1372 | initializing_client()->IceRestart(); |
| 1373 | |
| 1374 | // Negotiate and wait for ICE completion again and make sure audio and video |
| 1375 | // still play. |
| 1376 | LocalP2PTest(); |
| 1377 | |
| 1378 | // Create an SDP string of the first audio candidate for both clients again. |
| 1379 | const webrtc::IceCandidateCollection* audio_candidates_initiator_restart = |
| 1380 | initializing_client()->pc()->local_description()->candidates(0); |
| 1381 | const webrtc::IceCandidateCollection* audio_candidates_receiver_restart = |
| 1382 | receiving_client()->pc()->local_description()->candidates(0); |
| 1383 | ASSERT_GT(audio_candidates_initiator_restart->count(), 0u); |
| 1384 | ASSERT_GT(audio_candidates_receiver_restart->count(), 0u); |
| 1385 | std::string initiator_candidate_restart; |
| 1386 | EXPECT_TRUE(audio_candidates_initiator_restart->at(0)->ToString( |
| 1387 | &initiator_candidate_restart)); |
| 1388 | std::string receiver_candidate_restart; |
| 1389 | EXPECT_TRUE(audio_candidates_receiver_restart->at(0)->ToString( |
| 1390 | &receiver_candidate_restart)); |
| 1391 | |
| 1392 | // Verify that the first candidate in each local session description has |
| 1393 | // changed. |
| 1394 | EXPECT_NE(initiator_candidate, initiator_candidate_restart); |
| 1395 | EXPECT_NE(receiver_candidate, receiver_candidate_restart); |
| 1396 | } |
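
// Illustrative sketch (hypothetical helper, not used above): collects the SDP
// string of every candidate in a collection, using the same
// IceCandidateCollection calls exercised by the test. Comparing the full
// candidate lists before and after the restart would be a natural extension
// of the first-candidate check above.
namespace {
std::vector<std::string> CandidateSdpStrings(
    const webrtc::IceCandidateCollection* candidates) {
  std::vector<std::string> sdp_strings;
  for (size_t i = 0; i < candidates->count(); ++i) {
    // ToString() serializes a single candidate to its SDP representation.
    std::string sdp;
    if (candidates->at(i)->ToString(&sdp)) {
      sdp_strings.push_back(sdp);
    }
  }
  return sdp_strings;
}
}  // namespace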
| 1397 | |
| 1398 | |
| 1399 | // This test sets up a Jsep call between two parties with an external |
| 1400 | // VideoDecoderFactory. |
stefan@webrtc.org | da79008 | 2013-09-17 13:11:38 +0000 | [diff] [blame] | 1401 | // TODO(holmer): Disabled due to sometimes crashing on buildbots. |
| 1402 | // See issue webrtc/2378. |
| 1403 | TEST_F(JsepPeerConnectionP2PTestClient, |
| 1404 | DISABLED_LocalP2PTestWithVideoDecoderFactory) { |
henrike@webrtc.org | 28e2075 | 2013-07-10 00:45:36 +0000 | [diff] [blame] | 1405 | ASSERT_TRUE(CreateTestClients()); |
| 1406 | EnableVideoDecoderFactory(); |
| 1407 | LocalP2PTest(); |
| 1408 | } |