henrike@webrtc.org | 28e2075 | 2013-07-10 00:45:36 +0000 | [diff] [blame] | 1 | /* |
| 2 | * libjingle |
| 3 | * Copyright 2012, Google Inc. |
| 4 | * |
| 5 | * Redistribution and use in source and binary forms, with or without |
| 6 | * modification, are permitted provided that the following conditions are met: |
| 7 | * |
| 8 | * 1. Redistributions of source code must retain the above copyright notice, |
| 9 | * this list of conditions and the following disclaimer. |
| 10 | * 2. Redistributions in binary form must reproduce the above copyright notice, |
| 11 | * this list of conditions and the following disclaimer in the documentation |
| 12 | * and/or other materials provided with the distribution. |
| 13 | * 3. The name of the author may not be used to endorse or promote products |
| 14 | * derived from this software without specific prior written permission. |
| 15 | * |
| 16 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED |
| 17 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF |
| 18 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO |
| 19 | * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| 20 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
| 21 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; |
| 22 | * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, |
| 23 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR |
| 24 | * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF |
| 25 | * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 26 | */ |
| 27 | |
| 28 | #include <stdio.h> |
| 29 | |
| 30 | #include <algorithm> |
| 31 | #include <list> |
| 32 | #include <map> |
| 33 | #include <vector> |
| 34 | |
| 35 | #include "talk/app/webrtc/dtmfsender.h" |
| 36 | #include "talk/app/webrtc/fakeportallocatorfactory.h" |
| 37 | #include "talk/app/webrtc/localaudiosource.h" |
| 38 | #include "talk/app/webrtc/mediastreaminterface.h" |
| 39 | #include "talk/app/webrtc/peerconnectionfactory.h" |
| 40 | #include "talk/app/webrtc/peerconnectioninterface.h" |
| 41 | #include "talk/app/webrtc/test/fakeaudiocapturemodule.h" |
| 42 | #include "talk/app/webrtc/test/fakeconstraints.h" |
jiayl@webrtc.org | a576faf | 2014-01-29 17:45:53 +0000 | [diff] [blame] | 43 | #include "talk/app/webrtc/test/fakedtlsidentityservice.h" |
henrike@webrtc.org | 28e2075 | 2013-07-10 00:45:36 +0000 | [diff] [blame] | 44 | #include "talk/app/webrtc/test/fakevideotrackrenderer.h" |
| 45 | #include "talk/app/webrtc/test/fakeperiodicvideocapturer.h" |
| 46 | #include "talk/app/webrtc/test/mockpeerconnectionobservers.h" |
| 47 | #include "talk/app/webrtc/videosourceinterface.h" |
| 48 | #include "talk/base/gunit.h" |
| 49 | #include "talk/base/scoped_ptr.h" |
| 50 | #include "talk/base/ssladapter.h" |
| 51 | #include "talk/base/sslstreamadapter.h" |
| 52 | #include "talk/base/thread.h" |
| 53 | #include "talk/media/webrtc/fakewebrtcvideoengine.h" |
| 54 | #include "talk/p2p/base/constants.h" |
| 55 | #include "talk/p2p/base/sessiondescription.h" |
| 56 | #include "talk/session/media/mediasession.h" |
| 57 | |
// Bails out of the current test early (with an INFO log) when the given
// feature probe, e.g. a DTLS-support check, reports the feature unavailable.
// Must be used inside a void test body since it expands to a bare `return`.
#define MAYBE_SKIP_TEST(feature)                    \
  if (!(feature())) {                               \
    LOG(LS_INFO) << "Feature disabled... skipping"; \
    return;                                         \
  }
| 63 | |
| 64 | using cricket::ContentInfo; |
| 65 | using cricket::FakeWebRtcVideoDecoder; |
| 66 | using cricket::FakeWebRtcVideoDecoderFactory; |
| 67 | using cricket::FakeWebRtcVideoEncoder; |
| 68 | using cricket::FakeWebRtcVideoEncoderFactory; |
| 69 | using cricket::MediaContentDescription; |
| 70 | using webrtc::DataBuffer; |
| 71 | using webrtc::DataChannelInterface; |
| 72 | using webrtc::DtmfSender; |
| 73 | using webrtc::DtmfSenderInterface; |
| 74 | using webrtc::DtmfSenderObserverInterface; |
| 75 | using webrtc::FakeConstraints; |
| 76 | using webrtc::MediaConstraintsInterface; |
| 77 | using webrtc::MediaStreamTrackInterface; |
| 78 | using webrtc::MockCreateSessionDescriptionObserver; |
| 79 | using webrtc::MockDataChannelObserver; |
| 80 | using webrtc::MockSetSessionDescriptionObserver; |
| 81 | using webrtc::MockStatsObserver; |
| 82 | using webrtc::SessionDescriptionInterface; |
| 83 | using webrtc::StreamCollectionInterface; |
| 84 | |
// Timeouts (milliseconds) for the EXPECT_*_WAIT style polling checks.
static const int kMaxWaitMs = 1000;
static const int kMaxWaitForStatsMs = 3000;
static const int kMaxWaitForFramesMs = 5000;
// Number of audio/video frames that must be received before the media path
// is considered established.
static const int kEndAudioFrameCount = 3;
static const int kEndVideoFrameCount = 3;

// Label bases used when creating streams/tracks/data channels; stream labels
// get a numeric suffix appended at runtime (see AddMediaStream).
static const char kStreamLabelBase[] = "stream_label";
static const char kVideoTrackLabelBase[] = "video_track";
static const char kAudioTrackLabelBase[] = "audio_track";
static const char kDataChannelLabel[] = "data_channel";
| 95 | |
// Removes from |sdp| every line that starts with |line_start|, including the
// line's "\r\n" terminator. If the last matching line has no terminator, the
// remainder of the string is removed.
static void RemoveLinesFromSdp(const std::string& line_start,
                               std::string* sdp) {
  const char kSdpLineEnd[] = "\r\n";
  size_t ssrc_pos = 0;
  while ((ssrc_pos = sdp->find(line_start, ssrc_pos)) !=
      std::string::npos) {
    size_t end_ssrc = sdp->find(kSdpLineEnd, ssrc_pos);
    if (end_ssrc == std::string::npos) {
      // No line terminator: drop everything from the match onward. The old
      // code computed `npos - ssrc_pos + strlen(kSdpLineEnd)` here, which
      // wraps around size_t and erased a bogus count.
      sdp->erase(ssrc_pos);
      break;
    }
    sdp->erase(ssrc_pos, end_ssrc - ssrc_pos + strlen(kSdpLineEnd));
  }
}
| 106 | |
// Empty tag base class for objects that receive signaling messages from a
// remote test peer. Concrete message callbacks are declared by subclasses
// (see JsepMessageReceiver); instances exist only through those subclasses.
class SignalingMessageReceiver {
 public:
 protected:
  SignalingMessageReceiver() {}
  virtual ~SignalingMessageReceiver() {}
};
| 113 | |
// Interface through which a test client receives JSEP signaling from its
// remote peer: SDP offers/answers and trickled ICE candidates.
class JsepMessageReceiver : public SignalingMessageReceiver {
 public:
  // |msg| is intentionally a non-const reference: implementations may filter
  // or rewrite the SDP in place before applying it.
  virtual void ReceiveSdpMessage(const std::string& type,
                                 std::string& msg) = 0;
  virtual void ReceiveIceMessage(const std::string& sdp_mid,
                                 int sdp_mline_index,
                                 const std::string& msg) = 0;

 protected:
  JsepMessageReceiver() {}
  virtual ~JsepMessageReceiver() {}
};
| 126 | |
| 127 | template <typename MessageReceiver> |
| 128 | class PeerConnectionTestClientBase |
| 129 | : public webrtc::PeerConnectionObserver, |
| 130 | public MessageReceiver { |
| 131 | public: |
| 132 | ~PeerConnectionTestClientBase() { |
| 133 | while (!fake_video_renderers_.empty()) { |
| 134 | RenderMap::iterator it = fake_video_renderers_.begin(); |
| 135 | delete it->second; |
| 136 | fake_video_renderers_.erase(it); |
| 137 | } |
| 138 | } |
| 139 | |
| 140 | virtual void Negotiate() = 0; |
| 141 | |
| 142 | virtual void Negotiate(bool audio, bool video) = 0; |
| 143 | |
| 144 | virtual void SetVideoConstraints( |
| 145 | const webrtc::FakeConstraints& video_constraint) { |
| 146 | video_constraints_ = video_constraint; |
| 147 | } |
| 148 | |
| 149 | void AddMediaStream(bool audio, bool video) { |
| 150 | std::string label = kStreamLabelBase + |
henrike@webrtc.org | 28654cb | 2013-07-22 21:07:49 +0000 | [diff] [blame] | 151 | talk_base::ToString<int>( |
| 152 | static_cast<int>(peer_connection_->local_streams()->count())); |
henrike@webrtc.org | 28e2075 | 2013-07-10 00:45:36 +0000 | [diff] [blame] | 153 | talk_base::scoped_refptr<webrtc::MediaStreamInterface> stream = |
| 154 | peer_connection_factory_->CreateLocalMediaStream(label); |
| 155 | |
| 156 | if (audio && can_receive_audio()) { |
| 157 | FakeConstraints constraints; |
| 158 | // Disable highpass filter so that we can get all the test audio frames. |
| 159 | constraints.AddMandatory( |
| 160 | MediaConstraintsInterface::kHighpassFilter, false); |
wu@webrtc.org | 97077a3 | 2013-10-25 21:18:33 +0000 | [diff] [blame] | 161 | talk_base::scoped_refptr<webrtc::AudioSourceInterface> source = |
| 162 | peer_connection_factory_->CreateAudioSource(&constraints); |
henrike@webrtc.org | 28e2075 | 2013-07-10 00:45:36 +0000 | [diff] [blame] | 163 | // TODO(perkj): Test audio source when it is implemented. Currently audio |
| 164 | // always use the default input. |
| 165 | talk_base::scoped_refptr<webrtc::AudioTrackInterface> audio_track( |
| 166 | peer_connection_factory_->CreateAudioTrack(kAudioTrackLabelBase, |
| 167 | source)); |
| 168 | stream->AddTrack(audio_track); |
| 169 | } |
| 170 | if (video && can_receive_video()) { |
| 171 | stream->AddTrack(CreateLocalVideoTrack(label)); |
| 172 | } |
| 173 | |
| 174 | EXPECT_TRUE(peer_connection_->AddStream(stream, NULL)); |
| 175 | } |
| 176 | |
| 177 | size_t NumberOfLocalMediaStreams() { |
| 178 | return peer_connection_->local_streams()->count(); |
| 179 | } |
| 180 | |
| 181 | bool SessionActive() { |
| 182 | return peer_connection_->signaling_state() == |
| 183 | webrtc::PeerConnectionInterface::kStable; |
| 184 | } |
| 185 | |
| 186 | void set_signaling_message_receiver( |
| 187 | MessageReceiver* signaling_message_receiver) { |
| 188 | signaling_message_receiver_ = signaling_message_receiver; |
| 189 | } |
| 190 | |
| 191 | void EnableVideoDecoderFactory() { |
| 192 | video_decoder_factory_enabled_ = true; |
| 193 | fake_video_decoder_factory_->AddSupportedVideoCodecType( |
| 194 | webrtc::kVideoCodecVP8); |
| 195 | } |
| 196 | |
| 197 | bool AudioFramesReceivedCheck(int number_of_frames) const { |
| 198 | return number_of_frames <= fake_audio_capture_module_->frames_received(); |
| 199 | } |
| 200 | |
| 201 | bool VideoFramesReceivedCheck(int number_of_frames) { |
| 202 | if (video_decoder_factory_enabled_) { |
| 203 | const std::vector<FakeWebRtcVideoDecoder*>& decoders |
| 204 | = fake_video_decoder_factory_->decoders(); |
| 205 | if (decoders.empty()) { |
| 206 | return number_of_frames <= 0; |
| 207 | } |
| 208 | |
| 209 | for (std::vector<FakeWebRtcVideoDecoder*>::const_iterator |
| 210 | it = decoders.begin(); it != decoders.end(); ++it) { |
| 211 | if (number_of_frames > (*it)->GetNumFramesReceived()) { |
| 212 | return false; |
| 213 | } |
| 214 | } |
| 215 | return true; |
| 216 | } else { |
| 217 | if (fake_video_renderers_.empty()) { |
| 218 | return number_of_frames <= 0; |
| 219 | } |
| 220 | |
| 221 | for (RenderMap::const_iterator it = fake_video_renderers_.begin(); |
| 222 | it != fake_video_renderers_.end(); ++it) { |
| 223 | if (number_of_frames > it->second->num_rendered_frames()) { |
| 224 | return false; |
| 225 | } |
| 226 | } |
| 227 | return true; |
| 228 | } |
| 229 | } |
| 230 | // Verify the CreateDtmfSender interface |
| 231 | void VerifyDtmf() { |
| 232 | talk_base::scoped_ptr<DummyDtmfObserver> observer(new DummyDtmfObserver()); |
| 233 | talk_base::scoped_refptr<DtmfSenderInterface> dtmf_sender; |
| 234 | |
| 235 | // We can't create a DTMF sender with an invalid audio track or a non local |
| 236 | // track. |
| 237 | EXPECT_TRUE(peer_connection_->CreateDtmfSender(NULL) == NULL); |
| 238 | talk_base::scoped_refptr<webrtc::AudioTrackInterface> non_localtrack( |
| 239 | peer_connection_factory_->CreateAudioTrack("dummy_track", |
| 240 | NULL)); |
| 241 | EXPECT_TRUE(peer_connection_->CreateDtmfSender(non_localtrack) == NULL); |
| 242 | |
| 243 | // We should be able to create a DTMF sender from a local track. |
| 244 | webrtc::AudioTrackInterface* localtrack = |
| 245 | peer_connection_->local_streams()->at(0)->GetAudioTracks()[0]; |
| 246 | dtmf_sender = peer_connection_->CreateDtmfSender(localtrack); |
| 247 | EXPECT_TRUE(dtmf_sender.get() != NULL); |
| 248 | dtmf_sender->RegisterObserver(observer.get()); |
| 249 | |
| 250 | // Test the DtmfSender object just created. |
| 251 | EXPECT_TRUE(dtmf_sender->CanInsertDtmf()); |
| 252 | EXPECT_TRUE(dtmf_sender->InsertDtmf("1a", 100, 50)); |
| 253 | |
| 254 | // We don't need to verify that the DTMF tones are actually sent out because |
| 255 | // that is already covered by the tests of the lower level components. |
| 256 | |
| 257 | EXPECT_TRUE_WAIT(observer->completed(), kMaxWaitMs); |
| 258 | std::vector<std::string> tones; |
| 259 | tones.push_back("1"); |
| 260 | tones.push_back("a"); |
| 261 | tones.push_back(""); |
| 262 | observer->Verify(tones); |
| 263 | |
| 264 | dtmf_sender->UnregisterObserver(); |
| 265 | } |
| 266 | |
| 267 | // Verifies that the SessionDescription have rejected the appropriate media |
| 268 | // content. |
| 269 | void VerifyRejectedMediaInSessionDescription() { |
| 270 | ASSERT_TRUE(peer_connection_->remote_description() != NULL); |
| 271 | ASSERT_TRUE(peer_connection_->local_description() != NULL); |
| 272 | const cricket::SessionDescription* remote_desc = |
| 273 | peer_connection_->remote_description()->description(); |
| 274 | const cricket::SessionDescription* local_desc = |
| 275 | peer_connection_->local_description()->description(); |
| 276 | |
| 277 | const ContentInfo* remote_audio_content = GetFirstAudioContent(remote_desc); |
| 278 | if (remote_audio_content) { |
| 279 | const ContentInfo* audio_content = |
| 280 | GetFirstAudioContent(local_desc); |
| 281 | EXPECT_EQ(can_receive_audio(), !audio_content->rejected); |
| 282 | } |
| 283 | |
| 284 | const ContentInfo* remote_video_content = GetFirstVideoContent(remote_desc); |
| 285 | if (remote_video_content) { |
| 286 | const ContentInfo* video_content = |
| 287 | GetFirstVideoContent(local_desc); |
| 288 | EXPECT_EQ(can_receive_video(), !video_content->rejected); |
| 289 | } |
| 290 | } |
| 291 | |
| 292 | void SetExpectIceRestart(bool expect_restart) { |
| 293 | expect_ice_restart_ = expect_restart; |
| 294 | } |
| 295 | |
| 296 | bool ExpectIceRestart() const { return expect_ice_restart_; } |
| 297 | |
| 298 | void VerifyLocalIceUfragAndPassword() { |
| 299 | ASSERT_TRUE(peer_connection_->local_description() != NULL); |
| 300 | const cricket::SessionDescription* desc = |
| 301 | peer_connection_->local_description()->description(); |
| 302 | const cricket::ContentInfos& contents = desc->contents(); |
| 303 | |
| 304 | for (size_t index = 0; index < contents.size(); ++index) { |
| 305 | if (contents[index].rejected) |
| 306 | continue; |
| 307 | const cricket::TransportDescription* transport_desc = |
| 308 | desc->GetTransportDescriptionByName(contents[index].name); |
| 309 | |
| 310 | std::map<int, IceUfragPwdPair>::const_iterator ufragpair_it = |
henrike@webrtc.org | 28654cb | 2013-07-22 21:07:49 +0000 | [diff] [blame] | 311 | ice_ufrag_pwd_.find(static_cast<int>(index)); |
henrike@webrtc.org | 28e2075 | 2013-07-10 00:45:36 +0000 | [diff] [blame] | 312 | if (ufragpair_it == ice_ufrag_pwd_.end()) { |
| 313 | ASSERT_FALSE(ExpectIceRestart()); |
henrike@webrtc.org | 28654cb | 2013-07-22 21:07:49 +0000 | [diff] [blame] | 314 | ice_ufrag_pwd_[static_cast<int>(index)] = |
| 315 | IceUfragPwdPair(transport_desc->ice_ufrag, transport_desc->ice_pwd); |
henrike@webrtc.org | 28e2075 | 2013-07-10 00:45:36 +0000 | [diff] [blame] | 316 | } else if (ExpectIceRestart()) { |
| 317 | const IceUfragPwdPair& ufrag_pwd = ufragpair_it->second; |
| 318 | EXPECT_NE(ufrag_pwd.first, transport_desc->ice_ufrag); |
| 319 | EXPECT_NE(ufrag_pwd.second, transport_desc->ice_pwd); |
| 320 | } else { |
| 321 | const IceUfragPwdPair& ufrag_pwd = ufragpair_it->second; |
| 322 | EXPECT_EQ(ufrag_pwd.first, transport_desc->ice_ufrag); |
| 323 | EXPECT_EQ(ufrag_pwd.second, transport_desc->ice_pwd); |
| 324 | } |
| 325 | } |
| 326 | } |
| 327 | |
| 328 | int GetAudioOutputLevelStats(webrtc::MediaStreamTrackInterface* track) { |
| 329 | talk_base::scoped_refptr<MockStatsObserver> |
| 330 | observer(new talk_base::RefCountedObject<MockStatsObserver>()); |
| 331 | EXPECT_TRUE(peer_connection_->GetStats(observer, track)); |
| 332 | EXPECT_TRUE_WAIT(observer->called(), kMaxWaitMs); |
| 333 | return observer->AudioOutputLevel(); |
| 334 | } |
| 335 | |
| 336 | int GetAudioInputLevelStats() { |
| 337 | talk_base::scoped_refptr<MockStatsObserver> |
| 338 | observer(new talk_base::RefCountedObject<MockStatsObserver>()); |
| 339 | EXPECT_TRUE(peer_connection_->GetStats(observer, NULL)); |
| 340 | EXPECT_TRUE_WAIT(observer->called(), kMaxWaitMs); |
| 341 | return observer->AudioInputLevel(); |
| 342 | } |
| 343 | |
| 344 | int GetBytesReceivedStats(webrtc::MediaStreamTrackInterface* track) { |
| 345 | talk_base::scoped_refptr<MockStatsObserver> |
| 346 | observer(new talk_base::RefCountedObject<MockStatsObserver>()); |
| 347 | EXPECT_TRUE(peer_connection_->GetStats(observer, track)); |
| 348 | EXPECT_TRUE_WAIT(observer->called(), kMaxWaitMs); |
| 349 | return observer->BytesReceived(); |
| 350 | } |
| 351 | |
| 352 | int GetBytesSentStats(webrtc::MediaStreamTrackInterface* track) { |
| 353 | talk_base::scoped_refptr<MockStatsObserver> |
| 354 | observer(new talk_base::RefCountedObject<MockStatsObserver>()); |
| 355 | EXPECT_TRUE(peer_connection_->GetStats(observer, track)); |
| 356 | EXPECT_TRUE_WAIT(observer->called(), kMaxWaitMs); |
| 357 | return observer->BytesSent(); |
| 358 | } |
| 359 | |
| 360 | int rendered_width() { |
| 361 | EXPECT_FALSE(fake_video_renderers_.empty()); |
| 362 | return fake_video_renderers_.empty() ? 1 : |
| 363 | fake_video_renderers_.begin()->second->width(); |
| 364 | } |
| 365 | |
| 366 | int rendered_height() { |
| 367 | EXPECT_FALSE(fake_video_renderers_.empty()); |
| 368 | return fake_video_renderers_.empty() ? 1 : |
| 369 | fake_video_renderers_.begin()->second->height(); |
| 370 | } |
| 371 | |
| 372 | size_t number_of_remote_streams() { |
| 373 | if (!pc()) |
| 374 | return 0; |
| 375 | return pc()->remote_streams()->count(); |
| 376 | } |
| 377 | |
| 378 | StreamCollectionInterface* remote_streams() { |
| 379 | if (!pc()) { |
| 380 | ADD_FAILURE(); |
| 381 | return NULL; |
| 382 | } |
| 383 | return pc()->remote_streams(); |
| 384 | } |
| 385 | |
| 386 | StreamCollectionInterface* local_streams() { |
| 387 | if (!pc()) { |
| 388 | ADD_FAILURE(); |
| 389 | return NULL; |
| 390 | } |
| 391 | return pc()->local_streams(); |
| 392 | } |
| 393 | |
| 394 | webrtc::PeerConnectionInterface::SignalingState signaling_state() { |
| 395 | return pc()->signaling_state(); |
| 396 | } |
| 397 | |
| 398 | webrtc::PeerConnectionInterface::IceConnectionState ice_connection_state() { |
| 399 | return pc()->ice_connection_state(); |
| 400 | } |
| 401 | |
| 402 | webrtc::PeerConnectionInterface::IceGatheringState ice_gathering_state() { |
| 403 | return pc()->ice_gathering_state(); |
| 404 | } |
| 405 | |
| 406 | // PeerConnectionObserver callbacks. |
| 407 | virtual void OnError() {} |
| 408 | virtual void OnMessage(const std::string&) {} |
| 409 | virtual void OnSignalingMessage(const std::string& /*msg*/) {} |
| 410 | virtual void OnSignalingChange( |
| 411 | webrtc::PeerConnectionInterface::SignalingState new_state) { |
| 412 | EXPECT_EQ(peer_connection_->signaling_state(), new_state); |
| 413 | } |
| 414 | virtual void OnAddStream(webrtc::MediaStreamInterface* media_stream) { |
| 415 | for (size_t i = 0; i < media_stream->GetVideoTracks().size(); ++i) { |
| 416 | const std::string id = media_stream->GetVideoTracks()[i]->id(); |
| 417 | ASSERT_TRUE(fake_video_renderers_.find(id) == |
| 418 | fake_video_renderers_.end()); |
| 419 | fake_video_renderers_[id] = new webrtc::FakeVideoTrackRenderer( |
| 420 | media_stream->GetVideoTracks()[i]); |
| 421 | } |
| 422 | } |
| 423 | virtual void OnRemoveStream(webrtc::MediaStreamInterface* media_stream) {} |
| 424 | virtual void OnRenegotiationNeeded() {} |
| 425 | virtual void OnIceConnectionChange( |
| 426 | webrtc::PeerConnectionInterface::IceConnectionState new_state) { |
| 427 | EXPECT_EQ(peer_connection_->ice_connection_state(), new_state); |
| 428 | } |
| 429 | virtual void OnIceGatheringChange( |
| 430 | webrtc::PeerConnectionInterface::IceGatheringState new_state) { |
| 431 | EXPECT_EQ(peer_connection_->ice_gathering_state(), new_state); |
| 432 | } |
| 433 | virtual void OnIceCandidate( |
| 434 | const webrtc::IceCandidateInterface* /*candidate*/) {} |
| 435 | |
| 436 | webrtc::PeerConnectionInterface* pc() { |
| 437 | return peer_connection_.get(); |
| 438 | } |
| 439 | |
| 440 | protected: |
| 441 | explicit PeerConnectionTestClientBase(const std::string& id) |
| 442 | : id_(id), |
| 443 | expect_ice_restart_(false), |
| 444 | fake_video_decoder_factory_(NULL), |
| 445 | fake_video_encoder_factory_(NULL), |
| 446 | video_decoder_factory_enabled_(false), |
| 447 | signaling_message_receiver_(NULL) { |
| 448 | } |
| 449 | bool Init(const MediaConstraintsInterface* constraints) { |
| 450 | EXPECT_TRUE(!peer_connection_); |
| 451 | EXPECT_TRUE(!peer_connection_factory_); |
| 452 | allocator_factory_ = webrtc::FakePortAllocatorFactory::Create(); |
| 453 | if (!allocator_factory_) { |
| 454 | return false; |
| 455 | } |
| 456 | audio_thread_.Start(); |
| 457 | fake_audio_capture_module_ = FakeAudioCaptureModule::Create( |
| 458 | &audio_thread_); |
| 459 | |
| 460 | if (fake_audio_capture_module_ == NULL) { |
| 461 | return false; |
| 462 | } |
| 463 | fake_video_decoder_factory_ = new FakeWebRtcVideoDecoderFactory(); |
| 464 | fake_video_encoder_factory_ = new FakeWebRtcVideoEncoderFactory(); |
| 465 | peer_connection_factory_ = webrtc::CreatePeerConnectionFactory( |
| 466 | talk_base::Thread::Current(), talk_base::Thread::Current(), |
| 467 | fake_audio_capture_module_, fake_video_encoder_factory_, |
| 468 | fake_video_decoder_factory_); |
| 469 | if (!peer_connection_factory_) { |
| 470 | return false; |
| 471 | } |
| 472 | peer_connection_ = CreatePeerConnection(allocator_factory_.get(), |
| 473 | constraints); |
| 474 | return peer_connection_.get() != NULL; |
| 475 | } |
| 476 | virtual talk_base::scoped_refptr<webrtc::PeerConnectionInterface> |
| 477 | CreatePeerConnection(webrtc::PortAllocatorFactoryInterface* factory, |
| 478 | const MediaConstraintsInterface* constraints) = 0; |
| 479 | MessageReceiver* signaling_message_receiver() { |
| 480 | return signaling_message_receiver_; |
| 481 | } |
| 482 | webrtc::PeerConnectionFactoryInterface* peer_connection_factory() { |
| 483 | return peer_connection_factory_.get(); |
| 484 | } |
| 485 | |
| 486 | virtual bool can_receive_audio() = 0; |
| 487 | virtual bool can_receive_video() = 0; |
| 488 | const std::string& id() const { return id_; } |
| 489 | |
| 490 | private: |
| 491 | class DummyDtmfObserver : public DtmfSenderObserverInterface { |
| 492 | public: |
| 493 | DummyDtmfObserver() : completed_(false) {} |
| 494 | |
| 495 | // Implements DtmfSenderObserverInterface. |
| 496 | void OnToneChange(const std::string& tone) { |
| 497 | tones_.push_back(tone); |
| 498 | if (tone.empty()) { |
| 499 | completed_ = true; |
| 500 | } |
| 501 | } |
| 502 | |
| 503 | void Verify(const std::vector<std::string>& tones) const { |
| 504 | ASSERT_TRUE(tones_.size() == tones.size()); |
| 505 | EXPECT_TRUE(std::equal(tones.begin(), tones.end(), tones_.begin())); |
| 506 | } |
| 507 | |
| 508 | bool completed() const { return completed_; } |
| 509 | |
| 510 | private: |
| 511 | bool completed_; |
| 512 | std::vector<std::string> tones_; |
| 513 | }; |
| 514 | |
| 515 | talk_base::scoped_refptr<webrtc::VideoTrackInterface> |
| 516 | CreateLocalVideoTrack(const std::string stream_label) { |
| 517 | // Set max frame rate to 10fps to reduce the risk of the tests to be flaky. |
| 518 | FakeConstraints source_constraints = video_constraints_; |
| 519 | source_constraints.SetMandatoryMaxFrameRate(10); |
| 520 | |
| 521 | talk_base::scoped_refptr<webrtc::VideoSourceInterface> source = |
| 522 | peer_connection_factory_->CreateVideoSource( |
| 523 | new webrtc::FakePeriodicVideoCapturer(), |
| 524 | &source_constraints); |
| 525 | std::string label = stream_label + kVideoTrackLabelBase; |
| 526 | return peer_connection_factory_->CreateVideoTrack(label, source); |
| 527 | } |
| 528 | |
| 529 | std::string id_; |
| 530 | // Separate thread for executing |fake_audio_capture_module_| tasks. Audio |
| 531 | // processing must not be performed on the same thread as signaling due to |
| 532 | // signaling time constraints and relative complexity of the audio pipeline. |
| 533 | // This is consistent with the video pipeline that us a a separate thread for |
| 534 | // encoding and decoding. |
| 535 | talk_base::Thread audio_thread_; |
| 536 | |
| 537 | talk_base::scoped_refptr<webrtc::PortAllocatorFactoryInterface> |
| 538 | allocator_factory_; |
| 539 | talk_base::scoped_refptr<webrtc::PeerConnectionInterface> peer_connection_; |
| 540 | talk_base::scoped_refptr<webrtc::PeerConnectionFactoryInterface> |
| 541 | peer_connection_factory_; |
| 542 | |
| 543 | typedef std::pair<std::string, std::string> IceUfragPwdPair; |
| 544 | std::map<int, IceUfragPwdPair> ice_ufrag_pwd_; |
| 545 | bool expect_ice_restart_; |
| 546 | |
| 547 | // Needed to keep track of number of frames send. |
| 548 | talk_base::scoped_refptr<FakeAudioCaptureModule> fake_audio_capture_module_; |
| 549 | // Needed to keep track of number of frames received. |
| 550 | typedef std::map<std::string, webrtc::FakeVideoTrackRenderer*> RenderMap; |
| 551 | RenderMap fake_video_renderers_; |
| 552 | // Needed to keep track of number of frames received when external decoder |
| 553 | // used. |
| 554 | FakeWebRtcVideoDecoderFactory* fake_video_decoder_factory_; |
| 555 | FakeWebRtcVideoEncoderFactory* fake_video_encoder_factory_; |
| 556 | bool video_decoder_factory_enabled_; |
| 557 | webrtc::FakeConstraints video_constraints_; |
| 558 | |
| 559 | // For remote peer communication. |
| 560 | MessageReceiver* signaling_message_receiver_; |
| 561 | }; |
| 562 | |
| 563 | class JsepTestClient |
| 564 | : public PeerConnectionTestClientBase<JsepMessageReceiver> { |
| 565 | public: |
| 566 | static JsepTestClient* CreateClient( |
| 567 | const std::string& id, |
| 568 | const MediaConstraintsInterface* constraints) { |
| 569 | JsepTestClient* client(new JsepTestClient(id)); |
| 570 | if (!client->Init(constraints)) { |
| 571 | delete client; |
| 572 | return NULL; |
| 573 | } |
| 574 | return client; |
| 575 | } |
| 576 | ~JsepTestClient() {} |
| 577 | |
  // Starts an offer/answer exchange, offering both audio and video.
  virtual void Negotiate() {
    Negotiate(true, true);
  }
  // Creates an offer, marks its audio/video contents rejected according to
  // |audio|/|video|, applies it locally and forwards the serialized SDP to
  // the remote peer through the signaling message receiver.
  virtual void Negotiate(bool audio, bool video) {
    talk_base::scoped_ptr<SessionDescriptionInterface> offer;
    EXPECT_TRUE(DoCreateOffer(offer.use()));

    if (offer->description()->GetContentByName("audio")) {
      offer->description()->GetContentByName("audio")->rejected = !audio;
    }
    if (offer->description()->GetContentByName("video")) {
      offer->description()->GetContentByName("video")->rejected = !video;
    }

    // Serialize before release(): DoSetLocalDescription takes ownership of
    // the offer, so |sdp| must be captured first.
    std::string sdp;
    EXPECT_TRUE(offer->ToString(&sdp));
    EXPECT_TRUE(DoSetLocalDescription(offer.release()));
    signaling_message_receiver()->ReceiveSdpMessage(
        webrtc::SessionDescriptionInterface::kOffer, sdp);
  }
| 598 | // JsepMessageReceiver callback. |
  // JsepMessageReceiver callback. Filters |msg| in place (msid/SDES/BUNDLE
  // removal, per the Remove*FromReceivedSdp() switches — see
  // FilterIncomingSdpMessage) and then applies it as an offer or answer.
  virtual void ReceiveSdpMessage(const std::string& type,
                                 std::string& msg) {
    FilterIncomingSdpMessage(&msg);
    if (type == webrtc::SessionDescriptionInterface::kOffer) {
      HandleIncomingOffer(msg);
    } else {
      HandleIncomingAnswer(msg);
    }
  }
  // JsepMessageReceiver callback. Adds a trickled remote ICE candidate to
  // the peer connection.
  virtual void ReceiveIceMessage(const std::string& sdp_mid,
                                 int sdp_mline_index,
                                 const std::string& msg) {
    LOG(INFO) << id() << "ReceiveIceMessage";
    talk_base::scoped_ptr<webrtc::IceCandidateInterface> candidate(
        webrtc::CreateIceCandidate(sdp_mid, sdp_mline_index, msg, NULL));
    EXPECT_TRUE(pc()->AddIceCandidate(candidate.get()));
  }
  // Implements PeerConnectionObserver functions needed by Jsep.
  // Forwards each locally gathered candidate to the remote peer (if any).
  virtual void OnIceCandidate(const webrtc::IceCandidateInterface* candidate) {
    LOG(INFO) << id() << "OnIceCandidate";

    std::string ice_sdp;
    EXPECT_TRUE(candidate->ToString(&ice_sdp));
    if (signaling_message_receiver() == NULL) {
      // Remote party may be deleted.
      return;
    }
    signaling_message_receiver()->ReceiveIceMessage(candidate->sdp_mid(),
        candidate->sdp_mline_index(), ice_sdp);
  }
| 630 | |
| 631 | void IceRestart() { |
| 632 | session_description_constraints_.SetMandatoryIceRestart(true); |
| 633 | SetExpectIceRestart(true); |
| 634 | } |
| 635 | |
| 636 | void SetReceiveAudioVideo(bool audio, bool video) { |
mallinath@webrtc.org | 19f27e6 | 2013-10-13 17:18:27 +0000 | [diff] [blame] | 637 | SetReceiveAudio(audio); |
| 638 | SetReceiveVideo(video); |
henrike@webrtc.org | 28e2075 | 2013-07-10 00:45:36 +0000 | [diff] [blame] | 639 | ASSERT_EQ(audio, can_receive_audio()); |
| 640 | ASSERT_EQ(video, can_receive_video()); |
| 641 | } |
| 642 | |
mallinath@webrtc.org | 19f27e6 | 2013-10-13 17:18:27 +0000 | [diff] [blame] | 643 | void SetReceiveAudio(bool audio) { |
| 644 | if (audio && can_receive_audio()) |
| 645 | return; |
| 646 | session_description_constraints_.SetMandatoryReceiveAudio(audio); |
| 647 | } |
| 648 | |
| 649 | void SetReceiveVideo(bool video) { |
| 650 | if (video && can_receive_video()) |
| 651 | return; |
| 652 | session_description_constraints_.SetMandatoryReceiveVideo(video); |
| 653 | } |
| 654 | |
henrike@webrtc.org | 28e2075 | 2013-07-10 00:45:36 +0000 | [diff] [blame] | 655 | void RemoveMsidFromReceivedSdp(bool remove) { |
| 656 | remove_msid_ = remove; |
| 657 | } |
| 658 | |
| 659 | void RemoveSdesCryptoFromReceivedSdp(bool remove) { |
| 660 | remove_sdes_ = remove; |
| 661 | } |
| 662 | |
| 663 | void RemoveBundleFromReceivedSdp(bool remove) { |
| 664 | remove_bundle_ = remove; |
| 665 | } |
| 666 | |
| 667 | virtual bool can_receive_audio() { |
| 668 | bool value; |
| 669 | if (webrtc::FindConstraint(&session_description_constraints_, |
| 670 | MediaConstraintsInterface::kOfferToReceiveAudio, &value, NULL)) { |
| 671 | return value; |
| 672 | } |
| 673 | return true; |
| 674 | } |
| 675 | |
| 676 | virtual bool can_receive_video() { |
| 677 | bool value; |
| 678 | if (webrtc::FindConstraint(&session_description_constraints_, |
| 679 | MediaConstraintsInterface::kOfferToReceiveVideo, &value, NULL)) { |
| 680 | return value; |
| 681 | } |
| 682 | return true; |
| 683 | } |
| 684 | |
  virtual void OnIceComplete() {
    LOG(INFO) << id() << "OnIceComplete";
  }

  // PeerConnectionObserver callback: the remote peer opened a data channel.
  // Keeps a reference and starts observing its messages/state changes.
  virtual void OnDataChannel(DataChannelInterface* data_channel) {
    LOG(INFO) << id() << "OnDataChannel";
    data_channel_ = data_channel;
    data_observer_.reset(new MockDataChannelObserver(data_channel));
  }

  // Creates the local data channel (labeled kDataChannelLabel, default
  // config) and attaches an observer to it.
  void CreateDataChannel() {
    data_channel_ = pc()->CreateDataChannel(kDataChannelLabel,
                                            NULL);
    ASSERT_TRUE(data_channel_.get() != NULL);
    data_observer_.reset(new MockDataChannelObserver(data_channel_));
  }

  // Accessors for the channel/observer created above; may be NULL before
  // CreateDataChannel()/OnDataChannel() has run.
  DataChannelInterface* data_channel() { return data_channel_; }
  const MockDataChannelObserver* data_observer() const {
    return data_observer_.get();
  }
| 706 | |
 protected:
  // Use CreateClient(); the constructor does not run Init().
  // All SDP-filtering switches default to off (SDP passed through intact).
  explicit JsepTestClient(const std::string& id)
      : PeerConnectionTestClientBase<JsepMessageReceiver>(id),
        remove_msid_(false),
        remove_bundle_(false),
        remove_sdes_(false) {
  }
| 714 | |
| 715 | virtual talk_base::scoped_refptr<webrtc::PeerConnectionInterface> |
| 716 | CreatePeerConnection(webrtc::PortAllocatorFactoryInterface* factory, |
| 717 | const MediaConstraintsInterface* constraints) { |
| 718 | // CreatePeerConnection with IceServers. |
| 719 | webrtc::PeerConnectionInterface::IceServers ice_servers; |
| 720 | webrtc::PeerConnectionInterface::IceServer ice_server; |
| 721 | ice_server.uri = "stun:stun.l.google.com:19302"; |
| 722 | ice_servers.push_back(ice_server); |
jiayl@webrtc.org | a576faf | 2014-01-29 17:45:53 +0000 | [diff] [blame] | 723 | |
| 724 | // TODO(jiayl): we should always pass a FakeIdentityService so that DTLS |
| 725 | // is enabled by default like in Chrome (issue 2838). |
| 726 | FakeIdentityService* dtls_service = NULL; |
| 727 | bool dtls; |
| 728 | if (FindConstraint(constraints, |
| 729 | MediaConstraintsInterface::kEnableDtlsSrtp, |
| 730 | &dtls, |
| 731 | NULL) && dtls) { |
| 732 | dtls_service = new FakeIdentityService(); |
| 733 | } |
henrike@webrtc.org | 28e2075 | 2013-07-10 00:45:36 +0000 | [diff] [blame] | 734 | return peer_connection_factory()->CreatePeerConnection( |
jiayl@webrtc.org | a576faf | 2014-01-29 17:45:53 +0000 | [diff] [blame] | 735 | ice_servers, constraints, factory, dtls_service, this); |
henrike@webrtc.org | 28e2075 | 2013-07-10 00:45:36 +0000 | [diff] [blame] | 736 | } |
| 737 | |
| 738 | void HandleIncomingOffer(const std::string& msg) { |
| 739 | LOG(INFO) << id() << "HandleIncomingOffer "; |
| 740 | if (NumberOfLocalMediaStreams() == 0) { |
| 741 | // If we are not sending any streams ourselves it is time to add some. |
| 742 | AddMediaStream(true, true); |
| 743 | } |
| 744 | talk_base::scoped_ptr<SessionDescriptionInterface> desc( |
| 745 | webrtc::CreateSessionDescription("offer", msg, NULL)); |
| 746 | EXPECT_TRUE(DoSetRemoteDescription(desc.release())); |
| 747 | talk_base::scoped_ptr<SessionDescriptionInterface> answer; |
| 748 | EXPECT_TRUE(DoCreateAnswer(answer.use())); |
| 749 | std::string sdp; |
| 750 | EXPECT_TRUE(answer->ToString(&sdp)); |
| 751 | EXPECT_TRUE(DoSetLocalDescription(answer.release())); |
| 752 | if (signaling_message_receiver()) { |
| 753 | signaling_message_receiver()->ReceiveSdpMessage( |
| 754 | webrtc::SessionDescriptionInterface::kAnswer, sdp); |
| 755 | } |
| 756 | } |
| 757 | |
| 758 | void HandleIncomingAnswer(const std::string& msg) { |
| 759 | LOG(INFO) << id() << "HandleIncomingAnswer"; |
| 760 | talk_base::scoped_ptr<SessionDescriptionInterface> desc( |
| 761 | webrtc::CreateSessionDescription("answer", msg, NULL)); |
| 762 | EXPECT_TRUE(DoSetRemoteDescription(desc.release())); |
| 763 | } |
| 764 | |
| 765 | bool DoCreateOfferAnswer(SessionDescriptionInterface** desc, |
| 766 | bool offer) { |
| 767 | talk_base::scoped_refptr<MockCreateSessionDescriptionObserver> |
| 768 | observer(new talk_base::RefCountedObject< |
| 769 | MockCreateSessionDescriptionObserver>()); |
| 770 | if (offer) { |
| 771 | pc()->CreateOffer(observer, &session_description_constraints_); |
| 772 | } else { |
| 773 | pc()->CreateAnswer(observer, &session_description_constraints_); |
| 774 | } |
| 775 | EXPECT_EQ_WAIT(true, observer->called(), kMaxWaitMs); |
| 776 | *desc = observer->release_desc(); |
| 777 | if (observer->result() && ExpectIceRestart()) { |
| 778 | EXPECT_EQ(0u, (*desc)->candidates(0)->count()); |
| 779 | } |
| 780 | return observer->result(); |
| 781 | } |
| 782 | |
  // Convenience wrapper: creates an offer. Returns false on failure.
  bool DoCreateOffer(SessionDescriptionInterface** desc) {
    return DoCreateOfferAnswer(desc, true);
  }
| 786 | |
  // Convenience wrapper: creates an answer. Returns false on failure.
  bool DoCreateAnswer(SessionDescriptionInterface** desc) {
    return DoCreateOfferAnswer(desc, false);
  }
| 790 | |
  // Applies |desc| (ownership transferred) as the local description. Always
  // returns true; the observer result is deliberately not awaited — see the
  // comment below.
  bool DoSetLocalDescription(SessionDescriptionInterface* desc) {
    talk_base::scoped_refptr<MockSetSessionDescriptionObserver>
        observer(new talk_base::RefCountedObject<
            MockSetSessionDescriptionObserver>());
    LOG(INFO) << id() << "SetLocalDescription ";
    pc()->SetLocalDescription(observer, desc);
    // Ignore the observer result. If we wait for the result with
    // EXPECT_TRUE_WAIT, local ice candidates might be sent to the remote peer
    // before the offer which is an error.
    // The reason is that EXPECT_TRUE_WAIT uses
    // talk_base::Thread::Current()->ProcessMessages(1);
    // ProcessMessages waits at least 1ms but processes all messages before
    // returning. Since this test is synchronous and send messages to the remote
    // peer whenever a callback is invoked, this can lead to messages being
    // sent to the remote peer in the wrong order.
    // TODO(perkj): Find a way to check the result without risking that the
    // order of sent messages are changed. Ex- by posting all messages that are
    // sent to the remote peer.
    return true;
  }
| 811 | |
| 812 | bool DoSetRemoteDescription(SessionDescriptionInterface* desc) { |
| 813 | talk_base::scoped_refptr<MockSetSessionDescriptionObserver> |
| 814 | observer(new talk_base::RefCountedObject< |
| 815 | MockSetSessionDescriptionObserver>()); |
| 816 | LOG(INFO) << id() << "SetRemoteDescription "; |
| 817 | pc()->SetRemoteDescription(observer, desc); |
| 818 | EXPECT_TRUE_WAIT(observer->called(), kMaxWaitMs); |
| 819 | return observer->result(); |
| 820 | } |
| 821 | |
| 822 | // This modifies all received SDP messages before they are processed. |
| 823 | void FilterIncomingSdpMessage(std::string* sdp) { |
| 824 | if (remove_msid_) { |
| 825 | const char kSdpSsrcAttribute[] = "a=ssrc:"; |
| 826 | RemoveLinesFromSdp(kSdpSsrcAttribute, sdp); |
| 827 | const char kSdpMsidSupportedAttribute[] = "a=msid-semantic:"; |
| 828 | RemoveLinesFromSdp(kSdpMsidSupportedAttribute, sdp); |
| 829 | } |
| 830 | if (remove_bundle_) { |
| 831 | const char kSdpBundleAttribute[] = "a=group:BUNDLE"; |
| 832 | RemoveLinesFromSdp(kSdpBundleAttribute, sdp); |
| 833 | } |
| 834 | if (remove_sdes_) { |
| 835 | const char kSdpSdesCryptoAttribute[] = "a=crypto"; |
| 836 | RemoveLinesFromSdp(kSdpSdesCryptoAttribute, sdp); |
| 837 | } |
| 838 | } |
| 839 | |
 private:
  // Constraints applied to every CreateOffer/CreateAnswer call.
  webrtc::FakeConstraints session_description_constraints_;
  bool remove_msid_;  // True if MSID should be removed in received SDP.
  bool remove_bundle_;  // True if bundle should be removed in received SDP.
  bool remove_sdes_;  // True if a=crypto should be removed in received SDP.

  // Data channel created locally (CreateDataChannel) or received from the
  // peer (OnDataChannel), and the mock observer attached to it.
  talk_base::scoped_refptr<DataChannelInterface> data_channel_;
  talk_base::scoped_ptr<MockDataChannelObserver> data_observer_;
};
| 849 | |
// Test fixture that drives a two-way call between an initiating (caller)
// client and a receiving (callee) client, both of type SignalingClass.
// The two clients exchange signaling messages directly with each other.
template <typename SignalingClass>
class P2PTestConductor : public testing::Test {
 public:
  // True once both clients report an active session.
  bool SessionActive() {
    return initiating_client_->SessionActive() &&
        receiving_client_->SessionActive();
  }
  // Return true if the number of frames provided have been received or it is
  // known that that will never occur (e.g. no frames will be sent or
  // captured).
  bool FramesNotPending(int audio_frames_to_receive,
                        int video_frames_to_receive) {
    return VideoFramesReceivedCheck(video_frames_to_receive) &&
        AudioFramesReceivedCheck(audio_frames_to_receive);
  }
  // True when both clients pass their audio frame-count check.
  bool AudioFramesReceivedCheck(int frames_received) {
    return initiating_client_->AudioFramesReceivedCheck(frames_received) &&
        receiving_client_->AudioFramesReceivedCheck(frames_received);
  }
  // True when both clients pass their video frame-count check.
  bool VideoFramesReceivedCheck(int frames_received) {
    return initiating_client_->VideoFramesReceivedCheck(frames_received) &&
        receiving_client_->VideoFramesReceivedCheck(frames_received);
  }
  // Verifies DTMF send/receive on both clients.
  void VerifyDtmf() {
    initiating_client_->VerifyDtmf();
    receiving_client_->VerifyDtmf();
  }

  // Renegotiates with video rejected by the caller and verifies that audio
  // keeps flowing while no further video frames arrive.
  void TestUpdateOfferWithRejectedContent() {
    initiating_client_->Negotiate(true, false);
    EXPECT_TRUE_WAIT(
        FramesNotPending(kEndAudioFrameCount * 2, kEndVideoFrameCount),
        kMaxWaitForFramesMs);
    // There shouldn't be any more video frame after the new offer is
    // negotiated.
    EXPECT_FALSE(VideoFramesReceivedCheck(kEndVideoFrameCount + 1));
  }

  // Checks that both clients rendered frames with the given dimensions.
  void VerifyRenderedSize(int width, int height) {
    EXPECT_EQ(width, receiving_client()->rendered_width());
    EXPECT_EQ(height, receiving_client()->rendered_height());
    EXPECT_EQ(width, initializing_client()->rendered_width());
    EXPECT_EQ(height, initializing_client()->rendered_height());
  }

  // Validates rejected media sections and local ICE credentials on both
  // clients' session descriptions.
  void VerifySessionDescriptions() {
    initiating_client_->VerifyRejectedMediaInSessionDescription();
    receiving_client_->VerifyRejectedMediaInSessionDescription();
    initiating_client_->VerifyLocalIceUfragAndPassword();
    receiving_client_->VerifyLocalIceUfragAndPassword();
  }

  P2PTestConductor() {
    talk_base::InitializeSSL(NULL);
  }
  ~P2PTestConductor() {
    // Detach the clients from each other before destruction so a client
    // being torn down does not deliver messages to a destroyed peer.
    if (initiating_client_) {
      initiating_client_->set_signaling_message_receiver(NULL);
    }
    if (receiving_client_) {
      receiving_client_->set_signaling_message_receiver(NULL);
    }
    talk_base::CleanupSSL();
  }

  // Creates both clients with no client-specific constraints.
  bool CreateTestClients() {
    return CreateTestClients(NULL, NULL);
  }

  // Creates the caller and callee clients with the given constraints and
  // wires them to exchange signaling messages directly with each other.
  // Returns false if either client could not be created.
  bool CreateTestClients(MediaConstraintsInterface* init_constraints,
                         MediaConstraintsInterface* recv_constraints) {
    initiating_client_.reset(SignalingClass::CreateClient("Caller: ",
                                                          init_constraints));
    receiving_client_.reset(SignalingClass::CreateClient("Callee: ",
                                                         recv_constraints));
    if (!initiating_client_ || !receiving_client_) {
      return false;
    }
    initiating_client_->set_signaling_message_receiver(receiving_client_.get());
    receiving_client_->set_signaling_message_receiver(initiating_client_.get());
    return true;
  }

  // Applies video capture constraints to each client.
  void SetVideoConstraints(const webrtc::FakeConstraints& init_constraints,
                           const webrtc::FakeConstraints& recv_constraints) {
    initiating_client_->SetVideoConstraints(init_constraints);
    receiving_client_->SetVideoConstraints(recv_constraints);
  }

  // Enables the fake video decoder factory on both clients.
  void EnableVideoDecoderFactory() {
    initiating_client_->EnableVideoDecoderFactory();
    receiving_client_->EnableVideoDecoderFactory();
  }

  // This test sets up a call between two parties. Both parties send static
  // frames to each other. Once the test is finished the number of sent frames
  // is compared to the number of received frames.
  void LocalP2PTest() {
    if (initiating_client_->NumberOfLocalMediaStreams() == 0) {
      initiating_client_->AddMediaStream(true, true);
    }
    initiating_client_->Negotiate();
    const int kMaxWaitForActivationMs = 5000;
    // Assert true is used here since next tests are guaranteed to fail and
    // would eat up 5 seconds.
    ASSERT_TRUE_WAIT(SessionActive(), kMaxWaitForActivationMs);
    VerifySessionDescriptions();


    // A frame count of -1 means "no frames expected in that direction".
    int audio_frame_count = kEndAudioFrameCount;
    // TODO(ronghuawu): Add test to cover the case of sendonly and recvonly.
    if (!initiating_client_->can_receive_audio() ||
        !receiving_client_->can_receive_audio()) {
      audio_frame_count = -1;
    }
    int video_frame_count = kEndVideoFrameCount;
    if (!initiating_client_->can_receive_video() ||
        !receiving_client_->can_receive_video()) {
      video_frame_count = -1;
    }

    if (audio_frame_count != -1 || video_frame_count != -1) {
      // Audio or video is expected to flow, so both sides should get to the
      // Connected state.
      // Note: These tests have been observed to fail under heavy load at
      // shorter timeouts, so they may be flaky.
      EXPECT_EQ_WAIT(
          webrtc::PeerConnectionInterface::kIceConnectionConnected,
          initiating_client_->ice_connection_state(),
          kMaxWaitForFramesMs);
      EXPECT_EQ_WAIT(
          webrtc::PeerConnectionInterface::kIceConnectionConnected,
          receiving_client_->ice_connection_state(),
          kMaxWaitForFramesMs);
    }

    if (initiating_client_->can_receive_audio() ||
        initiating_client_->can_receive_video()) {
      // The initiating client can receive media, so it must produce candidates
      // that will serve as destinations for that media.
      // TODO(bemasc): Understand why the state is not already Complete here, as
      // seems to be the case for the receiving client. This may indicate a bug
      // in the ICE gathering system.
      EXPECT_NE(webrtc::PeerConnectionInterface::kIceGatheringNew,
                initiating_client_->ice_gathering_state());
    }
    if (receiving_client_->can_receive_audio() ||
        receiving_client_->can_receive_video()) {
      EXPECT_EQ_WAIT(webrtc::PeerConnectionInterface::kIceGatheringComplete,
                     receiving_client_->ice_gathering_state(),
                     kMaxWaitForFramesMs);
    }

    EXPECT_TRUE_WAIT(FramesNotPending(audio_frame_count, video_frame_count),
                     kMaxWaitForFramesMs);
  }

  SignalingClass* initializing_client() { return initiating_client_.get(); }
  SignalingClass* receiving_client() { return receiving_client_.get(); }

 private:
  talk_base::scoped_ptr<SignalingClass> initiating_client_;
  talk_base::scoped_ptr<SignalingClass> receiving_client_;
};
| 1014 | typedef P2PTestConductor<JsepTestClient> JsepPeerConnectionP2PTestClient; |
| 1015 | |
kjellander@webrtc.org | d1cfa71 | 2013-10-16 16:51:52 +0000 | [diff] [blame] | 1016 | // Disable for TSan v2, see |
| 1017 | // https://code.google.com/p/webrtc/issues/detail?id=1205 for details. |
| 1018 | #if !defined(THREAD_SANITIZER) |
| 1019 | |
henrike@webrtc.org | 28e2075 | 2013-07-10 00:45:36 +0000 | [diff] [blame] | 1020 | // This test sets up a Jsep call between two parties and test Dtmf. |
stefan@webrtc.org | da79008 | 2013-09-17 13:11:38 +0000 | [diff] [blame] | 1021 | // TODO(holmer): Disabled due to sometimes crashing on buildbots. |
| 1022 | // See issue webrtc/2378. |
| 1023 | TEST_F(JsepPeerConnectionP2PTestClient, DISABLED_LocalP2PTestDtmf) { |
henrike@webrtc.org | 28e2075 | 2013-07-10 00:45:36 +0000 | [diff] [blame] | 1024 | ASSERT_TRUE(CreateTestClients()); |
| 1025 | LocalP2PTest(); |
| 1026 | VerifyDtmf(); |
| 1027 | } |
| 1028 | |
| 1029 | // This test sets up a Jsep call between two parties and test that we can get a |
| 1030 | // video aspect ratio of 16:9. |
| 1031 | TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTest16To9) { |
| 1032 | ASSERT_TRUE(CreateTestClients()); |
| 1033 | FakeConstraints constraint; |
| 1034 | double requested_ratio = 640.0/360; |
| 1035 | constraint.SetMandatoryMinAspectRatio(requested_ratio); |
| 1036 | SetVideoConstraints(constraint, constraint); |
| 1037 | LocalP2PTest(); |
| 1038 | |
| 1039 | ASSERT_LE(0, initializing_client()->rendered_height()); |
| 1040 | double initiating_video_ratio = |
henrike@webrtc.org | 28654cb | 2013-07-22 21:07:49 +0000 | [diff] [blame] | 1041 | static_cast<double>(initializing_client()->rendered_width()) / |
henrike@webrtc.org | 28e2075 | 2013-07-10 00:45:36 +0000 | [diff] [blame] | 1042 | initializing_client()->rendered_height(); |
| 1043 | EXPECT_LE(requested_ratio, initiating_video_ratio); |
| 1044 | |
| 1045 | ASSERT_LE(0, receiving_client()->rendered_height()); |
| 1046 | double receiving_video_ratio = |
henrike@webrtc.org | 28654cb | 2013-07-22 21:07:49 +0000 | [diff] [blame] | 1047 | static_cast<double>(receiving_client()->rendered_width()) / |
henrike@webrtc.org | 28e2075 | 2013-07-10 00:45:36 +0000 | [diff] [blame] | 1048 | receiving_client()->rendered_height(); |
| 1049 | EXPECT_LE(requested_ratio, receiving_video_ratio); |
| 1050 | } |
| 1051 | |
// This test sets up a Jsep call between two parties and test that the
// received video has a resolution of 1280*720.
// TODO(mallinath): Enable when
// http://code.google.com/p/webrtc/issues/detail?id=981 is fixed.
TEST_F(JsepPeerConnectionP2PTestClient, DISABLED_LocalP2PTest1280By720) {
  ASSERT_TRUE(CreateTestClients());
  // Mandate a minimum capture resolution of 1280x720 on both clients.
  FakeConstraints constraint;
  constraint.SetMandatoryMinWidth(1280);
  constraint.SetMandatoryMinHeight(720);
  SetVideoConstraints(constraint, constraint);
  LocalP2PTest();
  VerifyRenderedSize(1280, 720);
}
| 1065 | |
| 1066 | // This test sets up a call between two endpoints that are configured to use |
| 1067 | // DTLS key agreement. As a result, DTLS is negotiated and used for transport. |
| 1068 | TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestDtls) { |
| 1069 | MAYBE_SKIP_TEST(talk_base::SSLStreamAdapter::HaveDtlsSrtp); |
| 1070 | FakeConstraints setup_constraints; |
| 1071 | setup_constraints.AddMandatory(MediaConstraintsInterface::kEnableDtlsSrtp, |
| 1072 | true); |
| 1073 | ASSERT_TRUE(CreateTestClients(&setup_constraints, &setup_constraints)); |
| 1074 | LocalP2PTest(); |
| 1075 | VerifyRenderedSize(640, 480); |
| 1076 | } |
| 1077 | |
mallinath@webrtc.org | 19f27e6 | 2013-10-13 17:18:27 +0000 | [diff] [blame] | 1078 | // This test sets up a audio call initially and then upgrades to audio/video, |
| 1079 | // using DTLS. |
mallinath@webrtc.org | 50bc553 | 2013-10-21 17:58:35 +0000 | [diff] [blame] | 1080 | TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestDtlsRenegotiate) { |
mallinath@webrtc.org | 19f27e6 | 2013-10-13 17:18:27 +0000 | [diff] [blame] | 1081 | MAYBE_SKIP_TEST(talk_base::SSLStreamAdapter::HaveDtlsSrtp); |
| 1082 | FakeConstraints setup_constraints; |
| 1083 | setup_constraints.AddMandatory(MediaConstraintsInterface::kEnableDtlsSrtp, |
| 1084 | true); |
| 1085 | ASSERT_TRUE(CreateTestClients(&setup_constraints, &setup_constraints)); |
| 1086 | receiving_client()->SetReceiveAudioVideo(true, false); |
| 1087 | LocalP2PTest(); |
| 1088 | receiving_client()->SetReceiveAudioVideo(true, true); |
| 1089 | receiving_client()->Negotiate(); |
| 1090 | } |
| 1091 | |
henrike@webrtc.org | 28e2075 | 2013-07-10 00:45:36 +0000 | [diff] [blame] | 1092 | // This test sets up a call between an endpoint configured to use either SDES or |
| 1093 | // DTLS (the offerer) and just SDES (the answerer). As a result, SDES is used |
| 1094 | // instead of DTLS. |
| 1095 | TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestOfferDtlsToSdes) { |
| 1096 | MAYBE_SKIP_TEST(talk_base::SSLStreamAdapter::HaveDtlsSrtp); |
| 1097 | FakeConstraints setup_constraints; |
| 1098 | setup_constraints.AddMandatory(MediaConstraintsInterface::kEnableDtlsSrtp, |
| 1099 | true); |
| 1100 | ASSERT_TRUE(CreateTestClients(&setup_constraints, NULL)); |
| 1101 | LocalP2PTest(); |
| 1102 | VerifyRenderedSize(640, 480); |
| 1103 | } |
| 1104 | |
| 1105 | // This test sets up a call between an endpoint configured to use SDES |
| 1106 | // (the offerer) and either SDES or DTLS (the answerer). As a result, SDES is |
| 1107 | // used instead of DTLS. |
| 1108 | TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestOfferSdesToDtls) { |
| 1109 | MAYBE_SKIP_TEST(talk_base::SSLStreamAdapter::HaveDtlsSrtp); |
| 1110 | FakeConstraints setup_constraints; |
| 1111 | setup_constraints.AddMandatory(MediaConstraintsInterface::kEnableDtlsSrtp, |
| 1112 | true); |
| 1113 | ASSERT_TRUE(CreateTestClients(NULL, &setup_constraints)); |
| 1114 | LocalP2PTest(); |
| 1115 | VerifyRenderedSize(640, 480); |
| 1116 | } |
| 1117 | |
| 1118 | // This test sets up a call between two endpoints that are configured to use |
| 1119 | // DTLS key agreement. The offerer don't support SDES. As a result, DTLS is |
| 1120 | // negotiated and used for transport. |
| 1121 | TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestOfferDtlsButNotSdes) { |
| 1122 | MAYBE_SKIP_TEST(talk_base::SSLStreamAdapter::HaveDtlsSrtp); |
| 1123 | FakeConstraints setup_constraints; |
| 1124 | setup_constraints.AddMandatory(MediaConstraintsInterface::kEnableDtlsSrtp, |
| 1125 | true); |
| 1126 | ASSERT_TRUE(CreateTestClients(&setup_constraints, &setup_constraints)); |
| 1127 | receiving_client()->RemoveSdesCryptoFromReceivedSdp(true); |
| 1128 | LocalP2PTest(); |
| 1129 | VerifyRenderedSize(640, 480); |
| 1130 | } |
| 1131 | |
// This test sets up a Jsep call between two parties, and the callee only
// accept to receive video.
// BUG=https://code.google.com/p/webrtc/issues/detail?id=2288
TEST_F(JsepPeerConnectionP2PTestClient, DISABLED_LocalP2PTestAnswerVideo) {
  ASSERT_TRUE(CreateTestClients());
  // Callee answers with video only (audio rejected).
  receiving_client()->SetReceiveAudioVideo(false, true);
  LocalP2PTest();
}
| 1140 | |
// This test sets up a Jsep call between two parties, and the callee only
// accept to receive audio.
TEST_F(JsepPeerConnectionP2PTestClient, DISABLED_LocalP2PTestAnswerAudio) {
  ASSERT_TRUE(CreateTestClients());
  // Callee answers with audio only (video rejected).
  receiving_client()->SetReceiveAudioVideo(true, false);
  LocalP2PTest();
}
| 1148 | |
// This test sets up a Jsep call between two parties, and the callee reject both
// audio and video.
TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestAnswerNone) {
  ASSERT_TRUE(CreateTestClients());
  // Callee rejects both audio and video in its answer.
  receiving_client()->SetReceiveAudioVideo(false, false);
  LocalP2PTest();
}
| 1156 | |
// This test sets up an audio and video call between two parties. After the call
// runs for a while (10 frames), the caller sends an update offer with video
// being rejected. Once the re-negotiation is done, the video flow should stop
// and the audio flow should continue.
TEST_F(JsepPeerConnectionP2PTestClient, UpdateOfferWithRejectedContent) {
  ASSERT_TRUE(CreateTestClients());
  LocalP2PTest();
  TestUpdateOfferWithRejectedContent();
}
| 1166 | |
// This test sets up a Jsep call between two parties. The MSID is removed from
// the SDP strings from the caller.
TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestWithoutMsid) {
  ASSERT_TRUE(CreateTestClients());
  receiving_client()->RemoveMsidFromReceivedSdp(true);
  // TODO(perkj): Currently there is a bug that cause audio to stop playing if
  // audio and video is muxed when MSID is disabled. Remove
  // SetRemoveBundleFromSdp once
  // https://code.google.com/p/webrtc/issues/detail?id=1193 is fixed.
  receiving_client()->RemoveBundleFromReceivedSdp(true);
  LocalP2PTest();
}
| 1179 | |
| 1180 | // This test sets up a Jsep call between two parties and the initiating peer |
| 1181 | // sends two steams. |
| 1182 | // TODO(perkj): Disabled due to |
| 1183 | // https://code.google.com/p/webrtc/issues/detail?id=1454 |
| 1184 | TEST_F(JsepPeerConnectionP2PTestClient, DISABLED_LocalP2PTestTwoStreams) { |
| 1185 | ASSERT_TRUE(CreateTestClients()); |
| 1186 | // Set optional video constraint to max 320pixels to decrease CPU usage. |
| 1187 | FakeConstraints constraint; |
| 1188 | constraint.SetOptionalMaxWidth(320); |
| 1189 | SetVideoConstraints(constraint, constraint); |
| 1190 | initializing_client()->AddMediaStream(true, true); |
| 1191 | initializing_client()->AddMediaStream(false, true); |
| 1192 | ASSERT_EQ(2u, initializing_client()->NumberOfLocalMediaStreams()); |
| 1193 | LocalP2PTest(); |
| 1194 | EXPECT_EQ(2u, receiving_client()->number_of_remote_streams()); |
| 1195 | } |
| 1196 | |
| 1197 | // Test that we can receive the audio output level from a remote audio track. |
| 1198 | TEST_F(JsepPeerConnectionP2PTestClient, GetAudioOutputLevelStats) { |
| 1199 | ASSERT_TRUE(CreateTestClients()); |
| 1200 | LocalP2PTest(); |
| 1201 | |
| 1202 | StreamCollectionInterface* remote_streams = |
| 1203 | initializing_client()->remote_streams(); |
| 1204 | ASSERT_GT(remote_streams->count(), 0u); |
| 1205 | ASSERT_GT(remote_streams->at(0)->GetAudioTracks().size(), 0u); |
| 1206 | MediaStreamTrackInterface* remote_audio_track = |
| 1207 | remote_streams->at(0)->GetAudioTracks()[0]; |
| 1208 | |
| 1209 | // Get the audio output level stats. Note that the level is not available |
| 1210 | // until a RTCP packet has been received. |
| 1211 | EXPECT_TRUE_WAIT( |
| 1212 | initializing_client()->GetAudioOutputLevelStats(remote_audio_track) > 0, |
| 1213 | kMaxWaitForStatsMs); |
| 1214 | } |
| 1215 | |
// Test that an audio input level is reported.
TEST_F(JsepPeerConnectionP2PTestClient, GetAudioInputLevelStats) {
  ASSERT_TRUE(CreateTestClients());
  LocalP2PTest();

  // Get the audio input level stats. The level should be available very
  // soon after the test starts.
  EXPECT_TRUE_WAIT(initializing_client()->GetAudioInputLevelStats() > 0,
                   kMaxWaitForStatsMs);
}
| 1226 | |
| 1227 | // Test that we can get incoming byte counts from both audio and video tracks. |
| 1228 | TEST_F(JsepPeerConnectionP2PTestClient, GetBytesReceivedStats) { |
| 1229 | ASSERT_TRUE(CreateTestClients()); |
| 1230 | LocalP2PTest(); |
| 1231 | |
| 1232 | StreamCollectionInterface* remote_streams = |
| 1233 | initializing_client()->remote_streams(); |
| 1234 | ASSERT_GT(remote_streams->count(), 0u); |
| 1235 | ASSERT_GT(remote_streams->at(0)->GetAudioTracks().size(), 0u); |
| 1236 | MediaStreamTrackInterface* remote_audio_track = |
| 1237 | remote_streams->at(0)->GetAudioTracks()[0]; |
| 1238 | EXPECT_TRUE_WAIT( |
| 1239 | initializing_client()->GetBytesReceivedStats(remote_audio_track) > 0, |
| 1240 | kMaxWaitForStatsMs); |
| 1241 | |
| 1242 | MediaStreamTrackInterface* remote_video_track = |
| 1243 | remote_streams->at(0)->GetVideoTracks()[0]; |
| 1244 | EXPECT_TRUE_WAIT( |
| 1245 | initializing_client()->GetBytesReceivedStats(remote_video_track) > 0, |
| 1246 | kMaxWaitForStatsMs); |
| 1247 | } |
| 1248 | |
| 1249 | // Test that we can get outgoing byte counts from both audio and video tracks. |
| 1250 | TEST_F(JsepPeerConnectionP2PTestClient, GetBytesSentStats) { |
| 1251 | ASSERT_TRUE(CreateTestClients()); |
| 1252 | LocalP2PTest(); |
| 1253 | |
| 1254 | StreamCollectionInterface* local_streams = |
| 1255 | initializing_client()->local_streams(); |
| 1256 | ASSERT_GT(local_streams->count(), 0u); |
| 1257 | ASSERT_GT(local_streams->at(0)->GetAudioTracks().size(), 0u); |
| 1258 | MediaStreamTrackInterface* local_audio_track = |
| 1259 | local_streams->at(0)->GetAudioTracks()[0]; |
| 1260 | EXPECT_TRUE_WAIT( |
| 1261 | initializing_client()->GetBytesSentStats(local_audio_track) > 0, |
| 1262 | kMaxWaitForStatsMs); |
| 1263 | |
| 1264 | MediaStreamTrackInterface* local_video_track = |
| 1265 | local_streams->at(0)->GetVideoTracks()[0]; |
| 1266 | EXPECT_TRUE_WAIT( |
| 1267 | initializing_client()->GetBytesSentStats(local_video_track) > 0, |
| 1268 | kMaxWaitForStatsMs); |
| 1269 | } |
| 1270 | |
// This test sets up a call between two parties with audio, video and data.
// TODO(jiayl): fix the flakiness on Windows and reenable. Issue 2891.
#if defined(WIN32)
TEST_F(JsepPeerConnectionP2PTestClient, DISABLED_LocalP2PTestDataChannel) {
#else
TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestDataChannel) {
#endif
  FakeConstraints setup_constraints;
  setup_constraints.SetAllowRtpDataChannels();
  ASSERT_TRUE(CreateTestClients(&setup_constraints, &setup_constraints));
  initializing_client()->CreateDataChannel();
  LocalP2PTest();
  // Both sides should end up with an open data channel.
  ASSERT_TRUE(initializing_client()->data_channel() != NULL);
  ASSERT_TRUE(receiving_client()->data_channel() != NULL);
  EXPECT_TRUE_WAIT(initializing_client()->data_observer()->IsOpen(),
                   kMaxWaitMs);
  EXPECT_TRUE_WAIT(receiving_client()->data_observer()->IsOpen(),
                   kMaxWaitMs);

  // Verify data can flow in both directions.
  std::string data = "hello world";
  initializing_client()->data_channel()->Send(DataBuffer(data));
  EXPECT_EQ_WAIT(data, receiving_client()->data_observer()->last_message(),
                 kMaxWaitMs);
  receiving_client()->data_channel()->Send(DataBuffer(data));
  EXPECT_EQ_WAIT(data, initializing_client()->data_observer()->last_message(),
                 kMaxWaitMs);

  // Closing one end and renegotiating should close the channel on both sides.
  receiving_client()->data_channel()->Close();
  // Send new offer and answer.
  receiving_client()->Negotiate();
  EXPECT_FALSE(initializing_client()->data_observer()->IsOpen());
  EXPECT_FALSE(receiving_client()->data_observer()->IsOpen());
}
| 1304 | |
| 1305 | // This test sets up a call between two parties and creates a data channel. |
| 1306 | // The test tests that received data is buffered unless an observer has been |
| 1307 | // registered. |
| 1308 | // Rtp data channels can receive data before the underlying |
| 1309 | // transport has detected that a channel is writable and thus data can be |
| 1310 | // received before the data channel state changes to open. That is hard to test |
| 1311 | // but the same buffering is used in that case. |
| 1312 | TEST_F(JsepPeerConnectionP2PTestClient, RegisterDataChannelObserver) { |
| 1313 | FakeConstraints setup_constraints; |
| 1314 | setup_constraints.SetAllowRtpDataChannels(); |
| 1315 | ASSERT_TRUE(CreateTestClients(&setup_constraints, &setup_constraints)); |
| 1316 | initializing_client()->CreateDataChannel(); |
| 1317 | initializing_client()->Negotiate(); |
| 1318 | |
| 1319 | ASSERT_TRUE(initializing_client()->data_channel() != NULL); |
| 1320 | ASSERT_TRUE(receiving_client()->data_channel() != NULL); |
| 1321 | EXPECT_TRUE_WAIT(initializing_client()->data_observer()->IsOpen(), |
| 1322 | kMaxWaitMs); |
| 1323 | EXPECT_EQ_WAIT(DataChannelInterface::kOpen, |
| 1324 | receiving_client()->data_channel()->state(), kMaxWaitMs); |
| 1325 | |
| 1326 | // Unregister the existing observer. |
| 1327 | receiving_client()->data_channel()->UnregisterObserver(); |
| 1328 | std::string data = "hello world"; |
| 1329 | initializing_client()->data_channel()->Send(DataBuffer(data)); |
| 1330 | // Wait a while to allow the sent data to arrive before an observer is |
| 1331 | // registered.. |
| 1332 | talk_base::Thread::Current()->ProcessMessages(100); |
| 1333 | |
| 1334 | MockDataChannelObserver new_observer(receiving_client()->data_channel()); |
| 1335 | EXPECT_EQ_WAIT(data, new_observer.last_message(), kMaxWaitMs); |
| 1336 | } |
| 1337 | |
// This test sets up a call between two parties with audio, video and but only
// the initiating client support data.
TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestReceiverDoesntSupportData) {
  FakeConstraints setup_constraints;
  setup_constraints.SetAllowRtpDataChannels();
  // Only the caller allows RTP data channels; the callee gets no constraints.
  ASSERT_TRUE(CreateTestClients(&setup_constraints, NULL));
  initializing_client()->CreateDataChannel();
  LocalP2PTest();
  // The caller keeps its (never-opened) channel; the callee gets none.
  EXPECT_TRUE(initializing_client()->data_channel() != NULL);
  EXPECT_FALSE(receiving_client()->data_channel());
  EXPECT_FALSE(initializing_client()->data_observer()->IsOpen());
}
| 1350 | |
| 1351 | // This test sets up a call between two parties with audio, video. When audio |
| 1352 | // and video is setup and flowing and data channel is negotiated. |
| 1353 | TEST_F(JsepPeerConnectionP2PTestClient, AddDataChannelAfterRenegotiation) { |
| 1354 | FakeConstraints setup_constraints; |
| 1355 | setup_constraints.SetAllowRtpDataChannels(); |
| 1356 | ASSERT_TRUE(CreateTestClients(&setup_constraints, &setup_constraints)); |
| 1357 | LocalP2PTest(); |
| 1358 | initializing_client()->CreateDataChannel(); |
| 1359 | // Send new offer and answer. |
| 1360 | initializing_client()->Negotiate(); |
| 1361 | ASSERT_TRUE(initializing_client()->data_channel() != NULL); |
| 1362 | ASSERT_TRUE(receiving_client()->data_channel() != NULL); |
| 1363 | EXPECT_TRUE_WAIT(initializing_client()->data_observer()->IsOpen(), |
| 1364 | kMaxWaitMs); |
| 1365 | EXPECT_TRUE_WAIT(receiving_client()->data_observer()->IsOpen(), |
| 1366 | kMaxWaitMs); |
| 1367 | } |
| 1368 | |
| 1369 | // This test sets up a call between two parties with audio, and video. |
| 1370 | // During the call, the initializing side restart ice and the test verifies that |
| 1371 | // new ice candidates are generated and audio and video still can flow. |
| 1372 | TEST_F(JsepPeerConnectionP2PTestClient, IceRestart) { |
| 1373 | ASSERT_TRUE(CreateTestClients()); |
| 1374 | |
| 1375 | // Negotiate and wait for ice completion and make sure audio and video plays. |
| 1376 | LocalP2PTest(); |
| 1377 | |
| 1378 | // Create a SDP string of the first audio candidate for both clients. |
| 1379 | const webrtc::IceCandidateCollection* audio_candidates_initiator = |
| 1380 | initializing_client()->pc()->local_description()->candidates(0); |
| 1381 | const webrtc::IceCandidateCollection* audio_candidates_receiver = |
| 1382 | receiving_client()->pc()->local_description()->candidates(0); |
| 1383 | ASSERT_GT(audio_candidates_initiator->count(), 0u); |
| 1384 | ASSERT_GT(audio_candidates_receiver->count(), 0u); |
| 1385 | std::string initiator_candidate; |
| 1386 | EXPECT_TRUE( |
| 1387 | audio_candidates_initiator->at(0)->ToString(&initiator_candidate)); |
| 1388 | std::string receiver_candidate; |
| 1389 | EXPECT_TRUE(audio_candidates_receiver->at(0)->ToString(&receiver_candidate)); |
| 1390 | |
| 1391 | // Restart ice on the initializing client. |
| 1392 | receiving_client()->SetExpectIceRestart(true); |
| 1393 | initializing_client()->IceRestart(); |
| 1394 | |
| 1395 | // Negotiate and wait for ice completion again and make sure audio and video |
| 1396 | // plays. |
| 1397 | LocalP2PTest(); |
| 1398 | |
| 1399 | // Create a SDP string of the first audio candidate for both clients again. |
| 1400 | const webrtc::IceCandidateCollection* audio_candidates_initiator_restart = |
| 1401 | initializing_client()->pc()->local_description()->candidates(0); |
| 1402 | const webrtc::IceCandidateCollection* audio_candidates_reciever_restart = |
| 1403 | receiving_client()->pc()->local_description()->candidates(0); |
| 1404 | ASSERT_GT(audio_candidates_initiator_restart->count(), 0u); |
| 1405 | ASSERT_GT(audio_candidates_reciever_restart->count(), 0u); |
| 1406 | std::string initiator_candidate_restart; |
| 1407 | EXPECT_TRUE(audio_candidates_initiator_restart->at(0)->ToString( |
| 1408 | &initiator_candidate_restart)); |
| 1409 | std::string receiver_candidate_restart; |
| 1410 | EXPECT_TRUE(audio_candidates_reciever_restart->at(0)->ToString( |
| 1411 | &receiver_candidate_restart)); |
| 1412 | |
| 1413 | // Verify that the first candidates in the local session descriptions has |
| 1414 | // changed. |
| 1415 | EXPECT_NE(initiator_candidate, initiator_candidate_restart); |
| 1416 | EXPECT_NE(receiver_candidate, receiver_candidate_restart); |
| 1417 | } |
| 1418 | |
| 1419 | |
// This test sets up a Jsep call between two parties with external
// VideoDecoderFactory.
// TODO(holmer): Disabled due to sometimes crashing on buildbots.
// See issue webrtc/2378.
TEST_F(JsepPeerConnectionP2PTestClient,
       DISABLED_LocalP2PTestWithVideoDecoderFactory) {
  ASSERT_TRUE(CreateTestClients());
  // Presumably switches the receiving pipeline to the injected external
  // decoder factory before negotiation -- confirm against the fixture.
  EnableVideoDecoderFactory();
  LocalP2PTest();
}
kjellander@webrtc.org | d1cfa71 | 2013-10-16 16:51:52 +0000 | [diff] [blame] | 1430 | |
| 1431 | #endif // if !defined(THREAD_SANITIZER) |
| 1432 | |