/*
2 * libjingle
3 * Copyright 2012, Google Inc.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright notice,
11 * this list of conditions and the following disclaimer in the documentation
12 * and/or other materials provided with the distribution.
13 * 3. The name of the author may not be used to endorse or promote products
14 * derived from this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
17 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
18 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
19 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
20 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
21 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
22 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
23 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
24 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
25 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 */
27
28#include <stdio.h>
29
30#include <algorithm>
31#include <list>
32#include <map>
33#include <vector>
34
35#include "talk/app/webrtc/dtmfsender.h"
36#include "talk/app/webrtc/fakeportallocatorfactory.h"
37#include "talk/app/webrtc/localaudiosource.h"
38#include "talk/app/webrtc/mediastreaminterface.h"
39#include "talk/app/webrtc/peerconnectionfactory.h"
40#include "talk/app/webrtc/peerconnectioninterface.h"
41#include "talk/app/webrtc/test/fakeaudiocapturemodule.h"
42#include "talk/app/webrtc/test/fakeconstraints.h"
43#include "talk/app/webrtc/test/fakevideotrackrenderer.h"
44#include "talk/app/webrtc/test/fakeperiodicvideocapturer.h"
45#include "talk/app/webrtc/test/mockpeerconnectionobservers.h"
46#include "talk/app/webrtc/videosourceinterface.h"
47#include "talk/base/gunit.h"
48#include "talk/base/scoped_ptr.h"
49#include "talk/base/ssladapter.h"
50#include "talk/base/sslstreamadapter.h"
51#include "talk/base/thread.h"
52#include "talk/media/webrtc/fakewebrtcvideoengine.h"
53#include "talk/p2p/base/constants.h"
54#include "talk/p2p/base/sessiondescription.h"
55#include "talk/session/media/mediasession.h"
56
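// Skips the current test when the given feature check returns false, e.g.
// MAYBE_SKIP_TEST(talk_base::SSLStreamAdapter::HaveDtlsSrtp) skips the test on
// builds without DTLS-SRTP support.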
57#define MAYBE_SKIP_TEST(feature) \
58 if (!(feature())) { \
59 LOG(LS_INFO) << "Feature disabled... skipping"; \
60 return; \
61 }
62
63using cricket::ContentInfo;
64using cricket::FakeWebRtcVideoDecoder;
65using cricket::FakeWebRtcVideoDecoderFactory;
66using cricket::FakeWebRtcVideoEncoder;
67using cricket::FakeWebRtcVideoEncoderFactory;
68using cricket::MediaContentDescription;
69using webrtc::DataBuffer;
70using webrtc::DataChannelInterface;
71using webrtc::DtmfSender;
72using webrtc::DtmfSenderInterface;
73using webrtc::DtmfSenderObserverInterface;
74using webrtc::FakeConstraints;
75using webrtc::MediaConstraintsInterface;
76using webrtc::MediaStreamTrackInterface;
77using webrtc::MockCreateSessionDescriptionObserver;
78using webrtc::MockDataChannelObserver;
79using webrtc::MockSetSessionDescriptionObserver;
80using webrtc::MockStatsObserver;
81using webrtc::SessionDescriptionInterface;
82using webrtc::StreamCollectionInterface;
83
84static const int kMaxWaitMs = 1000;
85static const int kMaxWaitForStatsMs = 3000;
86static const int kMaxWaitForFramesMs = 5000;
87static const int kEndAudioFrameCount = 3;
88static const int kEndVideoFrameCount = 3;
89
90static const char kStreamLabelBase[] = "stream_label";
91static const char kVideoTrackLabelBase[] = "video_track";
92static const char kAudioTrackLabelBase[] = "audio_track";
93static const char kDataChannelLabel[] = "data_channel";
94
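// Removes every SDP line that starts with |line_start| from |sdp|, e.g.
// RemoveLinesFromSdp("a=ssrc:", &sdp) strips all SSRC attribute lines.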
95static void RemoveLinesFromSdp(const std::string& line_start,
96 std::string* sdp) {
97 const char kSdpLineEnd[] = "\r\n";
98 size_t ssrc_pos = 0;
99 while ((ssrc_pos = sdp->find(line_start, ssrc_pos)) !=
100 std::string::npos) {
101 size_t end_ssrc = sdp->find(kSdpLineEnd, ssrc_pos);
102 sdp->erase(ssrc_pos, end_ssrc - ssrc_pos + strlen(kSdpLineEnd));
103 }
104}
105
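// Base class for receivers of the signaling messages that the two test clients
// exchange in lieu of a real signaling channel.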
106class SignalingMessageReceiver {
107 public:
108 protected:
109 SignalingMessageReceiver() {}
110 virtual ~SignalingMessageReceiver() {}
111};
112
113class JsepMessageReceiver : public SignalingMessageReceiver {
114 public:
115 virtual void ReceiveSdpMessage(const std::string& type,
116 std::string& msg) = 0;
117 virtual void ReceiveIceMessage(const std::string& sdp_mid,
118 int sdp_mline_index,
119 const std::string& msg) = 0;
120
121 protected:
122 JsepMessageReceiver() {}
123 virtual ~JsepMessageReceiver() {}
124};
125
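// Base class for a test client that owns a PeerConnection, acts as its
// observer, and exchanges signaling messages with the remote test client
// through a MessageReceiver.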
126template <typename MessageReceiver>
127class PeerConnectionTestClientBase
128 : public webrtc::PeerConnectionObserver,
129 public MessageReceiver {
130 public:
131 ~PeerConnectionTestClientBase() {
132 while (!fake_video_renderers_.empty()) {
133 RenderMap::iterator it = fake_video_renderers_.begin();
134 delete it->second;
135 fake_video_renderers_.erase(it);
136 }
137 }
138
139 virtual void Negotiate() = 0;
140
141 virtual void Negotiate(bool audio, bool video) = 0;
142
143 virtual void SetVideoConstraints(
144 const webrtc::FakeConstraints& video_constraint) {
145 video_constraints_ = video_constraint;
146 }
147
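  // Adds a local media stream with an audio and/or a video track to the
  // PeerConnection.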
148 void AddMediaStream(bool audio, bool video) {
149 std::string label = kStreamLabelBase +
150 talk_base::ToString<int>(peer_connection_->local_streams()->count());
151 talk_base::scoped_refptr<webrtc::MediaStreamInterface> stream =
152 peer_connection_factory_->CreateLocalMediaStream(label);
153
154 if (audio && can_receive_audio()) {
155 FakeConstraints constraints;
156 // Disable highpass filter so that we can get all the test audio frames.
157 constraints.AddMandatory(
158 MediaConstraintsInterface::kHighpassFilter, false);
159 talk_base::scoped_refptr<webrtc::LocalAudioSource> source =
160 webrtc::LocalAudioSource::Create(&constraints);
      // TODO(perkj): Test the audio source when it is implemented. Currently
      // audio always uses the default input.
163 talk_base::scoped_refptr<webrtc::AudioTrackInterface> audio_track(
164 peer_connection_factory_->CreateAudioTrack(kAudioTrackLabelBase,
165 source));
166 stream->AddTrack(audio_track);
167 }
168 if (video && can_receive_video()) {
169 stream->AddTrack(CreateLocalVideoTrack(label));
170 }
171
172 EXPECT_TRUE(peer_connection_->AddStream(stream, NULL));
173 }
174
175 size_t NumberOfLocalMediaStreams() {
176 return peer_connection_->local_streams()->count();
177 }
178
179 bool SessionActive() {
180 return peer_connection_->signaling_state() ==
181 webrtc::PeerConnectionInterface::kStable;
182 }
183
184 void set_signaling_message_receiver(
185 MessageReceiver* signaling_message_receiver) {
186 signaling_message_receiver_ = signaling_message_receiver;
187 }
188
189 void EnableVideoDecoderFactory() {
190 video_decoder_factory_enabled_ = true;
191 fake_video_decoder_factory_->AddSupportedVideoCodecType(
192 webrtc::kVideoCodecVP8);
193 }
194
195 bool AudioFramesReceivedCheck(int number_of_frames) const {
196 return number_of_frames <= fake_audio_capture_module_->frames_received();
197 }
198
199 bool VideoFramesReceivedCheck(int number_of_frames) {
200 if (video_decoder_factory_enabled_) {
201 const std::vector<FakeWebRtcVideoDecoder*>& decoders
202 = fake_video_decoder_factory_->decoders();
203 if (decoders.empty()) {
204 return number_of_frames <= 0;
205 }
206
207 for (std::vector<FakeWebRtcVideoDecoder*>::const_iterator
208 it = decoders.begin(); it != decoders.end(); ++it) {
209 if (number_of_frames > (*it)->GetNumFramesReceived()) {
210 return false;
211 }
212 }
213 return true;
214 } else {
215 if (fake_video_renderers_.empty()) {
216 return number_of_frames <= 0;
217 }
218
219 for (RenderMap::const_iterator it = fake_video_renderers_.begin();
220 it != fake_video_renderers_.end(); ++it) {
221 if (number_of_frames > it->second->num_rendered_frames()) {
222 return false;
223 }
224 }
225 return true;
226 }
227 }
228 // Verify the CreateDtmfSender interface
229 void VerifyDtmf() {
230 talk_base::scoped_ptr<DummyDtmfObserver> observer(new DummyDtmfObserver());
231 talk_base::scoped_refptr<DtmfSenderInterface> dtmf_sender;
232
    // We can't create a DTMF sender with an invalid audio track or a non-local
    // track.
235 EXPECT_TRUE(peer_connection_->CreateDtmfSender(NULL) == NULL);
236 talk_base::scoped_refptr<webrtc::AudioTrackInterface> non_localtrack(
237 peer_connection_factory_->CreateAudioTrack("dummy_track",
238 NULL));
239 EXPECT_TRUE(peer_connection_->CreateDtmfSender(non_localtrack) == NULL);
240
241 // We should be able to create a DTMF sender from a local track.
242 webrtc::AudioTrackInterface* localtrack =
243 peer_connection_->local_streams()->at(0)->GetAudioTracks()[0];
244 dtmf_sender = peer_connection_->CreateDtmfSender(localtrack);
245 EXPECT_TRUE(dtmf_sender.get() != NULL);
246 dtmf_sender->RegisterObserver(observer.get());
247
248 // Test the DtmfSender object just created.
249 EXPECT_TRUE(dtmf_sender->CanInsertDtmf());
250 EXPECT_TRUE(dtmf_sender->InsertDtmf("1a", 100, 50));
251
252 // We don't need to verify that the DTMF tones are actually sent out because
253 // that is already covered by the tests of the lower level components.
254
255 EXPECT_TRUE_WAIT(observer->completed(), kMaxWaitMs);
256 std::vector<std::string> tones;
257 tones.push_back("1");
258 tones.push_back("a");
259 tones.push_back("");
260 observer->Verify(tones);
261
262 dtmf_sender->UnregisterObserver();
263 }
264
  // Verifies that the local and remote SessionDescriptions have rejected the
  // appropriate media content.
267 void VerifyRejectedMediaInSessionDescription() {
268 ASSERT_TRUE(peer_connection_->remote_description() != NULL);
269 ASSERT_TRUE(peer_connection_->local_description() != NULL);
270 const cricket::SessionDescription* remote_desc =
271 peer_connection_->remote_description()->description();
272 const cricket::SessionDescription* local_desc =
273 peer_connection_->local_description()->description();
274
275 const ContentInfo* remote_audio_content = GetFirstAudioContent(remote_desc);
276 if (remote_audio_content) {
277 const ContentInfo* audio_content =
278 GetFirstAudioContent(local_desc);
279 EXPECT_EQ(can_receive_audio(), !audio_content->rejected);
280 }
281
282 const ContentInfo* remote_video_content = GetFirstVideoContent(remote_desc);
283 if (remote_video_content) {
284 const ContentInfo* video_content =
285 GetFirstVideoContent(local_desc);
286 EXPECT_EQ(can_receive_video(), !video_content->rejected);
287 }
288 }
289
290 void SetExpectIceRestart(bool expect_restart) {
291 expect_ice_restart_ = expect_restart;
292 }
293
294 bool ExpectIceRestart() const { return expect_ice_restart_; }
295
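  // Verifies the ICE ufrag/pwd of every non-rejected content in the local
  // description: the credentials must change after an expected ICE restart and
  // stay the same otherwise.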
296 void VerifyLocalIceUfragAndPassword() {
297 ASSERT_TRUE(peer_connection_->local_description() != NULL);
298 const cricket::SessionDescription* desc =
299 peer_connection_->local_description()->description();
300 const cricket::ContentInfos& contents = desc->contents();
301
302 for (size_t index = 0; index < contents.size(); ++index) {
303 if (contents[index].rejected)
304 continue;
305 const cricket::TransportDescription* transport_desc =
306 desc->GetTransportDescriptionByName(contents[index].name);
307
308 std::map<int, IceUfragPwdPair>::const_iterator ufragpair_it =
309 ice_ufrag_pwd_.find(index);
310 if (ufragpair_it == ice_ufrag_pwd_.end()) {
311 ASSERT_FALSE(ExpectIceRestart());
312 ice_ufrag_pwd_[index] = IceUfragPwdPair(transport_desc->ice_ufrag,
313 transport_desc->ice_pwd);
314 } else if (ExpectIceRestart()) {
315 const IceUfragPwdPair& ufrag_pwd = ufragpair_it->second;
316 EXPECT_NE(ufrag_pwd.first, transport_desc->ice_ufrag);
317 EXPECT_NE(ufrag_pwd.second, transport_desc->ice_pwd);
318 } else {
319 const IceUfragPwdPair& ufrag_pwd = ufragpair_it->second;
320 EXPECT_EQ(ufrag_pwd.first, transport_desc->ice_ufrag);
321 EXPECT_EQ(ufrag_pwd.second, transport_desc->ice_pwd);
322 }
323 }
324 }
325
326 int GetAudioOutputLevelStats(webrtc::MediaStreamTrackInterface* track) {
327 talk_base::scoped_refptr<MockStatsObserver>
328 observer(new talk_base::RefCountedObject<MockStatsObserver>());
329 EXPECT_TRUE(peer_connection_->GetStats(observer, track));
330 EXPECT_TRUE_WAIT(observer->called(), kMaxWaitMs);
331 return observer->AudioOutputLevel();
332 }
333
334 int GetAudioInputLevelStats() {
335 talk_base::scoped_refptr<MockStatsObserver>
336 observer(new talk_base::RefCountedObject<MockStatsObserver>());
337 EXPECT_TRUE(peer_connection_->GetStats(observer, NULL));
338 EXPECT_TRUE_WAIT(observer->called(), kMaxWaitMs);
339 return observer->AudioInputLevel();
340 }
341
342 int GetBytesReceivedStats(webrtc::MediaStreamTrackInterface* track) {
343 talk_base::scoped_refptr<MockStatsObserver>
344 observer(new talk_base::RefCountedObject<MockStatsObserver>());
345 EXPECT_TRUE(peer_connection_->GetStats(observer, track));
346 EXPECT_TRUE_WAIT(observer->called(), kMaxWaitMs);
347 return observer->BytesReceived();
348 }
349
350 int GetBytesSentStats(webrtc::MediaStreamTrackInterface* track) {
351 talk_base::scoped_refptr<MockStatsObserver>
352 observer(new talk_base::RefCountedObject<MockStatsObserver>());
353 EXPECT_TRUE(peer_connection_->GetStats(observer, track));
354 EXPECT_TRUE_WAIT(observer->called(), kMaxWaitMs);
355 return observer->BytesSent();
356 }
357
358 int rendered_width() {
359 EXPECT_FALSE(fake_video_renderers_.empty());
360 return fake_video_renderers_.empty() ? 1 :
361 fake_video_renderers_.begin()->second->width();
362 }
363
364 int rendered_height() {
365 EXPECT_FALSE(fake_video_renderers_.empty());
366 return fake_video_renderers_.empty() ? 1 :
367 fake_video_renderers_.begin()->second->height();
368 }
369
370 size_t number_of_remote_streams() {
371 if (!pc())
372 return 0;
373 return pc()->remote_streams()->count();
374 }
375
376 StreamCollectionInterface* remote_streams() {
377 if (!pc()) {
378 ADD_FAILURE();
379 return NULL;
380 }
381 return pc()->remote_streams();
382 }
383
384 StreamCollectionInterface* local_streams() {
385 if (!pc()) {
386 ADD_FAILURE();
387 return NULL;
388 }
389 return pc()->local_streams();
390 }
391
392 webrtc::PeerConnectionInterface::SignalingState signaling_state() {
393 return pc()->signaling_state();
394 }
395
396 webrtc::PeerConnectionInterface::IceConnectionState ice_connection_state() {
397 return pc()->ice_connection_state();
398 }
399
400 webrtc::PeerConnectionInterface::IceGatheringState ice_gathering_state() {
401 return pc()->ice_gathering_state();
402 }
403
404 // PeerConnectionObserver callbacks.
405 virtual void OnError() {}
406 virtual void OnMessage(const std::string&) {}
407 virtual void OnSignalingMessage(const std::string& /*msg*/) {}
408 virtual void OnSignalingChange(
409 webrtc::PeerConnectionInterface::SignalingState new_state) {
410 EXPECT_EQ(peer_connection_->signaling_state(), new_state);
411 }
412 virtual void OnAddStream(webrtc::MediaStreamInterface* media_stream) {
413 for (size_t i = 0; i < media_stream->GetVideoTracks().size(); ++i) {
414 const std::string id = media_stream->GetVideoTracks()[i]->id();
415 ASSERT_TRUE(fake_video_renderers_.find(id) ==
416 fake_video_renderers_.end());
417 fake_video_renderers_[id] = new webrtc::FakeVideoTrackRenderer(
418 media_stream->GetVideoTracks()[i]);
419 }
420 }
421 virtual void OnRemoveStream(webrtc::MediaStreamInterface* media_stream) {}
422 virtual void OnRenegotiationNeeded() {}
423 virtual void OnIceConnectionChange(
424 webrtc::PeerConnectionInterface::IceConnectionState new_state) {
425 EXPECT_EQ(peer_connection_->ice_connection_state(), new_state);
426 }
427 virtual void OnIceGatheringChange(
428 webrtc::PeerConnectionInterface::IceGatheringState new_state) {
429 EXPECT_EQ(peer_connection_->ice_gathering_state(), new_state);
430 }
431 virtual void OnIceCandidate(
432 const webrtc::IceCandidateInterface* /*candidate*/) {}
433
434 webrtc::PeerConnectionInterface* pc() {
435 return peer_connection_.get();
436 }
437
438 protected:
439 explicit PeerConnectionTestClientBase(const std::string& id)
440 : id_(id),
441 expect_ice_restart_(false),
442 fake_video_decoder_factory_(NULL),
443 fake_video_encoder_factory_(NULL),
444 video_decoder_factory_enabled_(false),
445 signaling_message_receiver_(NULL) {
446 }
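  // Sets up the fake port allocator, the fake audio capture module (on its
  // own thread), the fake video encoder/decoder factories, the
  // PeerConnectionFactory and finally the PeerConnection itself.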
447 bool Init(const MediaConstraintsInterface* constraints) {
448 EXPECT_TRUE(!peer_connection_);
449 EXPECT_TRUE(!peer_connection_factory_);
450 allocator_factory_ = webrtc::FakePortAllocatorFactory::Create();
451 if (!allocator_factory_) {
452 return false;
453 }
454 audio_thread_.Start();
455 fake_audio_capture_module_ = FakeAudioCaptureModule::Create(
456 &audio_thread_);
457
458 if (fake_audio_capture_module_ == NULL) {
459 return false;
460 }
461 fake_video_decoder_factory_ = new FakeWebRtcVideoDecoderFactory();
462 fake_video_encoder_factory_ = new FakeWebRtcVideoEncoderFactory();
463 peer_connection_factory_ = webrtc::CreatePeerConnectionFactory(
464 talk_base::Thread::Current(), talk_base::Thread::Current(),
465 fake_audio_capture_module_, fake_video_encoder_factory_,
466 fake_video_decoder_factory_);
467 if (!peer_connection_factory_) {
468 return false;
469 }
470 peer_connection_ = CreatePeerConnection(allocator_factory_.get(),
471 constraints);
472 return peer_connection_.get() != NULL;
473 }
474 virtual talk_base::scoped_refptr<webrtc::PeerConnectionInterface>
475 CreatePeerConnection(webrtc::PortAllocatorFactoryInterface* factory,
476 const MediaConstraintsInterface* constraints) = 0;
477 MessageReceiver* signaling_message_receiver() {
478 return signaling_message_receiver_;
479 }
480 webrtc::PeerConnectionFactoryInterface* peer_connection_factory() {
481 return peer_connection_factory_.get();
482 }
483
484 virtual bool can_receive_audio() = 0;
485 virtual bool can_receive_video() = 0;
486 const std::string& id() const { return id_; }
487
488 private:
489 class DummyDtmfObserver : public DtmfSenderObserverInterface {
490 public:
491 DummyDtmfObserver() : completed_(false) {}
492
493 // Implements DtmfSenderObserverInterface.
494 void OnToneChange(const std::string& tone) {
495 tones_.push_back(tone);
496 if (tone.empty()) {
497 completed_ = true;
498 }
499 }
500
501 void Verify(const std::vector<std::string>& tones) const {
502 ASSERT_TRUE(tones_.size() == tones.size());
503 EXPECT_TRUE(std::equal(tones.begin(), tones.end(), tones_.begin()));
504 }
505
506 bool completed() const { return completed_; }
507
508 private:
509 bool completed_;
510 std::vector<std::string> tones_;
511 };
512
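  // Creates a local video track fed by a FakePeriodicVideoCapturer, applying
  // |video_constraints_| plus a 10 fps cap.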
513 talk_base::scoped_refptr<webrtc::VideoTrackInterface>
514 CreateLocalVideoTrack(const std::string stream_label) {
    // Set the max frame rate to 10 fps to reduce the risk of flaky tests.
516 FakeConstraints source_constraints = video_constraints_;
517 source_constraints.SetMandatoryMaxFrameRate(10);
518
519 talk_base::scoped_refptr<webrtc::VideoSourceInterface> source =
520 peer_connection_factory_->CreateVideoSource(
521 new webrtc::FakePeriodicVideoCapturer(),
522 &source_constraints);
523 std::string label = stream_label + kVideoTrackLabelBase;
524 return peer_connection_factory_->CreateVideoTrack(label, source);
525 }
526
527 std::string id_;
  // Separate thread for executing |fake_audio_capture_module_| tasks. Audio
  // processing must not be performed on the same thread as signaling due to
  // signaling time constraints and the relative complexity of the audio
  // pipeline. This is consistent with the video pipeline, which uses a
  // separate thread for encoding and decoding.
533 talk_base::Thread audio_thread_;
534
535 talk_base::scoped_refptr<webrtc::PortAllocatorFactoryInterface>
536 allocator_factory_;
537 talk_base::scoped_refptr<webrtc::PeerConnectionInterface> peer_connection_;
538 talk_base::scoped_refptr<webrtc::PeerConnectionFactoryInterface>
539 peer_connection_factory_;
540
541 typedef std::pair<std::string, std::string> IceUfragPwdPair;
542 std::map<int, IceUfragPwdPair> ice_ufrag_pwd_;
543 bool expect_ice_restart_;
544
  // Needed to keep track of the number of frames sent.
546 talk_base::scoped_refptr<FakeAudioCaptureModule> fake_audio_capture_module_;
547 // Needed to keep track of number of frames received.
548 typedef std::map<std::string, webrtc::FakeVideoTrackRenderer*> RenderMap;
549 RenderMap fake_video_renderers_;
  // Needed to keep track of the number of frames received when an external
  // decoder is used.
552 FakeWebRtcVideoDecoderFactory* fake_video_decoder_factory_;
553 FakeWebRtcVideoEncoderFactory* fake_video_encoder_factory_;
554 bool video_decoder_factory_enabled_;
555 webrtc::FakeConstraints video_constraints_;
556
557 // For remote peer communication.
558 MessageReceiver* signaling_message_receiver_;
559};
560
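// Test client that drives JSEP-style signaling: it creates offers/answers,
// exchanges SDP and ICE candidates via JsepMessageReceiver, and can optionally
// filter the SDP it receives.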
561class JsepTestClient
562 : public PeerConnectionTestClientBase<JsepMessageReceiver> {
563 public:
564 static JsepTestClient* CreateClient(
565 const std::string& id,
566 const MediaConstraintsInterface* constraints) {
567 JsepTestClient* client(new JsepTestClient(id));
568 if (!client->Init(constraints)) {
569 delete client;
570 return NULL;
571 }
572 return client;
573 }
574 ~JsepTestClient() {}
575
576 virtual void Negotiate() {
577 Negotiate(true, true);
578 }
579 virtual void Negotiate(bool audio, bool video) {
580 talk_base::scoped_ptr<SessionDescriptionInterface> offer;
581 EXPECT_TRUE(DoCreateOffer(offer.use()));
582
583 if (offer->description()->GetContentByName("audio")) {
584 offer->description()->GetContentByName("audio")->rejected = !audio;
585 }
586 if (offer->description()->GetContentByName("video")) {
587 offer->description()->GetContentByName("video")->rejected = !video;
588 }
589
590 std::string sdp;
591 EXPECT_TRUE(offer->ToString(&sdp));
592 EXPECT_TRUE(DoSetLocalDescription(offer.release()));
593 signaling_message_receiver()->ReceiveSdpMessage(
594 webrtc::SessionDescriptionInterface::kOffer, sdp);
595 }
596 // JsepMessageReceiver callback.
597 virtual void ReceiveSdpMessage(const std::string& type,
598 std::string& msg) {
599 FilterIncomingSdpMessage(&msg);
600 if (type == webrtc::SessionDescriptionInterface::kOffer) {
601 HandleIncomingOffer(msg);
602 } else {
603 HandleIncomingAnswer(msg);
604 }
605 }
606 // JsepMessageReceiver callback.
607 virtual void ReceiveIceMessage(const std::string& sdp_mid,
608 int sdp_mline_index,
609 const std::string& msg) {
610 LOG(INFO) << id() << "ReceiveIceMessage";
611 talk_base::scoped_ptr<webrtc::IceCandidateInterface> candidate(
612 webrtc::CreateIceCandidate(sdp_mid, sdp_mline_index, msg, NULL));
613 EXPECT_TRUE(pc()->AddIceCandidate(candidate.get()));
614 }
615 // Implements PeerConnectionObserver functions needed by Jsep.
616 virtual void OnIceCandidate(const webrtc::IceCandidateInterface* candidate) {
617 LOG(INFO) << id() << "OnIceCandidate";
618
619 std::string ice_sdp;
620 EXPECT_TRUE(candidate->ToString(&ice_sdp));
621 if (signaling_message_receiver() == NULL) {
622 // Remote party may be deleted.
623 return;
624 }
625 signaling_message_receiver()->ReceiveIceMessage(candidate->sdp_mid(),
626 candidate->sdp_mline_index(), ice_sdp);
627 }
628
629 void IceRestart() {
630 session_description_constraints_.SetMandatoryIceRestart(true);
631 SetExpectIceRestart(true);
632 }
633
634 void SetReceiveAudioVideo(bool audio, bool video) {
635 session_description_constraints_.SetMandatoryReceiveAudio(audio);
636 session_description_constraints_.SetMandatoryReceiveVideo(video);
637 ASSERT_EQ(audio, can_receive_audio());
638 ASSERT_EQ(video, can_receive_video());
639 }
640
641 void RemoveMsidFromReceivedSdp(bool remove) {
642 remove_msid_ = remove;
643 }
644
645 void RemoveSdesCryptoFromReceivedSdp(bool remove) {
646 remove_sdes_ = remove;
647 }
648
649 void RemoveBundleFromReceivedSdp(bool remove) {
650 remove_bundle_ = remove;
651 }
652
653 virtual bool can_receive_audio() {
654 bool value;
655 if (webrtc::FindConstraint(&session_description_constraints_,
656 MediaConstraintsInterface::kOfferToReceiveAudio, &value, NULL)) {
657 return value;
658 }
659 return true;
660 }
661
662 virtual bool can_receive_video() {
663 bool value;
664 if (webrtc::FindConstraint(&session_description_constraints_,
665 MediaConstraintsInterface::kOfferToReceiveVideo, &value, NULL)) {
666 return value;
667 }
668 return true;
669 }
670
671 virtual void OnIceComplete() {
672 LOG(INFO) << id() << "OnIceComplete";
673 }
674
675 virtual void OnDataChannel(DataChannelInterface* data_channel) {
676 LOG(INFO) << id() << "OnDataChannel";
677 data_channel_ = data_channel;
678 data_observer_.reset(new MockDataChannelObserver(data_channel));
679 }
680
681 void CreateDataChannel() {
682 data_channel_ = pc()->CreateDataChannel(kDataChannelLabel,
683 NULL);
684 ASSERT_TRUE(data_channel_.get() != NULL);
685 data_observer_.reset(new MockDataChannelObserver(data_channel_));
686 }
687
688 DataChannelInterface* data_channel() { return data_channel_; }
689 const MockDataChannelObserver* data_observer() const {
690 return data_observer_.get();
691 }
692
693 protected:
694 explicit JsepTestClient(const std::string& id)
695 : PeerConnectionTestClientBase<JsepMessageReceiver>(id),
696 remove_msid_(false),
697 remove_bundle_(false),
698 remove_sdes_(false) {
699 }
700
701 virtual talk_base::scoped_refptr<webrtc::PeerConnectionInterface>
702 CreatePeerConnection(webrtc::PortAllocatorFactoryInterface* factory,
703 const MediaConstraintsInterface* constraints) {
704 // CreatePeerConnection with IceServers.
705 webrtc::PeerConnectionInterface::IceServers ice_servers;
706 webrtc::PeerConnectionInterface::IceServer ice_server;
707 ice_server.uri = "stun:stun.l.google.com:19302";
708 ice_servers.push_back(ice_server);
709 return peer_connection_factory()->CreatePeerConnection(
710 ice_servers, constraints, factory, NULL, this);
711 }
712
713 void HandleIncomingOffer(const std::string& msg) {
714 LOG(INFO) << id() << "HandleIncomingOffer ";
715 if (NumberOfLocalMediaStreams() == 0) {
716 // If we are not sending any streams ourselves it is time to add some.
717 AddMediaStream(true, true);
718 }
719 talk_base::scoped_ptr<SessionDescriptionInterface> desc(
720 webrtc::CreateSessionDescription("offer", msg, NULL));
721 EXPECT_TRUE(DoSetRemoteDescription(desc.release()));
722 talk_base::scoped_ptr<SessionDescriptionInterface> answer;
723 EXPECT_TRUE(DoCreateAnswer(answer.use()));
724 std::string sdp;
725 EXPECT_TRUE(answer->ToString(&sdp));
726 EXPECT_TRUE(DoSetLocalDescription(answer.release()));
727 if (signaling_message_receiver()) {
728 signaling_message_receiver()->ReceiveSdpMessage(
729 webrtc::SessionDescriptionInterface::kAnswer, sdp);
730 }
731 }
732
733 void HandleIncomingAnswer(const std::string& msg) {
734 LOG(INFO) << id() << "HandleIncomingAnswer";
735 talk_base::scoped_ptr<SessionDescriptionInterface> desc(
736 webrtc::CreateSessionDescription("answer", msg, NULL));
737 EXPECT_TRUE(DoSetRemoteDescription(desc.release()));
738 }
739
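  // Creates an offer (|offer| == true) or an answer using the session
  // description constraints and waits for the asynchronous result.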
740 bool DoCreateOfferAnswer(SessionDescriptionInterface** desc,
741 bool offer) {
742 talk_base::scoped_refptr<MockCreateSessionDescriptionObserver>
743 observer(new talk_base::RefCountedObject<
744 MockCreateSessionDescriptionObserver>());
745 if (offer) {
746 pc()->CreateOffer(observer, &session_description_constraints_);
747 } else {
748 pc()->CreateAnswer(observer, &session_description_constraints_);
749 }
750 EXPECT_EQ_WAIT(true, observer->called(), kMaxWaitMs);
751 *desc = observer->release_desc();
752 if (observer->result() && ExpectIceRestart()) {
753 EXPECT_EQ(0u, (*desc)->candidates(0)->count());
754 }
755 return observer->result();
756 }
757
758 bool DoCreateOffer(SessionDescriptionInterface** desc) {
759 return DoCreateOfferAnswer(desc, true);
760 }
761
762 bool DoCreateAnswer(SessionDescriptionInterface** desc) {
763 return DoCreateOfferAnswer(desc, false);
764 }
765
766 bool DoSetLocalDescription(SessionDescriptionInterface* desc) {
767 talk_base::scoped_refptr<MockSetSessionDescriptionObserver>
768 observer(new talk_base::RefCountedObject<
769 MockSetSessionDescriptionObserver>());
770 LOG(INFO) << id() << "SetLocalDescription ";
771 pc()->SetLocalDescription(observer, desc);
    // Ignore the observer result. If we wait for the result with
    // EXPECT_TRUE_WAIT, local ICE candidates might be sent to the remote peer
    // before the offer, which is an error.
    // The reason is that EXPECT_TRUE_WAIT uses
    // talk_base::Thread::Current()->ProcessMessages(1);
    // ProcessMessages waits at least 1 ms but processes all pending messages
    // before returning. Since this test is synchronous and sends messages to
    // the remote peer whenever a callback is invoked, waiting here can lead to
    // messages being sent to the remote peer in the wrong order.
    // TODO(perkj): Find a way to check the result without risking that the
    // order of sent messages is changed, e.g. by posting all messages that are
    // sent to the remote peer.
784 return true;
785 }
786
787 bool DoSetRemoteDescription(SessionDescriptionInterface* desc) {
788 talk_base::scoped_refptr<MockSetSessionDescriptionObserver>
789 observer(new talk_base::RefCountedObject<
790 MockSetSessionDescriptionObserver>());
791 LOG(INFO) << id() << "SetRemoteDescription ";
792 pc()->SetRemoteDescription(observer, desc);
793 EXPECT_TRUE_WAIT(observer->called(), kMaxWaitMs);
794 return observer->result();
795 }
796
797 // This modifies all received SDP messages before they are processed.
798 void FilterIncomingSdpMessage(std::string* sdp) {
799 if (remove_msid_) {
800 const char kSdpSsrcAttribute[] = "a=ssrc:";
801 RemoveLinesFromSdp(kSdpSsrcAttribute, sdp);
802 const char kSdpMsidSupportedAttribute[] = "a=msid-semantic:";
803 RemoveLinesFromSdp(kSdpMsidSupportedAttribute, sdp);
804 }
805 if (remove_bundle_) {
806 const char kSdpBundleAttribute[] = "a=group:BUNDLE";
807 RemoveLinesFromSdp(kSdpBundleAttribute, sdp);
808 }
809 if (remove_sdes_) {
810 const char kSdpSdesCryptoAttribute[] = "a=crypto";
811 RemoveLinesFromSdp(kSdpSdesCryptoAttribute, sdp);
812 }
813 }
814
815 private:
816 webrtc::FakeConstraints session_description_constraints_;
817 bool remove_msid_; // True if MSID should be removed in received SDP.
818 bool remove_bundle_; // True if bundle should be removed in received SDP.
819 bool remove_sdes_; // True if a=crypto should be removed in received SDP.
820
821 talk_base::scoped_refptr<DataChannelInterface> data_channel_;
822 talk_base::scoped_ptr<MockDataChannelObserver> data_observer_;
823};
824
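// Test fixture that connects two test clients back to back and drives a local
// peer-to-peer call between them.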
825template <typename SignalingClass>
826class P2PTestConductor : public testing::Test {
827 public:
828 bool SessionActive() {
829 return initiating_client_->SessionActive() &&
830 receiving_client_->SessionActive();
831 }
  // Returns true if the given number of frames has been received or if it is
  // known that this will never occur (e.g. no frames will be sent or
  // captured).
835 bool FramesNotPending(int audio_frames_to_receive,
836 int video_frames_to_receive) {
837 return VideoFramesReceivedCheck(video_frames_to_receive) &&
838 AudioFramesReceivedCheck(audio_frames_to_receive);
839 }
840 bool AudioFramesReceivedCheck(int frames_received) {
841 return initiating_client_->AudioFramesReceivedCheck(frames_received) &&
842 receiving_client_->AudioFramesReceivedCheck(frames_received);
843 }
844 bool VideoFramesReceivedCheck(int frames_received) {
845 return initiating_client_->VideoFramesReceivedCheck(frames_received) &&
846 receiving_client_->VideoFramesReceivedCheck(frames_received);
847 }
848 void VerifyDtmf() {
849 initiating_client_->VerifyDtmf();
850 receiving_client_->VerifyDtmf();
851 }
852
853 void TestUpdateOfferWithRejectedContent() {
854 initiating_client_->Negotiate(true, false);
855 EXPECT_TRUE_WAIT(
856 FramesNotPending(kEndAudioFrameCount * 2, kEndVideoFrameCount),
857 kMaxWaitForFramesMs);
    // There shouldn't be any more video frames after the new offer is
    // negotiated.
860 EXPECT_FALSE(VideoFramesReceivedCheck(kEndVideoFrameCount + 1));
861 }
862
863 void VerifyRenderedSize(int width, int height) {
864 EXPECT_EQ(width, receiving_client()->rendered_width());
865 EXPECT_EQ(height, receiving_client()->rendered_height());
866 EXPECT_EQ(width, initializing_client()->rendered_width());
867 EXPECT_EQ(height, initializing_client()->rendered_height());
868 }
869
870 void VerifySessionDescriptions() {
871 initiating_client_->VerifyRejectedMediaInSessionDescription();
872 receiving_client_->VerifyRejectedMediaInSessionDescription();
873 initiating_client_->VerifyLocalIceUfragAndPassword();
874 receiving_client_->VerifyLocalIceUfragAndPassword();
875 }
876
877 P2PTestConductor() {
878 talk_base::InitializeSSL(NULL);
879 }
880 ~P2PTestConductor() {
881 if (initiating_client_) {
882 initiating_client_->set_signaling_message_receiver(NULL);
883 }
884 if (receiving_client_) {
885 receiving_client_->set_signaling_message_receiver(NULL);
886 }
    talk_base::CleanupSSL();
  }
889
890 bool CreateTestClients() {
891 return CreateTestClients(NULL, NULL);
892 }
893
894 bool CreateTestClients(MediaConstraintsInterface* init_constraints,
895 MediaConstraintsInterface* recv_constraints) {
896 initiating_client_.reset(SignalingClass::CreateClient("Caller: ",
897 init_constraints));
898 receiving_client_.reset(SignalingClass::CreateClient("Callee: ",
899 recv_constraints));
900 if (!initiating_client_ || !receiving_client_) {
901 return false;
902 }
903 initiating_client_->set_signaling_message_receiver(receiving_client_.get());
904 receiving_client_->set_signaling_message_receiver(initiating_client_.get());
905 return true;
906 }
907
908 void SetVideoConstraints(const webrtc::FakeConstraints& init_constraints,
909 const webrtc::FakeConstraints& recv_constraints) {
910 initiating_client_->SetVideoConstraints(init_constraints);
911 receiving_client_->SetVideoConstraints(recv_constraints);
912 }
913
914 void EnableVideoDecoderFactory() {
915 initiating_client_->EnableVideoDecoderFactory();
916 receiving_client_->EnableVideoDecoderFactory();
917 }
918
919 // This test sets up a call between two parties. Both parties send static
920 // frames to each other. Once the test is finished the number of sent frames
921 // is compared to the number of received frames.
922 void LocalP2PTest() {
923 if (initiating_client_->NumberOfLocalMediaStreams() == 0) {
924 initiating_client_->AddMediaStream(true, true);
925 }
926 initiating_client_->Negotiate();
927 const int kMaxWaitForActivationMs = 5000;
    // ASSERT is used here since, if the session never becomes active, the
    // following checks are guaranteed to fail and would eat up 5 seconds.
930 ASSERT_TRUE_WAIT(SessionActive(), kMaxWaitForActivationMs);
931 VerifySessionDescriptions();
932
933
934 int audio_frame_count = kEndAudioFrameCount;
935 // TODO(ronghuawu): Add test to cover the case of sendonly and recvonly.
936 if (!initiating_client_->can_receive_audio() ||
937 !receiving_client_->can_receive_audio()) {
938 audio_frame_count = -1;
939 }
940 int video_frame_count = kEndVideoFrameCount;
941 if (!initiating_client_->can_receive_video() ||
942 !receiving_client_->can_receive_video()) {
943 video_frame_count = -1;
944 }
945
946 if (audio_frame_count != -1 || video_frame_count != -1) {
947 // Audio or video is expected to flow, so both sides should get to the
948 // Connected state.
949 // Note: These tests have been observed to fail under heavy load at
950 // shorter timeouts, so they may be flaky.
951 EXPECT_EQ_WAIT(
952 webrtc::PeerConnectionInterface::kIceConnectionConnected,
953 initiating_client_->ice_connection_state(),
954 kMaxWaitForFramesMs);
955 EXPECT_EQ_WAIT(
956 webrtc::PeerConnectionInterface::kIceConnectionConnected,
957 receiving_client_->ice_connection_state(),
958 kMaxWaitForFramesMs);
959 }
960
961 if (initiating_client_->can_receive_audio() ||
962 initiating_client_->can_receive_video()) {
963 // The initiating client can receive media, so it must produce candidates
964 // that will serve as destinations for that media.
965 // TODO(bemasc): Understand why the state is not already Complete here, as
966 // seems to be the case for the receiving client. This may indicate a bug
967 // in the ICE gathering system.
968 EXPECT_NE(webrtc::PeerConnectionInterface::kIceGatheringNew,
969 initiating_client_->ice_gathering_state());
970 }
971 if (receiving_client_->can_receive_audio() ||
972 receiving_client_->can_receive_video()) {
973 EXPECT_EQ_WAIT(webrtc::PeerConnectionInterface::kIceGatheringComplete,
974 receiving_client_->ice_gathering_state(),
975 kMaxWaitForFramesMs);
976 }
977
978 EXPECT_TRUE_WAIT(FramesNotPending(audio_frame_count, video_frame_count),
979 kMaxWaitForFramesMs);
980 }
981
982 SignalingClass* initializing_client() { return initiating_client_.get(); }
983 SignalingClass* receiving_client() { return receiving_client_.get(); }
984
985 private:
986 talk_base::scoped_ptr<SignalingClass> initiating_client_;
987 talk_base::scoped_ptr<SignalingClass> receiving_client_;
988};
989typedef P2PTestConductor<JsepTestClient> JsepPeerConnectionP2PTestClient;
990
// This test sets up a Jsep call between two parties and tests DTMF.
992TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestDtmf) {
993 ASSERT_TRUE(CreateTestClients());
994 LocalP2PTest();
995 VerifyDtmf();
996}
997
// This test sets up a Jsep call between two parties and tests that we can get
// a video aspect ratio of 16:9.
1000TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTest16To9) {
1001 ASSERT_TRUE(CreateTestClients());
1002 FakeConstraints constraint;
1003 double requested_ratio = 640.0/360;
1004 constraint.SetMandatoryMinAspectRatio(requested_ratio);
1005 SetVideoConstraints(constraint, constraint);
1006 LocalP2PTest();
1007
1008 ASSERT_LE(0, initializing_client()->rendered_height());
1009 double initiating_video_ratio =
1010 static_cast<double> (initializing_client()->rendered_width()) /
1011 initializing_client()->rendered_height();
1012 EXPECT_LE(requested_ratio, initiating_video_ratio);
1013
1014 ASSERT_LE(0, receiving_client()->rendered_height());
1015 double receiving_video_ratio =
1016 static_cast<double> (receiving_client()->rendered_width()) /
1017 receiving_client()->rendered_height();
1018 EXPECT_LE(requested_ratio, receiving_video_ratio);
1019}
1020
// This test sets up a Jsep call between two parties and tests that the
// received video has a resolution of 1280x720.
1023// TODO(mallinath): Enable when
1024// http://code.google.com/p/webrtc/issues/detail?id=981 is fixed.
1025TEST_F(JsepPeerConnectionP2PTestClient, DISABLED_LocalP2PTest1280By720) {
1026 ASSERT_TRUE(CreateTestClients());
1027 FakeConstraints constraint;
1028 constraint.SetMandatoryMinWidth(1280);
1029 constraint.SetMandatoryMinHeight(720);
1030 SetVideoConstraints(constraint, constraint);
1031 LocalP2PTest();
1032 VerifyRenderedSize(1280, 720);
1033}
1034
1035// This test sets up a call between two endpoints that are configured to use
1036// DTLS key agreement. As a result, DTLS is negotiated and used for transport.
1037TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestDtls) {
1038 MAYBE_SKIP_TEST(talk_base::SSLStreamAdapter::HaveDtlsSrtp);
1039 FakeConstraints setup_constraints;
1040 setup_constraints.AddMandatory(MediaConstraintsInterface::kEnableDtlsSrtp,
1041 true);
1042 ASSERT_TRUE(CreateTestClients(&setup_constraints, &setup_constraints));
1043 LocalP2PTest();
1044 VerifyRenderedSize(640, 480);
1045}
1046
1047// This test sets up a call between an endpoint configured to use either SDES or
1048// DTLS (the offerer) and just SDES (the answerer). As a result, SDES is used
1049// instead of DTLS.
1050TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestOfferDtlsToSdes) {
1051 MAYBE_SKIP_TEST(talk_base::SSLStreamAdapter::HaveDtlsSrtp);
1052 FakeConstraints setup_constraints;
1053 setup_constraints.AddMandatory(MediaConstraintsInterface::kEnableDtlsSrtp,
1054 true);
1055 ASSERT_TRUE(CreateTestClients(&setup_constraints, NULL));
1056 LocalP2PTest();
1057 VerifyRenderedSize(640, 480);
1058}
1059
1060// This test sets up a call between an endpoint configured to use SDES
1061// (the offerer) and either SDES or DTLS (the answerer). As a result, SDES is
1062// used instead of DTLS.
1063TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestOfferSdesToDtls) {
1064 MAYBE_SKIP_TEST(talk_base::SSLStreamAdapter::HaveDtlsSrtp);
1065 FakeConstraints setup_constraints;
1066 setup_constraints.AddMandatory(MediaConstraintsInterface::kEnableDtlsSrtp,
1067 true);
1068 ASSERT_TRUE(CreateTestClients(NULL, &setup_constraints));
1069 LocalP2PTest();
1070 VerifyRenderedSize(640, 480);
1071}
1072
// This test sets up a call between two endpoints that are configured to use
// DTLS key agreement. The offerer doesn't support SDES. As a result, DTLS is
// negotiated and used for transport.
1076TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestOfferDtlsButNotSdes) {
1077 MAYBE_SKIP_TEST(talk_base::SSLStreamAdapter::HaveDtlsSrtp);
1078 FakeConstraints setup_constraints;
1079 setup_constraints.AddMandatory(MediaConstraintsInterface::kEnableDtlsSrtp,
1080 true);
1081 ASSERT_TRUE(CreateTestClients(&setup_constraints, &setup_constraints));
1082 receiving_client()->RemoveSdesCryptoFromReceivedSdp(true);
1083 LocalP2PTest();
1084 VerifyRenderedSize(640, 480);
1085}
1086
// This test sets up a Jsep call between two parties, and the callee only
// accepts receiving video.
1089TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestAnswerVideo) {
1090 ASSERT_TRUE(CreateTestClients());
1091 receiving_client()->SetReceiveAudioVideo(false, true);
1092 LocalP2PTest();
1093}
1094
// This test sets up a Jsep call between two parties, and the callee only
// accepts receiving audio.
1097TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestAnswerAudio) {
1098 ASSERT_TRUE(CreateTestClients());
1099 receiving_client()->SetReceiveAudioVideo(true, false);
1100 LocalP2PTest();
1101}
1102
// This test sets up a Jsep call between two parties, and the callee rejects
// both audio and video.
1105TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestAnswerNone) {
1106 ASSERT_TRUE(CreateTestClients());
1107 receiving_client()->SetReceiveAudioVideo(false, false);
1108 LocalP2PTest();
1109}
1110
// This test sets up an audio and video call between two parties. After the
// call has been running for a while (a few frames), the caller sends an
// updated offer with video rejected. Once the renegotiation is done, the video
// flow should stop and the audio flow should continue.
1115TEST_F(JsepPeerConnectionP2PTestClient, UpdateOfferWithRejectedContent) {
1116 ASSERT_TRUE(CreateTestClients());
1117 LocalP2PTest();
1118 TestUpdateOfferWithRejectedContent();
1119}
1120
// This test sets up a Jsep call between two parties. The MSID attributes are
// removed from the SDP the callee receives from the caller.
1123TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestWithoutMsid) {
1124 ASSERT_TRUE(CreateTestClients());
1125 receiving_client()->RemoveMsidFromReceivedSdp(true);
1126 // TODO(perkj): Currently there is a bug that cause audio to stop playing if
1127 // audio and video is muxed when MSID is disabled. Remove
1128 // SetRemoveBundleFromSdp once
1129 // https://code.google.com/p/webrtc/issues/detail?id=1193 is fixed.
1130 receiving_client()->RemoveBundleFromReceivedSdp(true);
1131 LocalP2PTest();
1132}
1133
// This test sets up a Jsep call between two parties, and the initiating peer
// sends two streams.
1136// TODO(perkj): Disabled due to
1137// https://code.google.com/p/webrtc/issues/detail?id=1454
1138TEST_F(JsepPeerConnectionP2PTestClient, DISABLED_LocalP2PTestTwoStreams) {
1139 ASSERT_TRUE(CreateTestClients());
  // Set an optional video constraint to a max width of 320 pixels to decrease
  // CPU usage.
1141 FakeConstraints constraint;
1142 constraint.SetOptionalMaxWidth(320);
1143 SetVideoConstraints(constraint, constraint);
1144 initializing_client()->AddMediaStream(true, true);
1145 initializing_client()->AddMediaStream(false, true);
1146 ASSERT_EQ(2u, initializing_client()->NumberOfLocalMediaStreams());
1147 LocalP2PTest();
1148 EXPECT_EQ(2u, receiving_client()->number_of_remote_streams());
1149}
1150
1151// Test that we can receive the audio output level from a remote audio track.
1152TEST_F(JsepPeerConnectionP2PTestClient, GetAudioOutputLevelStats) {
1153 ASSERT_TRUE(CreateTestClients());
1154 LocalP2PTest();
1155
1156 StreamCollectionInterface* remote_streams =
1157 initializing_client()->remote_streams();
1158 ASSERT_GT(remote_streams->count(), 0u);
1159 ASSERT_GT(remote_streams->at(0)->GetAudioTracks().size(), 0u);
1160 MediaStreamTrackInterface* remote_audio_track =
1161 remote_streams->at(0)->GetAudioTracks()[0];
1162
  // Get the audio output level stats. Note that the level is not available
  // until an RTCP packet has been received.
1165 EXPECT_TRUE_WAIT(
1166 initializing_client()->GetAudioOutputLevelStats(remote_audio_track) > 0,
1167 kMaxWaitForStatsMs);
1168}
1169
1170// Test that an audio input level is reported.
1171TEST_F(JsepPeerConnectionP2PTestClient, GetAudioInputLevelStats) {
1172 ASSERT_TRUE(CreateTestClients());
1173 LocalP2PTest();
1174
1175 // Get the audio input level stats. The level should be available very
1176 // soon after the test starts.
1177 EXPECT_TRUE_WAIT(initializing_client()->GetAudioInputLevelStats() > 0,
1178 kMaxWaitForStatsMs);
1179}
1180
1181// Test that we can get incoming byte counts from both audio and video tracks.
1182TEST_F(JsepPeerConnectionP2PTestClient, GetBytesReceivedStats) {
1183 ASSERT_TRUE(CreateTestClients());
1184 LocalP2PTest();
1185
1186 StreamCollectionInterface* remote_streams =
1187 initializing_client()->remote_streams();
1188 ASSERT_GT(remote_streams->count(), 0u);
1189 ASSERT_GT(remote_streams->at(0)->GetAudioTracks().size(), 0u);
1190 MediaStreamTrackInterface* remote_audio_track =
1191 remote_streams->at(0)->GetAudioTracks()[0];
1192 EXPECT_TRUE_WAIT(
1193 initializing_client()->GetBytesReceivedStats(remote_audio_track) > 0,
1194 kMaxWaitForStatsMs);
1195
1196 MediaStreamTrackInterface* remote_video_track =
1197 remote_streams->at(0)->GetVideoTracks()[0];
1198 EXPECT_TRUE_WAIT(
1199 initializing_client()->GetBytesReceivedStats(remote_video_track) > 0,
1200 kMaxWaitForStatsMs);
1201}
1202
1203// Test that we can get outgoing byte counts from both audio and video tracks.
1204TEST_F(JsepPeerConnectionP2PTestClient, GetBytesSentStats) {
1205 ASSERT_TRUE(CreateTestClients());
1206 LocalP2PTest();
1207
1208 StreamCollectionInterface* local_streams =
1209 initializing_client()->local_streams();
1210 ASSERT_GT(local_streams->count(), 0u);
1211 ASSERT_GT(local_streams->at(0)->GetAudioTracks().size(), 0u);
1212 MediaStreamTrackInterface* local_audio_track =
1213 local_streams->at(0)->GetAudioTracks()[0];
1214 EXPECT_TRUE_WAIT(
1215 initializing_client()->GetBytesSentStats(local_audio_track) > 0,
1216 kMaxWaitForStatsMs);
1217
1218 MediaStreamTrackInterface* local_video_track =
1219 local_streams->at(0)->GetVideoTracks()[0];
1220 EXPECT_TRUE_WAIT(
1221 initializing_client()->GetBytesSentStats(local_video_track) > 0,
1222 kMaxWaitForStatsMs);
1223}
1224
1225// This test sets up a call between two parties with audio, video and data.
1226TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestDataChannel) {
1227 FakeConstraints setup_constraints;
1228 setup_constraints.SetAllowRtpDataChannels();
1229 ASSERT_TRUE(CreateTestClients(&setup_constraints, &setup_constraints));
1230 initializing_client()->CreateDataChannel();
1231 LocalP2PTest();
1232 ASSERT_TRUE(initializing_client()->data_channel() != NULL);
1233 ASSERT_TRUE(receiving_client()->data_channel() != NULL);
1234 EXPECT_TRUE_WAIT(initializing_client()->data_observer()->IsOpen(),
1235 kMaxWaitMs);
1236 EXPECT_TRUE_WAIT(receiving_client()->data_observer()->IsOpen(),
1237 kMaxWaitMs);
1238
1239 std::string data = "hello world";
1240 initializing_client()->data_channel()->Send(DataBuffer(data));
1241 EXPECT_EQ_WAIT(data, receiving_client()->data_observer()->last_message(),
1242 kMaxWaitMs);
1243 receiving_client()->data_channel()->Send(DataBuffer(data));
1244 EXPECT_EQ_WAIT(data, initializing_client()->data_observer()->last_message(),
1245 kMaxWaitMs);
1246
1247 receiving_client()->data_channel()->Close();
1248 // Send new offer and answer.
1249 receiving_client()->Negotiate();
1250 EXPECT_FALSE(initializing_client()->data_observer()->IsOpen());
1251 EXPECT_FALSE(receiving_client()->data_observer()->IsOpen());
1252}
1253
// This test sets up a call between two parties and creates a data channel.
// It verifies that received data is buffered unless an observer has been
// registered.
// RTP data channels can receive data before the underlying transport has
// detected that a channel is writable, and thus data can be received before
// the data channel state changes to open. That is hard to test, but the same
// buffering is used in that case.
1261TEST_F(JsepPeerConnectionP2PTestClient, RegisterDataChannelObserver) {
1262 FakeConstraints setup_constraints;
1263 setup_constraints.SetAllowRtpDataChannels();
1264 ASSERT_TRUE(CreateTestClients(&setup_constraints, &setup_constraints));
1265 initializing_client()->CreateDataChannel();
1266 initializing_client()->Negotiate();
1267
1268 ASSERT_TRUE(initializing_client()->data_channel() != NULL);
1269 ASSERT_TRUE(receiving_client()->data_channel() != NULL);
1270 EXPECT_TRUE_WAIT(initializing_client()->data_observer()->IsOpen(),
1271 kMaxWaitMs);
1272 EXPECT_EQ_WAIT(DataChannelInterface::kOpen,
1273 receiving_client()->data_channel()->state(), kMaxWaitMs);
1274
1275 // Unregister the existing observer.
1276 receiving_client()->data_channel()->UnregisterObserver();
1277 std::string data = "hello world";
1278 initializing_client()->data_channel()->Send(DataBuffer(data));
  // Wait a while to allow the sent data to arrive before an observer is
  // registered.
1281 talk_base::Thread::Current()->ProcessMessages(100);
1282
1283 MockDataChannelObserver new_observer(receiving_client()->data_channel());
1284 EXPECT_EQ_WAIT(data, new_observer.last_message(), kMaxWaitMs);
1285}
1286
// This test sets up a call between two parties with audio and video, but only
// the initiating client supports data.
1289TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestReceiverDoesntSupportData) {
1290 FakeConstraints setup_constraints;
1291 setup_constraints.SetAllowRtpDataChannels();
1292 ASSERT_TRUE(CreateTestClients(&setup_constraints, NULL));
1293 initializing_client()->CreateDataChannel();
1294 LocalP2PTest();
1295 EXPECT_TRUE(initializing_client()->data_channel() != NULL);
1296 EXPECT_FALSE(receiving_client()->data_channel());
1297 EXPECT_FALSE(initializing_client()->data_observer()->IsOpen());
1298}
1299
// This test sets up a call between two parties with audio and video. Once
// audio and video are set up and flowing, a data channel is negotiated.
1302TEST_F(JsepPeerConnectionP2PTestClient, AddDataChannelAfterRenegotiation) {
1303 FakeConstraints setup_constraints;
1304 setup_constraints.SetAllowRtpDataChannels();
1305 ASSERT_TRUE(CreateTestClients(&setup_constraints, &setup_constraints));
1306 LocalP2PTest();
1307 initializing_client()->CreateDataChannel();
1308 // Send new offer and answer.
1309 initializing_client()->Negotiate();
1310 ASSERT_TRUE(initializing_client()->data_channel() != NULL);
1311 ASSERT_TRUE(receiving_client()->data_channel() != NULL);
1312 EXPECT_TRUE_WAIT(initializing_client()->data_observer()->IsOpen(),
1313 kMaxWaitMs);
1314 EXPECT_TRUE_WAIT(receiving_client()->data_observer()->IsOpen(),
1315 kMaxWaitMs);
1316}
1317
// This test sets up a call between two parties with audio and video.
// During the call, the initializing side restarts ICE, and the test verifies
// that new ICE candidates are generated and audio and video can still flow.
1321TEST_F(JsepPeerConnectionP2PTestClient, IceRestart) {
1322 ASSERT_TRUE(CreateTestClients());
1323
1324 // Negotiate and wait for ice completion and make sure audio and video plays.
1325 LocalP2PTest();
1326
  // Create an SDP string of the first audio candidate for both clients.
1328 const webrtc::IceCandidateCollection* audio_candidates_initiator =
1329 initializing_client()->pc()->local_description()->candidates(0);
1330 const webrtc::IceCandidateCollection* audio_candidates_receiver =
1331 receiving_client()->pc()->local_description()->candidates(0);
1332 ASSERT_GT(audio_candidates_initiator->count(), 0u);
1333 ASSERT_GT(audio_candidates_receiver->count(), 0u);
1334 std::string initiator_candidate;
1335 EXPECT_TRUE(
1336 audio_candidates_initiator->at(0)->ToString(&initiator_candidate));
1337 std::string receiver_candidate;
1338 EXPECT_TRUE(audio_candidates_receiver->at(0)->ToString(&receiver_candidate));
1339
1340 // Restart ice on the initializing client.
1341 receiving_client()->SetExpectIceRestart(true);
1342 initializing_client()->IceRestart();
1343
1344 // Negotiate and wait for ice completion again and make sure audio and video
1345 // plays.
1346 LocalP2PTest();
1347
  // Create an SDP string of the first audio candidate for both clients again.
1349 const webrtc::IceCandidateCollection* audio_candidates_initiator_restart =
1350 initializing_client()->pc()->local_description()->candidates(0);
1351 const webrtc::IceCandidateCollection* audio_candidates_reciever_restart =
1352 receiving_client()->pc()->local_description()->candidates(0);
1353 ASSERT_GT(audio_candidates_initiator_restart->count(), 0u);
1354 ASSERT_GT(audio_candidates_reciever_restart->count(), 0u);
1355 std::string initiator_candidate_restart;
1356 EXPECT_TRUE(audio_candidates_initiator_restart->at(0)->ToString(
1357 &initiator_candidate_restart));
1358 std::string receiver_candidate_restart;
1359 EXPECT_TRUE(audio_candidates_reciever_restart->at(0)->ToString(
1360 &receiver_candidate_restart));
1361
  // Verify that the first candidates in the local session descriptions have
  // changed.
1364 EXPECT_NE(initiator_candidate, initiator_candidate_restart);
1365 EXPECT_NE(receiver_candidate, receiver_candidate_restart);
1366}
1367
1368
// This test sets up a Jsep call between two parties with an external
// VideoDecoderFactory.
1371TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestWithVideoDecoderFactory) {
1372 ASSERT_TRUE(CreateTestClients());
1373 EnableVideoDecoderFactory();
1374 LocalP2PTest();
1375}