pbos@webrtc.org | 1d09690 | 2013-12-13 12:48:05 +0000 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved. |
| 3 | * |
| 4 | * Use of this source code is governed by a BSD-style license |
| 5 | * that can be found in the LICENSE file in the root of the source |
| 6 | * tree. An additional intellectual property rights grant can be found |
| 7 | * in the file PATENTS. All contributing project authors may |
| 8 | * be found in the AUTHORS file in the root of the source tree. |
| 9 | */ |
| 10 | #include <assert.h> |
| 11 | |
| 12 | #include <algorithm> |
| 13 | #include <sstream> |
| 14 | #include <string> |
| 15 | |
| 16 | #include "testing/gtest/include/gtest/gtest.h" |
| 17 | |
| 18 | #include "webrtc/call.h" |
| 19 | #include "webrtc/modules/remote_bitrate_estimator/include/rtp_to_ntp.h" |
| 20 | #include "webrtc/modules/rtp_rtcp/interface/rtp_header_parser.h" |
| 21 | #include "webrtc/modules/rtp_rtcp/source/rtcp_utility.h" |
| 22 | #include "webrtc/system_wrappers/interface/critical_section_wrapper.h" |
| 23 | #include "webrtc/system_wrappers/interface/scoped_ptr.h" |
| 24 | #include "webrtc/test/direct_transport.h" |
| 25 | #include "webrtc/test/fake_audio_device.h" |
| 26 | #include "webrtc/test/fake_decoder.h" |
| 27 | #include "webrtc/test/fake_encoder.h" |
| 28 | #include "webrtc/test/frame_generator.h" |
| 29 | #include "webrtc/test/frame_generator_capturer.h" |
| 30 | #include "webrtc/test/rtp_rtcp_observer.h" |
| 31 | #include "webrtc/test/testsupport/fileutils.h" |
| 32 | #include "webrtc/test/testsupport/perf_test.h" |
| 33 | #include "webrtc/video/transport_adapter.h" |
| 34 | #include "webrtc/voice_engine/include/voe_base.h" |
| 35 | #include "webrtc/voice_engine/include/voe_codec.h" |
| 36 | #include "webrtc/voice_engine/include/voe_network.h" |
| 37 | #include "webrtc/voice_engine/include/voe_rtp_rtcp.h" |
| 38 | #include "webrtc/voice_engine/include/voe_video_sync.h" |
| 39 | |
| 40 | namespace webrtc { |
| 41 | |
// Generous timeout for sync tests: convergence can take tens of seconds.
// Made const for consistency with the other file-level constants below.
static const unsigned int kLongTimeoutMs = 120 * 1000;
// SSRC used by the video send stream.
static const uint32_t kSendSsrc = 0x654321;
// Local SSRC the receiver reports in its RTCP feedback.
static const uint32_t kReceiverLocalSsrc = 0x123456;
// RTP payload type assigned to the fake video encoder's codec.
static const uint8_t kSendPayloadType = 125;
| 46 | |
// Empty gtest fixture: currently adds no shared state or setup, but gives
// the perf tests in this file a common CallPerfTest test-suite name.
class CallPerfTest : public ::testing::Test {
};
| 49 | |
| 50 | class SyncRtcpObserver : public test::RtpRtcpObserver { |
| 51 | public: |
stefan@webrtc.org | faada6e | 2013-12-18 20:28:25 +0000 | [diff] [blame^] | 52 | explicit SyncRtcpObserver(const FakeNetworkPipe::Config& config) |
| 53 | : test::RtpRtcpObserver(kLongTimeoutMs, config), |
pbos@webrtc.org | 1d09690 | 2013-12-13 12:48:05 +0000 | [diff] [blame] | 54 | critical_section_(CriticalSectionWrapper::CreateCriticalSection()) {} |
| 55 | |
| 56 | virtual Action OnSendRtcp(const uint8_t* packet, size_t length) OVERRIDE { |
| 57 | RTCPUtility::RTCPParserV2 parser(packet, length, true); |
| 58 | EXPECT_TRUE(parser.IsValid()); |
| 59 | |
| 60 | for (RTCPUtility::RTCPPacketTypes packet_type = parser.Begin(); |
| 61 | packet_type != RTCPUtility::kRtcpNotValidCode; |
| 62 | packet_type = parser.Iterate()) { |
| 63 | if (packet_type == RTCPUtility::kRtcpSrCode) { |
| 64 | const RTCPUtility::RTCPPacket& packet = parser.Packet(); |
| 65 | synchronization::RtcpMeasurement ntp_rtp_pair( |
| 66 | packet.SR.NTPMostSignificant, |
| 67 | packet.SR.NTPLeastSignificant, |
| 68 | packet.SR.RTPTimestamp); |
| 69 | StoreNtpRtpPair(ntp_rtp_pair); |
| 70 | } |
| 71 | } |
| 72 | return SEND_PACKET; |
| 73 | } |
| 74 | |
| 75 | int64_t RtpTimestampToNtp(uint32_t timestamp) const { |
| 76 | CriticalSectionScoped cs(critical_section_.get()); |
| 77 | int64_t timestamp_in_ms = -1; |
| 78 | if (ntp_rtp_pairs_.size() == 2) { |
| 79 | // TODO(stefan): We can't EXPECT_TRUE on this call due to a bug in the |
| 80 | // RTCP sender where it sends RTCP SR before any RTP packets, which leads |
| 81 | // to a bogus NTP/RTP mapping. |
| 82 | synchronization::RtpToNtpMs(timestamp, ntp_rtp_pairs_, ×tamp_in_ms); |
| 83 | return timestamp_in_ms; |
| 84 | } |
| 85 | return -1; |
| 86 | } |
| 87 | |
| 88 | private: |
| 89 | void StoreNtpRtpPair(synchronization::RtcpMeasurement ntp_rtp_pair) { |
| 90 | CriticalSectionScoped cs(critical_section_.get()); |
| 91 | for (synchronization::RtcpList::iterator it = ntp_rtp_pairs_.begin(); |
| 92 | it != ntp_rtp_pairs_.end(); |
| 93 | ++it) { |
| 94 | if (ntp_rtp_pair.ntp_secs == it->ntp_secs && |
| 95 | ntp_rtp_pair.ntp_frac == it->ntp_frac) { |
| 96 | // This RTCP has already been added to the list. |
| 97 | return; |
| 98 | } |
| 99 | } |
| 100 | // We need two RTCP SR reports to map between RTP and NTP. More than two |
| 101 | // will not improve the mapping. |
| 102 | if (ntp_rtp_pairs_.size() == 2) { |
| 103 | ntp_rtp_pairs_.pop_back(); |
| 104 | } |
| 105 | ntp_rtp_pairs_.push_front(ntp_rtp_pair); |
| 106 | } |
| 107 | |
| 108 | scoped_ptr<CriticalSectionWrapper> critical_section_; |
| 109 | synchronization::RtcpList ntp_rtp_pairs_; |
| 110 | }; |
| 111 | |
| 112 | class VideoRtcpAndSyncObserver : public SyncRtcpObserver, public VideoRenderer { |
| 113 | static const int kInSyncThresholdMs = 50; |
| 114 | static const int kStartupTimeMs = 2000; |
| 115 | static const int kMinRunTimeMs = 30000; |
| 116 | |
| 117 | public: |
| 118 | VideoRtcpAndSyncObserver(Clock* clock, |
| 119 | int voe_channel, |
| 120 | VoEVideoSync* voe_sync, |
| 121 | SyncRtcpObserver* audio_observer) |
stefan@webrtc.org | faada6e | 2013-12-18 20:28:25 +0000 | [diff] [blame^] | 122 | : SyncRtcpObserver(FakeNetworkPipe::Config()), |
pbos@webrtc.org | 1d09690 | 2013-12-13 12:48:05 +0000 | [diff] [blame] | 123 | clock_(clock), |
| 124 | voe_channel_(voe_channel), |
| 125 | voe_sync_(voe_sync), |
| 126 | audio_observer_(audio_observer), |
| 127 | creation_time_ms_(clock_->TimeInMilliseconds()), |
| 128 | first_time_in_sync_(-1) {} |
| 129 | |
| 130 | virtual void RenderFrame(const I420VideoFrame& video_frame, |
| 131 | int time_to_render_ms) OVERRIDE { |
| 132 | int64_t now_ms = clock_->TimeInMilliseconds(); |
| 133 | uint32_t playout_timestamp = 0; |
| 134 | if (voe_sync_->GetPlayoutTimestamp(voe_channel_, playout_timestamp) != 0) |
| 135 | return; |
| 136 | int64_t latest_audio_ntp = |
| 137 | audio_observer_->RtpTimestampToNtp(playout_timestamp); |
| 138 | int64_t latest_video_ntp = RtpTimestampToNtp(video_frame.timestamp()); |
| 139 | if (latest_audio_ntp < 0 || latest_video_ntp < 0) |
| 140 | return; |
| 141 | int time_until_render_ms = |
| 142 | std::max(0, static_cast<int>(video_frame.render_time_ms() - now_ms)); |
| 143 | latest_video_ntp += time_until_render_ms; |
| 144 | int64_t stream_offset = latest_audio_ntp - latest_video_ntp; |
| 145 | std::stringstream ss; |
| 146 | ss << stream_offset; |
| 147 | webrtc::test::PrintResult( |
| 148 | "stream_offset", "", "synchronization", ss.str(), "ms", false); |
| 149 | int64_t time_since_creation = now_ms - creation_time_ms_; |
| 150 | // During the first couple of seconds audio and video can falsely be |
| 151 | // estimated as being synchronized. We don't want to trigger on those. |
| 152 | if (time_since_creation < kStartupTimeMs) |
| 153 | return; |
| 154 | if (abs(latest_audio_ntp - latest_video_ntp) < kInSyncThresholdMs) { |
| 155 | if (first_time_in_sync_ == -1) { |
| 156 | first_time_in_sync_ = now_ms; |
| 157 | webrtc::test::PrintResult("sync_convergence_time", |
| 158 | "", |
| 159 | "synchronization", |
| 160 | time_since_creation, |
| 161 | "ms", |
| 162 | false); |
| 163 | } |
| 164 | if (time_since_creation > kMinRunTimeMs) |
| 165 | observation_complete_->Set(); |
| 166 | } |
| 167 | } |
| 168 | |
| 169 | private: |
| 170 | Clock* clock_; |
| 171 | int voe_channel_; |
| 172 | VoEVideoSync* voe_sync_; |
| 173 | SyncRtcpObserver* audio_observer_; |
| 174 | int64_t creation_time_ms_; |
| 175 | int64_t first_time_in_sync_; |
| 176 | }; |
| 177 | |
// End-to-end A/V sync test: sends audio through VoiceEngine and video through
// a webrtc::Call, routing both over observed fake transports (the audio path
// with 500 ms of added queueing delay), then waits until the renderer-side
// observer measures the streams as synchronized for the minimum run time.
TEST_F(CallPerfTest, PlaysOutAudioAndVideoInSync) {
  // --- VoiceEngine setup: interfaces, fake audio device, one channel. ---
  VoiceEngine* voice_engine = VoiceEngine::Create();
  VoEBase* voe_base = VoEBase::GetInterface(voice_engine);
  VoECodec* voe_codec = VoECodec::GetInterface(voice_engine);
  VoENetwork* voe_network = VoENetwork::GetInterface(voice_engine);
  VoEVideoSync* voe_sync = VoEVideoSync::GetInterface(voice_engine);
  const std::string audio_filename =
      test::ResourcePath("voice_engine/audio_long16", "pcm");
  ASSERT_STRNE("", audio_filename.c_str());
  test::FakeAudioDevice fake_audio_device(Clock::GetRealTimeClock(),
                                          audio_filename);
  EXPECT_EQ(0, voe_base->Init(&fake_audio_device, NULL));
  int channel = voe_base->CreateChannel();

  // Delay the audio path so the sync logic has real skew to correct.
  FakeNetworkPipe::Config net_config;
  net_config.queue_delay_ms = 500;
  SyncRtcpObserver audio_observer(net_config);
  VideoRtcpAndSyncObserver observer(
      Clock::GetRealTimeClock(), channel, voe_sync, &audio_observer);

  // --- Call setup: sender and receiver calls over the observed transport. ---
  Call::Config receiver_config(observer.ReceiveTransport());
  receiver_config.voice_engine = voice_engine;
  scoped_ptr<Call> sender_call(
      Call::Create(Call::Config(observer.SendTransport())));
  scoped_ptr<Call> receiver_call(Call::Create(receiver_config));
  CodecInst isac = {103, "ISAC", 16000, 480, 1, 32000};
  EXPECT_EQ(0, voe_codec->SetSendCodec(channel, isac));

  // Bridges received packets from the fake transport into VoiceEngine,
  // dispatching on RTP vs. RTCP.
  class VoicePacketReceiver : public PacketReceiver {
   public:
    VoicePacketReceiver(int channel, VoENetwork* voe_network)
        : channel_(channel),
          voe_network_(voe_network),
          parser_(RtpHeaderParser::Create()) {}
    virtual bool DeliverPacket(const uint8_t* packet, size_t length) {
      int ret;
      if (parser_->IsRtcp(packet, static_cast<int>(length))) {
        ret = voe_network_->ReceivedRTCPPacket(
            channel_, packet, static_cast<unsigned int>(length));
      } else {
        ret = voe_network_->ReceivedRTPPacket(
            channel_, packet, static_cast<unsigned int>(length));
      }
      return ret == 0;
    }

   private:
    int channel_;
    VoENetwork* voe_network_;
    scoped_ptr<RtpHeaderParser> parser_;
  } voe_packet_receiver(channel, voe_network);

  audio_observer.SetReceivers(&voe_packet_receiver, &voe_packet_receiver);

  // Route VoiceEngine's outgoing packets through the observed audio pipe.
  internal::TransportAdapter transport_adapter(audio_observer.SendTransport());
  EXPECT_EQ(0,
            voe_network->RegisterExternalTransport(channel, transport_adapter));

  observer.SetReceivers(receiver_call->Receiver(), sender_call->Receiver());

  // --- Video setup: fake encoder/decoder, send and receive streams. ---
  test::FakeEncoder fake_encoder(Clock::GetRealTimeClock());
  test::FakeDecoder fake_decoder;

  VideoSendStream::Config send_config = sender_call->GetDefaultSendConfig();
  send_config.rtp.ssrcs.push_back(kSendSsrc);
  send_config.encoder = &fake_encoder;
  send_config.internal_source = false;
  test::FakeEncoder::SetCodecSettings(&send_config.codec, 1);
  send_config.codec.plType = kSendPayloadType;

  VideoReceiveStream::Config receive_config =
      receiver_call->GetDefaultReceiveConfig();
  receive_config.codecs.clear();
  receive_config.codecs.push_back(send_config.codec);
  ExternalVideoDecoder decoder;
  decoder.decoder = &fake_decoder;
  decoder.payload_type = send_config.codec.plType;
  receive_config.external_decoders.push_back(decoder);
  receive_config.rtp.remote_ssrc = send_config.rtp.ssrcs[0];
  receive_config.rtp.local_ssrc = kReceiverLocalSsrc;
  // The observer doubles as the renderer so it sees every decoded frame.
  receive_config.renderer = &observer;
  // Associates the video receive stream with the audio channel for A/V sync.
  receive_config.audio_channel_id = channel;

  VideoSendStream* send_stream =
      sender_call->CreateVideoSendStream(send_config);
  VideoReceiveStream* receive_stream =
      receiver_call->CreateVideoReceiveStream(receive_config);
  scoped_ptr<test::FrameGeneratorCapturer> capturer(
      test::FrameGeneratorCapturer::Create(send_stream->Input(),
                                           send_config.codec.width,
                                           send_config.codec.height,
                                           30,
                                           Clock::GetRealTimeClock()));
  // --- Start everything: video first, then audio. ---
  receive_stream->StartReceiving();
  send_stream->StartSending();
  capturer->Start();

  fake_audio_device.Start();
  EXPECT_EQ(0, voe_base->StartPlayout(channel));
  EXPECT_EQ(0, voe_base->StartReceive(channel));
  EXPECT_EQ(0, voe_base->StartSend(channel));

  // Blocks until the observer signals sync (or kLongTimeoutMs elapses).
  EXPECT_EQ(kEventSignaled, observer.Wait())
      << "Timed out while waiting for audio and video to be synchronized.";

  // --- Teardown in reverse order of startup. ---
  EXPECT_EQ(0, voe_base->StopSend(channel));
  EXPECT_EQ(0, voe_base->StopReceive(channel));
  EXPECT_EQ(0, voe_base->StopPlayout(channel));
  fake_audio_device.Stop();

  capturer->Stop();
  send_stream->StopSending();
  receive_stream->StopReceiving();
  observer.StopSending();
  audio_observer.StopSending();

  voe_base->DeleteChannel(channel);
  voe_base->Release();
  voe_codec->Release();
  voe_network->Release();
  voe_sync->Release();
  sender_call->DestroyVideoSendStream(send_stream);
  receiver_call->DestroyVideoReceiveStream(receive_stream);
  VoiceEngine::Delete(voice_engine);
}
| 303 | } // namespace webrtc |