blob: 68a4921147a788b82cb19db4ffe56c10edfeabe2 [file] [log] [blame]
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +00001/*
2 * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
Henrik Kjellander74640892015-10-29 11:31:02 +010011#include "webrtc/modules/audio_coding/neteq/include/neteq.h"
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +000012
pbos@webrtc.org3ecc1622014-03-07 15:23:34 +000013#include <math.h>
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +000014#include <stdlib.h>
15#include <string.h> // memset
16
turaj@webrtc.org8d1cdaa2014-04-11 18:47:55 +000017#include <algorithm>
kwiberg2d0c3322016-02-14 09:28:33 -080018#include <memory>
turaj@webrtc.org78b41a02013-11-22 20:27:07 +000019#include <set>
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +000020#include <string>
21#include <vector>
22
turaj@webrtc.orga6101d72013-10-01 22:01:09 +000023#include "gflags/gflags.h"
kwiberg087bd342017-02-10 08:15:44 -080024#include "webrtc/api/audio_codecs/builtin_audio_decoder_factory.h"
kwiberg77eab702016-09-28 17:42:01 -070025#include "webrtc/base/ignore_wundef.h"
henrik.lundin246ef3e2017-04-24 09:14:32 -070026#include "webrtc/base/protobuf_utils.h"
minyue4f906772016-04-29 11:05:14 -070027#include "webrtc/base/sha1digest.h"
28#include "webrtc/base/stringencode.h"
henrik.lundin246ef3e2017-04-24 09:14:32 -070029#include "webrtc/common_types.h"
kwibergac9f8762016-09-30 22:29:43 -070030#include "webrtc/modules/audio_coding/codecs/pcm16b/pcm16b.h"
henrik.lundin@webrtc.org9b8102c2014-08-21 08:27:44 +000031#include "webrtc/modules/audio_coding/neteq/tools/audio_loop.h"
henrik.lundin@webrtc.org966a7082014-11-17 09:08:38 +000032#include "webrtc/modules/audio_coding/neteq/tools/rtp_file_source.h"
henrik.lundin6d8e0112016-03-04 10:34:21 -080033#include "webrtc/modules/include/module_common_types.h"
kwibergac9f8762016-09-30 22:29:43 -070034#include "webrtc/test/gtest.h"
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +000035#include "webrtc/test/testsupport/fileutils.h"
36#include "webrtc/typedefs.h"
37
minyue5f026d02015-12-16 07:36:04 -080038#ifdef WEBRTC_NETEQ_UNITTEST_BITEXACT
kwiberg77eab702016-09-28 17:42:01 -070039RTC_PUSH_IGNORING_WUNDEF()
minyue5f026d02015-12-16 07:36:04 -080040#ifdef WEBRTC_ANDROID_PLATFORM_BUILD
41#include "external/webrtc/webrtc/modules/audio_coding/neteq/neteq_unittest.pb.h"
42#else
kjellandere3e902e2017-02-28 08:01:46 -080043#include "webrtc/modules/audio_coding/neteq/neteq_unittest.pb.h"
minyue5f026d02015-12-16 07:36:04 -080044#endif
kwiberg77eab702016-09-28 17:42:01 -070045RTC_POP_IGNORING_WUNDEF()
minyue5f026d02015-12-16 07:36:04 -080046#endif
47
turaj@webrtc.orga6101d72013-10-01 22:01:09 +000048DEFINE_bool(gen_ref, false, "Generate reference files.");
49
kwiberg5adaf732016-10-04 09:33:27 -070050namespace webrtc {
51
minyue5f026d02015-12-16 07:36:04 -080052namespace {
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +000053
// Picks, at compile time, the reference checksum that matches the platform
// this test binary targets. All four candidate checksums must be supplied;
// exactly one of them is returned.
const std::string& PlatformChecksum(const std::string& checksum_general,
                                    const std::string& checksum_android,
                                    const std::string& checksum_win_32,
                                    const std::string& checksum_win_64) {
#if defined(WEBRTC_WIN) && defined(WEBRTC_ARCH_64_BITS)
  return checksum_win_64;
#elif defined(WEBRTC_WIN)
  return checksum_win_32;
#elif defined(WEBRTC_ANDROID)
  return checksum_android;
#else
  // Neither Android nor Windows: use the general (desktop Linux/Mac) value.
  return checksum_general;
#endif
}
70
#ifdef WEBRTC_NETEQ_UNITTEST_BITEXACT
// Copies every field of the raw NetEq network-statistics struct into the
// corresponding field of the protobuf message, so the statistics can be
// serialized into the bit-exactness reference stream.
void Convert(const webrtc::NetEqNetworkStatistics& stats_raw,
             webrtc::neteq_unittest::NetEqNetworkStatistics* stats) {
  stats->set_current_buffer_size_ms(stats_raw.current_buffer_size_ms);
  stats->set_preferred_buffer_size_ms(stats_raw.preferred_buffer_size_ms);
  stats->set_jitter_peaks_found(stats_raw.jitter_peaks_found);
  stats->set_packet_loss_rate(stats_raw.packet_loss_rate);
  stats->set_packet_discard_rate(stats_raw.packet_discard_rate);
  stats->set_expand_rate(stats_raw.expand_rate);
  stats->set_speech_expand_rate(stats_raw.speech_expand_rate);
  stats->set_preemptive_rate(stats_raw.preemptive_rate);
  stats->set_accelerate_rate(stats_raw.accelerate_rate);
  stats->set_secondary_decoded_rate(stats_raw.secondary_decoded_rate);
  stats->set_clockdrift_ppm(stats_raw.clockdrift_ppm);
  stats->set_added_zero_samples(stats_raw.added_zero_samples);
  stats->set_mean_waiting_time_ms(stats_raw.mean_waiting_time_ms);
  stats->set_median_waiting_time_ms(stats_raw.median_waiting_time_ms);
  stats->set_min_waiting_time_ms(stats_raw.min_waiting_time_ms);
  stats->set_max_waiting_time_ms(stats_raw.max_waiting_time_ms);
}

// Same as above, but for RTCP statistics.
void Convert(const webrtc::RtcpStatistics& stats_raw,
             webrtc::neteq_unittest::RtcpStatistics* stats) {
  stats->set_fraction_lost(stats_raw.fraction_lost);
  stats->set_cumulative_lost(stats_raw.cumulative_lost);
  stats->set_extended_max_sequence_number(
      stats_raw.extended_max_sequence_number);
  stats->set_jitter(stats_raw.jitter);
}

// Appends a length-prefixed message both to the optional reference file
// (|file| may be null) and to the running message digest. The 4-byte size
// prefix is included in the digest as well, so record boundaries are part
// of the checksum.
void AddMessage(FILE* file, rtc::MessageDigest* digest,
                const std::string& message) {
  int32_t size = message.length();
  if (file)
    ASSERT_EQ(1u, fwrite(&size, sizeof(size), 1, file));
  digest->Update(&size, sizeof(size));

  if (file)
    ASSERT_EQ(static_cast<size_t>(size),
              fwrite(message.data(), sizeof(char), size, file));
  digest->Update(message.data(), sizeof(char) * size);
}

#endif  // WEBRTC_NETEQ_UNITTEST_BITEXACT
115
// Registers the full set of payload types used by the tests in this file on
// |neteq|. Codec-specific registrations are compiled in only when the
// corresponding codec is built (see the WEBRTC_CODEC_* guards below).
void LoadDecoders(webrtc::NetEq* neteq) {
  ASSERT_EQ(true,
            neteq->RegisterPayloadType(0, SdpAudioFormat("pcmu", 8000, 1)));
  // Use non-SdpAudioFormat argument when registering PCMa, so that we get test
  // coverage for that as well.
  ASSERT_EQ(0, neteq->RegisterPayloadType(webrtc::NetEqDecoder::kDecoderPCMa,
                                          "pcma", 8));
#ifdef WEBRTC_CODEC_ILBC
  ASSERT_EQ(true,
            neteq->RegisterPayloadType(102, SdpAudioFormat("ilbc", 8000, 1)));
#endif
#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
  ASSERT_EQ(true,
            neteq->RegisterPayloadType(103, SdpAudioFormat("isac", 16000, 1)));
#endif
#ifdef WEBRTC_CODEC_ISAC
  // iSAC super-wideband (32 kHz) is only available in the floating-point
  // iSAC build, not in ISACFX.
  ASSERT_EQ(true,
            neteq->RegisterPayloadType(104, SdpAudioFormat("isac", 32000, 1)));
#endif
#ifdef WEBRTC_CODEC_OPUS
  ASSERT_EQ(true,
            neteq->RegisterPayloadType(
                111, SdpAudioFormat("opus", 48000, 2, {{"stereo", "0"}})));
#endif
  // Linear PCM (L16) at 8/16/32 kHz, and comfort noise at 8/16 kHz.
  ASSERT_EQ(true,
            neteq->RegisterPayloadType(93, SdpAudioFormat("L16", 8000, 1)));
  ASSERT_EQ(true,
            neteq->RegisterPayloadType(94, SdpAudioFormat("L16", 16000, 1)));
  ASSERT_EQ(true,
            neteq->RegisterPayloadType(95, SdpAudioFormat("L16", 32000, 1)));
  ASSERT_EQ(true,
            neteq->RegisterPayloadType(13, SdpAudioFormat("cn", 8000, 1)));
  ASSERT_EQ(true,
            neteq->RegisterPayloadType(98, SdpAudioFormat("cn", 16000, 1)));
}
minyue5f026d02015-12-16 07:36:04 -0800151} // namespace
152
// Accumulates test results (audio samples and statistics messages) into a
// SHA-1 digest, and optionally mirrors them into a reference file so new
// reference data can be generated (see --gen_ref).
class ResultSink {
 public:
  // If |output_file| is non-empty, everything added is also written to that
  // file; otherwise results only feed the digest.
  explicit ResultSink(const std::string& output_file);
  ~ResultSink();

  // Adds the first |length| elements of the array |test_results| to the
  // digest (and to the reference file, if open).
  template<typename T, size_t n> void AddResult(
      const T (&test_results)[n],
      size_t length);

  // Serializes the statistics to a protobuf string and adds it as a
  // length-prefixed message. Requires WEBRTC_NETEQ_UNITTEST_BITEXACT.
  void AddResult(const NetEqNetworkStatistics& stats);
  void AddResult(const RtcpStatistics& stats);

  // Finalizes the digest and EXPECTs its hex encoding to equal
  // |ref_check_sum|.
  void VerifyChecksum(const std::string& ref_check_sum);

 private:
  FILE* output_fp_;  // Reference output file; null when not generating refs.
  std::unique_ptr<rtc::MessageDigest> digest_;  // SHA-1 over all results.
};
171
minyue4f906772016-04-29 11:05:14 -0700172ResultSink::ResultSink(const std::string &output_file)
173 : output_fp_(nullptr),
174 digest_(new rtc::Sha1Digest()) {
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000175 if (!output_file.empty()) {
176 output_fp_ = fopen(output_file.c_str(), "wb");
177 EXPECT_TRUE(output_fp_ != NULL);
178 }
179}
180
minyue4f906772016-04-29 11:05:14 -0700181ResultSink::~ResultSink() {
182 if (output_fp_)
183 fclose(output_fp_);
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000184}
185
186template<typename T, size_t n>
minyue4f906772016-04-29 11:05:14 -0700187void ResultSink::AddResult(const T (&test_results)[n], size_t length) {
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000188 if (output_fp_) {
189 ASSERT_EQ(length, fwrite(&test_results, sizeof(T), length, output_fp_));
190 }
minyue4f906772016-04-29 11:05:14 -0700191 digest_->Update(&test_results, sizeof(T) * length);
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000192}
193
// Serializes |stats_raw| via its protobuf mirror type and appends it to the
// digest/reference file as a length-prefixed message. Compiled out (and
// failing loudly) when protobuf support is unavailable.
void ResultSink::AddResult(const NetEqNetworkStatistics& stats_raw) {
#ifdef WEBRTC_NETEQ_UNITTEST_BITEXACT
  neteq_unittest::NetEqNetworkStatistics stats;
  Convert(stats_raw, &stats);

  ProtoString stats_string;
  ASSERT_TRUE(stats.SerializeToString(&stats_string));
  AddMessage(output_fp_, digest_.get(), stats_string);
#else
  FAIL() << "Writing to reference file requires Proto Buffer.";
#endif  // WEBRTC_NETEQ_UNITTEST_BITEXACT
}
206
// Same as the NetEqNetworkStatistics overload, but for RTCP statistics.
void ResultSink::AddResult(const RtcpStatistics& stats_raw) {
#ifdef WEBRTC_NETEQ_UNITTEST_BITEXACT
  neteq_unittest::RtcpStatistics stats;
  Convert(stats_raw, &stats);

  ProtoString stats_string;
  ASSERT_TRUE(stats.SerializeToString(&stats_string));
  AddMessage(output_fp_, digest_.get(), stats_string);
#else
  FAIL() << "Writing to reference file requires Proto Buffer.";
#endif  // WEBRTC_NETEQ_UNITTEST_BITEXACT
}
219
minyue4f906772016-04-29 11:05:14 -0700220void ResultSink::VerifyChecksum(const std::string& checksum) {
221 std::vector<char> buffer;
222 buffer.resize(digest_->Size());
223 digest_->Finish(&buffer[0], buffer.size());
224 const std::string result = rtc::hex_encode(&buffer[0], digest_->Size());
225 EXPECT_EQ(checksum, result);
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000226}
227
// Base fixture for NetEq decoding tests: owns a NetEq instance, feeds it
// packets from an RTP dump file in simulated 10 ms steps, and compares the
// produced audio/statistics against reference checksums.
class NetEqDecodingTest : public ::testing::Test {
 protected:
  // NetEQ must be polled for data once every 10 ms. Thus, neither of the
  // constants below can be changed.
  static const int kTimeStepMs = 10;
  // Samples per 10 ms output block at each supported rate.
  static const size_t kBlockSize8kHz = kTimeStepMs * 8;
  static const size_t kBlockSize16kHz = kTimeStepMs * 16;
  static const size_t kBlockSize32kHz = kTimeStepMs * 32;
  static const size_t kBlockSize48kHz = kTimeStepMs * 48;
  static const int kInitSampleRateHz = 8000;

  NetEqDecodingTest();
  virtual void SetUp();
  virtual void TearDown();
  void SelectDecoders(NetEqDecoder* used_codec);
  // Creates |rtp_source_| from the given RTP dump file.
  void OpenInputFile(const std::string &rtp_file);
  // Advances the simulation by one 10 ms step: inserts all packets whose
  // timestamp has come due, then pulls one block of audio from NetEq.
  void Process();

  // Runs the full input file through NetEq and verifies audio, network-stats
  // and RTCP-stats checksums. With |gen_ref| set, also writes new reference
  // files.
  void DecodeAndCompare(const std::string& rtp_file,
                        const std::string& output_checksum,
                        const std::string& network_stats_checksum,
                        const std::string& rtcp_stats_checksum,
                        bool gen_ref);

  // Fills |rtp_info| with a PCM16b-WB header (payload type 94).
  static void PopulateRtpInfo(int frame_index,
                              int timestamp,
                              RTPHeader* rtp_info);
  // Fills |rtp_info| with a WB CNG header (payload type 98) and writes a
  // one-byte noise-level payload into |payload|.
  static void PopulateCng(int frame_index,
                          int timestamp,
                          RTPHeader* rtp_info,
                          uint8_t* payload,
                          size_t* payload_len);

  void WrapTest(uint16_t start_seq_no, uint32_t start_timestamp,
                const std::set<uint16_t>& drop_seq_numbers,
                bool expect_seq_no_wrap, bool expect_timestamp_wrap);

  void LongCngWithClockDrift(double drift_factor,
                             double network_freeze_ms,
                             bool pull_audio_during_freeze,
                             int delay_tolerance_ms,
                             int max_time_to_speech_ms);

  void DuplicateCng();

  rtc::Optional<uint32_t> PlayoutTimestamp();

  NetEq* neteq_;  // Owned; created in SetUp(), deleted in TearDown().
  NetEq::Config config_;
  std::unique_ptr<test::RtpFileSource> rtp_source_;
  std::unique_ptr<test::Packet> packet_;  // Next packet to insert, if any.
  unsigned int sim_clock_;  // Simulated wall clock, in ms.
  AudioFrame out_frame_;    // Most recent audio pulled from NetEq.
  int output_sample_rate_;
  int algorithmic_delay_ms_;  // NetEq's inherent delay, measured in SetUp().
};
284
// Allocating the static const so that it can be passed by reference.
// NOTE(review): kBlockSize48kHz has no out-of-class definition here — it
// would fail to link if ODR-used (e.g. bound to a reference); presumably it
// is only used by value. Confirm before relying on it pre-C++17.
const int NetEqDecodingTest::kTimeStepMs;
const size_t NetEqDecodingTest::kBlockSize8kHz;
const size_t NetEqDecodingTest::kBlockSize16kHz;
const size_t NetEqDecodingTest::kBlockSize32kHz;
const int NetEqDecodingTest::kInitSampleRateHz;
291
// Initializes all members to their pre-SetUp state; the NetEq instance
// itself is created later, in SetUp(). The initial sample rate is fixed to
// kInitSampleRateHz (8 kHz).
NetEqDecodingTest::NetEqDecodingTest()
    : neteq_(NULL),
      config_(),
      sim_clock_(0),
      output_sample_rate_(kInitSampleRateHz),
      algorithmic_delay_ms_(0) {
  config_.sample_rate_hz = kInitSampleRateHz;
}
300
301void NetEqDecodingTest::SetUp() {
ossue3525782016-05-25 07:37:43 -0700302 neteq_ = NetEq::Create(config_, CreateBuiltinAudioDecoderFactory());
turaj@webrtc.org8d1cdaa2014-04-11 18:47:55 +0000303 NetEqNetworkStatistics stat;
304 ASSERT_EQ(0, neteq_->NetworkStatistics(&stat));
305 algorithmic_delay_ms_ = stat.current_buffer_size_ms;
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000306 ASSERT_TRUE(neteq_);
henrik.lundin7a926812016-05-12 13:51:28 -0700307 LoadDecoders(neteq_);
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000308}
309
// Releases the NetEq instance created in SetUp(). Deleting a null pointer
// (if SetUp failed early) is safe.
void NetEqDecodingTest::TearDown() {
  delete neteq_;
}
313
// Replaces the current packet source with one reading from |rtp_file|
// (an RTP dump). Any previously open source is destroyed.
void NetEqDecodingTest::OpenInputFile(const std::string &rtp_file) {
  rtp_source_.reset(test::RtpFileSource::Create(rtp_file));
}
317
// Advances the simulation by one 10 ms step: inserts every packet whose
// arrival time is due, pulls one audio block from NetEq, sanity-checks it,
// and bumps the simulated clock.
void NetEqDecodingTest::Process() {
  // Check if time to receive.
  while (packet_ && sim_clock_ >= packet_->time_ms()) {
    if (packet_->payload_length_bytes() > 0) {
#ifndef WEBRTC_CODEC_ISAC
      // Ignore payload type 104 (iSAC-swb) if ISAC is not supported.
      // NOTE: this #ifndef guards only the following |if| condition, which
      // in turn conditions the single ASSERT_EQ statement below.
      if (packet_->header().payloadType != 104)
#endif
        ASSERT_EQ(0,
                  neteq_->InsertPacket(
                      packet_->header(),
                      rtc::ArrayView<const uint8_t>(
                          packet_->payload(), packet_->payload_length_bytes()),
                      // Receive time expressed in samples at the current
                      // output rate.
                      static_cast<uint32_t>(packet_->time_ms() *
                                            (output_sample_rate_ / 1000))));
    }
    // Get next packet.
    packet_ = rtp_source_->NextPacket();
  }

  // Get audio from NetEq.
  bool muted;
  ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
  ASSERT_FALSE(muted);
  // Output must be exactly one 10 ms block at one of the supported rates.
  ASSERT_TRUE((out_frame_.samples_per_channel_ == kBlockSize8kHz) ||
              (out_frame_.samples_per_channel_ == kBlockSize16kHz) ||
              (out_frame_.samples_per_channel_ == kBlockSize32kHz) ||
              (out_frame_.samples_per_channel_ == kBlockSize48kHz));
  output_sample_rate_ = out_frame_.sample_rate_hz_;
  EXPECT_EQ(output_sample_rate_, neteq_->last_output_sample_rate_hz());

  // Increase time.
  sim_clock_ += kTimeStepMs;
}
352
minyue4f906772016-04-29 11:05:14 -0700353void NetEqDecodingTest::DecodeAndCompare(
354 const std::string& rtp_file,
355 const std::string& output_checksum,
356 const std::string& network_stats_checksum,
357 const std::string& rtcp_stats_checksum,
358 bool gen_ref) {
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000359 OpenInputFile(rtp_file);
360
minyue4f906772016-04-29 11:05:14 -0700361 std::string ref_out_file =
362 gen_ref ? webrtc::test::OutputPath() + "neteq_universal_ref.pcm" : "";
363 ResultSink output(ref_out_file);
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000364
minyue4f906772016-04-29 11:05:14 -0700365 std::string stat_out_file =
366 gen_ref ? webrtc::test::OutputPath() + "neteq_network_stats.dat" : "";
367 ResultSink network_stats(stat_out_file);
henrik.lundin@webrtc.org4e4b0982014-08-11 14:48:49 +0000368
minyue4f906772016-04-29 11:05:14 -0700369 std::string rtcp_out_file =
370 gen_ref ? webrtc::test::OutputPath() + "neteq_rtcp_stats.dat" : "";
371 ResultSink rtcp_stats(rtcp_out_file);
henrik.lundin@webrtc.org4e4b0982014-08-11 14:48:49 +0000372
henrik.lundin46ba49c2016-05-24 22:50:47 -0700373 packet_ = rtp_source_->NextPacket();
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000374 int i = 0;
henrik.lundin@webrtc.org966a7082014-11-17 09:08:38 +0000375 while (packet_) {
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000376 std::ostringstream ss;
377 ss << "Lap number " << i++ << " in DecodeAndCompare while loop";
378 SCOPED_TRACE(ss.str()); // Print out the parameter values on failure.
henrik.lundin6d8e0112016-03-04 10:34:21 -0800379 ASSERT_NO_FATAL_FAILURE(Process());
minyue4f906772016-04-29 11:05:14 -0700380 ASSERT_NO_FATAL_FAILURE(output.AddResult(
henrik.lundin6d8e0112016-03-04 10:34:21 -0800381 out_frame_.data_, out_frame_.samples_per_channel_));
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000382
383 // Query the network statistics API once per second
384 if (sim_clock_ % 1000 == 0) {
385 // Process NetworkStatistics.
minyue4f906772016-04-29 11:05:14 -0700386 NetEqNetworkStatistics current_network_stats;
387 ASSERT_EQ(0, neteq_->NetworkStatistics(&current_network_stats));
388 ASSERT_NO_FATAL_FAILURE(network_stats.AddResult(current_network_stats));
389
henrik.lundin9c3efd02015-08-27 13:12:22 -0700390 // Compare with CurrentDelay, which should be identical.
minyue4f906772016-04-29 11:05:14 -0700391 EXPECT_EQ(current_network_stats.current_buffer_size_ms,
392 neteq_->CurrentDelayMs());
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000393
394 // Process RTCPstat.
minyue4f906772016-04-29 11:05:14 -0700395 RtcpStatistics current_rtcp_stats;
396 neteq_->GetRtcpStatistics(&current_rtcp_stats);
397 ASSERT_NO_FATAL_FAILURE(rtcp_stats.AddResult(current_rtcp_stats));
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000398 }
399 }
minyue4f906772016-04-29 11:05:14 -0700400
401 SCOPED_TRACE("Check output audio.");
402 output.VerifyChecksum(output_checksum);
403 SCOPED_TRACE("Check network stats.");
404 network_stats.VerifyChecksum(network_stats_checksum);
405 SCOPED_TRACE("Check rtcp stats.");
406 rtcp_stats.VerifyChecksum(rtcp_stats_checksum);
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000407}
408
409void NetEqDecodingTest::PopulateRtpInfo(int frame_index,
410 int timestamp,
henrik.lundin246ef3e2017-04-24 09:14:32 -0700411 RTPHeader* rtp_info) {
412 rtp_info->sequenceNumber = frame_index;
413 rtp_info->timestamp = timestamp;
414 rtp_info->ssrc = 0x1234; // Just an arbitrary SSRC.
415 rtp_info->payloadType = 94; // PCM16b WB codec.
416 rtp_info->markerBit = 0;
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000417}
418
419void NetEqDecodingTest::PopulateCng(int frame_index,
420 int timestamp,
henrik.lundin246ef3e2017-04-24 09:14:32 -0700421 RTPHeader* rtp_info,
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000422 uint8_t* payload,
pkasting@chromium.org4591fbd2014-11-20 22:28:14 +0000423 size_t* payload_len) {
henrik.lundin246ef3e2017-04-24 09:14:32 -0700424 rtp_info->sequenceNumber = frame_index;
425 rtp_info->timestamp = timestamp;
426 rtp_info->ssrc = 0x1234; // Just an arbitrary SSRC.
427 rtp_info->payloadType = 98; // WB CNG.
428 rtp_info->markerBit = 0;
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000429 payload[0] = 64; // Noise level -64 dBov, quite arbitrarily chosen.
430 *payload_len = 1; // Only noise level, no spectral parameters.
431}
432
// Bit-exactness test over the universal RTP input file. Only enabled when
// protobuf support and all required codecs (iSAC, iLBC, G.722) are compiled
// in, and not on iOS or ARM64, where the reference checksums do not apply.
#if !defined(WEBRTC_IOS) && defined(WEBRTC_NETEQ_UNITTEST_BITEXACT) && \
    (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)) && \
    defined(WEBRTC_CODEC_ILBC) && defined(WEBRTC_CODEC_G722) && \
    !defined(WEBRTC_ARCH_ARM64)
#define MAYBE_TestBitExactness TestBitExactness
#else
#define MAYBE_TestBitExactness DISABLED_TestBitExactness
#endif
TEST_F(NetEqDecodingTest, MAYBE_TestBitExactness) {
  const std::string input_rtp_file =
      webrtc::test::ResourcePath("audio_coding/neteq_universal_new", "rtp");

  // Reference checksums per platform: general, Android, Win32, Win64.
  const std::string output_checksum = PlatformChecksum(
      "09fa7646e2ad032a0b156177b95f09012430f81f",
      "1c64eb8b55ce8878676c6a1e6ddd78f48de0668b",
      "09fa7646e2ad032a0b156177b95f09012430f81f",
      "759fef89a5de52bd17e733dc255c671ce86be909");

  const std::string network_stats_checksum = PlatformChecksum(
      "f59b3dfdb9b1b8bbb61abedd7c8cf3fc47c21f5f",
      "c8b2a93842e48d014f7e6efe10ae96cb3892b129",
      "f59b3dfdb9b1b8bbb61abedd7c8cf3fc47c21f5f",
      "f59b3dfdb9b1b8bbb61abedd7c8cf3fc47c21f5f");

  const std::string rtcp_stats_checksum = PlatformChecksum(
      "b8880bf9fed2487efbddcb8d94b9937a29ae521d",
      "f3f7b3d3e71d7e635240b5373b57df6a7e4ce9d4",
      "b8880bf9fed2487efbddcb8d94b9937a29ae521d",
      "b8880bf9fed2487efbddcb8d94b9937a29ae521d");

  DecodeAndCompare(input_rtp_file,
                   output_checksum,
                   network_stats_checksum,
                   rtcp_stats_checksum,
                   FLAGS_gen_ref);
}
469
// Opus bit-exactness test. Disabled on iOS/Android, without protobuf or
// Opus support, and when Opus is built with 120 ms ptime support (which
// changes the decoder output and would invalidate the checksums).
#if !defined(WEBRTC_IOS) && !defined(WEBRTC_ANDROID) && \
    defined(WEBRTC_NETEQ_UNITTEST_BITEXACT) && \
    defined(WEBRTC_CODEC_OPUS) && \
    !WEBRTC_OPUS_SUPPORT_120MS_PTIME
#define MAYBE_TestOpusBitExactness TestOpusBitExactness
#else
#define MAYBE_TestOpusBitExactness DISABLED_TestOpusBitExactness
#endif
TEST_F(NetEqDecodingTest, MAYBE_TestOpusBitExactness) {
  const std::string input_rtp_file =
      webrtc::test::ResourcePath("audio_coding/neteq_opus", "rtp");

  // Opus decoding is platform-independent here, so the same checksum is
  // expected on every platform.
  const std::string output_checksum = PlatformChecksum(
      "6237dd113ad80d7764fe4c90b55b2ec035eae64e",
      "6237dd113ad80d7764fe4c90b55b2ec035eae64e",
      "6237dd113ad80d7764fe4c90b55b2ec035eae64e",
      "6237dd113ad80d7764fe4c90b55b2ec035eae64e");

  const std::string network_stats_checksum = PlatformChecksum(
      "d8379381d5a619f0616bb3c0a8a9eea1704a8ab8",
      "d8379381d5a619f0616bb3c0a8a9eea1704a8ab8",
      "d8379381d5a619f0616bb3c0a8a9eea1704a8ab8",
      "d8379381d5a619f0616bb3c0a8a9eea1704a8ab8");

  const std::string rtcp_stats_checksum = PlatformChecksum(
      "e37c797e3de6a64dda88c9ade7a013d022a2e1e0",
      "e37c797e3de6a64dda88c9ade7a013d022a2e1e0",
      "e37c797e3de6a64dda88c9ade7a013d022a2e1e0",
      "e37c797e3de6a64dda88c9ade7a013d022a2e1e0");

  DecodeAndCompare(input_rtp_file,
                   output_checksum,
                   network_stats_checksum,
                   rtcp_stats_checksum,
                   FLAGS_gen_ref);
}
506
// Use fax mode to avoid time-scaling. This is to simplify the testing of
// packet waiting times in the packet buffer.
class NetEqDecodingTestFaxMode : public NetEqDecodingTest {
 protected:
  NetEqDecodingTestFaxMode() : NetEqDecodingTest() {
    // Fax playout mode disables accelerate/preemptive expand, so waiting
    // times grow deterministically.
    config_.playout_mode = kPlayoutFax;
  }
};
515
516TEST_F(NetEqDecodingTestFaxMode, TestFrameWaitingTimeStatistics) {
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000517 // Insert 30 dummy packets at once. Each packet contains 10 ms 16 kHz audio.
518 size_t num_frames = 30;
pkasting@chromium.org4591fbd2014-11-20 22:28:14 +0000519 const size_t kSamples = 10 * 16;
520 const size_t kPayloadBytes = kSamples * 2;
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000521 for (size_t i = 0; i < num_frames; ++i) {
kwibergee2bac22015-11-11 10:34:00 -0800522 const uint8_t payload[kPayloadBytes] = {0};
henrik.lundin246ef3e2017-04-24 09:14:32 -0700523 RTPHeader rtp_info;
524 rtp_info.sequenceNumber = i;
525 rtp_info.timestamp = i * kSamples;
526 rtp_info.ssrc = 0x1234; // Just an arbitrary SSRC.
527 rtp_info.payloadType = 94; // PCM16b WB codec.
528 rtp_info.markerBit = 0;
529 ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000530 }
531 // Pull out all data.
532 for (size_t i = 0; i < num_frames; ++i) {
henrik.lundin7a926812016-05-12 13:51:28 -0700533 bool muted;
534 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
henrik.lundin6d8e0112016-03-04 10:34:21 -0800535 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000536 }
537
Henrik Lundin1bb8cf82015-08-25 13:08:04 +0200538 NetEqNetworkStatistics stats;
539 EXPECT_EQ(0, neteq_->NetworkStatistics(&stats));
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000540 // Since all frames are dumped into NetEQ at once, but pulled out with 10 ms
541 // spacing (per definition), we expect the delay to increase with 10 ms for
Henrik Lundin1bb8cf82015-08-25 13:08:04 +0200542 // each packet. Thus, we are calculating the statistics for a series from 10
543 // to 300, in steps of 10 ms.
544 EXPECT_EQ(155, stats.mean_waiting_time_ms);
545 EXPECT_EQ(155, stats.median_waiting_time_ms);
546 EXPECT_EQ(10, stats.min_waiting_time_ms);
547 EXPECT_EQ(300, stats.max_waiting_time_ms);
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000548
549 // Check statistics again and make sure it's been reset.
Henrik Lundin1bb8cf82015-08-25 13:08:04 +0200550 EXPECT_EQ(0, neteq_->NetworkStatistics(&stats));
551 EXPECT_EQ(-1, stats.mean_waiting_time_ms);
552 EXPECT_EQ(-1, stats.median_waiting_time_ms);
553 EXPECT_EQ(-1, stats.min_waiting_time_ms);
554 EXPECT_EQ(-1, stats.max_waiting_time_ms);
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000555}
556
// Feeds packets faster than real time (an extra packet every 10th frame) and
// checks that NetEq's clock-drift estimate converges to the expected
// negative value.
TEST_F(NetEqDecodingTest, TestAverageInterArrivalTimeNegative) {
  const int kNumFrames = 3000;  // Needed for convergence.
  int frame_index = 0;
  const size_t kSamples = 10 * 16;
  const size_t kPayloadBytes = kSamples * 2;
  while (frame_index < kNumFrames) {
    // Insert one packet each time, except every 10th time where we insert two
    // packets at once. This will create a negative clock-drift of approx. 10%.
    int num_packets = (frame_index % 10 == 0 ? 2 : 1);
    for (int n = 0; n < num_packets; ++n) {
      uint8_t payload[kPayloadBytes] = {0};
      RTPHeader rtp_info;
      PopulateRtpInfo(frame_index, frame_index * kSamples, &rtp_info);
      ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
      ++frame_index;
    }

    // Pull out data once.
    bool muted;
    ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
    ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
  }

  NetEqNetworkStatistics network_stats;
  ASSERT_EQ(0, neteq_->NetworkStatistics(&network_stats));
  // Bit-exact expected drift estimate in parts per million (~ -10%).
  EXPECT_EQ(-103192, network_stats.clockdrift_ppm);
}
584
henrik.lundin@webrtc.orgb4e80e02014-05-15 07:14:00 +0000585TEST_F(NetEqDecodingTest, TestAverageInterArrivalTimePositive) {
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000586 const int kNumFrames = 5000; // Needed for convergence.
587 int frame_index = 0;
pkasting@chromium.org4591fbd2014-11-20 22:28:14 +0000588 const size_t kSamples = 10 * 16;
589 const size_t kPayloadBytes = kSamples * 2;
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000590 for (int i = 0; i < kNumFrames; ++i) {
591 // Insert one packet each time, except every 10th time where we don't insert
592 // any packet. This will create a positive clock-drift of approx. 11%.
593 int num_packets = (i % 10 == 9 ? 0 : 1);
594 for (int n = 0; n < num_packets; ++n) {
595 uint8_t payload[kPayloadBytes] = {0};
henrik.lundin246ef3e2017-04-24 09:14:32 -0700596 RTPHeader rtp_info;
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000597 PopulateRtpInfo(frame_index, frame_index * kSamples, &rtp_info);
henrik.lundin246ef3e2017-04-24 09:14:32 -0700598 ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000599 ++frame_index;
600 }
601
602 // Pull out data once.
henrik.lundin7a926812016-05-12 13:51:28 -0700603 bool muted;
604 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
henrik.lundin6d8e0112016-03-04 10:34:21 -0800605 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000606 }
607
608 NetEqNetworkStatistics network_stats;
609 ASSERT_EQ(0, neteq_->NetworkStatistics(&network_stats));
henrik.lundin0d838572016-10-13 03:35:55 -0700610 EXPECT_EQ(110953, network_stats.clockdrift_ppm);
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000611}
612
// Runs a long-call scenario with clock drift between sender and receiver:
// 5 s of speech, then 60 s of comfort noise (CNG), optionally followed by a
// simulated network freeze, and finally speech again until normal decoding
// resumes. |drift_factor| scales the sender's packet inter-arrival time
// (> 1.0 means the sender's clock is slower than the receiver's). The test
// verifies that speech returns within |max_time_to_speech_ms| and that the
// playout delay before and after the CNG period differs by no more than
// |delay_tolerance_ms| (converted to 16 kHz samples below).
void NetEqDecodingTest::LongCngWithClockDrift(double drift_factor,
                                              double network_freeze_ms,
                                              bool pull_audio_during_freeze,
                                              int delay_tolerance_ms,
                                              int max_time_to_speech_ms) {
  uint16_t seq_no = 0;
  uint32_t timestamp = 0;
  const int kFrameSizeMs = 30;
  const size_t kSamples = kFrameSizeMs * 16;  // 30 ms at 16 kHz.
  const size_t kPayloadBytes = kSamples * 2;
  double next_input_time_ms = 0.0;
  double t_ms;
  bool muted;

  // Insert speech for 5 seconds.
  const int kSpeechDurationMs = 5000;
  for (t_ms = 0; t_ms < kSpeechDurationMs; t_ms += 10) {
    // Each turn in this for loop is 10 ms.
    while (next_input_time_ms <= t_ms) {
      // Insert one 30 ms speech frame.
      uint8_t payload[kPayloadBytes] = {0};
      RTPHeader rtp_info;
      PopulateRtpInfo(seq_no, timestamp, &rtp_info);
      ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
      ++seq_no;
      timestamp += kSamples;
      // The drift factor stretches (or compresses) the arrival times.
      next_input_time_ms += static_cast<double>(kFrameSizeMs) * drift_factor;
    }
    // Pull out data once.
    ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
    ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
  }

  EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_);
  // Snapshot the playout delay (in samples) before the CNG period.
  rtc::Optional<uint32_t> playout_timestamp = PlayoutTimestamp();
  ASSERT_TRUE(playout_timestamp);
  int32_t delay_before = timestamp - *playout_timestamp;

  // Insert CNG for 1 minute (= 60000 ms).
  const int kCngPeriodMs = 100;
  const int kCngPeriodSamples = kCngPeriodMs * 16;  // Period in 16 kHz samples.
  const int kCngDurationMs = 60000;
  for (; t_ms < kSpeechDurationMs + kCngDurationMs; t_ms += 10) {
    // Each turn in this for loop is 10 ms.
    while (next_input_time_ms <= t_ms) {
      // Insert one CNG frame each 100 ms.
      uint8_t payload[kPayloadBytes];
      size_t payload_len;
      RTPHeader rtp_info;
      PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len);
      ASSERT_EQ(0, neteq_->InsertPacket(
                       rtp_info,
                       rtc::ArrayView<const uint8_t>(payload, payload_len), 0));
      ++seq_no;
      timestamp += kCngPeriodSamples;
      next_input_time_ms += static_cast<double>(kCngPeriodMs) * drift_factor;
    }
    // Pull out data once.
    ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
    ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
  }

  EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);

  if (network_freeze_ms > 0) {
    // First keep pulling audio for |network_freeze_ms| without inserting
    // any data, then insert CNG data corresponding to |network_freeze_ms|
    // without pulling any output audio.
    const double loop_end_time = t_ms + network_freeze_ms;
    for (; t_ms < loop_end_time; t_ms += 10) {
      // Pull out data once.
      ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
      ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
      EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
    }
    bool pull_once = pull_audio_during_freeze;
    // If |pull_once| is true, GetAudio will be called once half-way through
    // the network recovery period.
    double pull_time_ms = (t_ms + next_input_time_ms) / 2;
    while (next_input_time_ms <= t_ms) {
      if (pull_once && next_input_time_ms >= pull_time_ms) {
        pull_once = false;
        // Pull out data once.
        ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
        ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
        EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
        t_ms += 10;
      }
      // Insert one CNG frame each 100 ms.
      uint8_t payload[kPayloadBytes];
      size_t payload_len;
      RTPHeader rtp_info;
      PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len);
      ASSERT_EQ(0, neteq_->InsertPacket(
                       rtp_info,
                       rtc::ArrayView<const uint8_t>(payload, payload_len), 0));
      ++seq_no;
      timestamp += kCngPeriodSamples;
      next_input_time_ms += kCngPeriodMs * drift_factor;
    }
  }

  // Insert speech again until output type is speech.
  double speech_restart_time_ms = t_ms;
  while (out_frame_.speech_type_ != AudioFrame::kNormalSpeech) {
    // Each turn in this for loop is 10 ms.
    while (next_input_time_ms <= t_ms) {
      // Insert one 30 ms speech frame.
      uint8_t payload[kPayloadBytes] = {0};
      RTPHeader rtp_info;
      PopulateRtpInfo(seq_no, timestamp, &rtp_info);
      ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
      ++seq_no;
      timestamp += kSamples;
      next_input_time_ms += kFrameSizeMs * drift_factor;
    }
    // Pull out data once.
    ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
    ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
    // Increase clock.
    t_ms += 10;
  }

  // Check that the speech starts again within reasonable time.
  double time_until_speech_returns_ms = t_ms - speech_restart_time_ms;
  EXPECT_LT(time_until_speech_returns_ms, max_time_to_speech_ms);
  playout_timestamp = PlayoutTimestamp();
  ASSERT_TRUE(playout_timestamp);
  int32_t delay_after = timestamp - *playout_timestamp;
  // Compare delay before and after, and make sure it differs less than
  // |delay_tolerance_ms| (the factor 16 converts ms to 16 kHz samples).
  EXPECT_LE(delay_after, delay_before + delay_tolerance_ms * 16);
  EXPECT_GE(delay_after, delay_before - delay_tolerance_ms * 16);
}
746
henrik.lundin@webrtc.orgb4e80e02014-05-15 07:14:00 +0000747TEST_F(NetEqDecodingTest, LongCngWithNegativeClockDrift) {
henrik.lundin@webrtc.orgfcfc6a92014-02-13 11:42:28 +0000748 // Apply a clock drift of -25 ms / s (sender faster than receiver).
749 const double kDriftFactor = 1000.0 / (1000.0 + 25.0);
henrik.lundin@webrtc.org24779fe2014-03-14 12:40:05 +0000750 const double kNetworkFreezeTimeMs = 0.0;
751 const bool kGetAudioDuringFreezeRecovery = false;
752 const int kDelayToleranceMs = 20;
753 const int kMaxTimeToSpeechMs = 100;
754 LongCngWithClockDrift(kDriftFactor,
755 kNetworkFreezeTimeMs,
756 kGetAudioDuringFreezeRecovery,
757 kDelayToleranceMs,
758 kMaxTimeToSpeechMs);
henrik.lundin@webrtc.orgfcfc6a92014-02-13 11:42:28 +0000759}
760
henrik.lundin@webrtc.orgb4e80e02014-05-15 07:14:00 +0000761TEST_F(NetEqDecodingTest, LongCngWithPositiveClockDrift) {
henrik.lundin@webrtc.orgfcfc6a92014-02-13 11:42:28 +0000762 // Apply a clock drift of +25 ms / s (sender slower than receiver).
763 const double kDriftFactor = 1000.0 / (1000.0 - 25.0);
henrik.lundin@webrtc.org24779fe2014-03-14 12:40:05 +0000764 const double kNetworkFreezeTimeMs = 0.0;
765 const bool kGetAudioDuringFreezeRecovery = false;
766 const int kDelayToleranceMs = 20;
767 const int kMaxTimeToSpeechMs = 100;
768 LongCngWithClockDrift(kDriftFactor,
769 kNetworkFreezeTimeMs,
770 kGetAudioDuringFreezeRecovery,
771 kDelayToleranceMs,
772 kMaxTimeToSpeechMs);
773}
774
henrik.lundin@webrtc.orgb4e80e02014-05-15 07:14:00 +0000775TEST_F(NetEqDecodingTest, LongCngWithNegativeClockDriftNetworkFreeze) {
henrik.lundin@webrtc.org24779fe2014-03-14 12:40:05 +0000776 // Apply a clock drift of -25 ms / s (sender faster than receiver).
777 const double kDriftFactor = 1000.0 / (1000.0 + 25.0);
778 const double kNetworkFreezeTimeMs = 5000.0;
779 const bool kGetAudioDuringFreezeRecovery = false;
780 const int kDelayToleranceMs = 50;
781 const int kMaxTimeToSpeechMs = 200;
782 LongCngWithClockDrift(kDriftFactor,
783 kNetworkFreezeTimeMs,
784 kGetAudioDuringFreezeRecovery,
785 kDelayToleranceMs,
786 kMaxTimeToSpeechMs);
787}
788
henrik.lundin@webrtc.orgb4e80e02014-05-15 07:14:00 +0000789TEST_F(NetEqDecodingTest, LongCngWithPositiveClockDriftNetworkFreeze) {
henrik.lundin@webrtc.org24779fe2014-03-14 12:40:05 +0000790 // Apply a clock drift of +25 ms / s (sender slower than receiver).
791 const double kDriftFactor = 1000.0 / (1000.0 - 25.0);
792 const double kNetworkFreezeTimeMs = 5000.0;
793 const bool kGetAudioDuringFreezeRecovery = false;
794 const int kDelayToleranceMs = 20;
795 const int kMaxTimeToSpeechMs = 100;
796 LongCngWithClockDrift(kDriftFactor,
797 kNetworkFreezeTimeMs,
798 kGetAudioDuringFreezeRecovery,
799 kDelayToleranceMs,
800 kMaxTimeToSpeechMs);
801}
802
henrik.lundin@webrtc.orgb4e80e02014-05-15 07:14:00 +0000803TEST_F(NetEqDecodingTest, LongCngWithPositiveClockDriftNetworkFreezeExtraPull) {
henrik.lundin@webrtc.org24779fe2014-03-14 12:40:05 +0000804 // Apply a clock drift of +25 ms / s (sender slower than receiver).
805 const double kDriftFactor = 1000.0 / (1000.0 - 25.0);
806 const double kNetworkFreezeTimeMs = 5000.0;
807 const bool kGetAudioDuringFreezeRecovery = true;
808 const int kDelayToleranceMs = 20;
809 const int kMaxTimeToSpeechMs = 100;
810 LongCngWithClockDrift(kDriftFactor,
811 kNetworkFreezeTimeMs,
812 kGetAudioDuringFreezeRecovery,
813 kDelayToleranceMs,
814 kMaxTimeToSpeechMs);
815}
816
henrik.lundin@webrtc.orgb4e80e02014-05-15 07:14:00 +0000817TEST_F(NetEqDecodingTest, LongCngWithoutClockDrift) {
henrik.lundin@webrtc.org24779fe2014-03-14 12:40:05 +0000818 const double kDriftFactor = 1.0; // No drift.
819 const double kNetworkFreezeTimeMs = 0.0;
820 const bool kGetAudioDuringFreezeRecovery = false;
821 const int kDelayToleranceMs = 10;
822 const int kMaxTimeToSpeechMs = 50;
823 LongCngWithClockDrift(kDriftFactor,
824 kNetworkFreezeTimeMs,
825 kGetAudioDuringFreezeRecovery,
826 kDelayToleranceMs,
827 kMaxTimeToSpeechMs);
henrik.lundin@webrtc.orgfcfc6a92014-02-13 11:42:28 +0000828}
829
henrik.lundin@webrtc.orgb4e80e02014-05-15 07:14:00 +0000830TEST_F(NetEqDecodingTest, UnknownPayloadType) {
pkasting@chromium.org4591fbd2014-11-20 22:28:14 +0000831 const size_t kPayloadBytes = 100;
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000832 uint8_t payload[kPayloadBytes] = {0};
henrik.lundin246ef3e2017-04-24 09:14:32 -0700833 RTPHeader rtp_info;
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000834 PopulateRtpInfo(0, 0, &rtp_info);
henrik.lundin246ef3e2017-04-24 09:14:32 -0700835 rtp_info.payloadType = 1; // Not registered as a decoder.
836 EXPECT_EQ(NetEq::kFail, neteq_->InsertPacket(rtp_info, payload, 0));
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000837 EXPECT_EQ(NetEq::kUnknownRtpPayloadType, neteq_->LastError());
838}
839
// The DecoderError test decodes a deliberately invalid iSAC payload, so it
// can only run when an iSAC implementation (floating-point or fixed-point)
// is compiled in; otherwise it is disabled.
#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
#define MAYBE_DecoderError DecoderError
#else
#define MAYBE_DecoderError DISABLED_DecoderError
#endif
845
// Verifies graceful handling of a decoder failure: an all-zero payload is
// inserted as iSAC, GetAudio must fail, LastError()/LastDecoderError() must
// report the failure, and the first 10 ms of the output frame must be zeroed
// while the remainder of the frame buffer is left untouched.
TEST_F(NetEqDecodingTest, MAYBE_DecoderError) {
  const size_t kPayloadBytes = 100;
  uint8_t payload[kPayloadBytes] = {0};
  RTPHeader rtp_info;
  PopulateRtpInfo(0, 0, &rtp_info);
  rtp_info.payloadType = 103;  // iSAC, but the payload is invalid.
  EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
  // Set all of |out_data_| to 1, and verify that it was set to 0 by the call
  // to GetAudio.
  for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; ++i) {
    out_frame_.data_[i] = 1;
  }
  bool muted;
  EXPECT_EQ(NetEq::kFail, neteq_->GetAudio(&out_frame_, &muted));
  ASSERT_FALSE(muted);
  // Verify that there is a decoder error to check.
  EXPECT_EQ(NetEq::kDecoderErrorCode, neteq_->LastError());

  // Error codes mirrored from the iSAC implementations; which one is expected
  // depends on whether the float or the fixed-point decoder is built.
  enum NetEqDecoderError {
    ISAC_LENGTH_MISMATCH = 6730,
    ISAC_RANGE_ERROR_DECODE_FRAME_LENGTH = 6640
  };
#if defined(WEBRTC_CODEC_ISAC)
  EXPECT_EQ(ISAC_LENGTH_MISMATCH, neteq_->LastDecoderError());
#elif defined(WEBRTC_CODEC_ISACFX)
  EXPECT_EQ(ISAC_RANGE_ERROR_DECODE_FRAME_LENGTH, neteq_->LastDecoderError());
#endif
  // Verify that the first 160 samples are set to 0, and that the remaining
  // samples are left unmodified.
  static const int kExpectedOutputLength = 160;  // 10 ms at 16 kHz sample rate.
  for (int i = 0; i < kExpectedOutputLength; ++i) {
    std::ostringstream ss;
    ss << "i = " << i;
    SCOPED_TRACE(ss.str());  // Print out the parameter values on failure.
    EXPECT_EQ(0, out_frame_.data_[i]);
  }
  for (size_t i = kExpectedOutputLength; i < AudioFrame::kMaxDataSizeSamples;
       ++i) {
    std::ostringstream ss;
    ss << "i = " << i;
    SCOPED_TRACE(ss.str());  // Print out the parameter values on failure.
    EXPECT_EQ(1, out_frame_.data_[i]);
  }
}
890
henrik.lundin@webrtc.orgb4e80e02014-05-15 07:14:00 +0000891TEST_F(NetEqDecodingTest, GetAudioBeforeInsertPacket) {
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000892 // Set all of |out_data_| to 1, and verify that it was set to 0 by the call
893 // to GetAudio.
henrik.lundin6d8e0112016-03-04 10:34:21 -0800894 for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; ++i) {
895 out_frame_.data_[i] = 1;
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000896 }
henrik.lundin7a926812016-05-12 13:51:28 -0700897 bool muted;
898 EXPECT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
899 ASSERT_FALSE(muted);
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000900 // Verify that the first block of samples is set to 0.
901 static const int kExpectedOutputLength =
902 kInitSampleRateHz / 100; // 10 ms at initial sample rate.
903 for (int i = 0; i < kExpectedOutputLength; ++i) {
904 std::ostringstream ss;
905 ss << "i = " << i;
906 SCOPED_TRACE(ss.str()); // Print out the parameter values on failure.
henrik.lundin6d8e0112016-03-04 10:34:21 -0800907 EXPECT_EQ(0, out_frame_.data_[i]);
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000908 }
henrik.lundind89814b2015-11-23 06:49:25 -0800909 // Verify that the sample rate did not change from the initial configuration.
910 EXPECT_EQ(config_.sample_rate_hz, neteq_->last_output_sample_rate_hz());
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000911}
turaj@webrtc.orgff43c852013-09-25 00:07:27 +0000912
// Fixture for testing NetEq's background-noise (BGN) modes. CheckBgn() feeds
// NetEq with PCM16 speech packets, then starves it so that decoding goes
// through PLC and into PLC-to-CNG, and reports the energy of each PLC-CNG
// frame to the pure virtual TestCondition(). Subclasses select a BGN mode in
// their constructors and assert the expected energy behavior.
class NetEqBgnTest : public NetEqDecodingTest {
 protected:
  // Called for every PLC-CNG output frame. |sum_squared_noise| is the sum of
  // the squared output samples; |should_be_faded| is true once enough frames
  // have been pulled that the background noise is expected to have faded.
  virtual void TestCondition(double sum_squared_noise,
                             bool should_be_faded) = 0;

  void CheckBgn(int sampling_rate_hz) {
    size_t expected_samples_per_channel = 0;
    uint8_t payload_type = 0xFF;  // Invalid.
    if (sampling_rate_hz == 8000) {
      expected_samples_per_channel = kBlockSize8kHz;
      payload_type = 93;  // PCM 16, 8 kHz.
    } else if (sampling_rate_hz == 16000) {
      expected_samples_per_channel = kBlockSize16kHz;
      payload_type = 94;  // PCM 16, 16 kHz.
    } else if (sampling_rate_hz == 32000) {
      expected_samples_per_channel = kBlockSize32kHz;
      payload_type = 95;  // PCM 16, 32 kHz.
    } else {
      ASSERT_TRUE(false);  // Unsupported test case.
    }

    AudioFrame output;
    test::AudioLoop input;
    // We are using the same 32 kHz input file for all tests, regardless of
    // |sampling_rate_hz|. The output may sound weird, but the test is still
    // valid.
    ASSERT_TRUE(input.Init(
        webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm"),
        10 * sampling_rate_hz,  // Max 10 seconds loop length.
        expected_samples_per_channel));

    // Payload of 10 ms of PCM16 32 kHz.
    uint8_t payload[kBlockSize32kHz * sizeof(int16_t)];
    RTPHeader rtp_info;
    PopulateRtpInfo(0, 0, &rtp_info);
    rtp_info.payloadType = payload_type;

    uint32_t receive_timestamp = 0;
    bool muted;
    for (int n = 0; n < 10; ++n) {  // Insert few packets and get audio.
      auto block = input.GetNextBlock();
      ASSERT_EQ(expected_samples_per_channel, block.size());
      size_t enc_len_bytes =
          WebRtcPcm16b_Encode(block.data(), block.size(), payload);
      ASSERT_EQ(enc_len_bytes, expected_samples_per_channel * 2);

      ASSERT_EQ(0, neteq_->InsertPacket(
                       rtp_info,
                       rtc::ArrayView<const uint8_t>(payload, enc_len_bytes),
                       receive_timestamp));
      output.Reset();
      ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));
      ASSERT_EQ(1u, output.num_channels_);
      ASSERT_EQ(expected_samples_per_channel, output.samples_per_channel_);
      ASSERT_EQ(AudioFrame::kNormalSpeech, output.speech_type_);

      // Next packet.
      rtp_info.timestamp += expected_samples_per_channel;
      rtp_info.sequenceNumber++;
      receive_timestamp += expected_samples_per_channel;
    }

    output.Reset();

    // Get audio without inserting packets, expecting PLC and PLC-to-CNG. Pull
    // one frame without checking speech-type. This is the first frame pulled
    // without inserting any packet, and might not be labeled as PLC.
    ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));
    ASSERT_EQ(1u, output.num_channels_);
    ASSERT_EQ(expected_samples_per_channel, output.samples_per_channel_);

    // To be able to test the fading of background noise we need at least to
    // pull 611 frames.
    const int kFadingThreshold = 611;

    // Test several CNG-to-PLC packets for the expected behavior. The number
    // 20 is arbitrary, but sufficiently large to test enough frames.
    const int kNumPlcToCngTestFrames = 20;
    bool plc_to_cng = false;
    for (int n = 0; n < kFadingThreshold + kNumPlcToCngTestFrames; ++n) {
      output.Reset();
      memset(output.data_, 1, sizeof(output.data_));  // Set to non-zero.
      ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));
      ASSERT_FALSE(muted);
      ASSERT_EQ(1u, output.num_channels_);
      ASSERT_EQ(expected_samples_per_channel, output.samples_per_channel_);
      if (output.speech_type_ == AudioFrame::kPLCCNG) {
        plc_to_cng = true;
        double sum_squared = 0;
        // Accumulate the frame's energy and let the subclass judge it.
        for (size_t k = 0;
             k < output.num_channels_ * output.samples_per_channel_; ++k)
          sum_squared += output.data_[k] * output.data_[k];
        TestCondition(sum_squared, n > kFadingThreshold);
      } else {
        EXPECT_EQ(AudioFrame::kPLC, output.speech_type_);
      }
    }
    EXPECT_TRUE(plc_to_cng);  // Just to be sure that PLC-to-CNG has occurred.
  }
};
1013
henrik.lundin@webrtc.org9b8102c2014-08-21 08:27:44 +00001014class NetEqBgnTestOn : public NetEqBgnTest {
1015 protected:
1016 NetEqBgnTestOn() : NetEqBgnTest() {
1017 config_.background_noise_mode = NetEq::kBgnOn;
1018 }
1019
1020 void TestCondition(double sum_squared_noise, bool /*should_be_faded*/) {
1021 EXPECT_NE(0, sum_squared_noise);
1022 }
1023};
1024
1025class NetEqBgnTestOff : public NetEqBgnTest {
1026 protected:
1027 NetEqBgnTestOff() : NetEqBgnTest() {
1028 config_.background_noise_mode = NetEq::kBgnOff;
1029 }
1030
1031 void TestCondition(double sum_squared_noise, bool /*should_be_faded*/) {
1032 EXPECT_EQ(0, sum_squared_noise);
1033 }
1034};
1035
1036class NetEqBgnTestFade : public NetEqBgnTest {
1037 protected:
1038 NetEqBgnTestFade() : NetEqBgnTest() {
1039 config_.background_noise_mode = NetEq::kBgnFade;
1040 }
1041
1042 void TestCondition(double sum_squared_noise, bool should_be_faded) {
1043 if (should_be_faded)
1044 EXPECT_EQ(0, sum_squared_noise);
1045 }
1046};
1047
henrika1d34fe92015-06-16 10:04:20 +02001048TEST_F(NetEqBgnTestOn, RunTest) {
henrik.lundin@webrtc.org9b8102c2014-08-21 08:27:44 +00001049 CheckBgn(8000);
1050 CheckBgn(16000);
1051 CheckBgn(32000);
turaj@webrtc.orgff43c852013-09-25 00:07:27 +00001052}
turaj@webrtc.org7b75ac62013-09-26 00:27:56 +00001053
henrika1d34fe92015-06-16 10:04:20 +02001054TEST_F(NetEqBgnTestOff, RunTest) {
henrik.lundin@webrtc.org9b8102c2014-08-21 08:27:44 +00001055 CheckBgn(8000);
1056 CheckBgn(16000);
1057 CheckBgn(32000);
1058}
1059
henrika1d34fe92015-06-16 10:04:20 +02001060TEST_F(NetEqBgnTestFade, RunTest) {
henrik.lundin@webrtc.org9b8102c2014-08-21 08:27:44 +00001061 CheckBgn(8000);
1062 CheckBgn(16000);
1063 CheckBgn(32000);
1064}
henrik.lundin@webrtc.orgea257842014-08-07 12:27:37 +00001065
// Streams speech packets through NetEq starting at |start_seq_no| and
// |start_timestamp|, chosen by the callers so that sequence-number and/or
// timestamp wrap-around occurs during the run. Packets whose sequence numbers
// appear in |drop_seq_numbers| are not inserted (simulated loss). While
// streaming, buffer sizes and playout delay are checked to stay within bounds
// across the wrap. Finally the test asserts that the wraps specified by
// |expect_seq_no_wrap| and |expect_timestamp_wrap| actually happened.
void NetEqDecodingTest::WrapTest(uint16_t start_seq_no,
                                 uint32_t start_timestamp,
                                 const std::set<uint16_t>& drop_seq_numbers,
                                 bool expect_seq_no_wrap,
                                 bool expect_timestamp_wrap) {
  uint16_t seq_no = start_seq_no;
  uint32_t timestamp = start_timestamp;
  const int kBlocksPerFrame = 3;  // Number of 10 ms blocks per frame.
  const int kFrameSizeMs = kBlocksPerFrame * kTimeStepMs;
  const int kSamples = kBlockSize16kHz * kBlocksPerFrame;
  const size_t kPayloadBytes = kSamples * sizeof(int16_t);
  double next_input_time_ms = 0.0;
  uint32_t receive_timestamp = 0;

  // Insert speech for 2 seconds.
  const int kSpeechDurationMs = 2000;
  int packets_inserted = 0;
  uint16_t last_seq_no;
  uint32_t last_timestamp;
  bool timestamp_wrapped = false;
  bool seq_no_wrapped = false;
  for (double t_ms = 0; t_ms < kSpeechDurationMs; t_ms += 10) {
    // Each turn in this for loop is 10 ms.
    while (next_input_time_ms <= t_ms) {
      // Insert one 30 ms speech frame.
      uint8_t payload[kPayloadBytes] = {0};
      RTPHeader rtp_info;
      PopulateRtpInfo(seq_no, timestamp, &rtp_info);
      if (drop_seq_numbers.find(seq_no) == drop_seq_numbers.end()) {
        // This sequence number was not in the set to drop. Insert it.
        ASSERT_EQ(0,
                  neteq_->InsertPacket(rtp_info, payload, receive_timestamp));
        ++packets_inserted;
      }
      NetEqNetworkStatistics network_stats;
      ASSERT_EQ(0, neteq_->NetworkStatistics(&network_stats));

      // Due to internal NetEq logic, preferred buffer-size is about 4 times
      // the packet size for first few packets. Therefore we refrain from
      // checking the criteria.
      if (packets_inserted > 4) {
        // Expect preferred and actual buffer size to be no more than 2 frames.
        EXPECT_LE(network_stats.preferred_buffer_size_ms, kFrameSizeMs * 2);
        EXPECT_LE(network_stats.current_buffer_size_ms, kFrameSizeMs * 2 +
                  algorithmic_delay_ms_);
      }
      last_seq_no = seq_no;
      last_timestamp = timestamp;

      ++seq_no;
      timestamp += kSamples;
      receive_timestamp += kSamples;
      next_input_time_ms += static_cast<double>(kFrameSizeMs);

      // Unsigned increments wrap with well-defined semantics; detect a wrap
      // by the new value being smaller than the previous one.
      seq_no_wrapped |= seq_no < last_seq_no;
      timestamp_wrapped |= timestamp < last_timestamp;
    }
    // Pull out data once.
    AudioFrame output;
    bool muted;
    ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));
    ASSERT_EQ(kBlockSize16kHz, output.samples_per_channel_);
    ASSERT_EQ(1u, output.num_channels_);

    // Expect delay (in samples) to be less than 2 packets.
    rtc::Optional<uint32_t> playout_timestamp = PlayoutTimestamp();
    ASSERT_TRUE(playout_timestamp);
    EXPECT_LE(timestamp - *playout_timestamp,
              static_cast<uint32_t>(kSamples * 2));
  }
  // Make sure we have actually tested wrap-around.
  ASSERT_EQ(expect_seq_no_wrap, seq_no_wrapped);
  ASSERT_EQ(expect_timestamp_wrap, timestamp_wrapped);
}
1140
1141TEST_F(NetEqDecodingTest, SequenceNumberWrap) {
1142 // Start with a sequence number that will soon wrap.
1143 std::set<uint16_t> drop_seq_numbers; // Don't drop any packets.
1144 WrapTest(0xFFFF - 10, 0, drop_seq_numbers, true, false);
1145}
1146
1147TEST_F(NetEqDecodingTest, SequenceNumberWrapAndDrop) {
1148 // Start with a sequence number that will soon wrap.
1149 std::set<uint16_t> drop_seq_numbers;
1150 drop_seq_numbers.insert(0xFFFF);
1151 drop_seq_numbers.insert(0x0);
1152 WrapTest(0xFFFF - 10, 0, drop_seq_numbers, true, false);
1153}
1154
1155TEST_F(NetEqDecodingTest, TimestampWrap) {
1156 // Start with a timestamp that will soon wrap.
1157 std::set<uint16_t> drop_seq_numbers;
1158 WrapTest(0, 0xFFFFFFFF - 3000, drop_seq_numbers, false, true);
1159}
1160
1161TEST_F(NetEqDecodingTest, TimestampAndSequenceNumberWrap) {
1162 // Start with a timestamp and a sequence number that will wrap at the same
1163 // time.
1164 std::set<uint16_t> drop_seq_numbers;
1165 WrapTest(0xFFFF - 10, 0xFFFFFFFF - 5000, drop_seq_numbers, true, true);
1166}
1167
henrik.lundin@webrtc.orgca8cb952014-03-12 10:26:52 +00001168void NetEqDecodingTest::DuplicateCng() {
1169 uint16_t seq_no = 0;
1170 uint32_t timestamp = 0;
1171 const int kFrameSizeMs = 10;
1172 const int kSampleRateKhz = 16;
1173 const int kSamples = kFrameSizeMs * kSampleRateKhz;
pkasting@chromium.org4591fbd2014-11-20 22:28:14 +00001174 const size_t kPayloadBytes = kSamples * 2;
henrik.lundin@webrtc.orgca8cb952014-03-12 10:26:52 +00001175
turaj@webrtc.org8d1cdaa2014-04-11 18:47:55 +00001176 const int algorithmic_delay_samples = std::max(
1177 algorithmic_delay_ms_ * kSampleRateKhz, 5 * kSampleRateKhz / 8);
henrik.lundin@webrtc.orgc93437e2014-12-01 11:42:42 +00001178 // Insert three speech packets. Three are needed to get the frame length
henrik.lundin@webrtc.orgca8cb952014-03-12 10:26:52 +00001179 // correct.
henrik.lundin@webrtc.orgca8cb952014-03-12 10:26:52 +00001180 uint8_t payload[kPayloadBytes] = {0};
henrik.lundin246ef3e2017-04-24 09:14:32 -07001181 RTPHeader rtp_info;
henrik.lundin7a926812016-05-12 13:51:28 -07001182 bool muted;
henrik.lundin@webrtc.orgca8cb952014-03-12 10:26:52 +00001183 for (int i = 0; i < 3; ++i) {
1184 PopulateRtpInfo(seq_no, timestamp, &rtp_info);
henrik.lundin246ef3e2017-04-24 09:14:32 -07001185 ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
henrik.lundin@webrtc.orgca8cb952014-03-12 10:26:52 +00001186 ++seq_no;
1187 timestamp += kSamples;
1188
1189 // Pull audio once.
henrik.lundin7a926812016-05-12 13:51:28 -07001190 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
henrik.lundin6d8e0112016-03-04 10:34:21 -08001191 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
henrik.lundin@webrtc.orgca8cb952014-03-12 10:26:52 +00001192 }
1193 // Verify speech output.
henrik.lundin55480f52016-03-08 02:37:57 -08001194 EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_);
henrik.lundin@webrtc.orgca8cb952014-03-12 10:26:52 +00001195
1196 // Insert same CNG packet twice.
1197 const int kCngPeriodMs = 100;
1198 const int kCngPeriodSamples = kCngPeriodMs * kSampleRateKhz;
pkasting@chromium.org4591fbd2014-11-20 22:28:14 +00001199 size_t payload_len;
henrik.lundin@webrtc.orgca8cb952014-03-12 10:26:52 +00001200 PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len);
1201 // This is the first time this CNG packet is inserted.
henrik.lundin246ef3e2017-04-24 09:14:32 -07001202 ASSERT_EQ(
1203 0, neteq_->InsertPacket(
1204 rtp_info, rtc::ArrayView<const uint8_t>(payload, payload_len), 0));
henrik.lundin@webrtc.orgca8cb952014-03-12 10:26:52 +00001205
1206 // Pull audio once and make sure CNG is played.
henrik.lundin7a926812016-05-12 13:51:28 -07001207 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
henrik.lundin6d8e0112016-03-04 10:34:21 -08001208 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
henrik.lundin55480f52016-03-08 02:37:57 -08001209 EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
henrik.lundin0d96ab72016-04-06 12:28:26 -07001210 EXPECT_FALSE(PlayoutTimestamp()); // Returns empty value during CNG.
1211 EXPECT_EQ(timestamp - algorithmic_delay_samples,
1212 out_frame_.timestamp_ + out_frame_.samples_per_channel_);
henrik.lundin@webrtc.orgca8cb952014-03-12 10:26:52 +00001213
1214 // Insert the same CNG packet again. Note that at this point it is old, since
1215 // we have already decoded the first copy of it.
henrik.lundin246ef3e2017-04-24 09:14:32 -07001216 ASSERT_EQ(
1217 0, neteq_->InsertPacket(
1218 rtp_info, rtc::ArrayView<const uint8_t>(payload, payload_len), 0));
henrik.lundin@webrtc.orgca8cb952014-03-12 10:26:52 +00001219
1220 // Pull audio until we have played |kCngPeriodMs| of CNG. Start at 10 ms since
1221 // we have already pulled out CNG once.
1222 for (int cng_time_ms = 10; cng_time_ms < kCngPeriodMs; cng_time_ms += 10) {
henrik.lundin7a926812016-05-12 13:51:28 -07001223 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
henrik.lundin6d8e0112016-03-04 10:34:21 -08001224 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
henrik.lundin55480f52016-03-08 02:37:57 -08001225 EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
henrik.lundin0d96ab72016-04-06 12:28:26 -07001226 EXPECT_FALSE(PlayoutTimestamp()); // Returns empty value during CNG.
turaj@webrtc.org8d1cdaa2014-04-11 18:47:55 +00001227 EXPECT_EQ(timestamp - algorithmic_delay_samples,
henrik.lundin0d96ab72016-04-06 12:28:26 -07001228 out_frame_.timestamp_ + out_frame_.samples_per_channel_);
henrik.lundin@webrtc.orgca8cb952014-03-12 10:26:52 +00001229 }
1230
1231 // Insert speech again.
1232 ++seq_no;
1233 timestamp += kCngPeriodSamples;
1234 PopulateRtpInfo(seq_no, timestamp, &rtp_info);
henrik.lundin246ef3e2017-04-24 09:14:32 -07001235 ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
henrik.lundin@webrtc.orgca8cb952014-03-12 10:26:52 +00001236
1237 // Pull audio once and verify that the output is speech again.
henrik.lundin7a926812016-05-12 13:51:28 -07001238 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
henrik.lundin6d8e0112016-03-04 10:34:21 -08001239 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
henrik.lundin55480f52016-03-08 02:37:57 -08001240 EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_);
henrik.lundin0d96ab72016-04-06 12:28:26 -07001241 rtc::Optional<uint32_t> playout_timestamp = PlayoutTimestamp();
1242 ASSERT_TRUE(playout_timestamp);
turaj@webrtc.org8d1cdaa2014-04-11 18:47:55 +00001243 EXPECT_EQ(timestamp + kSamples - algorithmic_delay_samples,
henrik.lundin0d96ab72016-04-06 12:28:26 -07001244 *playout_timestamp);
wu@webrtc.org94454b72014-06-05 20:34:08 +00001245}
1246
henrik.lundin0d96ab72016-04-06 12:28:26 -07001247rtc::Optional<uint32_t> NetEqDecodingTest::PlayoutTimestamp() {
1248 return neteq_->GetPlayoutTimestamp();
henrik.lundin@webrtc.orgca8cb952014-03-12 10:26:52 +00001249}
1250
1251TEST_F(NetEqDecodingTest, DiscardDuplicateCng) { DuplicateCng(); }
henrik.lundin@webrtc.orgc93437e2014-12-01 11:42:42 +00001252
// Checks that NetEq can start up on a CNG packet: the first pull produces
// comfort noise, and decoding transitions to normal speech once speech
// packets arrive.
TEST_F(NetEqDecodingTest, CngFirst) {
  uint16_t seq_no = 0;
  uint32_t timestamp = 0;
  const int kFrameSizeMs = 10;
  const int kSampleRateKhz = 16;
  const int kSamples = kFrameSizeMs * kSampleRateKhz;
  const int kPayloadBytes = kSamples * 2;
  const int kCngPeriodMs = 100;
  const int kCngPeriodSamples = kCngPeriodMs * kSampleRateKhz;
  size_t payload_len;

  uint8_t payload[kPayloadBytes] = {0};
  RTPHeader rtp_info;

  // The very first packet fed to NetEq is a CNG packet.
  PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len);
  ASSERT_EQ(
      NetEq::kOK,
      neteq_->InsertPacket(
          rtp_info, rtc::ArrayView<const uint8_t>(payload, payload_len), 0));
  ++seq_no;
  timestamp += kCngPeriodSamples;

  // Pull audio once and make sure CNG is played.
  bool muted;
  ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
  ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
  EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);

  // Insert some speech packets.
  const uint32_t first_speech_timestamp = timestamp;
  int timeout_counter = 0;
  do {
    // Cap the number of iterations so a regression cannot hang the test.
    ASSERT_LT(timeout_counter++, 20) << "Test timed out";
    PopulateRtpInfo(seq_no, timestamp, &rtp_info);
    ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
    ++seq_no;
    timestamp += kSamples;

    // Pull audio once.
    ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
    ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
  } while (!IsNewerTimestamp(out_frame_.timestamp_, first_speech_timestamp));
  // Verify speech output.
  EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_);
}
henrik.lundin7a926812016-05-12 13:51:28 -07001298
1299class NetEqDecodingTestWithMutedState : public NetEqDecodingTest {
1300 public:
1301 NetEqDecodingTestWithMutedState() : NetEqDecodingTest() {
1302 config_.enable_muted_state = true;
1303 }
1304
1305 protected:
1306 static constexpr size_t kSamples = 10 * 16;
1307 static constexpr size_t kPayloadBytes = kSamples * 2;
1308
1309 void InsertPacket(uint32_t rtp_timestamp) {
1310 uint8_t payload[kPayloadBytes] = {0};
henrik.lundin246ef3e2017-04-24 09:14:32 -07001311 RTPHeader rtp_info;
henrik.lundin7a926812016-05-12 13:51:28 -07001312 PopulateRtpInfo(0, rtp_timestamp, &rtp_info);
henrik.lundin246ef3e2017-04-24 09:14:32 -07001313 EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
henrik.lundin7a926812016-05-12 13:51:28 -07001314 }
1315
henrik.lundin42feb512016-09-20 06:51:40 -07001316 void InsertCngPacket(uint32_t rtp_timestamp) {
1317 uint8_t payload[kPayloadBytes] = {0};
henrik.lundin246ef3e2017-04-24 09:14:32 -07001318 RTPHeader rtp_info;
henrik.lundin42feb512016-09-20 06:51:40 -07001319 size_t payload_len;
1320 PopulateCng(0, rtp_timestamp, &rtp_info, payload, &payload_len);
henrik.lundin246ef3e2017-04-24 09:14:32 -07001321 EXPECT_EQ(
1322 NetEq::kOK,
1323 neteq_->InsertPacket(
1324 rtp_info, rtc::ArrayView<const uint8_t>(payload, payload_len), 0));
henrik.lundin42feb512016-09-20 06:51:40 -07001325 }
1326
henrik.lundin7a926812016-05-12 13:51:28 -07001327 bool GetAudioReturnMuted() {
1328 bool muted;
1329 EXPECT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
1330 return muted;
1331 }
1332
1333 void GetAudioUntilMuted() {
1334 while (!GetAudioReturnMuted()) {
1335 ASSERT_LT(counter_++, 1000) << "Test timed out";
1336 }
1337 }
1338
1339 void GetAudioUntilNormal() {
1340 bool muted = false;
1341 while (out_frame_.speech_type_ != AudioFrame::kNormalSpeech) {
1342 EXPECT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
1343 ASSERT_LT(counter_++, 1000) << "Test timed out";
1344 }
1345 EXPECT_FALSE(muted);
1346 }
1347
1348 int counter_ = 0;
1349};
1350
// Verifies that NetEq goes in and out of muted state as expected: audio fades
// to muted when packets stop arriving, the output buffer is left untouched
// while muted, and decoding resumes when new packets arrive.
TEST_F(NetEqDecodingTestWithMutedState, MutedState) {
  // Insert one speech packet.
  InsertPacket(0);
  // Pull out audio once and expect it not to be muted.
  EXPECT_FALSE(GetAudioReturnMuted());
  // Pull data until faded out.
  GetAudioUntilMuted();

  // Verify that output audio is not written during muted mode. Other parameters
  // should be correct, though.
  AudioFrame new_frame;
  // Pre-fill the sample buffer with a sentinel value, to detect any write.
  for (auto& d : new_frame.data_) {
    d = 17;
  }
  bool muted;
  EXPECT_EQ(0, neteq_->GetAudio(&new_frame, &muted));
  EXPECT_TRUE(muted);
  // The sentinel must still be there: muted mode leaves data_ untouched.
  for (auto d : new_frame.data_) {
    EXPECT_EQ(17, d);
  }
  // Metadata must still advance/match as if audio had been produced.
  EXPECT_EQ(out_frame_.timestamp_ + out_frame_.samples_per_channel_,
            new_frame.timestamp_);
  EXPECT_EQ(out_frame_.samples_per_channel_, new_frame.samples_per_channel_);
  EXPECT_EQ(out_frame_.sample_rate_hz_, new_frame.sample_rate_hz_);
  EXPECT_EQ(out_frame_.num_channels_, new_frame.num_channels_);
  EXPECT_EQ(out_frame_.speech_type_, new_frame.speech_type_);
  EXPECT_EQ(out_frame_.vad_activity_, new_frame.vad_activity_);

  // Insert new data. Timestamp is corrected for the time elapsed since the last
  // packet. Verify that normal operation resumes.
  InsertPacket(kSamples * counter_);
  GetAudioUntilNormal();

  NetEqNetworkStatistics stats;
  EXPECT_EQ(0, neteq_->NetworkStatistics(&stats));
  // NetEqNetworkStatistics::expand_rate tells the fraction of samples that were
  // concealment samples, in Q14 (16384 = 100%). The vast majority should be
  // concealment samples in this test.
  EXPECT_GT(stats.expand_rate, 14000);
  // And, it should be greater than the speech_expand_rate.
  EXPECT_GT(stats.expand_rate, stats.speech_expand_rate);
}
1394
1395// Verifies that NetEq goes out of muted state when given a delayed packet.
1396TEST_F(NetEqDecodingTestWithMutedState, MutedStateDelayedPacket) {
1397 // Insert one speech packet.
1398 InsertPacket(0);
1399 // Pull out audio once and expect it not to be muted.
1400 EXPECT_FALSE(GetAudioReturnMuted());
1401 // Pull data until faded out.
1402 GetAudioUntilMuted();
1403 // Insert new data. Timestamp is only corrected for the half of the time
1404 // elapsed since the last packet. That is, the new packet is delayed. Verify
1405 // that normal operation resumes.
1406 InsertPacket(kSamples * counter_ / 2);
1407 GetAudioUntilNormal();
1408}
1409
1410// Verifies that NetEq goes out of muted state when given a future packet.
1411TEST_F(NetEqDecodingTestWithMutedState, MutedStateFuturePacket) {
1412 // Insert one speech packet.
1413 InsertPacket(0);
1414 // Pull out audio once and expect it not to be muted.
1415 EXPECT_FALSE(GetAudioReturnMuted());
1416 // Pull data until faded out.
1417 GetAudioUntilMuted();
1418 // Insert new data. Timestamp is over-corrected for the time elapsed since the
1419 // last packet. That is, the new packet is too early. Verify that normal
1420 // operation resumes.
1421 InsertPacket(kSamples * counter_ * 2);
1422 GetAudioUntilNormal();
1423}
1424
1425// Verifies that NetEq goes out of muted state when given an old packet.
1426TEST_F(NetEqDecodingTestWithMutedState, MutedStateOldPacket) {
1427 // Insert one speech packet.
1428 InsertPacket(0);
1429 // Pull out audio once and expect it not to be muted.
1430 EXPECT_FALSE(GetAudioReturnMuted());
1431 // Pull data until faded out.
1432 GetAudioUntilMuted();
1433
1434 EXPECT_NE(AudioFrame::kNormalSpeech, out_frame_.speech_type_);
1435 // Insert packet which is older than the first packet.
1436 InsertPacket(kSamples * (counter_ - 1000));
1437 EXPECT_FALSE(GetAudioReturnMuted());
1438 EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_);
1439}
1440
henrik.lundin42feb512016-09-20 06:51:40 -07001441// Verifies that NetEq doesn't enter muted state when CNG mode is active and the
1442// packet stream is suspended for a long time.
1443TEST_F(NetEqDecodingTestWithMutedState, DoNotMuteExtendedCngWithoutPackets) {
1444 // Insert one CNG packet.
1445 InsertCngPacket(0);
1446
1447 // Pull 10 seconds of audio (10 ms audio generated per lap).
1448 for (int i = 0; i < 1000; ++i) {
1449 bool muted;
1450 EXPECT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
1451 ASSERT_FALSE(muted);
1452 }
1453 EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
1454}
1455
1456// Verifies that NetEq goes back to normal after a long CNG period with the
1457// packet stream suspended.
1458TEST_F(NetEqDecodingTestWithMutedState, RecoverAfterExtendedCngWithoutPackets) {
1459 // Insert one CNG packet.
1460 InsertCngPacket(0);
1461
1462 // Pull 10 seconds of audio (10 ms audio generated per lap).
1463 for (int i = 0; i < 1000; ++i) {
1464 bool muted;
1465 EXPECT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
1466 }
1467
1468 // Insert new data. Timestamp is corrected for the time elapsed since the last
1469 // packet. Verify that normal operation resumes.
1470 InsertPacket(kSamples * counter_);
1471 GetAudioUntilNormal();
1472}
1473
henrik.lundin7a926812016-05-12 13:51:28 -07001474class NetEqDecodingTestTwoInstances : public NetEqDecodingTest {
1475 public:
1476 NetEqDecodingTestTwoInstances() : NetEqDecodingTest() {}
1477
1478 void SetUp() override {
1479 NetEqDecodingTest::SetUp();
1480 config2_ = config_;
1481 }
1482
1483 void CreateSecondInstance() {
ossue3525782016-05-25 07:37:43 -07001484 neteq2_.reset(NetEq::Create(config2_, CreateBuiltinAudioDecoderFactory()));
henrik.lundin7a926812016-05-12 13:51:28 -07001485 ASSERT_TRUE(neteq2_);
1486 LoadDecoders(neteq2_.get());
1487 }
1488
1489 protected:
1490 std::unique_ptr<NetEq> neteq2_;
1491 NetEq::Config config2_;
1492};
1493
1494namespace {
1495::testing::AssertionResult AudioFramesEqualExceptData(const AudioFrame& a,
1496 const AudioFrame& b) {
1497 if (a.timestamp_ != b.timestamp_)
1498 return ::testing::AssertionFailure() << "timestamp_ diff (" << a.timestamp_
1499 << " != " << b.timestamp_ << ")";
1500 if (a.sample_rate_hz_ != b.sample_rate_hz_)
1501 return ::testing::AssertionFailure() << "sample_rate_hz_ diff ("
1502 << a.sample_rate_hz_
1503 << " != " << b.sample_rate_hz_ << ")";
1504 if (a.samples_per_channel_ != b.samples_per_channel_)
1505 return ::testing::AssertionFailure()
1506 << "samples_per_channel_ diff (" << a.samples_per_channel_
1507 << " != " << b.samples_per_channel_ << ")";
1508 if (a.num_channels_ != b.num_channels_)
1509 return ::testing::AssertionFailure() << "num_channels_ diff ("
1510 << a.num_channels_
1511 << " != " << b.num_channels_ << ")";
1512 if (a.speech_type_ != b.speech_type_)
1513 return ::testing::AssertionFailure() << "speech_type_ diff ("
1514 << a.speech_type_
1515 << " != " << b.speech_type_ << ")";
1516 if (a.vad_activity_ != b.vad_activity_)
1517 return ::testing::AssertionFailure() << "vad_activity_ diff ("
1518 << a.vad_activity_
1519 << " != " << b.vad_activity_ << ")";
1520 return ::testing::AssertionSuccess();
1521}
1522
1523::testing::AssertionResult AudioFramesEqual(const AudioFrame& a,
1524 const AudioFrame& b) {
1525 ::testing::AssertionResult res = AudioFramesEqualExceptData(a, b);
1526 if (!res)
1527 return res;
1528 if (memcmp(
1529 a.data_, b.data_,
1530 a.samples_per_channel_ * a.num_channels_ * sizeof(a.data_[0])) != 0) {
1531 return ::testing::AssertionFailure() << "data_ diff";
1532 }
1533 return ::testing::AssertionSuccess();
1534}
1535
1536} // namespace
1537
// Runs two NetEq instances in lockstep — one with muted state enabled, one
// without — and verifies that their outputs stay identical, except that the
// muted instance leaves the sample data unwritten while muted.
TEST_F(NetEqDecodingTestTwoInstances, CompareMutedStateOnOff) {
  ASSERT_FALSE(config_.enable_muted_state);
  config2_.enable_muted_state = true;
  CreateSecondInstance();

  // Insert one speech packet into both NetEqs.
  const size_t kSamples = 10 * 16;
  const size_t kPayloadBytes = kSamples * 2;
  uint8_t payload[kPayloadBytes] = {0};
  RTPHeader rtp_info;
  PopulateRtpInfo(0, 0, &rtp_info);
  EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
  EXPECT_EQ(0, neteq2_->InsertPacket(rtp_info, payload, 0));

  AudioFrame out_frame1, out_frame2;
  bool muted;
  // Pull 10 seconds of audio from both instances. The non-muted instance
  // (neteq_) must never report muted; while the second instance is muted,
  // only metadata is compared.
  for (int i = 0; i < 1000; ++i) {
    std::ostringstream ss;
    ss << "i = " << i;
    SCOPED_TRACE(ss.str());  // Print out the loop iterator on failure.
    EXPECT_EQ(0, neteq_->GetAudio(&out_frame1, &muted));
    EXPECT_FALSE(muted);
    EXPECT_EQ(0, neteq2_->GetAudio(&out_frame2, &muted));
    if (muted) {
      EXPECT_TRUE(AudioFramesEqualExceptData(out_frame1, out_frame2));
    } else {
      EXPECT_TRUE(AudioFramesEqual(out_frame1, out_frame2));
    }
  }
  // By now the second instance must have entered muted state.
  EXPECT_TRUE(muted);

  // Insert new data. Timestamp is corrected for the time elapsed since the last
  // packet.
  PopulateRtpInfo(0, kSamples * 1000, &rtp_info);
  EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
  EXPECT_EQ(0, neteq2_->InsertPacket(rtp_info, payload, 0));

  // Pull audio until normal speech resumes; outputs must stay in sync the
  // whole way, and the muted instance must leave muted state.
  int counter = 0;
  while (out_frame1.speech_type_ != AudioFrame::kNormalSpeech) {
    ASSERT_LT(counter++, 1000) << "Test timed out";
    std::ostringstream ss;
    ss << "counter = " << counter;
    SCOPED_TRACE(ss.str());  // Print out the loop iterator on failure.
    EXPECT_EQ(0, neteq_->GetAudio(&out_frame1, &muted));
    EXPECT_FALSE(muted);
    EXPECT_EQ(0, neteq2_->GetAudio(&out_frame2, &muted));
    if (muted) {
      EXPECT_TRUE(AudioFramesEqualExceptData(out_frame1, out_frame2));
    } else {
      EXPECT_TRUE(AudioFramesEqual(out_frame1, out_frame2));
    }
  }
  EXPECT_FALSE(muted);
}
1592
henrik.lundin@webrtc.orge7ce4372014-01-09 14:01:55 +00001593} // namespace webrtc