blob: ef31f4b56e385a91c56597fb86968cde747a638e [file] [log] [blame]
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +00001/*
2 * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
Mirko Bonadei92ea95e2017-09-15 06:47:31 +020011#include "modules/audio_coding/neteq/include/neteq.h"
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +000012
pbos@webrtc.org3ecc1622014-03-07 15:23:34 +000013#include <math.h>
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +000014#include <stdlib.h>
15#include <string.h> // memset
16
turaj@webrtc.org8d1cdaa2014-04-11 18:47:55 +000017#include <algorithm>
kwiberg2d0c3322016-02-14 09:28:33 -080018#include <memory>
turaj@webrtc.org78b41a02013-11-22 20:27:07 +000019#include <set>
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +000020#include <string>
21#include <vector>
22
Mirko Bonadei92ea95e2017-09-15 06:47:31 +020023#include "api/audio_codecs/builtin_audio_decoder_factory.h"
Mirko Bonadei71207422017-09-15 13:58:09 +020024#include "common_types.h" // NOLINT(build/include)
Mirko Bonadei92ea95e2017-09-15 06:47:31 +020025#include "modules/audio_coding/codecs/pcm16b/pcm16b.h"
26#include "modules/audio_coding/neteq/tools/audio_loop.h"
27#include "modules/audio_coding/neteq/tools/rtp_file_source.h"
28#include "modules/include/module_common_types.h"
Mirko Bonadei92ea95e2017-09-15 06:47:31 +020029#include "rtc_base/ignore_wundef.h"
Joachim Bauch4e909192017-12-19 22:27:51 +010030#include "rtc_base/messagedigest.h"
Karl Wiberge40468b2017-11-22 10:42:26 +010031#include "rtc_base/numerics/safe_conversions.h"
Mirko Bonadei92ea95e2017-09-15 06:47:31 +020032#include "rtc_base/protobuf_utils.h"
Mirko Bonadei92ea95e2017-09-15 06:47:31 +020033#include "rtc_base/stringencode.h"
Henrik Lundine9619f82017-11-27 14:05:27 +010034#include "test/field_trial.h"
Mirko Bonadei92ea95e2017-09-15 06:47:31 +020035#include "test/gtest.h"
36#include "test/testsupport/fileutils.h"
Mirko Bonadei71207422017-09-15 13:58:09 +020037#include "typedefs.h" // NOLINT(build/include)
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +000038
Mirko Bonadei81ca3bf2018-01-09 09:40:39 +010039// This must come after test/gtest.h
40#include "rtc_base/flags.h" // NOLINT(build/include)
41
minyue5f026d02015-12-16 07:36:04 -080042#ifdef WEBRTC_NETEQ_UNITTEST_BITEXACT
kwiberg77eab702016-09-28 17:42:01 -070043RTC_PUSH_IGNORING_WUNDEF()
minyue5f026d02015-12-16 07:36:04 -080044#ifdef WEBRTC_ANDROID_PLATFORM_BUILD
45#include "external/webrtc/webrtc/modules/audio_coding/neteq/neteq_unittest.pb.h"
46#else
Mirko Bonadei92ea95e2017-09-15 06:47:31 +020047#include "modules/audio_coding/neteq/neteq_unittest.pb.h"
minyue5f026d02015-12-16 07:36:04 -080048#endif
kwiberg77eab702016-09-28 17:42:01 -070049RTC_POP_IGNORING_WUNDEF()
minyue5f026d02015-12-16 07:36:04 -080050#endif
51
turaj@webrtc.orga6101d72013-10-01 22:01:09 +000052DEFINE_bool(gen_ref, false, "Generate reference files.");
53
kwiberg5adaf732016-10-04 09:33:27 -070054namespace webrtc {
55
minyue5f026d02015-12-16 07:36:04 -080056namespace {
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +000057
// Selects the reference checksum matching the platform this test binary was
// built for. Separate references exist because codec output is not bit-exact
// across architectures.
const std::string& PlatformChecksum(const std::string& checksum_general,
                                    const std::string& checksum_android_32,
                                    const std::string& checksum_android_64,
                                    const std::string& checksum_win_32,
                                    const std::string& checksum_win_64) {
#if defined(WEBRTC_ANDROID)
#if defined(WEBRTC_ARCH_64_BITS)
  return checksum_android_64;
#else
  return checksum_android_32;
#endif  // WEBRTC_ARCH_64_BITS
#elif defined(WEBRTC_WIN)
#if defined(WEBRTC_ARCH_64_BITS)
  return checksum_win_64;
#else
  return checksum_win_32;
#endif  // WEBRTC_ARCH_64_BITS
#else
  // All remaining platforms (e.g. Linux, Mac) share one reference.
  return checksum_general;
#endif  // WEBRTC_WIN
}
79
minyue5f026d02015-12-16 07:36:04 -080080#ifdef WEBRTC_NETEQ_UNITTEST_BITEXACT
81void Convert(const webrtc::NetEqNetworkStatistics& stats_raw,
82 webrtc::neteq_unittest::NetEqNetworkStatistics* stats) {
83 stats->set_current_buffer_size_ms(stats_raw.current_buffer_size_ms);
84 stats->set_preferred_buffer_size_ms(stats_raw.preferred_buffer_size_ms);
85 stats->set_jitter_peaks_found(stats_raw.jitter_peaks_found);
86 stats->set_packet_loss_rate(stats_raw.packet_loss_rate);
minyue5f026d02015-12-16 07:36:04 -080087 stats->set_expand_rate(stats_raw.expand_rate);
88 stats->set_speech_expand_rate(stats_raw.speech_expand_rate);
89 stats->set_preemptive_rate(stats_raw.preemptive_rate);
90 stats->set_accelerate_rate(stats_raw.accelerate_rate);
91 stats->set_secondary_decoded_rate(stats_raw.secondary_decoded_rate);
minyue-webrtc0c3ca752017-08-23 15:59:38 +020092 stats->set_secondary_discarded_rate(stats_raw.secondary_discarded_rate);
minyue5f026d02015-12-16 07:36:04 -080093 stats->set_clockdrift_ppm(stats_raw.clockdrift_ppm);
94 stats->set_added_zero_samples(stats_raw.added_zero_samples);
95 stats->set_mean_waiting_time_ms(stats_raw.mean_waiting_time_ms);
96 stats->set_median_waiting_time_ms(stats_raw.median_waiting_time_ms);
97 stats->set_min_waiting_time_ms(stats_raw.min_waiting_time_ms);
98 stats->set_max_waiting_time_ms(stats_raw.max_waiting_time_ms);
99}
100
101void Convert(const webrtc::RtcpStatistics& stats_raw,
102 webrtc::neteq_unittest::RtcpStatistics* stats) {
103 stats->set_fraction_lost(stats_raw.fraction_lost);
srte186d9c32017-08-04 05:03:53 -0700104 stats->set_cumulative_lost(stats_raw.packets_lost);
minyue5f026d02015-12-16 07:36:04 -0800105 stats->set_extended_max_sequence_number(
srte186d9c32017-08-04 05:03:53 -0700106 stats_raw.extended_highest_sequence_number);
minyue5f026d02015-12-16 07:36:04 -0800107 stats->set_jitter(stats_raw.jitter);
108}
109
minyue4f906772016-04-29 11:05:14 -0700110void AddMessage(FILE* file, rtc::MessageDigest* digest,
111 const std::string& message) {
minyue5f026d02015-12-16 07:36:04 -0800112 int32_t size = message.length();
minyue4f906772016-04-29 11:05:14 -0700113 if (file)
114 ASSERT_EQ(1u, fwrite(&size, sizeof(size), 1, file));
115 digest->Update(&size, sizeof(size));
116
117 if (file)
118 ASSERT_EQ(static_cast<size_t>(size),
119 fwrite(message.data(), sizeof(char), size, file));
120 digest->Update(message.data(), sizeof(char) * size);
minyue5f026d02015-12-16 07:36:04 -0800121}
122
minyue5f026d02015-12-16 07:36:04 -0800123#endif // WEBRTC_NETEQ_UNITTEST_BITEXACT
124
henrik.lundin7a926812016-05-12 13:51:28 -0700125void LoadDecoders(webrtc::NetEq* neteq) {
kwiberg5adaf732016-10-04 09:33:27 -0700126 ASSERT_EQ(true,
127 neteq->RegisterPayloadType(0, SdpAudioFormat("pcmu", 8000, 1)));
128 // Use non-SdpAudioFormat argument when registering PCMa, so that we get test
129 // coverage for that as well.
henrik.lundin7a926812016-05-12 13:51:28 -0700130 ASSERT_EQ(0, neteq->RegisterPayloadType(webrtc::NetEqDecoder::kDecoderPCMa,
131 "pcma", 8));
132#ifdef WEBRTC_CODEC_ILBC
kwiberg5adaf732016-10-04 09:33:27 -0700133 ASSERT_EQ(true,
134 neteq->RegisterPayloadType(102, SdpAudioFormat("ilbc", 8000, 1)));
henrik.lundin7a926812016-05-12 13:51:28 -0700135#endif
136#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
kwiberg5adaf732016-10-04 09:33:27 -0700137 ASSERT_EQ(true,
138 neteq->RegisterPayloadType(103, SdpAudioFormat("isac", 16000, 1)));
henrik.lundin7a926812016-05-12 13:51:28 -0700139#endif
140#ifdef WEBRTC_CODEC_ISAC
kwiberg5adaf732016-10-04 09:33:27 -0700141 ASSERT_EQ(true,
142 neteq->RegisterPayloadType(104, SdpAudioFormat("isac", 32000, 1)));
henrik.lundin7a926812016-05-12 13:51:28 -0700143#endif
144#ifdef WEBRTC_CODEC_OPUS
kwiberg5adaf732016-10-04 09:33:27 -0700145 ASSERT_EQ(true,
146 neteq->RegisterPayloadType(
147 111, SdpAudioFormat("opus", 48000, 2, {{"stereo", "0"}})));
henrik.lundin7a926812016-05-12 13:51:28 -0700148#endif
kwiberg5adaf732016-10-04 09:33:27 -0700149 ASSERT_EQ(true,
150 neteq->RegisterPayloadType(93, SdpAudioFormat("L16", 8000, 1)));
151 ASSERT_EQ(true,
152 neteq->RegisterPayloadType(94, SdpAudioFormat("L16", 16000, 1)));
153 ASSERT_EQ(true,
154 neteq->RegisterPayloadType(95, SdpAudioFormat("L16", 32000, 1)));
155 ASSERT_EQ(true,
156 neteq->RegisterPayloadType(13, SdpAudioFormat("cn", 8000, 1)));
157 ASSERT_EQ(true,
158 neteq->RegisterPayloadType(98, SdpAudioFormat("cn", 16000, 1)));
henrik.lundin7a926812016-05-12 13:51:28 -0700159}
minyue5f026d02015-12-16 07:36:04 -0800160} // namespace
161
// Accumulates test results into a SHA-1 digest for checksum comparison, and
// optionally mirrors them to a reference file on disk (used with --gen_ref).
class ResultSink {
 public:
  // An empty |output_file| disables file output; only the digest is kept.
  explicit ResultSink(const std::string& output_file);
  ~ResultSink();

  // Adds |length| raw elements of type T to the digest (and file, if open).
  template<typename T> void AddResult(const T* test_results, size_t length);

  // Adds a serialized protobuf representation of the statistics struct.
  void AddResult(const NetEqNetworkStatistics& stats);
  void AddResult(const RtcpStatistics& stats);

  // Finalizes the digest and EXPECTs its hex encoding to equal
  // |ref_check_sum|.
  void VerifyChecksum(const std::string& ref_check_sum);

 private:
  FILE* output_fp_;  // Null when no reference file is being written.
  std::unique_ptr<rtc::MessageDigest> digest_;
};
178
Joachim Bauch4e909192017-12-19 22:27:51 +0100179ResultSink::ResultSink(const std::string& output_file)
minyue4f906772016-04-29 11:05:14 -0700180 : output_fp_(nullptr),
Joachim Bauch4e909192017-12-19 22:27:51 +0100181 digest_(rtc::MessageDigestFactory::Create(rtc::DIGEST_SHA_1)) {
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000182 if (!output_file.empty()) {
183 output_fp_ = fopen(output_file.c_str(), "wb");
184 EXPECT_TRUE(output_fp_ != NULL);
185 }
186}
187
minyue4f906772016-04-29 11:05:14 -0700188ResultSink::~ResultSink() {
189 if (output_fp_)
190 fclose(output_fp_);
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000191}
192
yujo36b1a5f2017-06-12 12:45:32 -0700193template<typename T>
194void ResultSink::AddResult(const T* test_results, size_t length) {
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000195 if (output_fp_) {
yujo36b1a5f2017-06-12 12:45:32 -0700196 ASSERT_EQ(length, fwrite(test_results, sizeof(T), length, output_fp_));
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000197 }
yujo36b1a5f2017-06-12 12:45:32 -0700198 digest_->Update(test_results, sizeof(T) * length);
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000199}
200
minyue4f906772016-04-29 11:05:14 -0700201void ResultSink::AddResult(const NetEqNetworkStatistics& stats_raw) {
minyue5f026d02015-12-16 07:36:04 -0800202#ifdef WEBRTC_NETEQ_UNITTEST_BITEXACT
minyue5f026d02015-12-16 07:36:04 -0800203 neteq_unittest::NetEqNetworkStatistics stats;
204 Convert(stats_raw, &stats);
205
mbonadei7c2c8432017-04-07 00:59:12 -0700206 ProtoString stats_string;
minyue5f026d02015-12-16 07:36:04 -0800207 ASSERT_TRUE(stats.SerializeToString(&stats_string));
minyue4f906772016-04-29 11:05:14 -0700208 AddMessage(output_fp_, digest_.get(), stats_string);
minyue5f026d02015-12-16 07:36:04 -0800209#else
210 FAIL() << "Writing to reference file requires Proto Buffer.";
211#endif // WEBRTC_NETEQ_UNITTEST_BITEXACT
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000212}
213
minyue4f906772016-04-29 11:05:14 -0700214void ResultSink::AddResult(const RtcpStatistics& stats_raw) {
minyue5f026d02015-12-16 07:36:04 -0800215#ifdef WEBRTC_NETEQ_UNITTEST_BITEXACT
minyue5f026d02015-12-16 07:36:04 -0800216 neteq_unittest::RtcpStatistics stats;
217 Convert(stats_raw, &stats);
218
mbonadei7c2c8432017-04-07 00:59:12 -0700219 ProtoString stats_string;
minyue5f026d02015-12-16 07:36:04 -0800220 ASSERT_TRUE(stats.SerializeToString(&stats_string));
minyue4f906772016-04-29 11:05:14 -0700221 AddMessage(output_fp_, digest_.get(), stats_string);
minyue5f026d02015-12-16 07:36:04 -0800222#else
223 FAIL() << "Writing to reference file requires Proto Buffer.";
224#endif // WEBRTC_NETEQ_UNITTEST_BITEXACT
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000225}
226
minyue4f906772016-04-29 11:05:14 -0700227void ResultSink::VerifyChecksum(const std::string& checksum) {
228 std::vector<char> buffer;
229 buffer.resize(digest_->Size());
230 digest_->Finish(&buffer[0], buffer.size());
231 const std::string result = rtc::hex_encode(&buffer[0], digest_->Size());
232 EXPECT_EQ(checksum, result);
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000233}
234
// Base fixture for NetEq decoding tests: owns a NetEq instance, a source of
// RTP packets read from file, and a simulated 10 ms clock driven by Process().
class NetEqDecodingTest : public ::testing::Test {
 protected:
  // NetEQ must be polled for data once every 10 ms. Thus, neither of the
  // constants below can be changed.
  static const int kTimeStepMs = 10;
  // Samples per 10 ms block at each supported output rate.
  static const size_t kBlockSize8kHz = kTimeStepMs * 8;
  static const size_t kBlockSize16kHz = kTimeStepMs * 16;
  static const size_t kBlockSize32kHz = kTimeStepMs * 32;
  static const size_t kBlockSize48kHz = kTimeStepMs * 48;
  static const int kInitSampleRateHz = 8000;

  NetEqDecodingTest();
  virtual void SetUp();
  virtual void TearDown();
  void SelectDecoders(NetEqDecoder* used_codec);
  void OpenInputFile(const std::string &rtp_file);
  // Inserts all due packets and pulls one 10 ms frame, advancing sim_clock_.
  void Process();

  // Decodes |rtp_file| in full, verifying audio, network-stats and rtcp-stats
  // checksums; writes new reference files instead when |gen_ref| is set.
  void DecodeAndCompare(const std::string& rtp_file,
                        const std::string& output_checksum,
                        const std::string& network_stats_checksum,
                        const std::string& rtcp_stats_checksum,
                        bool gen_ref);

  // Fills an RTP header for a PCM16b-WB (payload type 94) speech packet.
  static void PopulateRtpInfo(int frame_index,
                              int timestamp,
                              RTPHeader* rtp_info);
  // Fills an RTP header and a one-byte SID payload for WB CNG (type 98).
  static void PopulateCng(int frame_index,
                          int timestamp,
                          RTPHeader* rtp_info,
                          uint8_t* payload,
                          size_t* payload_len);

  void WrapTest(uint16_t start_seq_no, uint32_t start_timestamp,
                const std::set<uint16_t>& drop_seq_numbers,
                bool expect_seq_no_wrap, bool expect_timestamp_wrap);

  void LongCngWithClockDrift(double drift_factor,
                             double network_freeze_ms,
                             bool pull_audio_during_freeze,
                             int delay_tolerance_ms,
                             int max_time_to_speech_ms);

  void DuplicateCng();

  NetEq* neteq_;              // Owned; created in SetUp, deleted in TearDown.
  NetEq::Config config_;
  std::unique_ptr<test::RtpFileSource> rtp_source_;
  std::unique_ptr<test::Packet> packet_;  // Next packet to insert; null at EOF.
  unsigned int sim_clock_;    // Simulated wall clock in ms.
  AudioFrame out_frame_;      // Most recent frame pulled from NetEq.
  int output_sample_rate_;
  int algorithmic_delay_ms_;  // NetEq's base delay, measured in SetUp.
};
289
290// Allocating the static const so that it can be passed by reference.
291const int NetEqDecodingTest::kTimeStepMs;
Peter Kastingdce40cf2015-08-24 14:52:23 -0700292const size_t NetEqDecodingTest::kBlockSize8kHz;
293const size_t NetEqDecodingTest::kBlockSize16kHz;
294const size_t NetEqDecodingTest::kBlockSize32kHz;
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000295const int NetEqDecodingTest::kInitSampleRateHz;
296
// Initializes all members to their pre-SetUp state; the NetEq instance itself
// is created later in SetUp(). The initial sample rate is 8 kHz.
NetEqDecodingTest::NetEqDecodingTest()
    : neteq_(NULL),
      config_(),
      sim_clock_(0),
      output_sample_rate_(kInitSampleRateHz),
      algorithmic_delay_ms_(0) {
  config_.sample_rate_hz = kInitSampleRateHz;
}
305
306void NetEqDecodingTest::SetUp() {
ossue3525782016-05-25 07:37:43 -0700307 neteq_ = NetEq::Create(config_, CreateBuiltinAudioDecoderFactory());
turaj@webrtc.org8d1cdaa2014-04-11 18:47:55 +0000308 NetEqNetworkStatistics stat;
309 ASSERT_EQ(0, neteq_->NetworkStatistics(&stat));
310 algorithmic_delay_ms_ = stat.current_buffer_size_ms;
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000311 ASSERT_TRUE(neteq_);
henrik.lundin7a926812016-05-12 13:51:28 -0700312 LoadDecoders(neteq_);
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000313}
314
// Releases the NetEq instance created in SetUp(). NetEq::Create returns a raw
// owning pointer, so an explicit delete is required here.
void NetEqDecodingTest::TearDown() {
  delete neteq_;
}
318
// Replaces the current packet source with one reading from |rtp_file|.
void NetEqDecodingTest::OpenInputFile(const std::string &rtp_file) {
  rtp_source_.reset(test::RtpFileSource::Create(rtp_file));
}
322
// Inserts every packet whose receive time is due at the current simulated
// clock, then pulls exactly one 10 ms audio frame and advances the clock.
void NetEqDecodingTest::Process() {
  // Check if time to receive.
  while (packet_ && sim_clock_ >= packet_->time_ms()) {
    if (packet_->payload_length_bytes() > 0) {
#ifndef WEBRTC_CODEC_ISAC
      // Ignore payload type 104 (iSAC-swb) if ISAC is not supported.
      // NOTE: the brace-less |if| below guards only the single ASSERT_EQ
      // statement when this #ifndef branch is compiled in.
      if (packet_->header().payloadType != 104)
#endif
      ASSERT_EQ(0,
                neteq_->InsertPacket(
                    packet_->header(),
                    rtc::ArrayView<const uint8_t>(
                        packet_->payload(), packet_->payload_length_bytes()),
                    // Receive time converted from ms to samples at the
                    // current output rate.
                    static_cast<uint32_t>(packet_->time_ms() *
                                          (output_sample_rate_ / 1000))));
    }
    // Get next packet.
    packet_ = rtp_source_->NextPacket();
  }

  // Get audio from NetEq.
  bool muted;
  ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
  ASSERT_FALSE(muted);
  // Exactly one 10 ms block, at whichever rate NetEq is currently producing.
  ASSERT_TRUE((out_frame_.samples_per_channel_ == kBlockSize8kHz) ||
              (out_frame_.samples_per_channel_ == kBlockSize16kHz) ||
              (out_frame_.samples_per_channel_ == kBlockSize32kHz) ||
              (out_frame_.samples_per_channel_ == kBlockSize48kHz));
  output_sample_rate_ = out_frame_.sample_rate_hz_;
  EXPECT_EQ(output_sample_rate_, neteq_->last_output_sample_rate_hz());

  // Increase time.
  sim_clock_ += kTimeStepMs;
}
357
// Decodes the whole RTP file 10 ms at a time, folding the output audio,
// network statistics and RTCP statistics into three checksums, which are then
// compared against the expected values. With |gen_ref| set, reference files
// are written to the output path as well.
void NetEqDecodingTest::DecodeAndCompare(
    const std::string& rtp_file,
    const std::string& output_checksum,
    const std::string& network_stats_checksum,
    const std::string& rtcp_stats_checksum,
    bool gen_ref) {
  OpenInputFile(rtp_file);

  // Each ResultSink checksums its stream; file output only when |gen_ref|.
  std::string ref_out_file =
      gen_ref ? webrtc::test::OutputPath() + "neteq_universal_ref.pcm" : "";
  ResultSink output(ref_out_file);

  std::string stat_out_file =
      gen_ref ? webrtc::test::OutputPath() + "neteq_network_stats.dat" : "";
  ResultSink network_stats(stat_out_file);

  std::string rtcp_out_file =
      gen_ref ? webrtc::test::OutputPath() + "neteq_rtcp_stats.dat" : "";
  ResultSink rtcp_stats(rtcp_out_file);

  packet_ = rtp_source_->NextPacket();
  int i = 0;
  uint64_t last_concealed_samples = 0;
  uint64_t last_total_samples_received = 0;
  while (packet_) {
    std::ostringstream ss;
    ss << "Lap number " << i++ << " in DecodeAndCompare while loop";
    SCOPED_TRACE(ss.str());  // Print out the parameter values on failure.
    ASSERT_NO_FATAL_FAILURE(Process());
    ASSERT_NO_FATAL_FAILURE(output.AddResult(
        out_frame_.data(), out_frame_.samples_per_channel_));

    // Query the network statistics API once per second
    if (sim_clock_ % 1000 == 0) {
      // Process NetworkStatistics.
      NetEqNetworkStatistics current_network_stats;
      ASSERT_EQ(0, neteq_->NetworkStatistics(&current_network_stats));
      ASSERT_NO_FATAL_FAILURE(network_stats.AddResult(current_network_stats));

      // Compare with CurrentDelay, which should be identical.
      EXPECT_EQ(current_network_stats.current_buffer_size_ms,
                neteq_->CurrentDelayMs());

      // Verify that lifetime stats and network stats report similar loss
      // concealment rates over the past second.
      auto lifetime_stats = neteq_->GetLifetimeStatistics();
      const uint64_t delta_concealed_samples =
          lifetime_stats.concealed_samples - last_concealed_samples;
      last_concealed_samples = lifetime_stats.concealed_samples;
      const uint64_t delta_total_samples_received =
          lifetime_stats.total_samples_received - last_total_samples_received;
      last_total_samples_received = lifetime_stats.total_samples_received;
      // The tolerance is 1% but expressed in Q14.
      EXPECT_NEAR(
          (delta_concealed_samples << 14) / delta_total_samples_received,
          current_network_stats.expand_rate, (2 << 14) / 100.0);

      // Process RTCPstat.
      RtcpStatistics current_rtcp_stats;
      neteq_->GetRtcpStatistics(&current_rtcp_stats);
      ASSERT_NO_FATAL_FAILURE(rtcp_stats.AddResult(current_rtcp_stats));
    }
  }

  SCOPED_TRACE("Check output audio.");
  output.VerifyChecksum(output_checksum);
  SCOPED_TRACE("Check network stats.");
  network_stats.VerifyChecksum(network_stats_checksum);
  SCOPED_TRACE("Check rtcp stats.");
  rtcp_stats.VerifyChecksum(rtcp_stats_checksum);
}
429
430void NetEqDecodingTest::PopulateRtpInfo(int frame_index,
431 int timestamp,
henrik.lundin246ef3e2017-04-24 09:14:32 -0700432 RTPHeader* rtp_info) {
433 rtp_info->sequenceNumber = frame_index;
434 rtp_info->timestamp = timestamp;
435 rtp_info->ssrc = 0x1234; // Just an arbitrary SSRC.
436 rtp_info->payloadType = 94; // PCM16b WB codec.
437 rtp_info->markerBit = 0;
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000438}
439
440void NetEqDecodingTest::PopulateCng(int frame_index,
441 int timestamp,
henrik.lundin246ef3e2017-04-24 09:14:32 -0700442 RTPHeader* rtp_info,
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000443 uint8_t* payload,
pkasting@chromium.org4591fbd2014-11-20 22:28:14 +0000444 size_t* payload_len) {
henrik.lundin246ef3e2017-04-24 09:14:32 -0700445 rtp_info->sequenceNumber = frame_index;
446 rtp_info->timestamp = timestamp;
447 rtp_info->ssrc = 0x1234; // Just an arbitrary SSRC.
448 rtp_info->payloadType = 98; // WB CNG.
449 rtp_info->markerBit = 0;
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000450 payload[0] = 64; // Noise level -64 dBov, quite arbitrarily chosen.
451 *payload_len = 1; // Only noise level, no spectral parameters.
452}
453
// The universal bit-exactness test needs protobuf support, both iSAC variants
// and iLBC, and is not run on iOS or 64-bit ARM.
#if !defined(WEBRTC_IOS) && defined(WEBRTC_NETEQ_UNITTEST_BITEXACT) && \
    (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)) && \
    defined(WEBRTC_CODEC_ILBC) && !defined(WEBRTC_ARCH_ARM64)
#define MAYBE_TestBitExactness TestBitExactness
#else
#define MAYBE_TestBitExactness DISABLED_TestBitExactness
#endif
TEST_F(NetEqDecodingTest, MAYBE_TestBitExactness) {
  const std::string input_rtp_file =
      webrtc::test::ResourcePath("audio_coding/neteq_universal_new", "rtp");

  // Checksum argument order: general, android-32, android-64, win-32, win-64.
  // "not used" marks platforms excluded by the #if condition above.
  const std::string output_checksum = PlatformChecksum(
      "09fa7646e2ad032a0b156177b95f09012430f81f",
      "1c64eb8b55ce8878676c6a1e6ddd78f48de0668b",
      "not used",
      "09fa7646e2ad032a0b156177b95f09012430f81f",
      "759fef89a5de52bd17e733dc255c671ce86be909");

  const std::string network_stats_checksum =
      PlatformChecksum("5b4262ca328e5f066af5d34f3380521583dd20de",
                       "80235b6d727281203acb63b98f9a9e85d95f7ec0",
                       "not used",
                       "5b4262ca328e5f066af5d34f3380521583dd20de",
                       "5b4262ca328e5f066af5d34f3380521583dd20de");

  const std::string rtcp_stats_checksum = PlatformChecksum(
      "b8880bf9fed2487efbddcb8d94b9937a29ae521d",
      "f3f7b3d3e71d7e635240b5373b57df6a7e4ce9d4",
      "not used",
      "b8880bf9fed2487efbddcb8d94b9937a29ae521d",
      "b8880bf9fed2487efbddcb8d94b9937a29ae521d");

  DecodeAndCompare(input_rtp_file,
                   output_checksum,
                   network_stats_checksum,
                   rtcp_stats_checksum,
                   FLAG_gen_ref);
}
492
// The Opus bit-exactness test needs protobuf support and the Opus codec, and
// is not run on iOS.
#if !defined(WEBRTC_IOS) && \
    defined(WEBRTC_NETEQ_UNITTEST_BITEXACT) && \
    defined(WEBRTC_CODEC_OPUS)
#define MAYBE_TestOpusBitExactness TestOpusBitExactness
#else
#define MAYBE_TestOpusBitExactness DISABLED_TestOpusBitExactness
#endif
TEST_F(NetEqDecodingTest, MAYBE_TestOpusBitExactness) {
  const std::string input_rtp_file =
      webrtc::test::ResourcePath("audio_coding/neteq_opus", "rtp");

  // Checksum argument order: general, android-32, android-64, win-32, win-64.
  const std::string output_checksum = PlatformChecksum(
      "7ea28d7edf9395f4ac8e8d8dd3a9e5c620b1bf48",
      "5b1e691ab1c4465c742d6d944bc71e3b1c0e4c0e",
      "b096114dd8c233eaf2b0ce9802ac95af13933772",
      "7ea28d7edf9395f4ac8e8d8dd3a9e5c620b1bf48",
      "7ea28d7edf9395f4ac8e8d8dd3a9e5c620b1bf48");

  const std::string network_stats_checksum =
      PlatformChecksum("9e72233c78baf685e500dd6c94212b30a4c5f27d",
                       "9a37270e4242fbd31e80bb47dc5e7ab82cf2d557",
                       "4f1e9734bc80a290faaf9d611efcb8d7802dbc4f",
                       "9e72233c78baf685e500dd6c94212b30a4c5f27d",
                       "9e72233c78baf685e500dd6c94212b30a4c5f27d");

  // RTCP statistics are identical on all platforms for this stream.
  const std::string rtcp_stats_checksum = PlatformChecksum(
      "e37c797e3de6a64dda88c9ade7a013d022a2e1e0",
      "e37c797e3de6a64dda88c9ade7a013d022a2e1e0",
      "e37c797e3de6a64dda88c9ade7a013d022a2e1e0",
      "e37c797e3de6a64dda88c9ade7a013d022a2e1e0",
      "e37c797e3de6a64dda88c9ade7a013d022a2e1e0");

  DecodeAndCompare(input_rtp_file,
                   output_checksum,
                   network_stats_checksum,
                   rtcp_stats_checksum,
                   FLAG_gen_ref);
}
531
Henrik Lundine9619f82017-11-27 14:05:27 +0100532// This test fixture is identical to NetEqDecodingTest, except that it enables
533// the WebRTC-NetEqOpusDtxDelayFix field trial.
534// TODO(bugs.webrtc.org/8488): When the field trial is over and the feature is
535// default enabled, remove this fixture class and let the
536// TestOpusDtxBitExactness test build directly on NetEqDecodingTest.
// Fixture that runs with the WebRTC-NetEqOpusDtxDelayFix field trial forced
// on for the duration of each test (the override is scoped to the fixture's
// lifetime).
class NetEqDecodingTestWithOpusDtxFieldTrial : public NetEqDecodingTest {
 public:
  NetEqDecodingTestWithOpusDtxFieldTrial()
      : override_field_trials_("WebRTC-NetEqOpusDtxDelayFix/Enabled/") {}

 private:
  test::ScopedFieldTrials override_field_trials_;
};
545
// The Opus DTX bit-exactness test needs protobuf support and the Opus codec,
// and is not run on iOS.
#if !defined(WEBRTC_IOS) && \
    defined(WEBRTC_NETEQ_UNITTEST_BITEXACT) && \
    defined(WEBRTC_CODEC_OPUS)
#define MAYBE_TestOpusDtxBitExactness TestOpusDtxBitExactness
#else
#define MAYBE_TestOpusDtxBitExactness DISABLED_TestOpusDtxBitExactness
#endif
TEST_F(NetEqDecodingTestWithOpusDtxFieldTrial, MAYBE_TestOpusDtxBitExactness) {
  const std::string input_rtp_file =
      webrtc::test::ResourcePath("audio_coding/neteq_opus_dtx", "rtp");

  // Checksum argument order: general, android-32, android-64, win-32, win-64.
  const std::string output_checksum =
      PlatformChecksum("713af6c92881f5aab1285765ee6680da9d1c06ce",
                       "3ec991b96872123f1554c03c543ca5d518431e46",
                       "da9f9a2d94e0c2d67342fad4965d7b91cda50b25",
                       "713af6c92881f5aab1285765ee6680da9d1c06ce",
                       "713af6c92881f5aab1285765ee6680da9d1c06ce");

  // Stats checksums are platform-independent for this stream.
  const std::string network_stats_checksum =
      "bab58dc587d956f326056d7340c96eb9d2d3cc21";

  const std::string rtcp_stats_checksum =
      "ac27a7f305efb58b39bf123dccee25dee5758e63";

  DecodeAndCompare(input_rtp_file, output_checksum, network_stats_checksum,
                   rtcp_stats_checksum, FLAG_gen_ref);
}
573
// Use fax mode to avoid time-scaling. This is to simplify the testing of
// packet waiting times in the packet buffer.
class NetEqDecodingTestFaxMode : public NetEqDecodingTest {
 protected:
  NetEqDecodingTestFaxMode() : NetEqDecodingTest() {
    config_.playout_mode = kPlayoutFax;
  }
  // Shared body for the jitter-buffer-delay tests, with or without loss.
  void TestJitterBufferDelay(bool apply_packet_loss);
};
583
584TEST_F(NetEqDecodingTestFaxMode, TestFrameWaitingTimeStatistics) {
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000585 // Insert 30 dummy packets at once. Each packet contains 10 ms 16 kHz audio.
586 size_t num_frames = 30;
pkasting@chromium.org4591fbd2014-11-20 22:28:14 +0000587 const size_t kSamples = 10 * 16;
588 const size_t kPayloadBytes = kSamples * 2;
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000589 for (size_t i = 0; i < num_frames; ++i) {
kwibergee2bac22015-11-11 10:34:00 -0800590 const uint8_t payload[kPayloadBytes] = {0};
henrik.lundin246ef3e2017-04-24 09:14:32 -0700591 RTPHeader rtp_info;
Mirko Bonadeia8110272017-10-18 14:22:50 +0200592 rtp_info.sequenceNumber = rtc::checked_cast<uint16_t>(i);
593 rtp_info.timestamp = rtc::checked_cast<uint32_t>(i * kSamples);
henrik.lundin246ef3e2017-04-24 09:14:32 -0700594 rtp_info.ssrc = 0x1234; // Just an arbitrary SSRC.
595 rtp_info.payloadType = 94; // PCM16b WB codec.
596 rtp_info.markerBit = 0;
597 ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000598 }
599 // Pull out all data.
600 for (size_t i = 0; i < num_frames; ++i) {
henrik.lundin7a926812016-05-12 13:51:28 -0700601 bool muted;
602 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
henrik.lundin6d8e0112016-03-04 10:34:21 -0800603 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000604 }
605
Henrik Lundin1bb8cf82015-08-25 13:08:04 +0200606 NetEqNetworkStatistics stats;
607 EXPECT_EQ(0, neteq_->NetworkStatistics(&stats));
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000608 // Since all frames are dumped into NetEQ at once, but pulled out with 10 ms
609 // spacing (per definition), we expect the delay to increase with 10 ms for
Henrik Lundin1bb8cf82015-08-25 13:08:04 +0200610 // each packet. Thus, we are calculating the statistics for a series from 10
611 // to 300, in steps of 10 ms.
612 EXPECT_EQ(155, stats.mean_waiting_time_ms);
613 EXPECT_EQ(155, stats.median_waiting_time_ms);
614 EXPECT_EQ(10, stats.min_waiting_time_ms);
615 EXPECT_EQ(300, stats.max_waiting_time_ms);
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000616
617 // Check statistics again and make sure it's been reset.
Henrik Lundin1bb8cf82015-08-25 13:08:04 +0200618 EXPECT_EQ(0, neteq_->NetworkStatistics(&stats));
619 EXPECT_EQ(-1, stats.mean_waiting_time_ms);
620 EXPECT_EQ(-1, stats.median_waiting_time_ms);
621 EXPECT_EQ(-1, stats.min_waiting_time_ms);
622 EXPECT_EQ(-1, stats.max_waiting_time_ms);
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000623}
624
henrik.lundin@webrtc.orgb4e80e02014-05-15 07:14:00 +0000625TEST_F(NetEqDecodingTest, TestAverageInterArrivalTimeNegative) {
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000626 const int kNumFrames = 3000; // Needed for convergence.
627 int frame_index = 0;
pkasting@chromium.org4591fbd2014-11-20 22:28:14 +0000628 const size_t kSamples = 10 * 16;
629 const size_t kPayloadBytes = kSamples * 2;
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000630 while (frame_index < kNumFrames) {
631 // Insert one packet each time, except every 10th time where we insert two
632 // packets at once. This will create a negative clock-drift of approx. 10%.
633 int num_packets = (frame_index % 10 == 0 ? 2 : 1);
634 for (int n = 0; n < num_packets; ++n) {
635 uint8_t payload[kPayloadBytes] = {0};
henrik.lundin246ef3e2017-04-24 09:14:32 -0700636 RTPHeader rtp_info;
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000637 PopulateRtpInfo(frame_index, frame_index * kSamples, &rtp_info);
henrik.lundin246ef3e2017-04-24 09:14:32 -0700638 ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000639 ++frame_index;
640 }
641
642 // Pull out data once.
henrik.lundin7a926812016-05-12 13:51:28 -0700643 bool muted;
644 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
henrik.lundin6d8e0112016-03-04 10:34:21 -0800645 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000646 }
647
648 NetEqNetworkStatistics network_stats;
649 ASSERT_EQ(0, neteq_->NetworkStatistics(&network_stats));
henrik.lundin0d838572016-10-13 03:35:55 -0700650 EXPECT_EQ(-103192, network_stats.clockdrift_ppm);
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000651}
652
henrik.lundin@webrtc.orgb4e80e02014-05-15 07:14:00 +0000653TEST_F(NetEqDecodingTest, TestAverageInterArrivalTimePositive) {
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000654 const int kNumFrames = 5000; // Needed for convergence.
655 int frame_index = 0;
pkasting@chromium.org4591fbd2014-11-20 22:28:14 +0000656 const size_t kSamples = 10 * 16;
657 const size_t kPayloadBytes = kSamples * 2;
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000658 for (int i = 0; i < kNumFrames; ++i) {
659 // Insert one packet each time, except every 10th time where we don't insert
660 // any packet. This will create a positive clock-drift of approx. 11%.
661 int num_packets = (i % 10 == 9 ? 0 : 1);
662 for (int n = 0; n < num_packets; ++n) {
663 uint8_t payload[kPayloadBytes] = {0};
henrik.lundin246ef3e2017-04-24 09:14:32 -0700664 RTPHeader rtp_info;
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000665 PopulateRtpInfo(frame_index, frame_index * kSamples, &rtp_info);
henrik.lundin246ef3e2017-04-24 09:14:32 -0700666 ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000667 ++frame_index;
668 }
669
670 // Pull out data once.
henrik.lundin7a926812016-05-12 13:51:28 -0700671 bool muted;
672 ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
henrik.lundin6d8e0112016-03-04 10:34:21 -0800673 ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000674 }
675
676 NetEqNetworkStatistics network_stats;
677 ASSERT_EQ(0, neteq_->NetworkStatistics(&network_stats));
henrik.lundin0d838572016-10-13 03:35:55 -0700678 EXPECT_EQ(110953, network_stats.clockdrift_ppm);
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000679}
680
// Simulates a long call with clock drift between sender and receiver:
// 5 s of speech, 60 s of CNG (comfort noise), an optional network freeze of
// |network_freeze_ms|, and then speech again. Verifies that speech resumes
// within |max_time_to_speech_ms| and that the end-to-end delay after the CNG
// period matches the delay before it, within |delay_tolerance_ms|.
// |drift_factor| scales the packet inter-arrival time (< 1.0 means the sender
// is faster than the receiver). If |pull_audio_during_freeze| is true, one
// extra GetAudio call is made half-way through the freeze-recovery period.
// Note: the packet-insertion/audio-pull interleaving below is exactly what is
// under test; the statement order must not be changed.
void NetEqDecodingTest::LongCngWithClockDrift(double drift_factor,
                                              double network_freeze_ms,
                                              bool pull_audio_during_freeze,
                                              int delay_tolerance_ms,
                                              int max_time_to_speech_ms) {
  uint16_t seq_no = 0;
  uint32_t timestamp = 0;
  const int kFrameSizeMs = 30;
  const size_t kSamples = kFrameSizeMs * 16;  // 30 ms at 16 kHz.
  const size_t kPayloadBytes = kSamples * 2;
  double next_input_time_ms = 0.0;
  double t_ms;
  bool muted;

  // Insert speech for 5 seconds.
  const int kSpeechDurationMs = 5000;
  for (t_ms = 0; t_ms < kSpeechDurationMs; t_ms += 10) {
    // Each turn in this for loop is 10 ms.
    while (next_input_time_ms <= t_ms) {
      // Insert one 30 ms speech frame.
      uint8_t payload[kPayloadBytes] = {0};
      RTPHeader rtp_info;
      PopulateRtpInfo(seq_no, timestamp, &rtp_info);
      ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
      ++seq_no;
      timestamp += kSamples;
      next_input_time_ms += static_cast<double>(kFrameSizeMs) * drift_factor;
    }
    // Pull out data once.
    ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
    ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
  }

  EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_);
  // Record the delay (in timestamp units) before the CNG period, for the
  // before/after comparison at the end of the test.
  rtc::Optional<uint32_t> playout_timestamp = neteq_->GetPlayoutTimestamp();
  ASSERT_TRUE(playout_timestamp);
  int32_t delay_before = timestamp - *playout_timestamp;

  // Insert CNG for 1 minute (= 60000 ms).
  const int kCngPeriodMs = 100;
  const int kCngPeriodSamples = kCngPeriodMs * 16;  // Period in 16 kHz samples.
  const int kCngDurationMs = 60000;
  for (; t_ms < kSpeechDurationMs + kCngDurationMs; t_ms += 10) {
    // Each turn in this for loop is 10 ms.
    while (next_input_time_ms <= t_ms) {
      // Insert one CNG frame each 100 ms.
      uint8_t payload[kPayloadBytes];
      size_t payload_len;
      RTPHeader rtp_info;
      PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len);
      ASSERT_EQ(0, neteq_->InsertPacket(
                       rtp_info,
                       rtc::ArrayView<const uint8_t>(payload, payload_len), 0));
      ++seq_no;
      timestamp += kCngPeriodSamples;
      next_input_time_ms += static_cast<double>(kCngPeriodMs) * drift_factor;
    }
    // Pull out data once.
    ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
    ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
  }

  EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);

  if (network_freeze_ms > 0) {
    // First keep pulling audio for |network_freeze_ms| without inserting
    // any data, then insert CNG data corresponding to |network_freeze_ms|
    // without pulling any output audio.
    const double loop_end_time = t_ms + network_freeze_ms;
    for (; t_ms < loop_end_time; t_ms += 10) {
      // Pull out data once.
      ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
      ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
      EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
    }
    bool pull_once = pull_audio_during_freeze;
    // If |pull_once| is true, GetAudio will be called once half-way through
    // the network recovery period.
    double pull_time_ms = (t_ms + next_input_time_ms) / 2;
    while (next_input_time_ms <= t_ms) {
      if (pull_once && next_input_time_ms >= pull_time_ms) {
        pull_once = false;
        // Pull out data once.
        ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
        ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
        EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
        t_ms += 10;
      }
      // Insert one CNG frame each 100 ms.
      uint8_t payload[kPayloadBytes];
      size_t payload_len;
      RTPHeader rtp_info;
      PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len);
      ASSERT_EQ(0, neteq_->InsertPacket(
                       rtp_info,
                       rtc::ArrayView<const uint8_t>(payload, payload_len), 0));
      ++seq_no;
      timestamp += kCngPeriodSamples;
      next_input_time_ms += kCngPeriodMs * drift_factor;
    }
  }

  // Insert speech again until output type is speech.
  double speech_restart_time_ms = t_ms;
  while (out_frame_.speech_type_ != AudioFrame::kNormalSpeech) {
    // Each turn in this for loop is 10 ms.
    while (next_input_time_ms <= t_ms) {
      // Insert one 30 ms speech frame.
      uint8_t payload[kPayloadBytes] = {0};
      RTPHeader rtp_info;
      PopulateRtpInfo(seq_no, timestamp, &rtp_info);
      ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
      ++seq_no;
      timestamp += kSamples;
      next_input_time_ms += kFrameSizeMs * drift_factor;
    }
    // Pull out data once.
    ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
    ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
    // Increase clock.
    t_ms += 10;
  }

  // Check that the speech starts again within reasonable time.
  double time_until_speech_returns_ms = t_ms - speech_restart_time_ms;
  EXPECT_LT(time_until_speech_returns_ms, max_time_to_speech_ms);
  playout_timestamp = neteq_->GetPlayoutTimestamp();
  ASSERT_TRUE(playout_timestamp);
  int32_t delay_after = timestamp - *playout_timestamp;
  // Compare delay before and after, and make sure it differs less than 20 ms.
  // (delay_tolerance_ms is converted to 16 kHz timestamp units.)
  EXPECT_LE(delay_after, delay_before + delay_tolerance_ms * 16);
  EXPECT_GE(delay_after, delay_before - delay_tolerance_ms * 16);
}
814
henrik.lundin@webrtc.orgb4e80e02014-05-15 07:14:00 +0000815TEST_F(NetEqDecodingTest, LongCngWithNegativeClockDrift) {
henrik.lundin@webrtc.orgfcfc6a92014-02-13 11:42:28 +0000816 // Apply a clock drift of -25 ms / s (sender faster than receiver).
817 const double kDriftFactor = 1000.0 / (1000.0 + 25.0);
henrik.lundin@webrtc.org24779fe2014-03-14 12:40:05 +0000818 const double kNetworkFreezeTimeMs = 0.0;
819 const bool kGetAudioDuringFreezeRecovery = false;
820 const int kDelayToleranceMs = 20;
821 const int kMaxTimeToSpeechMs = 100;
822 LongCngWithClockDrift(kDriftFactor,
823 kNetworkFreezeTimeMs,
824 kGetAudioDuringFreezeRecovery,
825 kDelayToleranceMs,
826 kMaxTimeToSpeechMs);
henrik.lundin@webrtc.orgfcfc6a92014-02-13 11:42:28 +0000827}
828
henrik.lundin@webrtc.orgb4e80e02014-05-15 07:14:00 +0000829TEST_F(NetEqDecodingTest, LongCngWithPositiveClockDrift) {
henrik.lundin@webrtc.orgfcfc6a92014-02-13 11:42:28 +0000830 // Apply a clock drift of +25 ms / s (sender slower than receiver).
831 const double kDriftFactor = 1000.0 / (1000.0 - 25.0);
henrik.lundin@webrtc.org24779fe2014-03-14 12:40:05 +0000832 const double kNetworkFreezeTimeMs = 0.0;
833 const bool kGetAudioDuringFreezeRecovery = false;
834 const int kDelayToleranceMs = 20;
835 const int kMaxTimeToSpeechMs = 100;
836 LongCngWithClockDrift(kDriftFactor,
837 kNetworkFreezeTimeMs,
838 kGetAudioDuringFreezeRecovery,
839 kDelayToleranceMs,
840 kMaxTimeToSpeechMs);
841}
842
henrik.lundin@webrtc.orgb4e80e02014-05-15 07:14:00 +0000843TEST_F(NetEqDecodingTest, LongCngWithNegativeClockDriftNetworkFreeze) {
henrik.lundin@webrtc.org24779fe2014-03-14 12:40:05 +0000844 // Apply a clock drift of -25 ms / s (sender faster than receiver).
845 const double kDriftFactor = 1000.0 / (1000.0 + 25.0);
846 const double kNetworkFreezeTimeMs = 5000.0;
847 const bool kGetAudioDuringFreezeRecovery = false;
848 const int kDelayToleranceMs = 50;
849 const int kMaxTimeToSpeechMs = 200;
850 LongCngWithClockDrift(kDriftFactor,
851 kNetworkFreezeTimeMs,
852 kGetAudioDuringFreezeRecovery,
853 kDelayToleranceMs,
854 kMaxTimeToSpeechMs);
855}
856
henrik.lundin@webrtc.orgb4e80e02014-05-15 07:14:00 +0000857TEST_F(NetEqDecodingTest, LongCngWithPositiveClockDriftNetworkFreeze) {
henrik.lundin@webrtc.org24779fe2014-03-14 12:40:05 +0000858 // Apply a clock drift of +25 ms / s (sender slower than receiver).
859 const double kDriftFactor = 1000.0 / (1000.0 - 25.0);
860 const double kNetworkFreezeTimeMs = 5000.0;
861 const bool kGetAudioDuringFreezeRecovery = false;
862 const int kDelayToleranceMs = 20;
863 const int kMaxTimeToSpeechMs = 100;
864 LongCngWithClockDrift(kDriftFactor,
865 kNetworkFreezeTimeMs,
866 kGetAudioDuringFreezeRecovery,
867 kDelayToleranceMs,
868 kMaxTimeToSpeechMs);
869}
870
henrik.lundin@webrtc.orgb4e80e02014-05-15 07:14:00 +0000871TEST_F(NetEqDecodingTest, LongCngWithPositiveClockDriftNetworkFreezeExtraPull) {
henrik.lundin@webrtc.org24779fe2014-03-14 12:40:05 +0000872 // Apply a clock drift of +25 ms / s (sender slower than receiver).
873 const double kDriftFactor = 1000.0 / (1000.0 - 25.0);
874 const double kNetworkFreezeTimeMs = 5000.0;
875 const bool kGetAudioDuringFreezeRecovery = true;
876 const int kDelayToleranceMs = 20;
877 const int kMaxTimeToSpeechMs = 100;
878 LongCngWithClockDrift(kDriftFactor,
879 kNetworkFreezeTimeMs,
880 kGetAudioDuringFreezeRecovery,
881 kDelayToleranceMs,
882 kMaxTimeToSpeechMs);
883}
884
henrik.lundin@webrtc.orgb4e80e02014-05-15 07:14:00 +0000885TEST_F(NetEqDecodingTest, LongCngWithoutClockDrift) {
henrik.lundin@webrtc.org24779fe2014-03-14 12:40:05 +0000886 const double kDriftFactor = 1.0; // No drift.
887 const double kNetworkFreezeTimeMs = 0.0;
888 const bool kGetAudioDuringFreezeRecovery = false;
889 const int kDelayToleranceMs = 10;
890 const int kMaxTimeToSpeechMs = 50;
891 LongCngWithClockDrift(kDriftFactor,
892 kNetworkFreezeTimeMs,
893 kGetAudioDuringFreezeRecovery,
894 kDelayToleranceMs,
895 kMaxTimeToSpeechMs);
henrik.lundin@webrtc.orgfcfc6a92014-02-13 11:42:28 +0000896}
897
henrik.lundin@webrtc.orgb4e80e02014-05-15 07:14:00 +0000898TEST_F(NetEqDecodingTest, UnknownPayloadType) {
pkasting@chromium.org4591fbd2014-11-20 22:28:14 +0000899 const size_t kPayloadBytes = 100;
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000900 uint8_t payload[kPayloadBytes] = {0};
henrik.lundin246ef3e2017-04-24 09:14:32 -0700901 RTPHeader rtp_info;
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000902 PopulateRtpInfo(0, 0, &rtp_info);
henrik.lundin246ef3e2017-04-24 09:14:32 -0700903 rtp_info.payloadType = 1; // Not registered as a decoder.
904 EXPECT_EQ(NetEq::kFail, neteq_->InsertPacket(rtp_info, payload, 0));
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000905}
906
Peter Boströme2976c82016-01-04 22:44:05 +0100907#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
ivoc72c08ed2016-01-20 07:26:24 -0800908#define MAYBE_DecoderError DecoderError
909#else
910#define MAYBE_DecoderError DISABLED_DecoderError
911#endif
912
Peter Boströme2976c82016-01-04 22:44:05 +0100913TEST_F(NetEqDecodingTest, MAYBE_DecoderError) {
pkasting@chromium.org4591fbd2014-11-20 22:28:14 +0000914 const size_t kPayloadBytes = 100;
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000915 uint8_t payload[kPayloadBytes] = {0};
henrik.lundin246ef3e2017-04-24 09:14:32 -0700916 RTPHeader rtp_info;
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000917 PopulateRtpInfo(0, 0, &rtp_info);
henrik.lundin246ef3e2017-04-24 09:14:32 -0700918 rtp_info.payloadType = 103; // iSAC, but the payload is invalid.
919 EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000920 // Set all of |out_data_| to 1, and verify that it was set to 0 by the call
921 // to GetAudio.
yujo36b1a5f2017-06-12 12:45:32 -0700922 int16_t* out_frame_data = out_frame_.mutable_data();
henrik.lundin6d8e0112016-03-04 10:34:21 -0800923 for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; ++i) {
yujo36b1a5f2017-06-12 12:45:32 -0700924 out_frame_data[i] = 1;
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000925 }
henrik.lundin7a926812016-05-12 13:51:28 -0700926 bool muted;
927 EXPECT_EQ(NetEq::kFail, neteq_->GetAudio(&out_frame_, &muted));
928 ASSERT_FALSE(muted);
ivoc72c08ed2016-01-20 07:26:24 -0800929
yujo36b1a5f2017-06-12 12:45:32 -0700930 // Verify that the first 160 samples are set to 0.
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000931 static const int kExpectedOutputLength = 160; // 10 ms at 16 kHz sample rate.
yujo36b1a5f2017-06-12 12:45:32 -0700932 const int16_t* const_out_frame_data = out_frame_.data();
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000933 for (int i = 0; i < kExpectedOutputLength; ++i) {
934 std::ostringstream ss;
935 ss << "i = " << i;
936 SCOPED_TRACE(ss.str()); // Print out the parameter values on failure.
yujo36b1a5f2017-06-12 12:45:32 -0700937 EXPECT_EQ(0, const_out_frame_data[i]);
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000938 }
939}
940
henrik.lundin@webrtc.orgb4e80e02014-05-15 07:14:00 +0000941TEST_F(NetEqDecodingTest, GetAudioBeforeInsertPacket) {
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000942 // Set all of |out_data_| to 1, and verify that it was set to 0 by the call
943 // to GetAudio.
yujo36b1a5f2017-06-12 12:45:32 -0700944 int16_t* out_frame_data = out_frame_.mutable_data();
henrik.lundin6d8e0112016-03-04 10:34:21 -0800945 for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; ++i) {
yujo36b1a5f2017-06-12 12:45:32 -0700946 out_frame_data[i] = 1;
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000947 }
henrik.lundin7a926812016-05-12 13:51:28 -0700948 bool muted;
949 EXPECT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
950 ASSERT_FALSE(muted);
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000951 // Verify that the first block of samples is set to 0.
952 static const int kExpectedOutputLength =
953 kInitSampleRateHz / 100; // 10 ms at initial sample rate.
yujo36b1a5f2017-06-12 12:45:32 -0700954 const int16_t* const_out_frame_data = out_frame_.data();
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000955 for (int i = 0; i < kExpectedOutputLength; ++i) {
956 std::ostringstream ss;
957 ss << "i = " << i;
958 SCOPED_TRACE(ss.str()); // Print out the parameter values on failure.
yujo36b1a5f2017-06-12 12:45:32 -0700959 EXPECT_EQ(0, const_out_frame_data[i]);
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000960 }
henrik.lundind89814b2015-11-23 06:49:25 -0800961 // Verify that the sample rate did not change from the initial configuration.
962 EXPECT_EQ(config_.sample_rate_hz, neteq_->last_output_sample_rate_hz());
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000963}
turaj@webrtc.orgff43c852013-09-25 00:07:27 +0000964
// Base fixture for the background-noise (BGN) tests. Derived fixtures select
// a background_noise_mode in config_ and implement TestCondition() to state
// what the comfort-noise energy should look like before/after fading.
class NetEqBgnTest : public NetEqDecodingTest {
 protected:
  // Called for every PLC-to-CNG frame with the frame's summed squared sample
  // values; |should_be_faded| is true once the fading threshold is passed.
  virtual void TestCondition(double sum_squared_noise,
                             bool should_be_faded) = 0;

  // Runs the BGN scenario at |sampling_rate_hz| (8000, 16000, or 32000):
  // inserts a few PCM16 packets, then starves NetEq to force PLC and
  // PLC-to-CNG, checking TestCondition() on each CNG frame.
  void CheckBgn(int sampling_rate_hz) {
    size_t expected_samples_per_channel = 0;
    uint8_t payload_type = 0xFF;  // Invalid.
    if (sampling_rate_hz == 8000) {
      expected_samples_per_channel = kBlockSize8kHz;
      payload_type = 93;  // PCM 16, 8 kHz.
    } else if (sampling_rate_hz == 16000) {
      expected_samples_per_channel = kBlockSize16kHz;
      payload_type = 94;  // PCM 16, 16 kHz.
    } else if (sampling_rate_hz == 32000) {
      expected_samples_per_channel = kBlockSize32kHz;
      payload_type = 95;  // PCM 16, 32 kHz.
    } else {
      ASSERT_TRUE(false);  // Unsupported test case.
    }

    AudioFrame output;
    test::AudioLoop input;
    // We are using the same 32 kHz input file for all tests, regardless of
    // |sampling_rate_hz|. The output may sound weird, but the test is still
    // valid.
    ASSERT_TRUE(input.Init(
        webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm"),
        10 * sampling_rate_hz,  // Max 10 seconds loop length.
        expected_samples_per_channel));

    // Payload of 10 ms of PCM16 32 kHz.
    uint8_t payload[kBlockSize32kHz * sizeof(int16_t)];
    RTPHeader rtp_info;
    PopulateRtpInfo(0, 0, &rtp_info);
    rtp_info.payloadType = payload_type;

    uint32_t receive_timestamp = 0;
    bool muted;
    for (int n = 0; n < 10; ++n) {  // Insert few packets and get audio.
      auto block = input.GetNextBlock();
      ASSERT_EQ(expected_samples_per_channel, block.size());
      size_t enc_len_bytes =
          WebRtcPcm16b_Encode(block.data(), block.size(), payload);
      ASSERT_EQ(enc_len_bytes, expected_samples_per_channel * 2);

      ASSERT_EQ(0, neteq_->InsertPacket(
                       rtp_info,
                       rtc::ArrayView<const uint8_t>(payload, enc_len_bytes),
                       receive_timestamp));
      output.Reset();
      ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));
      ASSERT_EQ(1u, output.num_channels_);
      ASSERT_EQ(expected_samples_per_channel, output.samples_per_channel_);
      ASSERT_EQ(AudioFrame::kNormalSpeech, output.speech_type_);

      // Next packet.
      rtp_info.timestamp += rtc::checked_cast<uint32_t>(
          expected_samples_per_channel);
      rtp_info.sequenceNumber++;
      receive_timestamp += rtc::checked_cast<uint32_t>(
          expected_samples_per_channel);
    }

    output.Reset();

    // Get audio without inserting packets, expecting PLC and PLC-to-CNG. Pull
    // one frame without checking speech-type. This is the first frame pulled
    // without inserting any packet, and might not be labeled as PLC.
    ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));
    ASSERT_EQ(1u, output.num_channels_);
    ASSERT_EQ(expected_samples_per_channel, output.samples_per_channel_);

    // To be able to test the fading of background noise we need at least to
    // pull 611 frames.
    const int kFadingThreshold = 611;

    // Test several CNG-to-PLC packet for the expected behavior. The number 20
    // is arbitrary, but sufficiently large to test enough number of frames.
    const int kNumPlcToCngTestFrames = 20;
    bool plc_to_cng = false;
    for (int n = 0; n < kFadingThreshold + kNumPlcToCngTestFrames; ++n) {
      output.Reset();
      // Set to non-zero.
      memset(output.mutable_data(), 1, AudioFrame::kMaxDataSizeBytes);
      ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));
      ASSERT_FALSE(muted);
      ASSERT_EQ(1u, output.num_channels_);
      ASSERT_EQ(expected_samples_per_channel, output.samples_per_channel_);
      if (output.speech_type_ == AudioFrame::kPLCCNG) {
        plc_to_cng = true;
        // Compute the energy of the CNG frame and hand it to the derived
        // fixture's condition check.
        double sum_squared = 0;
        const int16_t* output_data = output.data();
        for (size_t k = 0;
             k < output.num_channels_ * output.samples_per_channel_; ++k)
          sum_squared += output_data[k] * output_data[k];
        TestCondition(sum_squared, n > kFadingThreshold);
      } else {
        EXPECT_EQ(AudioFrame::kPLC, output.speech_type_);
      }
    }
    EXPECT_TRUE(plc_to_cng);  // Just to be sure that PLC-to-CNG has occurred.
  }
};
1069
henrik.lundin@webrtc.org9b8102c2014-08-21 08:27:44 +00001070class NetEqBgnTestOn : public NetEqBgnTest {
1071 protected:
1072 NetEqBgnTestOn() : NetEqBgnTest() {
1073 config_.background_noise_mode = NetEq::kBgnOn;
1074 }
1075
1076 void TestCondition(double sum_squared_noise, bool /*should_be_faded*/) {
1077 EXPECT_NE(0, sum_squared_noise);
1078 }
1079};
1080
1081class NetEqBgnTestOff : public NetEqBgnTest {
1082 protected:
1083 NetEqBgnTestOff() : NetEqBgnTest() {
1084 config_.background_noise_mode = NetEq::kBgnOff;
1085 }
1086
1087 void TestCondition(double sum_squared_noise, bool /*should_be_faded*/) {
1088 EXPECT_EQ(0, sum_squared_noise);
1089 }
1090};
1091
1092class NetEqBgnTestFade : public NetEqBgnTest {
1093 protected:
1094 NetEqBgnTestFade() : NetEqBgnTest() {
1095 config_.background_noise_mode = NetEq::kBgnFade;
1096 }
1097
1098 void TestCondition(double sum_squared_noise, bool should_be_faded) {
1099 if (should_be_faded)
1100 EXPECT_EQ(0, sum_squared_noise);
1101 }
1102};
1103
henrika1d34fe92015-06-16 10:04:20 +02001104TEST_F(NetEqBgnTestOn, RunTest) {
henrik.lundin@webrtc.org9b8102c2014-08-21 08:27:44 +00001105 CheckBgn(8000);
1106 CheckBgn(16000);
1107 CheckBgn(32000);
turaj@webrtc.orgff43c852013-09-25 00:07:27 +00001108}
turaj@webrtc.org7b75ac62013-09-26 00:27:56 +00001109
henrika1d34fe92015-06-16 10:04:20 +02001110TEST_F(NetEqBgnTestOff, RunTest) {
henrik.lundin@webrtc.org9b8102c2014-08-21 08:27:44 +00001111 CheckBgn(8000);
1112 CheckBgn(16000);
1113 CheckBgn(32000);
1114}
1115
henrika1d34fe92015-06-16 10:04:20 +02001116TEST_F(NetEqBgnTestFade, RunTest) {
henrik.lundin@webrtc.org9b8102c2014-08-21 08:27:44 +00001117 CheckBgn(8000);
1118 CheckBgn(16000);
1119 CheckBgn(32000);
1120}
henrik.lundin@webrtc.orgea257842014-08-07 12:27:37 +00001121
turaj@webrtc.org78b41a02013-11-22 20:27:07 +00001122void NetEqDecodingTest::WrapTest(uint16_t start_seq_no,
1123 uint32_t start_timestamp,
1124 const std::set<uint16_t>& drop_seq_numbers,
1125 bool expect_seq_no_wrap,
1126 bool expect_timestamp_wrap) {
1127 uint16_t seq_no = start_seq_no;
1128 uint32_t timestamp = start_timestamp;
1129 const int kBlocksPerFrame = 3; // Number of 10 ms blocks per frame.
1130 const int kFrameSizeMs = kBlocksPerFrame * kTimeStepMs;
1131 const int kSamples = kBlockSize16kHz * kBlocksPerFrame;
pkasting@chromium.org4591fbd2014-11-20 22:28:14 +00001132 const size_t kPayloadBytes = kSamples * sizeof(int16_t);
turaj@webrtc.org78b41a02013-11-22 20:27:07 +00001133 double next_input_time_ms = 0.0;
turaj@webrtc.org78b41a02013-11-22 20:27:07 +00001134 uint32_t receive_timestamp = 0;
1135
henrik.lundin@webrtc.orgca8cb952014-03-12 10:26:52 +00001136 // Insert speech for 2 seconds.
turaj@webrtc.org78b41a02013-11-22 20:27:07 +00001137 const int kSpeechDurationMs = 2000;
1138 int packets_inserted = 0;
1139 uint16_t last_seq_no;
1140 uint32_t last_timestamp;
1141 bool timestamp_wrapped = false;
1142 bool seq_no_wrapped = false;
1143 for (double t_ms = 0; t_ms < kSpeechDurationMs; t_ms += 10) {
1144 // Each turn in this for loop is 10 ms.
1145 while (next_input_time_ms <= t_ms) {
1146 // Insert one 30 ms speech frame.
1147 uint8_t payload[kPayloadBytes] = {0};
henrik.lundin246ef3e2017-04-24 09:14:32 -07001148 RTPHeader rtp_info;
turaj@webrtc.org78b41a02013-11-22 20:27:07 +00001149 PopulateRtpInfo(seq_no, timestamp, &rtp_info);
1150 if (drop_seq_numbers.find(seq_no) == drop_seq_numbers.end()) {
1151 // This sequence number was not in the set to drop. Insert it.
henrik.lundin246ef3e2017-04-24 09:14:32 -07001152 ASSERT_EQ(0,
1153 neteq_->InsertPacket(rtp_info, payload, receive_timestamp));
turaj@webrtc.org78b41a02013-11-22 20:27:07 +00001154 ++packets_inserted;
1155 }
1156 NetEqNetworkStatistics network_stats;
1157 ASSERT_EQ(0, neteq_->NetworkStatistics(&network_stats));
1158
1159 // Due to internal NetEq logic, preferred buffer-size is about 4 times the
1160 // packet size for first few packets. Therefore we refrain from checking
1161 // the criteria.
1162 if (packets_inserted > 4) {
1163 // Expect preferred and actual buffer size to be no more than 2 frames.
1164 EXPECT_LE(network_stats.preferred_buffer_size_ms, kFrameSizeMs * 2);
turaj@webrtc.org8d1cdaa2014-04-11 18:47:55 +00001165 EXPECT_LE(network_stats.current_buffer_size_ms, kFrameSizeMs * 2 +
1166 algorithmic_delay_ms_);
turaj@webrtc.org78b41a02013-11-22 20:27:07 +00001167 }
1168 last_seq_no = seq_no;
1169 last_timestamp = timestamp;
1170
1171 ++seq_no;
1172 timestamp += kSamples;
1173 receive_timestamp += kSamples;
1174 next_input_time_ms += static_cast<double>(kFrameSizeMs);
1175
1176 seq_no_wrapped |= seq_no < last_seq_no;
1177 timestamp_wrapped |= timestamp < last_timestamp;
1178 }
1179 // Pull out data once.
henrik.lundin6d8e0112016-03-04 10:34:21 -08001180 AudioFrame output;
henrik.lundin7a926812016-05-12 13:51:28 -07001181 bool muted;
1182 ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));
henrik.lundin6d8e0112016-03-04 10:34:21 -08001183 ASSERT_EQ(kBlockSize16kHz, output.samples_per_channel_);
1184 ASSERT_EQ(1u, output.num_channels_);
turaj@webrtc.org78b41a02013-11-22 20:27:07 +00001185
1186 // Expect delay (in samples) to be less than 2 packets.
henrik.lundin114c1b32017-04-26 07:47:32 -07001187 rtc::Optional<uint32_t> playout_timestamp = neteq_->GetPlayoutTimestamp();
henrik.lundin0d96ab72016-04-06 12:28:26 -07001188 ASSERT_TRUE(playout_timestamp);
1189 EXPECT_LE(timestamp - *playout_timestamp,
turaj@webrtc.org78b41a02013-11-22 20:27:07 +00001190 static_cast<uint32_t>(kSamples * 2));
turaj@webrtc.org78b41a02013-11-22 20:27:07 +00001191 }
1192 // Make sure we have actually tested wrap-around.
1193 ASSERT_EQ(expect_seq_no_wrap, seq_no_wrapped);
1194 ASSERT_EQ(expect_timestamp_wrap, timestamp_wrapped);
1195}
1196
1197TEST_F(NetEqDecodingTest, SequenceNumberWrap) {
1198 // Start with a sequence number that will soon wrap.
1199 std::set<uint16_t> drop_seq_numbers; // Don't drop any packets.
1200 WrapTest(0xFFFF - 10, 0, drop_seq_numbers, true, false);
1201}
1202
1203TEST_F(NetEqDecodingTest, SequenceNumberWrapAndDrop) {
1204 // Start with a sequence number that will soon wrap.
1205 std::set<uint16_t> drop_seq_numbers;
1206 drop_seq_numbers.insert(0xFFFF);
1207 drop_seq_numbers.insert(0x0);
1208 WrapTest(0xFFFF - 10, 0, drop_seq_numbers, true, false);
1209}
1210
1211TEST_F(NetEqDecodingTest, TimestampWrap) {
1212 // Start with a timestamp that will soon wrap.
1213 std::set<uint16_t> drop_seq_numbers;
1214 WrapTest(0, 0xFFFFFFFF - 3000, drop_seq_numbers, false, true);
1215}
1216
1217TEST_F(NetEqDecodingTest, TimestampAndSequenceNumberWrap) {
1218 // Start with a timestamp and a sequence number that will wrap at the same
1219 // time.
1220 std::set<uint16_t> drop_seq_numbers;
1221 WrapTest(0xFFFF - 10, 0xFFFFFFFF - 5000, drop_seq_numbers, true, true);
1222}
1223
// Verifies that a duplicated (retransmitted) CNG packet is discarded: after
// decoding the first copy, inserting the identical packet again must not
// restart or disturb the comfort-noise period, and the playout timestamp
// must advance as if only one copy existed.
void NetEqDecodingTest::DuplicateCng() {
  uint16_t seq_no = 0;
  uint32_t timestamp = 0;
  const int kFrameSizeMs = 10;
  const int kSampleRateKhz = 16;
  const int kSamples = kFrameSizeMs * kSampleRateKhz;
  const size_t kPayloadBytes = kSamples * 2;

  // The constant offset between the newest inserted timestamp and the frame
  // timestamps NetEq produces; mirrors NetEq's internal minimum delay.
  const int algorithmic_delay_samples = std::max(
      algorithmic_delay_ms_ * kSampleRateKhz, 5 * kSampleRateKhz / 8);
  // Insert three speech packets. Three are needed to get the frame length
  // correct.
  uint8_t payload[kPayloadBytes] = {0};
  RTPHeader rtp_info;
  bool muted;
  for (int i = 0; i < 3; ++i) {
    PopulateRtpInfo(seq_no, timestamp, &rtp_info);
    ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
    ++seq_no;
    timestamp += kSamples;

    // Pull audio once.
    ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
    ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
  }
  // Verify speech output.
  EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_);

  // Insert same CNG packet twice.
  const int kCngPeriodMs = 100;
  const int kCngPeriodSamples = kCngPeriodMs * kSampleRateKhz;
  size_t payload_len;
  PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len);
  // This is the first time this CNG packet is inserted.
  ASSERT_EQ(
      0, neteq_->InsertPacket(
             rtp_info, rtc::ArrayView<const uint8_t>(payload, payload_len), 0));

  // Pull audio once and make sure CNG is played.
  ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
  ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
  EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
  EXPECT_FALSE(
      neteq_->GetPlayoutTimestamp());  // Returns empty value during CNG.
  // The end of the produced frame must trail the inserted timestamp by
  // exactly the algorithmic delay.
  EXPECT_EQ(timestamp - algorithmic_delay_samples,
            out_frame_.timestamp_ + out_frame_.samples_per_channel_);

  // Insert the same CNG packet again. Note that at this point it is old, since
  // we have already decoded the first copy of it.
  ASSERT_EQ(
      0, neteq_->InsertPacket(
             rtp_info, rtc::ArrayView<const uint8_t>(payload, payload_len), 0));

  // Pull audio until we have played |kCngPeriodMs| of CNG. Start at 10 ms since
  // we have already pulled out CNG once.
  for (int cng_time_ms = 10; cng_time_ms < kCngPeriodMs; cng_time_ms += 10) {
    ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
    ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
    EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
    EXPECT_FALSE(
        neteq_->GetPlayoutTimestamp());  // Returns empty value during CNG.
    // The frame timestamp must not advance past the (single) CNG packet,
    // proving the duplicate was discarded.
    EXPECT_EQ(timestamp - algorithmic_delay_samples,
              out_frame_.timestamp_ + out_frame_.samples_per_channel_);
  }

  // Insert speech again.
  ++seq_no;
  timestamp += kCngPeriodSamples;
  PopulateRtpInfo(seq_no, timestamp, &rtp_info);
  ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));

  // Pull audio once and verify that the output is speech again.
  ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
  ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
  EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_);
  rtc::Optional<uint32_t> playout_timestamp = neteq_->GetPlayoutTimestamp();
  ASSERT_TRUE(playout_timestamp);
  EXPECT_EQ(timestamp + kSamples - algorithmic_delay_samples,
            *playout_timestamp);
}
1304
henrik.lundin@webrtc.orgca8cb952014-03-12 10:26:52 +00001305TEST_F(NetEqDecodingTest, DiscardDuplicateCng) { DuplicateCng(); }
henrik.lundin@webrtc.orgc93437e2014-12-01 11:42:42 +00001306
// Verifies that NetEq handles a stream that starts with a CNG packet: the
// first pull must produce comfort noise, and once speech packets arrive the
// output must transition to normal speech.
TEST_F(NetEqDecodingTest, CngFirst) {
  uint16_t seq_no = 0;
  uint32_t timestamp = 0;
  const int kFrameSizeMs = 10;
  const int kSampleRateKhz = 16;
  const int kSamples = kFrameSizeMs * kSampleRateKhz;
  const int kPayloadBytes = kSamples * 2;
  const int kCngPeriodMs = 100;
  const int kCngPeriodSamples = kCngPeriodMs * kSampleRateKhz;
  size_t payload_len;

  uint8_t payload[kPayloadBytes] = {0};
  RTPHeader rtp_info;

  // Insert one CNG packet as the very first packet.
  PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len);
  ASSERT_EQ(
      NetEq::kOK,
      neteq_->InsertPacket(
          rtp_info, rtc::ArrayView<const uint8_t>(payload, payload_len), 0));
  ++seq_no;
  timestamp += kCngPeriodSamples;

  // Pull audio once and make sure CNG is played.
  bool muted;
  ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
  ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
  EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);

  // Insert some speech packets.
  const uint32_t first_speech_timestamp = timestamp;
  int timeout_counter = 0;
  // Keep inserting and pulling until the output has reached the first speech
  // packet's timestamp (bounded by the timeout counter).
  do {
    ASSERT_LT(timeout_counter++, 20) << "Test timed out";
    PopulateRtpInfo(seq_no, timestamp, &rtp_info);
    ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
    ++seq_no;
    timestamp += kSamples;

    // Pull audio once.
    ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
    ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
  } while (!IsNewerTimestamp(out_frame_.timestamp_, first_speech_timestamp));
  // Verify speech output.
  EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_);
}
henrik.lundin7a926812016-05-12 13:51:28 -07001352
1353class NetEqDecodingTestWithMutedState : public NetEqDecodingTest {
1354 public:
1355 NetEqDecodingTestWithMutedState() : NetEqDecodingTest() {
1356 config_.enable_muted_state = true;
1357 }
1358
1359 protected:
1360 static constexpr size_t kSamples = 10 * 16;
1361 static constexpr size_t kPayloadBytes = kSamples * 2;
1362
1363 void InsertPacket(uint32_t rtp_timestamp) {
1364 uint8_t payload[kPayloadBytes] = {0};
henrik.lundin246ef3e2017-04-24 09:14:32 -07001365 RTPHeader rtp_info;
henrik.lundin7a926812016-05-12 13:51:28 -07001366 PopulateRtpInfo(0, rtp_timestamp, &rtp_info);
henrik.lundin246ef3e2017-04-24 09:14:32 -07001367 EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
henrik.lundin7a926812016-05-12 13:51:28 -07001368 }
1369
henrik.lundin42feb512016-09-20 06:51:40 -07001370 void InsertCngPacket(uint32_t rtp_timestamp) {
1371 uint8_t payload[kPayloadBytes] = {0};
henrik.lundin246ef3e2017-04-24 09:14:32 -07001372 RTPHeader rtp_info;
henrik.lundin42feb512016-09-20 06:51:40 -07001373 size_t payload_len;
1374 PopulateCng(0, rtp_timestamp, &rtp_info, payload, &payload_len);
henrik.lundin246ef3e2017-04-24 09:14:32 -07001375 EXPECT_EQ(
1376 NetEq::kOK,
1377 neteq_->InsertPacket(
1378 rtp_info, rtc::ArrayView<const uint8_t>(payload, payload_len), 0));
henrik.lundin42feb512016-09-20 06:51:40 -07001379 }
1380
henrik.lundin7a926812016-05-12 13:51:28 -07001381 bool GetAudioReturnMuted() {
1382 bool muted;
1383 EXPECT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
1384 return muted;
1385 }
1386
1387 void GetAudioUntilMuted() {
1388 while (!GetAudioReturnMuted()) {
1389 ASSERT_LT(counter_++, 1000) << "Test timed out";
1390 }
1391 }
1392
1393 void GetAudioUntilNormal() {
1394 bool muted = false;
1395 while (out_frame_.speech_type_ != AudioFrame::kNormalSpeech) {
1396 EXPECT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
1397 ASSERT_LT(counter_++, 1000) << "Test timed out";
1398 }
1399 EXPECT_FALSE(muted);
1400 }
1401
1402 int counter_ = 0;
1403};
1404
// Verifies that NetEq goes in and out of muted state as expected: audio fades
// to muted when packets stop arriving, the output buffer is left untouched
// while muted, and normal decoding resumes when packets return.
TEST_F(NetEqDecodingTestWithMutedState, MutedState) {
  // Insert one speech packet.
  InsertPacket(0);
  // Pull out audio once and expect it not to be muted.
  EXPECT_FALSE(GetAudioReturnMuted());
  // Pull data until faded out.
  GetAudioUntilMuted();
  EXPECT_TRUE(out_frame_.muted());

  // Verify that output audio is not written during muted mode. Other parameters
  // should be correct, though.
  AudioFrame new_frame;
  // Pre-fill the frame with a sentinel value; it must survive the muted
  // GetAudio call below.
  int16_t* frame_data = new_frame.mutable_data();
  for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; i++) {
    frame_data[i] = 17;
  }
  bool muted;
  EXPECT_EQ(0, neteq_->GetAudio(&new_frame, &muted));
  EXPECT_TRUE(muted);
  EXPECT_TRUE(out_frame_.muted());
  for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; i++) {
    EXPECT_EQ(17, frame_data[i]);
  }
  // Metadata must still advance/match even though no samples were written.
  EXPECT_EQ(out_frame_.timestamp_ + out_frame_.samples_per_channel_,
            new_frame.timestamp_);
  EXPECT_EQ(out_frame_.samples_per_channel_, new_frame.samples_per_channel_);
  EXPECT_EQ(out_frame_.sample_rate_hz_, new_frame.sample_rate_hz_);
  EXPECT_EQ(out_frame_.num_channels_, new_frame.num_channels_);
  EXPECT_EQ(out_frame_.speech_type_, new_frame.speech_type_);
  EXPECT_EQ(out_frame_.vad_activity_, new_frame.vad_activity_);

  // Insert new data. Timestamp is corrected for the time elapsed since the last
  // packet. Verify that normal operation resumes.
  InsertPacket(kSamples * counter_);
  GetAudioUntilNormal();
  EXPECT_FALSE(out_frame_.muted());

  NetEqNetworkStatistics stats;
  EXPECT_EQ(0, neteq_->NetworkStatistics(&stats));
  // NetEqNetworkStatistics::expand_rate tells the fraction of samples that
  // were concealment samples, in Q14 (16384 = 100%). The vast majority should
  // be concealment samples in this test.
  EXPECT_GT(stats.expand_rate, 14000);
  // And, it should be greater than the speech_expand_rate.
  EXPECT_GT(stats.expand_rate, stats.speech_expand_rate);
}
1452
1453// Verifies that NetEq goes out of muted state when given a delayed packet.
1454TEST_F(NetEqDecodingTestWithMutedState, MutedStateDelayedPacket) {
1455 // Insert one speech packet.
1456 InsertPacket(0);
1457 // Pull out audio once and expect it not to be muted.
1458 EXPECT_FALSE(GetAudioReturnMuted());
1459 // Pull data until faded out.
1460 GetAudioUntilMuted();
1461 // Insert new data. Timestamp is only corrected for the half of the time
1462 // elapsed since the last packet. That is, the new packet is delayed. Verify
1463 // that normal operation resumes.
1464 InsertPacket(kSamples * counter_ / 2);
1465 GetAudioUntilNormal();
1466}
1467
1468// Verifies that NetEq goes out of muted state when given a future packet.
1469TEST_F(NetEqDecodingTestWithMutedState, MutedStateFuturePacket) {
1470 // Insert one speech packet.
1471 InsertPacket(0);
1472 // Pull out audio once and expect it not to be muted.
1473 EXPECT_FALSE(GetAudioReturnMuted());
1474 // Pull data until faded out.
1475 GetAudioUntilMuted();
1476 // Insert new data. Timestamp is over-corrected for the time elapsed since the
1477 // last packet. That is, the new packet is too early. Verify that normal
1478 // operation resumes.
1479 InsertPacket(kSamples * counter_ * 2);
1480 GetAudioUntilNormal();
1481}
1482
1483// Verifies that NetEq goes out of muted state when given an old packet.
1484TEST_F(NetEqDecodingTestWithMutedState, MutedStateOldPacket) {
1485 // Insert one speech packet.
1486 InsertPacket(0);
1487 // Pull out audio once and expect it not to be muted.
1488 EXPECT_FALSE(GetAudioReturnMuted());
1489 // Pull data until faded out.
1490 GetAudioUntilMuted();
1491
1492 EXPECT_NE(AudioFrame::kNormalSpeech, out_frame_.speech_type_);
1493 // Insert packet which is older than the first packet.
1494 InsertPacket(kSamples * (counter_ - 1000));
1495 EXPECT_FALSE(GetAudioReturnMuted());
1496 EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_);
1497}
1498
henrik.lundin42feb512016-09-20 06:51:40 -07001499// Verifies that NetEq doesn't enter muted state when CNG mode is active and the
1500// packet stream is suspended for a long time.
1501TEST_F(NetEqDecodingTestWithMutedState, DoNotMuteExtendedCngWithoutPackets) {
1502 // Insert one CNG packet.
1503 InsertCngPacket(0);
1504
1505 // Pull 10 seconds of audio (10 ms audio generated per lap).
1506 for (int i = 0; i < 1000; ++i) {
1507 bool muted;
1508 EXPECT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
1509 ASSERT_FALSE(muted);
1510 }
1511 EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
1512}
1513
1514// Verifies that NetEq goes back to normal after a long CNG period with the
1515// packet stream suspended.
1516TEST_F(NetEqDecodingTestWithMutedState, RecoverAfterExtendedCngWithoutPackets) {
1517 // Insert one CNG packet.
1518 InsertCngPacket(0);
1519
1520 // Pull 10 seconds of audio (10 ms audio generated per lap).
1521 for (int i = 0; i < 1000; ++i) {
1522 bool muted;
1523 EXPECT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
1524 }
1525
1526 // Insert new data. Timestamp is corrected for the time elapsed since the last
1527 // packet. Verify that normal operation resumes.
1528 InsertPacket(kSamples * counter_);
1529 GetAudioUntilNormal();
1530}
1531
// Fixture managing two NetEq instances so that their outputs can be compared
// frame by frame. The second instance's config starts as a copy of the
// first's; tests tweak config2_ before calling CreateSecondInstance().
class NetEqDecodingTestTwoInstances : public NetEqDecodingTest {
 public:
  NetEqDecodingTestTwoInstances() : NetEqDecodingTest() {}

  void SetUp() override {
    NetEqDecodingTest::SetUp();
    config2_ = config_;
  }

  // Creates the second NetEq instance from config2_ and registers the same
  // decoders as the base fixture does for the first instance.
  void CreateSecondInstance() {
    neteq2_.reset(NetEq::Create(config2_, CreateBuiltinAudioDecoderFactory()));
    ASSERT_TRUE(neteq2_);
    LoadDecoders(neteq2_.get());
  }

 protected:
  std::unique_ptr<NetEq> neteq2_;  // The second NetEq instance.
  NetEq::Config config2_;          // Config for the second instance.
};
1551
1552namespace {
1553::testing::AssertionResult AudioFramesEqualExceptData(const AudioFrame& a,
1554 const AudioFrame& b) {
1555 if (a.timestamp_ != b.timestamp_)
1556 return ::testing::AssertionFailure() << "timestamp_ diff (" << a.timestamp_
1557 << " != " << b.timestamp_ << ")";
1558 if (a.sample_rate_hz_ != b.sample_rate_hz_)
1559 return ::testing::AssertionFailure() << "sample_rate_hz_ diff ("
1560 << a.sample_rate_hz_
1561 << " != " << b.sample_rate_hz_ << ")";
1562 if (a.samples_per_channel_ != b.samples_per_channel_)
1563 return ::testing::AssertionFailure()
1564 << "samples_per_channel_ diff (" << a.samples_per_channel_
1565 << " != " << b.samples_per_channel_ << ")";
1566 if (a.num_channels_ != b.num_channels_)
1567 return ::testing::AssertionFailure() << "num_channels_ diff ("
1568 << a.num_channels_
1569 << " != " << b.num_channels_ << ")";
1570 if (a.speech_type_ != b.speech_type_)
1571 return ::testing::AssertionFailure() << "speech_type_ diff ("
1572 << a.speech_type_
1573 << " != " << b.speech_type_ << ")";
1574 if (a.vad_activity_ != b.vad_activity_)
1575 return ::testing::AssertionFailure() << "vad_activity_ diff ("
1576 << a.vad_activity_
1577 << " != " << b.vad_activity_ << ")";
1578 return ::testing::AssertionSuccess();
1579}
1580
1581::testing::AssertionResult AudioFramesEqual(const AudioFrame& a,
1582 const AudioFrame& b) {
1583 ::testing::AssertionResult res = AudioFramesEqualExceptData(a, b);
1584 if (!res)
1585 return res;
1586 if (memcmp(
yujo36b1a5f2017-06-12 12:45:32 -07001587 a.data(), b.data(),
1588 a.samples_per_channel_ * a.num_channels_ * sizeof(*a.data())) != 0) {
henrik.lundin7a926812016-05-12 13:51:28 -07001589 return ::testing::AssertionFailure() << "data_ diff";
1590 }
1591 return ::testing::AssertionSuccess();
1592}
1593
1594} // namespace
1595
// Feeds identical input to two NetEq instances — one with muted state enabled,
// one without — and verifies that their outputs agree: fully identical while
// unmuted, and identical except for the (unwritten) sample data while the
// muted-state instance is muted.
TEST_F(NetEqDecodingTestTwoInstances, CompareMutedStateOnOff) {
  ASSERT_FALSE(config_.enable_muted_state);
  config2_.enable_muted_state = true;
  CreateSecondInstance();

  // Insert one speech packet into both NetEqs.
  const size_t kSamples = 10 * 16;
  const size_t kPayloadBytes = kSamples * 2;
  uint8_t payload[kPayloadBytes] = {0};
  RTPHeader rtp_info;
  PopulateRtpInfo(0, 0, &rtp_info);
  EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
  EXPECT_EQ(0, neteq2_->InsertPacket(rtp_info, payload, 0));

  AudioFrame out_frame1, out_frame2;
  bool muted;
  // Pull 10 seconds of audio from both instances; only the muted-state
  // instance (neteq2_) is allowed to report muted == true.
  for (int i = 0; i < 1000; ++i) {
    std::ostringstream ss;
    ss << "i = " << i;
    SCOPED_TRACE(ss.str());  // Print out the loop iterator on failure.
    EXPECT_EQ(0, neteq_->GetAudio(&out_frame1, &muted));
    EXPECT_FALSE(muted);
    EXPECT_EQ(0, neteq2_->GetAudio(&out_frame2, &muted));
    if (muted) {
      EXPECT_TRUE(AudioFramesEqualExceptData(out_frame1, out_frame2));
    } else {
      EXPECT_TRUE(AudioFramesEqual(out_frame1, out_frame2));
    }
  }
  EXPECT_TRUE(muted);

  // Insert new data. Timestamp is corrected for the time elapsed since the last
  // packet.
  PopulateRtpInfo(0, kSamples * 1000, &rtp_info);
  EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
  EXPECT_EQ(0, neteq2_->InsertPacket(rtp_info, payload, 0));

  // Pull audio until normal speech resumes; the two instances must stay in
  // agreement throughout the recovery (bounded by the timeout counter).
  int counter = 0;
  while (out_frame1.speech_type_ != AudioFrame::kNormalSpeech) {
    ASSERT_LT(counter++, 1000) << "Test timed out";
    std::ostringstream ss;
    ss << "counter = " << counter;
    SCOPED_TRACE(ss.str());  // Print out the loop iterator on failure.
    EXPECT_EQ(0, neteq_->GetAudio(&out_frame1, &muted));
    EXPECT_FALSE(muted);
    EXPECT_EQ(0, neteq2_->GetAudio(&out_frame2, &muted));
    if (muted) {
      EXPECT_TRUE(AudioFramesEqualExceptData(out_frame1, out_frame2));
    } else {
      EXPECT_TRUE(AudioFramesEqual(out_frame1, out_frame2));
    }
  }
  EXPECT_FALSE(muted);
}
1650
henrik.lundin114c1b32017-04-26 07:47:32 -07001651TEST_F(NetEqDecodingTest, LastDecodedTimestampsEmpty) {
1652 EXPECT_TRUE(neteq_->LastDecodedTimestamps().empty());
1653
1654 // Pull out data once.
1655 AudioFrame output;
1656 bool muted;
1657 ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));
1658
1659 EXPECT_TRUE(neteq_->LastDecodedTimestamps().empty());
1660}
1661
1662TEST_F(NetEqDecodingTest, LastDecodedTimestampsOneDecoded) {
1663 // Insert one packet with PCM16b WB data (this is what PopulateRtpInfo does by
1664 // default). Make the length 10 ms.
1665 constexpr size_t kPayloadSamples = 16 * 10;
1666 constexpr size_t kPayloadBytes = 2 * kPayloadSamples;
1667 uint8_t payload[kPayloadBytes] = {0};
1668
1669 RTPHeader rtp_info;
1670 constexpr uint32_t kRtpTimestamp = 0x1234;
1671 PopulateRtpInfo(0, kRtpTimestamp, &rtp_info);
1672 EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
1673
1674 // Pull out data once.
1675 AudioFrame output;
1676 bool muted;
1677 ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));
1678
1679 EXPECT_EQ(std::vector<uint32_t>({kRtpTimestamp}),
1680 neteq_->LastDecodedTimestamps());
1681
1682 // Nothing decoded on the second call.
1683 ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));
1684 EXPECT_TRUE(neteq_->LastDecodedTimestamps().empty());
1685}
1686
1687TEST_F(NetEqDecodingTest, LastDecodedTimestampsTwoDecoded) {
1688 // Insert two packets with PCM16b WB data (this is what PopulateRtpInfo does
1689 // by default). Make the length 5 ms so that NetEq must decode them both in
1690 // the same GetAudio call.
1691 constexpr size_t kPayloadSamples = 16 * 5;
1692 constexpr size_t kPayloadBytes = 2 * kPayloadSamples;
1693 uint8_t payload[kPayloadBytes] = {0};
1694
1695 RTPHeader rtp_info;
1696 constexpr uint32_t kRtpTimestamp1 = 0x1234;
1697 PopulateRtpInfo(0, kRtpTimestamp1, &rtp_info);
1698 EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
1699 constexpr uint32_t kRtpTimestamp2 = kRtpTimestamp1 + kPayloadSamples;
1700 PopulateRtpInfo(1, kRtpTimestamp2, &rtp_info);
1701 EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
1702
1703 // Pull out data once.
1704 AudioFrame output;
1705 bool muted;
1706 ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));
1707
1708 EXPECT_EQ(std::vector<uint32_t>({kRtpTimestamp1, kRtpTimestamp2}),
1709 neteq_->LastDecodedTimestamps());
1710}
1711
// Verifies that each contiguous burst of lost packets is counted as exactly
// one concealment event in the lifetime statistics, regardless of the burst
// length.
TEST_F(NetEqDecodingTest, TestConcealmentEvents) {
  const int kNumConcealmentEvents = 19;
  const size_t kSamples = 10 * 16;
  const size_t kPayloadBytes = kSamples * 2;
  int seq_no = 0;
  RTPHeader rtp_info;
  rtp_info.ssrc = 0x1234;     // Just an arbitrary SSRC.
  rtp_info.payloadType = 94;  // PCM16b WB codec.
  rtp_info.markerBit = 0;
  const uint8_t payload[kPayloadBytes] = {0};
  bool muted;

  for (int i = 0; i < kNumConcealmentEvents; i++) {
    // Insert some packets of 10 ms size.
    for (int j = 0; j < 10; j++) {
      rtp_info.sequenceNumber = seq_no++;
      rtp_info.timestamp = rtp_info.sequenceNumber * kSamples;
      neteq_->InsertPacket(rtp_info, payload, 0);
      neteq_->GetAudio(&out_frame_, &muted);
    }

    // Lose a number of packets. Skipping the sequence numbers while still
    // pulling audio forces NetEq to conceal; the burst grows by one packet
    // per iteration but must still count as a single event.
    int num_lost = 1 + i;
    for (int j = 0; j < num_lost; j++) {
      seq_no++;
      neteq_->GetAudio(&out_frame_, &muted);
    }
  }

  // Check number of concealment events.
  NetEqLifetimeStatistics stats = neteq_->GetLifetimeStatistics();
  EXPECT_EQ(kNumConcealmentEvents, static_cast<int>(stats.concealment_events));
}
1745
// Test that the jitter buffer delay stat is computed correctly.
// Sends kNumPackets 10 ms packets while holding playout back by
// kDelayInNumPackets packets, and checks that the reported
// jitter_buffer_delay_ms matches the locally accumulated expectation.
void NetEqDecodingTestFaxMode::TestJitterBufferDelay(bool apply_packet_loss) {
  const int kNumPackets = 10;
  const int kDelayInNumPackets = 2;
  const int kPacketLenMs = 10;  // All packets are of 10 ms size.
  const size_t kSamples = kPacketLenMs * 16;
  const size_t kPayloadBytes = kSamples * 2;
  RTPHeader rtp_info;
  rtp_info.ssrc = 0x1234;     // Just an arbitrary SSRC.
  rtp_info.payloadType = 94;  // PCM16b WB codec.
  rtp_info.markerBit = 0;
  const uint8_t payload[kPayloadBytes] = {0};
  bool muted;
  int packets_sent = 0;
  int packets_received = 0;
  int expected_delay = 0;
  while (packets_received < kNumPackets) {
    // Insert packet.
    if (packets_sent < kNumPackets) {
      rtp_info.sequenceNumber = packets_sent++;
      rtp_info.timestamp = rtp_info.sequenceNumber * kSamples;
      neteq_->InsertPacket(rtp_info, payload, 0);
    }

    // Get packet. Playout starts only after kDelayInNumPackets packets have
    // been buffered, creating the delay being measured.
    if (packets_sent > kDelayInNumPackets) {
      neteq_->GetAudio(&out_frame_, &muted);
      packets_received++;

      // The delay reported by the jitter buffer never exceeds
      // the number of samples previously fetched with GetAudio
      // (hence the min()).
      int packets_delay = std::min(packets_received, kDelayInNumPackets + 1);

      // The increase of the expected delay is the product of
      // the current delay of the jitter buffer in ms * the
      // number of samples that are sent for play out.
      // NOTE(review): expected_delay is thus accumulated in ms-times-samples
      // units, which must mirror how NetEq accumulates
      // jitter_buffer_delay_ms internally — confirm against the stats
      // implementation if this test starts failing.
      int current_delay_ms = packets_delay * kPacketLenMs;
      expected_delay += current_delay_ms * kSamples;
    }
  }

  if (apply_packet_loss) {
    // Extra call to GetAudio to cause concealment. Concealed samples must not
    // add to the jitter buffer delay stat.
    neteq_->GetAudio(&out_frame_, &muted);
  }

  // Check jitter buffer delay.
  NetEqLifetimeStatistics stats = neteq_->GetLifetimeStatistics();
  EXPECT_EQ(expected_delay, static_cast<int>(stats.jitter_buffer_delay_ms));
}
1797
1798TEST_F(NetEqDecodingTestFaxMode, TestJitterBufferDelayWithoutLoss) {
1799 TestJitterBufferDelay(false);
1800}
1801
1802TEST_F(NetEqDecodingTestFaxMode, TestJitterBufferDelayWithLoss) {
1803 TestJitterBufferDelay(true);
1804}
1805
henrik.lundin@webrtc.orge7ce4372014-01-09 14:01:55 +00001806} // namespace webrtc