/*
 *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "api/neteq/neteq.h"

#include <math.h>
#include <stdlib.h>
#include <string.h>  // memset

#include <algorithm>
#include <memory>
#include <set>
#include <string>
#include <vector>

#include "absl/flags/flag.h"
#include "api/audio/audio_frame.h"
#include "api/audio_codecs/builtin_audio_decoder_factory.h"
#include "modules/audio_coding/codecs/pcm16b/pcm16b.h"
#include "modules/audio_coding/neteq/test/neteq_decoding_test.h"
#include "modules/audio_coding/neteq/tools/audio_loop.h"
#include "modules/audio_coding/neteq/tools/neteq_packet_source_input.h"
#include "modules/audio_coding/neteq/tools/neteq_test.h"
#include "modules/include/module_common_types_public.h"
#include "modules/rtp_rtcp/include/rtcp_statistics.h"
#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "rtc_base/ignore_wundef.h"
#include "rtc_base/message_digest.h"
#include "rtc_base/numerics/safe_conversions.h"
#include "rtc_base/strings/string_builder.h"
#include "rtc_base/system/arch.h"
#include "test/field_trial.h"
#include "test/gtest.h"
#include "test/testsupport/file_utils.h"

ABSL_FLAG(bool, gen_ref, false, "Generate reference files.");
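// Note: per the flag description above, running the test binary with
// --gen_ref is meant to regenerate the reference data that DecodeAndCompare()
// checks against; the handling lives in NetEqDecodingTest, not in this file.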

namespace webrtc {

namespace {

const std::string& PlatformChecksum(const std::string& checksum_general,
                                    const std::string& checksum_android_32,
                                    const std::string& checksum_android_64,
                                    const std::string& checksum_win_32,
                                    const std::string& checksum_win_64) {
#if defined(WEBRTC_ANDROID)
#ifdef WEBRTC_ARCH_64_BITS
  return checksum_android_64;
#else
  return checksum_android_32;
#endif  // WEBRTC_ARCH_64_BITS
#elif defined(WEBRTC_WIN)
#ifdef WEBRTC_ARCH_64_BITS
  return checksum_win_64;
#else
  return checksum_win_32;
#endif  // WEBRTC_ARCH_64_BITS
#else
  return checksum_general;
#endif  // WEBRTC_WIN
}

}  // namespace


#if !defined(WEBRTC_IOS) && defined(WEBRTC_NETEQ_UNITTEST_BITEXACT) && \
    (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)) &&    \
    defined(WEBRTC_CODEC_ILBC) && !defined(WEBRTC_ARCH_ARM64)
#define MAYBE_TestBitExactness TestBitExactness
#else
#define MAYBE_TestBitExactness DISABLED_TestBitExactness
#endif
TEST_F(NetEqDecodingTest, MAYBE_TestBitExactness) {
  const std::string input_rtp_file =
      webrtc::test::ResourcePath("audio_coding/neteq_universal_new", "rtp");

  const std::string output_checksum = PlatformChecksum(
// TODO(bugs.webrtc.org/12941): Linux x86 optimized builds have a different
// checksum.
#if defined(WEBRTC_LINUX) && defined(NDEBUG) && defined(WEBRTC_ARCH_X86)
      "8d9c177b7f2f9398c0944a851edffae214de2c56",
#else
      "6c35140ce4d75874bdd60aa1872400b05fd05ca2",
#endif
      "ab451bb8301d9a92fbf4de91556b56f1ea38b4ce", "not used",
      "6c35140ce4d75874bdd60aa1872400b05fd05ca2",
      "64b46bb3c1165537a880ae8404afce2efba456c0");

  const std::string network_stats_checksum = PlatformChecksum(
// TODO(bugs.webrtc.org/12941): Linux x86 optimized builds have a different
// checksum.
#if defined(WEBRTC_LINUX) && defined(NDEBUG) && defined(WEBRTC_ARCH_X86)
      "8cc08e3cd6801dcba4fcc15eb4036c19296a140d",
#else
      "90594d85fa31d3d9584d79293bf7aa4ee55ed751",
#endif
      "77b9c3640b81aff6a38d69d07dd782d39c15321d", "not used",
      "90594d85fa31d3d9584d79293bf7aa4ee55ed751",
      "90594d85fa31d3d9584d79293bf7aa4ee55ed751");

  DecodeAndCompare(input_rtp_file, output_checksum, network_stats_checksum,
                   absl::GetFlag(FLAGS_gen_ref));
}

#if !defined(WEBRTC_IOS) && defined(WEBRTC_NETEQ_UNITTEST_BITEXACT) && \
    defined(WEBRTC_CODEC_OPUS)
#define MAYBE_TestOpusBitExactness TestOpusBitExactness
#else
#define MAYBE_TestOpusBitExactness DISABLED_TestOpusBitExactness
#endif
// TODO(http://bugs.webrtc.org/12518): Enable the test after Opus has been
// updated.
TEST_F(NetEqDecodingTest, DISABLED_TestOpusBitExactness) {
  const std::string input_rtp_file =
      webrtc::test::ResourcePath("audio_coding/neteq_opus", "rtp");

  const std::string maybe_sse =
      "c7887ff60eecf460332c6c7a28c81561f9e8a40f"
      "|673dd422cfc174152536d3b13af64f9722520ab5";
  const std::string output_checksum = PlatformChecksum(
      maybe_sse, "e39283dd61a89cead3786ef8642d2637cc447296",
      "53d8073eb848b70974cba9e26424f4946508fd19", maybe_sse, maybe_sse);

  const std::string network_stats_checksum =
      PlatformChecksum("c438bfa3b018f77691279eb9c63730569f54585c",
                       "8a474ed0992591e0c84f593824bb05979c3de157",
                       "9a05378dbf7e6edd56cdeb8ec45bcd6d8589623c",
                       "c438bfa3b018f77691279eb9c63730569f54585c",
                       "c438bfa3b018f77691279eb9c63730569f54585c");

  DecodeAndCompare(input_rtp_file, output_checksum, network_stats_checksum,
                   absl::GetFlag(FLAGS_gen_ref));
}

// TODO(http://bugs.webrtc.org/12518): Enable the test after Opus has been
// updated.
TEST_F(NetEqDecodingTest, DISABLED_TestOpusDtxBitExactness) {
  const std::string input_rtp_file =
      webrtc::test::ResourcePath("audio_coding/neteq_opus_dtx", "rtp");

  const std::string maybe_sse =
      "0fb0a3d6b3758ca6e108368bb777cd38d0a865af"
      "|79cfb99a21338ba977eb0e15eb8464e2db9436f8";
  const std::string output_checksum = PlatformChecksum(
      maybe_sse, "b6632690f8d7c2340c838df2821fc014f1cc8360",
      "f890b9eb9bc5ab8313489230726b297f6a0825af", maybe_sse, maybe_sse);

  const std::string network_stats_checksum =
      "18983bb67a57628c604dbdefa99574c6e0c5bb48";

  DecodeAndCompare(input_rtp_file, output_checksum, network_stats_checksum,
                   absl::GetFlag(FLAGS_gen_ref));
}

// Use fax mode to avoid time-scaling. This is to simplify the testing of
// packet waiting times in the packet buffer.
class NetEqDecodingTestFaxMode : public NetEqDecodingTest {
 protected:
  NetEqDecodingTestFaxMode() : NetEqDecodingTest() {
    config_.for_test_no_time_stretching = true;
  }
  void TestJitterBufferDelay(bool apply_packet_loss);
};

TEST_F(NetEqDecodingTestFaxMode, TestFrameWaitingTimeStatistics) {
  // Insert 30 dummy packets at once. Each packet contains 10 ms 16 kHz audio.
  size_t num_frames = 30;
  const size_t kSamples = 10 * 16;
  const size_t kPayloadBytes = kSamples * 2;
  for (size_t i = 0; i < num_frames; ++i) {
    const uint8_t payload[kPayloadBytes] = {0};
    RTPHeader rtp_info;
    rtp_info.sequenceNumber = rtc::checked_cast<uint16_t>(i);
    rtp_info.timestamp = rtc::checked_cast<uint32_t>(i * kSamples);
    rtp_info.ssrc = 0x1234;     // Just an arbitrary SSRC.
    rtp_info.payloadType = 94;  // PCM16b WB codec.
    rtp_info.markerBit = 0;
    ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload));
  }
  // Pull out all data.
  for (size_t i = 0; i < num_frames; ++i) {
    bool muted;
    ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
    ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
  }

  NetEqNetworkStatistics stats;
  EXPECT_EQ(0, neteq_->NetworkStatistics(&stats));
  // Since all frames are dumped into NetEQ at once, but pulled out with 10 ms
  // spacing (per definition), we expect the delay to increase by 10 ms for
  // each packet. Thus, we are calculating the statistics for a series from 10
  // to 300, in steps of 10 ms.
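  // Sanity of the expected values: for the series 10, 20, ..., 300 ms the
  // mean is (10 + 300) / 2 = 155 ms, and with 30 values the median is the
  // average of the 15th and 16th values, (150 + 160) / 2 = 155 ms.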
  EXPECT_EQ(155, stats.mean_waiting_time_ms);
  EXPECT_EQ(155, stats.median_waiting_time_ms);
  EXPECT_EQ(10, stats.min_waiting_time_ms);
  EXPECT_EQ(300, stats.max_waiting_time_ms);

  // Check statistics again and make sure it's been reset.
  EXPECT_EQ(0, neteq_->NetworkStatistics(&stats));
  EXPECT_EQ(-1, stats.mean_waiting_time_ms);
  EXPECT_EQ(-1, stats.median_waiting_time_ms);
  EXPECT_EQ(-1, stats.min_waiting_time_ms);
  EXPECT_EQ(-1, stats.max_waiting_time_ms);
}


TEST_F(NetEqDecodingTest, LongCngWithNegativeClockDrift) {
  // Apply a clock drift of -25 ms / s (sender faster than receiver).
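  // The drift factor encodes how far the two clocks diverge: a drift of
  // 25 ms per second means the clocks differ by 25 ms for every 1000 ms, so
  // the scale factor is 1000 / (1000 + 25) here and the mirrored
  // 1000 / (1000 - 25) in the positive-drift tests below.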
  const double kDriftFactor = 1000.0 / (1000.0 + 25.0);
  const double kNetworkFreezeTimeMs = 0.0;
  const bool kGetAudioDuringFreezeRecovery = false;
  const int kDelayToleranceMs = 20;
  const int kMaxTimeToSpeechMs = 100;
  LongCngWithClockDrift(kDriftFactor, kNetworkFreezeTimeMs,
                        kGetAudioDuringFreezeRecovery, kDelayToleranceMs,
                        kMaxTimeToSpeechMs);
}

TEST_F(NetEqDecodingTest, LongCngWithPositiveClockDrift) {
  // Apply a clock drift of +25 ms / s (sender slower than receiver).
  const double kDriftFactor = 1000.0 / (1000.0 - 25.0);
  const double kNetworkFreezeTimeMs = 0.0;
  const bool kGetAudioDuringFreezeRecovery = false;
  const int kDelayToleranceMs = 40;
  const int kMaxTimeToSpeechMs = 100;
  LongCngWithClockDrift(kDriftFactor, kNetworkFreezeTimeMs,
                        kGetAudioDuringFreezeRecovery, kDelayToleranceMs,
                        kMaxTimeToSpeechMs);
}

TEST_F(NetEqDecodingTest, LongCngWithNegativeClockDriftNetworkFreeze) {
  // Apply a clock drift of -25 ms / s (sender faster than receiver).
  const double kDriftFactor = 1000.0 / (1000.0 + 25.0);
  const double kNetworkFreezeTimeMs = 5000.0;
  const bool kGetAudioDuringFreezeRecovery = false;
  const int kDelayToleranceMs = 60;
  const int kMaxTimeToSpeechMs = 200;
  LongCngWithClockDrift(kDriftFactor, kNetworkFreezeTimeMs,
                        kGetAudioDuringFreezeRecovery, kDelayToleranceMs,
                        kMaxTimeToSpeechMs);
}

TEST_F(NetEqDecodingTest, LongCngWithPositiveClockDriftNetworkFreeze) {
  // Apply a clock drift of +25 ms / s (sender slower than receiver).
  const double kDriftFactor = 1000.0 / (1000.0 - 25.0);
  const double kNetworkFreezeTimeMs = 5000.0;
  const bool kGetAudioDuringFreezeRecovery = false;
  const int kDelayToleranceMs = 40;
  const int kMaxTimeToSpeechMs = 100;
  LongCngWithClockDrift(kDriftFactor, kNetworkFreezeTimeMs,
                        kGetAudioDuringFreezeRecovery, kDelayToleranceMs,
                        kMaxTimeToSpeechMs);
}

TEST_F(NetEqDecodingTest, LongCngWithPositiveClockDriftNetworkFreezeExtraPull) {
  // Apply a clock drift of +25 ms / s (sender slower than receiver).
  const double kDriftFactor = 1000.0 / (1000.0 - 25.0);
  const double kNetworkFreezeTimeMs = 5000.0;
  const bool kGetAudioDuringFreezeRecovery = true;
  const int kDelayToleranceMs = 40;
  const int kMaxTimeToSpeechMs = 100;
  LongCngWithClockDrift(kDriftFactor, kNetworkFreezeTimeMs,
                        kGetAudioDuringFreezeRecovery, kDelayToleranceMs,
                        kMaxTimeToSpeechMs);
}

TEST_F(NetEqDecodingTest, LongCngWithoutClockDrift) {
  const double kDriftFactor = 1.0;  // No drift.
  const double kNetworkFreezeTimeMs = 0.0;
  const bool kGetAudioDuringFreezeRecovery = false;
  const int kDelayToleranceMs = 10;
  const int kMaxTimeToSpeechMs = 50;
  LongCngWithClockDrift(kDriftFactor, kNetworkFreezeTimeMs,
                        kGetAudioDuringFreezeRecovery, kDelayToleranceMs,
                        kMaxTimeToSpeechMs);
}

TEST_F(NetEqDecodingTest, UnknownPayloadType) {
  const size_t kPayloadBytes = 100;
  uint8_t payload[kPayloadBytes] = {0};
  RTPHeader rtp_info;
  PopulateRtpInfo(0, 0, &rtp_info);
  rtp_info.payloadType = 1;  // Not registered as a decoder.
  EXPECT_EQ(NetEq::kFail, neteq_->InsertPacket(rtp_info, payload));
}

#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
#define MAYBE_DecoderError DecoderError
#else
#define MAYBE_DecoderError DISABLED_DecoderError
#endif

TEST_F(NetEqDecodingTest, MAYBE_DecoderError) {
  const size_t kPayloadBytes = 100;
  uint8_t payload[kPayloadBytes] = {0};
  RTPHeader rtp_info;
  PopulateRtpInfo(0, 0, &rtp_info);
  rtp_info.payloadType = 103;  // iSAC, but the payload is invalid.
  EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload));
  // Set all of |out_frame_| to 1, and verify that it was set to 0 by the call
  // to GetAudio.
  int16_t* out_frame_data = out_frame_.mutable_data();
  for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; ++i) {
    out_frame_data[i] = 1;
  }
  bool muted;
  EXPECT_EQ(NetEq::kFail, neteq_->GetAudio(&out_frame_, &muted));
  ASSERT_FALSE(muted);

  // Verify that the first 160 samples are set to 0.
  static const int kExpectedOutputLength = 160;  // 10 ms at 16 kHz sample rate.
  const int16_t* const_out_frame_data = out_frame_.data();
  for (int i = 0; i < kExpectedOutputLength; ++i) {
    rtc::StringBuilder ss;
    ss << "i = " << i;
    SCOPED_TRACE(ss.str());  // Print out the parameter values on failure.
    EXPECT_EQ(0, const_out_frame_data[i]);
  }
}

TEST_F(NetEqDecodingTest, GetAudioBeforeInsertPacket) {
  // Set all of |out_frame_| to 1, and verify that it was set to 0 by the call
  // to GetAudio.
  int16_t* out_frame_data = out_frame_.mutable_data();
  for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; ++i) {
    out_frame_data[i] = 1;
  }
  bool muted;
  EXPECT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
  ASSERT_FALSE(muted);
  // Verify that the first block of samples is set to 0.
  static const int kExpectedOutputLength =
      kInitSampleRateHz / 100;  // 10 ms at initial sample rate.
  const int16_t* const_out_frame_data = out_frame_.data();
  for (int i = 0; i < kExpectedOutputLength; ++i) {
    rtc::StringBuilder ss;
    ss << "i = " << i;
    SCOPED_TRACE(ss.str());  // Print out the parameter values on failure.
    EXPECT_EQ(0, const_out_frame_data[i]);
  }
  // Verify that the sample rate did not change from the initial configuration.
  EXPECT_EQ(config_.sample_rate_hz, neteq_->last_output_sample_rate_hz());
}

class NetEqBgnTest : public NetEqDecodingTest {
 protected:
  void CheckBgn(int sampling_rate_hz) {
    size_t expected_samples_per_channel = 0;
    uint8_t payload_type = 0xFF;  // Invalid.
    if (sampling_rate_hz == 8000) {
      expected_samples_per_channel = kBlockSize8kHz;
      payload_type = 93;  // PCM 16, 8 kHz.
    } else if (sampling_rate_hz == 16000) {
      expected_samples_per_channel = kBlockSize16kHz;
      payload_type = 94;  // PCM 16, 16 kHz.
    } else if (sampling_rate_hz == 32000) {
      expected_samples_per_channel = kBlockSize32kHz;
      payload_type = 95;  // PCM 16, 32 kHz.
    } else {
      ASSERT_TRUE(false);  // Unsupported test case.
    }

    AudioFrame output;
    test::AudioLoop input;
    // We are using the same 32 kHz input file for all tests, regardless of
    // |sampling_rate_hz|. The output may sound weird, but the test is still
    // valid.
    ASSERT_TRUE(input.Init(
        webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm"),
        10 * sampling_rate_hz,  // Max 10 seconds loop length.
        expected_samples_per_channel));

    // Payload of 10 ms of PCM16 32 kHz.
    uint8_t payload[kBlockSize32kHz * sizeof(int16_t)];
    RTPHeader rtp_info;
    PopulateRtpInfo(0, 0, &rtp_info);
    rtp_info.payloadType = payload_type;

    uint32_t receive_timestamp = 0;
    bool muted;
    for (int n = 0; n < 10; ++n) {  // Insert a few packets and get audio.
      auto block = input.GetNextBlock();
      ASSERT_EQ(expected_samples_per_channel, block.size());
      size_t enc_len_bytes =
          WebRtcPcm16b_Encode(block.data(), block.size(), payload);
      ASSERT_EQ(enc_len_bytes, expected_samples_per_channel * 2);

      ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, rtc::ArrayView<const uint8_t>(
                                            payload, enc_len_bytes)));
      output.Reset();
      ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));
      ASSERT_EQ(1u, output.num_channels_);
      ASSERT_EQ(expected_samples_per_channel, output.samples_per_channel_);
      ASSERT_EQ(AudioFrame::kNormalSpeech, output.speech_type_);

      // Next packet.
      rtp_info.timestamp +=
          rtc::checked_cast<uint32_t>(expected_samples_per_channel);
      rtp_info.sequenceNumber++;
      receive_timestamp +=
          rtc::checked_cast<uint32_t>(expected_samples_per_channel);
    }

    output.Reset();

    // Get audio without inserting packets, expecting PLC and PLC-to-CNG. Pull
    // one frame without checking speech-type. This is the first frame pulled
    // without inserting any packet, and might not be labeled as PLC.
    ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));
    ASSERT_EQ(1u, output.num_channels_);
    ASSERT_EQ(expected_samples_per_channel, output.samples_per_channel_);

    // To be able to test the fading of background noise we need to pull at
    // least 611 frames.
    const int kFadingThreshold = 611;

    // Test several PLC-to-CNG frames for the expected behavior. The number 20
    // is arbitrary, but large enough to cover a sufficient number of frames.
    const int kNumPlcToCngTestFrames = 20;
    bool plc_to_cng = false;
    for (int n = 0; n < kFadingThreshold + kNumPlcToCngTestFrames; ++n) {
      output.Reset();
      // Set to non-zero.
      memset(output.mutable_data(), 1, AudioFrame::kMaxDataSizeBytes);
      ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));
      ASSERT_FALSE(muted);
      ASSERT_EQ(1u, output.num_channels_);
      ASSERT_EQ(expected_samples_per_channel, output.samples_per_channel_);
      if (output.speech_type_ == AudioFrame::kPLCCNG) {
        plc_to_cng = true;
        double sum_squared = 0;
        const int16_t* output_data = output.data();
        for (size_t k = 0;
             k < output.num_channels_ * output.samples_per_channel_; ++k)
          sum_squared += output_data[k] * output_data[k];
        EXPECT_EQ(0, sum_squared);
      } else {
        EXPECT_EQ(AudioFrame::kPLC, output.speech_type_);
      }
    }
    EXPECT_TRUE(plc_to_cng);  // Just to be sure that PLC-to-CNG has occurred.
  }
};

TEST_F(NetEqBgnTest, RunTest) {
  CheckBgn(8000);
  CheckBgn(16000);
  CheckBgn(32000);
}

TEST_F(NetEqDecodingTest, SequenceNumberWrap) {
  // Start with a sequence number that will soon wrap.
  std::set<uint16_t> drop_seq_numbers;  // Don't drop any packets.
  WrapTest(0xFFFF - 10, 0, drop_seq_numbers, true, false);
}

TEST_F(NetEqDecodingTest, SequenceNumberWrapAndDrop) {
  // Start with a sequence number that will soon wrap.
  std::set<uint16_t> drop_seq_numbers;
  drop_seq_numbers.insert(0xFFFF);
  drop_seq_numbers.insert(0x0);
  WrapTest(0xFFFF - 10, 0, drop_seq_numbers, true, false);
}

TEST_F(NetEqDecodingTest, TimestampWrap) {
  // Start with a timestamp that will soon wrap.
  std::set<uint16_t> drop_seq_numbers;
  WrapTest(0, 0xFFFFFFFF - 3000, drop_seq_numbers, false, true);
}

TEST_F(NetEqDecodingTest, TimestampAndSequenceNumberWrap) {
  // Start with a timestamp and a sequence number that will wrap at the same
  // time.
  std::set<uint16_t> drop_seq_numbers;
  WrapTest(0xFFFF - 10, 0xFFFFFFFF - 5000, drop_seq_numbers, true, true);
}

TEST_F(NetEqDecodingTest, DiscardDuplicateCng) {
  uint16_t seq_no = 0;
  uint32_t timestamp = 0;
  const int kFrameSizeMs = 10;
  const int kSampleRateKhz = 16;
  const int kSamples = kFrameSizeMs * kSampleRateKhz;
  const size_t kPayloadBytes = kSamples * 2;

  const int algorithmic_delay_samples =
      std::max(algorithmic_delay_ms_ * kSampleRateKhz, 5 * kSampleRateKhz / 8);
  // Insert three speech packets. Three are needed to get the frame length
  // correct.
  uint8_t payload[kPayloadBytes] = {0};
  RTPHeader rtp_info;
  bool muted;
  for (int i = 0; i < 3; ++i) {
    PopulateRtpInfo(seq_no, timestamp, &rtp_info);
    ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload));
    ++seq_no;
    timestamp += kSamples;

    // Pull audio once.
    ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
    ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
  }
  // Verify speech output.
  EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_);

  // Insert same CNG packet twice.
  const int kCngPeriodMs = 100;
  const int kCngPeriodSamples = kCngPeriodMs * kSampleRateKhz;
  size_t payload_len;
  PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len);
  // This is the first time this CNG packet is inserted.
  ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, rtc::ArrayView<const uint8_t>(
                                        payload, payload_len)));

  // Pull audio once and make sure CNG is played.
  ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
  ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
  EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
  EXPECT_FALSE(
      neteq_->GetPlayoutTimestamp());  // Returns empty value during CNG.
  EXPECT_EQ(timestamp - algorithmic_delay_samples,
            out_frame_.timestamp_ + out_frame_.samples_per_channel_);

  // Insert the same CNG packet again. Note that at this point it is old, since
  // we have already decoded the first copy of it.
  ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, rtc::ArrayView<const uint8_t>(
                                        payload, payload_len)));

  // Pull audio until we have played |kCngPeriodMs| of CNG. Start at 10 ms since
  // we have already pulled out CNG once.
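  // With kCngPeriodMs = 100, the loop below makes nine more 10 ms pulls, i.e.
  // 100 ms of CNG in total including the pull above.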
  for (int cng_time_ms = 10; cng_time_ms < kCngPeriodMs; cng_time_ms += 10) {
    ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
    ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
    EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
    EXPECT_FALSE(
        neteq_->GetPlayoutTimestamp());  // Returns empty value during CNG.
    EXPECT_EQ(timestamp - algorithmic_delay_samples,
              out_frame_.timestamp_ + out_frame_.samples_per_channel_);
  }

  // Insert speech again.
  ++seq_no;
  timestamp += kCngPeriodSamples;
  PopulateRtpInfo(seq_no, timestamp, &rtp_info);
  ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload));

  // Pull audio once and verify that the output is speech again.
  ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
  ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
  EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_);
  absl::optional<uint32_t> playout_timestamp = neteq_->GetPlayoutTimestamp();
  ASSERT_TRUE(playout_timestamp);
  EXPECT_EQ(timestamp + kSamples - algorithmic_delay_samples,
            *playout_timestamp);
}

TEST_F(NetEqDecodingTest, CngFirst) {
  uint16_t seq_no = 0;
  uint32_t timestamp = 0;
  const int kFrameSizeMs = 10;
  const int kSampleRateKhz = 16;
  const int kSamples = kFrameSizeMs * kSampleRateKhz;
  const int kPayloadBytes = kSamples * 2;
  const int kCngPeriodMs = 100;
  const int kCngPeriodSamples = kCngPeriodMs * kSampleRateKhz;
  size_t payload_len;

  uint8_t payload[kPayloadBytes] = {0};
  RTPHeader rtp_info;

  PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len);
  ASSERT_EQ(NetEq::kOK,
            neteq_->InsertPacket(
                rtp_info, rtc::ArrayView<const uint8_t>(payload, payload_len)));
  ++seq_no;
  timestamp += kCngPeriodSamples;

  // Pull audio once and make sure CNG is played.
  bool muted;
  ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
  ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
  EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);

  // Insert some speech packets.
  const uint32_t first_speech_timestamp = timestamp;
  int timeout_counter = 0;
  do {
    ASSERT_LT(timeout_counter++, 20) << "Test timed out";
    PopulateRtpInfo(seq_no, timestamp, &rtp_info);
    ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload));
    ++seq_no;
    timestamp += kSamples;

    // Pull audio once.
    ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
    ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
  } while (!IsNewerTimestamp(out_frame_.timestamp_, first_speech_timestamp));
  // Verify speech output.
  EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_);
}

class NetEqDecodingTestWithMutedState : public NetEqDecodingTest {
 public:
  NetEqDecodingTestWithMutedState() : NetEqDecodingTest() {
    config_.enable_muted_state = true;
  }

 protected:
  static constexpr size_t kSamples = 10 * 16;
  static constexpr size_t kPayloadBytes = kSamples * 2;

  void InsertPacket(uint32_t rtp_timestamp) {
    uint8_t payload[kPayloadBytes] = {0};
    RTPHeader rtp_info;
    PopulateRtpInfo(0, rtp_timestamp, &rtp_info);
    EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload));
  }

  void InsertCngPacket(uint32_t rtp_timestamp) {
    uint8_t payload[kPayloadBytes] = {0};
    RTPHeader rtp_info;
    size_t payload_len;
    PopulateCng(0, rtp_timestamp, &rtp_info, payload, &payload_len);
    EXPECT_EQ(NetEq::kOK,
              neteq_->InsertPacket(rtp_info, rtc::ArrayView<const uint8_t>(
                                       payload, payload_len)));
  }

  bool GetAudioReturnMuted() {
    bool muted;
    EXPECT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
    return muted;
  }

  void GetAudioUntilMuted() {
    while (!GetAudioReturnMuted()) {
      ASSERT_LT(counter_++, 1000) << "Test timed out";
    }
  }

  void GetAudioUntilNormal() {
    bool muted = false;
    while (out_frame_.speech_type_ != AudioFrame::kNormalSpeech) {
      EXPECT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
      ASSERT_LT(counter_++, 1000) << "Test timed out";
    }
    EXPECT_FALSE(muted);
  }

  int counter_ = 0;
};

// Verifies that NetEq goes in and out of muted state as expected.
TEST_F(NetEqDecodingTestWithMutedState, MutedState) {
  // Insert one speech packet.
  InsertPacket(0);
  // Pull out audio once and expect it not to be muted.
  EXPECT_FALSE(GetAudioReturnMuted());
  // Pull data until faded out.
  GetAudioUntilMuted();
  EXPECT_TRUE(out_frame_.muted());

  // Verify that output audio is not written during muted mode. Other parameters
  // should be correct, though.
  AudioFrame new_frame;
  int16_t* frame_data = new_frame.mutable_data();
  for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; i++) {
    frame_data[i] = 17;
  }
  bool muted;
  EXPECT_EQ(0, neteq_->GetAudio(&new_frame, &muted));
  EXPECT_TRUE(muted);
  EXPECT_TRUE(out_frame_.muted());
  for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; i++) {
    EXPECT_EQ(17, frame_data[i]);
  }
  EXPECT_EQ(out_frame_.timestamp_ + out_frame_.samples_per_channel_,
            new_frame.timestamp_);
  EXPECT_EQ(out_frame_.samples_per_channel_, new_frame.samples_per_channel_);
  EXPECT_EQ(out_frame_.sample_rate_hz_, new_frame.sample_rate_hz_);
  EXPECT_EQ(out_frame_.num_channels_, new_frame.num_channels_);
  EXPECT_EQ(out_frame_.speech_type_, new_frame.speech_type_);
  EXPECT_EQ(out_frame_.vad_activity_, new_frame.vad_activity_);

  // Insert new data. Timestamp is corrected for the time elapsed since the last
  // packet. Verify that normal operation resumes.
  InsertPacket(kSamples * counter_);
  GetAudioUntilNormal();
  EXPECT_FALSE(out_frame_.muted());

  NetEqNetworkStatistics stats;
  EXPECT_EQ(0, neteq_->NetworkStatistics(&stats));
  // NetEqNetworkStatistics::expand_rate tells the fraction of samples that were
  // concealment samples, in Q14 (16384 = 100%). The vast majority should be
  // concealment samples in this test.
  EXPECT_GT(stats.expand_rate, 14000);
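  // 14000 / 16384 is roughly 85%, so this requires at least about 85% of the
  // emitted samples to be concealment.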
  // And, it should be greater than the speech_expand_rate.
  EXPECT_GT(stats.expand_rate, stats.speech_expand_rate);
}

// Verifies that NetEq goes out of muted state when given a delayed packet.
TEST_F(NetEqDecodingTestWithMutedState, MutedStateDelayedPacket) {
  // Insert one speech packet.
  InsertPacket(0);
  // Pull out audio once and expect it not to be muted.
  EXPECT_FALSE(GetAudioReturnMuted());
  // Pull data until faded out.
  GetAudioUntilMuted();
  // Insert new data. Timestamp is only corrected for half of the time elapsed
  // since the last packet. That is, the new packet is delayed. Verify that
  // normal operation resumes.
  InsertPacket(kSamples * counter_ / 2);
  GetAudioUntilNormal();
}

// Verifies that NetEq goes out of muted state when given a future packet.
TEST_F(NetEqDecodingTestWithMutedState, MutedStateFuturePacket) {
  // Insert one speech packet.
  InsertPacket(0);
  // Pull out audio once and expect it not to be muted.
  EXPECT_FALSE(GetAudioReturnMuted());
  // Pull data until faded out.
  GetAudioUntilMuted();
  // Insert new data. Timestamp is over-corrected for the time elapsed since the
  // last packet. That is, the new packet is too early. Verify that normal
  // operation resumes.
  InsertPacket(kSamples * counter_ * 2);
  GetAudioUntilNormal();
}

// Verifies that NetEq goes out of muted state when given an old packet.
TEST_F(NetEqDecodingTestWithMutedState, MutedStateOldPacket) {
  // Insert one speech packet.
  InsertPacket(0);
  // Pull out audio once and expect it not to be muted.
  EXPECT_FALSE(GetAudioReturnMuted());
  // Pull data until faded out.
  GetAudioUntilMuted();

  EXPECT_NE(AudioFrame::kNormalSpeech, out_frame_.speech_type_);
  // Insert a few packets which are older than the first packet.
  for (int i = 0; i < 5; ++i) {
    InsertPacket(kSamples * (i - 1000));
  }
  EXPECT_FALSE(GetAudioReturnMuted());
  EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_);
}

// Verifies that NetEq doesn't enter muted state when CNG mode is active and the
// packet stream is suspended for a long time.
TEST_F(NetEqDecodingTestWithMutedState, DoNotMuteExtendedCngWithoutPackets) {
  // Insert one CNG packet.
  InsertCngPacket(0);

  // Pull 10 seconds of audio (10 ms audio generated per lap).
  for (int i = 0; i < 1000; ++i) {
    bool muted;
    EXPECT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
    ASSERT_FALSE(muted);
  }
  EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
}

// Verifies that NetEq goes back to normal after a long CNG period with the
// packet stream suspended.
TEST_F(NetEqDecodingTestWithMutedState, RecoverAfterExtendedCngWithoutPackets) {
  // Insert one CNG packet.
  InsertCngPacket(0);

  // Pull 10 seconds of audio (10 ms audio generated per lap).
  for (int i = 0; i < 1000; ++i) {
    bool muted;
    EXPECT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
  }

  // Insert new data. Timestamp is corrected for the time elapsed since the last
  // packet. Verify that normal operation resumes.
  InsertPacket(kSamples * counter_);
  GetAudioUntilNormal();
}

namespace {
::testing::AssertionResult AudioFramesEqualExceptData(const AudioFrame& a,
                                                      const AudioFrame& b) {
  if (a.timestamp_ != b.timestamp_)
    return ::testing::AssertionFailure() << "timestamp_ diff (" << a.timestamp_
                                         << " != " << b.timestamp_ << ")";
  if (a.sample_rate_hz_ != b.sample_rate_hz_)
    return ::testing::AssertionFailure()
           << "sample_rate_hz_ diff (" << a.sample_rate_hz_
           << " != " << b.sample_rate_hz_ << ")";
  if (a.samples_per_channel_ != b.samples_per_channel_)
    return ::testing::AssertionFailure()
           << "samples_per_channel_ diff (" << a.samples_per_channel_
           << " != " << b.samples_per_channel_ << ")";
  if (a.num_channels_ != b.num_channels_)
    return ::testing::AssertionFailure()
           << "num_channels_ diff (" << a.num_channels_
           << " != " << b.num_channels_ << ")";
  if (a.speech_type_ != b.speech_type_)
    return ::testing::AssertionFailure()
           << "speech_type_ diff (" << a.speech_type_
           << " != " << b.speech_type_ << ")";
  if (a.vad_activity_ != b.vad_activity_)
    return ::testing::AssertionFailure()
           << "vad_activity_ diff (" << a.vad_activity_
           << " != " << b.vad_activity_ << ")";
  return ::testing::AssertionSuccess();
}

::testing::AssertionResult AudioFramesEqual(const AudioFrame& a,
                                            const AudioFrame& b) {
  ::testing::AssertionResult res = AudioFramesEqualExceptData(a, b);
  if (!res)
    return res;
  if (memcmp(a.data(), b.data(),
             a.samples_per_channel_ * a.num_channels_ * sizeof(*a.data())) !=
      0) {
    return ::testing::AssertionFailure() << "data_ diff";
  }
  return ::testing::AssertionSuccess();
}

}  // namespace

TEST_F(NetEqDecodingTestTwoInstances, CompareMutedStateOnOff) {
  ASSERT_FALSE(config_.enable_muted_state);
  config2_.enable_muted_state = true;
  CreateSecondInstance();

  // Insert one speech packet into both NetEqs.
  const size_t kSamples = 10 * 16;
  const size_t kPayloadBytes = kSamples * 2;
  uint8_t payload[kPayloadBytes] = {0};
  RTPHeader rtp_info;
  PopulateRtpInfo(0, 0, &rtp_info);
  EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload));
  EXPECT_EQ(0, neteq2_->InsertPacket(rtp_info, payload));

  AudioFrame out_frame1, out_frame2;
  bool muted;
  for (int i = 0; i < 1000; ++i) {
    rtc::StringBuilder ss;
    ss << "i = " << i;
    SCOPED_TRACE(ss.str());  // Print out the loop iterator on failure.
    EXPECT_EQ(0, neteq_->GetAudio(&out_frame1, &muted));
    EXPECT_FALSE(muted);
    EXPECT_EQ(0, neteq2_->GetAudio(&out_frame2, &muted));
    if (muted) {
      EXPECT_TRUE(AudioFramesEqualExceptData(out_frame1, out_frame2));
    } else {
      EXPECT_TRUE(AudioFramesEqual(out_frame1, out_frame2));
    }
  }
  EXPECT_TRUE(muted);

  // Insert new data. Timestamp is corrected for the time elapsed since the last
  // packet.
  for (int i = 0; i < 5; ++i) {
    PopulateRtpInfo(0, kSamples * 1000 + kSamples * i, &rtp_info);
    EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload));
    EXPECT_EQ(0, neteq2_->InsertPacket(rtp_info, payload));
  }

  int counter = 0;
  while (out_frame1.speech_type_ != AudioFrame::kNormalSpeech) {
    ASSERT_LT(counter++, 1000) << "Test timed out";
    rtc::StringBuilder ss;
    ss << "counter = " << counter;
    SCOPED_TRACE(ss.str());  // Print out the loop iterator on failure.
    EXPECT_EQ(0, neteq_->GetAudio(&out_frame1, &muted));
    EXPECT_FALSE(muted);
    EXPECT_EQ(0, neteq2_->GetAudio(&out_frame2, &muted));
    if (muted) {
      EXPECT_TRUE(AudioFramesEqualExceptData(out_frame1, out_frame2));
    } else {
      EXPECT_TRUE(AudioFramesEqual(out_frame1, out_frame2));
    }
  }
  EXPECT_FALSE(muted);
}

TEST_F(NetEqDecodingTest, LastDecodedTimestampsEmpty) {
  EXPECT_TRUE(neteq_->LastDecodedTimestamps().empty());

  // Pull out data once.
  AudioFrame output;
  bool muted;
  ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));

  EXPECT_TRUE(neteq_->LastDecodedTimestamps().empty());
}

TEST_F(NetEqDecodingTest, LastDecodedTimestampsOneDecoded) {
  // Insert one packet with PCM16b WB data (this is what PopulateRtpInfo does by
  // default). Make the length 10 ms.
  constexpr size_t kPayloadSamples = 16 * 10;
  constexpr size_t kPayloadBytes = 2 * kPayloadSamples;
  uint8_t payload[kPayloadBytes] = {0};

  RTPHeader rtp_info;
  constexpr uint32_t kRtpTimestamp = 0x1234;
  PopulateRtpInfo(0, kRtpTimestamp, &rtp_info);
  EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload));

  // Pull out data once.
  AudioFrame output;
  bool muted;
  ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));

  EXPECT_EQ(std::vector<uint32_t>({kRtpTimestamp}),
            neteq_->LastDecodedTimestamps());

  // Nothing decoded on the second call.
  ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));
  EXPECT_TRUE(neteq_->LastDecodedTimestamps().empty());
}

TEST_F(NetEqDecodingTest, LastDecodedTimestampsTwoDecoded) {
  // Insert two packets with PCM16b WB data (this is what PopulateRtpInfo does
  // by default). Make the length 5 ms so that NetEq must decode them both in
  // the same GetAudio call.
  constexpr size_t kPayloadSamples = 16 * 5;
  constexpr size_t kPayloadBytes = 2 * kPayloadSamples;
  uint8_t payload[kPayloadBytes] = {0};

  RTPHeader rtp_info;
  constexpr uint32_t kRtpTimestamp1 = 0x1234;
  PopulateRtpInfo(0, kRtpTimestamp1, &rtp_info);
  EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload));
  constexpr uint32_t kRtpTimestamp2 = kRtpTimestamp1 + kPayloadSamples;
  PopulateRtpInfo(1, kRtpTimestamp2, &rtp_info);
  EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload));

  // Pull out data once.
  AudioFrame output;
  bool muted;
  ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));

  EXPECT_EQ(std::vector<uint32_t>({kRtpTimestamp1, kRtpTimestamp2}),
            neteq_->LastDecodedTimestamps());
}

TEST_F(NetEqDecodingTest, TestConcealmentEvents) {
  const int kNumConcealmentEvents = 19;
  const size_t kSamples = 10 * 16;
  const size_t kPayloadBytes = kSamples * 2;
  int seq_no = 0;
  RTPHeader rtp_info;
  rtp_info.ssrc = 0x1234;     // Just an arbitrary SSRC.
  rtp_info.payloadType = 94;  // PCM16b WB codec.
  rtp_info.markerBit = 0;
  const uint8_t payload[kPayloadBytes] = {0};
  bool muted;

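  // Each iteration below plays out 100 ms of inserted audio and then pulls
  // audio for an increasing number of lost packets; every such contiguous
  // stretch of concealment is expected to be counted as one concealment event.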
  for (int i = 0; i < kNumConcealmentEvents; i++) {
    // Insert some packets of 10 ms size.
    for (int j = 0; j < 10; j++) {
      rtp_info.sequenceNumber = seq_no++;
      rtp_info.timestamp = rtp_info.sequenceNumber * kSamples;
      neteq_->InsertPacket(rtp_info, payload);
      neteq_->GetAudio(&out_frame_, &muted);
    }

    // Lose a number of packets.
    int num_lost = 1 + i;
    for (int j = 0; j < num_lost; j++) {
      seq_no++;
      neteq_->GetAudio(&out_frame_, &muted);
    }
  }

  // Check number of concealment events.
  NetEqLifetimeStatistics stats = neteq_->GetLifetimeStatistics();
  EXPECT_EQ(kNumConcealmentEvents, static_cast<int>(stats.concealment_events));
}

// Test that the jitter buffer delay stat is computed correctly.
void NetEqDecodingTestFaxMode::TestJitterBufferDelay(bool apply_packet_loss) {
  const int kNumPackets = 10;
  const int kDelayInNumPackets = 2;
  const int kPacketLenMs = 10;  // All packets are of 10 ms size.
  const size_t kSamples = kPacketLenMs * 16;
  const size_t kPayloadBytes = kSamples * 2;
  RTPHeader rtp_info;
  rtp_info.ssrc = 0x1234;     // Just an arbitrary SSRC.
  rtp_info.payloadType = 94;  // PCM16b WB codec.
  rtp_info.markerBit = 0;
  const uint8_t payload[kPayloadBytes] = {0};
  bool muted;
  int packets_sent = 0;
  int packets_received = 0;
  int expected_delay = 0;
  int expected_target_delay = 0;
  uint64_t expected_emitted_count = 0;
  while (packets_received < kNumPackets) {
    // Insert packet.
    if (packets_sent < kNumPackets) {
      rtp_info.sequenceNumber = packets_sent++;
      rtp_info.timestamp = rtp_info.sequenceNumber * kSamples;
      neteq_->InsertPacket(rtp_info, payload);
    }

    // Get packet.
    if (packets_sent > kDelayInNumPackets) {
      neteq_->GetAudio(&out_frame_, &muted);
      packets_received++;

      // The delay reported by the jitter buffer never exceeds
      // the number of samples previously fetched with GetAudio
      // (hence the min()).
      int packets_delay = std::min(packets_received, kDelayInNumPackets + 1);

      // The expected delay increases by the current jitter buffer delay in ms
      // times the number of samples emitted for playout.
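      // Note: jitter_buffer_delay_ms accumulates, for each emitted sample, the
      // time in ms that the sample spent in the buffer, so dividing it by
      // jitter_buffer_emitted_count gives the average delay per sample.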
      int current_delay_ms = packets_delay * kPacketLenMs;
      expected_delay += current_delay_ms * kSamples;
      expected_target_delay += neteq_->TargetDelayMs() * kSamples;
      expected_emitted_count += kSamples;
    }
  }

  if (apply_packet_loss) {
    // Extra call to GetAudio to cause concealment.
    neteq_->GetAudio(&out_frame_, &muted);
  }

  // Check the jitter buffer delay stats.
  NetEqLifetimeStatistics stats = neteq_->GetLifetimeStatistics();
  EXPECT_EQ(expected_delay,
            rtc::checked_cast<int>(stats.jitter_buffer_delay_ms));
  EXPECT_EQ(expected_emitted_count, stats.jitter_buffer_emitted_count);
  EXPECT_EQ(expected_target_delay,
            rtc::checked_cast<int>(stats.jitter_buffer_target_delay_ms));
}

TEST_F(NetEqDecodingTestFaxMode, TestJitterBufferDelayWithoutLoss) {
  TestJitterBufferDelay(false);
}

TEST_F(NetEqDecodingTestFaxMode, TestJitterBufferDelayWithLoss) {
  TestJitterBufferDelay(true);
}

TEST_F(NetEqDecodingTestFaxMode, TestJitterBufferDelayWithAcceleration) {
  const int kPacketLenMs = 10;  // All packets are of 10 ms size.
  const size_t kSamples = kPacketLenMs * 16;
  const size_t kPayloadBytes = kSamples * 2;
  RTPHeader rtp_info;
  rtp_info.ssrc = 0x1234;     // Just an arbitrary SSRC.
  rtp_info.payloadType = 94;  // PCM16b WB codec.
  rtp_info.markerBit = 0;
  const uint8_t payload[kPayloadBytes] = {0};

  int expected_target_delay = neteq_->TargetDelayMs() * kSamples;
  neteq_->InsertPacket(rtp_info, payload);

  bool muted;
  neteq_->GetAudio(&out_frame_, &muted);

  rtp_info.sequenceNumber += 1;
  rtp_info.timestamp += kSamples;
  neteq_->InsertPacket(rtp_info, payload);
  rtp_info.sequenceNumber += 1;
  rtp_info.timestamp += kSamples;
  neteq_->InsertPacket(rtp_info, payload);

  expected_target_delay += neteq_->TargetDelayMs() * 2 * kSamples;
  // We have two packets in the buffer, and the kAccelerate operation will
  // extract 20 ms of data.
  neteq_->GetAudio(&out_frame_, &muted, nullptr,
                   NetEq::Operation::kAccelerate);

  // Check jitter buffer delay.
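  // Three 10 ms frames are emitted in total, and each one is expected to have
  // spent roughly one packet length (10 ms) in the buffer, which gives the
  // 10 * kSamples * 3 accumulated below.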
  NetEqLifetimeStatistics stats = neteq_->GetLifetimeStatistics();
  EXPECT_EQ(10 * kSamples * 3, stats.jitter_buffer_delay_ms);
  EXPECT_EQ(kSamples * 3, stats.jitter_buffer_emitted_count);
  EXPECT_EQ(expected_target_delay,
            rtc::checked_cast<int>(stats.jitter_buffer_target_delay_ms));
}

namespace test {
TEST(NetEqNoTimeStretchingMode, RunTest) {
  NetEq::Config config;
  config.for_test_no_time_stretching = true;
  auto codecs = NetEqTest::StandardDecoderMap();
  NetEqPacketSourceInput::RtpHeaderExtensionMap rtp_ext_map = {
      {1, kRtpExtensionAudioLevel},
      {3, kRtpExtensionAbsoluteSendTime},
      {5, kRtpExtensionTransportSequenceNumber},
      {7, kRtpExtensionVideoContentType},
      {8, kRtpExtensionVideoTiming}};
  std::unique_ptr<NetEqInput> input(new NetEqRtpDumpInput(
      webrtc::test::ResourcePath("audio_coding/neteq_universal_new", "rtp"),
      rtp_ext_map, absl::nullopt /*No SSRC filter*/));
  std::unique_ptr<TimeLimitedNetEqInput> input_time_limit(
      new TimeLimitedNetEqInput(std::move(input), 20000));
  std::unique_ptr<AudioSink> output(new VoidAudioSink);
  NetEqTest::Callbacks callbacks;
  NetEqTest test(config, CreateBuiltinAudioDecoderFactory(), codecs,
                 /*text_log=*/nullptr, /*neteq_factory=*/nullptr,
                 /*input=*/std::move(input_time_limit), std::move(output),
                 callbacks);
  test.Run();
  const auto stats = test.SimulationStats();
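  // With time stretching disabled, NetEq should never perform accelerate or
  // preemptive-expand operations, so both rates are expected to be zero.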
  EXPECT_EQ(0, stats.accelerate_rate);
  EXPECT_EQ(0, stats.preemptive_rate);
}

namespace {
// Helper classes, data types, and functions for NetEqOutputDelayTest.

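// Audio sink that appends all written samples to an externally owned vector.
// Used by DelayLineNetEqTest below so that the decoded output can be compared
// sample by sample between simulation runs.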
class VectorAudioSink : public AudioSink {
 public:
  // Does not take ownership of the vector.
  explicit VectorAudioSink(std::vector<int16_t>* output_vector)
      : v_(output_vector) {}

  virtual ~VectorAudioSink() = default;

  bool WriteArray(const int16_t* audio, size_t num_samples) override {
    v_->reserve(v_->size() + num_samples);
    for (size_t i = 0; i < num_samples; ++i) {
      v_->push_back(audio[i]);
    }
    return true;
  }

 private:
  std::vector<int16_t>* const v_;
};

struct TestResult {
  NetEqLifetimeStatistics lifetime_stats;
  NetEqNetworkStatistics network_stats;
  absl::optional<uint32_t> playout_timestamp;
  int target_delay_ms;
  int filtered_current_delay_ms;
  int sample_rate_hz;
};

// This class is used as a callback object by NetEqTest to collect some stats
// at the end of the simulation.
class SimEndStatsCollector : public NetEqSimulationEndedCallback {
 public:
  explicit SimEndStatsCollector(TestResult& result) : result_(result) {}

  void SimulationEnded(int64_t /*simulation_time_ms*/, NetEq* neteq) override {
    result_.playout_timestamp = neteq->GetPlayoutTimestamp();
    result_.target_delay_ms = neteq->TargetDelayMs();
    result_.filtered_current_delay_ms = neteq->FilteredCurrentDelayMs();
    result_.sample_rate_hz = neteq->last_output_sample_rate_hz();
  }

 private:
  TestResult& result_;
};

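// Runs a NetEq simulation over the neteq_universal_new RTP dump with the given
// extra output delay applied. Decoded audio is appended to `output_vector`,
// and the collected stats are returned in a TestResult.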
TestResult DelayLineNetEqTest(int delay_ms,
                              std::vector<int16_t>* output_vector) {
  NetEq::Config config;
  config.for_test_no_time_stretching = true;
  config.extra_output_delay_ms = delay_ms;
  auto codecs = NetEqTest::StandardDecoderMap();
  NetEqPacketSourceInput::RtpHeaderExtensionMap rtp_ext_map = {
      {1, kRtpExtensionAudioLevel},
      {3, kRtpExtensionAbsoluteSendTime},
      {5, kRtpExtensionTransportSequenceNumber},
      {7, kRtpExtensionVideoContentType},
      {8, kRtpExtensionVideoTiming}};
  std::unique_ptr<NetEqInput> input = std::make_unique<NetEqRtpDumpInput>(
      webrtc::test::ResourcePath("audio_coding/neteq_universal_new", "rtp"),
      rtp_ext_map, absl::nullopt /*No SSRC filter*/);
  std::unique_ptr<TimeLimitedNetEqInput> input_time_limit(
      new TimeLimitedNetEqInput(std::move(input), 10000));
  std::unique_ptr<AudioSink> output =
      std::make_unique<VectorAudioSink>(output_vector);

  TestResult result;
  SimEndStatsCollector stats_collector(result);
  NetEqTest::Callbacks callbacks;
  callbacks.simulation_ended_callback = &stats_collector;

  NetEqTest test(config, CreateBuiltinAudioDecoderFactory(), codecs,
                 /*text_log=*/nullptr, /*neteq_factory=*/nullptr,
                 /*input=*/std::move(input_time_limit), std::move(output),
                 callbacks);
  test.Run();
  result.lifetime_stats = test.LifetimeStats();
  result.network_stats = test.SimulationStats();
  return result;
}
}  // namespace

// Tests the extra output delay functionality of NetEq.
TEST(NetEqOutputDelayTest, RunTest) {
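  // Run the same simulation twice: once without extra delay and once with a
  // 100 ms delay chain, and compare the stats and the decoded output.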
  std::vector<int16_t> output;
  const auto result_no_delay = DelayLineNetEqTest(0, &output);
  std::vector<int16_t> output_delayed;
  constexpr int kDelayMs = 100;
  const auto result_delay = DelayLineNetEqTest(kDelayMs, &output_delayed);

  // Verify that the loss concealment remains unchanged; the point of the delay
  // is that it should not affect the jitter buffering behavior.
  // First verify that there are concealments in the test.
  EXPECT_GT(result_no_delay.lifetime_stats.concealed_samples, 0u);
  // And that not all of the output is concealment.
  EXPECT_GT(result_no_delay.lifetime_stats.total_samples_received,
            result_no_delay.lifetime_stats.concealed_samples);
  // Now verify that they remain unchanged by the delay.
  EXPECT_EQ(result_no_delay.lifetime_stats.concealed_samples,
            result_delay.lifetime_stats.concealed_samples);
  // Accelerate and pre-emptive expand should also be unchanged.
  EXPECT_EQ(result_no_delay.lifetime_stats.inserted_samples_for_deceleration,
            result_delay.lifetime_stats.inserted_samples_for_deceleration);
  EXPECT_EQ(result_no_delay.lifetime_stats.removed_samples_for_acceleration,
            result_delay.lifetime_stats.removed_samples_for_acceleration);
  // Verify that the delay stats are increased by the delay chain.
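  // Every emitted sample spends an extra kDelayMs in the delay chain, so the
  // accumulated delay stats are expected to grow by kDelayMs times the emitted
  // sample count, while the per-call delay metrics grow by kDelayMs.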
  EXPECT_EQ(
      result_no_delay.lifetime_stats.jitter_buffer_delay_ms +
          kDelayMs * result_no_delay.lifetime_stats.jitter_buffer_emitted_count,
      result_delay.lifetime_stats.jitter_buffer_delay_ms);
  EXPECT_EQ(
      result_no_delay.lifetime_stats.jitter_buffer_target_delay_ms +
          kDelayMs * result_no_delay.lifetime_stats.jitter_buffer_emitted_count,
      result_delay.lifetime_stats.jitter_buffer_target_delay_ms);
  EXPECT_EQ(result_no_delay.network_stats.current_buffer_size_ms + kDelayMs,
            result_delay.network_stats.current_buffer_size_ms);
  EXPECT_EQ(result_no_delay.network_stats.preferred_buffer_size_ms + kDelayMs,
            result_delay.network_stats.preferred_buffer_size_ms);
  EXPECT_EQ(result_no_delay.network_stats.mean_waiting_time_ms + kDelayMs,
            result_delay.network_stats.mean_waiting_time_ms);
  EXPECT_EQ(result_no_delay.network_stats.median_waiting_time_ms + kDelayMs,
            result_delay.network_stats.median_waiting_time_ms);
  EXPECT_EQ(result_no_delay.network_stats.min_waiting_time_ms + kDelayMs,
            result_delay.network_stats.min_waiting_time_ms);
  EXPECT_EQ(result_no_delay.network_stats.max_waiting_time_ms + kDelayMs,
            result_delay.network_stats.max_waiting_time_ms);

  ASSERT_TRUE(result_no_delay.playout_timestamp);
  ASSERT_TRUE(result_delay.playout_timestamp);
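  // The delayed instance plays out audio that is kDelayMs older, so its
  // playout timestamp should lag behind by kDelayMs worth of samples.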
  EXPECT_EQ(*result_no_delay.playout_timestamp -
                static_cast<uint32_t>(
                    kDelayMs *
                    rtc::CheckedDivExact(result_no_delay.sample_rate_hz, 1000)),
            *result_delay.playout_timestamp);
  EXPECT_EQ(result_no_delay.target_delay_ms + kDelayMs,
            result_delay.target_delay_ms);
  EXPECT_EQ(result_no_delay.filtered_current_delay_ms + kDelayMs,
            result_delay.filtered_current_delay_ms);

  // Verify the expected delay in the decoded signal. The test vector uses an
  // 8 kHz sample rate, so the delay in samples is 8 times the delay in ms.
  constexpr size_t kExpectedDelaySamples = kDelayMs * 8;
  for (size_t i = 0;
       i < output.size() && i + kExpectedDelaySamples < output_delayed.size();
       ++i) {
    EXPECT_EQ(output[i], output_delayed[i + kExpectedDelaySamples]);
  }
}

// Tests the extra output delay functionality of NetEq when configured via
// field trial.
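// As exercised below, the field trial string has the form
// "WebRTC-Audio-NetEqExtraDelay/Enabled-<ms>/", where <ms> is the extra delay
// in milliseconds; values that are not multiples of 10 are rounded down (see
// the odd-value test further down).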
TEST(NetEqOutputDelayTest, RunTestWithFieldTrial) {
  test::ScopedFieldTrials field_trial(
      "WebRTC-Audio-NetEqExtraDelay/Enabled-50/");
  constexpr int kExpectedDelayMs = 50;
  std::vector<int16_t> output;
  const auto result = DelayLineNetEqTest(0, &output);

  // The base delay values are taken from the results of the non-delayed case
  // in NetEqOutputDelayTest.RunTest above.
  EXPECT_EQ(20 + kExpectedDelayMs, result.target_delay_ms);
  EXPECT_EQ(24 + kExpectedDelayMs, result.filtered_current_delay_ms);
}

// Set a non-multiple-of-10 value in the field trial, and verify that we don't
// crash, and that the result is rounded down.
TEST(NetEqOutputDelayTest, RunTestWithFieldTrialOddValue) {
  test::ScopedFieldTrials field_trial(
      "WebRTC-Audio-NetEqExtraDelay/Enabled-103/");
  constexpr int kRoundedDelayMs = 100;
  std::vector<int16_t> output;
  const auto result = DelayLineNetEqTest(0, &output);

  // The base delay values are taken from the results of the non-delayed case
  // in NetEqOutputDelayTest.RunTest above.
  EXPECT_EQ(20 + kRoundedDelayMs, result.target_delay_ms);
  EXPECT_EQ(24 + kRoundedDelayMs, result.filtered_current_delay_ms);
}

}  // namespace test
}  // namespace webrtc