/*
 *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "api/neteq/neteq.h"

#include <math.h>
#include <stdlib.h>
#include <string.h>  // memset

#include <algorithm>
#include <memory>
#include <set>
#include <string>
#include <vector>

#include "absl/flags/flag.h"
#include "api/audio/audio_frame.h"
#include "api/audio_codecs/builtin_audio_decoder_factory.h"
#include "modules/audio_coding/codecs/pcm16b/pcm16b.h"
#include "modules/audio_coding/neteq/test/neteq_decoding_test.h"
#include "modules/audio_coding/neteq/tools/audio_loop.h"
#include "modules/audio_coding/neteq/tools/neteq_packet_source_input.h"
#include "modules/audio_coding/neteq/tools/neteq_test.h"
#include "modules/include/module_common_types_public.h"
#include "modules/rtp_rtcp/include/rtcp_statistics.h"
#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "rtc_base/ignore_wundef.h"
#include "rtc_base/message_digest.h"
#include "rtc_base/numerics/safe_conversions.h"
#include "rtc_base/string_encode.h"
#include "rtc_base/strings/string_builder.h"
#include "rtc_base/system/arch.h"
#include "test/field_trial.h"
#include "test/gtest.h"
#include "test/testsupport/file_utils.h"

ABSL_FLAG(bool, gen_ref, false, "Generate reference files.");

namespace webrtc {

namespace {

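// Returns the checksum reference that applies to the current build platform
// (Android 32/64-bit, Windows 32/64-bit, or the general case). Used by the
// bit-exactness tests below to pick the expected value.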
const std::string& PlatformChecksum(const std::string& checksum_general,
                                    const std::string& checksum_android_32,
                                    const std::string& checksum_android_64,
                                    const std::string& checksum_win_32,
                                    const std::string& checksum_win_64) {
#if defined(WEBRTC_ANDROID)
#ifdef WEBRTC_ARCH_64_BITS
  return checksum_android_64;
#else
  return checksum_android_32;
#endif  // WEBRTC_ARCH_64_BITS
#elif defined(WEBRTC_WIN)
#ifdef WEBRTC_ARCH_64_BITS
  return checksum_win_64;
#else
  return checksum_win_32;
#endif  // WEBRTC_ARCH_64_BITS
#else
  return checksum_general;
#endif  // WEBRTC_WIN
}

}  // namespace


#if !defined(WEBRTC_IOS) && defined(WEBRTC_NETEQ_UNITTEST_BITEXACT) && \
    (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)) &&    \
    defined(WEBRTC_CODEC_ILBC) && !defined(WEBRTC_ARCH_ARM64)
#define MAYBE_TestBitExactness TestBitExactness
#else
#define MAYBE_TestBitExactness DISABLED_TestBitExactness
#endif
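// Decodes a pre-recorded RTP file and compares checksums of the decoded output
// and of the reported network statistics against the platform-specific
// reference values above. The --gen_ref flag is forwarded to DecodeAndCompare
// to generate reference files.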
TEST_F(NetEqDecodingTest, MAYBE_TestBitExactness) {
  const std::string input_rtp_file =
      webrtc::test::ResourcePath("audio_coding/neteq_universal_new", "rtp");

  const std::string output_checksum =
      PlatformChecksum("6c35140ce4d75874bdd60aa1872400b05fd05ca2",
                       "ab451bb8301d9a92fbf4de91556b56f1ea38b4ce", "not used",
                       "6c35140ce4d75874bdd60aa1872400b05fd05ca2",
                       "64b46bb3c1165537a880ae8404afce2efba456c0");

  const std::string network_stats_checksum =
      PlatformChecksum("90594d85fa31d3d9584d79293bf7aa4ee55ed751",
                       "77b9c3640b81aff6a38d69d07dd782d39c15321d", "not used",
                       "90594d85fa31d3d9584d79293bf7aa4ee55ed751",
                       "90594d85fa31d3d9584d79293bf7aa4ee55ed751");

  DecodeAndCompare(input_rtp_file, output_checksum, network_stats_checksum,
                   absl::GetFlag(FLAGS_gen_ref));
}

#if !defined(WEBRTC_IOS) && defined(WEBRTC_NETEQ_UNITTEST_BITEXACT) && \
    defined(WEBRTC_CODEC_OPUS)
#define MAYBE_TestOpusBitExactness TestOpusBitExactness
#else
#define MAYBE_TestOpusBitExactness DISABLED_TestOpusBitExactness
#endif
// TODO(http://bugs.webrtc.org/12518): Enable the test after Opus has been
// updated.
TEST_F(NetEqDecodingTest, DISABLED_TestOpusBitExactness) {
  const std::string input_rtp_file =
      webrtc::test::ResourcePath("audio_coding/neteq_opus", "rtp");

  const std::string maybe_sse =
      "c7887ff60eecf460332c6c7a28c81561f9e8a40f"
      "|673dd422cfc174152536d3b13af64f9722520ab5";
  const std::string output_checksum = PlatformChecksum(
      maybe_sse, "e39283dd61a89cead3786ef8642d2637cc447296",
      "53d8073eb848b70974cba9e26424f4946508fd19", maybe_sse, maybe_sse);

  const std::string network_stats_checksum =
      PlatformChecksum("c438bfa3b018f77691279eb9c63730569f54585c",
                       "8a474ed0992591e0c84f593824bb05979c3de157",
                       "9a05378dbf7e6edd56cdeb8ec45bcd6d8589623c",
                       "c438bfa3b018f77691279eb9c63730569f54585c",
                       "c438bfa3b018f77691279eb9c63730569f54585c");

  DecodeAndCompare(input_rtp_file, output_checksum, network_stats_checksum,
                   absl::GetFlag(FLAGS_gen_ref));
}

// TODO(http://bugs.webrtc.org/12518): Enable the test after Opus has been
// updated.
TEST_F(NetEqDecodingTest, DISABLED_TestOpusDtxBitExactness) {
  const std::string input_rtp_file =
      webrtc::test::ResourcePath("audio_coding/neteq_opus_dtx", "rtp");

  const std::string maybe_sse =
      "0fb0a3d6b3758ca6e108368bb777cd38d0a865af"
      "|79cfb99a21338ba977eb0e15eb8464e2db9436f8";
  const std::string output_checksum = PlatformChecksum(
      maybe_sse, "b6632690f8d7c2340c838df2821fc014f1cc8360",
      "f890b9eb9bc5ab8313489230726b297f6a0825af", maybe_sse, maybe_sse);

  const std::string network_stats_checksum =
      "18983bb67a57628c604dbdefa99574c6e0c5bb48";

  DecodeAndCompare(input_rtp_file, output_checksum, network_stats_checksum,
                   absl::GetFlag(FLAGS_gen_ref));
}

// Use fax mode to avoid time-scaling. This is to simplify the testing of
// packet waiting times in the packet buffer.
class NetEqDecodingTestFaxMode : public NetEqDecodingTest {
 protected:
  NetEqDecodingTestFaxMode() : NetEqDecodingTest() {
    config_.for_test_no_time_stretching = true;
  }
  void TestJitterBufferDelay(bool apply_packet_loss);
};

TEST_F(NetEqDecodingTestFaxMode, TestFrameWaitingTimeStatistics) {
  // Insert 30 dummy packets at once. Each packet contains 10 ms 16 kHz audio.
  size_t num_frames = 30;
  const size_t kSamples = 10 * 16;
  const size_t kPayloadBytes = kSamples * 2;
  for (size_t i = 0; i < num_frames; ++i) {
    const uint8_t payload[kPayloadBytes] = {0};
    RTPHeader rtp_info;
    rtp_info.sequenceNumber = rtc::checked_cast<uint16_t>(i);
    rtp_info.timestamp = rtc::checked_cast<uint32_t>(i * kSamples);
    rtp_info.ssrc = 0x1234;     // Just an arbitrary SSRC.
    rtp_info.payloadType = 94;  // PCM16b WB codec.
    rtp_info.markerBit = 0;
    ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload));
  }
  // Pull out all data.
  for (size_t i = 0; i < num_frames; ++i) {
    bool muted;
    ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
    ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
  }

  NetEqNetworkStatistics stats;
  EXPECT_EQ(0, neteq_->NetworkStatistics(&stats));
  // Since all frames are dumped into NetEQ at once, but pulled out with 10 ms
  // spacing (per definition), we expect the delay to increase by 10 ms for
  // each packet. Thus, we are calculating the statistics for a series from 10
  // to 300, in steps of 10 ms.
  EXPECT_EQ(155, stats.mean_waiting_time_ms);
  EXPECT_EQ(155, stats.median_waiting_time_ms);
  EXPECT_EQ(10, stats.min_waiting_time_ms);
  EXPECT_EQ(300, stats.max_waiting_time_ms);

  // Check the statistics again and make sure they have been reset.
  EXPECT_EQ(0, neteq_->NetworkStatistics(&stats));
  EXPECT_EQ(-1, stats.mean_waiting_time_ms);
  EXPECT_EQ(-1, stats.median_waiting_time_ms);
  EXPECT_EQ(-1, stats.min_waiting_time_ms);
  EXPECT_EQ(-1, stats.max_waiting_time_ms);
}

TEST_F(NetEqDecodingTest, LongCngWithNegativeClockDrift) {
  // Apply a clock drift of -25 ms / s (sender faster than receiver).
  const double kDriftFactor = 1000.0 / (1000.0 + 25.0);
  const double kNetworkFreezeTimeMs = 0.0;
  const bool kGetAudioDuringFreezeRecovery = false;
  const int kDelayToleranceMs = 20;
  const int kMaxTimeToSpeechMs = 100;
  LongCngWithClockDrift(kDriftFactor, kNetworkFreezeTimeMs,
                        kGetAudioDuringFreezeRecovery, kDelayToleranceMs,
                        kMaxTimeToSpeechMs);
}

TEST_F(NetEqDecodingTest, LongCngWithPositiveClockDrift) {
  // Apply a clock drift of +25 ms / s (sender slower than receiver).
  const double kDriftFactor = 1000.0 / (1000.0 - 25.0);
  const double kNetworkFreezeTimeMs = 0.0;
  const bool kGetAudioDuringFreezeRecovery = false;
  const int kDelayToleranceMs = 40;
  const int kMaxTimeToSpeechMs = 100;
  LongCngWithClockDrift(kDriftFactor, kNetworkFreezeTimeMs,
                        kGetAudioDuringFreezeRecovery, kDelayToleranceMs,
                        kMaxTimeToSpeechMs);
}

TEST_F(NetEqDecodingTest, LongCngWithNegativeClockDriftNetworkFreeze) {
  // Apply a clock drift of -25 ms / s (sender faster than receiver).
  const double kDriftFactor = 1000.0 / (1000.0 + 25.0);
  const double kNetworkFreezeTimeMs = 5000.0;
  const bool kGetAudioDuringFreezeRecovery = false;
  const int kDelayToleranceMs = 60;
  const int kMaxTimeToSpeechMs = 200;
  LongCngWithClockDrift(kDriftFactor, kNetworkFreezeTimeMs,
                        kGetAudioDuringFreezeRecovery, kDelayToleranceMs,
                        kMaxTimeToSpeechMs);
}

TEST_F(NetEqDecodingTest, LongCngWithPositiveClockDriftNetworkFreeze) {
  // Apply a clock drift of +25 ms / s (sender slower than receiver).
  const double kDriftFactor = 1000.0 / (1000.0 - 25.0);
  const double kNetworkFreezeTimeMs = 5000.0;
  const bool kGetAudioDuringFreezeRecovery = false;
  const int kDelayToleranceMs = 40;
  const int kMaxTimeToSpeechMs = 100;
  LongCngWithClockDrift(kDriftFactor, kNetworkFreezeTimeMs,
                        kGetAudioDuringFreezeRecovery, kDelayToleranceMs,
                        kMaxTimeToSpeechMs);
}

TEST_F(NetEqDecodingTest, LongCngWithPositiveClockDriftNetworkFreezeExtraPull) {
  // Apply a clock drift of +25 ms / s (sender slower than receiver).
  const double kDriftFactor = 1000.0 / (1000.0 - 25.0);
  const double kNetworkFreezeTimeMs = 5000.0;
  const bool kGetAudioDuringFreezeRecovery = true;
  const int kDelayToleranceMs = 40;
  const int kMaxTimeToSpeechMs = 100;
  LongCngWithClockDrift(kDriftFactor, kNetworkFreezeTimeMs,
                        kGetAudioDuringFreezeRecovery, kDelayToleranceMs,
                        kMaxTimeToSpeechMs);
}

TEST_F(NetEqDecodingTest, LongCngWithoutClockDrift) {
  const double kDriftFactor = 1.0;  // No drift.
  const double kNetworkFreezeTimeMs = 0.0;
  const bool kGetAudioDuringFreezeRecovery = false;
  const int kDelayToleranceMs = 10;
  const int kMaxTimeToSpeechMs = 50;
  LongCngWithClockDrift(kDriftFactor, kNetworkFreezeTimeMs,
                        kGetAudioDuringFreezeRecovery, kDelayToleranceMs,
                        kMaxTimeToSpeechMs);
}

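// Verifies that inserting a packet whose payload type has no registered
// decoder fails with NetEq::kFail.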
TEST_F(NetEqDecodingTest, UnknownPayloadType) {
  const size_t kPayloadBytes = 100;
  uint8_t payload[kPayloadBytes] = {0};
  RTPHeader rtp_info;
  PopulateRtpInfo(0, 0, &rtp_info);
  rtp_info.payloadType = 1;  // Not registered as a decoder.
  EXPECT_EQ(NetEq::kFail, neteq_->InsertPacket(rtp_info, payload));
}

#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
#define MAYBE_DecoderError DecoderError
#else
#define MAYBE_DecoderError DISABLED_DecoderError
#endif

TEST_F(NetEqDecodingTest, MAYBE_DecoderError) {
  const size_t kPayloadBytes = 100;
  uint8_t payload[kPayloadBytes] = {0};
  RTPHeader rtp_info;
  PopulateRtpInfo(0, 0, &rtp_info);
  rtp_info.payloadType = 103;  // iSAC, but the payload is invalid.
  EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload));
  // Set all of |out_frame_| to 1, and verify that it was set to 0 by the call
  // to GetAudio.
  int16_t* out_frame_data = out_frame_.mutable_data();
  for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; ++i) {
    out_frame_data[i] = 1;
  }
  bool muted;
  EXPECT_EQ(NetEq::kFail, neteq_->GetAudio(&out_frame_, &muted));
  ASSERT_FALSE(muted);

  // Verify that the first 160 samples are set to 0.
  static const int kExpectedOutputLength = 160;  // 10 ms at 16 kHz sample rate.
  const int16_t* const_out_frame_data = out_frame_.data();
  for (int i = 0; i < kExpectedOutputLength; ++i) {
    rtc::StringBuilder ss;
    ss << "i = " << i;
    SCOPED_TRACE(ss.str());  // Print out the parameter values on failure.
    EXPECT_EQ(0, const_out_frame_data[i]);
  }
}

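// Verifies that pulling audio before any packet has been inserted produces a
// zeroed frame and leaves the output sample rate at its initial value.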
TEST_F(NetEqDecodingTest, GetAudioBeforeInsertPacket) {
  // Set all of |out_frame_| to 1, and verify that it was set to 0 by the call
  // to GetAudio.
  int16_t* out_frame_data = out_frame_.mutable_data();
  for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; ++i) {
    out_frame_data[i] = 1;
  }
  bool muted;
  EXPECT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
  ASSERT_FALSE(muted);
  // Verify that the first block of samples is set to 0.
  static const int kExpectedOutputLength =
      kInitSampleRateHz / 100;  // 10 ms at initial sample rate.
  const int16_t* const_out_frame_data = out_frame_.data();
  for (int i = 0; i < kExpectedOutputLength; ++i) {
    rtc::StringBuilder ss;
    ss << "i = " << i;
    SCOPED_TRACE(ss.str());  // Print out the parameter values on failure.
    EXPECT_EQ(0, const_out_frame_data[i]);
  }
  // Verify that the sample rate did not change from the initial configuration.
  EXPECT_EQ(config_.sample_rate_hz, neteq_->last_output_sample_rate_hz());
}

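// Fixture for testing the background noise behavior when the packet stream
// stops: after a stretch of PLC the output should transition to PLC-to-CNG,
// and by then the generated noise must have faded to zero energy.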
class NetEqBgnTest : public NetEqDecodingTest {
 protected:
  void CheckBgn(int sampling_rate_hz) {
    size_t expected_samples_per_channel = 0;
    uint8_t payload_type = 0xFF;  // Invalid.
    if (sampling_rate_hz == 8000) {
      expected_samples_per_channel = kBlockSize8kHz;
      payload_type = 93;  // PCM 16, 8 kHz.
    } else if (sampling_rate_hz == 16000) {
      expected_samples_per_channel = kBlockSize16kHz;
      payload_type = 94;  // PCM 16, 16 kHz.
    } else if (sampling_rate_hz == 32000) {
      expected_samples_per_channel = kBlockSize32kHz;
      payload_type = 95;  // PCM 16, 32 kHz.
    } else {
      ASSERT_TRUE(false);  // Unsupported test case.
    }

    AudioFrame output;
    test::AudioLoop input;
    // We are using the same 32 kHz input file for all tests, regardless of
    // |sampling_rate_hz|. The output may sound weird, but the test is still
    // valid.
    ASSERT_TRUE(input.Init(
        webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm"),
        10 * sampling_rate_hz,  // Max 10 seconds loop length.
        expected_samples_per_channel));

    // Payload of 10 ms of PCM16 32 kHz.
    uint8_t payload[kBlockSize32kHz * sizeof(int16_t)];
    RTPHeader rtp_info;
    PopulateRtpInfo(0, 0, &rtp_info);
    rtp_info.payloadType = payload_type;

    uint32_t receive_timestamp = 0;
    bool muted;
    for (int n = 0; n < 10; ++n) {  // Insert a few packets and get audio.
      auto block = input.GetNextBlock();
      ASSERT_EQ(expected_samples_per_channel, block.size());
      size_t enc_len_bytes =
          WebRtcPcm16b_Encode(block.data(), block.size(), payload);
      ASSERT_EQ(enc_len_bytes, expected_samples_per_channel * 2);

      ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, rtc::ArrayView<const uint8_t>(
                                                      payload, enc_len_bytes)));
      output.Reset();
      ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));
      ASSERT_EQ(1u, output.num_channels_);
      ASSERT_EQ(expected_samples_per_channel, output.samples_per_channel_);
      ASSERT_EQ(AudioFrame::kNormalSpeech, output.speech_type_);

      // Next packet.
      rtp_info.timestamp +=
          rtc::checked_cast<uint32_t>(expected_samples_per_channel);
      rtp_info.sequenceNumber++;
      receive_timestamp +=
          rtc::checked_cast<uint32_t>(expected_samples_per_channel);
    }

    output.Reset();

    // Get audio without inserting packets, expecting PLC and PLC-to-CNG. Pull
    // one frame without checking speech-type. This is the first frame pulled
    // without inserting any packet, and might not be labeled as PLC.
    ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));
    ASSERT_EQ(1u, output.num_channels_);
    ASSERT_EQ(expected_samples_per_channel, output.samples_per_channel_);

    // To be able to test the fading of background noise we need at least to
    // pull 611 frames.
    const int kFadingThreshold = 611;

    // Test several CNG-to-PLC packets for the expected behavior. The number 20
    // is arbitrary, but sufficiently large to test enough number of frames.
    const int kNumPlcToCngTestFrames = 20;
    bool plc_to_cng = false;
    for (int n = 0; n < kFadingThreshold + kNumPlcToCngTestFrames; ++n) {
      output.Reset();
      // Set to non-zero.
      memset(output.mutable_data(), 1, AudioFrame::kMaxDataSizeBytes);
      ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));
      ASSERT_FALSE(muted);
      ASSERT_EQ(1u, output.num_channels_);
      ASSERT_EQ(expected_samples_per_channel, output.samples_per_channel_);
      if (output.speech_type_ == AudioFrame::kPLCCNG) {
        plc_to_cng = true;
        double sum_squared = 0;
        const int16_t* output_data = output.data();
        for (size_t k = 0;
             k < output.num_channels_ * output.samples_per_channel_; ++k)
          sum_squared += output_data[k] * output_data[k];
        EXPECT_EQ(0, sum_squared);
      } else {
        EXPECT_EQ(AudioFrame::kPLC, output.speech_type_);
      }
    }
    EXPECT_TRUE(plc_to_cng);  // Just to be sure that PLC-to-CNG has occurred.
  }
};

TEST_F(NetEqBgnTest, RunTest) {
  CheckBgn(8000);
  CheckBgn(16000);
  CheckBgn(32000);
}

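// The tests below exercise wrap-around of the RTP sequence number and
// timestamp, with and without packets dropped around the wrap, via the
// WrapTest helper.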
TEST_F(NetEqDecodingTest, SequenceNumberWrap) {
  // Start with a sequence number that will soon wrap.
  std::set<uint16_t> drop_seq_numbers;  // Don't drop any packets.
  WrapTest(0xFFFF - 10, 0, drop_seq_numbers, true, false);
}

TEST_F(NetEqDecodingTest, SequenceNumberWrapAndDrop) {
  // Start with a sequence number that will soon wrap.
  std::set<uint16_t> drop_seq_numbers;
  drop_seq_numbers.insert(0xFFFF);
  drop_seq_numbers.insert(0x0);
  WrapTest(0xFFFF - 10, 0, drop_seq_numbers, true, false);
}

TEST_F(NetEqDecodingTest, TimestampWrap) {
  // Start with a timestamp that will soon wrap.
  std::set<uint16_t> drop_seq_numbers;
  WrapTest(0, 0xFFFFFFFF - 3000, drop_seq_numbers, false, true);
}

TEST_F(NetEqDecodingTest, TimestampAndSequenceNumberWrap) {
  // Start with a timestamp and a sequence number that will wrap at the same
  // time.
  std::set<uint16_t> drop_seq_numbers;
  WrapTest(0xFFFF - 10, 0xFFFFFFFF - 5000, drop_seq_numbers, true, true);
}

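// Verifies that a CNG packet which is inserted a second time (and is old by
// then) is discarded rather than decoded again, and that playout timestamps
// stay consistent throughout the CNG period.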
TEST_F(NetEqDecodingTest, DiscardDuplicateCng) {
  uint16_t seq_no = 0;
  uint32_t timestamp = 0;
  const int kFrameSizeMs = 10;
  const int kSampleRateKhz = 16;
  const int kSamples = kFrameSizeMs * kSampleRateKhz;
  const size_t kPayloadBytes = kSamples * 2;

  const int algorithmic_delay_samples =
      std::max(algorithmic_delay_ms_ * kSampleRateKhz, 5 * kSampleRateKhz / 8);
  // Insert three speech packets. Three are needed to get the frame length
  // correct.
  uint8_t payload[kPayloadBytes] = {0};
  RTPHeader rtp_info;
  bool muted;
  for (int i = 0; i < 3; ++i) {
    PopulateRtpInfo(seq_no, timestamp, &rtp_info);
    ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload));
    ++seq_no;
    timestamp += kSamples;

    // Pull audio once.
    ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
    ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
  }
  // Verify speech output.
  EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_);

  // Insert same CNG packet twice.
  const int kCngPeriodMs = 100;
  const int kCngPeriodSamples = kCngPeriodMs * kSampleRateKhz;
  size_t payload_len;
  PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len);
  // This is the first time this CNG packet is inserted.
  ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, rtc::ArrayView<const uint8_t>(
                                                  payload, payload_len)));

  // Pull audio once and make sure CNG is played.
  ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
  ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
  EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
  EXPECT_FALSE(
      neteq_->GetPlayoutTimestamp());  // Returns empty value during CNG.
  EXPECT_EQ(timestamp - algorithmic_delay_samples,
            out_frame_.timestamp_ + out_frame_.samples_per_channel_);

  // Insert the same CNG packet again. Note that at this point it is old, since
  // we have already decoded the first copy of it.
  ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, rtc::ArrayView<const uint8_t>(
                                                  payload, payload_len)));

  // Pull audio until we have played |kCngPeriodMs| of CNG. Start at 10 ms since
  // we have already pulled out CNG once.
  for (int cng_time_ms = 10; cng_time_ms < kCngPeriodMs; cng_time_ms += 10) {
    ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
    ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
    EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
    EXPECT_FALSE(
        neteq_->GetPlayoutTimestamp());  // Returns empty value during CNG.
    EXPECT_EQ(timestamp - algorithmic_delay_samples,
              out_frame_.timestamp_ + out_frame_.samples_per_channel_);
  }

  // Insert speech again.
  ++seq_no;
  timestamp += kCngPeriodSamples;
  PopulateRtpInfo(seq_no, timestamp, &rtp_info);
  ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload));

  // Pull audio once and verify that the output is speech again.
  ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
  ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
  EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_);
  absl::optional<uint32_t> playout_timestamp = neteq_->GetPlayoutTimestamp();
  ASSERT_TRUE(playout_timestamp);
  EXPECT_EQ(timestamp + kSamples - algorithmic_delay_samples,
            *playout_timestamp);
}

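// Verifies that a stream that starts with a CNG packet plays comfort noise
// and then switches to normal speech once speech packets arrive.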
TEST_F(NetEqDecodingTest, CngFirst) {
  uint16_t seq_no = 0;
  uint32_t timestamp = 0;
  const int kFrameSizeMs = 10;
  const int kSampleRateKhz = 16;
  const int kSamples = kFrameSizeMs * kSampleRateKhz;
  const int kPayloadBytes = kSamples * 2;
  const int kCngPeriodMs = 100;
  const int kCngPeriodSamples = kCngPeriodMs * kSampleRateKhz;
  size_t payload_len;

  uint8_t payload[kPayloadBytes] = {0};
  RTPHeader rtp_info;

  PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len);
  ASSERT_EQ(NetEq::kOK,
            neteq_->InsertPacket(
                rtp_info, rtc::ArrayView<const uint8_t>(payload, payload_len)));
  ++seq_no;
  timestamp += kCngPeriodSamples;

  // Pull audio once and make sure CNG is played.
  bool muted;
  ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
  ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
  EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);

  // Insert some speech packets.
  const uint32_t first_speech_timestamp = timestamp;
  int timeout_counter = 0;
  do {
    ASSERT_LT(timeout_counter++, 20) << "Test timed out";
    PopulateRtpInfo(seq_no, timestamp, &rtp_info);
    ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload));
    ++seq_no;
    timestamp += kSamples;

    // Pull audio once.
    ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
    ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
  } while (!IsNewerTimestamp(out_frame_.timestamp_, first_speech_timestamp));
  // Verify speech output.
  EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_);
}

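// Fixture with NetEq's muted state enabled. In this mode, GetAudio reports
// muted = true once the concealment audio has faded out after a long period
// without packets, and the output frame data is left untouched while muted.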
class NetEqDecodingTestWithMutedState : public NetEqDecodingTest {
 public:
  NetEqDecodingTestWithMutedState() : NetEqDecodingTest() {
    config_.enable_muted_state = true;
  }

 protected:
  static constexpr size_t kSamples = 10 * 16;
  static constexpr size_t kPayloadBytes = kSamples * 2;

  void InsertPacket(uint32_t rtp_timestamp) {
    uint8_t payload[kPayloadBytes] = {0};
    RTPHeader rtp_info;
    PopulateRtpInfo(0, rtp_timestamp, &rtp_info);
    EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload));
  }

  void InsertCngPacket(uint32_t rtp_timestamp) {
    uint8_t payload[kPayloadBytes] = {0};
    RTPHeader rtp_info;
    size_t payload_len;
    PopulateCng(0, rtp_timestamp, &rtp_info, payload, &payload_len);
    EXPECT_EQ(NetEq::kOK,
              neteq_->InsertPacket(rtp_info, rtc::ArrayView<const uint8_t>(
                                                 payload, payload_len)));
  }

  bool GetAudioReturnMuted() {
    bool muted;
    EXPECT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
    return muted;
  }

  void GetAudioUntilMuted() {
    while (!GetAudioReturnMuted()) {
      ASSERT_LT(counter_++, 1000) << "Test timed out";
    }
  }

  void GetAudioUntilNormal() {
    bool muted = false;
    while (out_frame_.speech_type_ != AudioFrame::kNormalSpeech) {
      EXPECT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
      ASSERT_LT(counter_++, 1000) << "Test timed out";
    }
    EXPECT_FALSE(muted);
  }

  int counter_ = 0;
};

// Verifies that NetEq goes in and out of muted state as expected.
TEST_F(NetEqDecodingTestWithMutedState, MutedState) {
  // Insert one speech packet.
  InsertPacket(0);
  // Pull out audio once and expect it not to be muted.
  EXPECT_FALSE(GetAudioReturnMuted());
  // Pull data until faded out.
  GetAudioUntilMuted();
  EXPECT_TRUE(out_frame_.muted());

  // Verify that output audio is not written during muted mode. Other parameters
  // should be correct, though.
  AudioFrame new_frame;
  int16_t* frame_data = new_frame.mutable_data();
  for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; i++) {
    frame_data[i] = 17;
  }
  bool muted;
  EXPECT_EQ(0, neteq_->GetAudio(&new_frame, &muted));
  EXPECT_TRUE(muted);
  EXPECT_TRUE(out_frame_.muted());
  for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; i++) {
    EXPECT_EQ(17, frame_data[i]);
  }
  EXPECT_EQ(out_frame_.timestamp_ + out_frame_.samples_per_channel_,
            new_frame.timestamp_);
  EXPECT_EQ(out_frame_.samples_per_channel_, new_frame.samples_per_channel_);
  EXPECT_EQ(out_frame_.sample_rate_hz_, new_frame.sample_rate_hz_);
  EXPECT_EQ(out_frame_.num_channels_, new_frame.num_channels_);
  EXPECT_EQ(out_frame_.speech_type_, new_frame.speech_type_);
  EXPECT_EQ(out_frame_.vad_activity_, new_frame.vad_activity_);

  // Insert new data. Timestamp is corrected for the time elapsed since the last
  // packet. Verify that normal operation resumes.
  InsertPacket(kSamples * counter_);
  GetAudioUntilNormal();
  EXPECT_FALSE(out_frame_.muted());

  NetEqNetworkStatistics stats;
  EXPECT_EQ(0, neteq_->NetworkStatistics(&stats));
  // NetEqNetworkStatistics::expand_rate tells the fraction of samples that were
  // concealment samples, in Q14 (16384 = 100%). The vast majority should be
  // concealment samples in this test.
  EXPECT_GT(stats.expand_rate, 14000);
  // And, it should be greater than the speech_expand_rate.
  EXPECT_GT(stats.expand_rate, stats.speech_expand_rate);
}

// Verifies that NetEq goes out of muted state when given a delayed packet.
TEST_F(NetEqDecodingTestWithMutedState, MutedStateDelayedPacket) {
  // Insert one speech packet.
  InsertPacket(0);
  // Pull out audio once and expect it not to be muted.
  EXPECT_FALSE(GetAudioReturnMuted());
  // Pull data until faded out.
  GetAudioUntilMuted();
  // Insert new data. Timestamp is only corrected for half of the time elapsed
  // since the last packet. That is, the new packet is delayed. Verify that
  // normal operation resumes.
  InsertPacket(kSamples * counter_ / 2);
  GetAudioUntilNormal();
}

// Verifies that NetEq goes out of muted state when given a future packet.
TEST_F(NetEqDecodingTestWithMutedState, MutedStateFuturePacket) {
  // Insert one speech packet.
  InsertPacket(0);
  // Pull out audio once and expect it not to be muted.
  EXPECT_FALSE(GetAudioReturnMuted());
  // Pull data until faded out.
  GetAudioUntilMuted();
  // Insert new data. Timestamp is over-corrected for the time elapsed since the
  // last packet. That is, the new packet is too early. Verify that normal
  // operation resumes.
  InsertPacket(kSamples * counter_ * 2);
  GetAudioUntilNormal();
}

// Verifies that NetEq goes out of muted state when given an old packet.
TEST_F(NetEqDecodingTestWithMutedState, MutedStateOldPacket) {
  // Insert one speech packet.
  InsertPacket(0);
  // Pull out audio once and expect it not to be muted.
  EXPECT_FALSE(GetAudioReturnMuted());
  // Pull data until faded out.
  GetAudioUntilMuted();

  EXPECT_NE(AudioFrame::kNormalSpeech, out_frame_.speech_type_);
  // Insert a few packets which are older than the first packet.
  for (int i = 0; i < 5; ++i) {
    InsertPacket(kSamples * (i - 1000));
  }
  EXPECT_FALSE(GetAudioReturnMuted());
  EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_);
}

// Verifies that NetEq doesn't enter muted state when CNG mode is active and the
// packet stream is suspended for a long time.
TEST_F(NetEqDecodingTestWithMutedState, DoNotMuteExtendedCngWithoutPackets) {
  // Insert one CNG packet.
  InsertCngPacket(0);

  // Pull 10 seconds of audio (10 ms audio generated per lap).
  for (int i = 0; i < 1000; ++i) {
    bool muted;
    EXPECT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
    ASSERT_FALSE(muted);
  }
  EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
}

// Verifies that NetEq goes back to normal after a long CNG period with the
// packet stream suspended.
TEST_F(NetEqDecodingTestWithMutedState, RecoverAfterExtendedCngWithoutPackets) {
  // Insert one CNG packet.
  InsertCngPacket(0);

  // Pull 10 seconds of audio (10 ms audio generated per lap).
  for (int i = 0; i < 1000; ++i) {
    bool muted;
    EXPECT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
  }

  // Insert new data. Timestamp is corrected for the time elapsed since the last
  // packet. Verify that normal operation resumes.
  InsertPacket(kSamples * counter_);
  GetAudioUntilNormal();
}

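// Helpers that compare two AudioFrames, with and without comparing the audio
// data itself. Used by the test below that runs two NetEq instances, one with
// muted state enabled and one without, and checks that they stay in sync.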
namespace {
::testing::AssertionResult AudioFramesEqualExceptData(const AudioFrame& a,
                                                      const AudioFrame& b) {
  if (a.timestamp_ != b.timestamp_)
    return ::testing::AssertionFailure() << "timestamp_ diff (" << a.timestamp_
                                         << " != " << b.timestamp_ << ")";
  if (a.sample_rate_hz_ != b.sample_rate_hz_)
    return ::testing::AssertionFailure()
           << "sample_rate_hz_ diff (" << a.sample_rate_hz_
           << " != " << b.sample_rate_hz_ << ")";
  if (a.samples_per_channel_ != b.samples_per_channel_)
    return ::testing::AssertionFailure()
           << "samples_per_channel_ diff (" << a.samples_per_channel_
           << " != " << b.samples_per_channel_ << ")";
  if (a.num_channels_ != b.num_channels_)
    return ::testing::AssertionFailure()
           << "num_channels_ diff (" << a.num_channels_
           << " != " << b.num_channels_ << ")";
  if (a.speech_type_ != b.speech_type_)
    return ::testing::AssertionFailure()
           << "speech_type_ diff (" << a.speech_type_
           << " != " << b.speech_type_ << ")";
  if (a.vad_activity_ != b.vad_activity_)
    return ::testing::AssertionFailure()
           << "vad_activity_ diff (" << a.vad_activity_
           << " != " << b.vad_activity_ << ")";
  return ::testing::AssertionSuccess();
}

::testing::AssertionResult AudioFramesEqual(const AudioFrame& a,
                                            const AudioFrame& b) {
  ::testing::AssertionResult res = AudioFramesEqualExceptData(a, b);
  if (!res)
    return res;
  if (memcmp(a.data(), b.data(),
             a.samples_per_channel_ * a.num_channels_ * sizeof(*a.data())) !=
      0) {
    return ::testing::AssertionFailure() << "data_ diff";
  }
  return ::testing::AssertionSuccess();
}

}  // namespace

TEST_F(NetEqDecodingTestTwoInstances, CompareMutedStateOnOff) {
  ASSERT_FALSE(config_.enable_muted_state);
  config2_.enable_muted_state = true;
  CreateSecondInstance();

  // Insert one speech packet into both NetEqs.
  const size_t kSamples = 10 * 16;
  const size_t kPayloadBytes = kSamples * 2;
  uint8_t payload[kPayloadBytes] = {0};
  RTPHeader rtp_info;
  PopulateRtpInfo(0, 0, &rtp_info);
  EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload));
  EXPECT_EQ(0, neteq2_->InsertPacket(rtp_info, payload));

  AudioFrame out_frame1, out_frame2;
  bool muted;
  for (int i = 0; i < 1000; ++i) {
    rtc::StringBuilder ss;
    ss << "i = " << i;
    SCOPED_TRACE(ss.str());  // Print out the loop iterator on failure.
    EXPECT_EQ(0, neteq_->GetAudio(&out_frame1, &muted));
    EXPECT_FALSE(muted);
    EXPECT_EQ(0, neteq2_->GetAudio(&out_frame2, &muted));
    if (muted) {
      EXPECT_TRUE(AudioFramesEqualExceptData(out_frame1, out_frame2));
    } else {
      EXPECT_TRUE(AudioFramesEqual(out_frame1, out_frame2));
    }
  }
  EXPECT_TRUE(muted);

  // Insert new data. Timestamp is corrected for the time elapsed since the last
  // packet.
  for (int i = 0; i < 5; ++i) {
    PopulateRtpInfo(0, kSamples * 1000 + kSamples * i, &rtp_info);
    EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload));
    EXPECT_EQ(0, neteq2_->InsertPacket(rtp_info, payload));
  }

  int counter = 0;
  while (out_frame1.speech_type_ != AudioFrame::kNormalSpeech) {
    ASSERT_LT(counter++, 1000) << "Test timed out";
    rtc::StringBuilder ss;
    ss << "counter = " << counter;
    SCOPED_TRACE(ss.str());  // Print out the loop iterator on failure.
    EXPECT_EQ(0, neteq_->GetAudio(&out_frame1, &muted));
    EXPECT_FALSE(muted);
    EXPECT_EQ(0, neteq2_->GetAudio(&out_frame2, &muted));
    if (muted) {
      EXPECT_TRUE(AudioFramesEqualExceptData(out_frame1, out_frame2));
    } else {
      EXPECT_TRUE(AudioFramesEqual(out_frame1, out_frame2));
    }
  }
  EXPECT_FALSE(muted);
}

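// Tests for NetEq::LastDecodedTimestamps(), which reports the RTP timestamps
// of the packets that were decoded in the most recent GetAudio call.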
TEST_F(NetEqDecodingTest, LastDecodedTimestampsEmpty) {
  EXPECT_TRUE(neteq_->LastDecodedTimestamps().empty());

  // Pull out data once.
  AudioFrame output;
  bool muted;
  ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));

  EXPECT_TRUE(neteq_->LastDecodedTimestamps().empty());
}

TEST_F(NetEqDecodingTest, LastDecodedTimestampsOneDecoded) {
  // Insert one packet with PCM16b WB data (this is what PopulateRtpInfo does by
  // default). Make the length 10 ms.
  constexpr size_t kPayloadSamples = 16 * 10;
  constexpr size_t kPayloadBytes = 2 * kPayloadSamples;
  uint8_t payload[kPayloadBytes] = {0};

  RTPHeader rtp_info;
  constexpr uint32_t kRtpTimestamp = 0x1234;
  PopulateRtpInfo(0, kRtpTimestamp, &rtp_info);
  EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload));

  // Pull out data once.
  AudioFrame output;
  bool muted;
  ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));

  EXPECT_EQ(std::vector<uint32_t>({kRtpTimestamp}),
            neteq_->LastDecodedTimestamps());

  // Nothing decoded on the second call.
  ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));
  EXPECT_TRUE(neteq_->LastDecodedTimestamps().empty());
}

TEST_F(NetEqDecodingTest, LastDecodedTimestampsTwoDecoded) {
  // Insert two packets with PCM16b WB data (this is what PopulateRtpInfo does
  // by default). Make the length 5 ms so that NetEq must decode them both in
  // the same GetAudio call.
  constexpr size_t kPayloadSamples = 16 * 5;
  constexpr size_t kPayloadBytes = 2 * kPayloadSamples;
  uint8_t payload[kPayloadBytes] = {0};

  RTPHeader rtp_info;
  constexpr uint32_t kRtpTimestamp1 = 0x1234;
  PopulateRtpInfo(0, kRtpTimestamp1, &rtp_info);
  EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload));
  constexpr uint32_t kRtpTimestamp2 = kRtpTimestamp1 + kPayloadSamples;
  PopulateRtpInfo(1, kRtpTimestamp2, &rtp_info);
  EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload));

  // Pull out data once.
  AudioFrame output;
  bool muted;
  ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));

  EXPECT_EQ(std::vector<uint32_t>({kRtpTimestamp1, kRtpTimestamp2}),
            neteq_->LastDecodedTimestamps());
}

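// Verifies that each gap in the packet stream is counted as exactly one
// concealment event in the lifetime statistics.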
TEST_F(NetEqDecodingTest, TestConcealmentEvents) {
  const int kNumConcealmentEvents = 19;
  const size_t kSamples = 10 * 16;
  const size_t kPayloadBytes = kSamples * 2;
  int seq_no = 0;
  RTPHeader rtp_info;
  rtp_info.ssrc = 0x1234;     // Just an arbitrary SSRC.
  rtp_info.payloadType = 94;  // PCM16b WB codec.
  rtp_info.markerBit = 0;
  const uint8_t payload[kPayloadBytes] = {0};
  bool muted;

  for (int i = 0; i < kNumConcealmentEvents; i++) {
    // Insert some packets of 10 ms size.
    for (int j = 0; j < 10; j++) {
      rtp_info.sequenceNumber = seq_no++;
      rtp_info.timestamp = rtp_info.sequenceNumber * kSamples;
      neteq_->InsertPacket(rtp_info, payload);
      neteq_->GetAudio(&out_frame_, &muted);
    }

    // Lose a number of packets.
    int num_lost = 1 + i;
    for (int j = 0; j < num_lost; j++) {
      seq_no++;
      neteq_->GetAudio(&out_frame_, &muted);
    }
  }

  // Check number of concealment events.
  NetEqLifetimeStatistics stats = neteq_->GetLifetimeStatistics();
  EXPECT_EQ(kNumConcealmentEvents, static_cast<int>(stats.concealment_events));
}

Gustaf Ullbergb0a02072017-10-02 12:00:34 +0200975// Test that the jitter buffer delay stat is computed correctly.
976void NetEqDecodingTestFaxMode::TestJitterBufferDelay(bool apply_packet_loss) {
977 const int kNumPackets = 10;
978 const int kDelayInNumPackets = 2;
979 const int kPacketLenMs = 10; // All packets are of 10 ms size.
980 const size_t kSamples = kPacketLenMs * 16;
981 const size_t kPayloadBytes = kSamples * 2;
982 RTPHeader rtp_info;
983 rtp_info.ssrc = 0x1234; // Just an arbitrary SSRC.
984 rtp_info.payloadType = 94; // PCM16b WB codec.
985 rtp_info.markerBit = 0;
986 const uint8_t payload[kPayloadBytes] = {0};
987 bool muted;
988 int packets_sent = 0;
989 int packets_received = 0;
990 int expected_delay = 0;
Artem Titove618cc92020-03-11 11:18:54 +0100991 int expected_target_delay = 0;
Chen Xing0acffb52019-01-15 15:46:29 +0100992 uint64_t expected_emitted_count = 0;
Gustaf Ullbergb0a02072017-10-02 12:00:34 +0200993 while (packets_received < kNumPackets) {
994 // Insert packet.
995 if (packets_sent < kNumPackets) {
996 rtp_info.sequenceNumber = packets_sent++;
997 rtp_info.timestamp = rtp_info.sequenceNumber * kSamples;
Karl Wiberg45eb1352019-10-10 14:23:00 +0200998 neteq_->InsertPacket(rtp_info, payload);
Gustaf Ullbergb0a02072017-10-02 12:00:34 +0200999 }
1000
1001 // Get packet.
1002 if (packets_sent > kDelayInNumPackets) {
1003 neteq_->GetAudio(&out_frame_, &muted);
1004 packets_received++;

      // The delay reported by the jitter buffer never exceeds the amount of
      // audio fetched with GetAudio so far (hence the min()).
      int packets_delay = std::min(packets_received, kDelayInNumPackets + 1);

      // The expected delay grows by the current jitter buffer delay in ms
      // multiplied by the number of samples sent for playout.
      int current_delay_ms = packets_delay * kPacketLenMs;
      expected_delay += current_delay_ms * kSamples;
      expected_target_delay += neteq_->TargetDelayMs() * kSamples;
      expected_emitted_count += kSamples;
    }
  }

  if (apply_packet_loss) {
    // Extra call to GetAudio to cause concealment.
    neteq_->GetAudio(&out_frame_, &muted);
  }

  // Check jitter buffer delay.
  NetEqLifetimeStatistics stats = neteq_->GetLifetimeStatistics();
  EXPECT_EQ(expected_delay,
            rtc::checked_cast<int>(stats.jitter_buffer_delay_ms));
  EXPECT_EQ(expected_emitted_count, stats.jitter_buffer_emitted_count);
  EXPECT_EQ(expected_target_delay,
            rtc::checked_cast<int>(stats.jitter_buffer_target_delay_ms));
}
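
// Added worked example for TestJitterBufferDelay (a sketch of the arithmetic,
// not from the original file): with kPacketLenMs = 10 and kSamples = 160, the
// first GetAudio call happens once packets_sent > kDelayInNumPackets, i.e.
// with three packets buffered. packets_delay therefore ramps 1, 2, 3, 3, ...
// and is capped at kDelayInNumPackets + 1 = 3, so each pull adds
// packets_delay * 10 ms * 160 samples to expected_delay and 160 samples to
// expected_emitted_count, which is what the EXPECT_EQ checks compare against
// the lifetime stats.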

TEST_F(NetEqDecodingTestFaxMode, TestJitterBufferDelayWithoutLoss) {
  TestJitterBufferDelay(false);
}

TEST_F(NetEqDecodingTestFaxMode, TestJitterBufferDelayWithLoss) {
  TestJitterBufferDelay(true);
}

TEST_F(NetEqDecodingTestFaxMode, TestJitterBufferDelayWithAcceleration) {
  const int kPacketLenMs = 10;  // All packets are of 10 ms size.
  const size_t kSamples = kPacketLenMs * 16;
  const size_t kPayloadBytes = kSamples * 2;
  RTPHeader rtp_info;
  rtp_info.ssrc = 0x1234;     // Just an arbitrary SSRC.
  rtp_info.payloadType = 94;  // PCM16b WB codec.
  rtp_info.markerBit = 0;
  const uint8_t payload[kPayloadBytes] = {0};

  int expected_target_delay = neteq_->TargetDelayMs() * kSamples;
  neteq_->InsertPacket(rtp_info, payload);

  bool muted;
  neteq_->GetAudio(&out_frame_, &muted);

  rtp_info.sequenceNumber += 1;
  rtp_info.timestamp += kSamples;
  neteq_->InsertPacket(rtp_info, payload);
  rtp_info.sequenceNumber += 1;
  rtp_info.timestamp += kSamples;
  neteq_->InsertPacket(rtp_info, payload);

  expected_target_delay += neteq_->TargetDelayMs() * 2 * kSamples;
  // We have two packets in the buffer, and the kAccelerate operation will
  // extract 20 ms of data.
  neteq_->GetAudio(&out_frame_, &muted, NetEq::Operation::kAccelerate);

  // Check jitter buffer delay.
  NetEqLifetimeStatistics stats = neteq_->GetLifetimeStatistics();
  EXPECT_EQ(10 * kSamples * 3, stats.jitter_buffer_delay_ms);
  EXPECT_EQ(kSamples * 3, stats.jitter_buffer_emitted_count);
  EXPECT_EQ(expected_target_delay,
            rtc::checked_cast<int>(stats.jitter_buffer_target_delay_ms));
}
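
// Added note on the acceleration test above (commentary, not from the original
// file): all three 10 ms packets are consumed from the buffer, so
// jitter_buffer_emitted_count ends up at 3 * kSamples even though the
// kAccelerate pull plays out less audio than the 20 ms it extracts; the delay
// stats are accounted per consumed sample rather than per played-out sample.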
1078
Henrik Lundin7687ad52018-07-02 10:14:46 +02001079namespace test {
Henrik Lundin7687ad52018-07-02 10:14:46 +02001080TEST(NetEqNoTimeStretchingMode, RunTest) {
1081 NetEq::Config config;
1082 config.for_test_no_time_stretching = true;
1083 auto codecs = NetEqTest::StandardDecoderMap();
Henrik Lundin7687ad52018-07-02 10:14:46 +02001084 NetEqPacketSourceInput::RtpHeaderExtensionMap rtp_ext_map = {
1085 {1, kRtpExtensionAudioLevel},
1086 {3, kRtpExtensionAbsoluteSendTime},
1087 {5, kRtpExtensionTransportSequenceNumber},
1088 {7, kRtpExtensionVideoContentType},
1089 {8, kRtpExtensionVideoTiming}};
1090 std::unique_ptr<NetEqInput> input(new NetEqRtpDumpInput(
1091 webrtc::test::ResourcePath("audio_coding/neteq_universal_new", "rtp"),
Bjorn Terelius5350d1c2018-10-11 16:51:23 +02001092 rtp_ext_map, absl::nullopt /*No SSRC filter*/));
Henrik Lundin7687ad52018-07-02 10:14:46 +02001093 std::unique_ptr<TimeLimitedNetEqInput> input_time_limit(
1094 new TimeLimitedNetEqInput(std::move(input), 20000));
1095 std::unique_ptr<AudioSink> output(new VoidAudioSink);
1096 NetEqTest::Callbacks callbacks;
Ivo Creusencee751a2020-01-16 17:17:09 +01001097 NetEqTest test(config, CreateBuiltinAudioDecoderFactory(), codecs,
1098 /*text_log=*/nullptr, /*neteq_factory=*/nullptr,
1099 /*input=*/std::move(input_time_limit), std::move(output),
1100 callbacks);
Henrik Lundin7687ad52018-07-02 10:14:46 +02001101 test.Run();
1102 const auto stats = test.SimulationStats();
1103 EXPECT_EQ(0, stats.accelerate_rate);
1104 EXPECT_EQ(0, stats.preemptive_rate);
1105}
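
// Added note (commentary, not from the original file): the zero accelerate and
// preemptive rates above are the observable effect of
// config.for_test_no_time_stretching, which keeps NetEq from choosing the
// time-stretching operations during the simulation.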

namespace {
// Helper classes and data types and functions for NetEqOutputDelayTest.

class VectorAudioSink : public AudioSink {
 public:
  // Does not take ownership of the vector.
  VectorAudioSink(std::vector<int16_t>* output_vector) : v_(output_vector) {}

  virtual ~VectorAudioSink() = default;

  bool WriteArray(const int16_t* audio, size_t num_samples) override {
    v_->reserve(v_->size() + num_samples);
    for (size_t i = 0; i < num_samples; ++i) {
      v_->push_back(audio[i]);
    }
    return true;
  }

 private:
  std::vector<int16_t>* const v_;
};
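
// Usage sketch (added commentary): the sink appends every decoded sample to a
// caller-owned vector, e.g.
//   std::vector<int16_t> samples;
//   VectorAudioSink sink(&samples);
// DelayLineNetEqTest below passes such a sink to NetEqTest as the output.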

struct TestResult {
  NetEqLifetimeStatistics lifetime_stats;
  NetEqNetworkStatistics network_stats;
  absl::optional<uint32_t> playout_timestamp;
  int target_delay_ms;
  int filtered_current_delay_ms;
  int sample_rate_hz;
};

// This class is used as a callback object with NetEqTest to collect some
// stats at the end of the simulation.
class SimEndStatsCollector : public NetEqSimulationEndedCallback {
 public:
  SimEndStatsCollector(TestResult& result) : result_(result) {}

  void SimulationEnded(int64_t /*simulation_time_ms*/, NetEq* neteq) override {
    result_.playout_timestamp = neteq->GetPlayoutTimestamp();
    result_.target_delay_ms = neteq->TargetDelayMs();
    result_.filtered_current_delay_ms = neteq->FilteredCurrentDelayMs();
    result_.sample_rate_hz = neteq->last_output_sample_rate_hz();
  }

 private:
  TestResult& result_;
};

TestResult DelayLineNetEqTest(int delay_ms,
                              std::vector<int16_t>* output_vector) {
  NetEq::Config config;
  config.for_test_no_time_stretching = true;
  config.extra_output_delay_ms = delay_ms;
  auto codecs = NetEqTest::StandardDecoderMap();
  NetEqPacketSourceInput::RtpHeaderExtensionMap rtp_ext_map = {
      {1, kRtpExtensionAudioLevel},
      {3, kRtpExtensionAbsoluteSendTime},
      {5, kRtpExtensionTransportSequenceNumber},
      {7, kRtpExtensionVideoContentType},
      {8, kRtpExtensionVideoTiming}};
  std::unique_ptr<NetEqInput> input = std::make_unique<NetEqRtpDumpInput>(
      webrtc::test::ResourcePath("audio_coding/neteq_universal_new", "rtp"),
      rtp_ext_map, absl::nullopt /*No SSRC filter*/);
  std::unique_ptr<TimeLimitedNetEqInput> input_time_limit(
      new TimeLimitedNetEqInput(std::move(input), 10000));
  std::unique_ptr<AudioSink> output =
      std::make_unique<VectorAudioSink>(output_vector);

  TestResult result;
  SimEndStatsCollector stats_collector(result);
  NetEqTest::Callbacks callbacks;
  callbacks.simulation_ended_callback = &stats_collector;

  NetEqTest test(config, CreateBuiltinAudioDecoderFactory(), codecs,
                 /*text_log=*/nullptr, /*neteq_factory=*/nullptr,
                 /*input=*/std::move(input_time_limit), std::move(output),
                 callbacks);
  test.Run();
  result.lifetime_stats = test.LifetimeStats();
  result.network_stats = test.SimulationStats();
  return result;
}
}  // namespace

// Tests the extra output delay functionality of NetEq.
TEST(NetEqOutputDelayTest, RunTest) {
  std::vector<int16_t> output;
  const auto result_no_delay = DelayLineNetEqTest(0, &output);
  std::vector<int16_t> output_delayed;
  constexpr int kDelayMs = 100;
  const auto result_delay = DelayLineNetEqTest(kDelayMs, &output_delayed);

  // Verify that the loss concealment remains unchanged. The point of the delay
  // is to not affect the jitter buffering behavior.
  // First verify that there are concealments in the test.
  EXPECT_GT(result_no_delay.lifetime_stats.concealed_samples, 0u);
  // And that not all of the output is concealment.
  EXPECT_GT(result_no_delay.lifetime_stats.total_samples_received,
            result_no_delay.lifetime_stats.concealed_samples);
  // Now verify that they remain unchanged by the delay.
  EXPECT_EQ(result_no_delay.lifetime_stats.concealed_samples,
            result_delay.lifetime_stats.concealed_samples);
  // Accelerate and pre-emptive expand should also be unchanged.
  EXPECT_EQ(result_no_delay.lifetime_stats.inserted_samples_for_deceleration,
            result_delay.lifetime_stats.inserted_samples_for_deceleration);
  EXPECT_EQ(result_no_delay.lifetime_stats.removed_samples_for_acceleration,
            result_delay.lifetime_stats.removed_samples_for_acceleration);
  // Verify that delay stats are increased with the delay chain.
  EXPECT_EQ(
      result_no_delay.lifetime_stats.jitter_buffer_delay_ms +
          kDelayMs * result_no_delay.lifetime_stats.jitter_buffer_emitted_count,
      result_delay.lifetime_stats.jitter_buffer_delay_ms);
  EXPECT_EQ(
      result_no_delay.lifetime_stats.jitter_buffer_target_delay_ms +
          kDelayMs * result_no_delay.lifetime_stats.jitter_buffer_emitted_count,
      result_delay.lifetime_stats.jitter_buffer_target_delay_ms);
  EXPECT_EQ(result_no_delay.network_stats.current_buffer_size_ms + kDelayMs,
            result_delay.network_stats.current_buffer_size_ms);
  EXPECT_EQ(result_no_delay.network_stats.preferred_buffer_size_ms + kDelayMs,
            result_delay.network_stats.preferred_buffer_size_ms);
  EXPECT_EQ(result_no_delay.network_stats.mean_waiting_time_ms + kDelayMs,
            result_delay.network_stats.mean_waiting_time_ms);
  EXPECT_EQ(result_no_delay.network_stats.median_waiting_time_ms + kDelayMs,
            result_delay.network_stats.median_waiting_time_ms);
  EXPECT_EQ(result_no_delay.network_stats.min_waiting_time_ms + kDelayMs,
            result_delay.network_stats.min_waiting_time_ms);
  EXPECT_EQ(result_no_delay.network_stats.max_waiting_time_ms + kDelayMs,
            result_delay.network_stats.max_waiting_time_ms);

  ASSERT_TRUE(result_no_delay.playout_timestamp);
  ASSERT_TRUE(result_delay.playout_timestamp);
  EXPECT_EQ(*result_no_delay.playout_timestamp -
                static_cast<uint32_t>(
                    kDelayMs *
                    rtc::CheckedDivExact(result_no_delay.sample_rate_hz, 1000)),
            *result_delay.playout_timestamp);
  EXPECT_EQ(result_no_delay.target_delay_ms + kDelayMs,
            result_delay.target_delay_ms);
  EXPECT_EQ(result_no_delay.filtered_current_delay_ms + kDelayMs,
            result_delay.filtered_current_delay_ms);

  // Verify expected delay in decoded signal. The test vector uses 8 kHz sample
  // rate, so the delay will be 8 times the delay in ms.
  constexpr size_t kExpectedDelaySamples = kDelayMs * 8;
  for (size_t i = 0;
       i < output.size() && i + kExpectedDelaySamples < output_delayed.size();
       ++i) {
    EXPECT_EQ(output[i], output_delayed[i + kExpectedDelaySamples]);
  }
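
  // Added note (commentary): with kDelayMs = 100 and the 8 kHz test vector,
  // kExpectedDelaySamples is 800, so the loop above checks that the delayed
  // output is a sample-exact 800-sample shift of the undelayed output.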
}

// Tests the extra output delay functionality of NetEq when configured via
// field trial.
TEST(NetEqOutputDelayTest, RunTestWithFieldTrial) {
  test::ScopedFieldTrials field_trial(
      "WebRTC-Audio-NetEqExtraDelay/Enabled-50/");
  constexpr int kExpectedDelayMs = 50;
  std::vector<int16_t> output;
  const auto result = DelayLineNetEqTest(0, &output);

  // The base delay values are taken from the results of the non-delayed case
  // in NetEqOutputDelayTest.RunTest above.
  EXPECT_EQ(20 + kExpectedDelayMs, result.target_delay_ms);
  EXPECT_EQ(24 + kExpectedDelayMs, result.filtered_current_delay_ms);
}

// Set a non-multiple-of-10 value in the field trial, and verify that we don't
// crash, and that the result is rounded down.
TEST(NetEqOutputDelayTest, RunTestWithFieldTrialOddValue) {
  test::ScopedFieldTrials field_trial(
      "WebRTC-Audio-NetEqExtraDelay/Enabled-103/");
  constexpr int kRoundedDelayMs = 100;
  std::vector<int16_t> output;
  const auto result = DelayLineNetEqTest(0, &output);

  // The base delay values are taken from the results of the non-delayed case
  // in NetEqOutputDelayTest.RunTest above.
  EXPECT_EQ(20 + kRoundedDelayMs, result.target_delay_ms);
  EXPECT_EQ(24 + kRoundedDelayMs, result.filtered_current_delay_ms);
}

}  // namespace test
}  // namespace webrtc