/*
 *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "api/neteq/neteq.h"

#include <math.h>
#include <stdlib.h>
#include <string.h>  // memset

#include <algorithm>
#include <memory>
#include <set>
#include <string>
#include <vector>

#include "absl/flags/flag.h"
#include "api/audio/audio_frame.h"
#include "api/audio_codecs/builtin_audio_decoder_factory.h"
#include "modules/audio_coding/codecs/pcm16b/pcm16b.h"
#include "modules/audio_coding/neteq/test/neteq_decoding_test.h"
#include "modules/audio_coding/neteq/tools/audio_loop.h"
#include "modules/audio_coding/neteq/tools/neteq_packet_source_input.h"
#include "modules/audio_coding/neteq/tools/neteq_test.h"
#include "modules/include/module_common_types_public.h"
#include "modules/rtp_rtcp/include/rtcp_statistics.h"
#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "rtc_base/ignore_wundef.h"
#include "rtc_base/message_digest.h"
#include "rtc_base/numerics/safe_conversions.h"
#include "rtc_base/strings/string_builder.h"
#include "rtc_base/system/arch.h"
#include "test/field_trial.h"
#include "test/gtest.h"
#include "test/testsupport/file_utils.h"

ABSL_FLAG(bool, gen_ref, false, "Generate reference files.");
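// When --gen_ref is passed, the DecodeAndCompare() calls below are expected
// to regenerate the reference files rather than only compare against them.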

namespace webrtc {

#if defined(WEBRTC_LINUX) && defined(WEBRTC_ARCH_X86_64) &&        \
    defined(WEBRTC_NETEQ_UNITTEST_BITEXACT) &&                      \
    (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)) && \
    defined(WEBRTC_CODEC_ILBC)
#define MAYBE_TestBitExactness TestBitExactness
#else
#define MAYBE_TestBitExactness DISABLED_TestBitExactness
#endif
TEST_F(NetEqDecodingTest, MAYBE_TestBitExactness) {
  const std::string input_rtp_file =
      webrtc::test::ResourcePath("audio_coding/neteq_universal_new", "rtp");

  const std::string output_checksum =
      "dee7a10ab92526876a70a85bc48a4906901af3df";

  const std::string network_stats_checksum =
      "911dbf5fd97f48d25b8f0967286eb73c9d6f6158";

  DecodeAndCompare(input_rtp_file, output_checksum, network_stats_checksum,
                   absl::GetFlag(FLAGS_gen_ref));
}

#if defined(WEBRTC_LINUX) && defined(WEBRTC_ARCH_X86_64) && \
    defined(WEBRTC_NETEQ_UNITTEST_BITEXACT) && defined(WEBRTC_CODEC_OPUS)
#define MAYBE_TestOpusBitExactness TestOpusBitExactness
#else
#define MAYBE_TestOpusBitExactness DISABLED_TestOpusBitExactness
#endif
TEST_F(NetEqDecodingTest, MAYBE_TestOpusBitExactness) {
  const std::string input_rtp_file =
      webrtc::test::ResourcePath("audio_coding/neteq_opus", "rtp");

  // The first checksum applies when SSE is enabled; the second one is the
  // non-SSE checksum.
  const std::string output_checksum =
      "fec6827bb9ee0b21770bbbb4a3a6f8823bf537dc|"
      "c5eb0a8fcf7e8255a40f821cb815e1096619efeb";

  const std::string network_stats_checksum =
      "3d043e47e5f4bb81d37e7bce8c44bf802965c853|"
      "076662525572dba753b11578330bd491923f7f5e";

  DecodeAndCompare(input_rtp_file, output_checksum, network_stats_checksum,
                   absl::GetFlag(FLAGS_gen_ref));
}

#if defined(WEBRTC_LINUX) && defined(WEBRTC_ARCH_X86_64) && \
    defined(WEBRTC_NETEQ_UNITTEST_BITEXACT) && defined(WEBRTC_CODEC_OPUS)
#define MAYBE_TestOpusDtxBitExactness TestOpusDtxBitExactness
#else
#define MAYBE_TestOpusDtxBitExactness DISABLED_TestOpusDtxBitExactness
#endif
TEST_F(NetEqDecodingTest, MAYBE_TestOpusDtxBitExactness) {
  const std::string input_rtp_file =
      webrtc::test::ResourcePath("audio_coding/neteq_opus_dtx", "rtp");

  // The first checksum applies when SSE is enabled; the second one is the
  // non-SSE checksum.
  const std::string output_checksum =
      "b3c4899eab5378ef5e54f2302948872149f6ad5e|"
      "e97e32a77355e7ce46a2dc2f43bf1c2805530fcb";

  const std::string network_stats_checksum =
      "dc8447b9fee1a21fd5d1f4045d62b982a3fb0215";

  DecodeAndCompare(input_rtp_file, output_checksum, network_stats_checksum,
                   absl::GetFlag(FLAGS_gen_ref));
}

// Use fax mode to avoid time-scaling. This is to simplify the testing of
// packet waiting times in the packet buffer.
class NetEqDecodingTestFaxMode : public NetEqDecodingTest {
 protected:
  NetEqDecodingTestFaxMode() : NetEqDecodingTest() {
    config_.for_test_no_time_stretching = true;
  }
  void TestJitterBufferDelay(bool apply_packet_loss);
};

TEST_F(NetEqDecodingTestFaxMode, TestFrameWaitingTimeStatistics) {
  // Insert 30 dummy packets at once. Each packet contains 10 ms 16 kHz audio.
  size_t num_frames = 30;
  const size_t kSamples = 10 * 16;
  const size_t kPayloadBytes = kSamples * 2;
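  // 10 ms at 16 kHz is 160 samples, and each 16-bit PCM sample is 2 bytes.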
  for (size_t i = 0; i < num_frames; ++i) {
    const uint8_t payload[kPayloadBytes] = {0};
    RTPHeader rtp_info;
    rtp_info.sequenceNumber = rtc::checked_cast<uint16_t>(i);
    rtp_info.timestamp = rtc::checked_cast<uint32_t>(i * kSamples);
    rtp_info.ssrc = 0x1234;     // Just an arbitrary SSRC.
    rtp_info.payloadType = 94;  // PCM16b WB codec.
    rtp_info.markerBit = 0;
    ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload));
  }
  // Pull out all data.
  for (size_t i = 0; i < num_frames; ++i) {
    bool muted;
    ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
    ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
  }

  NetEqNetworkStatistics stats;
  EXPECT_EQ(0, neteq_->NetworkStatistics(&stats));
  // Since all frames are dumped into NetEQ at once, but pulled out with 10 ms
  // spacing (per definition), we expect the delay to increase by 10 ms for
  // each packet. Thus, we are calculating the statistics for a series from 10
  // to 300, in steps of 10 ms.
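  // That series has mean (10 + 300) / 2 = 155 ms, median
  // (150 + 160) / 2 = 155 ms, minimum 10 ms and maximum 300 ms, which is what
  // the checks below expect.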
154 EXPECT_EQ(155, stats.mean_waiting_time_ms);
155 EXPECT_EQ(155, stats.median_waiting_time_ms);
156 EXPECT_EQ(10, stats.min_waiting_time_ms);
157 EXPECT_EQ(300, stats.max_waiting_time_ms);
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000158
159 // Check statistics again and make sure it's been reset.
Henrik Lundin1bb8cf82015-08-25 13:08:04 +0200160 EXPECT_EQ(0, neteq_->NetworkStatistics(&stats));
161 EXPECT_EQ(-1, stats.mean_waiting_time_ms);
162 EXPECT_EQ(-1, stats.median_waiting_time_ms);
163 EXPECT_EQ(-1, stats.min_waiting_time_ms);
164 EXPECT_EQ(-1, stats.max_waiting_time_ms);
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000165}
166
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000167
TEST_F(NetEqDecodingTest, LongCngWithNegativeClockDrift) {
  // Apply a clock drift of -25 ms / s (sender faster than receiver).
  const double kDriftFactor = 1000.0 / (1000.0 + 25.0);
  const double kNetworkFreezeTimeMs = 0.0;
  const bool kGetAudioDuringFreezeRecovery = false;
  const int kDelayToleranceMs = 20;
  const int kMaxTimeToSpeechMs = 100;
  LongCngWithClockDrift(kDriftFactor, kNetworkFreezeTimeMs,
                        kGetAudioDuringFreezeRecovery, kDelayToleranceMs,
                        kMaxTimeToSpeechMs);
}

TEST_F(NetEqDecodingTest, LongCngWithPositiveClockDrift) {
  // Apply a clock drift of +25 ms / s (sender slower than receiver).
  const double kDriftFactor = 1000.0 / (1000.0 - 25.0);
  const double kNetworkFreezeTimeMs = 0.0;
  const bool kGetAudioDuringFreezeRecovery = false;
  const int kDelayToleranceMs = 40;
  const int kMaxTimeToSpeechMs = 100;
  LongCngWithClockDrift(kDriftFactor, kNetworkFreezeTimeMs,
                        kGetAudioDuringFreezeRecovery, kDelayToleranceMs,
                        kMaxTimeToSpeechMs);
}

TEST_F(NetEqDecodingTest, LongCngWithNegativeClockDriftNetworkFreeze) {
  // Apply a clock drift of -25 ms / s (sender faster than receiver).
  const double kDriftFactor = 1000.0 / (1000.0 + 25.0);
  const double kNetworkFreezeTimeMs = 5000.0;
  const bool kGetAudioDuringFreezeRecovery = false;
  const int kDelayToleranceMs = 60;
  const int kMaxTimeToSpeechMs = 200;
  LongCngWithClockDrift(kDriftFactor, kNetworkFreezeTimeMs,
                        kGetAudioDuringFreezeRecovery, kDelayToleranceMs,
                        kMaxTimeToSpeechMs);
}

TEST_F(NetEqDecodingTest, LongCngWithPositiveClockDriftNetworkFreeze) {
  // Apply a clock drift of +25 ms / s (sender slower than receiver).
  const double kDriftFactor = 1000.0 / (1000.0 - 25.0);
  const double kNetworkFreezeTimeMs = 5000.0;
  const bool kGetAudioDuringFreezeRecovery = false;
  const int kDelayToleranceMs = 40;
  const int kMaxTimeToSpeechMs = 100;
  LongCngWithClockDrift(kDriftFactor, kNetworkFreezeTimeMs,
                        kGetAudioDuringFreezeRecovery, kDelayToleranceMs,
                        kMaxTimeToSpeechMs);
}

TEST_F(NetEqDecodingTest, LongCngWithPositiveClockDriftNetworkFreezeExtraPull) {
  // Apply a clock drift of +25 ms / s (sender slower than receiver).
  const double kDriftFactor = 1000.0 / (1000.0 - 25.0);
  const double kNetworkFreezeTimeMs = 5000.0;
  const bool kGetAudioDuringFreezeRecovery = true;
  const int kDelayToleranceMs = 40;
  const int kMaxTimeToSpeechMs = 100;
  LongCngWithClockDrift(kDriftFactor, kNetworkFreezeTimeMs,
                        kGetAudioDuringFreezeRecovery, kDelayToleranceMs,
                        kMaxTimeToSpeechMs);
}

TEST_F(NetEqDecodingTest, LongCngWithoutClockDrift) {
  const double kDriftFactor = 1.0;  // No drift.
  const double kNetworkFreezeTimeMs = 0.0;
  const bool kGetAudioDuringFreezeRecovery = false;
  const int kDelayToleranceMs = 10;
  const int kMaxTimeToSpeechMs = 50;
  LongCngWithClockDrift(kDriftFactor, kNetworkFreezeTimeMs,
                        kGetAudioDuringFreezeRecovery, kDelayToleranceMs,
                        kMaxTimeToSpeechMs);
}

TEST_F(NetEqDecodingTest, UnknownPayloadType) {
  const size_t kPayloadBytes = 100;
  uint8_t payload[kPayloadBytes] = {0};
  RTPHeader rtp_info;
  PopulateRtpInfo(0, 0, &rtp_info);
  rtp_info.payloadType = 1;  // Not registered as a decoder.
  EXPECT_EQ(NetEq::kFail, neteq_->InsertPacket(rtp_info, payload));
}

#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
#define MAYBE_DecoderError DecoderError
#else
#define MAYBE_DecoderError DISABLED_DecoderError
#endif

TEST_F(NetEqDecodingTest, MAYBE_DecoderError) {
  const size_t kPayloadBytes = 100;
  uint8_t payload[kPayloadBytes] = {0};
  RTPHeader rtp_info;
  PopulateRtpInfo(0, 0, &rtp_info);
  rtp_info.payloadType = 103;  // iSAC, but the payload is invalid.
  EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload));
  // Set all of `out_data_` to 1, and verify that it was set to 0 by the call
  // to GetAudio.
  int16_t* out_frame_data = out_frame_.mutable_data();
  for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; ++i) {
    out_frame_data[i] = 1;
  }
  bool muted;
  EXPECT_EQ(NetEq::kFail, neteq_->GetAudio(&out_frame_, &muted));
  ASSERT_FALSE(muted);

  // Verify that the first 160 samples are set to 0.
  static const int kExpectedOutputLength = 160;  // 10 ms at 16 kHz sample rate.
  const int16_t* const_out_frame_data = out_frame_.data();
  for (int i = 0; i < kExpectedOutputLength; ++i) {
    rtc::StringBuilder ss;
    ss << "i = " << i;
    SCOPED_TRACE(ss.str());  // Print out the parameter values on failure.
    EXPECT_EQ(0, const_out_frame_data[i]);
  }
}

TEST_F(NetEqDecodingTest, GetAudioBeforeInsertPacket) {
  // Set all of `out_data_` to 1, and verify that it was set to 0 by the call
  // to GetAudio.
  int16_t* out_frame_data = out_frame_.mutable_data();
  for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; ++i) {
    out_frame_data[i] = 1;
  }
  bool muted;
  EXPECT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
  ASSERT_FALSE(muted);
  // Verify that the first block of samples is set to 0.
  static const int kExpectedOutputLength =
      kInitSampleRateHz / 100;  // 10 ms at initial sample rate.
  const int16_t* const_out_frame_data = out_frame_.data();
  for (int i = 0; i < kExpectedOutputLength; ++i) {
    rtc::StringBuilder ss;
    ss << "i = " << i;
    SCOPED_TRACE(ss.str());  // Print out the parameter values on failure.
    EXPECT_EQ(0, const_out_frame_data[i]);
  }
  // Verify that the sample rate did not change from the initial configuration.
  EXPECT_EQ(config_.sample_rate_hz, neteq_->last_output_sample_rate_hz());
}

class NetEqBgnTest : public NetEqDecodingTest {
 protected:
  void CheckBgn(int sampling_rate_hz) {
    size_t expected_samples_per_channel = 0;
    uint8_t payload_type = 0xFF;  // Invalid.
    if (sampling_rate_hz == 8000) {
      expected_samples_per_channel = kBlockSize8kHz;
      payload_type = 93;  // PCM 16, 8 kHz.
    } else if (sampling_rate_hz == 16000) {
      expected_samples_per_channel = kBlockSize16kHz;
      payload_type = 94;  // PCM 16, 16 kHz.
    } else if (sampling_rate_hz == 32000) {
      expected_samples_per_channel = kBlockSize32kHz;
      payload_type = 95;  // PCM 16, 32 kHz.
    } else {
      ASSERT_TRUE(false);  // Unsupported test case.
    }

    AudioFrame output;
    test::AudioLoop input;
    // We are using the same 32 kHz input file for all tests, regardless of
    // `sampling_rate_hz`. The output may sound weird, but the test is still
    // valid.
    ASSERT_TRUE(input.Init(
        webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm"),
        10 * sampling_rate_hz,  // Max 10 seconds loop length.
        expected_samples_per_channel));

    // Payload of 10 ms of PCM16 32 kHz.
    uint8_t payload[kBlockSize32kHz * sizeof(int16_t)];
    RTPHeader rtp_info;
    PopulateRtpInfo(0, 0, &rtp_info);
    rtp_info.payloadType = payload_type;

    bool muted;
    for (int n = 0; n < 10; ++n) {  // Insert a few packets and get audio.
      auto block = input.GetNextBlock();
      ASSERT_EQ(expected_samples_per_channel, block.size());
      size_t enc_len_bytes =
          WebRtcPcm16b_Encode(block.data(), block.size(), payload);
      ASSERT_EQ(enc_len_bytes, expected_samples_per_channel * 2);

      ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, rtc::ArrayView<const uint8_t>(
                                                      payload, enc_len_bytes)));
      output.Reset();
      ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));
      ASSERT_EQ(1u, output.num_channels_);
      ASSERT_EQ(expected_samples_per_channel, output.samples_per_channel_);
      ASSERT_EQ(AudioFrame::kNormalSpeech, output.speech_type_);

      // Next packet.
      rtp_info.timestamp +=
          rtc::checked_cast<uint32_t>(expected_samples_per_channel);
      rtp_info.sequenceNumber++;
    }

    output.Reset();

    // Get audio without inserting packets, expecting PLC and PLC-to-CNG. Pull
    // one frame without checking speech-type. This is the first frame pulled
    // without inserting any packet, and might not be labeled as PLC.
    ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));
    ASSERT_EQ(1u, output.num_channels_);
    ASSERT_EQ(expected_samples_per_channel, output.samples_per_channel_);

    // To be able to test the fading of background noise we need to pull at
    // least 611 frames.
    const int kFadingThreshold = 611;

    // Test several PLC-to-CNG frames for the expected behavior. The number 20
    // is arbitrary, but sufficiently large to test enough frames.
    const int kNumPlcToCngTestFrames = 20;
    bool plc_to_cng = false;
    for (int n = 0; n < kFadingThreshold + kNumPlcToCngTestFrames; ++n) {
      output.Reset();
      // Set to non-zero.
      memset(output.mutable_data(), 1, AudioFrame::kMaxDataSizeBytes);
      ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));
      ASSERT_FALSE(muted);
      ASSERT_EQ(1u, output.num_channels_);
      ASSERT_EQ(expected_samples_per_channel, output.samples_per_channel_);
      if (output.speech_type_ == AudioFrame::kPLCCNG) {
        plc_to_cng = true;
        double sum_squared = 0;
        const int16_t* output_data = output.data();
        for (size_t k = 0;
             k < output.num_channels_ * output.samples_per_channel_; ++k)
          sum_squared += output_data[k] * output_data[k];
        EXPECT_EQ(0, sum_squared);
      } else {
        EXPECT_EQ(AudioFrame::kPLC, output.speech_type_);
      }
    }
    EXPECT_TRUE(plc_to_cng);  // Just to be sure that PLC-to-CNG has occurred.
  }
};

TEST_F(NetEqBgnTest, RunTest) {
  CheckBgn(8000);
  CheckBgn(16000);
  CheckBgn(32000);
}

TEST_F(NetEqDecodingTest, SequenceNumberWrap) {
  // Start with a sequence number that will soon wrap.
  std::set<uint16_t> drop_seq_numbers;  // Don't drop any packets.
  WrapTest(0xFFFF - 10, 0, drop_seq_numbers, true, false);
}

TEST_F(NetEqDecodingTest, SequenceNumberWrapAndDrop) {
  // Start with a sequence number that will soon wrap.
  std::set<uint16_t> drop_seq_numbers;
  drop_seq_numbers.insert(0xFFFF);
  drop_seq_numbers.insert(0x0);
  WrapTest(0xFFFF - 10, 0, drop_seq_numbers, true, false);
}

TEST_F(NetEqDecodingTest, TimestampWrap) {
  // Start with a timestamp that will soon wrap.
  std::set<uint16_t> drop_seq_numbers;
  WrapTest(0, 0xFFFFFFFF - 3000, drop_seq_numbers, false, true);
}

TEST_F(NetEqDecodingTest, TimestampAndSequenceNumberWrap) {
  // Start with a timestamp and a sequence number that will wrap at the same
  // time.
  std::set<uint16_t> drop_seq_numbers;
  WrapTest(0xFFFF - 10, 0xFFFFFFFF - 5000, drop_seq_numbers, true, true);
}

TEST_F(NetEqDecodingTest, DiscardDuplicateCng) {
  uint16_t seq_no = 0;
  uint32_t timestamp = 0;
  const int kFrameSizeMs = 10;
  const int kSampleRateKhz = 16;
  const int kSamples = kFrameSizeMs * kSampleRateKhz;
  const size_t kPayloadBytes = kSamples * 2;

  const int algorithmic_delay_samples =
      std::max(algorithmic_delay_ms_ * kSampleRateKhz, 5 * kSampleRateKhz / 8);
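  // The algorithmic delay converted from ms to samples at 16 kHz, but never
  // less than 5 / 8 ms (10 samples).
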
  // Insert three speech packets. Three are needed to get the frame length
  // correct.
  uint8_t payload[kPayloadBytes] = {0};
  RTPHeader rtp_info;
  bool muted;
  for (int i = 0; i < 3; ++i) {
    PopulateRtpInfo(seq_no, timestamp, &rtp_info);
    ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload));
    ++seq_no;
    timestamp += kSamples;

    // Pull audio once.
    ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
    ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
  }
  // Verify speech output.
  EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_);

  // Insert same CNG packet twice.
  const int kCngPeriodMs = 100;
  const int kCngPeriodSamples = kCngPeriodMs * kSampleRateKhz;
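  // 100 ms at 16 kHz corresponds to 1600 samples.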
  size_t payload_len;
  PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len);
  // This is the first time this CNG packet is inserted.
  ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, rtc::ArrayView<const uint8_t>(
                                                  payload, payload_len)));

  // Pull audio once and make sure CNG is played.
  ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
  ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
  EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
  EXPECT_FALSE(
      neteq_->GetPlayoutTimestamp());  // Returns empty value during CNG.
  EXPECT_EQ(timestamp - algorithmic_delay_samples,
            out_frame_.timestamp_ + out_frame_.samples_per_channel_);

  // Insert the same CNG packet again. Note that at this point it is old, since
  // we have already decoded the first copy of it.
  ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, rtc::ArrayView<const uint8_t>(
                                                  payload, payload_len)));

  // Pull audio until we have played `kCngPeriodMs` of CNG. Start at 10 ms
  // since we have already pulled out CNG once.
  for (int cng_time_ms = 10; cng_time_ms < kCngPeriodMs; cng_time_ms += 10) {
    ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
    ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
    EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
    EXPECT_FALSE(
        neteq_->GetPlayoutTimestamp());  // Returns empty value during CNG.
    EXPECT_EQ(timestamp - algorithmic_delay_samples,
              out_frame_.timestamp_ + out_frame_.samples_per_channel_);
  }

  ++seq_no;
  timestamp += kCngPeriodSamples;
  uint32_t first_speech_timestamp = timestamp;
  // Insert speech again.
  for (int i = 0; i < 3; ++i) {
    PopulateRtpInfo(seq_no, timestamp, &rtp_info);
    ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload));
    ++seq_no;
    timestamp += kSamples;
  }

  // Pull audio once and verify that the output is speech again.
  ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
  ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
  EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_);
  absl::optional<uint32_t> playout_timestamp = neteq_->GetPlayoutTimestamp();
  ASSERT_TRUE(playout_timestamp);
  EXPECT_EQ(first_speech_timestamp + kSamples - algorithmic_delay_samples,
            *playout_timestamp);
}

TEST_F(NetEqDecodingTest, CngFirst) {
  uint16_t seq_no = 0;
  uint32_t timestamp = 0;
  const int kFrameSizeMs = 10;
  const int kSampleRateKhz = 16;
  const int kSamples = kFrameSizeMs * kSampleRateKhz;
  const int kPayloadBytes = kSamples * 2;
  const int kCngPeriodMs = 100;
  const int kCngPeriodSamples = kCngPeriodMs * kSampleRateKhz;
  size_t payload_len;

  uint8_t payload[kPayloadBytes] = {0};
  RTPHeader rtp_info;

  PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len);
  ASSERT_EQ(NetEq::kOK,
            neteq_->InsertPacket(
                rtp_info, rtc::ArrayView<const uint8_t>(payload, payload_len)));
  ++seq_no;
  timestamp += kCngPeriodSamples;

  // Pull audio once and make sure CNG is played.
  bool muted;
  ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
  ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
  EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);

  // Insert some speech packets.
  const uint32_t first_speech_timestamp = timestamp;
  int timeout_counter = 0;
  do {
    ASSERT_LT(timeout_counter++, 20) << "Test timed out";
    PopulateRtpInfo(seq_no, timestamp, &rtp_info);
    ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload));
    ++seq_no;
    timestamp += kSamples;

    // Pull audio once.
    ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
    ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
  } while (!IsNewerTimestamp(out_frame_.timestamp_, first_speech_timestamp));
  // Verify speech output.
  EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_);
}

class NetEqDecodingTestWithMutedState : public NetEqDecodingTest {
 public:
  NetEqDecodingTestWithMutedState() : NetEqDecodingTest() {
    config_.enable_muted_state = true;
  }

 protected:
  static constexpr size_t kSamples = 10 * 16;
  static constexpr size_t kPayloadBytes = kSamples * 2;

  void InsertPacket(uint32_t rtp_timestamp) {
    uint8_t payload[kPayloadBytes] = {0};
    RTPHeader rtp_info;
    PopulateRtpInfo(0, rtp_timestamp, &rtp_info);
    EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload));
  }

  void InsertCngPacket(uint32_t rtp_timestamp) {
    uint8_t payload[kPayloadBytes] = {0};
    RTPHeader rtp_info;
    size_t payload_len;
    PopulateCng(0, rtp_timestamp, &rtp_info, payload, &payload_len);
    EXPECT_EQ(NetEq::kOK,
              neteq_->InsertPacket(rtp_info, rtc::ArrayView<const uint8_t>(
                                                 payload, payload_len)));
  }

  bool GetAudioReturnMuted() {
    bool muted;
    EXPECT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
    return muted;
  }

  void GetAudioUntilMuted() {
    while (!GetAudioReturnMuted()) {
      ASSERT_LT(counter_++, 1000) << "Test timed out";
    }
  }

  void GetAudioUntilNormal() {
    bool muted = false;
    while (out_frame_.speech_type_ != AudioFrame::kNormalSpeech) {
      EXPECT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
      ASSERT_LT(counter_++, 1000) << "Test timed out";
    }
    EXPECT_FALSE(muted);
  }

  int counter_ = 0;
};

// Verifies that NetEq goes in and out of muted state as expected.
TEST_F(NetEqDecodingTestWithMutedState, MutedState) {
  // Insert one speech packet.
  InsertPacket(0);
  // Pull out audio once and expect it not to be muted.
  EXPECT_FALSE(GetAudioReturnMuted());
  // Pull data until faded out.
  GetAudioUntilMuted();
  EXPECT_TRUE(out_frame_.muted());

  // Verify that output audio is not written during muted mode. Other
  // parameters should be correct, though.
  AudioFrame new_frame;
  int16_t* frame_data = new_frame.mutable_data();
  for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; i++) {
    frame_data[i] = 17;
  }
  bool muted;
  EXPECT_EQ(0, neteq_->GetAudio(&new_frame, &muted));
  EXPECT_TRUE(muted);
  EXPECT_TRUE(out_frame_.muted());
  for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; i++) {
    EXPECT_EQ(17, frame_data[i]);
  }
  EXPECT_EQ(out_frame_.timestamp_ + out_frame_.samples_per_channel_,
            new_frame.timestamp_);
  EXPECT_EQ(out_frame_.samples_per_channel_, new_frame.samples_per_channel_);
  EXPECT_EQ(out_frame_.sample_rate_hz_, new_frame.sample_rate_hz_);
  EXPECT_EQ(out_frame_.num_channels_, new_frame.num_channels_);
  EXPECT_EQ(out_frame_.speech_type_, new_frame.speech_type_);
  EXPECT_EQ(out_frame_.vad_activity_, new_frame.vad_activity_);

  // Insert new data. Timestamp is corrected for the time elapsed since the
  // last packet. Verify that normal operation resumes.
  InsertPacket(kSamples * counter_);
  GetAudioUntilNormal();
  EXPECT_FALSE(out_frame_.muted());

  NetEqNetworkStatistics stats;
  EXPECT_EQ(0, neteq_->NetworkStatistics(&stats));
  // NetEqNetworkStatistics::expand_rate tells the fraction of samples that
  // were concealment samples, in Q14 (16384 = 100%). The vast majority should
  // be concealment samples in this test.
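  // The threshold 14000 in Q14 corresponds to 14000 / 16384, i.e. roughly 85%.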
  EXPECT_GT(stats.expand_rate, 14000);
  // And, it should be greater than the speech_expand_rate.
  EXPECT_GT(stats.expand_rate, stats.speech_expand_rate);
}

// Verifies that NetEq goes out of muted state when given a delayed packet.
TEST_F(NetEqDecodingTestWithMutedState, MutedStateDelayedPacket) {
  // Insert one speech packet.
  InsertPacket(0);
  // Pull out audio once and expect it not to be muted.
  EXPECT_FALSE(GetAudioReturnMuted());
  // Pull data until faded out.
  GetAudioUntilMuted();
  // Insert new data. Timestamp is only corrected for half of the time elapsed
  // since the last packet. That is, the new packet is delayed. Verify that
  // normal operation resumes.
  InsertPacket(kSamples * counter_ / 2);
  GetAudioUntilNormal();
}

// Verifies that NetEq goes out of muted state when given a future packet.
TEST_F(NetEqDecodingTestWithMutedState, MutedStateFuturePacket) {
  // Insert one speech packet.
  InsertPacket(0);
  // Pull out audio once and expect it not to be muted.
  EXPECT_FALSE(GetAudioReturnMuted());
  // Pull data until faded out.
  GetAudioUntilMuted();
  // Insert new data. Timestamp is over-corrected for the time elapsed since
  // the last packet. That is, the new packet is too early. Verify that normal
  // operation resumes.
  InsertPacket(kSamples * counter_ * 2);
  GetAudioUntilNormal();
}

// Verifies that NetEq goes out of muted state when given an old packet.
TEST_F(NetEqDecodingTestWithMutedState, MutedStateOldPacket) {
  // Insert one speech packet.
  InsertPacket(0);
  // Pull out audio once and expect it not to be muted.
  EXPECT_FALSE(GetAudioReturnMuted());
  // Pull data until faded out.
  GetAudioUntilMuted();

  EXPECT_NE(AudioFrame::kNormalSpeech, out_frame_.speech_type_);
  // Insert a few packets which are older than the first packet.
  for (int i = 0; i < 5; ++i) {
    InsertPacket(kSamples * (i - 1000));
  }
  EXPECT_FALSE(GetAudioReturnMuted());
  EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_);
}

// Verifies that NetEq doesn't enter muted state when CNG mode is active and
// the packet stream is suspended for a long time.
TEST_F(NetEqDecodingTestWithMutedState, DoNotMuteExtendedCngWithoutPackets) {
  // Insert one CNG packet.
  InsertCngPacket(0);

  // Pull 10 seconds of audio (10 ms audio generated per lap).
  for (int i = 0; i < 1000; ++i) {
    bool muted;
    EXPECT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
    ASSERT_FALSE(muted);
  }
  EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
}

// Verifies that NetEq goes back to normal after a long CNG period with the
// packet stream suspended.
TEST_F(NetEqDecodingTestWithMutedState, RecoverAfterExtendedCngWithoutPackets) {
  // Insert one CNG packet.
  InsertCngPacket(0);

  // Pull 10 seconds of audio (10 ms audio generated per lap).
  for (int i = 0; i < 1000; ++i) {
    bool muted;
    EXPECT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
  }

  // Insert new data. Timestamp is corrected for the time elapsed since the
  // last packet. Verify that normal operation resumes.
  InsertPacket(kSamples * counter_);
  GetAudioUntilNormal();
}

namespace {
::testing::AssertionResult AudioFramesEqualExceptData(const AudioFrame& a,
                                                      const AudioFrame& b) {
  if (a.timestamp_ != b.timestamp_)
    return ::testing::AssertionFailure() << "timestamp_ diff (" << a.timestamp_
                                         << " != " << b.timestamp_ << ")";
  if (a.sample_rate_hz_ != b.sample_rate_hz_)
    return ::testing::AssertionFailure()
           << "sample_rate_hz_ diff (" << a.sample_rate_hz_
           << " != " << b.sample_rate_hz_ << ")";
  if (a.samples_per_channel_ != b.samples_per_channel_)
    return ::testing::AssertionFailure()
           << "samples_per_channel_ diff (" << a.samples_per_channel_
           << " != " << b.samples_per_channel_ << ")";
  if (a.num_channels_ != b.num_channels_)
    return ::testing::AssertionFailure()
           << "num_channels_ diff (" << a.num_channels_
           << " != " << b.num_channels_ << ")";
  if (a.speech_type_ != b.speech_type_)
    return ::testing::AssertionFailure()
           << "speech_type_ diff (" << a.speech_type_
           << " != " << b.speech_type_ << ")";
  if (a.vad_activity_ != b.vad_activity_)
    return ::testing::AssertionFailure()
           << "vad_activity_ diff (" << a.vad_activity_
           << " != " << b.vad_activity_ << ")";
  return ::testing::AssertionSuccess();
}

::testing::AssertionResult AudioFramesEqual(const AudioFrame& a,
                                            const AudioFrame& b) {
  ::testing::AssertionResult res = AudioFramesEqualExceptData(a, b);
  if (!res)
    return res;
  if (memcmp(a.data(), b.data(),
             a.samples_per_channel_ * a.num_channels_ * sizeof(*a.data())) !=
      0) {
    return ::testing::AssertionFailure() << "data_ diff";
  }
  return ::testing::AssertionSuccess();
}

}  // namespace

TEST_F(NetEqDecodingTestTwoInstances, CompareMutedStateOnOff) {
  ASSERT_FALSE(config_.enable_muted_state);
  config2_.enable_muted_state = true;
  CreateSecondInstance();

  // Insert one speech packet into both NetEqs.
  const size_t kSamples = 10 * 16;
  const size_t kPayloadBytes = kSamples * 2;
  uint8_t payload[kPayloadBytes] = {0};
  RTPHeader rtp_info;
  PopulateRtpInfo(0, 0, &rtp_info);
  EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload));
  EXPECT_EQ(0, neteq2_->InsertPacket(rtp_info, payload));

  AudioFrame out_frame1, out_frame2;
  bool muted;
  for (int i = 0; i < 1000; ++i) {
    rtc::StringBuilder ss;
    ss << "i = " << i;
    SCOPED_TRACE(ss.str());  // Print out the loop iterator on failure.
    EXPECT_EQ(0, neteq_->GetAudio(&out_frame1, &muted));
    EXPECT_FALSE(muted);
    EXPECT_EQ(0, neteq2_->GetAudio(&out_frame2, &muted));
    if (muted) {
      EXPECT_TRUE(AudioFramesEqualExceptData(out_frame1, out_frame2));
    } else {
      EXPECT_TRUE(AudioFramesEqual(out_frame1, out_frame2));
    }
  }
  EXPECT_TRUE(muted);

  // Insert new data. Timestamp is corrected for the time elapsed since the
  // last packet.
  for (int i = 0; i < 5; ++i) {
    PopulateRtpInfo(0, kSamples * 1000 + kSamples * i, &rtp_info);
    EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload));
    EXPECT_EQ(0, neteq2_->InsertPacket(rtp_info, payload));
  }

  int counter = 0;
  while (out_frame1.speech_type_ != AudioFrame::kNormalSpeech) {
    ASSERT_LT(counter++, 1000) << "Test timed out";
    rtc::StringBuilder ss;
    ss << "counter = " << counter;
    SCOPED_TRACE(ss.str());  // Print out the loop iterator on failure.
    EXPECT_EQ(0, neteq_->GetAudio(&out_frame1, &muted));
    EXPECT_FALSE(muted);
    EXPECT_EQ(0, neteq2_->GetAudio(&out_frame2, &muted));
    if (muted) {
      EXPECT_TRUE(AudioFramesEqualExceptData(out_frame1, out_frame2));
    } else {
      EXPECT_TRUE(AudioFramesEqual(out_frame1, out_frame2));
    }
  }
  EXPECT_FALSE(muted);
}

TEST_F(NetEqDecodingTest, TestConcealmentEvents) {
  const int kNumConcealmentEvents = 19;
  const size_t kSamples = 10 * 16;
  const size_t kPayloadBytes = kSamples * 2;
  int seq_no = 0;
  RTPHeader rtp_info;
  rtp_info.ssrc = 0x1234;     // Just an arbitrary SSRC.
  rtp_info.payloadType = 94;  // PCM16b WB codec.
  rtp_info.markerBit = 0;
  const uint8_t payload[kPayloadBytes] = {0};
  bool muted;

  for (int i = 0; i < kNumConcealmentEvents; i++) {
    // Insert some packets of 10 ms size.
    for (int j = 0; j < 10; j++) {
      rtp_info.sequenceNumber = seq_no++;
      rtp_info.timestamp = rtp_info.sequenceNumber * kSamples;
      neteq_->InsertPacket(rtp_info, payload);
      neteq_->GetAudio(&out_frame_, &muted);
    }

    // Lose a number of packets.
    int num_lost = 1 + i;
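    // The loss burst grows by one packet per round: 1, 2, ..., 19 packets.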
    for (int j = 0; j < num_lost; j++) {
      seq_no++;
      neteq_->GetAudio(&out_frame_, &muted);
    }
  }

  // Check number of concealment events.
  NetEqLifetimeStatistics stats = neteq_->GetLifetimeStatistics();
  EXPECT_EQ(kNumConcealmentEvents, static_cast<int>(stats.concealment_events));
}

// Test that the jitter buffer delay stat is computed correctly.
void NetEqDecodingTestFaxMode::TestJitterBufferDelay(bool apply_packet_loss) {
  const int kNumPackets = 10;
  const int kDelayInNumPackets = 2;
  const int kPacketLenMs = 10;  // All packets are of 10 ms size.
  const size_t kSamples = kPacketLenMs * 16;
  const size_t kPayloadBytes = kSamples * 2;
  RTPHeader rtp_info;
  rtp_info.ssrc = 0x1234;     // Just an arbitrary SSRC.
  rtp_info.payloadType = 94;  // PCM16b WB codec.
  rtp_info.markerBit = 0;
  const uint8_t payload[kPayloadBytes] = {0};
  bool muted;
  int packets_sent = 0;
  int packets_received = 0;
  int expected_delay = 0;
  int expected_target_delay = 0;
  uint64_t expected_emitted_count = 0;
  while (packets_received < kNumPackets) {
    // Insert packet.
    if (packets_sent < kNumPackets) {
      rtp_info.sequenceNumber = packets_sent++;
      rtp_info.timestamp = rtp_info.sequenceNumber * kSamples;
      neteq_->InsertPacket(rtp_info, payload);
    }

    // Get packet.
    if (packets_sent > kDelayInNumPackets) {
      neteq_->GetAudio(&out_frame_, &muted);
      packets_received++;

      // The delay reported by the jitter buffer never exceeds
      // the number of samples previously fetched with GetAudio
      // (hence the min()).
      int packets_delay = std::min(packets_received, kDelayInNumPackets + 1);

      // The expected delay grows by the current jitter buffer delay in ms
      // multiplied by the number of samples that are sent for playout.
      int current_delay_ms = packets_delay * kPacketLenMs;
      expected_delay += current_delay_ms * kSamples;
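      // For instance, once the buffer holds three packets (30 ms) and each
      // GetAudio call emits 160 samples, expected_delay grows by
      // 30 * 160 = 4800 per call.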
      expected_target_delay += neteq_->TargetDelayMs() * kSamples;
      expected_emitted_count += kSamples;
    }
  }

  if (apply_packet_loss) {
    // Extra call to GetAudio to cause concealment.
    neteq_->GetAudio(&out_frame_, &muted);
  }

  // Check jitter buffer delay.
  NetEqLifetimeStatistics stats = neteq_->GetLifetimeStatistics();
  EXPECT_EQ(expected_delay,
            rtc::checked_cast<int>(stats.jitter_buffer_delay_ms));
  EXPECT_EQ(expected_emitted_count, stats.jitter_buffer_emitted_count);
  EXPECT_EQ(expected_target_delay,
            rtc::checked_cast<int>(stats.jitter_buffer_target_delay_ms));
}

TEST_F(NetEqDecodingTestFaxMode, TestJitterBufferDelayWithoutLoss) {
  TestJitterBufferDelay(false);
}

TEST_F(NetEqDecodingTestFaxMode, TestJitterBufferDelayWithLoss) {
  TestJitterBufferDelay(true);
}

TEST_F(NetEqDecodingTestFaxMode, TestJitterBufferDelayWithAcceleration) {
  const int kPacketLenMs = 10;  // All packets are of 10 ms size.
  const size_t kSamples = kPacketLenMs * 16;
  const size_t kPayloadBytes = kSamples * 2;
  RTPHeader rtp_info;
  rtp_info.ssrc = 0x1234;     // Just an arbitrary SSRC.
  rtp_info.payloadType = 94;  // PCM16b WB codec.
  rtp_info.markerBit = 0;
  const uint8_t payload[kPayloadBytes] = {0};

  int expected_target_delay = neteq_->TargetDelayMs() * kSamples;
  neteq_->InsertPacket(rtp_info, payload);

  bool muted;
  neteq_->GetAudio(&out_frame_, &muted);

  rtp_info.sequenceNumber += 1;
  rtp_info.timestamp += kSamples;
  neteq_->InsertPacket(rtp_info, payload);
  rtp_info.sequenceNumber += 1;
  rtp_info.timestamp += kSamples;
  neteq_->InsertPacket(rtp_info, payload);

  expected_target_delay += neteq_->TargetDelayMs() * 2 * kSamples;
  // We have two packets in the buffer and kAccelerate operation will
  // extract 20 ms of data.
  neteq_->GetAudio(&out_frame_, &muted, nullptr, NetEq::Operation::kAccelerate);

  // Check jitter buffer delay.
  NetEqLifetimeStatistics stats = neteq_->GetLifetimeStatistics();
  EXPECT_EQ(10 * kSamples * 3, stats.jitter_buffer_delay_ms);
  EXPECT_EQ(kSamples * 3, stats.jitter_buffer_emitted_count);
  EXPECT_EQ(expected_target_delay,
            rtc::checked_cast<int>(stats.jitter_buffer_target_delay_ms));
}

namespace test {
TEST(NetEqNoTimeStretchingMode, RunTest) {
  NetEq::Config config;
  config.for_test_no_time_stretching = true;
  auto codecs = NetEqTest::StandardDecoderMap();
  NetEqPacketSourceInput::RtpHeaderExtensionMap rtp_ext_map = {
      {1, kRtpExtensionAudioLevel},
      {3, kRtpExtensionAbsoluteSendTime},
      {5, kRtpExtensionTransportSequenceNumber},
      {7, kRtpExtensionVideoContentType},
      {8, kRtpExtensionVideoTiming}};
  std::unique_ptr<NetEqInput> input(new NetEqRtpDumpInput(
      webrtc::test::ResourcePath("audio_coding/neteq_universal_new", "rtp"),
      rtp_ext_map, absl::nullopt /*No SSRC filter*/));
  std::unique_ptr<TimeLimitedNetEqInput> input_time_limit(
      new TimeLimitedNetEqInput(std::move(input), 20000));
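  // The 20000 above is presumably a duration limit in milliseconds, i.e. only
  // roughly the first 20 seconds of the RTP dump are simulated.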
  std::unique_ptr<AudioSink> output(new VoidAudioSink);
  NetEqTest::Callbacks callbacks;
  NetEqTest test(config, CreateBuiltinAudioDecoderFactory(), codecs,
                 /*text_log=*/nullptr, /*neteq_factory=*/nullptr,
                 /*input=*/std::move(input_time_limit), std::move(output),
                 callbacks);
  test.Run();
  const auto stats = test.SimulationStats();
  EXPECT_EQ(0, stats.accelerate_rate);
  EXPECT_EQ(0, stats.preemptive_rate);
}

}  // namespace test
}  // namespace webrtc