/*
 *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "api/neteq/neteq.h"

#include <math.h>
#include <stdlib.h>
#include <string.h>  // memset

#include <algorithm>
#include <memory>
#include <set>
#include <string>
#include <vector>

#include "absl/flags/flag.h"
#include "api/audio/audio_frame.h"
#include "api/audio_codecs/builtin_audio_decoder_factory.h"
#include "modules/audio_coding/codecs/pcm16b/pcm16b.h"
#include "modules/audio_coding/neteq/test/neteq_decoding_test.h"
#include "modules/audio_coding/neteq/tools/audio_loop.h"
#include "modules/audio_coding/neteq/tools/neteq_packet_source_input.h"
#include "modules/audio_coding/neteq/tools/neteq_test.h"
#include "modules/include/module_common_types_public.h"
#include "modules/rtp_rtcp/include/rtcp_statistics.h"
#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "rtc_base/ignore_wundef.h"
#include "rtc_base/message_digest.h"
#include "rtc_base/numerics/safe_conversions.h"
#include "rtc_base/string_encode.h"
#include "rtc_base/strings/string_builder.h"
#include "rtc_base/system/arch.h"
#include "test/field_trial.h"
#include "test/gtest.h"
#include "test/testsupport/file_utils.h"

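// When the binary is run with --gen_ref, the bit-exactness tests below are
// expected to (re)generate the reference data via DecodeAndCompare() instead
// of only comparing checksums against it.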
ABSL_FLAG(bool, gen_ref, false, "Generate reference files.");

namespace webrtc {

namespace {

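// Returns the checksum that applies to the current platform: Android
// 32/64-bit, Windows 32/64-bit, or the general value used on all other
// platforms.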
const std::string& PlatformChecksum(const std::string& checksum_general,
                                    const std::string& checksum_android_32,
                                    const std::string& checksum_android_64,
                                    const std::string& checksum_win_32,
                                    const std::string& checksum_win_64) {
#if defined(WEBRTC_ANDROID)
#ifdef WEBRTC_ARCH_64_BITS
  return checksum_android_64;
#else
  return checksum_android_32;
#endif  // WEBRTC_ARCH_64_BITS
#elif defined(WEBRTC_WIN)
#ifdef WEBRTC_ARCH_64_BITS
  return checksum_win_64;
#else
  return checksum_win_32;
#endif  // WEBRTC_ARCH_64_BITS
#else
  return checksum_general;
#endif  // WEBRTC_WIN
}

}  // namespace

#if !defined(WEBRTC_IOS) && defined(WEBRTC_NETEQ_UNITTEST_BITEXACT) && \
    (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)) &&    \
    defined(WEBRTC_CODEC_ILBC) && !defined(WEBRTC_ARCH_ARM64)
#define MAYBE_TestBitExactness TestBitExactness
#else
#define MAYBE_TestBitExactness DISABLED_TestBitExactness
#endif
TEST_F(NetEqDecodingTest, MAYBE_TestBitExactness) {
  const std::string input_rtp_file =
      webrtc::test::ResourcePath("audio_coding/neteq_universal_new", "rtp");

  const std::string output_checksum =
      PlatformChecksum("6ae9f643dc3e5f3452d28a772eef7e00e74158bc",
                       "f4374430e870d66268c1b8e22fb700eb072d567e", "not used",
                       "6ae9f643dc3e5f3452d28a772eef7e00e74158bc",
                       "8d73c98645917cdeaaa01c20cf095ccc5a10b2b5");

  const std::string network_stats_checksum =
      PlatformChecksum("3d186ea7e243abfdbd3d39b8ebf8f02a318117e4",
                       "0b725774133da5dd823f2046663c12a76e0dbd79", "not used",
                       "3d186ea7e243abfdbd3d39b8ebf8f02a318117e4",
                       "3d186ea7e243abfdbd3d39b8ebf8f02a318117e4");

  DecodeAndCompare(input_rtp_file, output_checksum, network_stats_checksum,
                   absl::GetFlag(FLAGS_gen_ref));
}

#if !defined(WEBRTC_IOS) && defined(WEBRTC_NETEQ_UNITTEST_BITEXACT) && \
    defined(WEBRTC_CODEC_OPUS)
#define MAYBE_TestOpusBitExactness TestOpusBitExactness
#else
#define MAYBE_TestOpusBitExactness DISABLED_TestOpusBitExactness
#endif
// TODO(webrtc:11325) Reenable after Opus has been upgraded to 1.3.
TEST_F(NetEqDecodingTest, DISABLED_TestOpusBitExactness) {
  const std::string input_rtp_file =
      webrtc::test::ResourcePath("audio_coding/neteq_opus", "rtp");

  // Checksum depends on libopus being compiled with or without SSE.
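  // The '|'-separated string below is assumed to list both accepted digests
  // (one for the SSE build and one for the non-SSE build).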
  const std::string maybe_sse =
      "6b602683ca7285a98118b4824d72f4257952c18f|"
      "eb0b68bddcac00fc85403df64f83126f8ea9bc93";
  const std::string output_checksum = PlatformChecksum(
      maybe_sse, "f95f2a220c9ca5d60b81c4653d46e0de2bee159f",
      "6f288a03d34958f62496f18fa85655593eef4dbe", maybe_sse, maybe_sse);

  const std::string network_stats_checksum =
      PlatformChecksum("87d2d3e5ca7f1b3fb7a501ffaa51ae29aea74544",
                       "6b8c29e39c82f5479f59726744d0cf3e88e725d3",
                       "c876f2a04c4f0a91da7f084f80e87871b7c5a4a1",
                       "87d2d3e5ca7f1b3fb7a501ffaa51ae29aea74544",
                       "87d2d3e5ca7f1b3fb7a501ffaa51ae29aea74544");

  DecodeAndCompare(input_rtp_file, output_checksum, network_stats_checksum,
                   absl::GetFlag(FLAGS_gen_ref));
}

#if !defined(WEBRTC_IOS) && defined(WEBRTC_NETEQ_UNITTEST_BITEXACT) && \
    defined(WEBRTC_CODEC_OPUS)
#define MAYBE_TestOpusDtxBitExactness TestOpusDtxBitExactness
#else
#define MAYBE_TestOpusDtxBitExactness DISABLED_TestOpusDtxBitExactness
#endif
// TODO(webrtc:11325) Reenable after Opus has been upgraded to 1.3.
TEST_F(NetEqDecodingTest, DISABLED_TestOpusDtxBitExactness) {
  const std::string input_rtp_file =
      webrtc::test::ResourcePath("audio_coding/neteq_opus_dtx", "rtp");

  const std::string maybe_sse =
      "0bdeb4ccf95a2577e38274360903ad099fc46787|"
      "f7bbf5d92a0595a2a3445ffbaddfb20e98b6e94e";
  const std::string output_checksum = PlatformChecksum(
      maybe_sse, "6d200cc51a001b6137abf67db2bb8eeb0375cdee",
      "36d43761de86b12520cf2e63f97372a2b7c6f939", maybe_sse, maybe_sse);

  const std::string network_stats_checksum =
      "8caf49765f35b6862066d3f17531ce44d8e25f60";

  DecodeAndCompare(input_rtp_file, output_checksum, network_stats_checksum,
                   absl::GetFlag(FLAGS_gen_ref));
}

// Use fax mode to avoid time-scaling. This is to simplify the testing of
// packet waiting times in the packet buffer.
class NetEqDecodingTestFaxMode : public NetEqDecodingTest {
 protected:
  NetEqDecodingTestFaxMode() : NetEqDecodingTest() {
    config_.for_test_no_time_stretching = true;
  }
  void TestJitterBufferDelay(bool apply_packet_loss);
};

TEST_F(NetEqDecodingTestFaxMode, TestFrameWaitingTimeStatistics) {
  // Insert 30 dummy packets at once. Each packet contains 10 ms 16 kHz audio.
  size_t num_frames = 30;
  const size_t kSamples = 10 * 16;
  const size_t kPayloadBytes = kSamples * 2;
  for (size_t i = 0; i < num_frames; ++i) {
    const uint8_t payload[kPayloadBytes] = {0};
    RTPHeader rtp_info;
    rtp_info.sequenceNumber = rtc::checked_cast<uint16_t>(i);
    rtp_info.timestamp = rtc::checked_cast<uint32_t>(i * kSamples);
    rtp_info.ssrc = 0x1234;     // Just an arbitrary SSRC.
    rtp_info.payloadType = 94;  // PCM16b WB codec.
    rtp_info.markerBit = 0;
    ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload));
  }
  // Pull out all data.
  for (size_t i = 0; i < num_frames; ++i) {
    bool muted;
    ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
    ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
  }

  NetEqNetworkStatistics stats;
  EXPECT_EQ(0, neteq_->NetworkStatistics(&stats));
  // Since all frames are dumped into NetEQ at once, but pulled out with 10 ms
  // spacing (per definition), we expect the delay to increase with 10 ms for
  // each packet. Thus, we are calculating the statistics for a series from 10
  // to 300, in steps of 10 ms.
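  // For that series, mean = (10 + 300) / 2 = 155 ms, median = (150 + 160) / 2
  // = 155 ms, min = 10 ms, and max = 300 ms, which is what is checked below.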
  EXPECT_EQ(155, stats.mean_waiting_time_ms);
  EXPECT_EQ(155, stats.median_waiting_time_ms);
  EXPECT_EQ(10, stats.min_waiting_time_ms);
  EXPECT_EQ(300, stats.max_waiting_time_ms);

  // Check the statistics again and make sure they have been reset.
  EXPECT_EQ(0, neteq_->NetworkStatistics(&stats));
  EXPECT_EQ(-1, stats.mean_waiting_time_ms);
  EXPECT_EQ(-1, stats.median_waiting_time_ms);
  EXPECT_EQ(-1, stats.min_waiting_time_ms);
  EXPECT_EQ(-1, stats.max_waiting_time_ms);
}

TEST_F(NetEqDecodingTest, LongCngWithNegativeClockDrift) {
  // Apply a clock drift of -25 ms / s (sender faster than receiver).
  const double kDriftFactor = 1000.0 / (1000.0 + 25.0);
  const double kNetworkFreezeTimeMs = 0.0;
  const bool kGetAudioDuringFreezeRecovery = false;
  const int kDelayToleranceMs = 20;
  const int kMaxTimeToSpeechMs = 100;
  LongCngWithClockDrift(kDriftFactor, kNetworkFreezeTimeMs,
                        kGetAudioDuringFreezeRecovery, kDelayToleranceMs,
                        kMaxTimeToSpeechMs);
}

TEST_F(NetEqDecodingTest, LongCngWithPositiveClockDrift) {
  // Apply a clock drift of +25 ms / s (sender slower than receiver).
  const double kDriftFactor = 1000.0 / (1000.0 - 25.0);
  const double kNetworkFreezeTimeMs = 0.0;
  const bool kGetAudioDuringFreezeRecovery = false;
  const int kDelayToleranceMs = 40;
  const int kMaxTimeToSpeechMs = 100;
  LongCngWithClockDrift(kDriftFactor, kNetworkFreezeTimeMs,
                        kGetAudioDuringFreezeRecovery, kDelayToleranceMs,
                        kMaxTimeToSpeechMs);
}

TEST_F(NetEqDecodingTest, LongCngWithNegativeClockDriftNetworkFreeze) {
  // Apply a clock drift of -25 ms / s (sender faster than receiver).
  const double kDriftFactor = 1000.0 / (1000.0 + 25.0);
  const double kNetworkFreezeTimeMs = 5000.0;
  const bool kGetAudioDuringFreezeRecovery = false;
  const int kDelayToleranceMs = 60;
  const int kMaxTimeToSpeechMs = 200;
  LongCngWithClockDrift(kDriftFactor, kNetworkFreezeTimeMs,
                        kGetAudioDuringFreezeRecovery, kDelayToleranceMs,
                        kMaxTimeToSpeechMs);
}

TEST_F(NetEqDecodingTest, LongCngWithPositiveClockDriftNetworkFreeze) {
  // Apply a clock drift of +25 ms / s (sender slower than receiver).
  const double kDriftFactor = 1000.0 / (1000.0 - 25.0);
  const double kNetworkFreezeTimeMs = 5000.0;
  const bool kGetAudioDuringFreezeRecovery = false;
  const int kDelayToleranceMs = 40;
  const int kMaxTimeToSpeechMs = 100;
  LongCngWithClockDrift(kDriftFactor, kNetworkFreezeTimeMs,
                        kGetAudioDuringFreezeRecovery, kDelayToleranceMs,
                        kMaxTimeToSpeechMs);
}

TEST_F(NetEqDecodingTest, LongCngWithPositiveClockDriftNetworkFreezeExtraPull) {
  // Apply a clock drift of +25 ms / s (sender slower than receiver).
  const double kDriftFactor = 1000.0 / (1000.0 - 25.0);
  const double kNetworkFreezeTimeMs = 5000.0;
  const bool kGetAudioDuringFreezeRecovery = true;
  const int kDelayToleranceMs = 40;
  const int kMaxTimeToSpeechMs = 100;
  LongCngWithClockDrift(kDriftFactor, kNetworkFreezeTimeMs,
                        kGetAudioDuringFreezeRecovery, kDelayToleranceMs,
                        kMaxTimeToSpeechMs);
}

TEST_F(NetEqDecodingTest, LongCngWithoutClockDrift) {
  const double kDriftFactor = 1.0;  // No drift.
  const double kNetworkFreezeTimeMs = 0.0;
  const bool kGetAudioDuringFreezeRecovery = false;
  const int kDelayToleranceMs = 10;
  const int kMaxTimeToSpeechMs = 50;
  LongCngWithClockDrift(kDriftFactor, kNetworkFreezeTimeMs,
                        kGetAudioDuringFreezeRecovery, kDelayToleranceMs,
                        kMaxTimeToSpeechMs);
}

TEST_F(NetEqDecodingTest, UnknownPayloadType) {
  const size_t kPayloadBytes = 100;
  uint8_t payload[kPayloadBytes] = {0};
  RTPHeader rtp_info;
  PopulateRtpInfo(0, 0, &rtp_info);
  rtp_info.payloadType = 1;  // Not registered as a decoder.
  EXPECT_EQ(NetEq::kFail, neteq_->InsertPacket(rtp_info, payload));
}

#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
#define MAYBE_DecoderError DecoderError
#else
#define MAYBE_DecoderError DISABLED_DecoderError
#endif

TEST_F(NetEqDecodingTest, MAYBE_DecoderError) {
  const size_t kPayloadBytes = 100;
  uint8_t payload[kPayloadBytes] = {0};
  RTPHeader rtp_info;
  PopulateRtpInfo(0, 0, &rtp_info);
  rtp_info.payloadType = 103;  // iSAC, but the payload is invalid.
  EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload));
  // Set all of |out_data_| to 1, and verify that it was set to 0 by the call
  // to GetAudio.
  int16_t* out_frame_data = out_frame_.mutable_data();
  for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; ++i) {
    out_frame_data[i] = 1;
  }
  bool muted;
  EXPECT_EQ(NetEq::kFail, neteq_->GetAudio(&out_frame_, &muted));
  ASSERT_FALSE(muted);

  // Verify that the first 160 samples are set to 0.
  static const int kExpectedOutputLength = 160;  // 10 ms at 16 kHz sample rate.
  const int16_t* const_out_frame_data = out_frame_.data();
  for (int i = 0; i < kExpectedOutputLength; ++i) {
    rtc::StringBuilder ss;
    ss << "i = " << i;
    SCOPED_TRACE(ss.str());  // Print out the parameter values on failure.
    EXPECT_EQ(0, const_out_frame_data[i]);
  }
}

TEST_F(NetEqDecodingTest, GetAudioBeforeInsertPacket) {
  // Set all of |out_data_| to 1, and verify that it was set to 0 by the call
  // to GetAudio.
  int16_t* out_frame_data = out_frame_.mutable_data();
  for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; ++i) {
    out_frame_data[i] = 1;
  }
  bool muted;
  EXPECT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
  ASSERT_FALSE(muted);
  // Verify that the first block of samples is set to 0.
  static const int kExpectedOutputLength =
      kInitSampleRateHz / 100;  // 10 ms at initial sample rate.
  const int16_t* const_out_frame_data = out_frame_.data();
  for (int i = 0; i < kExpectedOutputLength; ++i) {
    rtc::StringBuilder ss;
    ss << "i = " << i;
    SCOPED_TRACE(ss.str());  // Print out the parameter values on failure.
    EXPECT_EQ(0, const_out_frame_data[i]);
  }
  // Verify that the sample rate did not change from the initial configuration.
  EXPECT_EQ(config_.sample_rate_hz, neteq_->last_output_sample_rate_hz());
}

class NetEqBgnTest : public NetEqDecodingTest {
 protected:
  void CheckBgn(int sampling_rate_hz) {
    size_t expected_samples_per_channel = 0;
    uint8_t payload_type = 0xFF;  // Invalid.
    if (sampling_rate_hz == 8000) {
      expected_samples_per_channel = kBlockSize8kHz;
      payload_type = 93;  // PCM 16, 8 kHz.
    } else if (sampling_rate_hz == 16000) {
      expected_samples_per_channel = kBlockSize16kHz;
      payload_type = 94;  // PCM 16, 16 kHz.
    } else if (sampling_rate_hz == 32000) {
      expected_samples_per_channel = kBlockSize32kHz;
      payload_type = 95;  // PCM 16, 32 kHz.
    } else {
      ASSERT_TRUE(false);  // Unsupported test case.
    }

    AudioFrame output;
    test::AudioLoop input;
    // We are using the same 32 kHz input file for all tests, regardless of
    // |sampling_rate_hz|. The output may sound weird, but the test is still
    // valid.
    ASSERT_TRUE(input.Init(
        webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm"),
        10 * sampling_rate_hz,  // Max 10 seconds loop length.
        expected_samples_per_channel));

    // Payload of 10 ms of PCM16 32 kHz.
    uint8_t payload[kBlockSize32kHz * sizeof(int16_t)];
    RTPHeader rtp_info;
    PopulateRtpInfo(0, 0, &rtp_info);
    rtp_info.payloadType = payload_type;

    uint32_t receive_timestamp = 0;
    bool muted;
    for (int n = 0; n < 10; ++n) {  // Insert a few packets and get audio.
      auto block = input.GetNextBlock();
      ASSERT_EQ(expected_samples_per_channel, block.size());
      size_t enc_len_bytes =
          WebRtcPcm16b_Encode(block.data(), block.size(), payload);
      ASSERT_EQ(enc_len_bytes, expected_samples_per_channel * 2);

      ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, rtc::ArrayView<const uint8_t>(
                                                      payload, enc_len_bytes)));
      output.Reset();
      ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));
      ASSERT_EQ(1u, output.num_channels_);
      ASSERT_EQ(expected_samples_per_channel, output.samples_per_channel_);
      ASSERT_EQ(AudioFrame::kNormalSpeech, output.speech_type_);

      // Next packet.
      rtp_info.timestamp +=
          rtc::checked_cast<uint32_t>(expected_samples_per_channel);
      rtp_info.sequenceNumber++;
      receive_timestamp +=
          rtc::checked_cast<uint32_t>(expected_samples_per_channel);
    }

    output.Reset();

    // Get audio without inserting packets, expecting PLC and PLC-to-CNG. Pull
    // one frame without checking speech-type. This is the first frame pulled
    // without inserting any packet, and might not be labeled as PLC.
    ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));
    ASSERT_EQ(1u, output.num_channels_);
    ASSERT_EQ(expected_samples_per_channel, output.samples_per_channel_);

    // To be able to test the fading of background noise we need to pull at
    // least 611 frames.
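    // (611 frames of 10 ms each corresponds to roughly 6.1 seconds of output.)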
    const int kFadingThreshold = 611;

    // Test several PLC-to-CNG frames for the expected behavior. The number 20
    // is arbitrary, but sufficiently large to cover enough frames.
    const int kNumPlcToCngTestFrames = 20;
    bool plc_to_cng = false;
    for (int n = 0; n < kFadingThreshold + kNumPlcToCngTestFrames; ++n) {
      output.Reset();
      // Set to non-zero.
      memset(output.mutable_data(), 1, AudioFrame::kMaxDataSizeBytes);
      ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));
      ASSERT_FALSE(muted);
      ASSERT_EQ(1u, output.num_channels_);
      ASSERT_EQ(expected_samples_per_channel, output.samples_per_channel_);
      if (output.speech_type_ == AudioFrame::kPLCCNG) {
        plc_to_cng = true;
        double sum_squared = 0;
        const int16_t* output_data = output.data();
        for (size_t k = 0;
             k < output.num_channels_ * output.samples_per_channel_; ++k)
          sum_squared += output_data[k] * output_data[k];
        EXPECT_EQ(0, sum_squared);
      } else {
        EXPECT_EQ(AudioFrame::kPLC, output.speech_type_);
      }
    }
    EXPECT_TRUE(plc_to_cng);  // Just to be sure that PLC-to-CNG has occurred.
  }
};

TEST_F(NetEqBgnTest, RunTest) {
  CheckBgn(8000);
  CheckBgn(16000);
  CheckBgn(32000);
}

TEST_F(NetEqDecodingTest, SequenceNumberWrap) {
  // Start with a sequence number that will soon wrap.
  std::set<uint16_t> drop_seq_numbers;  // Don't drop any packets.
  WrapTest(0xFFFF - 10, 0, drop_seq_numbers, true, false);
}

TEST_F(NetEqDecodingTest, SequenceNumberWrapAndDrop) {
  // Start with a sequence number that will soon wrap.
  std::set<uint16_t> drop_seq_numbers;
  drop_seq_numbers.insert(0xFFFF);
  drop_seq_numbers.insert(0x0);
  WrapTest(0xFFFF - 10, 0, drop_seq_numbers, true, false);
}

TEST_F(NetEqDecodingTest, TimestampWrap) {
  // Start with a timestamp that will soon wrap.
  std::set<uint16_t> drop_seq_numbers;
  WrapTest(0, 0xFFFFFFFF - 3000, drop_seq_numbers, false, true);
}

TEST_F(NetEqDecodingTest, TimestampAndSequenceNumberWrap) {
  // Start with a timestamp and a sequence number that will wrap at the same
  // time.
  std::set<uint16_t> drop_seq_numbers;
  WrapTest(0xFFFF - 10, 0xFFFFFFFF - 5000, drop_seq_numbers, true, true);
}

TEST_F(NetEqDecodingTest, DiscardDuplicateCng) {
  uint16_t seq_no = 0;
  uint32_t timestamp = 0;
  const int kFrameSizeMs = 10;
  const int kSampleRateKhz = 16;
  const int kSamples = kFrameSizeMs * kSampleRateKhz;
  const size_t kPayloadBytes = kSamples * 2;

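  // Convert the configured algorithmic delay to samples (ms times kHz), with a
  // lower bound of 5/8 ms (10 samples at 16 kHz).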
  const int algorithmic_delay_samples =
      std::max(algorithmic_delay_ms_ * kSampleRateKhz, 5 * kSampleRateKhz / 8);
  // Insert three speech packets. Three are needed to get the frame length
  // correct.
  uint8_t payload[kPayloadBytes] = {0};
  RTPHeader rtp_info;
  bool muted;
  for (int i = 0; i < 3; ++i) {
    PopulateRtpInfo(seq_no, timestamp, &rtp_info);
    ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload));
    ++seq_no;
    timestamp += kSamples;

    // Pull audio once.
    ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
    ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
  }
  // Verify speech output.
  EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_);

  // Insert same CNG packet twice.
  const int kCngPeriodMs = 100;
  const int kCngPeriodSamples = kCngPeriodMs * kSampleRateKhz;
  size_t payload_len;
  PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len);
  // This is the first time this CNG packet is inserted.
  ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, rtc::ArrayView<const uint8_t>(
                                                  payload, payload_len)));

  // Pull audio once and make sure CNG is played.
  ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
  ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
  EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
  EXPECT_FALSE(
      neteq_->GetPlayoutTimestamp());  // Returns empty value during CNG.
  EXPECT_EQ(timestamp - algorithmic_delay_samples,
            out_frame_.timestamp_ + out_frame_.samples_per_channel_);

  // Insert the same CNG packet again. Note that at this point it is old, since
  // we have already decoded the first copy of it.
  ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, rtc::ArrayView<const uint8_t>(
                                                  payload, payload_len)));

  // Pull audio until we have played |kCngPeriodMs| of CNG. Start at 10 ms since
  // we have already pulled out CNG once.
  for (int cng_time_ms = 10; cng_time_ms < kCngPeriodMs; cng_time_ms += 10) {
    ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
    ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
    EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
    EXPECT_FALSE(
        neteq_->GetPlayoutTimestamp());  // Returns empty value during CNG.
    EXPECT_EQ(timestamp - algorithmic_delay_samples,
              out_frame_.timestamp_ + out_frame_.samples_per_channel_);
  }

  // Insert speech again.
  ++seq_no;
  timestamp += kCngPeriodSamples;
  PopulateRtpInfo(seq_no, timestamp, &rtp_info);
  ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload));

  // Pull audio once and verify that the output is speech again.
  ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
  ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
  EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_);
  absl::optional<uint32_t> playout_timestamp = neteq_->GetPlayoutTimestamp();
  ASSERT_TRUE(playout_timestamp);
  EXPECT_EQ(timestamp + kSamples - algorithmic_delay_samples,
            *playout_timestamp);
}

TEST_F(NetEqDecodingTest, CngFirst) {
  uint16_t seq_no = 0;
  uint32_t timestamp = 0;
  const int kFrameSizeMs = 10;
  const int kSampleRateKhz = 16;
  const int kSamples = kFrameSizeMs * kSampleRateKhz;
  const int kPayloadBytes = kSamples * 2;
  const int kCngPeriodMs = 100;
  const int kCngPeriodSamples = kCngPeriodMs * kSampleRateKhz;
  size_t payload_len;

  uint8_t payload[kPayloadBytes] = {0};
  RTPHeader rtp_info;

  PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len);
  ASSERT_EQ(NetEq::kOK,
            neteq_->InsertPacket(
                rtp_info, rtc::ArrayView<const uint8_t>(payload, payload_len)));
  ++seq_no;
  timestamp += kCngPeriodSamples;

  // Pull audio once and make sure CNG is played.
  bool muted;
  ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
  ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
  EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);

  // Insert some speech packets.
  const uint32_t first_speech_timestamp = timestamp;
  int timeout_counter = 0;
  do {
    ASSERT_LT(timeout_counter++, 20) << "Test timed out";
    PopulateRtpInfo(seq_no, timestamp, &rtp_info);
    ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload));
    ++seq_no;
    timestamp += kSamples;

    // Pull audio once.
    ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
    ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
  } while (!IsNewerTimestamp(out_frame_.timestamp_, first_speech_timestamp));
  // Verify speech output.
  EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_);
}

class NetEqDecodingTestWithMutedState : public NetEqDecodingTest {
 public:
  NetEqDecodingTestWithMutedState() : NetEqDecodingTest() {
    config_.enable_muted_state = true;
  }

 protected:
  static constexpr size_t kSamples = 10 * 16;
  static constexpr size_t kPayloadBytes = kSamples * 2;

  void InsertPacket(uint32_t rtp_timestamp) {
    uint8_t payload[kPayloadBytes] = {0};
    RTPHeader rtp_info;
    PopulateRtpInfo(0, rtp_timestamp, &rtp_info);
    EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload));
  }

  void InsertCngPacket(uint32_t rtp_timestamp) {
    uint8_t payload[kPayloadBytes] = {0};
    RTPHeader rtp_info;
    size_t payload_len;
    PopulateCng(0, rtp_timestamp, &rtp_info, payload, &payload_len);
    EXPECT_EQ(NetEq::kOK,
              neteq_->InsertPacket(rtp_info, rtc::ArrayView<const uint8_t>(
                                                 payload, payload_len)));
  }

  bool GetAudioReturnMuted() {
    bool muted;
    EXPECT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
    return muted;
  }

  void GetAudioUntilMuted() {
    while (!GetAudioReturnMuted()) {
      ASSERT_LT(counter_++, 1000) << "Test timed out";
    }
  }

  void GetAudioUntilNormal() {
    bool muted = false;
    while (out_frame_.speech_type_ != AudioFrame::kNormalSpeech) {
      EXPECT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
      ASSERT_LT(counter_++, 1000) << "Test timed out";
    }
    EXPECT_FALSE(muted);
  }

  int counter_ = 0;
};

// Verifies that NetEq goes in and out of muted state as expected.
TEST_F(NetEqDecodingTestWithMutedState, MutedState) {
  // Insert one speech packet.
  InsertPacket(0);
  // Pull out audio once and expect it not to be muted.
  EXPECT_FALSE(GetAudioReturnMuted());
  // Pull data until faded out.
  GetAudioUntilMuted();
  EXPECT_TRUE(out_frame_.muted());

  // Verify that output audio is not written during muted mode. Other
  // parameters should be correct, though.
  AudioFrame new_frame;
  int16_t* frame_data = new_frame.mutable_data();
  for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; i++) {
    frame_data[i] = 17;
  }
  bool muted;
  EXPECT_EQ(0, neteq_->GetAudio(&new_frame, &muted));
  EXPECT_TRUE(muted);
  EXPECT_TRUE(out_frame_.muted());
  for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; i++) {
    EXPECT_EQ(17, frame_data[i]);
  }
  EXPECT_EQ(out_frame_.timestamp_ + out_frame_.samples_per_channel_,
            new_frame.timestamp_);
  EXPECT_EQ(out_frame_.samples_per_channel_, new_frame.samples_per_channel_);
  EXPECT_EQ(out_frame_.sample_rate_hz_, new_frame.sample_rate_hz_);
  EXPECT_EQ(out_frame_.num_channels_, new_frame.num_channels_);
  EXPECT_EQ(out_frame_.speech_type_, new_frame.speech_type_);
  EXPECT_EQ(out_frame_.vad_activity_, new_frame.vad_activity_);

  // Insert new data. Timestamp is corrected for the time elapsed since the
  // last packet. Verify that normal operation resumes.
  InsertPacket(kSamples * counter_);
  GetAudioUntilNormal();
  EXPECT_FALSE(out_frame_.muted());

  NetEqNetworkStatistics stats;
  EXPECT_EQ(0, neteq_->NetworkStatistics(&stats));
  // NetEqNetworkStatistics::expand_rate tells the fraction of samples that
  // were concealment samples, in Q14 (16384 = 100%). The vast majority should
  // be concealment samples in this test.
  EXPECT_GT(stats.expand_rate, 14000);
  // And it should be greater than the speech_expand_rate.
  EXPECT_GT(stats.expand_rate, stats.speech_expand_rate);
}

// Verifies that NetEq goes out of muted state when given a delayed packet.
TEST_F(NetEqDecodingTestWithMutedState, MutedStateDelayedPacket) {
  // Insert one speech packet.
  InsertPacket(0);
  // Pull out audio once and expect it not to be muted.
  EXPECT_FALSE(GetAudioReturnMuted());
  // Pull data until faded out.
  GetAudioUntilMuted();
  // Insert new data. Timestamp is only corrected for half of the time elapsed
  // since the last packet. That is, the new packet is delayed. Verify that
  // normal operation resumes.
  InsertPacket(kSamples * counter_ / 2);
  GetAudioUntilNormal();
}

// Verifies that NetEq goes out of muted state when given a future packet.
TEST_F(NetEqDecodingTestWithMutedState, MutedStateFuturePacket) {
  // Insert one speech packet.
  InsertPacket(0);
  // Pull out audio once and expect it not to be muted.
  EXPECT_FALSE(GetAudioReturnMuted());
  // Pull data until faded out.
  GetAudioUntilMuted();
  // Insert new data. Timestamp is over-corrected for the time elapsed since
  // the last packet. That is, the new packet is too early. Verify that normal
  // operation resumes.
  InsertPacket(kSamples * counter_ * 2);
  GetAudioUntilNormal();
}

// Verifies that NetEq goes out of muted state when given an old packet.
TEST_F(NetEqDecodingTestWithMutedState, MutedStateOldPacket) {
  // Insert one speech packet.
  InsertPacket(0);
  // Pull out audio once and expect it not to be muted.
  EXPECT_FALSE(GetAudioReturnMuted());
  // Pull data until faded out.
  GetAudioUntilMuted();

  EXPECT_NE(AudioFrame::kNormalSpeech, out_frame_.speech_type_);
  // Insert a packet which is older than the first packet.
  InsertPacket(kSamples * (counter_ - 1000));
  EXPECT_FALSE(GetAudioReturnMuted());
  EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_);
}

// Verifies that NetEq doesn't enter muted state when CNG mode is active and
// the packet stream is suspended for a long time.
TEST_F(NetEqDecodingTestWithMutedState, DoNotMuteExtendedCngWithoutPackets) {
  // Insert one CNG packet.
  InsertCngPacket(0);

  // Pull 10 seconds of audio (10 ms audio generated per lap).
  for (int i = 0; i < 1000; ++i) {
    bool muted;
    EXPECT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
    ASSERT_FALSE(muted);
  }
  EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
}

// Verifies that NetEq goes back to normal after a long CNG period with the
// packet stream suspended.
TEST_F(NetEqDecodingTestWithMutedState, RecoverAfterExtendedCngWithoutPackets) {
  // Insert one CNG packet.
  InsertCngPacket(0);

  // Pull 10 seconds of audio (10 ms audio generated per lap).
  for (int i = 0; i < 1000; ++i) {
    bool muted;
    EXPECT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
  }

  // Insert new data. Timestamp is corrected for the time elapsed since the
  // last packet. Verify that normal operation resumes.
  InsertPacket(kSamples * counter_);
  GetAudioUntilNormal();
}

namespace {
::testing::AssertionResult AudioFramesEqualExceptData(const AudioFrame& a,
                                                      const AudioFrame& b) {
  if (a.timestamp_ != b.timestamp_)
    return ::testing::AssertionFailure() << "timestamp_ diff (" << a.timestamp_
                                         << " != " << b.timestamp_ << ")";
  if (a.sample_rate_hz_ != b.sample_rate_hz_)
    return ::testing::AssertionFailure()
           << "sample_rate_hz_ diff (" << a.sample_rate_hz_
           << " != " << b.sample_rate_hz_ << ")";
  if (a.samples_per_channel_ != b.samples_per_channel_)
    return ::testing::AssertionFailure()
           << "samples_per_channel_ diff (" << a.samples_per_channel_
           << " != " << b.samples_per_channel_ << ")";
  if (a.num_channels_ != b.num_channels_)
    return ::testing::AssertionFailure()
           << "num_channels_ diff (" << a.num_channels_
           << " != " << b.num_channels_ << ")";
  if (a.speech_type_ != b.speech_type_)
    return ::testing::AssertionFailure()
           << "speech_type_ diff (" << a.speech_type_
           << " != " << b.speech_type_ << ")";
  if (a.vad_activity_ != b.vad_activity_)
    return ::testing::AssertionFailure()
           << "vad_activity_ diff (" << a.vad_activity_
           << " != " << b.vad_activity_ << ")";
  return ::testing::AssertionSuccess();
}

::testing::AssertionResult AudioFramesEqual(const AudioFrame& a,
                                            const AudioFrame& b) {
  ::testing::AssertionResult res = AudioFramesEqualExceptData(a, b);
  if (!res)
    return res;
  if (memcmp(a.data(), b.data(),
             a.samples_per_channel_ * a.num_channels_ * sizeof(*a.data())) !=
      0) {
    return ::testing::AssertionFailure() << "data_ diff";
  }
  return ::testing::AssertionSuccess();
}

}  // namespace

TEST_F(NetEqDecodingTestTwoInstances, CompareMutedStateOnOff) {
  ASSERT_FALSE(config_.enable_muted_state);
  config2_.enable_muted_state = true;
  CreateSecondInstance();

  // Insert one speech packet into both NetEqs.
  const size_t kSamples = 10 * 16;
  const size_t kPayloadBytes = kSamples * 2;
  uint8_t payload[kPayloadBytes] = {0};
  RTPHeader rtp_info;
  PopulateRtpInfo(0, 0, &rtp_info);
  EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload));
  EXPECT_EQ(0, neteq2_->InsertPacket(rtp_info, payload));

  AudioFrame out_frame1, out_frame2;
  bool muted;
  for (int i = 0; i < 1000; ++i) {
    rtc::StringBuilder ss;
    ss << "i = " << i;
    SCOPED_TRACE(ss.str());  // Print out the loop iterator on failure.
    EXPECT_EQ(0, neteq_->GetAudio(&out_frame1, &muted));
    EXPECT_FALSE(muted);
    EXPECT_EQ(0, neteq2_->GetAudio(&out_frame2, &muted));
    if (muted) {
      EXPECT_TRUE(AudioFramesEqualExceptData(out_frame1, out_frame2));
    } else {
      EXPECT_TRUE(AudioFramesEqual(out_frame1, out_frame2));
    }
  }
  EXPECT_TRUE(muted);

  // Insert new data. Timestamp is corrected for the time elapsed since the
  // last packet.
  PopulateRtpInfo(0, kSamples * 1000, &rtp_info);
  EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload));
  EXPECT_EQ(0, neteq2_->InsertPacket(rtp_info, payload));

  int counter = 0;
  while (out_frame1.speech_type_ != AudioFrame::kNormalSpeech) {
    ASSERT_LT(counter++, 1000) << "Test timed out";
    rtc::StringBuilder ss;
    ss << "counter = " << counter;
    SCOPED_TRACE(ss.str());  // Print out the loop iterator on failure.
    EXPECT_EQ(0, neteq_->GetAudio(&out_frame1, &muted));
    EXPECT_FALSE(muted);
    EXPECT_EQ(0, neteq2_->GetAudio(&out_frame2, &muted));
    if (muted) {
      EXPECT_TRUE(AudioFramesEqualExceptData(out_frame1, out_frame2));
    } else {
      EXPECT_TRUE(AudioFramesEqual(out_frame1, out_frame2));
    }
  }
  EXPECT_FALSE(muted);
}

TEST_F(NetEqDecodingTest, LastDecodedTimestampsEmpty) {
  EXPECT_TRUE(neteq_->LastDecodedTimestamps().empty());

  // Pull out data once.
  AudioFrame output;
  bool muted;
  ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));

  EXPECT_TRUE(neteq_->LastDecodedTimestamps().empty());
}

TEST_F(NetEqDecodingTest, LastDecodedTimestampsOneDecoded) {
  // Insert one packet with PCM16b WB data (this is what PopulateRtpInfo does
  // by default). Make the length 10 ms.
  constexpr size_t kPayloadSamples = 16 * 10;
  constexpr size_t kPayloadBytes = 2 * kPayloadSamples;
  uint8_t payload[kPayloadBytes] = {0};

  RTPHeader rtp_info;
  constexpr uint32_t kRtpTimestamp = 0x1234;
  PopulateRtpInfo(0, kRtpTimestamp, &rtp_info);
  EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload));

  // Pull out data once.
  AudioFrame output;
  bool muted;
  ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));

  EXPECT_EQ(std::vector<uint32_t>({kRtpTimestamp}),
            neteq_->LastDecodedTimestamps());

  // Nothing decoded on the second call.
  ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));
  EXPECT_TRUE(neteq_->LastDecodedTimestamps().empty());
}

TEST_F(NetEqDecodingTest, LastDecodedTimestampsTwoDecoded) {
  // Insert two packets with PCM16b WB data (this is what PopulateRtpInfo does
  // by default). Make the length 5 ms so that NetEq must decode them both in
  // the same GetAudio call.
  constexpr size_t kPayloadSamples = 16 * 5;
  constexpr size_t kPayloadBytes = 2 * kPayloadSamples;
  uint8_t payload[kPayloadBytes] = {0};

  RTPHeader rtp_info;
  constexpr uint32_t kRtpTimestamp1 = 0x1234;
  PopulateRtpInfo(0, kRtpTimestamp1, &rtp_info);
  EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload));
  constexpr uint32_t kRtpTimestamp2 = kRtpTimestamp1 + kPayloadSamples;
  PopulateRtpInfo(1, kRtpTimestamp2, &rtp_info);
  EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload));

  // Pull out data once.
  AudioFrame output;
  bool muted;
  ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));

  EXPECT_EQ(std::vector<uint32_t>({kRtpTimestamp1, kRtpTimestamp2}),
            neteq_->LastDecodedTimestamps());
}

TEST_F(NetEqDecodingTest, TestConcealmentEvents) {
  const int kNumConcealmentEvents = 19;
  const size_t kSamples = 10 * 16;
  const size_t kPayloadBytes = kSamples * 2;
  int seq_no = 0;
  RTPHeader rtp_info;
  rtp_info.ssrc = 0x1234;     // Just an arbitrary SSRC.
  rtp_info.payloadType = 94;  // PCM16b WB codec.
  rtp_info.markerBit = 0;
  const uint8_t payload[kPayloadBytes] = {0};
  bool muted;

  for (int i = 0; i < kNumConcealmentEvents; i++) {
    // Insert some packets of 10 ms size.
    for (int j = 0; j < 10; j++) {
      rtp_info.sequenceNumber = seq_no++;
      rtp_info.timestamp = rtp_info.sequenceNumber * kSamples;
      neteq_->InsertPacket(rtp_info, payload);
      neteq_->GetAudio(&out_frame_, &muted);
    }

    // Lose a number of packets.
    int num_lost = 1 + i;
    for (int j = 0; j < num_lost; j++) {
      seq_no++;
      neteq_->GetAudio(&out_frame_, &muted);
    }
  }

  // Check number of concealment events.
  NetEqLifetimeStatistics stats = neteq_->GetLifetimeStatistics();
  EXPECT_EQ(kNumConcealmentEvents, static_cast<int>(stats.concealment_events));
}

// Test that the jitter buffer delay stat is computed correctly.
void NetEqDecodingTestFaxMode::TestJitterBufferDelay(bool apply_packet_loss) {
  const int kNumPackets = 10;
  const int kDelayInNumPackets = 2;
  const int kPacketLenMs = 10;  // All packets are of 10 ms size.
  const size_t kSamples = kPacketLenMs * 16;
  const size_t kPayloadBytes = kSamples * 2;
  RTPHeader rtp_info;
  rtp_info.ssrc = 0x1234;     // Just an arbitrary SSRC.
  rtp_info.payloadType = 94;  // PCM16b WB codec.
  rtp_info.markerBit = 0;
  const uint8_t payload[kPayloadBytes] = {0};
  bool muted;
  int packets_sent = 0;
  int packets_received = 0;
  int expected_delay = 0;
  uint64_t expected_emitted_count = 0;
  while (packets_received < kNumPackets) {
    // Insert packet.
    if (packets_sent < kNumPackets) {
      rtp_info.sequenceNumber = packets_sent++;
      rtp_info.timestamp = rtp_info.sequenceNumber * kSamples;
      neteq_->InsertPacket(rtp_info, payload);
    }

    // Get packet.
    if (packets_sent > kDelayInNumPackets) {
      neteq_->GetAudio(&out_frame_, &muted);
      packets_received++;

      // The delay reported by the jitter buffer never exceeds
      // the number of samples previously fetched with GetAudio
      // (hence the min()).
      int packets_delay = std::min(packets_received, kDelayInNumPackets + 1);

      // The expected delay increases by the current jitter buffer delay in ms
      // multiplied by the number of samples that are sent for playout.
      int current_delay_ms = packets_delay * kPacketLenMs;
      expected_delay += current_delay_ms * kSamples;
      expected_emitted_count += kSamples;
    }
  }
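  // With kNumPackets = 10 and kDelayInNumPackets = 2, the loop above
  // accumulates (10 + 20 + 8 * 30) ms * 160 samples = 43200 in expected_delay
  // and 10 * 160 = 1600 samples in expected_emitted_count.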

  if (apply_packet_loss) {
    // Extra call to GetAudio to cause concealment.
    neteq_->GetAudio(&out_frame_, &muted);
  }

  // Check jitter buffer delay.
  NetEqLifetimeStatistics stats = neteq_->GetLifetimeStatistics();
  EXPECT_EQ(expected_delay, static_cast<int>(stats.jitter_buffer_delay_ms));
  EXPECT_EQ(expected_emitted_count, stats.jitter_buffer_emitted_count);
}

TEST_F(NetEqDecodingTestFaxMode, TestJitterBufferDelayWithoutLoss) {
  TestJitterBufferDelay(false);
}

TEST_F(NetEqDecodingTestFaxMode, TestJitterBufferDelayWithLoss) {
  TestJitterBufferDelay(true);
}

TEST_F(NetEqDecodingTestFaxMode, TestJitterBufferDelayWithAcceleration) {
  const int kPacketLenMs = 10;  // All packets are of 10 ms size.
  const size_t kSamples = kPacketLenMs * 16;
  const size_t kPayloadBytes = kSamples * 2;
  RTPHeader rtp_info;
  rtp_info.ssrc = 0x1234;     // Just an arbitrary SSRC.
  rtp_info.payloadType = 94;  // PCM16b WB codec.
  rtp_info.markerBit = 0;
  const uint8_t payload[kPayloadBytes] = {0};

  neteq_->InsertPacket(rtp_info, payload);

  bool muted;
  neteq_->GetAudio(&out_frame_, &muted);

  rtp_info.sequenceNumber += 1;
  rtp_info.timestamp += kSamples;
  neteq_->InsertPacket(rtp_info, payload);
  rtp_info.sequenceNumber += 1;
  rtp_info.timestamp += kSamples;
  neteq_->InsertPacket(rtp_info, payload);

  // We have two packets in the buffer and kAccelerate operation will
  // extract 20 ms of data.
  neteq_->GetAudio(&out_frame_, &muted, NetEq::Operation::kAccelerate);

  // Check jitter buffer delay.
  NetEqLifetimeStatistics stats = neteq_->GetLifetimeStatistics();
  EXPECT_EQ(10 * kSamples * 3, stats.jitter_buffer_delay_ms);
  EXPECT_EQ(kSamples * 3, stats.jitter_buffer_emitted_count);
}

namespace test {
TEST(NetEqNoTimeStretchingMode, RunTest) {
  NetEq::Config config;
  config.for_test_no_time_stretching = true;
  auto codecs = NetEqTest::StandardDecoderMap();
  NetEqPacketSourceInput::RtpHeaderExtensionMap rtp_ext_map = {
      {1, kRtpExtensionAudioLevel},
      {3, kRtpExtensionAbsoluteSendTime},
      {5, kRtpExtensionTransportSequenceNumber},
      {7, kRtpExtensionVideoContentType},
      {8, kRtpExtensionVideoTiming}};
  std::unique_ptr<NetEqInput> input(new NetEqRtpDumpInput(
      webrtc::test::ResourcePath("audio_coding/neteq_universal_new", "rtp"),
      rtp_ext_map, absl::nullopt /*No SSRC filter*/));
  std::unique_ptr<TimeLimitedNetEqInput> input_time_limit(
      new TimeLimitedNetEqInput(std::move(input), 20000));
  std::unique_ptr<AudioSink> output(new VoidAudioSink);
  NetEqTest::Callbacks callbacks;
  NetEqTest test(config, CreateBuiltinAudioDecoderFactory(), codecs,
                 /*text_log=*/nullptr, /*neteq_factory=*/nullptr,
                 /*input=*/std::move(input_time_limit), std::move(output),
                 callbacks);
  test.Run();
  const auto stats = test.SimulationStats();
  EXPECT_EQ(0, stats.accelerate_rate);
  EXPECT_EQ(0, stats.preemptive_rate);
}

}  // namespace test
}  // namespace webrtc