/*
 *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

/*
 * This file includes unit tests for NetEQ.
 */

#include "webrtc/modules/audio_coding/neteq/interface/neteq.h"

#include <math.h>
#include <stdlib.h>
#include <string.h>  // memset

#include <algorithm>
#include <set>
#include <string>
#include <vector>

#include "gflags/gflags.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/modules/audio_coding/neteq/tools/audio_loop.h"
#include "webrtc/modules/audio_coding/neteq/tools/rtp_file_source.h"
#include "webrtc/modules/audio_coding/codecs/pcm16b/include/pcm16b.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
#include "webrtc/test/testsupport/fileutils.h"
#include "webrtc/test/testsupport/gtest_disable.h"
#include "webrtc/typedefs.h"

DEFINE_bool(gen_ref, false, "Generate reference files.");
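// When run with --gen_ref, the bit-exactness test below writes new reference
// files instead of comparing against existing ones (see FLAGS_gen_ref in
// TestBitExactness). Example invocation, assuming the gtest binary built from
// this file is named |neteq_unittests|:
//   ./neteq_unittests --gtest_filter=NetEqDecodingTest.TestBitExactness \
//       --gen_ref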

namespace webrtc {

static bool IsAllZero(const int16_t* buf, int buf_length) {
  bool all_zero = true;
  for (int n = 0; n < buf_length && all_zero; ++n)
    all_zero = buf[n] == 0;
  return all_zero;
}

static bool IsAllNonZero(const int16_t* buf, int buf_length) {
  bool all_non_zero = true;
  for (int n = 0; n < buf_length && all_non_zero; ++n)
    all_non_zero = buf[n] != 0;
  return all_non_zero;
}

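// Helper for comparing test output against prerecorded reference files. A
// non-empty |output_file| makes results get written to that file, and a
// non-empty |input_file| makes them get read back and compared with gtest
// assertions. The bit-exactness tests below use this for audio samples as well
// as for network and RTCP statistics.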
class RefFiles {
 public:
  RefFiles(const std::string& input_file, const std::string& output_file);
  ~RefFiles();
  template<class T> void ProcessReference(const T& test_results);
  template<typename T, size_t n> void ProcessReference(
      const T (&test_results)[n],
      size_t length);
  template<typename T, size_t n> void WriteToFile(
      const T (&test_results)[n],
      size_t length);
  template<typename T, size_t n> void ReadFromFileAndCompare(
      const T (&test_results)[n],
      size_t length);
  void WriteToFile(const NetEqNetworkStatistics& stats);
  void ReadFromFileAndCompare(const NetEqNetworkStatistics& stats);
  void WriteToFile(const RtcpStatistics& stats);
  void ReadFromFileAndCompare(const RtcpStatistics& stats);

  FILE* input_fp_;
  FILE* output_fp_;
};

RefFiles::RefFiles(const std::string &input_file,
                   const std::string &output_file)
    : input_fp_(NULL),
      output_fp_(NULL) {
  if (!input_file.empty()) {
    input_fp_ = fopen(input_file.c_str(), "rb");
    EXPECT_TRUE(input_fp_ != NULL);
  }
  if (!output_file.empty()) {
    output_fp_ = fopen(output_file.c_str(), "wb");
    EXPECT_TRUE(output_fp_ != NULL);
  }
}

RefFiles::~RefFiles() {
  if (input_fp_) {
    EXPECT_EQ(EOF, fgetc(input_fp_));  // Make sure that we reached the end.
    fclose(input_fp_);
  }
  if (output_fp_) fclose(output_fp_);
}

template<class T>
void RefFiles::ProcessReference(const T& test_results) {
  WriteToFile(test_results);
  ReadFromFileAndCompare(test_results);
}

template<typename T, size_t n>
void RefFiles::ProcessReference(const T (&test_results)[n], size_t length) {
  WriteToFile(test_results, length);
  ReadFromFileAndCompare(test_results, length);
}

template<typename T, size_t n>
void RefFiles::WriteToFile(const T (&test_results)[n], size_t length) {
  if (output_fp_) {
    ASSERT_EQ(length, fwrite(&test_results, sizeof(T), length, output_fp_));
  }
}

template<typename T, size_t n>
void RefFiles::ReadFromFileAndCompare(const T (&test_results)[n],
                                      size_t length) {
  if (input_fp_) {
    // Read from ref file.
    T* ref = new T[length];
    ASSERT_EQ(length, fread(ref, sizeof(T), length, input_fp_));
    // Compare.
    ASSERT_EQ(0, memcmp(&test_results, ref, sizeof(T) * length));
    delete [] ref;
  }
}

void RefFiles::WriteToFile(const NetEqNetworkStatistics& stats) {
  if (output_fp_) {
    ASSERT_EQ(1u, fwrite(&stats, sizeof(NetEqNetworkStatistics), 1,
                         output_fp_));
  }
}

void RefFiles::ReadFromFileAndCompare(
    const NetEqNetworkStatistics& stats) {
  if (input_fp_) {
    // Read from ref file.
    size_t stat_size = sizeof(NetEqNetworkStatistics);
    NetEqNetworkStatistics ref_stats;
    ASSERT_EQ(1u, fread(&ref_stats, stat_size, 1, input_fp_));
    // Compare.
    ASSERT_EQ(0, memcmp(&stats, &ref_stats, stat_size));
  }
}

void RefFiles::WriteToFile(const RtcpStatistics& stats) {
  if (output_fp_) {
    ASSERT_EQ(1u, fwrite(&(stats.fraction_lost), sizeof(stats.fraction_lost), 1,
                         output_fp_));
    ASSERT_EQ(1u, fwrite(&(stats.cumulative_lost),
                         sizeof(stats.cumulative_lost), 1, output_fp_));
    ASSERT_EQ(1u, fwrite(&(stats.extended_max_sequence_number),
                         sizeof(stats.extended_max_sequence_number), 1,
                         output_fp_));
    ASSERT_EQ(1u, fwrite(&(stats.jitter), sizeof(stats.jitter), 1,
                         output_fp_));
  }
}

void RefFiles::ReadFromFileAndCompare(
    const RtcpStatistics& stats) {
  if (input_fp_) {
    // Read from ref file.
    RtcpStatistics ref_stats;
    ASSERT_EQ(1u, fread(&(ref_stats.fraction_lost),
                        sizeof(ref_stats.fraction_lost), 1, input_fp_));
    ASSERT_EQ(1u, fread(&(ref_stats.cumulative_lost),
                        sizeof(ref_stats.cumulative_lost), 1, input_fp_));
    ASSERT_EQ(1u, fread(&(ref_stats.extended_max_sequence_number),
                        sizeof(ref_stats.extended_max_sequence_number), 1,
                        input_fp_));
    ASSERT_EQ(1u, fread(&(ref_stats.jitter), sizeof(ref_stats.jitter), 1,
                        input_fp_));
    // Compare.
    ASSERT_EQ(ref_stats.fraction_lost, stats.fraction_lost);
    ASSERT_EQ(ref_stats.cumulative_lost, stats.cumulative_lost);
    ASSERT_EQ(ref_stats.extended_max_sequence_number,
              stats.extended_max_sequence_number);
    ASSERT_EQ(ref_stats.jitter, stats.jitter);
  }
}

class NetEqDecodingTest : public ::testing::Test {
 protected:
  // NetEQ must be polled for data once every 10 ms. Thus, none of the
  // constants below can be changed.
  static const int kTimeStepMs = 10;
  static const int kBlockSize8kHz = kTimeStepMs * 8;
  static const int kBlockSize16kHz = kTimeStepMs * 16;
  static const int kBlockSize32kHz = kTimeStepMs * 32;
  static const int kMaxBlockSize = kBlockSize32kHz;
  static const int kInitSampleRateHz = 8000;

  NetEqDecodingTest();
  virtual void SetUp();
  virtual void TearDown();
  void SelectDecoders(NetEqDecoder* used_codec);
  void LoadDecoders();
  void OpenInputFile(const std::string &rtp_file);
  void Process(int* out_len);
  void DecodeAndCompare(const std::string& rtp_file,
                        const std::string& ref_file,
                        const std::string& stat_ref_file,
                        const std::string& rtcp_ref_file);
  static void PopulateRtpInfo(int frame_index,
                              int timestamp,
                              WebRtcRTPHeader* rtp_info);
  static void PopulateCng(int frame_index,
                          int timestamp,
                          WebRtcRTPHeader* rtp_info,
                          uint8_t* payload,
                          int* payload_len);

  void WrapTest(uint16_t start_seq_no, uint32_t start_timestamp,
                const std::set<uint16_t>& drop_seq_numbers,
                bool expect_seq_no_wrap, bool expect_timestamp_wrap);

  void LongCngWithClockDrift(double drift_factor,
                             double network_freeze_ms,
                             bool pull_audio_during_freeze,
                             int delay_tolerance_ms,
                             int max_time_to_speech_ms);

  void DuplicateCng();

  uint32_t PlayoutTimestamp();

  NetEq* neteq_;
  NetEq::Config config_;
  scoped_ptr<test::RtpFileSource> rtp_source_;
  scoped_ptr<test::Packet> packet_;
  unsigned int sim_clock_;
  int16_t out_data_[kMaxBlockSize];
  int output_sample_rate_;
  int algorithmic_delay_ms_;
};

// Allocating the static consts so that they can be passed by reference.
const int NetEqDecodingTest::kTimeStepMs;
const int NetEqDecodingTest::kBlockSize8kHz;
const int NetEqDecodingTest::kBlockSize16kHz;
const int NetEqDecodingTest::kBlockSize32kHz;
const int NetEqDecodingTest::kMaxBlockSize;
const int NetEqDecodingTest::kInitSampleRateHz;

NetEqDecodingTest::NetEqDecodingTest()
    : neteq_(NULL),
      config_(),
      sim_clock_(0),
      output_sample_rate_(kInitSampleRateHz),
      algorithmic_delay_ms_(0) {
  config_.sample_rate_hz = kInitSampleRateHz;
  memset(out_data_, 0, sizeof(out_data_));
}

void NetEqDecodingTest::SetUp() {
  neteq_ = NetEq::Create(config_);
  NetEqNetworkStatistics stat;
  ASSERT_EQ(0, neteq_->NetworkStatistics(&stat));
  algorithmic_delay_ms_ = stat.current_buffer_size_ms;
  ASSERT_TRUE(neteq_);
  LoadDecoders();
}

void NetEqDecodingTest::TearDown() {
  delete neteq_;
}

void NetEqDecodingTest::LoadDecoders() {
  // Load PCMu.
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderPCMu, 0));
  // Load PCMa.
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderPCMa, 8));
#ifndef WEBRTC_ANDROID
  // Load iLBC.
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderILBC, 102));
#endif  // WEBRTC_ANDROID
  // Load iSAC.
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderISAC, 103));
#ifndef WEBRTC_ANDROID
  // Load iSAC SWB.
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderISACswb, 104));
  // Load iSAC FB.
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderISACfb, 105));
#endif  // WEBRTC_ANDROID
  // Load PCM16B nb.
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderPCM16B, 93));
  // Load PCM16B wb.
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderPCM16Bwb, 94));
  // Load PCM16B swb32.
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderPCM16Bswb32kHz, 95));
  // Load CNG 8 kHz.
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderCNGnb, 13));
  // Load CNG 16 kHz.
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderCNGwb, 98));
}

void NetEqDecodingTest::OpenInputFile(const std::string &rtp_file) {
  rtp_source_.reset(test::RtpFileSource::Create(rtp_file));
}

void NetEqDecodingTest::Process(int* out_len) {
  // Check if time to receive.
  while (packet_ && sim_clock_ >= packet_->time_ms()) {
    if (packet_->payload_length_bytes() > 0) {
      WebRtcRTPHeader rtp_header;
      packet_->ConvertHeader(&rtp_header);
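      // The last argument to InsertPacket() below is the packet arrival time
      // converted from ms to samples at the current output sample rate.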
      ASSERT_EQ(0, neteq_->InsertPacket(
          rtp_header, packet_->payload(),
          packet_->payload_length_bytes(),
          packet_->time_ms() * (output_sample_rate_ / 1000)));
    }
    // Get next packet.
    packet_.reset(rtp_source_->NextPacket());
  }

  // Get audio from NetEq.
  NetEqOutputType type;
  int num_channels;
  ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, out_len,
                                &num_channels, &type));
  ASSERT_TRUE((*out_len == kBlockSize8kHz) ||
              (*out_len == kBlockSize16kHz) ||
              (*out_len == kBlockSize32kHz));
  output_sample_rate_ = *out_len / 10 * 1000;

  // Increase time.
  sim_clock_ += kTimeStepMs;
}

void NetEqDecodingTest::DecodeAndCompare(const std::string& rtp_file,
                                         const std::string& ref_file,
                                         const std::string& stat_ref_file,
                                         const std::string& rtcp_ref_file) {
  OpenInputFile(rtp_file);

  std::string ref_out_file = "";
  if (ref_file.empty()) {
    ref_out_file = webrtc::test::OutputPath() + "neteq_universal_ref.pcm";
  }
  RefFiles ref_files(ref_file, ref_out_file);

  std::string stat_out_file = "";
  if (stat_ref_file.empty()) {
    stat_out_file = webrtc::test::OutputPath() + "neteq_network_stats.dat";
  }
  RefFiles network_stat_files(stat_ref_file, stat_out_file);

  std::string rtcp_out_file = "";
  if (rtcp_ref_file.empty()) {
    rtcp_out_file = webrtc::test::OutputPath() + "neteq_rtcp_stats.dat";
  }
  RefFiles rtcp_stat_files(rtcp_ref_file, rtcp_out_file);

  packet_.reset(rtp_source_->NextPacket());
  int i = 0;
  while (packet_) {
    std::ostringstream ss;
    ss << "Lap number " << i++ << " in DecodeAndCompare while loop";
    SCOPED_TRACE(ss.str());  // Print out the parameter values on failure.
    int out_len = 0;
    ASSERT_NO_FATAL_FAILURE(Process(&out_len));
    ASSERT_NO_FATAL_FAILURE(ref_files.ProcessReference(out_data_, out_len));

    // Query the network statistics API once per second.
    if (sim_clock_ % 1000 == 0) {
      // Process NetworkStatistics.
      NetEqNetworkStatistics network_stats;
      ASSERT_EQ(0, neteq_->NetworkStatistics(&network_stats));
      ASSERT_NO_FATAL_FAILURE(
          network_stat_files.ProcessReference(network_stats));

      // Process RTCPstat.
      RtcpStatistics rtcp_stats;
      neteq_->GetRtcpStatistics(&rtcp_stats);
      ASSERT_NO_FATAL_FAILURE(rtcp_stat_files.ProcessReference(rtcp_stats));
    }
  }
}

void NetEqDecodingTest::PopulateRtpInfo(int frame_index,
                                        int timestamp,
                                        WebRtcRTPHeader* rtp_info) {
  rtp_info->header.sequenceNumber = frame_index;
  rtp_info->header.timestamp = timestamp;
  rtp_info->header.ssrc = 0x1234;  // Just an arbitrary SSRC.
  rtp_info->header.payloadType = 94;  // PCM16b WB codec.
  rtp_info->header.markerBit = 0;
}

void NetEqDecodingTest::PopulateCng(int frame_index,
                                    int timestamp,
                                    WebRtcRTPHeader* rtp_info,
                                    uint8_t* payload,
                                    int* payload_len) {
  rtp_info->header.sequenceNumber = frame_index;
  rtp_info->header.timestamp = timestamp;
  rtp_info->header.ssrc = 0x1234;  // Just an arbitrary SSRC.
  rtp_info->header.payloadType = 98;  // WB CNG.
  rtp_info->header.markerBit = 0;
  payload[0] = 64;  // Noise level -64 dBov, quite arbitrarily chosen.
  *payload_len = 1;  // Only noise level, no spectral parameters.
}

TEST_F(NetEqDecodingTest, DISABLED_ON_ANDROID(TestBitExactness)) {
  const std::string input_rtp_file = webrtc::test::ProjectRootPath() +
      "resources/audio_coding/neteq_universal_new.rtp";
  // Note that neteq4_universal_ref.pcm and neteq4_universal_ref_win_32.pcm
  // are identical. The latter could have been removed, but if clients still
  // have a copy of the file, the test will fail.
  const std::string input_ref_file =
      webrtc::test::ResourcePath("audio_coding/neteq4_universal_ref", "pcm");
#if defined(_MSC_VER) && (_MSC_VER >= 1700)
  // For Visual Studio 2012 and later, we will have to use the generic reference
  // file, rather than the windows-specific one.
  const std::string network_stat_ref_file = webrtc::test::ProjectRootPath() +
      "resources/audio_coding/neteq4_network_stats.dat";
#else
  const std::string network_stat_ref_file =
      webrtc::test::ResourcePath("audio_coding/neteq4_network_stats", "dat");
#endif
  const std::string rtcp_stat_ref_file =
      webrtc::test::ResourcePath("audio_coding/neteq4_rtcp_stats", "dat");

  if (FLAGS_gen_ref) {
    DecodeAndCompare(input_rtp_file, "", "", "");
  } else {
    DecodeAndCompare(input_rtp_file,
                     input_ref_file,
                     network_stat_ref_file,
                     rtcp_stat_ref_file);
  }
}

// Use fax mode to avoid time-scaling. This is to simplify the testing of
// packet waiting times in the packet buffer.
class NetEqDecodingTestFaxMode : public NetEqDecodingTest {
 protected:
  NetEqDecodingTestFaxMode() : NetEqDecodingTest() {
    config_.playout_mode = kPlayoutFax;
  }
};

TEST_F(NetEqDecodingTestFaxMode, TestFrameWaitingTimeStatistics) {
  // Insert 30 dummy packets at once. Each packet contains 10 ms 16 kHz audio.
  size_t num_frames = 30;
  const int kSamples = 10 * 16;
  const int kPayloadBytes = kSamples * 2;
  for (size_t i = 0; i < num_frames; ++i) {
    uint16_t payload[kSamples] = {0};
    WebRtcRTPHeader rtp_info;
    rtp_info.header.sequenceNumber = i;
    rtp_info.header.timestamp = i * kSamples;
    rtp_info.header.ssrc = 0x1234;  // Just an arbitrary SSRC.
    rtp_info.header.payloadType = 94;  // PCM16b WB codec.
    rtp_info.header.markerBit = 0;
    ASSERT_EQ(0, neteq_->InsertPacket(
        rtp_info,
        reinterpret_cast<uint8_t*>(payload),
        kPayloadBytes, 0));
  }
  // Pull out all data.
  for (size_t i = 0; i < num_frames; ++i) {
    int out_len;
    int num_channels;
    NetEqOutputType type;
    ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, &out_len,
                                  &num_channels, &type));
    ASSERT_EQ(kBlockSize16kHz, out_len);
  }

  std::vector<int> waiting_times;
  neteq_->WaitingTimes(&waiting_times);
  EXPECT_EQ(num_frames, waiting_times.size());
  // Since all frames are dumped into NetEQ at once, but pulled out with 10 ms
  // spacing (per definition), we expect the delay to increase by 10 ms for
  // each packet.
  for (size_t i = 0; i < waiting_times.size(); ++i) {
    EXPECT_EQ(static_cast<int>(i + 1) * 10, waiting_times[i]);
  }

  // Check the statistics again and make sure they have been reset.
  neteq_->WaitingTimes(&waiting_times);
  int len = waiting_times.size();
  EXPECT_EQ(0, len);

  // Process > 100 frames, and make sure that we get statistics
  // only for 100 frames. Note the new SSRC, causing NetEQ to reset.
  num_frames = 110;
  for (size_t i = 0; i < num_frames; ++i) {
    uint16_t payload[kSamples] = {0};
    WebRtcRTPHeader rtp_info;
    rtp_info.header.sequenceNumber = i;
    rtp_info.header.timestamp = i * kSamples;
    rtp_info.header.ssrc = 0x1235;  // Just an arbitrary SSRC.
    rtp_info.header.payloadType = 94;  // PCM16b WB codec.
    rtp_info.header.markerBit = 0;
    ASSERT_EQ(0, neteq_->InsertPacket(
        rtp_info,
        reinterpret_cast<uint8_t*>(payload),
        kPayloadBytes, 0));
    int out_len;
    int num_channels;
    NetEqOutputType type;
    ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, &out_len,
                                  &num_channels, &type));
    ASSERT_EQ(kBlockSize16kHz, out_len);
  }

  neteq_->WaitingTimes(&waiting_times);
  EXPECT_EQ(100u, waiting_times.size());
}

TEST_F(NetEqDecodingTest, TestAverageInterArrivalTimeNegative) {
  const int kNumFrames = 3000;  // Needed for convergence.
  int frame_index = 0;
  const int kSamples = 10 * 16;
  const int kPayloadBytes = kSamples * 2;
  while (frame_index < kNumFrames) {
    // Insert one packet each time, except every 10th time where we insert two
    // packets at once. This will create a negative clock-drift of approx. 10%.
    int num_packets = (frame_index % 10 == 0 ? 2 : 1);
    for (int n = 0; n < num_packets; ++n) {
      uint8_t payload[kPayloadBytes] = {0};
      WebRtcRTPHeader rtp_info;
      PopulateRtpInfo(frame_index, frame_index * kSamples, &rtp_info);
      ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes, 0));
      ++frame_index;
    }

    // Pull out data once.
    int out_len;
    int num_channels;
    NetEqOutputType type;
    ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, &out_len,
                                  &num_channels, &type));
    ASSERT_EQ(kBlockSize16kHz, out_len);
  }

  NetEqNetworkStatistics network_stats;
  ASSERT_EQ(0, neteq_->NetworkStatistics(&network_stats));
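  // Inserting two packets every 10th frame means roughly 10% more packets are
  // inserted than pulled, which NetEq reports as a negative drift of about
  // 10%, i.e. roughly -100000 parts per million.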
  EXPECT_EQ(-103196, network_stats.clockdrift_ppm);
}

TEST_F(NetEqDecodingTest, TestAverageInterArrivalTimePositive) {
  const int kNumFrames = 5000;  // Needed for convergence.
  int frame_index = 0;
  const int kSamples = 10 * 16;
  const int kPayloadBytes = kSamples * 2;
  for (int i = 0; i < kNumFrames; ++i) {
    // Insert one packet each time, except every 10th time where we don't
    // insert any packet. This will create a positive clock-drift of approx.
    // 11%.
    int num_packets = (i % 10 == 9 ? 0 : 1);
    for (int n = 0; n < num_packets; ++n) {
      uint8_t payload[kPayloadBytes] = {0};
      WebRtcRTPHeader rtp_info;
      PopulateRtpInfo(frame_index, frame_index * kSamples, &rtp_info);
      ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes, 0));
      ++frame_index;
    }

    // Pull out data once.
    int out_len;
    int num_channels;
    NetEqOutputType type;
    ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, &out_len,
                                  &num_channels, &type));
    ASSERT_EQ(kBlockSize16kHz, out_len);
  }

  NetEqNetworkStatistics network_stats;
  ASSERT_EQ(0, neteq_->NetworkStatistics(&network_stats));
  EXPECT_EQ(110946, network_stats.clockdrift_ppm);
}

void NetEqDecodingTest::LongCngWithClockDrift(double drift_factor,
                                              double network_freeze_ms,
                                              bool pull_audio_during_freeze,
                                              int delay_tolerance_ms,
                                              int max_time_to_speech_ms) {
  uint16_t seq_no = 0;
  uint32_t timestamp = 0;
  const int kFrameSizeMs = 30;
  const int kSamples = kFrameSizeMs * 16;
  const int kPayloadBytes = kSamples * 2;
  double next_input_time_ms = 0.0;
  double t_ms;
  int out_len;
  int num_channels;
  NetEqOutputType type;

  // Insert speech for 5 seconds.
  const int kSpeechDurationMs = 5000;
  for (t_ms = 0; t_ms < kSpeechDurationMs; t_ms += 10) {
    // Each turn in this for loop is 10 ms.
    while (next_input_time_ms <= t_ms) {
      // Insert one 30 ms speech frame.
      uint8_t payload[kPayloadBytes] = {0};
      WebRtcRTPHeader rtp_info;
      PopulateRtpInfo(seq_no, timestamp, &rtp_info);
      ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes, 0));
      ++seq_no;
      timestamp += kSamples;
      next_input_time_ms += static_cast<double>(kFrameSizeMs) * drift_factor;
    }
    // Pull out data once.
    ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, &out_len,
                                  &num_channels, &type));
    ASSERT_EQ(kBlockSize16kHz, out_len);
  }

  EXPECT_EQ(kOutputNormal, type);
  int32_t delay_before = timestamp - PlayoutTimestamp();

  // Insert CNG for 1 minute (= 60000 ms).
  const int kCngPeriodMs = 100;
  const int kCngPeriodSamples = kCngPeriodMs * 16;  // Period in 16 kHz samples.
  const int kCngDurationMs = 60000;
  for (; t_ms < kSpeechDurationMs + kCngDurationMs; t_ms += 10) {
    // Each turn in this for loop is 10 ms.
    while (next_input_time_ms <= t_ms) {
      // Insert one CNG frame each 100 ms.
      uint8_t payload[kPayloadBytes];
      int payload_len;
      WebRtcRTPHeader rtp_info;
      PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len);
      ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, payload_len, 0));
      ++seq_no;
      timestamp += kCngPeriodSamples;
      next_input_time_ms += static_cast<double>(kCngPeriodMs) * drift_factor;
    }
    // Pull out data once.
    ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, &out_len,
                                  &num_channels, &type));
    ASSERT_EQ(kBlockSize16kHz, out_len);
  }

  EXPECT_EQ(kOutputCNG, type);

  if (network_freeze_ms > 0) {
    // First keep pulling audio for |network_freeze_ms| without inserting
    // any data, then insert CNG data corresponding to |network_freeze_ms|
    // without pulling any output audio.
    const double loop_end_time = t_ms + network_freeze_ms;
    for (; t_ms < loop_end_time; t_ms += 10) {
      // Pull out data once.
      ASSERT_EQ(0,
                neteq_->GetAudio(
                    kMaxBlockSize, out_data_, &out_len, &num_channels, &type));
      ASSERT_EQ(kBlockSize16kHz, out_len);
      EXPECT_EQ(kOutputCNG, type);
    }
    bool pull_once = pull_audio_during_freeze;
    // If |pull_once| is true, GetAudio will be called once half-way through
    // the network recovery period.
    double pull_time_ms = (t_ms + next_input_time_ms) / 2;
    while (next_input_time_ms <= t_ms) {
      if (pull_once && next_input_time_ms >= pull_time_ms) {
        pull_once = false;
        // Pull out data once.
        ASSERT_EQ(
            0,
            neteq_->GetAudio(
                kMaxBlockSize, out_data_, &out_len, &num_channels, &type));
        ASSERT_EQ(kBlockSize16kHz, out_len);
        EXPECT_EQ(kOutputCNG, type);
        t_ms += 10;
      }
      // Insert one CNG frame each 100 ms.
      uint8_t payload[kPayloadBytes];
      int payload_len;
      WebRtcRTPHeader rtp_info;
      PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len);
      ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, payload_len, 0));
      ++seq_no;
      timestamp += kCngPeriodSamples;
      next_input_time_ms += kCngPeriodMs * drift_factor;
    }
  }

  // Insert speech again until output type is speech.
  double speech_restart_time_ms = t_ms;
  while (type != kOutputNormal) {
    // Each turn in this loop is 10 ms.
    while (next_input_time_ms <= t_ms) {
      // Insert one 30 ms speech frame.
      uint8_t payload[kPayloadBytes] = {0};
      WebRtcRTPHeader rtp_info;
      PopulateRtpInfo(seq_no, timestamp, &rtp_info);
      ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes, 0));
      ++seq_no;
      timestamp += kSamples;
      next_input_time_ms += kFrameSizeMs * drift_factor;
    }
    // Pull out data once.
    ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, &out_len,
                                  &num_channels, &type));
    ASSERT_EQ(kBlockSize16kHz, out_len);
    // Increase clock.
    t_ms += 10;
  }

  // Check that the speech starts again within a reasonable time.
  double time_until_speech_returns_ms = t_ms - speech_restart_time_ms;
  EXPECT_LT(time_until_speech_returns_ms, max_time_to_speech_ms);
  int32_t delay_after = timestamp - PlayoutTimestamp();
  // Compare delay before and after, and make sure it differs by no more than
  // |delay_tolerance_ms|. The delays are in timestamp units at 16 kHz, hence
  // the factor 16 when converting the tolerance from ms.
  EXPECT_LE(delay_after, delay_before + delay_tolerance_ms * 16);
  EXPECT_GE(delay_after, delay_before - delay_tolerance_ms * 16);
}

TEST_F(NetEqDecodingTest, LongCngWithNegativeClockDrift) {
  // Apply a clock drift of -25 ms / s (sender faster than receiver).
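  // With this factor, one 30 ms frame is inserted every 30 * 1000 / 1025
  // (about 29.3) ms of simulated time, i.e. the sender clock runs roughly
  // 2.5% fast relative to the receiver.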
  const double kDriftFactor = 1000.0 / (1000.0 + 25.0);
  const double kNetworkFreezeTimeMs = 0.0;
  const bool kGetAudioDuringFreezeRecovery = false;
  const int kDelayToleranceMs = 20;
  const int kMaxTimeToSpeechMs = 100;
  LongCngWithClockDrift(kDriftFactor,
                        kNetworkFreezeTimeMs,
                        kGetAudioDuringFreezeRecovery,
                        kDelayToleranceMs,
                        kMaxTimeToSpeechMs);
}

TEST_F(NetEqDecodingTest, LongCngWithPositiveClockDrift) {
  // Apply a clock drift of +25 ms / s (sender slower than receiver).
  const double kDriftFactor = 1000.0 / (1000.0 - 25.0);
  const double kNetworkFreezeTimeMs = 0.0;
  const bool kGetAudioDuringFreezeRecovery = false;
  const int kDelayToleranceMs = 20;
  const int kMaxTimeToSpeechMs = 100;
  LongCngWithClockDrift(kDriftFactor,
                        kNetworkFreezeTimeMs,
                        kGetAudioDuringFreezeRecovery,
                        kDelayToleranceMs,
                        kMaxTimeToSpeechMs);
}

TEST_F(NetEqDecodingTest, LongCngWithNegativeClockDriftNetworkFreeze) {
  // Apply a clock drift of -25 ms / s (sender faster than receiver).
  const double kDriftFactor = 1000.0 / (1000.0 + 25.0);
  const double kNetworkFreezeTimeMs = 5000.0;
  const bool kGetAudioDuringFreezeRecovery = false;
  const int kDelayToleranceMs = 50;
  const int kMaxTimeToSpeechMs = 200;
  LongCngWithClockDrift(kDriftFactor,
                        kNetworkFreezeTimeMs,
                        kGetAudioDuringFreezeRecovery,
                        kDelayToleranceMs,
                        kMaxTimeToSpeechMs);
}

TEST_F(NetEqDecodingTest, LongCngWithPositiveClockDriftNetworkFreeze) {
  // Apply a clock drift of +25 ms / s (sender slower than receiver).
  const double kDriftFactor = 1000.0 / (1000.0 - 25.0);
  const double kNetworkFreezeTimeMs = 5000.0;
  const bool kGetAudioDuringFreezeRecovery = false;
  const int kDelayToleranceMs = 20;
  const int kMaxTimeToSpeechMs = 100;
  LongCngWithClockDrift(kDriftFactor,
                        kNetworkFreezeTimeMs,
                        kGetAudioDuringFreezeRecovery,
                        kDelayToleranceMs,
                        kMaxTimeToSpeechMs);
}

TEST_F(NetEqDecodingTest, LongCngWithPositiveClockDriftNetworkFreezeExtraPull) {
  // Apply a clock drift of +25 ms / s (sender slower than receiver).
  const double kDriftFactor = 1000.0 / (1000.0 - 25.0);
  const double kNetworkFreezeTimeMs = 5000.0;
  const bool kGetAudioDuringFreezeRecovery = true;
  const int kDelayToleranceMs = 20;
  const int kMaxTimeToSpeechMs = 100;
  LongCngWithClockDrift(kDriftFactor,
                        kNetworkFreezeTimeMs,
                        kGetAudioDuringFreezeRecovery,
                        kDelayToleranceMs,
                        kMaxTimeToSpeechMs);
}

TEST_F(NetEqDecodingTest, LongCngWithoutClockDrift) {
  const double kDriftFactor = 1.0;  // No drift.
  const double kNetworkFreezeTimeMs = 0.0;
  const bool kGetAudioDuringFreezeRecovery = false;
  const int kDelayToleranceMs = 10;
  const int kMaxTimeToSpeechMs = 50;
  LongCngWithClockDrift(kDriftFactor,
                        kNetworkFreezeTimeMs,
                        kGetAudioDuringFreezeRecovery,
                        kDelayToleranceMs,
                        kMaxTimeToSpeechMs);
}

TEST_F(NetEqDecodingTest, UnknownPayloadType) {
  const int kPayloadBytes = 100;
  uint8_t payload[kPayloadBytes] = {0};
  WebRtcRTPHeader rtp_info;
  PopulateRtpInfo(0, 0, &rtp_info);
  rtp_info.header.payloadType = 1;  // Not registered as a decoder.
  EXPECT_EQ(NetEq::kFail,
            neteq_->InsertPacket(rtp_info, payload, kPayloadBytes, 0));
  EXPECT_EQ(NetEq::kUnknownRtpPayloadType, neteq_->LastError());
}

TEST_F(NetEqDecodingTest, DISABLED_ON_ANDROID(DecoderError)) {
  const int kPayloadBytes = 100;
  uint8_t payload[kPayloadBytes] = {0};
  WebRtcRTPHeader rtp_info;
  PopulateRtpInfo(0, 0, &rtp_info);
  rtp_info.header.payloadType = 103;  // iSAC, but the payload is invalid.
  EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes, 0));
  NetEqOutputType type;
  // Set all of |out_data_| to 1, and verify that it was set to 0 by the call
  // to GetAudio.
  for (int i = 0; i < kMaxBlockSize; ++i) {
    out_data_[i] = 1;
  }
  int num_channels;
  int samples_per_channel;
  EXPECT_EQ(NetEq::kFail,
            neteq_->GetAudio(kMaxBlockSize, out_data_,
                             &samples_per_channel, &num_channels, &type));
  // Verify that there is a decoder error to check.
  EXPECT_EQ(NetEq::kDecoderErrorCode, neteq_->LastError());
  // Code 6730 is an iSAC error code.
  EXPECT_EQ(6730, neteq_->LastDecoderError());
  // Verify that the first 160 samples are set to 0, and that the remaining
  // samples are left unmodified.
  static const int kExpectedOutputLength = 160;  // 10 ms at 16 kHz sample rate.
  for (int i = 0; i < kExpectedOutputLength; ++i) {
    std::ostringstream ss;
    ss << "i = " << i;
    SCOPED_TRACE(ss.str());  // Print out the parameter values on failure.
    EXPECT_EQ(0, out_data_[i]);
  }
  for (int i = kExpectedOutputLength; i < kMaxBlockSize; ++i) {
    std::ostringstream ss;
    ss << "i = " << i;
    SCOPED_TRACE(ss.str());  // Print out the parameter values on failure.
    EXPECT_EQ(1, out_data_[i]);
  }
}

TEST_F(NetEqDecodingTest, GetAudioBeforeInsertPacket) {
  NetEqOutputType type;
  // Set all of |out_data_| to 1, and verify that it was set to 0 by the call
  // to GetAudio.
  for (int i = 0; i < kMaxBlockSize; ++i) {
    out_data_[i] = 1;
  }
  int num_channels;
  int samples_per_channel;
  EXPECT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_,
                                &samples_per_channel,
                                &num_channels, &type));
  // Verify that the first block of samples is set to 0.
  static const int kExpectedOutputLength =
      kInitSampleRateHz / 100;  // 10 ms at initial sample rate.
  for (int i = 0; i < kExpectedOutputLength; ++i) {
    std::ostringstream ss;
    ss << "i = " << i;
    SCOPED_TRACE(ss.str());  // Print out the parameter values on failure.
    EXPECT_EQ(0, out_data_[i]);
  }
}

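// Checks how NetEq handles background noise after packet loss for the
// different |background_noise_mode| settings. CheckBgn() first feeds PCM16
// packets so that normal decoding gets going, then keeps pulling audio without
// inserting any further packets, forcing NetEq through PLC and eventually
// PLC-to-CNG, and finally lets the subclasses below verify whether the
// generated noise is kept, muted, or faded out.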
class NetEqBgnTest : public NetEqDecodingTest {
 protected:
  virtual void TestCondition(double sum_squared_noise,
                             bool should_be_faded) = 0;

  void CheckBgn(int sampling_rate_hz) {
    int expected_samples_per_channel = 0;
    uint8_t payload_type = 0xFF;  // Invalid.
    if (sampling_rate_hz == 8000) {
      expected_samples_per_channel = kBlockSize8kHz;
      payload_type = 93;  // PCM 16, 8 kHz.
    } else if (sampling_rate_hz == 16000) {
      expected_samples_per_channel = kBlockSize16kHz;
      payload_type = 94;  // PCM 16, 16 kHz.
    } else if (sampling_rate_hz == 32000) {
      expected_samples_per_channel = kBlockSize32kHz;
      payload_type = 95;  // PCM 16, 32 kHz.
    } else {
      ASSERT_TRUE(false);  // Unsupported test case.
    }

    NetEqOutputType type;
    int16_t output[kBlockSize32kHz];  // Maximum size is chosen.
    test::AudioLoop input;
    // We are using the same 32 kHz input file for all tests, regardless of
    // |sampling_rate_hz|. The output may sound weird, but the test is still
    // valid.
    ASSERT_TRUE(input.Init(
        webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm"),
        10 * sampling_rate_hz,  // Max 10 seconds loop length.
        expected_samples_per_channel));

    // Payload of 10 ms of PCM16 32 kHz.
    uint8_t payload[kBlockSize32kHz * sizeof(int16_t)];
    WebRtcRTPHeader rtp_info;
    PopulateRtpInfo(0, 0, &rtp_info);
    rtp_info.header.payloadType = payload_type;

    int number_channels = 0;
    int samples_per_channel = 0;

    uint32_t receive_timestamp = 0;
    for (int n = 0; n < 10; ++n) {  // Insert a few packets and get audio.
      int enc_len_bytes =
          WebRtcPcm16b_EncodeW16(input.GetNextBlock(),
                                 expected_samples_per_channel,
                                 reinterpret_cast<int16_t*>(payload));
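      // PCM16 produces two bytes per sample, so the encoded length must be
      // exactly twice the number of samples in the block.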
      ASSERT_EQ(enc_len_bytes, expected_samples_per_channel * 2);

      number_channels = 0;
      samples_per_channel = 0;
      ASSERT_EQ(0,
                neteq_->InsertPacket(
                    rtp_info, payload, enc_len_bytes, receive_timestamp));
      ASSERT_EQ(0,
                neteq_->GetAudio(kBlockSize32kHz,
                                 output,
                                 &samples_per_channel,
                                 &number_channels,
                                 &type));
      ASSERT_EQ(1, number_channels);
      ASSERT_EQ(expected_samples_per_channel, samples_per_channel);
      ASSERT_EQ(kOutputNormal, type);

      // Next packet.
      rtp_info.header.timestamp += expected_samples_per_channel;
      rtp_info.header.sequenceNumber++;
      receive_timestamp += expected_samples_per_channel;
    }

    number_channels = 0;
    samples_per_channel = 0;

    // Get audio without inserting packets, expecting PLC and PLC-to-CNG. Pull
    // one frame without checking speech-type. This is the first frame pulled
    // without inserting any packet, and might not be labeled as PLC.
    ASSERT_EQ(0,
              neteq_->GetAudio(kBlockSize32kHz,
                               output,
                               &samples_per_channel,
                               &number_channels,
                               &type));
    ASSERT_EQ(1, number_channels);
    ASSERT_EQ(expected_samples_per_channel, samples_per_channel);

    // To be able to test the fading of background noise we need to pull at
    // least 611 frames (roughly 6 seconds of 10 ms frames).
    const int kFadingThreshold = 611;

    // Test several PLC-to-CNG frames for the expected behavior. The number 20
    // is arbitrary, but large enough to cover a sufficient number of frames.
    const int kNumPlcToCngTestFrames = 20;
    bool plc_to_cng = false;
    for (int n = 0; n < kFadingThreshold + kNumPlcToCngTestFrames; ++n) {
      number_channels = 0;
      samples_per_channel = 0;
      memset(output, 1, sizeof(output));  // Set to non-zero.
      ASSERT_EQ(0,
                neteq_->GetAudio(kBlockSize32kHz,
                                 output,
                                 &samples_per_channel,
                                 &number_channels,
                                 &type));
      ASSERT_EQ(1, number_channels);
      ASSERT_EQ(expected_samples_per_channel, samples_per_channel);
      if (type == kOutputPLCtoCNG) {
        plc_to_cng = true;
        double sum_squared = 0;
        for (int k = 0; k < number_channels * samples_per_channel; ++k)
          sum_squared += output[k] * output[k];
        TestCondition(sum_squared, n > kFadingThreshold);
      } else {
        EXPECT_EQ(kOutputPLC, type);
      }
    }
    EXPECT_TRUE(plc_to_cng);  // Just to be sure that PLC-to-CNG has occurred.
  }
};

class NetEqBgnTestOn : public NetEqBgnTest {
 protected:
  NetEqBgnTestOn() : NetEqBgnTest() {
    config_.background_noise_mode = NetEq::kBgnOn;
  }

  void TestCondition(double sum_squared_noise, bool /*should_be_faded*/) {
    EXPECT_NE(0, sum_squared_noise);
  }
};

class NetEqBgnTestOff : public NetEqBgnTest {
 protected:
  NetEqBgnTestOff() : NetEqBgnTest() {
    config_.background_noise_mode = NetEq::kBgnOff;
  }

  void TestCondition(double sum_squared_noise, bool /*should_be_faded*/) {
    EXPECT_EQ(0, sum_squared_noise);
  }
};

class NetEqBgnTestFade : public NetEqBgnTest {
 protected:
  NetEqBgnTestFade() : NetEqBgnTest() {
    config_.background_noise_mode = NetEq::kBgnFade;
  }

  void TestCondition(double sum_squared_noise, bool should_be_faded) {
    if (should_be_faded)
      EXPECT_EQ(0, sum_squared_noise);
  }
};

TEST_F(NetEqBgnTestOn, RunTest) {
  CheckBgn(8000);
  CheckBgn(16000);
  CheckBgn(32000);
}

TEST_F(NetEqBgnTestOff, RunTest) {
  CheckBgn(8000);
  CheckBgn(16000);
  CheckBgn(32000);
}

TEST_F(NetEqBgnTestFade, RunTest) {
  CheckBgn(8000);
  CheckBgn(16000);
  CheckBgn(32000);
}

TEST_F(NetEqDecodingTest, SyncPacketInsert) {
  WebRtcRTPHeader rtp_info;
  uint32_t receive_timestamp = 0;
  // For readability, use the following payload types instead of the defaults
  // of this test.
  uint8_t kPcm16WbPayloadType = 1;
  uint8_t kCngNbPayloadType = 2;
  uint8_t kCngWbPayloadType = 3;
  uint8_t kCngSwb32PayloadType = 4;
  uint8_t kCngSwb48PayloadType = 5;
  uint8_t kAvtPayloadType = 6;
  uint8_t kRedPayloadType = 7;
  uint8_t kIsacPayloadType = 9;  // Payload type 8 is already registered.

  // Register decoders.
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderPCM16Bwb,
                                           kPcm16WbPayloadType));
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderCNGnb, kCngNbPayloadType));
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderCNGwb, kCngWbPayloadType));
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderCNGswb32kHz,
                                           kCngSwb32PayloadType));
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderCNGswb48kHz,
                                           kCngSwb48PayloadType));
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderAVT, kAvtPayloadType));
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderRED, kRedPayloadType));
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderISAC, kIsacPayloadType));

  PopulateRtpInfo(0, 0, &rtp_info);
  rtp_info.header.payloadType = kPcm16WbPayloadType;

  // The first packet injected cannot be a sync packet.
  EXPECT_EQ(-1, neteq_->InsertSyncPacket(rtp_info, receive_timestamp));

  // Payload length of 10 ms PCM16 16 kHz.
  const int kPayloadBytes = kBlockSize16kHz * sizeof(int16_t);
  uint8_t payload[kPayloadBytes] = {0};
  ASSERT_EQ(0, neteq_->InsertPacket(
      rtp_info, payload, kPayloadBytes, receive_timestamp));

  // Next packet. Last packet contained 10 ms audio.
  rtp_info.header.sequenceNumber++;
  rtp_info.header.timestamp += kBlockSize16kHz;
  receive_timestamp += kBlockSize16kHz;

  // Unacceptable payload types: CNG, AVT (DTMF), RED.
  rtp_info.header.payloadType = kCngNbPayloadType;
  EXPECT_EQ(-1, neteq_->InsertSyncPacket(rtp_info, receive_timestamp));

  rtp_info.header.payloadType = kCngWbPayloadType;
  EXPECT_EQ(-1, neteq_->InsertSyncPacket(rtp_info, receive_timestamp));

  rtp_info.header.payloadType = kCngSwb32PayloadType;
  EXPECT_EQ(-1, neteq_->InsertSyncPacket(rtp_info, receive_timestamp));

  rtp_info.header.payloadType = kCngSwb48PayloadType;
  EXPECT_EQ(-1, neteq_->InsertSyncPacket(rtp_info, receive_timestamp));

  rtp_info.header.payloadType = kAvtPayloadType;
  EXPECT_EQ(-1, neteq_->InsertSyncPacket(rtp_info, receive_timestamp));

  rtp_info.header.payloadType = kRedPayloadType;
  EXPECT_EQ(-1, neteq_->InsertSyncPacket(rtp_info, receive_timestamp));

  // Change of codec cannot be initiated with a sync packet.
  rtp_info.header.payloadType = kIsacPayloadType;
  EXPECT_EQ(-1, neteq_->InsertSyncPacket(rtp_info, receive_timestamp));

  // Change of SSRC is not allowed with a sync packet.
  rtp_info.header.payloadType = kPcm16WbPayloadType;
  ++rtp_info.header.ssrc;
  EXPECT_EQ(-1, neteq_->InsertSyncPacket(rtp_info, receive_timestamp));

  --rtp_info.header.ssrc;
  EXPECT_EQ(0, neteq_->InsertSyncPacket(rtp_info, receive_timestamp));
}

// First insert several noise-like packets, then sync-packets. Decoding all
// packets should not produce errors, the statistics should not show any packet
// loss, and the sync-packets should decode to zero.
// TODO(turajs): This would be a better test with a reference NetEq: sync
// packets would be inserted into the "test" NetEq, all-zero payloads into the
// reference NetEq, and the outputs of the two compared.
TEST_F(NetEqDecodingTest, SyncPacketDecode) {
  WebRtcRTPHeader rtp_info;
  PopulateRtpInfo(0, 0, &rtp_info);
  const int kPayloadBytes = kBlockSize16kHz * sizeof(int16_t);
  uint8_t payload[kPayloadBytes];
  int16_t decoded[kBlockSize16kHz];
  int algorithmic_frame_delay = algorithmic_delay_ms_ / 10 + 1;
  for (int n = 0; n < kPayloadBytes; ++n) {
    payload[n] = (rand() & 0xF0) + 1;  // Non-zero random sequence.
  }
  // Insert some packets which decode to noise. We are not interested in
  // actual decoded values.
  NetEqOutputType output_type;
  int num_channels;
  int samples_per_channel;
  uint32_t receive_timestamp = 0;
  for (int n = 0; n < 100; ++n) {
    ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes,
                                      receive_timestamp));
    ASSERT_EQ(0, neteq_->GetAudio(kBlockSize16kHz, decoded,
                                  &samples_per_channel, &num_channels,
                                  &output_type));
    ASSERT_EQ(kBlockSize16kHz, samples_per_channel);
    ASSERT_EQ(1, num_channels);

    rtp_info.header.sequenceNumber++;
    rtp_info.header.timestamp += kBlockSize16kHz;
    receive_timestamp += kBlockSize16kHz;
  }
  const int kNumSyncPackets = 10;

  // Make sure that enough sync packets are inserted for us to be able to
  // conduct the test.
  ASSERT_GT(kNumSyncPackets, algorithmic_frame_delay);
  // Insert sync-packets, the decoded sequence should be all-zero.
  for (int n = 0; n < kNumSyncPackets; ++n) {
    ASSERT_EQ(0, neteq_->InsertSyncPacket(rtp_info, receive_timestamp));
    ASSERT_EQ(0, neteq_->GetAudio(kBlockSize16kHz, decoded,
                                  &samples_per_channel, &num_channels,
                                  &output_type));
    ASSERT_EQ(kBlockSize16kHz, samples_per_channel);
    ASSERT_EQ(1, num_channels);
    if (n > algorithmic_frame_delay) {
      EXPECT_TRUE(IsAllZero(decoded, samples_per_channel * num_channels));
    }
    rtp_info.header.sequenceNumber++;
    rtp_info.header.timestamp += kBlockSize16kHz;
    receive_timestamp += kBlockSize16kHz;
  }

  // We insert regular packets; if the sync packets are not correctly buffered,
  // the network statistics would show some packet loss.
  for (int n = 0; n <= algorithmic_frame_delay + 10; ++n) {
    ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes,
                                      receive_timestamp));
    ASSERT_EQ(0, neteq_->GetAudio(kBlockSize16kHz, decoded,
                                  &samples_per_channel, &num_channels,
                                  &output_type));
    if (n >= algorithmic_frame_delay + 1) {
      // Expect that this frame contains samples from regular RTP.
      EXPECT_TRUE(IsAllNonZero(decoded, samples_per_channel * num_channels));
    }
    rtp_info.header.sequenceNumber++;
    rtp_info.header.timestamp += kBlockSize16kHz;
    receive_timestamp += kBlockSize16kHz;
  }
  NetEqNetworkStatistics network_stats;
  ASSERT_EQ(0, neteq_->NetworkStatistics(&network_stats));
  // Expecting a "clean" network.
  EXPECT_EQ(0, network_stats.packet_loss_rate);
  EXPECT_EQ(0, network_stats.expand_rate);
  EXPECT_EQ(0, network_stats.accelerate_rate);
  EXPECT_LE(network_stats.preemptive_rate, 150);
}

// Test that the size of the packet buffer is reported correctly when it
// contains sync packets. Also test that network packets override sync packets,
// i.e. that decoding a network packet is preferred over a sync packet when
// both have the same sequence number and timestamp.
TEST_F(NetEqDecodingTest, SyncPacketBufferSizeAndOverridenByNetworkPackets) {
turaj@webrtc.org7b75ac62013-09-26 00:27:56 +00001205 WebRtcRTPHeader rtp_info;
1206 PopulateRtpInfo(0, 0, &rtp_info);
1207 const int kPayloadBytes = kBlockSize16kHz * sizeof(int16_t);
1208 uint8_t payload[kPayloadBytes];
1209 int16_t decoded[kBlockSize16kHz];
1210 for (int n = 0; n < kPayloadBytes; ++n) {
1211 payload[n] = (rand() & 0xF0) + 1; // Non-zero random sequence.
1212 }
1213 // Insert some packets which decode to noise. We are not interested in
1214 // actual decoded values.
1215 NetEqOutputType output_type;
1216 int num_channels;
1217 int samples_per_channel;
1218 uint32_t receive_timestamp = 0;
turaj@webrtc.org8d1cdaa2014-04-11 18:47:55 +00001219 int algorithmic_frame_delay = algorithmic_delay_ms_ / 10 + 1;
1220 for (int n = 0; n < algorithmic_frame_delay; ++n) {
turaj@webrtc.org7b75ac62013-09-26 00:27:56 +00001221 ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes,
1222 receive_timestamp));
1223 ASSERT_EQ(0, neteq_->GetAudio(kBlockSize16kHz, decoded,
1224 &samples_per_channel, &num_channels,
1225 &output_type));
1226 ASSERT_EQ(kBlockSize16kHz, samples_per_channel);
1227 ASSERT_EQ(1, num_channels);
1228 rtp_info.header.sequenceNumber++;
1229 rtp_info.header.timestamp += kBlockSize16kHz;
1230 receive_timestamp += kBlockSize16kHz;
1231 }
1232 const int kNumSyncPackets = 10;
1233
1234 WebRtcRTPHeader first_sync_packet_rtp_info;
1235 memcpy(&first_sync_packet_rtp_info, &rtp_info, sizeof(rtp_info));
1236
1237 // Insert sync packets, without pulling any audio in between.
1238 for (int n = 0; n < kNumSyncPackets; ++n) {
1239 ASSERT_EQ(0, neteq_->InsertSyncPacket(rtp_info, receive_timestamp));
1240 rtp_info.header.sequenceNumber++;
1241 rtp_info.header.timestamp += kBlockSize16kHz;
1242 receive_timestamp += kBlockSize16kHz;
1243 }
1244 NetEqNetworkStatistics network_stats;
1245 ASSERT_EQ(0, neteq_->NetworkStatistics(&network_stats));
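  // Each sync packet covers one 10 ms block, so the buffer should report
  // kNumSyncPackets * 10 ms plus the algorithmic delay.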
turaj@webrtc.org8d1cdaa2014-04-11 18:47:55 +00001246 EXPECT_EQ(kNumSyncPackets * 10 + algorithmic_delay_ms_,
1247 network_stats.current_buffer_size_ms);
turaj@webrtc.org7b75ac62013-09-26 00:27:56 +00001248
1249 // Rewind |rtp_info| to that of the first sync packet.
1250 memcpy(&rtp_info, &first_sync_packet_rtp_info, sizeof(rtp_info));
1251
1252 // Insert regular packets matching the sync packets already in the buffer.
1253 for (int n = 0; n < kNumSyncPackets; ++n) {
1254 ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes,
1255 receive_timestamp));
1256 rtp_info.header.sequenceNumber++;
1257 rtp_info.header.timestamp += kBlockSize16kHz;
1258 receive_timestamp += kBlockSize16kHz;
1259 }
1260
1261 // Decode; the regular packets should have overridden the sync packets.
1262 for (int n = 0; n < kNumSyncPackets; ++n) {
1263 ASSERT_EQ(0, neteq_->GetAudio(kBlockSize16kHz, decoded,
1264 &samples_per_channel, &num_channels,
1265 &output_type));
1266 ASSERT_EQ(kBlockSize16kHz, samples_per_channel);
1267 ASSERT_EQ(1, num_channels);
1268 EXPECT_TRUE(IsAllNonZero(decoded, samples_per_channel * num_channels));
1269 }
1270}
1271
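// Feeds NetEq 30 ms speech frames for two seconds, optionally dropping the
// sequence numbers listed in |drop_seq_numbers|, and verifies that buffering
// and playout delay stay bounded while the sequence number and/or timestamp
// wrap around.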
turaj@webrtc.org78b41a02013-11-22 20:27:07 +00001272void NetEqDecodingTest::WrapTest(uint16_t start_seq_no,
1273 uint32_t start_timestamp,
1274 const std::set<uint16_t>& drop_seq_numbers,
1275 bool expect_seq_no_wrap,
1276 bool expect_timestamp_wrap) {
1277 uint16_t seq_no = start_seq_no;
1278 uint32_t timestamp = start_timestamp;
1279 const int kBlocksPerFrame = 3; // Number of 10 ms blocks per frame.
1280 const int kFrameSizeMs = kBlocksPerFrame * kTimeStepMs;
1281 const int kSamples = kBlockSize16kHz * kBlocksPerFrame;
1282 const int kPayloadBytes = kSamples * sizeof(int16_t);
1283 double next_input_time_ms = 0.0;
1284 int16_t decoded[kBlockSize16kHz];
1285 int num_channels;
1286 int samples_per_channel;
1287 NetEqOutputType output_type;
1288 uint32_t receive_timestamp = 0;
1289
henrik.lundin@webrtc.orgca8cb952014-03-12 10:26:52 +00001290 // Insert speech for 2 seconds.
turaj@webrtc.org78b41a02013-11-22 20:27:07 +00001291 const int kSpeechDurationMs = 2000;
1292 int packets_inserted = 0;
1293 uint16_t last_seq_no = start_seq_no;
1294 uint32_t last_timestamp = start_timestamp;
1295 bool timestamp_wrapped = false;
1296 bool seq_no_wrapped = false;
1297 for (double t_ms = 0; t_ms < kSpeechDurationMs; t_ms += 10) {
1298 // Each turn in this for loop is 10 ms.
1299 while (next_input_time_ms <= t_ms) {
1300 // Insert one 30 ms speech frame.
1301 uint8_t payload[kPayloadBytes] = {0};
1302 WebRtcRTPHeader rtp_info;
1303 PopulateRtpInfo(seq_no, timestamp, &rtp_info);
1304 if (drop_seq_numbers.find(seq_no) == drop_seq_numbers.end()) {
1305 // This sequence number was not in the set to drop. Insert it.
1306 ASSERT_EQ(0,
1307 neteq_->InsertPacket(rtp_info, payload, kPayloadBytes,
1308 receive_timestamp));
1309 ++packets_inserted;
1310 }
1311 NetEqNetworkStatistics network_stats;
1312 ASSERT_EQ(0, neteq_->NetworkStatistics(&network_stats));
1313
1314 // Due to internal NetEq logic, the preferred buffer size is about 4 times the
1315 // packet size for the first few packets. Therefore we refrain from checking
1316 // the criterion until enough packets have been inserted.
1317 if (packets_inserted > 4) {
1318 // Expect preferred and actual buffer size to be no more than 2 frames.
1319 EXPECT_LE(network_stats.preferred_buffer_size_ms, kFrameSizeMs * 2);
turaj@webrtc.org8d1cdaa2014-04-11 18:47:55 +00001320 EXPECT_LE(network_stats.current_buffer_size_ms, kFrameSizeMs * 2 +
1321 algorithmic_delay_ms_);
turaj@webrtc.org78b41a02013-11-22 20:27:07 +00001322 }
1323 last_seq_no = seq_no;
1324 last_timestamp = timestamp;
1325
1326 ++seq_no;
1327 timestamp += kSamples;
1328 receive_timestamp += kSamples;
1329 next_input_time_ms += static_cast<double>(kFrameSizeMs);
1330
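      // With unsigned arithmetic, a wrap-around shows up as the new value
      // being smaller than the previous one.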
1331 seq_no_wrapped |= seq_no < last_seq_no;
1332 timestamp_wrapped |= timestamp < last_timestamp;
1333 }
1334 // Pull out data once.
1335 ASSERT_EQ(0, neteq_->GetAudio(kBlockSize16kHz, decoded,
1336 &samples_per_channel, &num_channels,
1337 &output_type));
1338 ASSERT_EQ(kBlockSize16kHz, samples_per_channel);
1339 ASSERT_EQ(1, num_channels);
1340
1341 // Expect the delay (in samples) to be at most 2 packets.
wu@webrtc.org94454b72014-06-05 20:34:08 +00001342 EXPECT_LE(timestamp - PlayoutTimestamp(),
turaj@webrtc.org78b41a02013-11-22 20:27:07 +00001343 static_cast<uint32_t>(kSamples * 2));
turaj@webrtc.org78b41a02013-11-22 20:27:07 +00001344 }
1345 // Make sure we have actually tested wrap-around.
1346 ASSERT_EQ(expect_seq_no_wrap, seq_no_wrapped);
1347 ASSERT_EQ(expect_timestamp_wrap, timestamp_wrapped);
1348}
1349
1350TEST_F(NetEqDecodingTest, SequenceNumberWrap) {
1351 // Start with a sequence number that will soon wrap.
1352 std::set<uint16_t> drop_seq_numbers; // Don't drop any packets.
1353 WrapTest(0xFFFF - 10, 0, drop_seq_numbers, true, false);
1354}
1355
1356TEST_F(NetEqDecodingTest, SequenceNumberWrapAndDrop) {
1357 // Start with a sequence number that will soon wrap.
1358 std::set<uint16_t> drop_seq_numbers;
1359 drop_seq_numbers.insert(0xFFFF);
1360 drop_seq_numbers.insert(0x0);
1361 WrapTest(0xFFFF - 10, 0, drop_seq_numbers, true, false);
1362}
1363
1364TEST_F(NetEqDecodingTest, TimestampWrap) {
1365 // Start with a timestamp that will soon wrap.
1366 std::set<uint16_t> drop_seq_numbers;
1367 WrapTest(0, 0xFFFFFFFF - 3000, drop_seq_numbers, false, true);
1368}
1369
1370TEST_F(NetEqDecodingTest, TimestampAndSequenceNumberWrap) {
1371 // Start with a timestamp and a sequence number that will wrap at the same
1372 // time.
1373 std::set<uint16_t> drop_seq_numbers;
1374 WrapTest(0xFFFF - 10, 0xFFFFFFFF - 5000, drop_seq_numbers, true, true);
1375}
1376
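// Inserts a few speech packets, then the same CNG packet twice (the second
// copy arriving after the first has already been decoded), and verifies that
// the duplicate is discarded: the output stays comfort noise with the expected
// playout timestamp for the full CNG period, after which speech resumes.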
henrik.lundin@webrtc.orgca8cb952014-03-12 10:26:52 +00001377void NetEqDecodingTest::DuplicateCng() {
1378 uint16_t seq_no = 0;
1379 uint32_t timestamp = 0;
1380 const int kFrameSizeMs = 10;
1381 const int kSampleRateKhz = 16;
1382 const int kSamples = kFrameSizeMs * kSampleRateKhz;
1383 const int kPayloadBytes = kSamples * 2;
1384
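  // Algorithmic delay in samples at 16 kHz, bounded below by 5/8 ms
  // (10 samples), which presumably matches NetEq's minimum lookahead.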
turaj@webrtc.org8d1cdaa2014-04-11 18:47:55 +00001385 const int algorithmic_delay_samples = std::max(
1386 algorithmic_delay_ms_ * kSampleRateKhz, 5 * kSampleRateKhz / 8);
henrik.lundin@webrtc.orgca8cb952014-03-12 10:26:52 +00001387 // Insert three speech packets. Three are needed to get the frame length
1388 // correct.
1389 int out_len;
1390 int num_channels;
1391 NetEqOutputType type;
1392 uint8_t payload[kPayloadBytes] = {0};
1393 WebRtcRTPHeader rtp_info;
1394 for (int i = 0; i < 3; ++i) {
1395 PopulateRtpInfo(seq_no, timestamp, &rtp_info);
1396 ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes, 0));
1397 ++seq_no;
1398 timestamp += kSamples;
1399
1400 // Pull audio once.
1401 ASSERT_EQ(0,
1402 neteq_->GetAudio(
1403 kMaxBlockSize, out_data_, &out_len, &num_channels, &type));
1404 ASSERT_EQ(kBlockSize16kHz, out_len);
1405 }
1406 // Verify speech output.
1407 EXPECT_EQ(kOutputNormal, type);
1408
1409 // Insert the same CNG packet twice.
1410 const int kCngPeriodMs = 100;
1411 const int kCngPeriodSamples = kCngPeriodMs * kSampleRateKhz;
1412 int payload_len;
1413 PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len);
1414 // This is the first time this CNG packet is inserted.
1415 ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, payload_len, 0));
1416
1417 // Pull audio once and make sure CNG is played.
1418 ASSERT_EQ(0,
1419 neteq_->GetAudio(
1420 kMaxBlockSize, out_data_, &out_len, &num_channels, &type));
1421 ASSERT_EQ(kBlockSize16kHz, out_len);
1422 EXPECT_EQ(kOutputCNG, type);
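  // The playout timestamp should lag the timestamp of the inserted CNG packet
  // by exactly the algorithmic delay.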
wu@webrtc.org94454b72014-06-05 20:34:08 +00001423 EXPECT_EQ(timestamp - algorithmic_delay_samples, PlayoutTimestamp());
henrik.lundin@webrtc.orgca8cb952014-03-12 10:26:52 +00001424
1425 // Insert the same CNG packet again. Note that at this point it is old, since
1426 // we have already decoded the first copy of it.
1427 ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, payload_len, 0));
1428
1429 // Pull audio until we have played |kCngPeriodMs| of CNG. Start at 10 ms since
1430 // we have already pulled out CNG once.
1431 for (int cng_time_ms = 10; cng_time_ms < kCngPeriodMs; cng_time_ms += 10) {
1432 ASSERT_EQ(0,
1433 neteq_->GetAudio(
1434 kMaxBlockSize, out_data_, &out_len, &num_channels, &type));
1435 ASSERT_EQ(kBlockSize16kHz, out_len);
1436 EXPECT_EQ(kOutputCNG, type);
turaj@webrtc.org8d1cdaa2014-04-11 18:47:55 +00001437 EXPECT_EQ(timestamp - algorithmic_delay_samples,
wu@webrtc.org94454b72014-06-05 20:34:08 +00001438 PlayoutTimestamp());
henrik.lundin@webrtc.orgca8cb952014-03-12 10:26:52 +00001439 }
1440
1441 // Insert speech again.
1442 ++seq_no;
1443 timestamp += kCngPeriodSamples;
1444 PopulateRtpInfo(seq_no, timestamp, &rtp_info);
1445 ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes, 0));
1446
1447 // Pull audio once and verify that the output is speech again.
1448 ASSERT_EQ(0,
1449 neteq_->GetAudio(
1450 kMaxBlockSize, out_data_, &out_len, &num_channels, &type));
1451 ASSERT_EQ(kBlockSize16kHz, out_len);
1452 EXPECT_EQ(kOutputNormal, type);
turaj@webrtc.org8d1cdaa2014-04-11 18:47:55 +00001453 EXPECT_EQ(timestamp + kSamples - algorithmic_delay_samples,
wu@webrtc.org94454b72014-06-05 20:34:08 +00001454 PlayoutTimestamp());
1455}
1456
1457uint32_t NetEqDecodingTest::PlayoutTimestamp() {
1458 uint32_t playout_timestamp = 0;
1459 EXPECT_TRUE(neteq_->GetPlayoutTimestamp(&playout_timestamp));
1460 return playout_timestamp;
henrik.lundin@webrtc.orgca8cb952014-03-12 10:26:52 +00001461}
1462
1463TEST_F(NetEqDecodingTest, DiscardDuplicateCng) { DuplicateCng(); }
henrik.lundin@webrtc.orge7ce4372014-01-09 14:01:55 +00001464} // namespace webrtc