/*
 *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

/*
 * This file includes unit tests for NetEQ.
 */

#include "webrtc/modules/audio_coding/neteq4/interface/neteq.h"

#include <math.h>
#include <stdlib.h>
#include <string.h>  // memset

#include <algorithm>
#include <set>
#include <sstream>  // std::ostringstream, used by several tests below.
#include <string>
#include <vector>

#include "gflags/gflags.h"
#include "gtest/gtest.h"
#include "webrtc/modules/audio_coding/neteq4/test/NETEQTEST_RTPpacket.h"
#include "webrtc/modules/audio_coding/codecs/pcm16b/include/pcm16b.h"
#include "webrtc/test/testsupport/fileutils.h"
#include "webrtc/test/testsupport/gtest_disable.h"
#include "webrtc/typedefs.h"

DEFINE_bool(gen_ref, false, "Generate reference files.");

namespace webrtc {

static bool IsAllZero(const int16_t* buf, int buf_length) {
  bool all_zero = true;
  for (int n = 0; n < buf_length && all_zero; ++n)
    all_zero = buf[n] == 0;
  return all_zero;
}

static bool IsAllNonZero(const int16_t* buf, int buf_length) {
  bool all_non_zero = true;
  for (int n = 0; n < buf_length && all_non_zero; ++n)
    all_non_zero = buf[n] != 0;
  return all_non_zero;
}

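// Helper class for the bit-exactness tests. Depending on which file names are
// non-empty, it writes the test results to a new reference file and/or reads
// a previously generated reference file and compares the results against it.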
class RefFiles {
 public:
  RefFiles(const std::string& input_file, const std::string& output_file);
  ~RefFiles();
  template<class T> void ProcessReference(const T& test_results);
  template<typename T, size_t n> void ProcessReference(
      const T (&test_results)[n],
      size_t length);
  template<typename T, size_t n> void WriteToFile(
      const T (&test_results)[n],
      size_t length);
  template<typename T, size_t n> void ReadFromFileAndCompare(
      const T (&test_results)[n],
      size_t length);
  void WriteToFile(const NetEqNetworkStatistics& stats);
  void ReadFromFileAndCompare(const NetEqNetworkStatistics& stats);
  void WriteToFile(const RtcpStatistics& stats);
  void ReadFromFileAndCompare(const RtcpStatistics& stats);

  FILE* input_fp_;
  FILE* output_fp_;
};

RefFiles::RefFiles(const std::string &input_file,
                   const std::string &output_file)
    : input_fp_(NULL),
      output_fp_(NULL) {
  if (!input_file.empty()) {
    input_fp_ = fopen(input_file.c_str(), "rb");
    EXPECT_TRUE(input_fp_ != NULL);
  }
  if (!output_file.empty()) {
    output_fp_ = fopen(output_file.c_str(), "wb");
    EXPECT_TRUE(output_fp_ != NULL);
  }
}

RefFiles::~RefFiles() {
  if (input_fp_) {
    EXPECT_EQ(EOF, fgetc(input_fp_));  // Make sure that we reached the end.
    fclose(input_fp_);
  }
  if (output_fp_) fclose(output_fp_);
}

template<class T>
void RefFiles::ProcessReference(const T& test_results) {
  WriteToFile(test_results);
  ReadFromFileAndCompare(test_results);
}

template<typename T, size_t n>
void RefFiles::ProcessReference(const T (&test_results)[n], size_t length) {
  WriteToFile(test_results, length);
  ReadFromFileAndCompare(test_results, length);
}

template<typename T, size_t n>
void RefFiles::WriteToFile(const T (&test_results)[n], size_t length) {
  if (output_fp_) {
    ASSERT_EQ(length, fwrite(&test_results, sizeof(T), length, output_fp_));
  }
}

template<typename T, size_t n>
void RefFiles::ReadFromFileAndCompare(const T (&test_results)[n],
                                      size_t length) {
  if (input_fp_) {
    // Read from ref file.
    T* ref = new T[length];
    ASSERT_EQ(length, fread(ref, sizeof(T), length, input_fp_));
    // Compare.
    ASSERT_EQ(0, memcmp(&test_results, ref, sizeof(T) * length));
    delete [] ref;
  }
}

void RefFiles::WriteToFile(const NetEqNetworkStatistics& stats) {
  if (output_fp_) {
    ASSERT_EQ(1u, fwrite(&stats, sizeof(NetEqNetworkStatistics), 1,
                         output_fp_));
  }
}

void RefFiles::ReadFromFileAndCompare(
    const NetEqNetworkStatistics& stats) {
  if (input_fp_) {
    // Read from ref file.
    size_t stat_size = sizeof(NetEqNetworkStatistics);
    NetEqNetworkStatistics ref_stats;
    ASSERT_EQ(1u, fread(&ref_stats, stat_size, 1, input_fp_));
    // Compare.
    EXPECT_EQ(0, memcmp(&stats, &ref_stats, stat_size));
  }
}

void RefFiles::WriteToFile(const RtcpStatistics& stats) {
  if (output_fp_) {
    ASSERT_EQ(1u, fwrite(&(stats.fraction_lost), sizeof(stats.fraction_lost), 1,
                         output_fp_));
    ASSERT_EQ(1u, fwrite(&(stats.cumulative_lost),
                         sizeof(stats.cumulative_lost), 1, output_fp_));
    ASSERT_EQ(1u, fwrite(&(stats.extended_max_sequence_number),
                         sizeof(stats.extended_max_sequence_number), 1,
                         output_fp_));
    ASSERT_EQ(1u, fwrite(&(stats.jitter), sizeof(stats.jitter), 1,
                         output_fp_));
  }
}

void RefFiles::ReadFromFileAndCompare(
    const RtcpStatistics& stats) {
  if (input_fp_) {
    // Read from ref file.
    RtcpStatistics ref_stats;
    ASSERT_EQ(1u, fread(&(ref_stats.fraction_lost),
                        sizeof(ref_stats.fraction_lost), 1, input_fp_));
    ASSERT_EQ(1u, fread(&(ref_stats.cumulative_lost),
                        sizeof(ref_stats.cumulative_lost), 1, input_fp_));
    ASSERT_EQ(1u, fread(&(ref_stats.extended_max_sequence_number),
                        sizeof(ref_stats.extended_max_sequence_number), 1,
                        input_fp_));
    ASSERT_EQ(1u, fread(&(ref_stats.jitter), sizeof(ref_stats.jitter), 1,
                        input_fp_));
    // Compare.
    EXPECT_EQ(ref_stats.fraction_lost, stats.fraction_lost);
    EXPECT_EQ(ref_stats.cumulative_lost, stats.cumulative_lost);
    EXPECT_EQ(ref_stats.extended_max_sequence_number,
              stats.extended_max_sequence_number);
    EXPECT_EQ(ref_stats.jitter, stats.jitter);
  }
}

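// Test fixture for the NetEq decoding tests. It creates a NetEq instance with
// a set of registered decoders, and provides helpers for inserting RTP
// packets and pulling 10 ms blocks of audio.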
class NetEqDecodingTest : public ::testing::Test {
 protected:
  // NetEQ must be polled for data once every 10 ms. Thus, none of the
  // constants below can be changed.
  static const int kTimeStepMs = 10;
  static const int kBlockSize8kHz = kTimeStepMs * 8;
  static const int kBlockSize16kHz = kTimeStepMs * 16;
  static const int kBlockSize32kHz = kTimeStepMs * 32;
  static const int kMaxBlockSize = kBlockSize32kHz;
  static const int kInitSampleRateHz = 8000;

  NetEqDecodingTest();
  virtual void SetUp();
  virtual void TearDown();
  void SelectDecoders(NetEqDecoder* used_codec);
  void LoadDecoders();
  void OpenInputFile(const std::string &rtp_file);
  void Process(NETEQTEST_RTPpacket* rtp_ptr, int* out_len);
  void DecodeAndCompare(const std::string &rtp_file,
                        const std::string &ref_file);
  void DecodeAndCheckStats(const std::string &rtp_file,
                           const std::string &stat_ref_file,
                           const std::string &rtcp_ref_file);
  static void PopulateRtpInfo(int frame_index,
                              int timestamp,
                              WebRtcRTPHeader* rtp_info);
  static void PopulateCng(int frame_index,
                          int timestamp,
                          WebRtcRTPHeader* rtp_info,
                          uint8_t* payload,
                          int* payload_len);

  void CheckBgnOff(int sampling_rate, NetEqBackgroundNoiseMode bgn_mode);

  void WrapTest(uint16_t start_seq_no, uint32_t start_timestamp,
                const std::set<uint16_t>& drop_seq_numbers,
                bool expect_seq_no_wrap, bool expect_timestamp_wrap);

  void LongCngWithClockDrift(double drift_factor,
                             double network_freeze_ms,
                             bool pull_audio_during_freeze,
                             int delay_tolerance_ms,
                             int max_time_to_speech_ms);

  void DuplicateCng();

  NetEq* neteq_;
  FILE* rtp_fp_;
  unsigned int sim_clock_;
  int16_t out_data_[kMaxBlockSize];
  int output_sample_rate_;
  int algorithmic_delay_ms_;
};

// Allocating the static consts so that they can be passed by reference.
const int NetEqDecodingTest::kTimeStepMs;
const int NetEqDecodingTest::kBlockSize8kHz;
const int NetEqDecodingTest::kBlockSize16kHz;
const int NetEqDecodingTest::kBlockSize32kHz;
const int NetEqDecodingTest::kMaxBlockSize;
const int NetEqDecodingTest::kInitSampleRateHz;

NetEqDecodingTest::NetEqDecodingTest()
    : neteq_(NULL),
      rtp_fp_(NULL),
      sim_clock_(0),
      output_sample_rate_(kInitSampleRateHz),
      algorithmic_delay_ms_(0) {
  memset(out_data_, 0, sizeof(out_data_));
}

void NetEqDecodingTest::SetUp() {
  neteq_ = NetEq::Create(kInitSampleRateHz);
  NetEqNetworkStatistics stat;
  ASSERT_EQ(0, neteq_->NetworkStatistics(&stat));
  algorithmic_delay_ms_ = stat.current_buffer_size_ms;
  ASSERT_TRUE(neteq_);
  LoadDecoders();
}

void NetEqDecodingTest::TearDown() {
  delete neteq_;
  if (rtp_fp_)
    fclose(rtp_fp_);
}

void NetEqDecodingTest::LoadDecoders() {
  // Load PCMu.
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderPCMu, 0));
  // Load PCMa.
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderPCMa, 8));
#ifndef WEBRTC_ANDROID
  // Load iLBC.
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderILBC, 102));
#endif  // WEBRTC_ANDROID
  // Load iSAC.
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderISAC, 103));
#ifndef WEBRTC_ANDROID
  // Load iSAC SWB.
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderISACswb, 104));
  // Load iSAC FB.
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderISACfb, 105));
#endif  // WEBRTC_ANDROID
  // Load PCM16B nb.
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderPCM16B, 93));
  // Load PCM16B wb.
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderPCM16Bwb, 94));
  // Load PCM16B swb32.
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderPCM16Bswb32kHz, 95));
  // Load CNG 8 kHz.
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderCNGnb, 13));
  // Load CNG 16 kHz.
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderCNGwb, 98));
}

void NetEqDecodingTest::OpenInputFile(const std::string &rtp_file) {
  rtp_fp_ = fopen(rtp_file.c_str(), "rb");
  ASSERT_TRUE(rtp_fp_ != NULL);
  ASSERT_EQ(0, NETEQTEST_RTPpacket::skipFileHeader(rtp_fp_));
}

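// Inserts all packets from the RTP file that are due at the current simulated
// time, pulls one 10 ms block of audio from NetEq, and advances the simulated
// clock.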
void NetEqDecodingTest::Process(NETEQTEST_RTPpacket* rtp, int* out_len) {
  // Check if time to receive.
  while ((sim_clock_ >= rtp->time()) &&
         (rtp->dataLen() >= 0)) {
    if (rtp->dataLen() > 0) {
      WebRtcRTPHeader rtpInfo;
      rtp->parseHeader(&rtpInfo);
      ASSERT_EQ(0, neteq_->InsertPacket(
          rtpInfo,
          rtp->payload(),
          rtp->payloadLen(),
          rtp->time() * (output_sample_rate_ / 1000)));
    }
    // Get next packet.
    ASSERT_NE(-1, rtp->readFromFile(rtp_fp_));
  }

  // Get audio from NetEq.
  NetEqOutputType type;
  int num_channels;
  ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, out_len,
                                &num_channels, &type));
  ASSERT_TRUE((*out_len == kBlockSize8kHz) ||
              (*out_len == kBlockSize16kHz) ||
              (*out_len == kBlockSize32kHz));
  output_sample_rate_ = *out_len / 10 * 1000;

  // Increase time.
  sim_clock_ += kTimeStepMs;
}

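// Decodes the whole RTP file and compares the output against the reference
// file, or generates a new reference file if |ref_file| is empty.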
void NetEqDecodingTest::DecodeAndCompare(const std::string &rtp_file,
                                         const std::string &ref_file) {
  OpenInputFile(rtp_file);

  std::string ref_out_file = "";
  if (ref_file.empty()) {
    ref_out_file = webrtc::test::OutputPath() + "neteq_universal_ref.pcm";
  }
  RefFiles ref_files(ref_file, ref_out_file);

  NETEQTEST_RTPpacket rtp;
  ASSERT_GT(rtp.readFromFile(rtp_fp_), 0);
  int i = 0;
  while (rtp.dataLen() >= 0) {
    std::ostringstream ss;
    ss << "Lap number " << i++ << " in DecodeAndCompare while loop";
    SCOPED_TRACE(ss.str());  // Print out the parameter values on failure.
    int out_len = 0;
    ASSERT_NO_FATAL_FAILURE(Process(&rtp, &out_len));
    ASSERT_NO_FATAL_FAILURE(ref_files.ProcessReference(out_data_, out_len));
  }
}

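// Decodes the whole RTP file while checking the network and RTCP statistics
// once per simulated second against the given reference files (or generating
// new reference files if the reference file names are empty).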
void NetEqDecodingTest::DecodeAndCheckStats(const std::string &rtp_file,
                                            const std::string &stat_ref_file,
                                            const std::string &rtcp_ref_file) {
  OpenInputFile(rtp_file);
  std::string stat_out_file = "";
  if (stat_ref_file.empty()) {
    stat_out_file = webrtc::test::OutputPath() +
        "neteq_network_stats.dat";
  }
  RefFiles network_stat_files(stat_ref_file, stat_out_file);

  std::string rtcp_out_file = "";
  if (rtcp_ref_file.empty()) {
    rtcp_out_file = webrtc::test::OutputPath() +
        "neteq_rtcp_stats.dat";
  }
  RefFiles rtcp_stat_files(rtcp_ref_file, rtcp_out_file);

  NETEQTEST_RTPpacket rtp;
  ASSERT_GT(rtp.readFromFile(rtp_fp_), 0);
  while (rtp.dataLen() >= 0) {
    int out_len;
    Process(&rtp, &out_len);

    // Query the network statistics API once per second.
    if (sim_clock_ % 1000 == 0) {
      // Process NetworkStatistics.
      NetEqNetworkStatistics network_stats;
      ASSERT_EQ(0, neteq_->NetworkStatistics(&network_stats));
      network_stat_files.ProcessReference(network_stats);

      // Process RTCPstat.
      RtcpStatistics rtcp_stats;
      neteq_->GetRtcpStatistics(&rtcp_stats);
      rtcp_stat_files.ProcessReference(rtcp_stats);
    }
  }
}

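// Fills in an RTP header for a PCM16b wideband packet with the given sequence
// number (|frame_index|) and timestamp.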
void NetEqDecodingTest::PopulateRtpInfo(int frame_index,
                                        int timestamp,
                                        WebRtcRTPHeader* rtp_info) {
  rtp_info->header.sequenceNumber = frame_index;
  rtp_info->header.timestamp = timestamp;
  rtp_info->header.ssrc = 0x1234;  // Just an arbitrary SSRC.
  rtp_info->header.payloadType = 94;  // PCM16b WB codec.
  rtp_info->header.markerBit = 0;
}

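// Fills in an RTP header and a one-byte payload for a wideband CNG packet
// carrying only a noise level.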
void NetEqDecodingTest::PopulateCng(int frame_index,
                                    int timestamp,
                                    WebRtcRTPHeader* rtp_info,
                                    uint8_t* payload,
                                    int* payload_len) {
  rtp_info->header.sequenceNumber = frame_index;
  rtp_info->header.timestamp = timestamp;
  rtp_info->header.ssrc = 0x1234;  // Just an arbitrary SSRC.
  rtp_info->header.payloadType = 98;  // WB CNG.
  rtp_info->header.markerBit = 0;
  payload[0] = 64;  // Noise level -64 dBov, quite arbitrarily chosen.
  *payload_len = 1;  // Only noise level, no spectral parameters.
}

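// Feeds NetEq with PCM16 packets at the given sample rate, then stops
// inserting packets and verifies that the PLC-to-CNG output is non-zero,
// zero, or eventually faded to zero, depending on |bgn_mode|.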
void NetEqDecodingTest::CheckBgnOff(int sampling_rate_hz,
                                    NetEqBackgroundNoiseMode bgn_mode) {
  int expected_samples_per_channel = 0;
  uint8_t payload_type = 0xFF;  // Invalid.
  if (sampling_rate_hz == 8000) {
    expected_samples_per_channel = kBlockSize8kHz;
    payload_type = 93;  // PCM 16, 8 kHz.
  } else if (sampling_rate_hz == 16000) {
    expected_samples_per_channel = kBlockSize16kHz;
    payload_type = 94;  // PCM 16, 16 kHz.
  } else if (sampling_rate_hz == 32000) {
    expected_samples_per_channel = kBlockSize32kHz;
    payload_type = 95;  // PCM 16, 32 kHz.
  } else {
    ASSERT_TRUE(false);  // Unsupported test case.
  }

  NetEqOutputType type;
  int16_t output[kBlockSize32kHz];  // Maximum size is chosen.
  int16_t input[kBlockSize32kHz];  // Maximum size is chosen.

  // Payload of 10 ms of PCM16 32 kHz.
  uint8_t payload[kBlockSize32kHz * sizeof(int16_t)];

  // Random payload.
  for (int n = 0; n < expected_samples_per_channel; ++n) {
    input[n] = (rand() & ((1 << 10) - 1)) - ((1 << 5) - 1);
  }
  int enc_len_bytes = WebRtcPcm16b_EncodeW16(
      input, expected_samples_per_channel, reinterpret_cast<int16_t*>(payload));
  ASSERT_EQ(enc_len_bytes, expected_samples_per_channel * 2);

  WebRtcRTPHeader rtp_info;
  PopulateRtpInfo(0, 0, &rtp_info);
  rtp_info.header.payloadType = payload_type;

  int number_channels = 0;
  int samples_per_channel = 0;

  uint32_t receive_timestamp = 0;
  for (int n = 0; n < 10; ++n) {  // Insert a few packets and get audio.
    number_channels = 0;
    samples_per_channel = 0;
    ASSERT_EQ(0, neteq_->InsertPacket(
        rtp_info, payload, enc_len_bytes, receive_timestamp));
    ASSERT_EQ(0, neteq_->GetAudio(kBlockSize32kHz, output, &samples_per_channel,
                                  &number_channels, &type));
    ASSERT_EQ(1, number_channels);
    ASSERT_EQ(expected_samples_per_channel, samples_per_channel);
    ASSERT_EQ(kOutputNormal, type);

    // Next packet.
    rtp_info.header.timestamp += expected_samples_per_channel;
    rtp_info.header.sequenceNumber++;
    receive_timestamp += expected_samples_per_channel;
  }

  number_channels = 0;
  samples_per_channel = 0;

  // Get audio without inserting packets, expecting PLC and PLC-to-CNG. Pull
  // one frame without checking the speech type. This is the first frame
  // pulled without inserting any packet, and might not be labeled as PLC.
  ASSERT_EQ(0, neteq_->GetAudio(kBlockSize32kHz, output, &samples_per_channel,
                                &number_channels, &type));
  ASSERT_EQ(1, number_channels);
  ASSERT_EQ(expected_samples_per_channel, samples_per_channel);

  // To be able to test the fading of background noise we need to pull at
  // least 611 frames.
  const int kFadingThreshold = 611;

  // Test several PLC-to-CNG frames for the expected behavior. The number 20 is
  // arbitrary, but sufficiently large to cover enough frames.
  const int kNumPlcToCngTestFrames = 20;
  bool plc_to_cng = false;
  for (int n = 0; n < kFadingThreshold + kNumPlcToCngTestFrames; ++n) {
    number_channels = 0;
    samples_per_channel = 0;
    memset(output, 1, sizeof(output));  // Set to non-zero.
    ASSERT_EQ(0, neteq_->GetAudio(kBlockSize32kHz, output, &samples_per_channel,
                                  &number_channels, &type));
    ASSERT_EQ(1, number_channels);
    ASSERT_EQ(expected_samples_per_channel, samples_per_channel);
    if (type == kOutputPLCtoCNG) {
      plc_to_cng = true;
      double sum_squared = 0;
      for (int k = 0; k < number_channels * samples_per_channel; ++k)
        sum_squared += output[k] * output[k];
      if (bgn_mode == kBgnOn) {
        EXPECT_NE(0, sum_squared);
      } else if (bgn_mode == kBgnOff || n > kFadingThreshold) {
        EXPECT_EQ(0, sum_squared);
      }
    } else {
      EXPECT_EQ(kOutputPLC, type);
    }
  }
  EXPECT_TRUE(plc_to_cng);  // Just to be sure that PLC-to-CNG has occurred.
}

#if defined(_WIN32) && defined(WEBRTC_ARCH_64_BITS)
// Disabled for Windows 64-bit until webrtc:1458 is fixed.
#define MAYBE_TestBitExactness DISABLED_TestBitExactness
#else
#define MAYBE_TestBitExactness TestBitExactness
#endif

TEST_F(NetEqDecodingTest, DISABLED_ON_ANDROID(MAYBE_TestBitExactness)) {
  const std::string input_rtp_file = webrtc::test::ProjectRootPath() +
      "resources/audio_coding/neteq_universal_new.rtp";
#if defined(_MSC_VER) && (_MSC_VER >= 1700)
  // For Visual Studio 2012 and later, we will have to use the generic
  // reference file, rather than the Windows-specific one.
  const std::string input_ref_file = webrtc::test::ProjectRootPath() +
      "resources/audio_coding/neteq4_universal_ref.pcm";
#else
  const std::string input_ref_file =
      webrtc::test::ResourcePath("audio_coding/neteq4_universal_ref", "pcm");
#endif

  if (FLAGS_gen_ref) {
    DecodeAndCompare(input_rtp_file, "");
  } else {
    DecodeAndCompare(input_rtp_file, input_ref_file);
  }
}

TEST_F(NetEqDecodingTest, DISABLED_ON_ANDROID(TestNetworkStatistics)) {
  const std::string input_rtp_file = webrtc::test::ProjectRootPath() +
      "resources/audio_coding/neteq_universal_new.rtp";
#if defined(_MSC_VER) && (_MSC_VER >= 1700)
  // For Visual Studio 2012 and later, we will have to use the generic
  // reference file, rather than the Windows-specific one.
  const std::string network_stat_ref_file = webrtc::test::ProjectRootPath() +
      "resources/audio_coding/neteq4_network_stats.dat";
#else
  const std::string network_stat_ref_file =
      webrtc::test::ResourcePath("audio_coding/neteq4_network_stats", "dat");
#endif
  const std::string rtcp_stat_ref_file =
      webrtc::test::ResourcePath("audio_coding/neteq4_rtcp_stats", "dat");
  if (FLAGS_gen_ref) {
    DecodeAndCheckStats(input_rtp_file, "", "");
  } else {
    DecodeAndCheckStats(input_rtp_file, network_stat_ref_file,
                        rtcp_stat_ref_file);
  }
}

// TODO(hlundin): Re-enable test once the statistics interface is up and
// running again.
TEST_F(NetEqDecodingTest, DISABLED_ON_ANDROID(TestFrameWaitingTimeStatistics)) {
  // Use fax mode to avoid time-scaling. This is to simplify the testing of
  // packet waiting times in the packet buffer.
  neteq_->SetPlayoutMode(kPlayoutFax);
  ASSERT_EQ(kPlayoutFax, neteq_->PlayoutMode());
  // Insert 30 dummy packets at once. Each packet contains 10 ms 16 kHz audio.
  size_t num_frames = 30;
  const int kSamples = 10 * 16;
  const int kPayloadBytes = kSamples * 2;
  for (size_t i = 0; i < num_frames; ++i) {
    uint16_t payload[kSamples] = {0};
    WebRtcRTPHeader rtp_info;
    rtp_info.header.sequenceNumber = i;
    rtp_info.header.timestamp = i * kSamples;
    rtp_info.header.ssrc = 0x1234;  // Just an arbitrary SSRC.
    rtp_info.header.payloadType = 94;  // PCM16b WB codec.
    rtp_info.header.markerBit = 0;
    ASSERT_EQ(0, neteq_->InsertPacket(
        rtp_info,
        reinterpret_cast<uint8_t*>(payload),
        kPayloadBytes, 0));
  }
  // Pull out all data.
  for (size_t i = 0; i < num_frames; ++i) {
    int out_len;
    int num_channels;
    NetEqOutputType type;
    ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, &out_len,
                                  &num_channels, &type));
    ASSERT_EQ(kBlockSize16kHz, out_len);
  }

  std::vector<int> waiting_times;
  neteq_->WaitingTimes(&waiting_times);
  EXPECT_EQ(num_frames, waiting_times.size());
  // Since all frames are dumped into NetEQ at once, but pulled out with 10 ms
  // spacing (per definition), we expect the delay to increase by 10 ms for
  // each packet.
  for (size_t i = 0; i < waiting_times.size(); ++i) {
    EXPECT_EQ(static_cast<int>(i + 1) * 10, waiting_times[i]);
  }

  // Check statistics again and make sure it's been reset.
  neteq_->WaitingTimes(&waiting_times);
  int len = waiting_times.size();
  EXPECT_EQ(0, len);

  // Process > 100 frames, and make sure that we get statistics
  // only for 100 frames. Note the new SSRC, causing NetEQ to reset.
  num_frames = 110;
  for (size_t i = 0; i < num_frames; ++i) {
    uint16_t payload[kSamples] = {0};
    WebRtcRTPHeader rtp_info;
    rtp_info.header.sequenceNumber = i;
    rtp_info.header.timestamp = i * kSamples;
    rtp_info.header.ssrc = 0x1235;  // Just an arbitrary SSRC.
    rtp_info.header.payloadType = 94;  // PCM16b WB codec.
    rtp_info.header.markerBit = 0;
    ASSERT_EQ(0, neteq_->InsertPacket(
        rtp_info,
        reinterpret_cast<uint8_t*>(payload),
        kPayloadBytes, 0));
    int out_len;
    int num_channels;
    NetEqOutputType type;
    ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, &out_len,
                                  &num_channels, &type));
    ASSERT_EQ(kBlockSize16kHz, out_len);
  }

  neteq_->WaitingTimes(&waiting_times);
  EXPECT_EQ(100u, waiting_times.size());
}

TEST_F(NetEqDecodingTest,
       DISABLED_ON_ANDROID(TestAverageInterArrivalTimeNegative)) {
  const int kNumFrames = 3000;  // Needed for convergence.
  int frame_index = 0;
  const int kSamples = 10 * 16;
  const int kPayloadBytes = kSamples * 2;
  while (frame_index < kNumFrames) {
    // Insert one packet each time, except every 10th time where we insert two
    // packets at once. This will create a negative clock-drift of approx. 10%.
    int num_packets = (frame_index % 10 == 0 ? 2 : 1);
    for (int n = 0; n < num_packets; ++n) {
      uint8_t payload[kPayloadBytes] = {0};
      WebRtcRTPHeader rtp_info;
      PopulateRtpInfo(frame_index, frame_index * kSamples, &rtp_info);
      ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes, 0));
      ++frame_index;
    }

    // Pull out data once.
    int out_len;
    int num_channels;
    NetEqOutputType type;
    ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, &out_len,
                                  &num_channels, &type));
    ASSERT_EQ(kBlockSize16kHz, out_len);
  }

  NetEqNetworkStatistics network_stats;
  ASSERT_EQ(0, neteq_->NetworkStatistics(&network_stats));
  EXPECT_EQ(-103196, network_stats.clockdrift_ppm);
}

TEST_F(NetEqDecodingTest,
       DISABLED_ON_ANDROID(TestAverageInterArrivalTimePositive)) {
  const int kNumFrames = 5000;  // Needed for convergence.
  int frame_index = 0;
  const int kSamples = 10 * 16;
  const int kPayloadBytes = kSamples * 2;
  for (int i = 0; i < kNumFrames; ++i) {
    // Insert one packet each time, except every 10th time where we don't
    // insert any packet. This will create a positive clock-drift of approx.
    // 11%.
    int num_packets = (i % 10 == 9 ? 0 : 1);
    for (int n = 0; n < num_packets; ++n) {
      uint8_t payload[kPayloadBytes] = {0};
      WebRtcRTPHeader rtp_info;
      PopulateRtpInfo(frame_index, frame_index * kSamples, &rtp_info);
      ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes, 0));
      ++frame_index;
    }

    // Pull out data once.
    int out_len;
    int num_channels;
    NetEqOutputType type;
    ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, &out_len,
                                  &num_channels, &type));
    ASSERT_EQ(kBlockSize16kHz, out_len);
  }

  NetEqNetworkStatistics network_stats;
  ASSERT_EQ(0, neteq_->NetworkStatistics(&network_stats));
  EXPECT_EQ(110946, network_stats.clockdrift_ppm);
}

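// Plays out speech, then a long period of CNG (optionally with a simulated
// network freeze), and finally speech again, while applying |drift_factor| to
// the packet arrival times. Verifies that playout returns to speech within
// |max_time_to_speech_ms| and that the delay change stays within
// |delay_tolerance_ms|.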
void NetEqDecodingTest::LongCngWithClockDrift(double drift_factor,
                                              double network_freeze_ms,
                                              bool pull_audio_during_freeze,
                                              int delay_tolerance_ms,
                                              int max_time_to_speech_ms) {
  uint16_t seq_no = 0;
  uint32_t timestamp = 0;
  const int kFrameSizeMs = 30;
  const int kSamples = kFrameSizeMs * 16;
  const int kPayloadBytes = kSamples * 2;
  double next_input_time_ms = 0.0;
  double t_ms;
  int out_len;
  int num_channels;
  NetEqOutputType type;

  // Insert speech for 5 seconds.
  const int kSpeechDurationMs = 5000;
  for (t_ms = 0; t_ms < kSpeechDurationMs; t_ms += 10) {
    // Each turn in this loop is 10 ms.
    while (next_input_time_ms <= t_ms) {
      // Insert one 30 ms speech frame.
      uint8_t payload[kPayloadBytes] = {0};
      WebRtcRTPHeader rtp_info;
      PopulateRtpInfo(seq_no, timestamp, &rtp_info);
      ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes, 0));
      ++seq_no;
      timestamp += kSamples;
      next_input_time_ms += static_cast<double>(kFrameSizeMs) * drift_factor;
    }
    // Pull out data once.
    ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, &out_len,
                                  &num_channels, &type));
    ASSERT_EQ(kBlockSize16kHz, out_len);
  }

  EXPECT_EQ(kOutputNormal, type);
  int32_t delay_before = timestamp - neteq_->PlayoutTimestamp();

  // Insert CNG for 1 minute (= 60000 ms).
  const int kCngPeriodMs = 100;
  const int kCngPeriodSamples = kCngPeriodMs * 16;  // Period in 16 kHz samples.
  const int kCngDurationMs = 60000;
  for (; t_ms < kSpeechDurationMs + kCngDurationMs; t_ms += 10) {
    // Each turn in this loop is 10 ms.
    while (next_input_time_ms <= t_ms) {
      // Insert one CNG frame each 100 ms.
      uint8_t payload[kPayloadBytes];
      int payload_len;
      WebRtcRTPHeader rtp_info;
      PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len);
      ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, payload_len, 0));
      ++seq_no;
      timestamp += kCngPeriodSamples;
      next_input_time_ms += static_cast<double>(kCngPeriodMs) * drift_factor;
    }
    // Pull out data once.
    ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, &out_len,
                                  &num_channels, &type));
    ASSERT_EQ(kBlockSize16kHz, out_len);
  }

  EXPECT_EQ(kOutputCNG, type);

  if (network_freeze_ms > 0) {
    // First keep pulling audio for |network_freeze_ms| without inserting
    // any data, then insert CNG data corresponding to |network_freeze_ms|
    // without pulling any output audio.
    const double loop_end_time = t_ms + network_freeze_ms;
    for (; t_ms < loop_end_time; t_ms += 10) {
      // Pull out data once.
      ASSERT_EQ(0,
                neteq_->GetAudio(
                    kMaxBlockSize, out_data_, &out_len, &num_channels, &type));
      ASSERT_EQ(kBlockSize16kHz, out_len);
      EXPECT_EQ(kOutputCNG, type);
    }
    bool pull_once = pull_audio_during_freeze;
    // If |pull_once| is true, GetAudio will be called once half-way through
    // the network recovery period.
    double pull_time_ms = (t_ms + next_input_time_ms) / 2;
    while (next_input_time_ms <= t_ms) {
      if (pull_once && next_input_time_ms >= pull_time_ms) {
        pull_once = false;
        // Pull out data once.
        ASSERT_EQ(
            0,
            neteq_->GetAudio(
                kMaxBlockSize, out_data_, &out_len, &num_channels, &type));
        ASSERT_EQ(kBlockSize16kHz, out_len);
        EXPECT_EQ(kOutputCNG, type);
        t_ms += 10;
      }
      // Insert one CNG frame each 100 ms.
      uint8_t payload[kPayloadBytes];
      int payload_len;
      WebRtcRTPHeader rtp_info;
      PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len);
      ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, payload_len, 0));
      ++seq_no;
      timestamp += kCngPeriodSamples;
      next_input_time_ms += kCngPeriodMs * drift_factor;
    }
  }

  // Insert speech again until output type is speech.
  double speech_restart_time_ms = t_ms;
  while (type != kOutputNormal) {
    // Each turn in this loop is 10 ms.
    while (next_input_time_ms <= t_ms) {
      // Insert one 30 ms speech frame.
      uint8_t payload[kPayloadBytes] = {0};
      WebRtcRTPHeader rtp_info;
      PopulateRtpInfo(seq_no, timestamp, &rtp_info);
      ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes, 0));
      ++seq_no;
      timestamp += kSamples;
      next_input_time_ms += kFrameSizeMs * drift_factor;
    }
    // Pull out data once.
    ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, &out_len,
                                  &num_channels, &type));
    ASSERT_EQ(kBlockSize16kHz, out_len);
    // Increase clock.
    t_ms += 10;
  }

  // Check that the speech starts again within reasonable time.
  double time_until_speech_returns_ms = t_ms - speech_restart_time_ms;
  EXPECT_LT(time_until_speech_returns_ms, max_time_to_speech_ms);
  int32_t delay_after = timestamp - neteq_->PlayoutTimestamp();
  // Compare delay before and after, and make sure it differs no more than
  // |delay_tolerance_ms|.
  EXPECT_LE(delay_after, delay_before + delay_tolerance_ms * 16);
  EXPECT_GE(delay_after, delay_before - delay_tolerance_ms * 16);
}

TEST_F(NetEqDecodingTest, DISABLED_ON_ANDROID(LongCngWithNegativeClockDrift)) {
  // Apply a clock drift of -25 ms / s (sender faster than receiver).
  const double kDriftFactor = 1000.0 / (1000.0 + 25.0);
  const double kNetworkFreezeTimeMs = 0.0;
  const bool kGetAudioDuringFreezeRecovery = false;
  const int kDelayToleranceMs = 20;
  const int kMaxTimeToSpeechMs = 100;
  LongCngWithClockDrift(kDriftFactor,
                        kNetworkFreezeTimeMs,
                        kGetAudioDuringFreezeRecovery,
                        kDelayToleranceMs,
                        kMaxTimeToSpeechMs);
}

TEST_F(NetEqDecodingTest, DISABLED_ON_ANDROID(LongCngWithPositiveClockDrift)) {
  // Apply a clock drift of +25 ms / s (sender slower than receiver).
  const double kDriftFactor = 1000.0 / (1000.0 - 25.0);
  const double kNetworkFreezeTimeMs = 0.0;
  const bool kGetAudioDuringFreezeRecovery = false;
  const int kDelayToleranceMs = 20;
  const int kMaxTimeToSpeechMs = 100;
  LongCngWithClockDrift(kDriftFactor,
                        kNetworkFreezeTimeMs,
                        kGetAudioDuringFreezeRecovery,
                        kDelayToleranceMs,
                        kMaxTimeToSpeechMs);
}

TEST_F(NetEqDecodingTest,
       DISABLED_ON_ANDROID(LongCngWithNegativeClockDriftNetworkFreeze)) {
  // Apply a clock drift of -25 ms / s (sender faster than receiver).
  const double kDriftFactor = 1000.0 / (1000.0 + 25.0);
  const double kNetworkFreezeTimeMs = 5000.0;
  const bool kGetAudioDuringFreezeRecovery = false;
  const int kDelayToleranceMs = 50;
  const int kMaxTimeToSpeechMs = 200;
  LongCngWithClockDrift(kDriftFactor,
                        kNetworkFreezeTimeMs,
                        kGetAudioDuringFreezeRecovery,
                        kDelayToleranceMs,
                        kMaxTimeToSpeechMs);
}

TEST_F(NetEqDecodingTest,
       DISABLED_ON_ANDROID(LongCngWithPositiveClockDriftNetworkFreeze)) {
  // Apply a clock drift of +25 ms / s (sender slower than receiver).
  const double kDriftFactor = 1000.0 / (1000.0 - 25.0);
  const double kNetworkFreezeTimeMs = 5000.0;
  const bool kGetAudioDuringFreezeRecovery = false;
  const int kDelayToleranceMs = 20;
  const int kMaxTimeToSpeechMs = 100;
  LongCngWithClockDrift(kDriftFactor,
                        kNetworkFreezeTimeMs,
                        kGetAudioDuringFreezeRecovery,
                        kDelayToleranceMs,
                        kMaxTimeToSpeechMs);
}

TEST_F(
    NetEqDecodingTest,
    DISABLED_ON_ANDROID(LongCngWithPositiveClockDriftNetworkFreezeExtraPull)) {
  // Apply a clock drift of +25 ms / s (sender slower than receiver).
  const double kDriftFactor = 1000.0 / (1000.0 - 25.0);
  const double kNetworkFreezeTimeMs = 5000.0;
  const bool kGetAudioDuringFreezeRecovery = true;
  const int kDelayToleranceMs = 20;
  const int kMaxTimeToSpeechMs = 100;
  LongCngWithClockDrift(kDriftFactor,
                        kNetworkFreezeTimeMs,
                        kGetAudioDuringFreezeRecovery,
                        kDelayToleranceMs,
                        kMaxTimeToSpeechMs);
}

TEST_F(NetEqDecodingTest, DISABLED_ON_ANDROID(LongCngWithoutClockDrift)) {
  const double kDriftFactor = 1.0;  // No drift.
  const double kNetworkFreezeTimeMs = 0.0;
  const bool kGetAudioDuringFreezeRecovery = false;
  const int kDelayToleranceMs = 10;
  const int kMaxTimeToSpeechMs = 50;
  LongCngWithClockDrift(kDriftFactor,
                        kNetworkFreezeTimeMs,
                        kGetAudioDuringFreezeRecovery,
                        kDelayToleranceMs,
                        kMaxTimeToSpeechMs);
}

TEST_F(NetEqDecodingTest, DISABLED_ON_ANDROID(UnknownPayloadType)) {
  const int kPayloadBytes = 100;
  uint8_t payload[kPayloadBytes] = {0};
  WebRtcRTPHeader rtp_info;
  PopulateRtpInfo(0, 0, &rtp_info);
  rtp_info.header.payloadType = 1;  // Not registered as a decoder.
  EXPECT_EQ(NetEq::kFail,
            neteq_->InsertPacket(rtp_info, payload, kPayloadBytes, 0));
  EXPECT_EQ(NetEq::kUnknownRtpPayloadType, neteq_->LastError());
}

TEST_F(NetEqDecodingTest, DISABLED_ON_ANDROID(OversizePacket)) {
  // Payload size is greater than the packet buffer size.
  const int kPayloadBytes = NetEq::kMaxBytesInBuffer + 1;
  uint8_t payload[kPayloadBytes] = {0};
  WebRtcRTPHeader rtp_info;
  PopulateRtpInfo(0, 0, &rtp_info);
  rtp_info.header.payloadType = 103;  // iSAC, no packet splitting.
  EXPECT_EQ(NetEq::kFail,
            neteq_->InsertPacket(rtp_info, payload, kPayloadBytes, 0));
  EXPECT_EQ(NetEq::kOversizePacket, neteq_->LastError());
}

TEST_F(NetEqDecodingTest, DISABLED_ON_ANDROID(DecoderError)) {
  const int kPayloadBytes = 100;
  uint8_t payload[kPayloadBytes] = {0};
  WebRtcRTPHeader rtp_info;
  PopulateRtpInfo(0, 0, &rtp_info);
  rtp_info.header.payloadType = 103;  // iSAC, but the payload is invalid.
  EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes, 0));
  NetEqOutputType type;
  // Set all of |out_data_| to 1, and verify that it was set to 0 by the call
  // to GetAudio.
  for (int i = 0; i < kMaxBlockSize; ++i) {
    out_data_[i] = 1;
  }
  int num_channels;
  int samples_per_channel;
  EXPECT_EQ(NetEq::kFail,
            neteq_->GetAudio(kMaxBlockSize, out_data_,
                             &samples_per_channel, &num_channels, &type));
  // Verify that there is a decoder error to check.
  EXPECT_EQ(NetEq::kDecoderErrorCode, neteq_->LastError());
  // Code 6730 is an iSAC error code.
  EXPECT_EQ(6730, neteq_->LastDecoderError());
  // Verify that the first 160 samples are set to 0, and that the remaining
  // samples are left unmodified.
  static const int kExpectedOutputLength = 160;  // 10 ms at 16 kHz sample rate.
  for (int i = 0; i < kExpectedOutputLength; ++i) {
    std::ostringstream ss;
    ss << "i = " << i;
    SCOPED_TRACE(ss.str());  // Print out the parameter values on failure.
    EXPECT_EQ(0, out_data_[i]);
  }
  for (int i = kExpectedOutputLength; i < kMaxBlockSize; ++i) {
    std::ostringstream ss;
    ss << "i = " << i;
    SCOPED_TRACE(ss.str());  // Print out the parameter values on failure.
    EXPECT_EQ(1, out_data_[i]);
  }
}

TEST_F(NetEqDecodingTest, DISABLED_ON_ANDROID(GetAudioBeforeInsertPacket)) {
  NetEqOutputType type;
  // Set all of |out_data_| to 1, and verify that it was set to 0 by the call
  // to GetAudio.
  for (int i = 0; i < kMaxBlockSize; ++i) {
    out_data_[i] = 1;
  }
  int num_channels;
  int samples_per_channel;
  EXPECT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_,
                                &samples_per_channel,
                                &num_channels, &type));
  // Verify that the first block of samples is set to 0.
  static const int kExpectedOutputLength =
      kInitSampleRateHz / 100;  // 10 ms at initial sample rate.
  for (int i = 0; i < kExpectedOutputLength; ++i) {
    std::ostringstream ss;
    ss << "i = " << i;
    SCOPED_TRACE(ss.str());  // Print out the parameter values on failure.
    EXPECT_EQ(0, out_data_[i]);
  }
}

TEST_F(NetEqDecodingTest, DISABLED_ON_ANDROID(BackgroundNoise)) {
  neteq_->SetBackgroundNoiseMode(kBgnOn);
  CheckBgnOff(8000, kBgnOn);
  CheckBgnOff(16000, kBgnOn);
  CheckBgnOff(32000, kBgnOn);
  EXPECT_EQ(kBgnOn, neteq_->BackgroundNoiseMode());

  neteq_->SetBackgroundNoiseMode(kBgnOff);
  CheckBgnOff(8000, kBgnOff);
  CheckBgnOff(16000, kBgnOff);
  CheckBgnOff(32000, kBgnOff);
  EXPECT_EQ(kBgnOff, neteq_->BackgroundNoiseMode());

  neteq_->SetBackgroundNoiseMode(kBgnFade);
  CheckBgnOff(8000, kBgnFade);
  CheckBgnOff(16000, kBgnFade);
  CheckBgnOff(32000, kBgnFade);
  EXPECT_EQ(kBgnFade, neteq_->BackgroundNoiseMode());
}

TEST_F(NetEqDecodingTest, DISABLED_ON_ANDROID(SyncPacketInsert)) {
  WebRtcRTPHeader rtp_info;
  uint32_t receive_timestamp = 0;
  // For readability, use the following payload types instead of the defaults
  // of this test.
  uint8_t kPcm16WbPayloadType = 1;
  uint8_t kCngNbPayloadType = 2;
  uint8_t kCngWbPayloadType = 3;
  uint8_t kCngSwb32PayloadType = 4;
  uint8_t kCngSwb48PayloadType = 5;
  uint8_t kAvtPayloadType = 6;
  uint8_t kRedPayloadType = 7;
  uint8_t kIsacPayloadType = 9;  // Payload type 8 is already registered.

  // Register decoders.
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderPCM16Bwb,
                                           kPcm16WbPayloadType));
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderCNGnb, kCngNbPayloadType));
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderCNGwb, kCngWbPayloadType));
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderCNGswb32kHz,
                                           kCngSwb32PayloadType));
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderCNGswb48kHz,
                                           kCngSwb48PayloadType));
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderAVT, kAvtPayloadType));
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderRED, kRedPayloadType));
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderISAC, kIsacPayloadType));

  PopulateRtpInfo(0, 0, &rtp_info);
  rtp_info.header.payloadType = kPcm16WbPayloadType;

  // The first packet injected cannot be a sync packet.
  EXPECT_EQ(-1, neteq_->InsertSyncPacket(rtp_info, receive_timestamp));

  // Payload length of 10 ms PCM16 16 kHz.
  const int kPayloadBytes = kBlockSize16kHz * sizeof(int16_t);
  uint8_t payload[kPayloadBytes] = {0};
  ASSERT_EQ(0, neteq_->InsertPacket(
      rtp_info, payload, kPayloadBytes, receive_timestamp));

  // Next packet. Last packet contained 10 ms audio.
  rtp_info.header.sequenceNumber++;
  rtp_info.header.timestamp += kBlockSize16kHz;
  receive_timestamp += kBlockSize16kHz;

  // Unacceptable payload types: CNG, AVT (DTMF), RED.
  rtp_info.header.payloadType = kCngNbPayloadType;
  EXPECT_EQ(-1, neteq_->InsertSyncPacket(rtp_info, receive_timestamp));

  rtp_info.header.payloadType = kCngWbPayloadType;
  EXPECT_EQ(-1, neteq_->InsertSyncPacket(rtp_info, receive_timestamp));

  rtp_info.header.payloadType = kCngSwb32PayloadType;
  EXPECT_EQ(-1, neteq_->InsertSyncPacket(rtp_info, receive_timestamp));

  rtp_info.header.payloadType = kCngSwb48PayloadType;
  EXPECT_EQ(-1, neteq_->InsertSyncPacket(rtp_info, receive_timestamp));

  rtp_info.header.payloadType = kAvtPayloadType;
  EXPECT_EQ(-1, neteq_->InsertSyncPacket(rtp_info, receive_timestamp));

  rtp_info.header.payloadType = kRedPayloadType;
  EXPECT_EQ(-1, neteq_->InsertSyncPacket(rtp_info, receive_timestamp));

  // Change of codec cannot be initiated with a sync packet.
  rtp_info.header.payloadType = kIsacPayloadType;
  EXPECT_EQ(-1, neteq_->InsertSyncPacket(rtp_info, receive_timestamp));

  // Change of SSRC is not allowed with a sync packet.
  rtp_info.header.payloadType = kPcm16WbPayloadType;
  ++rtp_info.header.ssrc;
  EXPECT_EQ(-1, neteq_->InsertSyncPacket(rtp_info, receive_timestamp));

  --rtp_info.header.ssrc;
  EXPECT_EQ(0, neteq_->InsertSyncPacket(rtp_info, receive_timestamp));
}

// First insert several noise-like packets, then sync packets. Decoding all
// packets should not produce errors, statistics should not show any packet
// loss, and sync packets should decode to zero.
// TODO(turajs) We will have a better test if we have a reference NetEq, and
// when sync packets are inserted in the "test" NetEq we insert all-zero
// payloads in the reference NetEq and compare the outputs of the two.
TEST_F(NetEqDecodingTest, DISABLED_ON_ANDROID(SyncPacketDecode)) {
  WebRtcRTPHeader rtp_info;
  PopulateRtpInfo(0, 0, &rtp_info);
  const int kPayloadBytes = kBlockSize16kHz * sizeof(int16_t);
  uint8_t payload[kPayloadBytes];
  int16_t decoded[kBlockSize16kHz];
  int algorithmic_frame_delay = algorithmic_delay_ms_ / 10 + 1;
  for (int n = 0; n < kPayloadBytes; ++n) {
    payload[n] = (rand() & 0xF0) + 1;  // Non-zero random sequence.
  }
  // Insert some packets which decode to noise. We are not interested in
  // actual decoded values.
  NetEqOutputType output_type;
  int num_channels;
  int samples_per_channel;
  uint32_t receive_timestamp = 0;
  for (int n = 0; n < 100; ++n) {
    ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes,
                                      receive_timestamp));
    ASSERT_EQ(0, neteq_->GetAudio(kBlockSize16kHz, decoded,
                                  &samples_per_channel, &num_channels,
                                  &output_type));
    ASSERT_EQ(kBlockSize16kHz, samples_per_channel);
    ASSERT_EQ(1, num_channels);

    rtp_info.header.sequenceNumber++;
    rtp_info.header.timestamp += kBlockSize16kHz;
    receive_timestamp += kBlockSize16kHz;
  }
  const int kNumSyncPackets = 10;

  // Make sure a sufficient number of sync packets is inserted so that we can
  // conduct the test.
  ASSERT_GT(kNumSyncPackets, algorithmic_frame_delay);
  // Insert sync packets; the decoded sequence should be all-zero.
  for (int n = 0; n < kNumSyncPackets; ++n) {
    ASSERT_EQ(0, neteq_->InsertSyncPacket(rtp_info, receive_timestamp));
    ASSERT_EQ(0, neteq_->GetAudio(kBlockSize16kHz, decoded,
                                  &samples_per_channel, &num_channels,
                                  &output_type));
    ASSERT_EQ(kBlockSize16kHz, samples_per_channel);
    ASSERT_EQ(1, num_channels);
    if (n > algorithmic_frame_delay) {
      EXPECT_TRUE(IsAllZero(decoded, samples_per_channel * num_channels));
    }
    rtp_info.header.sequenceNumber++;
    rtp_info.header.timestamp += kBlockSize16kHz;
    receive_timestamp += kBlockSize16kHz;
  }

  // We insert regular packets; if sync packets are not correctly buffered,
  // the network statistics would show some packet loss.
  for (int n = 0; n <= algorithmic_frame_delay + 10; ++n) {
    ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes,
                                      receive_timestamp));
    ASSERT_EQ(0, neteq_->GetAudio(kBlockSize16kHz, decoded,
                                  &samples_per_channel, &num_channels,
                                  &output_type));
    if (n >= algorithmic_frame_delay + 1) {
      // Expect that this frame contains samples from regular RTP.
      EXPECT_TRUE(IsAllNonZero(decoded, samples_per_channel * num_channels));
    }
    rtp_info.header.sequenceNumber++;
    rtp_info.header.timestamp += kBlockSize16kHz;
    receive_timestamp += kBlockSize16kHz;
  }
  NetEqNetworkStatistics network_stats;
  ASSERT_EQ(0, neteq_->NetworkStatistics(&network_stats));
  // Expecting a "clean" network.
  EXPECT_EQ(0, network_stats.packet_loss_rate);
  EXPECT_EQ(0, network_stats.expand_rate);
  EXPECT_EQ(0, network_stats.accelerate_rate);
  EXPECT_LE(network_stats.preemptive_rate, 150);
}

// Test if the size of the packet buffer is reported correctly when it contains
// sync packets. Also, test that network packets override sync packets, i.e.,
// that a network packet is preferred for decoding over a sync packet when both
// have the same sequence number and timestamp.
TEST_F(NetEqDecodingTest,
       DISABLED_ON_ANDROID(SyncPacketBufferSizeAndOverridenByNetworkPackets)) {
  WebRtcRTPHeader rtp_info;
  PopulateRtpInfo(0, 0, &rtp_info);
  const int kPayloadBytes = kBlockSize16kHz * sizeof(int16_t);
  uint8_t payload[kPayloadBytes];
  int16_t decoded[kBlockSize16kHz];
  for (int n = 0; n < kPayloadBytes; ++n) {
    payload[n] = (rand() & 0xF0) + 1;  // Non-zero random sequence.
  }
  // Insert some packets which decode to noise. We are not interested in
  // actual decoded values.
  NetEqOutputType output_type;
  int num_channels;
  int samples_per_channel;
  uint32_t receive_timestamp = 0;
  int algorithmic_frame_delay = algorithmic_delay_ms_ / 10 + 1;
  for (int n = 0; n < algorithmic_frame_delay; ++n) {
    ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes,
                                      receive_timestamp));
    ASSERT_EQ(0, neteq_->GetAudio(kBlockSize16kHz, decoded,
                                  &samples_per_channel, &num_channels,
                                  &output_type));
    ASSERT_EQ(kBlockSize16kHz, samples_per_channel);
    ASSERT_EQ(1, num_channels);
    rtp_info.header.sequenceNumber++;
    rtp_info.header.timestamp += kBlockSize16kHz;
    receive_timestamp += kBlockSize16kHz;
  }
  const int kNumSyncPackets = 10;

  WebRtcRTPHeader first_sync_packet_rtp_info;
  memcpy(&first_sync_packet_rtp_info, &rtp_info, sizeof(rtp_info));

  // Insert sync packets, but no decoding.
  for (int n = 0; n < kNumSyncPackets; ++n) {
    ASSERT_EQ(0, neteq_->InsertSyncPacket(rtp_info, receive_timestamp));
    rtp_info.header.sequenceNumber++;
    rtp_info.header.timestamp += kBlockSize16kHz;
    receive_timestamp += kBlockSize16kHz;
  }
  NetEqNetworkStatistics network_stats;
  ASSERT_EQ(0, neteq_->NetworkStatistics(&network_stats));
  EXPECT_EQ(kNumSyncPackets * 10 + algorithmic_delay_ms_,
            network_stats.current_buffer_size_ms);

  // Rewind |rtp_info| to that of the first sync packet.
  memcpy(&rtp_info, &first_sync_packet_rtp_info, sizeof(rtp_info));

  // Insert.
  for (int n = 0; n < kNumSyncPackets; ++n) {
    ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes,
                                      receive_timestamp));
    rtp_info.header.sequenceNumber++;
    rtp_info.header.timestamp += kBlockSize16kHz;
    receive_timestamp += kBlockSize16kHz;
  }

  // Decode.
  for (int n = 0; n < kNumSyncPackets; ++n) {
    ASSERT_EQ(0, neteq_->GetAudio(kBlockSize16kHz, decoded,
                                  &samples_per_channel, &num_channels,
                                  &output_type));
    ASSERT_EQ(kBlockSize16kHz, samples_per_channel);
    ASSERT_EQ(1, num_channels);
    EXPECT_TRUE(IsAllNonZero(decoded, samples_per_channel * num_channels));
  }
}

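// Inserts speech packets starting at |start_seq_no| and |start_timestamp|,
// optionally dropping the sequence numbers in |drop_seq_numbers|, and checks
// buffer sizes and playout delay while sequence numbers and/or timestamps
// wrap around.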
void NetEqDecodingTest::WrapTest(uint16_t start_seq_no,
                                 uint32_t start_timestamp,
                                 const std::set<uint16_t>& drop_seq_numbers,
                                 bool expect_seq_no_wrap,
                                 bool expect_timestamp_wrap) {
  uint16_t seq_no = start_seq_no;
  uint32_t timestamp = start_timestamp;
  const int kBlocksPerFrame = 3;  // Number of 10 ms blocks per frame.
  const int kFrameSizeMs = kBlocksPerFrame * kTimeStepMs;
  const int kSamples = kBlockSize16kHz * kBlocksPerFrame;
  const int kPayloadBytes = kSamples * sizeof(int16_t);
  double next_input_time_ms = 0.0;
  int16_t decoded[kBlockSize16kHz];
  int num_channels;
  int samples_per_channel;
  NetEqOutputType output_type;
  uint32_t receive_timestamp = 0;

  // Insert speech for 2 seconds.
  const int kSpeechDurationMs = 2000;
  int packets_inserted = 0;
  uint16_t last_seq_no;
  uint32_t last_timestamp;
  bool timestamp_wrapped = false;
  bool seq_no_wrapped = false;
  for (double t_ms = 0; t_ms < kSpeechDurationMs; t_ms += 10) {
    // Each turn in this for loop is 10 ms.
    while (next_input_time_ms <= t_ms) {
      // Insert one 30 ms speech frame.
      uint8_t payload[kPayloadBytes] = {0};
      WebRtcRTPHeader rtp_info;
      PopulateRtpInfo(seq_no, timestamp, &rtp_info);
      if (drop_seq_numbers.find(seq_no) == drop_seq_numbers.end()) {
        // This sequence number was not in the set to drop. Insert it.
        ASSERT_EQ(0,
                  neteq_->InsertPacket(rtp_info, payload, kPayloadBytes,
                                       receive_timestamp));
        ++packets_inserted;
      }
      NetEqNetworkStatistics network_stats;
      ASSERT_EQ(0, neteq_->NetworkStatistics(&network_stats));

      // Due to internal NetEq logic, the preferred buffer size is about four
      // times the packet size for the first few packets, so we refrain from
      // checking until after that.
      if (packets_inserted > 4) {
        // Expect preferred and actual buffer size to be no more than 2 frames.
        EXPECT_LE(network_stats.preferred_buffer_size_ms, kFrameSizeMs * 2);
        EXPECT_LE(network_stats.current_buffer_size_ms,
                  kFrameSizeMs * 2 + algorithmic_delay_ms_);
      }
      last_seq_no = seq_no;
      last_timestamp = timestamp;

      ++seq_no;
      timestamp += kSamples;
      receive_timestamp += kSamples;
      next_input_time_ms += static_cast<double>(kFrameSizeMs);

      seq_no_wrapped |= seq_no < last_seq_no;
      timestamp_wrapped |= timestamp < last_timestamp;
    }
    // Pull out data once.
    ASSERT_EQ(0, neteq_->GetAudio(kBlockSize16kHz, decoded,
                                  &samples_per_channel, &num_channels,
                                  &output_type));
    ASSERT_EQ(kBlockSize16kHz, samples_per_channel);
    ASSERT_EQ(1, num_channels);

    // Expect delay (in samples) to be less than 2 packets.
    EXPECT_LE(timestamp - neteq_->PlayoutTimestamp(),
              static_cast<uint32_t>(kSamples * 2));
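    // Note that the unsigned subtraction above stays correct across a
    // timestamp wrap, as long as the true delay is far below 2^31 samples.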
  }
  // Make sure we have actually tested wrap-around.
  ASSERT_EQ(expect_seq_no_wrap, seq_no_wrapped);
  ASSERT_EQ(expect_timestamp_wrap, timestamp_wrapped);
}

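// With 30 ms frames, a start sequence number of 0xFFFF - 10 wraps after about
// a dozen packets (roughly 330 ms into the test), and a start timestamp a few
// thousand ticks below 0xFFFFFFFF wraps within the first several frames, so
// both cases are exercised well inside the two-second run.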
TEST_F(NetEqDecodingTest, SequenceNumberWrap) {
  // Start with a sequence number that will soon wrap.
  std::set<uint16_t> drop_seq_numbers;  // Don't drop any packets.
  WrapTest(0xFFFF - 10, 0, drop_seq_numbers, true, false);
}

TEST_F(NetEqDecodingTest, SequenceNumberWrapAndDrop) {
  // Start with a sequence number that will soon wrap.
  std::set<uint16_t> drop_seq_numbers;
  drop_seq_numbers.insert(0xFFFF);
  drop_seq_numbers.insert(0x0);
  WrapTest(0xFFFF - 10, 0, drop_seq_numbers, true, false);
}

TEST_F(NetEqDecodingTest, TimestampWrap) {
  // Start with a timestamp that will soon wrap.
  std::set<uint16_t> drop_seq_numbers;
  WrapTest(0, 0xFFFFFFFF - 3000, drop_seq_numbers, false, true);
}

TEST_F(NetEqDecodingTest, TimestampAndSequenceNumberWrap) {
  // Start with a timestamp and a sequence number that will wrap at the same
  // time.
  std::set<uint16_t> drop_seq_numbers;
  WrapTest(0xFFFF - 10, 0xFFFFFFFF - 5000, drop_seq_numbers, true, true);
}

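// Helper for the DiscardDuplicateCng test below. Plays out some speech, then
// inserts the same CNG packet twice, and checks that the playout timestamp
// stands still during the comfort-noise period and that decoding returns to
// normal speech afterwards.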
void NetEqDecodingTest::DuplicateCng() {
  uint16_t seq_no = 0;
  uint32_t timestamp = 0;
  const int kFrameSizeMs = 10;
  const int kSampleRateKhz = 16;
  const int kSamples = kFrameSizeMs * kSampleRateKhz;
  const int kPayloadBytes = kSamples * 2;

  const int algorithmic_delay_samples = std::max(
      algorithmic_delay_ms_ * kSampleRateKhz, 5 * kSampleRateKhz / 8);
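  // Note: 5 * kSampleRateKhz / 8 equals 10 samples at 16 kHz, presumably the
  // smallest delay NetEq introduces even when |algorithmic_delay_ms_| is zero.
  // The playout-timestamp checks below are offset by this amount.
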
  // Insert three speech packets. Three are needed to get the frame length
  // correct.
  int out_len;
  int num_channels;
  NetEqOutputType type;
  uint8_t payload[kPayloadBytes] = {0};
  WebRtcRTPHeader rtp_info;
  for (int i = 0; i < 3; ++i) {
    PopulateRtpInfo(seq_no, timestamp, &rtp_info);
    ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes, 0));
    ++seq_no;
    timestamp += kSamples;

    // Pull audio once.
    ASSERT_EQ(0,
              neteq_->GetAudio(
                  kMaxBlockSize, out_data_, &out_len, &num_channels, &type));
    ASSERT_EQ(kBlockSize16kHz, out_len);
  }
  // Verify speech output.
  EXPECT_EQ(kOutputNormal, type);

  // Insert the same CNG packet twice.
  const int kCngPeriodMs = 100;
  const int kCngPeriodSamples = kCngPeriodMs * kSampleRateKhz;
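  // kCngPeriodSamples is 100 ms * 16 samples/ms = 1600 samples; this is how
  // far |timestamp| advances before the next speech packet is inserted below.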
  int payload_len;
  PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len);
  // This is the first time this CNG packet is inserted.
  ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, payload_len, 0));

  // Pull audio once and make sure CNG is played.
  ASSERT_EQ(0,
            neteq_->GetAudio(
                kMaxBlockSize, out_data_, &out_len, &num_channels, &type));
  ASSERT_EQ(kBlockSize16kHz, out_len);
  EXPECT_EQ(kOutputCNG, type);
  EXPECT_EQ(timestamp - algorithmic_delay_samples, neteq_->PlayoutTimestamp());

  // Insert the same CNG packet again. Note that at this point it is old, since
  // we have already decoded the first copy of it.
  ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, payload_len, 0));

  // Pull audio until we have played |kCngPeriodMs| of CNG. Start at 10 ms
  // since we have already pulled out CNG once.
  for (int cng_time_ms = 10; cng_time_ms < kCngPeriodMs; cng_time_ms += 10) {
    ASSERT_EQ(0,
              neteq_->GetAudio(
                  kMaxBlockSize, out_data_, &out_len, &num_channels, &type));
    ASSERT_EQ(kBlockSize16kHz, out_len);
    EXPECT_EQ(kOutputCNG, type);
    EXPECT_EQ(timestamp - algorithmic_delay_samples,
              neteq_->PlayoutTimestamp());
  }
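  // Throughout the comfort-noise period the playout timestamp has stayed at
  // the CNG packet's timestamp (minus the algorithmic delay). Together with
  // the return to normal speech below, this indicates that the second copy of
  // the CNG packet was discarded rather than decoded again.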

  // Insert speech again.
  ++seq_no;
  timestamp += kCngPeriodSamples;
  PopulateRtpInfo(seq_no, timestamp, &rtp_info);
  ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes, 0));

  // Pull audio once and verify that the output is speech again.
  ASSERT_EQ(0,
            neteq_->GetAudio(
                kMaxBlockSize, out_data_, &out_len, &num_channels, &type));
  ASSERT_EQ(kBlockSize16kHz, out_len);
  EXPECT_EQ(kOutputNormal, type);
  EXPECT_EQ(timestamp + kSamples - algorithmic_delay_samples,
            neteq_->PlayoutTimestamp());
}

TEST_F(NetEqDecodingTest, DiscardDuplicateCng) { DuplicateCng(); }
}  // namespace webrtc