blob: 90dc7171088e19dddb090e383f417595a930ab25 [file] [log] [blame]
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +00001/*
2 * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
11/*
12 * This file includes unit tests for NetEQ.
13 */
14
15#include "webrtc/modules/audio_coding/neteq4/interface/neteq.h"
16
pbos@webrtc.org3ecc1622014-03-07 15:23:34 +000017#include <math.h>
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +000018#include <stdlib.h>
19#include <string.h> // memset
20
turaj@webrtc.org8d1cdaa2014-04-11 18:47:55 +000021#include <algorithm>
turaj@webrtc.org78b41a02013-11-22 20:27:07 +000022#include <set>
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +000023#include <string>
24#include <vector>
25
turaj@webrtc.orga6101d72013-10-01 22:01:09 +000026#include "gflags/gflags.h"
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +000027#include "gtest/gtest.h"
28#include "webrtc/modules/audio_coding/neteq4/test/NETEQTEST_RTPpacket.h"
turaj@webrtc.orgff43c852013-09-25 00:07:27 +000029#include "webrtc/modules/audio_coding/codecs/pcm16b/include/pcm16b.h"
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +000030#include "webrtc/test/testsupport/fileutils.h"
henrike@webrtc.orga950300b2013-07-08 18:53:54 +000031#include "webrtc/test/testsupport/gtest_disable.h"
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +000032#include "webrtc/typedefs.h"
33
turaj@webrtc.orga6101d72013-10-01 22:01:09 +000034DEFINE_bool(gen_ref, false, "Generate reference files.");
35
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +000036namespace webrtc {
37
// Returns true if the first |buf_length| samples of |buf| are all zero.
// An empty range (|buf_length| == 0) counts as all-zero.
static bool IsAllZero(const int16_t* buf, int buf_length) {
  for (int i = 0; i < buf_length; ++i) {
    if (buf[i] != 0)
      return false;
  }
  return true;
}
44
// Returns true if none of the first |buf_length| samples of |buf| is zero.
// An empty range (|buf_length| == 0) counts as all-non-zero.
static bool IsAllNonZero(const int16_t* buf, int buf_length) {
  for (int i = 0; i < buf_length; ++i) {
    if (buf[i] == 0)
      return false;
  }
  return true;
}
51
// Helper for recording and verifying test reference data. Constructed with
// a (possibly empty) reference input file name and output file name: results
// written via WriteToFile() go to the output file, and ProcessReference()
// additionally reads previously recorded data from the input file and
// compares. An empty file name disables the corresponding stream.
class RefFiles {
 public:
  RefFiles(const std::string& input_file, const std::string& output_file);
  ~RefFiles();
  // Writes |test_results| to the output file (if open), then reads the
  // corresponding reference data from the input file (if open) and compares.
  template<class T> void ProcessReference(const T& test_results);
  // Array variants: only the first |length| elements are processed.
  template<typename T, size_t n> void ProcessReference(
      const T (&test_results)[n],
      size_t length);
  template<typename T, size_t n> void WriteToFile(
      const T (&test_results)[n],
      size_t length);
  template<typename T, size_t n> void ReadFromFileAndCompare(
      const T (&test_results)[n],
      size_t length);
  void WriteToFile(const NetEqNetworkStatistics& stats);
  void ReadFromFileAndCompare(const NetEqNetworkStatistics& stats);
  void WriteToFile(const RtcpStatistics& stats);
  void ReadFromFileAndCompare(const RtcpStatistics& stats);

  FILE* input_fp_;   // Reference input stream; NULL if no input file given.
  FILE* output_fp_;  // Reference output stream; NULL if no output file given.
};
74
75RefFiles::RefFiles(const std::string &input_file,
76 const std::string &output_file)
77 : input_fp_(NULL),
78 output_fp_(NULL) {
79 if (!input_file.empty()) {
80 input_fp_ = fopen(input_file.c_str(), "rb");
81 EXPECT_TRUE(input_fp_ != NULL);
82 }
83 if (!output_file.empty()) {
84 output_fp_ = fopen(output_file.c_str(), "wb");
85 EXPECT_TRUE(output_fp_ != NULL);
86 }
87}
88
89RefFiles::~RefFiles() {
90 if (input_fp_) {
91 EXPECT_EQ(EOF, fgetc(input_fp_)); // Make sure that we reached the end.
92 fclose(input_fp_);
93 }
94 if (output_fp_) fclose(output_fp_);
95}
96
// Records |test_results| to the output file (if open), then verifies it
// against the data previously recorded in the reference input file (if open).
template<class T>
void RefFiles::ProcessReference(const T& test_results) {
  WriteToFile(test_results);
  ReadFromFileAndCompare(test_results);
}
102
// Array variant of ProcessReference(): records the first |length| elements of
// |test_results| to the output file (if open), then reads the same amount of
// reference data from the input file (if open) and compares.
template<typename T, size_t n>
void RefFiles::ProcessReference(const T (&test_results)[n], size_t length) {
  WriteToFile(test_results, length);
  ReadFromFileAndCompare(test_results, length);
}
108
109template<typename T, size_t n>
110void RefFiles::WriteToFile(const T (&test_results)[n], size_t length) {
111 if (output_fp_) {
112 ASSERT_EQ(length, fwrite(&test_results, sizeof(T), length, output_fp_));
113 }
114}
115
116template<typename T, size_t n>
117void RefFiles::ReadFromFileAndCompare(const T (&test_results)[n],
118 size_t length) {
119 if (input_fp_) {
120 // Read from ref file.
121 T* ref = new T[length];
122 ASSERT_EQ(length, fread(ref, sizeof(T), length, input_fp_));
123 // Compare
124 ASSERT_EQ(0, memcmp(&test_results, ref, sizeof(T) * length));
125 delete [] ref;
126 }
127}
128
129void RefFiles::WriteToFile(const NetEqNetworkStatistics& stats) {
130 if (output_fp_) {
131 ASSERT_EQ(1u, fwrite(&stats, sizeof(NetEqNetworkStatistics), 1,
132 output_fp_));
133 }
134}
135
136void RefFiles::ReadFromFileAndCompare(
137 const NetEqNetworkStatistics& stats) {
138 if (input_fp_) {
139 // Read from ref file.
140 size_t stat_size = sizeof(NetEqNetworkStatistics);
141 NetEqNetworkStatistics ref_stats;
142 ASSERT_EQ(1u, fread(&ref_stats, stat_size, 1, input_fp_));
143 // Compare
144 EXPECT_EQ(0, memcmp(&stats, &ref_stats, stat_size));
145 }
146}
147
// Appends selected RtcpStatistics fields to the output file, field by field.
// NOTE: the field order below defines the reference file format and must stay
// in sync with ReadFromFileAndCompare(const RtcpStatistics&).
void RefFiles::WriteToFile(const RtcpStatistics& stats) {
  if (output_fp_) {
    ASSERT_EQ(1u, fwrite(&(stats.fraction_lost), sizeof(stats.fraction_lost), 1,
                         output_fp_));
    ASSERT_EQ(1u, fwrite(&(stats.cumulative_lost),
                         sizeof(stats.cumulative_lost), 1, output_fp_));
    ASSERT_EQ(1u, fwrite(&(stats.extended_max_sequence_number),
                         sizeof(stats.extended_max_sequence_number), 1,
                         output_fp_));
    ASSERT_EQ(1u, fwrite(&(stats.jitter), sizeof(stats.jitter), 1,
                         output_fp_));
  }
}
161
// Reads RtcpStatistics fields from the input file (if open) and compares them
// field by field against |stats|. The read order must match the write order
// in WriteToFile(const RtcpStatistics&), since that defines the file format.
void RefFiles::ReadFromFileAndCompare(
    const RtcpStatistics& stats) {
  if (input_fp_) {
    // Read from ref file.
    RtcpStatistics ref_stats;
    ASSERT_EQ(1u, fread(&(ref_stats.fraction_lost),
                        sizeof(ref_stats.fraction_lost), 1, input_fp_));
    ASSERT_EQ(1u, fread(&(ref_stats.cumulative_lost),
                        sizeof(ref_stats.cumulative_lost), 1, input_fp_));
    ASSERT_EQ(1u, fread(&(ref_stats.extended_max_sequence_number),
                        sizeof(ref_stats.extended_max_sequence_number), 1,
                        input_fp_));
    ASSERT_EQ(1u, fread(&(ref_stats.jitter), sizeof(ref_stats.jitter), 1,
                        input_fp_));
    // Compare
    EXPECT_EQ(ref_stats.fraction_lost, stats.fraction_lost);
    EXPECT_EQ(ref_stats.cumulative_lost, stats.cumulative_lost);
    EXPECT_EQ(ref_stats.extended_max_sequence_number,
              stats.extended_max_sequence_number);
    EXPECT_EQ(ref_stats.jitter, stats.jitter);
  }
}
184
// Test fixture that creates a NetEq instance (8 kHz initial sample rate) and
// provides helpers for driving it from RTP dump files and comparing the
// decoded output and statistics against recorded reference data.
class NetEqDecodingTest : public ::testing::Test {
 protected:
  // NetEQ must be polled for data once every 10 ms. Thus, neither of the
  // constants below can be changed.
  static const int kTimeStepMs = 10;
  static const int kBlockSize8kHz = kTimeStepMs * 8;
  static const int kBlockSize16kHz = kTimeStepMs * 16;
  static const int kBlockSize32kHz = kTimeStepMs * 32;
  static const int kMaxBlockSize = kBlockSize32kHz;
  static const int kInitSampleRateHz = 8000;

  NetEqDecodingTest();
  virtual void SetUp();
  virtual void TearDown();
  void SelectDecoders(NetEqDecoder* used_codec);
  // Registers all codec payload types used by the tests with |neteq_|.
  void LoadDecoders();
  // Opens |rtp_file| and skips its file header; fails the test on error.
  void OpenInputFile(const std::string &rtp_file);
  // Inserts all packets due at the current simulated time, then pulls one
  // 10 ms audio block into |out_data_| and advances |sim_clock_|.
  void Process(NETEQTEST_RTPpacket* rtp_ptr, int* out_len);
  // Decodes |rtp_file| and compares the audio output against |ref_file|;
  // an empty |ref_file| regenerates the reference instead.
  void DecodeAndCompare(const std::string &rtp_file,
                        const std::string &ref_file);
  // Decodes |rtp_file| and checks network/RTCP statistics against the given
  // reference files (empty names regenerate the references).
  void DecodeAndCheckStats(const std::string &rtp_file,
                           const std::string &stat_ref_file,
                           const std::string &rtcp_ref_file);
  // Fills |rtp_info| with a PCM16b WB (payload type 94) header.
  static void PopulateRtpInfo(int frame_index,
                              int timestamp,
                              WebRtcRTPHeader* rtp_info);
  // Fills |rtp_info| and a one-byte WB CNG payload (noise level only).
  static void PopulateCng(int frame_index,
                          int timestamp,
                          WebRtcRTPHeader* rtp_info,
                          uint8_t* payload,
                          int* payload_len);

  void CheckBgnOff(int sampling_rate, NetEqBackgroundNoiseMode bgn_mode);

  void WrapTest(uint16_t start_seq_no, uint32_t start_timestamp,
                const std::set<uint16_t>& drop_seq_numbers,
                bool expect_seq_no_wrap, bool expect_timestamp_wrap);

  void LongCngWithClockDrift(double drift_factor,
                             double network_freeze_ms,
                             bool pull_audio_during_freeze,
                             int delay_tolerance_ms,
                             int max_time_to_speech_ms);

  void DuplicateCng();

  NetEq* neteq_;  // Owned; created in SetUp(), deleted in TearDown().
  FILE* rtp_fp_;  // Currently open RTP dump file; may be NULL.
  unsigned int sim_clock_;  // Simulated wall clock, in ms.
  int16_t out_data_[kMaxBlockSize];  // Most recent audio output block.
  int output_sample_rate_;  // Sample rate inferred from the last output.
  int algorithmic_delay_ms_;  // Initial buffer delay, captured in SetUp().
};
238
// Out-of-class definitions for the static const members above, so that they
// can be odr-used (e.g. bound to references inside gtest assertion macros).
const int NetEqDecodingTest::kTimeStepMs;
const int NetEqDecodingTest::kBlockSize8kHz;
const int NetEqDecodingTest::kBlockSize16kHz;
const int NetEqDecodingTest::kBlockSize32kHz;
const int NetEqDecodingTest::kMaxBlockSize;
const int NetEqDecodingTest::kInitSampleRateHz;
246
247NetEqDecodingTest::NetEqDecodingTest()
248 : neteq_(NULL),
249 rtp_fp_(NULL),
250 sim_clock_(0),
turaj@webrtc.org8d1cdaa2014-04-11 18:47:55 +0000251 output_sample_rate_(kInitSampleRateHz),
252 algorithmic_delay_ms_(0) {
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000253 memset(out_data_, 0, sizeof(out_data_));
254}
255
256void NetEqDecodingTest::SetUp() {
henrik.lundin@webrtc.org35ead382014-04-14 18:49:17 +0000257 NetEq::Config config;
258 config.sample_rate_hz = kInitSampleRateHz;
259 neteq_ = NetEq::Create(config);
turaj@webrtc.org8d1cdaa2014-04-11 18:47:55 +0000260 NetEqNetworkStatistics stat;
261 ASSERT_EQ(0, neteq_->NetworkStatistics(&stat));
262 algorithmic_delay_ms_ = stat.current_buffer_size_ms;
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000263 ASSERT_TRUE(neteq_);
264 LoadDecoders();
265}
266
267void NetEqDecodingTest::TearDown() {
268 delete neteq_;
269 if (rtp_fp_)
270 fclose(rtp_fp_);
271}
272
// Registers every codec payload type used by the test vectors with |neteq_|.
// iLBC and the wideband/fullband iSAC variants are excluded on Android
// (see the WEBRTC_ANDROID guards below).
void NetEqDecodingTest::LoadDecoders() {
  // Load PCMu.
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderPCMu, 0));
  // Load PCMa.
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderPCMa, 8));
#ifndef WEBRTC_ANDROID
  // Load iLBC.
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderILBC, 102));
#endif  // WEBRTC_ANDROID
  // Load iSAC.
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderISAC, 103));
#ifndef WEBRTC_ANDROID
  // Load iSAC SWB.
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderISACswb, 104));
  // Load iSAC FB.
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderISACfb, 105));
#endif  // WEBRTC_ANDROID
  // Load PCM16B nb.
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderPCM16B, 93));
  // Load PCM16B wb.
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderPCM16Bwb, 94));
  // Load PCM16B swb32.
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderPCM16Bswb32kHz, 95));
  // Load CNG 8 kHz.
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderCNGnb, 13));
  // Load CNG 16 kHz.
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderCNGwb, 98));
}
301
302void NetEqDecodingTest::OpenInputFile(const std::string &rtp_file) {
303 rtp_fp_ = fopen(rtp_file.c_str(), "rb");
304 ASSERT_TRUE(rtp_fp_ != NULL);
305 ASSERT_EQ(0, NETEQTEST_RTPpacket::skipFileHeader(rtp_fp_));
306}
307
// Runs one 10 ms simulation step: inserts every packet that is due at the
// current simulated time into NetEq, pulls one block of audio into
// |out_data_| (length returned in |out_len|), and advances |sim_clock_|.
// |rtp| holds the next unread packet and is advanced by reading |rtp_fp_|.
void NetEqDecodingTest::Process(NETEQTEST_RTPpacket* rtp, int* out_len) {
  // Check if time to receive.
  while ((sim_clock_ >= rtp->time()) &&
         (rtp->dataLen() >= 0)) {  // dataLen() < 0 presumably marks EOF --
                                   // see the loop condition in the callers.
    if (rtp->dataLen() > 0) {
      WebRtcRTPHeader rtpInfo;
      rtp->parseHeader(&rtpInfo);
      // Receive timestamp: arrival time in ms converted to samples at the
      // current output rate.
      ASSERT_EQ(0, neteq_->InsertPacket(
          rtpInfo,
          rtp->payload(),
          rtp->payloadLen(),
          rtp->time() * (output_sample_rate_ / 1000)));
    }
    // Get next packet.
    ASSERT_NE(-1, rtp->readFromFile(rtp_fp_));
  }

  // Get audio from NetEq.
  NetEqOutputType type;
  int num_channels;
  ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, out_len,
                                &num_channels, &type));
  ASSERT_TRUE((*out_len == kBlockSize8kHz) ||
              (*out_len == kBlockSize16kHz) ||
              (*out_len == kBlockSize32kHz));
  // A 10 ms block of N samples implies a sample rate of N * 100 Hz.
  output_sample_rate_ = *out_len / 10 * 1000;

  // Increase time.
  sim_clock_ += kTimeStepMs;
}
338
// Decodes all packets in |rtp_file| through NetEq, comparing every 10 ms
// output block against |ref_file|. If |ref_file| is empty (the --gen_ref
// case), the output is written to a default reference file instead.
void NetEqDecodingTest::DecodeAndCompare(const std::string &rtp_file,
                                         const std::string &ref_file) {
  OpenInputFile(rtp_file);

  std::string ref_out_file = "";
  if (ref_file.empty()) {
    ref_out_file = webrtc::test::OutputPath() + "neteq_universal_ref.pcm";
  }
  RefFiles ref_files(ref_file, ref_out_file);

  NETEQTEST_RTPpacket rtp;
  ASSERT_GT(rtp.readFromFile(rtp_fp_), 0);
  int i = 0;
  while (rtp.dataLen() >= 0) {
    std::ostringstream ss;
    ss << "Lap number " << i++ << " in DecodeAndCompare while loop";
    SCOPED_TRACE(ss.str());  // Print out the parameter values on failure.
    int out_len = 0;
    // Stop the loop immediately if Process() or the reference comparison
    // hits a fatal failure.
    ASSERT_NO_FATAL_FAILURE(Process(&rtp, &out_len));
    ASSERT_NO_FATAL_FAILURE(ref_files.ProcessReference(out_data_, out_len));
  }
}
361
362void NetEqDecodingTest::DecodeAndCheckStats(const std::string &rtp_file,
363 const std::string &stat_ref_file,
364 const std::string &rtcp_ref_file) {
365 OpenInputFile(rtp_file);
366 std::string stat_out_file = "";
367 if (stat_ref_file.empty()) {
368 stat_out_file = webrtc::test::OutputPath() +
369 "neteq_network_stats.dat";
370 }
371 RefFiles network_stat_files(stat_ref_file, stat_out_file);
372
373 std::string rtcp_out_file = "";
374 if (rtcp_ref_file.empty()) {
375 rtcp_out_file = webrtc::test::OutputPath() +
376 "neteq_rtcp_stats.dat";
377 }
378 RefFiles rtcp_stat_files(rtcp_ref_file, rtcp_out_file);
379
380 NETEQTEST_RTPpacket rtp;
381 ASSERT_GT(rtp.readFromFile(rtp_fp_), 0);
382 while (rtp.dataLen() >= 0) {
383 int out_len;
384 Process(&rtp, &out_len);
385
386 // Query the network statistics API once per second
387 if (sim_clock_ % 1000 == 0) {
388 // Process NetworkStatistics.
389 NetEqNetworkStatistics network_stats;
390 ASSERT_EQ(0, neteq_->NetworkStatistics(&network_stats));
391 network_stat_files.ProcessReference(network_stats);
392
393 // Process RTCPstat.
394 RtcpStatistics rtcp_stats;
395 neteq_->GetRtcpStatistics(&rtcp_stats);
396 rtcp_stat_files.ProcessReference(rtcp_stats);
397 }
398 }
399}
400
401void NetEqDecodingTest::PopulateRtpInfo(int frame_index,
402 int timestamp,
403 WebRtcRTPHeader* rtp_info) {
404 rtp_info->header.sequenceNumber = frame_index;
405 rtp_info->header.timestamp = timestamp;
406 rtp_info->header.ssrc = 0x1234; // Just an arbitrary SSRC.
407 rtp_info->header.payloadType = 94; // PCM16b WB codec.
408 rtp_info->header.markerBit = 0;
409}
410
411void NetEqDecodingTest::PopulateCng(int frame_index,
412 int timestamp,
413 WebRtcRTPHeader* rtp_info,
414 uint8_t* payload,
415 int* payload_len) {
416 rtp_info->header.sequenceNumber = frame_index;
417 rtp_info->header.timestamp = timestamp;
418 rtp_info->header.ssrc = 0x1234; // Just an arbitrary SSRC.
419 rtp_info->header.payloadType = 98; // WB CNG.
420 rtp_info->header.markerBit = 0;
421 payload[0] = 64; // Noise level -64 dBov, quite arbitrarily chosen.
422 *payload_len = 1; // Only noise level, no spectral parameters.
423}
424
// Verifies NetEq's PLC-to-CNG behavior for the given sampling rate and
// background-noise mode: feeds 10 packets of random PCM16 audio, then keeps
// pulling audio with no further input and checks that once the output type
// becomes kOutputPLCtoCNG the signal energy matches the mode (non-zero for
// kBgnOn; zero for kBgnOff, or after fading in the other modes).
void NetEqDecodingTest::CheckBgnOff(int sampling_rate_hz,
                                    NetEqBackgroundNoiseMode bgn_mode) {
  int expected_samples_per_channel = 0;
  uint8_t payload_type = 0xFF;  // Invalid.
  if (sampling_rate_hz == 8000) {
    expected_samples_per_channel = kBlockSize8kHz;
    payload_type = 93;  // PCM 16, 8 kHz.
  } else if (sampling_rate_hz == 16000) {
    expected_samples_per_channel = kBlockSize16kHz;
    payload_type = 94;  // PCM 16, 16 kHZ.
  } else if (sampling_rate_hz == 32000) {
    expected_samples_per_channel = kBlockSize32kHz;
    payload_type = 95;  // PCM 16, 32 kHz.
  } else {
    ASSERT_TRUE(false);  // Unsupported test case.
  }

  NetEqOutputType type;
  int16_t output[kBlockSize32kHz];  // Maximum size is chosen.
  int16_t input[kBlockSize32kHz];  // Maximum size is chosen.

  // Payload of 10 ms of PCM16 32 kHz.
  uint8_t payload[kBlockSize32kHz * sizeof(int16_t)];

  // Random payload.
  for (int n = 0; n < expected_samples_per_channel; ++n) {
    input[n] = (rand() & ((1 << 10) - 1)) - ((1 << 5) - 1);
  }
  int enc_len_bytes = WebRtcPcm16b_EncodeW16(
      input, expected_samples_per_channel, reinterpret_cast<int16_t*>(payload));
  // PCM16 encodes to 2 bytes per sample.
  ASSERT_EQ(enc_len_bytes, expected_samples_per_channel * 2);

  WebRtcRTPHeader rtp_info;
  PopulateRtpInfo(0, 0, &rtp_info);
  rtp_info.header.payloadType = payload_type;

  int number_channels = 0;
  int samples_per_channel = 0;

  uint32_t receive_timestamp = 0;
  for (int n = 0; n < 10; ++n) {  // Insert few packets and get audio.
    number_channels = 0;
    samples_per_channel = 0;
    ASSERT_EQ(0, neteq_->InsertPacket(
        rtp_info, payload, enc_len_bytes, receive_timestamp));
    ASSERT_EQ(0, neteq_->GetAudio(kBlockSize32kHz, output, &samples_per_channel,
                                  &number_channels, &type));
    ASSERT_EQ(1, number_channels);
    ASSERT_EQ(expected_samples_per_channel, samples_per_channel);
    ASSERT_EQ(kOutputNormal, type);

    // Next packet.
    rtp_info.header.timestamp += expected_samples_per_channel;
    rtp_info.header.sequenceNumber++;
    receive_timestamp += expected_samples_per_channel;
  }

  number_channels = 0;
  samples_per_channel = 0;

  // Get audio without inserting packets, expecting PLC and PLC-to-CNG. Pull one
  // frame without checking speech-type. This is the first frame pulled without
  // inserting any packet, and might not be labeled as PCL.
  ASSERT_EQ(0, neteq_->GetAudio(kBlockSize32kHz, output, &samples_per_channel,
                                &number_channels, &type));
  ASSERT_EQ(1, number_channels);
  ASSERT_EQ(expected_samples_per_channel, samples_per_channel);

  // To be able to test the fading of background noise we need at lease to pull
  // 611 frames.
  const int kFadingThreshold = 611;

  // Test several CNG-to-PLC packet for the expected behavior. The number 20 is
  // arbitrary, but sufficiently large to test enough number of frames.
  const int kNumPlcToCngTestFrames = 20;
  bool plc_to_cng = false;
  for (int n = 0; n < kFadingThreshold + kNumPlcToCngTestFrames; ++n) {
    number_channels = 0;
    samples_per_channel = 0;
    memset(output, 1, sizeof(output));  // Set to non-zero.
    ASSERT_EQ(0, neteq_->GetAudio(kBlockSize32kHz, output, &samples_per_channel,
                                  &number_channels, &type));
    ASSERT_EQ(1, number_channels);
    ASSERT_EQ(expected_samples_per_channel, samples_per_channel);
    if (type == kOutputPLCtoCNG) {
      plc_to_cng = true;
      // Measure the output energy to decide whether background noise is
      // being generated (non-zero) or suppressed (all-zero).
      double sum_squared = 0;
      for (int k = 0; k < number_channels * samples_per_channel; ++k)
        sum_squared += output[k] * output[k];
      if (bgn_mode == kBgnOn) {
        EXPECT_NE(0, sum_squared);
      } else if (bgn_mode == kBgnOff || n > kFadingThreshold) {
        EXPECT_EQ(0, sum_squared);
      }
    } else {
      EXPECT_EQ(kOutputPLC, type);
    }
  }
  EXPECT_TRUE(plc_to_cng);  // Just to be sure that PLC-to-CNG has occurred.
}
525
#if defined(_WIN32) && defined(WEBRTC_ARCH_64_BITS)
// Disabled for Windows 64-bit until webrtc:1458 is fixed.
#define MAYBE_TestBitExactness DISABLED_TestBitExactness
#else
#define MAYBE_TestBitExactness TestBitExactness
#endif

// Decodes the universal RTP test vector and compares the audio output
// bit-exactly against a platform-appropriate reference file. Run with
// --gen_ref to regenerate the reference instead of comparing.
TEST_F(NetEqDecodingTest, DISABLED_ON_ANDROID(MAYBE_TestBitExactness)) {
  const std::string input_rtp_file = webrtc::test::ProjectRootPath() +
      "resources/audio_coding/neteq_universal_new.rtp";
#if defined(_MSC_VER) && (_MSC_VER >= 1700)
  // For Visual Studio 2012 and later, we will have to use the generic reference
  // file, rather than the windows-specific one.
  const std::string input_ref_file = webrtc::test::ProjectRootPath() +
      "resources/audio_coding/neteq4_universal_ref.pcm";
#else
  const std::string input_ref_file =
      webrtc::test::ResourcePath("audio_coding/neteq4_universal_ref", "pcm");
#endif

  if (FLAGS_gen_ref) {
    DecodeAndCompare(input_rtp_file, "");
  } else {
    DecodeAndCompare(input_rtp_file, input_ref_file);
  }
}
552
// Decodes the universal RTP test vector while verifying the network and RTCP
// statistics against recorded references. Run with --gen_ref to regenerate
// the reference files instead of comparing.
TEST_F(NetEqDecodingTest, DISABLED_ON_ANDROID(TestNetworkStatistics)) {
  const std::string input_rtp_file = webrtc::test::ProjectRootPath() +
      "resources/audio_coding/neteq_universal_new.rtp";
#if defined(_MSC_VER) && (_MSC_VER >= 1700)
  // For Visual Studio 2012 and later, we will have to use the generic reference
  // file, rather than the windows-specific one.
  const std::string network_stat_ref_file = webrtc::test::ProjectRootPath() +
      "resources/audio_coding/neteq4_network_stats.dat";
#else
  const std::string network_stat_ref_file =
      webrtc::test::ResourcePath("audio_coding/neteq4_network_stats", "dat");
#endif
  const std::string rtcp_stat_ref_file =
      webrtc::test::ResourcePath("audio_coding/neteq4_rtcp_stats", "dat");
  if (FLAGS_gen_ref) {
    DecodeAndCheckStats(input_rtp_file, "", "");
  } else {
    DecodeAndCheckStats(input_rtp_file, network_stat_ref_file,
                        rtcp_stat_ref_file);
  }
}
574
// TODO(hlundin): Re-enable test once the statistics interface is up and again.
// Verifies the per-packet waiting-time statistics: packets inserted in a
// burst but pulled at 10 ms intervals should report linearly increasing
// waiting times, querying resets the statistics, and the history is capped
// at 100 entries.
TEST_F(NetEqDecodingTest, DISABLED_ON_ANDROID(TestFrameWaitingTimeStatistics)) {
  // Use fax mode to avoid time-scaling. This is to simplify the testing of
  // packet waiting times in the packet buffer.
  neteq_->SetPlayoutMode(kPlayoutFax);
  ASSERT_EQ(kPlayoutFax, neteq_->PlayoutMode());
  // Insert 30 dummy packets at once. Each packet contains 10 ms 16 kHz audio.
  size_t num_frames = 30;
  const int kSamples = 10 * 16;
  const int kPayloadBytes = kSamples * 2;
  for (size_t i = 0; i < num_frames; ++i) {
    uint16_t payload[kSamples] = {0};
    WebRtcRTPHeader rtp_info;
    rtp_info.header.sequenceNumber = i;
    rtp_info.header.timestamp = i * kSamples;
    rtp_info.header.ssrc = 0x1234;  // Just an arbitrary SSRC.
    rtp_info.header.payloadType = 94;  // PCM16b WB codec.
    rtp_info.header.markerBit = 0;
    ASSERT_EQ(0, neteq_->InsertPacket(
        rtp_info,
        reinterpret_cast<uint8_t*>(payload),
        kPayloadBytes, 0));
  }
  // Pull out all data.
  for (size_t i = 0; i < num_frames; ++i) {
    int out_len;
    int num_channels;
    NetEqOutputType type;
    ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, &out_len,
                                  &num_channels, &type));
    ASSERT_EQ(kBlockSize16kHz, out_len);
  }

  std::vector<int> waiting_times;
  neteq_->WaitingTimes(&waiting_times);
  EXPECT_EQ(num_frames, waiting_times.size());
  // Since all frames are dumped into NetEQ at once, but pulled out with 10 ms
  // spacing (per definition), we expect the delay to increase with 10 ms for
  // each packet.
  for (size_t i = 0; i < waiting_times.size(); ++i) {
    EXPECT_EQ(static_cast<int>(i + 1) * 10, waiting_times[i]);
  }

  // Check statistics again and make sure it's been reset.
  neteq_->WaitingTimes(&waiting_times);
  int len = waiting_times.size();
  EXPECT_EQ(0, len);

  // Process > 100 frames, and make sure that that we get statistics
  // only for 100 frames. Note the new SSRC, causing NetEQ to reset.
  num_frames = 110;
  for (size_t i = 0; i < num_frames; ++i) {
    uint16_t payload[kSamples] = {0};
    WebRtcRTPHeader rtp_info;
    rtp_info.header.sequenceNumber = i;
    rtp_info.header.timestamp = i * kSamples;
    rtp_info.header.ssrc = 0x1235;  // Just an arbitrary SSRC.
    rtp_info.header.payloadType = 94;  // PCM16b WB codec.
    rtp_info.header.markerBit = 0;
    ASSERT_EQ(0, neteq_->InsertPacket(
        rtp_info,
        reinterpret_cast<uint8_t*>(payload),
        kPayloadBytes, 0));
    int out_len;
    int num_channels;
    NetEqOutputType type;
    ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, &out_len,
                                  &num_channels, &type));
    ASSERT_EQ(kBlockSize16kHz, out_len);
  }

  neteq_->WaitingTimes(&waiting_times);
  EXPECT_EQ(100u, waiting_times.size());
}
649
// Feeds packets slightly faster than real time (an extra packet every 10th
// frame) and checks that NetEq's clock-drift estimate converges to the
// expected negative drift (approximately -10%, i.e. -103196 ppm).
TEST_F(NetEqDecodingTest,
       DISABLED_ON_ANDROID(TestAverageInterArrivalTimeNegative)) {
  const int kNumFrames = 3000;  // Needed for convergence.
  int frame_index = 0;
  const int kSamples = 10 * 16;
  const int kPayloadBytes = kSamples * 2;
  while (frame_index < kNumFrames) {
    // Insert one packet each time, except every 10th time where we insert two
    // packets at once. This will create a negative clock-drift of approx. 10%.
    int num_packets = (frame_index % 10 == 0 ? 2 : 1);
    for (int n = 0; n < num_packets; ++n) {
      uint8_t payload[kPayloadBytes] = {0};
      WebRtcRTPHeader rtp_info;
      PopulateRtpInfo(frame_index, frame_index * kSamples, &rtp_info);
      ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes, 0));
      ++frame_index;
    }

    // Pull out data once.
    int out_len;
    int num_channels;
    NetEqOutputType type;
    ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, &out_len,
                                  &num_channels, &type));
    ASSERT_EQ(kBlockSize16kHz, out_len);
  }

  NetEqNetworkStatistics network_stats;
  ASSERT_EQ(0, neteq_->NetworkStatistics(&network_stats));
  EXPECT_EQ(-103196, network_stats.clockdrift_ppm);
}
681
// Feeds packets slightly slower than real time (one frame skipped every 10th
// pull) and checks that NetEq's clock-drift estimate converges to the
// expected positive drift (approximately +11%, i.e. 110946 ppm).
TEST_F(NetEqDecodingTest,
       DISABLED_ON_ANDROID(TestAverageInterArrivalTimePositive)) {
  const int kNumFrames = 5000;  // Needed for convergence.
  int frame_index = 0;
  const int kSamples = 10 * 16;
  const int kPayloadBytes = kSamples * 2;
  for (int i = 0; i < kNumFrames; ++i) {
    // Insert one packet each time, except every 10th time where we don't insert
    // any packet. This will create a positive clock-drift of approx. 11%.
    int num_packets = (i % 10 == 9 ? 0 : 1);
    for (int n = 0; n < num_packets; ++n) {
      uint8_t payload[kPayloadBytes] = {0};
      WebRtcRTPHeader rtp_info;
      PopulateRtpInfo(frame_index, frame_index * kSamples, &rtp_info);
      ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes, 0));
      ++frame_index;
    }

    // Pull out data once.
    int out_len;
    int num_channels;
    NetEqOutputType type;
    ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, &out_len,
                                  &num_channels, &type));
    ASSERT_EQ(kBlockSize16kHz, out_len);
  }

  NetEqNetworkStatistics network_stats;
  ASSERT_EQ(0, neteq_->NetworkStatistics(&network_stats));
  EXPECT_EQ(110946, network_stats.clockdrift_ppm);
}
713
// Long-running clock-drift scenario: stream 5 s of speech, then 60 s of
// comfort noise (CNG), optionally simulate a network freeze, and finally
// stream speech again. |drift_factor| scales the sender's frame period
// relative to the receiver's clock (1.0 = no drift; < 1.0 = sender faster).
// After the CNG/freeze phase the test verifies that playout returns to
// normal speech within |max_time_to_speech_ms|, and that the playout delay
// before and after the CNG period differs by at most |delay_tolerance_ms|.
// If |pull_audio_during_freeze| is true, one extra GetAudio call is made
// half-way through the recovery period after the freeze.
void NetEqDecodingTest::LongCngWithClockDrift(double drift_factor,
                                              double network_freeze_ms,
                                              bool pull_audio_during_freeze,
                                              int delay_tolerance_ms,
                                              int max_time_to_speech_ms) {
  uint16_t seq_no = 0;
  uint32_t timestamp = 0;
  const int kFrameSizeMs = 30;
  const int kSamples = kFrameSizeMs * 16;  // Samples per frame at 16 kHz.
  const int kPayloadBytes = kSamples * 2;  // 16-bit PCM.
  double next_input_time_ms = 0.0;  // Simulated arrival time of next packet.
  double t_ms;  // Simulated receiver wall-clock time.
  int out_len;
  int num_channels;
  NetEqOutputType type;

  // Insert speech for 5 seconds.
  const int kSpeechDurationMs = 5000;
  for (t_ms = 0; t_ms < kSpeechDurationMs; t_ms += 10) {
    // Each turn in this for loop is 10 ms.
    while (next_input_time_ms <= t_ms) {
      // Insert one 30 ms speech frame.
      uint8_t payload[kPayloadBytes] = {0};
      WebRtcRTPHeader rtp_info;
      PopulateRtpInfo(seq_no, timestamp, &rtp_info);
      ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes, 0));
      ++seq_no;
      timestamp += kSamples;
      // The drift factor stretches (or compresses) the sender's frame period.
      next_input_time_ms += static_cast<double>(kFrameSizeMs) * drift_factor;
    }
    // Pull out data once.
    ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, &out_len,
                                  &num_channels, &type));
    ASSERT_EQ(kBlockSize16kHz, out_len);
  }

  EXPECT_EQ(kOutputNormal, type);
  // Baseline playout delay (in samples) before the CNG period.
  int32_t delay_before = timestamp - neteq_->PlayoutTimestamp();

  // Insert CNG for 1 minute (= 60000 ms).
  const int kCngPeriodMs = 100;
  const int kCngPeriodSamples = kCngPeriodMs * 16;  // Period in 16 kHz samples.
  const int kCngDurationMs = 60000;
  for (; t_ms < kSpeechDurationMs + kCngDurationMs; t_ms += 10) {
    // Each turn in this for loop is 10 ms.
    while (next_input_time_ms <= t_ms) {
      // Insert one CNG frame each 100 ms.
      uint8_t payload[kPayloadBytes];
      int payload_len;
      WebRtcRTPHeader rtp_info;
      PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len);
      ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, payload_len, 0));
      ++seq_no;
      timestamp += kCngPeriodSamples;
      next_input_time_ms += static_cast<double>(kCngPeriodMs) * drift_factor;
    }
    // Pull out data once.
    ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, &out_len,
                                  &num_channels, &type));
    ASSERT_EQ(kBlockSize16kHz, out_len);
  }

  EXPECT_EQ(kOutputCNG, type);

  if (network_freeze_ms > 0) {
    // First keep pulling audio for |network_freeze_ms| without inserting
    // any data, then insert CNG data corresponding to |network_freeze_ms|
    // without pulling any output audio.
    const double loop_end_time = t_ms + network_freeze_ms;
    for (; t_ms < loop_end_time; t_ms += 10) {
      // Pull out data once.
      ASSERT_EQ(0,
                neteq_->GetAudio(
                    kMaxBlockSize, out_data_, &out_len, &num_channels, &type));
      ASSERT_EQ(kBlockSize16kHz, out_len);
      EXPECT_EQ(kOutputCNG, type);
    }
    bool pull_once = pull_audio_during_freeze;
    // If |pull_once| is true, GetAudio will be called once half-way through
    // the network recovery period.
    double pull_time_ms = (t_ms + next_input_time_ms) / 2;
    while (next_input_time_ms <= t_ms) {
      if (pull_once && next_input_time_ms >= pull_time_ms) {
        pull_once = false;
        // Pull out data once.
        ASSERT_EQ(
            0,
            neteq_->GetAudio(
                kMaxBlockSize, out_data_, &out_len, &num_channels, &type));
        ASSERT_EQ(kBlockSize16kHz, out_len);
        EXPECT_EQ(kOutputCNG, type);
        t_ms += 10;
      }
      // Insert one CNG frame each 100 ms.
      uint8_t payload[kPayloadBytes];
      int payload_len;
      WebRtcRTPHeader rtp_info;
      PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len);
      ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, payload_len, 0));
      ++seq_no;
      timestamp += kCngPeriodSamples;
      next_input_time_ms += kCngPeriodMs * drift_factor;
    }
  }

  // Insert speech again until output type is speech.
  double speech_restart_time_ms = t_ms;
  while (type != kOutputNormal) {
    // Each turn in this for loop is 10 ms.
    while (next_input_time_ms <= t_ms) {
      // Insert one 30 ms speech frame.
      uint8_t payload[kPayloadBytes] = {0};
      WebRtcRTPHeader rtp_info;
      PopulateRtpInfo(seq_no, timestamp, &rtp_info);
      ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes, 0));
      ++seq_no;
      timestamp += kSamples;
      next_input_time_ms += kFrameSizeMs * drift_factor;
    }
    // Pull out data once.
    ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, &out_len,
                                  &num_channels, &type));
    ASSERT_EQ(kBlockSize16kHz, out_len);
    // Increase clock.
    t_ms += 10;
  }

  // Check that the speech starts again within reasonable time.
  double time_until_speech_returns_ms = t_ms - speech_restart_time_ms;
  EXPECT_LT(time_until_speech_returns_ms, max_time_to_speech_ms);
  int32_t delay_after = timestamp - neteq_->PlayoutTimestamp();
  // Compare delay before and after, and make sure it differs less than
  // |delay_tolerance_ms| (16 samples per ms at 16 kHz).
  EXPECT_LE(delay_after, delay_before + delay_tolerance_ms * 16);
  EXPECT_GE(delay_after, delay_before - delay_tolerance_ms * 16);
}
849
henrik.lundin@webrtc.orged865b52014-03-06 10:28:07 +0000850TEST_F(NetEqDecodingTest, DISABLED_ON_ANDROID(LongCngWithNegativeClockDrift)) {
henrik.lundin@webrtc.orgfcfc6a92014-02-13 11:42:28 +0000851 // Apply a clock drift of -25 ms / s (sender faster than receiver).
852 const double kDriftFactor = 1000.0 / (1000.0 + 25.0);
henrik.lundin@webrtc.org24779fe2014-03-14 12:40:05 +0000853 const double kNetworkFreezeTimeMs = 0.0;
854 const bool kGetAudioDuringFreezeRecovery = false;
855 const int kDelayToleranceMs = 20;
856 const int kMaxTimeToSpeechMs = 100;
857 LongCngWithClockDrift(kDriftFactor,
858 kNetworkFreezeTimeMs,
859 kGetAudioDuringFreezeRecovery,
860 kDelayToleranceMs,
861 kMaxTimeToSpeechMs);
henrik.lundin@webrtc.orgfcfc6a92014-02-13 11:42:28 +0000862}
863
henrik.lundin@webrtc.orged865b52014-03-06 10:28:07 +0000864TEST_F(NetEqDecodingTest, DISABLED_ON_ANDROID(LongCngWithPositiveClockDrift)) {
henrik.lundin@webrtc.orgfcfc6a92014-02-13 11:42:28 +0000865 // Apply a clock drift of +25 ms / s (sender slower than receiver).
866 const double kDriftFactor = 1000.0 / (1000.0 - 25.0);
henrik.lundin@webrtc.org24779fe2014-03-14 12:40:05 +0000867 const double kNetworkFreezeTimeMs = 0.0;
868 const bool kGetAudioDuringFreezeRecovery = false;
869 const int kDelayToleranceMs = 20;
870 const int kMaxTimeToSpeechMs = 100;
871 LongCngWithClockDrift(kDriftFactor,
872 kNetworkFreezeTimeMs,
873 kGetAudioDuringFreezeRecovery,
874 kDelayToleranceMs,
875 kMaxTimeToSpeechMs);
876}
877
878TEST_F(NetEqDecodingTest,
879 DISABLED_ON_ANDROID(LongCngWithNegativeClockDriftNetworkFreeze)) {
880 // Apply a clock drift of -25 ms / s (sender faster than receiver).
881 const double kDriftFactor = 1000.0 / (1000.0 + 25.0);
882 const double kNetworkFreezeTimeMs = 5000.0;
883 const bool kGetAudioDuringFreezeRecovery = false;
884 const int kDelayToleranceMs = 50;
885 const int kMaxTimeToSpeechMs = 200;
886 LongCngWithClockDrift(kDriftFactor,
887 kNetworkFreezeTimeMs,
888 kGetAudioDuringFreezeRecovery,
889 kDelayToleranceMs,
890 kMaxTimeToSpeechMs);
891}
892
893TEST_F(NetEqDecodingTest,
894 DISABLED_ON_ANDROID(LongCngWithPositiveClockDriftNetworkFreeze)) {
895 // Apply a clock drift of +25 ms / s (sender slower than receiver).
896 const double kDriftFactor = 1000.0 / (1000.0 - 25.0);
897 const double kNetworkFreezeTimeMs = 5000.0;
898 const bool kGetAudioDuringFreezeRecovery = false;
899 const int kDelayToleranceMs = 20;
900 const int kMaxTimeToSpeechMs = 100;
901 LongCngWithClockDrift(kDriftFactor,
902 kNetworkFreezeTimeMs,
903 kGetAudioDuringFreezeRecovery,
904 kDelayToleranceMs,
905 kMaxTimeToSpeechMs);
906}
907
908TEST_F(
909 NetEqDecodingTest,
910 DISABLED_ON_ANDROID(LongCngWithPositiveClockDriftNetworkFreezeExtraPull)) {
911 // Apply a clock drift of +25 ms / s (sender slower than receiver).
912 const double kDriftFactor = 1000.0 / (1000.0 - 25.0);
913 const double kNetworkFreezeTimeMs = 5000.0;
914 const bool kGetAudioDuringFreezeRecovery = true;
915 const int kDelayToleranceMs = 20;
916 const int kMaxTimeToSpeechMs = 100;
917 LongCngWithClockDrift(kDriftFactor,
918 kNetworkFreezeTimeMs,
919 kGetAudioDuringFreezeRecovery,
920 kDelayToleranceMs,
921 kMaxTimeToSpeechMs);
922}
923
924TEST_F(NetEqDecodingTest, DISABLED_ON_ANDROID(LongCngWithoutClockDrift)) {
925 const double kDriftFactor = 1.0; // No drift.
926 const double kNetworkFreezeTimeMs = 0.0;
927 const bool kGetAudioDuringFreezeRecovery = false;
928 const int kDelayToleranceMs = 10;
929 const int kMaxTimeToSpeechMs = 50;
930 LongCngWithClockDrift(kDriftFactor,
931 kNetworkFreezeTimeMs,
932 kGetAudioDuringFreezeRecovery,
933 kDelayToleranceMs,
934 kMaxTimeToSpeechMs);
henrik.lundin@webrtc.orgfcfc6a92014-02-13 11:42:28 +0000935}
936
henrike@webrtc.orga950300b2013-07-08 18:53:54 +0000937TEST_F(NetEqDecodingTest, DISABLED_ON_ANDROID(UnknownPayloadType)) {
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000938 const int kPayloadBytes = 100;
939 uint8_t payload[kPayloadBytes] = {0};
940 WebRtcRTPHeader rtp_info;
941 PopulateRtpInfo(0, 0, &rtp_info);
942 rtp_info.header.payloadType = 1; // Not registered as a decoder.
943 EXPECT_EQ(NetEq::kFail,
944 neteq_->InsertPacket(rtp_info, payload, kPayloadBytes, 0));
945 EXPECT_EQ(NetEq::kUnknownRtpPayloadType, neteq_->LastError());
946}
947
minyue@webrtc.org7bb54362013-08-06 05:40:57 +0000948TEST_F(NetEqDecodingTest, DISABLED_ON_ANDROID(OversizePacket)) {
949 // Payload size is greater than packet buffer size
950 const int kPayloadBytes = NetEq::kMaxBytesInBuffer + 1;
951 uint8_t payload[kPayloadBytes] = {0};
952 WebRtcRTPHeader rtp_info;
953 PopulateRtpInfo(0, 0, &rtp_info);
954 rtp_info.header.payloadType = 103; // iSAC, no packet splitting.
955 EXPECT_EQ(NetEq::kFail,
956 neteq_->InsertPacket(rtp_info, payload, kPayloadBytes, 0));
957 EXPECT_EQ(NetEq::kOversizePacket, neteq_->LastError());
958}
959
henrike@webrtc.orga950300b2013-07-08 18:53:54 +0000960TEST_F(NetEqDecodingTest, DISABLED_ON_ANDROID(DecoderError)) {
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000961 const int kPayloadBytes = 100;
962 uint8_t payload[kPayloadBytes] = {0};
963 WebRtcRTPHeader rtp_info;
964 PopulateRtpInfo(0, 0, &rtp_info);
965 rtp_info.header.payloadType = 103; // iSAC, but the payload is invalid.
966 EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes, 0));
967 NetEqOutputType type;
968 // Set all of |out_data_| to 1, and verify that it was set to 0 by the call
969 // to GetAudio.
970 for (int i = 0; i < kMaxBlockSize; ++i) {
971 out_data_[i] = 1;
972 }
973 int num_channels;
974 int samples_per_channel;
975 EXPECT_EQ(NetEq::kFail,
976 neteq_->GetAudio(kMaxBlockSize, out_data_,
977 &samples_per_channel, &num_channels, &type));
978 // Verify that there is a decoder error to check.
979 EXPECT_EQ(NetEq::kDecoderErrorCode, neteq_->LastError());
980 // Code 6730 is an iSAC error code.
981 EXPECT_EQ(6730, neteq_->LastDecoderError());
982 // Verify that the first 160 samples are set to 0, and that the remaining
983 // samples are left unmodified.
984 static const int kExpectedOutputLength = 160; // 10 ms at 16 kHz sample rate.
985 for (int i = 0; i < kExpectedOutputLength; ++i) {
986 std::ostringstream ss;
987 ss << "i = " << i;
988 SCOPED_TRACE(ss.str()); // Print out the parameter values on failure.
989 EXPECT_EQ(0, out_data_[i]);
990 }
991 for (int i = kExpectedOutputLength; i < kMaxBlockSize; ++i) {
992 std::ostringstream ss;
993 ss << "i = " << i;
994 SCOPED_TRACE(ss.str()); // Print out the parameter values on failure.
995 EXPECT_EQ(1, out_data_[i]);
996 }
997}
998
henrike@webrtc.orga950300b2013-07-08 18:53:54 +0000999TEST_F(NetEqDecodingTest, DISABLED_ON_ANDROID(GetAudioBeforeInsertPacket)) {
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +00001000 NetEqOutputType type;
1001 // Set all of |out_data_| to 1, and verify that it was set to 0 by the call
1002 // to GetAudio.
1003 for (int i = 0; i < kMaxBlockSize; ++i) {
1004 out_data_[i] = 1;
1005 }
1006 int num_channels;
1007 int samples_per_channel;
1008 EXPECT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_,
1009 &samples_per_channel,
1010 &num_channels, &type));
1011 // Verify that the first block of samples is set to 0.
1012 static const int kExpectedOutputLength =
1013 kInitSampleRateHz / 100; // 10 ms at initial sample rate.
1014 for (int i = 0; i < kExpectedOutputLength; ++i) {
1015 std::ostringstream ss;
1016 ss << "i = " << i;
1017 SCOPED_TRACE(ss.str()); // Print out the parameter values on failure.
1018 EXPECT_EQ(0, out_data_[i]);
1019 }
1020}
turaj@webrtc.orgff43c852013-09-25 00:07:27 +00001021
turaj@webrtc.org3fdeddb2013-09-25 22:19:22 +00001022TEST_F(NetEqDecodingTest, DISABLED_ON_ANDROID(BackgroundNoise)) {
turaj@webrtc.orgff43c852013-09-25 00:07:27 +00001023 neteq_->SetBackgroundNoiseMode(kBgnOn);
1024 CheckBgnOff(8000, kBgnOn);
1025 CheckBgnOff(16000, kBgnOn);
1026 CheckBgnOff(32000, kBgnOn);
1027 EXPECT_EQ(kBgnOn, neteq_->BackgroundNoiseMode());
1028
1029 neteq_->SetBackgroundNoiseMode(kBgnOff);
1030 CheckBgnOff(8000, kBgnOff);
1031 CheckBgnOff(16000, kBgnOff);
1032 CheckBgnOff(32000, kBgnOff);
1033 EXPECT_EQ(kBgnOff, neteq_->BackgroundNoiseMode());
1034
1035 neteq_->SetBackgroundNoiseMode(kBgnFade);
1036 CheckBgnOff(8000, kBgnFade);
1037 CheckBgnOff(16000, kBgnFade);
1038 CheckBgnOff(32000, kBgnFade);
1039 EXPECT_EQ(kBgnFade, neteq_->BackgroundNoiseMode());
1040}
turaj@webrtc.org7b75ac62013-09-26 00:27:56 +00001041
// Verifies the preconditions for inserting "sync packets" (content-free
// placeholder packets): the stream must already be established with a normal
// packet, and a sync packet may not use a CNG/AVT/RED payload type, change
// the codec, or change the SSRC.
TEST_F(NetEqDecodingTest, DISABLED_ON_ANDROID(SyncPacketInsert)) {
  WebRtcRTPHeader rtp_info;
  uint32_t receive_timestamp = 0;
  // For the readability use the following payloads instead of the defaults of
  // this test.
  uint8_t kPcm16WbPayloadType = 1;
  uint8_t kCngNbPayloadType = 2;
  uint8_t kCngWbPayloadType = 3;
  uint8_t kCngSwb32PayloadType = 4;
  uint8_t kCngSwb48PayloadType = 5;
  uint8_t kAvtPayloadType = 6;
  uint8_t kRedPayloadType = 7;
  uint8_t kIsacPayloadType = 9;  // Payload type 8 is already registered.

  // Register decoders.
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderPCM16Bwb,
                                           kPcm16WbPayloadType));
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderCNGnb, kCngNbPayloadType));
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderCNGwb, kCngWbPayloadType));
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderCNGswb32kHz,
                                           kCngSwb32PayloadType));
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderCNGswb48kHz,
                                           kCngSwb48PayloadType));
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderAVT, kAvtPayloadType));
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderRED, kRedPayloadType));
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderISAC, kIsacPayloadType));

  PopulateRtpInfo(0, 0, &rtp_info);
  rtp_info.header.payloadType = kPcm16WbPayloadType;

  // The first packet injected cannot be sync-packet.
  EXPECT_EQ(-1, neteq_->InsertSyncPacket(rtp_info, receive_timestamp));

  // Payload length of 10 ms PCM16 16 kHz.
  const int kPayloadBytes = kBlockSize16kHz * sizeof(int16_t);
  uint8_t payload[kPayloadBytes] = {0};
  // A regular packet establishes the stream; sync packets are allowed after
  // this point (subject to the restrictions checked below).
  ASSERT_EQ(0, neteq_->InsertPacket(
      rtp_info, payload, kPayloadBytes, receive_timestamp));

  // Next packet. Last packet contained 10 ms audio.
  rtp_info.header.sequenceNumber++;
  rtp_info.header.timestamp += kBlockSize16kHz;
  receive_timestamp += kBlockSize16kHz;

  // Unacceptable payload types CNG, AVT (DTMF), RED.
  rtp_info.header.payloadType = kCngNbPayloadType;
  EXPECT_EQ(-1, neteq_->InsertSyncPacket(rtp_info, receive_timestamp));

  rtp_info.header.payloadType = kCngWbPayloadType;
  EXPECT_EQ(-1, neteq_->InsertSyncPacket(rtp_info, receive_timestamp));

  rtp_info.header.payloadType = kCngSwb32PayloadType;
  EXPECT_EQ(-1, neteq_->InsertSyncPacket(rtp_info, receive_timestamp));

  rtp_info.header.payloadType = kCngSwb48PayloadType;
  EXPECT_EQ(-1, neteq_->InsertSyncPacket(rtp_info, receive_timestamp));

  rtp_info.header.payloadType = kAvtPayloadType;
  EXPECT_EQ(-1, neteq_->InsertSyncPacket(rtp_info, receive_timestamp));

  rtp_info.header.payloadType = kRedPayloadType;
  EXPECT_EQ(-1, neteq_->InsertSyncPacket(rtp_info, receive_timestamp));

  // Change of codec cannot be initiated with a sync packet.
  rtp_info.header.payloadType = kIsacPayloadType;
  EXPECT_EQ(-1, neteq_->InsertSyncPacket(rtp_info, receive_timestamp));

  // Change of SSRC is not allowed with a sync packet.
  rtp_info.header.payloadType = kPcm16WbPayloadType;
  ++rtp_info.header.ssrc;
  EXPECT_EQ(-1, neteq_->InsertSyncPacket(rtp_info, receive_timestamp));

  // With the original SSRC restored, the sync packet must be accepted.
  --rtp_info.header.ssrc;
  EXPECT_EQ(0, neteq_->InsertSyncPacket(rtp_info, receive_timestamp));
}
1117
1118// First insert several noise like packets, then sync-packets. Decoding all
1119// packets should not produce error, statistics should not show any packet loss
1120// and sync-packets should decode to zero.
// TODO(turajs) we will have a better test if we have a reference NetEq, and
1122// when Sync packets are inserted in "test" NetEq we insert all-zero payload
1123// in reference NetEq and compare the output of those two.
TEST_F(NetEqDecodingTest, DISABLED_ON_ANDROID(SyncPacketDecode)) {
  WebRtcRTPHeader rtp_info;
  PopulateRtpInfo(0, 0, &rtp_info);
  const int kPayloadBytes = kBlockSize16kHz * sizeof(int16_t);
  uint8_t payload[kPayloadBytes];
  int16_t decoded[kBlockSize16kHz];
  // Number of 10 ms frames NetEq's internal processing may delay the output
  // relative to the input.
  int algorithmic_frame_delay = algorithmic_delay_ms_ / 10 + 1;
  for (int n = 0; n < kPayloadBytes; ++n) {
    payload[n] = (rand() & 0xF0) + 1;  // Non-zero random sequence.
  }
  // Insert some packets which decode to noise. We are not interested in
  // actual decoded values.
  NetEqOutputType output_type;
  int num_channels;
  int samples_per_channel;
  uint32_t receive_timestamp = 0;
  for (int n = 0; n < 100; ++n) {
    ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes,
                                      receive_timestamp));
    ASSERT_EQ(0, neteq_->GetAudio(kBlockSize16kHz, decoded,
                                  &samples_per_channel, &num_channels,
                                  &output_type));
    ASSERT_EQ(kBlockSize16kHz, samples_per_channel);
    ASSERT_EQ(1, num_channels);

    rtp_info.header.sequenceNumber++;
    rtp_info.header.timestamp += kBlockSize16kHz;
    receive_timestamp += kBlockSize16kHz;
  }
  const int kNumSyncPackets = 10;

  // Make sure sufficient number of sync packets are inserted that we can
  // conduct a test.
  ASSERT_GT(kNumSyncPackets, algorithmic_frame_delay);
  // Insert sync-packets, the decoded sequence should be all-zero.
  for (int n = 0; n < kNumSyncPackets; ++n) {
    ASSERT_EQ(0, neteq_->InsertSyncPacket(rtp_info, receive_timestamp));
    ASSERT_EQ(0, neteq_->GetAudio(kBlockSize16kHz, decoded,
                                  &samples_per_channel, &num_channels,
                                  &output_type));
    ASSERT_EQ(kBlockSize16kHz, samples_per_channel);
    ASSERT_EQ(1, num_channels);
    // The first few frames may still contain tail audio from the regular
    // packets; only check after the algorithmic delay has elapsed.
    if (n > algorithmic_frame_delay) {
      EXPECT_TRUE(IsAllZero(decoded, samples_per_channel * num_channels));
    }
    rtp_info.header.sequenceNumber++;
    rtp_info.header.timestamp += kBlockSize16kHz;
    receive_timestamp += kBlockSize16kHz;
  }

  // Insert regular packets. If the sync packets were not correctly buffered,
  // the network statistics would show some packet loss.
  for (int n = 0; n <= algorithmic_frame_delay + 10; ++n) {
    ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes,
                                      receive_timestamp));
    ASSERT_EQ(0, neteq_->GetAudio(kBlockSize16kHz, decoded,
                                  &samples_per_channel, &num_channels,
                                  &output_type));
    if (n >= algorithmic_frame_delay + 1) {
      // Expect that this frame contain samples from regular RTP.
      EXPECT_TRUE(IsAllNonZero(decoded, samples_per_channel * num_channels));
    }
    rtp_info.header.sequenceNumber++;
    rtp_info.header.timestamp += kBlockSize16kHz;
    receive_timestamp += kBlockSize16kHz;
  }
  NetEqNetworkStatistics network_stats;
  ASSERT_EQ(0, neteq_->NetworkStatistics(&network_stats));
  // Expecting a "clean" network.
  EXPECT_EQ(0, network_stats.packet_loss_rate);
  EXPECT_EQ(0, network_stats.expand_rate);
  EXPECT_EQ(0, network_stats.accelerate_rate);
  EXPECT_LE(network_stats.preemptive_rate, 150);
}
1198
1199// Test if the size of the packet buffer reported correctly when containing
1200// sync packets. Also, test if network packets override sync packets. That is to
1201// prefer decoding a network packet to a sync packet, if both have same sequence
1202// number and timestamp.
TEST_F(NetEqDecodingTest,
       DISABLED_ON_ANDROID(SyncPacketBufferSizeAndOverridenByNetworkPackets)) {
  WebRtcRTPHeader rtp_info;
  PopulateRtpInfo(0, 0, &rtp_info);
  const int kPayloadBytes = kBlockSize16kHz * sizeof(int16_t);
  uint8_t payload[kPayloadBytes];
  int16_t decoded[kBlockSize16kHz];
  for (int n = 0; n < kPayloadBytes; ++n) {
    payload[n] = (rand() & 0xF0) + 1;  // Non-zero random sequence.
  }
  // Insert some packets which decode to noise. We are not interested in
  // actual decoded values.
  NetEqOutputType output_type;
  int num_channels;
  int samples_per_channel;
  uint32_t receive_timestamp = 0;
  // Number of 10 ms frames NetEq's internal processing may delay the output
  // relative to the input.
  int algorithmic_frame_delay = algorithmic_delay_ms_ / 10 + 1;
  for (int n = 0; n < algorithmic_frame_delay; ++n) {
    ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes,
                                      receive_timestamp));
    ASSERT_EQ(0, neteq_->GetAudio(kBlockSize16kHz, decoded,
                                  &samples_per_channel, &num_channels,
                                  &output_type));
    ASSERT_EQ(kBlockSize16kHz, samples_per_channel);
    ASSERT_EQ(1, num_channels);
    rtp_info.header.sequenceNumber++;
    rtp_info.header.timestamp += kBlockSize16kHz;
    receive_timestamp += kBlockSize16kHz;
  }
  const int kNumSyncPackets = 10;

  // Remember the header of the first sync packet so the same sequence can be
  // re-sent as regular packets below.
  WebRtcRTPHeader first_sync_packet_rtp_info;
  memcpy(&first_sync_packet_rtp_info, &rtp_info, sizeof(rtp_info));

  // Insert sync-packets, but no decoding.
  for (int n = 0; n < kNumSyncPackets; ++n) {
    ASSERT_EQ(0, neteq_->InsertSyncPacket(rtp_info, receive_timestamp));
    rtp_info.header.sequenceNumber++;
    rtp_info.header.timestamp += kBlockSize16kHz;
    receive_timestamp += kBlockSize16kHz;
  }
  // The buffered sync packets (10 ms each) must be counted in the reported
  // buffer size.
  NetEqNetworkStatistics network_stats;
  ASSERT_EQ(0, neteq_->NetworkStatistics(&network_stats));
  EXPECT_EQ(kNumSyncPackets * 10 + algorithmic_delay_ms_,
            network_stats.current_buffer_size_ms);

  // Rewind |rtp_info| to that of the first sync packet.
  memcpy(&rtp_info, &first_sync_packet_rtp_info, sizeof(rtp_info));

  // Insert regular packets with the same sequence numbers and timestamps as
  // the sync packets; they are expected to override the sync packets.
  for (int n = 0; n < kNumSyncPackets; ++n) {
    ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes,
                                      receive_timestamp));
    rtp_info.header.sequenceNumber++;
    rtp_info.header.timestamp += kBlockSize16kHz;
    receive_timestamp += kBlockSize16kHz;
  }

  // Decode. If the network packets overrode the sync packets, the output is
  // non-zero (sync packets would have decoded to zeros).
  for (int n = 0; n < kNumSyncPackets; ++n) {
    ASSERT_EQ(0, neteq_->GetAudio(kBlockSize16kHz, decoded,
                                  &samples_per_channel, &num_channels,
                                  &output_type));
    ASSERT_EQ(kBlockSize16kHz, samples_per_channel);
    ASSERT_EQ(1, num_channels);
    EXPECT_TRUE(IsAllNonZero(decoded, samples_per_channel * num_channels));
  }
}
1271
// Generic driver for wrap-around tests. Streams 2 seconds of 30 ms speech
// frames starting at |start_seq_no| / |start_timestamp|, skipping any
// sequence numbers listed in |drop_seq_numbers|, while checking that buffer
// sizes and playout delay stay within bounds across the wrap. Finally
// asserts that a sequence-number and/or timestamp wrap actually happened,
// as demanded by |expect_seq_no_wrap| and |expect_timestamp_wrap|.
void NetEqDecodingTest::WrapTest(uint16_t start_seq_no,
                                 uint32_t start_timestamp,
                                 const std::set<uint16_t>& drop_seq_numbers,
                                 bool expect_seq_no_wrap,
                                 bool expect_timestamp_wrap) {
  uint16_t seq_no = start_seq_no;
  uint32_t timestamp = start_timestamp;
  const int kBlocksPerFrame = 3;  // Number of 10 ms blocks per frame.
  const int kFrameSizeMs = kBlocksPerFrame * kTimeStepMs;
  const int kSamples = kBlockSize16kHz * kBlocksPerFrame;
  const int kPayloadBytes = kSamples * sizeof(int16_t);
  double next_input_time_ms = 0.0;
  int16_t decoded[kBlockSize16kHz];
  int num_channels;
  int samples_per_channel;
  NetEqOutputType output_type;
  uint32_t receive_timestamp = 0;

  // Insert speech for 2 seconds.
  const int kSpeechDurationMs = 2000;
  int packets_inserted = 0;
  uint16_t last_seq_no;
  uint32_t last_timestamp;
  bool timestamp_wrapped = false;
  bool seq_no_wrapped = false;
  for (double t_ms = 0; t_ms < kSpeechDurationMs; t_ms += 10) {
    // Each turn in this for loop is 10 ms.
    while (next_input_time_ms <= t_ms) {
      // Insert one 30 ms speech frame.
      uint8_t payload[kPayloadBytes] = {0};
      WebRtcRTPHeader rtp_info;
      PopulateRtpInfo(seq_no, timestamp, &rtp_info);
      if (drop_seq_numbers.find(seq_no) == drop_seq_numbers.end()) {
        // This sequence number was not in the set to drop. Insert it.
        ASSERT_EQ(0,
                  neteq_->InsertPacket(rtp_info, payload, kPayloadBytes,
                                       receive_timestamp));
        ++packets_inserted;
      }
      NetEqNetworkStatistics network_stats;
      ASSERT_EQ(0, neteq_->NetworkStatistics(&network_stats));

      // Due to internal NetEq logic, preferred buffer-size is about 4 times the
      // packet size for first few packets. Therefore we refrain from checking
      // the criteria.
      if (packets_inserted > 4) {
        // Expect preferred and actual buffer size to be no more than 2 frames.
        EXPECT_LE(network_stats.preferred_buffer_size_ms, kFrameSizeMs * 2);
        EXPECT_LE(network_stats.current_buffer_size_ms, kFrameSizeMs * 2 +
                  algorithmic_delay_ms_);
      }
      last_seq_no = seq_no;
      last_timestamp = timestamp;

      ++seq_no;
      timestamp += kSamples;
      receive_timestamp += kSamples;
      next_input_time_ms += static_cast<double>(kFrameSizeMs);

      // Unsigned arithmetic: a decrease relative to the previous value means
      // the counter wrapped around.
      seq_no_wrapped |= seq_no < last_seq_no;
      timestamp_wrapped |= timestamp < last_timestamp;
    }
    // Pull out data once.
    ASSERT_EQ(0, neteq_->GetAudio(kBlockSize16kHz, decoded,
                                  &samples_per_channel, &num_channels,
                                  &output_type));
    ASSERT_EQ(kBlockSize16kHz, samples_per_channel);
    ASSERT_EQ(1, num_channels);

    // Expect delay (in samples) to be less than 2 packets.
    EXPECT_LE(timestamp - neteq_->PlayoutTimestamp(),
              static_cast<uint32_t>(kSamples * 2));
  }
  // Make sure we have actually tested wrap-around.
  ASSERT_EQ(expect_seq_no_wrap, seq_no_wrapped);
  ASSERT_EQ(expect_timestamp_wrap, timestamp_wrapped);
}
1349
1350TEST_F(NetEqDecodingTest, SequenceNumberWrap) {
1351 // Start with a sequence number that will soon wrap.
1352 std::set<uint16_t> drop_seq_numbers; // Don't drop any packets.
1353 WrapTest(0xFFFF - 10, 0, drop_seq_numbers, true, false);
1354}
1355
1356TEST_F(NetEqDecodingTest, SequenceNumberWrapAndDrop) {
1357 // Start with a sequence number that will soon wrap.
1358 std::set<uint16_t> drop_seq_numbers;
1359 drop_seq_numbers.insert(0xFFFF);
1360 drop_seq_numbers.insert(0x0);
1361 WrapTest(0xFFFF - 10, 0, drop_seq_numbers, true, false);
1362}
1363
1364TEST_F(NetEqDecodingTest, TimestampWrap) {
1365 // Start with a timestamp that will soon wrap.
1366 std::set<uint16_t> drop_seq_numbers;
1367 WrapTest(0, 0xFFFFFFFF - 3000, drop_seq_numbers, false, true);
1368}
1369
1370TEST_F(NetEqDecodingTest, TimestampAndSequenceNumberWrap) {
1371 // Start with a timestamp and a sequence number that will wrap at the same
1372 // time.
1373 std::set<uint16_t> drop_seq_numbers;
1374 WrapTest(0xFFFF - 10, 0xFFFFFFFF - 5000, drop_seq_numbers, true, true);
1375}
1376
// Checks that a CNG packet inserted twice (e.g., due to network duplication)
// is decoded only once: the stale duplicate must be discarded, and the
// playout timestamp must advance as if only one copy had been received.
void NetEqDecodingTest::DuplicateCng() {
  uint16_t seq_no = 0;
  uint32_t timestamp = 0;
  const int kFrameSizeMs = 10;
  const int kSampleRateKhz = 16;
  const int kSamples = kFrameSizeMs * kSampleRateKhz;
  const int kPayloadBytes = kSamples * 2;

  // Expected offset (in samples) between the inserted timestamps and the
  // playout timestamp, due to NetEq's internal processing delay.
  const int algorithmic_delay_samples = std::max(
      algorithmic_delay_ms_ * kSampleRateKhz, 5 * kSampleRateKhz / 8);
  // Insert three speech packets. Three are needed to get the frame length
  // correct.
  int out_len;
  int num_channels;
  NetEqOutputType type;
  uint8_t payload[kPayloadBytes] = {0};
  WebRtcRTPHeader rtp_info;
  for (int i = 0; i < 3; ++i) {
    PopulateRtpInfo(seq_no, timestamp, &rtp_info);
    ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes, 0));
    ++seq_no;
    timestamp += kSamples;

    // Pull audio once.
    ASSERT_EQ(0,
              neteq_->GetAudio(
                  kMaxBlockSize, out_data_, &out_len, &num_channels, &type));
    ASSERT_EQ(kBlockSize16kHz, out_len);
  }
  // Verify speech output.
  EXPECT_EQ(kOutputNormal, type);

  // Insert same CNG packet twice.
  const int kCngPeriodMs = 100;
  const int kCngPeriodSamples = kCngPeriodMs * kSampleRateKhz;
  int payload_len;
  PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len);
  // This is the first time this CNG packet is inserted.
  ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, payload_len, 0));

  // Pull audio once and make sure CNG is played.
  ASSERT_EQ(0,
            neteq_->GetAudio(
                kMaxBlockSize, out_data_, &out_len, &num_channels, &type));
  ASSERT_EQ(kBlockSize16kHz, out_len);
  EXPECT_EQ(kOutputCNG, type);
  EXPECT_EQ(timestamp - algorithmic_delay_samples, neteq_->PlayoutTimestamp());

  // Insert the same CNG packet again. Note that at this point it is old, since
  // we have already decoded the first copy of it.
  ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, payload_len, 0));

  // Pull audio until we have played |kCngPeriodMs| of CNG. Start at 10 ms since
  // we have already pulled out CNG once.
  for (int cng_time_ms = 10; cng_time_ms < kCngPeriodMs; cng_time_ms += 10) {
    ASSERT_EQ(0,
              neteq_->GetAudio(
                  kMaxBlockSize, out_data_, &out_len, &num_channels, &type));
    ASSERT_EQ(kBlockSize16kHz, out_len);
    EXPECT_EQ(kOutputCNG, type);
    // The playout timestamp must not advance past the (single) CNG packet;
    // this fails if the duplicate was decoded too.
    EXPECT_EQ(timestamp - algorithmic_delay_samples,
              neteq_->PlayoutTimestamp());
  }

  // Insert speech again.
  ++seq_no;
  timestamp += kCngPeriodSamples;
  PopulateRtpInfo(seq_no, timestamp, &rtp_info);
  ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes, 0));

  // Pull audio once and verify that the output is speech again.
  ASSERT_EQ(0,
            neteq_->GetAudio(
                kMaxBlockSize, out_data_, &out_len, &num_channels, &type));
  ASSERT_EQ(kBlockSize16kHz, out_len);
  EXPECT_EQ(kOutputNormal, type);
  EXPECT_EQ(timestamp + kSamples - algorithmic_delay_samples,
            neteq_->PlayoutTimestamp());
}
1456
1457TEST_F(NetEqDecodingTest, DiscardDuplicateCng) { DuplicateCng(); }
henrik.lundin@webrtc.orge7ce4372014-01-09 14:01:55 +00001458} // namespace webrtc