/*
 *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

/*
 * This file includes unit tests for NetEQ.
 */

#include "webrtc/modules/audio_coding/neteq4/interface/neteq.h"

#include <math.h>
#include <stdlib.h>
#include <string.h>  // memset

#include <set>
#include <string>
#include <vector>

#include "gflags/gflags.h"
#include "gtest/gtest.h"
#include "webrtc/modules/audio_coding/neteq4/test/NETEQTEST_RTPpacket.h"
#include "webrtc/modules/audio_coding/codecs/pcm16b/include/pcm16b.h"
#include "webrtc/test/testsupport/fileutils.h"
#include "webrtc/test/testsupport/gtest_disable.h"
#include "webrtc/typedefs.h"

DEFINE_bool(gen_ref, false, "Generate reference files.");

namespace webrtc {

static bool IsAllZero(const int16_t* buf, int buf_length) {
  bool all_zero = true;
  for (int n = 0; n < buf_length && all_zero; ++n)
    all_zero = buf[n] == 0;
  return all_zero;
}

static bool IsAllNonZero(const int16_t* buf, int buf_length) {
  bool all_non_zero = true;
  for (int n = 0; n < buf_length && all_non_zero; ++n)
    all_non_zero = buf[n] != 0;
  return all_non_zero;
}

class RefFiles {
 public:
  RefFiles(const std::string& input_file, const std::string& output_file);
  ~RefFiles();
  template<class T> void ProcessReference(const T& test_results);
  template<typename T, size_t n> void ProcessReference(
      const T (&test_results)[n],
      size_t length);
  template<typename T, size_t n> void WriteToFile(
      const T (&test_results)[n],
      size_t length);
  template<typename T, size_t n> void ReadFromFileAndCompare(
      const T (&test_results)[n],
      size_t length);
  void WriteToFile(const NetEqNetworkStatistics& stats);
  void ReadFromFileAndCompare(const NetEqNetworkStatistics& stats);
  void WriteToFile(const RtcpStatistics& stats);
  void ReadFromFileAndCompare(const RtcpStatistics& stats);

  FILE* input_fp_;
  FILE* output_fp_;
};

RefFiles::RefFiles(const std::string &input_file,
                   const std::string &output_file)
    : input_fp_(NULL),
      output_fp_(NULL) {
  if (!input_file.empty()) {
    input_fp_ = fopen(input_file.c_str(), "rb");
    EXPECT_TRUE(input_fp_ != NULL);
  }
  if (!output_file.empty()) {
    output_fp_ = fopen(output_file.c_str(), "wb");
    EXPECT_TRUE(output_fp_ != NULL);
  }
}

RefFiles::~RefFiles() {
  if (input_fp_) {
    EXPECT_EQ(EOF, fgetc(input_fp_));  // Make sure that we reached the end.
    fclose(input_fp_);
  }
  if (output_fp_) fclose(output_fp_);
}

template<class T>
void RefFiles::ProcessReference(const T& test_results) {
  WriteToFile(test_results);
  ReadFromFileAndCompare(test_results);
}
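
// The write-then-read pattern above is what drives the --gen_ref flow: a
// RefFiles constructed with only an output file name records fresh reference
// data, while one constructed with only an input file name verifies the
// current results against a stored reference. A minimal usage sketch (the
// file name below is hypothetical, not part of the checked-in resources):
//
//   int16_t samples[160] = {0};
//   RefFiles generate("", "/tmp/my_ref.dat");  // Write-only: records data.
//   generate.ProcessReference(samples, 160);
//   RefFiles verify("/tmp/my_ref.dat", "");    // Read-only: compares data.
//   verify.ProcessReference(samples, 160);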

template<typename T, size_t n>
void RefFiles::ProcessReference(const T (&test_results)[n], size_t length) {
  WriteToFile(test_results, length);
  ReadFromFileAndCompare(test_results, length);
}

template<typename T, size_t n>
void RefFiles::WriteToFile(const T (&test_results)[n], size_t length) {
  if (output_fp_) {
    ASSERT_EQ(length, fwrite(&test_results, sizeof(T), length, output_fp_));
  }
}

template<typename T, size_t n>
void RefFiles::ReadFromFileAndCompare(const T (&test_results)[n],
                                      size_t length) {
  if (input_fp_) {
    // Read from ref file.
    T* ref = new T[length];
    ASSERT_EQ(length, fread(ref, sizeof(T), length, input_fp_));
    // Compare
    ASSERT_EQ(0, memcmp(&test_results, ref, sizeof(T) * length));
    delete [] ref;
  }
}

void RefFiles::WriteToFile(const NetEqNetworkStatistics& stats) {
  if (output_fp_) {
    ASSERT_EQ(1u, fwrite(&stats, sizeof(NetEqNetworkStatistics), 1,
                         output_fp_));
  }
}

void RefFiles::ReadFromFileAndCompare(
    const NetEqNetworkStatistics& stats) {
  if (input_fp_) {
    // Read from ref file.
    size_t stat_size = sizeof(NetEqNetworkStatistics);
    NetEqNetworkStatistics ref_stats;
    ASSERT_EQ(1u, fread(&ref_stats, stat_size, 1, input_fp_));
    // Compare
    EXPECT_EQ(0, memcmp(&stats, &ref_stats, stat_size));
  }
}

void RefFiles::WriteToFile(const RtcpStatistics& stats) {
  if (output_fp_) {
    ASSERT_EQ(1u, fwrite(&(stats.fraction_lost), sizeof(stats.fraction_lost), 1,
                         output_fp_));
    ASSERT_EQ(1u, fwrite(&(stats.cumulative_lost),
                         sizeof(stats.cumulative_lost), 1, output_fp_));
    ASSERT_EQ(1u, fwrite(&(stats.extended_max_sequence_number),
                         sizeof(stats.extended_max_sequence_number), 1,
                         output_fp_));
    ASSERT_EQ(1u, fwrite(&(stats.jitter), sizeof(stats.jitter), 1,
                         output_fp_));
  }
}

void RefFiles::ReadFromFileAndCompare(
    const RtcpStatistics& stats) {
  if (input_fp_) {
    // Read from ref file.
    RtcpStatistics ref_stats;
    ASSERT_EQ(1u, fread(&(ref_stats.fraction_lost),
                        sizeof(ref_stats.fraction_lost), 1, input_fp_));
    ASSERT_EQ(1u, fread(&(ref_stats.cumulative_lost),
                        sizeof(ref_stats.cumulative_lost), 1, input_fp_));
    ASSERT_EQ(1u, fread(&(ref_stats.extended_max_sequence_number),
                        sizeof(ref_stats.extended_max_sequence_number), 1,
                        input_fp_));
    ASSERT_EQ(1u, fread(&(ref_stats.jitter), sizeof(ref_stats.jitter), 1,
                        input_fp_));
    // Compare
    EXPECT_EQ(ref_stats.fraction_lost, stats.fraction_lost);
    EXPECT_EQ(ref_stats.cumulative_lost, stats.cumulative_lost);
    EXPECT_EQ(ref_stats.extended_max_sequence_number,
              stats.extended_max_sequence_number);
    EXPECT_EQ(ref_stats.jitter, stats.jitter);
  }
}

class NetEqDecodingTest : public ::testing::Test {
 protected:
  // NetEQ must be polled for data once every 10 ms. Thus, none of the
  // constants below can be changed.
  static const int kTimeStepMs = 10;
  static const int kBlockSize8kHz = kTimeStepMs * 8;
  static const int kBlockSize16kHz = kTimeStepMs * 16;
  static const int kBlockSize32kHz = kTimeStepMs * 32;
  static const int kMaxBlockSize = kBlockSize32kHz;
  static const int kInitSampleRateHz = 8000;

  NetEqDecodingTest();
  virtual void SetUp();
  virtual void TearDown();
  void SelectDecoders(NetEqDecoder* used_codec);
  void LoadDecoders();
  void OpenInputFile(const std::string &rtp_file);
  void Process(NETEQTEST_RTPpacket* rtp_ptr, int* out_len);
  void DecodeAndCompare(const std::string &rtp_file,
                        const std::string &ref_file);
  void DecodeAndCheckStats(const std::string &rtp_file,
                           const std::string &stat_ref_file,
                           const std::string &rtcp_ref_file);
  static void PopulateRtpInfo(int frame_index,
                              int timestamp,
                              WebRtcRTPHeader* rtp_info);
  static void PopulateCng(int frame_index,
                          int timestamp,
                          WebRtcRTPHeader* rtp_info,
                          uint8_t* payload,
                          int* payload_len);

  void CheckBgnOff(int sampling_rate, NetEqBackgroundNoiseMode bgn_mode);

  void WrapTest(uint16_t start_seq_no, uint32_t start_timestamp,
                const std::set<uint16_t>& drop_seq_numbers,
                bool expect_seq_no_wrap, bool expect_timestamp_wrap);

  void LongCngWithClockDrift(double drift_factor,
                             double network_freeze_ms,
                             bool pull_audio_during_freeze,
                             int delay_tolerance_ms,
                             int max_time_to_speech_ms);

  void DuplicateCng();

  NetEq* neteq_;
  FILE* rtp_fp_;
  unsigned int sim_clock_;
  int16_t out_data_[kMaxBlockSize];
  int output_sample_rate_;
};

// Allocating the static const so that it can be passed by reference.
const int NetEqDecodingTest::kTimeStepMs;
const int NetEqDecodingTest::kBlockSize8kHz;
const int NetEqDecodingTest::kBlockSize16kHz;
const int NetEqDecodingTest::kBlockSize32kHz;
const int NetEqDecodingTest::kMaxBlockSize;
const int NetEqDecodingTest::kInitSampleRateHz;

NetEqDecodingTest::NetEqDecodingTest()
    : neteq_(NULL),
      rtp_fp_(NULL),
      sim_clock_(0),
      output_sample_rate_(kInitSampleRateHz) {
  memset(out_data_, 0, sizeof(out_data_));
}

void NetEqDecodingTest::SetUp() {
  neteq_ = NetEq::Create(kInitSampleRateHz);
  ASSERT_TRUE(neteq_);
  LoadDecoders();
}

void NetEqDecodingTest::TearDown() {
  delete neteq_;
  if (rtp_fp_)
    fclose(rtp_fp_);
}

void NetEqDecodingTest::LoadDecoders() {
  // Load PCMu.
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderPCMu, 0));
  // Load PCMa.
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderPCMa, 8));
#ifndef WEBRTC_ANDROID
  // Load iLBC.
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderILBC, 102));
#endif  // WEBRTC_ANDROID
  // Load iSAC.
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderISAC, 103));
#ifndef WEBRTC_ANDROID
  // Load iSAC SWB.
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderISACswb, 104));
  // Load iSAC FB.
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderISACfb, 105));
#endif  // WEBRTC_ANDROID
  // Load PCM16B nb.
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderPCM16B, 93));
  // Load PCM16B wb.
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderPCM16Bwb, 94));
  // Load PCM16B swb32.
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderPCM16Bswb32kHz, 95));
  // Load CNG 8 kHz.
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderCNGnb, 13));
  // Load CNG 16 kHz.
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderCNGwb, 98));
}

void NetEqDecodingTest::OpenInputFile(const std::string &rtp_file) {
  rtp_fp_ = fopen(rtp_file.c_str(), "rb");
  ASSERT_TRUE(rtp_fp_ != NULL);
  ASSERT_EQ(0, NETEQTEST_RTPpacket::skipFileHeader(rtp_fp_));
}

void NetEqDecodingTest::Process(NETEQTEST_RTPpacket* rtp, int* out_len) {
  // Check if time to receive.
  while ((sim_clock_ >= rtp->time()) &&
         (rtp->dataLen() >= 0)) {
    if (rtp->dataLen() > 0) {
      WebRtcRTPHeader rtpInfo;
      rtp->parseHeader(&rtpInfo);
      ASSERT_EQ(0, neteq_->InsertPacket(
          rtpInfo,
          rtp->payload(),
          rtp->payloadLen(),
          rtp->time() * (output_sample_rate_ / 1000)));
    }
    // Get next packet.
    ASSERT_NE(-1, rtp->readFromFile(rtp_fp_));
  }

  // Get audio from NetEq.
  NetEqOutputType type;
  int num_channels;
  ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, out_len,
                                &num_channels, &type));
  ASSERT_TRUE((*out_len == kBlockSize8kHz) ||
              (*out_len == kBlockSize16kHz) ||
              (*out_len == kBlockSize32kHz));
  output_sample_rate_ = *out_len / 10 * 1000;

  // Increase time.
  sim_clock_ += kTimeStepMs;
}
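
// A note on the bookkeeping in Process(): GetAudio() always delivers 10 ms of
// audio, so the returned sample count per channel directly encodes the current
// output rate, e.g.
//
//   *out_len ==  80  ->  80 / 10 * 1000 ==  8000 Hz
//   *out_len == 160  -> 160 / 10 * 1000 == 16000 Hz
//   *out_len == 320  -> 320 / 10 * 1000 == 32000 Hz
//
// The same rate is used to convert rtp->time() (in ms) into the receive
// timestamp (in samples) that is passed to InsertPacket() above.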

void NetEqDecodingTest::DecodeAndCompare(const std::string &rtp_file,
                                         const std::string &ref_file) {
  OpenInputFile(rtp_file);

  std::string ref_out_file = "";
  if (ref_file.empty()) {
    ref_out_file = webrtc::test::OutputPath() + "neteq_universal_ref.pcm";
  }
  RefFiles ref_files(ref_file, ref_out_file);

  NETEQTEST_RTPpacket rtp;
  ASSERT_GT(rtp.readFromFile(rtp_fp_), 0);
  int i = 0;
  while (rtp.dataLen() >= 0) {
    std::ostringstream ss;
    ss << "Lap number " << i++ << " in DecodeAndCompare while loop";
    SCOPED_TRACE(ss.str());  // Print out the parameter values on failure.
    int out_len = 0;
    ASSERT_NO_FATAL_FAILURE(Process(&rtp, &out_len));
    ASSERT_NO_FATAL_FAILURE(ref_files.ProcessReference(out_data_, out_len));
  }
}

void NetEqDecodingTest::DecodeAndCheckStats(const std::string &rtp_file,
                                            const std::string &stat_ref_file,
                                            const std::string &rtcp_ref_file) {
  OpenInputFile(rtp_file);
  std::string stat_out_file = "";
  if (stat_ref_file.empty()) {
    stat_out_file = webrtc::test::OutputPath() +
        "neteq_network_stats.dat";
  }
  RefFiles network_stat_files(stat_ref_file, stat_out_file);

  std::string rtcp_out_file = "";
  if (rtcp_ref_file.empty()) {
    rtcp_out_file = webrtc::test::OutputPath() +
        "neteq_rtcp_stats.dat";
  }
  RefFiles rtcp_stat_files(rtcp_ref_file, rtcp_out_file);

  NETEQTEST_RTPpacket rtp;
  ASSERT_GT(rtp.readFromFile(rtp_fp_), 0);
  while (rtp.dataLen() >= 0) {
    int out_len;
    Process(&rtp, &out_len);

    // Query the network statistics API once per second
    if (sim_clock_ % 1000 == 0) {
      // Process NetworkStatistics.
      NetEqNetworkStatistics network_stats;
      ASSERT_EQ(0, neteq_->NetworkStatistics(&network_stats));
      network_stat_files.ProcessReference(network_stats);

      // Process RTCPstat.
      RtcpStatistics rtcp_stats;
      neteq_->GetRtcpStatistics(&rtcp_stats);
      rtcp_stat_files.ProcessReference(rtcp_stats);
    }
  }
}

void NetEqDecodingTest::PopulateRtpInfo(int frame_index,
                                        int timestamp,
                                        WebRtcRTPHeader* rtp_info) {
  rtp_info->header.sequenceNumber = frame_index;
  rtp_info->header.timestamp = timestamp;
  rtp_info->header.ssrc = 0x1234;  // Just an arbitrary SSRC.
  rtp_info->header.payloadType = 94;  // PCM16b WB codec.
  rtp_info->header.markerBit = 0;
}

void NetEqDecodingTest::PopulateCng(int frame_index,
                                    int timestamp,
                                    WebRtcRTPHeader* rtp_info,
                                    uint8_t* payload,
                                    int* payload_len) {
  rtp_info->header.sequenceNumber = frame_index;
  rtp_info->header.timestamp = timestamp;
  rtp_info->header.ssrc = 0x1234;  // Just an arbitrary SSRC.
  rtp_info->header.payloadType = 98;  // WB CNG.
  rtp_info->header.markerBit = 0;
  payload[0] = 64;  // Noise level -64 dBov, quite arbitrarily chosen.
  *payload_len = 1;  // Only noise level, no spectral parameters.
}
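
// The one-byte payload built by PopulateCng() follows the RFC 3389 SID
// layout: the first octet is the noise level expressed as a positive value in
// -dBov (64 here, i.e. -64 dBov), and any additional octets would carry
// optional spectral parameters, which this test leaves out on purpose.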

void NetEqDecodingTest::CheckBgnOff(int sampling_rate_hz,
                                    NetEqBackgroundNoiseMode bgn_mode) {
  int expected_samples_per_channel = 0;
  uint8_t payload_type = 0xFF;  // Invalid.
  if (sampling_rate_hz == 8000) {
    expected_samples_per_channel = kBlockSize8kHz;
    payload_type = 93;  // PCM 16, 8 kHz.
  } else if (sampling_rate_hz == 16000) {
    expected_samples_per_channel = kBlockSize16kHz;
    payload_type = 94;  // PCM 16, 16 kHz.
  } else if (sampling_rate_hz == 32000) {
    expected_samples_per_channel = kBlockSize32kHz;
    payload_type = 95;  // PCM 16, 32 kHz.
  } else {
    ASSERT_TRUE(false);  // Unsupported test case.
  }

  NetEqOutputType type;
  int16_t output[kBlockSize32kHz];  // Maximum size is chosen.
  int16_t input[kBlockSize32kHz];  // Maximum size is chosen.

  // Payload of 10 ms of PCM16 32 kHz.
  uint8_t payload[kBlockSize32kHz * sizeof(int16_t)];

  // Random payload.
  for (int n = 0; n < expected_samples_per_channel; ++n) {
    input[n] = (rand() & ((1 << 10) - 1)) - ((1 << 5) - 1);
  }
  int enc_len_bytes = WebRtcPcm16b_EncodeW16(
      input, expected_samples_per_channel, reinterpret_cast<int16_t*>(payload));
  ASSERT_EQ(enc_len_bytes, expected_samples_per_channel * 2);

  WebRtcRTPHeader rtp_info;
  PopulateRtpInfo(0, 0, &rtp_info);
  rtp_info.header.payloadType = payload_type;

  int number_channels = 0;
  int samples_per_channel = 0;

  uint32_t receive_timestamp = 0;
  for (int n = 0; n < 10; ++n) {  // Insert few packets and get audio.
    number_channels = 0;
    samples_per_channel = 0;
    ASSERT_EQ(0, neteq_->InsertPacket(
        rtp_info, payload, enc_len_bytes, receive_timestamp));
    ASSERT_EQ(0, neteq_->GetAudio(kBlockSize32kHz, output, &samples_per_channel,
                                  &number_channels, &type));
    ASSERT_EQ(1, number_channels);
    ASSERT_EQ(expected_samples_per_channel, samples_per_channel);
    ASSERT_EQ(kOutputNormal, type);

    // Next packet.
    rtp_info.header.timestamp += expected_samples_per_channel;
    rtp_info.header.sequenceNumber++;
    receive_timestamp += expected_samples_per_channel;
  }

  number_channels = 0;
  samples_per_channel = 0;

  // Get audio without inserting packets, expecting PLC and PLC-to-CNG. Pull
  // one frame without checking speech-type. This is the first frame pulled
  // without inserting any packet, and might not be labeled as PLC.
  ASSERT_EQ(0, neteq_->GetAudio(kBlockSize32kHz, output, &samples_per_channel,
                                &number_channels, &type));
  ASSERT_EQ(1, number_channels);
  ASSERT_EQ(expected_samples_per_channel, samples_per_channel);

  // To be able to test the fading of background noise we need to pull at
  // least 610 frames.
  const int kFadingThreshold = 610;

  // Test several PLC-to-CNG frames for the expected behavior. The number 20 is
  // arbitrary, but sufficiently large to cover enough frames.
  const int kNumPlcToCngTestFrames = 20;
  bool plc_to_cng = false;
  for (int n = 0; n < kFadingThreshold + kNumPlcToCngTestFrames; ++n) {
    number_channels = 0;
    samples_per_channel = 0;
    memset(output, 1, sizeof(output));  // Set to non-zero.
    ASSERT_EQ(0, neteq_->GetAudio(kBlockSize32kHz, output, &samples_per_channel,
                                  &number_channels, &type));
    ASSERT_EQ(1, number_channels);
    ASSERT_EQ(expected_samples_per_channel, samples_per_channel);
    if (type == kOutputPLCtoCNG) {
      plc_to_cng = true;
      double sum_squared = 0;
      for (int k = 0; k < number_channels * samples_per_channel; ++k)
        sum_squared += output[k] * output[k];
      if (bgn_mode == kBgnOn) {
        EXPECT_NE(0, sum_squared);
      } else if (bgn_mode == kBgnOff || n > kFadingThreshold) {
        EXPECT_EQ(0, sum_squared);
      }
    } else {
      EXPECT_EQ(kOutputPLC, type);
    }
  }
  EXPECT_TRUE(plc_to_cng);  // Just to be sure that PLC-to-CNG has occurred.
}
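
// Summary of what CheckBgnOff() expects once PLC has faded into PLC-to-CNG
// (the "energy" below is the sum of squared output samples):
//
//   kBgnOn   - energy stays non-zero for every PLC-to-CNG frame.
//   kBgnOff  - energy is zero for every PLC-to-CNG frame.
//   kBgnFade - energy may start out non-zero but must be zero once more than
//              kFadingThreshold (610) frames have been pulled.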

#if defined(_WIN32) && defined(WEBRTC_ARCH_64_BITS)
// Disabled for Windows 64-bit until webrtc:1458 is fixed.
#define MAYBE_TestBitExactness DISABLED_TestBitExactness
#else
#define MAYBE_TestBitExactness TestBitExactness
#endif

TEST_F(NetEqDecodingTest, DISABLED_ON_ANDROID(MAYBE_TestBitExactness)) {
  const std::string input_rtp_file = webrtc::test::ProjectRootPath() +
      "resources/audio_coding/neteq_universal_new.rtp";
#if defined(_MSC_VER) && (_MSC_VER >= 1700)
  // For Visual Studio 2012 and later, we will have to use the generic reference
  // file, rather than the windows-specific one.
  const std::string input_ref_file = webrtc::test::ProjectRootPath() +
      "resources/audio_coding/neteq4_universal_ref.pcm";
#else
  const std::string input_ref_file =
      webrtc::test::ResourcePath("audio_coding/neteq4_universal_ref", "pcm");
#endif

  if (FLAGS_gen_ref) {
    DecodeAndCompare(input_rtp_file, "");
  } else {
    DecodeAndCompare(input_rtp_file, input_ref_file);
  }
}
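
// To regenerate the reference files used by the bit-exactness and statistics
// tests, run this test binary with the flag defined at the top of the file.
// A sketch of the invocation (binary name and filter are illustrative and
// depend on the local build setup):
//
//   out/Debug/modules_unittests --gtest_filter='NetEqDecodingTest.*' --gen_ref
//
// The newly written files land under webrtc::test::OutputPath() and can then
// be copied into the resources directory.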

TEST_F(NetEqDecodingTest, DISABLED_ON_ANDROID(TestNetworkStatistics)) {
  const std::string input_rtp_file = webrtc::test::ProjectRootPath() +
      "resources/audio_coding/neteq_universal_new.rtp";
#if defined(_MSC_VER) && (_MSC_VER >= 1700)
  // For Visual Studio 2012 and later, we will have to use the generic reference
  // file, rather than the windows-specific one.
  const std::string network_stat_ref_file = webrtc::test::ProjectRootPath() +
      "resources/audio_coding/neteq4_network_stats.dat";
#else
  const std::string network_stat_ref_file =
      webrtc::test::ResourcePath("audio_coding/neteq4_network_stats", "dat");
#endif
  const std::string rtcp_stat_ref_file =
      webrtc::test::ResourcePath("audio_coding/neteq4_rtcp_stats", "dat");
  if (FLAGS_gen_ref) {
    DecodeAndCheckStats(input_rtp_file, "", "");
  } else {
    DecodeAndCheckStats(input_rtp_file, network_stat_ref_file,
                        rtcp_stat_ref_file);
  }
}

// TODO(hlundin): Re-enable test once the statistics interface is up and
// running again.
TEST_F(NetEqDecodingTest, DISABLED_ON_ANDROID(TestFrameWaitingTimeStatistics)) {
  // Use fax mode to avoid time-scaling. This is to simplify the testing of
  // packet waiting times in the packet buffer.
  neteq_->SetPlayoutMode(kPlayoutFax);
  ASSERT_EQ(kPlayoutFax, neteq_->PlayoutMode());
  // Insert 30 dummy packets at once. Each packet contains 10 ms 16 kHz audio.
  size_t num_frames = 30;
  const int kSamples = 10 * 16;
  const int kPayloadBytes = kSamples * 2;
  for (size_t i = 0; i < num_frames; ++i) {
    uint16_t payload[kSamples] = {0};
    WebRtcRTPHeader rtp_info;
    rtp_info.header.sequenceNumber = i;
    rtp_info.header.timestamp = i * kSamples;
    rtp_info.header.ssrc = 0x1234;  // Just an arbitrary SSRC.
    rtp_info.header.payloadType = 94;  // PCM16b WB codec.
    rtp_info.header.markerBit = 0;
    ASSERT_EQ(0, neteq_->InsertPacket(
        rtp_info,
        reinterpret_cast<uint8_t*>(payload),
        kPayloadBytes, 0));
  }
  // Pull out all data.
  for (size_t i = 0; i < num_frames; ++i) {
    int out_len;
    int num_channels;
    NetEqOutputType type;
    ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, &out_len,
                                  &num_channels, &type));
    ASSERT_EQ(kBlockSize16kHz, out_len);
  }

  std::vector<int> waiting_times;
  neteq_->WaitingTimes(&waiting_times);
  EXPECT_EQ(num_frames, waiting_times.size());
  // Since all frames are dumped into NetEQ at once, but pulled out with 10 ms
  // spacing (per definition), we expect the delay to increase by 10 ms for
  // each packet.
  for (size_t i = 0; i < waiting_times.size(); ++i) {
    EXPECT_EQ(static_cast<int>(i + 1) * 10, waiting_times[i]);
  }

  // Check statistics again and make sure it's been reset.
  neteq_->WaitingTimes(&waiting_times);
  int len = waiting_times.size();
  EXPECT_EQ(0, len);

  // Process > 100 frames, and make sure that we get statistics
  // only for 100 frames. Note the new SSRC, causing NetEQ to reset.
  num_frames = 110;
  for (size_t i = 0; i < num_frames; ++i) {
    uint16_t payload[kSamples] = {0};
    WebRtcRTPHeader rtp_info;
    rtp_info.header.sequenceNumber = i;
    rtp_info.header.timestamp = i * kSamples;
    rtp_info.header.ssrc = 0x1235;  // Just an arbitrary SSRC.
    rtp_info.header.payloadType = 94;  // PCM16b WB codec.
    rtp_info.header.markerBit = 0;
    ASSERT_EQ(0, neteq_->InsertPacket(
        rtp_info,
        reinterpret_cast<uint8_t*>(payload),
        kPayloadBytes, 0));
    int out_len;
    int num_channels;
    NetEqOutputType type;
    ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, &out_len,
                                  &num_channels, &type));
    ASSERT_EQ(kBlockSize16kHz, out_len);
  }

  neteq_->WaitingTimes(&waiting_times);
  EXPECT_EQ(100u, waiting_times.size());
}

TEST_F(NetEqDecodingTest,
       DISABLED_ON_ANDROID(TestAverageInterArrivalTimeNegative)) {
  const int kNumFrames = 3000;  // Needed for convergence.
  int frame_index = 0;
  const int kSamples = 10 * 16;
  const int kPayloadBytes = kSamples * 2;
  while (frame_index < kNumFrames) {
    // Insert one packet each time, except every 10th time where we insert two
    // packets at once. This will create a negative clock-drift of approx. 10%.
    int num_packets = (frame_index % 10 == 0 ? 2 : 1);
    for (int n = 0; n < num_packets; ++n) {
      uint8_t payload[kPayloadBytes] = {0};
      WebRtcRTPHeader rtp_info;
      PopulateRtpInfo(frame_index, frame_index * kSamples, &rtp_info);
      ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes, 0));
      ++frame_index;
    }

    // Pull out data once.
    int out_len;
    int num_channels;
    NetEqOutputType type;
    ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, &out_len,
                                  &num_channels, &type));
    ASSERT_EQ(kBlockSize16kHz, out_len);
  }

  NetEqNetworkStatistics network_stats;
  ASSERT_EQ(0, neteq_->NetworkStatistics(&network_stats));
  EXPECT_EQ(-103196, network_stats.clockdrift_ppm);
}

TEST_F(NetEqDecodingTest,
       DISABLED_ON_ANDROID(TestAverageInterArrivalTimePositive)) {
  const int kNumFrames = 5000;  // Needed for convergence.
  int frame_index = 0;
  const int kSamples = 10 * 16;
  const int kPayloadBytes = kSamples * 2;
  for (int i = 0; i < kNumFrames; ++i) {
    // Insert one packet each time, except every 10th time where we don't insert
    // any packet. This will create a positive clock-drift of approx. 11%.
    int num_packets = (i % 10 == 9 ? 0 : 1);
    for (int n = 0; n < num_packets; ++n) {
      uint8_t payload[kPayloadBytes] = {0};
      WebRtcRTPHeader rtp_info;
      PopulateRtpInfo(frame_index, frame_index * kSamples, &rtp_info);
      ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes, 0));
      ++frame_index;
    }

    // Pull out data once.
    int out_len;
    int num_channels;
    NetEqOutputType type;
    ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, &out_len,
                                  &num_channels, &type));
    ASSERT_EQ(kBlockSize16kHz, out_len);
  }

  NetEqNetworkStatistics network_stats;
  ASSERT_EQ(0, neteq_->NetworkStatistics(&network_stats));
  EXPECT_EQ(110946, network_stats.clockdrift_ppm);
}

void NetEqDecodingTest::LongCngWithClockDrift(double drift_factor,
                                              double network_freeze_ms,
                                              bool pull_audio_during_freeze,
                                              int delay_tolerance_ms,
                                              int max_time_to_speech_ms) {
  uint16_t seq_no = 0;
  uint32_t timestamp = 0;
  const int kFrameSizeMs = 30;
  const int kSamples = kFrameSizeMs * 16;
  const int kPayloadBytes = kSamples * 2;
  double next_input_time_ms = 0.0;
  double t_ms;
  int out_len;
  int num_channels;
  NetEqOutputType type;

  // Insert speech for 5 seconds.
  const int kSpeechDurationMs = 5000;
  for (t_ms = 0; t_ms < kSpeechDurationMs; t_ms += 10) {
    // Each turn in this for loop is 10 ms.
    while (next_input_time_ms <= t_ms) {
      // Insert one 30 ms speech frame.
      uint8_t payload[kPayloadBytes] = {0};
      WebRtcRTPHeader rtp_info;
      PopulateRtpInfo(seq_no, timestamp, &rtp_info);
      ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes, 0));
      ++seq_no;
      timestamp += kSamples;
      next_input_time_ms += static_cast<double>(kFrameSizeMs) * drift_factor;
    }
    // Pull out data once.
    ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, &out_len,
                                  &num_channels, &type));
    ASSERT_EQ(kBlockSize16kHz, out_len);
  }

  EXPECT_EQ(kOutputNormal, type);
  int32_t delay_before = timestamp - neteq_->PlayoutTimestamp();

  // Insert CNG for 1 minute (= 60000 ms).
  const int kCngPeriodMs = 100;
  const int kCngPeriodSamples = kCngPeriodMs * 16;  // Period in 16 kHz samples.
  const int kCngDurationMs = 60000;
  for (; t_ms < kSpeechDurationMs + kCngDurationMs; t_ms += 10) {
    // Each turn in this for loop is 10 ms.
    while (next_input_time_ms <= t_ms) {
      // Insert one CNG frame each 100 ms.
      uint8_t payload[kPayloadBytes];
      int payload_len;
      WebRtcRTPHeader rtp_info;
      PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len);
      ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, payload_len, 0));
      ++seq_no;
      timestamp += kCngPeriodSamples;
      next_input_time_ms += static_cast<double>(kCngPeriodMs) * drift_factor;
    }
    // Pull out data once.
    ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, &out_len,
                                  &num_channels, &type));
    ASSERT_EQ(kBlockSize16kHz, out_len);
  }

  EXPECT_EQ(kOutputCNG, type);

  if (network_freeze_ms > 0) {
    // First keep pulling audio for |network_freeze_ms| without inserting
    // any data, then insert CNG data corresponding to |network_freeze_ms|
    // without pulling any output audio.
    const double loop_end_time = t_ms + network_freeze_ms;
    for (; t_ms < loop_end_time; t_ms += 10) {
      // Pull out data once.
      ASSERT_EQ(0,
                neteq_->GetAudio(
                    kMaxBlockSize, out_data_, &out_len, &num_channels, &type));
      ASSERT_EQ(kBlockSize16kHz, out_len);
      EXPECT_EQ(kOutputCNG, type);
    }
    bool pull_once = pull_audio_during_freeze;
    // If |pull_once| is true, GetAudio will be called once half-way through
    // the network recovery period.
    double pull_time_ms = (t_ms + next_input_time_ms) / 2;
    while (next_input_time_ms <= t_ms) {
      if (pull_once && next_input_time_ms >= pull_time_ms) {
        pull_once = false;
        // Pull out data once.
        ASSERT_EQ(
            0,
            neteq_->GetAudio(
                kMaxBlockSize, out_data_, &out_len, &num_channels, &type));
        ASSERT_EQ(kBlockSize16kHz, out_len);
        EXPECT_EQ(kOutputCNG, type);
        t_ms += 10;
      }
      // Insert one CNG frame each 100 ms.
      uint8_t payload[kPayloadBytes];
      int payload_len;
      WebRtcRTPHeader rtp_info;
      PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len);
      ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, payload_len, 0));
      ++seq_no;
      timestamp += kCngPeriodSamples;
      next_input_time_ms += kCngPeriodMs * drift_factor;
    }
  }

  // Insert speech again until output type is speech.
  double speech_restart_time_ms = t_ms;
  while (type != kOutputNormal) {
    // Each turn in this for loop is 10 ms.
    while (next_input_time_ms <= t_ms) {
      // Insert one 30 ms speech frame.
      uint8_t payload[kPayloadBytes] = {0};
      WebRtcRTPHeader rtp_info;
      PopulateRtpInfo(seq_no, timestamp, &rtp_info);
      ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes, 0));
      ++seq_no;
      timestamp += kSamples;
      next_input_time_ms += kFrameSizeMs * drift_factor;
    }
    // Pull out data once.
    ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, &out_len,
                                  &num_channels, &type));
    ASSERT_EQ(kBlockSize16kHz, out_len);
    // Increase clock.
    t_ms += 10;
  }

  // Check that the speech starts again within reasonable time.
  double time_until_speech_returns_ms = t_ms - speech_restart_time_ms;
  EXPECT_LT(time_until_speech_returns_ms, max_time_to_speech_ms);
  int32_t delay_after = timestamp - neteq_->PlayoutTimestamp();
  // Compare delay before and after, and make sure it differs by no more than
  // |delay_tolerance_ms|.
  EXPECT_LE(delay_after, delay_before + delay_tolerance_ms * 16);
  EXPECT_GE(delay_after, delay_before - delay_tolerance_ms * 16);
}
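
// The drift factors used by the tests below scale the sender's packet spacing
// relative to the simulated receiver clock. For example, with
//   drift_factor = 1000.0 / (1000.0 + 25.0) ~= 0.9756
// a 30 ms frame is inserted every 30 * 0.9756 ~= 29.3 ms of receiver time, so
// the sender produces roughly 25 ms more audio per second than the receiver
// consumes (negative drift). Conversely, 1000.0 / (1000.0 - 25.0) ~= 1.0256
// spreads the insertions out and yields positive drift of about +25 ms/s.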

TEST_F(NetEqDecodingTest, DISABLED_ON_ANDROID(LongCngWithNegativeClockDrift)) {
  // Apply a clock drift of -25 ms / s (sender faster than receiver).
  const double kDriftFactor = 1000.0 / (1000.0 + 25.0);
  const double kNetworkFreezeTimeMs = 0.0;
  const bool kGetAudioDuringFreezeRecovery = false;
  const int kDelayToleranceMs = 20;
  const int kMaxTimeToSpeechMs = 100;
  LongCngWithClockDrift(kDriftFactor,
                        kNetworkFreezeTimeMs,
                        kGetAudioDuringFreezeRecovery,
                        kDelayToleranceMs,
                        kMaxTimeToSpeechMs);
}

TEST_F(NetEqDecodingTest, DISABLED_ON_ANDROID(LongCngWithPositiveClockDrift)) {
  // Apply a clock drift of +25 ms / s (sender slower than receiver).
  const double kDriftFactor = 1000.0 / (1000.0 - 25.0);
  const double kNetworkFreezeTimeMs = 0.0;
  const bool kGetAudioDuringFreezeRecovery = false;
  const int kDelayToleranceMs = 20;
  const int kMaxTimeToSpeechMs = 100;
  LongCngWithClockDrift(kDriftFactor,
                        kNetworkFreezeTimeMs,
                        kGetAudioDuringFreezeRecovery,
                        kDelayToleranceMs,
                        kMaxTimeToSpeechMs);
}

TEST_F(NetEqDecodingTest,
       DISABLED_ON_ANDROID(LongCngWithNegativeClockDriftNetworkFreeze)) {
  // Apply a clock drift of -25 ms / s (sender faster than receiver).
  const double kDriftFactor = 1000.0 / (1000.0 + 25.0);
  const double kNetworkFreezeTimeMs = 5000.0;
  const bool kGetAudioDuringFreezeRecovery = false;
  const int kDelayToleranceMs = 50;
  const int kMaxTimeToSpeechMs = 200;
  LongCngWithClockDrift(kDriftFactor,
                        kNetworkFreezeTimeMs,
                        kGetAudioDuringFreezeRecovery,
                        kDelayToleranceMs,
                        kMaxTimeToSpeechMs);
}

TEST_F(NetEqDecodingTest,
       DISABLED_ON_ANDROID(LongCngWithPositiveClockDriftNetworkFreeze)) {
  // Apply a clock drift of +25 ms / s (sender slower than receiver).
  const double kDriftFactor = 1000.0 / (1000.0 - 25.0);
  const double kNetworkFreezeTimeMs = 5000.0;
  const bool kGetAudioDuringFreezeRecovery = false;
  const int kDelayToleranceMs = 20;
  const int kMaxTimeToSpeechMs = 100;
  LongCngWithClockDrift(kDriftFactor,
                        kNetworkFreezeTimeMs,
                        kGetAudioDuringFreezeRecovery,
                        kDelayToleranceMs,
                        kMaxTimeToSpeechMs);
}

TEST_F(
    NetEqDecodingTest,
    DISABLED_ON_ANDROID(LongCngWithPositiveClockDriftNetworkFreezeExtraPull)) {
  // Apply a clock drift of +25 ms / s (sender slower than receiver).
  const double kDriftFactor = 1000.0 / (1000.0 - 25.0);
  const double kNetworkFreezeTimeMs = 5000.0;
  const bool kGetAudioDuringFreezeRecovery = true;
  const int kDelayToleranceMs = 20;
  const int kMaxTimeToSpeechMs = 100;
  LongCngWithClockDrift(kDriftFactor,
                        kNetworkFreezeTimeMs,
                        kGetAudioDuringFreezeRecovery,
                        kDelayToleranceMs,
                        kMaxTimeToSpeechMs);
}

TEST_F(NetEqDecodingTest, DISABLED_ON_ANDROID(LongCngWithoutClockDrift)) {
  const double kDriftFactor = 1.0;  // No drift.
  const double kNetworkFreezeTimeMs = 0.0;
  const bool kGetAudioDuringFreezeRecovery = false;
  const int kDelayToleranceMs = 10;
  const int kMaxTimeToSpeechMs = 50;
  LongCngWithClockDrift(kDriftFactor,
                        kNetworkFreezeTimeMs,
                        kGetAudioDuringFreezeRecovery,
                        kDelayToleranceMs,
                        kMaxTimeToSpeechMs);
}

TEST_F(NetEqDecodingTest, DISABLED_ON_ANDROID(UnknownPayloadType)) {
  const int kPayloadBytes = 100;
  uint8_t payload[kPayloadBytes] = {0};
  WebRtcRTPHeader rtp_info;
  PopulateRtpInfo(0, 0, &rtp_info);
  rtp_info.header.payloadType = 1;  // Not registered as a decoder.
  EXPECT_EQ(NetEq::kFail,
            neteq_->InsertPacket(rtp_info, payload, kPayloadBytes, 0));
  EXPECT_EQ(NetEq::kUnknownRtpPayloadType, neteq_->LastError());
}

TEST_F(NetEqDecodingTest, DISABLED_ON_ANDROID(OversizePacket)) {
  // Payload size is greater than packet buffer size
  const int kPayloadBytes = NetEq::kMaxBytesInBuffer + 1;
  uint8_t payload[kPayloadBytes] = {0};
  WebRtcRTPHeader rtp_info;
  PopulateRtpInfo(0, 0, &rtp_info);
  rtp_info.header.payloadType = 103;  // iSAC, no packet splitting.
  EXPECT_EQ(NetEq::kFail,
            neteq_->InsertPacket(rtp_info, payload, kPayloadBytes, 0));
  EXPECT_EQ(NetEq::kOversizePacket, neteq_->LastError());
}

TEST_F(NetEqDecodingTest, DISABLED_ON_ANDROID(DecoderError)) {
  const int kPayloadBytes = 100;
  uint8_t payload[kPayloadBytes] = {0};
  WebRtcRTPHeader rtp_info;
  PopulateRtpInfo(0, 0, &rtp_info);
  rtp_info.header.payloadType = 103;  // iSAC, but the payload is invalid.
  EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes, 0));
  NetEqOutputType type;
  // Set all of |out_data_| to 1, and verify that it was set to 0 by the call
  // to GetAudio.
  for (int i = 0; i < kMaxBlockSize; ++i) {
    out_data_[i] = 1;
  }
  int num_channels;
  int samples_per_channel;
  EXPECT_EQ(NetEq::kFail,
            neteq_->GetAudio(kMaxBlockSize, out_data_,
                             &samples_per_channel, &num_channels, &type));
  // Verify that there is a decoder error to check.
  EXPECT_EQ(NetEq::kDecoderErrorCode, neteq_->LastError());
  // Code 6730 is an iSAC error code.
  EXPECT_EQ(6730, neteq_->LastDecoderError());
  // Verify that the first 160 samples are set to 0, and that the remaining
  // samples are left unmodified.
  static const int kExpectedOutputLength = 160;  // 10 ms at 16 kHz sample rate.
  for (int i = 0; i < kExpectedOutputLength; ++i) {
    std::ostringstream ss;
    ss << "i = " << i;
    SCOPED_TRACE(ss.str());  // Print out the parameter values on failure.
    EXPECT_EQ(0, out_data_[i]);
  }
  for (int i = kExpectedOutputLength; i < kMaxBlockSize; ++i) {
    std::ostringstream ss;
    ss << "i = " << i;
    SCOPED_TRACE(ss.str());  // Print out the parameter values on failure.
    EXPECT_EQ(1, out_data_[i]);
  }
}

TEST_F(NetEqDecodingTest, DISABLED_ON_ANDROID(GetAudioBeforeInsertPacket)) {
  NetEqOutputType type;
  // Set all of |out_data_| to 1, and verify that it was set to 0 by the call
  // to GetAudio.
  for (int i = 0; i < kMaxBlockSize; ++i) {
    out_data_[i] = 1;
  }
  int num_channels;
  int samples_per_channel;
  EXPECT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_,
                                &samples_per_channel,
                                &num_channels, &type));
  // Verify that the first block of samples is set to 0.
  static const int kExpectedOutputLength =
      kInitSampleRateHz / 100;  // 10 ms at initial sample rate.
  for (int i = 0; i < kExpectedOutputLength; ++i) {
    std::ostringstream ss;
    ss << "i = " << i;
    SCOPED_TRACE(ss.str());  // Print out the parameter values on failure.
    EXPECT_EQ(0, out_data_[i]);
  }
}

TEST_F(NetEqDecodingTest, DISABLED_ON_ANDROID(BackgroundNoise)) {
  neteq_->SetBackgroundNoiseMode(kBgnOn);
  CheckBgnOff(8000, kBgnOn);
  CheckBgnOff(16000, kBgnOn);
  CheckBgnOff(32000, kBgnOn);
  EXPECT_EQ(kBgnOn, neteq_->BackgroundNoiseMode());

  neteq_->SetBackgroundNoiseMode(kBgnOff);
  CheckBgnOff(8000, kBgnOff);
  CheckBgnOff(16000, kBgnOff);
  CheckBgnOff(32000, kBgnOff);
  EXPECT_EQ(kBgnOff, neteq_->BackgroundNoiseMode());

  neteq_->SetBackgroundNoiseMode(kBgnFade);
  CheckBgnOff(8000, kBgnFade);
  CheckBgnOff(16000, kBgnFade);
  CheckBgnOff(32000, kBgnFade);
  EXPECT_EQ(kBgnFade, neteq_->BackgroundNoiseMode());
}

TEST_F(NetEqDecodingTest, DISABLED_ON_ANDROID(SyncPacketInsert)) {
  WebRtcRTPHeader rtp_info;
  uint32_t receive_timestamp = 0;
  // For readability, use the following payload types instead of the defaults
  // of this test.
  uint8_t kPcm16WbPayloadType = 1;
  uint8_t kCngNbPayloadType = 2;
  uint8_t kCngWbPayloadType = 3;
  uint8_t kCngSwb32PayloadType = 4;
  uint8_t kCngSwb48PayloadType = 5;
  uint8_t kAvtPayloadType = 6;
  uint8_t kRedPayloadType = 7;
  uint8_t kIsacPayloadType = 9;  // Payload type 8 is already registered.

  // Register decoders.
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderPCM16Bwb,
                                           kPcm16WbPayloadType));
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderCNGnb, kCngNbPayloadType));
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderCNGwb, kCngWbPayloadType));
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderCNGswb32kHz,
                                           kCngSwb32PayloadType));
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderCNGswb48kHz,
                                           kCngSwb48PayloadType));
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderAVT, kAvtPayloadType));
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderRED, kRedPayloadType));
  ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderISAC, kIsacPayloadType));

  PopulateRtpInfo(0, 0, &rtp_info);
  rtp_info.header.payloadType = kPcm16WbPayloadType;

  // The first packet injected cannot be sync-packet.
  EXPECT_EQ(-1, neteq_->InsertSyncPacket(rtp_info, receive_timestamp));

  // Payload length of 10 ms PCM16 16 kHz.
  const int kPayloadBytes = kBlockSize16kHz * sizeof(int16_t);
  uint8_t payload[kPayloadBytes] = {0};
  ASSERT_EQ(0, neteq_->InsertPacket(
      rtp_info, payload, kPayloadBytes, receive_timestamp));

  // Next packet. Last packet contained 10 ms audio.
  rtp_info.header.sequenceNumber++;
  rtp_info.header.timestamp += kBlockSize16kHz;
  receive_timestamp += kBlockSize16kHz;

  // Unacceptable payload types CNG, AVT (DTMF), RED.
  rtp_info.header.payloadType = kCngNbPayloadType;
  EXPECT_EQ(-1, neteq_->InsertSyncPacket(rtp_info, receive_timestamp));

  rtp_info.header.payloadType = kCngWbPayloadType;
  EXPECT_EQ(-1, neteq_->InsertSyncPacket(rtp_info, receive_timestamp));

  rtp_info.header.payloadType = kCngSwb32PayloadType;
  EXPECT_EQ(-1, neteq_->InsertSyncPacket(rtp_info, receive_timestamp));

  rtp_info.header.payloadType = kCngSwb48PayloadType;
  EXPECT_EQ(-1, neteq_->InsertSyncPacket(rtp_info, receive_timestamp));

  rtp_info.header.payloadType = kAvtPayloadType;
  EXPECT_EQ(-1, neteq_->InsertSyncPacket(rtp_info, receive_timestamp));

  rtp_info.header.payloadType = kRedPayloadType;
  EXPECT_EQ(-1, neteq_->InsertSyncPacket(rtp_info, receive_timestamp));

  // Change of codec cannot be initiated with a sync packet.
  rtp_info.header.payloadType = kIsacPayloadType;
  EXPECT_EQ(-1, neteq_->InsertSyncPacket(rtp_info, receive_timestamp));

  // Change of SSRC is not allowed with a sync packet.
  rtp_info.header.payloadType = kPcm16WbPayloadType;
  ++rtp_info.header.ssrc;
  EXPECT_EQ(-1, neteq_->InsertSyncPacket(rtp_info, receive_timestamp));

  --rtp_info.header.ssrc;
  EXPECT_EQ(0, neteq_->InsertSyncPacket(rtp_info, receive_timestamp));
}

// First insert several noise-like packets, then sync-packets. Decoding all
// packets should not produce errors, statistics should not show any packet
// loss, and sync-packets should decode to zero.
TEST_F(NetEqDecodingTest, DISABLED_ON_ANDROID(SyncPacketDecode)) {
  WebRtcRTPHeader rtp_info;
  PopulateRtpInfo(0, 0, &rtp_info);
  const int kPayloadBytes = kBlockSize16kHz * sizeof(int16_t);
  uint8_t payload[kPayloadBytes];
  int16_t decoded[kBlockSize16kHz];
  for (int n = 0; n < kPayloadBytes; ++n) {
    payload[n] = (rand() & 0xF0) + 1;  // Non-zero random sequence.
  }
  // Insert some packets which decode to noise. We are not interested in
  // actual decoded values.
  NetEqOutputType output_type;
  int num_channels;
  int samples_per_channel;
  uint32_t receive_timestamp = 0;
  int delay_samples = 0;
  for (int n = 0; n < 100; ++n) {
    ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes,
                                      receive_timestamp));
    ASSERT_EQ(0, neteq_->GetAudio(kBlockSize16kHz, decoded,
                                  &samples_per_channel, &num_channels,
                                  &output_type));
    ASSERT_EQ(kBlockSize16kHz, samples_per_channel);
    ASSERT_EQ(1, num_channels);

    // Even if there is an RTP packet in NetEq's buffer, the first frame pulled
    // from NetEq starts with a few zero samples. Here we measure this delay.
    if (n == 0) {
      while (decoded[delay_samples] == 0) delay_samples++;
    }
    rtp_info.header.sequenceNumber++;
    rtp_info.header.timestamp += kBlockSize16kHz;
    receive_timestamp += kBlockSize16kHz;
  }
  const int kNumSyncPackets = 10;
  // Insert sync-packets, the decoded sequence should be all-zero.
  for (int n = 0; n < kNumSyncPackets; ++n) {
    ASSERT_EQ(0, neteq_->InsertSyncPacket(rtp_info, receive_timestamp));
    ASSERT_EQ(0, neteq_->GetAudio(kBlockSize16kHz, decoded,
                                  &samples_per_channel, &num_channels,
                                  &output_type));
    ASSERT_EQ(kBlockSize16kHz, samples_per_channel);
    ASSERT_EQ(1, num_channels);
    EXPECT_TRUE(IsAllZero(&decoded[delay_samples],
                          samples_per_channel * num_channels - delay_samples));
    delay_samples = 0;  // Delay only matters in the first frame.
    rtp_info.header.sequenceNumber++;
    rtp_info.header.timestamp += kBlockSize16kHz;
    receive_timestamp += kBlockSize16kHz;
  }
  // We insert a regular packet; if sync packets are not correctly buffered,
  // the network statistics would show some packet loss.
  ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes,
                                    receive_timestamp));
  ASSERT_EQ(0, neteq_->GetAudio(kBlockSize16kHz, decoded,
                                &samples_per_channel, &num_channels,
                                &output_type));
  // Make sure the last inserted packet is decoded and there are non-zero
  // samples.
  EXPECT_FALSE(IsAllZero(decoded, samples_per_channel * num_channels));
  NetEqNetworkStatistics network_stats;
  ASSERT_EQ(0, neteq_->NetworkStatistics(&network_stats));
  // Expecting a "clean" network.
  EXPECT_EQ(0, network_stats.packet_loss_rate);
  EXPECT_EQ(0, network_stats.expand_rate);
  EXPECT_EQ(0, network_stats.accelerate_rate);
  EXPECT_EQ(0, network_stats.preemptive_rate);
}

// Test that the size of the packet buffer is reported correctly when it
// contains sync packets. Also, test that network packets override sync
// packets, i.e., that a network packet is preferred for decoding over a sync
// packet with the same sequence number and timestamp.
TEST_F(NetEqDecodingTest,
       DISABLED_ON_ANDROID(SyncPacketBufferSizeAndOverridenByNetworkPackets)) {
  WebRtcRTPHeader rtp_info;
  PopulateRtpInfo(0, 0, &rtp_info);
  const int kPayloadBytes = kBlockSize16kHz * sizeof(int16_t);
  uint8_t payload[kPayloadBytes];
  int16_t decoded[kBlockSize16kHz];
  for (int n = 0; n < kPayloadBytes; ++n) {
    payload[n] = (rand() & 0xF0) + 1;  // Non-zero random sequence.
  }
  // Insert some packets which decode to noise. We are not interested in
  // actual decoded values.
  NetEqOutputType output_type;
  int num_channels;
  int samples_per_channel;
  uint32_t receive_timestamp = 0;
  for (int n = 0; n < 1; ++n) {
    ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes,
                                      receive_timestamp));
    ASSERT_EQ(0, neteq_->GetAudio(kBlockSize16kHz, decoded,
                                  &samples_per_channel, &num_channels,
                                  &output_type));
    ASSERT_EQ(kBlockSize16kHz, samples_per_channel);
    ASSERT_EQ(1, num_channels);
    rtp_info.header.sequenceNumber++;
    rtp_info.header.timestamp += kBlockSize16kHz;
    receive_timestamp += kBlockSize16kHz;
  }
  const int kNumSyncPackets = 10;

  WebRtcRTPHeader first_sync_packet_rtp_info;
  memcpy(&first_sync_packet_rtp_info, &rtp_info, sizeof(rtp_info));

  // Insert sync-packets, but no decoding.
  for (int n = 0; n < kNumSyncPackets; ++n) {
    ASSERT_EQ(0, neteq_->InsertSyncPacket(rtp_info, receive_timestamp));
    rtp_info.header.sequenceNumber++;
    rtp_info.header.timestamp += kBlockSize16kHz;
    receive_timestamp += kBlockSize16kHz;
  }
  NetEqNetworkStatistics network_stats;
  ASSERT_EQ(0, neteq_->NetworkStatistics(&network_stats));
  EXPECT_EQ(kNumSyncPackets * 10, network_stats.current_buffer_size_ms);

  // Rewind |rtp_info| to that of the first sync packet.
  memcpy(&rtp_info, &first_sync_packet_rtp_info, sizeof(rtp_info));

  // Insert.
  for (int n = 0; n < kNumSyncPackets; ++n) {
    ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes,
                                      receive_timestamp));
    rtp_info.header.sequenceNumber++;
    rtp_info.header.timestamp += kBlockSize16kHz;
    receive_timestamp += kBlockSize16kHz;
  }

  // Decode.
  for (int n = 0; n < kNumSyncPackets; ++n) {
    ASSERT_EQ(0, neteq_->GetAudio(kBlockSize16kHz, decoded,
                                  &samples_per_channel, &num_channels,
                                  &output_type));
    ASSERT_EQ(kBlockSize16kHz, samples_per_channel);
    ASSERT_EQ(1, num_channels);
    EXPECT_TRUE(IsAllNonZero(decoded, samples_per_channel * num_channels));
  }
}

void NetEqDecodingTest::WrapTest(uint16_t start_seq_no,
                                 uint32_t start_timestamp,
                                 const std::set<uint16_t>& drop_seq_numbers,
                                 bool expect_seq_no_wrap,
                                 bool expect_timestamp_wrap) {
  uint16_t seq_no = start_seq_no;
  uint32_t timestamp = start_timestamp;
  const int kBlocksPerFrame = 3;  // Number of 10 ms blocks per frame.
  const int kFrameSizeMs = kBlocksPerFrame * kTimeStepMs;
  const int kSamples = kBlockSize16kHz * kBlocksPerFrame;
  const int kPayloadBytes = kSamples * sizeof(int16_t);
  double next_input_time_ms = 0.0;
  int16_t decoded[kBlockSize16kHz];
  int num_channels;
  int samples_per_channel;
  NetEqOutputType output_type;
  uint32_t receive_timestamp = 0;

  // Insert speech for 2 seconds.
  const int kSpeechDurationMs = 2000;
  int packets_inserted = 0;
  uint16_t last_seq_no;
  uint32_t last_timestamp;
  bool timestamp_wrapped = false;
  bool seq_no_wrapped = false;
  for (double t_ms = 0; t_ms < kSpeechDurationMs; t_ms += 10) {
    // Each turn in this for loop is 10 ms.
    while (next_input_time_ms <= t_ms) {
      // Insert one 30 ms speech frame.
      uint8_t payload[kPayloadBytes] = {0};
      WebRtcRTPHeader rtp_info;
      PopulateRtpInfo(seq_no, timestamp, &rtp_info);
      if (drop_seq_numbers.find(seq_no) == drop_seq_numbers.end()) {
        // This sequence number was not in the set to drop. Insert it.
        ASSERT_EQ(0,
                  neteq_->InsertPacket(rtp_info, payload, kPayloadBytes,
                                       receive_timestamp));
        ++packets_inserted;
      }
      NetEqNetworkStatistics network_stats;
      ASSERT_EQ(0, neteq_->NetworkStatistics(&network_stats));

      // Due to internal NetEq logic, the preferred buffer size is about 4
      // times the packet size for the first few packets. Therefore we refrain
      // from checking the criterion for those packets.
      if (packets_inserted > 4) {
        // Expect preferred and actual buffer size to be no more than 2 frames.
        EXPECT_LE(network_stats.preferred_buffer_size_ms, kFrameSizeMs * 2);
        EXPECT_LE(network_stats.current_buffer_size_ms, kFrameSizeMs * 2);
      }
      last_seq_no = seq_no;
      last_timestamp = timestamp;

      ++seq_no;
      timestamp += kSamples;
      receive_timestamp += kSamples;
      next_input_time_ms += static_cast<double>(kFrameSizeMs);

      seq_no_wrapped |= seq_no < last_seq_no;
      timestamp_wrapped |= timestamp < last_timestamp;
    }
    // Pull out data once.
    ASSERT_EQ(0, neteq_->GetAudio(kBlockSize16kHz, decoded,
                                  &samples_per_channel, &num_channels,
                                  &output_type));
    ASSERT_EQ(kBlockSize16kHz, samples_per_channel);
    ASSERT_EQ(1, num_channels);

    // Expect delay (in samples) to be less than 2 packets.
    EXPECT_LE(timestamp - neteq_->PlayoutTimestamp(),
              static_cast<uint32_t>(kSamples * 2));
  }
  // Make sure we have actually tested wrap-around.
  ASSERT_EQ(expect_seq_no_wrap, seq_no_wrapped);
  ASSERT_EQ(expect_timestamp_wrap, timestamp_wrapped);
}
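
// Wrap detection in WrapTest() relies on plain unsigned arithmetic: when a
// uint16_t sequence number or uint32_t timestamp overflows, the incremented
// value becomes smaller than its predecessor. A minimal illustration:
//
//   uint16_t prev = 0xFFFF;
//   uint16_t seq = prev;
//   ++seq;                      // Wraps around to 0x0000.
//   bool wrapped = seq < prev;  // true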

TEST_F(NetEqDecodingTest, SequenceNumberWrap) {
  // Start with a sequence number that will soon wrap.
  std::set<uint16_t> drop_seq_numbers;  // Don't drop any packets.
  WrapTest(0xFFFF - 10, 0, drop_seq_numbers, true, false);
}

TEST_F(NetEqDecodingTest, SequenceNumberWrapAndDrop) {
  // Start with a sequence number that will soon wrap.
  std::set<uint16_t> drop_seq_numbers;
  drop_seq_numbers.insert(0xFFFF);
  drop_seq_numbers.insert(0x0);
  WrapTest(0xFFFF - 10, 0, drop_seq_numbers, true, false);
}

TEST_F(NetEqDecodingTest, TimestampWrap) {
  // Start with a timestamp that will soon wrap.
  std::set<uint16_t> drop_seq_numbers;
  WrapTest(0, 0xFFFFFFFF - 3000, drop_seq_numbers, false, true);
}

TEST_F(NetEqDecodingTest, TimestampAndSequenceNumberWrap) {
  // Start with a timestamp and a sequence number that will wrap at the same
  // time.
  std::set<uint16_t> drop_seq_numbers;
  WrapTest(0xFFFF - 10, 0xFFFFFFFF - 5000, drop_seq_numbers, true, true);
}

void NetEqDecodingTest::DuplicateCng() {
  uint16_t seq_no = 0;
  uint32_t timestamp = 0;
  const int kFrameSizeMs = 10;
  const int kSampleRateKhz = 16;
  const int kSamples = kFrameSizeMs * kSampleRateKhz;
  const int kPayloadBytes = kSamples * 2;

  // Insert three speech packets. Three are needed to get the frame length
  // correct.
  int out_len;
  int num_channels;
  NetEqOutputType type;
  uint8_t payload[kPayloadBytes] = {0};
  WebRtcRTPHeader rtp_info;
  for (int i = 0; i < 3; ++i) {
    PopulateRtpInfo(seq_no, timestamp, &rtp_info);
    ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes, 0));
    ++seq_no;
    timestamp += kSamples;

    // Pull audio once.
    ASSERT_EQ(0,
              neteq_->GetAudio(
                  kMaxBlockSize, out_data_, &out_len, &num_channels, &type));
    ASSERT_EQ(kBlockSize16kHz, out_len);
  }
  // Verify speech output.
  EXPECT_EQ(kOutputNormal, type);

  // Insert same CNG packet twice.
  const int kCngPeriodMs = 100;
  const int kCngPeriodSamples = kCngPeriodMs * kSampleRateKhz;
  int payload_len;
  PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len);
  // This is the first time this CNG packet is inserted.
  ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, payload_len, 0));

  // Pull audio once and make sure CNG is played.
  ASSERT_EQ(0,
            neteq_->GetAudio(
                kMaxBlockSize, out_data_, &out_len, &num_channels, &type));
  ASSERT_EQ(kBlockSize16kHz, out_len);
  EXPECT_EQ(kOutputCNG, type);
  EXPECT_EQ(timestamp - 10, neteq_->PlayoutTimestamp());

  // Insert the same CNG packet again. Note that at this point it is old, since
  // we have already decoded the first copy of it.
  ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, payload_len, 0));

  // Pull audio until we have played |kCngPeriodMs| of CNG. Start at 10 ms since
  // we have already pulled out CNG once.
  for (int cng_time_ms = 10; cng_time_ms < kCngPeriodMs; cng_time_ms += 10) {
    ASSERT_EQ(0,
              neteq_->GetAudio(
                  kMaxBlockSize, out_data_, &out_len, &num_channels, &type));
    ASSERT_EQ(kBlockSize16kHz, out_len);
    EXPECT_EQ(kOutputCNG, type);
    EXPECT_EQ(timestamp - 10, neteq_->PlayoutTimestamp());
  }

  // Insert speech again.
  ++seq_no;
  timestamp += kCngPeriodSamples;
  PopulateRtpInfo(seq_no, timestamp, &rtp_info);
  ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes, 0));

  // Pull audio once and verify that the output is speech again.
  ASSERT_EQ(0,
            neteq_->GetAudio(
                kMaxBlockSize, out_data_, &out_len, &num_channels, &type));
  ASSERT_EQ(kBlockSize16kHz, out_len);
  EXPECT_EQ(kOutputNormal, type);
  EXPECT_EQ(timestamp + kSamples - 10, neteq_->PlayoutTimestamp());
}

TEST_F(NetEqDecodingTest, DiscardDuplicateCng) { DuplicateCng(); }
}  // namespace webrtc