Update a ton of audio code to use size_t more correctly and in general reduce
use of int16_t/uint16_t.
This is the upshot of a recommendation by henrik.lundin and kwiberg on an original small change ( https://webrtc-codereview.appspot.com/42569004/#ps1 ) to stop using int16_t just because values could fit in it, and is similar in nature to a previous "mass change to use size_t more" ( https://webrtc-codereview.appspot.com/23129004/ ) which also needed to be split up for review but to land all at once, since, like adding "const", such changes tend to cause a lot of transitive effects.
This was reviewed and approved in pieces:
https://codereview.webrtc.org/1224093003
https://codereview.webrtc.org/1224123002
https://codereview.webrtc.org/1224163002
https://codereview.webrtc.org/1225133003
https://codereview.webrtc.org/1225173002
https://codereview.webrtc.org/1227163003
https://codereview.webrtc.org/1227203003
https://codereview.webrtc.org/1227213002
https://codereview.webrtc.org/1227893002
https://codereview.webrtc.org/1228793004
https://codereview.webrtc.org/1228803003
https://codereview.webrtc.org/1228823002
https://codereview.webrtc.org/1228823003
https://codereview.webrtc.org/1228843002
https://codereview.webrtc.org/1230693002
https://codereview.webrtc.org/1231713002
The change is being landed as TBR to all the folks who reviewed the above.
BUG=chromium:81439
TEST=none
R=andrew@webrtc.org, pbos@webrtc.org
TBR=aluebs, andrew, asapersson, henrika, hlundin, jan.skoglund, kwiberg, minyue, pbos, pthatcher
Review URL: https://codereview.webrtc.org/1230503003 .
Cr-Commit-Position: refs/heads/master@{#9768}
diff --git a/webrtc/modules/audio_coding/codecs/audio_decoder.cc b/webrtc/modules/audio_coding/codecs/audio_decoder.cc
index 0a4a6a9..08d101c 100644
--- a/webrtc/modules/audio_coding/codecs/audio_decoder.cc
+++ b/webrtc/modules/audio_coding/codecs/audio_decoder.cc
@@ -56,7 +56,9 @@
bool AudioDecoder::HasDecodePlc() const { return false; }
-int AudioDecoder::DecodePlc(int num_frames, int16_t* decoded) { return 0; }
+size_t AudioDecoder::DecodePlc(size_t num_frames, int16_t* decoded) {
+ return 0;
+}
int AudioDecoder::IncomingPacket(const uint8_t* payload,
size_t payload_len,
diff --git a/webrtc/modules/audio_coding/codecs/audio_decoder.h b/webrtc/modules/audio_coding/codecs/audio_decoder.h
index 8947e81..480b1aa 100644
--- a/webrtc/modules/audio_coding/codecs/audio_decoder.h
+++ b/webrtc/modules/audio_coding/codecs/audio_decoder.h
@@ -62,7 +62,7 @@
// Calls the packet-loss concealment of the decoder to update the state after
// one or several lost packets.
- virtual int DecodePlc(int num_frames, int16_t* decoded);
+ virtual size_t DecodePlc(size_t num_frames, int16_t* decoded);
// Initializes the decoder.
virtual int Init() = 0;
diff --git a/webrtc/modules/audio_coding/codecs/audio_encoder.h b/webrtc/modules/audio_coding/codecs/audio_encoder.h
index fe6fd87..0a40316 100644
--- a/webrtc/modules/audio_coding/codecs/audio_encoder.h
+++ b/webrtc/modules/audio_coding/codecs/audio_encoder.h
@@ -90,11 +90,11 @@
// the encoder may vary the number of 10 ms frames from packet to packet, but
// it must decide the length of the next packet no later than when outputting
// the preceding packet.
- virtual int Num10MsFramesInNextPacket() const = 0;
+ virtual size_t Num10MsFramesInNextPacket() const = 0;
// Returns the maximum value that can be returned by
// Num10MsFramesInNextPacket().
- virtual int Max10MsFramesInAPacket() const = 0;
+ virtual size_t Max10MsFramesInAPacket() const = 0;
// Returns the current target bitrate in bits/s. The value -1 means that the
// codec adapts the target automatically, and a current target cannot be
diff --git a/webrtc/modules/audio_coding/codecs/audio_encoder_mutable_impl.h b/webrtc/modules/audio_coding/codecs/audio_encoder_mutable_impl.h
index 553d8ad..c1184e1 100644
--- a/webrtc/modules/audio_coding/codecs/audio_encoder_mutable_impl.h
+++ b/webrtc/modules/audio_coding/codecs/audio_encoder_mutable_impl.h
@@ -74,11 +74,11 @@
CriticalSectionScoped cs(encoder_lock_.get());
return encoder_->RtpTimestampRateHz();
}
- int Num10MsFramesInNextPacket() const override {
+ size_t Num10MsFramesInNextPacket() const override {
CriticalSectionScoped cs(encoder_lock_.get());
return encoder_->Num10MsFramesInNextPacket();
}
- int Max10MsFramesInAPacket() const override {
+ size_t Max10MsFramesInAPacket() const override {
CriticalSectionScoped cs(encoder_lock_.get());
return encoder_->Max10MsFramesInAPacket();
}
diff --git a/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng.cc b/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng.cc
index d2acaa1..279616e 100644
--- a/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng.cc
+++ b/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng.cc
@@ -38,7 +38,8 @@
return false;
if (num_channels != speech_encoder->NumChannels())
return false;
- if (sid_frame_interval_ms < speech_encoder->Max10MsFramesInAPacket() * 10)
+ if (sid_frame_interval_ms <
+ static_cast<int>(speech_encoder->Max10MsFramesInAPacket() * 10))
return false;
if (num_cng_coefficients > WEBRTC_CNG_MAX_LPC_ORDER ||
num_cng_coefficients <= 0)
@@ -89,11 +90,11 @@
return std::max(max_encoded_bytes_active, max_encoded_bytes_passive);
}
-int AudioEncoderCng::Num10MsFramesInNextPacket() const {
+size_t AudioEncoderCng::Num10MsFramesInNextPacket() const {
return speech_encoder_->Num10MsFramesInNextPacket();
}
-int AudioEncoderCng::Max10MsFramesInAPacket() const {
+size_t AudioEncoderCng::Max10MsFramesInAPacket() const {
return speech_encoder_->Max10MsFramesInAPacket();
}
@@ -124,11 +125,11 @@
for (size_t i = 0; i < samples_per_10ms_frame; ++i) {
speech_buffer_.push_back(audio[i]);
}
- const int frames_to_encode = speech_encoder_->Num10MsFramesInNextPacket();
- if (rtp_timestamps_.size() < static_cast<size_t>(frames_to_encode)) {
+ const size_t frames_to_encode = speech_encoder_->Num10MsFramesInNextPacket();
+ if (rtp_timestamps_.size() < frames_to_encode) {
return EncodedInfo();
}
- CHECK_LE(frames_to_encode * 10, kMaxFrameSizeMs)
+ CHECK_LE(static_cast<int>(frames_to_encode * 10), kMaxFrameSizeMs)
<< "Frame size cannot be larger than " << kMaxFrameSizeMs
<< " ms when using VAD/CNG.";
@@ -136,12 +137,12 @@
// following split sizes:
// 10 ms = 10 + 0 ms; 20 ms = 20 + 0 ms; 30 ms = 30 + 0 ms;
// 40 ms = 20 + 20 ms; 50 ms = 30 + 20 ms; 60 ms = 30 + 30 ms.
- int blocks_in_first_vad_call =
+ size_t blocks_in_first_vad_call =
(frames_to_encode > 3 ? 3 : frames_to_encode);
if (frames_to_encode == 4)
blocks_in_first_vad_call = 2;
CHECK_GE(frames_to_encode, blocks_in_first_vad_call);
- const int blocks_in_second_vad_call =
+ const size_t blocks_in_second_vad_call =
frames_to_encode - blocks_in_first_vad_call;
// Check if all of the buffer is passive speech. Start with checking the first
@@ -183,7 +184,7 @@
}
AudioEncoder::EncodedInfo AudioEncoderCng::EncodePassive(
- int frames_to_encode,
+ size_t frames_to_encode,
size_t max_encoded_bytes,
uint8_t* encoded) {
bool force_sid = last_frame_active_;
@@ -191,15 +192,19 @@
const size_t samples_per_10ms_frame = SamplesPer10msFrame();
CHECK_GE(max_encoded_bytes, frames_to_encode * samples_per_10ms_frame);
AudioEncoder::EncodedInfo info;
- for (int i = 0; i < frames_to_encode; ++i) {
- int16_t encoded_bytes_tmp = 0;
+ for (size_t i = 0; i < frames_to_encode; ++i) {
+ // It's important not to pass &info.encoded_bytes directly to
+ // WebRtcCng_Encode(), since later loop iterations may return zero in that
+ // value, in which case we don't want to overwrite any value from an earlier
+ // iteration.
+ size_t encoded_bytes_tmp = 0;
CHECK_GE(WebRtcCng_Encode(cng_inst_.get(),
&speech_buffer_[i * samples_per_10ms_frame],
- static_cast<int16_t>(samples_per_10ms_frame),
+ samples_per_10ms_frame,
encoded, &encoded_bytes_tmp, force_sid), 0);
if (encoded_bytes_tmp > 0) {
CHECK(!output_produced);
- info.encoded_bytes = static_cast<size_t>(encoded_bytes_tmp);
+ info.encoded_bytes = encoded_bytes_tmp;
output_produced = true;
force_sid = false;
}
@@ -212,12 +217,12 @@
}
AudioEncoder::EncodedInfo AudioEncoderCng::EncodeActive(
- int frames_to_encode,
+ size_t frames_to_encode,
size_t max_encoded_bytes,
uint8_t* encoded) {
const size_t samples_per_10ms_frame = SamplesPer10msFrame();
AudioEncoder::EncodedInfo info;
- for (int i = 0; i < frames_to_encode; ++i) {
+ for (size_t i = 0; i < frames_to_encode; ++i) {
info = speech_encoder_->Encode(
rtp_timestamps_.front(), &speech_buffer_[i * samples_per_10ms_frame],
samples_per_10ms_frame, max_encoded_bytes, encoded);
diff --git a/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng_unittest.cc b/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng_unittest.cc
index d16dd3b..a2ab6e8 100644
--- a/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng_unittest.cc
+++ b/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng_unittest.cc
@@ -59,14 +59,14 @@
void CreateCng() {
// The config_ parameters may be changed by the TEST_Fs up until CreateCng()
// is called, thus we cannot use the values until now.
- num_audio_samples_10ms_ = 10 * sample_rate_hz_ / 1000;
+ num_audio_samples_10ms_ = static_cast<size_t>(10 * sample_rate_hz_ / 1000);
ASSERT_LE(num_audio_samples_10ms_, kMaxNumSamples);
EXPECT_CALL(mock_encoder_, SampleRateHz())
.WillRepeatedly(Return(sample_rate_hz_));
// Max10MsFramesInAPacket() is just used to verify that the SID frame period
// is not too small. The return value does not matter that much, as long as
// it is smaller than 10.
- EXPECT_CALL(mock_encoder_, Max10MsFramesInAPacket()).WillOnce(Return(1));
+ EXPECT_CALL(mock_encoder_, Max10MsFramesInAPacket()).WillOnce(Return(1u));
EXPECT_CALL(mock_encoder_, MaxEncodedBytes())
.WillRepeatedly(Return(kMockMaxEncodedBytes));
cng_.reset(new AudioEncoderCng(config_));
@@ -83,10 +83,10 @@
// Expect |num_calls| calls to the encoder, all successful. The last call
// claims to have encoded |kMockMaxEncodedBytes| bytes, and all the preceding
// ones 0 bytes.
- void ExpectEncodeCalls(int num_calls) {
+ void ExpectEncodeCalls(size_t num_calls) {
InSequence s;
AudioEncoder::EncodedInfo info;
- for (int j = 0; j < num_calls - 1; ++j) {
+ for (size_t j = 0; j < num_calls - 1; ++j) {
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
.WillOnce(Return(info));
}
@@ -98,7 +98,7 @@
// Verifies that the cng_ object waits until it has collected
// |blocks_per_frame| blocks of audio, and then dispatches all of them to
// the underlying codec (speech or cng).
- void CheckBlockGrouping(int blocks_per_frame, bool active_speech) {
+ void CheckBlockGrouping(size_t blocks_per_frame, bool active_speech) {
EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket())
.WillRepeatedly(Return(blocks_per_frame));
CreateCng();
@@ -107,7 +107,7 @@
// Don't expect any calls to the encoder yet.
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _)).Times(0);
- for (int i = 0; i < blocks_per_frame - 1; ++i) {
+ for (size_t i = 0; i < blocks_per_frame - 1; ++i) {
Encode();
EXPECT_EQ(0u, encoded_info_.encoded_bytes);
}
@@ -127,14 +127,15 @@
void CheckVadInputSize(int input_frame_size_ms,
int expected_first_block_size_ms,
int expected_second_block_size_ms) {
- const int blocks_per_frame = input_frame_size_ms / 10;
+ const size_t blocks_per_frame =
+ static_cast<size_t>(input_frame_size_ms / 10);
EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket())
.WillRepeatedly(Return(blocks_per_frame));
// Expect nothing to happen before the last block is sent to cng_.
EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _)).Times(0);
- for (int i = 0; i < blocks_per_frame - 1; ++i) {
+ for (size_t i = 0; i < blocks_per_frame - 1; ++i) {
Encode();
}
@@ -163,7 +164,7 @@
Vad::Activity second_type) {
// Set the speech encoder frame size to 60 ms, to ensure that the VAD will
// be called twice.
- const int blocks_per_frame = 6;
+ const size_t blocks_per_frame = 6;
EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket())
.WillRepeatedly(Return(blocks_per_frame));
InSequence s;
@@ -175,7 +176,7 @@
.WillOnce(Return(second_type));
}
encoded_info_.payload_type = 0;
- for (int i = 0; i < blocks_per_frame; ++i) {
+ for (size_t i = 0; i < blocks_per_frame; ++i) {
Encode();
}
return encoded_info_.payload_type != kCngPayloadType;
@@ -199,8 +200,8 @@
TEST_F(AudioEncoderCngTest, CheckFrameSizePropagation) {
CreateCng();
- EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket()).WillOnce(Return(17));
- EXPECT_EQ(17, cng_->Num10MsFramesInNextPacket());
+ EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket()).WillOnce(Return(17U));
+ EXPECT_EQ(17U, cng_->Num10MsFramesInNextPacket());
}
TEST_F(AudioEncoderCngTest, CheckChangeBitratePropagation) {
@@ -217,7 +218,7 @@
TEST_F(AudioEncoderCngTest, EncodeCallsVad) {
EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket())
- .WillRepeatedly(Return(1));
+ .WillRepeatedly(Return(1U));
CreateCng();
EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _))
.WillOnce(Return(Vad::kPassive));
@@ -249,7 +250,7 @@
}
TEST_F(AudioEncoderCngTest, EncodePassive) {
- const int kBlocksPerFrame = 3;
+ const size_t kBlocksPerFrame = 3;
EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket())
.WillRepeatedly(Return(kBlocksPerFrame));
CreateCng();
@@ -258,7 +259,7 @@
// Expect no calls at all to the speech encoder mock.
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _)).Times(0);
uint32_t expected_timestamp = timestamp_;
- for (int i = 0; i < 100; ++i) {
+ for (size_t i = 0; i < 100; ++i) {
Encode();
// Check if it was time to call the cng encoder. This is done once every
// |kBlocksPerFrame| calls.
@@ -339,7 +340,7 @@
TEST_F(AudioEncoderCngTest, VerifyCngPayloadType) {
CreateCng();
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _)).Times(0);
- EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket()).WillOnce(Return(1));
+ EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket()).WillOnce(Return(1U));
EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _))
.WillOnce(Return(Vad::kPassive));
encoded_info_.payload_type = 0;
@@ -352,7 +353,7 @@
TEST_F(AudioEncoderCngTest, VerifySidFrameAfterSpeech) {
CreateCng();
EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket())
- .WillRepeatedly(Return(1));
+ .WillRepeatedly(Return(1U));
// Start with encoding noise.
EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _))
.Times(2)
@@ -443,7 +444,7 @@
TEST_F(AudioEncoderCngDeathTest, EncoderFrameSizeTooLarge) {
CreateCng();
EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket())
- .WillRepeatedly(Return(7));
+ .WillRepeatedly(Return(7U));
for (int i = 0; i < 6; ++i)
Encode();
EXPECT_DEATH(Encode(),
diff --git a/webrtc/modules/audio_coding/codecs/cng/cng_unittest.cc b/webrtc/modules/audio_coding/codecs/cng/cng_unittest.cc
index 0d1c670..2409540 100644
--- a/webrtc/modules/audio_coding/codecs/cng/cng_unittest.cc
+++ b/webrtc/modules/audio_coding/codecs/cng/cng_unittest.cc
@@ -99,7 +99,7 @@
TEST_F(CngTest, CngEncode) {
uint8_t sid_data[WEBRTC_CNG_MAX_LPC_ORDER + 1];
- int16_t number_bytes;
+ size_t number_bytes;
// Create encoder memory.
EXPECT_EQ(0, WebRtcCng_CreateEnc(&cng_enc_inst_));
@@ -151,7 +151,7 @@
// Encode Cng with too long input vector.
TEST_F(CngTest, CngEncodeTooLong) {
uint8_t sid_data[WEBRTC_CNG_MAX_LPC_ORDER + 1];
- int16_t number_bytes;
+ size_t number_bytes;
// Create and init encoder memory.
EXPECT_EQ(0, WebRtcCng_CreateEnc(&cng_enc_inst_));
@@ -170,7 +170,7 @@
// Call encode without calling init.
TEST_F(CngTest, CngEncodeNoInit) {
uint8_t sid_data[WEBRTC_CNG_MAX_LPC_ORDER + 1];
- int16_t number_bytes;
+ size_t number_bytes;
// Create encoder memory.
EXPECT_EQ(0, WebRtcCng_CreateEnc(&cng_enc_inst_));
@@ -187,7 +187,7 @@
// Update SID parameters, for both 9 and 16 parameters.
TEST_F(CngTest, CngUpdateSid) {
uint8_t sid_data[WEBRTC_CNG_MAX_LPC_ORDER + 1];
- int16_t number_bytes;
+ size_t number_bytes;
// Create and initialize encoder and decoder memory.
EXPECT_EQ(0, WebRtcCng_CreateEnc(&cng_enc_inst_));
@@ -224,7 +224,7 @@
// Update SID parameters, with wrong parameters or without calling decode.
TEST_F(CngTest, CngUpdateSidErroneous) {
uint8_t sid_data[WEBRTC_CNG_MAX_LPC_ORDER + 1];
- int16_t number_bytes;
+ size_t number_bytes;
// Create encoder and decoder memory.
EXPECT_EQ(0, WebRtcCng_CreateEnc(&cng_enc_inst_));
@@ -261,7 +261,7 @@
TEST_F(CngTest, CngGenerate) {
uint8_t sid_data[WEBRTC_CNG_MAX_LPC_ORDER + 1];
int16_t out_data[640];
- int16_t number_bytes;
+ size_t number_bytes;
// Create and initialize encoder and decoder memory.
EXPECT_EQ(0, WebRtcCng_CreateEnc(&cng_enc_inst_));
@@ -294,7 +294,7 @@
// Test automatic SID.
TEST_F(CngTest, CngAutoSid) {
uint8_t sid_data[WEBRTC_CNG_MAX_LPC_ORDER + 1];
- int16_t number_bytes;
+ size_t number_bytes;
// Create and initialize encoder and decoder memory.
EXPECT_EQ(0, WebRtcCng_CreateEnc(&cng_enc_inst_));
@@ -321,7 +321,7 @@
// Test automatic SID, with very short interval.
TEST_F(CngTest, CngAutoSidShort) {
uint8_t sid_data[WEBRTC_CNG_MAX_LPC_ORDER + 1];
- int16_t number_bytes;
+ size_t number_bytes;
// Create and initialize encoder and decoder memory.
EXPECT_EQ(0, WebRtcCng_CreateEnc(&cng_enc_inst_));
diff --git a/webrtc/modules/audio_coding/codecs/cng/include/audio_encoder_cng.h b/webrtc/modules/audio_coding/codecs/cng/include/audio_encoder_cng.h
index 51d2feb..6a11366 100644
--- a/webrtc/modules/audio_coding/codecs/cng/include/audio_encoder_cng.h
+++ b/webrtc/modules/audio_coding/codecs/cng/include/audio_encoder_cng.h
@@ -50,8 +50,8 @@
int NumChannels() const override;
size_t MaxEncodedBytes() const override;
int RtpTimestampRateHz() const override;
- int Num10MsFramesInNextPacket() const override;
- int Max10MsFramesInAPacket() const override;
+ size_t Num10MsFramesInNextPacket() const override;
+ size_t Max10MsFramesInAPacket() const override;
int GetTargetBitrate() const override;
void SetTargetBitrate(int bits_per_second) override;
void SetProjectedPacketLossRate(double fraction) override;
@@ -67,10 +67,10 @@
inline void operator()(CNG_enc_inst* ptr) const { WebRtcCng_FreeEnc(ptr); }
};
- EncodedInfo EncodePassive(int frames_to_encode,
+ EncodedInfo EncodePassive(size_t frames_to_encode,
size_t max_encoded_bytes,
uint8_t* encoded);
- EncodedInfo EncodeActive(int frames_to_encode,
+ EncodedInfo EncodeActive(size_t frames_to_encode,
size_t max_encoded_bytes,
uint8_t* encoded);
size_t SamplesPer10msFrame() const;
diff --git a/webrtc/modules/audio_coding/codecs/cng/include/webrtc_cng.h b/webrtc/modules/audio_coding/codecs/cng/include/webrtc_cng.h
index 1ec5d67..6c7e50b 100644
--- a/webrtc/modules/audio_coding/codecs/cng/include/webrtc_cng.h
+++ b/webrtc/modules/audio_coding/codecs/cng/include/webrtc_cng.h
@@ -104,8 +104,8 @@
* -1 - Error
*/
int WebRtcCng_Encode(CNG_enc_inst* cng_inst, int16_t* speech,
- int16_t nrOfSamples, uint8_t* SIDdata,
- int16_t* bytesOut, int16_t forceSID);
+ size_t nrOfSamples, uint8_t* SIDdata,
+ size_t* bytesOut, int16_t forceSID);
/****************************************************************************
* WebRtcCng_UpdateSid(...)
@@ -138,7 +138,7 @@
* -1 - Error
*/
int16_t WebRtcCng_Generate(CNG_dec_inst* cng_inst, int16_t* outData,
- int16_t nrOfSamples, int16_t new_period);
+ size_t nrOfSamples, int16_t new_period);
/*****************************************************************************
* WebRtcCng_GetErrorCodeEnc/Dec(...)
diff --git a/webrtc/modules/audio_coding/codecs/cng/webrtc_cng.c b/webrtc/modules/audio_coding/codecs/cng/webrtc_cng.c
index 1f6974a..a0c166a 100644
--- a/webrtc/modules/audio_coding/codecs/cng/webrtc_cng.c
+++ b/webrtc/modules/audio_coding/codecs/cng/webrtc_cng.c
@@ -35,7 +35,7 @@
} WebRtcCngDecoder;
typedef struct WebRtcCngEncoder_ {
- int16_t enc_nrOfCoefs;
+ size_t enc_nrOfCoefs;
int enc_sampfreq;
int16_t enc_interval;
int16_t enc_msSinceSID;
@@ -228,8 +228,8 @@
* -1 - Error
*/
int WebRtcCng_Encode(CNG_enc_inst* cng_inst, int16_t* speech,
- int16_t nrOfSamples, uint8_t* SIDdata,
- int16_t* bytesOut, int16_t forceSID) {
+ size_t nrOfSamples, uint8_t* SIDdata,
+ size_t* bytesOut, int16_t forceSID) {
WebRtcCngEncoder* inst = (WebRtcCngEncoder*) cng_inst;
int16_t arCoefs[WEBRTC_CNG_MAX_LPC_ORDER + 1];
@@ -240,10 +240,11 @@
int16_t ReflBetaComp = 13107; /* 0.4 in q15. */
int32_t outEnergy;
int outShifts;
- int i, stab;
+ size_t i;
+ int stab;
int acorrScale;
- int index;
- int16_t ind, factor;
+ size_t index;
+ size_t ind, factor;
int32_t* bptr;
int32_t blo, bhi;
int16_t negate;
@@ -281,7 +282,7 @@
outShifts--;
}
}
- outEnergy = WebRtcSpl_DivW32W16(outEnergy, factor);
+ outEnergy = WebRtcSpl_DivW32W16(outEnergy, (int16_t)factor);
if (outEnergy > 1) {
/* Create Hanning Window. */
@@ -390,7 +391,7 @@
inst->enc_msSinceSID +=
(int16_t)((1000 * nrOfSamples) / inst->enc_sampfreq);
- return inst->enc_nrOfCoefs + 1;
+ return (int)(inst->enc_nrOfCoefs + 1);
} else {
inst->enc_msSinceSID +=
(int16_t)((1000 * nrOfSamples) / inst->enc_sampfreq);
@@ -475,10 +476,10 @@
* -1 - Error
*/
int16_t WebRtcCng_Generate(CNG_dec_inst* cng_inst, int16_t* outData,
- int16_t nrOfSamples, int16_t new_period) {
+ size_t nrOfSamples, int16_t new_period) {
WebRtcCngDecoder* inst = (WebRtcCngDecoder*) cng_inst;
- int i;
+ size_t i;
int16_t excitation[WEBRTC_CNG_MAX_OUTSIZE_ORDER];
int16_t low[WEBRTC_CNG_MAX_OUTSIZE_ORDER];
int16_t lpPoly[WEBRTC_CNG_MAX_LPC_ORDER + 1];
diff --git a/webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.cc b/webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.cc
index 905a715..ba5959d 100644
--- a/webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.cc
+++ b/webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.cc
@@ -37,7 +37,8 @@
: sample_rate_hz_(sample_rate_hz),
num_channels_(config.num_channels),
payload_type_(config.payload_type),
- num_10ms_frames_per_packet_(config.frame_size_ms / 10),
+ num_10ms_frames_per_packet_(
+ static_cast<size_t>(config.frame_size_ms / 10)),
full_frame_samples_(NumSamplesPerFrame(config.num_channels,
config.frame_size_ms,
sample_rate_hz_)),
@@ -63,11 +64,11 @@
return full_frame_samples_ * BytesPerSample();
}
-int AudioEncoderPcm::Num10MsFramesInNextPacket() const {
+size_t AudioEncoderPcm::Num10MsFramesInNextPacket() const {
return num_10ms_frames_per_packet_;
}
-int AudioEncoderPcm::Max10MsFramesInAPacket() const {
+size_t AudioEncoderPcm::Max10MsFramesInAPacket() const {
return num_10ms_frames_per_packet_;
}
@@ -95,27 +96,26 @@
EncodedInfo info;
info.encoded_timestamp = first_timestamp_in_buffer_;
info.payload_type = payload_type_;
- int16_t ret = EncodeCall(&speech_buffer_[0], full_frame_samples_, encoded);
- CHECK_GE(ret, 0);
- info.encoded_bytes = static_cast<size_t>(ret);
+ info.encoded_bytes =
+ EncodeCall(&speech_buffer_[0], full_frame_samples_, encoded);
speech_buffer_.clear();
return info;
}
-int16_t AudioEncoderPcmA::EncodeCall(const int16_t* audio,
- size_t input_len,
- uint8_t* encoded) {
- return WebRtcG711_EncodeA(audio, static_cast<int16_t>(input_len), encoded);
+size_t AudioEncoderPcmA::EncodeCall(const int16_t* audio,
+ size_t input_len,
+ uint8_t* encoded) {
+ return WebRtcG711_EncodeA(audio, input_len, encoded);
}
int AudioEncoderPcmA::BytesPerSample() const {
return 1;
}
-int16_t AudioEncoderPcmU::EncodeCall(const int16_t* audio,
- size_t input_len,
- uint8_t* encoded) {
- return WebRtcG711_EncodeU(audio, static_cast<int16_t>(input_len), encoded);
+size_t AudioEncoderPcmU::EncodeCall(const int16_t* audio,
+ size_t input_len,
+ uint8_t* encoded) {
+ return WebRtcG711_EncodeU(audio, input_len, encoded);
}
int AudioEncoderPcmU::BytesPerSample() const {
diff --git a/webrtc/modules/audio_coding/codecs/g711/g711_interface.c b/webrtc/modules/audio_coding/codecs/g711/g711_interface.c
index b579520..5b96a9c 100644
--- a/webrtc/modules/audio_coding/codecs/g711/g711_interface.c
+++ b/webrtc/modules/audio_coding/codecs/g711/g711_interface.c
@@ -12,40 +12,40 @@
#include "g711_interface.h"
#include "webrtc/typedefs.h"
-int16_t WebRtcG711_EncodeA(const int16_t* speechIn,
- int16_t len,
- uint8_t* encoded) {
- int n;
+size_t WebRtcG711_EncodeA(const int16_t* speechIn,
+ size_t len,
+ uint8_t* encoded) {
+ size_t n;
for (n = 0; n < len; n++)
encoded[n] = linear_to_alaw(speechIn[n]);
return len;
}
-int16_t WebRtcG711_EncodeU(const int16_t* speechIn,
- int16_t len,
- uint8_t* encoded) {
- int n;
+size_t WebRtcG711_EncodeU(const int16_t* speechIn,
+ size_t len,
+ uint8_t* encoded) {
+ size_t n;
for (n = 0; n < len; n++)
encoded[n] = linear_to_ulaw(speechIn[n]);
return len;
}
-int16_t WebRtcG711_DecodeA(const uint8_t* encoded,
- int16_t len,
- int16_t* decoded,
- int16_t* speechType) {
- int n;
+size_t WebRtcG711_DecodeA(const uint8_t* encoded,
+ size_t len,
+ int16_t* decoded,
+ int16_t* speechType) {
+ size_t n;
for (n = 0; n < len; n++)
decoded[n] = alaw_to_linear(encoded[n]);
*speechType = 1;
return len;
}
-int16_t WebRtcG711_DecodeU(const uint8_t* encoded,
- int16_t len,
- int16_t* decoded,
- int16_t* speechType) {
- int n;
+size_t WebRtcG711_DecodeU(const uint8_t* encoded,
+ size_t len,
+ int16_t* decoded,
+ int16_t* speechType) {
+ size_t n;
for (n = 0; n < len; n++)
decoded[n] = ulaw_to_linear(encoded[n]);
*speechType = 1;
diff --git a/webrtc/modules/audio_coding/codecs/g711/include/audio_encoder_pcm.h b/webrtc/modules/audio_coding/codecs/g711/include/audio_encoder_pcm.h
index c869037..7d45f3f 100644
--- a/webrtc/modules/audio_coding/codecs/g711/include/audio_encoder_pcm.h
+++ b/webrtc/modules/audio_coding/codecs/g711/include/audio_encoder_pcm.h
@@ -39,8 +39,8 @@
int SampleRateHz() const override;
int NumChannels() const override;
size_t MaxEncodedBytes() const override;
- int Num10MsFramesInNextPacket() const override;
- int Max10MsFramesInAPacket() const override;
+ size_t Num10MsFramesInNextPacket() const override;
+ size_t Max10MsFramesInAPacket() const override;
int GetTargetBitrate() const override;
EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
const int16_t* audio,
@@ -50,9 +50,9 @@
protected:
AudioEncoderPcm(const Config& config, int sample_rate_hz);
- virtual int16_t EncodeCall(const int16_t* audio,
- size_t input_len,
- uint8_t* encoded) = 0;
+ virtual size_t EncodeCall(const int16_t* audio,
+ size_t input_len,
+ uint8_t* encoded) = 0;
virtual int BytesPerSample() const = 0;
@@ -60,7 +60,7 @@
const int sample_rate_hz_;
const int num_channels_;
const int payload_type_;
- const int num_10ms_frames_per_packet_;
+ const size_t num_10ms_frames_per_packet_;
const size_t full_frame_samples_;
std::vector<int16_t> speech_buffer_;
uint32_t first_timestamp_in_buffer_;
@@ -76,9 +76,9 @@
: AudioEncoderPcm(config, kSampleRateHz) {}
protected:
- int16_t EncodeCall(const int16_t* audio,
- size_t input_len,
- uint8_t* encoded) override;
+ size_t EncodeCall(const int16_t* audio,
+ size_t input_len,
+ uint8_t* encoded) override;
int BytesPerSample() const override;
@@ -96,9 +96,9 @@
: AudioEncoderPcm(config, kSampleRateHz) {}
protected:
- int16_t EncodeCall(const int16_t* audio,
- size_t input_len,
- uint8_t* encoded) override;
+ size_t EncodeCall(const int16_t* audio,
+ size_t input_len,
+ uint8_t* encoded) override;
int BytesPerSample() const override;
diff --git a/webrtc/modules/audio_coding/codecs/g711/include/g711_interface.h b/webrtc/modules/audio_coding/codecs/g711/include/g711_interface.h
index 5c71e98..9d67222 100644
--- a/webrtc/modules/audio_coding/codecs/g711/include/g711_interface.h
+++ b/webrtc/modules/audio_coding/codecs/g711/include/g711_interface.h
@@ -38,9 +38,9 @@
* Always equal to len input parameter.
*/
-int16_t WebRtcG711_EncodeA(const int16_t* speechIn,
- int16_t len,
- uint8_t* encoded);
+size_t WebRtcG711_EncodeA(const int16_t* speechIn,
+ size_t len,
+ uint8_t* encoded);
/****************************************************************************
* WebRtcG711_EncodeU(...)
@@ -59,9 +59,9 @@
* Always equal to len input parameter.
*/
-int16_t WebRtcG711_EncodeU(const int16_t* speechIn,
- int16_t len,
- uint8_t* encoded);
+size_t WebRtcG711_EncodeU(const int16_t* speechIn,
+ size_t len,
+ uint8_t* encoded);
/****************************************************************************
* WebRtcG711_DecodeA(...)
@@ -82,10 +82,10 @@
* -1 - Error
*/
-int16_t WebRtcG711_DecodeA(const uint8_t* encoded,
- int16_t len,
- int16_t* decoded,
- int16_t* speechType);
+size_t WebRtcG711_DecodeA(const uint8_t* encoded,
+ size_t len,
+ int16_t* decoded,
+ int16_t* speechType);
/****************************************************************************
* WebRtcG711_DecodeU(...)
@@ -106,10 +106,10 @@
* -1 - Error
*/
-int16_t WebRtcG711_DecodeU(const uint8_t* encoded,
- int16_t len,
- int16_t* decoded,
- int16_t* speechType);
+size_t WebRtcG711_DecodeU(const uint8_t* encoded,
+ size_t len,
+ int16_t* decoded,
+ int16_t* speechType);
/**********************************************************************
* WebRtcG711_Version(...)
diff --git a/webrtc/modules/audio_coding/codecs/g711/test/testG711.cc b/webrtc/modules/audio_coding/codecs/g711/test/testG711.cc
index 49c671c..94248f7 100644
--- a/webrtc/modules/audio_coding/codecs/g711/test/testG711.cc
+++ b/webrtc/modules/audio_coding/codecs/g711/test/testG711.cc
@@ -24,8 +24,8 @@
#define CLOCKS_PER_SEC_G711 1000
/* function for reading audio data from PCM file */
-bool readframe(int16_t* data, FILE* inp, int length) {
- short rlen = (short) fread(data, sizeof(int16_t), length, inp);
+bool readframe(int16_t* data, FILE* inp, size_t length) {
+ size_t rlen = fread(data, sizeof(int16_t), length, inp);
if (rlen >= length)
return false;
memset(data + rlen, 0, (length - rlen) * sizeof(int16_t));
@@ -40,16 +40,14 @@
int framecnt;
bool endfile;
- int16_t framelength = 80;
-
- int err;
+ size_t framelength = 80;
/* Runtime statistics */
double starttime;
double runtime;
double length_file;
- int16_t stream_len = 0;
+ size_t stream_len = 0;
int16_t shortdata[480];
int16_t decoded[480];
uint8_t streamdata[1000];
@@ -80,11 +78,12 @@
printf("-----------------------------------\n");
printf("G.711 version: %s\n\n", versionNumber);
/* Get frame length */
- framelength = atoi(argv[1]);
- if (framelength < 0) {
- printf(" G.711: Invalid framelength %d.\n", framelength);
- exit(1);
+ int framelength_int = atoi(argv[1]);
+ if (framelength_int < 0) {
+ printf(" G.722: Invalid framelength %d.\n", framelength_int);
+ exit(1);
}
+ framelength = static_cast<size_t>(framelength_int);
/* Get compression law */
strcpy(law, argv[2]);
@@ -130,36 +129,29 @@
if (argc == 6) {
/* Write bits to file */
if (fwrite(streamdata, sizeof(unsigned char), stream_len, bitp) !=
- static_cast<size_t>(stream_len)) {
+ stream_len) {
return -1;
}
}
- err = WebRtcG711_DecodeA(streamdata, stream_len, decoded,
- speechType);
+ WebRtcG711_DecodeA(streamdata, stream_len, decoded, speechType);
} else if (!strcmp(law, "u")) {
/* u-law encoding */
stream_len = WebRtcG711_EncodeU(shortdata, framelength, streamdata);
if (argc == 6) {
/* Write bits to file */
if (fwrite(streamdata, sizeof(unsigned char), stream_len, bitp) !=
- static_cast<size_t>(stream_len)) {
+ stream_len) {
return -1;
}
}
- err = WebRtcG711_DecodeU(streamdata, stream_len, decoded, speechType);
+ WebRtcG711_DecodeU(streamdata, stream_len, decoded, speechType);
} else {
printf("Wrong law mode\n");
exit(1);
}
- if (stream_len < 0 || err < 0) {
- /* exit if returned with error */
- printf("Error in encoder/decoder\n");
- } else {
- /* Write coded speech to file */
- if (fwrite(decoded, sizeof(short), framelength, outp) !=
- static_cast<size_t>(framelength)) {
- return -1;
- }
+ /* Write coded speech to file */
+ if (fwrite(decoded, sizeof(short), framelength, outp) != framelength) {
+ return -1;
}
}
diff --git a/webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.cc b/webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.cc
index a0d1720..9eb7a11 100644
--- a/webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.cc
+++ b/webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.cc
@@ -19,7 +19,7 @@
namespace {
-const int kSampleRateHz = 16000;
+const size_t kSampleRateHz = 16000;
} // namespace
@@ -40,13 +40,14 @@
AudioEncoderG722::AudioEncoderG722(const Config& config)
: num_channels_(config.num_channels),
payload_type_(config.payload_type),
- num_10ms_frames_per_packet_(config.frame_size_ms / 10),
+ num_10ms_frames_per_packet_(
+ static_cast<size_t>(config.frame_size_ms / 10)),
num_10ms_frames_buffered_(0),
first_timestamp_in_buffer_(0),
encoders_(new EncoderState[num_channels_]),
interleave_buffer_(2 * num_channels_) {
CHECK(config.IsOk());
- const int samples_per_channel =
+ const size_t samples_per_channel =
kSampleRateHz / 100 * num_10ms_frames_per_packet_;
for (int i = 0; i < num_channels_; ++i) {
encoders_[i].speech_buffer.reset(new int16_t[samples_per_channel]);
@@ -71,14 +72,14 @@
}
size_t AudioEncoderG722::MaxEncodedBytes() const {
- return static_cast<size_t>(SamplesPerChannel() / 2 * num_channels_);
+ return SamplesPerChannel() / 2 * num_channels_;
}
-int AudioEncoderG722::Num10MsFramesInNextPacket() const {
+size_t AudioEncoderG722::Num10MsFramesInNextPacket() const {
return num_10ms_frames_per_packet_;
}
-int AudioEncoderG722::Max10MsFramesInAPacket() const {
+size_t AudioEncoderG722::Max10MsFramesInAPacket() const {
return num_10ms_frames_per_packet_;
}
@@ -98,8 +99,8 @@
first_timestamp_in_buffer_ = rtp_timestamp;
// Deinterleave samples and save them in each channel's buffer.
- const int start = kSampleRateHz / 100 * num_10ms_frames_buffered_;
- for (int i = 0; i < kSampleRateHz / 100; ++i)
+ const size_t start = kSampleRateHz / 100 * num_10ms_frames_buffered_;
+ for (size_t i = 0; i < kSampleRateHz / 100; ++i)
for (int j = 0; j < num_channels_; ++j)
encoders_[j].speech_buffer[start + i] = audio[i * num_channels_ + j];
@@ -111,19 +112,18 @@
// Encode each channel separately.
CHECK_EQ(num_10ms_frames_buffered_, num_10ms_frames_per_packet_);
num_10ms_frames_buffered_ = 0;
- const int samples_per_channel = SamplesPerChannel();
+ const size_t samples_per_channel = SamplesPerChannel();
for (int i = 0; i < num_channels_; ++i) {
- const int encoded = WebRtcG722_Encode(
+ const size_t encoded = WebRtcG722_Encode(
encoders_[i].encoder, encoders_[i].speech_buffer.get(),
samples_per_channel, encoders_[i].encoded_buffer.data<uint8_t>());
- CHECK_GE(encoded, 0);
CHECK_EQ(encoded, samples_per_channel / 2);
}
// Interleave the encoded bytes of the different channels. Each separate
// channel and the interleaved stream encodes two samples per byte, most
// significant half first.
- for (int i = 0; i < samples_per_channel / 2; ++i) {
+ for (size_t i = 0; i < samples_per_channel / 2; ++i) {
for (int j = 0; j < num_channels_; ++j) {
uint8_t two_samples = encoders_[j].encoded_buffer.data()[i];
interleave_buffer_.data()[j] = two_samples >> 4;
@@ -140,7 +140,7 @@
return info;
}
-int AudioEncoderG722::SamplesPerChannel() const {
+size_t AudioEncoderG722::SamplesPerChannel() const {
return kSampleRateHz / 100 * num_10ms_frames_per_packet_;
}
diff --git a/webrtc/modules/audio_coding/codecs/g722/g722_decode.c b/webrtc/modules/audio_coding/codecs/g722/g722_decode.c
index ee0eb89..8fdeec1 100644
--- a/webrtc/modules/audio_coding/codecs/g722/g722_decode.c
+++ b/webrtc/modules/audio_coding/codecs/g722/g722_decode.c
@@ -188,8 +188,8 @@
}
/*- End of function --------------------------------------------------------*/
-int WebRtc_g722_decode(G722DecoderState *s, int16_t amp[],
- const uint8_t g722_data[], int len)
+size_t WebRtc_g722_decode(G722DecoderState *s, int16_t amp[],
+ const uint8_t g722_data[], size_t len)
{
static const int wl[8] = {-60, -30, 58, 172, 334, 538, 1198, 3042 };
static const int rl42[16] = {0, 7, 6, 5, 4, 3, 2, 1,
@@ -258,9 +258,9 @@
int wd2;
int wd3;
int code;
- int outlen;
+ size_t outlen;
int i;
- int j;
+ size_t j;
outlen = 0;
rhigh = 0;
diff --git a/webrtc/modules/audio_coding/codecs/g722/g722_enc_dec.h b/webrtc/modules/audio_coding/codecs/g722/g722_enc_dec.h
index 5cd1b2d..7db4895 100644
--- a/webrtc/modules/audio_coding/codecs/g722/g722_enc_dec.h
+++ b/webrtc/modules/audio_coding/codecs/g722/g722_enc_dec.h
@@ -139,19 +139,19 @@
int rate,
int options);
int WebRtc_g722_encode_release(G722EncoderState *s);
-int WebRtc_g722_encode(G722EncoderState *s,
- uint8_t g722_data[],
- const int16_t amp[],
- int len);
+size_t WebRtc_g722_encode(G722EncoderState *s,
+ uint8_t g722_data[],
+ const int16_t amp[],
+ size_t len);
G722DecoderState* WebRtc_g722_decode_init(G722DecoderState* s,
int rate,
int options);
int WebRtc_g722_decode_release(G722DecoderState *s);
-int WebRtc_g722_decode(G722DecoderState *s,
- int16_t amp[],
- const uint8_t g722_data[],
- int len);
+size_t WebRtc_g722_decode(G722DecoderState *s,
+ int16_t amp[],
+ const uint8_t g722_data[],
+ size_t len);
#ifdef __cplusplus
}
diff --git a/webrtc/modules/audio_coding/codecs/g722/g722_encode.c b/webrtc/modules/audio_coding/codecs/g722/g722_encode.c
index bed2d21..01ec127 100644
--- a/webrtc/modules/audio_coding/codecs/g722/g722_encode.c
+++ b/webrtc/modules/audio_coding/codecs/g722/g722_encode.c
@@ -202,8 +202,8 @@
}
#endif
-int WebRtc_g722_encode(G722EncoderState *s, uint8_t g722_data[],
- const int16_t amp[], int len)
+size_t WebRtc_g722_encode(G722EncoderState *s, uint8_t g722_data[],
+ const int16_t amp[], size_t len)
{
static const int q6[32] =
{
@@ -275,11 +275,11 @@
int eh;
int mih;
int i;
- int j;
+ size_t j;
/* Low and high band PCM from the QMF */
int xlow;
int xhigh;
- int g722_bytes;
+ size_t g722_bytes;
/* Even and odd tap accumulators */
int sumeven;
int sumodd;
diff --git a/webrtc/modules/audio_coding/codecs/g722/g722_interface.c b/webrtc/modules/audio_coding/codecs/g722/g722_interface.c
index 1edf58d..f6b9842 100644
--- a/webrtc/modules/audio_coding/codecs/g722/g722_interface.c
+++ b/webrtc/modules/audio_coding/codecs/g722/g722_interface.c
@@ -45,10 +45,10 @@
return WebRtc_g722_encode_release((G722EncoderState*) G722enc_inst);
}
-int16_t WebRtcG722_Encode(G722EncInst *G722enc_inst,
- const int16_t* speechIn,
- int16_t len,
- uint8_t* encoded)
+size_t WebRtcG722_Encode(G722EncInst *G722enc_inst,
+ const int16_t* speechIn,
+ size_t len,
+ uint8_t* encoded)
{
unsigned char *codechar = (unsigned char*) encoded;
// Encode the input speech vector
@@ -85,11 +85,11 @@
return WebRtc_g722_decode_release((G722DecoderState*) G722dec_inst);
}
-int16_t WebRtcG722_Decode(G722DecInst *G722dec_inst,
- const uint8_t *encoded,
- int16_t len,
- int16_t *decoded,
- int16_t *speechType)
+size_t WebRtcG722_Decode(G722DecInst *G722dec_inst,
+ const uint8_t *encoded,
+ size_t len,
+ int16_t *decoded,
+ int16_t *speechType)
{
// Decode the G.722 encoder stream
*speechType=G722_WEBRTC_SPEECH;
diff --git a/webrtc/modules/audio_coding/codecs/g722/include/audio_encoder_g722.h b/webrtc/modules/audio_coding/codecs/g722/include/audio_encoder_g722.h
index 9b57fbe..1f36fac 100644
--- a/webrtc/modules/audio_coding/codecs/g722/include/audio_encoder_g722.h
+++ b/webrtc/modules/audio_coding/codecs/g722/include/audio_encoder_g722.h
@@ -37,8 +37,8 @@
int NumChannels() const override;
size_t MaxEncodedBytes() const override;
int RtpTimestampRateHz() const override;
- int Num10MsFramesInNextPacket() const override;
- int Max10MsFramesInAPacket() const override;
+ size_t Num10MsFramesInNextPacket() const override;
+ size_t Max10MsFramesInAPacket() const override;
int GetTargetBitrate() const override;
EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
const int16_t* audio,
@@ -55,12 +55,12 @@
~EncoderState();
};
- int SamplesPerChannel() const;
+ size_t SamplesPerChannel() const;
const int num_channels_;
const int payload_type_;
- const int num_10ms_frames_per_packet_;
- int num_10ms_frames_buffered_;
+ const size_t num_10ms_frames_per_packet_;
+ size_t num_10ms_frames_buffered_;
uint32_t first_timestamp_in_buffer_;
const rtc::scoped_ptr<EncoderState[]> encoders_;
rtc::Buffer interleave_buffer_;
diff --git a/webrtc/modules/audio_coding/codecs/g722/include/g722_interface.h b/webrtc/modules/audio_coding/codecs/g722/include/g722_interface.h
index 46ff3b0..fa4a48c 100644
--- a/webrtc/modules/audio_coding/codecs/g722/include/g722_interface.h
+++ b/webrtc/modules/audio_coding/codecs/g722/include/g722_interface.h
@@ -94,10 +94,10 @@
* Return value : Length (in bytes) of coded data
*/
-int16_t WebRtcG722_Encode(G722EncInst* G722enc_inst,
- const int16_t* speechIn,
- int16_t len,
- uint8_t* encoded);
+size_t WebRtcG722_Encode(G722EncInst* G722enc_inst,
+ const int16_t* speechIn,
+ size_t len,
+ uint8_t* encoded);
/****************************************************************************
@@ -162,15 +162,14 @@
* - speechType : 1 normal, 2 CNG (Since G722 does not have its own
* DTX/CNG scheme it should always return 1)
*
- * Return value : >0 - Samples in decoded vector
- * -1 - Error
+ * Return value : Samples in decoded vector
*/
-int16_t WebRtcG722_Decode(G722DecInst *G722dec_inst,
- const uint8_t* encoded,
- int16_t len,
- int16_t *decoded,
- int16_t *speechType);
+size_t WebRtcG722_Decode(G722DecInst *G722dec_inst,
+ const uint8_t* encoded,
+ size_t len,
+ int16_t *decoded,
+ int16_t *speechType);
/****************************************************************************
* WebRtcG722_Version(...)
diff --git a/webrtc/modules/audio_coding/codecs/g722/test/testG722.cc b/webrtc/modules/audio_coding/codecs/g722/test/testG722.cc
index 6a6f03c..b473c13 100644
--- a/webrtc/modules/audio_coding/codecs/g722/test/testG722.cc
+++ b/webrtc/modules/audio_coding/codecs/g722/test/testG722.cc
@@ -29,9 +29,9 @@
typedef struct WebRtcG722DecInst G722DecInst;
/* function for reading audio data from PCM file */
-bool readframe(int16_t *data, FILE *inp, int length)
+bool readframe(int16_t *data, FILE *inp, size_t length)
{
- short rlen = (short)fread(data, sizeof(int16_t), length, inp);
+ size_t rlen = fread(data, sizeof(int16_t), length, inp);
if (rlen >= length)
return false;
memset(data + rlen, 0, (length - rlen) * sizeof(int16_t));
@@ -45,17 +45,16 @@
int framecnt;
bool endfile;
- int16_t framelength = 160;
+ size_t framelength = 160;
G722EncInst *G722enc_inst;
G722DecInst *G722dec_inst;
- int err;
/* Runtime statistics */
double starttime;
double runtime = 0;
double length_file;
- int16_t stream_len = 0;
+ size_t stream_len = 0;
int16_t shortdata[960];
int16_t decoded[960];
uint8_t streamdata[80 * 6];
@@ -78,11 +77,12 @@
}
/* Get frame length */
- framelength = atoi(argv[1]);
- if (framelength < 0) {
- printf(" G.722: Invalid framelength %d.\n", framelength);
+ int framelength_int = atoi(argv[1]);
+ if (framelength_int < 0) {
+ printf(" G.722: Invalid framelength %d.\n", framelength_int);
exit(1);
}
+ framelength = static_cast<size_t>(framelength_int);
/* Get Input and Output files */
sscanf(argv[2], "%s", inname);
@@ -124,26 +124,21 @@
/* G.722 encoding + decoding */
stream_len = WebRtcG722_Encode((G722EncInst *)G722enc_inst, shortdata, framelength, streamdata);
- err = WebRtcG722_Decode(G722dec_inst, streamdata, stream_len, decoded,
- speechType);
+ WebRtcG722_Decode(G722dec_inst, streamdata, stream_len, decoded,
+ speechType);
/* Stop clock after call to encoder and decoder */
runtime += (double)((clock()/(double)CLOCKS_PER_SEC_G722)-starttime);
- if (stream_len < 0 || err < 0) {
- /* exit if returned with error */
- printf("Error in encoder/decoder\n");
- } else {
- /* Write coded bits to file */
- if (fwrite(streamdata, sizeof(short), stream_len / 2, outbitp) !=
- static_cast<size_t>(stream_len / 2)) {
- return -1;
- }
- /* Write coded speech to file */
- if (fwrite(decoded, sizeof(short), framelength, outp) !=
- static_cast<size_t>(framelength)) {
- return -1;
- }
+ /* Write coded bits to file */
+ if (fwrite(streamdata, sizeof(short), stream_len / 2, outbitp) !=
+ stream_len / 2) {
+ return -1;
+ }
+ /* Write coded speech to file */
+ if (fwrite(decoded, sizeof(short), framelength, outp) !=
+ framelength) {
+ return -1;
}
}
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/abs_quant.c b/webrtc/modules/audio_coding/codecs/ilbc/abs_quant.c
index 75fc970..263749a 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/abs_quant.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/abs_quant.c
@@ -36,7 +36,7 @@
int16_t *weightDenum /* (i) denominator of synthesis filter */
) {
int16_t *syntOut;
- int16_t quantLen[2];
+ size_t quantLen[2];
/* Stack based */
int16_t syntOutBuf[LPC_FILTERORDER+STATE_SHORT_LEN_30MS];
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/abs_quant_loop.c b/webrtc/modules/audio_coding/codecs/ilbc/abs_quant_loop.c
index d26fb5d..4b76453 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/abs_quant_loop.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/abs_quant_loop.c
@@ -21,9 +21,9 @@
#include "sort_sq.h"
void WebRtcIlbcfix_AbsQuantLoop(int16_t *syntOutIN, int16_t *in_weightedIN,
- int16_t *weightDenumIN, int16_t *quantLenIN,
+ int16_t *weightDenumIN, size_t *quantLenIN,
int16_t *idxVecIN ) {
- int k1, k2;
+ size_t k1, k2;
int16_t index;
int32_t toQW32;
int32_t toQ32;
@@ -33,7 +33,7 @@
int16_t *syntOut = syntOutIN;
int16_t *in_weighted = in_weightedIN;
int16_t *weightDenum = weightDenumIN;
- int16_t *quantLen = quantLenIN;
+ size_t *quantLen = quantLenIN;
int16_t *idxVec = idxVecIN;
for(k1=0;k1<2;k1++) {
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/abs_quant_loop.h b/webrtc/modules/audio_coding/codecs/ilbc/abs_quant_loop.h
index 50c6ffe..c8bf675 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/abs_quant_loop.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/abs_quant_loop.h
@@ -27,7 +27,7 @@
*---------------------------------------------------------------*/
void WebRtcIlbcfix_AbsQuantLoop(int16_t *syntOutIN, int16_t *in_weightedIN,
- int16_t *weightDenumIN, int16_t *quantLenIN,
+ int16_t *weightDenumIN, size_t *quantLenIN,
int16_t *idxVecIN);
#endif
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.cc b/webrtc/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.cc
index 8dc9bdf..33aba38 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.cc
+++ b/webrtc/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.cc
@@ -24,15 +24,20 @@
} // namespace
+// static
+const size_t AudioEncoderIlbc::kMaxSamplesPerPacket;
+
bool AudioEncoderIlbc::Config::IsOk() const {
return (frame_size_ms == 20 || frame_size_ms == 30 || frame_size_ms == 40 ||
frame_size_ms == 60) &&
- (kSampleRateHz / 100 * (frame_size_ms / 10)) <= kMaxSamplesPerPacket;
+ static_cast<size_t>(kSampleRateHz / 100 * (frame_size_ms / 10)) <=
+ kMaxSamplesPerPacket;
}
AudioEncoderIlbc::AudioEncoderIlbc(const Config& config)
: payload_type_(config.payload_type),
- num_10ms_frames_per_packet_(config.frame_size_ms / 10),
+ num_10ms_frames_per_packet_(
+ static_cast<size_t>(config.frame_size_ms / 10)),
num_10ms_frames_buffered_(0) {
CHECK(config.IsOk());
CHECK_EQ(0, WebRtcIlbcfix_EncoderCreate(&encoder_));
@@ -58,11 +63,11 @@
return RequiredOutputSizeBytes();
}
-int AudioEncoderIlbc::Num10MsFramesInNextPacket() const {
+size_t AudioEncoderIlbc::Num10MsFramesInNextPacket() const {
return num_10ms_frames_per_packet_;
}
-int AudioEncoderIlbc::Max10MsFramesInAPacket() const {
+size_t AudioEncoderIlbc::Max10MsFramesInAPacket() const {
return num_10ms_frames_per_packet_;
}
@@ -111,7 +116,7 @@
encoded);
CHECK_GE(output_len, 0);
EncodedInfo info;
- info.encoded_bytes = output_len;
+ info.encoded_bytes = static_cast<size_t>(output_len);
DCHECK_EQ(info.encoded_bytes, RequiredOutputSizeBytes());
info.encoded_timestamp = first_timestamp_in_buffer_;
info.payload_type = payload_type_;
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/augmented_cb_corr.c b/webrtc/modules/audio_coding/codecs/ilbc/augmented_cb_corr.c
index c24b4a6..1a3735f 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/augmented_cb_corr.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/augmented_cb_corr.c
@@ -28,14 +28,14 @@
int32_t *crossDot, /* (o) The cross correlation between
the target and the Augmented
vector */
- int16_t low, /* (i) Lag to start from (typically
+ size_t low, /* (i) Lag to start from (typically
20) */
- int16_t high, /* (i) Lag to end at (typically 39) */
+ size_t high, /* (i) Lag to end at (typically 39) */
int scale) /* (i) Scale factor to use for
the crossDot */
{
- int lagcount;
- int16_t ilow;
+ size_t lagcount;
+ size_t ilow;
int16_t *targetPtr;
int32_t *crossDotPtr;
int16_t *iSPtr=interpSamples;
@@ -46,7 +46,7 @@
crossDotPtr=crossDot;
for (lagcount=low; lagcount<=high; lagcount++) {
- ilow = (int16_t) (lagcount-4);
+ ilow = lagcount - 4;
/* Compute dot product for the first (lagcount-4) samples */
(*crossDotPtr) = WebRtcSpl_DotProductWithScale(target, buffer-lagcount, ilow, scale);
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/augmented_cb_corr.h b/webrtc/modules/audio_coding/codecs/ilbc/augmented_cb_corr.h
index a0435c4..c5c4088 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/augmented_cb_corr.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/augmented_cb_corr.h
@@ -33,9 +33,9 @@
int32_t *crossDot, /* (o) The cross correlation between
the target and the Augmented
vector */
- int16_t low, /* (i) Lag to start from (typically
+ size_t low, /* (i) Lag to start from (typically
20) */
- int16_t high, /* (i) Lag to end at (typically 39 */
+ size_t high, /* (i) Lag to end at (typically 39) */
int scale); /* (i) Scale factor to use for the crossDot */
#endif
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/cb_construct.c b/webrtc/modules/audio_coding/codecs/ilbc/cb_construct.c
index 9d11b83..cacf3ac 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/cb_construct.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/cb_construct.c
@@ -29,10 +29,10 @@
int16_t *index, /* (i) Codebook indices */
int16_t *gain_index, /* (i) Gain quantization indices */
int16_t *mem, /* (i) Buffer for codevector construction */
- int16_t lMem, /* (i) Length of buffer */
- int16_t veclen /* (i) Length of vector */
+ size_t lMem, /* (i) Length of buffer */
+ size_t veclen /* (i) Length of vector */
){
- int j;
+ size_t j;
int16_t gain[CB_NSTAGES];
/* Stack based */
int16_t cbvec0[SUBL];
@@ -50,9 +50,9 @@
/* codebook vector construction and construction of total vector */
/* Stack based */
- WebRtcIlbcfix_GetCbVec(cbvec0, mem, index[0], lMem, veclen);
- WebRtcIlbcfix_GetCbVec(cbvec1, mem, index[1], lMem, veclen);
- WebRtcIlbcfix_GetCbVec(cbvec2, mem, index[2], lMem, veclen);
+ WebRtcIlbcfix_GetCbVec(cbvec0, mem, (size_t)index[0], lMem, veclen);
+ WebRtcIlbcfix_GetCbVec(cbvec1, mem, (size_t)index[1], lMem, veclen);
+ WebRtcIlbcfix_GetCbVec(cbvec2, mem, (size_t)index[2], lMem, veclen);
gainPtr = &gain[0];
for (j=0;j<veclen;j++) {
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/cb_construct.h b/webrtc/modules/audio_coding/codecs/ilbc/cb_construct.h
index 2e9080f..b676ef9 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/cb_construct.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/cb_construct.h
@@ -30,8 +30,8 @@
int16_t *index, /* (i) Codebook indices */
int16_t *gain_index, /* (i) Gain quantization indices */
int16_t *mem, /* (i) Buffer for codevector construction */
- int16_t lMem, /* (i) Length of buffer */
- int16_t veclen /* (i) Length of vector */
+ size_t lMem, /* (i) Length of buffer */
+ size_t veclen /* (i) Length of vector */
);
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy.c b/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy.c
index 1b8c506..6ad2f8e 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy.c
@@ -27,15 +27,15 @@
*----------------------------------------------------------------*/
void WebRtcIlbcfix_CbMemEnergy(
- int16_t range,
+ size_t range,
int16_t *CB, /* (i) The CB memory (1:st section) */
int16_t *filteredCB, /* (i) The filtered CB memory (2:nd section) */
- int16_t lMem, /* (i) Length of the CB memory */
- int16_t lTarget, /* (i) Length of the target vector */
+ size_t lMem, /* (i) Length of the CB memory */
+ size_t lTarget, /* (i) Length of the target vector */
int16_t *energyW16, /* (o) Energy in the CB vectors */
int16_t *energyShifts, /* (o) Shift value of the energy */
int scale, /* (i) The scaling of all energy values */
- int16_t base_size /* (i) Index to where energy values should be stored */
+ size_t base_size /* (i) Index to where energy values should be stored */
) {
int16_t *ppi, *ppo, *pp;
int32_t energy, tmp32;
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy.h b/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy.h
index 34ff8aa..6da2f43 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy.h
@@ -20,15 +20,15 @@
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CB_MEM_ENERGY_H_
void WebRtcIlbcfix_CbMemEnergy(
- int16_t range,
+ size_t range,
int16_t *CB, /* (i) The CB memory (1:st section) */
int16_t *filteredCB, /* (i) The filtered CB memory (2:nd section) */
- int16_t lMem, /* (i) Length of the CB memory */
- int16_t lTarget, /* (i) Length of the target vector */
+ size_t lMem, /* (i) Length of the CB memory */
+ size_t lTarget, /* (i) Length of the target vector */
int16_t *energyW16, /* (o) Energy in the CB vectors */
int16_t *energyShifts, /* (o) Shift value of the energy */
int scale, /* (i) The scaling of all energy values */
- int16_t base_size /* (i) Index to where energy values should be stored */
+ size_t base_size /* (i) Index to where energy values should be stored */
);
#endif
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_augmentation.c b/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_augmentation.c
index 2f3c299..acd6b9c 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_augmentation.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_augmentation.c
@@ -23,13 +23,14 @@
int16_t *interpSamples, /* (i) The interpolated samples */
int16_t *CBmem, /* (i) The CB memory */
int scale, /* (i) The scaling of all energy values */
- int16_t base_size, /* (i) Index to where energy values should be stored */
+ size_t base_size, /* (i) Index to where energy values should be stored */
int16_t *energyW16, /* (o) Energy in the CB vectors */
int16_t *energyShifts /* (o) Shift value of the energy */
){
int32_t energy, tmp32;
int16_t *ppe, *pp, *interpSamplesPtr;
- int16_t *CBmemPtr, lagcount;
+ int16_t *CBmemPtr;
+ size_t lagcount;
int16_t *enPtr=&energyW16[base_size-20];
int16_t *enShPtr=&energyShifts[base_size-20];
int32_t nrjRecursive;
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_augmentation.h b/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_augmentation.h
index 46fb2fd..594ba5f 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_augmentation.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_augmentation.h
@@ -23,7 +23,7 @@
int16_t *interpSamples, /* (i) The interpolated samples */
int16_t *CBmem, /* (i) The CB memory */
int scale, /* (i) The scaling of all energy values */
- int16_t base_size, /* (i) Index to where energy values should be stored */
+ size_t base_size, /* (i) Index to where energy values should be stored */
int16_t *energyW16, /* (o) Energy in the CB vectors */
int16_t *energyShifts /* (o) Shift value of the energy */
);
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_calc.c b/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_calc.c
index 481dfba..f2415fe 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_calc.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_calc.c
@@ -23,16 +23,17 @@
* sample and the last sample respectively */
void WebRtcIlbcfix_CbMemEnergyCalc(
int32_t energy, /* (i) input start energy */
- int16_t range, /* (i) number of iterations */
+ size_t range, /* (i) number of iterations */
int16_t *ppi, /* (i) input pointer 1 */
int16_t *ppo, /* (i) input pointer 2 */
int16_t *energyW16, /* (o) Energy in the CB vectors */
int16_t *energyShifts, /* (o) Shift value of the energy */
int scale, /* (i) The scaling of all energy values */
- int16_t base_size /* (i) Index to where energy values should be stored */
+ size_t base_size /* (i) Index to where energy values should be stored */
)
{
- int16_t j,shft;
+ size_t j;
+ int16_t shft;
int32_t tmp;
int16_t *eSh_ptr;
int16_t *eW16_ptr;
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_calc.h b/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_calc.h
index 7f0cadf..2991869 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_calc.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_calc.h
@@ -21,13 +21,13 @@
void WebRtcIlbcfix_CbMemEnergyCalc(
int32_t energy, /* (i) input start energy */
- int16_t range, /* (i) number of iterations */
+ size_t range, /* (i) number of iterations */
int16_t *ppi, /* (i) input pointer 1 */
int16_t *ppo, /* (i) input pointer 2 */
int16_t *energyW16, /* (o) Energy in the CB vectors */
int16_t *energyShifts, /* (o) Shift value of the energy */
int scale, /* (i) The scaling of all energy values */
- int16_t base_size /* (i) Index to where energy values should be stored */
+ size_t base_size /* (i) Index to where energy values should be stored */
);
#endif
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/cb_search.c b/webrtc/modules/audio_coding/codecs/ilbc/cb_search.c
index d502cf0..be94951 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/cb_search.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/cb_search.c
@@ -40,29 +40,31 @@
int16_t *gain_index, /* (o) Gain quantization indices */
int16_t *intarget, /* (i) Target vector for encoding */
int16_t *decResidual,/* (i) Decoded residual for codebook construction */
- int16_t lMem, /* (i) Length of buffer */
- int16_t lTarget, /* (i) Length of vector */
+ size_t lMem, /* (i) Length of buffer */
+ size_t lTarget, /* (i) Length of vector */
int16_t *weightDenum,/* (i) weighting filter coefficients in Q12 */
- int16_t block /* (i) the subblock number */
+ size_t block /* (i) the subblock number */
) {
- int16_t i, j, stage, range;
+ size_t i, range;
+ int16_t ii, j, stage;
int16_t *pp;
int16_t tmp;
int scale;
int16_t bits, temp1, temp2;
- int16_t base_size;
+ size_t base_size;
int32_t codedEner, targetEner;
int16_t gains[CB_NSTAGES+1];
int16_t *cb_vecPtr;
- int16_t indexOffset, sInd, eInd;
+ size_t indexOffset, sInd, eInd;
int32_t CritMax=0;
int16_t shTotMax=WEBRTC_SPL_WORD16_MIN;
- int16_t bestIndex=0;
+ size_t bestIndex=0;
int16_t bestGain=0;
- int16_t indexNew, CritNewSh;
+ size_t indexNew;
+ int16_t CritNewSh;
int32_t CritNew;
int32_t *cDotPtr;
- int16_t noOfZeros;
+ size_t noOfZeros;
int16_t *gainPtr;
int32_t t32, tmpW32;
int16_t *WebRtcIlbcfix_kGainSq5_ptr;
@@ -148,9 +150,9 @@
scale, 20, energyW16, energyShifts);
/* Compute the CB vectors' energies for the second cb section (filtered cb) */
- WebRtcIlbcfix_CbMemEnergyAugmentation(interpSamplesFilt, cbvectors,
- scale, (int16_t)(base_size + 20),
- energyW16, energyShifts);
+ WebRtcIlbcfix_CbMemEnergyAugmentation(interpSamplesFilt, cbvectors, scale,
+ base_size + 20, energyW16,
+ energyShifts);
/* Compute the CB vectors' energies and store them in the vector
* energyW16. Also the corresponding shift values are stored. The
@@ -224,7 +226,7 @@
/* Update the global best index and the corresponding gain */
WebRtcIlbcfix_CbUpdateBestIndex(
- CritNew, CritNewSh, (int16_t)(indexNew+indexOffset), cDot[indexNew+indexOffset],
+ CritNew, CritNewSh, indexNew+indexOffset, cDot[indexNew+indexOffset],
inverseEnergy[indexNew+indexOffset], inverseEnergyShifts[indexNew+indexOffset],
&CritMax, &shTotMax, &bestIndex, &bestGain);
@@ -242,11 +244,8 @@
i=sInd;
if (sInd<20) {
WebRtcIlbcfix_AugmentedCbCorr(target, cbvectors + lMem,
- interpSamplesFilt, cDot,
- (int16_t)(sInd + 20),
- (int16_t)(WEBRTC_SPL_MIN(39,
- (eInd + 20))),
- scale);
+ interpSamplesFilt, cDot, sInd + 20,
+ WEBRTC_SPL_MIN(39, (eInd + 20)), scale);
i=20;
cDotPtr = &cDot[20 - sInd];
} else {
@@ -257,7 +256,7 @@
/* Calculate the cross correlations (main part of the filtered CB) */
WebRtcSpl_CrossCorrelation(cDotPtr, target, cb_vecPtr, lTarget,
- (int16_t)(eInd - i + 1), scale, -1);
+ eInd - i + 1, scale, -1);
} else {
cDotPtr = cDot;
@@ -265,7 +264,7 @@
/* Calculate the cross correlations (main part of the filtered CB) */
WebRtcSpl_CrossCorrelation(cDotPtr, target, cb_vecPtr, lTarget,
- (int16_t)(eInd - sInd + 1), scale, -1);
+ eInd - sInd + 1, scale, -1);
}
@@ -274,17 +273,17 @@
/* Search for best index in this part of the vector */
WebRtcIlbcfix_CbSearchCore(
- cDot, (int16_t)(eInd-sInd+1), stage, inverseEnergy+indexOffset,
+ cDot, eInd-sInd+1, stage, inverseEnergy+indexOffset,
inverseEnergyShifts+indexOffset, Crit,
&indexNew, &CritNew, &CritNewSh);
/* Update the global best index and the corresponding gain */
WebRtcIlbcfix_CbUpdateBestIndex(
- CritNew, CritNewSh, (int16_t)(indexNew+indexOffset), cDot[indexNew],
+ CritNew, CritNewSh, indexNew+indexOffset, cDot[indexNew],
inverseEnergy[indexNew+indexOffset], inverseEnergyShifts[indexNew+indexOffset],
&CritMax, &shTotMax, &bestIndex, &bestGain);
- index[stage] = bestIndex;
+ index[stage] = (int16_t)bestIndex;
bestGain = WebRtcIlbcfix_GainQuant(bestGain,
@@ -297,7 +296,7 @@
if(lTarget==(STATE_LEN-iLBCenc_inst->state_short_len)) {
- if(index[stage]<base_size) {
+ if((size_t)index[stage]<base_size) {
pp=buf+lMem-lTarget-index[stage];
} else {
pp=cbvectors+lMem-lTarget-
@@ -306,16 +305,16 @@
} else {
- if (index[stage]<base_size) {
+ if ((size_t)index[stage]<base_size) {
if (index[stage]>=20) {
/* Adjust index and extract vector */
index[stage]-=20;
pp=buf+lMem-lTarget-index[stage];
} else {
/* Adjust index and extract vector */
- index[stage]+=(base_size-20);
+ index[stage]+=(int16_t)(base_size-20);
- WebRtcIlbcfix_CreateAugmentedVec((int16_t)(index[stage]-base_size+40),
+ WebRtcIlbcfix_CreateAugmentedVec(index[stage]-base_size+40,
buf+lMem, aug_vec);
pp = aug_vec;
@@ -329,8 +328,8 @@
index[stage]+base_size;
} else {
/* Adjust index and extract vector */
- index[stage]+=(base_size-20);
- WebRtcIlbcfix_CreateAugmentedVec((int16_t)(index[stage]-2*base_size+40),
+ index[stage]+=(int16_t)(base_size-20);
+ WebRtcIlbcfix_CreateAugmentedVec(index[stage]-2*base_size+40,
cbvectors+lMem, aug_vec);
pp = aug_vec;
}
@@ -381,7 +380,7 @@
WebRtcIlbcfix_kGainSq5_ptr = (int16_t*)&WebRtcIlbcfix_kGainSq5[j];
/* targetEner and codedEner are in Q(-2*scale) */
- for (i=gain_index[0];i<32;i++) {
+ for (ii=gain_index[0];ii<32;ii++) {
/* Change the index if
(codedEnergy*gainTbl[i]*gainTbl[i])<(targetEn*gain[0]*gain[0]) AND
@@ -392,8 +391,8 @@
t32 = t32 - targetEner;
if (t32 < 0) {
if ((*WebRtcIlbcfix_kGainSq5_ptr) < tmpW32) {
- j=i;
- WebRtcIlbcfix_kGainSq5_ptr = (int16_t*)&WebRtcIlbcfix_kGainSq5[i];
+ j=ii;
+ WebRtcIlbcfix_kGainSq5_ptr = (int16_t*)&WebRtcIlbcfix_kGainSq5[ii];
}
}
gainPtr++;
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/cb_search.h b/webrtc/modules/audio_coding/codecs/ilbc/cb_search.h
index 2fe236f..ed1580c 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/cb_search.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/cb_search.h
@@ -26,10 +26,10 @@
int16_t *gain_index, /* (o) Gain quantization indices */
int16_t *intarget, /* (i) Target vector for encoding */
int16_t *decResidual,/* (i) Decoded residual for codebook construction */
- int16_t lMem, /* (i) Length of buffer */
- int16_t lTarget, /* (i) Length of vector */
+ size_t lMem, /* (i) Length of buffer */
+ size_t lTarget, /* (i) Length of vector */
int16_t *weightDenum,/* (i) weighting filter coefficients in Q12 */
- int16_t block /* (i) the subblock number */
+ size_t block /* (i) the subblock number */
);
#endif
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/cb_search_core.c b/webrtc/modules/audio_coding/codecs/ilbc/cb_search_core.c
index 3deb08a..d297b15 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/cb_search_core.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/cb_search_core.c
@@ -21,13 +21,13 @@
void WebRtcIlbcfix_CbSearchCore(
int32_t *cDot, /* (i) Cross Correlation */
- int16_t range, /* (i) Search range */
+ size_t range, /* (i) Search range */
int16_t stage, /* (i) Stage of this search */
int16_t *inverseEnergy, /* (i) Inversed energy */
int16_t *inverseEnergyShift, /* (i) Shifts of inversed energy
with the offset 2*16-29 */
int32_t *Crit, /* (o) The criteria */
- int16_t *bestIndex, /* (o) Index that corresponds to
+ size_t *bestIndex, /* (o) Index that corresponds to
maximum criteria (in this
vector) */
int32_t *bestCrit, /* (o) Value of critera for the
@@ -37,7 +37,7 @@
{
int32_t maxW32, tmp32;
int16_t max, sh, tmp16;
- int i;
+ size_t i;
int32_t *cDotPtr;
int16_t cDotSqW16;
int16_t *inverseEnergyPtr;
@@ -103,7 +103,7 @@
}
/* Find the index of the best value */
- *bestIndex = WebRtcSpl_MaxIndexW32(Crit, range);
+ *bestIndex = (size_t)WebRtcSpl_MaxIndexW32(Crit, range);
*bestCrit = Crit[*bestIndex];
/* Calculate total shifts of this criteria */
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/cb_search_core.h b/webrtc/modules/audio_coding/codecs/ilbc/cb_search_core.h
index e4f2e92..9648cf2 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/cb_search_core.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/cb_search_core.h
@@ -23,13 +23,13 @@
void WebRtcIlbcfix_CbSearchCore(
int32_t *cDot, /* (i) Cross Correlation */
- int16_t range, /* (i) Search range */
+ size_t range, /* (i) Search range */
int16_t stage, /* (i) Stage of this search */
int16_t *inverseEnergy, /* (i) Inversed energy */
int16_t *inverseEnergyShift, /* (i) Shifts of inversed energy
with the offset 2*16-29 */
int32_t *Crit, /* (o) The criteria */
- int16_t *bestIndex, /* (o) Index that corresponds to
+ size_t *bestIndex, /* (o) Index that corresponds to
maximum criteria (in this
vector) */
int32_t *bestCrit, /* (o) Value of critera for the
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/cb_update_best_index.c b/webrtc/modules/audio_coding/codecs/ilbc/cb_update_best_index.c
index 6fdec27..fc27ea9 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/cb_update_best_index.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/cb_update_best_index.c
@@ -23,13 +23,13 @@
void WebRtcIlbcfix_CbUpdateBestIndex(
int32_t CritNew, /* (i) New Potentially best Criteria */
int16_t CritNewSh, /* (i) Shift value of above Criteria */
- int16_t IndexNew, /* (i) Index of new Criteria */
+ size_t IndexNew, /* (i) Index of new Criteria */
int32_t cDotNew, /* (i) Cross dot of new index */
int16_t invEnergyNew, /* (i) Inversed energy new index */
int16_t energyShiftNew, /* (i) Energy shifts of new index */
int32_t *CritMax, /* (i/o) Maximum Criteria (so far) */
int16_t *shTotMax, /* (i/o) Shifts of maximum criteria */
- int16_t *bestIndex, /* (i/o) Index that corresponds to
+ size_t *bestIndex, /* (i/o) Index that corresponds to
maximum criteria */
int16_t *bestGain) /* (i/o) Gain in Q14 that corresponds
to maximum criteria */
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/cb_update_best_index.h b/webrtc/modules/audio_coding/codecs/ilbc/cb_update_best_index.h
index e8519d4..a20fa38 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/cb_update_best_index.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/cb_update_best_index.h
@@ -24,13 +24,13 @@
void WebRtcIlbcfix_CbUpdateBestIndex(
int32_t CritNew, /* (i) New Potentially best Criteria */
int16_t CritNewSh, /* (i) Shift value of above Criteria */
- int16_t IndexNew, /* (i) Index of new Criteria */
+ size_t IndexNew, /* (i) Index of new Criteria */
int32_t cDotNew, /* (i) Cross dot of new index */
int16_t invEnergyNew, /* (i) Inversed energy new index */
int16_t energyShiftNew, /* (i) Energy shifts of new index */
int32_t *CritMax, /* (i/o) Maximum Criteria (so far) */
int16_t *shTotMax, /* (i/o) Shifts of maximum criteria */
- int16_t *bestIndex, /* (i/o) Index that corresponds to
+ size_t *bestIndex, /* (i/o) Index that corresponds to
maximum criteria */
int16_t *bestGain); /* (i/o) Gain in Q14 that corresponds
to maximum criteria */
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/comp_corr.c b/webrtc/modules/audio_coding/codecs/ilbc/comp_corr.c
index a53e8a7..7653cb0 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/comp_corr.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/comp_corr.c
@@ -27,9 +27,9 @@
int32_t *corr, /* (o) cross correlation */
int32_t *ener, /* (o) energy */
int16_t *buffer, /* (i) signal buffer */
- int16_t lag, /* (i) pitch lag */
- int16_t bLen, /* (i) length of buffer */
- int16_t sRange, /* (i) correlation search length */
+ size_t lag, /* (i) pitch lag */
+ size_t bLen, /* (i) length of buffer */
+ size_t sRange, /* (i) correlation search length */
int16_t scale /* (i) number of rightshifts to use */
){
int16_t *w16ptr;
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/comp_corr.h b/webrtc/modules/audio_coding/codecs/ilbc/comp_corr.h
index 4ff80aa..ab78c72 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/comp_corr.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/comp_corr.h
@@ -30,9 +30,9 @@
int32_t *corr, /* (o) cross correlation */
int32_t *ener, /* (o) energy */
int16_t *buffer, /* (i) signal buffer */
- int16_t lag, /* (i) pitch lag */
- int16_t bLen, /* (i) length of buffer */
- int16_t sRange, /* (i) correlation search length */
+ size_t lag, /* (i) pitch lag */
+ size_t bLen, /* (i) length of buffer */
+ size_t sRange, /* (i) correlation search length */
int16_t scale /* (i) number of rightshifts to use */
);
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/constants.c b/webrtc/modules/audio_coding/codecs/ilbc/constants.c
index 1d384b7..f726ae2 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/constants.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/constants.c
@@ -593,10 +593,10 @@
/* Ranges for search and filters at different subframes */
-const int16_t WebRtcIlbcfix_kSearchRange[5][CB_NSTAGES]={
+const size_t WebRtcIlbcfix_kSearchRange[5][CB_NSTAGES]={
{58,58,58}, {108,44,44}, {108,108,108}, {108,108,108}, {108,108,108}};
-const int16_t WebRtcIlbcfix_kFilterRange[5]={63, 85, 125, 147, 147};
+const size_t WebRtcIlbcfix_kFilterRange[5]={63, 85, 125, 147, 147};
/* Gain Quantization for the codebook gains of the 3 stages */
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/constants.h b/webrtc/modules/audio_coding/codecs/ilbc/constants.h
index ff6370e..1f4de4d 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/constants.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/constants.h
@@ -61,8 +61,8 @@
/* Ranges for search and filters at different subframes */
-extern const int16_t WebRtcIlbcfix_kSearchRange[5][CB_NSTAGES];
-extern const int16_t WebRtcIlbcfix_kFilterRange[];
+extern const size_t WebRtcIlbcfix_kSearchRange[5][CB_NSTAGES];
+extern const size_t WebRtcIlbcfix_kFilterRange[];
/* gain quantization tables */
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/create_augmented_vec.c b/webrtc/modules/audio_coding/codecs/ilbc/create_augmented_vec.c
index 965cbe0..8ae28ac 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/create_augmented_vec.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/create_augmented_vec.c
@@ -25,12 +25,12 @@
*----------------------------------------------------------------*/
void WebRtcIlbcfix_CreateAugmentedVec(
- int16_t index, /* (i) Index for the augmented vector to be created */
+ size_t index, /* (i) Index for the augmented vector to be created */
int16_t *buffer, /* (i) Pointer to the end of the codebook memory that
is used for creation of the augmented codebook */
int16_t *cbVec /* (o) The construced codebook vector */
) {
- int16_t ilow;
+ size_t ilow;
int16_t *ppo, *ppi;
int16_t cbVecTmp[4];
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/create_augmented_vec.h b/webrtc/modules/audio_coding/codecs/ilbc/create_augmented_vec.h
index e3c3c7b..430dfe9 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/create_augmented_vec.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/create_augmented_vec.h
@@ -27,7 +27,7 @@
*----------------------------------------------------------------*/
void WebRtcIlbcfix_CreateAugmentedVec(
- int16_t index, /* (i) Index for the augmented vector to be created */
+ size_t index, /* (i) Index for the augmented vector to be created */
int16_t *buffer, /* (i) Pointer to the end of the codebook memory that
is used for creation of the augmented codebook */
int16_t *cbVec /* (o) The construced codebook vector */
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/decode.c b/webrtc/modules/audio_coding/codecs/ilbc/decode.c
index 9918de2..4c8497a 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/decode.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/decode.c
@@ -44,7 +44,7 @@
int16_t mode /* (i) 0: bad packet, PLC,
1: normal */
) {
- int i;
+ size_t i;
int16_t order_plus_one;
int16_t last_bit;
@@ -106,7 +106,7 @@
WebRtcIlbcfix_DoThePlc(
PLCresidual, PLClpc, 0, decresidual,
syntdenum + (LPC_FILTERORDER + 1) * (iLBCdec_inst->nsub - 1),
- (int16_t)(iLBCdec_inst->last_lag), iLBCdec_inst);
+ iLBCdec_inst->last_lag, iLBCdec_inst);
/* Use the output from doThePLC */
WEBRTC_SPL_MEMCPY_W16(decresidual, PLCresidual, iLBCdec_inst->blockl);
@@ -122,7 +122,7 @@
/* packet loss conceal */
WebRtcIlbcfix_DoThePlc(PLCresidual, PLClpc, 1, decresidual, syntdenum,
- (int16_t)(iLBCdec_inst->last_lag), iLBCdec_inst);
+ iLBCdec_inst->last_lag, iLBCdec_inst);
WEBRTC_SPL_MEMCPY_W16(decresidual, PLCresidual, iLBCdec_inst->blockl);
@@ -188,18 +188,18 @@
WEBRTC_SPL_MEMCPY_W16(iLBCdec_inst->syntMem, &data[iLBCdec_inst->blockl-LPC_FILTERORDER], LPC_FILTERORDER);
} else { /* Enhancer not activated */
- int16_t lag;
+ size_t lag;
/* Find last lag (since the enhancer is not called to give this info) */
lag = 20;
if (iLBCdec_inst->mode==20) {
- lag = (int16_t)WebRtcIlbcfix_XcorrCoef(
+ lag = WebRtcIlbcfix_XcorrCoef(
&decresidual[iLBCdec_inst->blockl-60],
&decresidual[iLBCdec_inst->blockl-60-lag],
60,
80, lag, -1);
} else {
- lag = (int16_t)WebRtcIlbcfix_XcorrCoef(
+ lag = WebRtcIlbcfix_XcorrCoef(
&decresidual[iLBCdec_inst->blockl-ENH_BLOCKL],
&decresidual[iLBCdec_inst->blockl-ENH_BLOCKL-lag],
ENH_BLOCKL,
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/decode_residual.c b/webrtc/modules/audio_coding/codecs/ilbc/decode_residual.c
index de42ea9..b8a067e 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/decode_residual.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/decode_residual.c
@@ -41,8 +41,8 @@
int16_t *syntdenum /* (i) the decoded synthesis filter
coefficients */
) {
- int16_t meml_gotten, diff, start_pos;
- int16_t subcount, subframe;
+ size_t meml_gotten, diff, start_pos;
+ size_t subcount, subframe;
int16_t *reverseDecresidual = iLBCdec_inst->enh_buf; /* Reversed decoded data, used for decoding backwards in time (reuse memory in state) */
int16_t *memVec = iLBCdec_inst->prevResidual; /* Memory for codebook and filter state (reuse memory in state) */
int16_t *mem = &memVec[CB_HALFFILTERLEN]; /* Memory for codebook */
@@ -118,7 +118,7 @@
/* loop over subframes to encode */
- int16_t Nfor = iLBCdec_inst->nsub - iLBC_encbits->startIdx - 1;
+ size_t Nfor = iLBCdec_inst->nsub - iLBC_encbits->startIdx - 1;
for (subframe=0; subframe<Nfor; subframe++) {
/* construct decoded vector */
@@ -156,7 +156,7 @@
/* loop over subframes to decode */
- int16_t Nback = iLBC_encbits->startIdx - 1;
+ size_t Nback = iLBC_encbits->startIdx - 1;
for (subframe=0; subframe<Nback; subframe++) {
/* construct decoded vector */
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/decoder_interpolate_lsf.c b/webrtc/modules/audio_coding/codecs/ilbc/decoder_interpolate_lsf.c
index fad8170..06ab2e7 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/decoder_interpolate_lsf.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/decoder_interpolate_lsf.c
@@ -34,7 +34,8 @@
IlbcDecoder *iLBCdec_inst
/* (i) the decoder state structure */
){
- int i, pos, lp_length;
+ size_t i;
+ int pos, lp_length;
int16_t lp[LPC_FILTERORDER + 1], *lsfdeq2;
lsfdeq2 = lsfdeq + length;
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/defines.h b/webrtc/modules/audio_coding/codecs/ilbc/defines.h
index 2d37e52..5fcd4a0 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/defines.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/defines.h
@@ -121,11 +121,11 @@
int16_t lsf[LSF_NSPLIT*LPC_N_MAX];
int16_t cb_index[CB_NSTAGES*(NASUB_MAX+1)]; /* First CB_NSTAGES values contains extra CB index */
int16_t gain_index[CB_NSTAGES*(NASUB_MAX+1)]; /* First CB_NSTAGES values contains extra CB gain */
- int16_t idxForMax;
+ size_t idxForMax;
int16_t state_first;
int16_t idxVec[STATE_SHORT_LEN_30MS];
int16_t firstbits;
- int16_t startIdx;
+ size_t startIdx;
} iLBC_bits;
/* type definition encoder instance */
@@ -135,12 +135,12 @@
int16_t mode;
/* basic parameters for different frame sizes */
- int16_t blockl;
- int16_t nsub;
+ size_t blockl;
+ size_t nsub;
int16_t nasub;
- int16_t no_of_bytes, no_of_words;
+ size_t no_of_bytes, no_of_words;
int16_t lpc_n;
- int16_t state_short_len;
+ size_t state_short_len;
/* analysis filter state */
int16_t anaMem[LPC_FILTERORDER];
@@ -164,7 +164,7 @@
int16_t Nfor_flag;
int16_t Nback_flag;
int16_t start_pos;
- int16_t diff;
+ size_t diff;
#endif
} IlbcEncoder;
@@ -176,12 +176,12 @@
int16_t mode;
/* basic parameters for different frame sizes */
- int16_t blockl;
- int16_t nsub;
+ size_t blockl;
+ size_t nsub;
int16_t nasub;
- int16_t no_of_bytes, no_of_words;
+ size_t no_of_bytes, no_of_words;
int16_t lpc_n;
- int16_t state_short_len;
+ size_t state_short_len;
/* synthesis filter state */
int16_t syntMem[LPC_FILTERORDER];
@@ -190,14 +190,15 @@
int16_t lsfdeqold[LPC_FILTERORDER];
/* pitch lag estimated in enhancer and used in PLC */
- int last_lag;
+ size_t last_lag;
/* PLC state information */
int consPLICount, prev_enh_pl;
int16_t perSquare;
int16_t prevScale, prevPLI;
- int16_t prevLag, prevLpc[LPC_FILTERORDER+1];
+ size_t prevLag;
+ int16_t prevLpc[LPC_FILTERORDER+1];
int16_t prevResidual[NSUB_MAX*SUBL];
int16_t seed;
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/do_plc.c b/webrtc/modules/audio_coding/codecs/ilbc/do_plc.c
index b313b58..f74439e 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/do_plc.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/do_plc.c
@@ -33,18 +33,19 @@
0 - no PL, 1 = PL */
int16_t *decresidual, /* (i) decoded residual */
int16_t *lpc, /* (i) decoded LPC (only used for no PL) */
- int16_t inlag, /* (i) pitch lag */
+ size_t inlag, /* (i) pitch lag */
IlbcDecoder *iLBCdec_inst
/* (i/o) decoder instance */
){
- int16_t i;
+ size_t i;
int32_t cross, ener, cross_comp, ener_comp = 0;
int32_t measure, maxMeasure, energy;
int16_t max, crossSquareMax, crossSquare;
- int16_t j, lag, tmp1, tmp2, randlag;
+ size_t j, lag, randlag;
+ int16_t tmp1, tmp2;
int16_t shift1, shift2, shift3, shiftMax;
int16_t scale3;
- int16_t corrLen;
+ size_t corrLen;
int32_t tmpW32, tmp2W32;
int16_t use_gain;
int16_t tot_gain;
@@ -54,7 +55,7 @@
int32_t nom;
int16_t denom;
int16_t pitchfact;
- int16_t use_lag;
+ size_t use_lag;
int ind;
int16_t randvec[BLOCKL_MAX];
@@ -71,7 +72,7 @@
/* Maximum 60 samples are correlated, preserve as high accuracy
as possible without getting overflow */
max = WebRtcSpl_MaxAbsValueW16((*iLBCdec_inst).prevResidual,
- (int16_t)iLBCdec_inst->blockl);
+ iLBCdec_inst->blockl);
scale3 = (WebRtcSpl_GetSizeInBits(max)<<1) - 25;
if (scale3 < 0) {
scale3 = 0;
@@ -86,7 +87,7 @@
lag = inlag - 3;
/* Guard against getting outside the frame */
- corrLen = WEBRTC_SPL_MIN(60, iLBCdec_inst->blockl-(inlag+3));
+ corrLen = (size_t)WEBRTC_SPL_MIN(60, iLBCdec_inst->blockl-(inlag+3));
WebRtcIlbcfix_CompCorr( &cross, &ener,
iLBCdec_inst->prevResidual, lag, iLBCdec_inst->blockl, corrLen, scale3);
@@ -234,7 +235,7 @@
/* noise component - 52 < randlagFIX < 117 */
iLBCdec_inst->seed = (int16_t)(iLBCdec_inst->seed * 31821 + 13849);
- randlag = 53 + (int16_t)(iLBCdec_inst->seed & 63);
+ randlag = 53 + (iLBCdec_inst->seed & 63);
if (randlag > i) {
randvec[i] =
iLBCdec_inst->prevResidual[iLBCdec_inst->blockl + i - randlag];
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/do_plc.h b/webrtc/modules/audio_coding/codecs/ilbc/do_plc.h
index c55b815..38b8fdb 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/do_plc.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/do_plc.h
@@ -33,7 +33,7 @@
0 - no PL, 1 = PL */
int16_t *decresidual, /* (i) decoded residual */
int16_t *lpc, /* (i) decoded LPC (only used for no PL) */
- int16_t inlag, /* (i) pitch lag */
+ size_t inlag, /* (i) pitch lag */
IlbcDecoder *iLBCdec_inst
/* (i/o) decoder instance */
);
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/encode.c b/webrtc/modules/audio_coding/codecs/ilbc/encode.c
index 114ce1f..812ec8d 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/encode.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/encode.c
@@ -48,11 +48,11 @@
IlbcEncoder *iLBCenc_inst /* (i/o) the general encoder
state */
){
- int n, meml_gotten, Nfor;
- int16_t diff, start_pos;
- int index;
- int subcount, subframe;
- int16_t start_count, end_count;
+ size_t n, meml_gotten, Nfor;
+ size_t diff, start_pos;
+ size_t index;
+ size_t subcount, subframe;
+ size_t start_count, end_count;
int16_t *residual;
int32_t en1, en2;
int16_t scale, max;
@@ -86,7 +86,7 @@
#ifdef SPLIT_10MS
WebRtcSpl_MemSetW16 ( (int16_t *) iLBCbits_inst, 0,
- (int16_t) (sizeof(iLBC_bits) / sizeof(int16_t)) );
+ sizeof(iLBC_bits) / sizeof(int16_t) );
start_pos = iLBCenc_inst->start_pos;
diff = iLBCenc_inst->diff;
@@ -317,17 +317,17 @@
if (iLBCenc_inst->section == 1)
{
start_count = 0;
- end_count = WEBRTC_SPL_MIN (Nfor, 2);
+ end_count = WEBRTC_SPL_MIN (Nfor, (size_t)2);
}
if (iLBCenc_inst->section == 2)
{
- start_count = WEBRTC_SPL_MIN (Nfor, 2);
+ start_count = WEBRTC_SPL_MIN (Nfor, (size_t)2);
end_count = Nfor;
}
}
#else
start_count = 0;
- end_count = (int16_t)Nfor;
+ end_count = Nfor;
#endif
/* loop over subframes to encode */
@@ -341,7 +341,7 @@
&residual[(iLBCbits_inst->startIdx+1+subframe)*SUBL],
mem, MEM_LF_TBL, SUBL,
&weightdenum[(iLBCbits_inst->startIdx+1+subframe)*(LPC_FILTERORDER+1)],
- (int16_t)subcount);
+ subcount);
/* construct decoded vector */
@@ -386,7 +386,7 @@
contained in the same vector as the residual)
*/
- int Nback = iLBCbits_inst->startIdx - 1;
+ size_t Nback = iLBCbits_inst->startIdx - 1;
WebRtcSpl_MemCpyReversedOrder(&reverseResidual[Nback*SUBL-1], residual, Nback*SUBL);
/* setup memory */
@@ -434,7 +434,7 @@
}
#else
start_count = 0;
- end_count = (int16_t)Nback;
+ end_count = Nback;
#endif
/* loop over subframes to encode */
@@ -447,7 +447,7 @@
iLBCbits_inst->gain_index+subcount*CB_NSTAGES, &reverseResidual[subframe*SUBL],
mem, MEM_LF_TBL, SUBL,
&weightdenum[(iLBCbits_inst->startIdx-2-subframe)*(LPC_FILTERORDER+1)],
- (int16_t)subcount);
+ subcount);
/* construct decoded vector */
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/energy_inverse.c b/webrtc/modules/audio_coding/codecs/ilbc/energy_inverse.c
index a6b1c75..b2bdcff 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/energy_inverse.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/energy_inverse.c
@@ -23,12 +23,12 @@
void WebRtcIlbcfix_EnergyInverse(
int16_t *energy, /* (i/o) Energy and inverse
energy (in Q29) */
- int noOfEnergies) /* (i) The length of the energy
+ size_t noOfEnergies) /* (i) The length of the energy
vector */
{
int32_t Nom=(int32_t)0x1FFFFFFF;
int16_t *energyPtr;
- int i;
+ size_t i;
/* Set the minimum energy value to 16384 to avoid overflow */
energyPtr=energy;
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/energy_inverse.h b/webrtc/modules/audio_coding/codecs/ilbc/energy_inverse.h
index 7bb6721..fe25094 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/energy_inverse.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/energy_inverse.h
@@ -26,7 +26,7 @@
void WebRtcIlbcfix_EnergyInverse(
int16_t *energy, /* (i/o) Energy and inverse
energy (in Q29) */
- int noOfEnergies); /* (i) The length of the energy
+ size_t noOfEnergies); /* (i) The length of the energy
vector */
#endif
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/enhancer.c b/webrtc/modules/audio_coding/codecs/ilbc/enhancer.c
index 38c3de3..5683597 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/enhancer.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/enhancer.c
@@ -33,7 +33,7 @@
int16_t centerStartPos, /* (i) first sample current block within idata */
int16_t *period, /* (i) pitch period array (pitch bward-in time) */
int16_t *plocs, /* (i) locations where period array values valid */
- int16_t periodl /* (i) dimension of period and plocs */
+ size_t periodl /* (i) dimension of period and plocs */
){
/* Stack based */
int16_t surround[ENH_BLOCKL];
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/enhancer.h b/webrtc/modules/audio_coding/codecs/ilbc/enhancer.h
index 83f48b0..78a12d3 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/enhancer.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/enhancer.h
@@ -33,7 +33,7 @@
int16_t centerStartPos, /* (i) first sample current block within idata */
int16_t *period, /* (i) pitch period array (pitch bward-in time) */
int16_t *plocs, /* (i) locations where period array values valid */
- int16_t periodl /* (i) dimension of period and plocs */
+ size_t periodl /* (i) dimension of period and plocs */
);
#endif
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/enhancer_interface.c b/webrtc/modules/audio_coding/codecs/ilbc/enhancer_interface.c
index c630dd5..f15aee6 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/enhancer_interface.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/enhancer_interface.c
@@ -30,19 +30,21 @@
* interface for enhancer
*---------------------------------------------------------------*/
-int WebRtcIlbcfix_EnhancerInterface( /* (o) Estimated lag in end of in[] */
+size_t WebRtcIlbcfix_EnhancerInterface( /* (o) Estimated lag in end of in[] */
int16_t *out, /* (o) enhanced signal */
int16_t *in, /* (i) unenhanced signal */
IlbcDecoder *iLBCdec_inst /* (i) buffers etc */
){
int iblock;
- int lag=20, tlag=20;
- int inLen=iLBCdec_inst->blockl+120;
- int16_t scale, scale1, plc_blockl;
+ size_t lag=20, tlag=20;
+ size_t inLen=iLBCdec_inst->blockl+120;
+ int16_t scale, scale1;
+ size_t plc_blockl;
int16_t *enh_buf, *enh_period;
int32_t tmp1, tmp2, max, new_blocks;
int16_t *enh_bufPtr1;
- int i, k;
+ size_t i;
+ int k;
int16_t EnChange;
int16_t SqrtEnChange;
int16_t inc;
@@ -56,7 +58,8 @@
int32_t ener;
int16_t enerSh;
int16_t corrSh;
- int16_t ind, sh;
+ size_t ind;
+ int16_t sh;
int16_t start, stop;
/* Stack based */
int16_t totsh[3];
@@ -168,7 +171,7 @@
}
}
- lag = lagmax[ind] + 10;
+ lag = (size_t)(lagmax[ind] + 10);
/* Store the estimated lag in the non-downsampled domain */
enh_period[ENH_NBLOCKS_TOT - new_blocks + iblock] = (int16_t)(lag * 8);
@@ -224,7 +227,7 @@
(plc_blockl-lag));
}
} else {
- int pos;
+ size_t pos;
pos = plc_blockl;
@@ -280,8 +283,8 @@
/* Multiply first part of vector with 2*SqrtEnChange */
- WebRtcSpl_ScaleVector(plc_pred, plc_pred, SqrtEnChange,
- (int16_t)(plc_blockl-16), 14);
+ WebRtcSpl_ScaleVector(plc_pred, plc_pred, SqrtEnChange, plc_blockl-16,
+ 14);
/* Calculate increase parameter for window part (16 last samples) */
/* (1-2*SqrtEnChange)/16 in Q15 */
@@ -343,7 +346,7 @@
LPC_FILTERORDER);
WebRtcIlbcfix_HpOutput(synt, (int16_t*)WebRtcIlbcfix_kHpOutCoefs,
iLBCdec_inst->hpimemy, iLBCdec_inst->hpimemx,
- (int16_t)lag);
+ lag);
WebRtcSpl_FilterARFastQ12(
enh_bufPtr1, synt,
&iLBCdec_inst->old_syntdenum[
@@ -354,7 +357,7 @@
LPC_FILTERORDER);
WebRtcIlbcfix_HpOutput(synt, (int16_t*)WebRtcIlbcfix_kHpOutCoefs,
iLBCdec_inst->hpimemy, iLBCdec_inst->hpimemx,
- (int16_t)lag);
+ lag);
}
}
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/enhancer_interface.h b/webrtc/modules/audio_coding/codecs/ilbc/enhancer_interface.h
index fa58b7a..61efd22 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/enhancer_interface.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/enhancer_interface.h
@@ -25,7 +25,7 @@
* interface for enhancer
*---------------------------------------------------------------*/
-int WebRtcIlbcfix_EnhancerInterface( /* (o) Estimated lag in end of in[] */
+size_t WebRtcIlbcfix_EnhancerInterface( /* (o) Estimated lag in end of in[] */
int16_t *out, /* (o) enhanced signal */
int16_t *in, /* (i) unenhanced signal */
IlbcDecoder *iLBCdec_inst /* (i) buffers etc */
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/filtered_cb_vecs.c b/webrtc/modules/audio_coding/codecs/ilbc/filtered_cb_vecs.c
index aa8170c..04d17a6 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/filtered_cb_vecs.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/filtered_cb_vecs.c
@@ -29,8 +29,8 @@
int16_t *cbvectors, /* (o) Codebook vector for the higher section */
int16_t *CBmem, /* (i) Codebook memory that is filtered to create a
second CB section */
- int lMem, /* (i) Length of codebook memory */
- int16_t samples /* (i) Number of samples to filter */
+ size_t lMem, /* (i) Length of codebook memory */
+ size_t samples /* (i) Number of samples to filter */
) {
/* Set up the memory, start with zero state */
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/filtered_cb_vecs.h b/webrtc/modules/audio_coding/codecs/ilbc/filtered_cb_vecs.h
index 99e89a0..d23b25c 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/filtered_cb_vecs.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/filtered_cb_vecs.h
@@ -31,8 +31,8 @@
int16_t *cbvectors, /* (o) Codebook vector for the higher section */
int16_t *CBmem, /* (i) Codebook memory that is filtered to create a
second CB section */
- int lMem, /* (i) Length of codebook memory */
- int16_t samples /* (i) Number of samples to filter */
+ size_t lMem, /* (i) Length of codebook memory */
+ size_t samples /* (i) Number of samples to filter */
);
#endif
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/frame_classify.c b/webrtc/modules/audio_coding/codecs/ilbc/frame_classify.c
index 6a68dec..f442f6a 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/frame_classify.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/frame_classify.c
@@ -23,7 +23,7 @@
* Classification of subframes to localize start state
*---------------------------------------------------------------*/
-int16_t WebRtcIlbcfix_FrameClassify(
+size_t WebRtcIlbcfix_FrameClassify(
/* (o) Index to the max-energy sub frame */
IlbcEncoder *iLBCenc_inst,
/* (i/o) the encoder state structure */
@@ -35,8 +35,8 @@
int32_t *seqEnPtr;
int32_t maxW32;
int16_t scale1;
- int16_t pos;
- int n;
+ size_t pos;
+ size_t n;
/*
Calculate the energy of each of the 80 sample blocks
@@ -82,7 +82,7 @@
}
/* Extract the best choise of start state */
- pos = WebRtcSpl_MaxIndexW32(ssqEn, iLBCenc_inst->nsub - 1) + 1;
+ pos = (size_t)WebRtcSpl_MaxIndexW32(ssqEn, iLBCenc_inst->nsub - 1) + 1;
return(pos);
}
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/frame_classify.h b/webrtc/modules/audio_coding/codecs/ilbc/frame_classify.h
index b32e2c8..99f7144 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/frame_classify.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/frame_classify.h
@@ -19,7 +19,7 @@
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_FRAME_CLASSIFY_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_FRAME_CLASSIFY_H_
-int16_t WebRtcIlbcfix_FrameClassify(
+size_t WebRtcIlbcfix_FrameClassify(
/* (o) Index to the max-energy sub frame */
IlbcEncoder *iLBCenc_inst,
/* (i/o) the encoder state structure */
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/get_cd_vec.c b/webrtc/modules/audio_coding/codecs/ilbc/get_cd_vec.c
index cf05ce3..d7c2e75 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/get_cd_vec.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/get_cd_vec.c
@@ -27,12 +27,12 @@
void WebRtcIlbcfix_GetCbVec(
int16_t *cbvec, /* (o) Constructed codebook vector */
int16_t *mem, /* (i) Codebook buffer */
- int16_t index, /* (i) Codebook index */
- int16_t lMem, /* (i) Length of codebook buffer */
- int16_t cbveclen /* (i) Codebook vector length */
+ size_t index, /* (i) Codebook index */
+ size_t lMem, /* (i) Length of codebook buffer */
+ size_t cbveclen /* (i) Codebook vector length */
){
- int16_t k, base_size;
- int16_t lag;
+ size_t k, base_size;
+ size_t lag;
/* Stack based */
int16_t tempbuff2[SUBL+5];
@@ -58,7 +58,7 @@
/* Calculate lag */
- k = (int16_t)(2 * (index - (lMem - cbveclen + 1))) + cbveclen;
+ k = (2 * (index - (lMem - cbveclen + 1))) + cbveclen;
lag = k / 2;
@@ -70,7 +70,7 @@
else {
- int16_t memIndTest;
+ size_t memIndTest;
/* first non-interpolated vectors */
@@ -100,7 +100,7 @@
/* do filtering */
WebRtcSpl_FilterMAFastQ12(
&mem[memIndTest+7], tempbuff2, (int16_t*)WebRtcIlbcfix_kCbFiltersRev,
- CB_FILTERLEN, (int16_t)(cbveclen+5));
+ CB_FILTERLEN, cbveclen+5);
/* Calculate lag index */
lag = (cbveclen<<1)-20+index-base_size-lMem-1;
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/get_cd_vec.h b/webrtc/modules/audio_coding/codecs/ilbc/get_cd_vec.h
index 1c5ac8f..07f67a2 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/get_cd_vec.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/get_cd_vec.h
@@ -22,9 +22,9 @@
void WebRtcIlbcfix_GetCbVec(
int16_t *cbvec, /* (o) Constructed codebook vector */
int16_t *mem, /* (i) Codebook buffer */
- int16_t index, /* (i) Codebook index */
- int16_t lMem, /* (i) Length of codebook buffer */
- int16_t cbveclen /* (i) Codebook vector length */
+ size_t index, /* (i) Codebook index */
+ size_t lMem, /* (i) Length of codebook buffer */
+ size_t cbveclen /* (i) Codebook vector length */
);
#endif
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/get_sync_seq.c b/webrtc/modules/audio_coding/codecs/ilbc/get_sync_seq.c
index 480ed7c..66dfafb 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/get_sync_seq.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/get_sync_seq.c
@@ -31,12 +31,13 @@
int16_t centerStartPos, /* (i) where current block starts */
int16_t *period, /* (i) rough-pitch-period array (Q-2) */
int16_t *plocs, /* (i) where periods of period array are taken (Q-2) */
- int16_t periodl, /* (i) dimension period array */
+ size_t periodl, /* (i) dimension period array */
int16_t hl, /* (i) 2*hl+1 is the number of sequences */
int16_t *surround /* (i/o) The contribution from this sequence
summed with earlier contributions */
){
- int16_t i,centerEndPos,q;
+ size_t i;
+ int16_t centerEndPos,q;
/* Stack based */
int16_t lagBlock[2*ENH_HL+1];
int16_t blockStartPos[2*ENH_HL+1]; /* Defines the position to search around (Q2) */
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/get_sync_seq.h b/webrtc/modules/audio_coding/codecs/ilbc/get_sync_seq.h
index f9b08b7..5b59f98 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/get_sync_seq.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/get_sync_seq.h
@@ -31,7 +31,7 @@
int16_t centerStartPos, /* (i) where current block starts */
int16_t *period, /* (i) rough-pitch-period array (Q-2) */
int16_t *plocs, /* (i) where periods of period array are taken (Q-2) */
- int16_t periodl, /* (i) dimension period array */
+ size_t periodl, /* (i) dimension period array */
int16_t hl, /* (i) 2*hl+1 is the number of sequences */
int16_t *surround /* (i/o) The contribution from this sequence
summed with earlier contributions */
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/hp_input.c b/webrtc/modules/audio_coding/codecs/ilbc/hp_input.c
index 260591e..5d8a860 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/hp_input.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/hp_input.c
@@ -30,9 +30,9 @@
int16_t *y, /* (i/o) Filter state yhi[n-1] ylow[n-1]
yhi[n-2] ylow[n-2] */
int16_t *x, /* (i/o) Filter state x[n-1] x[n-2] */
- int16_t len) /* (i) Number of samples to filter */
+ size_t len) /* (i) Number of samples to filter */
{
- int i;
+ size_t i;
int32_t tmpW32;
int32_t tmpW32b;
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/hp_input.h b/webrtc/modules/audio_coding/codecs/ilbc/hp_input.h
index a30f703..acdfa91 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/hp_input.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/hp_input.h
@@ -29,6 +29,6 @@
int16_t *y, /* (i/o) Filter state yhi[n-1] ylow[n-1]
yhi[n-2] ylow[n-2] */
int16_t *x, /* (i/o) Filter state x[n-1] x[n-2] */
- int16_t len); /* (i) Number of samples to filter */
+ size_t len); /* (i) Number of samples to filter */
#endif
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/hp_output.c b/webrtc/modules/audio_coding/codecs/ilbc/hp_output.c
index 3abb427..bd101bf 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/hp_output.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/hp_output.c
@@ -30,9 +30,9 @@
int16_t *y, /* (i/o) Filter state yhi[n-1] ylow[n-1]
yhi[n-2] ylow[n-2] */
int16_t *x, /* (i/o) Filter state x[n-1] x[n-2] */
- int16_t len) /* (i) Number of samples to filter */
+ size_t len) /* (i) Number of samples to filter */
{
- int i;
+ size_t i;
int32_t tmpW32;
int32_t tmpW32b;
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/hp_output.h b/webrtc/modules/audio_coding/codecs/ilbc/hp_output.h
index 7937ba0..1840b68 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/hp_output.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/hp_output.h
@@ -29,6 +29,6 @@
int16_t *y, /* (i/o) Filter state yhi[n-1] ylow[n-1]
yhi[n-2] ylow[n-2] */
int16_t *x, /* (i/o) Filter state x[n-1] x[n-2] */
- int16_t len); /* (i) Number of samples to filter */
+ size_t len); /* (i) Number of samples to filter */
#endif
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/ilbc.c b/webrtc/modules/audio_coding/codecs/ilbc/ilbc.c
index e41c095..c565a24 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/ilbc.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/ilbc.c
@@ -90,10 +90,10 @@
int WebRtcIlbcfix_Encode(IlbcEncoderInstance* iLBCenc_inst,
const int16_t* speechIn,
- int16_t len,
+ size_t len,
uint8_t* encoded) {
- int16_t pos = 0;
- int16_t encpos = 0;
+ size_t pos = 0;
+ size_t encpos = 0;
if ((len != ((IlbcEncoder*)iLBCenc_inst)->blockl) &&
#ifdef SPLIT_10MS
@@ -118,7 +118,7 @@
#endif
encpos += ((IlbcEncoder*)iLBCenc_inst)->no_of_words;
}
- return (encpos*2);
+ return (int)(encpos*2);
}
}
@@ -143,11 +143,11 @@
int WebRtcIlbcfix_Decode(IlbcDecoderInstance* iLBCdec_inst,
const uint8_t* encoded,
- int16_t len,
+ size_t len,
int16_t* decoded,
int16_t* speechType)
{
- int i=0;
+ size_t i=0;
/* Allow for automatic switching between the frame sizes
(although you do get some discontinuity) */
if ((len==((IlbcDecoder*)iLBCdec_inst)->no_of_bytes)||
@@ -191,16 +191,16 @@
}
/* iLBC does not support VAD/CNG yet */
*speechType=1;
- return(i*((IlbcDecoder*)iLBCdec_inst)->blockl);
+ return (int)(i*((IlbcDecoder*)iLBCdec_inst)->blockl);
}
int WebRtcIlbcfix_Decode20Ms(IlbcDecoderInstance* iLBCdec_inst,
const uint8_t* encoded,
- int16_t len,
+ size_t len,
int16_t* decoded,
int16_t* speechType)
{
- int i=0;
+ size_t i=0;
if ((len==((IlbcDecoder*)iLBCdec_inst)->no_of_bytes)||
(len==2*((IlbcDecoder*)iLBCdec_inst)->no_of_bytes)||
(len==3*((IlbcDecoder*)iLBCdec_inst)->no_of_bytes)) {
@@ -219,16 +219,16 @@
}
/* iLBC does not support VAD/CNG yet */
*speechType=1;
- return(i*((IlbcDecoder*)iLBCdec_inst)->blockl);
+ return (int)(i*((IlbcDecoder*)iLBCdec_inst)->blockl);
}
int WebRtcIlbcfix_Decode30Ms(IlbcDecoderInstance* iLBCdec_inst,
const uint8_t* encoded,
- int16_t len,
+ size_t len,
int16_t* decoded,
int16_t* speechType)
{
- int i=0;
+ size_t i=0;
if ((len==((IlbcDecoder*)iLBCdec_inst)->no_of_bytes)||
(len==2*((IlbcDecoder*)iLBCdec_inst)->no_of_bytes)||
(len==3*((IlbcDecoder*)iLBCdec_inst)->no_of_bytes)) {
@@ -247,13 +247,13 @@
}
/* iLBC does not support VAD/CNG yet */
*speechType=1;
- return(i*((IlbcDecoder*)iLBCdec_inst)->blockl);
+ return (int)(i*((IlbcDecoder*)iLBCdec_inst)->blockl);
}
-int16_t WebRtcIlbcfix_DecodePlc(IlbcDecoderInstance* iLBCdec_inst,
- int16_t* decoded,
- int16_t noOfLostFrames) {
- int i;
+size_t WebRtcIlbcfix_DecodePlc(IlbcDecoderInstance* iLBCdec_inst,
+ int16_t* decoded,
+ size_t noOfLostFrames) {
+ size_t i;
uint16_t dummy;
for (i=0;i<noOfLostFrames;i++) {
@@ -265,9 +265,9 @@
return (noOfLostFrames*((IlbcDecoder*)iLBCdec_inst)->blockl);
}
-int16_t WebRtcIlbcfix_NetEqPlc(IlbcDecoderInstance* iLBCdec_inst,
- int16_t* decoded,
- int16_t noOfLostFrames) {
+size_t WebRtcIlbcfix_NetEqPlc(IlbcDecoderInstance* iLBCdec_inst,
+ int16_t* decoded,
+ size_t noOfLostFrames) {
/* Two input parameters not used, but needed for function pointers in NetEQ */
(void)(decoded = NULL);
(void)(noOfLostFrames = 0);
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/init_decode.c b/webrtc/modules/audio_coding/codecs/ilbc/init_decode.c
index 0659e50..1f92480 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/init_decode.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/init_decode.c
@@ -92,5 +92,5 @@
iLBCdec_inst->prev_enh_pl = 0;
- return (iLBCdec_inst->blockl);
+ return (int)(iLBCdec_inst->blockl);
}
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/init_encode.c b/webrtc/modules/audio_coding/codecs/ilbc/init_encode.c
index 9c562db..f559d84 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/init_encode.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/init_encode.c
@@ -67,5 +67,5 @@
iLBCenc_inst->section = 0;
#endif
- return (iLBCenc_inst->no_of_bytes);
+ return (int)(iLBCenc_inst->no_of_bytes);
}
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/interface/audio_encoder_ilbc.h b/webrtc/modules/audio_coding/codecs/ilbc/interface/audio_encoder_ilbc.h
index b627c3a..c3cf4d8 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/interface/audio_encoder_ilbc.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/interface/audio_encoder_ilbc.h
@@ -36,8 +36,8 @@
int SampleRateHz() const override;
int NumChannels() const override;
size_t MaxEncodedBytes() const override;
- int Num10MsFramesInNextPacket() const override;
- int Max10MsFramesInAPacket() const override;
+ size_t Num10MsFramesInNextPacket() const override;
+ size_t Max10MsFramesInAPacket() const override;
int GetTargetBitrate() const override;
EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
const int16_t* audio,
@@ -47,10 +47,10 @@
private:
size_t RequiredOutputSizeBytes() const;
- static const int kMaxSamplesPerPacket = 480;
+ static const size_t kMaxSamplesPerPacket = 480;
const int payload_type_;
- const int num_10ms_frames_per_packet_;
- int num_10ms_frames_buffered_;
+ const size_t num_10ms_frames_per_packet_;
+ size_t num_10ms_frames_buffered_;
uint32_t first_timestamp_in_buffer_;
int16_t input_buffer_[kMaxSamplesPerPacket];
IlbcEncoderInstance* encoder_;
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/interface/ilbc.h b/webrtc/modules/audio_coding/codecs/ilbc/interface/ilbc.h
index 4934968..be0b121 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/interface/ilbc.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/interface/ilbc.h
@@ -18,6 +18,8 @@
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_INTERFACE_ILBC_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_INTERFACE_ILBC_H_
+#include <stddef.h>
+
/*
* Define the fixpoint numeric formats
*/
@@ -137,7 +139,7 @@
int WebRtcIlbcfix_Encode(IlbcEncoderInstance *iLBCenc_inst,
const int16_t *speechIn,
- int16_t len,
+ size_t len,
uint8_t* encoded);
/****************************************************************************
@@ -182,17 +184,17 @@
int WebRtcIlbcfix_Decode(IlbcDecoderInstance* iLBCdec_inst,
const uint8_t* encoded,
- int16_t len,
+ size_t len,
int16_t* decoded,
int16_t* speechType);
int WebRtcIlbcfix_Decode20Ms(IlbcDecoderInstance* iLBCdec_inst,
const uint8_t* encoded,
- int16_t len,
+ size_t len,
int16_t* decoded,
int16_t* speechType);
int WebRtcIlbcfix_Decode30Ms(IlbcDecoderInstance* iLBCdec_inst,
const uint8_t* encoded,
- int16_t len,
+ size_t len,
int16_t* decoded,
int16_t* speechType);
@@ -210,13 +212,12 @@
* Output:
* - decoded : The "decoded" vector
*
- * Return value : >0 - Samples in decoded PLC vector
- * -1 - Error
+ * Return value : Samples in decoded PLC vector
*/
- int16_t WebRtcIlbcfix_DecodePlc(IlbcDecoderInstance *iLBCdec_inst,
- int16_t *decoded,
- int16_t noOfLostFrames);
+ size_t WebRtcIlbcfix_DecodePlc(IlbcDecoderInstance *iLBCdec_inst,
+ int16_t *decoded,
+ size_t noOfLostFrames);
/****************************************************************************
* WebRtcIlbcfix_NetEqPlc(...)
@@ -232,13 +233,12 @@
* Output:
* - decoded : The "decoded" vector (nothing in this case)
*
- * Return value : >0 - Samples in decoded PLC vector
- * -1 - Error
+ * Return value : Samples in decoded PLC vector
*/
- int16_t WebRtcIlbcfix_NetEqPlc(IlbcDecoderInstance *iLBCdec_inst,
- int16_t *decoded,
- int16_t noOfLostFrames);
+ size_t WebRtcIlbcfix_NetEqPlc(IlbcDecoderInstance *iLBCdec_inst,
+ int16_t *decoded,
+ size_t noOfLostFrames);
/****************************************************************************
* WebRtcIlbcfix_version(...)
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/interpolate_samples.c b/webrtc/modules/audio_coding/codecs/ilbc/interpolate_samples.c
index 4957142..376dbbb 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/interpolate_samples.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/interpolate_samples.c
@@ -22,7 +22,7 @@
void WebRtcIlbcfix_InterpolateSamples(
int16_t *interpSamples, /* (o) The interpolated samples */
int16_t *CBmem, /* (i) The CB memory */
- int16_t lMem /* (i) Length of the CB memory */
+ size_t lMem /* (i) Length of the CB memory */
) {
int16_t *ppi, *ppo, i, j, temp1, temp2;
int16_t *tmpPtr;
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/interpolate_samples.h b/webrtc/modules/audio_coding/codecs/ilbc/interpolate_samples.h
index 586c27d..7549d2c 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/interpolate_samples.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/interpolate_samples.h
@@ -28,7 +28,7 @@
void WebRtcIlbcfix_InterpolateSamples(
int16_t *interpSamples, /* (o) The interpolated samples */
int16_t *CBmem, /* (i) The CB memory */
- int16_t lMem /* (i) Length of the CB memory */
+ size_t lMem /* (i) Length of the CB memory */
);
#endif
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/my_corr.c b/webrtc/modules/audio_coding/codecs/ilbc/my_corr.c
index 3261015..bd6ff56 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/my_corr.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/my_corr.c
@@ -25,11 +25,12 @@
void WebRtcIlbcfix_MyCorr(
int32_t* corr, /* (o) correlation of seq1 and seq2 */
const int16_t* seq1, /* (i) first sequence */
- int16_t dim1, /* (i) dimension first seq1 */
+ size_t dim1, /* (i) dimension first seq1 */
const int16_t* seq2, /* (i) second sequence */
- int16_t dim2 /* (i) dimension seq2 */
+ size_t dim2 /* (i) dimension seq2 */
){
- int16_t max, loops;
+ int16_t max;
+ size_t loops;
int scale;
/* Calculate correlation between the two sequences. Scale the
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/my_corr.h b/webrtc/modules/audio_coding/codecs/ilbc/my_corr.h
index a74dd1e..2149464 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/my_corr.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/my_corr.h
@@ -28,9 +28,9 @@
void WebRtcIlbcfix_MyCorr(
int32_t* corr, /* (o) correlation of seq1 and seq2 */
const int16_t* seq1, /* (i) first sequence */
- int16_t dim1, /* (i) dimension first seq1 */
+ size_t dim1, /* (i) dimension first seq1 */
const int16_t* seq2, /* (i) second sequence */
- int16_t dim2 /* (i) dimension seq2 */
+ size_t dim2 /* (i) dimension seq2 */
);
#endif
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/nearest_neighbor.c b/webrtc/modules/audio_coding/codecs/ilbc/nearest_neighbor.c
index 30c7a03..b6cc240 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/nearest_neighbor.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/nearest_neighbor.c
@@ -28,9 +28,9 @@
int16_t *index, /* (o) index of array element closest to value */
int16_t *array, /* (i) data array (Q2) */
int16_t value, /* (i) value (Q2) */
- int16_t arlength /* (i) dimension of data array (==8) */
+ size_t arlength /* (i) dimension of data array (==8) */
){
- int i;
+ size_t i;
int16_t diff;
/* Stack based */
int32_t crit[8];
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/nearest_neighbor.h b/webrtc/modules/audio_coding/codecs/ilbc/nearest_neighbor.h
index 0c03470..4c7ed3e 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/nearest_neighbor.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/nearest_neighbor.h
@@ -31,7 +31,7 @@
int16_t *index, /* (o) index of array element closest to value */
int16_t *array, /* (i) data array (Q2) */
int16_t value, /* (i) value (Q2) */
- int16_t arlength /* (i) dimension of data array (==8) */
+ size_t arlength /* (i) dimension of data array (==8) */
);
#endif
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/refiner.c b/webrtc/modules/audio_coding/codecs/ilbc/refiner.c
index 2fff362..86df81c 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/refiner.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/refiner.c
@@ -39,8 +39,9 @@
summed with earlier contributions */
int16_t gain /* (i) Gain to use for this sequence */
){
- int16_t estSegPosRounded,searchSegStartPos,searchSegEndPos,corrdim;
- int16_t tloc,tloc2,i,st,en,fraction;
+ int16_t estSegPosRounded,searchSegStartPos,searchSegEndPos;
+ size_t corrdim,i;
+ int16_t tloc,tloc2,st,en,fraction;
int32_t maxtemp, scalefact;
int16_t *filtStatePtr, *polyPtr;
@@ -65,13 +66,13 @@
if(searchSegEndPos+ENH_BLOCKL >= idatal) {
searchSegEndPos=idatal-ENH_BLOCKL-1;
}
- corrdim=searchSegEndPos-searchSegStartPos+1;
+ corrdim=(size_t)(searchSegEndPos-searchSegStartPos+1);
/* compute upsampled correlation and find
location of max */
WebRtcIlbcfix_MyCorr(corrVecTemp,idata+searchSegStartPos,
- (int16_t)(corrdim+ENH_BLOCKL-1),idata+centerStartPos,ENH_BLOCKL);
+ corrdim+ENH_BLOCKL-1,idata+centerStartPos,ENH_BLOCKL);
/* Calculate the rescaling factor for the correlation in order to
put the correlation in a int16_t vector instead */
@@ -110,7 +111,7 @@
/* initialize the vector to be filtered, stuff with zeros
when data is outside idata buffer */
if(st<0){
- WebRtcSpl_MemSetW16(vect, 0, (int16_t)(-st));
+ WebRtcSpl_MemSetW16(vect, 0, (size_t)(-st));
WEBRTC_SPL_MEMCPY_W16(&vect[-st], idata, (ENH_VECTL+st));
}
else{
@@ -120,7 +121,7 @@
WEBRTC_SPL_MEMCPY_W16(vect, &idata[st],
(ENH_VECTL-(en-idatal)));
WebRtcSpl_MemSetW16(&vect[ENH_VECTL-(en-idatal)], 0,
- (int16_t)(en-idatal));
+ (size_t)(en-idatal));
}
else {
WEBRTC_SPL_MEMCPY_W16(vect, &idata[st], ENH_VECTL);
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/simple_interpolate_lsf.c b/webrtc/modules/audio_coding/codecs/ilbc/simple_interpolate_lsf.c
index d89770e..e63dda8 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/simple_interpolate_lsf.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/simple_interpolate_lsf.c
@@ -42,7 +42,8 @@
IlbcEncoder *iLBCenc_inst
/* (i/o) the encoder state structure */
) {
- int i, pos, lp_length;
+ size_t i;
+ int pos, lp_length;
int16_t *lsf2, *lsfdeq2;
/* Stack based */
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/simple_lpc_analysis.c b/webrtc/modules/audio_coding/codecs/ilbc/simple_lpc_analysis.c
index dfc637b..72d80e0 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/simple_lpc_analysis.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/simple_lpc_analysis.c
@@ -34,7 +34,7 @@
) {
int k;
int scale;
- int16_t is;
+ size_t is;
int16_t stability;
/* Stack based */
int16_t A[LPC_FILTERORDER + 1];
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/state_construct.c b/webrtc/modules/audio_coding/codecs/ilbc/state_construct.c
index 324b670..29fe91b 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/state_construct.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/state_construct.c
@@ -24,14 +24,14 @@
*---------------------------------------------------------------*/
void WebRtcIlbcfix_StateConstruct(
- int16_t idxForMax, /* (i) 6-bit index for the quantization of
+ size_t idxForMax, /* (i) 6-bit index for the quantization of
max amplitude */
int16_t *idxVec, /* (i) vector of quantization indexes */
int16_t *syntDenum, /* (i) synthesis filter denumerator */
int16_t *Out_fix, /* (o) the decoded state vector */
- int16_t len /* (i) length of a state vector */
+ size_t len /* (i) length of a state vector */
) {
- int k;
+ size_t k;
int16_t maxVal;
int16_t *tmp1, *tmp2, *tmp3;
/* Stack based */
@@ -96,7 +96,7 @@
/* Run MA filter + AR filter */
WebRtcSpl_FilterMAFastQ12(
sampleVal, sampleMa,
- numerator, LPC_FILTERORDER+1, (int16_t)(len + LPC_FILTERORDER));
+ numerator, LPC_FILTERORDER+1, len + LPC_FILTERORDER);
WebRtcSpl_MemSetW16(&sampleMa[len + LPC_FILTERORDER], 0, (len - LPC_FILTERORDER));
WebRtcSpl_FilterARFastQ12(
sampleMa, sampleAr,
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/state_construct.h b/webrtc/modules/audio_coding/codecs/ilbc/state_construct.h
index 22d75e2..2631919 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/state_construct.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/state_construct.h
@@ -24,12 +24,12 @@
*---------------------------------------------------------------*/
void WebRtcIlbcfix_StateConstruct(
- int16_t idxForMax, /* (i) 6-bit index for the quantization of
+ size_t idxForMax, /* (i) 6-bit index for the quantization of
max amplitude */
int16_t *idxVec, /* (i) vector of quantization indexes */
int16_t *syntDenum, /* (i) synthesis filter denumerator */
int16_t *Out_fix, /* (o) the decoded state vector */
- int16_t len /* (i) length of a state vector */
+ size_t len /* (i) length of a state vector */
);
#endif
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/state_search.c b/webrtc/modules/audio_coding/codecs/ilbc/state_search.c
index b2214c7..295c543 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/state_search.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/state_search.c
@@ -33,7 +33,7 @@
int16_t *syntDenum, /* (i) lpc synthesis filter */
int16_t *weightDenum /* (i) weighting filter denuminator */
) {
- int16_t k, index;
+ size_t k, index;
int16_t maxVal;
int16_t scale, shift;
int32_t maxValsq;
@@ -64,9 +64,9 @@
/* Run the Zero-Pole filter (Ciurcular convolution) */
WebRtcSpl_MemSetW16(residualLongVec, 0, LPC_FILTERORDER);
- WebRtcSpl_FilterMAFastQ12(
- residualLong, sampleMa,
- numerator, LPC_FILTERORDER+1, (int16_t)(iLBCenc_inst->state_short_len + LPC_FILTERORDER));
+ WebRtcSpl_FilterMAFastQ12(residualLong, sampleMa, numerator,
+ LPC_FILTERORDER + 1,
+ iLBCenc_inst->state_short_len + LPC_FILTERORDER);
WebRtcSpl_MemSetW16(&sampleMa[iLBCenc_inst->state_short_len + LPC_FILTERORDER], 0, iLBCenc_inst->state_short_len - LPC_FILTERORDER);
WebRtcSpl_FilterARFastQ12(
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/swap_bytes.c b/webrtc/modules/audio_coding/codecs/ilbc/swap_bytes.c
index 8bbac42..b795e56 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/swap_bytes.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/swap_bytes.c
@@ -24,10 +24,10 @@
void WebRtcIlbcfix_SwapBytes(
const uint16_t* input, /* (i) the sequence to swap */
- int16_t wordLength, /* (i) number or uint16_t to swap */
+    size_t wordLength, /* (i) number of uint16_t to swap */
uint16_t* output /* (o) the swapped sequence */
) {
- int k;
+ size_t k;
for (k = wordLength; k > 0; k--) {
*output++ = (*input >> 8)|(*input << 8);
input++;
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/swap_bytes.h b/webrtc/modules/audio_coding/codecs/ilbc/swap_bytes.h
index a909b2c..a4484d6 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/swap_bytes.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/swap_bytes.h
@@ -27,7 +27,7 @@
void WebRtcIlbcfix_SwapBytes(
const uint16_t* input, /* (i) the sequence to swap */
- int16_t wordLength, /* (i) number or uint16_t to swap */
+    size_t wordLength, /* (i) number of uint16_t to swap */
uint16_t* output /* (o) the swapped sequence */
);
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/test/iLBC_test.c b/webrtc/modules/audio_coding/codecs/ilbc/test/iLBC_test.c
index 6ee3df4..1199c81 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/test/iLBC_test.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/test/iLBC_test.c
@@ -47,12 +47,11 @@
int16_t data[BLOCKL_MAX];
uint8_t encoded_data[2 * ILBCNOOFWORDS_MAX];
int16_t decoded_data[BLOCKL_MAX];
- int len;
- short pli, mode;
+ int len_int, mode;
+ short pli;
int blockcount = 0;
int packetlosscount = 0;
- int frameLen;
- size_t len_i16s;
+ size_t frameLen, len, len_i16s;
int16_t speechType;
IlbcEncoderInstance *Enc_Inst;
IlbcDecoderInstance *Dec_Inst;
@@ -153,23 +152,23 @@
WebRtcIlbcfix_EncoderInit(Enc_Inst, mode);
WebRtcIlbcfix_DecoderInit(Dec_Inst, mode);
- frameLen = mode*8;
+ frameLen = (size_t)(mode*8);
/* loop over input blocks */
- while (((int16_t)fread(data,sizeof(int16_t),frameLen,ifileid))==
- frameLen) {
+ while (fread(data,sizeof(int16_t),frameLen,ifileid) == frameLen) {
blockcount++;
/* encoding */
fprintf(stderr, "--- Encoding block %i --- ",blockcount);
- len = WebRtcIlbcfix_Encode(Enc_Inst, data, (int16_t)frameLen, encoded_data);
- if (len < 0) {
+ len_int = WebRtcIlbcfix_Encode(Enc_Inst, data, frameLen, encoded_data);
+ if (len_int < 0) {
fprintf(stderr, "Error encoding\n");
exit(0);
}
+ len = (size_t)len_int;
fprintf(stderr, "\r");
/* write byte file */
@@ -204,12 +203,13 @@
fprintf(stderr, "--- Decoding block %i --- ",blockcount);
if (pli==1) {
- len=WebRtcIlbcfix_Decode(Dec_Inst, encoded_data,
- (int16_t)len, decoded_data,&speechType);
- if (len < 0) {
+ len_int=WebRtcIlbcfix_Decode(Dec_Inst, encoded_data,
+ len, decoded_data,&speechType);
+ if (len_int < 0) {
fprintf(stderr, "Error decoding\n");
exit(0);
}
+ len = (size_t)len_int;
} else {
len=WebRtcIlbcfix_DecodePlc(Dec_Inst, decoded_data, 1);
}
@@ -217,8 +217,7 @@
/* write output file */
- if (fwrite(decoded_data, sizeof(int16_t), len,
- ofileid) != (size_t)len) {
+ if (fwrite(decoded_data, sizeof(int16_t), len, ofileid) != len) {
return -1;
}
}
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/test/iLBC_testLib.c b/webrtc/modules/audio_coding/codecs/ilbc/test/iLBC_testLib.c
index b4e36b6..f14192c 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/test/iLBC_testLib.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/test/iLBC_testLib.c
@@ -41,15 +41,15 @@
{
FILE *ifileid,*efileid,*ofileid, *chfileid;
short encoded_data[55], data[240], speechType;
- int len;
- short mode, pli;
- size_t readlen;
+ int len_int, mode;
+ short pli;
+ size_t len, readlen;
int blockcount = 0;
IlbcEncoderInstance *Enc_Inst;
IlbcDecoderInstance *Dec_Inst;
#ifdef JUNK_DATA
- int i;
+ size_t i;
FILE *seedfile;
unsigned int random_seed = (unsigned int) time(NULL);//1196764538
#endif
@@ -136,11 +136,12 @@
/* encoding */
fprintf(stderr, "--- Encoding block %i --- ",blockcount);
- len=WebRtcIlbcfix_Encode(Enc_Inst, data, (short)readlen, encoded_data);
- if (len < 0) {
+ len_int=WebRtcIlbcfix_Encode(Enc_Inst, data, readlen, encoded_data);
+ if (len_int < 0) {
fprintf(stderr, "Error encoding\n");
exit(0);
}
+ len = (size_t)len_int;
fprintf(stderr, "\r");
#ifdef JUNK_DATA
@@ -174,12 +175,13 @@
/* decoding */
fprintf(stderr, "--- Decoding block %i --- ",blockcount);
if (pli==1) {
- len=WebRtcIlbcfix_Decode(Dec_Inst, encoded_data, (int16_t)len, data,
- &speechType);
- if (len < 0) {
+ len_int = WebRtcIlbcfix_Decode(Dec_Inst, encoded_data, len, data,
+ &speechType);
+ if (len_int < 0) {
fprintf(stderr, "Error decoding\n");
exit(0);
}
+ len = (size_t)len_int;
} else {
len=WebRtcIlbcfix_DecodePlc(Dec_Inst, data, 1);
}
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/window32_w32.c b/webrtc/modules/audio_coding/codecs/ilbc/window32_w32.c
index dbecc33..dc12a5a 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/window32_w32.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/window32_w32.c
@@ -26,9 +26,9 @@
int32_t *z, /* Output */
int32_t *x, /* Input (same domain as Output)*/
const int32_t *y, /* Q31 Window */
- int16_t N /* length to process */
+ size_t N /* length to process */
) {
- int16_t i;
+ size_t i;
int16_t x_low, x_hi, y_low, y_hi;
int16_t left_shifts;
int32_t temp;
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/window32_w32.h b/webrtc/modules/audio_coding/codecs/ilbc/window32_w32.h
index 4ee6fce..27ed1b6 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/window32_w32.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/window32_w32.h
@@ -29,7 +29,7 @@
int32_t *z, /* Output */
int32_t *x, /* Input (same domain as Output)*/
const int32_t *y, /* Q31 Window */
- int16_t N /* length to process */
+ size_t N /* length to process */
);
#endif
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/xcorr_coef.c b/webrtc/modules/audio_coding/codecs/ilbc/xcorr_coef.c
index 53d95bf..0d898c5 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/xcorr_coef.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/xcorr_coef.c
@@ -23,16 +23,16 @@
* crossCorr*crossCorr/(energy) criteria
*---------------------------------------------------------------*/
-int WebRtcIlbcfix_XcorrCoef(
+size_t WebRtcIlbcfix_XcorrCoef(
int16_t *target, /* (i) first array */
int16_t *regressor, /* (i) second array */
- int16_t subl, /* (i) dimension arrays */
- int16_t searchLen, /* (i) the search lenght */
- int16_t offset, /* (i) samples offset between arrays */
+ size_t subl, /* (i) dimension arrays */
+    size_t searchLen, /* (i) the search length */
+ size_t offset, /* (i) samples offset between arrays */
int16_t step /* (i) +1 or -1 */
){
- int k;
- int16_t maxlag;
+ size_t k;
+ size_t maxlag;
int16_t pos;
int16_t max;
int16_t crossCorrScale, Energyscale;
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/xcorr_coef.h b/webrtc/modules/audio_coding/codecs/ilbc/xcorr_coef.h
index 1f4c58d..9b81c0f 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/xcorr_coef.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/xcorr_coef.h
@@ -26,12 +26,12 @@
* crossCorr*crossCorr/(energy) criteria
*---------------------------------------------------------------*/
-int WebRtcIlbcfix_XcorrCoef(
+size_t WebRtcIlbcfix_XcorrCoef(
int16_t *target, /* (i) first array */
int16_t *regressor, /* (i) second array */
- int16_t subl, /* (i) dimension arrays */
- int16_t searchLen, /* (i) the search lenght */
- int16_t offset, /* (i) samples offset between arrays */
+ size_t subl, /* (i) dimension arrays */
+    size_t searchLen, /* (i) the search length */
+ size_t offset, /* (i) samples offset between arrays */
int16_t step /* (i) +1 or -1 */
);
diff --git a/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t.h b/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t.h
index 7093304..a2c43a6 100644
--- a/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t.h
+++ b/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t.h
@@ -55,8 +55,8 @@
int SampleRateHz() const override;
int NumChannels() const override;
size_t MaxEncodedBytes() const override;
- int Num10MsFramesInNextPacket() const override;
- int Max10MsFramesInAPacket() const override;
+ size_t Num10MsFramesInNextPacket() const override;
+ size_t Max10MsFramesInAPacket() const override;
int GetTargetBitrate() const override;
EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
const int16_t* audio,
@@ -94,7 +94,7 @@
~AudioDecoderIsacT() override;
bool HasDecodePlc() const override;
- int DecodePlc(int num_frames, int16_t* decoded) override;
+ size_t DecodePlc(size_t num_frames, int16_t* decoded) override;
int Init() override;
int IncomingPacket(const uint8_t* payload,
size_t payload_len,
diff --git a/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t_impl.h b/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t_impl.h
index ce70db4..93fbde9 100644
--- a/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t_impl.h
+++ b/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t_impl.h
@@ -123,14 +123,15 @@
}
template <typename T>
-int AudioEncoderIsacT<T>::Num10MsFramesInNextPacket() const {
+size_t AudioEncoderIsacT<T>::Num10MsFramesInNextPacket() const {
const int samples_in_next_packet = T::GetNewFrameLen(isac_state_);
- return rtc::CheckedDivExact(samples_in_next_packet,
- rtc::CheckedDivExact(SampleRateHz(), 100));
+ return static_cast<size_t>(
+ rtc::CheckedDivExact(samples_in_next_packet,
+ rtc::CheckedDivExact(SampleRateHz(), 100)));
}
template <typename T>
-int AudioEncoderIsacT<T>::Max10MsFramesInAPacket() const {
+size_t AudioEncoderIsacT<T>::Max10MsFramesInAPacket() const {
return 6; // iSAC puts at most 60 ms in a packet.
}
@@ -215,8 +216,7 @@
}
int16_t temp_type = 1; // Default is speech.
int ret =
- T::DecodeInternal(isac_state_, encoded, static_cast<int16_t>(encoded_len),
- decoded, &temp_type);
+ T::DecodeInternal(isac_state_, encoded, encoded_len, decoded, &temp_type);
*speech_type = ConvertSpeechType(temp_type);
return ret;
}
@@ -227,7 +227,7 @@
}
template <typename T>
-int AudioDecoderIsacT<T>::DecodePlc(int num_frames, int16_t* decoded) {
+size_t AudioDecoderIsacT<T>::DecodePlc(size_t num_frames, int16_t* decoded) {
return T::DecodePlc(isac_state_, decoded, num_frames);
}
@@ -243,7 +243,7 @@
uint32_t rtp_timestamp,
uint32_t arrival_timestamp) {
int ret = T::UpdateBwEstimate(
- isac_state_, payload, static_cast<int32_t>(payload_len),
+ isac_state_, payload, payload_len,
rtp_sequence_number, rtp_timestamp, arrival_timestamp);
if (bwinfo_) {
IsacBandwidthInfo bwinfo;
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/interface/audio_encoder_isacfix.h b/webrtc/modules/audio_coding/codecs/isac/fix/interface/audio_encoder_isacfix.h
index 9d51161..6c61915 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/interface/audio_encoder_isacfix.h
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/interface/audio_encoder_isacfix.h
@@ -40,14 +40,14 @@
}
static inline int DecodeInternal(instance_type* inst,
const uint8_t* encoded,
- int16_t len,
+ size_t len,
int16_t* decoded,
int16_t* speech_type) {
return WebRtcIsacfix_Decode(inst, encoded, len, decoded, speech_type);
}
- static inline int16_t DecodePlc(instance_type* inst,
- int16_t* decoded,
- int16_t num_lost_frames) {
+ static inline size_t DecodePlc(instance_type* inst,
+ int16_t* decoded,
+ size_t num_lost_frames) {
return WebRtcIsacfix_DecodePlc(inst, decoded, num_lost_frames);
}
static inline int16_t DecoderInit(instance_type* inst) {
@@ -104,7 +104,7 @@
}
static inline int16_t UpdateBwEstimate(instance_type* inst,
const uint8_t* encoded,
- int32_t packet_size,
+ size_t packet_size,
uint16_t rtp_seq_number,
uint32_t send_ts,
uint32_t arr_ts) {
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/interface/isacfix.h b/webrtc/modules/audio_coding/codecs/isac/fix/interface/isacfix.h
index 68ffe65..eec4a39 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/interface/isacfix.h
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/interface/isacfix.h
@@ -11,6 +11,8 @@
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_FIX_INTERFACE_ISACFIX_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_FIX_INTERFACE_ISACFIX_H_
+#include <stddef.h>
+
#include "webrtc/modules/audio_coding/codecs/isac/bandwidth_info.h"
#include "webrtc/typedefs.h"
@@ -189,7 +191,7 @@
* Input:
* - ISAC_main_inst : ISAC instance.
* - encoded : encoded ISAC frame(s).
- * - packet_size : size of the packet.
+ * - packet_size : size of the packet in bytes.
* - rtp_seq_number : the RTP number of the packet.
* - arr_ts : the arrival time of the packet (from NetEq)
* in samples.
@@ -200,7 +202,7 @@
int16_t WebRtcIsacfix_UpdateBwEstimate1(ISACFIX_MainStruct *ISAC_main_inst,
const uint8_t* encoded,
- int32_t packet_size,
+ size_t packet_size,
uint16_t rtp_seq_number,
uint32_t arr_ts);
@@ -212,7 +214,7 @@
* Input:
* - ISAC_main_inst : ISAC instance.
* - encoded : encoded ISAC frame(s).
- * - packet_size : size of the packet.
+ * - packet_size : size of the packet in bytes.
* - rtp_seq_number : the RTP number of the packet.
* - send_ts : the send time of the packet from RTP header,
* in samples.
@@ -225,7 +227,7 @@
int16_t WebRtcIsacfix_UpdateBwEstimate(ISACFIX_MainStruct *ISAC_main_inst,
const uint8_t* encoded,
- int32_t packet_size,
+ size_t packet_size,
uint16_t rtp_seq_number,
uint32_t send_ts,
uint32_t arr_ts);
@@ -251,7 +253,7 @@
int WebRtcIsacfix_Decode(ISACFIX_MainStruct *ISAC_main_inst,
const uint8_t* encoded,
- int16_t len,
+ size_t len,
int16_t *decoded,
int16_t *speechType);
@@ -280,7 +282,7 @@
#ifdef WEBRTC_ISAC_FIX_NB_CALLS_ENABLED
int WebRtcIsacfix_DecodeNb(ISACFIX_MainStruct *ISAC_main_inst,
const uint16_t *encoded,
- int16_t len,
+ size_t len,
int16_t *decoded,
int16_t *speechType);
#endif // WEBRTC_ISAC_FIX_NB_CALLS_ENABLED
@@ -303,14 +305,13 @@
* Output:
* - decoded : The decoded vector
*
- * Return value : >0 - number of samples in decoded PLC vector
- * -1 - Error
+ * Return value : Number of samples in decoded PLC vector
*/
#ifdef WEBRTC_ISAC_FIX_NB_CALLS_ENABLED
- int16_t WebRtcIsacfix_DecodePlcNb(ISACFIX_MainStruct *ISAC_main_inst,
- int16_t *decoded,
- int16_t noOfLostFrames);
+ size_t WebRtcIsacfix_DecodePlcNb(ISACFIX_MainStruct *ISAC_main_inst,
+ int16_t *decoded,
+ size_t noOfLostFrames);
#endif // WEBRTC_ISAC_FIX_NB_CALLS_ENABLED
@@ -332,13 +333,12 @@
* Output:
* - decoded : The decoded vector
*
- * Return value : >0 - number of samples in decoded PLC vector
- * -1 - Error
+ * Return value : Number of samples in decoded PLC vector
*/
- int16_t WebRtcIsacfix_DecodePlc(ISACFIX_MainStruct *ISAC_main_inst,
- int16_t *decoded,
- int16_t noOfLostFrames );
+ size_t WebRtcIsacfix_DecodePlc(ISACFIX_MainStruct *ISAC_main_inst,
+ int16_t *decoded,
+ size_t noOfLostFrames );
/****************************************************************************
@@ -356,8 +356,8 @@
*/
int16_t WebRtcIsacfix_ReadFrameLen(const uint8_t* encoded,
- int encoded_len_bytes,
- int16_t* frameLength);
+ size_t encoded_len_bytes,
+ size_t* frameLength);
/****************************************************************************
* WebRtcIsacfix_Control(...)
@@ -608,7 +608,7 @@
*/
int16_t WebRtcIsacfix_ReadBwIndex(const uint8_t* encoded,
- int encoded_len_bytes,
+ size_t encoded_len_bytes,
int16_t* rateIndex);
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.c b/webrtc/modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.c
index d876a3c..b074962 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.c
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.c
@@ -148,7 +148,7 @@
const int16_t frameSize,
const uint32_t sendTime,
const uint32_t arrivalTime,
- const int16_t pksize,
+ const size_t pksize,
const uint16_t Index)
{
uint16_t weight = 0;
@@ -379,7 +379,7 @@
/* compute inverse receiving rate for last packet, in Q19 */
numBytesInv = (uint16_t) WebRtcSpl_DivW32W16(
- 524288 + ((pksize + HEADER_SIZE) >> 1),
+ (int32_t)(524288 + ((pksize + HEADER_SIZE) >> 1)),
(int16_t)(pksize + HEADER_SIZE));
/* 8389 is ~ 1/128000 in Q30 */
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.h b/webrtc/modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.h
index 5d8ccbc..101ef62 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.h
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.h
@@ -62,7 +62,7 @@
const int16_t frameSize,
const uint32_t send_ts,
const uint32_t arr_ts,
- const int16_t pksize,
+ const size_t pksize,
const uint16_t Index);
/* Update receiving estimates. Used when we only receive BWE index, no iSAC data packet. */
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/source/codec.h b/webrtc/modules/audio_coding/codecs/isac/fix/source/codec.h
index d71decc..fdbb2fc 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/source/codec.h
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/source/codec.h
@@ -27,18 +27,18 @@
int WebRtcIsacfix_EstimateBandwidth(BwEstimatorstr* bwest_str,
Bitstr_dec* streamdata,
- int32_t packet_size,
+ size_t packet_size,
uint16_t rtp_seq_number,
uint32_t send_ts,
uint32_t arr_ts);
int WebRtcIsacfix_DecodeImpl(int16_t* signal_out16,
IsacFixDecoderInstance* ISACdec_obj,
- int16_t* current_framesamples);
+ size_t* current_framesamples);
void WebRtcIsacfix_DecodePlcImpl(int16_t* decoded,
IsacFixDecoderInstance* ISACdec_obj,
- int16_t* current_framesample );
+ size_t* current_framesample );
int WebRtcIsacfix_EncodeImpl(int16_t* in,
IsacFixEncoderInstance* ISACenc_obj,
@@ -141,7 +141,7 @@
/* normalized lattice filters */
-void WebRtcIsacfix_NormLatticeFilterMa(int16_t orderCoef,
+void WebRtcIsacfix_NormLatticeFilterMa(size_t orderCoef,
int32_t* stateGQ15,
int16_t* lat_inQ0,
int16_t* filt_coefQ15,
@@ -149,7 +149,7 @@
int16_t lo_hi,
int16_t* lat_outQ9);
-void WebRtcIsacfix_NormLatticeFilterAr(int16_t orderCoef,
+void WebRtcIsacfix_NormLatticeFilterAr(size_t orderCoef,
int16_t* stateGQ0,
int32_t* lat_inQ25,
int16_t* filt_coefQ15,
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/source/decode.c b/webrtc/modules/audio_coding/codecs/isac/fix/source/decode.c
index d0c59d6..e3de437 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/source/decode.c
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/source/decode.c
@@ -29,7 +29,7 @@
int WebRtcIsacfix_DecodeImpl(int16_t* signal_out16,
IsacFixDecoderInstance* ISACdec_obj,
- int16_t* current_framesamples)
+ size_t* current_framesamples)
{
int k;
int err;
@@ -58,9 +58,9 @@
int16_t gainQ13;
- int16_t frame_nb; /* counter */
- int16_t frame_mode; /* 0 for 30ms, 1 for 60ms */
- static const int16_t kProcessedSamples = 480; /* 480 (for both 30, 60 ms) */
+ size_t frame_nb; /* counter */
+ size_t frame_mode; /* 0 for 30ms, 1 for 60ms */
+ static const size_t kProcessedSamples = 480; /* 480 (for both 30, 60 ms) */
/* PLC */
int16_t overlapWin[ 240 ];
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/source/decode_bwe.c b/webrtc/modules/audio_coding/codecs/isac/fix/source/decode_bwe.c
index b1f5d10..316f59a 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/source/decode_bwe.c
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/source/decode_bwe.c
@@ -26,13 +26,13 @@
int WebRtcIsacfix_EstimateBandwidth(BwEstimatorstr *bwest_str,
Bitstr_dec *streamdata,
- int32_t packet_size,
+ size_t packet_size,
uint16_t rtp_seq_number,
uint32_t send_ts,
uint32_t arr_ts)
{
int16_t index;
- int16_t frame_samples;
+ size_t frame_samples;
int err;
/* decode framelength */
@@ -53,10 +53,10 @@
err = WebRtcIsacfix_UpdateUplinkBwImpl(
bwest_str,
rtp_seq_number,
- frame_samples * 1000 / FS,
+ (int16_t)(frame_samples * 1000 / FS),
send_ts,
arr_ts,
- (int16_t) packet_size, /* in bytes */
+ packet_size, /* in bytes */
index);
/* error check */
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/source/decode_plc.c b/webrtc/modules/audio_coding/codecs/isac/fix/source/decode_plc.c
index c3a89c3..e907f2b6 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/source/decode_plc.c
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/source/decode_plc.c
@@ -177,11 +177,12 @@
static void LinearResampler(int16_t* in,
int16_t* out,
- int16_t lenIn,
- int16_t lenOut)
+ size_t lenIn,
+ size_t lenOut)
{
- int32_t n = (lenIn - 1) * RESAMP_RES;
- int16_t resOut, i, j, relativePos, diff; /* */
+ size_t n = (lenIn - 1) * RESAMP_RES;
+ int16_t resOut, relativePos, diff; /* */
+ size_t i, j;
uint16_t udiff;
if( lenIn == lenOut )
@@ -190,7 +191,7 @@
return;
}
- resOut = WebRtcSpl_DivW32W16ResW16( n, (int16_t)(lenOut-1) );
+ resOut = WebRtcSpl_DivW32W16ResW16( (int32_t)n, (int16_t)(lenOut-1) );
out[0] = in[0];
for( i = 1, j = 0, relativePos = 0; i < lenOut; i++ )
@@ -235,7 +236,7 @@
void WebRtcIsacfix_DecodePlcImpl(int16_t *signal_out16,
IsacFixDecoderInstance *ISACdec_obj,
- int16_t *current_framesamples )
+ size_t *current_framesamples )
{
int subframecnt;
@@ -260,12 +261,14 @@
int16_t myDecayRate;
/* ---------- PLC variables ------------ */
- int16_t lag0, i, k, noiseIndex;
+ size_t lag0, i, k;
+ int16_t noiseIndex;
int16_t stretchPitchLP[PITCH_MAX_LAG + 10], stretchPitchLP1[PITCH_MAX_LAG + 10];
int32_t gain_lo_hiQ17[2*SUBFRAMES];
- int16_t nLP, pLP, wNoisyLP, wPriodicLP, tmp16, minIdx;
+ int16_t nLP, pLP, wNoisyLP, wPriodicLP, tmp16;
+ size_t minIdx;
int32_t nHP, pHP, wNoisyHP, wPriodicHP, corr, minCorr, maxCoeff;
int16_t noise1, rshift;
@@ -300,7 +303,7 @@
- lag0 = ((ISACdec_obj->plcstr_obj.lastPitchLag_Q7 + 64) >> 7) + 1;
+ lag0 = (size_t)(((ISACdec_obj->plcstr_obj.lastPitchLag_Q7 + 64) >> 7) + 1);
if( (ISACdec_obj->plcstr_obj).used != PLC_WAS_USED )
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/source/entropy_coding.c b/webrtc/modules/audio_coding/codecs/isac/fix/source/entropy_coding.c
index 5f6e6ac..2379ba5 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/source/entropy_coding.c
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/source/entropy_coding.c
@@ -1870,7 +1870,7 @@
int WebRtcIsacfix_DecodeFrameLen(Bitstr_dec *streamdata,
- int16_t *framesamples)
+ size_t *framesamples)
{
int err;
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/source/entropy_coding.h b/webrtc/modules/audio_coding/codecs/isac/fix/source/entropy_coding.h
index e4489df..2c8c923 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/source/entropy_coding.h
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/source/entropy_coding.h
@@ -92,7 +92,7 @@
int16_t *PitchLagQ7);
int WebRtcIsacfix_DecodeFrameLen(Bitstr_dec *streamdata,
- int16_t *framelength);
+ size_t *framelength);
int WebRtcIsacfix_EncodeFrameLen(int16_t framelength,
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/source/isacfix.c b/webrtc/modules/audio_coding/codecs/isac/fix/source/isacfix.c
index 9b61d60..4a663d1 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/source/isacfix.c
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/source/isacfix.c
@@ -38,7 +38,7 @@
/* This method assumes that |stream_size_bytes| is in valid range,
* i.e. >= 0 && <= STREAM_MAXW16_60MS
*/
-static void InitializeDecoderBitstream(int stream_size_bytes,
+static void InitializeDecoderBitstream(size_t stream_size_bytes,
Bitstr_dec* bitstream) {
bitstream->W_upper = 0xFFFFFFFF;
bitstream->streamval = 0;
@@ -621,20 +621,20 @@
int16_t WebRtcIsacfix_UpdateBwEstimate1(ISACFIX_MainStruct *ISAC_main_inst,
const uint8_t* encoded,
- int32_t packet_size,
+ size_t packet_size,
uint16_t rtp_seq_number,
uint32_t arr_ts)
{
ISACFIX_SubStruct *ISAC_inst;
Bitstr_dec streamdata;
int16_t err;
- const int kRequiredEncodedLenBytes = 10;
+ const size_t kRequiredEncodedLenBytes = 10;
/* typecast pointer to real structure */
ISAC_inst = (ISACFIX_SubStruct *)ISAC_main_inst;
/* Sanity check of packet length */
- if (packet_size <= 0) {
+ if (packet_size == 0) {
/* return error code if the packet length is null or less */
ISAC_inst->errorcode = ISAC_EMPTY_PACKET;
return -1;
@@ -693,7 +693,7 @@
int16_t WebRtcIsacfix_UpdateBwEstimate(ISACFIX_MainStruct *ISAC_main_inst,
const uint8_t* encoded,
- int32_t packet_size,
+ size_t packet_size,
uint16_t rtp_seq_number,
uint32_t send_ts,
uint32_t arr_ts)
@@ -701,13 +701,13 @@
ISACFIX_SubStruct *ISAC_inst;
Bitstr_dec streamdata;
int16_t err;
- const int kRequiredEncodedLenBytes = 10;
+ const size_t kRequiredEncodedLenBytes = 10;
/* typecast pointer to real structure */
ISAC_inst = (ISACFIX_SubStruct *)ISAC_main_inst;
/* Sanity check of packet length */
- if (packet_size <= 0) {
+ if (packet_size == 0) {
/* return error code if the packet length is null or less */
ISAC_inst->errorcode = ISAC_EMPTY_PACKET;
return -1;
@@ -770,15 +770,16 @@
int WebRtcIsacfix_Decode(ISACFIX_MainStruct* ISAC_main_inst,
const uint8_t* encoded,
- int16_t len,
+ size_t len,
int16_t* decoded,
int16_t* speechType)
{
ISACFIX_SubStruct *ISAC_inst;
/* number of samples (480 or 960), output from decoder */
/* that were actually used in the encoder/decoder (determined on the fly) */
- int16_t number_of_samples;
- int declen = 0;
+ size_t number_of_samples;
+ int declen_int = 0;
+ size_t declen;
/* typecast pointer to real structure */
ISAC_inst = (ISACFIX_SubStruct *)ISAC_main_inst;
@@ -790,7 +791,7 @@
}
/* Sanity check of packet length */
- if (len <= 0) {
+ if (len == 0) {
/* return error code if the packet length is null or less */
ISAC_inst->errorcode = ISAC_EMPTY_PACKET;
return -1;
@@ -807,14 +808,15 @@
/* added for NetEq purposes (VAD/DTX related) */
*speechType=1;
- declen = WebRtcIsacfix_DecodeImpl(decoded, &ISAC_inst->ISACdec_obj,
- &number_of_samples);
- if (declen < 0) {
+ declen_int = WebRtcIsacfix_DecodeImpl(decoded, &ISAC_inst->ISACdec_obj,
+ &number_of_samples);
+ if (declen_int < 0) {
/* Some error inside the decoder */
- ISAC_inst->errorcode = -(int16_t)declen;
+ ISAC_inst->errorcode = -(int16_t)declen_int;
memset(decoded, 0, sizeof(int16_t) * MAX_FRAMESAMPLES);
return -1;
}
+ declen = (size_t)declen_int;
/* error check */
@@ -836,7 +838,7 @@
}
}
- return number_of_samples;
+ return (int)number_of_samples;
}
@@ -865,17 +867,18 @@
*/
#ifdef WEBRTC_ISAC_FIX_NB_CALLS_ENABLED
-int WebRtcIsacfix_DecodeNb(ISACFIX_MainStruct *ISAC_main_inst,
- const uint16_t *encoded,
- int16_t len,
- int16_t *decoded,
- int16_t *speechType)
+int WebRtcIsacfix_DecodeNb(ISACFIX_MainStruct* ISAC_main_inst,
+ const uint16_t* encoded,
+ size_t len,
+ int16_t* decoded,
+ int16_t* speechType)
{
ISACFIX_SubStruct *ISAC_inst;
/* twice the number of samples (480 or 960), output from decoder */
/* that were actually used in the encoder/decoder (determined on the fly) */
- int16_t number_of_samples;
- int declen = 0;
+ size_t number_of_samples;
+ int declen_int = 0;
+ size_t declen;
int16_t dummy[FRAMESAMPLES/2];
@@ -888,7 +891,7 @@
return (-1);
}
- if (len <= 0) {
+ if (len == 0) {
/* return error code if the packet length is null or less */
ISAC_inst->errorcode = ISAC_EMPTY_PACKET;
return -1;
@@ -905,14 +908,15 @@
/* added for NetEq purposes (VAD/DTX related) */
*speechType=1;
- declen = WebRtcIsacfix_DecodeImpl(decoded, &ISAC_inst->ISACdec_obj,
- &number_of_samples);
- if (declen < 0) {
+ declen_int = WebRtcIsacfix_DecodeImpl(decoded, &ISAC_inst->ISACdec_obj,
+ &number_of_samples);
+ if (declen_int < 0) {
/* Some error inside the decoder */
- ISAC_inst->errorcode = -(int16_t)declen;
+ ISAC_inst->errorcode = -(int16_t)declen_int;
memset(decoded, 0, sizeof(int16_t) * FRAMESAMPLES);
return -1;
}
+ declen = (size_t)declen_int;
/* error check */
@@ -941,7 +945,7 @@
dummy, &ISAC_inst->ISACdec_obj.decimatorstr_obj);
}
- return number_of_samples/2;
+ return (int)(number_of_samples / 2);
}
#endif /* WEBRTC_ISAC_FIX_NB_CALLS_ENABLED */
@@ -962,16 +966,15 @@
* Output:
* - decoded : The decoded vector
*
- * Return value : >0 - number of samples in decoded PLC vector
- * -1 - Error
+ * Return value : Number of samples in decoded PLC vector
*/
#ifdef WEBRTC_ISAC_FIX_NB_CALLS_ENABLED
-int16_t WebRtcIsacfix_DecodePlcNb(ISACFIX_MainStruct *ISAC_main_inst,
- int16_t *decoded,
- int16_t noOfLostFrames )
+size_t WebRtcIsacfix_DecodePlcNb(ISACFIX_MainStruct* ISAC_main_inst,
+ int16_t* decoded,
+ size_t noOfLostFrames )
{
- int16_t no_of_samples, declen, k, ok;
+ size_t no_of_samples, declen, k;
int16_t outframeNB[FRAMESAMPLES];
int16_t outframeWB[FRAMESAMPLES];
int16_t dummy[FRAMESAMPLES/2];
@@ -1028,16 +1031,15 @@
* Output:
* - decoded : The decoded vector
*
- * Return value : >0 - number of samples in decoded PLC vector
- * -1 - Error
+ * Return value : Number of samples in decoded PLC vector
*/
-int16_t WebRtcIsacfix_DecodePlc(ISACFIX_MainStruct *ISAC_main_inst,
- int16_t *decoded,
- int16_t noOfLostFrames)
+size_t WebRtcIsacfix_DecodePlc(ISACFIX_MainStruct* ISAC_main_inst,
+ int16_t* decoded,
+ size_t noOfLostFrames)
{
- int16_t no_of_samples, declen, k;
+ size_t no_of_samples, declen, k;
int16_t outframe16[MAX_FRAMESAMPLES];
ISACFIX_SubStruct *ISAC_inst;
@@ -1272,12 +1274,12 @@
*/
int16_t WebRtcIsacfix_ReadFrameLen(const uint8_t* encoded,
- int encoded_len_bytes,
- int16_t* frameLength)
+ size_t encoded_len_bytes,
+ size_t* frameLength)
{
Bitstr_dec streamdata;
int16_t err;
- const int kRequiredEncodedLenBytes = 10;
+ const size_t kRequiredEncodedLenBytes = 10;
if (encoded_len_bytes < kRequiredEncodedLenBytes) {
return -1;
@@ -1311,12 +1313,12 @@
*/
int16_t WebRtcIsacfix_ReadBwIndex(const uint8_t* encoded,
- int encoded_len_bytes,
+ size_t encoded_len_bytes,
int16_t* rateIndex)
{
Bitstr_dec streamdata;
int16_t err;
- const int kRequiredEncodedLenBytes = 10;
+ const size_t kRequiredEncodedLenBytes = 10;
if (encoded_len_bytes < kRequiredEncodedLenBytes) {
return -1;
@@ -1327,7 +1329,7 @@
read_be16(encoded, kRequiredEncodedLenBytes, streamdata.stream);
/* decode frame length, needed to get to the rateIndex in the bitstream */
- int16_t frameLength;
+ size_t frameLength;
err = WebRtcIsacfix_DecodeFrameLen(&streamdata, &frameLength);
if (err<0) // error check
return err;
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/source/lattice.c b/webrtc/modules/audio_coding/codecs/isac/fix/source/lattice.c
index 13858d7..22224a8 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/source/lattice.c
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/source/lattice.c
@@ -43,7 +43,7 @@
int16_t* ar_f_Q0,
int16_t* cth_Q15,
int16_t* sth_Q15,
- int16_t order_coef);
+ size_t order_coef);
/* Inner loop used for function WebRtcIsacfix_NormLatticeFilterMa(). It does:
for 0 <= n < HALF_SUBFRAMELEN - 1:
@@ -86,7 +86,7 @@
/* filter the signal using normalized lattice filter */
/* MA filter */
-void WebRtcIsacfix_NormLatticeFilterMa(int16_t orderCoef,
+void WebRtcIsacfix_NormLatticeFilterMa(size_t orderCoef,
int32_t *stateGQ15,
int16_t *lat_inQ0,
int16_t *filt_coefQ15,
@@ -97,9 +97,10 @@
int16_t sthQ15[MAX_AR_MODEL_ORDER];
int16_t cthQ15[MAX_AR_MODEL_ORDER];
- int u, i, k, n;
+ int u, n;
+ size_t i, k;
int16_t temp2,temp3;
- int16_t ord_1 = orderCoef+1;
+ size_t ord_1 = orderCoef+1;
int32_t inv_cthQ16[MAX_AR_MODEL_ORDER];
int32_t gain32, fQtmp;
@@ -210,7 +211,7 @@
/* ----------------AR filter-------------------------*/
/* filter the signal using normalized lattice filter */
-void WebRtcIsacfix_NormLatticeFilterAr(int16_t orderCoef,
+void WebRtcIsacfix_NormLatticeFilterAr(size_t orderCoef,
int16_t *stateGQ0,
int32_t *lat_inQ25,
int16_t *filt_coefQ15,
@@ -218,7 +219,8 @@
int16_t lo_hi,
int16_t *lat_outQ0)
{
- int ii, n, k, i, u;
+ size_t ii, k, i;
+ int n, u;
int16_t sthQ15[MAX_AR_MODEL_ORDER];
int16_t cthQ15[MAX_AR_MODEL_ORDER];
int32_t tmp32;
@@ -234,7 +236,7 @@
int16_t sh;
int16_t temp2,temp3;
- int16_t ord_1 = orderCoef+1;
+ size_t ord_1 = orderCoef+1;
for (u=0;u<SUBFRAMES;u++)
{
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/source/lattice_c.c b/webrtc/modules/audio_coding/codecs/isac/fix/source/lattice_c.c
index 43a1579..40c3bf8 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/source/lattice_c.c
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/source/lattice_c.c
@@ -25,11 +25,11 @@
int16_t* ar_f_Q0, // Input samples
int16_t* cth_Q15, // Filter coefficients
int16_t* sth_Q15, // Filter coefficients
- int16_t order_coef) { // order of the filter
+ size_t order_coef) { // order of the filter
int n = 0;
for (n = 0; n < HALF_SUBFRAMELEN - 1; n++) {
- int k = 0;
+ size_t k = 0;
int16_t tmpAR = 0;
int32_t tmp32 = 0;
int32_t tmp32_2 = 0;
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/source/lattice_mips.c b/webrtc/modules/audio_coding/codecs/isac/fix/source/lattice_mips.c
index c596922..7197b15 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/source/lattice_mips.c
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/source/lattice_mips.c
@@ -17,11 +17,11 @@
int16_t* ar_f_Q0, // Input samples
int16_t* cth_Q15, // Filter coefficients
int16_t* sth_Q15, // Filter coefficients
- int16_t order_coef) { // order of the filter
+ size_t order_coef) { // order of the filter
int n = 0;
for (n = 0; n < HALF_SUBFRAMELEN - 1; n++) {
- int count = order_coef - 1;
+ int count = (int)(order_coef - 1);
int offset;
#if !defined(MIPS_DSP_R1_LE)
int16_t* tmp_cth;
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h b/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h
index da401e5..40f15c4 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h
@@ -39,7 +39,7 @@
void WebRtcIsacfix_PitchFilterCore(int loopNumber,
int16_t gain,
- int index,
+ size_t index,
int16_t sign,
int16_t* inputState,
int16_t* outputBuff2,
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter.c b/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter.c
index c787d6e..d73a429 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter.c
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter.c
@@ -34,8 +34,8 @@
{ 271, -743, 1570, -3320, 12963, 7301, -2292, 953, -325}
};
-static __inline int32_t CalcLrIntQ(int32_t fixVal,
- int16_t qDomain) {
+static __inline size_t CalcLrIntQ(int16_t fixVal,
+ int16_t qDomain) {
int32_t roundVal = 1 << (qDomain - 1);
return (fixVal + roundVal) >> qDomain;
@@ -55,7 +55,7 @@
const int16_t Gain = 21299; // 1.3 in Q14
int16_t oldLagQ7;
int16_t oldGainQ12, lagdeltaQ7, curLagQ7, gaindeltaQ12, curGainQ12;
- int indW32 = 0, frcQQ = 0;
+ size_t indW32 = 0, frcQQ = 0;
const int16_t* fracoeffQQ = NULL;
// Assumptions in ARM assembly for WebRtcIsacfix_PitchFilterCoreARM().
@@ -141,13 +141,15 @@
PitchFiltstr* pfp,
int16_t* lagsQ7,
int16_t* gainsQ12) {
- int k, n, m, ind, pos, pos3QQ;
+ int k, n, m;
+ size_t ind, pos, pos3QQ;
int16_t ubufQQ[PITCH_INTBUFFSIZE];
int16_t oldLagQ7, lagdeltaQ7, curLagQ7;
const int16_t* fracoeffQQ = NULL;
int16_t scale;
- int16_t cnt = 0, frcQQ, indW16 = 0, tmpW16;
+ int16_t cnt = 0, tmpW16;
+ size_t frcQQ, indW16 = 0;
int32_t tmpW32, tmp2W32, csum1QQ, esumxQQ;
// Set up buffer and states.
@@ -179,7 +181,7 @@
for (cnt = 0; cnt < kSegments; cnt++) {
// Update parameters for each segment.
curLagQ7 += lagdeltaQ7;
- indW16 = (int16_t)CalcLrIntQ(curLagQ7, 7);
+ indW16 = CalcLrIntQ(curLagQ7, 7);
frcQQ = ((indW16 << 7) + 64 - curLagQ7) >> 4;
if (frcQQ == PITCH_FRACS) {
@@ -202,7 +204,7 @@
tmp2W32 = WEBRTC_SPL_MUL_16_32_RSFT14(indatQ0[ind], tmpW32);
tmpW32 += 8192;
- tmpW16 = (int16_t)(tmpW32 >> 14);
+ tmpW16 = tmpW32 >> 14;
tmpW32 = tmpW16 * tmpW16;
if ((tmp2W32 > 1073700000) || (csum1QQ > 1073700000) ||
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter_armv6.S b/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter_armv6.S
index 57796b0..10b9579 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter_armv6.S
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter_armv6.S
@@ -21,7 +21,7 @@
@ void WebRtcIsacfix_PitchFilterCore(int loopNumber,
@ int16_t gain,
-@ int index,
+@ size_t index,
@ int16_t sign,
@ int16_t* inputState,
@ int16_t* outputBuf2,
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter_c.c b/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter_c.c
index 5c95678..366eef0 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter_c.c
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter_c.c
@@ -18,7 +18,7 @@
void WebRtcIsacfix_PitchFilterCore(int loopNumber,
int16_t gain,
- int index,
+ size_t index,
int16_t sign,
int16_t* inputState,
int16_t* outputBuf2,
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter_mips.c b/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter_mips.c
index 8334f7e..0f390b8 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter_mips.c
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter_mips.c
@@ -12,7 +12,7 @@
void WebRtcIsacfix_PitchFilterCore(int loopNumber,
int16_t gain,
- int index,
+ size_t index,
int16_t sign,
int16_t* inputState,
int16_t* outputBuf2,
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/source/structs.h b/webrtc/modules/audio_coding/codecs/isac/fix/source/structs.h
index 5abbd7a..278af75 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/source/structs.h
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/source/structs.h
@@ -34,7 +34,7 @@
int16_t full; /* 0 - first byte in memory filled, second empty*/
/* 1 - both bytes are empty (we just filled the previous memory */
- int stream_size; /* The size of stream. */
+ size_t stream_size; /* The size of stream in bytes. */
} Bitstr_dec;
/* Bitstream struct for encoder */
@@ -178,8 +178,8 @@
int16_t pitchCycles;
int16_t A;
int16_t B;
- int16_t pitchIndex;
- int16_t stretchLag;
+ size_t pitchIndex;
+ size_t stretchLag;
int16_t *prevPitchLP; // [ FRAMESAMPLES/2 ]; saved 240
int16_t seed;
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/test/isac_speed_test.cc b/webrtc/modules/audio_coding/codecs/isac/fix/test/isac_speed_test.cc
index 8f073ad..fc7588d 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/test/isac_speed_test.cc
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/test/isac_speed_test.cc
@@ -26,8 +26,8 @@
void SetUp() override;
void TearDown() override;
virtual float EncodeABlock(int16_t* in_data, uint8_t* bit_stream,
- int max_bytes, int* encoded_bytes);
- virtual float DecodeABlock(const uint8_t* bit_stream, int encoded_bytes,
+ size_t max_bytes, size_t* encoded_bytes);
+ virtual float DecodeABlock(const uint8_t* bit_stream, size_t encoded_bytes,
int16_t* out_data);
ISACFIX_MainStruct *ISACFIX_main_inst_;
};
@@ -43,7 +43,7 @@
AudioCodecSpeedTest::SetUp();
// Check whether the allocated buffer for the bit stream is large enough.
- EXPECT_GE(max_bytes_, STREAM_MAXW16_60MS);
+ EXPECT_GE(max_bytes_, static_cast<size_t>(STREAM_MAXW16_60MS));
// Create encoder memory.
EXPECT_EQ(0, WebRtcIsacfix_Create(&ISACFIX_main_inst_));
@@ -61,7 +61,7 @@
}
float IsacSpeedTest::EncodeABlock(int16_t* in_data, uint8_t* bit_stream,
- int max_bytes, int* encoded_bytes) {
+ size_t max_bytes, size_t* encoded_bytes) {
// ISAC takes 10 ms everycall
const int subblocks = block_duration_ms_ / 10;
const int subblock_length = 10 * input_sampling_khz_;
@@ -78,13 +78,13 @@
EXPECT_EQ(0, value);
}
clocks = clock() - clocks;
- *encoded_bytes = value;
+ *encoded_bytes = static_cast<size_t>(value);
assert(*encoded_bytes <= max_bytes);
return 1000.0 * clocks / CLOCKS_PER_SEC;
}
float IsacSpeedTest::DecodeABlock(const uint8_t* bit_stream,
- int encoded_bytes,
+ size_t encoded_bytes,
int16_t* out_data) {
int value;
int16_t audio_type;
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/test/kenny.cc b/webrtc/modules/audio_coding/codecs/isac/fix/test/kenny.cc
index 2628f1f..6a947c8 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/test/kenny.cc
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/test/kenny.cc
@@ -50,7 +50,7 @@
} BottleNeckModel;
void get_arrival_time(int current_framesamples, /* samples */
- int packet_size, /* bytes */
+ size_t packet_size, /* bytes */
int bottleneck, /* excluding headers; bits/s */
BottleNeckModel *BN_data)
{
@@ -99,7 +99,8 @@
FILE *inp, *outp, *f_bn, *outbits;
int endfile;
- int i, errtype, h = 0, k, packetLossPercent = 0;
+ size_t i;
+ int errtype, h = 0, k, packetLossPercent = 0;
int16_t CodingMode;
int16_t bottleneck;
int framesize = 30; /* ms */
@@ -108,14 +109,15 @@
/* Runtime statistics */
double starttime, runtime, length_file;
- int16_t stream_len = 0;
+ int stream_len_int = 0;
+ size_t stream_len = 0;
int16_t framecnt;
int declen = 0;
int16_t shortdata[FRAMESAMPLES_10ms];
int16_t decoded[MAX_FRAMESAMPLES];
uint16_t streamdata[500];
int16_t speechType[1];
- int16_t prevFrameSize = 1;
+ size_t prevFrameSize = 1;
int16_t rateBPS = 0;
int16_t fixedFL = 0;
int16_t payloadSize = 0;
@@ -233,7 +235,7 @@
CodingMode = 0;
testNum = 0;
testCE = 0;
- for (i = 1; i + 2 < argc; i++) {
+ for (i = 1; i + 2 < static_cast<size_t>(argc); i++) {
/* Instantaneous mode */
if (!strcmp ("-I", argv[i])) {
printf("\nInstantaneous BottleNeck\n");
@@ -565,19 +567,19 @@
short bwe;
/* Encode */
- stream_len = WebRtcIsacfix_Encode(ISAC_main_inst,
- shortdata,
- (uint8_t*)streamdata);
+ stream_len_int = WebRtcIsacfix_Encode(ISAC_main_inst,
+ shortdata,
+ (uint8_t*)streamdata);
/* If packet is ready, and CE testing, call the different API
functions from the internal API. */
- if (stream_len>0) {
+ if (stream_len_int>0) {
if (testCE == 1) {
err = WebRtcIsacfix_ReadBwIndex(
reinterpret_cast<const uint8_t*>(streamdata),
- stream_len,
+ static_cast<size_t>(stream_len_int),
&bwe);
- stream_len = WebRtcIsacfix_GetNewBitStream(
+ stream_len_int = WebRtcIsacfix_GetNewBitStream(
ISAC_main_inst,
bwe,
scale,
@@ -606,11 +608,11 @@
}
} else {
#ifdef WEBRTC_ISAC_FIX_NB_CALLS_ENABLED
- stream_len = WebRtcIsacfix_EncodeNb(ISAC_main_inst,
- shortdata,
- streamdata);
+ stream_len_int = WebRtcIsacfix_EncodeNb(ISAC_main_inst,
+ shortdata,
+ streamdata);
#else
- stream_len = -1;
+ stream_len_int = -1;
#endif
}
}
@@ -619,13 +621,14 @@
break;
}
- if (stream_len < 0 || err < 0) {
+ if (stream_len_int < 0 || err < 0) {
/* exit if returned with error */
errtype=WebRtcIsacfix_GetErrorCode(ISAC_main_inst);
printf("\nError in encoder: %d.\n", errtype);
} else {
+ stream_len = static_cast<size_t>(stream_len_int);
if (fwrite(streamdata, sizeof(char), stream_len, outbits) !=
- (size_t)stream_len) {
+ stream_len) {
return -1;
}
}
@@ -731,12 +734,12 @@
/* iSAC decoding */
if( lostFrame && framecnt > 0) {
if (nbTest !=2) {
- declen =
- WebRtcIsacfix_DecodePlc(ISAC_main_inst, decoded, prevFrameSize);
+ declen = static_cast<int>(
+ WebRtcIsacfix_DecodePlc(ISAC_main_inst, decoded, prevFrameSize));
} else {
#ifdef WEBRTC_ISAC_FIX_NB_CALLS_ENABLED
- declen = WebRtcIsacfix_DecodePlcNb(
- ISAC_main_inst, decoded, prevFrameSize);
+ declen = static_cast<int>(WebRtcIsacfix_DecodePlcNb(
+ ISAC_main_inst, decoded, prevFrameSize));
#else
declen = -1;
#endif
@@ -744,7 +747,7 @@
lostPackets++;
} else {
if (nbTest !=2 ) {
- short FL;
+ size_t FL;
/* Call getFramelen, only used here for function test */
err = WebRtcIsacfix_ReadFrameLen(
reinterpret_cast<const uint8_t*>(streamdata), stream_len, &FL);
@@ -755,11 +758,11 @@
decoded,
speechType);
/* Error check */
- if (err < 0 || declen < 0 || FL != declen) {
+ if (err < 0 || declen < 0 || FL != static_cast<size_t>(declen)) {
errtype=WebRtcIsacfix_GetErrorCode(ISAC_main_inst);
printf("\nError in decode_B/or getFrameLen: %d.\n", errtype);
}
- prevFrameSize = declen/480;
+ prevFrameSize = static_cast<size_t>(declen/480);
} else {
#ifdef WEBRTC_ISAC_FIX_NB_CALLS_ENABLED
@@ -768,7 +771,7 @@
#else
declen = -1;
#endif
- prevFrameSize = static_cast<int16_t>(declen / 240);
+ prevFrameSize = static_cast<size_t>(declen / 240);
}
}
@@ -791,7 +794,7 @@
framecnt++;
totalsmpls += declen;
- totalbits += 8 * stream_len;
+ totalbits += static_cast<int>(8 * stream_len);
/* Error test number 10, garbage data */
if (testNum == 10) {
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/test/test_iSACfixfloat.c b/webrtc/modules/audio_coding/codecs/isac/fix/test/test_iSACfixfloat.c
index 71bd272..b82af1c 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/test/test_iSACfixfloat.c
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/test/test_iSACfixfloat.c
@@ -21,6 +21,7 @@
/* include API */
#include "isac.h"
#include "isacfix.h"
+#include "webrtc/base/format_macros.h"
/* max number of samples per frame (= 60 ms frame) */
#define MAX_FRAMESAMPLES 960
@@ -57,7 +58,7 @@
} BottleNeckModel;
void get_arrival_time(int current_framesamples, /* samples */
- int packet_size, /* bytes */
+ size_t packet_size, /* bytes */
int bottleneck, /* excluding headers; bits/s */
BottleNeckModel* BN_data) {
const int HeaderSize = 35;
@@ -98,7 +99,7 @@
double runtime;
double length_file;
- int16_t stream_len = 0;
+ size_t stream_len = 0;
int declen;
int16_t shortdata[FRAMESAMPLES_10ms];
@@ -114,7 +115,7 @@
#ifdef _DEBUG
FILE* fy;
double kbps;
- int totalbits = 0;
+ size_t totalbits = 0;
int totalsmpls = 0;
#endif /* _DEBUG */
@@ -392,6 +393,8 @@
while (endfile == 0) {
cur_framesmpls = 0;
while (1) {
+ int stream_len_int;
+
/* Read 10 ms speech block */
if (nbTest != 1)
endfile = readframe(shortdata, inp, FRAMESAMPLES_10ms);
@@ -401,9 +404,9 @@
/* iSAC encoding */
if (mode == 0 || mode == 1) {
- stream_len =
+ stream_len_int =
WebRtcIsac_Encode(ISAC_main_inst, shortdata, (uint8_t*)streamdata);
- if (stream_len < 0) {
+ if (stream_len_int < 0) {
/* exit if returned with error */
errtype = WebRtcIsac_GetErrorCode(ISAC_main_inst);
printf("\n\nError in encoder: %d.\n\n", errtype);
@@ -412,20 +415,21 @@
} else if (mode == 2 || mode == 3) {
/* iSAC encoding */
if (nbTest != 1) {
- stream_len = WebRtcIsacfix_Encode(ISACFIX_main_inst, shortdata,
- (uint8_t*)streamdata);
+ stream_len_int = WebRtcIsacfix_Encode(ISACFIX_main_inst, shortdata,
+ (uint8_t*)streamdata);
} else {
- stream_len =
+ stream_len_int =
WebRtcIsacfix_EncodeNb(ISACFIX_main_inst, shortdata, streamdata);
}
- if (stream_len < 0) {
+ if (stream_len_int < 0) {
/* exit if returned with error */
errtype = WebRtcIsacfix_GetErrorCode(ISACFIX_main_inst);
printf("\n\nError in encoder: %d.\n\n", errtype);
// exit(EXIT_FAILURE);
}
}
+ stream_len = (size_t)stream_len_int;
cur_framesmpls += FRAMESAMPLES_10ms;
@@ -494,10 +498,13 @@
/* iSAC decoding */
if (plc && (framecnt + 1) % 10 == 0) {
- if (nbTest != 2)
- declen = WebRtcIsacfix_DecodePlc(ISACFIX_main_inst, decoded, 1);
- else
- declen = WebRtcIsacfix_DecodePlcNb(ISACFIX_main_inst, decoded, 1);
+ if (nbTest != 2) {
+ declen =
+ (int)WebRtcIsacfix_DecodePlc(ISACFIX_main_inst, decoded, 1);
+ } else {
+ declen =
+ (int)WebRtcIsacfix_DecodePlcNb(ISACFIX_main_inst, decoded, 1);
+ }
} else {
if (nbTest != 2)
declen = WebRtcIsacfix_Decode(ISACFIX_main_inst, streamdata,
@@ -551,10 +558,13 @@
/* iSAC decoding */
if (plc && (framecnt + 1) % 10 == 0) {
- if (nbTest != 2)
- declen = WebRtcIsacfix_DecodePlc(ISACFIX_main_inst, decoded, 1);
- else
- declen = WebRtcIsacfix_DecodePlcNb(ISACFIX_main_inst, decoded, 1);
+ if (nbTest != 2) {
+ declen =
+ (int)WebRtcIsacfix_DecodePlc(ISACFIX_main_inst, decoded, 1);
+ } else {
+ declen =
+ (int)WebRtcIsacfix_DecodePlcNb(ISACFIX_main_inst, decoded, 1);
+ }
} else {
if (nbTest != 2) {
declen = WebRtcIsacfix_Decode(ISACFIX_main_inst, streamdata,
@@ -592,7 +602,7 @@
}
#ifdef _DEBUG
- printf("\n\ntotal bits = %d bits", totalbits);
+ printf("\n\ntotal bits = %" PRIuS " bits", totalbits);
printf("\nmeasured average bitrate = %0.3f kbits/s",
(double)totalbits * (FS / 1000) / totalsmpls);
printf("\n");
diff --git a/webrtc/modules/audio_coding/codecs/isac/main/interface/audio_encoder_isac.h b/webrtc/modules/audio_coding/codecs/isac/main/interface/audio_encoder_isac.h
index c0f3b11..1bfd149 100644
--- a/webrtc/modules/audio_coding/codecs/isac/main/interface/audio_encoder_isac.h
+++ b/webrtc/modules/audio_coding/codecs/isac/main/interface/audio_encoder_isac.h
@@ -39,14 +39,14 @@
}
static inline int DecodeInternal(instance_type* inst,
const uint8_t* encoded,
- int16_t len,
+ size_t len,
int16_t* decoded,
int16_t* speech_type) {
return WebRtcIsac_Decode(inst, encoded, len, decoded, speech_type);
}
- static inline int16_t DecodePlc(instance_type* inst,
- int16_t* decoded,
- int16_t num_lost_frames) {
+ static inline size_t DecodePlc(instance_type* inst,
+ int16_t* decoded,
+ size_t num_lost_frames) {
return WebRtcIsac_DecodePlc(inst, decoded, num_lost_frames);
}
@@ -102,7 +102,7 @@
}
static inline int16_t UpdateBwEstimate(instance_type* inst,
const uint8_t* encoded,
- int32_t packet_size,
+ size_t packet_size,
uint16_t rtp_seq_number,
uint32_t send_ts,
uint32_t arr_ts) {
diff --git a/webrtc/modules/audio_coding/codecs/isac/main/interface/isac.h b/webrtc/modules/audio_coding/codecs/isac/main/interface/isac.h
index 429fc6b..0597de8 100644
--- a/webrtc/modules/audio_coding/codecs/isac/main/interface/isac.h
+++ b/webrtc/modules/audio_coding/codecs/isac/main/interface/isac.h
@@ -11,6 +11,8 @@
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_INTERFACE_ISAC_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_INTERFACE_ISAC_H_
+#include <stddef.h>
+
#include "webrtc/modules/audio_coding/codecs/isac/bandwidth_info.h"
#include "webrtc/typedefs.h"
@@ -186,7 +188,7 @@
int16_t WebRtcIsac_UpdateBwEstimate(
ISACStruct* ISAC_main_inst,
const uint8_t* encoded,
- int32_t packet_size,
+ size_t packet_size,
uint16_t rtp_seq_number,
uint32_t send_ts,
uint32_t arr_ts);
@@ -215,7 +217,7 @@
int WebRtcIsac_Decode(
ISACStruct* ISAC_main_inst,
const uint8_t* encoded,
- int16_t len,
+ size_t len,
int16_t* decoded,
int16_t* speechType);
@@ -235,14 +237,13 @@
* Output:
* - decoded : The decoded vector.
*
- * Return value : >0 - number of samples in decoded PLC vector
- * -1 - Error
+ * Return value : Number of samples in decoded PLC vector
*/
- int16_t WebRtcIsac_DecodePlc(
+ size_t WebRtcIsac_DecodePlc(
ISACStruct* ISAC_main_inst,
int16_t* decoded,
- int16_t noOfLostFrames);
+ size_t noOfLostFrames);
/******************************************************************************
@@ -704,7 +705,7 @@
int WebRtcIsac_DecodeRcu(
ISACStruct* ISAC_main_inst,
const uint8_t* encoded,
- int16_t len,
+ size_t len,
int16_t* decoded,
int16_t* speechType);
diff --git a/webrtc/modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.c b/webrtc/modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.c
index 940e8f5..51da3f7 100644
--- a/webrtc/modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.c
+++ b/webrtc/modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.c
@@ -142,7 +142,7 @@
const int32_t frame_length,
const uint32_t send_ts,
const uint32_t arr_ts,
- const int32_t pksize
+ const size_t pksize
/*, const uint16_t Index*/)
{
float weight = 0.0f;
diff --git a/webrtc/modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.h b/webrtc/modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.h
index 2916876..0704337 100644
--- a/webrtc/modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.h
+++ b/webrtc/modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.h
@@ -95,7 +95,7 @@
const int32_t frame_length,
const uint32_t send_ts,
const uint32_t arr_ts,
- const int32_t pksize);
+ const size_t pksize);
/* Update receiving estimates. Used when we only receive BWE index, no iSAC data packet. */
int16_t WebRtcIsac_UpdateUplinkBwImpl(
diff --git a/webrtc/modules/audio_coding/codecs/isac/main/source/codec.h b/webrtc/modules/audio_coding/codecs/isac/main/source/codec.h
index 4b36fff..7ef64b5 100644
--- a/webrtc/modules/audio_coding/codecs/isac/main/source/codec.h
+++ b/webrtc/modules/audio_coding/codecs/isac/main/source/codec.h
@@ -25,7 +25,7 @@
void WebRtcIsac_ResetBitstream(Bitstr* bit_stream);
int WebRtcIsac_EstimateBandwidth(BwEstimatorstr* bwest_str, Bitstr* streamdata,
- int32_t packet_size,
+ size_t packet_size,
uint16_t rtp_seq_number,
uint32_t send_ts, uint32_t arr_ts,
enum IsacSamplingRate encoderSampRate,
@@ -195,14 +195,14 @@
/******************************* filter functions ****************************/
-void WebRtcIsac_AllPoleFilter(double* InOut, double* Coef, int lengthInOut,
+void WebRtcIsac_AllPoleFilter(double* InOut, double* Coef, size_t lengthInOut,
int orderCoef);
-void WebRtcIsac_AllZeroFilter(double* In, double* Coef, int lengthInOut,
+void WebRtcIsac_AllZeroFilter(double* In, double* Coef, size_t lengthInOut,
int orderCoef, double* Out);
void WebRtcIsac_ZeroPoleFilter(double* In, double* ZeroCoef, double* PoleCoef,
- int lengthInOut, int orderCoef, double* Out);
+ size_t lengthInOut, int orderCoef, double* Out);
/***************************** filterbank functions **************************/
@@ -228,6 +228,6 @@
void WebRtcIsac_Dir2Lat(double* a, int orderCoef, float* sth, float* cth);
-void WebRtcIsac_AutoCorr(double* r, const double* x, int N, int order);
+void WebRtcIsac_AutoCorr(double* r, const double* x, size_t N, size_t order);
#endif /* WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_CODEC_H_ */
diff --git a/webrtc/modules/audio_coding/codecs/isac/main/source/decode_bwe.c b/webrtc/modules/audio_coding/codecs/isac/main/source/decode_bwe.c
index 5abe204..019cc89 100644
--- a/webrtc/modules/audio_coding/codecs/isac/main/source/decode_bwe.c
+++ b/webrtc/modules/audio_coding/codecs/isac/main/source/decode_bwe.c
@@ -18,7 +18,7 @@
WebRtcIsac_EstimateBandwidth(
BwEstimatorstr* bwest_str,
Bitstr* streamdata,
- int32_t packet_size,
+ size_t packet_size,
uint16_t rtp_seq_number,
uint32_t send_ts,
uint32_t arr_ts,
diff --git a/webrtc/modules/audio_coding/codecs/isac/main/source/filter_functions.c b/webrtc/modules/audio_coding/codecs/isac/main/source/filter_functions.c
index 089f26e..d47eb1f 100644
--- a/webrtc/modules/audio_coding/codecs/isac/main/source/filter_functions.c
+++ b/webrtc/modules/audio_coding/codecs/isac/main/source/filter_functions.c
@@ -19,12 +19,15 @@
-void WebRtcIsac_AllPoleFilter(double *InOut, double *Coef, int lengthInOut, int orderCoef){
-
+void WebRtcIsac_AllPoleFilter(double* InOut,
+ double* Coef,
+ size_t lengthInOut,
+ int orderCoef) {
/* the state of filter is assumed to be in InOut[-1] to InOut[-orderCoef] */
double scal;
double sum;
- int n,k;
+ size_t n;
+ int k;
//if (fabs(Coef[0]-1.0)<0.001) {
if ( (Coef[0] > 0.9999) && (Coef[0] < 1.0001) )
@@ -53,11 +56,15 @@
}
-void WebRtcIsac_AllZeroFilter(double *In, double *Coef, int lengthInOut, int orderCoef, double *Out){
-
+void WebRtcIsac_AllZeroFilter(double* In,
+ double* Coef,
+ size_t lengthInOut,
+ int orderCoef,
+ double* Out) {
/* the state of filter is assumed to be in In[-1] to In[-orderCoef] */
- int n, k;
+ size_t n;
+ int k;
double tmp;
for(n = 0; n < lengthInOut; n++)
@@ -74,9 +81,12 @@
}
-
-void WebRtcIsac_ZeroPoleFilter(double *In, double *ZeroCoef, double *PoleCoef, int lengthInOut, int orderCoef, double *Out){
-
+void WebRtcIsac_ZeroPoleFilter(double* In,
+ double* ZeroCoef,
+ double* PoleCoef,
+ size_t lengthInOut,
+ int orderCoef,
+ double* Out) {
/* the state of the zero section is assumed to be in In[-1] to In[-orderCoef] */
/* the state of the pole section is assumed to be in Out[-1] to Out[-orderCoef] */
@@ -85,14 +95,8 @@
}
-void WebRtcIsac_AutoCorr(
- double *r,
- const double *x,
- int N,
- int order
- )
-{
- int lag, n;
+void WebRtcIsac_AutoCorr(double* r, const double* x, size_t N, size_t order) {
+ size_t lag, n;
double sum, prod;
const double *x_lag;
@@ -112,8 +116,8 @@
}
-void WebRtcIsac_BwExpand(double *out, double *in, double coef, short length) {
- int i;
+void WebRtcIsac_BwExpand(double* out, double* in, double coef, size_t length) {
+ size_t i;
double chirp;
chirp = coef;
@@ -125,8 +129,10 @@
}
}
-void WebRtcIsac_WeightingFilter(const double *in, double *weiout, double *whiout, WeightFiltstr *wfdata) {
-
+void WebRtcIsac_WeightingFilter(const double* in,
+ double* weiout,
+ double* whiout,
+ WeightFiltstr* wfdata) {
double tmpbuffer[PITCH_FRAME_LEN + PITCH_WLPCBUFLEN];
double corr[PITCH_WLPCORDER+1], rc[PITCH_WLPCORDER+1];
double apol[PITCH_WLPCORDER+1], apolr[PITCH_WLPCORDER+1];
@@ -195,15 +201,13 @@
static const double APlower[ALLPASSSECTIONS] = {0.1544, 0.744};
-
-void WebRtcIsac_AllpassFilterForDec(double *InOut,
- const double *APSectionFactors,
- int lengthInOut,
- double *FilterState)
-{
+void WebRtcIsac_AllpassFilterForDec(double* InOut,
+ const double* APSectionFactors,
+ size_t lengthInOut,
+ double* FilterState) {
//This performs all-pass filtering--a series of first order all-pass sections are used
//to filter the input in a cascade manner.
- int n,j;
+ size_t n,j;
double temp;
for (j=0; j<ALLPASSSECTIONS; j++){
for (n=0;n<lengthInOut;n+=2){
@@ -214,12 +218,11 @@
}
}
-void WebRtcIsac_DecimateAllpass(const double *in,
- double *state_in, /* array of size: 2*ALLPASSSECTIONS+1 */
- int N, /* number of input samples */
- double *out) /* array of size N/2 */
-{
- int n;
+void WebRtcIsac_DecimateAllpass(const double* in,
+ double* state_in,
+ size_t N,
+ double* out) {
+ size_t n;
double data_vec[PITCH_FRAME_LEN];
/* copy input */
@@ -237,7 +240,6 @@
}
-
/* create high-pass filter ocefficients
* z = 0.998 * exp(j*2*pi*35/8000);
* p = 0.94 * exp(j*2*pi*140/8000);
@@ -247,9 +249,11 @@
static const double b_coef[2] = {-1.99524591718270, 0.99600400000000};
/* second order high-pass filter */
-void WebRtcIsac_Highpass(const double *in, double *out, double *state, int N)
-{
- int k;
+void WebRtcIsac_Highpass(const double* in,
+ double* out,
+ double* state,
+ size_t N) {
+ size_t k;
for (k=0; k<N; k++) {
*out = *in + state[1];
diff --git a/webrtc/modules/audio_coding/codecs/isac/main/source/isac.c b/webrtc/modules/audio_coding/codecs/isac/main/source/isac.c
index ac211e9..190277e 100644
--- a/webrtc/modules/audio_coding/codecs/isac/main/source/isac.c
+++ b/webrtc/modules/audio_coding/codecs/isac/main/source/isac.c
@@ -507,7 +507,7 @@
int streamLenLB = 0;
int streamLenUB = 0;
int streamLen = 0;
- int16_t k = 0;
+ size_t k = 0;
uint8_t garbageLen = 0;
int32_t bottleneck = 0;
int16_t bottleneckIdx = 0;
@@ -528,12 +528,12 @@
if (instISAC->in_sample_rate_hz == 48000) {
/* Samples in 10 ms @ 48 kHz. */
- const int kNumInputSamples = FRAMESAMPLES_10ms * 3;
+ const size_t kNumInputSamples = FRAMESAMPLES_10ms * 3;
/* Samples 10 ms @ 32 kHz. */
- const int kNumOutputSamples = FRAMESAMPLES_10ms * 2;
+ const size_t kNumOutputSamples = FRAMESAMPLES_10ms * 2;
/* Resampler divide the input into blocks of 3 samples, i.e.
* kNumInputSamples / 3. */
- const int kNumResamplerBlocks = FRAMESAMPLES_10ms;
+ const size_t kNumResamplerBlocks = FRAMESAMPLES_10ms;
int32_t buffer32[FRAMESAMPLES_10ms * 3 + SIZE_RESAMPLER_STATE];
/* Restore last samples from the past to the beginning of the buffer
@@ -1006,7 +1006,7 @@
*/
int16_t WebRtcIsac_UpdateBwEstimate(ISACStruct* ISAC_main_inst,
const uint8_t* encoded,
- int32_t packet_size,
+ size_t packet_size,
uint16_t rtp_seq_number,
uint32_t send_ts,
uint32_t arr_ts) {
@@ -1056,7 +1056,7 @@
static int Decode(ISACStruct* ISAC_main_inst,
const uint8_t* encoded,
- int16_t lenEncodedBytes,
+ size_t lenEncodedBytes,
int16_t* decoded,
int16_t* speechType,
int16_t isRCUPayload) {
@@ -1069,13 +1069,14 @@
float outFrame[MAX_FRAMESAMPLES];
int16_t outFrameLB[MAX_FRAMESAMPLES];
int16_t outFrameUB[MAX_FRAMESAMPLES];
- int numDecodedBytesLB;
+ int numDecodedBytesLBint;
+ size_t numDecodedBytesLB;
int numDecodedBytesUB;
- int16_t lenEncodedLBBytes;
+ size_t lenEncodedLBBytes;
int16_t validChecksum = 1;
int16_t k;
uint16_t numLayer;
- int16_t totSizeBytes;
+ size_t totSizeBytes;
int16_t err;
ISACMainStruct* instISAC = (ISACMainStruct*)ISAC_main_inst;
@@ -1089,7 +1090,7 @@
return -1;
}
- if (lenEncodedBytes <= 0) {
+ if (lenEncodedBytes == 0) {
/* return error code if the packet length is null. */
instISAC->errorCode = ISAC_EMPTY_PACKET;
return -1;
@@ -1115,11 +1116,12 @@
/* Regardless of that the current codec is setup to work in
* wideband or super-wideband, the decoding of the lower-band
* has to be performed. */
- numDecodedBytesLB = WebRtcIsac_DecodeLb(&instISAC->transform_tables,
- outFrame, decInstLB,
- &numSamplesLB, isRCUPayload);
-
- if ((numDecodedBytesLB < 0) || (numDecodedBytesLB > lenEncodedLBBytes) ||
+ numDecodedBytesLBint = WebRtcIsac_DecodeLb(&instISAC->transform_tables,
+ outFrame, decInstLB,
+ &numSamplesLB, isRCUPayload);
+ numDecodedBytesLB = (size_t)numDecodedBytesLBint;
+ if ((numDecodedBytesLBint < 0) ||
+ (numDecodedBytesLB > lenEncodedLBBytes) ||
(numSamplesLB > MAX_FRAMESAMPLES)) {
instISAC->errorCode = ISAC_LENGTH_MISMATCH;
return -1;
@@ -1362,7 +1364,7 @@
int WebRtcIsac_Decode(ISACStruct* ISAC_main_inst,
const uint8_t* encoded,
- int16_t lenEncodedBytes,
+ size_t lenEncodedBytes,
int16_t* decoded,
int16_t* speechType) {
int16_t isRCUPayload = 0;
@@ -1394,7 +1396,7 @@
int WebRtcIsac_DecodeRcu(ISACStruct* ISAC_main_inst,
const uint8_t* encoded,
- int16_t lenEncodedBytes,
+ size_t lenEncodedBytes,
int16_t* decoded,
int16_t* speechType) {
int16_t isRCUPayload = 1;
@@ -1417,13 +1419,12 @@
* Output:
* - decoded : The decoded vector
*
- * Return value : >0 - number of samples in decoded PLC vector
- * -1 - Error
+ * Return value : Number of samples in decoded PLC vector
*/
-int16_t WebRtcIsac_DecodePlc(ISACStruct* ISAC_main_inst,
- int16_t* decoded,
- int16_t noOfLostFrames) {
- int16_t numSamples = 0;
+size_t WebRtcIsac_DecodePlc(ISACStruct* ISAC_main_inst,
+ int16_t* decoded,
+ size_t noOfLostFrames) {
+ size_t numSamples = 0;
ISACMainStruct* instISAC = (ISACMainStruct*)ISAC_main_inst;
/* Limit number of frames to two = 60 millisecond.
diff --git a/webrtc/modules/audio_coding/codecs/isac/main/source/isac_unittest.cc b/webrtc/modules/audio_coding/codecs/isac/main/source/isac_unittest.cc
index a751c24..84c712e 100644
--- a/webrtc/modules/audio_coding/codecs/isac/main/source/isac_unittest.cc
+++ b/webrtc/modules/audio_coding/codecs/isac/main/source/isac_unittest.cc
@@ -97,10 +97,12 @@
encoded_bytes = WebRtcIsac_Encode(isac_codec_, speech_data_, bitstream_);
EXPECT_EQ(0, encoded_bytes);
encoded_bytes = WebRtcIsac_Encode(isac_codec_, speech_data_, bitstream_);
+ EXPECT_GT(encoded_bytes, 0);
// Call to update bandwidth estimator with real data.
EXPECT_EQ(0, WebRtcIsac_UpdateBwEstimate(isac_codec_, bitstream_,
- encoded_bytes, 1, 12345, 56789));
+ static_cast<size_t>(encoded_bytes),
+ 1, 12345, 56789));
// Free memory.
EXPECT_EQ(0, WebRtcIsac_Free(isac_codec_));
diff --git a/webrtc/modules/audio_coding/codecs/isac/main/source/lpc_analysis.c b/webrtc/modules/audio_coding/codecs/isac/main/source/lpc_analysis.c
index 4708a5c..60fc25b 100644
--- a/webrtc/modules/audio_coding/codecs/isac/main/source/lpc_analysis.c
+++ b/webrtc/modules/audio_coding/codecs/isac/main/source/lpc_analysis.c
@@ -75,11 +75,11 @@
0.00155690, 0.00124918, 0.00094895, 0.00066112, 0.00039320, 0.00015881
};
-double WebRtcIsac_LevDurb(double *a, double *k, double *r, int order)
+double WebRtcIsac_LevDurb(double *a, double *k, double *r, size_t order)
{
double sum, alpha;
- int m, m_h, i;
+ size_t m, m_h, i;
alpha = 0; //warning -DH
a[0] = 1.0;
if (r[0] < LEVINSON_EPS) { /* if r[0] <= 0, set LPC coeff. to zero */
diff --git a/webrtc/modules/audio_coding/codecs/isac/main/source/lpc_analysis.h b/webrtc/modules/audio_coding/codecs/isac/main/source/lpc_analysis.h
index 866c76d..8dfe383 100644
--- a/webrtc/modules/audio_coding/codecs/isac/main/source/lpc_analysis.h
+++ b/webrtc/modules/audio_coding/codecs/isac/main/source/lpc_analysis.h
@@ -21,7 +21,7 @@
#include "settings.h"
#include "structs.h"
-double WebRtcIsac_LevDurb(double *a, double *k, double *r, int order);
+double WebRtcIsac_LevDurb(double *a, double *k, double *r, size_t order);
void WebRtcIsac_GetVars(const double *input, const int16_t *pitchGains_Q12,
double *oldEnergy, double *varscale);
diff --git a/webrtc/modules/audio_coding/codecs/isac/main/source/pitch_estimator.h b/webrtc/modules/audio_coding/codecs/isac/main/source/pitch_estimator.h
index f5d9356..6fb02b3 100644
--- a/webrtc/modules/audio_coding/codecs/isac/main/source/pitch_estimator.h
+++ b/webrtc/modules/audio_coding/codecs/isac/main/source/pitch_estimator.h
@@ -61,11 +61,15 @@
void WebRtcIsac_WeightingFilter(const double *in, double *weiout, double *whiout, WeightFiltstr *wfdata);
-void WebRtcIsac_Highpass(const double *in, double *out, double *state, int N);
+void WebRtcIsac_Highpass(const double *in,
+ double *out,
+ double *state,
+ size_t N);
void WebRtcIsac_DecimateAllpass(const double *in,
- double *state_in, /* array of size: 2*ALLPASSSECTIONS+1 */
- int N, /* number of input samples */
- double *out); /* array of size N/2 */
+ double *state_in, /* array of size:
+ * 2*ALLPASSSECTIONS+1 */
+ size_t N, /* number of input samples */
+ double *out); /* array of size N/2 */
#endif /* WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_PITCH_ESTIMATOR_H_ */
diff --git a/webrtc/modules/audio_coding/codecs/isac/main/test/ReleaseTest-API/ReleaseTest-API.cc b/webrtc/modules/audio_coding/codecs/isac/main/test/ReleaseTest-API/ReleaseTest-API.cc
index 8584c76..d385ff4 100644
--- a/webrtc/modules/audio_coding/codecs/isac/main/test/ReleaseTest-API/ReleaseTest-API.cc
+++ b/webrtc/modules/audio_coding/codecs/isac/main/test/ReleaseTest-API/ReleaseTest-API.cc
@@ -21,6 +21,7 @@
/* include API */
#include "isac.h"
#include "utility.h"
+#include "webrtc/base/format_macros.h"
/* Defines */
#define SEED_FILE "randseed.txt" /* Used when running decoder on garbage data */
@@ -42,7 +43,8 @@
FILE* inp, *outp, * f_bn = NULL, * vadp = NULL, *bandwidthp;
int framecnt, endfile;
- int i, errtype, VADusage = 0, packetLossPercent = 0;
+ size_t i;
+ int errtype, VADusage = 0, packetLossPercent = 0;
int16_t CodingMode;
int32_t bottleneck = 0;
int framesize = 30; /* ms */
@@ -51,7 +53,7 @@
/* Runtime statistics */
double starttime, runtime, length_file;
- int16_t stream_len = 0;
+ size_t stream_len = 0;
int declen = 0, declenTC = 0;
bool lostFrame = false;
@@ -75,14 +77,14 @@
FILE* fy;
double kbps;
#endif /* _DEBUG */
- int totalbits = 0;
+ size_t totalbits = 0;
int totalsmpls = 0;
/* If use GNS file */
FILE* fp_gns = NULL;
char gns_file[100];
- short maxStreamLen30 = 0;
- short maxStreamLen60 = 0;
+ size_t maxStreamLen30 = 0;
+ size_t maxStreamLen60 = 0;
short sampFreqKHz = 32;
short samplesIn10Ms;
short useAssign = 0;
@@ -90,10 +92,10 @@
bool doTransCoding = false;
int32_t rateTransCoding = 0;
uint8_t streamDataTransCoding[1200];
- int16_t streamLenTransCoding = 0;
+ size_t streamLenTransCoding = 0;
FILE* transCodingFile = NULL;
FILE* transcodingBitstream = NULL;
- uint32_t numTransCodingBytes = 0;
+ size_t numTransCodingBytes = 0;
/* only one structure used for ISAC encoder */
ISACStruct* ISAC_main_inst = NULL;
@@ -185,7 +187,7 @@
char transCodingFileName[500];
int16_t totFileLoop = 0;
int16_t numFileLoop = 0;
- for (i = 1; i + 2 < argc; i++) {
+ for (i = 1; i + 2 < static_cast<size_t>(argc); i++) {
if (!strcmp("-LOOP", argv[i])) {
i++;
totFileLoop = (int16_t)atol(argv[i]);
@@ -579,6 +581,8 @@
cur_framesmpls = 0;
while (1) {
+ int stream_len_int = 0;
+
/* Read 10 ms speech block */
endfile = readframe(shortdata, inp, samplesIn10Ms);
@@ -598,21 +602,21 @@
/* iSAC encoding */
if (!(testNum == 3 && framecnt == 0)) {
- stream_len =
+ stream_len_int =
WebRtcIsac_Encode(ISAC_main_inst, shortdata, (uint8_t*)streamdata);
- if ((payloadSize != 0) && (stream_len > payloadSize)) {
+ if ((payloadSize != 0) && (stream_len_int > payloadSize)) {
if (testNum == 0) {
printf("\n\n");
}
printf("\nError: Streamsize out of range %d\n",
- stream_len - payloadSize);
+ stream_len_int - payloadSize);
cout << flush;
}
WebRtcIsac_GetUplinkBw(ISAC_main_inst, &sendBN);
- if (stream_len > 0) {
+ if (stream_len_int > 0) {
if (doTransCoding) {
int16_t indexStream;
uint8_t auxUW8;
@@ -620,13 +624,15 @@
/******************** Main Transcoding stream ********************/
WebRtcIsac_GetDownLinkBwIndex(ISAC_main_inst, &bnIdxTC,
&jitterInfoTC);
- streamLenTransCoding = WebRtcIsac_GetNewBitStream(
+ int streamLenTransCoding_int = WebRtcIsac_GetNewBitStream(
ISAC_main_inst, bnIdxTC, jitterInfoTC, rateTransCoding,
streamDataTransCoding, false);
- if (streamLenTransCoding < 0) {
+ if (streamLenTransCoding_int < 0) {
fprintf(stderr, "Error in trans-coding\n");
exit(0);
}
+ streamLenTransCoding =
+ static_cast<size_t>(streamLenTransCoding_int);
auxUW8 = (uint8_t)(((streamLenTransCoding & 0xFF00) >> 8) & 0x00FF);
if (fwrite(&auxUW8, sizeof(uint8_t), 1, transcodingBitstream) !=
1) {
@@ -641,7 +647,7 @@
if (fwrite(streamDataTransCoding, sizeof(uint8_t),
streamLenTransCoding, transcodingBitstream) !=
- static_cast<size_t>(streamLenTransCoding)) {
+ streamLenTransCoding) {
return -1;
}
@@ -659,13 +665,15 @@
break;
}
- if (stream_len < 0) {
+ if (stream_len_int < 0) {
/* exit if returned with error */
errtype = WebRtcIsac_GetErrorCode(ISAC_main_inst);
fprintf(stderr, "Error in encoder: %d.\n", errtype);
cout << flush;
exit(0);
}
+ stream_len = static_cast<size_t>(stream_len_int);
+
cur_framesmpls += samplesIn10Ms;
/* exit encoder loop if the encoder returned a bitstream */
if (stream_len != 0)
@@ -703,17 +711,24 @@
// RED.
if (lostFrame) {
- stream_len = WebRtcIsac_GetRedPayload(
+ int stream_len_int = WebRtcIsac_GetRedPayload(
ISAC_main_inst, reinterpret_cast<uint8_t*>(streamdata));
+ if (stream_len_int < 0) {
+ fprintf(stderr, "Error getting RED payload\n");
+ exit(0);
+ }
+ stream_len = static_cast<size_t>(stream_len_int);
if (doTransCoding) {
- streamLenTransCoding = WebRtcIsac_GetNewBitStream(
+ int streamLenTransCoding_int = WebRtcIsac_GetNewBitStream(
ISAC_main_inst, bnIdxTC, jitterInfoTC, rateTransCoding,
streamDataTransCoding, true);
- if (streamLenTransCoding < 0) {
+ if (streamLenTransCoding_int < 0) {
fprintf(stderr, "Error in RED trans-coding\n");
exit(0);
}
+ streamLenTransCoding =
+ static_cast<size_t>(streamLenTransCoding_int);
}
}
@@ -891,7 +906,7 @@
#endif /* _DEBUG */
}
printf("\n");
- printf("total bits = %d bits\n", totalbits);
+ printf("total bits = %" PRIuS " bits\n", totalbits);
printf("measured average bitrate = %0.3f kbits/s\n",
(double)totalbits * (sampFreqKHz) / totalsmpls);
if (doTransCoding) {
@@ -910,11 +925,11 @@
(100 * runtime / length_file));
if (maxStreamLen30 != 0) {
- printf("Maximum payload size 30ms Frames %d bytes (%0.3f kbps)\n",
+ printf("Maximum payload size 30ms Frames %" PRIuS " bytes (%0.3f kbps)\n",
maxStreamLen30, maxStreamLen30 * 8 / 30.);
}
if (maxStreamLen60 != 0) {
- printf("Maximum payload size 60ms Frames %d bytes (%0.3f kbps)\n",
+ printf("Maximum payload size 60ms Frames %" PRIuS " bytes (%0.3f kbps)\n",
maxStreamLen60, maxStreamLen60 * 8 / 60.);
}
// fprintf(stderr, "\n");
@@ -923,12 +938,12 @@
fprintf(stderr, " %0.1f kbps",
(double)totalbits * (sampFreqKHz) / totalsmpls);
if (maxStreamLen30 != 0) {
- fprintf(stderr, " plmax-30ms %d bytes (%0.0f kbps)", maxStreamLen30,
- maxStreamLen30 * 8 / 30.);
+ fprintf(stderr, " plmax-30ms %" PRIuS " bytes (%0.0f kbps)",
+ maxStreamLen30, maxStreamLen30 * 8 / 30.);
}
if (maxStreamLen60 != 0) {
- fprintf(stderr, " plmax-60ms %d bytes (%0.0f kbps)", maxStreamLen60,
- maxStreamLen60 * 8 / 60.);
+ fprintf(stderr, " plmax-60ms %" PRIuS " bytes (%0.0f kbps)",
+ maxStreamLen60, maxStreamLen60 * 8 / 60.);
}
if (doTransCoding) {
fprintf(stderr, " transcoding rate %.0f kbps",
diff --git a/webrtc/modules/audio_coding/codecs/isac/main/test/SwitchingSampRate/SwitchingSampRate.cc b/webrtc/modules/audio_coding/codecs/isac/main/test/SwitchingSampRate/SwitchingSampRate.cc
index a11e408..08061ac 100644
--- a/webrtc/modules/audio_coding/codecs/isac/main/test/SwitchingSampRate/SwitchingSampRate.cc
+++ b/webrtc/modules/audio_coding/codecs/isac/main/test/SwitchingSampRate/SwitchingSampRate.cc
@@ -51,9 +51,9 @@
short clientCntr;
- unsigned int lenEncodedInBytes[MAX_NUM_CLIENTS];
+ size_t lenEncodedInBytes[MAX_NUM_CLIENTS];
unsigned int lenAudioIn10ms[MAX_NUM_CLIENTS];
- unsigned int lenEncodedInBytesTmp[MAX_NUM_CLIENTS];
+ size_t lenEncodedInBytesTmp[MAX_NUM_CLIENTS];
unsigned int lenAudioIn10msTmp[MAX_NUM_CLIENTS];
BottleNeckModel* packetData[MAX_NUM_CLIENTS];
@@ -189,9 +189,9 @@
}
- short streamLen;
+ size_t streamLen;
short numSamplesRead;
- int lenDecodedAudio;
+ size_t lenDecodedAudio;
short senderIdx;
short receiverIdx;
@@ -282,11 +282,11 @@
// Encode
- streamLen = WebRtcIsac_Encode(codecInstance[senderIdx],
- audioBuff10ms,
- (uint8_t*)bitStream);
+ int streamLen_int = WebRtcIsac_Encode(codecInstance[senderIdx],
+ audioBuff10ms,
+ (uint8_t*)bitStream);
int16_t ggg;
- if (streamLen > 0) {
+ if (streamLen_int > 0) {
if ((WebRtcIsac_ReadFrameLen(
codecInstance[receiverIdx],
reinterpret_cast<const uint8_t*>(bitStream),
@@ -295,11 +295,12 @@
}
// Sanity check
- if(streamLen < 0)
+ if(streamLen_int < 0)
{
printf(" Encoder error in client %d \n", senderIdx + 1);
return -1;
}
+ streamLen = static_cast<size_t>(streamLen_int);
if(streamLen > 0)
@@ -423,18 +424,18 @@
}
/**/
// Decode
- lenDecodedAudio = WebRtcIsac_Decode(
+ int lenDecodedAudio_int = WebRtcIsac_Decode(
codecInstance[receiverIdx],
reinterpret_cast<const uint8_t*>(bitStream),
streamLen,
audioBuff60ms,
speechType);
- if(lenDecodedAudio < 0)
+ if(lenDecodedAudio_int < 0)
{
printf(" Decoder error in client %d \n", receiverIdx + 1);
return -1;
}
-
+ lenDecodedAudio = static_cast<size_t>(lenDecodedAudio_int);
if(encoderSampRate[senderIdx] == 16000)
{
@@ -442,7 +443,7 @@
resamplerState[receiverIdx]);
if (fwrite(resampledAudio60ms, sizeof(short), lenDecodedAudio << 1,
outFile[receiverIdx]) !=
- static_cast<size_t>(lenDecodedAudio << 1)) {
+ lenDecodedAudio << 1) {
return -1;
}
}
@@ -450,7 +451,7 @@
{
if (fwrite(audioBuff60ms, sizeof(short), lenDecodedAudio,
outFile[receiverIdx]) !=
- static_cast<size_t>(lenDecodedAudio)) {
+ lenDecodedAudio) {
return -1;
}
}
diff --git a/webrtc/modules/audio_coding/codecs/isac/main/test/simpleKenny.c b/webrtc/modules/audio_coding/codecs/isac/main/test/simpleKenny.c
index 214dccd..2f44ca8 100644
--- a/webrtc/modules/audio_coding/codecs/isac/main/test/simpleKenny.c
+++ b/webrtc/modules/audio_coding/codecs/isac/main/test/simpleKenny.c
@@ -26,6 +26,7 @@
/* include API */
#include "isac.h"
#include "utility.h"
+#include "webrtc/base/format_macros.h"
//#include "commonDefs.h"
/* max number of samples per frame (= 60 ms frame) */
@@ -57,7 +58,7 @@
/* Runtime statistics */
double rate;
double rateRCU;
- unsigned long totalbits = 0;
+ size_t totalbits = 0;
unsigned long totalBitsRCU = 0;
unsigned long totalsmpls = 0;
@@ -72,7 +73,7 @@
int32_t rateLimit;
ISACStruct* ISAC_main_inst;
- int16_t stream_len = 0;
+ size_t stream_len = 0;
int declen = 0;
int16_t err;
int cur_framesmpls;
@@ -94,7 +95,7 @@
FILE* averageFile;
int sampFreqKHz;
int samplesIn10Ms;
- int16_t maxStreamLen = 0;
+ size_t maxStreamLen = 0;
char histFileName[500];
char averageFileName[500];
unsigned int hist[600];
@@ -310,22 +311,22 @@
if (onlyDecode) {
uint8_t auxUW8;
- size_t auxSizet;
if (fread(&auxUW8, sizeof(uint8_t), 1, inp) < 1) {
break;
}
- stream_len = ((uint8_t)auxUW8) << 8;
+ stream_len = auxUW8 << 8;
if (fread(&auxUW8, sizeof(uint8_t), 1, inp) < 1) {
break;
}
- stream_len |= (uint16_t)auxUW8;
- auxSizet = (size_t)stream_len;
- if (fread(payload, 1, auxSizet, inp) < auxSizet) {
+ stream_len |= auxUW8;
+ if (fread(payload, 1, stream_len, inp) < stream_len) {
printf("last payload is corrupted\n");
break;
}
} else {
while (stream_len == 0) {
+ int stream_len_int;
+
// Read 10 ms speech block
endfile = readframe(shortdata, inp, samplesIn10Ms);
if (endfile) {
@@ -334,15 +335,16 @@
cur_framesmpls += samplesIn10Ms;
//-------- iSAC encoding ---------
- stream_len = WebRtcIsac_Encode(ISAC_main_inst, shortdata, payload);
+ stream_len_int = WebRtcIsac_Encode(ISAC_main_inst, shortdata, payload);
- if (stream_len < 0) {
+ if (stream_len_int < 0) {
// exit if returned with error
// errType=WebRtcIsac_GetErrorCode(ISAC_main_inst);
fprintf(stderr, "\nError in encoder\n");
getc(stdin);
exit(EXIT_FAILURE);
}
+ stream_len = (size_t)stream_len_int;
}
//===================================================================
if (endfile) {
@@ -396,15 +398,16 @@
if (fwrite(&auxUW8, sizeof(uint8_t), 1, outp) != 1) {
return -1;
}
- if (fwrite(payload, 1, stream_len, outp) != (size_t)stream_len) {
+ if (fwrite(payload, 1, stream_len, outp) != stream_len) {
return -1;
}
} else {
//======================= iSAC decoding ===========================
if ((rand() % 100) < packetLossPercent) {
- declen = WebRtcIsac_DecodeRcu(ISAC_main_inst, payloadRCU, rcuStreamLen,
- decoded, speechType);
+ declen = WebRtcIsac_DecodeRcu(ISAC_main_inst, payloadRCU,
+ (size_t)rcuStreamLen, decoded,
+ speechType);
lostPacketCntr++;
} else {
declen = WebRtcIsac_Decode(ISAC_main_inst, payload, stream_len, decoded,
@@ -458,7 +461,7 @@
printf("\n");
printf("Measured bit-rate........... %0.3f kbps\n", rate);
printf("Measured RCU bit-ratre...... %0.3f kbps\n", rateRCU);
- printf("Maximum bit-rate/payloadsize %0.3f / %d\n",
+ printf("Maximum bit-rate/payloadsize %0.3f / %" PRIuS "\n",
maxStreamLen * 8 / 0.03, maxStreamLen);
printf("Measured packet-loss........ %0.1f%% \n",
100.0f * (float)lostPacketCntr / (float)packetCntr);
diff --git a/webrtc/modules/audio_coding/codecs/isac/main/util/utility.c b/webrtc/modules/audio_coding/codecs/isac/main/util/utility.c
index 0a2256a..d9c4332 100644
--- a/webrtc/modules/audio_coding/codecs/isac/main/util/utility.c
+++ b/webrtc/modules/audio_coding/codecs/isac/main/util/utility.c
@@ -135,7 +135,7 @@
void
get_arrival_time(
int current_framesamples, /* samples */
- int packet_size, /* bytes */
+ size_t packet_size, /* bytes */
int bottleneck, /* excluding headers; bits/s */
BottleNeckModel* BN_data,
short senderSampFreqHz,
diff --git a/webrtc/modules/audio_coding/codecs/isac/main/util/utility.h b/webrtc/modules/audio_coding/codecs/isac/main/util/utility.h
index f9fba94..1bb6d29 100644
--- a/webrtc/modules/audio_coding/codecs/isac/main/util/utility.h
+++ b/webrtc/modules/audio_coding/codecs/isac/main/util/utility.h
@@ -99,7 +99,7 @@
void get_arrival_time(
int current_framesamples, /* samples */
- int packet_size, /* bytes */
+ size_t packet_size, /* bytes */
int bottleneck, /* excluding headers; bits/s */
BottleNeckModel* BN_data,
short senderSampFreqHz,
diff --git a/webrtc/modules/audio_coding/codecs/mock/mock_audio_encoder.h b/webrtc/modules/audio_coding/codecs/mock/mock_audio_encoder.h
index 18d4068..545fc19 100644
--- a/webrtc/modules/audio_coding/codecs/mock/mock_audio_encoder.h
+++ b/webrtc/modules/audio_coding/codecs/mock/mock_audio_encoder.h
@@ -24,8 +24,8 @@
MOCK_CONST_METHOD0(SampleRateHz, int());
MOCK_CONST_METHOD0(NumChannels, int());
MOCK_CONST_METHOD0(MaxEncodedBytes, size_t());
- MOCK_CONST_METHOD0(Num10MsFramesInNextPacket, int());
- MOCK_CONST_METHOD0(Max10MsFramesInAPacket, int());
+ MOCK_CONST_METHOD0(Num10MsFramesInNextPacket, size_t());
+ MOCK_CONST_METHOD0(Max10MsFramesInAPacket, size_t());
MOCK_CONST_METHOD0(GetTargetBitrate, int());
MOCK_METHOD1(SetTargetBitrate, void(int));
MOCK_METHOD1(SetProjectedPacketLossRate, void(double));
@@ -42,8 +42,8 @@
MOCK_CONST_METHOD0(SampleRateHz, int());
MOCK_CONST_METHOD0(NumChannels, int());
MOCK_CONST_METHOD0(MaxEncodedBytes, size_t());
- MOCK_CONST_METHOD0(Num10MsFramesInNextPacket, int());
- MOCK_CONST_METHOD0(Max10MsFramesInAPacket, int());
+ MOCK_CONST_METHOD0(Num10MsFramesInNextPacket, size_t());
+ MOCK_CONST_METHOD0(Max10MsFramesInAPacket, size_t());
MOCK_CONST_METHOD0(GetTargetBitrate, int());
MOCK_METHOD1(SetTargetBitrate, void(int));
MOCK_METHOD1(SetProjectedPacketLossRate, void(double));
diff --git a/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.cc b/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.cc
index 9bf1ae3..37ce873 100644
--- a/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.cc
+++ b/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.cc
@@ -34,16 +34,6 @@
// We always encode at 48 kHz.
const int kSampleRateHz = 48000;
-int16_t ClampInt16(size_t x) {
- return static_cast<int16_t>(
- std::min(x, static_cast<size_t>(std::numeric_limits<int16_t>::max())));
-}
-
-int16_t CastInt16(size_t x) {
- DCHECK_LE(x, static_cast<size_t>(std::numeric_limits<int16_t>::max()));
- return static_cast<int16_t>(x);
-}
-
} // namespace
AudioEncoderOpus::Config::Config()
@@ -72,13 +62,13 @@
AudioEncoderOpus::AudioEncoderOpus(const Config& config)
: num_10ms_frames_per_packet_(
- rtc::CheckedDivExact(config.frame_size_ms, 10)),
+ static_cast<size_t>(rtc::CheckedDivExact(config.frame_size_ms, 10))),
num_channels_(config.num_channels),
payload_type_(config.payload_type),
application_(config.application),
dtx_enabled_(config.dtx_enabled),
- samples_per_10ms_frame_(rtc::CheckedDivExact(kSampleRateHz, 100) *
- num_channels_),
+ samples_per_10ms_frame_(static_cast<size_t>(
+ rtc::CheckedDivExact(kSampleRateHz, 100) * num_channels_)),
packet_loss_rate_(0.0) {
CHECK(config.IsOk());
input_buffer_.reserve(num_10ms_frames_per_packet_ * samples_per_10ms_frame_);
@@ -121,11 +111,11 @@
return 2 * approx_encoded_bytes;
}
-int AudioEncoderOpus::Num10MsFramesInNextPacket() const {
+size_t AudioEncoderOpus::Num10MsFramesInNextPacket() const {
return num_10ms_frames_per_packet_;
}
-int AudioEncoderOpus::Max10MsFramesInAPacket() const {
+size_t AudioEncoderOpus::Max10MsFramesInAPacket() const {
return num_10ms_frames_per_packet_;
}
@@ -195,18 +185,17 @@
first_timestamp_in_buffer_ = rtp_timestamp;
input_buffer_.insert(input_buffer_.end(), audio,
audio + samples_per_10ms_frame_);
- if (input_buffer_.size() < (static_cast<size_t>(num_10ms_frames_per_packet_) *
- samples_per_10ms_frame_)) {
+ if (input_buffer_.size() <
+ (num_10ms_frames_per_packet_ * samples_per_10ms_frame_)) {
return EncodedInfo();
}
CHECK_EQ(input_buffer_.size(),
- static_cast<size_t>(num_10ms_frames_per_packet_) *
- samples_per_10ms_frame_);
+ num_10ms_frames_per_packet_ * samples_per_10ms_frame_);
int status = WebRtcOpus_Encode(
inst_, &input_buffer_[0],
- rtc::CheckedDivExact(CastInt16(input_buffer_.size()),
- static_cast<int16_t>(num_channels_)),
- ClampInt16(max_encoded_bytes), encoded);
+ rtc::CheckedDivExact(input_buffer_.size(),
+ static_cast<size_t>(num_channels_)),
+ max_encoded_bytes, encoded);
CHECK_GE(status, 0); // Fails only if fed invalid data.
input_buffer_.clear();
EncodedInfo info;
diff --git a/webrtc/modules/audio_coding/codecs/opus/interface/audio_encoder_opus.h b/webrtc/modules/audio_coding/codecs/opus/interface/audio_encoder_opus.h
index 3393bd5..5fab599 100644
--- a/webrtc/modules/audio_coding/codecs/opus/interface/audio_encoder_opus.h
+++ b/webrtc/modules/audio_coding/codecs/opus/interface/audio_encoder_opus.h
@@ -50,8 +50,8 @@
int SampleRateHz() const override;
int NumChannels() const override;
size_t MaxEncodedBytes() const override;
- int Num10MsFramesInNextPacket() const override;
- int Max10MsFramesInAPacket() const override;
+ size_t Num10MsFramesInNextPacket() const override;
+ size_t Max10MsFramesInAPacket() const override;
int GetTargetBitrate() const override;
void SetTargetBitrate(int bits_per_second) override;
void SetProjectedPacketLossRate(double fraction) override;
@@ -66,13 +66,13 @@
uint8_t* encoded) override;
private:
- const int num_10ms_frames_per_packet_;
+ const size_t num_10ms_frames_per_packet_;
const int num_channels_;
const int payload_type_;
const ApplicationMode application_;
int bitrate_bps_;
const bool dtx_enabled_;
- const int samples_per_10ms_frame_;
+ const size_t samples_per_10ms_frame_;
std::vector<int16_t> input_buffer_;
OpusEncInst* inst_;
uint32_t first_timestamp_in_buffer_;
diff --git a/webrtc/modules/audio_coding/codecs/opus/interface/opus_interface.h b/webrtc/modules/audio_coding/codecs/opus/interface/opus_interface.h
index 925cd85..007f5c5 100644
--- a/webrtc/modules/audio_coding/codecs/opus/interface/opus_interface.h
+++ b/webrtc/modules/audio_coding/codecs/opus/interface/opus_interface.h
@@ -11,6 +11,8 @@
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_OPUS_INTERFACE_OPUS_INTERFACE_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_OPUS_INTERFACE_OPUS_INTERFACE_H_
+#include <stddef.h>
+
#include "webrtc/typedefs.h"
#ifdef __cplusplus
@@ -66,8 +68,8 @@
*/
int WebRtcOpus_Encode(OpusEncInst* inst,
const int16_t* audio_in,
- int16_t samples,
- int16_t length_encoded_buffer,
+ size_t samples,
+ size_t length_encoded_buffer,
uint8_t* encoded);
/****************************************************************************
@@ -237,7 +239,7 @@
* -1 - Error
*/
int WebRtcOpus_Decode(OpusDecInst* inst, const uint8_t* encoded,
- int16_t encoded_bytes, int16_t* decoded,
+ size_t encoded_bytes, int16_t* decoded,
int16_t* audio_type);
/****************************************************************************
@@ -276,7 +278,7 @@
* -1 - Error
*/
int WebRtcOpus_DecodeFec(OpusDecInst* inst, const uint8_t* encoded,
- int16_t encoded_bytes, int16_t* decoded,
+ size_t encoded_bytes, int16_t* decoded,
int16_t* audio_type);
/****************************************************************************
@@ -293,7 +295,7 @@
*/
int WebRtcOpus_DurationEst(OpusDecInst* inst,
const uint8_t* payload,
- int payload_length_bytes);
+ size_t payload_length_bytes);
/* TODO(minyue): Check whether it is needed to add a decoder context to the
* arguments, like WebRtcOpus_DurationEst(...). In fact, the packet itself tells
@@ -313,7 +315,7 @@
* 0 - No FEC data in the packet.
*/
int WebRtcOpus_FecDurationEst(const uint8_t* payload,
- int payload_length_bytes);
+ size_t payload_length_bytes);
/****************************************************************************
* WebRtcOpus_PacketHasFec(...)
@@ -327,7 +329,7 @@
* 1 - the packet contains FEC.
*/
int WebRtcOpus_PacketHasFec(const uint8_t* payload,
- int payload_length_bytes);
+ size_t payload_length_bytes);
#ifdef __cplusplus
} // extern "C"
diff --git a/webrtc/modules/audio_coding/codecs/opus/opus_fec_test.cc b/webrtc/modules/audio_coding/codecs/opus/opus_fec_test.cc
index f0ef70a..c86fab7 100644
--- a/webrtc/modules/audio_coding/codecs/opus/opus_fec_test.cc
+++ b/webrtc/modules/audio_coding/codecs/opus/opus_fec_test.cc
@@ -45,15 +45,15 @@
int block_duration_ms_;
int sampling_khz_;
- int block_length_sample_;
+ size_t block_length_sample_;
int channels_;
int bit_rate_;
size_t data_pointer_;
size_t loop_length_samples_;
- int max_bytes_;
- int encoded_bytes_;
+ size_t max_bytes_;
+ size_t encoded_bytes_;
WebRtcOpusEncInst* opus_encoder_;
WebRtcOpusDecInst* opus_decoder_;
@@ -122,7 +122,8 @@
OpusFecTest::OpusFecTest()
: block_duration_ms_(kOpusBlockDurationMs),
sampling_khz_(kOpusSamplingKhz),
- block_length_sample_(block_duration_ms_ * sampling_khz_),
+ block_length_sample_(
+ static_cast<size_t>(block_duration_ms_ * sampling_khz_)),
data_pointer_(0),
max_bytes_(0),
encoded_bytes_(0),
@@ -137,7 +138,7 @@
max_bytes_, &bit_stream_[0]);
EXPECT_GT(value, 0);
- encoded_bytes_ = value;
+ encoded_bytes_ = static_cast<size_t>(value);
}
void OpusFecTest::DecodeABlock(bool lost_previous, bool lost_current) {
@@ -154,14 +155,14 @@
} else {
value_1 = WebRtcOpus_DecodePlc(opus_decoder_, &out_data_[0], 1);
}
- EXPECT_EQ(block_length_sample_, value_1);
+ EXPECT_EQ(static_cast<int>(block_length_sample_), value_1);
}
if (!lost_current) {
// Decode current frame.
value_2 = WebRtcOpus_Decode(opus_decoder_, &bit_stream_[0], encoded_bytes_,
&out_data_[value_1 * channels_], &audio_type);
- EXPECT_EQ(block_length_sample_, value_2);
+ EXPECT_EQ(static_cast<int>(block_length_sample_), value_2);
}
}
diff --git a/webrtc/modules/audio_coding/codecs/opus/opus_interface.c b/webrtc/modules/audio_coding/codecs/opus/opus_interface.c
index e250616..e2a8383 100644
--- a/webrtc/modules/audio_coding/codecs/opus/opus_interface.c
+++ b/webrtc/modules/audio_coding/codecs/opus/opus_interface.c
@@ -80,8 +80,8 @@
int WebRtcOpus_Encode(OpusEncInst* inst,
const int16_t* audio_in,
- int16_t samples,
- int16_t length_encoded_buffer,
+ size_t samples,
+ size_t length_encoded_buffer,
uint8_t* encoded) {
int res;
@@ -91,9 +91,9 @@
res = opus_encode(inst->encoder,
(const opus_int16*)audio_in,
- samples,
+ (int)samples,
encoded,
- length_encoded_buffer);
+ (opus_int32)length_encoded_buffer);
if (res == 1) {
// Indicates DTX since the packet has nothing but a header. In principle,
@@ -260,7 +260,7 @@
}
/* For decoder to determine if it is to output speech or comfort noise. */
-static int16_t DetermineAudioType(OpusDecInst* inst, int16_t encoded_bytes) {
+static int16_t DetermineAudioType(OpusDecInst* inst, size_t encoded_bytes) {
// Audio type becomes comfort noise if |encoded_byte| is 1 and keeps
// to be so if the following |encoded_byte| are 0 or 1.
if (encoded_bytes == 0 && inst->in_dtx_mode) {
@@ -278,9 +278,9 @@
* is set to the number of samples needed for PLC in case of losses.
* It is up to the caller to make sure the value is correct. */
static int DecodeNative(OpusDecInst* inst, const uint8_t* encoded,
- int16_t encoded_bytes, int frame_size,
+ size_t encoded_bytes, int frame_size,
int16_t* decoded, int16_t* audio_type, int decode_fec) {
- int res = opus_decode(inst->decoder, encoded, encoded_bytes,
+ int res = opus_decode(inst->decoder, encoded, (opus_int32)encoded_bytes,
(opus_int16*)decoded, frame_size, decode_fec);
if (res <= 0)
@@ -292,7 +292,7 @@
}
int WebRtcOpus_Decode(OpusDecInst* inst, const uint8_t* encoded,
- int16_t encoded_bytes, int16_t* decoded,
+ size_t encoded_bytes, int16_t* decoded,
int16_t* audio_type) {
int decoded_samples;
@@ -340,7 +340,7 @@
}
int WebRtcOpus_DecodeFec(OpusDecInst* inst, const uint8_t* encoded,
- int16_t encoded_bytes, int16_t* decoded,
+ size_t encoded_bytes, int16_t* decoded,
int16_t* audio_type) {
int decoded_samples;
int fec_samples;
@@ -362,9 +362,9 @@
int WebRtcOpus_DurationEst(OpusDecInst* inst,
const uint8_t* payload,
- int payload_length_bytes) {
+ size_t payload_length_bytes) {
int frames, samples;
- frames = opus_packet_get_nb_frames(payload, payload_length_bytes);
+ frames = opus_packet_get_nb_frames(payload, (opus_int32)payload_length_bytes);
if (frames < 0) {
/* Invalid payload data. */
return 0;
@@ -378,7 +378,7 @@
}
int WebRtcOpus_FecDurationEst(const uint8_t* payload,
- int payload_length_bytes) {
+ size_t payload_length_bytes) {
int samples;
if (WebRtcOpus_PacketHasFec(payload, payload_length_bytes) != 1) {
return 0;
@@ -393,13 +393,13 @@
}
int WebRtcOpus_PacketHasFec(const uint8_t* payload,
- int payload_length_bytes) {
+ size_t payload_length_bytes) {
int frames, channels, payload_length_ms;
int n;
opus_int16 frame_sizes[48];
const unsigned char *frame_data[48];
- if (payload == NULL || payload_length_bytes <= 0)
+ if (payload == NULL || payload_length_bytes == 0)
return 0;
/* In CELT_ONLY mode, packets should not have FEC. */
@@ -432,8 +432,8 @@
}
/* The following is to parse the LBRR flags. */
- if (opus_packet_parse(payload, payload_length_bytes, NULL, frame_data,
- frame_sizes, NULL) < 0) {
+ if (opus_packet_parse(payload, (opus_int32)payload_length_bytes, NULL,
+ frame_data, frame_sizes, NULL) < 0) {
return 0;
}
diff --git a/webrtc/modules/audio_coding/codecs/opus/opus_speed_test.cc b/webrtc/modules/audio_coding/codecs/opus/opus_speed_test.cc
index b39de49..926bcaf 100644
--- a/webrtc/modules/audio_coding/codecs/opus/opus_speed_test.cc
+++ b/webrtc/modules/audio_coding/codecs/opus/opus_speed_test.cc
@@ -24,8 +24,8 @@
void SetUp() override;
void TearDown() override;
virtual float EncodeABlock(int16_t* in_data, uint8_t* bit_stream,
- int max_bytes, int* encoded_bytes);
- virtual float DecodeABlock(const uint8_t* bit_stream, int encoded_bytes,
+ size_t max_bytes, size_t* encoded_bytes);
+ virtual float DecodeABlock(const uint8_t* bit_stream, size_t encoded_bytes,
int16_t* out_data);
WebRtcOpusEncInst* opus_encoder_;
WebRtcOpusDecInst* opus_decoder_;
@@ -58,19 +58,19 @@
}
float OpusSpeedTest::EncodeABlock(int16_t* in_data, uint8_t* bit_stream,
- int max_bytes, int* encoded_bytes) {
+ size_t max_bytes, size_t* encoded_bytes) {
clock_t clocks = clock();
int value = WebRtcOpus_Encode(opus_encoder_, in_data,
input_length_sample_, max_bytes,
bit_stream);
clocks = clock() - clocks;
EXPECT_GT(value, 0);
- *encoded_bytes = value;
+ *encoded_bytes = static_cast<size_t>(value);
return 1000.0 * clocks / CLOCKS_PER_SEC;
}
float OpusSpeedTest::DecodeABlock(const uint8_t* bit_stream,
- int encoded_bytes, int16_t* out_data) {
+ size_t encoded_bytes, int16_t* out_data) {
int value;
int16_t audio_type;
clock_t clocks = clock();
diff --git a/webrtc/modules/audio_coding/codecs/opus/opus_unittest.cc b/webrtc/modules/audio_coding/codecs/opus/opus_unittest.cc
index e218a6b..2208f74 100644
--- a/webrtc/modules/audio_coding/codecs/opus/opus_unittest.cc
+++ b/webrtc/modules/audio_coding/codecs/opus/opus_unittest.cc
@@ -25,11 +25,11 @@
// Maximum number of bytes in output bitstream.
const size_t kMaxBytes = 1000;
// Sample rate of Opus.
-const int kOpusRateKhz = 48;
+const size_t kOpusRateKhz = 48;
// Number of samples-per-channel in a 20 ms frame, sampled at 48 kHz.
-const int kOpus20msFrameSamples = kOpusRateKhz * 20;
+const size_t kOpus20msFrameSamples = kOpusRateKhz * 20;
// Number of samples-per-channel in a 10 ms frame, sampled at 48 kHz.
-const int kOpus10msFrameSamples = kOpusRateKhz * 10;
+const size_t kOpus10msFrameSamples = kOpusRateKhz * 10;
class OpusTest : public TestWithParam<::testing::tuple<int, int>> {
protected:
@@ -45,7 +45,7 @@
int EncodeDecode(WebRtcOpusEncInst* encoder,
const int16_t* input_audio,
- int input_samples,
+ size_t input_samples,
WebRtcOpusDecInst* decoder,
int16_t* output_audio,
int16_t* audio_type);
@@ -58,7 +58,7 @@
AudioLoop speech_data_;
uint8_t bitstream_[kMaxBytes];
- int encoded_bytes_;
+ size_t encoded_bytes_;
int channels_;
int application_;
};
@@ -97,15 +97,14 @@
int OpusTest::EncodeDecode(WebRtcOpusEncInst* encoder,
const int16_t* input_audio,
- int input_samples,
+ size_t input_samples,
WebRtcOpusDecInst* decoder,
int16_t* output_audio,
int16_t* audio_type) {
- encoded_bytes_ = WebRtcOpus_Encode(encoder,
- input_audio,
- input_samples, kMaxBytes,
- bitstream_);
- EXPECT_GE(encoded_bytes_, 0);
+ int encoded_bytes_int = WebRtcOpus_Encode(encoder, input_audio, input_samples,
+ kMaxBytes, bitstream_);
+ EXPECT_GE(encoded_bytes_int, 0);
+ encoded_bytes_ = static_cast<size_t>(encoded_bytes_int);
return WebRtcOpus_Decode(decoder, bitstream_,
encoded_bytes_, output_audio,
audio_type);
@@ -139,13 +138,14 @@
for (int i = 0; i < 100; ++i) {
EXPECT_EQ(kOpus20msFrameSamples,
- EncodeDecode(opus_encoder_, speech_data_.GetNextBlock(),
- kOpus20msFrameSamples, opus_decoder_,
- output_data_decode, &audio_type));
+ static_cast<size_t>(EncodeDecode(
+ opus_encoder_, speech_data_.GetNextBlock(),
+ kOpus20msFrameSamples, opus_decoder_, output_data_decode,
+ &audio_type)));
// If not DTX, it should never enter DTX mode. If DTX, we do not care since
// whether it enters DTX depends on the signal type.
if (!dtx) {
- EXPECT_GT(encoded_bytes_, 1);
+ EXPECT_GT(encoded_bytes_, 1U);
EXPECT_EQ(0, opus_encoder_->in_dtx_mode);
EXPECT_EQ(0, opus_decoder_->in_dtx_mode);
EXPECT_EQ(0, audio_type); // Speech.
@@ -156,11 +156,11 @@
// However, DTX may happen after a while.
for (int i = 0; i < 30; ++i) {
EXPECT_EQ(kOpus20msFrameSamples,
- EncodeDecode(opus_encoder_, silence,
- kOpus20msFrameSamples, opus_decoder_,
- output_data_decode, &audio_type));
+ static_cast<size_t>(EncodeDecode(
+ opus_encoder_, silence, kOpus20msFrameSamples, opus_decoder_,
+ output_data_decode, &audio_type)));
if (!dtx) {
- EXPECT_GT(encoded_bytes_, 1);
+ EXPECT_GT(encoded_bytes_, 1U);
EXPECT_EQ(0, opus_encoder_->in_dtx_mode);
EXPECT_EQ(0, opus_decoder_->in_dtx_mode);
EXPECT_EQ(0, audio_type); // Speech.
@@ -180,17 +180,17 @@
// DTX mode is maintained 19 frames.
for (int i = 0; i < 19; ++i) {
EXPECT_EQ(kOpus20msFrameSamples,
- EncodeDecode(opus_encoder_, silence,
- kOpus20msFrameSamples, opus_decoder_,
- output_data_decode, &audio_type));
+ static_cast<size_t>(EncodeDecode(
+ opus_encoder_, silence, kOpus20msFrameSamples,
+ opus_decoder_, output_data_decode, &audio_type)));
if (dtx) {
- EXPECT_EQ(0, encoded_bytes_) // Send 0 byte.
+ EXPECT_EQ(0U, encoded_bytes_) // Send 0 byte.
<< "Opus should have entered DTX mode.";
EXPECT_EQ(1, opus_encoder_->in_dtx_mode);
EXPECT_EQ(1, opus_decoder_->in_dtx_mode);
EXPECT_EQ(2, audio_type); // Comfort noise.
} else {
- EXPECT_GT(encoded_bytes_, 1);
+ EXPECT_GT(encoded_bytes_, 1U);
EXPECT_EQ(0, opus_encoder_->in_dtx_mode);
EXPECT_EQ(0, opus_decoder_->in_dtx_mode);
EXPECT_EQ(0, audio_type); // Speech.
@@ -199,27 +199,27 @@
// Quit DTX after 19 frames.
EXPECT_EQ(kOpus20msFrameSamples,
- EncodeDecode(opus_encoder_, silence,
- kOpus20msFrameSamples, opus_decoder_,
- output_data_decode, &audio_type));
+ static_cast<size_t>(EncodeDecode(
+ opus_encoder_, silence, kOpus20msFrameSamples, opus_decoder_,
+ output_data_decode, &audio_type)));
- EXPECT_GT(encoded_bytes_, 1);
+ EXPECT_GT(encoded_bytes_, 1U);
EXPECT_EQ(0, opus_encoder_->in_dtx_mode);
EXPECT_EQ(0, opus_decoder_->in_dtx_mode);
EXPECT_EQ(0, audio_type); // Speech.
// Enters DTX again immediately.
EXPECT_EQ(kOpus20msFrameSamples,
- EncodeDecode(opus_encoder_, silence,
- kOpus20msFrameSamples, opus_decoder_,
- output_data_decode, &audio_type));
+ static_cast<size_t>(EncodeDecode(
+ opus_encoder_, silence, kOpus20msFrameSamples, opus_decoder_,
+ output_data_decode, &audio_type)));
if (dtx) {
- EXPECT_EQ(1, encoded_bytes_); // Send 1 byte.
+ EXPECT_EQ(1U, encoded_bytes_); // Send 1 byte.
EXPECT_EQ(1, opus_encoder_->in_dtx_mode);
EXPECT_EQ(1, opus_decoder_->in_dtx_mode);
EXPECT_EQ(2, audio_type); // Comfort noise.
} else {
- EXPECT_GT(encoded_bytes_, 1);
+ EXPECT_GT(encoded_bytes_, 1U);
EXPECT_EQ(0, opus_encoder_->in_dtx_mode);
EXPECT_EQ(0, opus_decoder_->in_dtx_mode);
EXPECT_EQ(0, audio_type); // Speech.
@@ -230,10 +230,10 @@
if (dtx) {
// Verify that encoder/decoder can jump out from DTX mode.
EXPECT_EQ(kOpus20msFrameSamples,
- EncodeDecode(opus_encoder_, silence,
- kOpus20msFrameSamples, opus_decoder_,
- output_data_decode, &audio_type));
- EXPECT_GT(encoded_bytes_, 1);
+ static_cast<size_t>(EncodeDecode(
+ opus_encoder_, silence, kOpus20msFrameSamples, opus_decoder_,
+ output_data_decode, &audio_type)));
+ EXPECT_GT(encoded_bytes_, 1U);
EXPECT_EQ(0, opus_encoder_->in_dtx_mode);
EXPECT_EQ(0, opus_decoder_->in_dtx_mode);
EXPECT_EQ(0, audio_type); // Speech.
@@ -311,9 +311,10 @@
int16_t audio_type;
int16_t* output_data_decode = new int16_t[kOpus20msFrameSamples * channels_];
EXPECT_EQ(kOpus20msFrameSamples,
- EncodeDecode(opus_encoder_, speech_data_.GetNextBlock(),
- kOpus20msFrameSamples, opus_decoder_,
- output_data_decode, &audio_type));
+ static_cast<size_t>(EncodeDecode(
+ opus_encoder_, speech_data_.GetNextBlock(),
+ kOpus20msFrameSamples, opus_decoder_, output_data_decode,
+ &audio_type)));
// Free memory.
delete[] output_data_decode;
@@ -370,16 +371,17 @@
int16_t audio_type;
int16_t* output_data_decode = new int16_t[kOpus20msFrameSamples * channels_];
EXPECT_EQ(kOpus20msFrameSamples,
- EncodeDecode(opus_encoder_, speech_data_.GetNextBlock(),
- kOpus20msFrameSamples, opus_decoder_,
- output_data_decode, &audio_type));
+ static_cast<size_t>(EncodeDecode(
+ opus_encoder_, speech_data_.GetNextBlock(),
+ kOpus20msFrameSamples, opus_decoder_, output_data_decode,
+ &audio_type)));
EXPECT_EQ(0, WebRtcOpus_DecoderInit(opus_decoder_));
EXPECT_EQ(kOpus20msFrameSamples,
- WebRtcOpus_Decode(opus_decoder_, bitstream_,
- encoded_bytes_, output_data_decode,
- &audio_type));
+ static_cast<size_t>(WebRtcOpus_Decode(
+ opus_decoder_, bitstream_, encoded_bytes_, output_data_decode,
+ &audio_type)));
// Free memory.
delete[] output_data_decode;
@@ -508,14 +510,16 @@
int16_t audio_type;
int16_t* output_data_decode = new int16_t[kOpus20msFrameSamples * channels_];
EXPECT_EQ(kOpus20msFrameSamples,
- EncodeDecode(opus_encoder_, speech_data_.GetNextBlock(),
- kOpus20msFrameSamples, opus_decoder_,
- output_data_decode, &audio_type));
+ static_cast<size_t>(EncodeDecode(
+ opus_encoder_, speech_data_.GetNextBlock(),
+ kOpus20msFrameSamples, opus_decoder_, output_data_decode,
+ &audio_type)));
// Call decoder PLC.
int16_t* plc_buffer = new int16_t[kOpus20msFrameSamples * channels_];
EXPECT_EQ(kOpus20msFrameSamples,
- WebRtcOpus_DecodePlc(opus_decoder_, plc_buffer, 1));
+ static_cast<size_t>(WebRtcOpus_DecodePlc(
+ opus_decoder_, plc_buffer, 1)));
// Free memory.
delete[] plc_buffer;
@@ -535,24 +539,26 @@
EXPECT_EQ(0, WebRtcOpus_DecoderCreate(&opus_decoder_, channels_));
// 10 ms. We use only first 10 ms of a 20 ms block.
- encoded_bytes_ = WebRtcOpus_Encode(opus_encoder_,
- speech_data_.GetNextBlock(),
- kOpus10msFrameSamples, kMaxBytes,
- bitstream_);
- EXPECT_GE(encoded_bytes_, 0);
+ int encoded_bytes_int = WebRtcOpus_Encode(opus_encoder_,
+ speech_data_.GetNextBlock(),
+ kOpus10msFrameSamples,
+ kMaxBytes, bitstream_);
+ EXPECT_GE(encoded_bytes_int, 0);
EXPECT_EQ(kOpus10msFrameSamples,
- WebRtcOpus_DurationEst(opus_decoder_, bitstream_,
- encoded_bytes_));
+ static_cast<size_t>(WebRtcOpus_DurationEst(
+ opus_decoder_, bitstream_,
+ static_cast<size_t>(encoded_bytes_int))));
// 20 ms
- encoded_bytes_ = WebRtcOpus_Encode(opus_encoder_,
- speech_data_.GetNextBlock(),
- kOpus20msFrameSamples, kMaxBytes,
- bitstream_);
- EXPECT_GE(encoded_bytes_, 0);
+ encoded_bytes_int = WebRtcOpus_Encode(opus_encoder_,
+ speech_data_.GetNextBlock(),
+ kOpus20msFrameSamples,
+ kMaxBytes, bitstream_);
+ EXPECT_GE(encoded_bytes_int, 0);
EXPECT_EQ(kOpus20msFrameSamples,
- WebRtcOpus_DurationEst(opus_decoder_, bitstream_,
- encoded_bytes_));
+ static_cast<size_t>(WebRtcOpus_DurationEst(
+ opus_decoder_, bitstream_,
+ static_cast<size_t>(encoded_bytes_int))));
// Free memory.
EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_encoder_));
@@ -595,11 +601,13 @@
encoded_bytes_ = opus_repacketizer_out(rp, bitstream_, kMaxBytes);
EXPECT_EQ(kOpus20msFrameSamples * kPackets,
- WebRtcOpus_DurationEst(opus_decoder_, bitstream_, encoded_bytes_));
+ static_cast<size_t>(WebRtcOpus_DurationEst(
+ opus_decoder_, bitstream_, encoded_bytes_)));
EXPECT_EQ(kOpus20msFrameSamples * kPackets,
- WebRtcOpus_Decode(opus_decoder_, bitstream_, encoded_bytes_,
- output_data_decode.get(), &audio_type));
+ static_cast<size_t>(WebRtcOpus_Decode(
+ opus_decoder_, bitstream_, encoded_bytes_,
+ output_data_decode.get(), &audio_type)));
// Free memory.
opus_repacketizer_destroy(rp);
diff --git a/webrtc/modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.cc b/webrtc/modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.cc
index 0c246c3..4ca6fe9 100644
--- a/webrtc/modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.cc
+++ b/webrtc/modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.cc
@@ -23,10 +23,10 @@
return AudioEncoderPcm::Config::IsOk();
}
-int16_t AudioEncoderPcm16B::EncodeCall(const int16_t* audio,
- size_t input_len,
- uint8_t* encoded) {
- return WebRtcPcm16b_Encode(audio, static_cast<int16_t>(input_len), encoded);
+size_t AudioEncoderPcm16B::EncodeCall(const int16_t* audio,
+ size_t input_len,
+ uint8_t* encoded) {
+ return WebRtcPcm16b_Encode(audio, input_len, encoded);
}
int AudioEncoderPcm16B::BytesPerSample() const {
diff --git a/webrtc/modules/audio_coding/codecs/pcm16b/include/audio_encoder_pcm16b.h b/webrtc/modules/audio_coding/codecs/pcm16b/include/audio_encoder_pcm16b.h
index f02cf92..6a0fb43 100644
--- a/webrtc/modules/audio_coding/codecs/pcm16b/include/audio_encoder_pcm16b.h
+++ b/webrtc/modules/audio_coding/codecs/pcm16b/include/audio_encoder_pcm16b.h
@@ -31,9 +31,9 @@
: AudioEncoderPcm(config, config.sample_rate_hz) {}
protected:
- int16_t EncodeCall(const int16_t* audio,
- size_t input_len,
- uint8_t* encoded) override;
+ size_t EncodeCall(const int16_t* audio,
+ size_t input_len,
+ uint8_t* encoded) override;
int BytesPerSample() const override;
};
diff --git a/webrtc/modules/audio_coding/codecs/pcm16b/include/pcm16b.h b/webrtc/modules/audio_coding/codecs/pcm16b/include/pcm16b.h
index 1cdf92d..d65d08a 100644
--- a/webrtc/modules/audio_coding/codecs/pcm16b/include/pcm16b.h
+++ b/webrtc/modules/audio_coding/codecs/pcm16b/include/pcm16b.h
@@ -14,6 +14,8 @@
* Define the fixpoint numeric formats
*/
+#include <stddef.h>
+
#include "webrtc/typedefs.h"
#ifdef __cplusplus
@@ -36,9 +38,9 @@
* Always equal to twice the len input parameter.
*/
-int16_t WebRtcPcm16b_Encode(const int16_t* speech,
- int16_t len,
- uint8_t* encoded);
+size_t WebRtcPcm16b_Encode(const int16_t* speech,
+ size_t len,
+ uint8_t* encoded);
/****************************************************************************
* WebRtcPcm16b_Decode(...)
@@ -55,9 +57,9 @@
* Returned value : Samples in speech
*/
-int16_t WebRtcPcm16b_Decode(const uint8_t* encoded,
- int16_t len,
- int16_t* speech);
+size_t WebRtcPcm16b_Decode(const uint8_t* encoded,
+ size_t len,
+ int16_t* speech);
#ifdef __cplusplus
}
diff --git a/webrtc/modules/audio_coding/codecs/pcm16b/pcm16b.c b/webrtc/modules/audio_coding/codecs/pcm16b/pcm16b.c
index b6de0b5..120c790 100644
--- a/webrtc/modules/audio_coding/codecs/pcm16b/pcm16b.c
+++ b/webrtc/modules/audio_coding/codecs/pcm16b/pcm16b.c
@@ -12,10 +12,10 @@
#include "webrtc/typedefs.h"
-int16_t WebRtcPcm16b_Encode(const int16_t* speech,
- int16_t len,
- uint8_t* encoded) {
- int i;
+size_t WebRtcPcm16b_Encode(const int16_t* speech,
+ size_t len,
+ uint8_t* encoded) {
+ size_t i;
for (i = 0; i < len; ++i) {
uint16_t s = speech[i];
encoded[2 * i] = s >> 8;
@@ -24,10 +24,10 @@
return 2 * len;
}
-int16_t WebRtcPcm16b_Decode(const uint8_t* encoded,
- int16_t len,
- int16_t* speech) {
- int i;
+size_t WebRtcPcm16b_Decode(const uint8_t* encoded,
+ size_t len,
+ int16_t* speech) {
+ size_t i;
for (i = 0; i < len / 2; ++i)
speech[i] = encoded[2 * i] << 8 | encoded[2 * i + 1];
return len / 2;
diff --git a/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.cc b/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.cc
index 16ba290..dccaf43 100644
--- a/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.cc
+++ b/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.cc
@@ -41,11 +41,11 @@
return 2 * speech_encoder_->MaxEncodedBytes();
}
-int AudioEncoderCopyRed::Num10MsFramesInNextPacket() const {
+size_t AudioEncoderCopyRed::Num10MsFramesInNextPacket() const {
return speech_encoder_->Num10MsFramesInNextPacket();
}
-int AudioEncoderCopyRed::Max10MsFramesInAPacket() const {
+size_t AudioEncoderCopyRed::Max10MsFramesInAPacket() const {
return speech_encoder_->Max10MsFramesInAPacket();
}
diff --git a/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.h b/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.h
index 78e1e9a..644255b 100644
--- a/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.h
+++ b/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.h
@@ -40,8 +40,8 @@
int NumChannels() const override;
size_t MaxEncodedBytes() const override;
int RtpTimestampRateHz() const override;
- int Num10MsFramesInNextPacket() const override;
- int Max10MsFramesInAPacket() const override;
+ size_t Num10MsFramesInNextPacket() const override;
+ size_t Max10MsFramesInAPacket() const override;
int GetTargetBitrate() const override;
void SetTargetBitrate(int bits_per_second) override;
void SetProjectedPacketLossRate(double fraction) override;
diff --git a/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red_unittest.cc b/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red_unittest.cc
index 4debdfa..a1ddf4b 100644
--- a/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red_unittest.cc
+++ b/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red_unittest.cc
@@ -113,13 +113,13 @@
}
TEST_F(AudioEncoderCopyRedTest, CheckFrameSizePropagation) {
- EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket()).WillOnce(Return(17));
- EXPECT_EQ(17, red_->Num10MsFramesInNextPacket());
+ EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket()).WillOnce(Return(17U));
+ EXPECT_EQ(17U, red_->Num10MsFramesInNextPacket());
}
TEST_F(AudioEncoderCopyRedTest, CheckMaxFrameSizePropagation) {
- EXPECT_CALL(mock_encoder_, Max10MsFramesInAPacket()).WillOnce(Return(17));
- EXPECT_EQ(17, red_->Max10MsFramesInAPacket());
+ EXPECT_CALL(mock_encoder_, Max10MsFramesInAPacket()).WillOnce(Return(17U));
+ EXPECT_EQ(17U, red_->Max10MsFramesInAPacket());
}
TEST_F(AudioEncoderCopyRedTest, CheckSetBitratePropagation) {
diff --git a/webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.cc b/webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.cc
index c7cafdf..3395721 100644
--- a/webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.cc
+++ b/webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.cc
@@ -65,7 +65,8 @@
memcpy(&in_data_[loop_length_samples_], &in_data_[0],
input_length_sample_ * channels_ * sizeof(int16_t));
- max_bytes_ = input_length_sample_ * channels_ * sizeof(int16_t);
+ max_bytes_ =
+ static_cast<size_t>(input_length_sample_ * channels_ * sizeof(int16_t));
out_data_.reset(new int16_t[output_length_sample_ * channels_]);
bit_stream_.reset(new uint8_t[max_bytes_]);
diff --git a/webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.h b/webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.h
index 35ac69e..2736c29 100644
--- a/webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.h
+++ b/webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.h
@@ -36,14 +36,14 @@
// 3. assign |encoded_bytes| with the length of the bit stream (in bytes),
// 4. return the cost of time (in millisecond) spent on actual encoding.
virtual float EncodeABlock(int16_t* in_data, uint8_t* bit_stream,
- int max_bytes, int* encoded_bytes) = 0;
+ size_t max_bytes, size_t* encoded_bytes) = 0;
// DecodeABlock(...) does the following:
// 1. decodes the bit stream in |bit_stream| with a length of |encoded_bytes|
// (in bytes),
// 2. save the decoded audio in |out_data|,
// 3. return the cost of time (in millisecond) spent on actual decoding.
- virtual float DecodeABlock(const uint8_t* bit_stream, int encoded_bytes,
+ virtual float DecodeABlock(const uint8_t* bit_stream, size_t encoded_bytes,
int16_t* out_data) = 0;
// Encoding and decode an audio of |audio_duration| (in seconds) and
@@ -67,9 +67,9 @@
rtc::scoped_ptr<uint8_t[]> bit_stream_;
// Maximum number of bytes in output bitstream for a frame of audio.
- int max_bytes_;
+ size_t max_bytes_;
- int encoded_bytes_;
+ size_t encoded_bytes_;
float encoding_time_ms_;
float decoding_time_ms_;
FILE* out_file_;
diff --git a/webrtc/modules/audio_coding/main/acm2/acm_receive_test.cc b/webrtc/modules/audio_coding/main/acm2/acm_receive_test.cc
index dc59984..b5a86d0 100644
--- a/webrtc/modules/audio_coding/main/acm2/acm_receive_test.cc
+++ b/webrtc/modules/audio_coding/main/acm2/acm_receive_test.cc
@@ -93,7 +93,8 @@
AudioFrame output_frame;
EXPECT_TRUE(acm_->Get10MsAudio(&output_frame));
EXPECT_EQ(output_freq_hz_, output_frame.sample_rate_hz_);
- const int samples_per_block = output_freq_hz_ * 10 / 1000;
+ const size_t samples_per_block =
+ static_cast<size_t>(output_freq_hz_ * 10 / 1000);
EXPECT_EQ(samples_per_block, output_frame.samples_per_channel_);
if (expected_output_channels_ != kArbitraryChannels) {
if (output_frame.speech_type_ == webrtc::AudioFrame::kPLC) {
diff --git a/webrtc/modules/audio_coding/main/acm2/acm_receive_test_oldapi.cc b/webrtc/modules/audio_coding/main/acm2/acm_receive_test_oldapi.cc
index dd570e6..2a0bbe1 100644
--- a/webrtc/modules/audio_coding/main/acm2/acm_receive_test_oldapi.cc
+++ b/webrtc/modules/audio_coding/main/acm2/acm_receive_test_oldapi.cc
@@ -160,7 +160,8 @@
AudioFrame output_frame;
EXPECT_EQ(0, acm_->PlayoutData10Ms(output_freq_hz_, &output_frame));
EXPECT_EQ(output_freq_hz_, output_frame.sample_rate_hz_);
- const int samples_per_block = output_freq_hz_ * 10 / 1000;
+ const size_t samples_per_block =
+ static_cast<size_t>(output_freq_hz_ * 10 / 1000);
EXPECT_EQ(samples_per_block, output_frame.samples_per_channel_);
if (exptected_output_channels_ != kArbitraryChannels) {
if (output_frame.speech_type_ == webrtc::AudioFrame::kPLC) {
diff --git a/webrtc/modules/audio_coding/main/acm2/acm_receiver.cc b/webrtc/modules/audio_coding/main/acm2/acm_receiver.cc
index 4c11197..1cefeb6 100644
--- a/webrtc/modules/audio_coding/main/acm2/acm_receiver.cc
+++ b/webrtc/modules/audio_coding/main/acm2/acm_receiver.cc
@@ -344,7 +344,7 @@
int AcmReceiver::GetAudio(int desired_freq_hz, AudioFrame* audio_frame) {
enum NetEqOutputType type;
- int samples_per_channel;
+ size_t samples_per_channel;
int num_channels;
bool return_silence = false;
@@ -394,7 +394,7 @@
}
// NetEq always returns 10 ms of audio.
- current_sample_rate_hz_ = samples_per_channel * 100;
+ current_sample_rate_hz_ = static_cast<int>(samples_per_channel * 100);
// Update if resampling is required.
bool need_resampling = (desired_freq_hz != -1) &&
@@ -403,18 +403,19 @@
if (need_resampling && !resampled_last_output_frame_) {
// Prime the resampler with the last frame.
int16_t temp_output[AudioFrame::kMaxDataSizeSamples];
- samples_per_channel =
+ int samples_per_channel_int =
resampler_.Resample10Msec(last_audio_buffer_.get(),
current_sample_rate_hz_,
desired_freq_hz,
num_channels,
AudioFrame::kMaxDataSizeSamples,
temp_output);
- if (samples_per_channel < 0) {
+ if (samples_per_channel_int < 0) {
LOG(LERROR) << "AcmReceiver::GetAudio - "
"Resampling last_audio_buffer_ failed.";
return -1;
}
+ samples_per_channel = static_cast<size_t>(samples_per_channel_int);
}
// The audio in |audio_buffer_| is tansferred to |audio_frame_| below, either
@@ -422,17 +423,18 @@
// TODO(henrik.lundin) Glitches in the output may appear if the output rate
// from NetEq changes. See WebRTC issue 3923.
if (need_resampling) {
- samples_per_channel =
+ int samples_per_channel_int =
resampler_.Resample10Msec(audio_buffer_.get(),
current_sample_rate_hz_,
desired_freq_hz,
num_channels,
AudioFrame::kMaxDataSizeSamples,
audio_frame->data_);
- if (samples_per_channel < 0) {
+ if (samples_per_channel_int < 0) {
LOG(LERROR) << "AcmReceiver::GetAudio - Resampling audio_buffer_ failed.";
return -1;
}
+ samples_per_channel = static_cast<size_t>(samples_per_channel_int);
resampled_last_output_frame_ = true;
} else {
resampled_last_output_frame_ = false;
@@ -448,7 +450,7 @@
audio_frame->num_channels_ = num_channels;
audio_frame->samples_per_channel_ = samples_per_channel;
- audio_frame->sample_rate_hz_ = samples_per_channel * 100;
+ audio_frame->sample_rate_hz_ = static_cast<int>(samples_per_channel * 100);
// Should set |vad_activity| before calling SetAudioFrameActivityAndType().
audio_frame->vad_activity_ = previous_audio_activity_;
@@ -787,10 +789,11 @@
frame->sample_rate_hz_ = current_sample_rate_hz_;
}
- frame->samples_per_channel_ = frame->sample_rate_hz_ / 100; // Always 10 ms.
+ frame->samples_per_channel_ =
+ static_cast<size_t>(frame->sample_rate_hz_ / 100); // Always 10 ms.
frame->speech_type_ = AudioFrame::kCNG;
frame->vad_activity_ = AudioFrame::kVadPassive;
- int samples = frame->samples_per_channel_ * frame->num_channels_;
+ size_t samples = frame->samples_per_channel_ * frame->num_channels_;
memset(frame->data_, 0, samples * sizeof(int16_t));
return true;
}
diff --git a/webrtc/modules/audio_coding/main/acm2/acm_resampler.cc b/webrtc/modules/audio_coding/main/acm2/acm_resampler.cc
index 97d87b1..2650725 100644
--- a/webrtc/modules/audio_coding/main/acm2/acm_resampler.cc
+++ b/webrtc/modules/audio_coding/main/acm2/acm_resampler.cc
@@ -29,9 +29,9 @@
int in_freq_hz,
int out_freq_hz,
int num_audio_channels,
- int out_capacity_samples,
+ size_t out_capacity_samples,
int16_t* out_audio) {
- int in_length = in_freq_hz * num_audio_channels / 100;
+ size_t in_length = static_cast<size_t>(in_freq_hz * num_audio_channels / 100);
int out_length = out_freq_hz * num_audio_channels / 100;
if (in_freq_hz == out_freq_hz) {
if (out_capacity_samples < in_length) {
@@ -39,7 +39,7 @@
return -1;
}
memcpy(out_audio, in_audio, in_length * sizeof(int16_t));
- return in_length / num_audio_channels;
+ return static_cast<int>(in_length / num_audio_channels);
}
if (resampler_.InitializeIfNeeded(in_freq_hz, out_freq_hz,
diff --git a/webrtc/modules/audio_coding/main/acm2/acm_resampler.h b/webrtc/modules/audio_coding/main/acm2/acm_resampler.h
index a8fc6b6..a19b0c4 100644
--- a/webrtc/modules/audio_coding/main/acm2/acm_resampler.h
+++ b/webrtc/modules/audio_coding/main/acm2/acm_resampler.h
@@ -26,7 +26,7 @@
int in_freq_hz,
int out_freq_hz,
int num_audio_channels,
- int out_capacity_samples,
+ size_t out_capacity_samples,
int16_t* out_audio);
private:
diff --git a/webrtc/modules/audio_coding/main/acm2/acm_send_test.cc b/webrtc/modules/audio_coding/main/acm2/acm_send_test.cc
index b96db6b..91df16f 100644
--- a/webrtc/modules/audio_coding/main/acm2/acm_send_test.cc
+++ b/webrtc/modules/audio_coding/main/acm2/acm_send_test.cc
@@ -29,7 +29,8 @@
: clock_(0),
audio_source_(audio_source),
source_rate_hz_(source_rate_hz),
- input_block_size_samples_(source_rate_hz_ * kBlockSizeMs / 1000),
+ input_block_size_samples_(
+ static_cast<size_t>(source_rate_hz_ * kBlockSizeMs / 1000)),
codec_registered_(false),
test_duration_ms_(test_duration_ms),
frame_type_(kAudioFrameSpeech),
diff --git a/webrtc/modules/audio_coding/main/acm2/acm_send_test.h b/webrtc/modules/audio_coding/main/acm2/acm_send_test.h
index 4c4db5b..09fe9e6 100644
--- a/webrtc/modules/audio_coding/main/acm2/acm_send_test.h
+++ b/webrtc/modules/audio_coding/main/acm2/acm_send_test.h
@@ -63,7 +63,7 @@
rtc::scoped_ptr<AudioCoding> acm_;
InputAudioFile* audio_source_;
int source_rate_hz_;
- const int input_block_size_samples_;
+ const size_t input_block_size_samples_;
AudioFrame input_frame_;
bool codec_registered_;
int test_duration_ms_;
diff --git a/webrtc/modules/audio_coding/main/acm2/acm_send_test_oldapi.cc b/webrtc/modules/audio_coding/main/acm2/acm_send_test_oldapi.cc
index 1819d59..74e98d9 100644
--- a/webrtc/modules/audio_coding/main/acm2/acm_send_test_oldapi.cc
+++ b/webrtc/modules/audio_coding/main/acm2/acm_send_test_oldapi.cc
@@ -31,7 +31,8 @@
acm_(webrtc::AudioCodingModule::Create(0, &clock_)),
audio_source_(audio_source),
source_rate_hz_(source_rate_hz),
- input_block_size_samples_(source_rate_hz_ * kBlockSizeMs / 1000),
+ input_block_size_samples_(
+ static_cast<size_t>(source_rate_hz_ * kBlockSizeMs / 1000)),
codec_registered_(false),
test_duration_ms_(test_duration_ms),
frame_type_(kAudioFrameSpeech),
diff --git a/webrtc/modules/audio_coding/main/acm2/acm_send_test_oldapi.h b/webrtc/modules/audio_coding/main/acm2/acm_send_test_oldapi.h
index 8cdc298..008e264 100644
--- a/webrtc/modules/audio_coding/main/acm2/acm_send_test_oldapi.h
+++ b/webrtc/modules/audio_coding/main/acm2/acm_send_test_oldapi.h
@@ -71,7 +71,7 @@
rtc::scoped_ptr<AudioCodingModule> acm_;
InputAudioFile* audio_source_;
int source_rate_hz_;
- const int input_block_size_samples_;
+ const size_t input_block_size_samples_;
AudioFrame input_frame_;
bool codec_registered_;
int test_duration_ms_;
diff --git a/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.cc b/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.cc
index 32d60a7..46980d3 100644
--- a/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.cc
+++ b/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.cc
@@ -76,22 +76,24 @@
}
// Stereo-to-mono can be used as in-place.
-int DownMix(const AudioFrame& frame, int length_out_buff, int16_t* out_buff) {
+int DownMix(const AudioFrame& frame,
+ size_t length_out_buff,
+ int16_t* out_buff) {
if (length_out_buff < frame.samples_per_channel_) {
return -1;
}
- for (int n = 0; n < frame.samples_per_channel_; ++n)
+ for (size_t n = 0; n < frame.samples_per_channel_; ++n)
out_buff[n] = (frame.data_[2 * n] + frame.data_[2 * n + 1]) >> 1;
return 0;
}
// Mono-to-stereo can be used as in-place.
-int UpMix(const AudioFrame& frame, int length_out_buff, int16_t* out_buff) {
+int UpMix(const AudioFrame& frame, size_t length_out_buff, int16_t* out_buff) {
if (length_out_buff < frame.samples_per_channel_) {
return -1;
}
- for (int n = frame.samples_per_channel_; n > 0; --n) {
- int i = n - 1;
+ for (size_t n = frame.samples_per_channel_; n != 0; --n) {
+ size_t i = n - 1;
int16_t sample = frame.data_[i];
out_buff[2 * i + 1] = sample;
out_buff[2 * i] = sample;
@@ -338,11 +340,10 @@
int AudioCodingModuleImpl::Add10MsDataInternal(const AudioFrame& audio_frame,
InputData* input_data) {
- if (audio_frame.samples_per_channel_ <= 0) {
+ if (audio_frame.samples_per_channel_ == 0) {
assert(false);
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
- "Cannot Add 10 ms audio, payload length is negative or "
- "zero");
+ "Cannot Add 10 ms audio, payload length is zero");
return -1;
}
@@ -354,7 +355,7 @@
}
// If the length and frequency matches. We currently just support raw PCM.
- if ((audio_frame.sample_rate_hz_ / 100) !=
+ if (static_cast<size_t>(audio_frame.sample_rate_hz_ / 100) !=
audio_frame.samples_per_channel_) {
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
"Cannot Add 10 ms audio, input frequency and length doesn't"
@@ -477,17 +478,19 @@
// The result of the resampler is written to output frame.
dest_ptr_audio = preprocess_frame_.data_;
- preprocess_frame_.samples_per_channel_ = resampler_.Resample10Msec(
+ int samples_per_channel = resampler_.Resample10Msec(
src_ptr_audio, in_frame.sample_rate_hz_,
codec_manager_.CurrentEncoder()->SampleRateHz(),
preprocess_frame_.num_channels_, AudioFrame::kMaxDataSizeSamples,
dest_ptr_audio);
- if (preprocess_frame_.samples_per_channel_ < 0) {
+ if (samples_per_channel < 0) {
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
"Cannot add 10 ms audio, resampling failed");
return -1;
}
+ preprocess_frame_.samples_per_channel_ =
+ static_cast<size_t>(samples_per_channel);
preprocess_frame_.sample_rate_hz_ =
codec_manager_.CurrentEncoder()->SampleRateHz();
}
diff --git a/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.h b/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.h
index beb49bc..c451854 100644
--- a/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.h
+++ b/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.h
@@ -248,7 +248,7 @@
struct InputData {
uint32_t input_timestamp;
const int16_t* audio;
- uint16_t length_per_channel;
+ size_t length_per_channel;
uint8_t audio_channel;
// If a re-mix is required (up or down), this buffer will store a re-mixed
// version of the input.
diff --git a/webrtc/modules/audio_coding/main/acm2/audio_coding_module_unittest.cc b/webrtc/modules/audio_coding/main/acm2/audio_coding_module_unittest.cc
index eea51a3..418ddd1 100644
--- a/webrtc/modules/audio_coding/main/acm2/audio_coding_module_unittest.cc
+++ b/webrtc/modules/audio_coding/main/acm2/audio_coding_module_unittest.cc
@@ -272,7 +272,8 @@
EXPECT_TRUE(acm_->Get10MsAudio(&audio_frame));
EXPECT_EQ(0u, audio_frame.timestamp_);
EXPECT_GT(audio_frame.num_channels_, 0);
- EXPECT_EQ(kSampleRateHz / 100, audio_frame.samples_per_channel_);
+ EXPECT_EQ(static_cast<size_t>(kSampleRateHz / 100),
+ audio_frame.samples_per_channel_);
EXPECT_EQ(kSampleRateHz, audio_frame.sample_rate_hz_);
}
diff --git a/webrtc/modules/audio_coding/main/acm2/audio_coding_module_unittest_oldapi.cc b/webrtc/modules/audio_coding/main/acm2/audio_coding_module_unittest_oldapi.cc
index 0af6af8..e5371d0 100644
--- a/webrtc/modules/audio_coding/main/acm2/audio_coding_module_unittest_oldapi.cc
+++ b/webrtc/modules/audio_coding/main/acm2/audio_coding_module_unittest_oldapi.cc
@@ -314,7 +314,8 @@
EXPECT_EQ(id_, audio_frame.id_);
EXPECT_EQ(0u, audio_frame.timestamp_);
EXPECT_GT(audio_frame.num_channels_, 0);
- EXPECT_EQ(kSampleRateHz / 100, audio_frame.samples_per_channel_);
+ EXPECT_EQ(static_cast<size_t>(kSampleRateHz / 100),
+ audio_frame.samples_per_channel_);
EXPECT_EQ(kSampleRateHz, audio_frame.sample_rate_hz_);
}
diff --git a/webrtc/modules/audio_coding/main/acm2/codec_manager.cc b/webrtc/modules/audio_coding/main/acm2/codec_manager.cc
index cad6ee9..7b9c7ed 100644
--- a/webrtc/modules/audio_coding/main/acm2/codec_manager.cc
+++ b/webrtc/modules/audio_coding/main/acm2/codec_manager.cc
@@ -326,10 +326,10 @@
// Make up a CodecInst.
send_codec_inst_.channels = external_speech_encoder->NumChannels();
send_codec_inst_.plfreq = external_speech_encoder->SampleRateHz();
- send_codec_inst_.pacsize =
- rtc::CheckedDivExact(external_speech_encoder->Max10MsFramesInAPacket() *
- send_codec_inst_.plfreq,
- 100);
+ send_codec_inst_.pacsize = rtc::CheckedDivExact(
+ static_cast<int>(external_speech_encoder->Max10MsFramesInAPacket() *
+ send_codec_inst_.plfreq),
+ 100);
send_codec_inst_.pltype = -1; // Not valid.
send_codec_inst_.rate = -1; // Not valid.
static const char kName[] = "external";
diff --git a/webrtc/modules/audio_coding/main/test/PCMFile.cc b/webrtc/modules/audio_coding/main/test/PCMFile.cc
index 4b08f75..d0ae783 100644
--- a/webrtc/modules/audio_coding/main/test/PCMFile.cc
+++ b/webrtc/modules/audio_coding/main/test/PCMFile.cc
@@ -150,7 +150,7 @@
}
} else {
int16_t* stereo_audio = new int16_t[2 * audio_frame.samples_per_channel_];
- for (int k = 0; k < audio_frame.samples_per_channel_; k++) {
+ for (size_t k = 0; k < audio_frame.samples_per_channel_; k++) {
stereo_audio[k << 1] = audio_frame.data_[k];
stereo_audio[(k << 1) + 1] = audio_frame.data_[k];
}
@@ -172,7 +172,7 @@
}
}
-void PCMFile::Write10MsData(int16_t* playout_buffer, uint16_t length_smpls) {
+void PCMFile::Write10MsData(int16_t* playout_buffer, size_t length_smpls) {
if (fwrite(playout_buffer, sizeof(uint16_t), length_smpls, pcm_file_) !=
length_smpls) {
return;
diff --git a/webrtc/modules/audio_coding/main/test/PCMFile.h b/webrtc/modules/audio_coding/main/test/PCMFile.h
index c4487b8..8353898 100644
--- a/webrtc/modules/audio_coding/main/test/PCMFile.h
+++ b/webrtc/modules/audio_coding/main/test/PCMFile.h
@@ -36,7 +36,7 @@
int32_t Read10MsData(AudioFrame& audio_frame);
- void Write10MsData(int16_t *playout_buffer, uint16_t length_smpls);
+ void Write10MsData(int16_t *playout_buffer, size_t length_smpls);
void Write10MsData(AudioFrame& audio_frame);
uint16_t PayloadLength10Ms() const;
diff --git a/webrtc/modules/audio_coding/main/test/SpatialAudio.cc b/webrtc/modules/audio_coding/main/test/SpatialAudio.cc
index b28c510..134d975 100644
--- a/webrtc/modules/audio_coding/main/test/SpatialAudio.cc
+++ b/webrtc/modules/audio_coding/main/test/SpatialAudio.cc
@@ -159,13 +159,13 @@
while (!_inFile.EndOfFile()) {
_inFile.Read10MsData(audioFrame);
- for (int n = 0; n < audioFrame.samples_per_channel_; n++) {
+ for (size_t n = 0; n < audioFrame.samples_per_channel_; n++) {
audioFrame.data_[n] = (int16_t) floor(
audioFrame.data_[n] * leftPanning + 0.5);
}
CHECK_ERROR(_acmLeft->Add10MsData(audioFrame));
- for (int n = 0; n < audioFrame.samples_per_channel_; n++) {
+ for (size_t n = 0; n < audioFrame.samples_per_channel_; n++) {
audioFrame.data_[n] = (int16_t) floor(
audioFrame.data_[n] * rightToLeftRatio + 0.5);
}
diff --git a/webrtc/modules/audio_coding/main/test/initial_delay_unittest.cc b/webrtc/modules/audio_coding/main/test/initial_delay_unittest.cc
index ffbbc8c..0bac401 100644
--- a/webrtc/modules/audio_coding/main/test/initial_delay_unittest.cc
+++ b/webrtc/modules/audio_coding/main/test/initial_delay_unittest.cc
@@ -32,9 +32,9 @@
namespace {
double FrameRms(AudioFrame& frame) {
- int samples = frame.num_channels_ * frame.samples_per_channel_;
+ size_t samples = frame.num_channels_ * frame.samples_per_channel_;
double rms = 0;
- for (int n = 0; n < samples; ++n)
+ for (size_t n = 0; n < samples; ++n)
rms += frame.data_[n] * frame.data_[n];
rms /= samples;
rms = sqrt(rms);
@@ -132,9 +132,9 @@
in_audio_frame.sample_rate_hz_ = codec.plfreq;
in_audio_frame.num_channels_ = codec.channels;
in_audio_frame.samples_per_channel_ = codec.plfreq / 100; // 10 ms.
- int samples = in_audio_frame.num_channels_ *
+ size_t samples = in_audio_frame.num_channels_ *
in_audio_frame.samples_per_channel_;
- for (int n = 0; n < samples; ++n) {
+ for (size_t n = 0; n < samples; ++n) {
in_audio_frame.data_[n] = kAmp;
}
diff --git a/webrtc/modules/audio_coding/main/test/opus_test.cc b/webrtc/modules/audio_coding/main/test/opus_test.cc
index c61d25a..79124aa 100644
--- a/webrtc/modules/audio_coding/main/test/opus_test.cc
+++ b/webrtc/modules/audio_coding/main/test/opus_test.cc
@@ -270,14 +270,14 @@
if (loop_encode > 0) {
const int kMaxBytes = 1000; // Maximum number of bytes for one packet.
- int16_t bitstream_len_byte;
+ size_t bitstream_len_byte;
uint8_t bitstream[kMaxBytes];
for (int i = 0; i < loop_encode; i++) {
int bitstream_len_byte_int = WebRtcOpus_Encode(
(channels == 1) ? opus_mono_encoder_ : opus_stereo_encoder_,
&audio[read_samples], frame_length, kMaxBytes, bitstream);
ASSERT_GE(bitstream_len_byte_int, 0);
- bitstream_len_byte = static_cast<int16_t>(bitstream_len_byte_int);
+ bitstream_len_byte = static_cast<size_t>(bitstream_len_byte_int);
// Simulate packet loss by setting |packet_loss_| to "true" in
// |percent_loss| percent of the loops.
@@ -341,7 +341,8 @@
audio_frame.samples_per_channel_ * audio_frame.num_channels_);
// Write stand-alone speech to file.
- out_file_standalone_.Write10MsData(out_audio, decoded_samples * channels);
+ out_file_standalone_.Write10MsData(
+ out_audio, static_cast<size_t>(decoded_samples) * channels);
if (audio_frame.timestamp_ > start_time_stamp) {
// Number of channels should be the same for both stand-alone and
diff --git a/webrtc/modules/audio_coding/neteq/accelerate.cc b/webrtc/modules/audio_coding/neteq/accelerate.cc
index ad74238..1c36fa8 100644
--- a/webrtc/modules/audio_coding/neteq/accelerate.cc
+++ b/webrtc/modules/audio_coding/neteq/accelerate.cc
@@ -18,11 +18,11 @@
size_t input_length,
bool fast_accelerate,
AudioMultiVector* output,
- int16_t* length_change_samples) {
+ size_t* length_change_samples) {
// Input length must be (almost) 30 ms.
- static const int k15ms = 120; // 15 ms = 120 samples at 8 kHz sample rate.
- if (num_channels_ == 0 || static_cast<int>(input_length) / num_channels_ <
- (2 * k15ms - 1) * fs_mult_) {
+ static const size_t k15ms = 120; // 15 ms = 120 samples at 8 kHz sample rate.
+ if (num_channels_ == 0 ||
+ input_length / num_channels_ < (2 * k15ms - 1) * fs_mult_) {
// Length of input data too short to do accelerate. Simply move all data
// from input to output.
output->PushBackInterleaved(input, input_length);
@@ -34,7 +34,7 @@
void Accelerate::SetParametersForPassiveSpeech(size_t /*len*/,
int16_t* best_correlation,
- int* /*peak_index*/) const {
+ size_t* /*peak_index*/) const {
// When the signal does not contain any active speech, the correlation does
// not matter. Simply set it to zero.
*best_correlation = 0;
diff --git a/webrtc/modules/audio_coding/neteq/accelerate.h b/webrtc/modules/audio_coding/neteq/accelerate.h
index 684f74b..1238b77 100644
--- a/webrtc/modules/audio_coding/neteq/accelerate.h
+++ b/webrtc/modules/audio_coding/neteq/accelerate.h
@@ -45,14 +45,14 @@
size_t input_length,
bool fast_accelerate,
AudioMultiVector* output,
- int16_t* length_change_samples);
+ size_t* length_change_samples);
protected:
// Sets the parameters |best_correlation| and |peak_index| to suitable
// values when the signal contains no active speech.
void SetParametersForPassiveSpeech(size_t len,
int16_t* best_correlation,
- int* peak_index) const override;
+ size_t* peak_index) const override;
// Checks the criteria for performing the time-stretching operation and,
// if possible, performs the time-stretching.
diff --git a/webrtc/modules/audio_coding/neteq/audio_decoder_impl.cc b/webrtc/modules/audio_coding/neteq/audio_decoder_impl.cc
index 53dc033..769f0b0 100644
--- a/webrtc/modules/audio_coding/neteq/audio_decoder_impl.cc
+++ b/webrtc/modules/audio_coding/neteq/audio_decoder_impl.cc
@@ -53,10 +53,9 @@
SpeechType* speech_type) {
DCHECK_EQ(sample_rate_hz, 8000);
int16_t temp_type = 1; // Default is speech.
- int16_t ret = WebRtcG711_DecodeU(encoded, static_cast<int16_t>(encoded_len),
- decoded, &temp_type);
+ size_t ret = WebRtcG711_DecodeU(encoded, encoded_len, decoded, &temp_type);
*speech_type = ConvertSpeechType(temp_type);
- return ret;
+ return static_cast<int>(ret);
}
int AudioDecoderPcmU::PacketDuration(const uint8_t* encoded,
@@ -85,10 +84,9 @@
SpeechType* speech_type) {
DCHECK_EQ(sample_rate_hz, 8000);
int16_t temp_type = 1; // Default is speech.
- int16_t ret = WebRtcG711_DecodeA(encoded, static_cast<int16_t>(encoded_len),
- decoded, &temp_type);
+ size_t ret = WebRtcG711_DecodeA(encoded, encoded_len, decoded, &temp_type);
*speech_type = ConvertSpeechType(temp_type);
- return ret;
+ return static_cast<int>(ret);
}
int AudioDecoderPcmA::PacketDuration(const uint8_t* encoded,
@@ -120,10 +118,9 @@
DCHECK(sample_rate_hz == 8000 || sample_rate_hz == 16000 ||
sample_rate_hz == 32000 || sample_rate_hz == 48000)
<< "Unsupported sample rate " << sample_rate_hz;
- int16_t ret =
- WebRtcPcm16b_Decode(encoded, static_cast<int16_t>(encoded_len), decoded);
+ size_t ret = WebRtcPcm16b_Decode(encoded, encoded_len, decoded);
*speech_type = ConvertSpeechType(1);
- return ret;
+ return static_cast<int>(ret);
}
int AudioDecoderPcm16B::PacketDuration(const uint8_t* encoded,
@@ -132,7 +129,7 @@
return static_cast<int>(encoded_len / (2 * Channels()));
}
-AudioDecoderPcm16BMultiCh::AudioDecoderPcm16BMultiCh(int num_channels)
+AudioDecoderPcm16BMultiCh::AudioDecoderPcm16BMultiCh(size_t num_channels)
: channels_(num_channels) {
DCHECK(num_channels > 0);
}
@@ -163,14 +160,13 @@
SpeechType* speech_type) {
DCHECK_EQ(sample_rate_hz, 8000);
int16_t temp_type = 1; // Default is speech.
- int ret = WebRtcIlbcfix_Decode(dec_state_, encoded,
- static_cast<int16_t>(encoded_len), decoded,
+ int ret = WebRtcIlbcfix_Decode(dec_state_, encoded, encoded_len, decoded,
&temp_type);
*speech_type = ConvertSpeechType(temp_type);
return ret;
}
-int AudioDecoderIlbc::DecodePlc(int num_frames, int16_t* decoded) {
+size_t AudioDecoderIlbc::DecodePlc(size_t num_frames, int16_t* decoded) {
return WebRtcIlbcfix_NetEqPlc(dec_state_, decoded, num_frames);
}
@@ -204,11 +200,10 @@
SpeechType* speech_type) {
DCHECK_EQ(sample_rate_hz, 16000);
int16_t temp_type = 1; // Default is speech.
- int16_t ret =
- WebRtcG722_Decode(dec_state_, encoded, static_cast<int16_t>(encoded_len),
- decoded, &temp_type);
+ size_t ret =
+ WebRtcG722_Decode(dec_state_, encoded, encoded_len, decoded, &temp_type);
*speech_type = ConvertSpeechType(temp_type);
- return ret;
+ return static_cast<int>(ret);
}
int AudioDecoderG722::Init() {
@@ -246,29 +241,24 @@
uint8_t* encoded_deinterleaved = new uint8_t[encoded_len];
SplitStereoPacket(encoded, encoded_len, encoded_deinterleaved);
// Decode left and right.
- int16_t ret = WebRtcG722_Decode(dec_state_left_, encoded_deinterleaved,
- static_cast<int16_t>(encoded_len / 2),
- decoded, &temp_type);
- if (ret >= 0) {
- int decoded_len = ret;
- ret = WebRtcG722_Decode(dec_state_right_,
- &encoded_deinterleaved[encoded_len / 2],
- static_cast<int16_t>(encoded_len / 2),
- &decoded[decoded_len], &temp_type);
- if (ret == decoded_len) {
- ret += decoded_len; // Return total number of samples.
- // Interleave output.
- for (int k = ret / 2; k < ret; k++) {
- int16_t temp = decoded[k];
- memmove(&decoded[2 * k - ret + 2], &decoded[2 * k - ret + 1],
- (ret - k - 1) * sizeof(int16_t));
- decoded[2 * k - ret + 1] = temp;
- }
+ size_t decoded_len = WebRtcG722_Decode(dec_state_left_, encoded_deinterleaved,
+ encoded_len / 2, decoded, &temp_type);
+ size_t ret = WebRtcG722_Decode(
+ dec_state_right_, &encoded_deinterleaved[encoded_len / 2],
+ encoded_len / 2, &decoded[decoded_len], &temp_type);
+ if (ret == decoded_len) {
+ ret += decoded_len; // Return total number of samples.
+ // Interleave output.
+ for (size_t k = ret / 2; k < ret; k++) {
+ int16_t temp = decoded[k];
+ memmove(&decoded[2 * k - ret + 2], &decoded[2 * k - ret + 1],
+ (ret - k - 1) * sizeof(int16_t));
+ decoded[2 * k - ret + 1] = temp;
}
}
*speech_type = ConvertSpeechType(temp_type);
delete [] encoded_deinterleaved;
- return ret;
+ return static_cast<int>(ret);
}
size_t AudioDecoderG722Stereo::Channels() const {
@@ -312,7 +302,8 @@
// Opus
#ifdef WEBRTC_CODEC_OPUS
-AudioDecoderOpus::AudioDecoderOpus(int num_channels) : channels_(num_channels) {
+AudioDecoderOpus::AudioDecoderOpus(size_t num_channels)
+ : channels_(num_channels) {
DCHECK(num_channels == 1 || num_channels == 2);
WebRtcOpus_DecoderCreate(&dec_state_, static_cast<int>(channels_));
}
@@ -328,8 +319,7 @@
SpeechType* speech_type) {
DCHECK_EQ(sample_rate_hz, 48000);
int16_t temp_type = 1; // Default is speech.
- int ret = WebRtcOpus_Decode(dec_state_, encoded,
- static_cast<int16_t>(encoded_len), decoded,
+ int ret = WebRtcOpus_Decode(dec_state_, encoded, encoded_len, decoded,
&temp_type);
if (ret > 0)
ret *= static_cast<int>(channels_); // Return total number of samples.
@@ -350,8 +340,7 @@
DCHECK_EQ(sample_rate_hz, 48000);
int16_t temp_type = 1; // Default is speech.
- int ret = WebRtcOpus_DecodeFec(dec_state_, encoded,
- static_cast<int16_t>(encoded_len), decoded,
+ int ret = WebRtcOpus_DecodeFec(dec_state_, encoded, encoded_len, decoded,
&temp_type);
if (ret > 0)
ret *= static_cast<int>(channels_); // Return total number of samples.
@@ -365,8 +354,7 @@
int AudioDecoderOpus::PacketDuration(const uint8_t* encoded,
size_t encoded_len) const {
- return WebRtcOpus_DurationEst(dec_state_,
- encoded, static_cast<int>(encoded_len));
+ return WebRtcOpus_DurationEst(dec_state_, encoded, encoded_len);
}
int AudioDecoderOpus::PacketDurationRedundant(const uint8_t* encoded,
@@ -376,13 +364,13 @@
return PacketDuration(encoded, encoded_len);
}
- return WebRtcOpus_FecDurationEst(encoded, static_cast<int>(encoded_len));
+ return WebRtcOpus_FecDurationEst(encoded, encoded_len);
}
bool AudioDecoderOpus::PacketHasFec(const uint8_t* encoded,
size_t encoded_len) const {
int fec;
- fec = WebRtcOpus_PacketHasFec(encoded, static_cast<int>(encoded_len));
+ fec = WebRtcOpus_PacketHasFec(encoded, encoded_len);
return (fec == 1);
}
diff --git a/webrtc/modules/audio_coding/neteq/audio_decoder_impl.h b/webrtc/modules/audio_coding/neteq/audio_decoder_impl.h
index 202d79d..427a0a6 100644
--- a/webrtc/modules/audio_coding/neteq/audio_decoder_impl.h
+++ b/webrtc/modules/audio_coding/neteq/audio_decoder_impl.h
@@ -122,7 +122,7 @@
// of channels is derived from the type.
class AudioDecoderPcm16BMultiCh : public AudioDecoderPcm16B {
public:
- explicit AudioDecoderPcm16BMultiCh(int num_channels);
+ explicit AudioDecoderPcm16BMultiCh(size_t num_channels);
size_t Channels() const override;
private:
@@ -137,7 +137,7 @@
AudioDecoderIlbc();
~AudioDecoderIlbc() override;
bool HasDecodePlc() const override;
- int DecodePlc(int num_frames, int16_t* decoded) override;
+ size_t DecodePlc(size_t num_frames, int16_t* decoded) override;
int Init() override;
size_t Channels() const override;
@@ -209,7 +209,7 @@
#ifdef WEBRTC_CODEC_OPUS
class AudioDecoderOpus : public AudioDecoder {
public:
- explicit AudioDecoderOpus(int num_channels);
+ explicit AudioDecoderOpus(size_t num_channels);
~AudioDecoderOpus() override;
int Init() override;
diff --git a/webrtc/modules/audio_coding/neteq/audio_decoder_unittest.cc b/webrtc/modules/audio_coding/neteq/audio_decoder_unittest.cc
index 3983c07..a2ef9d1 100644
--- a/webrtc/modules/audio_coding/neteq/audio_decoder_unittest.cc
+++ b/webrtc/modules/audio_coding/neteq/audio_decoder_unittest.cc
@@ -141,7 +141,7 @@
input_len_samples);
rtc::scoped_ptr<int16_t[]> interleaved_input(
new int16_t[channels_ * samples_per_10ms]);
- for (int i = 0; i < audio_encoder_->Num10MsFramesInNextPacket(); ++i) {
+ for (size_t i = 0; i < audio_encoder_->Num10MsFramesInNextPacket(); ++i) {
EXPECT_EQ(0u, encoded_info_.encoded_bytes);
// Duplicate the mono input signal to however many channels the test
@@ -348,7 +348,7 @@
output.get(), &speech_type);
EXPECT_EQ(frame_size_, dec_len);
// Simply call DecodePlc and verify that we get 0 as return value.
- EXPECT_EQ(0, decoder_->DecodePlc(1, output.get()));
+ EXPECT_EQ(0U, decoder_->DecodePlc(1, output.get()));
}
};
diff --git a/webrtc/modules/audio_coding/neteq/background_noise.cc b/webrtc/modules/audio_coding/neteq/background_noise.cc
index a59f444..d3df269 100644
--- a/webrtc/modules/audio_coding/neteq/background_noise.cc
+++ b/webrtc/modules/audio_coding/neteq/background_noise.cc
@@ -21,6 +21,9 @@
namespace webrtc {
+// static
+const size_t BackgroundNoise::kMaxLpcOrder;
+
BackgroundNoise::BackgroundNoise(size_t num_channels)
: num_channels_(num_channels),
channel_parameters_(new ChannelParameters[num_channels_]),
@@ -150,7 +153,7 @@
void BackgroundNoise::SetFilterState(size_t channel, const int16_t* input,
size_t length) {
assert(channel < num_channels_);
- length = std::min(length, static_cast<size_t>(kMaxLpcOrder));
+ length = std::min(length, kMaxLpcOrder);
memcpy(channel_parameters_[channel].filter_state, input,
length * sizeof(int16_t));
}
@@ -165,7 +168,7 @@
}
int32_t BackgroundNoise::CalculateAutoCorrelation(
- const int16_t* signal, int length, int32_t* auto_correlation) const {
+ const int16_t* signal, size_t length, int32_t* auto_correlation) const {
int16_t signal_max = WebRtcSpl_MaxAbsValueW16(signal, length);
int correlation_scale = kLogVecLen -
WebRtcSpl_NormW32(signal_max * signal_max);
@@ -247,7 +250,7 @@
residual_energy = residual_energy << norm_shift;
// Calculate scale and shift factor.
- parameters.scale = WebRtcSpl_SqrtFloor(residual_energy);
+ parameters.scale = static_cast<int16_t>(WebRtcSpl_SqrtFloor(residual_energy));
// Add 13 to the |scale_shift_|, since the random numbers table is in
// Q13.
// TODO(hlundin): Move the "13" to where the |scale_shift_| is used?
diff --git a/webrtc/modules/audio_coding/neteq/background_noise.h b/webrtc/modules/audio_coding/neteq/background_noise.h
index baf1818..9ad12b7 100644
--- a/webrtc/modules/audio_coding/neteq/background_noise.h
+++ b/webrtc/modules/audio_coding/neteq/background_noise.h
@@ -29,7 +29,7 @@
public:
// TODO(hlundin): For 48 kHz support, increase kMaxLpcOrder to 10.
// Will work anyway, but probably sound a little worse.
- static const int kMaxLpcOrder = 8; // 32000 / 8000 + 4.
+ static const size_t kMaxLpcOrder = 8; // 32000 / 8000 + 4.
explicit BackgroundNoise(size_t num_channels);
virtual ~BackgroundNoise();
@@ -76,9 +76,9 @@
private:
static const int kThresholdIncrement = 229; // 0.0035 in Q16.
- static const int kVecLen = 256;
+ static const size_t kVecLen = 256;
static const int kLogVecLen = 8; // log2(kVecLen).
- static const int kResidualLength = 64;
+ static const size_t kResidualLength = 64;
static const int16_t kLogResidualLength = 6; // log2(kResidualLength)
struct ChannelParameters {
@@ -112,7 +112,7 @@
};
int32_t CalculateAutoCorrelation(const int16_t* signal,
- int length,
+ size_t length,
int32_t* auto_correlation) const;
// Increments the energy threshold by a factor 1 + |kThresholdIncrement|.
diff --git a/webrtc/modules/audio_coding/neteq/buffer_level_filter.cc b/webrtc/modules/audio_coding/neteq/buffer_level_filter.cc
index 93f9a55..9054791 100644
--- a/webrtc/modules/audio_coding/neteq/buffer_level_filter.cc
+++ b/webrtc/modules/audio_coding/neteq/buffer_level_filter.cc
@@ -23,16 +23,16 @@
level_factor_ = 253;
}
-void BufferLevelFilter::Update(int buffer_size_packets,
+void BufferLevelFilter::Update(size_t buffer_size_packets,
int time_stretched_samples,
- int packet_len_samples) {
+ size_t packet_len_samples) {
// Filter:
// |filtered_current_level_| = |level_factor_| * |filtered_current_level_| +
// (1 - |level_factor_|) * |buffer_size_packets|
// |level_factor_| and |filtered_current_level_| are in Q8.
// |buffer_size_packets| is in Q0.
filtered_current_level_ = ((level_factor_ * filtered_current_level_) >> 8) +
- ((256 - level_factor_) * buffer_size_packets);
+ ((256 - level_factor_) * static_cast<int>(buffer_size_packets));
// Account for time-scale operations (accelerate and pre-emptive expand).
if (time_stretched_samples && packet_len_samples > 0) {
@@ -42,7 +42,7 @@
// Make sure that the filtered value remains non-negative.
filtered_current_level_ = std::max(0,
filtered_current_level_ -
- (time_stretched_samples << 8) / packet_len_samples);
+ (time_stretched_samples << 8) / static_cast<int>(packet_len_samples));
}
}
diff --git a/webrtc/modules/audio_coding/neteq/buffer_level_filter.h b/webrtc/modules/audio_coding/neteq/buffer_level_filter.h
index 2d2a888..add3cc4 100644
--- a/webrtc/modules/audio_coding/neteq/buffer_level_filter.h
+++ b/webrtc/modules/audio_coding/neteq/buffer_level_filter.h
@@ -11,6 +11,8 @@
#ifndef WEBRTC_MODULES_AUDIO_CODING_NETEQ_BUFFER_LEVEL_FILTER_H_
#define WEBRTC_MODULES_AUDIO_CODING_NETEQ_BUFFER_LEVEL_FILTER_H_
+#include <stddef.h>
+
#include "webrtc/base/constructormagic.h"
namespace webrtc {
@@ -26,8 +28,8 @@
// corresponding number of packets, and is subtracted from the filtered
// value (thus bypassing the filter operation). |packet_len_samples| is the
// number of audio samples carried in each incoming packet.
- virtual void Update(int buffer_size_packets, int time_stretched_samples,
- int packet_len_samples);
+ virtual void Update(size_t buffer_size_packets, int time_stretched_samples,
+ size_t packet_len_samples);
// Set the current target buffer level (obtained from
// DelayManager::base_target_level()). Used to select the appropriate
diff --git a/webrtc/modules/audio_coding/neteq/comfort_noise.cc b/webrtc/modules/audio_coding/neteq/comfort_noise.cc
index da9683b..3fe6607 100644
--- a/webrtc/modules/audio_coding/neteq/comfort_noise.cc
+++ b/webrtc/modules/audio_coding/neteq/comfort_noise.cc
@@ -79,8 +79,7 @@
CNG_dec_inst* cng_inst = cng_decoder->CngDecoderInstance();
// The expression &(*output)[0][0] is a pointer to the first element in
// the first channel.
- if (WebRtcCng_Generate(cng_inst, &(*output)[0][0],
- static_cast<int16_t>(number_of_samples),
+ if (WebRtcCng_Generate(cng_inst, &(*output)[0][0], number_of_samples,
new_period) < 0) {
// Error returned.
output->Zeros(requested_length);
diff --git a/webrtc/modules/audio_coding/neteq/decision_logic.cc b/webrtc/modules/audio_coding/neteq/decision_logic.cc
index 5fb054c..eb10e65 100644
--- a/webrtc/modules/audio_coding/neteq/decision_logic.cc
+++ b/webrtc/modules/audio_coding/neteq/decision_logic.cc
@@ -24,7 +24,7 @@
namespace webrtc {
DecisionLogic* DecisionLogic::Create(int fs_hz,
- int output_size_samples,
+ size_t output_size_samples,
NetEqPlayoutMode playout_mode,
DecoderDatabase* decoder_database,
const PacketBuffer& packet_buffer,
@@ -56,7 +56,7 @@
}
DecisionLogic::DecisionLogic(int fs_hz,
- int output_size_samples,
+ size_t output_size_samples,
NetEqPlayoutMode playout_mode,
DecoderDatabase* decoder_database,
const PacketBuffer& packet_buffer,
@@ -95,7 +95,7 @@
timescale_hold_off_ = kMinTimescaleInterval;
}
-void DecisionLogic::SetSampleRate(int fs_hz, int output_size_samples) {
+void DecisionLogic::SetSampleRate(int fs_hz, size_t output_size_samples) {
// TODO(hlundin): Change to an enumerator and skip assert.
assert(fs_hz == 8000 || fs_hz == 16000 || fs_hz == 32000 || fs_hz == 48000);
fs_mult_ = fs_hz / 8000;
@@ -104,7 +104,7 @@
Operations DecisionLogic::GetDecision(const SyncBuffer& sync_buffer,
const Expand& expand,
- int decoder_frame_length,
+ size_t decoder_frame_length,
const RTPHeader* packet_header,
Modes prev_mode,
bool play_dtmf, bool* reset_decoder) {
@@ -123,9 +123,9 @@
}
}
- const int samples_left = static_cast<int>(
- sync_buffer.FutureLength() - expand.overlap_length());
- const int cur_size_samples =
+ const size_t samples_left =
+ sync_buffer.FutureLength() - expand.overlap_length();
+ const size_t cur_size_samples =
samples_left + packet_buffer_.NumSamplesInBuffer(decoder_database_,
decoder_frame_length);
LOG(LS_VERBOSE) << "Buffers: " << packet_buffer_.NumPacketsInBuffer() <<
@@ -153,9 +153,10 @@
}
}
-void DecisionLogic::FilterBufferLevel(int buffer_size_samples,
+void DecisionLogic::FilterBufferLevel(size_t buffer_size_samples,
Modes prev_mode) {
- const int elapsed_time_ms = output_size_samples_ / (8 * fs_mult_);
+ const int elapsed_time_ms =
+ static_cast<int>(output_size_samples_ / (8 * fs_mult_));
delay_manager_->UpdateCounters(elapsed_time_ms);
// Do not update buffer history if currently playing CNG since it will bias
@@ -164,7 +165,7 @@
buffer_level_filter_->SetTargetBufferLevel(
delay_manager_->base_target_level());
- int buffer_size_packets = 0;
+ size_t buffer_size_packets = 0;
if (packet_length_samples_ > 0) {
// Calculate size in packets.
buffer_size_packets = buffer_size_samples / packet_length_samples_;
diff --git a/webrtc/modules/audio_coding/neteq/decision_logic.h b/webrtc/modules/audio_coding/neteq/decision_logic.h
index 672ce93..cb3dba0 100644
--- a/webrtc/modules/audio_coding/neteq/decision_logic.h
+++ b/webrtc/modules/audio_coding/neteq/decision_logic.h
@@ -34,7 +34,7 @@
// Static factory function which creates different types of objects depending
// on the |playout_mode|.
static DecisionLogic* Create(int fs_hz,
- int output_size_samples,
+ size_t output_size_samples,
NetEqPlayoutMode playout_mode,
DecoderDatabase* decoder_database,
const PacketBuffer& packet_buffer,
@@ -43,7 +43,7 @@
// Constructor.
DecisionLogic(int fs_hz,
- int output_size_samples,
+ size_t output_size_samples,
NetEqPlayoutMode playout_mode,
DecoderDatabase* decoder_database,
const PacketBuffer& packet_buffer,
@@ -60,7 +60,7 @@
void SoftReset();
// Sets the sample rate and the output block size.
- void SetSampleRate(int fs_hz, int output_size_samples);
+ void SetSampleRate(int fs_hz, size_t output_size_samples);
// Returns the operation that should be done next. |sync_buffer| and |expand|
// are provided for reference. |decoder_frame_length| is the number of samples
@@ -75,7 +75,7 @@
// return value.
Operations GetDecision(const SyncBuffer& sync_buffer,
const Expand& expand,
- int decoder_frame_length,
+ size_t decoder_frame_length,
const RTPHeader* packet_header,
Modes prev_mode,
bool play_dtmf,
@@ -101,12 +101,12 @@
// Accessors and mutators.
void set_sample_memory(int32_t value) { sample_memory_ = value; }
- int generated_noise_samples() const { return generated_noise_samples_; }
- void set_generated_noise_samples(int value) {
+ size_t generated_noise_samples() const { return generated_noise_samples_; }
+ void set_generated_noise_samples(size_t value) {
generated_noise_samples_ = value;
}
- int packet_length_samples() const { return packet_length_samples_; }
- void set_packet_length_samples(int value) {
+ size_t packet_length_samples() const { return packet_length_samples_; }
+ void set_packet_length_samples(size_t value) {
packet_length_samples_ = value;
}
void set_prev_time_scale(bool value) { prev_time_scale_ = value; }
@@ -134,7 +134,7 @@
// Should be implemented by derived classes.
virtual Operations GetDecisionSpecialized(const SyncBuffer& sync_buffer,
const Expand& expand,
- int decoder_frame_length,
+ size_t decoder_frame_length,
const RTPHeader* packet_header,
Modes prev_mode,
bool play_dtmf,
@@ -142,18 +142,18 @@
// Updates the |buffer_level_filter_| with the current buffer level
// |buffer_size_packets|.
- void FilterBufferLevel(int buffer_size_packets, Modes prev_mode);
+ void FilterBufferLevel(size_t buffer_size_packets, Modes prev_mode);
DecoderDatabase* decoder_database_;
const PacketBuffer& packet_buffer_;
DelayManager* delay_manager_;
BufferLevelFilter* buffer_level_filter_;
int fs_mult_;
- int output_size_samples_;
+ size_t output_size_samples_;
CngState cng_state_; // Remember if comfort noise is interrupted by other
// event (e.g., DTMF).
- int generated_noise_samples_;
- int packet_length_samples_;
+ size_t generated_noise_samples_;
+ size_t packet_length_samples_;
int sample_memory_;
bool prev_time_scale_;
int timescale_hold_off_;
diff --git a/webrtc/modules/audio_coding/neteq/decision_logic_fax.cc b/webrtc/modules/audio_coding/neteq/decision_logic_fax.cc
index 08a4c4c..ddea644 100644
--- a/webrtc/modules/audio_coding/neteq/decision_logic_fax.cc
+++ b/webrtc/modules/audio_coding/neteq/decision_logic_fax.cc
@@ -22,7 +22,7 @@
Operations DecisionLogicFax::GetDecisionSpecialized(
const SyncBuffer& sync_buffer,
const Expand& expand,
- int decoder_frame_length,
+ size_t decoder_frame_length,
const RTPHeader* packet_header,
Modes prev_mode,
bool play_dtmf,
diff --git a/webrtc/modules/audio_coding/neteq/decision_logic_fax.h b/webrtc/modules/audio_coding/neteq/decision_logic_fax.h
index d9f8db9..861e2fa 100644
--- a/webrtc/modules/audio_coding/neteq/decision_logic_fax.h
+++ b/webrtc/modules/audio_coding/neteq/decision_logic_fax.h
@@ -23,7 +23,7 @@
public:
// Constructor.
DecisionLogicFax(int fs_hz,
- int output_size_samples,
+ size_t output_size_samples,
NetEqPlayoutMode playout_mode,
DecoderDatabase* decoder_database,
const PacketBuffer& packet_buffer,
@@ -46,7 +46,7 @@
// remain true if it was true before the call).
Operations GetDecisionSpecialized(const SyncBuffer& sync_buffer,
const Expand& expand,
- int decoder_frame_length,
+ size_t decoder_frame_length,
const RTPHeader* packet_header,
Modes prev_mode,
bool play_dtmf,
diff --git a/webrtc/modules/audio_coding/neteq/decision_logic_normal.cc b/webrtc/modules/audio_coding/neteq/decision_logic_normal.cc
index e985ee0..d3f6fa6 100644
--- a/webrtc/modules/audio_coding/neteq/decision_logic_normal.cc
+++ b/webrtc/modules/audio_coding/neteq/decision_logic_normal.cc
@@ -27,7 +27,7 @@
Operations DecisionLogicNormal::GetDecisionSpecialized(
const SyncBuffer& sync_buffer,
const Expand& expand,
- int decoder_frame_length,
+ size_t decoder_frame_length,
const RTPHeader* packet_header,
Modes prev_mode,
bool play_dtmf,
@@ -149,7 +149,7 @@
Operations DecisionLogicNormal::FuturePacketAvailable(
const SyncBuffer& sync_buffer,
const Expand& expand,
- int decoder_frame_length,
+ size_t decoder_frame_length,
Modes prev_mode,
uint32_t target_timestamp,
uint32_t available_timestamp,
@@ -172,9 +172,9 @@
}
}
- const int samples_left = static_cast<int>(sync_buffer.FutureLength() -
- expand.overlap_length());
- const int cur_size_samples = samples_left +
+ const size_t samples_left =
+ sync_buffer.FutureLength() - expand.overlap_length();
+ const size_t cur_size_samples = samples_left +
packet_buffer_.NumPacketsInBuffer() * decoder_frame_length;
// If previous was comfort noise, then no merge is needed.
@@ -205,7 +205,8 @@
// fs_mult_ * 8 = fs / 1000.)
if (prev_mode == kModeExpand ||
(decoder_frame_length < output_size_samples_ &&
- cur_size_samples > kAllowMergeWithoutExpandMs * fs_mult_ * 8)) {
+ cur_size_samples >
+ static_cast<size_t>(kAllowMergeWithoutExpandMs * fs_mult_ * 8))) {
return kMerge;
} else if (play_dtmf) {
// Play DTMF instead of expand.
diff --git a/webrtc/modules/audio_coding/neteq/decision_logic_normal.h b/webrtc/modules/audio_coding/neteq/decision_logic_normal.h
index 047663f..7867407 100644
--- a/webrtc/modules/audio_coding/neteq/decision_logic_normal.h
+++ b/webrtc/modules/audio_coding/neteq/decision_logic_normal.h
@@ -23,7 +23,7 @@
public:
// Constructor.
DecisionLogicNormal(int fs_hz,
- int output_size_samples,
+ size_t output_size_samples,
NetEqPlayoutMode playout_mode,
DecoderDatabase* decoder_database,
const PacketBuffer& packet_buffer,
@@ -50,7 +50,7 @@
// remain true if it was true before the call).
Operations GetDecisionSpecialized(const SyncBuffer& sync_buffer,
const Expand& expand,
- int decoder_frame_length,
+ size_t decoder_frame_length,
const RTPHeader* packet_header,
Modes prev_mode,
bool play_dtmf,
@@ -61,7 +61,7 @@
virtual Operations FuturePacketAvailable(
const SyncBuffer& sync_buffer,
const Expand& expand,
- int decoder_frame_length,
+ size_t decoder_frame_length,
Modes prev_mode,
uint32_t target_timestamp,
uint32_t available_timestamp,
diff --git a/webrtc/modules/audio_coding/neteq/delay_manager.cc b/webrtc/modules/audio_coding/neteq/delay_manager.cc
index a935561..e7f76f6 100644
--- a/webrtc/modules/audio_coding/neteq/delay_manager.cc
+++ b/webrtc/modules/audio_coding/neteq/delay_manager.cc
@@ -22,7 +22,7 @@
namespace webrtc {
-DelayManager::DelayManager(int max_packets_in_buffer,
+DelayManager::DelayManager(size_t max_packets_in_buffer,
DelayPeakDetector* peak_detector)
: first_packet_received_(false),
max_packets_in_buffer_(max_packets_in_buffer),
@@ -239,7 +239,8 @@
}
// Shift to Q8, then 75%.;
- int max_buffer_packets_q8 = (3 * (max_packets_in_buffer_ << 8)) / 4;
+ int max_buffer_packets_q8 =
+ static_cast<int>((3 * (max_packets_in_buffer_ << 8)) / 4);
target_level_ = std::min(target_level_, max_buffer_packets_q8);
// Sanity check, at least 1 packet (in Q8).
@@ -389,7 +390,8 @@
// |max_packets_in_buffer_|.
if ((maximum_delay_ms_ > 0 && delay_ms > maximum_delay_ms_) ||
(packet_len_ms_ > 0 &&
- delay_ms > 3 * max_packets_in_buffer_ * packet_len_ms_ / 4)) {
+ delay_ms >
+ static_cast<int>(3 * max_packets_in_buffer_ * packet_len_ms_ / 4))) {
return false;
}
minimum_delay_ms_ = delay_ms;
diff --git a/webrtc/modules/audio_coding/neteq/delay_manager.h b/webrtc/modules/audio_coding/neteq/delay_manager.h
index 33c4a40..b0d3f2e 100644
--- a/webrtc/modules/audio_coding/neteq/delay_manager.h
+++ b/webrtc/modules/audio_coding/neteq/delay_manager.h
@@ -32,7 +32,7 @@
// buffer can hold no more than |max_packets_in_buffer| packets (i.e., this
// is the number of packet slots in the buffer). Supply a PeakDetector
// object to the DelayManager.
- DelayManager(int max_packets_in_buffer, DelayPeakDetector* peak_detector);
+ DelayManager(size_t max_packets_in_buffer, DelayPeakDetector* peak_detector);
virtual ~DelayManager();
@@ -132,7 +132,7 @@
void LimitTargetLevel();
bool first_packet_received_;
- const int max_packets_in_buffer_; // Capacity of the packet buffer.
+ const size_t max_packets_in_buffer_; // Capacity of the packet buffer.
IATVector iat_vector_; // Histogram of inter-arrival times.
int iat_factor_; // Forgetting factor for updating the IAT histogram (Q15).
int packet_iat_count_ms_; // Milliseconds elapsed since last packet.
diff --git a/webrtc/modules/audio_coding/neteq/dsp_helper.cc b/webrtc/modules/audio_coding/neteq/dsp_helper.cc
index 3e5c61d..4188914 100644
--- a/webrtc/modules/audio_coding/neteq/dsp_helper.cc
+++ b/webrtc/modules/audio_coding/neteq/dsp_helper.cc
@@ -99,13 +99,13 @@
return end_factor;
}
-void DspHelper::PeakDetection(int16_t* data, int data_length,
- int num_peaks, int fs_mult,
- int* peak_index, int16_t* peak_value) {
- int16_t min_index = 0;
- int16_t max_index = 0;
+void DspHelper::PeakDetection(int16_t* data, size_t data_length,
+ size_t num_peaks, int fs_mult,
+ size_t* peak_index, int16_t* peak_value) {
+ size_t min_index = 0;
+ size_t max_index = 0;
- for (int i = 0; i <= num_peaks - 1; i++) {
+ for (size_t i = 0; i <= num_peaks - 1; i++) {
if (num_peaks == 1) {
// Single peak. The parabola fit assumes that an extra point is
// available; worst case it gets a zero on the high end of the signal.
@@ -148,7 +148,7 @@
}
void DspHelper::ParabolicFit(int16_t* signal_points, int fs_mult,
- int* peak_index, int16_t* peak_value) {
+ size_t* peak_index, int16_t* peak_value) {
uint16_t fit_index[13];
if (fs_mult == 1) {
fit_index[0] = 0;
@@ -235,16 +235,16 @@
}
}
-int DspHelper::MinDistortion(const int16_t* signal, int min_lag,
- int max_lag, int length,
- int32_t* distortion_value) {
- int best_index = 0;
+size_t DspHelper::MinDistortion(const int16_t* signal, size_t min_lag,
+ size_t max_lag, size_t length,
+ int32_t* distortion_value) {
+ size_t best_index = 0;
int32_t min_distortion = WEBRTC_SPL_WORD32_MAX;
- for (int i = min_lag; i <= max_lag; i++) {
+ for (size_t i = min_lag; i <= max_lag; i++) {
int32_t sum_diff = 0;
const int16_t* data1 = signal;
const int16_t* data2 = signal - i;
- for (int j = 0; j < length; j++) {
+ for (size_t j = 0; j < length; j++) {
sum_diff += WEBRTC_SPL_ABS_W32(data1[j] - data2[j]);
}
// Compare with previous minimum.
@@ -293,15 +293,15 @@
}
int DspHelper::DownsampleTo4kHz(const int16_t* input, size_t input_length,
- int output_length, int input_rate_hz,
+ size_t output_length, int input_rate_hz,
bool compensate_delay, int16_t* output) {
// Set filter parameters depending on input frequency.
// NOTE: The phase delay values are wrong compared to the true phase delay
// of the filters. However, the error is preserved (through the +1 term) for
// consistency.
const int16_t* filter_coefficients; // Filter coefficients.
- int16_t filter_length; // Number of coefficients.
- int16_t filter_delay; // Phase delay in samples.
+ size_t filter_length; // Number of coefficients.
+ size_t filter_delay; // Phase delay in samples.
int16_t factor; // Conversion rate (inFsHz / 8000).
switch (input_rate_hz) {
case 8000: {
@@ -345,9 +345,8 @@
// Returns -1 if input signal is too short; 0 otherwise.
return WebRtcSpl_DownsampleFast(
- &input[filter_length - 1], static_cast<int>(input_length) -
- (filter_length - 1), output, output_length, filter_coefficients,
- filter_length, factor, filter_delay);
+ &input[filter_length - 1], input_length - filter_length + 1, output,
+ output_length, filter_coefficients, filter_length, factor, filter_delay);
}
} // namespace webrtc
diff --git a/webrtc/modules/audio_coding/neteq/dsp_helper.h b/webrtc/modules/audio_coding/neteq/dsp_helper.h
index f903256..c40d10a 100644
--- a/webrtc/modules/audio_coding/neteq/dsp_helper.h
+++ b/webrtc/modules/audio_coding/neteq/dsp_helper.h
@@ -78,9 +78,9 @@
// locations and values are written to the arrays |peak_index| and
// |peak_value|, respectively. Both arrays must hold at least |num_peaks|
// elements.
- static void PeakDetection(int16_t* data, int data_length,
- int num_peaks, int fs_mult,
- int* peak_index, int16_t* peak_value);
+ static void PeakDetection(int16_t* data, size_t data_length,
+ size_t num_peaks, int fs_mult,
+ size_t* peak_index, int16_t* peak_value);
// Estimates the height and location of a maximum. The three values in the
// array |signal_points| are used as basis for a parabolic fit, which is then
@@ -89,14 +89,15 @@
// |peak_index| and |peak_value| is given in the full sample rate, as
// indicated by the sample rate multiplier |fs_mult|.
static void ParabolicFit(int16_t* signal_points, int fs_mult,
- int* peak_index, int16_t* peak_value);
+ size_t* peak_index, int16_t* peak_value);
// Calculates the sum-abs-diff for |signal| when compared to a displaced
// version of itself. Returns the displacement lag that results in the minimum
// distortion. The resulting distortion is written to |distortion_value|.
// The values of |min_lag| and |max_lag| are boundaries for the search.
- static int MinDistortion(const int16_t* signal, int min_lag,
- int max_lag, int length, int32_t* distortion_value);
+ static size_t MinDistortion(const int16_t* signal, size_t min_lag,
+ size_t max_lag, size_t length,
+ int32_t* distortion_value);
// Mixes |length| samples from |input1| and |input2| together and writes the
// result to |output|. The gain for |input1| starts at |mix_factor| (Q14) and
@@ -122,7 +123,7 @@
// filters if |compensate_delay| is true. Returns -1 if the input is too short
// to produce |output_length| samples, otherwise 0.
static int DownsampleTo4kHz(const int16_t* input, size_t input_length,
- int output_length, int input_rate_hz,
+ size_t output_length, int input_rate_hz,
bool compensate_delay, int16_t* output);
private:
diff --git a/webrtc/modules/audio_coding/neteq/dtmf_tone_generator.cc b/webrtc/modules/audio_coding/neteq/dtmf_tone_generator.cc
index 45601c0..f4d5190 100644
--- a/webrtc/modules/audio_coding/neteq/dtmf_tone_generator.cc
+++ b/webrtc/modules/audio_coding/neteq/dtmf_tone_generator.cc
@@ -149,18 +149,18 @@
}
// Generate num_samples of DTMF signal and write to |output|.
-int DtmfToneGenerator::Generate(int num_samples,
+int DtmfToneGenerator::Generate(size_t num_samples,
AudioMultiVector* output) {
if (!initialized_) {
return kNotInitialized;
}
- if (num_samples < 0 || !output) {
+ if (!output) {
return kParameterError;
}
output->AssertSize(num_samples);
- for (int i = 0; i < num_samples; ++i) {
+ for (size_t i = 0; i < num_samples; ++i) {
// Use recursion formula y[n] = a * y[n - 1] - y[n - 2].
int16_t temp_val_low = ((coeff1_ * sample_history1_[1] + 8192) >> 14)
- sample_history1_[0];
@@ -186,7 +186,7 @@
output->CopyChannel(0, channel);
}
- return num_samples;
+ return static_cast<int>(num_samples);
}
bool DtmfToneGenerator::initialized() const {
diff --git a/webrtc/modules/audio_coding/neteq/dtmf_tone_generator.h b/webrtc/modules/audio_coding/neteq/dtmf_tone_generator.h
index 4e51e53..767f66c 100644
--- a/webrtc/modules/audio_coding/neteq/dtmf_tone_generator.h
+++ b/webrtc/modules/audio_coding/neteq/dtmf_tone_generator.h
@@ -30,7 +30,7 @@
virtual ~DtmfToneGenerator() {}
virtual int Init(int fs, int event, int attenuation);
virtual void Reset();
- virtual int Generate(int num_samples, AudioMultiVector* output);
+ virtual int Generate(size_t num_samples, AudioMultiVector* output);
virtual bool initialized() const;
private:
diff --git a/webrtc/modules/audio_coding/neteq/dtmf_tone_generator_unittest.cc b/webrtc/modules/audio_coding/neteq/dtmf_tone_generator_unittest.cc
index ccd7fa6..a55e6c9 100644
--- a/webrtc/modules/audio_coding/neteq/dtmf_tone_generator_unittest.cc
+++ b/webrtc/modules/audio_coding/neteq/dtmf_tone_generator_unittest.cc
@@ -171,8 +171,6 @@
// Initialize with valid parameters.
ASSERT_EQ(0, tone_gen.Init(fs, event, attenuation));
EXPECT_TRUE(tone_gen.initialized());
- // Negative number of samples.
- EXPECT_EQ(DtmfToneGenerator::kParameterError, tone_gen.Generate(-1, &signal));
// NULL pointer to destination.
EXPECT_EQ(DtmfToneGenerator::kParameterError,
tone_gen.Generate(kNumSamples, NULL));
diff --git a/webrtc/modules/audio_coding/neteq/expand.cc b/webrtc/modules/audio_coding/neteq/expand.cc
index d01465a..c163fee 100644
--- a/webrtc/modules/audio_coding/neteq/expand.cc
+++ b/webrtc/modules/audio_coding/neteq/expand.cc
@@ -47,7 +47,7 @@
expand_duration_samples_(0),
channel_parameters_(new ChannelParameters[num_channels_]) {
assert(fs == 8000 || fs == 16000 || fs == 32000 || fs == 48000);
- assert(fs <= kMaxSampleRate); // Should not be possible.
+ assert(fs <= static_cast<int>(kMaxSampleRate)); // Should not be possible.
assert(num_channels_ > 0);
memset(expand_lags_, 0, sizeof(expand_lags_));
Reset();
@@ -72,7 +72,7 @@
int16_t temp_data[kTempDataSize]; // TODO(hlundin) Remove this.
int16_t* voiced_vector_storage = temp_data;
int16_t* voiced_vector = &voiced_vector_storage[overlap_length_];
- static const int kNoiseLpcOrder = BackgroundNoise::kMaxLpcOrder;
+ static const size_t kNoiseLpcOrder = BackgroundNoise::kMaxLpcOrder;
int16_t unvoiced_array_memory[kNoiseLpcOrder + kMaxSampleRate / 8000 * 125];
int16_t* unvoiced_vector = unvoiced_array_memory + kUnvoicedLpcOrder;
int16_t* noise_vector = unvoiced_array_memory + kNoiseLpcOrder;
@@ -87,7 +87,7 @@
} else {
// This is not the first expansion, parameters are already estimated.
// Extract a noise segment.
- int16_t rand_length = max_lag_;
+ size_t rand_length = max_lag_;
// This only applies to SWB where length could be larger than 256.
assert(rand_length <= kMaxSampleRate / 8000 * 120 + 30);
GenerateRandomVector(2, rand_length, random_vector);
@@ -119,7 +119,7 @@
WebRtcSpl_ScaleAndAddVectorsWithRound(
¶meters.expand_vector0[expansion_vector_position], 3,
¶meters.expand_vector1[expansion_vector_position], 1, 2,
- voiced_vector_storage, static_cast<int>(temp_length));
+ voiced_vector_storage, temp_length);
} else if (current_lag_index_ == 2) {
// Mix 1/2 of expand_vector0 with 1/2 of expand_vector1.
assert(expansion_vector_position + temp_length <=
@@ -129,7 +129,7 @@
WebRtcSpl_ScaleAndAddVectorsWithRound(
¶meters.expand_vector0[expansion_vector_position], 1,
¶meters.expand_vector1[expansion_vector_position], 1, 1,
- voiced_vector_storage, static_cast<int>(temp_length));
+ voiced_vector_storage, temp_length);
}
// Get tapering window parameters. Values are in Q15.
@@ -196,10 +196,10 @@
WebRtcSpl_AffineTransformVector(scaled_random_vector, random_vector,
parameters.ar_gain, add_constant,
parameters.ar_gain_scale,
- static_cast<int>(current_lag));
+ current_lag);
WebRtcSpl_FilterARFastQ12(scaled_random_vector, unvoiced_vector,
parameters.ar_filter, kUnvoicedLpcOrder + 1,
- static_cast<int>(current_lag));
+ current_lag);
memcpy(parameters.ar_filter_state,
&(unvoiced_vector[current_lag - kUnvoicedLpcOrder]),
sizeof(int16_t) * kUnvoicedLpcOrder);
@@ -212,7 +212,8 @@
// (>= 31 .. <= 63) * fs_mult => go from 1 to 0 in about 16 ms;
// >= 64 * fs_mult => go from 1 to 0 in about 32 ms.
// temp_shift = getbits(max_lag_) - 5.
- int temp_shift = (31 - WebRtcSpl_NormW32(max_lag_)) - 5;
+ int temp_shift =
+ (31 - WebRtcSpl_NormW32(rtc::checked_cast<int32_t>(max_lag_))) - 5;
int16_t mix_factor_increment = 256 >> temp_shift;
if (stop_muting_) {
mix_factor_increment = 0;
@@ -237,7 +238,7 @@
WebRtcSpl_ScaleAndAddVectorsWithRound(
voiced_vector + temp_length, parameters.current_voice_mix_factor,
unvoiced_vector + temp_length, temp_scale, 14,
- temp_data + temp_length, static_cast<int>(current_lag - temp_length));
+ temp_data + temp_length, current_lag - temp_length);
}
// Select muting slope depending on how many consecutive expands we have
@@ -258,7 +259,7 @@
// Mute to the previous level, then continue with the muting.
WebRtcSpl_AffineTransformVector(temp_data, temp_data,
parameters.mute_factor, 8192,
- 14, static_cast<int>(current_lag));
+ 14, current_lag);
if (!stop_muting_) {
DspHelper::MuteSignal(temp_data, parameters.mute_slope, current_lag);
@@ -351,26 +352,26 @@
int32_t auto_correlation[kUnvoicedLpcOrder + 1];
int16_t reflection_coeff[kUnvoicedLpcOrder];
int16_t correlation_vector[kMaxSampleRate / 8000 * 102];
- int best_correlation_index[kNumCorrelationCandidates];
+ size_t best_correlation_index[kNumCorrelationCandidates];
int16_t best_correlation[kNumCorrelationCandidates];
- int16_t best_distortion_index[kNumCorrelationCandidates];
+ size_t best_distortion_index[kNumCorrelationCandidates];
int16_t best_distortion[kNumCorrelationCandidates];
int32_t correlation_vector2[(99 * kMaxSampleRate / 8000) + 1];
int32_t best_distortion_w32[kNumCorrelationCandidates];
- static const int kNoiseLpcOrder = BackgroundNoise::kMaxLpcOrder;
+ static const size_t kNoiseLpcOrder = BackgroundNoise::kMaxLpcOrder;
int16_t unvoiced_array_memory[kNoiseLpcOrder + kMaxSampleRate / 8000 * 125];
int16_t* unvoiced_vector = unvoiced_array_memory + kUnvoicedLpcOrder;
int fs_mult = fs_hz_ / 8000;
// Pre-calculate common multiplications with fs_mult.
- int fs_mult_4 = fs_mult * 4;
- int fs_mult_20 = fs_mult * 20;
- int fs_mult_120 = fs_mult * 120;
- int fs_mult_dist_len = fs_mult * kDistortionLength;
- int fs_mult_lpc_analysis_len = fs_mult * kLpcAnalysisLength;
+ size_t fs_mult_4 = static_cast<size_t>(fs_mult * 4);
+ size_t fs_mult_20 = static_cast<size_t>(fs_mult * 20);
+ size_t fs_mult_120 = static_cast<size_t>(fs_mult * 120);
+ size_t fs_mult_dist_len = fs_mult * kDistortionLength;
+ size_t fs_mult_lpc_analysis_len = fs_mult * kLpcAnalysisLength;
- const size_t signal_length = 256 * fs_mult;
+ const size_t signal_length = static_cast<size_t>(256 * fs_mult);
const int16_t* audio_history =
&(*sync_buffer_)[0][sync_buffer_->Size() - signal_length];
@@ -379,7 +380,7 @@
// Calculate correlation in downsampled domain (4 kHz sample rate).
int correlation_scale;
- int correlation_length = 51; // TODO(hlundin): Legacy bit-exactness.
+ size_t correlation_length = 51; // TODO(hlundin): Legacy bit-exactness.
// If it is decided to break bit-exactness |correlation_length| should be
// initialized to the return value of Correlation().
Correlation(audio_history, signal_length, correlation_vector,
@@ -398,11 +399,11 @@
// Calculate distortion around the |kNumCorrelationCandidates| best lags.
int distortion_scale = 0;
- for (int i = 0; i < kNumCorrelationCandidates; i++) {
- int16_t min_index = std::max(fs_mult_20,
- best_correlation_index[i] - fs_mult_4);
- int16_t max_index = std::min(fs_mult_120 - 1,
- best_correlation_index[i] + fs_mult_4);
+ for (size_t i = 0; i < kNumCorrelationCandidates; i++) {
+ size_t min_index = std::max(fs_mult_20,
+ best_correlation_index[i] - fs_mult_4);
+ size_t max_index = std::min(fs_mult_120 - 1,
+ best_correlation_index[i] + fs_mult_4);
best_distortion_index[i] = DspHelper::MinDistortion(
&(audio_history[signal_length - fs_mult_dist_len]), min_index,
max_index, fs_mult_dist_len, &best_distortion_w32[i]);
@@ -416,8 +417,8 @@
// Find the maximizing index |i| of the cost function
// f[i] = best_correlation[i] / best_distortion[i].
int32_t best_ratio = std::numeric_limits<int32_t>::min();
- int best_index = std::numeric_limits<int>::max();
- for (int i = 0; i < kNumCorrelationCandidates; ++i) {
+ size_t best_index = std::numeric_limits<size_t>::max();
+ for (size_t i = 0; i < kNumCorrelationCandidates; ++i) {
int32_t ratio;
if (best_distortion[i] > 0) {
ratio = (best_correlation[i] << 16) / best_distortion[i];
@@ -432,19 +433,20 @@
}
}
- int distortion_lag = best_distortion_index[best_index];
- int correlation_lag = best_correlation_index[best_index];
+ size_t distortion_lag = best_distortion_index[best_index];
+ size_t correlation_lag = best_correlation_index[best_index];
max_lag_ = std::max(distortion_lag, correlation_lag);
// Calculate the exact best correlation in the range between
// |correlation_lag| and |distortion_lag|.
correlation_length =
- std::max(std::min(distortion_lag + 10, fs_mult_120), 60 * fs_mult);
+ std::max(std::min(distortion_lag + 10, fs_mult_120),
+ static_cast<size_t>(60 * fs_mult));
- int start_index = std::min(distortion_lag, correlation_lag);
- int correlation_lags =
- WEBRTC_SPL_ABS_W16((distortion_lag-correlation_lag)) + 1;
- assert(correlation_lags <= 99 * fs_mult + 1); // Cannot be larger.
+ size_t start_index = std::min(distortion_lag, correlation_lag);
+ size_t correlation_lags = static_cast<size_t>(
+ WEBRTC_SPL_ABS_W16((distortion_lag-correlation_lag)) + 1);
+ assert(correlation_lags <= static_cast<size_t>(99 * fs_mult + 1));
for (size_t channel_ix = 0; channel_ix < num_channels_; ++channel_ix) {
ChannelParameters& parameters = channel_parameters_[channel_ix];
@@ -454,7 +456,7 @@
- correlation_lags],
correlation_length + start_index + correlation_lags - 1);
correlation_scale = (31 - WebRtcSpl_NormW32(signal_max * signal_max)) +
- (31 - WebRtcSpl_NormW32(correlation_length)) - 31;
+ (31 - WebRtcSpl_NormW32(static_cast<int32_t>(correlation_length))) - 31;
correlation_scale = std::max(0, correlation_scale);
// Calculate the correlation, store in |correlation_vector2|.
@@ -465,7 +467,8 @@
correlation_length, correlation_lags, correlation_scale, -1);
// Find maximizing index.
- best_index = WebRtcSpl_MaxIndexW32(correlation_vector2, correlation_lags);
+ best_index = static_cast<size_t>(
+ WebRtcSpl_MaxIndexW32(correlation_vector2, correlation_lags));
int32_t max_correlation = correlation_vector2[best_index];
// Compensate index with start offset.
best_index = best_index + start_index;
@@ -508,7 +511,7 @@
// Extract the two vectors expand_vector0 and expand_vector1 from
// |audio_history|.
- int16_t expansion_length = static_cast<int16_t>(max_lag_ + overlap_length_);
+ size_t expansion_length = max_lag_ + overlap_length_;
const int16_t* vector1 = &(audio_history[signal_length - expansion_length]);
const int16_t* vector2 = vector1 - distortion_lag;
// Normalize the second vector to the same energy as the first.
@@ -527,15 +530,15 @@
// Calculate scaled_energy1 / scaled_energy2 in Q13.
int32_t energy_ratio = WebRtcSpl_DivW32W16(
WEBRTC_SPL_SHIFT_W32(energy1, -scaled_energy1),
- energy2 >> scaled_energy2);
+ static_cast<int16_t>(energy2 >> scaled_energy2));
// Calculate sqrt ratio in Q13 (sqrt of en1/en2 in Q26).
- amplitude_ratio = WebRtcSpl_SqrtFloor(energy_ratio << 13);
+ amplitude_ratio =
+ static_cast<int16_t>(WebRtcSpl_SqrtFloor(energy_ratio << 13));
// Copy the two vectors and give them the same energy.
parameters.expand_vector0.Clear();
parameters.expand_vector0.PushBack(vector1, expansion_length);
parameters.expand_vector1.Clear();
- if (parameters.expand_vector1.Size() <
- static_cast<size_t>(expansion_length)) {
+ if (parameters.expand_vector1.Size() < expansion_length) {
parameters.expand_vector1.Extend(
expansion_length - parameters.expand_vector1.Size());
}
@@ -626,7 +629,7 @@
if (channel_ix == 0) {
// Extract a noise segment.
- int16_t noise_length;
+ size_t noise_length;
if (distortion_lag < 40) {
noise_length = 2 * distortion_lag + 30;
} else {
@@ -768,7 +771,7 @@
int* output_scale) const {
// Set parameters depending on sample rate.
const int16_t* filter_coefficients;
- int16_t num_coefficients;
+ size_t num_coefficients;
int16_t downsampling_factor;
if (fs_hz_ == 8000) {
num_coefficients = 3;
@@ -790,14 +793,14 @@
// Correlate from lag 10 to lag 60 in downsampled domain.
// (Corresponds to 20-120 for narrow-band, 40-240 for wide-band, and so on.)
- static const int kCorrelationStartLag = 10;
- static const int kNumCorrelationLags = 54;
- static const int kCorrelationLength = 60;
+ static const size_t kCorrelationStartLag = 10;
+ static const size_t kNumCorrelationLags = 54;
+ static const size_t kCorrelationLength = 60;
// Downsample to 4 kHz sample rate.
- static const int kDownsampledLength = kCorrelationStartLag
+ static const size_t kDownsampledLength = kCorrelationStartLag
+ kNumCorrelationLags + kCorrelationLength;
int16_t downsampled_input[kDownsampledLength];
- static const int kFilterDelay = 0;
+ static const size_t kFilterDelay = 0;
WebRtcSpl_DownsampleFast(
input + input_length - kDownsampledLength * downsampling_factor,
kDownsampledLength * downsampling_factor, downsampled_input,
@@ -859,9 +862,9 @@
bool too_many_expands,
size_t num_noise_samples,
int16_t* buffer) {
- static const int kNoiseLpcOrder = BackgroundNoise::kMaxLpcOrder;
+ static const size_t kNoiseLpcOrder = BackgroundNoise::kMaxLpcOrder;
int16_t scaled_random_vector[kMaxSampleRate / 8000 * 125];
- assert(num_noise_samples <= static_cast<size_t>(kMaxSampleRate / 8000 * 125));
+ assert(num_noise_samples <= (kMaxSampleRate / 8000 * 125));
int16_t* noise_samples = &buffer[kNoiseLpcOrder];
if (background_noise_->initialized()) {
// Use background noise parameters.
@@ -879,12 +882,12 @@
scaled_random_vector, random_vector,
background_noise_->Scale(channel), dc_offset,
background_noise_->ScaleShift(channel),
- static_cast<int>(num_noise_samples));
+ num_noise_samples);
WebRtcSpl_FilterARFastQ12(scaled_random_vector, noise_samples,
background_noise_->Filter(channel),
kNoiseLpcOrder + 1,
- static_cast<int>(num_noise_samples));
+ num_noise_samples);
background_noise_->SetFilterState(
channel,
@@ -931,7 +934,7 @@
// kBgnFade has reached 0.
WebRtcSpl_AffineTransformVector(noise_samples, noise_samples,
bgn_mute_factor, 8192, 14,
- static_cast<int>(num_noise_samples));
+ num_noise_samples);
}
}
// Update mute_factor in BackgroundNoise class.
diff --git a/webrtc/modules/audio_coding/neteq/expand.h b/webrtc/modules/audio_coding/neteq/expand.h
index 3fbafdb..37e58d6 100644
--- a/webrtc/modules/audio_coding/neteq/expand.h
+++ b/webrtc/modules/audio_coding/neteq/expand.h
@@ -64,7 +64,7 @@
// Accessors and mutators.
virtual size_t overlap_length() const;
- int16_t max_lag() const { return max_lag_; }
+ size_t max_lag() const { return max_lag_; }
protected:
static const int kMaxConsecutiveExpands = 200;
@@ -96,11 +96,11 @@
int consecutive_expands_;
private:
- static const int kUnvoicedLpcOrder = 6;
- static const int kNumCorrelationCandidates = 3;
- static const int kDistortionLength = 20;
- static const int kLpcAnalysisLength = 160;
- static const int kMaxSampleRate = 48000;
+ static const size_t kUnvoicedLpcOrder = 6;
+ static const size_t kNumCorrelationCandidates = 3;
+ static const size_t kDistortionLength = 20;
+ static const size_t kLpcAnalysisLength = 160;
+ static const size_t kMaxSampleRate = 48000;
static const int kNumLags = 3;
struct ChannelParameters {
@@ -132,7 +132,7 @@
BackgroundNoise* const background_noise_;
StatisticsCalculator* const statistics_;
const size_t overlap_length_;
- int16_t max_lag_;
+ size_t max_lag_;
size_t expand_lags_[kNumLags];
int lag_index_direction_;
int current_lag_index_;
diff --git a/webrtc/modules/audio_coding/neteq/interface/neteq.h b/webrtc/modules/audio_coding/neteq/interface/neteq.h
index 88bf208..865a8b3 100644
--- a/webrtc/modules/audio_coding/neteq/interface/neteq.h
+++ b/webrtc/modules/audio_coding/neteq/interface/neteq.h
@@ -45,7 +45,7 @@
// decoding (in Q14).
int32_t clockdrift_ppm; // Average clock-drift in parts-per-million
// (positive or negative).
- int added_zero_samples; // Number of zero samples added in "off" mode.
+ size_t added_zero_samples; // Number of zero samples added in "off" mode.
};
enum NetEqOutputType {
@@ -87,7 +87,7 @@
int sample_rate_hz; // Initial value. Will change with input data.
bool enable_audio_classifier;
- int max_packets_in_buffer;
+ size_t max_packets_in_buffer;
int max_delay_ms;
BackgroundNoiseMode background_noise_mode;
NetEqPlayoutMode playout_mode;
@@ -165,7 +165,7 @@
// The speech type is written to |type|, if |type| is not NULL.
// Returns kOK on success, or kFail in case of an error.
virtual int GetAudio(size_t max_length, int16_t* output_audio,
- int* samples_per_channel, int* num_channels,
+ size_t* samples_per_channel, int* num_channels,
NetEqOutputType* type) = 0;
// Associates |rtp_payload_type| with |codec| and stores the information in
diff --git a/webrtc/modules/audio_coding/neteq/merge.cc b/webrtc/modules/audio_coding/neteq/merge.cc
index 2c515c1..b6fb2d8 100644
--- a/webrtc/modules/audio_coding/neteq/merge.cc
+++ b/webrtc/modules/audio_coding/neteq/merge.cc
@@ -31,25 +31,25 @@
: fs_hz_(fs_hz),
num_channels_(num_channels),
fs_mult_(fs_hz_ / 8000),
- timestamps_per_call_(fs_hz_ / 100),
+ timestamps_per_call_(static_cast<size_t>(fs_hz_ / 100)),
expand_(expand),
sync_buffer_(sync_buffer),
expanded_(num_channels_) {
assert(num_channels_ > 0);
}
-int Merge::Process(int16_t* input, size_t input_length,
- int16_t* external_mute_factor_array,
- AudioMultiVector* output) {
+size_t Merge::Process(int16_t* input, size_t input_length,
+ int16_t* external_mute_factor_array,
+ AudioMultiVector* output) {
// TODO(hlundin): Change to an enumerator and skip assert.
assert(fs_hz_ == 8000 || fs_hz_ == 16000 || fs_hz_ == 32000 ||
fs_hz_ == 48000);
assert(fs_hz_ <= kMaxSampleRate); // Should not be possible.
- int old_length;
- int expand_period;
+ size_t old_length;
+ size_t expand_period;
// Get expansion data to overlap and mix with.
- int expanded_length = GetExpandedSignal(&old_length, &expand_period);
+ size_t expanded_length = GetExpandedSignal(&old_length, &expand_period);
// Transfer input signal to an AudioMultiVector.
AudioMultiVector input_vector(num_channels_);
@@ -57,7 +57,7 @@
size_t input_length_per_channel = input_vector.Size();
assert(input_length_per_channel == input_length / num_channels_);
- int16_t best_correlation_index = 0;
+ size_t best_correlation_index = 0;
size_t output_length = 0;
for (size_t channel = 0; channel < num_channels_; ++channel) {
@@ -65,8 +65,8 @@
int16_t* expanded_channel = &expanded_[channel][0];
int16_t expanded_max, input_max;
int16_t new_mute_factor = SignalScaling(
- input_channel, static_cast<int>(input_length_per_channel),
- expanded_channel, &expanded_max, &input_max);
+ input_channel, input_length_per_channel, expanded_channel,
+ &expanded_max, &input_max);
// Adjust muting factor (product of "main" muting factor and expand muting
// factor).
@@ -84,13 +84,13 @@
// Downsample, correlate, and find strongest correlation period for the
// master (i.e., first) channel only.
// Downsample to 4kHz sample rate.
- Downsample(input_channel, static_cast<int>(input_length_per_channel),
- expanded_channel, expanded_length);
+ Downsample(input_channel, input_length_per_channel, expanded_channel,
+ expanded_length);
// Calculate the lag of the strongest correlation period.
best_correlation_index = CorrelateAndPeakSearch(
expanded_max, input_max, old_length,
- static_cast<int>(input_length_per_channel), expand_period);
+ input_length_per_channel, expand_period);
}
static const int kTempDataSize = 3600;
@@ -99,11 +99,11 @@
// Mute the new decoded data if needed (and unmute it linearly).
// This is the overlapping part of expanded_signal.
- int interpolation_length = std::min(
+ size_t interpolation_length = std::min(
kMaxCorrelationLength * fs_mult_,
expanded_length - best_correlation_index);
interpolation_length = std::min(interpolation_length,
- static_cast<int>(input_length_per_channel));
+ input_length_per_channel);
if (*external_mute_factor < 16384) {
// Set a suitable muting slope (Q20). 0.004 for NB, 0.002 for WB,
// and so on.
@@ -153,14 +153,14 @@
// Return new added length. |old_length| samples were borrowed from
// |sync_buffer_|.
- return static_cast<int>(output_length) - old_length;
+ return output_length - old_length;
}
-int Merge::GetExpandedSignal(int* old_length, int* expand_period) {
+size_t Merge::GetExpandedSignal(size_t* old_length, size_t* expand_period) {
// Check how much data that is left since earlier.
- *old_length = static_cast<int>(sync_buffer_->FutureLength());
+ *old_length = sync_buffer_->FutureLength();
// Should never be less than overlap_length.
- assert(*old_length >= static_cast<int>(expand_->overlap_length()));
+ assert(*old_length >= expand_->overlap_length());
// Generate data to merge the overlap with using expand.
expand_->SetParametersForMergeAfterExpand();
@@ -171,7 +171,7 @@
// but shift them towards the end of the buffer. This is ok, since all of
// the buffer will be expand data anyway, so as long as the beginning is
// left untouched, we're fine.
- int16_t length_diff = *old_length - 210 * kMaxSampleRate / 8000;
+ size_t length_diff = *old_length - 210 * kMaxSampleRate / 8000;
sync_buffer_->InsertZerosAtIndex(length_diff, sync_buffer_->next_index());
*old_length = 210 * kMaxSampleRate / 8000;
// This is the truncated length.
@@ -181,34 +181,34 @@
AudioMultiVector expanded_temp(num_channels_);
expand_->Process(&expanded_temp);
- *expand_period = static_cast<int>(expanded_temp.Size()); // Samples per
- // channel.
+ *expand_period = expanded_temp.Size(); // Samples per channel.
expanded_.Clear();
// Copy what is left since earlier into the expanded vector.
expanded_.PushBackFromIndex(*sync_buffer_, sync_buffer_->next_index());
- assert(expanded_.Size() == static_cast<size_t>(*old_length));
+ assert(expanded_.Size() == *old_length);
assert(expanded_temp.Size() > 0);
// Do "ugly" copy and paste from the expanded in order to generate more data
// to correlate (but not interpolate) with.
- const int required_length = (120 + 80 + 2) * fs_mult_;
- if (expanded_.Size() < static_cast<size_t>(required_length)) {
- while (expanded_.Size() < static_cast<size_t>(required_length)) {
+ const size_t required_length = static_cast<size_t>((120 + 80 + 2) * fs_mult_);
+ if (expanded_.Size() < required_length) {
+ while (expanded_.Size() < required_length) {
// Append one more pitch period each time.
expanded_.PushBack(expanded_temp);
}
// Trim the length to exactly |required_length|.
expanded_.PopBack(expanded_.Size() - required_length);
}
- assert(expanded_.Size() >= static_cast<size_t>(required_length));
+ assert(expanded_.Size() >= required_length);
return required_length;
}
-int16_t Merge::SignalScaling(const int16_t* input, int input_length,
+int16_t Merge::SignalScaling(const int16_t* input, size_t input_length,
const int16_t* expanded_signal,
int16_t* expanded_max, int16_t* input_max) const {
// Adjust muting factor if new vector is more or less of the BGN energy.
- const int mod_input_length = std::min(64 * fs_mult_, input_length);
+ const size_t mod_input_length =
+ std::min(static_cast<size_t>(64 * fs_mult_), input_length);
*expanded_max = WebRtcSpl_MaxAbsValueW16(expanded_signal, mod_input_length);
*input_max = WebRtcSpl_MaxAbsValueW16(input, mod_input_length);
@@ -260,13 +260,13 @@
// TODO(hlundin): There are some parameter values in this method that seem
// strange. Compare with Expand::Correlation.
-void Merge::Downsample(const int16_t* input, int input_length,
- const int16_t* expanded_signal, int expanded_length) {
+void Merge::Downsample(const int16_t* input, size_t input_length,
+ const int16_t* expanded_signal, size_t expanded_length) {
const int16_t* filter_coefficients;
- int num_coefficients;
+ size_t num_coefficients;
int decimation_factor = fs_hz_ / 4000;
- static const int kCompensateDelay = 0;
- int length_limit = fs_hz_ / 100; // 10 ms in samples.
+ static const size_t kCompensateDelay = 0;
+ size_t length_limit = static_cast<size_t>(fs_hz_ / 100); // 10 ms in samples.
if (fs_hz_ == 8000) {
filter_coefficients = DspHelper::kDownsample8kHzTbl;
num_coefficients = 3;
@@ -280,7 +280,7 @@
filter_coefficients = DspHelper::kDownsample48kHzTbl;
num_coefficients = 7;
}
- int signal_offset = num_coefficients - 1;
+ size_t signal_offset = num_coefficients - 1;
WebRtcSpl_DownsampleFast(&expanded_signal[signal_offset],
expanded_length - signal_offset,
expanded_downsampled_, kExpandDownsampLength,
@@ -288,10 +288,10 @@
decimation_factor, kCompensateDelay);
if (input_length <= length_limit) {
// Not quite long enough, so we have to cheat a bit.
- int16_t temp_len = input_length - signal_offset;
+ size_t temp_len = input_length - signal_offset;
// TODO(hlundin): Should |downsamp_temp_len| be corrected for round-off
// errors? I.e., (temp_len + decimation_factor - 1) / decimation_factor?
- int16_t downsamp_temp_len = temp_len / decimation_factor;
+ size_t downsamp_temp_len = temp_len / decimation_factor;
WebRtcSpl_DownsampleFast(&input[signal_offset], temp_len,
input_downsampled_, downsamp_temp_len,
filter_coefficients, num_coefficients,
@@ -307,12 +307,12 @@
}
}
-int16_t Merge::CorrelateAndPeakSearch(int16_t expanded_max, int16_t input_max,
- int start_position, int input_length,
- int expand_period) const {
+size_t Merge::CorrelateAndPeakSearch(int16_t expanded_max, int16_t input_max,
+ size_t start_position, size_t input_length,
+ size_t expand_period) const {
// Calculate correlation without any normalization.
- const int max_corr_length = kMaxCorrelationLength;
- int stop_position_downsamp =
+ const size_t max_corr_length = kMaxCorrelationLength;
+ size_t stop_position_downsamp =
std::min(max_corr_length, expand_->max_lag() / (fs_mult_ * 2) + 1);
int correlation_shift = 0;
if (expanded_max * input_max > 26843546) {
@@ -325,8 +325,8 @@
stop_position_downsamp, correlation_shift, 1);
// Normalize correlation to 14 bits and copy to a 16-bit array.
- const int pad_length = static_cast<int>(expand_->overlap_length() - 1);
- const int correlation_buffer_size = 2 * pad_length + kMaxCorrelationLength;
+ const size_t pad_length = expand_->overlap_length() - 1;
+ const size_t correlation_buffer_size = 2 * pad_length + kMaxCorrelationLength;
rtc::scoped_ptr<int16_t[]> correlation16(
new int16_t[correlation_buffer_size]);
memset(correlation16.get(), 0, correlation_buffer_size * sizeof(int16_t));
@@ -342,21 +342,20 @@
// (1) w16_bestIndex + input_length <
// timestamps_per_call_ + expand_->overlap_length();
// (2) w16_bestIndex + input_length < start_position.
- int start_index = timestamps_per_call_ +
- static_cast<int>(expand_->overlap_length());
+ size_t start_index = timestamps_per_call_ + expand_->overlap_length();
start_index = std::max(start_position, start_index);
start_index = (input_length > start_index) ? 0 : (start_index - input_length);
// Downscale starting index to 4kHz domain. (fs_mult_ * 2 = fs_hz_ / 4000.)
- int start_index_downsamp = start_index / (fs_mult_ * 2);
+ size_t start_index_downsamp = start_index / (fs_mult_ * 2);
// Calculate a modified |stop_position_downsamp| to account for the increased
// start index |start_index_downsamp| and the effective array length.
- int modified_stop_pos =
+ size_t modified_stop_pos =
std::min(stop_position_downsamp,
kMaxCorrelationLength + pad_length - start_index_downsamp);
- int best_correlation_index;
+ size_t best_correlation_index;
int16_t best_correlation;
- static const int kNumCorrelationCandidates = 1;
+ static const size_t kNumCorrelationCandidates = 1;
DspHelper::PeakDetection(&correlation_ptr[start_index_downsamp],
modified_stop_pos, kNumCorrelationCandidates,
fs_mult_, &best_correlation_index,
@@ -368,16 +367,16 @@
// least 10ms + overlap . (This should never happen thanks to the above
// modification of peak-finding starting point.)
while (((best_correlation_index + input_length) <
- static_cast<int>(timestamps_per_call_ + expand_->overlap_length())) ||
- ((best_correlation_index + input_length) < start_position)) {
+ (timestamps_per_call_ + expand_->overlap_length())) ||
+ ((best_correlation_index + input_length) < start_position)) {
assert(false); // Should never happen.
best_correlation_index += expand_period; // Jump one lag ahead.
}
return best_correlation_index;
}
-int Merge::RequiredFutureSamples() {
- return static_cast<int>(fs_hz_ / 100 * num_channels_); // 10 ms.
+size_t Merge::RequiredFutureSamples() {
+ return fs_hz_ / 100 * num_channels_; // 10 ms.
}
diff --git a/webrtc/modules/audio_coding/neteq/merge.h b/webrtc/modules/audio_coding/neteq/merge.h
index 1b60aec..727e9a6 100644
--- a/webrtc/modules/audio_coding/neteq/merge.h
+++ b/webrtc/modules/audio_coding/neteq/merge.h
@@ -46,11 +46,11 @@
// de-interleaving |input|. The values in |external_mute_factor_array| (Q14)
// will be used to scale the audio, and is updated in the process. The array
// must have |num_channels_| elements.
- virtual int Process(int16_t* input, size_t input_length,
- int16_t* external_mute_factor_array,
- AudioMultiVector* output);
+ virtual size_t Process(int16_t* input, size_t input_length,
+ int16_t* external_mute_factor_array,
+ AudioMultiVector* output);
- virtual int RequiredFutureSamples();
+ virtual size_t RequiredFutureSamples();
protected:
const int fs_hz_;
@@ -58,38 +58,38 @@
private:
static const int kMaxSampleRate = 48000;
- static const int kExpandDownsampLength = 100;
- static const int kInputDownsampLength = 40;
- static const int kMaxCorrelationLength = 60;
+ static const size_t kExpandDownsampLength = 100;
+ static const size_t kInputDownsampLength = 40;
+ static const size_t kMaxCorrelationLength = 60;
// Calls |expand_| to get more expansion data to merge with. The data is
// written to |expanded_signal_|. Returns the length of the expanded data,
// while |expand_period| will be the number of samples in one expansion period
// (typically one pitch period). The value of |old_length| will be the number
// of samples that were taken from the |sync_buffer_|.
- int GetExpandedSignal(int* old_length, int* expand_period);
+ size_t GetExpandedSignal(size_t* old_length, size_t* expand_period);
// Analyzes |input| and |expanded_signal| to find maximum values. Returns
// a muting factor (Q14) to be used on the new data.
- int16_t SignalScaling(const int16_t* input, int input_length,
+ int16_t SignalScaling(const int16_t* input, size_t input_length,
const int16_t* expanded_signal,
int16_t* expanded_max, int16_t* input_max) const;
// Downsamples |input| (|input_length| samples) and |expanded_signal| to
// 4 kHz sample rate. The downsampled signals are written to
// |input_downsampled_| and |expanded_downsampled_|, respectively.
- void Downsample(const int16_t* input, int input_length,
- const int16_t* expanded_signal, int expanded_length);
+ void Downsample(const int16_t* input, size_t input_length,
+ const int16_t* expanded_signal, size_t expanded_length);
// Calculates cross-correlation between |input_downsampled_| and
// |expanded_downsampled_|, and finds the correlation maximum. The maximizing
// lag is returned.
- int16_t CorrelateAndPeakSearch(int16_t expanded_max, int16_t input_max,
- int start_position, int input_length,
- int expand_period) const;
+ size_t CorrelateAndPeakSearch(int16_t expanded_max, int16_t input_max,
+ size_t start_position, size_t input_length,
+ size_t expand_period) const;
const int fs_mult_; // fs_hz_ / 8000.
- const int timestamps_per_call_;
+ const size_t timestamps_per_call_;
Expand* expand_;
SyncBuffer* sync_buffer_;
int16_t expanded_downsampled_[kExpandDownsampLength];
diff --git a/webrtc/modules/audio_coding/neteq/mock/mock_audio_decoder.h b/webrtc/modules/audio_coding/neteq/mock/mock_audio_decoder.h
index 93261ab..d26e2a1 100644
--- a/webrtc/modules/audio_coding/neteq/mock/mock_audio_decoder.h
+++ b/webrtc/modules/audio_coding/neteq/mock/mock_audio_decoder.h
@@ -26,7 +26,7 @@
Decode,
int(const uint8_t*, size_t, int, size_t, int16_t*, SpeechType*));
MOCK_CONST_METHOD0(HasDecodePlc, bool());
- MOCK_METHOD2(DecodePlc, int(int, int16_t*));
+ MOCK_METHOD2(DecodePlc, size_t(size_t, int16_t*));
MOCK_METHOD0(Init, int());
MOCK_METHOD5(IncomingPacket, int(const uint8_t*, size_t, uint16_t, uint32_t,
uint32_t));
diff --git a/webrtc/modules/audio_coding/neteq/mock/mock_buffer_level_filter.h b/webrtc/modules/audio_coding/neteq/mock/mock_buffer_level_filter.h
index ebc6acd..82dee2a 100644
--- a/webrtc/modules/audio_coding/neteq/mock/mock_buffer_level_filter.h
+++ b/webrtc/modules/audio_coding/neteq/mock/mock_buffer_level_filter.h
@@ -25,8 +25,8 @@
MOCK_METHOD0(Reset,
void());
MOCK_METHOD3(Update,
- void(int buffer_size_packets, int time_stretched_samples,
- int packet_len_samples));
+ void(size_t buffer_size_packets, int time_stretched_samples,
+ size_t packet_len_samples));
MOCK_METHOD1(SetTargetBufferLevel,
void(int target_buffer_level));
MOCK_CONST_METHOD0(filtered_current_level,
diff --git a/webrtc/modules/audio_coding/neteq/mock/mock_delay_manager.h b/webrtc/modules/audio_coding/neteq/mock/mock_delay_manager.h
index 1d2dc8e..6fb8585 100644
--- a/webrtc/modules/audio_coding/neteq/mock/mock_delay_manager.h
+++ b/webrtc/modules/audio_coding/neteq/mock/mock_delay_manager.h
@@ -19,7 +19,8 @@
class MockDelayManager : public DelayManager {
public:
- MockDelayManager(int max_packets_in_buffer, DelayPeakDetector* peak_detector)
+ MockDelayManager(size_t max_packets_in_buffer,
+ DelayPeakDetector* peak_detector)
: DelayManager(max_packets_in_buffer, peak_detector) {}
virtual ~MockDelayManager() { Die(); }
MOCK_METHOD0(Die, void());
diff --git a/webrtc/modules/audio_coding/neteq/mock/mock_dtmf_tone_generator.h b/webrtc/modules/audio_coding/neteq/mock/mock_dtmf_tone_generator.h
index 881e900..a1c370e 100644
--- a/webrtc/modules/audio_coding/neteq/mock/mock_dtmf_tone_generator.h
+++ b/webrtc/modules/audio_coding/neteq/mock/mock_dtmf_tone_generator.h
@@ -26,7 +26,7 @@
MOCK_METHOD0(Reset,
void());
MOCK_METHOD2(Generate,
- int(int num_samples, AudioMultiVector* output));
+ int(size_t num_samples, AudioMultiVector* output));
MOCK_CONST_METHOD0(initialized,
bool());
};
diff --git a/webrtc/modules/audio_coding/neteq/mock/mock_external_decoder_pcm16b.h b/webrtc/modules/audio_coding/neteq/mock/mock_external_decoder_pcm16b.h
index d8c8856..f239b4a 100644
--- a/webrtc/modules/audio_coding/neteq/mock/mock_external_decoder_pcm16b.h
+++ b/webrtc/modules/audio_coding/neteq/mock/mock_external_decoder_pcm16b.h
@@ -36,10 +36,9 @@
int sample_rate_hz,
int16_t* decoded,
SpeechType* speech_type) override {
- int16_t ret = WebRtcPcm16b_Decode(
- encoded, static_cast<int16_t>(encoded_len), decoded);
+ size_t ret = WebRtcPcm16b_Decode(encoded, encoded_len, decoded);
*speech_type = ConvertSpeechType(1);
- return ret;
+ return static_cast<int>(ret);
}
size_t Channels() const override { return 1; }
@@ -79,7 +78,7 @@
MOCK_CONST_METHOD0(HasDecodePlc,
bool());
MOCK_METHOD2(DecodePlc,
- int(int num_frames, int16_t* decoded));
+ size_t(size_t num_frames, int16_t* decoded));
MOCK_METHOD0(Init,
int());
MOCK_METHOD5(IncomingPacket,
diff --git a/webrtc/modules/audio_coding/neteq/mock/mock_packet_buffer.h b/webrtc/modules/audio_coding/neteq/mock/mock_packet_buffer.h
index 0eb7edc..97e54d8 100644
--- a/webrtc/modules/audio_coding/neteq/mock/mock_packet_buffer.h
+++ b/webrtc/modules/audio_coding/neteq/mock/mock_packet_buffer.h
@@ -41,7 +41,7 @@
MOCK_CONST_METHOD0(NextRtpHeader,
const RTPHeader*());
MOCK_METHOD1(GetNextPacket,
- Packet*(int* discard_count));
+ Packet*(size_t* discard_count));
MOCK_METHOD0(DiscardNextPacket,
int());
MOCK_METHOD2(DiscardOldPackets,
@@ -49,7 +49,7 @@
MOCK_METHOD1(DiscardAllOldPackets,
int(uint32_t timestamp_limit));
MOCK_CONST_METHOD0(NumPacketsInBuffer,
- int());
+ size_t());
MOCK_METHOD1(IncrementWaitingTimes,
void(int));
MOCK_CONST_METHOD0(current_memory_bytes,
diff --git a/webrtc/modules/audio_coding/neteq/neteq_external_decoder_unittest.cc b/webrtc/modules/audio_coding/neteq/neteq_external_decoder_unittest.cc
index 6f57a4a..3c945f9 100644
--- a/webrtc/modules/audio_coding/neteq/neteq_external_decoder_unittest.cc
+++ b/webrtc/modules/audio_coding/neteq/neteq_external_decoder_unittest.cc
@@ -169,7 +169,7 @@
class NetEqExternalVsInternalDecoderTest : public NetEqExternalDecoderUnitTest,
public ::testing::Test {
protected:
- static const int kMaxBlockSize = 480; // 10 ms @ 48 kHz.
+ static const size_t kMaxBlockSize = 480; // 10 ms @ 48 kHz.
NetEqExternalVsInternalDecoderTest()
: NetEqExternalDecoderUnitTest(kDecoderPCM16Bswb32kHz,
@@ -188,7 +188,7 @@
void GetAndVerifyOutput() override {
NetEqOutputType output_type;
- int samples_per_channel;
+ size_t samples_per_channel;
int num_channels;
// Get audio from internal decoder instance.
EXPECT_EQ(NetEq::kOK,
@@ -198,12 +198,13 @@
&num_channels,
&output_type));
EXPECT_EQ(1, num_channels);
- EXPECT_EQ(kOutputLengthMs * sample_rate_hz_ / 1000, samples_per_channel);
+ EXPECT_EQ(static_cast<size_t>(kOutputLengthMs * sample_rate_hz_ / 1000),
+ samples_per_channel);
// Get audio from external decoder instance.
samples_per_channel = GetOutputAudio(kMaxBlockSize, output_, &output_type);
- for (int i = 0; i < samples_per_channel; ++i) {
+ for (size_t i = 0; i < samples_per_channel; ++i) {
ASSERT_EQ(output_[i], output_internal_[i]) <<
"Diff in sample " << i << ".";
}
@@ -240,7 +241,7 @@
class LargeTimestampJumpTest : public NetEqExternalDecoderUnitTest,
public ::testing::Test {
protected:
- static const int kMaxBlockSize = 480; // 10 ms @ 48 kHz.
+ static const size_t kMaxBlockSize = 480; // 10 ms @ 48 kHz.
enum TestStates {
kInitialPhase,
@@ -293,7 +294,7 @@
}
void GetAndVerifyOutput() override {
- int num_samples;
+ size_t num_samples;
NetEqOutputType output_type;
num_samples = GetOutputAudio(kMaxBlockSize, output_, &output_type);
UpdateState(output_type);
@@ -303,7 +304,7 @@
return;
}
- for (int i = 0; i < num_samples; ++i) {
+ for (size_t i = 0; i < num_samples; ++i) {
if (output_[i] != 0)
return;
}
diff --git a/webrtc/modules/audio_coding/neteq/neteq_impl.cc b/webrtc/modules/audio_coding/neteq/neteq_impl.cc
index 636ae87..d890acb 100644
--- a/webrtc/modules/audio_coding/neteq/neteq_impl.cc
+++ b/webrtc/modules/audio_coding/neteq/neteq_impl.cc
@@ -16,6 +16,7 @@
#include <algorithm>
#include "webrtc/base/logging.h"
+#include "webrtc/base/safe_conversions.h"
#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
#include "webrtc/modules/audio_coding/codecs/audio_decoder.h"
#include "webrtc/modules/audio_coding/neteq/accelerate.h"
@@ -104,7 +105,7 @@
}
fs_hz_ = fs;
fs_mult_ = fs / 8000;
- output_size_samples_ = kOutputSizeMs * 8 * fs_mult_;
+ output_size_samples_ = static_cast<size_t>(kOutputSizeMs * 8 * fs_mult_);
decoder_frame_length_ = 3 * output_size_samples_;
WebRtcSpl_Init();
if (create_components) {
@@ -154,7 +155,7 @@
}
int NetEqImpl::GetAudio(size_t max_length, int16_t* output_audio,
- int* samples_per_channel, int* num_channels,
+ size_t* samples_per_channel, int* num_channels,
NetEqOutputType* type) {
CriticalSectionScoped lock(crit_sect_.get());
LOG(LS_VERBOSE) << "GetAudio";
@@ -305,10 +306,10 @@
int NetEqImpl::NetworkStatistics(NetEqNetworkStatistics* stats) {
CriticalSectionScoped lock(crit_sect_.get());
assert(decoder_database_.get());
- const int total_samples_in_buffers =
+ const size_t total_samples_in_buffers =
packet_buffer_->NumSamplesInBuffer(decoder_database_.get(),
decoder_frame_length_) +
- static_cast<int>(sync_buffer_->FutureLength());
+ sync_buffer_->FutureLength();
assert(delay_manager_.get());
assert(decision_logic_.get());
stats_.GetNetworkStatistics(fs_hz_, total_samples_in_buffers,
@@ -603,7 +604,7 @@
}
// Insert packets in buffer.
- int temp_bufsize = packet_buffer_->NumPacketsInBuffer();
+ size_t temp_bufsize = packet_buffer_->NumPacketsInBuffer();
ret = packet_buffer_->InsertPacketList(
&packet_list,
*decoder_database_,
@@ -665,7 +666,8 @@
if ((temp_bufsize > 0) &&
(temp_bufsize != decision_logic_->packet_length_samples())) {
decision_logic_->set_packet_length_samples(temp_bufsize);
- delay_manager_->SetPacketAudioLength((1000 * temp_bufsize) / fs_hz_);
+ delay_manager_->SetPacketAudioLength(
+ static_cast<int>((1000 * temp_bufsize) / fs_hz_));
}
// Update statistics.
@@ -688,7 +690,7 @@
int NetEqImpl::GetAudioInternal(size_t max_length,
int16_t* output,
- int* samples_per_channel,
+ size_t* samples_per_channel,
int* num_channels) {
PacketList packet_list;
DtmfEvent dtmf_event;
@@ -712,7 +714,7 @@
assert(vad_.get());
bool sid_frame_available =
(operation == kRfc3389Cng && !packet_list.empty());
- vad_->Update(decoded_buffer_.get(), length, speech_type,
+ vad_->Update(decoded_buffer_.get(), static_cast<size_t>(length), speech_type,
sid_frame_available, fs_hz_);
algorithm_buffer_->Clear();
@@ -811,12 +813,11 @@
LOG(LS_WARNING) << "Output array is too short. " << max_length << " < " <<
output_size_samples_ << " * " << sync_buffer_->Channels();
num_output_samples = max_length;
- num_output_samples_per_channel = static_cast<int>(
- max_length / sync_buffer_->Channels());
+ num_output_samples_per_channel = max_length / sync_buffer_->Channels();
}
- const int samples_from_sync =
- static_cast<int>(sync_buffer_->GetNextAudioInterleaved(
- num_output_samples_per_channel, output));
+ const size_t samples_from_sync =
+ sync_buffer_->GetNextAudioInterleaved(num_output_samples_per_channel,
+ output);
*num_channels = static_cast<int>(sync_buffer_->Channels());
LOG(LS_VERBOSE) << "Sync buffer (" << *num_channels << " channel(s)):" <<
" insert " << algorithm_buffer_->Size() << " samples, extract " <<
@@ -922,7 +923,8 @@
last_mode_ == kModePreemptiveExpandSuccess ||
last_mode_ == kModePreemptiveExpandLowEnergy) {
// Subtract (samples_left + output_size_samples_) from sampleMemory.
- decision_logic_->AddSampleMemory(-(samples_left + output_size_samples_));
+ decision_logic_->AddSampleMemory(
+ -(samples_left + rtc::checked_cast<int>(output_size_samples_)));
}
// Check if it is time to play a DTMF event.
@@ -947,8 +949,10 @@
// Check if we already have enough samples in the |sync_buffer_|. If so,
// change decision to normal, unless the decision was merge, accelerate, or
// preemptive expand.
- if (samples_left >= output_size_samples_ && *operation != kMerge &&
- *operation != kAccelerate && *operation != kFastAccelerate &&
+ if (samples_left >= rtc::checked_cast<int>(output_size_samples_) &&
+ *operation != kMerge &&
+ *operation != kAccelerate &&
+ *operation != kFastAccelerate &&
*operation != kPreemptiveExpand) {
*operation = kNormal;
return 0;
@@ -996,10 +1000,10 @@
stats_.ResetMcu();
}
- int required_samples = output_size_samples_;
- const int samples_10_ms = 80 * fs_mult_;
- const int samples_20_ms = 2 * samples_10_ms;
- const int samples_30_ms = 3 * samples_10_ms;
+ size_t required_samples = output_size_samples_;
+ const size_t samples_10_ms = static_cast<size_t>(80 * fs_mult_);
+ const size_t samples_20_ms = 2 * samples_10_ms;
+ const size_t samples_30_ms = 3 * samples_10_ms;
switch (*operation) {
case kExpand: {
@@ -1028,17 +1032,17 @@
case kAccelerate:
case kFastAccelerate: {
// In order to do an accelerate we need at least 30 ms of audio data.
- if (samples_left >= samples_30_ms) {
+ if (samples_left >= static_cast<int>(samples_30_ms)) {
// Already have enough data, so we do not need to extract any more.
decision_logic_->set_sample_memory(samples_left);
decision_logic_->set_prev_time_scale(true);
return 0;
- } else if (samples_left >= samples_10_ms &&
+ } else if (samples_left >= static_cast<int>(samples_10_ms) &&
decoder_frame_length_ >= samples_30_ms) {
// Avoid decoding more data as it might overflow the playout buffer.
*operation = kNormal;
return 0;
- } else if (samples_left < samples_20_ms &&
+ } else if (samples_left < static_cast<int>(samples_20_ms) &&
decoder_frame_length_ < samples_30_ms) {
// Build up decoded data by decoding at least 20 ms of audio data. Do
// not perform accelerate yet, but wait until we only need to do one
@@ -1056,8 +1060,8 @@
case kPreemptiveExpand: {
// In order to do a preemptive expand we need at least 30 ms of decoded
// audio data.
- if ((samples_left >= samples_30_ms) ||
- (samples_left >= samples_10_ms &&
+ if ((samples_left >= static_cast<int>(samples_30_ms)) ||
+ (samples_left >= static_cast<int>(samples_10_ms) &&
decoder_frame_length_ >= samples_30_ms)) {
// Already have enough data, so we do not need to extract any more.
// Or, avoid decoding more data as it might overflow the playout buffer.
@@ -1066,7 +1070,7 @@
decision_logic_->set_prev_time_scale(true);
return 0;
}
- if (samples_left < samples_20_ms &&
+ if (samples_left < static_cast<int>(samples_20_ms) &&
decoder_frame_length_ < samples_30_ms) {
// Build up decoded data by decoding at least 20 ms of audio data.
// Still try to perform preemptive expand.
@@ -1123,7 +1127,7 @@
if (*operation == kAccelerate || *operation == kFastAccelerate) {
// Check that we have enough data (30ms) to do accelerate.
- if (extracted_samples + samples_left < samples_30_ms) {
+ if (extracted_samples + samples_left < static_cast<int>(samples_30_ms)) {
// TODO(hlundin): Write test for this.
// Not enough, do normal operation instead.
*operation = kNormal;
@@ -1274,7 +1278,7 @@
memset(&decoded_buffer_[*decoded_length], 0,
decoder_frame_length_ * decoder->Channels() *
sizeof(decoded_buffer_[0]));
- decode_length = decoder_frame_length_;
+ decode_length = rtc::checked_cast<int>(decoder_frame_length_);
} else if (!packet->primary) {
// This is a redundant payload; call the special decoder method.
LOG(LS_VERBOSE) << "Decoding packet (redundant):" <<
@@ -1307,7 +1311,7 @@
*decoded_length += decode_length;
// Update |decoder_frame_length_| with number of samples per channel.
decoder_frame_length_ =
- decode_length / static_cast<int>(decoder->Channels());
+ static_cast<size_t>(decode_length) / decoder->Channels();
LOG(LS_VERBOSE) << "Decoded " << decode_length << " samples ("
<< decoder->Channels() << " channel(s) -> "
<< decoder_frame_length_ << " samples per channel)";
@@ -1366,11 +1370,11 @@
AudioDecoder::SpeechType speech_type, bool play_dtmf) {
assert(mute_factor_array_.get());
assert(merge_.get());
- int new_length = merge_->Process(decoded_buffer, decoded_length,
- mute_factor_array_.get(),
- algorithm_buffer_.get());
- int expand_length_correction = new_length -
- static_cast<int>(decoded_length / algorithm_buffer_->Channels());
+ size_t new_length = merge_->Process(decoded_buffer, decoded_length,
+ mute_factor_array_.get(),
+ algorithm_buffer_.get());
+ size_t expand_length_correction = new_length -
+ decoded_length / algorithm_buffer_->Channels();
// Update in-call and post-call statistics.
if (expand_->MuteFactor(0) == 0) {
@@ -1394,10 +1398,10 @@
int NetEqImpl::DoExpand(bool play_dtmf) {
while ((sync_buffer_->FutureLength() - expand_->overlap_length()) <
- static_cast<size_t>(output_size_samples_)) {
+ output_size_samples_) {
algorithm_buffer_->Clear();
int return_value = expand_->Process(algorithm_buffer_.get());
- int length = static_cast<int>(algorithm_buffer_->Size());
+ size_t length = algorithm_buffer_->Size();
// Update in-call and post-call statistics.
if (expand_->MuteFactor(0) == 0) {
@@ -1428,7 +1432,8 @@
AudioDecoder::SpeechType speech_type,
bool play_dtmf,
bool fast_accelerate) {
- const size_t required_samples = 240 * fs_mult_; // Must have 30 ms.
+ const size_t required_samples =
+ static_cast<size_t>(240 * fs_mult_); // Must have 30 ms.
size_t borrowed_samples_per_channel = 0;
size_t num_channels = algorithm_buffer_->Channels();
size_t decoded_length_per_channel = decoded_length / num_channels;
@@ -1444,7 +1449,7 @@
decoded_length = required_samples * num_channels;
}
- int16_t samples_removed;
+ size_t samples_removed;
Accelerate::ReturnCodes return_code =
accelerate_->Process(decoded_buffer, decoded_length, fast_accelerate,
algorithm_buffer_.get(), &samples_removed);
@@ -1501,20 +1506,20 @@
size_t decoded_length,
AudioDecoder::SpeechType speech_type,
bool play_dtmf) {
- const size_t required_samples = 240 * fs_mult_; // Must have 30 ms.
+ const size_t required_samples =
+ static_cast<size_t>(240 * fs_mult_); // Must have 30 ms.
size_t num_channels = algorithm_buffer_->Channels();
- int borrowed_samples_per_channel = 0;
- int old_borrowed_samples_per_channel = 0;
+ size_t borrowed_samples_per_channel = 0;
+ size_t old_borrowed_samples_per_channel = 0;
size_t decoded_length_per_channel = decoded_length / num_channels;
if (decoded_length_per_channel < required_samples) {
// Must move data from the |sync_buffer_| in order to get 30 ms.
- borrowed_samples_per_channel = static_cast<int>(required_samples -
- decoded_length_per_channel);
+ borrowed_samples_per_channel =
+ required_samples - decoded_length_per_channel;
// Calculate how many of these were already played out.
- const int future_length = static_cast<int>(sync_buffer_->FutureLength());
old_borrowed_samples_per_channel =
- (borrowed_samples_per_channel > future_length) ?
- (borrowed_samples_per_channel - future_length) : 0;
+ (borrowed_samples_per_channel > sync_buffer_->FutureLength()) ?
+ (borrowed_samples_per_channel - sync_buffer_->FutureLength()) : 0;
memmove(&decoded_buffer[borrowed_samples_per_channel * num_channels],
decoded_buffer,
sizeof(int16_t) * decoded_length);
@@ -1523,9 +1528,9 @@
decoded_length = required_samples * num_channels;
}
- int16_t samples_added;
+ size_t samples_added;
PreemptiveExpand::ReturnCodes return_code = preemptive_expand_->Process(
- decoded_buffer, static_cast<int>(decoded_length),
+ decoded_buffer, decoded_length,
old_borrowed_samples_per_channel,
algorithm_buffer_.get(), &samples_added);
stats_.PreemptiveExpandedSamples(samples_added);
@@ -1719,17 +1724,14 @@
void NetEqImpl::DoAlternativePlc(bool increase_timestamp) {
AudioDecoder* decoder = decoder_database_->GetActiveDecoder();
- int length;
+ size_t length;
if (decoder && decoder->HasDecodePlc()) {
// Use the decoder's packet-loss concealment.
// TODO(hlundin): Will probably need a longer buffer for multi-channel.
int16_t decoded_buffer[kMaxFrameSize];
length = decoder->DecodePlc(1, decoded_buffer);
- if (length > 0) {
+ if (length > 0)
algorithm_buffer_->PushBackInterleaved(decoded_buffer, length);
- } else {
- length = 0;
- }
} else {
// Do simple zero-stuffing.
length = output_size_samples_;
@@ -1746,14 +1748,14 @@
int NetEqImpl::DtmfOverdub(const DtmfEvent& dtmf_event, size_t num_channels,
int16_t* output) const {
size_t out_index = 0;
- int overdub_length = output_size_samples_; // Default value.
+ size_t overdub_length = output_size_samples_; // Default value.
if (sync_buffer_->dtmf_index() > sync_buffer_->next_index()) {
// Special operation for transition from "DTMF only" to "DTMF overdub".
out_index = std::min(
sync_buffer_->dtmf_index() - sync_buffer_->next_index(),
- static_cast<size_t>(output_size_samples_));
- overdub_length = output_size_samples_ - static_cast<int>(out_index);
+ output_size_samples_);
+ overdub_length = output_size_samples_ - out_index;
}
AudioMultiVector dtmf_output(num_channels);
@@ -1765,13 +1767,14 @@
if (dtmf_return_value == 0) {
dtmf_return_value = dtmf_tone_generator_->Generate(overdub_length,
&dtmf_output);
- assert((size_t) overdub_length == dtmf_output.Size());
+ assert(overdub_length == dtmf_output.Size());
}
dtmf_output.ReadInterleaved(overdub_length, &output[out_index]);
return dtmf_return_value < 0 ? dtmf_return_value : 0;
}
-int NetEqImpl::ExtractPackets(int required_samples, PacketList* packet_list) {
+int NetEqImpl::ExtractPackets(size_t required_samples,
+ PacketList* packet_list) {
bool first_packet = true;
uint8_t prev_payload_type = 0;
uint32_t prev_timestamp = 0;
@@ -1790,7 +1793,7 @@
// Packet extraction loop.
do {
timestamp_ = header->timestamp;
- int discard_count = 0;
+ size_t discard_count = 0;
Packet* packet = packet_buffer_->GetNextPacket(&discard_count);
// |header| may be invalid after the |packet_buffer_| operation.
header = NULL;
@@ -1819,7 +1822,7 @@
packet->header.payloadType);
if (decoder) {
if (packet->sync_packet) {
- packet_duration = decoder_frame_length_;
+ packet_duration = rtc::checked_cast<int>(decoder_frame_length_);
} else {
if (packet->primary) {
packet_duration = decoder->PacketDuration(packet->payload,
@@ -1838,7 +1841,7 @@
if (packet_duration <= 0) {
// Decoder did not return a packet duration. Assume that the packet
// contains the same number of samples as the previous one.
- packet_duration = decoder_frame_length_;
+ packet_duration = rtc::checked_cast<int>(decoder_frame_length_);
}
extracted_samples = packet->header.timestamp - first_timestamp +
packet_duration;
@@ -1848,7 +1851,7 @@
next_packet_available = false;
if (header && prev_payload_type == header->payloadType) {
int16_t seq_no_diff = header->sequenceNumber - prev_sequence_number;
- int32_t ts_diff = header->timestamp - prev_timestamp;
+ size_t ts_diff = header->timestamp - prev_timestamp;
if (seq_no_diff == 1 ||
(seq_no_diff == 0 && ts_diff == decoder_frame_length_)) {
// The next sequence number is available, or the next part of a packet
@@ -1857,7 +1860,8 @@
}
prev_sequence_number = header->sequenceNumber;
}
- } while (extracted_samples < required_samples && next_packet_available);
+ } while (extracted_samples < rtc::checked_cast<int>(required_samples) &&
+ next_packet_available);
if (extracted_samples > 0) {
// Delete old packets only when we are going to decode something. Otherwise,
@@ -1886,7 +1890,7 @@
fs_hz_ = fs_hz;
fs_mult_ = fs_hz / 8000;
- output_size_samples_ = kOutputSizeMs * 8 * fs_mult_;
+ output_size_samples_ = static_cast<size_t>(kOutputSizeMs * 8 * fs_mult_);
decoder_frame_length_ = 3 * output_size_samples_; // Initialize to 30ms.
last_mode_ = kModeNormal;
@@ -1931,9 +1935,7 @@
accelerate_.reset(
accelerate_factory_->Create(fs_hz, channels, *background_noise_));
preemptive_expand_.reset(preemptive_expand_factory_->Create(
- fs_hz, channels,
- *background_noise_,
- static_cast<int>(expand_->overlap_length())));
+ fs_hz, channels, *background_noise_, expand_->overlap_length()));
// Delete ComfortNoise object and create a new one.
comfort_noise_.reset(new ComfortNoise(fs_hz, decoder_database_.get(),
diff --git a/webrtc/modules/audio_coding/neteq/neteq_impl.h b/webrtc/modules/audio_coding/neteq/neteq_impl.h
index 55ba067..502204a 100644
--- a/webrtc/modules/audio_coding/neteq/neteq_impl.h
+++ b/webrtc/modules/audio_coding/neteq/neteq_impl.h
@@ -106,7 +106,7 @@
// Returns kOK on success, or kFail in case of an error.
int GetAudio(size_t max_length,
int16_t* output_audio,
- int* samples_per_channel,
+ size_t* samples_per_channel,
int* num_channels,
NetEqOutputType* type) override;
@@ -203,9 +203,9 @@
protected:
static const int kOutputSizeMs = 10;
- static const int kMaxFrameSize = 2880; // 60 ms @ 48 kHz.
+ static const size_t kMaxFrameSize = 2880; // 60 ms @ 48 kHz.
// TODO(hlundin): Provide a better value for kSyncBufferSize.
- static const int kSyncBufferSize = 2 * kMaxFrameSize;
+ static const size_t kSyncBufferSize = 2 * kMaxFrameSize;
// Inserts a new packet into NetEq. This is used by the InsertPacket method
// above. Returns 0 on success, otherwise an error code.
@@ -225,7 +225,7 @@
// Returns 0 on success, otherwise an error code.
int GetAudioInternal(size_t max_length,
int16_t* output,
- int* samples_per_channel,
+ size_t* samples_per_channel,
int* num_channels) EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
// Provides a decision to the GetAudioInternal method. The decision what to
@@ -318,7 +318,7 @@
// |required_samples| samples. The packets are inserted into |packet_list|.
// Returns the number of samples that the packets in the list will produce, or
// -1 in case of an error.
- int ExtractPackets(int required_samples, PacketList* packet_list)
+ int ExtractPackets(size_t required_samples, PacketList* packet_list)
EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
// Resets various variables and objects to new values based on the sample rate
@@ -375,8 +375,8 @@
StatisticsCalculator stats_ GUARDED_BY(crit_sect_);
int fs_hz_ GUARDED_BY(crit_sect_);
int fs_mult_ GUARDED_BY(crit_sect_);
- int output_size_samples_ GUARDED_BY(crit_sect_);
- int decoder_frame_length_ GUARDED_BY(crit_sect_);
+ size_t output_size_samples_ GUARDED_BY(crit_sect_);
+ size_t decoder_frame_length_ GUARDED_BY(crit_sect_);
Modes last_mode_ GUARDED_BY(crit_sect_);
rtc::scoped_ptr<int16_t[]> mute_factor_array_ GUARDED_BY(crit_sect_);
size_t decoded_buffer_length_ GUARDED_BY(crit_sect_);
diff --git a/webrtc/modules/audio_coding/neteq/neteq_impl_unittest.cc b/webrtc/modules/audio_coding/neteq/neteq_impl_unittest.cc
index 05a8de2..006a5ad 100644
--- a/webrtc/modules/audio_coding/neteq/neteq_impl_unittest.cc
+++ b/webrtc/modules/audio_coding/neteq/neteq_impl_unittest.cc
@@ -384,7 +384,7 @@
neteq_->RegisterPayloadType(kDecoderPCM16B, kPayloadType));
// Insert packets. The buffer should not flush.
- for (int i = 1; i <= config_.max_packets_in_buffer; ++i) {
+ for (size_t i = 1; i <= config_.max_packets_in_buffer; ++i) {
EXPECT_EQ(NetEq::kOK,
neteq_->InsertPacket(
rtp_header, payload, kPayloadLengthBytes, kReceiveTime));
@@ -398,7 +398,7 @@
EXPECT_EQ(NetEq::kOK,
neteq_->InsertPacket(
rtp_header, payload, kPayloadLengthBytes, kReceiveTime));
- EXPECT_EQ(1, packet_buffer_->NumPacketsInBuffer());
+ EXPECT_EQ(1u, packet_buffer_->NumPacketsInBuffer());
const RTPHeader* test_header = packet_buffer_->NextRtpHeader();
EXPECT_EQ(rtp_header.header.timestamp, test_header->timestamp);
EXPECT_EQ(rtp_header.header.sequenceNumber, test_header->sequenceNumber);
@@ -413,7 +413,8 @@
const uint8_t kPayloadType = 17; // Just an arbitrary number.
const uint32_t kReceiveTime = 17; // Value doesn't matter for this test.
const int kSampleRateHz = 8000;
- const int kPayloadLengthSamples = 10 * kSampleRateHz / 1000; // 10 ms.
+ const size_t kPayloadLengthSamples =
+ static_cast<size_t>(10 * kSampleRateHz / 1000); // 10 ms.
const size_t kPayloadLengthBytes = kPayloadLengthSamples;
uint8_t payload[kPayloadLengthBytes] = {0};
WebRtcRTPHeader rtp_header;
@@ -466,9 +467,9 @@
rtp_header, payload, kPayloadLengthBytes, kReceiveTime));
// Pull audio once.
- const int kMaxOutputSize = 10 * kSampleRateHz / 1000;
+ const size_t kMaxOutputSize = static_cast<size_t>(10 * kSampleRateHz / 1000);
int16_t output[kMaxOutputSize];
- int samples_per_channel;
+ size_t samples_per_channel;
int num_channels;
NetEqOutputType type;
EXPECT_EQ(
@@ -480,7 +481,8 @@
EXPECT_EQ(kOutputNormal, type);
// Start with a simple check that the fake decoder is behaving as expected.
- EXPECT_EQ(kPayloadLengthSamples, decoder_.next_value() - 1);
+ EXPECT_EQ(kPayloadLengthSamples,
+ static_cast<size_t>(decoder_.next_value() - 1));
// The value of the last of the output samples is the same as the number of
// samples played from the decoded packet. Thus, this number + the RTP
@@ -500,7 +502,7 @@
// Check that the number of samples still to play from the sync buffer add
// up with what was already played out.
EXPECT_EQ(kPayloadLengthSamples - output[samples_per_channel - 1],
- static_cast<int>(sync_buffer->FutureLength()));
+ sync_buffer->FutureLength());
}
TEST_F(NetEqImplTest, ReorderedPacket) {
@@ -510,7 +512,8 @@
const uint8_t kPayloadType = 17; // Just an arbitrary number.
const uint32_t kReceiveTime = 17; // Value doesn't matter for this test.
const int kSampleRateHz = 8000;
- const int kPayloadLengthSamples = 10 * kSampleRateHz / 1000; // 10 ms.
+ const size_t kPayloadLengthSamples =
+ static_cast<size_t>(10 * kSampleRateHz / 1000); // 10 ms.
const size_t kPayloadLengthBytes = kPayloadLengthSamples;
uint8_t payload[kPayloadLengthBytes] = {0};
WebRtcRTPHeader rtp_header;
@@ -544,9 +547,9 @@
rtp_header, payload, kPayloadLengthBytes, kReceiveTime));
// Pull audio once.
- const int kMaxOutputSize = 10 * kSampleRateHz / 1000;
+ const size_t kMaxOutputSize = static_cast<size_t>(10 * kSampleRateHz / 1000);
int16_t output[kMaxOutputSize];
- int samples_per_channel;
+ size_t samples_per_channel;
int num_channels;
NetEqOutputType type;
EXPECT_EQ(
@@ -606,7 +609,8 @@
const uint8_t kPayloadType = 17; // Just an arbitrary number.
const uint32_t kReceiveTime = 17; // Value doesn't matter for this test.
const int kSampleRateHz = 8000;
- const int kPayloadLengthSamples = 10 * kSampleRateHz / 1000; // 10 ms.
+ const size_t kPayloadLengthSamples =
+ static_cast<size_t>(10 * kSampleRateHz / 1000); // 10 ms.
const size_t kPayloadLengthBytes = kPayloadLengthSamples;
uint8_t payload[kPayloadLengthBytes] = {0};
WebRtcRTPHeader rtp_header;
@@ -623,9 +627,9 @@
EXPECT_EQ(NetEq::kUnknownRtpPayloadType, neteq_->LastError());
// Pull audio once.
- const int kMaxOutputSize = 10 * kSampleRateHz / 1000;
+ const size_t kMaxOutputSize = static_cast<size_t>(10 * kSampleRateHz / 1000);
int16_t output[kMaxOutputSize];
- int samples_per_channel;
+ size_t samples_per_channel;
int num_channels;
NetEqOutputType type;
EXPECT_EQ(NetEq::kOK,
@@ -641,7 +645,7 @@
neteq_->RegisterPayloadType(kDecoderPCM16B, kPayloadType));
// Insert 10 packets.
- for (int i = 0; i < 10; ++i) {
+ for (size_t i = 0; i < 10; ++i) {
rtp_header.header.sequenceNumber++;
rtp_header.header.timestamp += kPayloadLengthSamples;
EXPECT_EQ(NetEq::kOK,
@@ -651,7 +655,7 @@
}
// Pull audio repeatedly and make sure we get normal output, that is not PLC.
- for (int i = 0; i < 3; ++i) {
+ for (size_t i = 0; i < 3; ++i) {
EXPECT_EQ(NetEq::kOK,
neteq_->GetAudio(kMaxOutputSize, output, &samples_per_channel,
&num_channels, &type));
@@ -672,8 +676,9 @@
const uint8_t kPayloadType = 17; // Just an arbitrary number.
const uint32_t kReceiveTime = 17; // Value doesn't matter for this test.
const int kSampleRateKhz = 48;
- const int kPayloadLengthSamples = 20 * kSampleRateKhz; // 20 ms.
- const int kPayloadLengthBytes = 10;
+ const size_t kPayloadLengthSamples =
+ static_cast<size_t>(20 * kSampleRateKhz); // 20 ms.
+ const size_t kPayloadLengthBytes = 10;
uint8_t payload[kPayloadLengthBytes] = {0};
int16_t dummy_output[kPayloadLengthSamples] = {0};
@@ -736,9 +741,9 @@
neteq_->InsertPacket(
rtp_header, payload, kPayloadLengthBytes, kReceiveTime));
- const int kMaxOutputSize = 10 * kSampleRateKhz;
+ const size_t kMaxOutputSize = static_cast<size_t>(10 * kSampleRateKhz);
int16_t output[kMaxOutputSize];
- int samples_per_channel;
+ size_t samples_per_channel;
int num_channels;
uint32_t timestamp;
uint32_t last_timestamp;
@@ -762,7 +767,7 @@
&num_channels, &type));
EXPECT_TRUE(neteq_->GetPlayoutTimestamp(&last_timestamp));
- for (int i = 1; i < 6; ++i) {
+ for (size_t i = 1; i < 6; ++i) {
ASSERT_EQ(kMaxOutputSize, samples_per_channel);
EXPECT_EQ(1, num_channels);
EXPECT_EQ(expected_type[i - 1], type);
@@ -783,7 +788,7 @@
neteq_->InsertPacket(
rtp_header, payload, kPayloadLengthBytes, kReceiveTime));
- for (int i = 6; i < 8; ++i) {
+ for (size_t i = 6; i < 8; ++i) {
ASSERT_EQ(kMaxOutputSize, samples_per_channel);
EXPECT_EQ(1, num_channels);
EXPECT_EQ(expected_type[i - 1], type);
@@ -811,7 +816,8 @@
const uint32_t kReceiveTime = 17; // Value doesn't matter for this test.
const int kSampleRateHz = 8000;
- const int kPayloadLengthSamples = 10 * kSampleRateHz / 1000; // 10 ms.
+ const size_t kPayloadLengthSamples =
+ static_cast<size_t>(10 * kSampleRateHz / 1000); // 10 ms.
const size_t kPayloadLengthBytes = 1;
uint8_t payload[kPayloadLengthBytes]= {0};
int16_t dummy_output[kPayloadLengthSamples * kChannels] = {0};
@@ -852,7 +858,8 @@
dummy_output +
kPayloadLengthSamples * kChannels),
SetArgPointee<4>(AudioDecoder::kSpeech),
- Return(kPayloadLengthSamples * kChannels)));
+ Return(static_cast<int>(
+ kPayloadLengthSamples * kChannels))));
EXPECT_CALL(decoder_, PacketDuration(Pointee(kSecondPayloadValue),
kPayloadLengthBytes))
@@ -879,9 +886,10 @@
neteq_->InsertPacket(
rtp_header, payload, kPayloadLengthBytes, kReceiveTime));
- const int kMaxOutputSize = 10 * kSampleRateHz / 1000 * kChannels;
+ const size_t kMaxOutputSize =
+ static_cast<size_t>(10 * kSampleRateHz / 1000 * kChannels);
int16_t output[kMaxOutputSize];
- int samples_per_channel;
+ size_t samples_per_channel;
int num_channels;
NetEqOutputType type;
diff --git a/webrtc/modules/audio_coding/neteq/neteq_stereo_unittest.cc b/webrtc/modules/audio_coding/neteq/neteq_stereo_unittest.cc
index ea88f24..5564e20 100644
--- a/webrtc/modules/audio_coding/neteq/neteq_stereo_unittest.cc
+++ b/webrtc/modules/audio_coding/neteq/neteq_stereo_unittest.cc
@@ -43,7 +43,7 @@
class NetEqStereoTest : public ::testing::TestWithParam<TestParameters> {
protected:
static const int kTimeStepMs = 10;
- static const int kMaxBlockSize = 480; // 10 ms @ 48 kHz.
+ static const size_t kMaxBlockSize = 480; // 10 ms @ 48 kHz.
static const uint8_t kPayloadTypeMono = 95;
static const uint8_t kPayloadTypeMulti = 96;
@@ -52,7 +52,8 @@
sample_rate_hz_(GetParam().sample_rate),
samples_per_ms_(sample_rate_hz_ / 1000),
frame_size_ms_(GetParam().frame_size),
- frame_size_samples_(frame_size_ms_ * samples_per_ms_),
+ frame_size_samples_(
+ static_cast<size_t>(frame_size_ms_ * samples_per_ms_)),
output_size_samples_(10 * samples_per_ms_),
rtp_generator_mono_(samples_per_ms_),
rtp_generator_(samples_per_ms_),
@@ -212,7 +213,7 @@
}
NetEqOutputType output_type;
// Get audio from mono instance.
- int samples_per_channel;
+ size_t samples_per_channel;
int num_channels;
EXPECT_EQ(NetEq::kOK,
neteq_mono_->GetAudio(kMaxBlockSize, output_,
@@ -242,8 +243,8 @@
const int sample_rate_hz_;
const int samples_per_ms_;
const int frame_size_ms_;
- const int frame_size_samples_;
- const int output_size_samples_;
+ const size_t frame_size_samples_;
+ const size_t output_size_samples_;
NetEq* neteq_mono_;
NetEq* neteq_;
test::RtpGenerator rtp_generator_mono_;
@@ -256,8 +257,8 @@
int16_t* output_multi_channel_;
WebRtcRTPHeader rtp_header_mono_;
WebRtcRTPHeader rtp_header_;
- int payload_size_bytes_;
- int multi_payload_size_bytes_;
+ size_t payload_size_bytes_;
+ size_t multi_payload_size_bytes_;
int last_send_time_;
int last_arrival_time_;
rtc::scoped_ptr<test::InputAudioFile> input_file_;
diff --git a/webrtc/modules/audio_coding/neteq/neteq_unittest.cc b/webrtc/modules/audio_coding/neteq/neteq_unittest.cc
index 7137a68..03fde53 100644
--- a/webrtc/modules/audio_coding/neteq/neteq_unittest.cc
+++ b/webrtc/modules/audio_coding/neteq/neteq_unittest.cc
@@ -37,16 +37,16 @@
namespace webrtc {
-static bool IsAllZero(const int16_t* buf, int buf_length) {
+static bool IsAllZero(const int16_t* buf, size_t buf_length) {
bool all_zero = true;
- for (int n = 0; n < buf_length && all_zero; ++n)
+ for (size_t n = 0; n < buf_length && all_zero; ++n)
all_zero = buf[n] == 0;
return all_zero;
}
-static bool IsAllNonZero(const int16_t* buf, int buf_length) {
+static bool IsAllNonZero(const int16_t* buf, size_t buf_length) {
bool all_non_zero = true;
- for (int n = 0; n < buf_length && all_non_zero; ++n)
+ for (size_t n = 0; n < buf_length && all_non_zero; ++n)
all_non_zero = buf[n] != 0;
return all_non_zero;
}
@@ -172,7 +172,8 @@
ASSERT_EQ(stats.preemptive_rate, ref_stats.preemptive_rate);
ASSERT_EQ(stats.accelerate_rate, ref_stats.accelerate_rate);
ASSERT_EQ(stats.clockdrift_ppm, ref_stats.clockdrift_ppm);
- ASSERT_EQ(stats.added_zero_samples, ref_stats.added_zero_samples);
+ ASSERT_EQ(stats.added_zero_samples,
+ static_cast<size_t>(ref_stats.added_zero_samples));
ASSERT_EQ(stats.secondary_decoded_rate, 0);
ASSERT_LE(stats.speech_expand_rate, ref_stats.expand_rate);
}
@@ -220,9 +221,9 @@
// NetEQ must be polled for data once every 10 ms. Thus, neither of the
// constants below can be changed.
static const int kTimeStepMs = 10;
- static const int kBlockSize8kHz = kTimeStepMs * 8;
- static const int kBlockSize16kHz = kTimeStepMs * 16;
- static const int kBlockSize32kHz = kTimeStepMs * 32;
+ static const size_t kBlockSize8kHz = kTimeStepMs * 8;
+ static const size_t kBlockSize16kHz = kTimeStepMs * 16;
+ static const size_t kBlockSize32kHz = kTimeStepMs * 32;
static const size_t kMaxBlockSize = kBlockSize32kHz;
static const int kInitSampleRateHz = 8000;
@@ -232,7 +233,7 @@
void SelectDecoders(NetEqDecoder* used_codec);
void LoadDecoders();
void OpenInputFile(const std::string &rtp_file);
- void Process(int* out_len);
+ void Process(size_t* out_len);
void DecodeAndCompare(const std::string& rtp_file,
const std::string& ref_file,
const std::string& stat_ref_file,
@@ -272,9 +273,9 @@
// Allocating the static const so that it can be passed by reference.
const int NetEqDecodingTest::kTimeStepMs;
-const int NetEqDecodingTest::kBlockSize8kHz;
-const int NetEqDecodingTest::kBlockSize16kHz;
-const int NetEqDecodingTest::kBlockSize32kHz;
+const size_t NetEqDecodingTest::kBlockSize8kHz;
+const size_t NetEqDecodingTest::kBlockSize16kHz;
+const size_t NetEqDecodingTest::kBlockSize32kHz;
const size_t NetEqDecodingTest::kMaxBlockSize;
const int NetEqDecodingTest::kInitSampleRateHz;
@@ -334,7 +335,7 @@
rtp_source_.reset(test::RtpFileSource::Create(rtp_file));
}
-void NetEqDecodingTest::Process(int* out_len) {
+void NetEqDecodingTest::Process(size_t* out_len) {
// Check if time to receive.
while (packet_ && sim_clock_ >= packet_->time_ms()) {
if (packet_->payload_length_bytes() > 0) {
@@ -358,7 +359,7 @@
ASSERT_TRUE((*out_len == kBlockSize8kHz) ||
(*out_len == kBlockSize16kHz) ||
(*out_len == kBlockSize32kHz));
- output_sample_rate_ = *out_len / 10 * 1000;
+ output_sample_rate_ = static_cast<int>(*out_len / 10 * 1000);
// Increase time.
sim_clock_ += kTimeStepMs;
@@ -394,7 +395,7 @@
std::ostringstream ss;
ss << "Lap number " << i++ << " in DecodeAndCompare while loop";
SCOPED_TRACE(ss.str()); // Print out the parameter values on failure.
- int out_len = 0;
+ size_t out_len = 0;
ASSERT_NO_FATAL_FAILURE(Process(&out_len));
ASSERT_NO_FATAL_FAILURE(ref_files.ProcessReference(out_data_, out_len));
@@ -498,7 +499,7 @@
}
// Pull out all data.
for (size_t i = 0; i < num_frames; ++i) {
- int out_len;
+ size_t out_len;
int num_channels;
NetEqOutputType type;
ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, &out_len,
@@ -536,7 +537,7 @@
rtp_info,
reinterpret_cast<uint8_t*>(payload),
kPayloadBytes, 0));
- int out_len;
+ size_t out_len;
int num_channels;
NetEqOutputType type;
ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, &out_len,
@@ -566,7 +567,7 @@
}
// Pull out data once.
- int out_len;
+ size_t out_len;
int num_channels;
NetEqOutputType type;
ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, &out_len,
@@ -597,7 +598,7 @@
}
// Pull out data once.
- int out_len;
+ size_t out_len;
int num_channels;
NetEqOutputType type;
ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, &out_len,
@@ -622,7 +623,7 @@
const size_t kPayloadBytes = kSamples * 2;
double next_input_time_ms = 0.0;
double t_ms;
- int out_len;
+ size_t out_len;
int num_channels;
NetEqOutputType type;
@@ -854,7 +855,7 @@
out_data_[i] = 1;
}
int num_channels;
- int samples_per_channel;
+ size_t samples_per_channel;
EXPECT_EQ(NetEq::kFail,
neteq_->GetAudio(kMaxBlockSize, out_data_,
&samples_per_channel, &num_channels, &type));
@@ -887,7 +888,7 @@
out_data_[i] = 1;
}
int num_channels;
- int samples_per_channel;
+ size_t samples_per_channel;
EXPECT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_,
&samples_per_channel,
&num_channels, &type));
@@ -908,7 +909,7 @@
bool should_be_faded) = 0;
void CheckBgn(int sampling_rate_hz) {
- int16_t expected_samples_per_channel = 0;
+ size_t expected_samples_per_channel = 0;
uint8_t payload_type = 0xFF; // Invalid.
if (sampling_rate_hz == 8000) {
expected_samples_per_channel = kBlockSize8kHz;
@@ -932,7 +933,7 @@
ASSERT_TRUE(input.Init(
webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm"),
10 * sampling_rate_hz, // Max 10 seconds loop length.
- static_cast<size_t>(expected_samples_per_channel)));
+ expected_samples_per_channel));
// Payload of 10 ms of PCM16 32 kHz.
uint8_t payload[kBlockSize32kHz * sizeof(int16_t)];
@@ -941,19 +942,18 @@
rtp_info.header.payloadType = payload_type;
int number_channels = 0;
- int samples_per_channel = 0;
+ size_t samples_per_channel = 0;
uint32_t receive_timestamp = 0;
for (int n = 0; n < 10; ++n) { // Insert few packets and get audio.
- int16_t enc_len_bytes = WebRtcPcm16b_Encode(
+ size_t enc_len_bytes = WebRtcPcm16b_Encode(
input.GetNextBlock(), expected_samples_per_channel, payload);
ASSERT_EQ(enc_len_bytes, expected_samples_per_channel * 2);
number_channels = 0;
samples_per_channel = 0;
ASSERT_EQ(0,
- neteq_->InsertPacket(rtp_info, payload,
- static_cast<size_t>(enc_len_bytes),
+ neteq_->InsertPacket(rtp_info, payload, enc_len_bytes,
receive_timestamp));
ASSERT_EQ(0,
neteq_->GetAudio(kBlockSize32kHz,
@@ -1009,7 +1009,7 @@
if (type == kOutputPLCtoCNG) {
plc_to_cng = true;
double sum_squared = 0;
- for (int k = 0; k < number_channels * samples_per_channel; ++k)
+ for (size_t k = 0; k < number_channels * samples_per_channel; ++k)
sum_squared += output[k] * output[k];
TestCondition(sum_squared, n > kFadingThreshold);
} else {
@@ -1168,7 +1168,7 @@
// actual decoded values.
NetEqOutputType output_type;
int num_channels;
- int samples_per_channel;
+ size_t samples_per_channel;
uint32_t receive_timestamp = 0;
for (int n = 0; n < 100; ++n) {
ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes,
@@ -1246,7 +1246,7 @@
// actual decoded values.
NetEqOutputType output_type;
int num_channels;
- int samples_per_channel;
+ size_t samples_per_channel;
uint32_t receive_timestamp = 0;
int algorithmic_frame_delay = algorithmic_delay_ms_ / 10 + 1;
for (int n = 0; n < algorithmic_frame_delay; ++n) {
@@ -1315,7 +1315,7 @@
double next_input_time_ms = 0.0;
int16_t decoded[kBlockSize16kHz];
int num_channels;
- int samples_per_channel;
+ size_t samples_per_channel;
NetEqOutputType output_type;
uint32_t receive_timestamp = 0;
@@ -1418,7 +1418,7 @@
algorithmic_delay_ms_ * kSampleRateKhz, 5 * kSampleRateKhz / 8);
// Insert three speech packets. Three are needed to get the frame length
// correct.
- int out_len;
+ size_t out_len;
int num_channels;
NetEqOutputType type;
uint8_t payload[kPayloadBytes] = {0};
@@ -1515,7 +1515,7 @@
timestamp += kCngPeriodSamples;
// Pull audio once and make sure CNG is played.
- int out_len;
+ size_t out_len;
int num_channels;
NetEqOutputType type;
ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, &out_len,
diff --git a/webrtc/modules/audio_coding/neteq/normal.cc b/webrtc/modules/audio_coding/neteq/normal.cc
index bf455c9..ebecbf9 100644
--- a/webrtc/modules/audio_coding/neteq/normal.cc
+++ b/webrtc/modules/audio_coding/neteq/normal.cc
@@ -45,12 +45,12 @@
output->PushBackInterleaved(input, length);
int16_t* signal = &(*output)[0][0];
- const unsigned fs_mult = fs_hz_ / 8000;
+ const int fs_mult = fs_hz_ / 8000;
assert(fs_mult > 0);
// fs_shift = log2(fs_mult), rounded down.
// Note that |fs_shift| is not "exact" for 48 kHz.
// TODO(hlundin): Investigate this further.
- const int fs_shift = 30 - WebRtcSpl_NormW32(static_cast<int32_t>(fs_mult));
+ const int fs_shift = 30 - WebRtcSpl_NormW32(fs_mult);
// Check if last RecOut call resulted in an Expand. If so, we have to take
// care of some cross-fading and unmuting.
@@ -73,11 +73,11 @@
int16_t* signal = &(*output)[channel_ix][0];
size_t length_per_channel = length / output->Channels();
// Find largest absolute value in new data.
- int16_t decoded_max = WebRtcSpl_MaxAbsValueW16(
- signal, static_cast<int>(length_per_channel));
+ int16_t decoded_max =
+ WebRtcSpl_MaxAbsValueW16(signal, length_per_channel);
// Adjust muting factor if needed (to BGN level).
- int energy_length = std::min(static_cast<int>(fs_mult * 64),
- static_cast<int>(length_per_channel));
+ size_t energy_length =
+ std::min(static_cast<size_t>(fs_mult * 64), length_per_channel);
int scaling = 6 + fs_shift
- WebRtcSpl_NormW32(decoded_max * decoded_max);
scaling = std::max(scaling, 0); // |scaling| should always be >= 0.
@@ -111,7 +111,7 @@
}
// If muted increase by 0.64 for every 20 ms (NB/WB 0.0040/0.0020 in Q14).
- int increment = static_cast<int>(64 / fs_mult);
+ int increment = 64 / fs_mult;
for (size_t i = 0; i < length_per_channel; i++) {
// Scale with mute factor.
assert(channel_ix < output->Channels());
@@ -131,7 +131,7 @@
assert(fs_shift < 3); // Will always be 0, 1, or, 2.
increment = 4 >> fs_shift;
int fraction = increment;
- for (size_t i = 0; i < 8 * fs_mult; i++) {
+ for (size_t i = 0; i < static_cast<size_t>(8 * fs_mult); i++) {
// TODO(hlundin): Add 16 instead of 8 for correct rounding. Keeping 8
// now for legacy bit-exactness.
assert(channel_ix < output->Channels());
@@ -144,7 +144,7 @@
}
} else if (last_mode == kModeRfc3389Cng) {
assert(output->Channels() == 1); // Not adapted for multi-channel yet.
- static const int kCngLength = 32;
+ static const size_t kCngLength = 32;
int16_t cng_output[kCngLength];
// Reset mute factor and start up fresh.
external_mute_factor_array[0] = 16384;
@@ -167,7 +167,7 @@
assert(fs_shift < 3); // Will always be 0, 1, or, 2.
int16_t increment = 4 >> fs_shift;
int16_t fraction = increment;
- for (size_t i = 0; i < 8 * fs_mult; i++) {
+ for (size_t i = 0; i < static_cast<size_t>(8 * fs_mult); i++) {
// TODO(hlundin): Add 16 instead of 8 for correct rounding. Keeping 8 now
// for legacy bit-exactness.
signal[i] =
@@ -178,7 +178,7 @@
// Previous was neither of Expand, FadeToBGN or RFC3389_CNG, but we are
// still ramping up from previous muting.
// If muted increase by 0.64 for every 20 ms (NB/WB 0.0040/0.0020 in Q14).
- int increment = static_cast<int>(64 / fs_mult);
+ int increment = 64 / fs_mult;
size_t length_per_channel = length / output->Channels();
for (size_t i = 0; i < length_per_channel; i++) {
for (size_t channel_ix = 0; channel_ix < output->Channels();
diff --git a/webrtc/modules/audio_coding/neteq/packet_buffer.cc b/webrtc/modules/audio_coding/neteq/packet_buffer.cc
index 431e0f1..c89de12 100644
--- a/webrtc/modules/audio_coding/neteq/packet_buffer.cc
+++ b/webrtc/modules/audio_coding/neteq/packet_buffer.cc
@@ -181,7 +181,7 @@
return const_cast<const RTPHeader*>(&(buffer_.front()->header));
}
-Packet* PacketBuffer::GetNextPacket(int* discard_count) {
+Packet* PacketBuffer::GetNextPacket(size_t* discard_count) {
if (Empty()) {
// Buffer is empty.
return NULL;
@@ -194,7 +194,7 @@
// Discard other packets with the same timestamp. These are duplicates or
// redundant payloads that should not be used.
- int discards = 0;
+ size_t discards = 0;
while (!Empty() &&
buffer_.front()->header.timestamp == packet->header.timestamp) {
@@ -240,15 +240,15 @@
return DiscardOldPackets(timestamp_limit, 0);
}
-int PacketBuffer::NumPacketsInBuffer() const {
- return static_cast<int>(buffer_.size());
+size_t PacketBuffer::NumPacketsInBuffer() const {
+ return buffer_.size();
}
-int PacketBuffer::NumSamplesInBuffer(DecoderDatabase* decoder_database,
- int last_decoded_length) const {
+size_t PacketBuffer::NumSamplesInBuffer(DecoderDatabase* decoder_database,
+ size_t last_decoded_length) const {
PacketList::const_iterator it;
- int num_samples = 0;
- int last_duration = last_decoded_length;
+ size_t num_samples = 0;
+ size_t last_duration = last_decoded_length;
for (it = buffer_.begin(); it != buffer_.end(); ++it) {
Packet* packet = (*it);
AudioDecoder* decoder =
diff --git a/webrtc/modules/audio_coding/neteq/packet_buffer.h b/webrtc/modules/audio_coding/neteq/packet_buffer.h
index d2d429b..737845e 100644
--- a/webrtc/modules/audio_coding/neteq/packet_buffer.h
+++ b/webrtc/modules/audio_coding/neteq/packet_buffer.h
@@ -88,7 +88,7 @@
// Subsequent packets with the same timestamp as the one extracted will be
// discarded and properly deleted. The number of discarded packets will be
// written to the output variable |discard_count|.
- virtual Packet* GetNextPacket(int* discard_count);
+ virtual Packet* GetNextPacket(size_t* discard_count);
// Discards the first packet in the buffer. The packet is deleted.
// Returns PacketBuffer::kBufferEmpty if the buffer is empty,
@@ -109,12 +109,12 @@
// Returns the number of packets in the buffer, including duplicates and
// redundant packets.
- virtual int NumPacketsInBuffer() const;
+ virtual size_t NumPacketsInBuffer() const;
// Returns the number of samples in the buffer, including samples carried in
// duplicate and redundant packets.
- virtual int NumSamplesInBuffer(DecoderDatabase* decoder_database,
- int last_decoded_length) const;
+ virtual size_t NumSamplesInBuffer(DecoderDatabase* decoder_database,
+ size_t last_decoded_length) const;
// Increase the waiting time counter for every packet in the buffer by |inc|.
// The default value for |inc| is 1.
diff --git a/webrtc/modules/audio_coding/neteq/packet_buffer_unittest.cc b/webrtc/modules/audio_coding/neteq/packet_buffer_unittest.cc
index 61a8ee1..435b6c8 100644
--- a/webrtc/modules/audio_coding/neteq/packet_buffer_unittest.cc
+++ b/webrtc/modules/audio_coding/neteq/packet_buffer_unittest.cc
@@ -97,7 +97,7 @@
EXPECT_EQ(PacketBuffer::kOK, buffer.NextTimestamp(&next_ts));
EXPECT_EQ(4711u, next_ts);
EXPECT_FALSE(buffer.Empty());
- EXPECT_EQ(1, buffer.NumPacketsInBuffer());
+ EXPECT_EQ(1u, buffer.NumPacketsInBuffer());
const RTPHeader* hdr = buffer.NextRtpHeader();
EXPECT_EQ(&(packet->header), hdr); // Compare pointer addresses.
@@ -116,12 +116,12 @@
Packet* packet = gen.NextPacket(payload_len);
EXPECT_EQ(PacketBuffer::kOK, buffer.InsertPacket(packet));
}
- EXPECT_EQ(10, buffer.NumPacketsInBuffer());
+ EXPECT_EQ(10u, buffer.NumPacketsInBuffer());
EXPECT_FALSE(buffer.Empty());
buffer.Flush();
// Buffer should delete the payloads itself.
- EXPECT_EQ(0, buffer.NumPacketsInBuffer());
+ EXPECT_EQ(0u, buffer.NumPacketsInBuffer());
EXPECT_TRUE(buffer.Empty());
}
@@ -137,7 +137,7 @@
Packet* packet = gen.NextPacket(payload_len);
EXPECT_EQ(PacketBuffer::kOK, buffer.InsertPacket(packet));
}
- EXPECT_EQ(10, buffer.NumPacketsInBuffer());
+ EXPECT_EQ(10u, buffer.NumPacketsInBuffer());
uint32_t next_ts;
EXPECT_EQ(PacketBuffer::kOK, buffer.NextTimestamp(&next_ts));
EXPECT_EQ(0u, next_ts); // Expect first inserted packet to be first in line.
@@ -145,7 +145,7 @@
// Insert 11th packet; should flush the buffer and insert it after flushing.
Packet* packet = gen.NextPacket(payload_len);
EXPECT_EQ(PacketBuffer::kFlushed, buffer.InsertPacket(packet));
- EXPECT_EQ(1, buffer.NumPacketsInBuffer());
+ EXPECT_EQ(1u, buffer.NumPacketsInBuffer());
EXPECT_EQ(PacketBuffer::kOK, buffer.NextTimestamp(&next_ts));
// Expect last inserted packet to be first in line.
EXPECT_EQ(packet->header.timestamp, next_ts);
@@ -179,7 +179,7 @@
&current_pt,
&current_cng_pt));
EXPECT_TRUE(list.empty()); // The PacketBuffer should have depleted the list.
- EXPECT_EQ(10, buffer.NumPacketsInBuffer());
+ EXPECT_EQ(10u, buffer.NumPacketsInBuffer());
EXPECT_EQ(0, current_pt); // Current payload type changed to 0.
EXPECT_EQ(0xFF, current_cng_pt); // CNG payload type not changed.
@@ -220,7 +220,7 @@
&current_pt,
&current_cng_pt));
EXPECT_TRUE(list.empty()); // The PacketBuffer should have depleted the list.
- EXPECT_EQ(1, buffer.NumPacketsInBuffer()); // Only the last packet.
+ EXPECT_EQ(1u, buffer.NumPacketsInBuffer()); // Only the last packet.
EXPECT_EQ(1, current_pt); // Current payload type changed to 0.
EXPECT_EQ(0xFF, current_cng_pt); // CNG payload type not changed.
@@ -256,7 +256,7 @@
{0x0006, 0x0000001E, 1, false, -1},
};
- const int kExpectPacketsInBuffer = 9;
+ const size_t kExpectPacketsInBuffer = 9;
std::vector<Packet*> expect_order(kExpectPacketsInBuffer);
@@ -277,10 +277,10 @@
EXPECT_EQ(kExpectPacketsInBuffer, buffer.NumPacketsInBuffer());
- int drop_count;
- for (int i = 0; i < kExpectPacketsInBuffer; ++i) {
+ size_t drop_count;
+ for (size_t i = 0; i < kExpectPacketsInBuffer; ++i) {
Packet* packet = buffer.GetNextPacket(&drop_count);
- EXPECT_EQ(0, drop_count);
+ EXPECT_EQ(0u, drop_count);
EXPECT_EQ(packet, expect_order[i]); // Compare pointer addresses.
delete[] packet->payload;
delete packet;
@@ -302,7 +302,7 @@
Packet* packet = gen.NextPacket(payload_len);
buffer.InsertPacket(packet);
}
- EXPECT_EQ(10, buffer.NumPacketsInBuffer());
+ EXPECT_EQ(10u, buffer.NumPacketsInBuffer());
// Discard them one by one and make sure that the right packets are at the
// front of the buffer.
@@ -350,7 +350,7 @@
decoder_database,
&current_pt,
&current_cng_pt));
- EXPECT_EQ(10, buffer.NumPacketsInBuffer());
+ EXPECT_EQ(10u, buffer.NumPacketsInBuffer());
// Extract them and make sure that come out in the right order.
uint32_t current_ts = start_ts;
@@ -425,7 +425,7 @@
&current_pt,
&current_cng_pt));
EXPECT_TRUE(list.empty()); // The PacketBuffer should have depleted the list.
- EXPECT_EQ(1, buffer->NumPacketsInBuffer());
+ EXPECT_EQ(1u, buffer->NumPacketsInBuffer());
delete buffer;
EXPECT_CALL(decoder_database, Die()); // Called when object is deleted.
}
diff --git a/webrtc/modules/audio_coding/neteq/post_decode_vad.cc b/webrtc/modules/audio_coding/neteq/post_decode_vad.cc
index 0749673..714073a 100644
--- a/webrtc/modules/audio_coding/neteq/post_decode_vad.cc
+++ b/webrtc/modules/audio_coding/neteq/post_decode_vad.cc
@@ -45,7 +45,7 @@
}
}
-void PostDecodeVad::Update(int16_t* signal, int length,
+void PostDecodeVad::Update(int16_t* signal, size_t length,
AudioDecoder::SpeechType speech_type,
bool sid_frame,
int fs_hz) {
@@ -68,12 +68,13 @@
}
if (length > 0 && running_) {
- int vad_sample_index = 0;
+ size_t vad_sample_index = 0;
active_speech_ = false;
// Loop through frame sizes 30, 20, and 10 ms.
for (int vad_frame_size_ms = 30; vad_frame_size_ms >= 10;
vad_frame_size_ms -= 10) {
- int vad_frame_size_samples = vad_frame_size_ms * fs_hz / 1000;
+ size_t vad_frame_size_samples =
+ static_cast<size_t>(vad_frame_size_ms * fs_hz / 1000);
while (length - vad_sample_index >= vad_frame_size_samples) {
int vad_return = WebRtcVad_Process(
vad_instance_, fs_hz, &signal[vad_sample_index],
diff --git a/webrtc/modules/audio_coding/neteq/post_decode_vad.h b/webrtc/modules/audio_coding/neteq/post_decode_vad.h
index fa276aa..2886cf9 100644
--- a/webrtc/modules/audio_coding/neteq/post_decode_vad.h
+++ b/webrtc/modules/audio_coding/neteq/post_decode_vad.h
@@ -46,7 +46,7 @@
// Updates post-decode VAD with the audio data in |signal| having |length|
// samples. The data is of type |speech_type|, at the sample rate |fs_hz|.
- void Update(int16_t* signal, int length,
+ void Update(int16_t* signal, size_t length,
AudioDecoder::SpeechType speech_type, bool sid_frame, int fs_hz);
// Accessors.
diff --git a/webrtc/modules/audio_coding/neteq/preemptive_expand.cc b/webrtc/modules/audio_coding/neteq/preemptive_expand.cc
index 6a3f8ec..f51a5bd 100644
--- a/webrtc/modules/audio_coding/neteq/preemptive_expand.cc
+++ b/webrtc/modules/audio_coding/neteq/preemptive_expand.cc
@@ -18,14 +18,14 @@
PreemptiveExpand::ReturnCodes PreemptiveExpand::Process(
const int16_t* input,
- int input_length,
- int old_data_length,
+ size_t input_length,
+ size_t old_data_length,
AudioMultiVector* output,
- int16_t* length_change_samples) {
+ size_t* length_change_samples) {
old_data_length_per_channel_ = old_data_length;
// Input length must be (almost) 30 ms.
// Also, the new part must be at least |overlap_samples_| elements.
- static const int k15ms = 120; // 15 ms = 120 samples at 8 kHz sample rate.
+ static const size_t k15ms = 120; // 15 ms = 120 samples at 8 kHz sample rate.
if (num_channels_ == 0 ||
input_length / num_channels_ < (2 * k15ms - 1) * fs_mult_ ||
old_data_length >= input_length / num_channels_ - overlap_samples_) {
@@ -41,7 +41,7 @@
void PreemptiveExpand::SetParametersForPassiveSpeech(size_t len,
int16_t* best_correlation,
- int* peak_index) const {
+ size_t* peak_index) const {
// When the signal does not contain any active speech, the correlation does
// not matter. Simply set it to zero.
*best_correlation = 0;
@@ -51,7 +51,7 @@
// the new data.
// but we must ensure that best_correlation is not larger than the new data.
*peak_index = std::min(*peak_index,
- static_cast<int>(len - old_data_length_per_channel_));
+ len - old_data_length_per_channel_);
}
PreemptiveExpand::ReturnCodes PreemptiveExpand::CheckCriteriaAndStretch(
@@ -64,8 +64,7 @@
AudioMultiVector* output) const {
// Pre-calculate common multiplication with |fs_mult_|.
// 120 corresponds to 15 ms.
- int fs_mult_120 = fs_mult_ * 120;
- assert(old_data_length_per_channel_ >= 0); // Make sure it's been set.
+ size_t fs_mult_120 = static_cast<size_t>(fs_mult_ * 120);
// Check for strong correlation (>0.9 in Q14) and at least 15 ms new data,
// or passive speech.
if (((best_correlation > kCorrelationThreshold) &&
@@ -107,7 +106,7 @@
int sample_rate_hz,
size_t num_channels,
const BackgroundNoise& background_noise,
- int overlap_samples) const {
+ size_t overlap_samples) const {
return new PreemptiveExpand(
sample_rate_hz, num_channels, background_noise, overlap_samples);
}
diff --git a/webrtc/modules/audio_coding/neteq/preemptive_expand.h b/webrtc/modules/audio_coding/neteq/preemptive_expand.h
index 65da703..ca48e19 100644
--- a/webrtc/modules/audio_coding/neteq/preemptive_expand.h
+++ b/webrtc/modules/audio_coding/neteq/preemptive_expand.h
@@ -32,9 +32,9 @@
PreemptiveExpand(int sample_rate_hz,
size_t num_channels,
const BackgroundNoise& background_noise,
- int overlap_samples)
+ size_t overlap_samples)
: TimeStretch(sample_rate_hz, num_channels, background_noise),
- old_data_length_per_channel_(-1),
+ old_data_length_per_channel_(0),
overlap_samples_(overlap_samples) {
}
@@ -44,17 +44,17 @@
// is provided in the output |length_change_samples|. The method returns
// the outcome of the operation as an enumerator value.
ReturnCodes Process(const int16_t *pw16_decoded,
- int len,
- int old_data_len,
+ size_t len,
+ size_t old_data_len,
AudioMultiVector* output,
- int16_t* length_change_samples);
+ size_t* length_change_samples);
protected:
// Sets the parameters |best_correlation| and |peak_index| to suitable
// values when the signal contains no active speech.
void SetParametersForPassiveSpeech(size_t input_length,
int16_t* best_correlation,
- int* peak_index) const override;
+ size_t* peak_index) const override;
// Checks the criteria for performing the time-stretching operation and,
// if possible, performs the time-stretching.
@@ -67,8 +67,8 @@
AudioMultiVector* output) const override;
private:
- int old_data_length_per_channel_;
- int overlap_samples_;
+ size_t old_data_length_per_channel_;
+ size_t overlap_samples_;
DISALLOW_COPY_AND_ASSIGN(PreemptiveExpand);
};
@@ -81,7 +81,7 @@
int sample_rate_hz,
size_t num_channels,
const BackgroundNoise& background_noise,
- int overlap_samples) const;
+ size_t overlap_samples) const;
};
} // namespace webrtc
diff --git a/webrtc/modules/audio_coding/neteq/random_vector.h b/webrtc/modules/audio_coding/neteq/random_vector.h
index 767dc48..8c75eae 100644
--- a/webrtc/modules/audio_coding/neteq/random_vector.h
+++ b/webrtc/modules/audio_coding/neteq/random_vector.h
@@ -21,7 +21,7 @@
// This class generates pseudo-random samples.
class RandomVector {
public:
- static const int kRandomTableSize = 256;
+ static const size_t kRandomTableSize = 256;
static const int16_t kRandomTable[kRandomTableSize];
RandomVector()
diff --git a/webrtc/modules/audio_coding/neteq/statistics_calculator.cc b/webrtc/modules/audio_coding/neteq/statistics_calculator.cc
index df139f7..c716fe4 100644
--- a/webrtc/modules/audio_coding/neteq/statistics_calculator.cc
+++ b/webrtc/modules/audio_coding/neteq/statistics_calculator.cc
@@ -14,6 +14,7 @@
#include <string.h> // memset
#include "webrtc/base/checks.h"
+#include "webrtc/base/safe_conversions.h"
#include "webrtc/modules/audio_coding/neteq/decision_logic.h"
#include "webrtc/modules/audio_coding/neteq/delay_manager.h"
#include "webrtc/system_wrappers/interface/metrics.h"
@@ -140,36 +141,37 @@
next_waiting_time_index_ = 0;
}
-void StatisticsCalculator::ExpandedVoiceSamples(int num_samples) {
+void StatisticsCalculator::ExpandedVoiceSamples(size_t num_samples) {
expanded_speech_samples_ += num_samples;
}
-void StatisticsCalculator::ExpandedNoiseSamples(int num_samples) {
+void StatisticsCalculator::ExpandedNoiseSamples(size_t num_samples) {
expanded_noise_samples_ += num_samples;
}
-void StatisticsCalculator::PreemptiveExpandedSamples(int num_samples) {
+void StatisticsCalculator::PreemptiveExpandedSamples(size_t num_samples) {
preemptive_samples_ += num_samples;
}
-void StatisticsCalculator::AcceleratedSamples(int num_samples) {
+void StatisticsCalculator::AcceleratedSamples(size_t num_samples) {
accelerate_samples_ += num_samples;
}
-void StatisticsCalculator::AddZeros(int num_samples) {
+void StatisticsCalculator::AddZeros(size_t num_samples) {
added_zero_samples_ += num_samples;
}
-void StatisticsCalculator::PacketsDiscarded(int num_packets) {
+void StatisticsCalculator::PacketsDiscarded(size_t num_packets) {
discarded_packets_ += num_packets;
}
-void StatisticsCalculator::LostSamples(int num_samples) {
+void StatisticsCalculator::LostSamples(size_t num_samples) {
lost_timestamps_ += num_samples;
}
-void StatisticsCalculator::IncreaseCounter(int num_samples, int fs_hz) {
- const int time_step_ms = rtc::CheckedDivExact(1000 * num_samples, fs_hz);
+void StatisticsCalculator::IncreaseCounter(size_t num_samples, int fs_hz) {
+ const int time_step_ms =
+ rtc::CheckedDivExact(static_cast<int>(1000 * num_samples), fs_hz);
delayed_packet_outage_counter_.AdvanceClock(time_step_ms);
excess_buffer_delay_.AdvanceClock(time_step_ms);
timestamps_since_last_report_ += static_cast<uint32_t>(num_samples);
@@ -207,8 +209,8 @@
void StatisticsCalculator::GetNetworkStatistics(
int fs_hz,
- int num_samples_in_buffers,
- int samples_per_packet,
+ size_t num_samples_in_buffers,
+ size_t samples_per_packet,
const DelayManager& delay_manager,
const DecisionLogic& decision_logic,
NetEqNetworkStatistics *stats) {
@@ -220,8 +222,8 @@
stats->added_zero_samples = added_zero_samples_;
stats->current_buffer_size_ms =
static_cast<uint16_t>(num_samples_in_buffers * 1000 / fs_hz);
- const int ms_per_packet = decision_logic.packet_length_samples() /
- (fs_hz / 1000);
+ const int ms_per_packet = rtc::checked_cast<int>(
+ decision_logic.packet_length_samples() / (fs_hz / 1000));
stats->preferred_buffer_size_ms = (delay_manager.TargetLevel() >> 8) *
ms_per_packet;
stats->jitter_peaks_found = delay_manager.PeakFound();
@@ -230,7 +232,7 @@
stats->packet_loss_rate =
CalculateQ14Ratio(lost_timestamps_, timestamps_since_last_report_);
- const unsigned discarded_samples = discarded_packets_ * samples_per_packet;
+ const size_t discarded_samples = discarded_packets_ * samples_per_packet;
stats->packet_discard_rate =
CalculateQ14Ratio(discarded_samples, timestamps_since_last_report_);
@@ -265,7 +267,7 @@
ResetWaitingTimeStatistics();
}
-uint16_t StatisticsCalculator::CalculateQ14Ratio(uint32_t numerator,
+uint16_t StatisticsCalculator::CalculateQ14Ratio(size_t numerator,
uint32_t denominator) {
if (numerator == 0) {
return 0;
diff --git a/webrtc/modules/audio_coding/neteq/statistics_calculator.h b/webrtc/modules/audio_coding/neteq/statistics_calculator.h
index d743e5f..3bd3e55 100644
--- a/webrtc/modules/audio_coding/neteq/statistics_calculator.h
+++ b/webrtc/modules/audio_coding/neteq/statistics_calculator.h
@@ -42,32 +42,32 @@
// Reports that |num_samples| samples were produced through expansion, and
// that the expansion produced other than just noise samples.
- void ExpandedVoiceSamples(int num_samples);
+ void ExpandedVoiceSamples(size_t num_samples);
// Reports that |num_samples| samples were produced through expansion, and
// that the expansion produced only noise samples.
- void ExpandedNoiseSamples(int num_samples);
+ void ExpandedNoiseSamples(size_t num_samples);
// Reports that |num_samples| samples were produced through preemptive
// expansion.
- void PreemptiveExpandedSamples(int num_samples);
+ void PreemptiveExpandedSamples(size_t num_samples);
// Reports that |num_samples| samples were removed through accelerate.
- void AcceleratedSamples(int num_samples);
+ void AcceleratedSamples(size_t num_samples);
// Reports that |num_samples| zeros were inserted into the output.
- void AddZeros(int num_samples);
+ void AddZeros(size_t num_samples);
// Reports that |num_packets| packets were discarded.
- void PacketsDiscarded(int num_packets);
+ void PacketsDiscarded(size_t num_packets);
// Reports that |num_samples| were lost.
- void LostSamples(int num_samples);
+ void LostSamples(size_t num_samples);
// Increases the report interval counter with |num_samples| at a sample rate
// of |fs_hz|. This is how the StatisticsCalculator gets notified that current
// time is increasing.
- void IncreaseCounter(int num_samples, int fs_hz);
+ void IncreaseCounter(size_t num_samples, int fs_hz);
// Stores new packet waiting time in waiting time statistics.
void StoreWaitingTime(int waiting_time_ms);
@@ -85,8 +85,8 @@
// yet to play out is |num_samples_in_buffers|, and the number of samples per
// packet is |samples_per_packet|.
void GetNetworkStatistics(int fs_hz,
- int num_samples_in_buffers,
- int samples_per_packet,
+ size_t num_samples_in_buffers,
+ size_t samples_per_packet,
const DelayManager& delay_manager,
const DecisionLogic& decision_logic,
NetEqNetworkStatistics *stats);
@@ -150,15 +150,15 @@
};
// Calculates numerator / denominator, and returns the value in Q14.
- static uint16_t CalculateQ14Ratio(uint32_t numerator, uint32_t denominator);
+ static uint16_t CalculateQ14Ratio(size_t numerator, uint32_t denominator);
- uint32_t preemptive_samples_;
- uint32_t accelerate_samples_;
- int added_zero_samples_;
- uint32_t expanded_speech_samples_;
- uint32_t expanded_noise_samples_;
- int discarded_packets_;
- uint32_t lost_timestamps_;
+ size_t preemptive_samples_;
+ size_t accelerate_samples_;
+ size_t added_zero_samples_;
+ size_t expanded_speech_samples_;
+ size_t expanded_noise_samples_;
+ size_t discarded_packets_;
+ size_t lost_timestamps_;
uint32_t timestamps_since_last_report_;
int waiting_times_[kLenWaitingTimes]; // Used as a circular buffer.
int len_waiting_times_;
diff --git a/webrtc/modules/audio_coding/neteq/test/RTPencode.cc b/webrtc/modules/audio_coding/neteq/test/RTPencode.cc
index 7e778b8..b2df07a 100644
--- a/webrtc/modules/audio_coding/neteq/test/RTPencode.cc
+++ b/webrtc/modules/audio_coding/neteq/test/RTPencode.cc
@@ -23,6 +23,8 @@
#include <assert.h>
+#include <algorithm>
+
#include "webrtc/typedefs.h"
// needed for NetEqDecoder
#include "webrtc/modules/audio_coding/neteq/audio_decoder_impl.h"
@@ -76,27 +78,27 @@
void NetEQTest_GetCodec_and_PT(char* name,
webrtc::NetEqDecoder* codec,
int* PT,
- int frameLen,
+ size_t frameLen,
int* fs,
int* bitrate,
int* useRed);
int NetEQTest_init_coders(webrtc::NetEqDecoder coder,
- int enc_frameSize,
+ size_t enc_frameSize,
int bitrate,
int sampfreq,
int vad,
- int numChannels);
+ size_t numChannels);
void defineCodecs(webrtc::NetEqDecoder* usedCodec, int* noOfCodecs);
-int NetEQTest_free_coders(webrtc::NetEqDecoder coder, int numChannels);
-int NetEQTest_encode(int coder,
- int16_t* indata,
- int frameLen,
- unsigned char* encoded,
- int sampleRate,
- int* vad,
- int useVAD,
- int bitrate,
- int numChannels);
+int NetEQTest_free_coders(webrtc::NetEqDecoder coder, size_t numChannels);
+size_t NetEQTest_encode(int coder,
+ int16_t* indata,
+ size_t frameLen,
+ unsigned char* encoded,
+ int sampleRate,
+ int* vad,
+ int useVAD,
+ int bitrate,
+ size_t numChannels);
void makeRTPheader(unsigned char* rtp_data,
int payloadType,
int seqNo,
@@ -109,13 +111,13 @@
uint16_t* blockLen,
int seqNo,
uint32_t ssrc);
-int makeDTMFpayload(unsigned char* payload_data,
- int Event,
- int End,
- int Volume,
- int Duration);
-void stereoDeInterleave(int16_t* audioSamples, int numSamples);
-void stereoInterleave(unsigned char* data, int dataLen, int stride);
+size_t makeDTMFpayload(unsigned char* payload_data,
+ int Event,
+ int End,
+ int Volume,
+ int Duration);
+void stereoDeInterleave(int16_t* audioSamples, size_t numSamples);
+void stereoInterleave(unsigned char* data, size_t dataLen, size_t stride);
/*********************/
/* Codec definitions */
@@ -264,13 +266,14 @@
#endif
int main(int argc, char* argv[]) {
- int packet_size, fs;
+ size_t packet_size;
+ int fs;
webrtc::NetEqDecoder usedCodec;
int payloadType;
int bitrate = 0;
int useVAD, vad;
int useRed = 0;
- int len, enc_len;
+ size_t len, enc_len;
int16_t org_data[4000];
unsigned char rtp_data[8000];
int16_t seqNo = 0xFFF;
@@ -282,14 +285,14 @@
int red_PT[2] = {0};
uint32_t red_TS[2] = {0};
uint16_t red_len[2] = {0};
- int RTPheaderLen = 12;
+ size_t RTPheaderLen = 12;
uint8_t red_data[8000];
#ifdef INSERT_OLD_PACKETS
uint16_t old_length, old_plen;
- int old_enc_len;
+ size_t old_enc_len;
int first_old_packet = 1;
unsigned char old_rtp_data[8000];
- int packet_age = 0;
+ size_t packet_age = 0;
#endif
#ifdef INSERT_DTMF_PACKETS
int NTone = 1;
@@ -298,8 +301,8 @@
bool dtmfSent = false;
#endif
bool usingStereo = false;
- int stereoMode = 0;
- int numChannels = 1;
+ size_t stereoMode = 0;
+ size_t numChannels = 1;
/* check number of parameters */
if ((argc != 6) && (argc != 7)) {
@@ -449,12 +452,13 @@
FILE* out_file = fopen(argv[2], "wb");
CHECK_NOT_NULL(out_file);
printf("Output file: %s\n\n", argv[2]);
- packet_size = atoi(argv[3]);
- if (packet_size <= 0) {
- printf("Packet size %d must be positive", packet_size);
+ int packet_size_int = atoi(argv[3]);
+ if (packet_size_int <= 0) {
+ printf("Packet size %d must be positive", packet_size_int);
return -1;
}
- printf("Packet size: %d\n", packet_size);
+ printf("Packet size: %d\n", packet_size_int);
+ packet_size = static_cast<size_t>(packet_size_int);
// check for stereo
if (argv[4][strlen(argv[4]) - 1] == '*') {
@@ -653,10 +657,6 @@
enc_len =
NetEQTest_encode(usedCodec, org_data, packet_size, &rtp_data[12], fs,
&vad, useVAD, bitrate, numChannels);
- if (enc_len == -1) {
- printf("Error encoding frame\n");
- exit(0);
- }
if (usingStereo && stereoMode != STEREO_MODE_FRAME && vad == 1) {
// interleave the encoded payload for sample-based codecs (not for CNG)
@@ -729,12 +729,12 @@
return -1;
}
#ifdef RANDOM_DATA
- for (int k = 0; k < 12 + enc_len; k++) {
+ for (size_t k = 0; k < 12 + enc_len; k++) {
rtp_data[k] = rand() + rand();
}
#endif
#ifdef RANDOM_PAYLOAD_DATA
- for (int k = 12; k < 12 + enc_len; k++) {
+ for (size_t k = 12; k < 12 + enc_len; k++) {
rtp_data[k] = rand() + rand();
}
#endif
@@ -822,7 +822,7 @@
void NetEQTest_GetCodec_and_PT(char* name,
webrtc::NetEqDecoder* codec,
int* PT,
- int frameLen,
+ size_t frameLen,
int* fs,
int* bitrate,
int* useRed) {
@@ -887,14 +887,14 @@
}
int NetEQTest_init_coders(webrtc::NetEqDecoder coder,
- int enc_frameSize,
+ size_t enc_frameSize,
int bitrate,
int sampfreq,
int vad,
- int numChannels) {
+ size_t numChannels) {
int ok = 0;
- for (int k = 0; k < numChannels; k++) {
+ for (size_t k = 0; k < numChannels; k++) {
VAD_inst[k] = WebRtcVad_Create();
if (!VAD_inst[k]) {
printf("Error: Couldn't allocate memory for VAD instance\n");
@@ -962,7 +962,7 @@
WebRtcG729_EncoderInit(G729enc_inst[k], vad);
if ((vad == 1) && (enc_frameSize != 80)) {
printf("\nError - This simulation only supports VAD for G729 at "
- "10ms packets (not %dms)\n", (enc_frameSize >> 3));
+ "10ms packets (not %" PRIuS "ms)\n", (enc_frameSize >> 3));
}
} else {
printf("\nError - g729 is only developed for 8kHz \n");
@@ -1018,7 +1018,7 @@
}
if ((vad == 1) && (enc_frameSize != 160)) {
printf("\nError - This simulation only supports VAD for Speex at "
- "20ms packets (not %dms)\n",
+ "20ms packets (not %" PRIuS "ms)\n",
(enc_frameSize >> 3));
vad = 0;
}
@@ -1049,7 +1049,7 @@
}
if ((vad == 1) && (enc_frameSize != 320)) {
printf("\nError - This simulation only supports VAD for Speex at "
- "20ms packets (not %dms)\n",
+ "20ms packets (not %" PRIuS "ms)\n",
(enc_frameSize >> 4));
vad = 0;
}
@@ -1238,8 +1238,7 @@
"instance\n");
exit(0);
}
- if (((enc_frameSize / 320) < 0) || ((enc_frameSize / 320) > 3) ||
- ((enc_frameSize % 320) != 0)) {
+ if (((enc_frameSize / 320) > 3) || ((enc_frameSize % 320) != 0)) {
printf("\nError - AMRwb must have frameSize of 20, 40 or 60ms\n");
exit(0);
}
@@ -1320,7 +1319,8 @@
bitrate);
exit(0);
}
- WebRtcIsac_Control(ISAC_inst[k], bitrate, enc_frameSize >> 4);
+ WebRtcIsac_Control(ISAC_inst[k], bitrate,
+ static_cast<int>(enc_frameSize >> 4));
} else {
printf("\nError - iSAC only supports 480 or 960 enc_frameSize (30 or "
"60 ms)\n");
@@ -1379,7 +1379,8 @@
"56000 bps (not %i)\n", bitrate);
exit(0);
}
- WebRtcIsac_Control(ISACSWB_inst[k], bitrate, enc_frameSize >> 5);
+ WebRtcIsac_Control(ISACSWB_inst[k], bitrate,
+ static_cast<int>(enc_frameSize >> 5));
} else {
printf("\nError - iSAC SWB only supports 960 enc_frameSize (30 "
"ms)\n");
@@ -1424,8 +1425,8 @@
return (0);
}
-int NetEQTest_free_coders(webrtc::NetEqDecoder coder, int numChannels) {
- for (int k = 0; k < numChannels; k++) {
+int NetEQTest_free_coders(webrtc::NetEqDecoder coder, size_t numChannels) {
+ for (size_t k = 0; k < numChannels; k++) {
WebRtcVad_Free(VAD_inst[k]);
#if (defined(CODEC_CNGCODEC8) || defined(CODEC_CNGCODEC16) || \
defined(CODEC_CNGCODEC32) || defined(CODEC_CNGCODEC48))
@@ -1552,35 +1553,34 @@
return (0);
}
-int NetEQTest_encode(int coder,
- int16_t* indata,
- int frameLen,
- unsigned char* encoded,
- int sampleRate,
- int* vad,
- int useVAD,
- int bitrate,
- int numChannels) {
- int cdlen = 0;
+size_t NetEQTest_encode(int coder,
+ int16_t* indata,
+ size_t frameLen,
+ unsigned char* encoded,
+ int sampleRate,
+ int* vad,
+ int useVAD,
+ int bitrate,
+ size_t numChannels) {
+ size_t cdlen = 0;
int16_t* tempdata;
static int first_cng = 1;
- int16_t tempLen;
-
+ size_t tempLen;
*vad = 1;
// check VAD first
if (useVAD) {
*vad = 0;
- int sampleRate_10 = 10 * sampleRate / 1000;
- int sampleRate_20 = 20 * sampleRate / 1000;
- int sampleRate_30 = 30 * sampleRate / 1000;
- for (int k = 0; k < numChannels; k++) {
+ size_t sampleRate_10 = static_cast<size_t>(10 * sampleRate / 1000);
+ size_t sampleRate_20 = static_cast<size_t>(20 * sampleRate / 1000);
+ size_t sampleRate_30 = static_cast<size_t>(30 * sampleRate / 1000);
+ for (size_t k = 0; k < numChannels; k++) {
tempLen = frameLen;
tempdata = &indata[k * frameLen];
int localVad = 0;
/* Partition the signal and test each chunk for VAD.
- All chunks must be VAD=0 to produce a total VAD=0. */
+ All chunks must be VAD=0 to produce a total VAD=0. */
while (tempLen >= sampleRate_10) {
if ((tempLen % sampleRate_30) == 0) { // tempLen is multiple of 30ms
localVad |= WebRtcVad_Process(VAD_inst[k], sampleRate, tempdata,
@@ -1607,7 +1607,7 @@
if (!*vad) {
// all channels are silent
cdlen = 0;
- for (int k = 0; k < numChannels; k++) {
+ for (size_t k = 0; k < numChannels; k++) {
WebRtcCng_Encode(CNGenc_inst[k], &indata[k * frameLen],
(frameLen <= 640 ? frameLen : 640) /* max 640 */,
encoded, &tempLen, first_cng);
@@ -1621,9 +1621,9 @@
}
// loop over all channels
- int totalLen = 0;
+ size_t totalLen = 0;
- for (int k = 0; k < numChannels; k++) {
+ for (size_t k = 0; k < numChannels; k++) {
/* Encode with the selected coder type */
if (coder == webrtc::kDecoderPCMu) { /*g711 u-law */
#ifdef CODEC_G711
@@ -1652,7 +1652,8 @@
#endif
#ifdef CODEC_ILBC
else if (coder == webrtc::kDecoderILBC) { /*iLBC */
- cdlen = WebRtcIlbcfix_Encode(iLBCenc_inst[k], indata, frameLen, encoded);
+ cdlen = static_cast<size_t>(std::max(
+ WebRtcIlbcfix_Encode(iLBCenc_inst[k], indata, frameLen, encoded), 0));
}
#endif
#if (defined(CODEC_ISAC) || \
@@ -1660,28 +1661,30 @@
// NETEQ_ISACFIX_CODEC
else if (coder == webrtc::kDecoderISAC) { /*iSAC */
int noOfCalls = 0;
- cdlen = 0;
- while (cdlen <= 0) {
+ int res = 0;
+ while (res <= 0) {
#ifdef CODEC_ISAC /* floating point */
- cdlen =
+ res =
WebRtcIsac_Encode(ISAC_inst[k], &indata[noOfCalls * 160], encoded);
#else /* fixed point */
- cdlen = WebRtcIsacfix_Encode(ISAC_inst[k], &indata[noOfCalls * 160],
- encoded);
+ res = WebRtcIsacfix_Encode(ISAC_inst[k], &indata[noOfCalls * 160],
+ encoded);
#endif
noOfCalls++;
}
+ cdlen = static_cast<size_t>(res);
}
#endif
#ifdef CODEC_ISAC_SWB
else if (coder == webrtc::kDecoderISACswb) { /* iSAC SWB */
int noOfCalls = 0;
- cdlen = 0;
- while (cdlen <= 0) {
- cdlen = WebRtcIsac_Encode(ISACSWB_inst[k], &indata[noOfCalls * 320],
- encoded);
+ int res = 0;
+ while (res <= 0) {
+ res = WebRtcIsac_Encode(ISACSWB_inst[k], &indata[noOfCalls * 320],
+ encoded);
noOfCalls++;
}
+ cdlen = static_cast<size_t>(res);
}
#endif
indata += frameLen;
@@ -1757,11 +1760,11 @@
return rtpPointer - rtp_data; // length of header in bytes
}
-int makeDTMFpayload(unsigned char* payload_data,
- int Event,
- int End,
- int Volume,
- int Duration) {
+size_t makeDTMFpayload(unsigned char* payload_data,
+ int Event,
+ int End,
+ int Volume,
+ int Duration) {
unsigned char E, R, V;
R = 0;
V = (unsigned char)Volume;
@@ -1778,11 +1781,11 @@
return (4);
}
-void stereoDeInterleave(int16_t* audioSamples, int numSamples) {
+void stereoDeInterleave(int16_t* audioSamples, size_t numSamples) {
int16_t* tempVec;
int16_t* readPtr, *writeL, *writeR;
- if (numSamples <= 0)
+ if (numSamples == 0)
return;
tempVec = (int16_t*)malloc(sizeof(int16_t) * numSamples);
@@ -1797,7 +1800,7 @@
writeR = &audioSamples[numSamples / 2];
readPtr = tempVec;
- for (int k = 0; k < numSamples; k += 2) {
+ for (size_t k = 0; k < numSamples; k += 2) {
*writeL = *readPtr;
readPtr++;
*writeR = *readPtr;
@@ -1809,7 +1812,7 @@
free(tempVec);
}
-void stereoInterleave(unsigned char* data, int dataLen, int stride) {
+void stereoInterleave(unsigned char* data, size_t dataLen, size_t stride) {
unsigned char* ptrL, *ptrR;
unsigned char temp[10];
diff --git a/webrtc/modules/audio_coding/neteq/test/neteq_ilbc_quality_test.cc b/webrtc/modules/audio_coding/neteq/test/neteq_ilbc_quality_test.cc
index 134539f..cb0780c 100644
--- a/webrtc/modules/audio_coding/neteq/test/neteq_ilbc_quality_test.cc
+++ b/webrtc/modules/audio_coding/neteq/test/neteq_ilbc_quality_test.cc
@@ -59,11 +59,11 @@
}
int EncodeBlock(int16_t* in_data,
- int block_size_samples,
+ size_t block_size_samples,
uint8_t* payload,
- int max_bytes) override {
- const int kFrameSizeSamples = 80; // Samples per 10 ms.
- int encoded_samples = 0;
+ size_t max_bytes) override {
+ const size_t kFrameSizeSamples = 80; // Samples per 10 ms.
+ size_t encoded_samples = 0;
uint32_t dummy_timestamp = 0;
AudioEncoder::EncodedInfo info;
do {
diff --git a/webrtc/modules/audio_coding/neteq/test/neteq_isac_quality_test.cc b/webrtc/modules/audio_coding/neteq/test/neteq_isac_quality_test.cc
index 85dd54d..47fae36 100644
--- a/webrtc/modules/audio_coding/neteq/test/neteq_isac_quality_test.cc
+++ b/webrtc/modules/audio_coding/neteq/test/neteq_isac_quality_test.cc
@@ -43,8 +43,8 @@
NetEqIsacQualityTest();
void SetUp() override;
void TearDown() override;
- virtual int EncodeBlock(int16_t* in_data, int block_size_samples,
- uint8_t* payload, int max_bytes);
+ virtual int EncodeBlock(int16_t* in_data, size_t block_size_samples,
+ uint8_t* payload, size_t max_bytes);
private:
ISACFIX_MainStruct* isac_encoder_;
int bit_rate_kbps_;
@@ -78,8 +78,8 @@
}
int NetEqIsacQualityTest::EncodeBlock(int16_t* in_data,
- int block_size_samples,
- uint8_t* payload, int max_bytes) {
+ size_t block_size_samples,
+ uint8_t* payload, size_t max_bytes) {
// ISAC takes 10 ms for every call.
const int subblocks = kIsacBlockDurationMs / 10;
const int subblock_length = 10 * kIsacInputSamplingKhz;
diff --git a/webrtc/modules/audio_coding/neteq/test/neteq_opus_quality_test.cc b/webrtc/modules/audio_coding/neteq/test/neteq_opus_quality_test.cc
index 3a3b326..0406da2 100644
--- a/webrtc/modules/audio_coding/neteq/test/neteq_opus_quality_test.cc
+++ b/webrtc/modules/audio_coding/neteq/test/neteq_opus_quality_test.cc
@@ -103,12 +103,12 @@
NetEqOpusQualityTest();
void SetUp() override;
void TearDown() override;
- virtual int EncodeBlock(int16_t* in_data, int block_size_samples,
- uint8_t* payload, int max_bytes);
+ virtual int EncodeBlock(int16_t* in_data, size_t block_size_samples,
+ uint8_t* payload, size_t max_bytes);
private:
WebRtcOpusEncInst* opus_encoder_;
OpusRepacketizer* repacketizer_;
- int sub_block_size_samples_;
+ size_t sub_block_size_samples_;
int bit_rate_kbps_;
bool fec_;
bool dtx_;
@@ -126,7 +126,8 @@
kDecoderOpus),
opus_encoder_(NULL),
repacketizer_(NULL),
- sub_block_size_samples_(kOpusBlockDurationMs * kOpusSamplingKhz),
+ sub_block_size_samples_(
+ static_cast<size_t>(kOpusBlockDurationMs * kOpusSamplingKhz)),
bit_rate_kbps_(FLAGS_bit_rate_kbps),
fec_(FLAGS_fec),
dtx_(FLAGS_dtx),
@@ -173,8 +174,8 @@
}
int NetEqOpusQualityTest::EncodeBlock(int16_t* in_data,
- int block_size_samples,
- uint8_t* payload, int max_bytes) {
+ size_t block_size_samples,
+ uint8_t* payload, size_t max_bytes) {
EXPECT_EQ(block_size_samples, sub_block_size_samples_ * sub_packets_);
int16_t* pointer = in_data;
int value;
@@ -192,7 +193,8 @@
}
pointer += sub_block_size_samples_ * channels_;
}
- value = opus_repacketizer_out(repacketizer_, payload, max_bytes);
+ value = opus_repacketizer_out(repacketizer_, payload,
+ static_cast<opus_int32>(max_bytes));
EXPECT_GE(value, 0);
return value;
}
diff --git a/webrtc/modules/audio_coding/neteq/test/neteq_pcmu_quality_test.cc b/webrtc/modules/audio_coding/neteq/test/neteq_pcmu_quality_test.cc
index d94ceb6..0b89352 100644
--- a/webrtc/modules/audio_coding/neteq/test/neteq_pcmu_quality_test.cc
+++ b/webrtc/modules/audio_coding/neteq/test/neteq_pcmu_quality_test.cc
@@ -59,11 +59,11 @@
}
int EncodeBlock(int16_t* in_data,
- int block_size_samples,
+ size_t block_size_samples,
uint8_t* payload,
- int max_bytes) override {
- const int kFrameSizeSamples = 80; // Samples per 10 ms.
- int encoded_samples = 0;
+ size_t max_bytes) override {
+ const size_t kFrameSizeSamples = 80; // Samples per 10 ms.
+ size_t encoded_samples = 0;
uint32_t dummy_timestamp = 0;
AudioEncoder::EncodedInfo info;
do {
diff --git a/webrtc/modules/audio_coding/neteq/time_stretch.cc b/webrtc/modules/audio_coding/neteq/time_stretch.cc
index 5577cd2..6ae81e6 100644
--- a/webrtc/modules/audio_coding/neteq/time_stretch.cc
+++ b/webrtc/modules/audio_coding/neteq/time_stretch.cc
@@ -12,6 +12,7 @@
#include <algorithm> // min, max
+#include "webrtc/base/safe_conversions.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
#include "webrtc/modules/audio_coding/neteq/background_noise.h"
@@ -23,9 +24,10 @@
size_t input_len,
bool fast_mode,
AudioMultiVector* output,
- int16_t* length_change_samples) {
+ size_t* length_change_samples) {
// Pre-calculate common multiplication with |fs_mult_|.
- int fs_mult_120 = fs_mult_ * 120; // Corresponds to 15 ms.
+ size_t fs_mult_120 =
+ static_cast<size_t>(fs_mult_ * 120); // Corresponds to 15 ms.
const int16_t* signal;
rtc::scoped_ptr<int16_t[]> signal_array;
@@ -48,8 +50,7 @@
}
// Find maximum absolute value of input signal.
- max_input_value_ = WebRtcSpl_MaxAbsValueW16(signal,
- static_cast<int>(signal_len));
+ max_input_value_ = WebRtcSpl_MaxAbsValueW16(signal, signal_len);
// Downsample to 4 kHz sample rate and calculate auto-correlation.
DspHelper::DownsampleTo4kHz(signal, signal_len, kDownsampledLen,
@@ -58,13 +59,12 @@
AutoCorrelation();
// Find the strongest correlation peak.
- static const int kNumPeaks = 1;
- int peak_index;
+ static const size_t kNumPeaks = 1;
+ size_t peak_index;
int16_t peak_value;
DspHelper::PeakDetection(auto_correlation_, kCorrelationLen, kNumPeaks,
fs_mult_, &peak_index, &peak_value);
// Assert that |peak_index| stays within boundaries.
- assert(peak_index >= 0);
assert(peak_index <= (2 * kCorrelationLen - 1) * fs_mult_);
// Compensate peak_index for displaced starting position. The displacement
@@ -73,13 +73,13 @@
// multiplication by fs_mult_ * 2.
peak_index += kMinLag * fs_mult_ * 2;
// Assert that |peak_index| stays within boundaries.
- assert(peak_index >= 20 * fs_mult_);
+ assert(peak_index >= static_cast<size_t>(20 * fs_mult_));
assert(peak_index <= 20 * fs_mult_ + (2 * kCorrelationLen - 1) * fs_mult_);
// Calculate scaling to ensure that |peak_index| samples can be square-summed
// without overflowing.
int scaling = 31 - WebRtcSpl_NormW32(max_input_value_ * max_input_value_) -
- WebRtcSpl_NormW32(peak_index);
+ WebRtcSpl_NormW32(static_cast<int32_t>(peak_index));
scaling = std::max(0, scaling);
// |vec1| starts at 15 ms minus one pitch period.
@@ -177,7 +177,7 @@
}
bool TimeStretch::SpeechDetection(int32_t vec1_energy, int32_t vec2_energy,
- int peak_index, int scaling) const {
+ size_t peak_index, int scaling) const {
// Check if the signal seems to be active speech or not (simple VAD).
// If (vec1_energy + vec2_energy) / (2 * peak_index) <=
// 8 * background_noise_energy, then we say that the signal contains no
@@ -197,7 +197,8 @@
int right_scale = 16 - WebRtcSpl_NormW32(right_side);
right_scale = std::max(0, right_scale);
left_side = left_side >> right_scale;
- right_side = peak_index * (right_side >> right_scale);
+ right_side =
+ rtc::checked_cast<int32_t>(peak_index) * (right_side >> right_scale);
// Scale |left_side| properly before comparing with |right_side|.
// (|scaling| is the scale factor before energy calculation, thus the scale
diff --git a/webrtc/modules/audio_coding/neteq/time_stretch.h b/webrtc/modules/audio_coding/neteq/time_stretch.h
index 7c84e1a..14383d8 100644
--- a/webrtc/modules/audio_coding/neteq/time_stretch.h
+++ b/webrtc/modules/audio_coding/neteq/time_stretch.h
@@ -39,7 +39,7 @@
const BackgroundNoise& background_noise)
: sample_rate_hz_(sample_rate_hz),
fs_mult_(sample_rate_hz / 8000),
- num_channels_(static_cast<int>(num_channels)),
+ num_channels_(num_channels),
master_channel_(0), // First channel is master.
background_noise_(background_noise),
max_input_value_(0) {
@@ -48,7 +48,7 @@
sample_rate_hz_ == 32000 ||
sample_rate_hz_ == 48000);
assert(num_channels_ > 0);
- assert(static_cast<int>(master_channel_) < num_channels_);
+ assert(master_channel_ < num_channels_);
memset(auto_correlation_, 0, sizeof(auto_correlation_));
}
@@ -60,7 +60,7 @@
size_t input_len,
bool fast_mode,
AudioMultiVector* output,
- int16_t* length_change_samples);
+ size_t* length_change_samples);
protected:
// Sets the parameters |best_correlation| and |peak_index| to suitable
@@ -68,7 +68,7 @@
// implemented by the sub-classes.
virtual void SetParametersForPassiveSpeech(size_t input_length,
int16_t* best_correlation,
- int* peak_index) const = 0;
+ size_t* peak_index) const = 0;
// Checks the criteria for performing the time-stretching operation and,
// if possible, performs the time-stretching. This method must be implemented
@@ -82,16 +82,16 @@
bool fast_mode,
AudioMultiVector* output) const = 0;
- static const int kCorrelationLen = 50;
- static const int kLogCorrelationLen = 6; // >= log2(kCorrelationLen).
- static const int kMinLag = 10;
- static const int kMaxLag = 60;
- static const int kDownsampledLen = kCorrelationLen + kMaxLag;
+ static const size_t kCorrelationLen = 50;
+ static const size_t kLogCorrelationLen = 6; // >= log2(kCorrelationLen).
+ static const size_t kMinLag = 10;
+ static const size_t kMaxLag = 60;
+ static const size_t kDownsampledLen = kCorrelationLen + kMaxLag;
static const int kCorrelationThreshold = 14746; // 0.9 in Q14.
const int sample_rate_hz_;
const int fs_mult_; // Sample rate multiplier = sample_rate_hz_ / 8000.
- const int num_channels_;
+ const size_t num_channels_;
const size_t master_channel_;
const BackgroundNoise& background_noise_;
int16_t max_input_value_;
@@ -107,7 +107,7 @@
// Performs a simple voice-activity detection based on the input parameters.
bool SpeechDetection(int32_t vec1_energy, int32_t vec2_energy,
- int peak_index, int scaling) const;
+ size_t peak_index, int scaling) const;
DISALLOW_COPY_AND_ASSIGN(TimeStretch);
};
diff --git a/webrtc/modules/audio_coding/neteq/time_stretch_unittest.cc b/webrtc/modules/audio_coding/neteq/time_stretch_unittest.cc
index 05385a1..cbe4b04 100644
--- a/webrtc/modules/audio_coding/neteq/time_stretch_unittest.cc
+++ b/webrtc/modules/audio_coding/neteq/time_stretch_unittest.cc
@@ -75,12 +75,12 @@
// Returns the total length change (in samples) that the accelerate operation
// resulted in during the run.
- int TestAccelerate(int loops, bool fast_mode) {
+ size_t TestAccelerate(size_t loops, bool fast_mode) {
Accelerate accelerate(sample_rate_hz_, kNumChannels, background_noise_);
- int total_length_change = 0;
- for (int i = 0; i < loops; ++i) {
+ size_t total_length_change = 0;
+ for (size_t i = 0; i < loops; ++i) {
AudioMultiVector output(kNumChannels);
- int16_t length_change;
+ size_t length_change;
UpdateReturnStats(accelerate.Process(Next30Ms(), block_size_, fast_mode,
&output, &length_change));
total_length_change += length_change;
@@ -110,7 +110,7 @@
TEST_F(TimeStretchTest, Accelerate) {
// TestAccelerate returns the total length change in samples.
- EXPECT_EQ(15268, TestAccelerate(100, false));
+ EXPECT_EQ(15268U, TestAccelerate(100, false));
EXPECT_EQ(9, return_stats_[TimeStretch::kSuccess]);
EXPECT_EQ(58, return_stats_[TimeStretch::kSuccessLowEnergy]);
EXPECT_EQ(33, return_stats_[TimeStretch::kNoStretch]);
@@ -118,7 +118,7 @@
TEST_F(TimeStretchTest, AccelerateFastMode) {
// TestAccelerate returns the total length change in samples.
- EXPECT_EQ(21400, TestAccelerate(100, true));
+ EXPECT_EQ(21400U, TestAccelerate(100, true));
EXPECT_EQ(31, return_stats_[TimeStretch::kSuccess]);
EXPECT_EQ(58, return_stats_[TimeStretch::kSuccessLowEnergy]);
EXPECT_EQ(11, return_stats_[TimeStretch::kNoStretch]);
diff --git a/webrtc/modules/audio_coding/neteq/tools/constant_pcm_packet_source.cc b/webrtc/modules/audio_coding/neteq/tools/constant_pcm_packet_source.cc
index af4b8e1..016acde 100644
--- a/webrtc/modules/audio_coding/neteq/tools/constant_pcm_packet_source.cc
+++ b/webrtc/modules/audio_coding/neteq/tools/constant_pcm_packet_source.cc
@@ -31,8 +31,8 @@
seq_number_(0),
timestamp_(0),
payload_ssrc_(0xABCD1234) {
- int encoded_len = WebRtcPcm16b_Encode(&sample_value, 1, encoded_sample_);
- CHECK_EQ(2, encoded_len);
+ size_t encoded_len = WebRtcPcm16b_Encode(&sample_value, 1, encoded_sample_);
+ CHECK_EQ(2U, encoded_len);
}
Packet* ConstantPcmPacketSource::NextPacket() {
diff --git a/webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.cc b/webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.cc
index 52c34bb..49750c2 100644
--- a/webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.cc
+++ b/webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.cc
@@ -43,11 +43,11 @@
rtp_header, payload, payload_size_bytes, receive_timestamp));
}
-int NetEqExternalDecoderTest::GetOutputAudio(size_t max_length,
- int16_t* output,
- NetEqOutputType* output_type) {
+size_t NetEqExternalDecoderTest::GetOutputAudio(size_t max_length,
+ int16_t* output,
+ NetEqOutputType* output_type) {
// Get audio from regular instance.
- int samples_per_channel;
+ size_t samples_per_channel;
int num_channels;
EXPECT_EQ(NetEq::kOK,
neteq_->GetAudio(max_length,
@@ -56,7 +56,8 @@
&num_channels,
output_type));
EXPECT_EQ(channels_, num_channels);
- EXPECT_EQ(kOutputLengthMs * sample_rate_hz_ / 1000, samples_per_channel);
+ EXPECT_EQ(static_cast<size_t>(kOutputLengthMs * sample_rate_hz_ / 1000),
+ samples_per_channel);
return samples_per_channel;
}
diff --git a/webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.h b/webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.h
index 0d4d2f9..202d1f3 100644
--- a/webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.h
+++ b/webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.h
@@ -42,8 +42,8 @@
// Get 10 ms of audio data. The data is written to |output|, which can hold
// (at least) |max_length| elements. Returns number of samples.
- int GetOutputAudio(size_t max_length, int16_t* output,
- NetEqOutputType* output_type);
+ size_t GetOutputAudio(size_t max_length, int16_t* output,
+ NetEqOutputType* output_type);
NetEq* neteq() { return neteq_.get(); }
diff --git a/webrtc/modules/audio_coding/neteq/tools/neteq_performance_test.cc b/webrtc/modules/audio_coding/neteq/tools/neteq_performance_test.cc
index 1c76d76..57397e1 100644
--- a/webrtc/modules/audio_coding/neteq/tools/neteq_performance_test.cc
+++ b/webrtc/modules/audio_coding/neteq/tools/neteq_performance_test.cc
@@ -101,19 +101,19 @@
// Get output audio, but don't do anything with it.
static const int kMaxChannels = 1;
- static const int kMaxSamplesPerMs = 48000 / 1000;
+ static const size_t kMaxSamplesPerMs = 48000 / 1000;
static const int kOutputBlockSizeMs = 10;
- static const int kOutDataLen =
+ static const size_t kOutDataLen =
kOutputBlockSizeMs * kMaxSamplesPerMs * kMaxChannels;
int16_t out_data[kOutDataLen];
int num_channels;
- int samples_per_channel;
+ size_t samples_per_channel;
int error = neteq->GetAudio(kOutDataLen, out_data, &samples_per_channel,
&num_channels, NULL);
if (error != NetEq::kOK)
return -1;
- assert(samples_per_channel == kSampRateHz * 10 / 1000);
+ assert(samples_per_channel == static_cast<size_t>(kSampRateHz * 10 / 1000));
time_now_ms += kOutputBlockSizeMs;
if (time_now_ms >= runtime_ms / 2 && !drift_flipped) {
diff --git a/webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.cc b/webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.cc
index c60b993..1c028c9 100644
--- a/webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.cc
+++ b/webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.cc
@@ -218,8 +218,9 @@
block_duration_ms_(block_duration_ms),
in_sampling_khz_(in_sampling_khz),
out_sampling_khz_(out_sampling_khz),
- in_size_samples_(in_sampling_khz_ * block_duration_ms_),
- out_size_samples_(out_sampling_khz_ * kOutputSizeMs),
+ in_size_samples_(
+ static_cast<size_t>(in_sampling_khz_ * block_duration_ms_)),
+ out_size_samples_(static_cast<size_t>(out_sampling_khz_ * kOutputSizeMs)),
payload_size_bytes_(0),
max_payload_bytes_(0),
in_file_(new ResampleInputAudioFile(FLAGS_in_filename,
@@ -392,7 +393,7 @@
int NetEqQualityTest::DecodeBlock() {
int channels;
- int samples;
+ size_t samples;
int ret = neteq_->GetAudio(out_size_samples_ * channels_, &out_data_[0],
&samples, &channels, NULL);
@@ -400,9 +401,9 @@
return -1;
} else {
assert(channels == channels_);
- assert(samples == kOutputSizeMs * out_sampling_khz_);
+ assert(samples == static_cast<size_t>(kOutputSizeMs * out_sampling_khz_));
CHECK(output_->WriteArray(out_data_.get(), samples * channels));
- return samples;
+ return static_cast<int>(samples);
}
}
diff --git a/webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.h b/webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.h
index 4a0d808..ba87dbf 100644
--- a/webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.h
+++ b/webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.h
@@ -76,8 +76,8 @@
// |block_size_samples| (samples per channel),
// 2. save the bit stream to |payload| of |max_bytes| bytes in size,
// 3. returns the length of the payload (in bytes),
- virtual int EncodeBlock(int16_t* in_data, int block_size_samples,
- uint8_t* payload, int max_bytes) = 0;
+ virtual int EncodeBlock(int16_t* in_data, size_t block_size_samples,
+ uint8_t* payload, size_t max_bytes) = 0;
// PacketLost(...) determines weather a packet sent at an indicated time gets
// lost or not.
@@ -111,13 +111,13 @@
const int out_sampling_khz_;
// Number of samples per channel in a frame.
- const int in_size_samples_;
+ const size_t in_size_samples_;
// Expected output number of samples per channel in a frame.
- const int out_size_samples_;
+ const size_t out_size_samples_;
size_t payload_size_bytes_;
- int max_payload_bytes_;
+ size_t max_payload_bytes_;
rtc::scoped_ptr<InputAudioFile> in_file_;
rtc::scoped_ptr<AudioSink> output_;
diff --git a/webrtc/modules/audio_coding/neteq/tools/neteq_rtpplay.cc b/webrtc/modules/audio_coding/neteq/tools/neteq_rtpplay.cc
index 6bcd717..1c08078 100644
--- a/webrtc/modules/audio_coding/neteq/tools/neteq_rtpplay.cc
+++ b/webrtc/modules/audio_coding/neteq/tools/neteq_rtpplay.cc
@@ -23,6 +23,7 @@
#include "google/gflags.h"
#include "webrtc/base/checks.h"
+#include "webrtc/base/safe_conversions.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/modules/audio_coding/codecs/pcm16b/include/pcm16b.h"
#include "webrtc/modules/audio_coding/neteq/interface/neteq.h"
@@ -324,7 +325,7 @@
// Encode it as PCM16.
assert((*payload).get());
payload_len = WebRtcPcm16b_Encode((*replacement_audio).get(),
- static_cast<int16_t>(*frame_size_samples),
+ *frame_size_samples,
(*payload).get());
assert(payload_len == 2 * *frame_size_samples);
// Change payload type to PCM16.
@@ -358,7 +359,7 @@
int main(int argc, char* argv[]) {
static const int kMaxChannels = 5;
- static const int kMaxSamplesPerMs = 48000 / 1000;
+ static const size_t kMaxSamplesPerMs = 48000 / 1000;
static const int kOutputBlockSizeMs = 10;
std::string program_name = argv[0];
@@ -552,11 +553,11 @@
// Check if it is time to get output audio.
if (time_now_ms >= next_output_time_ms) {
- static const int kOutDataLen =
+ static const size_t kOutDataLen =
kOutputBlockSizeMs * kMaxSamplesPerMs * kMaxChannels;
int16_t out_data[kOutDataLen];
int num_channels;
- int samples_per_channel;
+ size_t samples_per_channel;
int error = neteq->GetAudio(kOutDataLen, out_data, &samples_per_channel,
&num_channels, NULL);
if (error != NetEq::kOK) {
@@ -564,7 +565,8 @@
neteq->LastError() << std::endl;
} else {
// Calculate sample rate from output size.
- sample_rate_hz = 1000 * samples_per_channel / kOutputBlockSizeMs;
+ sample_rate_hz = rtc::checked_cast<int>(
+ 1000 * samples_per_channel / kOutputBlockSizeMs);
}
// Write to file.
diff --git a/webrtc/modules/audio_coding/neteq/tools/resample_input_audio_file.cc b/webrtc/modules/audio_coding/neteq/tools/resample_input_audio_file.cc
index 47450bc..d69918b 100644
--- a/webrtc/modules/audio_coding/neteq/tools/resample_input_audio_file.cc
+++ b/webrtc/modules/audio_coding/neteq/tools/resample_input_audio_file.cc
@@ -26,14 +26,11 @@
if (!InputAudioFile::Read(samples_to_read, temp_destination.get()))
return false;
resampler_.ResetIfNeeded(file_rate_hz_, output_rate_hz, 1);
- int output_length = 0;
- CHECK_EQ(resampler_.Push(temp_destination.get(),
- static_cast<int>(samples_to_read),
- destination,
- static_cast<int>(samples),
- output_length),
+ size_t output_length = 0;
+ CHECK_EQ(resampler_.Push(temp_destination.get(), samples_to_read, destination,
+ samples, output_length),
0);
- CHECK_EQ(static_cast<int>(samples), output_length);
+ CHECK_EQ(samples, output_length);
return true;
}