Make AudioFrameType an enum class, and move it to audio_coding_module_typedefs.h
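
With a scoped enum, the enumerators no longer leak into the enclosing
namespace and no longer convert implicitly to int, so call sites must use
the qualified name (see the opus_test.cc hunk below). A rough sketch of the
shape of the new declaration in audio_coding_module_typedefs.h; the member
list besides kAudioFrameSpeech is assumed here for illustration only:

  // Before: unscoped enum; values were usable as bare identifiers
  // such as kAudioFrameSpeech.
  // enum AudioFrameType { kAudioFrameSpeech, ... };

  // After: scoped enum class, declared in audio_coding_module_typedefs.h.
  // Enumerators other than kAudioFrameSpeech are illustrative assumptions.
  enum class AudioFrameType {
    kEmptyFrame,
    kAudioFrameSpeech,
    kAudioFrameCN,
  };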
Bug: webrtc:5876
Change-Id: I0c92f9410fcf0832bfa321229b3437134255dba6
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/128085
Commit-Queue: Niels Moller <nisse@webrtc.org>
Reviewed-by: Karl Wiberg <kwiberg@webrtc.org>
Reviewed-by: Danil Chapovalov <danilchap@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#27190}
diff --git a/modules/audio_coding/test/opus_test.cc b/modules/audio_coding/test/opus_test.cc
index b8d8e67..55f7af0 100644
--- a/modules/audio_coding/test/opus_test.cc
+++ b/modules/audio_coding/test/opus_test.cc
@@ -315,8 +315,8 @@
}
// Send data to the channel. "channel" will handle the loss simulation.
- channel->SendData(kAudioFrameSpeech, payload_type_, rtp_timestamp_,
- bitstream, bitstream_len_byte, NULL);
+ channel->SendData(AudioFrameType::kAudioFrameSpeech, payload_type_,
+ rtp_timestamp_, bitstream, bitstream_len_byte, NULL);
if (first_packet) {
first_packet = false;
start_time_stamp = rtp_timestamp_;