Add mute state field to AudioFrame and switch some callers to use it. Also make AudioFrame::data_ private and instead provide:

const int16_t* data() const;
int16_t* mutable_data();

- data() returns a zeroed static buffer on muted frames (to avoid unnecessary zeroing of the member buffer) and directly returns AudioFrame::data_ on unmuted frames.
- mutable_data() lazily zeroes AudioFrame::data_ if the frame is currently muted, sets muted=false, and returns AudioFrame::data_.

These accessors serve to "force" callers to be aware of the mute state field, i.e. lazy zeroing is not the primary motivation.

This change only optimizes handling of muted frames where it is somewhat trivial to do so. Other improvements requiring more significant structural changes will come later.

BUG=webrtc:7343
TBR=henrika

Review-Url: https://codereview.webrtc.org/2750783004
Cr-Commit-Position: refs/heads/master@{#18543}
diff --git a/webrtc/modules/audio_coding/test/PCMFile.cc b/webrtc/modules/audio_coding/test/PCMFile.cc
index 5d2d818..03d4fa7 100644
--- a/webrtc/modules/audio_coding/test/PCMFile.cc
+++ b/webrtc/modules/audio_coding/test/PCMFile.cc
@@ -125,11 +125,13 @@
     channels = 2;
   }
 
-  int32_t payload_size = (int32_t) fread(audio_frame.data_, sizeof(uint16_t),
+  int32_t payload_size = (int32_t) fread(audio_frame.mutable_data(),
+                                         sizeof(uint16_t),
                                          samples_10ms_ * channels, pcm_file_);
   if (payload_size < samples_10ms_ * channels) {
+    int16_t* frame_data = audio_frame.mutable_data();
     for (int k = payload_size; k < samples_10ms_ * channels; k++) {
-      audio_frame.data_[k] = 0;
+      frame_data[k] = 0;
     }
     if (auto_rewind_) {
       rewind(pcm_file_);
@@ -149,19 +151,20 @@
   return samples_10ms_;
 }
 
-void PCMFile::Write10MsData(AudioFrame& audio_frame) {
+void PCMFile::Write10MsData(const AudioFrame& audio_frame) {
   if (audio_frame.num_channels_ == 1) {
     if (!save_stereo_) {
-      if (fwrite(audio_frame.data_, sizeof(uint16_t),
+      if (fwrite(audio_frame.data(), sizeof(uint16_t),
                  audio_frame.samples_per_channel_, pcm_file_) !=
           static_cast<size_t>(audio_frame.samples_per_channel_)) {
         return;
       }
     } else {
+      const int16_t* frame_data = audio_frame.data();
       int16_t* stereo_audio = new int16_t[2 * audio_frame.samples_per_channel_];
       for (size_t k = 0; k < audio_frame.samples_per_channel_; k++) {
-        stereo_audio[k << 1] = audio_frame.data_[k];
-        stereo_audio[(k << 1) + 1] = audio_frame.data_[k];
+        stereo_audio[k << 1] = frame_data[k];
+        stereo_audio[(k << 1) + 1] = frame_data[k];
       }
       if (fwrite(stereo_audio, sizeof(int16_t),
                  2 * audio_frame.samples_per_channel_, pcm_file_) !=
@@ -171,7 +174,7 @@
       delete[] stereo_audio;
     }
   } else {
-    if (fwrite(audio_frame.data_, sizeof(int16_t),
+    if (fwrite(audio_frame.data(), sizeof(int16_t),
                audio_frame.num_channels_ * audio_frame.samples_per_channel_,
                pcm_file_) !=
         static_cast<size_t>(audio_frame.num_channels_ *
@@ -181,7 +184,8 @@
   }
 }
 
-void PCMFile::Write10MsData(int16_t* playout_buffer, size_t length_smpls) {
+void PCMFile::Write10MsData(const int16_t* playout_buffer,
+                            size_t length_smpls) {
   if (fwrite(playout_buffer, sizeof(uint16_t), length_smpls, pcm_file_) !=
       length_smpls) {
     return;