Remove remaining quality-mode (QM) analysis.

This was never turned on, carries a lot of complexity, and somehow
manages to trigger a bug in a downstream project.

BUG=webrtc:5066
R=marpan@webrtc.org
TBR=mflodman@webrtc.org

Review URL: https://codereview.webrtc.org/1917323002 .

Cr-Commit-Position: refs/heads/master@{#12692}
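
For downstream callers, the visible API change is that the QM hooks are gone:
VideoCodingModule::AddVideoFrame() drops its VideoContentMetrics* parameter and
MediaOptimization::SetTargetRates() drops its VCMQMSettingsCallback* parameter.
A minimal call-site sketch follows; the variable names below are illustrative,
not part of this patch:

  // Before this patch:
  //   vcm->AddVideoFrame(frame, content_metrics, codec_specific_info);
  //   media_opt.SetTargetRates(target_bps, fraction_lost, rtt_ms,
  //                            &protection_callback, &qm_settings_callback);
  // After this patch:
  vcm->AddVideoFrame(frame, codec_specific_info);
  media_opt.SetTargetRates(target_bps, fraction_lost, rtt_ms,
                           &protection_callback);
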
diff --git a/webrtc/modules/include/module_common_types.h b/webrtc/modules/include/module_common_types.h
index 8c72617..3572cd6 100644
--- a/webrtc/modules/include/module_common_types.h
+++ b/webrtc/modules/include/module_common_types.h
@@ -494,25 +494,6 @@
   virtual ~CallStatsObserver() {}
 };
 
-struct VideoContentMetrics {
-  VideoContentMetrics()
-      : motion_magnitude(0.0f),
-        spatial_pred_err(0.0f),
-        spatial_pred_err_h(0.0f),
-        spatial_pred_err_v(0.0f) {}
-
-  void Reset() {
-    motion_magnitude = 0.0f;
-    spatial_pred_err = 0.0f;
-    spatial_pred_err_h = 0.0f;
-    spatial_pred_err_v = 0.0f;
-  }
-  float motion_magnitude;
-  float spatial_pred_err;
-  float spatial_pred_err_h;
-  float spatial_pred_err_v;
-};
-
 /* This class holds up to 60 ms of super-wideband (32 kHz) stereo audio. It
  * allows for adding and subtracting frames while keeping track of the resulting
  * states.
diff --git a/webrtc/modules/modules.gyp b/webrtc/modules/modules.gyp
index 4365c65..4a5f456 100644
--- a/webrtc/modules/modules.gyp
+++ b/webrtc/modules/modules.gyp
@@ -380,13 +380,11 @@
             'video_coding/video_coding_robustness_unittest.cc',
             'video_coding/video_receiver_unittest.cc',
             'video_coding/video_sender_unittest.cc',
-            'video_coding/qm_select_unittest.cc',
             'video_coding/test/stream_generator.cc',
             'video_coding/test/stream_generator.h',
             'video_coding/utility/frame_dropper_unittest.cc',
             'video_coding/utility/ivf_file_writer_unittest.cc',
             'video_coding/utility/quality_scaler_unittest.cc',
-            'video_processing/test/content_metrics_test.cc',
             'video_processing/test/denoiser_test.cc',
             'video_processing/test/video_processing_unittest.cc',
             'video_processing/test/video_processing_unittest.h',
diff --git a/webrtc/modules/video_coding/BUILD.gn b/webrtc/modules/video_coding/BUILD.gn
index f163b95..fe600da 100644
--- a/webrtc/modules/video_coding/BUILD.gn
+++ b/webrtc/modules/video_coding/BUILD.gn
@@ -14,8 +14,6 @@
     "codec_database.h",
     "codec_timer.cc",
     "codec_timer.h",
-    "content_metrics_processing.cc",
-    "content_metrics_processing.h",
     "decoding_state.cc",
     "decoding_state.h",
     "encoded_frame.cc",
@@ -54,9 +52,6 @@
     "packet_buffer.h",
     "percentile_filter.cc",
     "percentile_filter.h",
-    "qm_select.cc",
-    "qm_select.h",
-    "qm_select_data.h",
     "receiver.cc",
     "receiver.h",
     "rtt_filter.cc",
diff --git a/webrtc/modules/video_coding/content_metrics_processing.cc b/webrtc/modules/video_coding/content_metrics_processing.cc
deleted file mode 100644
index b2586fc..0000000
--- a/webrtc/modules/video_coding/content_metrics_processing.cc
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "webrtc/modules/video_coding/content_metrics_processing.h"
-
-#include <math.h>
-
-#include "webrtc/modules/include/module_common_types.h"
-#include "webrtc/modules/video_coding/include/video_coding_defines.h"
-
-namespace webrtc {
-//////////////////////////////////
-/// VCMContentMetricsProcessing //
-//////////////////////////////////
-
-VCMContentMetricsProcessing::VCMContentMetricsProcessing()
-    : recursive_avg_factor_(1 / 150.0f),  // matched to  30fps.
-      frame_cnt_uniform_avg_(0),
-      avg_motion_level_(0.0f),
-      avg_spatial_level_(0.0f) {
-  recursive_avg_ = new VideoContentMetrics();
-  uniform_avg_ = new VideoContentMetrics();
-}
-
-VCMContentMetricsProcessing::~VCMContentMetricsProcessing() {
-  delete recursive_avg_;
-  delete uniform_avg_;
-}
-
-int VCMContentMetricsProcessing::Reset() {
-  recursive_avg_->Reset();
-  uniform_avg_->Reset();
-  frame_cnt_uniform_avg_ = 0;
-  avg_motion_level_ = 0.0f;
-  avg_spatial_level_ = 0.0f;
-  return VCM_OK;
-}
-
-void VCMContentMetricsProcessing::UpdateFrameRate(uint32_t frameRate) {
-  if (frameRate == 0)
-    frameRate = 1;
-  // Update factor for recursive averaging.
-  recursive_avg_factor_ = static_cast<float>(1000.0f) /
-                          static_cast<float>(frameRate * kQmMinIntervalMs);
-}
-
-VideoContentMetrics* VCMContentMetricsProcessing::LongTermAvgData() {
-  return recursive_avg_;
-}
-
-VideoContentMetrics* VCMContentMetricsProcessing::ShortTermAvgData() {
-  if (frame_cnt_uniform_avg_ == 0) {
-    return NULL;
-  }
-  // Two metrics are used: motion and spatial level.
-  uniform_avg_->motion_magnitude =
-      avg_motion_level_ / static_cast<float>(frame_cnt_uniform_avg_);
-  uniform_avg_->spatial_pred_err =
-      avg_spatial_level_ / static_cast<float>(frame_cnt_uniform_avg_);
-  return uniform_avg_;
-}
-
-void VCMContentMetricsProcessing::ResetShortTermAvgData() {
-  // Reset.
-  avg_motion_level_ = 0.0f;
-  avg_spatial_level_ = 0.0f;
-  frame_cnt_uniform_avg_ = 0;
-}
-
-int VCMContentMetricsProcessing::UpdateContentData(
-    const VideoContentMetrics* contentMetrics) {
-  if (contentMetrics == NULL) {
-    return VCM_OK;
-  }
-  return ProcessContent(contentMetrics);
-}
-
-int VCMContentMetricsProcessing::ProcessContent(
-    const VideoContentMetrics* contentMetrics) {
-  // Update the recursive averaged metrics: average is over longer window
-  // of time: over QmMinIntervalMs ms.
-  UpdateRecursiveAvg(contentMetrics);
-  // Update the uniform averaged metrics: average is over shorter window
-  // of time: based on ~RTCP reports.
-  UpdateUniformAvg(contentMetrics);
-  return VCM_OK;
-}
-
-void VCMContentMetricsProcessing::UpdateUniformAvg(
-    const VideoContentMetrics* contentMetrics) {
-  // Update frame counter.
-  frame_cnt_uniform_avg_ += 1;
-  // Update averaged metrics: motion and spatial level are used.
-  avg_motion_level_ += contentMetrics->motion_magnitude;
-  avg_spatial_level_ += contentMetrics->spatial_pred_err;
-  return;
-}
-
-void VCMContentMetricsProcessing::UpdateRecursiveAvg(
-    const VideoContentMetrics* contentMetrics) {
-  // Spatial metrics: 2x2, 1x2(H), 2x1(V).
-  recursive_avg_->spatial_pred_err =
-      (1 - recursive_avg_factor_) * recursive_avg_->spatial_pred_err +
-      recursive_avg_factor_ * contentMetrics->spatial_pred_err;
-
-  recursive_avg_->spatial_pred_err_h =
-      (1 - recursive_avg_factor_) * recursive_avg_->spatial_pred_err_h +
-      recursive_avg_factor_ * contentMetrics->spatial_pred_err_h;
-
-  recursive_avg_->spatial_pred_err_v =
-      (1 - recursive_avg_factor_) * recursive_avg_->spatial_pred_err_v +
-      recursive_avg_factor_ * contentMetrics->spatial_pred_err_v;
-
-  // Motion metric: Derived from NFD (normalized frame difference).
-  recursive_avg_->motion_magnitude =
-      (1 - recursive_avg_factor_) * recursive_avg_->motion_magnitude +
-      recursive_avg_factor_ * contentMetrics->motion_magnitude;
-}
-}  // namespace webrtc
diff --git a/webrtc/modules/video_coding/content_metrics_processing.h b/webrtc/modules/video_coding/content_metrics_processing.h
deleted file mode 100644
index 3f67ec1..0000000
--- a/webrtc/modules/video_coding/content_metrics_processing.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_VIDEO_CODING_CONTENT_METRICS_PROCESSING_H_
-#define WEBRTC_MODULES_VIDEO_CODING_CONTENT_METRICS_PROCESSING_H_
-
-#include "webrtc/typedefs.h"
-
-namespace webrtc {
-
-struct VideoContentMetrics;
-
-// QM interval time (in ms)
-enum { kQmMinIntervalMs = 10000 };
-
-// Flag for NFD metric vs motion metric
-enum { kNfdMetric = 1 };
-
-/**********************************/
-/* Content Metrics Processing     */
-/**********************************/
-class VCMContentMetricsProcessing {
- public:
-  VCMContentMetricsProcessing();
-  ~VCMContentMetricsProcessing();
-
-  // Update class with latest metrics.
-  int UpdateContentData(const VideoContentMetrics* contentMetrics);
-
-  // Reset the short-term averaged content data.
-  void ResetShortTermAvgData();
-
-  // Initialize.
-  int Reset();
-
-  // Inform class of current frame rate.
-  void UpdateFrameRate(uint32_t frameRate);
-
-  // Returns the long-term averaged content data: recursive average over longer
-  // time scale.
-  VideoContentMetrics* LongTermAvgData();
-
-  // Returns the short-term averaged content data: uniform average over
-  // shorter time scalE.
-  VideoContentMetrics* ShortTermAvgData();
-
- private:
-  // Compute working average.
-  int ProcessContent(const VideoContentMetrics* contentMetrics);
-
-  // Update the recursive averaged metrics: longer time average (~5/10 secs).
-  void UpdateRecursiveAvg(const VideoContentMetrics* contentMetrics);
-
-  // Update the uniform averaged metrics: shorter time average (~RTCP report).
-  void UpdateUniformAvg(const VideoContentMetrics* contentMetrics);
-
-  VideoContentMetrics* recursive_avg_;
-  VideoContentMetrics* uniform_avg_;
-  float recursive_avg_factor_;
-  uint32_t frame_cnt_uniform_avg_;
-  float avg_motion_level_;
-  float avg_spatial_level_;
-};
-}  // namespace webrtc
-#endif  // WEBRTC_MODULES_VIDEO_CODING_CONTENT_METRICS_PROCESSING_H_
diff --git a/webrtc/modules/video_coding/include/video_coding.h b/webrtc/modules/video_coding/include/video_coding.h
index 7d2bdb6..0f85679 100644
--- a/webrtc/modules/video_coding/include/video_coding.h
+++ b/webrtc/modules/video_coding/include/video_coding.h
@@ -31,6 +31,10 @@
 
 class Clock;
 class EncodedImageCallback;
+// TODO(pbos): Remove VCMQMSettingsCallback completely. This might be done by
+// removing the VCM and using VideoSender/VideoReceiver directly as a public
+// interface.
+class VCMQMSettingsCallback;
 class VideoEncoder;
 class VideoDecoder;
 struct CodecSpecificInfo;
@@ -223,7 +227,6 @@
   //                     < 0,    on error.
   virtual int32_t AddVideoFrame(
       const VideoFrame& videoFrame,
-      const VideoContentMetrics* contentMetrics = NULL,
       const CodecSpecificInfo* codecSpecificInfo = NULL) = 0;
 
   // Next frame encoded should be an intra frame (keyframe).
diff --git a/webrtc/modules/video_coding/include/video_coding_defines.h b/webrtc/modules/video_coding/include/video_coding_defines.h
index 7c5d00b..ba71803 100644
--- a/webrtc/modules/video_coding/include/video_coding_defines.h
+++ b/webrtc/modules/video_coding/include/video_coding_defines.h
@@ -176,18 +176,6 @@
   virtual ~KeyFrameRequestSender() {}
 };
 
-// Callback used to inform the user of the the desired resolution
-// as subscribed by Media Optimization (Quality Modes)
-class VCMQMSettingsCallback {
- public:
-  virtual int32_t SetVideoQMSettings(const uint32_t frameRate,
-                                     const uint32_t width,
-                                     const uint32_t height) = 0;
-
- protected:
-  virtual ~VCMQMSettingsCallback() {}
-};
-
 }  // namespace webrtc
 
 #endif  // WEBRTC_MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODING_DEFINES_H_
diff --git a/webrtc/modules/video_coding/media_opt_util.h b/webrtc/modules/video_coding/media_opt_util.h
index a8455cb..ad314ac 100644
--- a/webrtc/modules/video_coding/media_opt_util.h
+++ b/webrtc/modules/video_coding/media_opt_util.h
@@ -18,7 +18,6 @@
 
 #include "webrtc/base/exp_filter.h"
 #include "webrtc/modules/video_coding/internal_defines.h"
-#include "webrtc/modules/video_coding/qm_select.h"
 #include "webrtc/system_wrappers/include/trace.h"
 #include "webrtc/typedefs.h"
 
diff --git a/webrtc/modules/video_coding/media_optimization.cc b/webrtc/modules/video_coding/media_optimization.cc
index f24637e..d5fbadc 100644
--- a/webrtc/modules/video_coding/media_optimization.cc
+++ b/webrtc/modules/video_coding/media_optimization.cc
@@ -11,8 +11,6 @@
 #include "webrtc/modules/video_coding/media_optimization.h"
 
 #include "webrtc/base/logging.h"
-#include "webrtc/modules/video_coding/content_metrics_processing.h"
-#include "webrtc/modules/video_coding/qm_select.h"
 #include "webrtc/modules/video_coding/utility/frame_dropper.h"
 #include "webrtc/system_wrappers/include/clock.h"
 
@@ -81,16 +79,11 @@
       max_payload_size_(1460),
       video_target_bitrate_(0),
       incoming_frame_rate_(0),
-      enable_qm_(false),
       encoded_frame_samples_(),
       avg_sent_bit_rate_bps_(0),
       avg_sent_framerate_(0),
       key_frame_cnt_(0),
       delta_frame_cnt_(0),
-      content_(new VCMContentMetricsProcessing()),
-      qm_resolution_(new VCMQmResolution()),
-      last_qm_update_time_(0),
-      last_change_time_(0),
       num_layers_(0),
       suspension_enabled_(false),
       video_suspended_(false),
@@ -113,8 +106,6 @@
   frame_dropper_->Reset();
   loss_prot_logic_->Reset(clock_->TimeInMilliseconds());
   frame_dropper_->SetRates(0, 0);
-  content_->Reset();
-  qm_resolution_->Reset();
   loss_prot_logic_->UpdateFrameRate(incoming_frame_rate_);
   loss_prot_logic_->Reset(clock_->TimeInMilliseconds());
   send_statistics_zero_encode_ = 0;
@@ -124,8 +115,6 @@
   user_frame_rate_ = 0;
   key_frame_cnt_ = 0;
   delta_frame_cnt_ = 0;
-  last_qm_update_time_ = 0;
-  last_change_time_ = 0;
   encoded_frame_samples_.clear();
   avg_sent_bit_rate_bps_ = 0;
   num_layers_ = 1;
@@ -153,12 +142,7 @@
                                                 int num_layers,
                                                 int32_t mtu) {
   // Everything codec specific should be reset here since this means the codec
-  // has changed. If native dimension values have changed, then either user
-  // initiated change, or QM initiated change. Will be able to determine only
-  // after the processing of the first frame.
-  last_change_time_ = clock_->TimeInMilliseconds();
-  content_->Reset();
-  content_->UpdateFrameRate(frame_rate);
+  // has changed.
 
   max_bit_rate_ = max_bit_rate;
   send_codec_type_ = send_codec_type;
@@ -175,16 +159,13 @@
   codec_height_ = height;
   num_layers_ = (num_layers <= 1) ? 1 : num_layers;  // Can also be zero.
   max_payload_size_ = mtu;
-  qm_resolution_->Initialize(target_bitrate_kbps, user_frame_rate_,
-                             codec_width_, codec_height_, num_layers_);
 }
 
 uint32_t MediaOptimization::SetTargetRates(
     uint32_t target_bitrate,
     uint8_t fraction_lost,
     int64_t round_trip_time_ms,
-    VCMProtectionCallback* protection_callback,
-    VCMQMSettingsCallback* qmsettings_callback) {
+    VCMProtectionCallback* protection_callback) {
   CriticalSectionScoped lock(crit_sect_.get());
   VCMProtectionMethod* selected_method = loss_prot_logic_->SelectedMethod();
   float target_bitrate_kbps = static_cast<float>(target_bitrate) / 1000.0f;
@@ -220,7 +201,6 @@
   float protection_overhead_rate = 0.0f;
 
   // Update protection settings, when applicable.
-  float sent_video_rate_kbps = 0.0f;
   if (loss_prot_logic_->SelectedType() != kNone) {
     // Update method will compute the robustness settings for the given
     // protection method and the overhead cost
@@ -255,7 +235,6 @@
     // Get the effective packet loss for encoder ER when applicable. Should be
     // passed to encoder via fraction_lost.
     packet_loss_enc = selected_method->RequiredPacketLossER();
-    sent_video_rate_kbps = static_cast<float>(sent_video_rate_bps) / 1000.0f;
   }
 
   // Source coding rate: total rate - protection overhead.
@@ -271,19 +250,6 @@
       static_cast<float>(video_target_bitrate_) / 1000.0f;
   frame_dropper_->SetRates(target_video_bitrate_kbps, incoming_frame_rate_);
 
-  if (enable_qm_ && qmsettings_callback) {
-    // Update QM with rates.
-    qm_resolution_->UpdateRates(target_video_bitrate_kbps, sent_video_rate_kbps,
-                                incoming_frame_rate_, fraction_lost_);
-    // Check for QM selection.
-    bool select_qm = CheckStatusForQMchange();
-    if (select_qm) {
-      SelectQuality(qmsettings_callback);
-    }
-    // Reset the short-term averaged content data.
-    content_->ResetShortTermAvgData();
-  }
-
   CheckSuspendConditions();
 
   return video_target_bitrate_;
@@ -357,11 +323,6 @@
         loss_prot_logic_->UpdatePacketsPerFrameKey(
             min_packets_per_frame, clock_->TimeInMilliseconds());
       }
-
-      if (enable_qm_) {
-        // Update quality select with encoded length.
-        qm_resolution_->UpdateEncodedSize(encoded_length);
-      }
     }
     if (!delta_frame && encoded_length > 0) {
       loss_prot_logic_->UpdateKeyFrameSize(static_cast<float>(encoded_length));
@@ -378,11 +339,6 @@
   return VCM_OK;
 }
 
-void MediaOptimization::EnableQM(bool enable) {
-  CriticalSectionScoped lock(crit_sect_.get());
-  enable_qm_ = enable;
-}
-
 void MediaOptimization::EnableFrameDropper(bool enable) {
   CriticalSectionScoped lock(crit_sect_.get());
   frame_dropper_->Enable(enable);
@@ -414,19 +370,6 @@
   return frame_dropper_->DropFrame();
 }
 
-void MediaOptimization::UpdateContentData(
-    const VideoContentMetrics* content_metrics) {
-  CriticalSectionScoped lock(crit_sect_.get());
-  // Updating content metrics.
-  if (content_metrics == NULL) {
-    // Disable QM if metrics are NULL.
-    enable_qm_ = false;
-    qm_resolution_->Reset();
-  } else {
-    content_->UpdateContentData(content_metrics);
-  }
-}
-
 void MediaOptimization::UpdateIncomingFrameRate() {
   int64_t now = clock_->TimeInMilliseconds();
   if (incoming_frame_times_[0] == 0) {
@@ -441,36 +384,6 @@
   ProcessIncomingFrameRate(now);
 }
 
-int32_t MediaOptimization::SelectQuality(
-    VCMQMSettingsCallback* video_qmsettings_callback) {
-  // Reset quantities for QM select.
-  qm_resolution_->ResetQM();
-
-  // Update QM will long-term averaged content metrics.
-  qm_resolution_->UpdateContent(content_->LongTermAvgData());
-
-  // Select quality mode.
-  VCMResolutionScale* qm = NULL;
-  int32_t ret = qm_resolution_->SelectResolution(&qm);
-  if (ret < 0) {
-    return ret;
-  }
-
-  // Check for updates to spatial/temporal modes.
-  QMUpdate(qm, video_qmsettings_callback);
-
-  // Reset all the rate and related frame counters quantities.
-  qm_resolution_->ResetRates();
-
-  // Reset counters.
-  last_qm_update_time_ = clock_->TimeInMilliseconds();
-
-  // Reset content metrics.
-  content_->Reset();
-
-  return VCM_OK;
-}
-
 void MediaOptimization::PurgeOldFrameSamples(int64_t now_ms) {
   while (!encoded_frame_samples_.empty()) {
     if (now_ms - encoded_frame_samples_.front().time_complete_ms >
@@ -517,65 +430,6 @@
   }
 }
 
-bool MediaOptimization::QMUpdate(
-    VCMResolutionScale* qm,
-    VCMQMSettingsCallback* video_qmsettings_callback) {
-  // Check for no change.
-  if (!qm->change_resolution_spatial && !qm->change_resolution_temporal) {
-    return false;
-  }
-
-  // Check for change in frame rate.
-  if (qm->change_resolution_temporal) {
-    incoming_frame_rate_ = qm->frame_rate;
-    // Reset frame rate estimate.
-    memset(incoming_frame_times_, -1, sizeof(incoming_frame_times_));
-  }
-
-  // Check for change in frame size.
-  if (qm->change_resolution_spatial) {
-    codec_width_ = qm->codec_width;
-    codec_height_ = qm->codec_height;
-  }
-
-  LOG(LS_INFO) << "Media optimizer requests the video resolution to be changed "
-                  "to "
-               << qm->codec_width << "x" << qm->codec_height << "@"
-               << qm->frame_rate;
-
-  // Update VPM with new target frame rate and frame size.
-  // Note: use |qm->frame_rate| instead of |_incoming_frame_rate| for updating
-  // target frame rate in VPM frame dropper. The quantity |_incoming_frame_rate|
-  // will vary/fluctuate, and since we don't want to change the state of the
-  // VPM frame dropper, unless a temporal action was selected, we use the
-  // quantity |qm->frame_rate| for updating.
-  video_qmsettings_callback->SetVideoQMSettings(qm->frame_rate, codec_width_,
-                                                codec_height_);
-  content_->UpdateFrameRate(qm->frame_rate);
-  qm_resolution_->UpdateCodecParameters(qm->frame_rate, codec_width_,
-                                        codec_height_);
-  return true;
-}
-
-// Check timing constraints and look for significant change in:
-// (1) scene content,
-// (2) target bit rate.
-bool MediaOptimization::CheckStatusForQMchange() {
-  bool status = true;
-
-  // Check that we do not call QMSelect too often, and that we waited some time
-  // (to sample the metrics) from the event last_change_time
-  // last_change_time is the time where user changed the size/rate/frame rate
-  // (via SetEncodingData).
-  int64_t now = clock_->TimeInMilliseconds();
-  if ((now - last_qm_update_time_) < kQmMinIntervalMs ||
-      (now - last_change_time_) < kQmMinIntervalMs) {
-    status = false;
-  }
-
-  return status;
-}
-
 // Allowing VCM to keep track of incoming frame rate.
 void MediaOptimization::ProcessIncomingFrameRate(int64_t now) {
   int32_t num = 0;
diff --git a/webrtc/modules/video_coding/media_optimization.h b/webrtc/modules/video_coding/media_optimization.h
index 060cd89..081b2a9 100644
--- a/webrtc/modules/video_coding/media_optimization.h
+++ b/webrtc/modules/video_coding/media_optimization.h
@@ -17,7 +17,6 @@
 #include "webrtc/modules/include/module_common_types.h"
 #include "webrtc/modules/video_coding/include/video_coding.h"
 #include "webrtc/modules/video_coding/media_opt_util.h"
-#include "webrtc/modules/video_coding/qm_select.h"
 #include "webrtc/system_wrappers/include/critical_section_wrapper.h"
 
 namespace webrtc {
@@ -59,11 +58,9 @@
   uint32_t SetTargetRates(uint32_t target_bitrate,
                           uint8_t fraction_lost,
                           int64_t round_trip_time_ms,
-                          VCMProtectionCallback* protection_callback,
-                          VCMQMSettingsCallback* qmsettings_callback);
+                          VCMProtectionCallback* protection_callback);
 
   void SetProtectionMethod(VCMProtectionMethodEnum method);
-  void EnableQM(bool enable);
   void EnableFrameDropper(bool enable);
 
   // Lets the sender suspend video when the rate drops below
@@ -74,8 +71,6 @@
 
   bool DropFrame();
 
-  void UpdateContentData(const VideoContentMetrics* content_metrics);
-
   // Informs Media Optimization of encoded output.
   int32_t UpdateWithEncodedData(const EncodedImage& encoded_image);
 
@@ -98,19 +93,6 @@
   void UpdateSentBitrate(int64_t now_ms) EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
   void UpdateSentFramerate() EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
 
-  // Computes new Quality Mode.
-  int32_t SelectQuality(VCMQMSettingsCallback* qmsettings_callback)
-      EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
-
-  // Verifies if QM settings differ from default, i.e. if an update is required.
-  // Computes actual values, as will be sent to the encoder.
-  bool QMUpdate(VCMResolutionScale* qm,
-                VCMQMSettingsCallback* qmsettings_callback)
-      EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
-
-  // Checks if we should make a QM change. Return true if yes, false otherwise.
-  bool CheckStatusForQMchange() EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
-
   void ProcessIncomingFrameRate(int64_t now)
       EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
 
@@ -152,16 +134,11 @@
   int video_target_bitrate_ GUARDED_BY(crit_sect_);
   float incoming_frame_rate_ GUARDED_BY(crit_sect_);
   int64_t incoming_frame_times_[kFrameCountHistorySize] GUARDED_BY(crit_sect_);
-  bool enable_qm_ GUARDED_BY(crit_sect_);
   std::list<EncodedFrameSample> encoded_frame_samples_ GUARDED_BY(crit_sect_);
   uint32_t avg_sent_bit_rate_bps_ GUARDED_BY(crit_sect_);
   uint32_t avg_sent_framerate_ GUARDED_BY(crit_sect_);
   uint32_t key_frame_cnt_ GUARDED_BY(crit_sect_);
   uint32_t delta_frame_cnt_ GUARDED_BY(crit_sect_);
-  std::unique_ptr<VCMContentMetricsProcessing> content_ GUARDED_BY(crit_sect_);
-  std::unique_ptr<VCMQmResolution> qm_resolution_ GUARDED_BY(crit_sect_);
-  int64_t last_qm_update_time_ GUARDED_BY(crit_sect_);
-  int64_t last_change_time_ GUARDED_BY(crit_sect_);  // Content/user triggered.
   int num_layers_ GUARDED_BY(crit_sect_);
   bool suspension_enabled_ GUARDED_BY(crit_sect_);
   bool video_suspended_ GUARDED_BY(crit_sect_);
diff --git a/webrtc/modules/video_coding/media_optimization_unittest.cc b/webrtc/modules/video_coding/media_optimization_unittest.cc
index 3f8ac5d..e6a1bcc 100644
--- a/webrtc/modules/video_coding/media_optimization_unittest.cc
+++ b/webrtc/modules/video_coding/media_optimization_unittest.cc
@@ -66,7 +66,7 @@
   media_opt_.SetTargetRates(target_bitrate_kbps * 1000,
                             0,    // Lossrate.
                             100,  // RTT in ms.
-                            nullptr, nullptr);
+                            nullptr);
   media_opt_.EnableFrameDropper(true);
   for (int time = 0; time < 2000; time += frame_time_ms_) {
     ASSERT_NO_FATAL_FAILURE(AddFrameAndAdvanceTime(target_bitrate_kbps, false));
@@ -76,7 +76,7 @@
   media_opt_.SetTargetRates(kThresholdBps - 1000,
                             0,    // Lossrate.
                             100,  // RTT in ms.
-                            nullptr, nullptr);
+                            nullptr);
   // Expect the muter to engage immediately and stay muted.
   // Test during 2 seconds.
   for (int time = 0; time < 2000; time += frame_time_ms_) {
@@ -89,7 +89,7 @@
   media_opt_.SetTargetRates(kThresholdBps + 1000,
                             0,    // Lossrate.
                             100,  // RTT in ms.
-                            nullptr, nullptr);
+                            nullptr);
   // Expect the muter to stay muted.
   // Test during 2 seconds.
   for (int time = 0; time < 2000; time += frame_time_ms_) {
@@ -101,7 +101,7 @@
   media_opt_.SetTargetRates(kThresholdBps + kWindowBps + 1000,
                             0,    // Lossrate.
                             100,  // RTT in ms.
-                            nullptr, nullptr);
+                            nullptr);
   // Expect the muter to disengage immediately.
   // Test during 2 seconds.
   for (int time = 0; time < 2000; time += frame_time_ms_) {
@@ -138,7 +138,7 @@
   // Using 10% of codec bitrate for FEC, should still be able to use all of it.
   protection_callback.fec_rate_bps_ = kCodecBitrateBps / 10;
   uint32_t target_bitrate = media_opt_.SetTargetRates(
-      kMaxBitrateBps, 0, 0, &protection_callback, nullptr);
+      kMaxBitrateBps, 0, 0, &protection_callback);
 
   EXPECT_EQ(kCodecBitrateBps, static_cast<int>(target_bitrate));
 
@@ -146,7 +146,7 @@
   // both equally, but only be half of max (since that ceiling should be hit).
   protection_callback.fec_rate_bps_ = kCodecBitrateBps;
   target_bitrate = media_opt_.SetTargetRates(kMaxBitrateBps, 128, 100,
-                                             &protection_callback, nullptr);
+                                             &protection_callback);
   EXPECT_EQ(kMaxBitrateBps / 2, static_cast<int>(target_bitrate));
 }
 
diff --git a/webrtc/modules/video_coding/qm_select.cc b/webrtc/modules/video_coding/qm_select.cc
deleted file mode 100644
index a090ba1..0000000
--- a/webrtc/modules/video_coding/qm_select.cc
+++ /dev/null
@@ -1,901 +0,0 @@
-/*
- *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "webrtc/modules/video_coding/qm_select.h"
-
-#include <math.h>
-
-#include "webrtc/modules/include/module_common_types.h"
-#include "webrtc/modules/video_coding/include/video_coding_defines.h"
-#include "webrtc/modules/video_coding/internal_defines.h"
-#include "webrtc/modules/video_coding/qm_select_data.h"
-#include "webrtc/system_wrappers/include/trace.h"
-
-namespace webrtc {
-
-// QM-METHOD class
-
-VCMQmMethod::VCMQmMethod()
-    : content_metrics_(NULL),
-      width_(0),
-      height_(0),
-      user_frame_rate_(0.0f),
-      native_width_(0),
-      native_height_(0),
-      native_frame_rate_(0.0f),
-      image_type_(kVGA),
-      framerate_level_(kFrameRateHigh),
-      init_(false) {
-  ResetQM();
-}
-
-VCMQmMethod::~VCMQmMethod() {}
-
-void VCMQmMethod::ResetQM() {
-  aspect_ratio_ = 1.0f;
-  motion_.Reset();
-  spatial_.Reset();
-  content_class_ = 0;
-}
-
-uint8_t VCMQmMethod::ComputeContentClass() {
-  ComputeMotionNFD();
-  ComputeSpatial();
-  return content_class_ = 3 * motion_.level + spatial_.level;
-}
-
-void VCMQmMethod::UpdateContent(const VideoContentMetrics* contentMetrics) {
-  content_metrics_ = contentMetrics;
-}
-
-void VCMQmMethod::ComputeMotionNFD() {
-  if (content_metrics_) {
-    motion_.value = content_metrics_->motion_magnitude;
-  }
-  // Determine motion level.
-  if (motion_.value < kLowMotionNfd) {
-    motion_.level = kLow;
-  } else if (motion_.value > kHighMotionNfd) {
-    motion_.level = kHigh;
-  } else {
-    motion_.level = kDefault;
-  }
-}
-
-void VCMQmMethod::ComputeSpatial() {
-  float spatial_err = 0.0;
-  float spatial_err_h = 0.0;
-  float spatial_err_v = 0.0;
-  if (content_metrics_) {
-    spatial_err = content_metrics_->spatial_pred_err;
-    spatial_err_h = content_metrics_->spatial_pred_err_h;
-    spatial_err_v = content_metrics_->spatial_pred_err_v;
-  }
-  // Spatial measure: take average of 3 prediction errors.
-  spatial_.value = (spatial_err + spatial_err_h + spatial_err_v) / 3.0f;
-
-  // Reduce thresholds for large scenes/higher pixel correlation.
-  float scale2 = image_type_ > kVGA ? kScaleTexture : 1.0;
-
-  if (spatial_.value > scale2 * kHighTexture) {
-    spatial_.level = kHigh;
-  } else if (spatial_.value < scale2 * kLowTexture) {
-    spatial_.level = kLow;
-  } else {
-    spatial_.level = kDefault;
-  }
-}
-
-ImageType VCMQmMethod::GetImageType(uint16_t width, uint16_t height) {
-  // Get the image type for the encoder frame size.
-  uint32_t image_size = width * height;
-  if (image_size == kSizeOfImageType[kQCIF]) {
-    return kQCIF;
-  } else if (image_size == kSizeOfImageType[kHCIF]) {
-    return kHCIF;
-  } else if (image_size == kSizeOfImageType[kQVGA]) {
-    return kQVGA;
-  } else if (image_size == kSizeOfImageType[kCIF]) {
-    return kCIF;
-  } else if (image_size == kSizeOfImageType[kHVGA]) {
-    return kHVGA;
-  } else if (image_size == kSizeOfImageType[kVGA]) {
-    return kVGA;
-  } else if (image_size == kSizeOfImageType[kQFULLHD]) {
-    return kQFULLHD;
-  } else if (image_size == kSizeOfImageType[kWHD]) {
-    return kWHD;
-  } else if (image_size == kSizeOfImageType[kFULLHD]) {
-    return kFULLHD;
-  } else {
-    // No exact match, find closet one.
-    return FindClosestImageType(width, height);
-  }
-}
-
-ImageType VCMQmMethod::FindClosestImageType(uint16_t width, uint16_t height) {
-  float size = static_cast<float>(width * height);
-  float min = size;
-  int isel = 0;
-  for (int i = 0; i < kNumImageTypes; ++i) {
-    float dist = fabs(size - kSizeOfImageType[i]);
-    if (dist < min) {
-      min = dist;
-      isel = i;
-    }
-  }
-  return static_cast<ImageType>(isel);
-}
-
-FrameRateLevelClass VCMQmMethod::FrameRateLevel(float avg_framerate) {
-  if (avg_framerate <= kLowFrameRate) {
-    return kFrameRateLow;
-  } else if (avg_framerate <= kMiddleFrameRate) {
-    return kFrameRateMiddle1;
-  } else if (avg_framerate <= kHighFrameRate) {
-    return kFrameRateMiddle2;
-  } else {
-    return kFrameRateHigh;
-  }
-}
-
-// RESOLUTION CLASS
-
-VCMQmResolution::VCMQmResolution() : qm_(new VCMResolutionScale()) {
-  Reset();
-}
-
-VCMQmResolution::~VCMQmResolution() {
-  delete qm_;
-}
-
-void VCMQmResolution::ResetRates() {
-  sum_target_rate_ = 0.0f;
-  sum_incoming_framerate_ = 0.0f;
-  sum_rate_MM_ = 0.0f;
-  sum_rate_MM_sgn_ = 0.0f;
-  sum_packet_loss_ = 0.0f;
-  buffer_level_ = kInitBufferLevel * target_bitrate_;
-  frame_cnt_ = 0;
-  frame_cnt_delta_ = 0;
-  low_buffer_cnt_ = 0;
-  update_rate_cnt_ = 0;
-}
-
-void VCMQmResolution::ResetDownSamplingState() {
-  state_dec_factor_spatial_ = 1.0;
-  state_dec_factor_temporal_ = 1.0;
-  for (int i = 0; i < kDownActionHistorySize; i++) {
-    down_action_history_[i].spatial = kNoChangeSpatial;
-    down_action_history_[i].temporal = kNoChangeTemporal;
-  }
-}
-
-void VCMQmResolution::Reset() {
-  target_bitrate_ = 0.0f;
-  incoming_framerate_ = 0.0f;
-  buffer_level_ = 0.0f;
-  per_frame_bandwidth_ = 0.0f;
-  avg_target_rate_ = 0.0f;
-  avg_incoming_framerate_ = 0.0f;
-  avg_ratio_buffer_low_ = 0.0f;
-  avg_rate_mismatch_ = 0.0f;
-  avg_rate_mismatch_sgn_ = 0.0f;
-  avg_packet_loss_ = 0.0f;
-  encoder_state_ = kStableEncoding;
-  num_layers_ = 1;
-  ResetRates();
-  ResetDownSamplingState();
-  ResetQM();
-}
-
-EncoderState VCMQmResolution::GetEncoderState() {
-  return encoder_state_;
-}
-
-// Initialize state after re-initializing the encoder,
-// i.e., after SetEncodingData() in mediaOpt.
-int VCMQmResolution::Initialize(float bitrate,
-                                float user_framerate,
-                                uint16_t width,
-                                uint16_t height,
-                                int num_layers) {
-  if (user_framerate == 0.0f || width == 0 || height == 0) {
-    return VCM_PARAMETER_ERROR;
-  }
-  Reset();
-  target_bitrate_ = bitrate;
-  incoming_framerate_ = user_framerate;
-  UpdateCodecParameters(user_framerate, width, height);
-  native_width_ = width;
-  native_height_ = height;
-  native_frame_rate_ = user_framerate;
-  num_layers_ = num_layers;
-  // Initial buffer level.
-  buffer_level_ = kInitBufferLevel * target_bitrate_;
-  // Per-frame bandwidth.
-  per_frame_bandwidth_ = target_bitrate_ / user_framerate;
-  init_ = true;
-  return VCM_OK;
-}
-
-void VCMQmResolution::UpdateCodecParameters(float frame_rate,
-                                            uint16_t width,
-                                            uint16_t height) {
-  width_ = width;
-  height_ = height;
-  // |user_frame_rate| is the target frame rate for VPM frame dropper.
-  user_frame_rate_ = frame_rate;
-  image_type_ = GetImageType(width, height);
-}
-
-// Update rate data after every encoded frame.
-void VCMQmResolution::UpdateEncodedSize(size_t encoded_size) {
-  frame_cnt_++;
-  // Convert to Kbps.
-  float encoded_size_kbits = 8.0f * static_cast<float>(encoded_size) / 1000.0f;
-
-  // Update the buffer level:
-  // Note this is not the actual encoder buffer level.
-  // |buffer_level_| is reset to an initial value after SelectResolution is
-  // called, and does not account for frame dropping by encoder or VCM.
-  buffer_level_ += per_frame_bandwidth_ - encoded_size_kbits;
-
-  // Counter for occurrences of low buffer level:
-  // low/negative values means encoder is likely dropping frames.
-  if (buffer_level_ <= kPercBufferThr * kInitBufferLevel * target_bitrate_) {
-    low_buffer_cnt_++;
-  }
-}
-
-// Update various quantities after SetTargetRates in MediaOpt.
-void VCMQmResolution::UpdateRates(float target_bitrate,
-                                  float encoder_sent_rate,
-                                  float incoming_framerate,
-                                  uint8_t packet_loss) {
-  // Sum the target bitrate: this is the encoder rate from previous update
-  // (~1sec), i.e, before the update for next ~1sec.
-  sum_target_rate_ += target_bitrate_;
-  update_rate_cnt_++;
-
-  // Sum the received (from RTCP reports) packet loss rates.
-  sum_packet_loss_ += static_cast<float>(packet_loss / 255.0);
-
-  // Sum the sequence rate mismatch:
-  // Mismatch here is based on the difference between the target rate
-  // used (in previous ~1sec) and the average actual encoding rate measured
-  // at previous ~1sec.
-  float diff = target_bitrate_ - encoder_sent_rate;
-  if (target_bitrate_ > 0.0)
-    sum_rate_MM_ += fabs(diff) / target_bitrate_;
-  int sgnDiff = diff > 0 ? 1 : (diff < 0 ? -1 : 0);
-  // To check for consistent under(+)/over_shooting(-) of target rate.
-  sum_rate_MM_sgn_ += sgnDiff;
-
-  // Update with the current new target and frame rate:
-  // these values are ones the encoder will use for the current/next ~1sec.
-  target_bitrate_ = target_bitrate;
-  incoming_framerate_ = incoming_framerate;
-  sum_incoming_framerate_ += incoming_framerate_;
-  // Update the per_frame_bandwidth:
-  // this is the per_frame_bw for the current/next ~1sec.
-  per_frame_bandwidth_ = 0.0f;
-  if (incoming_framerate_ > 0.0f) {
-    per_frame_bandwidth_ = target_bitrate_ / incoming_framerate_;
-  }
-}
-
-// Select the resolution factors: frame size and frame rate change (qm scales).
-// Selection is for going down in resolution, or for going back up
-// (if a previous down-sampling action was taken).
-
-// In the current version the following constraints are imposed:
-// 1) We only allow for one action, either down or up, at a given time.
-// 2) The possible down-sampling actions are: spatial by 1/2x1/2, 3/4x3/4;
-//    temporal/frame rate reduction by 1/2 and 2/3.
-// 3) The action for going back up is the reverse of last (spatial or temporal)
-//    down-sampling action. The list of down-sampling actions from the
-//    Initialize() state are kept in |down_action_history_|.
-// 4) The total amount of down-sampling (spatial and/or temporal) from the
-//    Initialize() state (native resolution) is limited by various factors.
-int VCMQmResolution::SelectResolution(VCMResolutionScale** qm) {
-  if (!init_) {
-    return VCM_UNINITIALIZED;
-  }
-  if (content_metrics_ == NULL) {
-    Reset();
-    *qm = qm_;
-    return VCM_OK;
-  }
-
-  // Check conditions on down-sampling state.
-  assert(state_dec_factor_spatial_ >= 1.0f);
-  assert(state_dec_factor_temporal_ >= 1.0f);
-  assert(state_dec_factor_spatial_ <= kMaxSpatialDown);
-  assert(state_dec_factor_temporal_ <= kMaxTempDown);
-  assert(state_dec_factor_temporal_ * state_dec_factor_spatial_ <=
-         kMaxTotalDown);
-
-  // Compute content class for selection.
-  content_class_ = ComputeContentClass();
-  // Compute various rate quantities for selection.
-  ComputeRatesForSelection();
-
-  // Get the encoder state.
-  ComputeEncoderState();
-
-  // Default settings: no action.
-  SetDefaultAction();
-  *qm = qm_;
-
-  // Check for going back up in resolution, if we have had some down-sampling
-  // relative to native state in Initialize().
-  if (down_action_history_[0].spatial != kNoChangeSpatial ||
-      down_action_history_[0].temporal != kNoChangeTemporal) {
-    if (GoingUpResolution()) {
-      *qm = qm_;
-      return VCM_OK;
-    }
-  }
-
-  // Check for going down in resolution.
-  if (GoingDownResolution()) {
-    *qm = qm_;
-    return VCM_OK;
-  }
-  return VCM_OK;
-}
-
-void VCMQmResolution::SetDefaultAction() {
-  qm_->codec_width = width_;
-  qm_->codec_height = height_;
-  qm_->frame_rate = user_frame_rate_;
-  qm_->change_resolution_spatial = false;
-  qm_->change_resolution_temporal = false;
-  qm_->spatial_width_fact = 1.0f;
-  qm_->spatial_height_fact = 1.0f;
-  qm_->temporal_fact = 1.0f;
-  action_.spatial = kNoChangeSpatial;
-  action_.temporal = kNoChangeTemporal;
-}
-
-void VCMQmResolution::ComputeRatesForSelection() {
-  avg_target_rate_ = 0.0f;
-  avg_incoming_framerate_ = 0.0f;
-  avg_ratio_buffer_low_ = 0.0f;
-  avg_rate_mismatch_ = 0.0f;
-  avg_rate_mismatch_sgn_ = 0.0f;
-  avg_packet_loss_ = 0.0f;
-  if (frame_cnt_ > 0) {
-    avg_ratio_buffer_low_ =
-        static_cast<float>(low_buffer_cnt_) / static_cast<float>(frame_cnt_);
-  }
-  if (update_rate_cnt_ > 0) {
-    avg_rate_mismatch_ =
-        static_cast<float>(sum_rate_MM_) / static_cast<float>(update_rate_cnt_);
-    avg_rate_mismatch_sgn_ = static_cast<float>(sum_rate_MM_sgn_) /
-                             static_cast<float>(update_rate_cnt_);
-    avg_target_rate_ = static_cast<float>(sum_target_rate_) /
-                       static_cast<float>(update_rate_cnt_);
-    avg_incoming_framerate_ = static_cast<float>(sum_incoming_framerate_) /
-                              static_cast<float>(update_rate_cnt_);
-    avg_packet_loss_ = static_cast<float>(sum_packet_loss_) /
-                       static_cast<float>(update_rate_cnt_);
-  }
-  // For selection we may want to weight some quantities more heavily
-  // with the current (i.e., next ~1sec) rate values.
-  avg_target_rate_ =
-      kWeightRate * avg_target_rate_ + (1.0 - kWeightRate) * target_bitrate_;
-  avg_incoming_framerate_ = kWeightRate * avg_incoming_framerate_ +
-                            (1.0 - kWeightRate) * incoming_framerate_;
-  // Use base layer frame rate for temporal layers: this will favor spatial.
-  assert(num_layers_ > 0);
-  framerate_level_ = FrameRateLevel(avg_incoming_framerate_ /
-                                    static_cast<float>(1 << (num_layers_ - 1)));
-}
-
-void VCMQmResolution::ComputeEncoderState() {
-  // Default.
-  encoder_state_ = kStableEncoding;
-
-  // Assign stressed state if:
-  // 1) occurrences of low buffer levels is high, or
-  // 2) rate mis-match is high, and consistent over-shooting by encoder.
-  if ((avg_ratio_buffer_low_ > kMaxBufferLow) ||
-      ((avg_rate_mismatch_ > kMaxRateMisMatch) &&
-       (avg_rate_mismatch_sgn_ < -kRateOverShoot))) {
-    encoder_state_ = kStressedEncoding;
-  }
-  // Assign easy state if:
-  // 1) rate mis-match is high, and
-  // 2) consistent under-shooting by encoder.
-  if ((avg_rate_mismatch_ > kMaxRateMisMatch) &&
-      (avg_rate_mismatch_sgn_ > kRateUnderShoot)) {
-    encoder_state_ = kEasyEncoding;
-  }
-}
-
-bool VCMQmResolution::GoingUpResolution() {
-  // For going up, we check for undoing the previous down-sampling action.
-
-  float fac_width = kFactorWidthSpatial[down_action_history_[0].spatial];
-  float fac_height = kFactorHeightSpatial[down_action_history_[0].spatial];
-  float fac_temp = kFactorTemporal[down_action_history_[0].temporal];
-  // For going up spatially, we allow for going up by 3/4x3/4 at each stage.
-  // So if the last spatial action was 1/2x1/2 it would be undone in 2 stages.
-  // Modify the fac_width/height for this case.
-  if (down_action_history_[0].spatial == kOneQuarterSpatialUniform) {
-    fac_width = kFactorWidthSpatial[kOneQuarterSpatialUniform] /
-                kFactorWidthSpatial[kOneHalfSpatialUniform];
-    fac_height = kFactorHeightSpatial[kOneQuarterSpatialUniform] /
-                 kFactorHeightSpatial[kOneHalfSpatialUniform];
-  }
-
-  // Check if we should go up both spatially and temporally.
-  if (down_action_history_[0].spatial != kNoChangeSpatial &&
-      down_action_history_[0].temporal != kNoChangeTemporal) {
-    if (ConditionForGoingUp(fac_width, fac_height, fac_temp,
-                            kTransRateScaleUpSpatialTemp)) {
-      action_.spatial = down_action_history_[0].spatial;
-      action_.temporal = down_action_history_[0].temporal;
-      UpdateDownsamplingState(kUpResolution);
-      return true;
-    }
-  }
-  // Check if we should go up either spatially or temporally.
-  bool selected_up_spatial = false;
-  bool selected_up_temporal = false;
-  if (down_action_history_[0].spatial != kNoChangeSpatial) {
-    selected_up_spatial = ConditionForGoingUp(fac_width, fac_height, 1.0f,
-                                              kTransRateScaleUpSpatial);
-  }
-  if (down_action_history_[0].temporal != kNoChangeTemporal) {
-    selected_up_temporal =
-        ConditionForGoingUp(1.0f, 1.0f, fac_temp, kTransRateScaleUpTemp);
-  }
-  if (selected_up_spatial && !selected_up_temporal) {
-    action_.spatial = down_action_history_[0].spatial;
-    action_.temporal = kNoChangeTemporal;
-    UpdateDownsamplingState(kUpResolution);
-    return true;
-  } else if (!selected_up_spatial && selected_up_temporal) {
-    action_.spatial = kNoChangeSpatial;
-    action_.temporal = down_action_history_[0].temporal;
-    UpdateDownsamplingState(kUpResolution);
-    return true;
-  } else if (selected_up_spatial && selected_up_temporal) {
-    PickSpatialOrTemporal();
-    UpdateDownsamplingState(kUpResolution);
-    return true;
-  }
-  return false;
-}
-
-bool VCMQmResolution::ConditionForGoingUp(float fac_width,
-                                          float fac_height,
-                                          float fac_temp,
-                                          float scale_fac) {
-  float estimated_transition_rate_up =
-      GetTransitionRate(fac_width, fac_height, fac_temp, scale_fac);
-  // Go back up if:
-  // 1) target rate is above threshold and current encoder state is stable, or
-  // 2) encoder state is easy (encoder is significantly under-shooting target).
-  if (((avg_target_rate_ > estimated_transition_rate_up) &&
-       (encoder_state_ == kStableEncoding)) ||
-      (encoder_state_ == kEasyEncoding)) {
-    return true;
-  } else {
-    return false;
-  }
-}
-
-bool VCMQmResolution::GoingDownResolution() {
-  float estimated_transition_rate_down =
-      GetTransitionRate(1.0f, 1.0f, 1.0f, 1.0f);
-  float max_rate = kFrameRateFac[framerate_level_] * kMaxRateQm[image_type_];
-  // Resolution reduction if:
-  // (1) target rate is below transition rate, or
-  // (2) encoder is in stressed state and target rate below a max threshold.
-  if ((avg_target_rate_ < estimated_transition_rate_down) ||
-      (encoder_state_ == kStressedEncoding && avg_target_rate_ < max_rate)) {
-    // Get the down-sampling action: based on content class, and how low
-    // average target rate is relative to transition rate.
-    uint8_t spatial_fact =
-        kSpatialAction[content_class_ +
-                       9 * RateClass(estimated_transition_rate_down)];
-    uint8_t temp_fact =
-        kTemporalAction[content_class_ +
-                        9 * RateClass(estimated_transition_rate_down)];
-
-    switch (spatial_fact) {
-      case 4: {
-        action_.spatial = kOneQuarterSpatialUniform;
-        break;
-      }
-      case 2: {
-        action_.spatial = kOneHalfSpatialUniform;
-        break;
-      }
-      case 1: {
-        action_.spatial = kNoChangeSpatial;
-        break;
-      }
-      default: { assert(false); }
-    }
-    switch (temp_fact) {
-      case 3: {
-        action_.temporal = kTwoThirdsTemporal;
-        break;
-      }
-      case 2: {
-        action_.temporal = kOneHalfTemporal;
-        break;
-      }
-      case 1: {
-        action_.temporal = kNoChangeTemporal;
-        break;
-      }
-      default: { assert(false); }
-    }
-    // Only allow for one action (spatial or temporal) at a given time.
-    assert(action_.temporal == kNoChangeTemporal ||
-           action_.spatial == kNoChangeSpatial);
-
-    // Adjust cases not captured in tables, mainly based on frame rate, and
-    // also check for odd frame sizes.
-    AdjustAction();
-
-    // Update down-sampling state.
-    if (action_.spatial != kNoChangeSpatial ||
-        action_.temporal != kNoChangeTemporal) {
-      UpdateDownsamplingState(kDownResolution);
-      return true;
-    }
-  }
-  return false;
-}
-
-float VCMQmResolution::GetTransitionRate(float fac_width,
-                                         float fac_height,
-                                         float fac_temp,
-                                         float scale_fac) {
-  ImageType image_type =
-      GetImageType(static_cast<uint16_t>(fac_width * width_),
-                   static_cast<uint16_t>(fac_height * height_));
-
-  FrameRateLevelClass framerate_level =
-      FrameRateLevel(fac_temp * avg_incoming_framerate_);
-  // If we are checking for going up temporally, and this is the last
-  // temporal action, then use native frame rate.
-  if (down_action_history_[1].temporal == kNoChangeTemporal &&
-      fac_temp > 1.0f) {
-    framerate_level = FrameRateLevel(native_frame_rate_);
-  }
-
-  // The maximum allowed rate below which down-sampling is allowed:
-  // Nominal values based on image format (frame size and frame rate).
-  float max_rate = kFrameRateFac[framerate_level] * kMaxRateQm[image_type];
-
-  uint8_t image_class = image_type > kVGA ? 1 : 0;
-  uint8_t table_index = image_class * 9 + content_class_;
-  // Scale factor for down-sampling transition threshold:
-  // factor based on the content class and the image size.
-  float scaleTransRate = kScaleTransRateQm[table_index];
-  // Threshold bitrate for resolution action.
-  return static_cast<float>(scale_fac * scaleTransRate * max_rate);
-}
-
-void VCMQmResolution::UpdateDownsamplingState(UpDownAction up_down) {
-  if (up_down == kUpResolution) {
-    qm_->spatial_width_fact = 1.0f / kFactorWidthSpatial[action_.spatial];
-    qm_->spatial_height_fact = 1.0f / kFactorHeightSpatial[action_.spatial];
-    // If last spatial action was 1/2x1/2, we undo it in two steps, so the
-    // spatial scale factor in this first step is modified as (4.0/3.0 / 2.0).
-    if (action_.spatial == kOneQuarterSpatialUniform) {
-      qm_->spatial_width_fact = 1.0f *
-                                kFactorWidthSpatial[kOneHalfSpatialUniform] /
-                                kFactorWidthSpatial[kOneQuarterSpatialUniform];
-      qm_->spatial_height_fact =
-          1.0f * kFactorHeightSpatial[kOneHalfSpatialUniform] /
-          kFactorHeightSpatial[kOneQuarterSpatialUniform];
-    }
-    qm_->temporal_fact = 1.0f / kFactorTemporal[action_.temporal];
-    RemoveLastDownAction();
-  } else if (up_down == kDownResolution) {
-    ConstrainAmountOfDownSampling();
-    ConvertSpatialFractionalToWhole();
-    qm_->spatial_width_fact = kFactorWidthSpatial[action_.spatial];
-    qm_->spatial_height_fact = kFactorHeightSpatial[action_.spatial];
-    qm_->temporal_fact = kFactorTemporal[action_.temporal];
-    InsertLatestDownAction();
-  } else {
-    // This function should only be called if either the Up or Down action
-    // has been selected.
-    assert(false);
-  }
-  UpdateCodecResolution();
-  state_dec_factor_spatial_ = state_dec_factor_spatial_ *
-                              qm_->spatial_width_fact *
-                              qm_->spatial_height_fact;
-  state_dec_factor_temporal_ = state_dec_factor_temporal_ * qm_->temporal_fact;
-}
-
-void VCMQmResolution::UpdateCodecResolution() {
-  if (action_.spatial != kNoChangeSpatial) {
-    qm_->change_resolution_spatial = true;
-    qm_->codec_width =
-        static_cast<uint16_t>(width_ / qm_->spatial_width_fact + 0.5f);
-    qm_->codec_height =
-        static_cast<uint16_t>(height_ / qm_->spatial_height_fact + 0.5f);
-    // Size should not exceed native sizes.
-    assert(qm_->codec_width <= native_width_);
-    assert(qm_->codec_height <= native_height_);
-    // New sizes should be multiple of 2, otherwise spatial should not have
-    // been selected.
-    assert(qm_->codec_width % 2 == 0);
-    assert(qm_->codec_height % 2 == 0);
-  }
-  if (action_.temporal != kNoChangeTemporal) {
-    qm_->change_resolution_temporal = true;
-    // Update the frame rate based on the average incoming frame rate.
-    qm_->frame_rate = avg_incoming_framerate_ / qm_->temporal_fact + 0.5f;
-    if (down_action_history_[0].temporal == 0) {
-      // When we undo the last temporal-down action, make sure we go back up
-      // to the native frame rate. Since the incoming frame rate may
-      // fluctuate over time, |avg_incoming_framerate_| scaled back up may
-      // be smaller than |native_frame rate_|.
-      qm_->frame_rate = native_frame_rate_;
-    }
-  }
-}
-
-uint8_t VCMQmResolution::RateClass(float transition_rate) {
-  return avg_target_rate_ < (kFacLowRate * transition_rate)
-             ? 0
-             : (avg_target_rate_ >= transition_rate ? 2 : 1);
-}
-
-// TODO(marpan): Would be better to capture these frame rate adjustments by
-// extending the table data (qm_select_data.h).
-void VCMQmResolution::AdjustAction() {
-  // If the spatial level is default state (neither low or high), motion level
-  // is not high, and spatial action was selected, switch to 2/3 frame rate
-  // reduction if the average incoming frame rate is high.
-  if (spatial_.level == kDefault && motion_.level != kHigh &&
-      action_.spatial != kNoChangeSpatial &&
-      framerate_level_ == kFrameRateHigh) {
-    action_.spatial = kNoChangeSpatial;
-    action_.temporal = kTwoThirdsTemporal;
-  }
-  // If both motion and spatial level are low, and temporal down action was
-  // selected, switch to spatial 3/4x3/4 if the frame rate is not above the
-  // lower middle level (|kFrameRateMiddle1|).
-  if (motion_.level == kLow && spatial_.level == kLow &&
-      framerate_level_ <= kFrameRateMiddle1 &&
-      action_.temporal != kNoChangeTemporal) {
-    action_.spatial = kOneHalfSpatialUniform;
-    action_.temporal = kNoChangeTemporal;
-  }
-  // If spatial action is selected, and there has been too much spatial
-  // reduction already (i.e., 1/4), then switch to temporal action if the
-  // average frame rate is not low.
-  if (action_.spatial != kNoChangeSpatial &&
-      down_action_history_[0].spatial == kOneQuarterSpatialUniform &&
-      framerate_level_ != kFrameRateLow) {
-    action_.spatial = kNoChangeSpatial;
-    action_.temporal = kTwoThirdsTemporal;
-  }
-  // Never use temporal action if number of temporal layers is above 2.
-  if (num_layers_ > 2) {
-    if (action_.temporal != kNoChangeTemporal) {
-      action_.spatial = kOneHalfSpatialUniform;
-    }
-    action_.temporal = kNoChangeTemporal;
-  }
-  // If spatial action was selected, we need to make sure the frame sizes
-  // are multiples of two. Otherwise switch to 2/3 temporal.
-  if (action_.spatial != kNoChangeSpatial && !EvenFrameSize()) {
-    action_.spatial = kNoChangeSpatial;
-    // Only one action (spatial or temporal) is allowed at a given time, so need
-    // to check whether temporal action is currently selected.
-    action_.temporal = kTwoThirdsTemporal;
-  }
-}
-
-void VCMQmResolution::ConvertSpatialFractionalToWhole() {
-  // If 3/4 spatial is selected, check if there has been another 3/4,
-  // and if so, combine them into 1/2. 1/2 scaling is more efficient than 9/16.
-  // Note we define 3/4x3/4 spatial as kOneHalfSpatialUniform.
-  if (action_.spatial == kOneHalfSpatialUniform) {
-    bool found = false;
-    int isel = kDownActionHistorySize;
-    for (int i = 0; i < kDownActionHistorySize; ++i) {
-      if (down_action_history_[i].spatial == kOneHalfSpatialUniform) {
-        isel = i;
-        found = true;
-        break;
-      }
-    }
-    if (found) {
-      action_.spatial = kOneQuarterSpatialUniform;
-      state_dec_factor_spatial_ =
-          state_dec_factor_spatial_ /
-          (kFactorWidthSpatial[kOneHalfSpatialUniform] *
-           kFactorHeightSpatial[kOneHalfSpatialUniform]);
-      // Check if switching to 1/2x1/2 (=1/4) spatial is allowed.
-      ConstrainAmountOfDownSampling();
-      if (action_.spatial == kNoChangeSpatial) {
-        // Not allowed. Go back to 3/4x3/4 spatial.
-        action_.spatial = kOneHalfSpatialUniform;
-        state_dec_factor_spatial_ =
-            state_dec_factor_spatial_ *
-            kFactorWidthSpatial[kOneHalfSpatialUniform] *
-            kFactorHeightSpatial[kOneHalfSpatialUniform];
-      } else {
-        // Switching is allowed. Remove 3/4x3/4 from the history, and update
-        // the frame size.
-        for (int i = isel; i < kDownActionHistorySize - 1; ++i) {
-          down_action_history_[i].spatial = down_action_history_[i + 1].spatial;
-        }
-        width_ = width_ * kFactorWidthSpatial[kOneHalfSpatialUniform];
-        height_ = height_ * kFactorHeightSpatial[kOneHalfSpatialUniform];
-      }
-    }
-  }
-}
-
-// Returns false if the new frame sizes, under the current spatial action,
-// are not multiples of two.
-bool VCMQmResolution::EvenFrameSize() {
-  if (action_.spatial == kOneHalfSpatialUniform) {
-    if ((width_ * 3 / 4) % 2 != 0 || (height_ * 3 / 4) % 2 != 0) {
-      return false;
-    }
-  } else if (action_.spatial == kOneQuarterSpatialUniform) {
-    if ((width_ * 1 / 2) % 2 != 0 || (height_ * 1 / 2) % 2 != 0) {
-      return false;
-    }
-  }
-  return true;
-}
-
-void VCMQmResolution::InsertLatestDownAction() {
-  if (action_.spatial != kNoChangeSpatial) {
-    for (int i = kDownActionHistorySize - 1; i > 0; --i) {
-      down_action_history_[i].spatial = down_action_history_[i - 1].spatial;
-    }
-    down_action_history_[0].spatial = action_.spatial;
-  }
-  if (action_.temporal != kNoChangeTemporal) {
-    for (int i = kDownActionHistorySize - 1; i > 0; --i) {
-      down_action_history_[i].temporal = down_action_history_[i - 1].temporal;
-    }
-    down_action_history_[0].temporal = action_.temporal;
-  }
-}
-
-void VCMQmResolution::RemoveLastDownAction() {
-  if (action_.spatial != kNoChangeSpatial) {
-    // If the last spatial action was 1/2x1/2 we replace it with 3/4x3/4.
-    if (action_.spatial == kOneQuarterSpatialUniform) {
-      down_action_history_[0].spatial = kOneHalfSpatialUniform;
-    } else {
-      for (int i = 0; i < kDownActionHistorySize - 1; ++i) {
-        down_action_history_[i].spatial = down_action_history_[i + 1].spatial;
-      }
-      down_action_history_[kDownActionHistorySize - 1].spatial =
-          kNoChangeSpatial;
-    }
-  }
-  if (action_.temporal != kNoChangeTemporal) {
-    for (int i = 0; i < kDownActionHistorySize - 1; ++i) {
-      down_action_history_[i].temporal = down_action_history_[i + 1].temporal;
-    }
-    down_action_history_[kDownActionHistorySize - 1].temporal =
-        kNoChangeTemporal;
-  }
-}
-
-void VCMQmResolution::ConstrainAmountOfDownSampling() {
-  // Sanity checks on down-sampling selection:
-  // override the settings for too small image size and/or frame rate.
-  // Also check the limit on current down-sampling states.
-
-  float spatial_width_fact = kFactorWidthSpatial[action_.spatial];
-  float spatial_height_fact = kFactorHeightSpatial[action_.spatial];
-  float temporal_fact = kFactorTemporal[action_.temporal];
-  float new_dec_factor_spatial =
-      state_dec_factor_spatial_ * spatial_width_fact * spatial_height_fact;
-  float new_dec_factor_temp = state_dec_factor_temporal_ * temporal_fact;
-
-  // No spatial sampling if current frame size is too small, or if the
-  // amount of spatial down-sampling is above maximum spatial down-action.
-  if ((width_ * height_) <= kMinImageSize ||
-      new_dec_factor_spatial > kMaxSpatialDown) {
-    action_.spatial = kNoChangeSpatial;
-    new_dec_factor_spatial = state_dec_factor_spatial_;
-  }
-  // No frame rate reduction if average frame rate is below some point, or if
-  // the amount of temporal down-sampling is above maximum temporal down-action.
-  if (avg_incoming_framerate_ <= kMinFrameRate ||
-      new_dec_factor_temp > kMaxTempDown) {
-    action_.temporal = kNoChangeTemporal;
-    new_dec_factor_temp = state_dec_factor_temporal_;
-  }
-  // Check if the total (spatial-temporal) down-action is above maximum allowed,
-  // if so, disallow the current selected down-action.
-  if (new_dec_factor_spatial * new_dec_factor_temp > kMaxTotalDown) {
-    if (action_.spatial != kNoChangeSpatial) {
-      action_.spatial = kNoChangeSpatial;
-    } else if (action_.temporal != kNoChangeTemporal) {
-      action_.temporal = kNoChangeTemporal;
-    } else {
-      // We only allow for one action (spatial or temporal) at a given time, so
-      // either spatial or temporal action is selected when this function is
-      // called. If the selected action is disallowed from one of the above
-      // 2 prior conditions (on spatial & temporal max down-action), then this
-      // condition "total down-action > |kMaxTotalDown|" would not be entered.
-      assert(false);
-    }
-  }
-}
-
-void VCMQmResolution::PickSpatialOrTemporal() {
-  // Pick the one that has had the most down-sampling thus far.
-  if (state_dec_factor_spatial_ > state_dec_factor_temporal_) {
-    action_.spatial = down_action_history_[0].spatial;
-    action_.temporal = kNoChangeTemporal;
-  } else {
-    action_.spatial = kNoChangeSpatial;
-    action_.temporal = down_action_history_[0].temporal;
-  }
-}
-
-// TODO(marpan): Update when we allow for directional spatial down-sampling.
-void VCMQmResolution::SelectSpatialDirectionMode(float transition_rate) {
-  // Default is 4/3x4/3
-  // For bit rates well below transitional rate, we select 2x2.
-  if (avg_target_rate_ < transition_rate * kRateRedSpatial2X2) {
-    qm_->spatial_width_fact = 2.0f;
-    qm_->spatial_height_fact = 2.0f;
-  }
-  // Otherwise check prediction errors and aspect ratio.
-  float spatial_err = 0.0f;
-  float spatial_err_h = 0.0f;
-  float spatial_err_v = 0.0f;
-  if (content_metrics_) {
-    spatial_err = content_metrics_->spatial_pred_err;
-    spatial_err_h = content_metrics_->spatial_pred_err_h;
-    spatial_err_v = content_metrics_->spatial_pred_err_v;
-  }
-
-  // Favor 1x2 if aspect_ratio is 16:9.
-  if (aspect_ratio_ >= 16.0f / 9.0f) {
-    // Check if 1x2 has lowest prediction error.
-    if (spatial_err_h < spatial_err && spatial_err_h < spatial_err_v) {
-      qm_->spatial_width_fact = 2.0f;
-      qm_->spatial_height_fact = 1.0f;
-    }
-  }
-  // Check for 4/3x4/3 selection: favor 2x2 over 1x2 and 2x1.
-  if (spatial_err < spatial_err_h * (1.0f + kSpatialErr2x2VsHoriz) &&
-      spatial_err < spatial_err_v * (1.0f + kSpatialErr2X2VsVert)) {
-    qm_->spatial_width_fact = 4.0f / 3.0f;
-    qm_->spatial_height_fact = 4.0f / 3.0f;
-  }
-  // Check for 2x1 selection.
-  if (spatial_err_v < spatial_err_h * (1.0f - kSpatialErrVertVsHoriz) &&
-      spatial_err_v < spatial_err * (1.0f - kSpatialErr2X2VsVert)) {
-    qm_->spatial_width_fact = 1.0f;
-    qm_->spatial_height_fact = 2.0f;
-  }
-}
-
-}  // namespace webrtc
diff --git a/webrtc/modules/video_coding/qm_select.h b/webrtc/modules/video_coding/qm_select.h
deleted file mode 100644
index ae0463f..0000000
--- a/webrtc/modules/video_coding/qm_select.h
+++ /dev/null
@@ -1,326 +0,0 @@
-/*
- *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_VIDEO_CODING_QM_SELECT_H_
-#define WEBRTC_MODULES_VIDEO_CODING_QM_SELECT_H_
-
-#include "webrtc/common_types.h"
-#include "webrtc/typedefs.h"
-
-/******************************************************/
-/* Quality Modes: Resolution and Robustness settings  */
-/******************************************************/
-
-namespace webrtc {
-struct VideoContentMetrics;
-
-struct VCMResolutionScale {
-  VCMResolutionScale()
-      : codec_width(640),
-        codec_height(480),
-        frame_rate(30.0f),
-        spatial_width_fact(1.0f),
-        spatial_height_fact(1.0f),
-        temporal_fact(1.0f),
-        change_resolution_spatial(false),
-        change_resolution_temporal(false) {}
-  uint16_t codec_width;
-  uint16_t codec_height;
-  float frame_rate;
-  float spatial_width_fact;
-  float spatial_height_fact;
-  float temporal_fact;
-  bool change_resolution_spatial;
-  bool change_resolution_temporal;
-};
-
-enum ImageType {
-  kQCIF = 0,  // 176x144
-  kHCIF,      // 264x216 = half(~3/4x3/4) CIF.
-  kQVGA,      // 320x240 = quarter VGA.
-  kCIF,       // 352x288
-  kHVGA,      // 480x360 = half(~3/4x3/4) VGA.
-  kVGA,       // 640x480
-  kQFULLHD,   // 960x540 = quarter FULLHD, and half(~3/4x3/4) WHD.
-  kWHD,       // 1280x720
-  kFULLHD,    // 1920x1080
-  kNumImageTypes
-};
-
-const uint32_t kSizeOfImageType[kNumImageTypes] = {
-    25344, 57024, 76800, 101376, 172800, 307200, 518400, 921600, 2073600};
-
-enum FrameRateLevelClass {
-  kFrameRateLow,
-  kFrameRateMiddle1,
-  kFrameRateMiddle2,
-  kFrameRateHigh
-};
-
-enum ContentLevelClass { kLow, kHigh, kDefault };
-
-struct VCMContFeature {
-  VCMContFeature() : value(0.0f), level(kDefault) {}
-  void Reset() {
-    value = 0.0f;
-    level = kDefault;
-  }
-  float value;
-  ContentLevelClass level;
-};
-
-enum UpDownAction { kUpResolution, kDownResolution };
-
-enum SpatialAction {
-  kNoChangeSpatial,
-  kOneHalfSpatialUniform,     // 3/4 x 3/4: 9/16, ~1/2 pixel reduction.
-  kOneQuarterSpatialUniform,  // 1/2 x 1/2: 1/4 pixel reduction.
-  kNumModesSpatial
-};
-
-enum TemporalAction {
-  kNoChangeTemporal,
-  kTwoThirdsTemporal,  // 2/3 frame rate reduction
-  kOneHalfTemporal,    // 1/2 frame rate reduction
-  kNumModesTemporal
-};
-
-struct ResolutionAction {
-  ResolutionAction() : spatial(kNoChangeSpatial), temporal(kNoChangeTemporal) {}
-  SpatialAction spatial;
-  TemporalAction temporal;
-};
-
-// Down-sampling factors for spatial (width and height), and temporal.
-const float kFactorWidthSpatial[kNumModesSpatial] = {1.0f, 4.0f / 3.0f, 2.0f};
-
-const float kFactorHeightSpatial[kNumModesSpatial] = {1.0f, 4.0f / 3.0f, 2.0f};
-
-const float kFactorTemporal[kNumModesTemporal] = {1.0f, 1.5f, 2.0f};
-
-enum EncoderState {
-  kStableEncoding,    // Low rate mis-match, stable buffer levels.
-  kStressedEncoding,  // Significant over-shooting of target rate,
-                      // Buffer under-flow, etc.
-  kEasyEncoding       // Significant under-shooting of target rate.
-};
-
-// QmMethod class: main class for resolution and robustness settings
-
-class VCMQmMethod {
- public:
-  VCMQmMethod();
-  virtual ~VCMQmMethod();
-
-  // Reset values
-  void ResetQM();
-  virtual void Reset() = 0;
-
-  // Compute content class.
-  uint8_t ComputeContentClass();
-
-  // Update with the content metrics.
-  void UpdateContent(const VideoContentMetrics* content_metrics);
-
-  // Compute spatial texture magnitude and level.
-  // Spatial texture is a spatial prediction error measure.
-  void ComputeSpatial();
-
-  // Compute motion magnitude and level for NFD metric.
-  // NFD is normalized frame difference (normalized by spatial variance).
-  void ComputeMotionNFD();
-
-  // Get the imageType (CIF, VGA, HD, etc) for the system width/height.
-  ImageType GetImageType(uint16_t width, uint16_t height);
-
-  // Return the closest image type.
-  ImageType FindClosestImageType(uint16_t width, uint16_t height);
-
-  // Get the frame rate level.
-  FrameRateLevelClass FrameRateLevel(float frame_rate);
-
- protected:
-  // Content Data.
-  const VideoContentMetrics* content_metrics_;
-
-  // Encoder frame sizes and native frame sizes.
-  uint16_t width_;
-  uint16_t height_;
-  float user_frame_rate_;
-  uint16_t native_width_;
-  uint16_t native_height_;
-  float native_frame_rate_;
-  float aspect_ratio_;
-  // Image type and frame rate level, for the current encoder resolution.
-  ImageType image_type_;
-  FrameRateLevelClass framerate_level_;
-  // Content class data.
-  VCMContFeature motion_;
-  VCMContFeature spatial_;
-  uint8_t content_class_;
-  bool init_;
-};
-
-// Resolution settings class
-
-class VCMQmResolution : public VCMQmMethod {
- public:
-  VCMQmResolution();
-  virtual ~VCMQmResolution();
-
-  // Reset all quantities.
-  virtual void Reset();
-
-  // Reset rate quantities and counters after every SelectResolution() call.
-  void ResetRates();
-
-  // Reset down-sampling state.
-  void ResetDownSamplingState();
-
-  // Get the encoder state.
-  EncoderState GetEncoderState();
-
-  // Initialize after SetEncodingData in media_opt.
-  int Initialize(float bitrate,
-                 float user_framerate,
-                 uint16_t width,
-                 uint16_t height,
-                 int num_layers);
-
-  // Update the encoder frame size.
-  void UpdateCodecParameters(float frame_rate, uint16_t width, uint16_t height);
-
-  // Update with actual bit rate (size of the latest encoded frame)
-  // and frame type, after every encoded frame.
-  void UpdateEncodedSize(size_t encoded_size);
-
-  // Update with new target bitrate, actual encoder sent rate, frame_rate,
-  // loss rate: every ~1 sec from SetTargetRates in media_opt.
-  void UpdateRates(float target_bitrate,
-                   float encoder_sent_rate,
-                   float incoming_framerate,
-                   uint8_t packet_loss);
-
-  // Extract ST (spatio-temporal) resolution action.
-  // Inputs: qm: Reference to the quality modes pointer.
-  // Output: the spatial and/or temporal scale change.
-  int SelectResolution(VCMResolutionScale** qm);
-
- private:
-  // Set the default resolution action.
-  void SetDefaultAction();
-
-  // Compute rates for the selection of down-sampling action.
-  void ComputeRatesForSelection();
-
-  // Compute the encoder state.
-  void ComputeEncoderState();
-
-  // Return true if the action is to go back up in resolution.
-  bool GoingUpResolution();
-
-  // Return true if the action is to go down in resolution.
-  bool GoingDownResolution();
-
-  // Check the condition for going up in resolution by the scale factors:
-  // |facWidth|, |facHeight|, |facTemp|.
-  // |scaleFac| is a scale factor for the transition rate.
-  bool ConditionForGoingUp(float fac_width,
-                           float fac_height,
-                           float fac_temp,
-                           float scale_fac);
-
-  // Get the bitrate threshold for the resolution action.
-  // The case |facWidth|=|facHeight|=|facTemp|==1 is for down-sampling action.
-  // |scaleFac| is a scale factor for the transition rate.
-  float GetTransitionRate(float fac_width,
-                          float fac_height,
-                          float fac_temp,
-                          float scale_fac);
-
-  // Update the down-sampling state.
-  void UpdateDownsamplingState(UpDownAction up_down);
-
-  // Update the codec frame size and frame rate.
-  void UpdateCodecResolution();
-
-  // Return a state based on average target rate relative transition rate.
-  uint8_t RateClass(float transition_rate);
-
-  // Adjust the action selected from the table.
-  void AdjustAction();
-
-  // Convert 2 stages of 3/4 (=9/16) spatial decimation to 1/2.
-  void ConvertSpatialFractionalToWhole();
-
-  // Returns true if the new frame sizes, under the selected spatial action,
-  // are of even size.
-  bool EvenFrameSize();
-
-  // Insert latest down-sampling action into the history list.
-  void InsertLatestDownAction();
-
-  // Remove the last (first element) down-sampling action from the list.
-  void RemoveLastDownAction();
-
-  // Check constraints on the amount of down-sampling allowed.
-  void ConstrainAmountOfDownSampling();
-
-  // For going up in resolution: pick spatial or temporal action,
-  // if both actions were separately selected.
-  void PickSpatialOrTemporal();
-
-  // Select the directional (1x2 or 2x1) spatial down-sampling action.
-  void SelectSpatialDirectionMode(float transition_rate);
-
-  enum { kDownActionHistorySize = 10 };
-
-  VCMResolutionScale* qm_;
-  // Encoder rate control parameters.
-  float target_bitrate_;
-  float incoming_framerate_;
-  float per_frame_bandwidth_;
-  float buffer_level_;
-
-  // Data accumulated every ~1sec from MediaOpt.
-  float sum_target_rate_;
-  float sum_incoming_framerate_;
-  float sum_rate_MM_;
-  float sum_rate_MM_sgn_;
-  float sum_packet_loss_;
-  // Counters.
-  uint32_t frame_cnt_;
-  uint32_t frame_cnt_delta_;
-  uint32_t update_rate_cnt_;
-  uint32_t low_buffer_cnt_;
-
-  // Resolution state parameters.
-  float state_dec_factor_spatial_;
-  float state_dec_factor_temporal_;
-
-  // Quantities used for selection.
-  float avg_target_rate_;
-  float avg_incoming_framerate_;
-  float avg_ratio_buffer_low_;
-  float avg_rate_mismatch_;
-  float avg_rate_mismatch_sgn_;
-  float avg_packet_loss_;
-  EncoderState encoder_state_;
-  ResolutionAction action_;
-  // Short history of the down-sampling actions from the Initialize() state.
-  // This is needed for going up in resolution. Since the total amount of
-  // down-sampling actions is constrained, the length of the list need not be
-  // large: i.e., (4/3) ^{kDownActionHistorySize} <= kMaxDownSample.
-  ResolutionAction down_action_history_[kDownActionHistorySize];
-  int num_layers_;
-};
-
-}  // namespace webrtc
-#endif  // WEBRTC_MODULES_VIDEO_CODING_QM_SELECT_H_
diff --git a/webrtc/modules/video_coding/qm_select_data.h b/webrtc/modules/video_coding/qm_select_data.h
deleted file mode 100644
index 49190ef..0000000
--- a/webrtc/modules/video_coding/qm_select_data.h
+++ /dev/null
@@ -1,227 +0,0 @@
-/*
- *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_VIDEO_CODING_QM_SELECT_DATA_H_
-#define WEBRTC_MODULES_VIDEO_CODING_QM_SELECT_DATA_H_
-
-/***************************************************************
-* QMSelectData.h
-* This file includes parameters for content-aware media optimization.
-****************************************************************/
-
-#include "webrtc/typedefs.h"
-
-namespace webrtc {
-//
-// PARAMETERS FOR RESOLUTION ADAPTATION
-//
-
-// Initial level of buffer in secs.
-const float kInitBufferLevel = 0.5f;
-
-// Threshold of (max) buffer size below which we consider it too low (underflow).
-const float kPercBufferThr = 0.10f;
-
-// Threshold on the occurrences of low buffer levels.
-const float kMaxBufferLow = 0.30f;
-
-// Threshold on rate mismatch.
-const float kMaxRateMisMatch = 0.5f;
-
-// Threshold on the amount of encoder under/over-shooting.
-const float kRateOverShoot = 0.75f;
-const float kRateUnderShoot = 0.75f;
-
-// Factor to favor weighting the average rates with the current/last data.
-const float kWeightRate = 0.70f;
-
-// Factor for transitional rate for going back up in resolution.
-const float kTransRateScaleUpSpatial = 1.25f;
-const float kTransRateScaleUpTemp = 1.25f;
-const float kTransRateScaleUpSpatialTemp = 1.25f;
-
-// Threshold on packet loss rate, above which favor resolution reduction.
-const float kPacketLossThr = 0.1f;
-
-// Factor for reducing transitional bitrate under packet loss.
-const float kPacketLossRateFac = 1.0f;
-
-// Maximum possible transitional rate for down-sampling:
-// (units in kbps), for 30fps.
-const uint16_t kMaxRateQm[9] = {
-    0,     // QCIF
-    50,    // kHCIF
-    125,   // kQVGA
-    200,   // CIF
-    280,   // HVGA
-    400,   // VGA
-    700,   // QFULLHD
-    1000,  // WHD
-    1500   // FULLHD
-};
-
-// Frame rate scale for maximum transition rate.
-const float kFrameRateFac[4] = {
-    0.5f,   // Low
-    0.7f,   // Middle level 1
-    0.85f,  // Middle level 2
-    1.0f,   // High
-};
-
-// Scale for transitional rate: based on content class
-// motion = L/H/D, spatial = L/H/D: for low, high, middle levels.
-const float kScaleTransRateQm[18] = {
-    // VGA and lower
-    0.40f,  // L, L
-    0.50f,  // L, H
-    0.40f,  // L, D
-    0.60f,  // H ,L
-    0.60f,  // H, H
-    0.60f,  // H, D
-    0.50f,  // D, L
-    0.50f,  // D, D
-    0.50f,  // D, H
-
-    // over VGA
-    0.40f,  // L, L
-    0.50f,  // L, H
-    0.40f,  // L, D
-    0.60f,  // H ,L
-    0.60f,  // H, H
-    0.60f,  // H, D
-    0.50f,  // D, L
-    0.50f,  // D, D
-    0.50f,  // D, H
-};
-
-// Threshold on the target rate relative to transitional rate.
-const float kFacLowRate = 0.5f;
-
-// Action for down-sampling:
-// motion = L/H/D, spatial = L/H/D, for low, high, middle levels;
-// rate = 0/1/2, for target rate state relative to transition rate.
-const uint8_t kSpatialAction[27] = {
-    // rateClass = 0:
-    1,  // L, L
-    1,  // L, H
-    1,  // L, D
-    4,  // H ,L
-    1,  // H, H
-    4,  // H, D
-    4,  // D, L
-    1,  // D, H
-    2,  // D, D
-
-    // rateClass = 1:
-    1,  // L, L
-    1,  // L, H
-    1,  // L, D
-    2,  // H ,L
-    1,  // H, H
-    2,  // H, D
-    2,  // D, L
-    1,  // D, H
-    2,  // D, D
-
-    // rateClass = 2:
-    1,  // L, L
-    1,  // L, H
-    1,  // L, D
-    2,  // H ,L
-    1,  // H, H
-    2,  // H, D
-    2,  // D, L
-    1,  // D, H
-    2,  // D, D
-};
-
-const uint8_t kTemporalAction[27] = {
-    // rateClass = 0:
-    3,  // L, L
-    2,  // L, H
-    2,  // L, D
-    1,  // H ,L
-    3,  // H, H
-    1,  // H, D
-    1,  // D, L
-    2,  // D, H
-    1,  // D, D
-
-    // rateClass = 1:
-    3,  // L, L
-    3,  // L, H
-    3,  // L, D
-    1,  // H ,L
-    3,  // H, H
-    1,  // H, D
-    1,  // D, L
-    3,  // D, H
-    1,  // D, D
-
-    // rateClass = 2:
-    1,  // L, L
-    3,  // L, H
-    3,  // L, D
-    1,  // H ,L
-    3,  // H, H
-    1,  // H, D
-    1,  // D, L
-    3,  // D, H
-    1,  // D, D
-};
-
-// Control the total amount of down-sampling allowed.
-const float kMaxSpatialDown = 8.0f;
-const float kMaxTempDown = 3.0f;
-const float kMaxTotalDown = 9.0f;
-
-// Minimum image size for a spatial down-sampling.
-const int kMinImageSize = 176 * 144;
-
-// Minimum frame rate for temporal down-sampling:
-// no frame rate reduction if incoming frame rate <= kMinFrameRate.
-const int kMinFrameRate = 8;
-
-//
-// PARAMETERS FOR FEC ADJUSTMENT: TODO (marpan)
-//
-
-//
-// PARAMETERS FOR SETTING LOW/HIGH STATES OF CONTENT METRICS:
-//
-
-// Thresholds for frame rate:
-const int kLowFrameRate = 10;
-const int kMiddleFrameRate = 15;
-const int kHighFrameRate = 25;
-
-// Thresholds for motion: motion level is from NFD.
-const float kHighMotionNfd = 0.075f;
-const float kLowMotionNfd = 0.03f;
-
-// Thresholds for spatial prediction error:
-// this is applied on the average of (2x2,1x2,2x1).
-const float kHighTexture = 0.035f;
-const float kLowTexture = 0.020f;
-
-// Used to reduce thresholds for larger/HD scenes: correction factor since
-// higher correlation in HD scenes means lower spatial prediction error.
-const float kScaleTexture = 0.9f;
-
-// Percentage reduction in transitional bitrate for 2x2 selected over 1x2/2x1.
-const float kRateRedSpatial2X2 = 0.6f;
-
-const float kSpatialErr2x2VsHoriz = 0.1f;   // percentage to favor 2x2 over H
-const float kSpatialErr2X2VsVert = 0.1f;    // percentage to favor 2x2 over V
-const float kSpatialErrVertVsHoriz = 0.1f;  // percentage to favor H over V
-
-}  // namespace webrtc
-
-#endif  // WEBRTC_MODULES_VIDEO_CODING_QM_SELECT_DATA_H_
diff --git a/webrtc/modules/video_coding/qm_select_unittest.cc b/webrtc/modules/video_coding/qm_select_unittest.cc
deleted file mode 100644
index a0a822c..0000000
--- a/webrtc/modules/video_coding/qm_select_unittest.cc
+++ /dev/null
@@ -1,1302 +0,0 @@
-/*
- *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "testing/gtest/include/gtest/gtest.h"
-
-#include "webrtc/modules/include/module_common_types.h"
-#include "webrtc/modules/video_coding/qm_select.h"
-
-namespace webrtc {
-
-// Representative values of content metrics for low/high/medium (default) states,
-// based on the parameter settings in qm_select_data.h.
-const float kSpatialLow = 0.01f;
-const float kSpatialMedium = 0.03f;
-const float kSpatialHigh = 0.1f;
-const float kTemporalLow = 0.01f;
-const float kTemporalMedium = 0.06f;
-const float kTemporalHigh = 0.1f;
-
-class QmSelectTest : public ::testing::Test {
- protected:
-  QmSelectTest()
-      : qm_resolution_(new VCMQmResolution()),
-        content_metrics_(new VideoContentMetrics()),
-        qm_scale_(NULL) {}
-  VCMQmResolution* qm_resolution_;
-  VideoContentMetrics* content_metrics_;
-  VCMResolutionScale* qm_scale_;
-
-  void InitQmNativeData(float initial_bit_rate,
-                        int user_frame_rate,
-                        int native_width,
-                        int native_height,
-                        int num_layers);
-
-  void UpdateQmEncodedFrame(size_t* encoded_size, size_t num_updates);
-
-  void UpdateQmRateData(int* target_rate,
-                        int* encoder_sent_rate,
-                        int* incoming_frame_rate,
-                        uint8_t* fraction_lost,
-                        int num_updates);
-
-  void UpdateQmContentData(float motion_metric,
-                           float spatial_metric,
-                           float spatial_metric_horiz,
-                           float spatial_metric_vert);
-
-  bool IsSelectedActionCorrect(VCMResolutionScale* qm_scale,
-                               float fac_width,
-                               float fac_height,
-                               float fac_temp,
-                               uint16_t new_width,
-                               uint16_t new_height,
-                               float new_frame_rate);
-
-  void TearDown() {
-    delete qm_resolution_;
-    delete content_metrics_;
-  }
-};
-
-TEST_F(QmSelectTest, HandleInputs) {
-  // Expect parameter error. Initialize with invalid inputs.
-  EXPECT_EQ(-4, qm_resolution_->Initialize(1000, 0, 640, 480, 1));
-  EXPECT_EQ(-4, qm_resolution_->Initialize(1000, 30, 640, 0, 1));
-  EXPECT_EQ(-4, qm_resolution_->Initialize(1000, 30, 0, 480, 1));
-
-  // Expect uninitialized error: no valid initialization before selection.
-  EXPECT_EQ(-7, qm_resolution_->SelectResolution(&qm_scale_));
-
-  VideoContentMetrics* content_metrics = NULL;
-  EXPECT_EQ(0, qm_resolution_->Initialize(1000, 30, 640, 480, 1));
-  qm_resolution_->UpdateContent(content_metrics);
-  // Content metrics are NULL: Expect success and no down-sampling action.
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_TRUE(
-      IsSelectedActionCorrect(qm_scale_, 1.0, 1.0, 1.0, 640, 480, 30.0f));
-}
-
-// TODO(marpan): Add a test for number of temporal layers > 1.
-
-// No down-sampling action at high rates.
-TEST_F(QmSelectTest, NoActionHighRate) {
-  // Initialize with bitrate, frame rate, native system width/height, and
-  // number of temporal layers.
-  InitQmNativeData(800, 30, 640, 480, 1);
-
-  // Update with encoder frame size.
-  uint16_t codec_width = 640;
-  uint16_t codec_height = 480;
-  qm_resolution_->UpdateCodecParameters(30.0f, codec_width, codec_height);
-  EXPECT_EQ(5, qm_resolution_->GetImageType(codec_width, codec_height));
-
-  // Update rates for a sequence of intervals.
-  int target_rate[] = {800, 800, 800};
-  int encoder_sent_rate[] = {800, 800, 800};
-  int incoming_frame_rate[] = {30, 30, 30};
-  uint8_t fraction_lost[] = {10, 10, 10};
-  UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
-                   fraction_lost, 3);
-
-  // Update content: motion level, and 3 spatial prediction errors.
-  UpdateQmContentData(kTemporalLow, kSpatialLow, kSpatialLow, kSpatialLow);
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_EQ(0, qm_resolution_->ComputeContentClass());
-  EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(
-      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480, 30.0f));
-}
-
-// Rate is well below transition, down-sampling action is taken,
-// depending on the content state.
-TEST_F(QmSelectTest, DownActionLowRate) {
-  // Initialize with bitrate, frame rate, native system width/height, and
-  // number of temporal layers.
-  InitQmNativeData(50, 30, 640, 480, 1);
-
-  // Update with encoder frame size.
-  uint16_t codec_width = 640;
-  uint16_t codec_height = 480;
-  qm_resolution_->UpdateCodecParameters(30.0f, codec_width, codec_height);
-  EXPECT_EQ(5, qm_resolution_->GetImageType(codec_width, codec_height));
-
-  // Update rates for a sequence of intervals.
-  int target_rate[] = {50, 50, 50};
-  int encoder_sent_rate[] = {50, 50, 50};
-  int incoming_frame_rate[] = {30, 30, 30};
-  uint8_t fraction_lost[] = {10, 10, 10};
-  UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
-                   fraction_lost, 3);
-
-  // Update content: motion level, and 3 spatial prediction errors.
-  // High motion, low spatial: 2x2 spatial expected.
-  UpdateQmContentData(kTemporalHigh, kSpatialLow, kSpatialLow, kSpatialLow);
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
-  EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(
-      IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, 30.0f));
-
-  qm_resolution_->ResetDownSamplingState();
-  // Low motion, low spatial: 2/3 temporal is expected.
-  UpdateQmContentData(kTemporalLow, kSpatialLow, kSpatialLow, kSpatialLow);
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_EQ(0, qm_resolution_->ComputeContentClass());
-  EXPECT_TRUE(
-      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 640, 480, 20.5f));
-
-  qm_resolution_->ResetDownSamplingState();
-  // Medium motion, low spatial: 2x2 spatial expected.
-  UpdateQmContentData(kTemporalMedium, kSpatialLow, kSpatialLow, kSpatialLow);
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_EQ(6, qm_resolution_->ComputeContentClass());
-  EXPECT_TRUE(
-      IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, 30.0f));
-
-  qm_resolution_->ResetDownSamplingState();
-  // High motion, high spatial: 2/3 temporal expected.
-  UpdateQmContentData(kTemporalHigh, kSpatialHigh, kSpatialHigh, kSpatialHigh);
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_EQ(4, qm_resolution_->ComputeContentClass());
-  EXPECT_TRUE(
-      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 640, 480, 20.5f));
-
-  qm_resolution_->ResetDownSamplingState();
-  // Low motion, high spatial: 1/2 temporal expected.
-  UpdateQmContentData(kTemporalLow, kSpatialHigh, kSpatialHigh, kSpatialHigh);
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
-  EXPECT_TRUE(
-      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 2.0f, 640, 480, 15.5f));
-
-  qm_resolution_->ResetDownSamplingState();
-  // Medium motion, high spatial: 1/2 temporal expected.
-  UpdateQmContentData(kTemporalMedium, kSpatialHigh, kSpatialHigh,
-                      kSpatialHigh);
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_EQ(7, qm_resolution_->ComputeContentClass());
-  EXPECT_TRUE(
-      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 2.0f, 640, 480, 15.5f));
-
-  qm_resolution_->ResetDownSamplingState();
-  // High motion, medium spatial: 2x2 spatial expected.
-  UpdateQmContentData(kTemporalHigh, kSpatialMedium, kSpatialMedium,
-                      kSpatialMedium);
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_EQ(5, qm_resolution_->ComputeContentClass());
-  // Target frame rate for frame dropper should be the same as previous == 15.
-  EXPECT_TRUE(
-      IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, 30.0f));
-
-  qm_resolution_->ResetDownSamplingState();
-  // Low motion, medium spatial: high frame rate, so 1/2 temporal expected.
-  UpdateQmContentData(kTemporalLow, kSpatialMedium, kSpatialMedium,
-                      kSpatialMedium);
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_EQ(2, qm_resolution_->ComputeContentClass());
-  EXPECT_TRUE(
-      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 2.0f, 640, 480, 15.5f));
-
-  qm_resolution_->ResetDownSamplingState();
-  // Medium motion, medium spatial: high frame rate, so 2/3 temporal expected.
-  UpdateQmContentData(kTemporalMedium, kSpatialMedium, kSpatialMedium,
-                      kSpatialMedium);
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_EQ(8, qm_resolution_->ComputeContentClass());
-  EXPECT_TRUE(
-      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 640, 480, 20.5f));
-}
-
-// Rate mis-match is high, and we have over-shooting.
-// Since target rate is below max for down-sampling, down-sampling is selected.
-TEST_F(QmSelectTest, DownActionHighRateMMOvershoot) {
-  // Initialize with bitrate, frame rate, native system width/height, and
-  // number of temporal layers.
-  InitQmNativeData(300, 30, 640, 480, 1);
-
-  // Update with encoder frame size.
-  uint16_t codec_width = 640;
-  uint16_t codec_height = 480;
-  qm_resolution_->UpdateCodecParameters(30.0f, codec_width, codec_height);
-  EXPECT_EQ(5, qm_resolution_->GetImageType(codec_width, codec_height));
-
-  // Update rates for a sequence of intervals.
-  int target_rate[] = {300, 300, 300};
-  int encoder_sent_rate[] = {900, 900, 900};
-  int incoming_frame_rate[] = {30, 30, 30};
-  uint8_t fraction_lost[] = {10, 10, 10};
-  UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
-                   fraction_lost, 3);
-
-  // Update content: motion level, and 3 spatial prediction errors.
-  // High motion, low spatial.
-  UpdateQmContentData(kTemporalHigh, kSpatialLow, kSpatialLow, kSpatialLow);
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
-  EXPECT_EQ(kStressedEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f, 1.0f,
-                                      480, 360, 30.0f));
-
-  qm_resolution_->ResetDownSamplingState();
-  // Low motion, high spatial
-  UpdateQmContentData(kTemporalLow, kSpatialHigh, kSpatialHigh, kSpatialHigh);
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
-  EXPECT_TRUE(
-      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 640, 480, 20.5f));
-}
-
-// Rate mis-match is high, target rate is below max for down-sampling,
-// but since we have consistent under-shooting, no down-sampling action.
-TEST_F(QmSelectTest, NoActionHighRateMMUndershoot) {
-  // Initialize with bitrate, frame rate, native system width/height, and
-  // number of temporal layers.
-  InitQmNativeData(300, 30, 640, 480, 1);
-
-  // Update with encoder frame size.
-  uint16_t codec_width = 640;
-  uint16_t codec_height = 480;
-  qm_resolution_->UpdateCodecParameters(30.0f, codec_width, codec_height);
-  EXPECT_EQ(5, qm_resolution_->GetImageType(codec_width, codec_height));
-
-  // Update rates for a sequence of intervals.
-  int target_rate[] = {300, 300, 300};
-  int encoder_sent_rate[] = {100, 100, 100};
-  int incoming_frame_rate[] = {30, 30, 30};
-  uint8_t fraction_lost[] = {10, 10, 10};
-  UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
-                   fraction_lost, 3);
-
-  // Update content: motion level, and 3 spatial prediction errors.
-  // High motion, low spatial.
-  UpdateQmContentData(kTemporalHigh, kSpatialLow, kSpatialLow, kSpatialLow);
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
-  EXPECT_EQ(kEasyEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(
-      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480, 30.0f));
-
-  qm_resolution_->ResetDownSamplingState();
-  // Low motion, high spatial
-  UpdateQmContentData(kTemporalLow, kSpatialHigh, kSpatialHigh, kSpatialHigh);
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
-  EXPECT_TRUE(
-      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480, 30.0f));
-}
-
-// Buffer is underflowing, and target rate is below max for down-sampling,
-// so action is taken.
-TEST_F(QmSelectTest, DownActionBufferUnderflow) {
-  // Initialize with bitrate, frame rate, native system width/height, and
-  // number of temporal layers.
-  InitQmNativeData(300, 30, 640, 480, 1);
-
-  // Update with encoder frame size.
-  uint16_t codec_width = 640;
-  uint16_t codec_height = 480;
-  qm_resolution_->UpdateCodecParameters(30.0f, codec_width, codec_height);
-  EXPECT_EQ(5, qm_resolution_->GetImageType(codec_width, codec_height));
-
-  // Update with encoded size over a number of frames.
-  // per-frame bandwidth = 15 = 450/30: simulate (decoder) buffer underflow:
-  size_t encoded_size[] = {200, 100, 50, 30, 60, 40, 20, 30, 20, 40};
-  UpdateQmEncodedFrame(encoded_size, GTEST_ARRAY_SIZE_(encoded_size));
-
-  // Update rates for a sequence of intervals.
-  int target_rate[] = {300, 300, 300};
-  int encoder_sent_rate[] = {450, 450, 450};
-  int incoming_frame_rate[] = {30, 30, 30};
-  uint8_t fraction_lost[] = {10, 10, 10};
-  UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
-                   fraction_lost, 3);
-
-  // Update content: motion level, and 3 spatial prediction errors.
-  // High motion, low spatial.
-  UpdateQmContentData(kTemporalHigh, kSpatialLow, kSpatialLow, kSpatialLow);
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
-  EXPECT_EQ(kStressedEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f, 1.0f,
-                                      480, 360, 30.0f));
-
-  qm_resolution_->ResetDownSamplingState();
-  // Low motion, high spatial
-  UpdateQmContentData(kTemporalLow, kSpatialHigh, kSpatialHigh, kSpatialHigh);
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
-  EXPECT_TRUE(
-      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 640, 480, 20.5f));
-}
-
-// Target rate is below max for down-sampling, but buffer level is stable,
-// so no action is taken.
-TEST_F(QmSelectTest, NoActionBufferStable) {
-  // Initialize with bitrate, frame rate, native system width/height, and
-  // number of temporal layers.
-  InitQmNativeData(350, 30, 640, 480, 1);
-
-  // Update with encoder frame size.
-  uint16_t codec_width = 640;
-  uint16_t codec_height = 480;
-  qm_resolution_->UpdateCodecParameters(30.0f, codec_width, codec_height);
-  EXPECT_EQ(5, qm_resolution_->GetImageType(codec_width, codec_height));
-
-  // Update with encoded size over a number of frames.
-  // per-frame bandwidth = 15 = 450/30: simulate stable (decoder) buffer levels.
-  size_t encoded_size[] = {40, 10, 10, 16, 18, 20, 17, 20, 16, 15};
-  UpdateQmEncodedFrame(encoded_size, GTEST_ARRAY_SIZE_(encoded_size));
-
-  // Update rates for a sequence of intervals.
-  int target_rate[] = {350, 350, 350};
-  int encoder_sent_rate[] = {350, 450, 450};
-  int incoming_frame_rate[] = {30, 30, 30};
-  uint8_t fraction_lost[] = {10, 10, 10};
-  UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
-                   fraction_lost, 3);
-
-  // Update content: motion level, and 3 spatial prediction errors.
-  // High motion, low spatial.
-  UpdateQmContentData(kTemporalHigh, kSpatialLow, kSpatialLow, kSpatialLow);
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
-  EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(
-      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480, 30.0f));
-
-  qm_resolution_->ResetDownSamplingState();
-  // Low motion, high spatial
-  UpdateQmContentData(kTemporalLow, kSpatialHigh, kSpatialHigh, kSpatialHigh);
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
-  EXPECT_TRUE(
-      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480, 30.0f));
-}
-
-// Very low rate, but no spatial down-sampling below some size (QCIF).
-TEST_F(QmSelectTest, LimitDownSpatialAction) {
-  // Initialize with bitrate, frame rate, native system width/height, and
-  // number of temporal layers.
-  InitQmNativeData(10, 30, 176, 144, 1);
-
-  // Update with encoder frame size.
-  uint16_t codec_width = 176;
-  uint16_t codec_height = 144;
-  qm_resolution_->UpdateCodecParameters(30.0f, codec_width, codec_height);
-  EXPECT_EQ(0, qm_resolution_->GetImageType(codec_width, codec_height));
-
-  // Update rates for a sequence of intervals.
-  int target_rate[] = {10, 10, 10};
-  int encoder_sent_rate[] = {10, 10, 10};
-  int incoming_frame_rate[] = {30, 30, 30};
-  uint8_t fraction_lost[] = {10, 10, 10};
-  UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
-                   fraction_lost, 3);
-
-  // Update content: motion level, and 3 spatial prediction errors.
-  // High motion, low spatial.
-  UpdateQmContentData(kTemporalHigh, kSpatialLow, kSpatialLow, kSpatialLow);
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
-  EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(
-      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 176, 144, 30.0f));
-}
-
-// Very low rate, but no frame reduction below some frame_rate (8fps).
-TEST_F(QmSelectTest, LimitDownTemporalAction) {
-  // Initialize with bitrate, frame rate, native system width/height, and
-  // number of temporal layers.
-  InitQmNativeData(10, 8, 640, 480, 1);
-
-  // Update with encoder frame size.
-  uint16_t codec_width = 640;
-  uint16_t codec_height = 480;
-  qm_resolution_->UpdateCodecParameters(8.0f, codec_width, codec_height);
-  EXPECT_EQ(5, qm_resolution_->GetImageType(codec_width, codec_height));
-
-  // Update rates for a sequence of intervals.
-  int target_rate[] = {10, 10, 10};
-  int encoder_sent_rate[] = {10, 10, 10};
-  int incoming_frame_rate[] = {8, 8, 8};
-  uint8_t fraction_lost[] = {10, 10, 10};
-  UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
-                   fraction_lost, 3);
-
-  // Update content: motion level, and 3 spatial prediction errors.
-  // Low motion, medium spatial.
-  UpdateQmContentData(kTemporalLow, kSpatialMedium, kSpatialMedium,
-                      kSpatialMedium);
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_EQ(2, qm_resolution_->ComputeContentClass());
-  EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(
-      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480, 8.0f));
-}
-
-// Two stages: spatial down-sample and then back up spatially,
-// as rate has increased.
-TEST_F(QmSelectTest, 2StageDownSpatialUpSpatial) {
-  // Initialize with bitrate, frame rate, native system width/height, and
-  // number of temporal layers.
-  InitQmNativeData(50, 30, 640, 480, 1);
-
-  // Update with encoder frame size.
-  uint16_t codec_width = 640;
-  uint16_t codec_height = 480;
-  qm_resolution_->UpdateCodecParameters(30.0f, codec_width, codec_height);
-  EXPECT_EQ(5, qm_resolution_->GetImageType(codec_width, codec_height));
-
-  // Update rates for a sequence of intervals.
-  int target_rate[] = {50, 50, 50};
-  int encoder_sent_rate[] = {50, 50, 50};
-  int incoming_frame_rate[] = {30, 30, 30};
-  uint8_t fraction_lost[] = {10, 10, 10};
-  UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
-                   fraction_lost, 3);
-
-  // Update content: motion level, and 3 spatial prediction errors.
-  // High motion, low spatial.
-  UpdateQmContentData(kTemporalHigh, kSpatialLow, kSpatialLow, kSpatialLow);
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
-  EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(
-      IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, 30.0f));
-
-  // Reset and go up in rate: expected to go back up, in 2 stages of 3/4.
-  qm_resolution_->ResetRates();
-  qm_resolution_->UpdateCodecParameters(30.0f, 320, 240);
-  EXPECT_EQ(2, qm_resolution_->GetImageType(320, 240));
-  // Update rates for a sequence of intervals.
-  int target_rate2[] = {400, 400, 400, 400, 400};
-  int encoder_sent_rate2[] = {400, 400, 400, 400, 400};
-  int incoming_frame_rate2[] = {30, 30, 30, 30, 30};
-  uint8_t fraction_lost2[] = {10, 10, 10, 10, 10};
-  UpdateQmRateData(target_rate2, encoder_sent_rate2, incoming_frame_rate2,
-                   fraction_lost2, 5);
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  float scale = (4.0f / 3.0f) / 2.0f;
-  EXPECT_TRUE(
-      IsSelectedActionCorrect(qm_scale_, scale, scale, 1.0f, 480, 360, 30.0f));
-
-  qm_resolution_->UpdateCodecParameters(30.0f, 480, 360);
-  EXPECT_EQ(4, qm_resolution_->GetImageType(480, 360));
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 3.0f / 4.0f, 3.0f / 4.0f, 1.0f,
-                                      640, 480, 30.0f));
-}
-
-// Two stages: spatial down-sample and then back up spatially, since encoder
-// is under-shooting target even though rate has not increased much.
-TEST_F(QmSelectTest, 2StageDownSpatialUpSpatialUndershoot) {
-  // Initialize with bitrate, frame rate, native system width/height, and
-  // number of temporal layers.
-  InitQmNativeData(50, 30, 640, 480, 1);
-
-  // Update with encoder frame size.
-  uint16_t codec_width = 640;
-  uint16_t codec_height = 480;
-  qm_resolution_->UpdateCodecParameters(30.0f, codec_width, codec_height);
-  EXPECT_EQ(5, qm_resolution_->GetImageType(codec_width, codec_height));
-
-  // Update rates for a sequence of intervals.
-  int target_rate[] = {50, 50, 50};
-  int encoder_sent_rate[] = {50, 50, 50};
-  int incoming_frame_rate[] = {30, 30, 30};
-  uint8_t fraction_lost[] = {10, 10, 10};
-  UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
-                   fraction_lost, 3);
-
-  // Update content: motion level, and 3 spatial prediction errors.
-  // High motion, low spatial.
-  UpdateQmContentData(kTemporalHigh, kSpatialLow, kSpatialLow, kSpatialLow);
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
-  EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(
-      IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, 30.0f));
-
-  // Reset rates and simulate under-shooting scenario: expect to go back up.
-  // Goes up spatially in two stages for 1/2x1/2 down-sampling.
-  qm_resolution_->ResetRates();
-  qm_resolution_->UpdateCodecParameters(30.0f, 320, 240);
-  EXPECT_EQ(2, qm_resolution_->GetImageType(320, 240));
-  // Update rates for a sequence of intervals.
-  int target_rate2[] = {200, 200, 200, 200, 200};
-  int encoder_sent_rate2[] = {50, 50, 50, 50, 50};
-  int incoming_frame_rate2[] = {30, 30, 30, 30, 30};
-  uint8_t fraction_lost2[] = {10, 10, 10, 10, 10};
-  UpdateQmRateData(target_rate2, encoder_sent_rate2, incoming_frame_rate2,
-                   fraction_lost2, 5);
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_EQ(kEasyEncoding, qm_resolution_->GetEncoderState());
-  float scale = (4.0f / 3.0f) / 2.0f;
-  EXPECT_TRUE(
-      IsSelectedActionCorrect(qm_scale_, scale, scale, 1.0f, 480, 360, 30.0f));
-
-  qm_resolution_->UpdateCodecParameters(30.0f, 480, 360);
-  EXPECT_EQ(4, qm_resolution_->GetImageType(480, 360));
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 3.0f / 4.0f, 3.0f / 4.0f, 1.0f,
-                                      640, 480, 30.0f));
-}
-
-// Two stages: spatial down-sample and then no action to go up,
-// as encoding rate mis-match is too high.
-TEST_F(QmSelectTest, 2StageDownSpatialNoActionUp) {
-  // Initialize with bitrate, frame rate, native system width/height, and
-  // number of temporal layers.
-  InitQmNativeData(50, 30, 640, 480, 1);
-
-  // Update with encoder frame size.
-  uint16_t codec_width = 640;
-  uint16_t codec_height = 480;
-  qm_resolution_->UpdateCodecParameters(30.0f, codec_width, codec_height);
-  EXPECT_EQ(5, qm_resolution_->GetImageType(codec_width, codec_height));
-
-  // Update rates for a sequence of intervals.
-  int target_rate[] = {50, 50, 50};
-  int encoder_sent_rate[] = {50, 50, 50};
-  int incoming_frame_rate[] = {30, 30, 30};
-  uint8_t fraction_lost[] = {10, 10, 10};
-  UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
-                   fraction_lost, 3);
-
-  // Update content: motion level, and 3 spatial prediction errors.
-  // High motion, low spatial.
-  UpdateQmContentData(kTemporalHigh, kSpatialLow, kSpatialLow, kSpatialLow);
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
-  EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(
-      IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, 30.0f));
-
-  // Reset and simulate large rate mis-match: expect no action to go back up.
-  qm_resolution_->ResetRates();
-  qm_resolution_->UpdateCodecParameters(30.0f, 320, 240);
-  EXPECT_EQ(2, qm_resolution_->GetImageType(320, 240));
-  // Update rates for a sequence of intervals.
-  int target_rate2[] = {400, 400, 400, 400, 400};
-  int encoder_sent_rate2[] = {1000, 1000, 1000, 1000, 1000};
-  int incoming_frame_rate2[] = {30, 30, 30, 30, 30};
-  uint8_t fraction_lost2[] = {10, 10, 10, 10, 10};
-  UpdateQmRateData(target_rate2, encoder_sent_rate2, incoming_frame_rate2,
-                   fraction_lost2, 5);
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_EQ(kStressedEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(
-      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 320, 240, 30.0f));
-}
-
-// Two stages: temporally down-sample and then back up temporally,
-// as rate has increased.
-TEST_F(QmSelectTest, 2StatgeDownTemporalUpTemporal) {
-  // Initialize with bitrate, frame rate, native system width/height, and
-  // number of temporal layers.
-  InitQmNativeData(50, 30, 640, 480, 1);
-
-  // Update with encoder frame size.
-  uint16_t codec_width = 640;
-  uint16_t codec_height = 480;
-  qm_resolution_->UpdateCodecParameters(30.0f, codec_width, codec_height);
-  EXPECT_EQ(5, qm_resolution_->GetImageType(codec_width, codec_height));
-
-  // Update rates for a sequence of intervals.
-  int target_rate[] = {50, 50, 50};
-  int encoder_sent_rate[] = {50, 50, 50};
-  int incoming_frame_rate[] = {30, 30, 30};
-  uint8_t fraction_lost[] = {10, 10, 10};
-  UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
-                   fraction_lost, 3);
-
-  // Update content: motion level, and 3 spatial prediction errors.
-  // Low motion, high spatial.
-  UpdateQmContentData(kTemporalLow, kSpatialHigh, kSpatialHigh, kSpatialHigh);
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
-  EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(
-      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 2.0f, 640, 480, 15.5f));
-
-  // Reset rates and go up in rate: expect to go back up.
-  qm_resolution_->ResetRates();
-  // Update rates for a sequence of intervals.
-  int target_rate2[] = {400, 400, 400, 400, 400};
-  int encoder_sent_rate2[] = {400, 400, 400, 400, 400};
-  int incoming_frame_rate2[] = {15, 15, 15, 15, 15};
-  uint8_t fraction_lost2[] = {10, 10, 10, 10, 10};
-  UpdateQmRateData(target_rate2, encoder_sent_rate2, incoming_frame_rate2,
-                   fraction_lost2, 5);
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(
-      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 0.5f, 640, 480, 30.0f));
-}
-
-// Two stages: temporal down-sample and then back up temporally, since encoder
-// is under-shooting target even though rate has not increased much.
-TEST_F(QmSelectTest, 2StatgeDownTemporalUpTemporalUndershoot) {
-  // Initialize with bitrate, frame rate, native system width/height, and
-  // number of temporal layers.
-  InitQmNativeData(50, 30, 640, 480, 1);
-
-  // Update with encoder frame size.
-  uint16_t codec_width = 640;
-  uint16_t codec_height = 480;
-  qm_resolution_->UpdateCodecParameters(30.0f, codec_width, codec_height);
-  EXPECT_EQ(5, qm_resolution_->GetImageType(codec_width, codec_height));
-
-  // Update rates for a sequence of intervals.
-  int target_rate[] = {50, 50, 50};
-  int encoder_sent_rate[] = {50, 50, 50};
-  int incoming_frame_rate[] = {30, 30, 30};
-  uint8_t fraction_lost[] = {10, 10, 10};
-  UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
-                   fraction_lost, 3);
-
-  // Update content: motion level, and 3 spatial prediction errors.
-  // Low motion, high spatial.
-  UpdateQmContentData(kTemporalLow, kSpatialHigh, kSpatialHigh, kSpatialHigh);
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
-  EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(
-      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 2.0f, 640, 480, 15.5f));
-
-  // Reset rates and simulate under-shooting scenario: expect to go back up.
-  qm_resolution_->ResetRates();
-  // Update rates for a sequence of intervals.
-  int target_rate2[] = {150, 150, 150, 150, 150};
-  int encoder_sent_rate2[] = {50, 50, 50, 50, 50};
-  int incoming_frame_rate2[] = {15, 15, 15, 15, 15};
-  uint8_t fraction_lost2[] = {10, 10, 10, 10, 10};
-  UpdateQmRateData(target_rate2, encoder_sent_rate2, incoming_frame_rate2,
-                   fraction_lost2, 5);
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_EQ(kEasyEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(
-      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 0.5f, 640, 480, 30.0f));
-}
-
-// Two stages: temporal down-sample and then no action to go up,
-// as encoding rate mis-match is too high.
-TEST_F(QmSelectTest, 2StageDownTemporalNoActionUp) {
-  // Initialize with bitrate, frame rate, native system width/height, and
-  // number of temporal layers.
-  InitQmNativeData(50, 30, 640, 480, 1);
-
-  // Update with encoder frame size.
-  uint16_t codec_width = 640;
-  uint16_t codec_height = 480;
-  qm_resolution_->UpdateCodecParameters(30.0f, codec_width, codec_height);
-  EXPECT_EQ(5, qm_resolution_->GetImageType(codec_width, codec_height));
-
-  // Update rates for a sequence of intervals.
-  int target_rate[] = {50, 50, 50};
-  int encoder_sent_rate[] = {50, 50, 50};
-  int incoming_frame_rate[] = {30, 30, 30};
-  uint8_t fraction_lost[] = {10, 10, 10};
-  UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
-                   fraction_lost, 3);
-
-  // Update content: motion level, and 3 spatial prediction errors.
-  // Low motion, high spatial.
-  UpdateQmContentData(kTemporalLow, kSpatialHigh, kSpatialHigh, kSpatialHigh);
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
-  EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1, 1, 2, 640, 480, 15.5f));
-
-  // Reset and simulate large rate mis-match: expect no action to go back up.
-  qm_resolution_->UpdateCodecParameters(15.0f, codec_width, codec_height);
-  qm_resolution_->ResetRates();
-  // Update rates for a sequence of intervals.
-  int target_rate2[] = {600, 600, 600, 600, 600};
-  int encoder_sent_rate2[] = {1000, 1000, 1000, 1000, 1000};
-  int incoming_frame_rate2[] = {15, 15, 15, 15, 15};
-  uint8_t fraction_lost2[] = {10, 10, 10, 10, 10};
-  UpdateQmRateData(target_rate2, encoder_sent_rate2, incoming_frame_rate2,
-                   fraction_lost2, 5);
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_EQ(kStressedEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(
-      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480, 15.0f));
-}
-// 3 stages: spatial down-sample, followed by temporal down-sample,
-// and then go up to full state, as encoding rate has increased.
-TEST_F(QmSelectTest, 3StageDownSpatialTemporlaUpSpatialTemporal) {
-  // Initialize with bitrate, frame rate, native system width/height, and
-  // number of temporal layers.
-  InitQmNativeData(80, 30, 640, 480, 1);
-
-  // Update with encoder frame size.
-  uint16_t codec_width = 640;
-  uint16_t codec_height = 480;
-  qm_resolution_->UpdateCodecParameters(30.0f, codec_width, codec_height);
-  EXPECT_EQ(5, qm_resolution_->GetImageType(codec_width, codec_height));
-
-  // Update rates for a sequence of intervals.
-  int target_rate[] = {80, 80, 80};
-  int encoder_sent_rate[] = {80, 80, 80};
-  int incoming_frame_rate[] = {30, 30, 30};
-  uint8_t fraction_lost[] = {10, 10, 10};
-  UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
-                   fraction_lost, 3);
-
-  // Update content: motion level, and 3 spatial prediction errors.
-  // High motion, low spatial.
-  UpdateQmContentData(kTemporalHigh, kSpatialLow, kSpatialLow, kSpatialLow);
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
-  EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(
-      IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, 30.0f));
-
-  // Change content data: expect temporal down-sample.
-  qm_resolution_->UpdateCodecParameters(30.0f, 320, 240);
-  EXPECT_EQ(2, qm_resolution_->GetImageType(320, 240));
-
-  // Reset rates and go lower in rate.
-  qm_resolution_->ResetRates();
-  int target_rate2[] = {40, 40, 40, 40, 40};
-  int encoder_sent_rate2[] = {40, 40, 40, 40, 40};
-  int incoming_frame_rate2[] = {30, 30, 30, 30, 30};
-  uint8_t fraction_lost2[] = {10, 10, 10, 10, 10};
-  UpdateQmRateData(target_rate2, encoder_sent_rate2, incoming_frame_rate2,
-                   fraction_lost2, 5);
-
-  // Update content: motion level, and 3 spatial prediction errors.
-  // Low motion, high spatial.
-  UpdateQmContentData(kTemporalLow, kSpatialHigh, kSpatialHigh, kSpatialHigh);
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
-  EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(
-      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 320, 240, 20.5f));
-
-  // Reset rates and go high up in rate: expect to go back up both spatially
-  // and temporally. The 1/2x1/2 spatial action is undone in two stages.
-  qm_resolution_->ResetRates();
-  // Update rates for a sequence of intervals.
-  int target_rate3[] = {1000, 1000, 1000, 1000, 1000};
-  int encoder_sent_rate3[] = {1000, 1000, 1000, 1000, 1000};
-  int incoming_frame_rate3[] = {20, 20, 20, 20, 20};
-  uint8_t fraction_lost3[] = {10, 10, 10, 10, 10};
-  UpdateQmRateData(target_rate3, encoder_sent_rate3, incoming_frame_rate3,
-                   fraction_lost3, 5);
-
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
-  EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  float scale = (4.0f / 3.0f) / 2.0f;
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, scale, scale, 2.0f / 3.0f, 480,
-                                      360, 30.0f));
-
-  qm_resolution_->UpdateCodecParameters(30.0f, 480, 360);
-  EXPECT_EQ(4, qm_resolution_->GetImageType(480, 360));
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 3.0f / 4.0f, 3.0f / 4.0f, 1.0f,
-                                      640, 480, 30.0f));
-}
-
-// No further down-sampling beyond some total amount.
-TEST_F(QmSelectTest, NoActionTooMuchDownSampling) {
-  // Initialize with bitrate, frame rate, native system width/height, and
-  // number of temporal layers.
-  InitQmNativeData(150, 30, 1280, 720, 1);
-
-  // Update with encoder frame size.
-  uint16_t codec_width = 1280;
-  uint16_t codec_height = 720;
-  qm_resolution_->UpdateCodecParameters(30.0f, codec_width, codec_height);
-  EXPECT_EQ(7, qm_resolution_->GetImageType(codec_width, codec_height));
-
-  // Update rates for a sequence of intervals.
-  int target_rate[] = {150, 150, 150};
-  int encoder_sent_rate[] = {150, 150, 150};
-  int incoming_frame_rate[] = {30, 30, 30};
-  uint8_t fraction_lost[] = {10, 10, 10};
-  UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
-                   fraction_lost, 3);
-
-  // Update content: motion level, and 3 spatial prediction errors.
-  // High motion, low spatial.
-  UpdateQmContentData(kTemporalHigh, kSpatialLow, kSpatialLow, kSpatialLow);
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
-  EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(
-      IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 640, 360, 30.0f));
-
-  // Reset and lower rates to get another spatial action (3/4x3/4).
-  // Lower the frame rate for spatial to be selected again.
-  qm_resolution_->ResetRates();
-  qm_resolution_->UpdateCodecParameters(10.0f, 640, 360);
-  EXPECT_EQ(4, qm_resolution_->GetImageType(640, 360));
-  // Update rates for a sequence of intervals.
-  int target_rate2[] = {70, 70, 70, 70, 70};
-  int encoder_sent_rate2[] = {70, 70, 70, 70, 70};
-  int incoming_frame_rate2[] = {10, 10, 10, 10, 10};
-  uint8_t fraction_lost2[] = {10, 10, 10, 10, 10};
-  UpdateQmRateData(target_rate2, encoder_sent_rate2, incoming_frame_rate2,
-                   fraction_lost2, 5);
-
-  // Update content: motion level, and 3 spatial prediction errors.
-  // High motion, medium spatial.
-  UpdateQmContentData(kTemporalHigh, kSpatialMedium, kSpatialMedium,
-                      kSpatialMedium);
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_EQ(5, qm_resolution_->ComputeContentClass());
-  EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f, 1.0f,
-                                      480, 270, 10.0f));
-
-  // Reset and go to very low rate: no action should be taken,
-  // we went down too much already.
-  qm_resolution_->ResetRates();
-  qm_resolution_->UpdateCodecParameters(10.0f, 480, 270);
-  EXPECT_EQ(3, qm_resolution_->GetImageType(480, 270));
-  // Update rates for a sequence of intervals.
-  int target_rate3[] = {10, 10, 10, 10, 10};
-  int encoder_sent_rate3[] = {10, 10, 10, 10, 10};
-  int incoming_frame_rate3[] = {10, 10, 10, 10, 10};
-  uint8_t fraction_lost3[] = {10, 10, 10, 10, 10};
-  UpdateQmRateData(target_rate3, encoder_sent_rate3, incoming_frame_rate3,
-                   fraction_lost3, 5);
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_EQ(5, qm_resolution_->ComputeContentClass());
-  EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(
-      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 480, 270, 10.0f));
-}
-
-// Multiple down-sampling stages and then undo all of them.
-// Spatial down-sample 3/4x3/4, followed by temporal down-sample 2/3,
-// followed by spatial 3/4x3/4. Then go up to full state,
-// as encoding rate has increased.
-TEST_F(QmSelectTest, MultipleStagesCheckActionHistory1) {
-  // Initialize with bitrate, frame rate, native system width/height, and
-  // number of temporal layers.
-  InitQmNativeData(150, 30, 640, 480, 1);
-
-  // Update with encoder frame size.
-  uint16_t codec_width = 640;
-  uint16_t codec_height = 480;
-  qm_resolution_->UpdateCodecParameters(30.0f, codec_width, codec_height);
-  EXPECT_EQ(5, qm_resolution_->GetImageType(codec_width, codec_height));
-
-  // Go down spatial 3/4x3/4.
-  // Update rates for a sequence of intervals.
-  int target_rate[] = {150, 150, 150};
-  int encoder_sent_rate[] = {150, 150, 150};
-  int incoming_frame_rate[] = {30, 30, 30};
-  uint8_t fraction_lost[] = {10, 10, 10};
-  UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
-                   fraction_lost, 3);
-
-  // Update content: motion level, and 3 spatial prediction errors.
-  // Medium motion, low spatial.
-  UpdateQmContentData(kTemporalMedium, kSpatialLow, kSpatialLow, kSpatialLow);
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_EQ(6, qm_resolution_->ComputeContentClass());
-  EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f, 1.0f,
-                                      480, 360, 30.0f));
-  // Go down 2/3 temporal.
-  qm_resolution_->UpdateCodecParameters(30.0f, 480, 360);
-  EXPECT_EQ(4, qm_resolution_->GetImageType(480, 360));
-  qm_resolution_->ResetRates();
-  int target_rate2[] = {100, 100, 100, 100, 100};
-  int encoder_sent_rate2[] = {100, 100, 100, 100, 100};
-  int incoming_frame_rate2[] = {30, 30, 30, 30, 30};
-  uint8_t fraction_lost2[] = {10, 10, 10, 10, 10};
-  UpdateQmRateData(target_rate2, encoder_sent_rate2, incoming_frame_rate2,
-                   fraction_lost2, 5);
-
-  // Update content: motion level, and 3 spatial prediction errors.
-  // Low motion, high spatial.
-  UpdateQmContentData(kTemporalLow, kSpatialHigh, kSpatialHigh, kSpatialHigh);
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
-  EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(
-      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 480, 360, 20.5f));
-
-  // Go down 3/4x3/4 spatial:
-  qm_resolution_->UpdateCodecParameters(20.0f, 480, 360);
-  qm_resolution_->ResetRates();
-  int target_rate3[] = {80, 80, 80, 80, 80};
-  int encoder_sent_rate3[] = {80, 80, 80, 80, 80};
-  int incoming_frame_rate3[] = {20, 20, 20, 20, 20};
-  uint8_t fraction_lost3[] = {10, 10, 10, 10, 10};
-  UpdateQmRateData(target_rate3, encoder_sent_rate3, incoming_frame_rate3,
-                   fraction_lost3, 5);
-
-  // Update content: motion level, and 3 spatial prediction errors.
-  // High motion, low spatial.
-  UpdateQmContentData(kTemporalHigh, kSpatialLow, kSpatialLow, kSpatialLow);
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
-  EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  // The two spatial actions of 3/4x3/4 are converted to 1/2x1/2,
-  // so scale factor is 2.0.
-  EXPECT_TRUE(
-      IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, 20.0f));
-
-  // Reset rates and go high up in rate: expect to go back up
-  // 1/2x1/2 spatially and 1/2 temporally. Spatial undoing is done in 2 stages.
-  qm_resolution_->UpdateCodecParameters(15.0f, 320, 240);
-  EXPECT_EQ(2, qm_resolution_->GetImageType(320, 240));
-  qm_resolution_->ResetRates();
-  // Update rates for a sequence of intervals.
-  int target_rate4[] = {1000, 1000, 1000, 1000, 1000};
-  int encoder_sent_rate4[] = {1000, 1000, 1000, 1000, 1000};
-  int incoming_frame_rate4[] = {15, 15, 15, 15, 15};
-  uint8_t fraction_lost4[] = {10, 10, 10, 10, 10};
-  UpdateQmRateData(target_rate4, encoder_sent_rate4, incoming_frame_rate4,
-                   fraction_lost4, 5);
-
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
-  EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  float scale = (4.0f / 3.0f) / 2.0f;
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, scale, scale, 2.0f / 3.0f, 480,
-                                      360, 30.0f));
-
-  qm_resolution_->UpdateCodecParameters(30.0f, 480, 360);
-  EXPECT_EQ(4, qm_resolution_->GetImageType(480, 360));
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 3.0f / 4.0f, 3.0f / 4.0f, 1.0f,
-                                      640, 480, 30.0f));
-}
-
-// Multiple down-sampling and up-sampling stages, with partial undoing.
-// Spatial down-sample 1/2x1/2, followed by temporal down-sample 2/3, undo the
-// temporal, then another temporal, and then undo both spatial and temporal.
-TEST_F(QmSelectTest, MultipleStagesCheckActionHistory2) {
-  // Initialize with bitrate, frame rate, native system width/height, and
-  // number of temporal layers.
-  InitQmNativeData(80, 30, 640, 480, 1);
-
-  // Update with encoder frame size.
-  uint16_t codec_width = 640;
-  uint16_t codec_height = 480;
-  qm_resolution_->UpdateCodecParameters(30.0f, codec_width, codec_height);
-  EXPECT_EQ(5, qm_resolution_->GetImageType(codec_width, codec_height));
-
-  // Go down 1/2x1/2 spatial.
-  // Update rates for a sequence of intervals.
-  int target_rate[] = {80, 80, 80};
-  int encoder_sent_rate[] = {80, 80, 80};
-  int incoming_frame_rate[] = {30, 30, 30};
-  uint8_t fraction_lost[] = {10, 10, 10};
-  UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
-                   fraction_lost, 3);
-
-  // Update content: motion level, and 3 spatial prediction errors.
-  // Medium motion, low spatial.
-  UpdateQmContentData(kTemporalMedium, kSpatialLow, kSpatialLow, kSpatialLow);
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_EQ(6, qm_resolution_->ComputeContentClass());
-  EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(
-      IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, 30.0f));
-
-  // Go down 2/3 temporal.
-  qm_resolution_->UpdateCodecParameters(30.0f, 320, 240);
-  EXPECT_EQ(2, qm_resolution_->GetImageType(320, 240));
-  qm_resolution_->ResetRates();
-  int target_rate2[] = {40, 40, 40, 40, 40};
-  int encoder_sent_rate2[] = {40, 40, 40, 40, 40};
-  int incoming_frame_rate2[] = {30, 30, 30, 30, 30};
-  uint8_t fraction_lost2[] = {10, 10, 10, 10, 10};
-  UpdateQmRateData(target_rate2, encoder_sent_rate2, incoming_frame_rate2,
-                   fraction_lost2, 5);
-
-  // Update content: motion level, and 3 spatial prediction errors.
-  // Medium motion, high spatial.
-  UpdateQmContentData(kTemporalMedium, kSpatialHigh, kSpatialHigh,
-                      kSpatialHigh);
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_EQ(7, qm_resolution_->ComputeContentClass());
-  EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(
-      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 320, 240, 20.5f));
-
-  // Go up 2/3 temporally.
-  qm_resolution_->UpdateCodecParameters(20.0f, 320, 240);
-  qm_resolution_->ResetRates();
-  // Update rates for a sequence of intervals.
-  int target_rate3[] = {150, 150, 150, 150, 150};
-  int encoder_sent_rate3[] = {150, 150, 150, 150, 150};
-  int incoming_frame_rate3[] = {20, 20, 20, 20, 20};
-  uint8_t fraction_lost3[] = {10, 10, 10, 10, 10};
-  UpdateQmRateData(target_rate3, encoder_sent_rate3, incoming_frame_rate3,
-                   fraction_lost3, 5);
-
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_EQ(7, qm_resolution_->ComputeContentClass());
-  EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 2.0f / 3.0f, 320,
-                                      240, 30.0f));
-
-  // Go down 2/3 temporal.
-  qm_resolution_->UpdateCodecParameters(30.0f, 320, 240);
-  EXPECT_EQ(2, qm_resolution_->GetImageType(320, 240));
-  qm_resolution_->ResetRates();
-  int target_rate4[] = {40, 40, 40, 40, 40};
-  int encoder_sent_rate4[] = {40, 40, 40, 40, 40};
-  int incoming_frame_rate4[] = {30, 30, 30, 30, 30};
-  uint8_t fraction_lost4[] = {10, 10, 10, 10, 10};
-  UpdateQmRateData(target_rate4, encoder_sent_rate4, incoming_frame_rate4,
-                   fraction_lost4, 5);
-
-  // Update content: motion level, and 3 spatial prediction errors.
-  // Low motion, high spatial.
-  UpdateQmContentData(kTemporalLow, kSpatialHigh, kSpatialHigh, kSpatialHigh);
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
-  EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(
-      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 320, 240, 20.5f));
-
-  // Go up spatial and temporal. Spatial undoing is done in 2 stages.
-  qm_resolution_->UpdateCodecParameters(20.5f, 320, 240);
-  qm_resolution_->ResetRates();
-  // Update rates for a sequence of intervals.
-  int target_rate5[] = {1000, 1000, 1000, 1000, 1000};
-  int encoder_sent_rate5[] = {1000, 1000, 1000, 1000, 1000};
-  int incoming_frame_rate5[] = {20, 20, 20, 20, 20};
-  uint8_t fraction_lost5[] = {10, 10, 10, 10, 10};
-  UpdateQmRateData(target_rate5, encoder_sent_rate5, incoming_frame_rate5,
-                   fraction_lost5, 5);
-
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  float scale = (4.0f / 3.0f) / 2.0f;
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, scale, scale, 2.0f / 3.0f, 480,
-                                      360, 30.0f));
-
-  qm_resolution_->UpdateCodecParameters(30.0f, 480, 360);
-  EXPECT_EQ(4, qm_resolution_->GetImageType(480, 360));
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 3.0f / 4.0f, 3.0f / 4.0f, 1.0f,
-                                      640, 480, 30.0f));
-}
-
-// Multiple down-sampling and up-sampling stages, with partial undoing.
-// Spatial down-sample 3/4x3/4, followed by temporal down-sample 2/3,
-// undo the temporal 2/3, and then undo the spatial.
-TEST_F(QmSelectTest, MultipleStagesCheckActionHistory3) {
-  // Initialize with bitrate, frame rate, native system width/height, and
-  // number of temporal layers.
-  InitQmNativeData(100, 30, 640, 480, 1);
-
-  // Update with encoder frame size.
-  uint16_t codec_width = 640;
-  uint16_t codec_height = 480;
-  qm_resolution_->UpdateCodecParameters(30.0f, codec_width, codec_height);
-  EXPECT_EQ(5, qm_resolution_->GetImageType(codec_width, codec_height));
-
-  // Go down 3/4x3/4 spatial.
-  // Update rates for a sequence of intervals.
-  int target_rate[] = {100, 100, 100};
-  int encoder_sent_rate[] = {100, 100, 100};
-  int incoming_frame_rate[] = {30, 30, 30};
-  uint8_t fraction_lost[] = {10, 10, 10};
-  UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
-                   fraction_lost, 3);
-
-  // Update content: motion level, and 3 spatial prediction errors.
-  // Medium motion, low spatial.
-  UpdateQmContentData(kTemporalMedium, kSpatialLow, kSpatialLow, kSpatialLow);
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_EQ(6, qm_resolution_->ComputeContentClass());
-  EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f, 1.0f,
-                                      480, 360, 30.0f));
-
-  // Go down 2/3 temporal.
-  qm_resolution_->UpdateCodecParameters(30.0f, 480, 360);
-  EXPECT_EQ(4, qm_resolution_->GetImageType(480, 360));
-  qm_resolution_->ResetRates();
-  int target_rate2[] = {100, 100, 100, 100, 100};
-  int encoder_sent_rate2[] = {100, 100, 100, 100, 100};
-  int incoming_frame_rate2[] = {30, 30, 30, 30, 30};
-  uint8_t fraction_lost2[] = {10, 10, 10, 10, 10};
-  UpdateQmRateData(target_rate2, encoder_sent_rate2, incoming_frame_rate2,
-                   fraction_lost2, 5);
-
-  // Update content: motion level, and 3 spatial prediction errors.
-  // Low motion, high spatial.
-  UpdateQmContentData(kTemporalLow, kSpatialHigh, kSpatialHigh, kSpatialHigh);
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
-  EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(
-      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 480, 360, 20.5f));
-
-  // Go up 2/3 temporal.
-  qm_resolution_->UpdateCodecParameters(20.5f, 480, 360);
-  qm_resolution_->ResetRates();
-  // Update rates for a sequence of intervals.
-  int target_rate3[] = {250, 250, 250, 250, 250};
-  int encoder_sent_rate3[] = {250, 250, 250, 250, 250};
-  int incoming_frame_rate3[] = {20, 20, 20, 20, 120};
-  uint8_t fraction_lost3[] = {10, 10, 10, 10, 10};
-  UpdateQmRateData(target_rate3, encoder_sent_rate3, incoming_frame_rate3,
-                   fraction_lost3, 5);
-
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
-  EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 2.0f / 3.0f, 480,
-                                      360, 30.0f));
-
-  // Go up spatial.
-  qm_resolution_->UpdateCodecParameters(30.0f, 480, 360);
-  EXPECT_EQ(4, qm_resolution_->GetImageType(480, 360));
-  qm_resolution_->ResetRates();
-  int target_rate4[] = {500, 500, 500, 500, 500};
-  int encoder_sent_rate4[] = {500, 500, 500, 500, 500};
-  int incoming_frame_rate4[] = {30, 30, 30, 30, 30};
-  uint8_t fraction_lost4[] = {30, 30, 30, 30, 30};
-  UpdateQmRateData(target_rate4, encoder_sent_rate4, incoming_frame_rate4,
-                   fraction_lost4, 5);
-
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 3.0f / 4.0f, 3.0f / 4.0f, 1.0f,
-                                      640, 480, 30.0f));
-}
-
-// Two stages of 3/4x3/4 converted to one stage of 1/2x1/2.
-TEST_F(QmSelectTest, ConvertThreeQuartersToOneHalf) {
-  // Initialize with bitrate, frame rate, native system width/height, and
-  // number of temporal layers.
-  InitQmNativeData(150, 30, 640, 480, 1);
-
-  // Update with encoder frame size.
-  uint16_t codec_width = 640;
-  uint16_t codec_height = 480;
-  qm_resolution_->UpdateCodecParameters(30.0f, codec_width, codec_height);
-  EXPECT_EQ(5, qm_resolution_->GetImageType(codec_width, codec_height));
-
-  // Go down 3/4x3/4 spatial.
-  // Update rates for a sequence of intervals.
-  int target_rate[] = {150, 150, 150};
-  int encoder_sent_rate[] = {150, 150, 150};
-  int incoming_frame_rate[] = {30, 30, 30};
-  uint8_t fraction_lost[] = {10, 10, 10};
-  UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
-                   fraction_lost, 3);
-
-  // Update content: motion level, and 3 spatial prediction errors.
-  // Medium motion, low spatial.
-  UpdateQmContentData(kTemporalMedium, kSpatialLow, kSpatialLow, kSpatialLow);
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_EQ(6, qm_resolution_->ComputeContentClass());
-  EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f, 1.0f,
-                                      480, 360, 30.0f));
-
-  // Set rates to go down another 3/4 spatial. Should be converted to 1/2.
-  qm_resolution_->UpdateCodecParameters(30.0f, 480, 360);
-  EXPECT_EQ(4, qm_resolution_->GetImageType(480, 360));
-  qm_resolution_->ResetRates();
-  int target_rate2[] = {100, 100, 100, 100, 100};
-  int encoder_sent_rate2[] = {100, 100, 100, 100, 100};
-  int incoming_frame_rate2[] = {30, 30, 30, 30, 30};
-  uint8_t fraction_lost2[] = {10, 10, 10, 10, 10};
-  UpdateQmRateData(target_rate2, encoder_sent_rate2, incoming_frame_rate2,
-                   fraction_lost2, 5);
-
-  // Update content: motion level, and 3 spatial prediction errors.
-  // Medium motion, low spatial.
-  UpdateQmContentData(kTemporalMedium, kSpatialLow, kSpatialLow, kSpatialLow);
-  EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_EQ(6, qm_resolution_->ComputeContentClass());
-  EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(
-      IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, 30.0f));
-}
-
-void QmSelectTest::InitQmNativeData(float initial_bit_rate,
-                                    int user_frame_rate,
-                                    int native_width,
-                                    int native_height,
-                                    int num_layers) {
-  EXPECT_EQ(
-      0, qm_resolution_->Initialize(initial_bit_rate, user_frame_rate,
-                                    native_width, native_height, num_layers));
-}
-
-void QmSelectTest::UpdateQmContentData(float motion_metric,
-                                       float spatial_metric,
-                                       float spatial_metric_horiz,
-                                       float spatial_metric_vert) {
-  content_metrics_->motion_magnitude = motion_metric;
-  content_metrics_->spatial_pred_err = spatial_metric;
-  content_metrics_->spatial_pred_err_h = spatial_metric_horiz;
-  content_metrics_->spatial_pred_err_v = spatial_metric_vert;
-  qm_resolution_->UpdateContent(content_metrics_);
-}
-
-void QmSelectTest::UpdateQmEncodedFrame(size_t* encoded_size,
-                                        size_t num_updates) {
-  for (size_t i = 0; i < num_updates; ++i) {
-    // Convert from kbits to bytes.
-    size_t encoded_size_update = 1000 * encoded_size[i] / 8;
-    qm_resolution_->UpdateEncodedSize(encoded_size_update);
-  }
-}
-
-void QmSelectTest::UpdateQmRateData(int* target_rate,
-                                    int* encoder_sent_rate,
-                                    int* incoming_frame_rate,
-                                    uint8_t* fraction_lost,
-                                    int num_updates) {
-  for (int i = 0; i < num_updates; ++i) {
-    float target_rate_update = target_rate[i];
-    float encoder_sent_rate_update = encoder_sent_rate[i];
-    float incoming_frame_rate_update = incoming_frame_rate[i];
-    uint8_t fraction_lost_update = fraction_lost[i];
-    qm_resolution_->UpdateRates(target_rate_update, encoder_sent_rate_update,
-                                incoming_frame_rate_update,
-                                fraction_lost_update);
-  }
-}
-
-// Check whether the selected action from the QmResolution class matches
-// the expected scales given by |fac_width|, |fac_height| and |fac_temp|.
-bool QmSelectTest::IsSelectedActionCorrect(VCMResolutionScale* qm_scale,
-                                           float fac_width,
-                                           float fac_height,
-                                           float fac_temp,
-                                           uint16_t new_width,
-                                           uint16_t new_height,
-                                           float new_frame_rate) {
-  if (qm_scale->spatial_width_fact == fac_width &&
-      qm_scale->spatial_height_fact == fac_height &&
-      qm_scale->temporal_fact == fac_temp &&
-      qm_scale->codec_width == new_width &&
-      qm_scale->codec_height == new_height &&
-      qm_scale->frame_rate == new_frame_rate) {
-    return true;
-  } else {
-    return false;
-  }
-}
-}  // namespace webrtc
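For context on the scale-factor arithmetic these tests assert: a 3/4x3/4 spatial action is a per-dimension factor of 4/3, and two consecutive 3/4 steps are folded into a single 1/2x1/2 step (factor 2.0), which is why 640x480 repeatedly ends up at 320x240 and is then undone via a (4/3)/2 step to 480x360 and a final 3/4 undo back to 640x480. A minimal sketch of that composition (illustrative only; the 1.7 rounding threshold is an assumption, not the deleted code's rule):

    #include <cstdio>

    int main() {
      const unsigned width = 640, height = 480;
      // Two consecutive 3/4x3/4 down-sampling actions, expressed as
      // per-dimension divisors of 4/3 each.
      float total = (4.0f / 3.0f) * (4.0f / 3.0f);  // 16/9, roughly 1.78.
      // qm_select folded this into one 1/2x1/2 action; modelled here as
      // rounding the combined divisor up to 2 once it gets close.
      if (total > 1.7f)
        total = 2.0f;
      std::printf("%ux%u -> %ux%u\n", width, height,
                  static_cast<unsigned>(width / total),
                  static_cast<unsigned>(height / total));
      return 0;
    }
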
diff --git a/webrtc/modules/video_coding/video_coding.gypi b/webrtc/modules/video_coding/video_coding.gypi
index f3ca1c7..515c6bc 100644
--- a/webrtc/modules/video_coding/video_coding.gypi
+++ b/webrtc/modules/video_coding/video_coding.gypi
@@ -28,7 +28,6 @@
         # headers
         'codec_database.h',
         'codec_timer.h',
-        'content_metrics_processing.h',
         'decoding_state.h',
         'encoded_frame.h',
         'fec_tables_xor.h',
@@ -49,8 +48,6 @@
         'packet.h',
         'packet_buffer.h',
         'percentile_filter.h',
-        'qm_select_data.h',
-        'qm_select.h',
         'receiver.h',
         'rtt_filter.h',
         'session_info.h',
@@ -61,7 +58,6 @@
         # sources
         'codec_database.cc',
         'codec_timer.cc',
-        'content_metrics_processing.cc',
         'decoding_state.cc',
         'encoded_frame.cc',
         'frame_buffer.cc',
@@ -78,7 +74,6 @@
         'packet.cc',
         'packet_buffer.cc',
         'percentile_filter.cc',
-        'qm_select.cc',
         'receiver.cc',
         'rtt_filter.cc',
         'session_info.cc',
diff --git a/webrtc/modules/video_coding/video_coding_impl.cc b/webrtc/modules/video_coding/video_coding_impl.cc
index 970236d..72bcc9a 100644
--- a/webrtc/modules/video_coding/video_coding_impl.cc
+++ b/webrtc/modules/video_coding/video_coding_impl.cc
@@ -74,16 +74,11 @@
   VideoCodingModuleImpl(Clock* clock,
                         EventFactory* event_factory,
                         VideoEncoderRateObserver* encoder_rate_observer,
-                        VCMQMSettingsCallback* qm_settings_callback,
                         NackSender* nack_sender,
                         KeyFrameRequestSender* keyframe_request_sender,
                         EncodedImageCallback* pre_decode_image_callback)
       : VideoCodingModule(),
-        sender_(clock,
-                &post_encode_callback_,
-                encoder_rate_observer,
-                qm_settings_callback,
-                nullptr),
+        sender_(clock, &post_encode_callback_, encoder_rate_observer, nullptr),
         receiver_(clock,
                   event_factory,
                   pre_decode_image_callback,
@@ -147,9 +142,8 @@
   }
 
   int32_t AddVideoFrame(const VideoFrame& videoFrame,
-                        const VideoContentMetrics* contentMetrics,
                         const CodecSpecificInfo* codecSpecificInfo) override {
-    return sender_.AddVideoFrame(videoFrame, contentMetrics, codecSpecificInfo);
+    return sender_.AddVideoFrame(videoFrame, codecSpecificInfo);
   }
 
   int32_t IntraFrameRequest(size_t stream_index) override {
@@ -298,9 +292,9 @@
     NackSender* nack_sender,
     KeyFrameRequestSender* keyframe_request_sender,
     EncodedImageCallback* pre_decode_image_callback) {
-  return new VideoCodingModuleImpl(
-      clock, nullptr, encoder_rate_observer, qm_settings_callback, nack_sender,
-      keyframe_request_sender, pre_decode_image_callback);
+  return new VideoCodingModuleImpl(clock, nullptr, encoder_rate_observer,
+                                   nack_sender, keyframe_request_sender,
+                                   pre_decode_image_callback);
 }
 
 // Create method for current interface, will be removed when the
@@ -320,9 +314,8 @@
     KeyFrameRequestSender* keyframe_request_sender) {
   assert(clock);
   assert(event_factory);
-  return new VideoCodingModuleImpl(clock, event_factory, nullptr, nullptr,
-                                   nack_sender, keyframe_request_sender,
-                                   nullptr);
+  return new VideoCodingModuleImpl(clock, event_factory, nullptr, nack_sender,
+                                   keyframe_request_sender, nullptr);
 }
 
 }  // namespace webrtc
diff --git a/webrtc/modules/video_coding/video_coding_impl.h b/webrtc/modules/video_coding/video_coding_impl.h
index 9e99ab4..c9992b7 100644
--- a/webrtc/modules/video_coding/video_coding_impl.h
+++ b/webrtc/modules/video_coding/video_coding_impl.h
@@ -59,7 +59,6 @@
   VideoSender(Clock* clock,
               EncodedImageCallback* post_encode_callback,
               VideoEncoderRateObserver* encoder_rate_observer,
-              VCMQMSettingsCallback* qm_settings_callback,
               VCMSendStatisticsCallback* send_stats_callback);
 
   ~VideoSender();
@@ -85,7 +84,6 @@
   void SetVideoProtection(VCMVideoProtection videoProtection);
 
   int32_t AddVideoFrame(const VideoFrame& videoFrame,
-                        const VideoContentMetrics* _contentMetrics,
                         const CodecSpecificInfo* codecSpecificInfo);
 
   int32_t IntraFrameRequest(size_t stream_index);
@@ -116,7 +114,6 @@
   VideoCodec current_codec_;
   rtc::ThreadChecker main_thread_;
 
-  VCMQMSettingsCallback* const qm_settings_callback_;
   VCMProtectionCallback* protection_callback_;
 
   rtc::CriticalSection params_crit_;
diff --git a/webrtc/modules/video_coding/video_sender.cc b/webrtc/modules/video_coding/video_sender.cc
index 4d544ae..a3a4d6d 100644
--- a/webrtc/modules/video_coding/video_sender.cc
+++ b/webrtc/modules/video_coding/video_sender.cc
@@ -27,7 +27,6 @@
 VideoSender::VideoSender(Clock* clock,
                          EncodedImageCallback* post_encode_callback,
                          VideoEncoderRateObserver* encoder_rate_observer,
-                         VCMQMSettingsCallback* qm_settings_callback,
                          VCMSendStatisticsCallback* send_stats_callback)
     : clock_(clock),
       _encoder(nullptr),
@@ -38,16 +37,14 @@
       frame_dropper_enabled_(true),
       _sendStatsTimer(1000, clock_),
       current_codec_(),
-      qm_settings_callback_(qm_settings_callback),
       protection_callback_(nullptr),
       encoder_params_({0, 0, 0, 0}),
       encoder_has_internal_source_(false),
       next_frame_types_(1, kVideoFrameDelta) {
+  _mediaOpt.Reset();
   // Allow VideoSender to be created on one thread but used on another, post
   // construction. This is currently how this class is being used by at least
   // one external project (diffractor).
-  _mediaOpt.EnableQM(qm_settings_callback_ != nullptr);
-  _mediaOpt.Reset();
   main_thread_.DetachFromThread();
 }
 
@@ -203,9 +200,8 @@
 int32_t VideoSender::SetChannelParameters(uint32_t target_bitrate,
                                           uint8_t lossRate,
                                           int64_t rtt) {
-  uint32_t target_rate =
-      _mediaOpt.SetTargetRates(target_bitrate, lossRate, rtt,
-                               protection_callback_, qm_settings_callback_);
+  uint32_t target_rate = _mediaOpt.SetTargetRates(target_bitrate, lossRate, rtt,
+                                                  protection_callback_);
 
   uint32_t input_frame_rate = _mediaOpt.InputFrameRate();
 
@@ -274,7 +270,6 @@
 }
 // Add one raw video frame to the encoder, blocking.
 int32_t VideoSender::AddVideoFrame(const VideoFrame& videoFrame,
-                                   const VideoContentMetrics* contentMetrics,
                                    const CodecSpecificInfo* codecSpecificInfo) {
   EncoderParameters encoder_params;
   std::vector<FrameType> next_frame_types;
@@ -296,7 +291,6 @@
     _encoder->OnDroppedFrame();
     return VCM_OK;
   }
-  _mediaOpt.UpdateContentData(contentMetrics);
   // TODO(pbos): Make sure setting send codec is synchronized with video
   // processing so frame size always matches.
   if (!_codecDataBase.MatchesCurrentResolution(videoFrame.width(),
diff --git a/webrtc/modules/video_coding/video_sender_unittest.cc b/webrtc/modules/video_coding/video_sender_unittest.cc
index 3a779ba..a9c6790 100644
--- a/webrtc/modules/video_coding/video_sender_unittest.cc
+++ b/webrtc/modules/video_coding/video_sender_unittest.cc
@@ -180,13 +180,13 @@
   TestVideoSender() : clock_(1000), encoded_frame_callback_(&clock_) {}
 
   void SetUp() override {
-    sender_.reset(new VideoSender(&clock_, &encoded_frame_callback_, nullptr,
-                                  nullptr, nullptr));
+    sender_.reset(
+        new VideoSender(&clock_, &encoded_frame_callback_, nullptr, nullptr));
   }
 
   void AddFrame() {
     assert(generator_.get());
-    sender_->AddVideoFrame(*generator_->NextFrame(), NULL, NULL);
+    sender_->AddVideoFrame(*generator_->NextFrame(), NULL);
   }
 
   SimulatedClock clock_;
diff --git a/webrtc/modules/video_processing/BUILD.gn b/webrtc/modules/video_processing/BUILD.gn
index 214a7df..1177d9b 100644
--- a/webrtc/modules/video_processing/BUILD.gn
+++ b/webrtc/modules/video_processing/BUILD.gn
@@ -13,8 +13,6 @@
 
 source_set("video_processing") {
   sources = [
-    "content_analysis.cc",
-    "content_analysis.h",
     "frame_preprocessor.cc",
     "frame_preprocessor.h",
     "include/video_processing.h",
@@ -63,7 +61,6 @@
 if (build_video_processing_sse2) {
   source_set("video_processing_sse2") {
     sources = [
-      "content_analysis_sse2.cc",
       "util/denoiser_filter_sse2.cc",
       "util/denoiser_filter_sse2.h",
     ]
diff --git a/webrtc/modules/video_processing/content_analysis.cc b/webrtc/modules/video_processing/content_analysis.cc
deleted file mode 100644
index 76dfb95..0000000
--- a/webrtc/modules/video_processing/content_analysis.cc
+++ /dev/null
@@ -1,280 +0,0 @@
-/*
- *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-#include "webrtc/modules/video_processing/content_analysis.h"
-
-#include <math.h>
-#include <stdlib.h>
-
-#include "webrtc/system_wrappers/include/cpu_features_wrapper.h"
-
-namespace webrtc {
-
-VPMContentAnalysis::VPMContentAnalysis(bool runtime_cpu_detection)
-    : orig_frame_(NULL),
-      prev_frame_(NULL),
-      width_(0),
-      height_(0),
-      skip_num_(1),
-      border_(8),
-      motion_magnitude_(0.0f),
-      spatial_pred_err_(0.0f),
-      spatial_pred_err_h_(0.0f),
-      spatial_pred_err_v_(0.0f),
-      first_frame_(true),
-      ca_Init_(false),
-      content_metrics_(NULL) {
-  ComputeSpatialMetrics = &VPMContentAnalysis::ComputeSpatialMetrics_C;
-  TemporalDiffMetric = &VPMContentAnalysis::TemporalDiffMetric_C;
-
-  if (runtime_cpu_detection) {
-#if defined(WEBRTC_ARCH_X86_FAMILY)
-    if (WebRtc_GetCPUInfo(kSSE2)) {
-      ComputeSpatialMetrics = &VPMContentAnalysis::ComputeSpatialMetrics_SSE2;
-      TemporalDiffMetric = &VPMContentAnalysis::TemporalDiffMetric_SSE2;
-    }
-#endif
-  }
-  Release();
-}
-
-VPMContentAnalysis::~VPMContentAnalysis() {
-  Release();
-}
-
-VideoContentMetrics* VPMContentAnalysis::ComputeContentMetrics(
-    const VideoFrame& inputFrame) {
-  if (inputFrame.IsZeroSize())
-    return NULL;
-
-  // Init if needed (native dimension change).
-  if (width_ != inputFrame.width() || height_ != inputFrame.height()) {
-    if (VPM_OK != Initialize(inputFrame.width(), inputFrame.height()))
-      return NULL;
-  }
-  // Only interested in the Y plane.
-  orig_frame_ = inputFrame.buffer(kYPlane);
-
-  // Compute spatial metrics: 3 spatial prediction errors.
-  (this->*ComputeSpatialMetrics)();
-
-  // Compute motion metrics
-  if (first_frame_ == false)
-    ComputeMotionMetrics();
-
-  // Saving current frame as previous one: Y only.
-  memcpy(prev_frame_, orig_frame_, width_ * height_);
-
-  first_frame_ = false;
-  ca_Init_ = true;
-
-  return ContentMetrics();
-}
-
-int32_t VPMContentAnalysis::Release() {
-  if (content_metrics_ != NULL) {
-    delete content_metrics_;
-    content_metrics_ = NULL;
-  }
-
-  if (prev_frame_ != NULL) {
-    delete[] prev_frame_;
-    prev_frame_ = NULL;
-  }
-
-  width_ = 0;
-  height_ = 0;
-  first_frame_ = true;
-
-  return VPM_OK;
-}
-
-int32_t VPMContentAnalysis::Initialize(int width, int height) {
-  width_ = width;
-  height_ = height;
-  first_frame_ = true;
-
-  // Skip parameter: number of skipped rows, for complexity reduction.
-  // The temporal metric currently also uses it for column reduction.
-  skip_num_ = 1;
-
-  // Use skip_num_ = 2 for 4CIF and WHD.
-  if ((height_ >= 576) && (width_ >= 704)) {
-    skip_num_ = 2;
-  }
-  // Use skip_num_ = 4 for FULL_HD images.
-  if ((height_ >= 1080) && (width_ >= 1920)) {
-    skip_num_ = 4;
-  }
-
-  if (content_metrics_ != NULL) {
-    delete content_metrics_;
-  }
-
-  if (prev_frame_ != NULL) {
-    delete[] prev_frame_;
-  }
-
-  // Spatial Metrics don't work on a border of 8. Minimum processing
-  // block size is 16 pixels.  So make sure the width and height support this.
-  if (width_ <= 32 || height_ <= 32) {
-    ca_Init_ = false;
-    return VPM_PARAMETER_ERROR;
-  }
-
-  content_metrics_ = new VideoContentMetrics();
-  if (content_metrics_ == NULL) {
-    return VPM_MEMORY;
-  }
-
-  prev_frame_ = new uint8_t[width_ * height_];  // Y only.
-  if (prev_frame_ == NULL)
-    return VPM_MEMORY;
-
-  return VPM_OK;
-}
-
-// Compute motion metrics: currently only the motion magnitude,
-//  derived from the normalized temporal difference (MAD).
-int32_t VPMContentAnalysis::ComputeMotionMetrics() {
-  // Motion metrics: only one is derived from normalized
-  //  (MAD) temporal difference
-  (this->*TemporalDiffMetric)();
-  return VPM_OK;
-}
-
-// Normalized temporal difference (MAD): used as a motion level metric
-// Normalize MAD by spatial contrast: images with more contrast
-//  (pixel variance) likely have larger temporal difference
-// To reduce complexity, we compute the metric for a reduced set of points.
-int32_t VPMContentAnalysis::TemporalDiffMetric_C() {
-  // size of original frame
-  int sizei = height_;
-  int sizej = width_;
-  uint32_t tempDiffSum = 0;
-  uint32_t pixelSum = 0;
-  uint64_t pixelSqSum = 0;
-
-  uint32_t num_pixels = 0;  // Counter for # of pixels.
-  const int width_end = ((width_ - 2 * border_) & -16) + border_;
-
-  for (int i = border_; i < sizei - border_; i += skip_num_) {
-    for (int j = border_; j < width_end; j++) {
-      num_pixels += 1;
-      int ssn = i * sizej + j;
-
-      uint8_t currPixel = orig_frame_[ssn];
-      uint8_t prevPixel = prev_frame_[ssn];
-
-      tempDiffSum +=
-          static_cast<uint32_t>(abs((int16_t)(currPixel - prevPixel)));
-      pixelSum += static_cast<uint32_t>(currPixel);
-      pixelSqSum += static_cast<uint64_t>(currPixel * currPixel);
-    }
-  }
-
-  // Default.
-  motion_magnitude_ = 0.0f;
-
-  if (tempDiffSum == 0)
-    return VPM_OK;
-
-  // Normalize over all pixels.
-  float const tempDiffAvg =
-      static_cast<float>(tempDiffSum) / static_cast<float>(num_pixels);
-  float const pixelSumAvg =
-      static_cast<float>(pixelSum) / static_cast<float>(num_pixels);
-  float const pixelSqSumAvg =
-      static_cast<float>(pixelSqSum) / static_cast<float>(num_pixels);
-  float contrast = pixelSqSumAvg - (pixelSumAvg * pixelSumAvg);
-
-  if (contrast > 0.0) {
-    contrast = sqrt(contrast);
-    motion_magnitude_ = tempDiffAvg / contrast;
-  }
-  return VPM_OK;
-}
-
-// Compute spatial metrics:
-// To reduce complexity, we compute the metric for a reduced set of points.
-// The spatial metrics are rough estimates of the prediction error cost for
-//  each QM spatial mode: 2x2,1x2,2x1
-// The metrics are a simple estimate of the up-sampling prediction error,
-// estimated assuming sub-sampling for decimation (no filtering),
-// and up-sampling back up with simple bilinear interpolation.
-int32_t VPMContentAnalysis::ComputeSpatialMetrics_C() {
-  const int sizei = height_;
-  const int sizej = width_;
-
-  // Pixel mean square average: used to normalize the spatial metrics.
-  uint32_t pixelMSA = 0;
-
-  uint32_t spatialErrSum = 0;
-  uint32_t spatialErrVSum = 0;
-  uint32_t spatialErrHSum = 0;
-
-  // Make sure the working width is a multiple of 16.
-  const int width_end = ((sizej - 2 * border_) & -16) + border_;
-
-  for (int i = border_; i < sizei - border_; i += skip_num_) {
-    for (int j = border_; j < width_end; j++) {
-      int ssn1 = i * sizej + j;
-      int ssn2 = (i + 1) * sizej + j;  // bottom
-      int ssn3 = (i - 1) * sizej + j;  // top
-      int ssn4 = i * sizej + j + 1;    // right
-      int ssn5 = i * sizej + j - 1;    // left
-
-      uint16_t refPixel1 = orig_frame_[ssn1] << 1;
-      uint16_t refPixel2 = orig_frame_[ssn1] << 2;
-
-      uint8_t bottPixel = orig_frame_[ssn2];
-      uint8_t topPixel = orig_frame_[ssn3];
-      uint8_t rightPixel = orig_frame_[ssn4];
-      uint8_t leftPixel = orig_frame_[ssn5];
-
-      spatialErrSum += static_cast<uint32_t>(abs(static_cast<int16_t>(
-          refPixel2 - static_cast<uint16_t>(bottPixel + topPixel + leftPixel +
-                                            rightPixel))));
-      spatialErrVSum += static_cast<uint32_t>(abs(static_cast<int16_t>(
-          refPixel1 - static_cast<uint16_t>(bottPixel + topPixel))));
-      spatialErrHSum += static_cast<uint32_t>(abs(static_cast<int16_t>(
-          refPixel1 - static_cast<uint16_t>(leftPixel + rightPixel))));
-      pixelMSA += orig_frame_[ssn1];
-    }
-  }
-
-  // Normalize over all pixels.
-  const float spatialErr = static_cast<float>(spatialErrSum >> 2);
-  const float spatialErrH = static_cast<float>(spatialErrHSum >> 1);
-  const float spatialErrV = static_cast<float>(spatialErrVSum >> 1);
-  const float norm = static_cast<float>(pixelMSA);
-
-  // 2X2:
-  spatial_pred_err_ = spatialErr / norm;
-  // 1X2:
-  spatial_pred_err_h_ = spatialErrH / norm;
-  // 2X1:
-  spatial_pred_err_v_ = spatialErrV / norm;
-  return VPM_OK;
-}
-
-VideoContentMetrics* VPMContentAnalysis::ContentMetrics() {
-  if (ca_Init_ == false)
-    return NULL;
-
-  content_metrics_->spatial_pred_err = spatial_pred_err_;
-  content_metrics_->spatial_pred_err_h = spatial_pred_err_h_;
-  content_metrics_->spatial_pred_err_v = spatial_pred_err_v_;
-  // Motion metric: normalized temporal difference (MAD).
-  content_metrics_->motion_magnitude = motion_magnitude_;
-
-  return content_metrics_;
-}
-
-}  // namespace webrtc
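The motion metric removed above is the mean absolute frame difference (MAD) normalized by luma contrast, where contrast = sqrt(E[p^2] - E[p]^2). A scalar sketch of that computation, ignoring the border and row-skipping details of the deleted code:

    #include <cmath>
    #include <cstddef>
    #include <cstdint>
    #include <cstdlib>

    // Scalar sketch: MAD between frames divided by the standard deviation
    // (contrast) of the current frame's luma.
    float MotionMagnitude(const uint8_t* cur, const uint8_t* prev, size_t n) {
      if (n == 0)
        return 0.0f;
      double diff_sum = 0.0, pix_sum = 0.0, pix_sq_sum = 0.0;
      for (size_t i = 0; i < n; ++i) {
        diff_sum += std::abs(static_cast<int>(cur[i]) - static_cast<int>(prev[i]));
        pix_sum += cur[i];
        pix_sq_sum += static_cast<double>(cur[i]) * cur[i];
      }
      if (diff_sum == 0.0)
        return 0.0f;
      const double mad = diff_sum / n;
      const double contrast_sq = pix_sq_sum / n - (pix_sum / n) * (pix_sum / n);
      return contrast_sq > 0.0
                 ? static_cast<float>(mad / std::sqrt(contrast_sq))
                 : 0.0f;
    }
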
diff --git a/webrtc/modules/video_processing/content_analysis.h b/webrtc/modules/video_processing/content_analysis.h
deleted file mode 100644
index d3a11bd..0000000
--- a/webrtc/modules/video_processing/content_analysis.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_VIDEO_PROCESSING_CONTENT_ANALYSIS_H_
-#define WEBRTC_MODULES_VIDEO_PROCESSING_CONTENT_ANALYSIS_H_
-
-#include "webrtc/modules/include/module_common_types.h"
-#include "webrtc/modules/video_processing/include/video_processing_defines.h"
-#include "webrtc/typedefs.h"
-#include "webrtc/video_frame.h"
-
-namespace webrtc {
-
-class VPMContentAnalysis {
- public:
-  // When |runtime_cpu_detection| is true, runtime selection of an optimized
-  // code path is allowed.
-  explicit VPMContentAnalysis(bool runtime_cpu_detection);
-  ~VPMContentAnalysis();
-
-  // Initialize ContentAnalysis - should be called prior to
-  //  ComputeContentMetrics().
-  // Inputs:         width, height
-  // Return value:   0 if OK, negative value upon error
-  int32_t Initialize(int width, int height);
-
-  // Extract content features - main function of ContentAnalysis.
-  // Input:           new frame
-  // Return value:    pointer to structure containing content analysis
-  //                  metrics, or NULL upon error
-  VideoContentMetrics* ComputeContentMetrics(const VideoFrame& inputFrame);
-
-  // Release all allocated memory
-  // Output: 0 if OK, negative value upon error
-  int32_t Release();
-
- private:
-  // Return the computed content metrics.
-  VideoContentMetrics* ContentMetrics();
-
-  // Normalized temporal difference metric: for motion magnitude
-  typedef int32_t (VPMContentAnalysis::*TemporalDiffMetricFunc)();
-  TemporalDiffMetricFunc TemporalDiffMetric;
-  int32_t TemporalDiffMetric_C();
-
-  // Motion metric method: currently computes only the motion magnitude.
-  int32_t ComputeMotionMetrics();
-
-  // Spatial metric method: computes the 3 frame-average spatial
-  //  prediction errors (1x2,2x1,2x2)
-  typedef int32_t (VPMContentAnalysis::*ComputeSpatialMetricsFunc)();
-  ComputeSpatialMetricsFunc ComputeSpatialMetrics;
-  int32_t ComputeSpatialMetrics_C();
-
-#if defined(WEBRTC_ARCH_X86_FAMILY)
-  int32_t ComputeSpatialMetrics_SSE2();
-  int32_t TemporalDiffMetric_SSE2();
-#endif
-
-  const uint8_t* orig_frame_;
-  uint8_t* prev_frame_;
-  int width_;
-  int height_;
-  int skip_num_;
-  int border_;
-
-  // Content Metrics: Stores the local average of the metrics.
-  float motion_magnitude_;    // motion class
-  float spatial_pred_err_;    // spatial class
-  float spatial_pred_err_h_;  // spatial class
-  float spatial_pred_err_v_;  // spatial class
-  bool first_frame_;
-  bool ca_Init_;
-
-  VideoContentMetrics* content_metrics_;
-};
-
-}  // namespace webrtc
-
-#endif  // WEBRTC_MODULES_VIDEO_PROCESSING_CONTENT_ANALYSIS_H_
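The call pattern implied by this header (and by the frame_preprocessor changes below) was roughly: construct with runtime CPU detection, feed each captured frame, and read the returned metrics; dimension changes trigger re-initialization inside ComputeContentMetrics(). A minimal sketch against the removed API (it no longer compiles after this CL; it only documents the old usage):

    #include "webrtc/modules/video_processing/content_analysis.h"

    // Sketch only: metrics can be read per frame after ComputeContentMetrics().
    void AnalyzeFrame(webrtc::VPMContentAnalysis* ca,
                      const webrtc::VideoFrame& frame) {
      webrtc::VideoContentMetrics* metrics = ca->ComputeContentMetrics(frame);
      if (metrics != nullptr) {
        // metrics->motion_magnitude, metrics->spatial_pred_err,
        // metrics->spatial_pred_err_h, metrics->spatial_pred_err_v
      }
    }
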
diff --git a/webrtc/modules/video_processing/content_analysis_sse2.cc b/webrtc/modules/video_processing/content_analysis_sse2.cc
deleted file mode 100644
index 7a60a89..0000000
--- a/webrtc/modules/video_processing/content_analysis_sse2.cc
+++ /dev/null
@@ -1,271 +0,0 @@
-/*
- *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "webrtc/modules/video_processing/content_analysis.h"
-
-#include <emmintrin.h>
-#include <math.h>
-
-namespace webrtc {
-
-int32_t VPMContentAnalysis::TemporalDiffMetric_SSE2() {
-  uint32_t num_pixels = 0;  // counter for # of pixels
-  const uint8_t* imgBufO = orig_frame_ + border_ * width_ + border_;
-  const uint8_t* imgBufP = prev_frame_ + border_ * width_ + border_;
-
-  const int32_t width_end = ((width_ - 2 * border_) & -16) + border_;
-
-  __m128i sad_64 = _mm_setzero_si128();
-  __m128i sum_64 = _mm_setzero_si128();
-  __m128i sqsum_64 = _mm_setzero_si128();
-  const __m128i z = _mm_setzero_si128();
-
-  for (uint16_t i = 0; i < (height_ - 2 * border_); i += skip_num_) {
-    __m128i sqsum_32 = _mm_setzero_si128();
-
-    const uint8_t* lineO = imgBufO;
-    const uint8_t* lineP = imgBufP;
-
-    // Work on 16 pixels at a time.  For HD content with a width of 1920
-    // this loop will run ~120 times (depending on border).  Maximum for
-    // abs(o-p) and sum(o) per pixel is 255. _mm_sad_epu8 produces 2 64 bit
-    // results which are then accumulated.  There is no chance of
-    // rollover for these two accumulators.
-    // o*o will have a maximum of 255*255 = 65025.  This would roll over
-    // a 16 bit accumulator, as 120*65025 > 65535, but fits in a
-    // 32 bit accumulator.
-    for (uint16_t j = 0; j < width_end - border_; j += 16) {
-      const __m128i o = _mm_loadu_si128((__m128i*)(lineO));
-      const __m128i p = _mm_loadu_si128((__m128i*)(lineP));
-
-      lineO += 16;
-      lineP += 16;
-
-      // Abs pixel difference between frames.
-      sad_64 = _mm_add_epi64(sad_64, _mm_sad_epu8(o, p));
-
-      // sum of all pixels in frame
-      sum_64 = _mm_add_epi64(sum_64, _mm_sad_epu8(o, z));
-
-      // Squared sum of all pixels in frame.
-      const __m128i olo = _mm_unpacklo_epi8(o, z);
-      const __m128i ohi = _mm_unpackhi_epi8(o, z);
-
-      const __m128i sqsum_32_lo = _mm_madd_epi16(olo, olo);
-      const __m128i sqsum_32_hi = _mm_madd_epi16(ohi, ohi);
-
-      sqsum_32 = _mm_add_epi32(sqsum_32, sqsum_32_lo);
-      sqsum_32 = _mm_add_epi32(sqsum_32, sqsum_32_hi);
-    }
-
-    // Add to 64 bit running sum so as not to roll over.
-    sqsum_64 =
-        _mm_add_epi64(sqsum_64, _mm_add_epi64(_mm_unpackhi_epi32(sqsum_32, z),
-                                              _mm_unpacklo_epi32(sqsum_32, z)));
-
-    imgBufO += width_ * skip_num_;
-    imgBufP += width_ * skip_num_;
-    num_pixels += (width_end - border_);
-  }
-
-  __m128i sad_final_128;
-  __m128i sum_final_128;
-  __m128i sqsum_final_128;
-
-  // Bring sums out of vector registers and into integer register
-  // domain, summing them along the way.
-  _mm_store_si128(&sad_final_128, sad_64);
-  _mm_store_si128(&sum_final_128, sum_64);
-  _mm_store_si128(&sqsum_final_128, sqsum_64);
-
-  uint64_t* sad_final_64 = reinterpret_cast<uint64_t*>(&sad_final_128);
-  uint64_t* sum_final_64 = reinterpret_cast<uint64_t*>(&sum_final_128);
-  uint64_t* sqsum_final_64 = reinterpret_cast<uint64_t*>(&sqsum_final_128);
-
-  const uint32_t pixelSum = sum_final_64[0] + sum_final_64[1];
-  const uint64_t pixelSqSum = sqsum_final_64[0] + sqsum_final_64[1];
-  const uint32_t tempDiffSum = sad_final_64[0] + sad_final_64[1];
-
-  // Default.
-  motion_magnitude_ = 0.0f;
-
-  if (tempDiffSum == 0)
-    return VPM_OK;
-
-  // Normalize over all pixels.
-  const float tempDiffAvg =
-      static_cast<float>(tempDiffSum) / static_cast<float>(num_pixels);
-  const float pixelSumAvg =
-      static_cast<float>(pixelSum) / static_cast<float>(num_pixels);
-  const float pixelSqSumAvg =
-      static_cast<float>(pixelSqSum) / static_cast<float>(num_pixels);
-  float contrast = pixelSqSumAvg - (pixelSumAvg * pixelSumAvg);
-
-  if (contrast > 0.0) {
-    contrast = sqrt(contrast);
-    motion_magnitude_ = tempDiffAvg / contrast;
-  }
-
-  return VPM_OK;
-}
-
-int32_t VPMContentAnalysis::ComputeSpatialMetrics_SSE2() {
-  const uint8_t* imgBuf = orig_frame_ + border_ * width_;
-  const int32_t width_end = ((width_ - 2 * border_) & -16) + border_;
-
-  __m128i se_32 = _mm_setzero_si128();
-  __m128i sev_32 = _mm_setzero_si128();
-  __m128i seh_32 = _mm_setzero_si128();
-  __m128i msa_32 = _mm_setzero_si128();
-  const __m128i z = _mm_setzero_si128();
-
-  // Error is accumulated as a 32 bit value.  Looking at HD content with a
-  // height of 1080 lines, or about 67 macro blocks.  If the 16 bit row
-  // value is maxed out at 65535 for every row, 65535*1080 = 70777800, which
-  // will not roll over a 32 bit accumulator.
-  // skip_num_ is also used to reduce the number of rows
-  for (int32_t i = 0; i < (height_ - 2 * border_); i += skip_num_) {
-    __m128i se_16 = _mm_setzero_si128();
-    __m128i sev_16 = _mm_setzero_si128();
-    __m128i seh_16 = _mm_setzero_si128();
-    __m128i msa_16 = _mm_setzero_si128();
-
-    // Row error is accumulated as a 16 bit value.  There are 8
-    // accumulators.  Max value of a 16 bit number is 65535.  Looking
-    // at HD content (1080p), a row is 1920 pixels wide, i.e. 120 macro blocks.
-    // One macro block is processed at a time.  Absolute max error at
-    // a point would be abs(0-255+255+255+255) which equals 1020.
-    // 120*1020 = 122400.  The probability of hitting this is quite low
-    // on well behaved content.  A specially crafted image could roll over.
-    // border_ could also be adjusted to concentrate on just the center of
-    // the images for an HD capture in order to reduce the possibility of
-    // rollover.
-    const uint8_t* lineTop = imgBuf - width_ + border_;
-    const uint8_t* lineCen = imgBuf + border_;
-    const uint8_t* lineBot = imgBuf + width_ + border_;
-
-    for (int32_t j = 0; j < width_end - border_; j += 16) {
-      const __m128i t = _mm_loadu_si128((__m128i*)(lineTop));
-      const __m128i l = _mm_loadu_si128((__m128i*)(lineCen - 1));
-      const __m128i c = _mm_loadu_si128((__m128i*)(lineCen));
-      const __m128i r = _mm_loadu_si128((__m128i*)(lineCen + 1));
-      const __m128i b = _mm_loadu_si128((__m128i*)(lineBot));
-
-      lineTop += 16;
-      lineCen += 16;
-      lineBot += 16;
-
-      // center pixel unpacked
-      __m128i clo = _mm_unpacklo_epi8(c, z);
-      __m128i chi = _mm_unpackhi_epi8(c, z);
-
-      // left right pixels unpacked and added together
-      const __m128i lrlo =
-          _mm_add_epi16(_mm_unpacklo_epi8(l, z), _mm_unpacklo_epi8(r, z));
-      const __m128i lrhi =
-          _mm_add_epi16(_mm_unpackhi_epi8(l, z), _mm_unpackhi_epi8(r, z));
-
-      // top & bottom pixels unpacked and added together
-      const __m128i tblo =
-          _mm_add_epi16(_mm_unpacklo_epi8(t, z), _mm_unpacklo_epi8(b, z));
-      const __m128i tbhi =
-          _mm_add_epi16(_mm_unpackhi_epi8(t, z), _mm_unpackhi_epi8(b, z));
-
-      // running sum of all pixels
-      msa_16 = _mm_add_epi16(msa_16, _mm_add_epi16(chi, clo));
-
-      clo = _mm_slli_epi16(clo, 1);
-      chi = _mm_slli_epi16(chi, 1);
-      const __m128i sevtlo = _mm_subs_epi16(clo, tblo);
-      const __m128i sevthi = _mm_subs_epi16(chi, tbhi);
-      const __m128i sehtlo = _mm_subs_epi16(clo, lrlo);
-      const __m128i sehthi = _mm_subs_epi16(chi, lrhi);
-
-      clo = _mm_slli_epi16(clo, 1);
-      chi = _mm_slli_epi16(chi, 1);
-      const __m128i setlo = _mm_subs_epi16(clo, _mm_add_epi16(lrlo, tblo));
-      const __m128i sethi = _mm_subs_epi16(chi, _mm_add_epi16(lrhi, tbhi));
-
-      // Add to 16 bit running sum
-      se_16 =
-          _mm_add_epi16(se_16, _mm_max_epi16(setlo, _mm_subs_epi16(z, setlo)));
-      se_16 =
-          _mm_add_epi16(se_16, _mm_max_epi16(sethi, _mm_subs_epi16(z, sethi)));
-      sev_16 = _mm_add_epi16(sev_16,
-                             _mm_max_epi16(sevtlo, _mm_subs_epi16(z, sevtlo)));
-      sev_16 = _mm_add_epi16(sev_16,
-                             _mm_max_epi16(sevthi, _mm_subs_epi16(z, sevthi)));
-      seh_16 = _mm_add_epi16(seh_16,
-                             _mm_max_epi16(sehtlo, _mm_subs_epi16(z, sehtlo)));
-      seh_16 = _mm_add_epi16(seh_16,
-                             _mm_max_epi16(sehthi, _mm_subs_epi16(z, sehthi)));
-    }
-
-    // Add to 32 bit running sum so as not to roll over.
-    se_32 = _mm_add_epi32(se_32, _mm_add_epi32(_mm_unpackhi_epi16(se_16, z),
-                                               _mm_unpacklo_epi16(se_16, z)));
-    sev_32 =
-        _mm_add_epi32(sev_32, _mm_add_epi32(_mm_unpackhi_epi16(sev_16, z),
-                                            _mm_unpacklo_epi16(sev_16, z)));
-    seh_32 =
-        _mm_add_epi32(seh_32, _mm_add_epi32(_mm_unpackhi_epi16(seh_16, z),
-                                            _mm_unpacklo_epi16(seh_16, z)));
-    msa_32 =
-        _mm_add_epi32(msa_32, _mm_add_epi32(_mm_unpackhi_epi16(msa_16, z),
-                                            _mm_unpacklo_epi16(msa_16, z)));
-
-    imgBuf += width_ * skip_num_;
-  }
-
-  __m128i se_128;
-  __m128i sev_128;
-  __m128i seh_128;
-  __m128i msa_128;
-
-  // Bring sums out of vector registers and into integer register
-  // domain, summing them along the way.
-  _mm_store_si128(&se_128, _mm_add_epi64(_mm_unpackhi_epi32(se_32, z),
-                                         _mm_unpacklo_epi32(se_32, z)));
-  _mm_store_si128(&sev_128, _mm_add_epi64(_mm_unpackhi_epi32(sev_32, z),
-                                          _mm_unpacklo_epi32(sev_32, z)));
-  _mm_store_si128(&seh_128, _mm_add_epi64(_mm_unpackhi_epi32(seh_32, z),
-                                          _mm_unpacklo_epi32(seh_32, z)));
-  _mm_store_si128(&msa_128, _mm_add_epi64(_mm_unpackhi_epi32(msa_32, z),
-                                          _mm_unpacklo_epi32(msa_32, z)));
-
-  uint64_t* se_64 = reinterpret_cast<uint64_t*>(&se_128);
-  uint64_t* sev_64 = reinterpret_cast<uint64_t*>(&sev_128);
-  uint64_t* seh_64 = reinterpret_cast<uint64_t*>(&seh_128);
-  uint64_t* msa_64 = reinterpret_cast<uint64_t*>(&msa_128);
-
-  const uint32_t spatialErrSum = se_64[0] + se_64[1];
-  const uint32_t spatialErrVSum = sev_64[0] + sev_64[1];
-  const uint32_t spatialErrHSum = seh_64[0] + seh_64[1];
-  const uint32_t pixelMSA = msa_64[0] + msa_64[1];
-
-  // Normalize over all pixels.
-  const float spatialErr = static_cast<float>(spatialErrSum >> 2);
-  const float spatialErrH = static_cast<float>(spatialErrHSum >> 1);
-  const float spatialErrV = static_cast<float>(spatialErrVSum >> 1);
-  const float norm = static_cast<float>(pixelMSA);
-
-  // 2X2:
-  spatial_pred_err_ = spatialErr / norm;
-
-  // 1X2:
-  spatial_pred_err_h_ = spatialErrH / norm;
-
-  // 2X1:
-  spatial_pred_err_v_ = spatialErrV / norm;
-
-  return VPM_OK;
-}
-
-}  // namespace webrtc
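For reference, the routine removed above computes three spatial prediction errors per frame: the absolute difference between the scaled center pixel and its four-neighbor sum (2X2), its horizontal neighbors (1X2), and its vertical neighbors (2X1), each normalized by the total pixel sum. Below is a minimal scalar sketch of the same metric, assuming a contiguous 8-bit luma plane and ignoring the original's row subsampling (skip_num_) and saturating 16-bit arithmetic; it is not the removed implementation.

#include <cstdint>
#include <cstdlib>

struct SpatialErrors {
  float pred_err;    // 2X2: |4c - (l + r + t + b)| / 4, normalized.
  float pred_err_h;  // 1X2: |2c - (l + r)| / 2, normalized.
  float pred_err_v;  // 2X1: |2c - (t + b)| / 2, normalized.
};

// Requires border >= 1 so the neighbor taps stay inside the plane.
SpatialErrors ComputeSpatialErrorsScalar(const uint8_t* img,
                                         int width,
                                         int height,
                                         int border) {
  uint64_t se = 0;   // 2X2 error sum.
  uint64_t seh = 0;  // 1X2 (horizontal) error sum.
  uint64_t sev = 0;  // 2X1 (vertical) error sum.
  uint64_t msa = 0;  // Sum of center pixels, used as the normalizer.
  for (int i = border; i < height - border; ++i) {
    for (int j = border; j < width - border; ++j) {
      const int c = img[i * width + j];
      const int l = img[i * width + j - 1];
      const int r = img[i * width + j + 1];
      const int t = img[(i - 1) * width + j];
      const int b = img[(i + 1) * width + j];
      se += std::abs(4 * c - (l + r + t + b));
      seh += std::abs(2 * c - (l + r));
      sev += std::abs(2 * c - (t + b));
      msa += c;
    }
  }
  const float norm = static_cast<float>(msa);
  return {(se / 4) / norm, (seh / 2) / norm, (sev / 2) / norm};
}

The SSE2 version stages the sums through 16-, 32- and 64-bit accumulators purely to avoid overflow; the per-pixel arithmetic is otherwise the same.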
diff --git a/webrtc/modules/video_processing/frame_preprocessor.cc b/webrtc/modules/video_processing/frame_preprocessor.cc
index 7393af8..100cdb5 100644
--- a/webrtc/modules/video_processing/frame_preprocessor.cc
+++ b/webrtc/modules/video_processing/frame_preprocessor.cc
@@ -15,12 +15,8 @@
 namespace webrtc {
 
 VPMFramePreprocessor::VPMFramePreprocessor()
-    : content_metrics_(nullptr),
-      resampled_frame_(),
-      enable_ca_(false),
-      frame_cnt_(0) {
+    : resampled_frame_(), frame_cnt_(0) {
   spatial_resampler_ = new VPMSimpleSpatialResampler();
-  ca_ = new VPMContentAnalysis(true);
   vd_ = new VPMVideoDecimator();
   EnableDenoising(false);
   denoised_frame_toggle_ = 0;
@@ -28,17 +24,13 @@
 
 VPMFramePreprocessor::~VPMFramePreprocessor() {
   Reset();
-  delete ca_;
   delete vd_;
   delete spatial_resampler_;
 }
 
 void VPMFramePreprocessor::Reset() {
-  ca_->Release();
   vd_->Reset();
-  content_metrics_ = nullptr;
   spatial_resampler_->Reset();
-  enable_ca_ = false;
   frame_cnt_ = 0;
 }
 
@@ -46,10 +38,6 @@
   vd_->EnableTemporalDecimation(enable);
 }
 
-void VPMFramePreprocessor::EnableContentAnalysis(bool enable) {
-  enable_ca_ = enable;
-}
-
 void VPMFramePreprocessor::SetInputFrameResampleMode(
     VideoFrameResampling resampling_mode) {
   spatial_resampler_->SetInputFrameResampleMode(resampling_mode);
@@ -131,18 +119,8 @@
     current_frame = &resampled_frame_;
   }
 
-  // Perform content analysis on the frame to be encoded.
-  if (enable_ca_ && frame_cnt_ % kSkipFrameCA == 0) {
-    // Compute new metrics every |kSkipFrameCA| frames, starting with
-    // the first frame.
-    content_metrics_ = ca_->ComputeContentMetrics(*current_frame);
-  }
   ++frame_cnt_;
   return current_frame;
 }
 
-VideoContentMetrics* VPMFramePreprocessor::GetContentMetrics() const {
-  return content_metrics_;
-}
-
 }  // namespace webrtc
diff --git a/webrtc/modules/video_processing/frame_preprocessor.h b/webrtc/modules/video_processing/frame_preprocessor.h
index 4c5a6e4..4ac6b76 100644
--- a/webrtc/modules/video_processing/frame_preprocessor.h
+++ b/webrtc/modules/video_processing/frame_preprocessor.h
@@ -14,7 +14,6 @@
 #include <memory>
 
 #include "webrtc/modules/video_processing/include/video_processing.h"
-#include "webrtc/modules/video_processing/content_analysis.h"
 #include "webrtc/modules/video_processing/spatial_resampler.h"
 #include "webrtc/modules/video_processing/video_decimator.h"
 #include "webrtc/typedefs.h"
@@ -38,9 +37,6 @@
 
   void SetInputFrameResampleMode(VideoFrameResampling resampling_mode);
 
-  // Enable content analysis.
-  void EnableContentAnalysis(bool enable);
-
   // Set target resolution: frame rate and dimension.
   int32_t SetTargetResolution(uint32_t width,
                               uint32_t height,
@@ -59,21 +55,17 @@
   // Preprocess output:
   void EnableDenoising(bool enable);
   const VideoFrame* PreprocessFrame(const VideoFrame& frame);
-  VideoContentMetrics* GetContentMetrics() const;
 
  private:
   // The content does not change so much every frame, so to reduce complexity
   // we can compute new content metrics every |kSkipFrameCA| frames.
   enum { kSkipFrameCA = 2 };
 
-  VideoContentMetrics* content_metrics_;
   VideoFrame denoised_frame_[2];
   VideoFrame resampled_frame_;
   VPMSpatialResampler* spatial_resampler_;
-  VPMContentAnalysis* ca_;
   VPMVideoDecimator* vd_;
   std::unique_ptr<VideoDenoiser> denoiser_;
-  bool enable_ca_;
   uint8_t denoised_frame_toggle_;
   uint32_t frame_cnt_;
 };
diff --git a/webrtc/modules/video_processing/include/video_processing.h b/webrtc/modules/video_processing/include/video_processing.h
index 826327e..e2069dd 100644
--- a/webrtc/modules/video_processing/include/video_processing.h
+++ b/webrtc/modules/video_processing/include/video_processing.h
@@ -53,9 +53,6 @@
 
   virtual void EnableDenoising(bool enable) = 0;
   virtual const VideoFrame* PreprocessFrame(const VideoFrame& frame) = 0;
-
-  virtual VideoContentMetrics* GetContentMetrics() const = 0;
-  virtual void EnableContentAnalysis(bool enable) = 0;
 };
 
 }  // namespace webrtc
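With GetContentMetrics() and EnableContentAnalysis() gone, the VideoProcessing interface is reduced to decimation, resampling, denoising and PreprocessFrame(). A hedged usage sketch against the remaining methods follows; frame construction is omitted, Create() is assumed to return a raw pointer (as its use in ViEEncoder further below suggests), and a null return from PreprocessFrame() is assumed to mean the frame was dropped.

#include <memory>

#include "webrtc/modules/video_processing/include/video_processing.h"

void PreprocessOneFrame(const webrtc::VideoFrame& input) {
  std::unique_ptr<webrtc::VideoProcessing> vp(
      webrtc::VideoProcessing::Create());
  vp->EnableTemporalDecimation(true);
  vp->EnableDenoising(false);
  vp->SetTargetResolution(640, 360, 30);  // width, height, frame rate.
  const webrtc::VideoFrame* out = vp->PreprocessFrame(input);
  if (out == nullptr) {
    // Assumption: a null return means the frame was dropped (e.g. by the
    // temporal decimator) and nothing should be encoded for it.
    return;
  }
  // |out| may point at an internally owned resampled/denoised frame or at
  // |input| itself; hand it to the encoder from here.
}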
diff --git a/webrtc/modules/video_processing/test/content_metrics_test.cc b/webrtc/modules/video_processing/test/content_metrics_test.cc
deleted file mode 100644
index 80bb564..0000000
--- a/webrtc/modules/video_processing/test/content_metrics_test.cc
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <memory>
-
-#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
-#include "webrtc/modules/video_processing/include/video_processing.h"
-#include "webrtc/modules/video_processing/content_analysis.h"
-#include "webrtc/modules/video_processing/test/video_processing_unittest.h"
-
-namespace webrtc {
-
-#if defined(WEBRTC_IOS)
-TEST_F(VideoProcessingTest, DISABLED_ContentAnalysis) {
-#else
-TEST_F(VideoProcessingTest, ContentAnalysis) {
-#endif
-  VPMContentAnalysis ca__c(false);
-  VPMContentAnalysis ca__sse(true);
-  VideoContentMetrics* _cM_c;
-  VideoContentMetrics* _cM_SSE;
-
-  ca__c.Initialize(width_, height_);
-  ca__sse.Initialize(width_, height_);
-
-  std::unique_ptr<uint8_t[]> video_buffer(new uint8_t[frame_length_]);
-  while (fread(video_buffer.get(), 1, frame_length_, source_file_) ==
-         frame_length_) {
-    // Using ConvertToI420 to add stride to the image.
-    EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_, height_,
-                               0, kVideoRotation_0, &video_frame_));
-    _cM_c = ca__c.ComputeContentMetrics(video_frame_);
-    _cM_SSE = ca__sse.ComputeContentMetrics(video_frame_);
-
-    ASSERT_EQ(_cM_c->spatial_pred_err, _cM_SSE->spatial_pred_err);
-    ASSERT_EQ(_cM_c->spatial_pred_err_v, _cM_SSE->spatial_pred_err_v);
-    ASSERT_EQ(_cM_c->spatial_pred_err_h, _cM_SSE->spatial_pred_err_h);
-    ASSERT_EQ(_cM_c->motion_magnitude, _cM_SSE->motion_magnitude);
-  }
-  ASSERT_NE(0, feof(source_file_)) << "Error reading source file";
-}
-
-}  // namespace webrtc
diff --git a/webrtc/modules/video_processing/test/video_processing_unittest.cc b/webrtc/modules/video_processing/test/video_processing_unittest.cc
index 790994f..2d0c686 100644
--- a/webrtc/modules/video_processing/test/video_processing_unittest.cc
+++ b/webrtc/modules/video_processing/test/video_processing_unittest.cc
@@ -126,8 +126,6 @@
   rewind(source_file_);
   ASSERT_TRUE(source_file_ != NULL) << "Cannot read input file \n";
 
-  // CA not needed here
-  vp_->EnableContentAnalysis(false);
   // no temporal decimation
   vp_->EnableTemporalDecimation(false);
 
diff --git a/webrtc/modules/video_processing/video_processing.gypi b/webrtc/modules/video_processing/video_processing.gypi
index 429d74c..3e90fd2 100644
--- a/webrtc/modules/video_processing/video_processing.gypi
+++ b/webrtc/modules/video_processing/video_processing.gypi
@@ -20,8 +20,6 @@
       'sources': [
         'include/video_processing.h',
         'include/video_processing_defines.h',
-        'content_analysis.cc',
-        'content_analysis.h',
         'frame_preprocessor.cc',
         'frame_preprocessor.h',
         'spatial_resampler.cc',
@@ -58,7 +56,6 @@
           'target_name': 'video_processing_sse2',
           'type': 'static_library',
           'sources': [
-            'content_analysis_sse2.cc',
             'util/denoiser_filter_sse2.cc',
             'util/denoiser_filter_sse2.h',
           ],
diff --git a/webrtc/modules/video_processing/video_processing_impl.cc b/webrtc/modules/video_processing/video_processing_impl.cc
index b4f86ae..86f75bf 100644
--- a/webrtc/modules/video_processing/video_processing_impl.cc
+++ b/webrtc/modules/video_processing/video_processing_impl.cc
@@ -69,14 +69,4 @@
   return frame_pre_processor_.PreprocessFrame(frame);
 }
 
-VideoContentMetrics* VideoProcessingImpl::GetContentMetrics() const {
-  rtc::CritScope mutex(&mutex_);
-  return frame_pre_processor_.GetContentMetrics();
-}
-
-void VideoProcessingImpl::EnableContentAnalysis(bool enable) {
-  rtc::CritScope mutex(&mutex_);
-  frame_pre_processor_.EnableContentAnalysis(enable);
-}
-
 }  // namespace webrtc
diff --git a/webrtc/modules/video_processing/video_processing_impl.h b/webrtc/modules/video_processing/video_processing_impl.h
index 7f6ef08..21e23c9 100644
--- a/webrtc/modules/video_processing/video_processing_impl.h
+++ b/webrtc/modules/video_processing/video_processing_impl.h
@@ -26,7 +26,6 @@
   // Implements VideoProcessing.
   void EnableTemporalDecimation(bool enable) override;
   void SetInputFrameResampleMode(VideoFrameResampling resampling_mode) override;
-  void EnableContentAnalysis(bool enable) override;
   int32_t SetTargetResolution(uint32_t width,
                               uint32_t height,
                               uint32_t frame_rate) override;
@@ -35,7 +34,6 @@
   uint32_t GetDecimatedHeight() const override;
   void EnableDenoising(bool enable) override;
   const VideoFrame* PreprocessFrame(const VideoFrame& frame) override;
-  VideoContentMetrics* GetContentMetrics() const override;
 
  private:
   rtc::CriticalSection mutex_;
diff --git a/webrtc/video/vie_encoder.cc b/webrtc/video/vie_encoder.cc
index e2ac521..2ed7110 100644
--- a/webrtc/video/vie_encoder.cc
+++ b/webrtc/video/vie_encoder.cc
@@ -30,33 +30,13 @@
 
 static const float kStopPaddingThresholdMs = 2000;
 
-class QMVideoSettingsCallback : public VCMQMSettingsCallback {
- public:
-  explicit QMVideoSettingsCallback(VideoProcessing* vpm);
-
-  ~QMVideoSettingsCallback();
-
-  // Update VPM with QM (quality modes: frame size & frame rate) settings.
-  int32_t SetVideoQMSettings(const uint32_t frame_rate,
-                             const uint32_t width,
-                             const uint32_t height);
-
- private:
-  VideoProcessing* vp_;
-};
-
 ViEEncoder::ViEEncoder(uint32_t number_of_cores,
                        ProcessThread* module_process_thread,
                        SendStatisticsProxy* stats_proxy,
                        OveruseFrameDetector* overuse_detector)
     : number_of_cores_(number_of_cores),
       vp_(VideoProcessing::Create()),
-      qm_callback_(new QMVideoSettingsCallback(vp_.get())),
-      video_sender_(Clock::GetRealTimeClock(),
-                    this,
-                    this,
-                    qm_callback_.get(),
-                    this),
+      video_sender_(Clock::GetRealTimeClock(), this, this, this),
       stats_proxy_(stats_proxy),
       overuse_detector_(overuse_detector),
       time_of_last_frame_activity_ms_(0),
@@ -74,9 +54,6 @@
       video_suspended_(false) {
   module_process_thread_->RegisterModule(&video_sender_);
   vp_->EnableTemporalDecimation(true);
-
-  // Enable/disable content analysis: off by default for now.
-  vp_->EnableContentAnalysis(false);
 }
 
 vcm::VideoSender* ViEEncoder::video_sender() {
@@ -291,11 +268,10 @@
       has_received_rpsi_ = false;
     }
 
-    video_sender_.AddVideoFrame(*frame_to_send, vp_->GetContentMetrics(),
-                                &codec_specific_info);
+    video_sender_.AddVideoFrame(*frame_to_send, &codec_specific_info);
     return;
   }
-  video_sender_.AddVideoFrame(*frame_to_send, nullptr, nullptr);
+  video_sender_.AddVideoFrame(*frame_to_send, nullptr);
 }
 
 void ViEEncoder::SendKeyFrame() {
@@ -391,18 +367,4 @@
     stats_proxy_->OnSuspendChange(video_is_suspended);
 }
 
-QMVideoSettingsCallback::QMVideoSettingsCallback(VideoProcessing* vpm)
-    : vp_(vpm) {
-}
-
-QMVideoSettingsCallback::~QMVideoSettingsCallback() {
-}
-
-int32_t QMVideoSettingsCallback::SetVideoQMSettings(
-    const uint32_t frame_rate,
-    const uint32_t width,
-    const uint32_t height) {
-  return vp_->SetTargetResolution(width, height, frame_rate);
-}
-
 }  // namespace webrtc
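The deleted QMVideoSettingsCallback did nothing beyond forwarding quality-mode settings to VideoProcessing::SetTargetResolution(), which stays on the interface; anything that still needs to steer the preprocessor's target frame size and rate can call it directly. An illustrative sketch (the helper name is hypothetical):

#include <cstdint>

#include "webrtc/modules/video_processing/include/video_processing.h"

// Hypothetical helper mirroring what the removed callback body did.
void ApplyQualitySettings(webrtc::VideoProcessing* vp,
                          uint32_t frame_rate,
                          uint32_t width,
                          uint32_t height) {
  // Same call the callback forwarded; note the argument order
  // (width, height, frame_rate) differs from the callback's parameters.
  vp->SetTargetResolution(width, height, frame_rate);
}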
diff --git a/webrtc/video/vie_encoder.h b/webrtc/video/vie_encoder.h
index e309dd5..f846bdf 100644
--- a/webrtc/video/vie_encoder.h
+++ b/webrtc/video/vie_encoder.h
@@ -34,7 +34,6 @@
 class OveruseFrameDetector;
 class PacedSender;
 class ProcessThread;
-class QMVideoSettingsCallback;
 class SendStatisticsProxy;
 class ViEBitrateObserver;
 class ViEEffectFilter;
@@ -124,7 +123,6 @@
   const uint32_t number_of_cores_;
 
   const std::unique_ptr<VideoProcessing> vp_;
-  const std::unique_ptr<QMVideoSettingsCallback> qm_callback_;
   vcm::VideoSender video_sender_;
 
   rtc::CriticalSection data_cs_;