Pull out the PostFilter into its own NonlinearBeamformer API

This is done to avoid having a nonlinear component in the AEC path.
Now the linear delay-and-sum runs before the AEC and the postfilter after it.
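
A condensed sketch of the resulting capture-path ordering, pieced together
from the hunks below (names are those used in audio_processing_impl.cc;
error handling and unrelated submodules are omitted):

  // Before the AEC: linear analysis / delay-and-sum.
  if (capture_nonlocked_.beamformer_enabled) {
    private_submodules_->beamformer->AnalyzeChunk(*ca->split_data_f());
    ca->set_num_channels(1);  // Keep only the leftmost (beamformed) channel.
  }

  // ... echo_cancellation and echo_control_mobile process the linear output ...

  // After the AEC: nonlinear postfilter.
  if (capture_nonlocked_.beamformer_enabled) {
    private_submodules_->beamformer->PostFilter(ca->split_data_f());
  }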

This change landed originally at: https://codereview.webrtc.org/1982183002/

R=peah@webrtc.org
TBR=henrik.lundin@webrtc.org

Review URL: https://codereview.webrtc.org/2110593003 .

Cr-Commit-Position: refs/heads/master@{#13371}
diff --git a/webrtc/modules/audio_processing/audio_processing_impl.cc b/webrtc/modules/audio_processing/audio_processing_impl.cc
index afeebba..2a2e54d 100644
--- a/webrtc/modules/audio_processing/audio_processing_impl.cc
+++ b/webrtc/modules/audio_processing/audio_processing_impl.cc
@@ -128,10 +128,10 @@
 };
 
 struct AudioProcessingImpl::ApmPrivateSubmodules {
-  explicit ApmPrivateSubmodules(Beamformer<float>* beamformer)
+  explicit ApmPrivateSubmodules(NonlinearBeamformer* beamformer)
       : beamformer(beamformer) {}
   // Accessed internally from capture or during initialization
-  std::unique_ptr<Beamformer<float>> beamformer;
+  std::unique_ptr<NonlinearBeamformer> beamformer;
   std::unique_ptr<AgcManagerDirect> agc_manager;
   std::unique_ptr<LevelController> level_controller;
 };
@@ -146,7 +146,7 @@
 }
 
 AudioProcessing* AudioProcessing::Create(const Config& config,
-                                         Beamformer<float>* beamformer) {
+                                         NonlinearBeamformer* beamformer) {
   AudioProcessingImpl* apm = new AudioProcessingImpl(config, beamformer);
   if (apm->Initialize() != kNoError) {
     delete apm;
@@ -160,7 +160,7 @@
     : AudioProcessingImpl(config, nullptr) {}
 
 AudioProcessingImpl::AudioProcessingImpl(const Config& config,
-                                         Beamformer<float>* beamformer)
+                                         NonlinearBeamformer* beamformer)
     : public_submodules_(new ApmPublicSubmodules()),
       private_submodules_(new ApmPrivateSubmodules(beamformer)),
       constants_(config.Get<ExperimentalAgc>().startup_min_volume,
@@ -699,8 +699,8 @@
   }
 
   if (capture_nonlocked_.beamformer_enabled) {
-    private_submodules_->beamformer->ProcessChunk(*ca->split_data_f(),
-                                                  ca->split_data_f());
+    private_submodules_->beamformer->AnalyzeChunk(*ca->split_data_f());
+    // Discards all channels but the leftmost one.
     ca->set_num_channels(1);
   }
 
@@ -746,6 +746,10 @@
   RETURN_ON_ERR(public_submodules_->echo_control_mobile->ProcessCaptureAudio(
       ca, stream_delay_ms()));
 
+  if (capture_nonlocked_.beamformer_enabled) {
+    private_submodules_->beamformer->PostFilter(ca->split_data_f());
+  }
+
   public_submodules_->voice_detection->ProcessCaptureAudio(ca);
 
   if (constants_.use_experimental_agc &&
@@ -1223,7 +1227,7 @@
   if (capture_nonlocked_.beamformer_enabled) {
     if (!private_submodules_->beamformer) {
       private_submodules_->beamformer.reset(new NonlinearBeamformer(
-          capture_.array_geometry, capture_.target_direction));
+          capture_.array_geometry, 1u, capture_.target_direction));
     }
     private_submodules_->beamformer->Initialize(kChunkSizeMs,
                                                 capture_nonlocked_.split_rate);
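
The extra 1u passed to the NonlinearBeamformer constructor in the last hunk is
assumed here to be the number of channels the postfilter is set up for (the
beamformed capture path is collapsed to a single channel before the AEC), i.e.
a constructor shaped roughly like:

  // Hypothetical parameter name; only the 1u argument is taken from the diff.
  NonlinearBeamformer(const std::vector<Point>& array_geometry,
                      size_t num_postfilter_channels,
                      SphericalPointf target_direction);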