Use backticks not vertical bars to denote variables in comments for /modules/audio_processing

Bug: webrtc:12338
Change-Id: I85bff694dd2ead83c939c4d1945eff82e1296001
No-Presubmit: True
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/227161
Commit-Queue: Artem Titov <titovartem@webrtc.org>
Reviewed-by: Harald Alvestrand <hta@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#34690}
diff --git a/modules/audio_processing/aec_dump/aec_dump_factory.h b/modules/audio_processing/aec_dump/aec_dump_factory.h
index 429a8a5..c902a58 100644
--- a/modules/audio_processing/aec_dump/aec_dump_factory.h
+++ b/modules/audio_processing/aec_dump/aec_dump_factory.h
@@ -26,10 +26,10 @@
 
 class RTC_EXPORT AecDumpFactory {
  public:
-  // The |worker_queue| may not be null and must outlive the created
+  // The `worker_queue` may not be null and must outlive the created
   // AecDump instance. |max_log_size_bytes == -1| means the log size
-  // will be unlimited. |handle| may not be null. The AecDump takes
-  // responsibility for |handle| and closes it in the destructor. A
+  // will be unlimited. `handle` may not be null. The AecDump takes
+  // responsibility for `handle` and closes it in the destructor. A
   // non-null return value indicates that the file has been
   // sucessfully opened.
   static std::unique_ptr<AecDump> Create(webrtc::FileWrapper file,
diff --git a/modules/audio_processing/aecm/aecm_core.cc b/modules/audio_processing/aecm/aecm_core.cc
index 78c0133..fbc3239 100644
--- a/modules/audio_processing/aecm/aecm_core.cc
+++ b/modules/audio_processing/aecm/aecm_core.cc
@@ -124,7 +124,7 @@
     -1140, -998,  -856,  -713,  -571,  -428,  -285,  -142};
 
 
-// Moves the pointer to the next entry and inserts |far_spectrum| and
+// Moves the pointer to the next entry and inserts `far_spectrum` and
 // corresponding Q-domain in its buffer.
 //
 // Inputs:
@@ -574,7 +574,7 @@
   // Obtain an output frame.
   WebRtc_ReadBuffer(aecm->outFrameBuf, (void**)&out_ptr, out, FRAME_LEN);
   if (out_ptr != out) {
-    // ReadBuffer() hasn't copied to |out| in this case.
+    // ReadBuffer() hasn't copied to `out` in this case.
     memcpy(out, out_ptr, FRAME_LEN * sizeof(int16_t));
   }
 
@@ -616,22 +616,22 @@
 
 // ExtractFractionPart(a, zeros)
 //
-// returns the fraction part of |a|, with |zeros| number of leading zeros, as an
-// int16_t scaled to Q8. There is no sanity check of |a| in the sense that the
+// returns the fraction part of `a`, with `zeros` number of leading zeros, as an
+// int16_t scaled to Q8. There is no sanity check of `a` in the sense that the
 // number of zeros match.
 static int16_t ExtractFractionPart(uint32_t a, int zeros) {
   return (int16_t)(((a << zeros) & 0x7FFFFFFF) >> 23);
 }
 
-// Calculates and returns the log of |energy| in Q8. The input |energy| is
-// supposed to be in Q(|q_domain|).
+// Calculates and returns the log of `energy` in Q8. The input `energy` is
+// supposed to be in Q(`q_domain`).
 static int16_t LogOfEnergyInQ8(uint32_t energy, int q_domain) {
   static const int16_t kLogLowValue = PART_LEN_SHIFT << 7;
   int16_t log_energy_q8 = kLogLowValue;
   if (energy > 0) {
     int zeros = WebRtcSpl_NormU32(energy);
     int16_t frac = ExtractFractionPart(energy, zeros);
-    // log2 of |energy| in Q8.
+    // log2 of `energy` in Q8.
     log_energy_q8 += ((31 - zeros) << 8) + frac - (q_domain << 8);
   }
   return log_energy_q8;
diff --git a/modules/audio_processing/aecm/aecm_core.h b/modules/audio_processing/aecm/aecm_core.h
index aaa74e1..d6d0d8d 100644
--- a/modules/audio_processing/aecm/aecm_core.h
+++ b/modules/audio_processing/aecm/aecm_core.h
@@ -58,7 +58,7 @@
   void* delay_estimator;
   uint16_t currentDelay;
   // Far end history variables
-  // TODO(bjornv): Replace |far_history| with ring_buffer.
+  // TODO(bjornv): Replace `far_history` with ring_buffer.
   uint16_t far_history[PART_LEN1 * MAX_DELAY];
   int far_history_pos;
   int far_q_domains[MAX_DELAY];
@@ -271,7 +271,7 @@
 ////////////////////////////////////////////////////////////////////////////////
 // WebRtcAecm_UpdateFarHistory()
 //
-// Moves the pointer to the next entry and inserts |far_spectrum| and
+// Moves the pointer to the next entry and inserts `far_spectrum` and
 // corresponding Q-domain in its buffer.
 //
 // Inputs:
diff --git a/modules/audio_processing/aecm/aecm_core_c.cc b/modules/audio_processing/aecm/aecm_core_c.cc
index 7b6ca59..d363dd2 100644
--- a/modules/audio_processing/aecm/aecm_core_c.cc
+++ b/modules/audio_processing/aecm/aecm_core_c.cc
@@ -98,7 +98,7 @@
       // Track the minimum.
       if (aecm->noiseEst[i] < (1 << minTrackShift)) {
         // For small values, decrease noiseEst[i] every
-        // |kNoiseEstIncCount| block. The regular approach below can not
+        // `kNoiseEstIncCount` block. The regular approach below can not
         // go further down due to truncation.
         aecm->noiseEstTooHighCtr[i]++;
         if (aecm->noiseEstTooHighCtr[i] >= kNoiseEstIncCount) {
@@ -125,7 +125,7 @@
         aecm->noiseEst[i] >>= 11;
       } else {
         // Make incremental increases based on size every
-        // |kNoiseEstIncCount| block
+        // `kNoiseEstIncCount` block
         aecm->noiseEstTooLowCtr[i]++;
         if (aecm->noiseEstTooLowCtr[i] >= kNoiseEstIncCount) {
           aecm->noiseEst[i] += (aecm->noiseEst[i] >> 9) + 1;
@@ -181,7 +181,7 @@
   // FFT of signal
   for (i = 0; i < PART_LEN; i++) {
     // Window time domain signal and insert into real part of
-    // transformation array |fft|
+    // transformation array `fft`
     int16_t scaled_time_signal = time_signal[i] * (1 << time_signal_scaling);
     fft[i] = (int16_t)((scaled_time_signal * WebRtcAecm_kSqrtHanning[i]) >> 14);
     scaled_time_signal = time_signal[i + PART_LEN] * (1 << time_signal_scaling);
@@ -204,8 +204,8 @@
                                 const int16_t* nearendClean) {
   int i, j, outCFFT;
   int32_t tmp32no1;
-  // Reuse |efw| for the inverse FFT output after transferring
-  // the contents to |fft|.
+  // Reuse `efw` for the inverse FFT output after transferring
+  // the contents to `fft`.
   int16_t* ifft_out = (int16_t*)efw;
 
   // Synthesis
@@ -312,7 +312,7 @@
     } else {
       // Approximation for magnitude of complex fft output
       // magn = sqrt(real^2 + imag^2)
-      // magn ~= alpha * max(|imag|,|real|) + beta * min(|imag|,|real|)
+      // magn ~= alpha * max(abs(imag), abs(real)) + beta * min(abs(imag), abs(real))
       //
       // The parameters alpha and beta are stored in Q15
 
@@ -541,7 +541,7 @@
     }
 
     zeros16 = WebRtcSpl_NormW16(aecm->nearFilt[i]);
-    RTC_DCHECK_GE(zeros16, 0);  // |zeros16| is a norm, hence non-negative.
+    RTC_DCHECK_GE(zeros16, 0);  // `zeros16` is a norm, hence non-negative.
     dfa_clean_q_domain_diff = aecm->dfaCleanQDomain - aecm->dfaCleanQDomainOld;
     if (zeros16 < dfa_clean_q_domain_diff && aecm->nearFilt[i]) {
       tmp16no1 = aecm->nearFilt[i] * (1 << zeros16);
diff --git a/modules/audio_processing/aecm/aecm_core_mips.cc b/modules/audio_processing/aecm/aecm_core_mips.cc
index f2f43e1..828aa6d2f 100644
--- a/modules/audio_processing/aecm/aecm_core_mips.cc
+++ b/modules/audio_processing/aecm/aecm_core_mips.cc
@@ -822,7 +822,7 @@
     } else {
       // Approximation for magnitude of complex fft output
       // magn = sqrt(real^2 + imag^2)
-      // magn ~= alpha * max(|imag|,|real|) + beta * min(|imag|,|real|)
+      // magn ~= alpha * max(abs(imag), abs(real)) + beta * min(abs(imag), abs(real))
       //
       // The parameters alpha and beta are stored in Q15
       tmp16no1 = WEBRTC_SPL_ABS_W16(freq_signal[i].real);
@@ -1106,7 +1106,7 @@
     }
 
     zeros16 = WebRtcSpl_NormW16(aecm->nearFilt[i]);
-    RTC_DCHECK_GE(zeros16, 0);  // |zeros16| is a norm, hence non-negative.
+    RTC_DCHECK_GE(zeros16, 0);  // `zeros16` is a norm, hence non-negative.
     dfa_clean_q_domain_diff = aecm->dfaCleanQDomain - aecm->dfaCleanQDomainOld;
     if (zeros16 < dfa_clean_q_domain_diff && aecm->nearFilt[i]) {
       tmp16no1 = aecm->nearFilt[i] << zeros16;
@@ -1411,7 +1411,7 @@
       // Track the minimum.
       if (tnoise < (1 << minTrackShift)) {
         // For small values, decrease noiseEst[i] every
-        // |kNoiseEstIncCount| block. The regular approach below can not
+        // `kNoiseEstIncCount` block. The regular approach below can not
         // go further down due to truncation.
         aecm->noiseEstTooHighCtr[i]++;
         if (aecm->noiseEstTooHighCtr[i] >= kNoiseEstIncCount) {
@@ -1442,7 +1442,7 @@
               : "hi", "lo");
         } else {
           // Make incremental increases based on size every
-          // |kNoiseEstIncCount| block
+          // `kNoiseEstIncCount` block
           aecm->noiseEstTooLowCtr[i]++;
           if (aecm->noiseEstTooLowCtr[i] >= kNoiseEstIncCount) {
             __asm __volatile(
@@ -1484,7 +1484,7 @@
       // Track the minimum.
       if (tnoise1 < (1 << minTrackShift)) {
         // For small values, decrease noiseEst[i] every
-        // |kNoiseEstIncCount| block. The regular approach below can not
+        // `kNoiseEstIncCount` block. The regular approach below can not
         // go further down due to truncation.
         aecm->noiseEstTooHighCtr[i + 1]++;
         if (aecm->noiseEstTooHighCtr[i + 1] >= kNoiseEstIncCount) {
@@ -1515,7 +1515,7 @@
               : "hi", "lo");
         } else {
           // Make incremental increases based on size every
-          // |kNoiseEstIncCount| block
+          // `kNoiseEstIncCount` block
           aecm->noiseEstTooLowCtr[i + 1]++;
           if (aecm->noiseEstTooLowCtr[i + 1] >= kNoiseEstIncCount) {
             __asm __volatile(
diff --git a/modules/audio_processing/agc/agc.h b/modules/audio_processing/agc/agc.h
index b9bd5ea..2693d94 100644
--- a/modules/audio_processing/agc/agc.h
+++ b/modules/audio_processing/agc/agc.h
@@ -24,13 +24,13 @@
   Agc();
   virtual ~Agc();
 
-  // |audio| must be mono; in a multi-channel stream, provide the first (usually
+  // `audio` must be mono; in a multi-channel stream, provide the first (usually
   // left) channel.
   virtual void Process(const int16_t* audio, size_t length, int sample_rate_hz);
 
   // Retrieves the difference between the target RMS level and the current
   // signal RMS level in dB. Returns true if an update is available and false
-  // otherwise, in which case |error| should be ignored and no action taken.
+  // otherwise, in which case `error` should be ignored and no action taken.
   virtual bool GetRmsErrorDb(int* error);
   virtual void Reset();
 
diff --git a/modules/audio_processing/agc/agc_manager_direct.cc b/modules/audio_processing/agc/agc_manager_direct.cc
index e2a5b99..0cd67ca 100644
--- a/modules/audio_processing/agc/agc_manager_direct.cc
+++ b/modules/audio_processing/agc/agc_manager_direct.cc
@@ -280,7 +280,7 @@
 void MonoAgc::SetMaxLevel(int level) {
   RTC_DCHECK_GE(level, clipped_level_min_);
   max_level_ = level;
-  // Scale the |kSurplusCompressionGain| linearly across the restricted
+  // Scale the `kSurplusCompressionGain` linearly across the restricted
   // level range.
   max_compression_gain_ =
       kMaxCompressionGain + std::floor((1.f * kMaxMicLevel - max_level_) /
@@ -307,7 +307,7 @@
   int level = stream_analog_level_;
   // Reasons for taking action at startup:
   // 1) A person starting a call is expected to be heard.
-  // 2) Independent of interpretation of |level| == 0 we should raise it so the
+  // 2) Independent of interpretation of `level` == 0 we should raise it so the
   // AGC can do its job properly.
   if (level == 0 && !startup_) {
     RTC_DLOG(LS_INFO)
diff --git a/modules/audio_processing/agc/agc_manager_direct.h b/modules/audio_processing/agc/agc_manager_direct.h
index d80a255..27569d8 100644
--- a/modules/audio_processing/agc/agc_manager_direct.h
+++ b/modules/audio_processing/agc/agc_manager_direct.h
@@ -112,7 +112,7 @@
   FRIEND_TEST_ALL_PREFIXES(AgcManagerDirectStandaloneTest,
                            EnableClippingPredictorLowersVolume);
 
-  // Dependency injection for testing. Don't delete |agc| as the memory is owned
+  // Dependency injection for testing. Don't delete `agc` as the memory is owned
   // by the manager.
   AgcManagerDirect(
       Agc* agc,
@@ -196,7 +196,7 @@
 
   // Set the maximum level the AGC is allowed to apply. Also updates the
   // maximum compression gain to compensate. The level must be at least
-  // |kClippedLevelMin|.
+  // `kClippedLevelMin`.
   void SetMaxLevel(int level);
 
   int CheckVolumeAndReset();
diff --git a/modules/audio_processing/agc/gain_control.h b/modules/audio_processing/agc/gain_control.h
index f8c706b..389b211 100644
--- a/modules/audio_processing/agc/gain_control.h
+++ b/modules/audio_processing/agc/gain_control.h
@@ -20,12 +20,12 @@
 // Recommended to be enabled on the client-side.
 class GainControl {
  public:
-  // When an analog mode is set, this must be called prior to |ProcessStream()|
+  // When an analog mode is set, this must be called prior to `ProcessStream()`
   // to pass the current analog level from the audio HAL. Must be within the
-  // range provided to |set_analog_level_limits()|.
+  // range provided to `set_analog_level_limits()`.
   virtual int set_stream_analog_level(int level) = 0;
 
-  // When an analog mode is set, this should be called after |ProcessStream()|
+  // When an analog mode is set, this should be called after `ProcessStream()`
   // to obtain the recommended new analog level for the audio HAL. It is the
   // users responsibility to apply this level.
   virtual int stream_analog_level() const = 0;
@@ -33,7 +33,7 @@
   enum Mode {
     // Adaptive mode intended for use if an analog volume control is available
     // on the capture device. It will require the user to provide coupling
-    // between the OS mixer controls and AGC through the |stream_analog_level()|
+    // between the OS mixer controls and AGC through the `stream_analog_level()`
     // functions.
     //
     // It consists of an analog gain prescription for the audio device and a
@@ -61,7 +61,7 @@
   virtual int set_mode(Mode mode) = 0;
   virtual Mode mode() const = 0;
 
-  // Sets the target peak |level| (or envelope) of the AGC in dBFs (decibels
+  // Sets the target peak `level` (or envelope) of the AGC in dBFs (decibels
   // from digital full-scale). The convention is to use positive values. For
   // instance, passing in a value of 3 corresponds to -3 dBFs, or a target
   // level 3 dB below full-scale. Limited to [0, 31].
@@ -71,7 +71,7 @@
   virtual int set_target_level_dbfs(int level) = 0;
   virtual int target_level_dbfs() const = 0;
 
-  // Sets the maximum |gain| the digital compression stage may apply, in dB. A
+  // Sets the maximum `gain` the digital compression stage may apply, in dB. A
   // higher number corresponds to greater compression, while a value of 0 will
   // leave the signal uncompressed. Limited to [0, 90].
   virtual int set_compression_gain_db(int gain) = 0;
@@ -83,7 +83,7 @@
   virtual int enable_limiter(bool enable) = 0;
   virtual bool is_limiter_enabled() const = 0;
 
-  // Sets the |minimum| and |maximum| analog levels of the audio capture device.
+  // Sets the `minimum` and `maximum` analog levels of the audio capture device.
   // Must be set if and only if an analog mode is used. Limited to [0, 65535].
   virtual int set_analog_level_limits(int minimum, int maximum) = 0;
   virtual int analog_level_minimum() const = 0;
diff --git a/modules/audio_processing/agc/legacy/analog_agc.cc b/modules/audio_processing/agc/legacy/analog_agc.cc
index b53e3f9..e40a3f1 100644
--- a/modules/audio_processing/agc/legacy/analog_agc.cc
+++ b/modules/audio_processing/agc/legacy/analog_agc.cc
@@ -160,7 +160,7 @@
 
   /* apply slowly varying digital gain */
   if (stt->micVol > stt->maxAnalog) {
-    /* |maxLevel| is strictly >= |micVol|, so this condition should be
+    /* `maxLevel` is strictly >= `micVol`, so this condition should be
      * satisfied here, ensuring there is no divide-by-zero. */
     RTC_DCHECK_GT(stt->maxLevel, stt->maxAnalog);
 
diff --git a/modules/audio_processing/agc/legacy/digital_agc.cc b/modules/audio_processing/agc/legacy/digital_agc.cc
index e0c0766..4cd86ac 100644
--- a/modules/audio_processing/agc/legacy/digital_agc.cc
+++ b/modules/audio_processing/agc/legacy/digital_agc.cc
@@ -184,9 +184,9 @@
     numFIX -= (int32_t)logApprox * diffGain;       // Q14
 
     // Calculate ratio
-    // Shift |numFIX| as much as possible.
-    // Ensure we avoid wrap-around in |den| as well.
-    if (numFIX > (den >> 8) || -numFIX > (den >> 8)) {  // |den| is Q8.
+    // Shift `numFIX` as much as possible.
+    // Ensure we avoid wrap-around in `den` as well.
+    if (numFIX > (den >> 8) || -numFIX > (den >> 8)) {  // `den` is Q8.
       zeros = WebRtcSpl_NormW32(numFIX);
     } else {
       zeros = WebRtcSpl_NormW32(den) + 8;
diff --git a/modules/audio_processing/agc/loudness_histogram.cc b/modules/audio_processing/agc/loudness_histogram.cc
index 4775ff7..b0a1f53 100644
--- a/modules/audio_processing/agc/loudness_histogram.cc
+++ b/modules/audio_processing/agc/loudness_histogram.cc
@@ -114,7 +114,7 @@
 
 void LoudnessHistogram::RemoveTransient() {
   // Don't expect to be here if high-activity region is longer than
-  // |kTransientWidthThreshold| or there has not been any transient.
+  // `kTransientWidthThreshold` or there has not been any transient.
   RTC_DCHECK_LE(len_high_activity_, kTransientWidthThreshold);
   int index =
       (buffer_index_ > 0) ? (buffer_index_ - 1) : len_circular_buffer_ - 1;
diff --git a/modules/audio_processing/agc/loudness_histogram.h b/modules/audio_processing/agc/loudness_histogram.h
index badd443..51b3871 100644
--- a/modules/audio_processing/agc/loudness_histogram.h
+++ b/modules/audio_processing/agc/loudness_histogram.h
@@ -25,7 +25,7 @@
   static LoudnessHistogram* Create();
 
   // Create a sliding LoudnessHistogram, i.e. the histogram represents the last
-  // |window_size| samples.
+  // `window_size` samples.
   static LoudnessHistogram* Create(int window_size);
   ~LoudnessHistogram();
 
@@ -49,7 +49,7 @@
   LoudnessHistogram();
   explicit LoudnessHistogram(int window);
 
-  // Find the histogram bin associated with the given |rms|.
+  // Find the histogram bin associated with the given `rms`.
   int GetBinIndex(double rms);
 
   void RemoveOldestEntryAndUpdate();
@@ -63,10 +63,10 @@
   // Number of times the histogram is updated
   int num_updates_;
   // Audio content, this should be equal to the sum of the components of
-  // |bin_count_q10_|.
+  // `bin_count_q10_`.
   int64_t audio_content_q10_;
 
-  // LoudnessHistogram of input RMS in Q10 with |kHistSize_| bins. In each
+  // LoudnessHistogram of input RMS in Q10 with `kHistSize_` bins. In each
   // 'Update(),' we increment the associated histogram-bin with the given
   // probability. The increment is implemented in Q10 to avoid rounding errors.
   int64_t bin_count_q10_[kHistSize];
diff --git a/modules/audio_processing/agc2/biquad_filter.cc b/modules/audio_processing/agc2/biquad_filter.cc
index da8557c..ccb7807 100644
--- a/modules/audio_processing/agc2/biquad_filter.cc
+++ b/modules/audio_processing/agc2/biquad_filter.cc
@@ -15,7 +15,7 @@
 namespace webrtc {
 
 // Transposed direct form I implementation of a bi-quad filter applied to an
-// input signal |x| to produce an output signal |y|.
+// input signal `x` to produce an output signal `y`.
 void BiQuadFilter::Process(rtc::ArrayView<const float> x,
                            rtc::ArrayView<float> y) {
   for (size_t k = 0; k < x.size(); ++k) {
diff --git a/modules/audio_processing/agc2/biquad_filter_unittest.cc b/modules/audio_processing/agc2/biquad_filter_unittest.cc
index cd9a272..55ca1a5 100644
--- a/modules/audio_processing/agc2/biquad_filter_unittest.cc
+++ b/modules/audio_processing/agc2/biquad_filter_unittest.cc
@@ -64,7 +64,7 @@
                         rtc::ArrayView<const float> computed,
                         const float tolerance) {
   // The relative error is undefined when the expected value is 0.
-  // When that happens, check the absolute error instead. |safe_den| is used
+  // When that happens, check the absolute error instead. `safe_den` is used
   // below to implement such logic.
   auto safe_den = [](float x) { return (x == 0.f) ? 1.f : std::fabs(x); };
   ASSERT_EQ(expected.size(), computed.size());
diff --git a/modules/audio_processing/agc2/compute_interpolated_gain_curve.cc b/modules/audio_processing/agc2/compute_interpolated_gain_curve.cc
index bc92613..221b499 100644
--- a/modules/audio_processing/agc2/compute_interpolated_gain_curve.cc
+++ b/modules/audio_processing/agc2/compute_interpolated_gain_curve.cc
@@ -105,7 +105,7 @@
     const auto interval = q.top();
     q.pop();
 
-    // Split |interval| and enqueue.
+    // Split `interval` and enqueue.
     double x_split = (interval.x0 + interval.x1) / 2.0;
     q.emplace(interval.x0, x_split,
               LimiterUnderApproximationNegativeError(limiter, interval.x0,
@@ -135,7 +135,7 @@
 void PrecomputeKneeApproxParams(const LimiterDbGainCurve* limiter,
                                 test::InterpolatedParameters* parameters) {
   static_assert(kInterpolatedGainCurveKneePoints > 2, "");
-  // Get |kInterpolatedGainCurveKneePoints| - 1 equally spaced points.
+  // Get `kInterpolatedGainCurveKneePoints` - 1 equally spaced points.
   const std::vector<double> points = test::LinSpace(
       limiter->knee_start_linear(), limiter->limiter_start_linear(),
       kInterpolatedGainCurveKneePoints - 1);
diff --git a/modules/audio_processing/agc2/compute_interpolated_gain_curve.h b/modules/audio_processing/agc2/compute_interpolated_gain_curve.h
index 5f52441..08b676f 100644
--- a/modules/audio_processing/agc2/compute_interpolated_gain_curve.h
+++ b/modules/audio_processing/agc2/compute_interpolated_gain_curve.h
@@ -29,8 +29,8 @@
 
 // Knee and beyond-knee regions approximation parameters.
 // The gain curve is approximated as a piece-wise linear function.
-// |approx_params_x_| are the boundaries between adjacent linear pieces,
-// |approx_params_m_| and |approx_params_q_| are the slope and the y-intercept
+// `approx_params_x_` are the boundaries between adjacent linear pieces,
+// `approx_params_m_` and `approx_params_q_` are the slope and the y-intercept
 // values of each piece.
 struct InterpolatedParameters {
   std::array<float, kInterpolatedGainCurveTotalPoints>
diff --git a/modules/audio_processing/agc2/fixed_digital_level_estimator.cc b/modules/audio_processing/agc2/fixed_digital_level_estimator.cc
index 3e9bb2e..eb8a64a 100644
--- a/modules/audio_processing/agc2/fixed_digital_level_estimator.cc
+++ b/modules/audio_processing/agc2/fixed_digital_level_estimator.cc
@@ -26,7 +26,7 @@
 constexpr float kAttackFilterConstant = 0.f;
 // This is computed from kDecayMs by
 // 10 ** (-1/20 * subframe_duration / kDecayMs).
-// |subframe_duration| is |kFrameDurationMs / kSubFramesInFrame|.
+// `subframe_duration` is `kFrameDurationMs / kSubFramesInFrame`.
 // kDecayMs is defined in agc2_testing_common.h
 constexpr float kDecayFilterConstant = 0.9998848773724686f;
 
diff --git a/modules/audio_processing/agc2/interpolated_gain_curve.cc b/modules/audio_processing/agc2/interpolated_gain_curve.cc
index 3dd5010..ac7fbec 100644
--- a/modules/audio_processing/agc2/interpolated_gain_curve.cc
+++ b/modules/audio_processing/agc2/interpolated_gain_curve.cc
@@ -151,11 +151,11 @@
 }
 
 // Looks up a gain to apply given a non-negative input level.
-// The cost of this operation depends on the region in which |input_level|
+// The cost of this operation depends on the region in which `input_level`
 // falls.
 // For the identity and the saturation regions the cost is O(1).
 // For the other regions, namely knee and limiter, the cost is
-// O(2 + log2(|LightkInterpolatedGainCurveTotalPoints|), plus O(1) for the
+// O(2 + log2(`kInterpolatedGainCurveTotalPoints`)), plus O(1) for the
 // linear interpolation (one product and one sum).
 float InterpolatedGainCurve::LookUpGainToApply(float input_level) const {
   UpdateStats(input_level);
diff --git a/modules/audio_processing/agc2/limiter.h b/modules/audio_processing/agc2/limiter.h
index df7b540..f8eec3d 100644
--- a/modules/audio_processing/agc2/limiter.h
+++ b/modules/audio_processing/agc2/limiter.h
@@ -31,7 +31,7 @@
   Limiter& operator=(const Limiter& limiter) = delete;
   ~Limiter();
 
-  // Applies limiter and hard-clipping to |signal|.
+  // Applies limiter and hard-clipping to `signal`.
   void Process(AudioFrameView<float> signal);
   InterpolatedGainCurve::Stats GetGainCurveStats() const;
 
diff --git a/modules/audio_processing/agc2/limiter_db_gain_curve.cc b/modules/audio_processing/agc2/limiter_db_gain_curve.cc
index d55ed5d..d47c0b2 100644
--- a/modules/audio_processing/agc2/limiter_db_gain_curve.cc
+++ b/modules/audio_processing/agc2/limiter_db_gain_curve.cc
@@ -105,7 +105,7 @@
          input_level_linear;
 }
 
-// Computes the first derivative of GetGainLinear() in |x|.
+// Computes the first derivative of GetGainLinear() in `x`.
 double LimiterDbGainCurve::GetGainFirstDerivativeLinear(double x) const {
   // Beyond-knee region only.
   RTC_CHECK_GE(x, limiter_start_linear_ - 1e-7 * kMaxAbsFloatS16Value);
diff --git a/modules/audio_processing/agc2/rnn_vad/auto_correlation.cc b/modules/audio_processing/agc2/rnn_vad/auto_correlation.cc
index 431c01f..3ddeec8 100644
--- a/modules/audio_processing/agc2/rnn_vad/auto_correlation.cc
+++ b/modules/audio_processing/agc2/rnn_vad/auto_correlation.cc
@@ -40,7 +40,7 @@
 //         [ y_{m-1} ]
 // x and y are sub-array of equal length; x is never moved, whereas y slides.
 // The cross-correlation between y_0 and x corresponds to the auto-correlation
-// for the maximum pitch period. Hence, the first value in |auto_corr| has an
+// for the maximum pitch period. Hence, the first value in `auto_corr` has an
 // inverted lag equal to 0 that corresponds to a lag equal to the maximum
 // pitch period.
 void AutoCorrelationCalculator::ComputeOnPitchBuffer(
diff --git a/modules/audio_processing/agc2/rnn_vad/auto_correlation.h b/modules/audio_processing/agc2/rnn_vad/auto_correlation.h
index d58558c..1ae5054 100644
--- a/modules/audio_processing/agc2/rnn_vad/auto_correlation.h
+++ b/modules/audio_processing/agc2/rnn_vad/auto_correlation.h
@@ -31,7 +31,7 @@
   ~AutoCorrelationCalculator();
 
   // Computes the auto-correlation coefficients for a target pitch interval.
-  // |auto_corr| indexes are inverted lags.
+  // `auto_corr` indexes are inverted lags.
   void ComputeOnPitchBuffer(
       rtc::ArrayView<const float, kBufSize12kHz> pitch_buf,
       rtc::ArrayView<float, kNumLags12kHz> auto_corr);
diff --git a/modules/audio_processing/agc2/rnn_vad/common.h b/modules/audio_processing/agc2/rnn_vad/common.h
index be5a2d5..c099373 100644
--- a/modules/audio_processing/agc2/rnn_vad/common.h
+++ b/modules/audio_processing/agc2/rnn_vad/common.h
@@ -52,8 +52,8 @@
 constexpr int kInitialMinPitch12kHz = kInitialMinPitch24kHz / 2;
 constexpr int kMaxPitch12kHz = kMaxPitch24kHz / 2;
 static_assert(kMaxPitch12kHz > kInitialMinPitch12kHz, "");
-// The inverted lags for the pitch interval [|kInitialMinPitch12kHz|,
-// |kMaxPitch12kHz|] are in the range [0, |kNumLags12kHz|].
+// The inverted lags for the pitch interval [`kInitialMinPitch12kHz`,
+// `kMaxPitch12kHz`] are in the range [0, `kNumLags12kHz`].
 constexpr int kNumLags12kHz = kMaxPitch12kHz - kInitialMinPitch12kHz;
 
 // 48 kHz constants.
diff --git a/modules/audio_processing/agc2/rnn_vad/features_extraction.cc b/modules/audio_processing/agc2/rnn_vad/features_extraction.cc
index f86eba7..5c276c8 100644
--- a/modules/audio_processing/agc2/rnn_vad/features_extraction.cc
+++ b/modules/audio_processing/agc2/rnn_vad/features_extraction.cc
@@ -55,10 +55,10 @@
   if (use_high_pass_filter_) {
     std::array<float, kFrameSize10ms24kHz> samples_filtered;
     hpf_.Process(samples, samples_filtered);
-    // Feed buffer with the pre-processed version of |samples|.
+    // Feed buffer with the pre-processed version of `samples`.
     pitch_buf_24kHz_.Push(samples_filtered);
   } else {
-    // Feed buffer with |samples|.
+    // Feed buffer with `samples`.
     pitch_buf_24kHz_.Push(samples);
   }
   // Extract the LP residual.
diff --git a/modules/audio_processing/agc2/rnn_vad/features_extraction.h b/modules/audio_processing/agc2/rnn_vad/features_extraction.h
index f4cea7a..d47a85b 100644
--- a/modules/audio_processing/agc2/rnn_vad/features_extraction.h
+++ b/modules/audio_processing/agc2/rnn_vad/features_extraction.h
@@ -33,7 +33,7 @@
   void Reset();
   // Analyzes the samples, computes the feature vector and returns true if
   // silence is detected (false if not). When silence is detected,
-  // |feature_vector| is partially written and therefore must not be used to
+  // `feature_vector` is partially written and therefore must not be used to
   // feed the VAD RNN.
   bool CheckSilenceComputeFeatures(
       rtc::ArrayView<const float, kFrameSize10ms24kHz> samples,
diff --git a/modules/audio_processing/agc2/rnn_vad/features_extraction_unittest.cc b/modules/audio_processing/agc2/rnn_vad/features_extraction_unittest.cc
index 98da39e..96f956a 100644
--- a/modules/audio_processing/agc2/rnn_vad/features_extraction_unittest.cc
+++ b/modules/audio_processing/agc2/rnn_vad/features_extraction_unittest.cc
@@ -29,7 +29,7 @@
 }
 
 // Number of 10 ms frames required to fill a pitch buffer having size
-// |kBufSize24kHz|.
+// `kBufSize24kHz`.
 constexpr int kNumTestDataFrames = ceil(kBufSize24kHz, kFrameSize10ms24kHz);
 // Number of samples for the test data.
 constexpr int kNumTestDataSize = kNumTestDataFrames * kFrameSize10ms24kHz;
@@ -47,8 +47,8 @@
   }
 }
 
-// Feeds |features_extractor| with |samples| splitting it in 10 ms frames.
-// For every frame, the output is written into |feature_vector|. Returns true
+// Feeds `features_extractor` with `samples` splitting it in 10 ms frames.
+// For every frame, the output is written into `feature_vector`. Returns true
 // if silence is detected in the last frame.
 bool FeedTestData(FeaturesExtractor& features_extractor,
                   rtc::ArrayView<const float> samples,
diff --git a/modules/audio_processing/agc2/rnn_vad/lp_residual.cc b/modules/audio_processing/agc2/rnn_vad/lp_residual.cc
index c553aa2..484bfba 100644
--- a/modules/audio_processing/agc2/rnn_vad/lp_residual.cc
+++ b/modules/audio_processing/agc2/rnn_vad/lp_residual.cc
@@ -22,9 +22,9 @@
 namespace rnn_vad {
 namespace {
 
-// Computes auto-correlation coefficients for |x| and writes them in
-// |auto_corr|. The lag values are in {0, ..., max_lag - 1}, where max_lag
-// equals the size of |auto_corr|.
+// Computes auto-correlation coefficients for `x` and writes them in
+// `auto_corr`. The lag values are in {0, ..., max_lag - 1}, where max_lag
+// equals the size of `auto_corr`.
 void ComputeAutoCorrelation(
     rtc::ArrayView<const float> x,
     rtc::ArrayView<float, kNumLpcCoefficients> auto_corr) {
diff --git a/modules/audio_processing/agc2/rnn_vad/lp_residual.h b/modules/audio_processing/agc2/rnn_vad/lp_residual.h
index 380d9f6..d04c536 100644
--- a/modules/audio_processing/agc2/rnn_vad/lp_residual.h
+++ b/modules/audio_processing/agc2/rnn_vad/lp_residual.h
@@ -21,14 +21,14 @@
 // Linear predictive coding (LPC) inverse filter length.
 constexpr int kNumLpcCoefficients = 5;
 
-// Given a frame |x|, computes a post-processed version of LPC coefficients
+// Given a frame `x`, computes a post-processed version of LPC coefficients
 // tailored for pitch estimation.
 void ComputeAndPostProcessLpcCoefficients(
     rtc::ArrayView<const float> x,
     rtc::ArrayView<float, kNumLpcCoefficients> lpc_coeffs);
 
-// Computes the LP residual for the input frame |x| and the LPC coefficients
-// |lpc_coeffs|. |y| and |x| can point to the same array for in-place
+// Computes the LP residual for the input frame `x` and the LPC coefficients
+// `lpc_coeffs`. `y` and `x` can point to the same array for in-place
 // computation.
 void ComputeLpResidual(
     rtc::ArrayView<const float, kNumLpcCoefficients> lpc_coeffs,
diff --git a/modules/audio_processing/agc2/rnn_vad/pitch_search.cc b/modules/audio_processing/agc2/rnn_vad/pitch_search.cc
index 77a1188..419620f 100644
--- a/modules/audio_processing/agc2/rnn_vad/pitch_search.cc
+++ b/modules/audio_processing/agc2/rnn_vad/pitch_search.cc
@@ -44,7 +44,7 @@
   CandidatePitchPeriods pitch_periods = ComputePitchPeriod12kHz(
       pitch_buffer_12kHz_view, auto_correlation_12kHz_view, cpu_features_);
   // The refinement is done using the pitch buffer that contains 24 kHz samples.
-  // Therefore, adapt the inverted lags in |pitch_candidates_inv_lags| from 12
+  // Therefore, adapt the candidate pitch periods in `pitch_periods` from 12
   // to 24 kHz.
   pitch_periods.best *= 2;
   pitch_periods.second_best *= 2;
diff --git a/modules/audio_processing/agc2/rnn_vad/pitch_search_internal.cc b/modules/audio_processing/agc2/rnn_vad/pitch_search_internal.cc
index 0b8a77e..4000e33 100644
--- a/modules/audio_processing/agc2/rnn_vad/pitch_search_internal.cc
+++ b/modules/audio_processing/agc2/rnn_vad/pitch_search_internal.cc
@@ -54,18 +54,18 @@
                                       float next_auto_correlation) {
   if ((next_auto_correlation - prev_auto_correlation) >
       0.7f * (curr_auto_correlation - prev_auto_correlation)) {
-    return 1;  // |next_auto_correlation| is the largest auto-correlation
+    return 1;  // `next_auto_correlation` is the largest auto-correlation
                // coefficient.
   } else if ((prev_auto_correlation - next_auto_correlation) >
              0.7f * (curr_auto_correlation - next_auto_correlation)) {
-    return -1;  // |prev_auto_correlation| is the largest auto-correlation
+    return -1;  // `prev_auto_correlation` is the largest auto-correlation
                 // coefficient.
   }
   return 0;
 }
 
-// Refines a pitch period |lag| encoded as lag with pseudo-interpolation. The
-// output sample rate is twice as that of |lag|.
+// Refines a pitch period `lag` encoded as lag with pseudo-interpolation. The
+// output sample rate is twice as that of `lag`.
 int PitchPseudoInterpolationLagPitchBuf(
     int lag,
     rtc::ArrayView<const float, kBufSize24kHz> pitch_buffer,
@@ -217,8 +217,8 @@
       auto_correlation[best_inverted_lag + 1],
       auto_correlation[best_inverted_lag],
       auto_correlation[best_inverted_lag - 1]);
-  // TODO(bugs.webrtc.org/9076): When retraining, check if |offset| below should
-  // be subtracted since |inverted_lag| is an inverted lag but offset is a lag.
+  // TODO(bugs.webrtc.org/9076): When retraining, check if `offset` below should
+  // be subtracted since `inverted_lag` is an inverted lag but `offset` is a lag.
   return 2 * best_inverted_lag + offset;
 }
 
@@ -359,7 +359,7 @@
         }
       }
     }
-    // Update |squared_energy_y| for the next inverted lag.
+    // Update `squared_energy_y` for the next inverted lag.
     const float y_old = pitch_buffer[inverted_lag];
     const float y_new = pitch_buffer[inverted_lag + kFrameSize20ms12kHz];
     denominator -= y_old * y_old;
@@ -458,8 +458,8 @@
         initial_pitch.period, /*multiplier=*/1, period_divisor);
     RTC_DCHECK_GE(alternative_pitch.period, kMinPitch24kHz);
     // When looking at |alternative_pitch.period|, we also look at one of its
-    // sub-harmonics. |kSubHarmonicMultipliers| is used to know where to look.
-    // |period_divisor| == 2 is a special case since |dual_alternative_period|
+    // sub-harmonics. `kSubHarmonicMultipliers` is used to know where to look.
+    // `period_divisor` == 2 is a special case since `dual_alternative_period`
     // might be greater than the maximum pitch period.
     int dual_alternative_period = GetAlternativePitchPeriod(
         initial_pitch.period, kSubHarmonicMultipliers[period_divisor - 2],
@@ -473,7 +473,7 @@
            "coincide.";
     // Compute an auto-correlation score for the primary pitch candidate
     // |alternative_pitch.period| by also looking at its possible sub-harmonic
-    // |dual_alternative_period|.
+    // `dual_alternative_period`.
     const float xy_primary_period = ComputeAutoCorrelation(
         kMaxPitch24kHz - alternative_pitch.period, pitch_buffer, vector_math);
     // TODO(webrtc:10480): Copy `xy_primary_period` if the secondary period is
diff --git a/modules/audio_processing/agc2/rnn_vad/ring_buffer.h b/modules/audio_processing/agc2/rnn_vad/ring_buffer.h
index f0270af..a6f7fdd 100644
--- a/modules/audio_processing/agc2/rnn_vad/ring_buffer.h
+++ b/modules/audio_processing/agc2/rnn_vad/ring_buffer.h
@@ -35,7 +35,7 @@
   ~RingBuffer() = default;
   // Set the ring buffer values to zero.
   void Reset() { buffer_.fill(0); }
-  // Replace the least recently pushed array in the buffer with |new_values|.
+  // Replace the least recently pushed array in the buffer with `new_values`.
   void Push(rtc::ArrayView<const T, S> new_values) {
     std::memcpy(buffer_.data() + S * tail_, new_values.data(), S * sizeof(T));
     tail_ += 1;
@@ -43,7 +43,7 @@
       tail_ = 0;
   }
   // Return an array view onto the array with a given delay. A view on the last
-  // and least recently push array is returned when |delay| is 0 and N - 1
+  // and least recently pushed array is returned when `delay` is 0 and N - 1
   // respectively.
   rtc::ArrayView<const T, S> GetArrayView(int delay) const {
     RTC_DCHECK_LE(0, delay);
diff --git a/modules/audio_processing/agc2/rnn_vad/rnn_fc.cc b/modules/audio_processing/agc2/rnn_vad/rnn_fc.cc
index ecbb198..91501fb 100644
--- a/modules/audio_processing/agc2/rnn_vad/rnn_fc.cc
+++ b/modules/audio_processing/agc2/rnn_vad/rnn_fc.cc
@@ -32,7 +32,7 @@
 
 // TODO(bugs.chromium.org/10480): Hard-code optimized layout and remove this
 // function to improve setup time.
-// Casts and scales |weights| and re-arranges the layout.
+// Casts and scales `weights` and re-arranges the layout.
 std::vector<float> PreprocessWeights(rtc::ArrayView<const int8_t> weights,
                                      int output_size) {
   if (output_size == 1) {
diff --git a/modules/audio_processing/agc2/rnn_vad/rnn_gru.cc b/modules/audio_processing/agc2/rnn_vad/rnn_gru.cc
index 482016e..ef37410 100644
--- a/modules/audio_processing/agc2/rnn_vad/rnn_gru.cc
+++ b/modules/audio_processing/agc2/rnn_vad/rnn_gru.cc
@@ -24,7 +24,7 @@
 std::vector<float> PreprocessGruTensor(rtc::ArrayView<const int8_t> tensor_src,
                                        int output_size) {
   // Transpose, cast and scale.
-  // |n| is the size of the first dimension of the 3-dim tensor |weights|.
+  // `n` is the size of the first dimension of the 3-dim tensor `weights`.
   const int n = rtc::CheckedDivExact(rtc::dchecked_cast<int>(tensor_src.size()),
                                      output_size * kNumGruGates);
   const int stride_src = kNumGruGates * output_size;
diff --git a/modules/audio_processing/agc2/rnn_vad/rnn_vad_unittest.cc b/modules/audio_processing/agc2/rnn_vad/rnn_vad_unittest.cc
index 989b235..f33cd14 100644
--- a/modules/audio_processing/agc2/rnn_vad/rnn_vad_unittest.cc
+++ b/modules/audio_processing/agc2/rnn_vad/rnn_vad_unittest.cc
@@ -49,7 +49,7 @@
 // constant below to true in order to write new expected output binary files.
 constexpr bool kWriteComputedOutputToFile = false;
 
+// Guards against forgetting to set `kWriteComputedOutputToFile` back to false
+// Avoids that one forgets to set `kWriteComputedOutputToFile` back to false
 // when the expected output files are re-exported.
 TEST(RnnVadTest, CheckWriteComputedOutputIsFalse) {
   ASSERT_FALSE(kWriteComputedOutputToFile)
diff --git a/modules/audio_processing/agc2/rnn_vad/sequence_buffer_unittest.cc b/modules/audio_processing/agc2/rnn_vad/sequence_buffer_unittest.cc
index f577571..af00583 100644
--- a/modules/audio_processing/agc2/rnn_vad/sequence_buffer_unittest.cc
+++ b/modules/audio_processing/agc2/rnn_vad/sequence_buffer_unittest.cc
@@ -50,7 +50,7 @@
     for (int i = 0; i < N; ++i)
       chunk[i] = static_cast<T>(i + 1);
     seq_buf.Push(chunk);
-    // With the next Push(), |last| will be moved left by N positions.
+    // With the next Push(), `last` will be moved left by N positions.
     const T last = chunk[N - 1];
     for (int i = 0; i < N; ++i)
       chunk[i] = static_cast<T>(last + i + 1);
diff --git a/modules/audio_processing/agc2/rnn_vad/spectral_features_internal.cc b/modules/audio_processing/agc2/rnn_vad/spectral_features_internal.cc
index 91c0086..a10b0f7 100644
--- a/modules/audio_processing/agc2/rnn_vad/spectral_features_internal.cc
+++ b/modules/audio_processing/agc2/rnn_vad/spectral_features_internal.cc
@@ -23,7 +23,7 @@
 
 // Weights for each FFT coefficient for each Opus band (Nyquist frequency
 // excluded). The size of each band is specified in
-// |kOpusScaleNumBins24kHz20ms|.
+// `kOpusScaleNumBins24kHz20ms`.
 constexpr std::array<float, kFrameSize20ms24kHz / 2> kOpusBandWeights24kHz20ms =
     {{
         0.f,       0.25f,      0.5f,       0.75f,  // Band 0
diff --git a/modules/audio_processing/agc2/rnn_vad/spectral_features_internal.h b/modules/audio_processing/agc2/rnn_vad/spectral_features_internal.h
index aa7b1c6..f4b293a 100644
--- a/modules/audio_processing/agc2/rnn_vad/spectral_features_internal.h
+++ b/modules/audio_processing/agc2/rnn_vad/spectral_features_internal.h
@@ -50,8 +50,8 @@
   ~SpectralCorrelator();
 
   // Computes the band-wise spectral auto-correlations.
-  // |x| must:
-  //  - have size equal to |kFrameSize20ms24kHz|;
+  // `x` must:
+  //  - have size equal to `kFrameSize20ms24kHz`;
   //  - be encoded as vectors of interleaved real-complex FFT coefficients
   //    where x[1] = y[1] = 0 (the Nyquist frequency coefficient is omitted).
   void ComputeAutoCorrelation(
@@ -59,8 +59,8 @@
       rtc::ArrayView<float, kOpusBands24kHz> auto_corr) const;
 
   // Computes the band-wise spectral cross-correlations.
-  // |x| and |y| must:
-  //  - have size equal to |kFrameSize20ms24kHz|;
+  // `x` and `y` must:
+  //  - have size equal to `kFrameSize20ms24kHz`;
   //  - be encoded as vectors of interleaved real-complex FFT coefficients where
   //    x[1] = y[1] = 0 (the Nyquist frequency coefficient is omitted).
   void ComputeCrossCorrelation(
@@ -82,12 +82,12 @@
 
 // TODO(bugs.webrtc.org/10480): Move to anonymous namespace in
 // spectral_features.cc. Creates a DCT table for arrays having size equal to
-// |kNumBands|. Declared here for unit testing.
+// `kNumBands`. Declared here for unit testing.
 std::array<float, kNumBands * kNumBands> ComputeDctTable();
 
 // TODO(bugs.webrtc.org/10480): Move to anonymous namespace in
-// spectral_features.cc. Computes DCT for |in| given a pre-computed DCT table.
-// In-place computation is not allowed and |out| can be smaller than |in| in
+// spectral_features.cc. Computes DCT for `in` given a pre-computed DCT table.
+// In-place computation is not allowed and `out` can be smaller than `in` in
 // order to only compute the first DCT coefficients. Declared here for unit
 // testing.
 void ComputeDct(rtc::ArrayView<const float> in,
diff --git a/modules/audio_processing/agc2/rnn_vad/spectral_features_internal_unittest.cc b/modules/audio_processing/agc2/rnn_vad/spectral_features_internal_unittest.cc
index 11a44a5..ece4eb5 100644
--- a/modules/audio_processing/agc2/rnn_vad/spectral_features_internal_unittest.cc
+++ b/modules/audio_processing/agc2/rnn_vad/spectral_features_internal_unittest.cc
@@ -28,7 +28,7 @@
 namespace rnn_vad {
 namespace {
 
-// Generates the values for the array named |kOpusBandWeights24kHz20ms| in the
+// Generates the values for the array named `kOpusBandWeights24kHz20ms` in the
 // anonymous namespace of the .cc file, which is the array of FFT coefficient
 // weights for the Opus scale triangular filters.
 std::vector<float> ComputeTriangularFiltersWeights() {
@@ -66,7 +66,7 @@
 
 // Checks that the computed triangular filters weights for the Opus scale are
 // monotonic withing each Opus band. This test should only be enabled when
-// ComputeTriangularFiltersWeights() is changed and |kOpusBandWeights24kHz20ms|
+// ComputeTriangularFiltersWeights() is changed and `kOpusBandWeights24kHz20ms`
 // is updated accordingly.
 TEST(RnnVadTest, DISABLED_TestOpusScaleWeights) {
   auto weights = ComputeTriangularFiltersWeights();
diff --git a/modules/audio_processing/agc2/rnn_vad/symmetric_matrix_buffer.h b/modules/audio_processing/agc2/rnn_vad/symmetric_matrix_buffer.h
index dd3b62a..d186479 100644
--- a/modules/audio_processing/agc2/rnn_vad/symmetric_matrix_buffer.h
+++ b/modules/audio_processing/agc2/rnn_vad/symmetric_matrix_buffer.h
@@ -46,9 +46,9 @@
     buf_.fill(0);
   }
   // Pushes the results from the comparison between the most recent item and
-  // those that are still in the ring buffer. The first element in |values| must
+  // those that are still in the ring buffer. The first element in `values` must
   // correspond to the comparison between the most recent item and the second
-  // most recent one in the ring buffer, whereas the last element in |values|
+  // most recent one in the ring buffer, whereas the last element in `values`
   // must correspond to the comparison between the most recent item and the
   // oldest one in the ring buffer.
   void Push(rtc::ArrayView<T, S - 1> values) {
@@ -64,7 +64,7 @@
     }
   }
   // Reads the value that corresponds to comparison of two items in the ring
-  // buffer having delay |delay1| and |delay2|. The two arguments must not be
+  // buffer having delay `delay1` and `delay2`. The two arguments must not be
   // equal and both must be in {0, ..., S - 1}.
   T GetValue(int delay1, int delay2) const {
     int row = S - 1 - delay1;
diff --git a/modules/audio_processing/agc2/rnn_vad/symmetric_matrix_buffer_unittest.cc b/modules/audio_processing/agc2/rnn_vad/symmetric_matrix_buffer_unittest.cc
index 6f61c87..1509ca5 100644
--- a/modules/audio_processing/agc2/rnn_vad/symmetric_matrix_buffer_unittest.cc
+++ b/modules/audio_processing/agc2/rnn_vad/symmetric_matrix_buffer_unittest.cc
@@ -58,17 +58,17 @@
     SCOPED_TRACE(t);
     const int t_removed = ring_buf.GetArrayView(kRingBufSize - 1)[0];
     ring_buf.Push({&t, 1});
-    // The head of the ring buffer is |t|.
+    // The head of the ring buffer is `t`.
     ASSERT_EQ(t, ring_buf.GetArrayView(0)[0]);
-    // Create the comparisons between |t| and the older elements in the ring
+    // Create the comparisons between `t` and the older elements in the ring
     // buffer.
     std::array<PairType, kRingBufSize - 1> new_comparions;
     for (int i = 0; i < kRingBufSize - 1; ++i) {
-      // Start comparing |t| to the second newest element in the ring buffer.
+      // Start comparing `t` to the second newest element in the ring buffer.
       const int delay = i + 1;
       const auto t_prev = ring_buf.GetArrayView(delay)[0];
       ASSERT_EQ(std::max(0, t - delay), t_prev);
-      // Compare the last element |t| with |t_prev|.
+      // Compare the last element `t` with `t_prev`.
       new_comparions[i].first = t_prev;
       new_comparions[i].second = t;
     }
diff --git a/modules/audio_processing/audio_buffer.h b/modules/audio_processing/audio_buffer.h
index 3eecf0d..ab0af44 100644
--- a/modules/audio_processing/audio_buffer.h
+++ b/modules/audio_processing/audio_buffer.h
@@ -71,8 +71,8 @@
   // Usage:
   // channels()[channel][sample].
   // Where:
-  // 0 <= channel < |buffer_num_channels_|
-  // 0 <= sample < |buffer_num_frames_|
+  // 0 <= channel < `buffer_num_channels_`
+  // 0 <= sample < `buffer_num_frames_`
   float* const* channels() { return data_->channels(); }
   const float* const* channels_const() const { return data_->channels(); }
 
@@ -80,9 +80,9 @@
   // Usage:
   // split_bands(channel)[band][sample].
   // Where:
-  // 0 <= channel < |buffer_num_channels_|
-  // 0 <= band < |num_bands_|
-  // 0 <= sample < |num_split_frames_|
+  // 0 <= channel < `buffer_num_channels_`
+  // 0 <= band < `num_bands_`
+  // 0 <= sample < `num_split_frames_`
   const float* const* split_bands_const(size_t channel) const {
     return split_data_.get() ? split_data_->bands(channel)
                              : data_->bands(channel);
@@ -96,9 +96,9 @@
   // Usage:
   // split_channels(band)[channel][sample].
   // Where:
-  // 0 <= band < |num_bands_|
-  // 0 <= channel < |buffer_num_channels_|
-  // 0 <= sample < |num_split_frames_|
+  // 0 <= band < `num_bands_`
+  // 0 <= channel < `buffer_num_channels_`
+  // 0 <= sample < `num_split_frames_`
   const float* const* split_channels_const(Band band) const {
     if (split_data_.get()) {
       return split_data_->channels(band);
diff --git a/modules/audio_processing/audio_processing_impl.cc b/modules/audio_processing/audio_processing_impl.cc
index 4a19855..5acf693 100644
--- a/modules/audio_processing/audio_processing_impl.cc
+++ b/modules/audio_processing/audio_processing_impl.cc
@@ -1325,7 +1325,7 @@
           capture_.key_pressed);
     }
 
-    // Experimental APM sub-module that analyzes |capture_buffer|.
+    // Experimental APM sub-module that analyzes `capture_buffer`.
     if (submodules_.capture_analyzer) {
       submodules_.capture_analyzer->Analyze(capture_buffer);
     }
diff --git a/modules/audio_processing/audio_processing_impl.h b/modules/audio_processing/audio_processing_impl.h
index 686e417..2c22536 100644
--- a/modules/audio_processing/audio_processing_impl.h
+++ b/modules/audio_processing/audio_processing_impl.h
@@ -169,7 +169,7 @@
       const ApmSubmoduleCreationOverrides& overrides);
 
   // Class providing thread-safe message pipe functionality for
-  // |runtime_settings_|.
+  // `runtime_settings_`.
   class RuntimeSettingEnqueuer {
    public:
     explicit RuntimeSettingEnqueuer(
@@ -320,8 +320,8 @@
 
   // Collects configuration settings from public and private
   // submodules to be saved as an audioproc::Config message on the
-  // AecDump if it is attached.  If not |forced|, only writes the current
-  // config if it is different from the last saved one; if |forced|,
+  // AecDump if it is attached.  If not `forced`, only writes the current
+  // config if it is different from the last saved one; if `forced`,
   // writes the config regardless of the last saved.
   void WriteAecDumpConfigMessage(bool forced)
       RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_capture_);
diff --git a/modules/audio_processing/audio_processing_unittest.cc b/modules/audio_processing/audio_processing_unittest.cc
index 4d30a34..faeca79 100644
--- a/modules/audio_processing/audio_processing_unittest.cc
+++ b/modules/audio_processing/audio_processing_unittest.cc
@@ -321,10 +321,10 @@
 
 // Reads a 10 ms chunk of int16 interleaved audio from the given (assumed
 // stereo) file, converts to deinterleaved float (optionally downmixing) and
-// returns the result in |cb|. Returns false if the file ended (or on error) and
+// returns the result in `cb`. Returns false if the file ended (or on error) and
 // true otherwise.
 //
-// |int_data| and |float_data| are just temporary space that must be
+// `int_data` and `float_data` are just temporary space that must be
 // sufficiently large to hold the 10 ms chunk.
 bool ReadChunk(FILE* file,
                int16_t* int_data,
@@ -596,7 +596,7 @@
                                            int system_delay_ms,
                                            int delay_min,
                                            int delay_max) {
-  // The |revframe_| and |frame_| should include the proper frame information,
+  // The `revframe_` and `frame_` should include the proper frame information,
   // hence can be used for extracting information.
   Int16FrameData tmp_frame;
   std::queue<Int16FrameData*> frame_queue;
@@ -606,7 +606,7 @@
   SetFrameTo(&tmp_frame, 0);
 
   EXPECT_EQ(apm_->kNoError, apm_->Initialize());
-  // Initialize the |frame_queue| with empty frames.
+  // Initialize the `frame_queue` with empty frames.
   int frame_delay = delay_ms / 10;
   while (frame_delay < 0) {
     Int16FrameData* frame = new Int16FrameData();
@@ -1884,7 +1884,7 @@
     if (!absl::GetFlag(FLAGS_write_apm_ref_data)) {
       const int kIntNear = 1;
       // When running the test on a N7 we get a {2, 6} difference of
-      // |has_voice_count| and |max_output_average| is up to 18 higher.
+      // `has_voice_count` and `max_output_average` is up to 18 higher.
       // All numbers being consistently higher on N7 compare to ref_data.
       // TODO(bjornv): If we start getting more of these offsets on Android we
       // should consider a different approach. Either using one slack for all,
@@ -2058,7 +2058,7 @@
   static void TearDownTestSuite() { ClearTempFiles(); }
 
   // Runs a process pass on files with the given parameters and dumps the output
-  // to a file specified with |output_file_prefix|. Both forward and reverse
+  // to a file specified with `output_file_prefix`. Both forward and reverse
   // output streams are dumped.
   static void ProcessFormat(int input_rate,
                             int output_rate,
@@ -2277,7 +2277,7 @@
           out_ptr = cmp_data.get();
         }
 
-        // Update the |sq_error| and |variance| accumulators with the highest
+        // Update the `sq_error` and `variance` accumulators with the highest
         // SNR of reference vs output.
         UpdateBestSNR(ref_data.get(), out_ptr, ref_length, expected_delay,
                       &variance, &sq_error);
diff --git a/modules/audio_processing/echo_control_mobile_impl.h b/modules/audio_processing/echo_control_mobile_impl.h
index 23f3c06..f7f2626 100644
--- a/modules/audio_processing/echo_control_mobile_impl.h
+++ b/modules/audio_processing/echo_control_mobile_impl.h
@@ -42,7 +42,7 @@
     kLoudSpeakerphone
   };
 
-  // Sets echo control appropriate for the audio routing |mode| on the device.
+  // Sets echo control appropriate for the audio routing `mode` on the device.
   // It can and should be updated during a call if the audio routing changes.
   int set_routing_mode(RoutingMode mode);
   RoutingMode routing_mode() const;
diff --git a/modules/audio_processing/gain_controller2_unittest.cc b/modules/audio_processing/gain_controller2_unittest.cc
index 85c08bb..b1ab00e 100644
--- a/modules/audio_processing/gain_controller2_unittest.cc
+++ b/modules/audio_processing/gain_controller2_unittest.cc
@@ -27,7 +27,7 @@
 namespace {
 
 void SetAudioBufferSamples(float value, AudioBuffer* ab) {
-  // Sets all the samples in |ab| to |value|.
+  // Sets all the samples in `ab` to `value`.
   for (size_t k = 0; k < ab->num_channels(); ++k) {
     std::fill(ab->channels()[k], ab->channels()[k] + ab->num_frames(), value);
   }
diff --git a/modules/audio_processing/include/audio_frame_proxies.h b/modules/audio_processing/include/audio_frame_proxies.h
index 2d0f5b5..5dd111c 100644
--- a/modules/audio_processing/include/audio_frame_proxies.h
+++ b/modules/audio_processing/include/audio_frame_proxies.h
@@ -16,21 +16,21 @@
 class AudioFrame;
 class AudioProcessing;
 
-// Processes a 10 ms |frame| of the primary audio stream using the provided
+// Processes a 10 ms `frame` of the primary audio stream using the provided
 // AudioProcessing object. On the client-side, this is the near-end (or
-// captured) audio. The |sample_rate_hz_|, |num_channels_|, and
-// |samples_per_channel_| members of |frame| must be valid. If changed from the
+// captured) audio. The `sample_rate_hz_`, `num_channels_`, and
+// `samples_per_channel_` members of `frame` must be valid. If changed from the
 // previous call to this function, it will trigger an initialization of the
 // provided AudioProcessing object.
 // The function returns any error codes passed from the AudioProcessing
 // ProcessStream method.
 int ProcessAudioFrame(AudioProcessing* ap, AudioFrame* frame);
 
-// Processes a 10 ms |frame| of the reverse direction audio stream using the
+// Processes a 10 ms `frame` of the reverse direction audio stream using the
 // provided AudioProcessing object. The frame may be modified. On the
 // client-side, this is the far-end (or to be rendered) audio. The
-// |sample_rate_hz_|, |num_channels_|, and |samples_per_channel_| members of
-// |frame| must be valid. If changed from the previous call to this function, it
+// `sample_rate_hz_`, `num_channels_`, and `samples_per_channel_` members of
+// `frame` must be valid. If changed from the previous call to this function, it
 // will trigger an initialization of the provided AudioProcessing object.
 // The function returns any error codes passed from the AudioProcessing
 // ProcessReverseStream method.
diff --git a/modules/audio_processing/include/audio_frame_view.h b/modules/audio_processing/include/audio_frame_view.h
index ab5779a..9786cd9 100644
--- a/modules/audio_processing/include/audio_frame_view.h
+++ b/modules/audio_processing/include/audio_frame_view.h
@@ -19,8 +19,8 @@
 template <class T>
 class AudioFrameView {
  public:
-  // |num_channels| and |channel_size| describe the T**
-  // |audio_samples|. |audio_samples| is assumed to point to a
+  // `num_channels` and `channel_size` describe the T**
+  // `audio_samples`. `audio_samples` is assumed to point to a
   // two-dimensional |num_channels * channel_size| array of floats.
   AudioFrameView(T* const* audio_samples,
                  size_t num_channels,
diff --git a/modules/audio_processing/include/audio_processing.h b/modules/audio_processing/include/audio_processing.h
index 64b1b5d..047776b 100644
--- a/modules/audio_processing/include/audio_processing.h
+++ b/modules/audio_processing/include/audio_processing.h
@@ -53,7 +53,7 @@
 class CustomProcessing;
 
 // Use to enable experimental gain control (AGC). At startup the experimental
-// AGC moves the microphone volume up to |startup_min_volume| if the current
+// AGC moves the microphone volume up to `startup_min_volume` if the current
 // microphone volume is set too low. The value is clamped to its operating range
 // [12, 255]. Here, 255 maps to 100%.
 //
@@ -99,8 +99,8 @@
 //
 // APM operates on two audio streams on a frame-by-frame basis. Frames of the
 // primary stream, on which all processing is applied, are passed to
-// |ProcessStream()|. Frames of the reverse direction stream are passed to
-// |ProcessReverseStream()|. On the client-side, this will typically be the
+// `ProcessStream()`. Frames of the reverse direction stream are passed to
+// `ProcessReverseStream()`. On the client-side, this will typically be the
 // near-end (capture) and far-end (render) streams, respectively. APM should be
 // placed in the signal chain as close to the audio hardware abstraction layer
 // (HAL) as possible.
@@ -264,7 +264,7 @@
       bool enabled = false;
     } transient_suppression;
 
-    // Enables reporting of |voice_detected| in webrtc::AudioProcessingStats.
+    // Enables reporting of `voice_detected` in webrtc::AudioProcessingStats.
     struct VoiceDetection {
       bool enabled = false;
     } voice_detection;
@@ -377,7 +377,7 @@
     // Enables the next generation AGC functionality. This feature replaces the
     // standard methods of gain control in the previous AGC. Enabling this
     // submodule enables an adaptive digital AGC followed by a limiter. By
-    // setting |fixed_gain_db|, the limiter can be turned into a compressor that
+    // setting `fixed_gain_db`, the limiter can be turned into a compressor that
     // first applies a fixed gain. The adaptive digital AGC can be turned off by
     // setting |adaptive_digital_mode=false|.
     struct RTC_EXPORT GainController2 {
@@ -425,7 +425,7 @@
       bool enabled = true;
     } residual_echo_detector;
 
-    // Enables reporting of |output_rms_dbfs| in webrtc::AudioProcessingStats.
+    // Enables reporting of `output_rms_dbfs` in webrtc::AudioProcessingStats.
     struct LevelEstimation {
       bool enabled = false;
     } level_estimation;
@@ -501,7 +501,7 @@
     }
 
     // Creates a runtime setting to notify play-out (aka render) volume changes.
-    // |volume| is the unnormalized volume, the maximum of which
+    // `volume` is the unnormalized volume, the maximum of which
     static RuntimeSetting CreatePlayoutVolumeChange(int volume) {
       return {Type::kPlayoutVolumeChange, volume};
     }
@@ -562,13 +562,13 @@
   //
   // It is also not necessary to call if the audio parameters (sample
   // rate and number of channels) have changed. Passing updated parameters
-  // directly to |ProcessStream()| and |ProcessReverseStream()| is permissible.
+  // directly to `ProcessStream()` and `ProcessReverseStream()` is permissible.
   // If the parameters are known at init-time though, they may be provided.
   // TODO(webrtc:5298): Change to return void.
   virtual int Initialize() = 0;
 
   // The int16 interfaces require:
-  //   - only |NativeRate|s be used
+  //   - only `NativeRate`s be used
   //   - that the input, output and reverse rates must match
   //   - that |processing_config.output_stream()| matches
   //     |processing_config.input_stream()|.
@@ -616,7 +616,7 @@
   virtual bool PostRuntimeSetting(RuntimeSetting setting) = 0;
 
   // Accepts and produces a 10 ms frame interleaved 16 bit integer audio as
-  // specified in |input_config| and |output_config|. |src| and |dest| may use
+  // specified in `input_config` and `output_config`. `src` and `dest` may use
   // the same memory, if desired.
   virtual int ProcessStream(const int16_t* const src,
                             const StreamConfig& input_config,
@@ -624,35 +624,35 @@
                             int16_t* const dest) = 0;
 
   // Accepts deinterleaved float audio with the range [-1, 1]. Each element of
-  // |src| points to a channel buffer, arranged according to |input_stream|. At
-  // output, the channels will be arranged according to |output_stream| in
-  // |dest|.
+  // `src` points to a channel buffer, arranged according to `input_stream`. At
+  // output, the channels will be arranged according to `output_stream` in
+  // `dest`.
   //
-  // The output must have one channel or as many channels as the input. |src|
-  // and |dest| may use the same memory, if desired.
+  // The output must have one channel or as many channels as the input. `src`
+  // and `dest` may use the same memory, if desired.
   virtual int ProcessStream(const float* const* src,
                             const StreamConfig& input_config,
                             const StreamConfig& output_config,
                             float* const* dest) = 0;
 
   // Accepts and produces a 10 ms frame of interleaved 16 bit integer audio for
-  // the reverse direction audio stream as specified in |input_config| and
-  // |output_config|. |src| and |dest| may use the same memory, if desired.
+  // the reverse direction audio stream as specified in `input_config` and
+  // `output_config`. `src` and `dest` may use the same memory, if desired.
   virtual int ProcessReverseStream(const int16_t* const src,
                                    const StreamConfig& input_config,
                                    const StreamConfig& output_config,
                                    int16_t* const dest) = 0;
 
   // Accepts deinterleaved float audio with the range [-1, 1]. Each element of
-  // |data| points to a channel buffer, arranged according to |reverse_config|.
+  // `data` points to a channel buffer, arranged according to `reverse_config`.
   virtual int ProcessReverseStream(const float* const* src,
                                    const StreamConfig& input_config,
                                    const StreamConfig& output_config,
                                    float* const* dest) = 0;
 
   // Accepts deinterleaved float audio with the range [-1, 1]. Each element
-  // of |data| points to a channel buffer, arranged according to
-  // |reverse_config|.
+  // of `data` points to a channel buffer, arranged according to
+  // `reverse_config`.
   virtual int AnalyzeReverseStream(const float* const* data,
                                    const StreamConfig& reverse_config) = 0;
 
@@ -675,7 +675,7 @@
 
   // This must be called if and only if echo processing is enabled.
   //
-  // Sets the |delay| in ms between ProcessReverseStream() receiving a far-end
+  // Sets the `delay` in ms between ProcessReverseStream() receiving a far-end
   // frame and ProcessStream() receiving a near-end frame containing the
   // corresponding echo. On the client-side this can be expressed as
   //   delay = (t_render - t_analyze) + (t_process - t_capture)
@@ -695,10 +695,10 @@
 
   // Creates and attaches an webrtc::AecDump for recording debugging
   // information.
-  // The |worker_queue| may not be null and must outlive the created
+  // The `worker_queue` may not be null and must outlive the created
   // AecDump instance. |max_log_size_bytes == -1| means the log size
-  // will be unlimited. |handle| may not be null. The AecDump takes
-  // responsibility for |handle| and closes it in the destructor. A
+  // will be unlimited. `handle` may not be null. The AecDump takes
+  // responsibility for `handle` and closes it in the destructor. A
   // return value of true indicates that the file has been
   // sucessfully opened, while a value of false indicates that
   // opening the file failed.
@@ -726,7 +726,7 @@
 
   // Get audio processing statistics.
   virtual AudioProcessingStats GetStatistics() = 0;
-  // TODO(webrtc:5298) Deprecated variant. The |has_remote_tracks| argument
+  // TODO(webrtc:5298) Deprecated variant. The `has_remote_tracks` argument
   // should be set if there are active remote tracks (this would usually be true
   // during a call). If there are no remote tracks some of the stats will not be
   // set by AudioProcessing, because they only make sense if there is at least
diff --git a/modules/audio_processing/include/audio_processing_statistics.h b/modules/audio_processing/include/audio_processing_statistics.h
index 87babee..c81d7eb 100644
--- a/modules/audio_processing/include/audio_processing_statistics.h
+++ b/modules/audio_processing/include/audio_processing_statistics.h
@@ -50,9 +50,9 @@
   // The delay metrics consists of the delay median and standard deviation. It
   // also consists of the fraction of delay estimates that can make the echo
   // cancellation perform poorly. The values are aggregated until the first
-  // call to |GetStatistics()| and afterwards aggregated and updated every
+  // call to `GetStatistics()` and afterwards aggregated and updated every
   // second. Note that if there are several clients pulling metrics from
-  // |GetStatistics()| during a session the first call from any of them will
+  // `GetStatistics()` during a session the first call from any of them will
   // change to one second aggregation window for all.
   absl::optional<int32_t> delay_median_ms;
   absl::optional<int32_t> delay_standard_deviation_ms;
@@ -64,7 +64,7 @@
 
   // The instantaneous delay estimate produced in the AEC. The unit is in
   // milliseconds and the value is the instantaneous value at the time of the
-  // call to |GetStatistics()|.
+  // call to `GetStatistics()`.
   absl::optional<int32_t> delay_ms;
 };
 
diff --git a/modules/audio_processing/level_estimator.h b/modules/audio_processing/level_estimator.h
index 1d8a071..d2bcfa1 100644
--- a/modules/audio_processing/level_estimator.h
+++ b/modules/audio_processing/level_estimator.h
@@ -35,7 +35,7 @@
   // The computation follows: https://tools.ietf.org/html/rfc6465
   // with the intent that it can provide the RTP audio level indication.
   //
-  // Frames passed to ProcessStream() with an |_energy| of zero are considered
+  // Frames passed to ProcessStream() with an `_energy` of zero are considered
   // to have been muted. The RMS of the frame will be interpreted as -127.
   int RMS() { return rms_.Average(); }
 
diff --git a/modules/audio_processing/optionally_built_submodule_creators.h b/modules/audio_processing/optionally_built_submodule_creators.h
index c96e66f..7de337b 100644
--- a/modules/audio_processing/optionally_built_submodule_creators.h
+++ b/modules/audio_processing/optionally_built_submodule_creators.h
@@ -20,7 +20,7 @@
 // These overrides are only to be used for testing purposes.
 // Each flag emulates a preprocessor macro to exclude a submodule of APM from
 // the build, e.g. WEBRTC_EXCLUDE_TRANSIENT_SUPPRESSOR. If the corresponding
-// flag |transient_suppression| is enabled, then the creators will return
+// flag `transient_suppression` is enabled, then the creators will return
 // nullptr instead of a submodule instance, as if the macro had been defined.
 struct ApmSubmoduleCreationOverrides {
   bool transient_suppression = false;
@@ -29,7 +29,7 @@
 // Creates a transient suppressor.
 // Will instead return nullptr if one of the following is true:
 // * WEBRTC_EXCLUDE_TRANSIENT_SUPPRESSOR is defined
-// * The corresponding override in |overrides| is enabled.
+// * The corresponding override in `overrides` is enabled.
 std::unique_ptr<TransientSuppressor> CreateTransientSuppressor(
     const ApmSubmoduleCreationOverrides& overrides);
 
diff --git a/modules/audio_processing/residual_echo_detector.h b/modules/audio_processing/residual_echo_detector.h
index 5d18ecb..44252af 100644
--- a/modules/audio_processing/residual_echo_detector.h
+++ b/modules/audio_processing/residual_echo_detector.h
@@ -51,12 +51,12 @@
  private:
   static int instance_count_;
   std::unique_ptr<ApmDataDumper> data_dumper_;
-  // Keep track if the |Process| function has been previously called.
+  // Keep track if the `Process` function has been previously called.
   bool first_process_call_ = true;
   // Buffer for storing the power of incoming farend buffers. This is needed for
   // cases where calls to BufferFarend and Process are jittery.
   CircularBuffer render_buffer_;
-  // Count how long ago it was that the size of |render_buffer_| was zero. This
+  // Count how long ago it was that the size of `render_buffer_` was zero. This
   // value is also reset to zero when clock drift is detected and a value from
   // the renderbuffer is discarded, even though the buffer is not actually zero
   // at that point. This is done to avoid repeatedly removing elements in this
diff --git a/modules/audio_processing/rms_level.h b/modules/audio_processing/rms_level.h
index e1a6d56..4955d1b 100644
--- a/modules/audio_processing/rms_level.h
+++ b/modules/audio_processing/rms_level.h
@@ -47,7 +47,7 @@
   void Analyze(rtc::ArrayView<const int16_t> data);
   void Analyze(rtc::ArrayView<const float> data);
 
-  // If all samples with the given |length| have a magnitude of zero, this is
+  // If all samples with the given `length` have a magnitude of zero, this is
   // a shortcut to avoid some computation.
   void AnalyzeMuted(size_t length);
 
@@ -62,7 +62,7 @@
   Levels AverageAndPeak();
 
  private:
-  // Compares |block_size| with |block_size_|. If they are different, calls
+  // Compares `block_size` with `block_size_`. If they are different, calls
   // Reset() and stores the new size.
   void CheckBlockSize(size_t block_size);
 
diff --git a/modules/audio_processing/test/audio_processing_simulator.cc b/modules/audio_processing/test/audio_processing_simulator.cc
index 1f05f43..c61110f 100644
--- a/modules/audio_processing/test/audio_processing_simulator.cc
+++ b/modules/audio_processing/test/audio_processing_simulator.cc
@@ -206,7 +206,7 @@
   if (settings_.simulate_mic_gain) {
     if (settings_.aec_dump_input_filename) {
       // When the analog gain is simulated and an AEC dump is used as input, set
-      // the undo level to |aec_dump_mic_level_| to virtually restore the
+      // the undo level to `aec_dump_mic_level_` to virtually restore the
       // unmodified microphone signal level.
       fake_recording_device_.SetUndoMicLevel(aec_dump_mic_level_);
     }
@@ -261,7 +261,7 @@
 
   // Store the mic level suggested by AGC.
   // Note that when the analog gain is simulated and an AEC dump is used as
-  // input, |analog_mic_level_| will not be used with set_stream_analog_level().
+  // input, `analog_mic_level_` will not be used with set_stream_analog_level().
   analog_mic_level_ = ap_->recommended_stream_analog_level();
   if (settings_.simulate_mic_gain) {
     fake_recording_device_.SetMicLevel(analog_mic_level_);
diff --git a/modules/audio_processing/test/audioproc_float_impl.h b/modules/audio_processing/test/audioproc_float_impl.h
index 0687c43..5ed3aef 100644
--- a/modules/audio_processing/test/audioproc_float_impl.h
+++ b/modules/audio_processing/test/audioproc_float_impl.h
@@ -19,11 +19,11 @@
 namespace test {
 
 // This function implements the audio processing simulation utility. Pass
-// |input_aecdump| to provide the content of an AEC dump file as a string; if
-// |input_aecdump| is not passed, a WAV or AEC input dump file must be specified
-// via the |argv| argument. Pass |processed_capture_samples| to write in it the
-// samples processed on the capture side; if |processed_capture_samples| is not
-// passed, the output file can optionally be specified via the |argv| argument.
+// `input_aecdump` to provide the content of an AEC dump file as a string; if
+// `input_aecdump` is not passed, a WAV or AEC input dump file must be specified
+// via the `argv` argument. Pass `processed_capture_samples` to write in it the
+// samples processed on the capture side; if `processed_capture_samples` is not
+// passed, the output file can optionally be specified via the `argv` argument.
 // Any audio_processing object specified in the input is used for the
 // simulation. Note that when the audio_processing object is specified all
 // functionality that relies on using the internal builder is deactivated,
@@ -34,11 +34,11 @@
                        char* argv[]);
 
 // This function implements the audio processing simulation utility. Pass
-// |input_aecdump| to provide the content of an AEC dump file as a string; if
-// |input_aecdump| is not passed, a WAV or AEC input dump file must be specified
-// via the |argv| argument. Pass |processed_capture_samples| to write in it the
-// samples processed on the capture side; if |processed_capture_samples| is not
-// passed, the output file can optionally be specified via the |argv| argument.
+// `input_aecdump` to provide the content of an AEC dump file as a string; if
+// `input_aecdump` is not passed, a WAV or AEC input dump file must be specified
+// via the `argv` argument. Pass `processed_capture_samples` to write in it the
+// samples processed on the capture side; if `processed_capture_samples` is not
+// passed, the output file can optionally be specified via the `argv` argument.
 int AudioprocFloatImpl(std::unique_ptr<AudioProcessingBuilder> ap_builder,
                        int argc,
                        char* argv[],
diff --git a/modules/audio_processing/test/conversational_speech/simulator.cc b/modules/audio_processing/test/conversational_speech/simulator.cc
index 0591252..20c8608 100644
--- a/modules/audio_processing/test/conversational_speech/simulator.cc
+++ b/modules/audio_processing/test/conversational_speech/simulator.cc
@@ -125,8 +125,8 @@
   return audiotracks_map;
 }
 
-// Writes all the values in |source_samples| via |wav_writer|. If the number of
-// previously written samples in |wav_writer| is less than |interval_begin|, it
+// Writes all the values in `source_samples` via `wav_writer`. If the number of
+// previously written samples in `wav_writer` is less than `interval_begin`, it
 // adds zeros as left padding. The padding corresponds to intervals during which
 // a speaker is not active.
 void PadLeftWriteChunk(rtc::ArrayView<const int16_t> source_samples,
@@ -145,9 +145,9 @@
   wav_writer->WriteSamples(source_samples.data(), source_samples.size());
 }
 
-// Appends zeros via |wav_writer|. The number of zeros is always non-negative
+// Appends zeros via `wav_writer`. The number of zeros is always non-negative
 // and equal to the difference between the previously written samples and
-// |pad_samples|.
+// `pad_samples`.
 void PadRightWrite(WavWriter* wav_writer, size_t pad_samples) {
   RTC_CHECK(wav_writer);
   RTC_CHECK_GE(pad_samples, wav_writer->num_samples());
diff --git a/modules/audio_processing/test/fake_recording_device.h b/modules/audio_processing/test/fake_recording_device.h
index b4d2a10..4017037 100644
--- a/modules/audio_processing/test/fake_recording_device.h
+++ b/modules/audio_processing/test/fake_recording_device.h
@@ -52,14 +52,14 @@
   void SetUndoMicLevel(const int level);
 
   // Simulates the analog gain.
-  // If |real_device_level| is a valid level, the unmodified mic signal is
-  // virtually restored. To skip the latter step set |real_device_level| to
+  // If `real_device_level` is a valid level, the unmodified mic signal is
+  // virtually restored. To skip the latter step set `real_device_level` to
   // an empty value.
   void SimulateAnalogGain(rtc::ArrayView<int16_t> buffer);
 
   // Simulates the analog gain.
-  // If |real_device_level| is a valid level, the unmodified mic signal is
-  // virtually restored. To skip the latter step set |real_device_level| to
+  // If `real_device_level` is a valid level, the unmodified mic signal is
+  // virtually restored. To skip the latter step set `real_device_level` to
   // an empty value.
   void SimulateAnalogGain(ChannelBuffer<float>* buffer);
 
diff --git a/modules/audio_processing/test/fake_recording_device_unittest.cc b/modules/audio_processing/test/fake_recording_device_unittest.cc
index 74bb47f..2ac8b1d 100644
--- a/modules/audio_processing/test/fake_recording_device_unittest.cc
+++ b/modules/audio_processing/test/fake_recording_device_unittest.cc
@@ -75,7 +75,7 @@
 }
 
 // Checks that the samples in each pair have the same sign unless the sample in
-// |dst| is zero (because of zero gain).
+// `dst` is zero (because of zero gain).
 void CheckSameSign(const ChannelBuffer<float>* src,
                    const ChannelBuffer<float>* dst) {
   RTC_DCHECK_EQ(src->num_channels(), dst->num_channels());
diff --git a/modules/audio_processing/test/performance_timer.h b/modules/audio_processing/test/performance_timer.h
index b6e0da7..5375ba7 100644
--- a/modules/audio_processing/test/performance_timer.h
+++ b/modules/audio_processing/test/performance_timer.h
@@ -31,7 +31,7 @@
   double GetDurationStandardDeviation() const;
 
   // These methods are the same as those above, but they ignore the first
-  // |number_of_warmup_samples| measurements.
+  // `number_of_warmup_samples` measurements.
   double GetDurationAverage(size_t number_of_warmup_samples) const;
   double GetDurationStandardDeviation(size_t number_of_warmup_samples) const;
 
diff --git a/modules/audio_processing/test/py_quality_assessment/apm_quality_assessment_boxplot.py b/modules/audio_processing/test/py_quality_assessment/apm_quality_assessment_boxplot.py
index 60d1e85..c425885 100644
--- a/modules/audio_processing/test/py_quality_assessment/apm_quality_assessment_boxplot.py
+++ b/modules/audio_processing/test/py_quality_assessment/apm_quality_assessment_boxplot.py
@@ -88,7 +88,7 @@
         data_cell_scores = data_with_config[data_with_config.eval_score_name ==
                                             score_name]
 
-        # Exactly one of |params_to_plot| must match:
+        # Exactly one of `params_to_plot` must match:
         (matching_param, ) = [
             x for x in filter_params if '-' + x in config_json
         ]
diff --git a/modules/audio_processing/test/py_quality_assessment/apm_quality_assessment_optimize.py b/modules/audio_processing/test/py_quality_assessment/apm_quality_assessment_optimize.py
index b0be37c..ecae2ed 100644
--- a/modules/audio_processing/test/py_quality_assessment/apm_quality_assessment_optimize.py
+++ b/modules/audio_processing/test/py_quality_assessment/apm_quality_assessment_optimize.py
@@ -133,7 +133,7 @@
                                 {score1: value1, ...}}] into a numeric
                      value
   Returns:
-    the config that has the largest values of |score_weighting| applied
+    the config that has the largest values of `score_weighting` applied
     to its scores.
   """
 
diff --git a/modules/audio_processing/test/py_quality_assessment/quality_assessment/eval_scores.py b/modules/audio_processing/test/py_quality_assessment/quality_assessment/eval_scores.py
index 23f6eff..59c5f74 100644
--- a/modules/audio_processing/test/py_quality_assessment/quality_assessment/eval_scores.py
+++ b/modules/audio_processing/test/py_quality_assessment/quality_assessment/eval_scores.py
@@ -397,7 +397,7 @@
         # TODO(alessiob): Fix or remove if not needed.
         # thd = np.sqrt(np.sum(b_terms[1:]**2)) / b_terms[0]
 
-        # TODO(alessiob): Check the range of |thd_plus_noise| and update the class
+        # TODO(alessiob): Check the range of `thd_plus_noise` and update the class
         # docstring above if accordingly.
         thd_plus_noise = distortion_and_noise / b_terms[0]
 
diff --git a/modules/audio_processing/test/py_quality_assessment/quality_assessment/export.py b/modules/audio_processing/test/py_quality_assessment/quality_assessment/export.py
index fb3aae0..0affbed 100644
--- a/modules/audio_processing/test/py_quality_assessment/quality_assessment/export.py
+++ b/modules/audio_processing/test/py_quality_assessment/quality_assessment/export.py
@@ -363,7 +363,7 @@
     @classmethod
     def _SliceDataForScoreStatsTableCell(cls, scores, capture, render,
                                          echo_simulator):
-        """Slices |scores| to extract the data for a tab."""
+        """Slices `scores` to extract the data for a tab."""
         masks = []
 
         masks.append(scores.capture == capture)
@@ -378,7 +378,7 @@
 
     @classmethod
     def _FindUniqueTuples(cls, data_frame, fields):
-        """Slices |data_frame| to a list of fields and finds unique tuples."""
+        """Slices `data_frame` to a list of fields and finds unique tuples."""
         return data_frame[fields].drop_duplicates().values.tolist()
 
     @classmethod
diff --git a/modules/audio_processing/test/py_quality_assessment/quality_assessment/input_mixer.py b/modules/audio_processing/test/py_quality_assessment/quality_assessment/input_mixer.py
index f9125fa..af022bd 100644
--- a/modules/audio_processing/test/py_quality_assessment/quality_assessment/input_mixer.py
+++ b/modules/audio_processing/test/py_quality_assessment/quality_assessment/input_mixer.py
@@ -47,7 +47,7 @@
 
     Hard-clipping may occur in the mix; a warning is raised when this happens.
 
-    If |echo_filepath| is None, nothing is done and |capture_input_filepath| is
+    If `echo_filepath` is None, nothing is done and `capture_input_filepath` is
     returned.
 
     Args:
diff --git a/modules/audio_processing/test/py_quality_assessment/quality_assessment/signal_processing.py b/modules/audio_processing/test/py_quality_assessment/quality_assessment/signal_processing.py
index e41637c..95e8019 100644
--- a/modules/audio_processing/test/py_quality_assessment/quality_assessment/signal_processing.py
+++ b/modules/audio_processing/test/py_quality_assessment/quality_assessment/signal_processing.py
@@ -174,7 +174,7 @@
         """Detects hard clipping.
 
     Hard clipping is simply detected by counting samples that touch either the
-    lower or upper bound too many times in a row (according to |threshold|).
+    lower or upper bound too many times in a row (according to `threshold`).
     The presence of a single sequence of samples meeting such property is enough
     to label the signal as hard clipped.
 
@@ -295,16 +295,16 @@
                    noise,
                    target_snr=0.0,
                    pad_noise=MixPadding.NO_PADDING):
-        """Mixes |signal| and |noise| with a target SNR.
+        """Mixes `signal` and `noise` with a target SNR.
 
-    Mix |signal| and |noise| with a desired SNR by scaling |noise|.
+    Mix `signal` and `noise` with a desired SNR by scaling `noise`.
     If the target SNR is +/- infinite, a copy of signal/noise is returned.
-    If |signal| is shorter than |noise|, the length of the mix equals that of
-    |signal|. Otherwise, the mix length depends on whether padding is applied.
-    When padding is not applied, that is |pad_noise| is set to NO_PADDING
-    (default), the mix length equals that of |noise| - i.e., |signal| is
-    truncated. Otherwise, |noise| is extended and the resulting mix has the same
-    length of |signal|.
+    If `signal` is shorter than `noise`, the length of the mix equals that of
+    `signal`. Otherwise, the mix length depends on whether padding is applied.
+    When padding is not applied, that is `pad_noise` is set to NO_PADDING
+    (default), the mix length equals that of `noise` - i.e., `signal` is
+    truncated. Otherwise, `noise` is extended and the resulting mix has the same
+    length of `signal`.
 
     Args:
       signal: AudioSegment instance (signal).
@@ -342,18 +342,18 @@
         signal_duration = len(signal)
         noise_duration = len(noise)
         if signal_duration <= noise_duration:
-            # Ignore |pad_noise|, |noise| is truncated if longer that |signal|, the
-            # mix will have the same length of |signal|.
+            # Ignore `pad_noise`, `noise` is truncated if longer than `signal`, the
+            # mix will have the same length of `signal`.
             return signal.overlay(noise.apply_gain(gain_db))
         elif pad_noise == cls.MixPadding.NO_PADDING:
-            # |signal| is longer than |noise|, but no padding is applied to |noise|.
-            # Truncate |signal|.
+            # `signal` is longer than `noise`, but no padding is applied to `noise`.
+            # Truncate `signal`.
             return noise.overlay(signal, gain_during_overlay=gain_db)
         elif pad_noise == cls.MixPadding.ZERO_PADDING:
             # TODO(alessiob): Check that this works as expected.
             return signal.overlay(noise.apply_gain(gain_db))
         elif pad_noise == cls.MixPadding.LOOP:
-            # |signal| is longer than |noise|, extend |noise| by looping.
+            # `signal` is longer than `noise`, extend `noise` by looping.
             return signal.overlay(noise.apply_gain(gain_db), loop=True)
         else:
             raise exceptions.SignalProcessingException('invalid padding type')
diff --git a/modules/audio_processing/test/py_quality_assessment/quality_assessment/simulation.py b/modules/audio_processing/test/py_quality_assessment/quality_assessment/simulation.py
index fe30c9c..69b3a16 100644
--- a/modules/audio_processing/test/py_quality_assessment/quality_assessment/simulation.py
+++ b/modules/audio_processing/test/py_quality_assessment/quality_assessment/simulation.py
@@ -264,7 +264,7 @@
 
     The file name is parsed to extract input signal creator and params. If a
     creator is matched and the parameters are valid, a new signal is generated
-    and written in |input_signal_filepath|.
+    and written in `input_signal_filepath`.
 
     Args:
       input_signal_filepath: Path to the input signal audio file to write.
diff --git a/modules/audio_processing/test/py_quality_assessment/quality_assessment/test_data_generation_unittest.py b/modules/audio_processing/test/py_quality_assessment/quality_assessment/test_data_generation_unittest.py
index 6d0cb79..f75098a 100644
--- a/modules/audio_processing/test/py_quality_assessment/quality_assessment/test_data_generation_unittest.py
+++ b/modules/audio_processing/test/py_quality_assessment/quality_assessment/test_data_generation_unittest.py
@@ -116,7 +116,7 @@
             key = noisy_signal_filepaths.keys()[0]
             return noisy_signal_filepaths[key], reference_signal_filepaths[key]
 
-        # Test the |copy_with_identity| flag.
+        # Test the `copy_with_identity` flag.
         for copy_with_identity in [False, True]:
             # Instance the generator through the factory.
             factory = test_data_generation_factory.TestDataGeneratorFactory(
@@ -126,7 +126,7 @@
             factory.SetOutputDirectoryPrefix('datagen-')
             generator = factory.GetInstance(
                 test_data_generation.IdentityTestDataGenerator)
-            # Check |copy_with_identity| is set correctly.
+            # Check `copy_with_identity` is set correctly.
             self.assertEqual(copy_with_identity, generator.copy_with_identity)
 
             # Generate test data and extract the paths to the noise and the reference
@@ -137,7 +137,7 @@
             noisy_signal_filepath, reference_signal_filepath = (
                 GetNoiseReferenceFilePaths(generator))
 
-            # Check that a copy is made if and only if |copy_with_identity| is True.
+            # Check that a copy is made if and only if `copy_with_identity` is True.
             if copy_with_identity:
                 self.assertNotEqual(noisy_signal_filepath,
                                     input_signal_filepath)
diff --git a/modules/audio_processing/test/py_quality_assessment/quality_assessment/vad.cc b/modules/audio_processing/test/py_quality_assessment/quality_assessment/vad.cc
index 9906eca..b47f622 100644
--- a/modules/audio_processing/test/py_quality_assessment/quality_assessment/vad.cc
+++ b/modules/audio_processing/test/py_quality_assessment/quality_assessment/vad.cc
@@ -63,7 +63,7 @@
   std::unique_ptr<Vad> vad = CreateVad(Vad::Aggressiveness::kVadNormal);
   std::array<int16_t, kMaxFrameLen> samples;
   char buff = 0;     // Buffer to write one bit per frame.
-  uint8_t next = 0;  // Points to the next bit to write in |buff|.
+  uint8_t next = 0;  // Points to the next bit to write in `buff`.
   while (true) {
     // Process frame.
     const auto read_samples =
diff --git a/modules/audio_processing/test/test_utils.h b/modules/audio_processing/test/test_utils.h
index e2d243e..30674cb 100644
--- a/modules/audio_processing/test/test_utils.h
+++ b/modules/audio_processing/test/test_utils.h
@@ -78,7 +78,7 @@
   explicit ChannelBufferWavReader(std::unique_ptr<WavReader> file);
   ~ChannelBufferWavReader();
 
-  // Reads data from the file according to the |buffer| format. Returns false if
+  // Reads data from the file according to the `buffer` format. Returns false if
   // a full buffer can't be read from the file.
   bool Read(ChannelBuffer<float>* buffer);
 
@@ -115,7 +115,7 @@
       delete;
   ~ChannelBufferVectorWriter();
 
-  // Creates an interleaved copy of |buffer|, converts the samples to float S16
+  // Creates an interleaved copy of `buffer`, converts the samples to float S16
   // and appends the result to output_.
   void Write(const ChannelBuffer<float>& buffer);
 
diff --git a/modules/audio_processing/three_band_filter_bank.cc b/modules/audio_processing/three_band_filter_bank.cc
index 2a7d272..fc665ef 100644
--- a/modules/audio_processing/three_band_filter_bank.cc
+++ b/modules/audio_processing/three_band_filter_bank.cc
@@ -39,16 +39,16 @@
 namespace webrtc {
 namespace {
 
-// Factors to take into account when choosing |kFilterSize|:
-//   1. Higher |kFilterSize|, means faster transition, which ensures less
+// Factors to take into account when choosing `kFilterSize`:
+//   1. Higher `kFilterSize`, means faster transition, which ensures less
 //      aliasing. This is especially important when there is non-linear
 //      processing between the splitting and merging.
 //   2. The delay that this filter bank introduces is
-//      |kNumBands| * |kSparsity| * |kFilterSize| / 2, so it increases linearly
-//      with |kFilterSize|.
-//   3. The computation complexity also increases linearly with |kFilterSize|.
+//      `kNumBands` * `kSparsity` * `kFilterSize` / 2, so it increases linearly
+//      with `kFilterSize`.
+//   3. The computation complexity also increases linearly with `kFilterSize`.
 
-// The Matlab code to generate these |kFilterCoeffs| is:
+// The Matlab code to generate these `kFilterCoeffs` is:
 //
 // N = kNumBands * kSparsity * kFilterSize - 1;
 // h = fir1(N, 1 / (2 * kNumBands), kaiser(N + 1, 3.5));
@@ -59,7 +59,7 @@
 
 // Because the total bandwidth of the lower and higher band is double the middle
 // one (because of the spectrum parity), the low-pass prototype is half the
-// bandwidth of 1 / (2 * |kNumBands|) and is then shifted with cosine modulation
+// bandwidth of 1 / (2 * `kNumBands`) and is then shifted with cosine modulation
 // to the right places.
 // A Kaiser window is used because of its flexibility and the alpha is set to
 // 3.5, since that sets a stop band attenuation of 40dB ensuring a fast
@@ -100,8 +100,8 @@
      {1.f, -2.f, 1.f},
      {1.73205077f, 0.f, -1.73205077f}};
 
-// Filters the input signal |in| with the filter |filter| using a shift by
-// |in_shift|, taking into account the previous state.
+// Filters the input signal `in` with the filter `filter` using a shift by
+// `in_shift`, taking into account the previous state.
 void FilterCore(
     rtc::ArrayView<const float, kFilterSize> filter,
     rtc::ArrayView<const float, ThreeBandFilterBank::kSplitBandSize> in,
@@ -164,10 +164,10 @@
 ThreeBandFilterBank::~ThreeBandFilterBank() = default;
 
 // The analysis can be separated in these steps:
-//   1. Serial to parallel downsampling by a factor of |kNumBands|.
-//   2. Filtering of |kSparsity| different delayed signals with polyphase
+//   1. Serial to parallel downsampling by a factor of `kNumBands`.
+//   2. Filtering of `kSparsity` different delayed signals with polyphase
 //      decomposition of the low-pass prototype filter and upsampled by a factor
-//      of |kSparsity|.
+//      of `kSparsity`.
 //   3. Modulating with cosines and accumulating to get the desired band.
 void ThreeBandFilterBank::Analysis(
     rtc::ArrayView<const float, kFullBandSize> in,
@@ -222,9 +222,9 @@
 // The synthesis can be separated in these steps:
 //   1. Modulating with cosines.
 //   2. Filtering each one with a polyphase decomposition of the low-pass
-//      prototype filter upsampled by a factor of |kSparsity| and accumulating
-//      |kSparsity| signals with different delays.
-//   3. Parallel to serial upsampling by a factor of |kNumBands|.
+//      prototype filter upsampled by a factor of `kSparsity` and accumulating
+//      `kSparsity` signals with different delays.
+//   3. Parallel to serial upsampling by a factor of `kNumBands`.
 void ThreeBandFilterBank::Synthesis(
     rtc::ArrayView<const rtc::ArrayView<float>, ThreeBandFilterBank::kNumBands>
         in,
diff --git a/modules/audio_processing/three_band_filter_bank.h b/modules/audio_processing/three_band_filter_bank.h
index e6346de..db66cab 100644
--- a/modules/audio_processing/three_band_filter_bank.h
+++ b/modules/audio_processing/three_band_filter_bank.h
@@ -55,13 +55,13 @@
   ThreeBandFilterBank();
   ~ThreeBandFilterBank();
 
-  // Splits |in| of size kFullBandSize into 3 downsampled frequency bands in
-  // |out|, each of size 160.
+  // Splits `in` of size kFullBandSize into 3 downsampled frequency bands in
+  // `out`, each of size 160.
   void Analysis(rtc::ArrayView<const float, kFullBandSize> in,
                 rtc::ArrayView<const rtc::ArrayView<float>, kNumBands> out);
 
-  // Merges the 3 downsampled frequency bands in |in|, each of size 160, into
-  // |out|, which is of size kFullBandSize.
+  // Merges the 3 downsampled frequency bands in `in`, each of size 160, into
+  // `out`, which is of size kFullBandSize.
   void Synthesis(rtc::ArrayView<const rtc::ArrayView<float>, kNumBands> in,
                  rtc::ArrayView<float, kFullBandSize> out);
 
diff --git a/modules/audio_processing/transient/click_annotate.cc b/modules/audio_processing/transient/click_annotate.cc
index 21641f8..f3f040f 100644
--- a/modules/audio_processing/transient/click_annotate.cc
+++ b/modules/audio_processing/transient/click_annotate.cc
@@ -26,7 +26,7 @@
 // Creates a send times array, one for each step.
 // Each block that contains a transient, has an infinite send time.
 // The resultant array is written to a DAT file
-// Returns -1 on error or |lost_packets| otherwise.
+// Returns -1 on error or `lost_packets` otherwise.
 int main(int argc, char* argv[]) {
   if (argc != 5) {
     printf("\n%s - Application to generate a RTP timing file.\n\n", argv[0]);
diff --git a/modules/audio_processing/transient/dyadic_decimator.h b/modules/audio_processing/transient/dyadic_decimator.h
index fcb56b7..52467e8 100644
--- a/modules/audio_processing/transient/dyadic_decimator.h
+++ b/modules/audio_processing/transient/dyadic_decimator.h
@@ -18,7 +18,7 @@
 namespace webrtc {
 
 // Returns the proper length of the output buffer that you should use for the
-// given |in_length| and decimation |odd_sequence|.
+// given `in_length` and decimation `odd_sequence`.
 // Return -1 on error.
 inline size_t GetOutLengthToDyadicDecimate(size_t in_length,
                                            bool odd_sequence) {
@@ -34,10 +34,10 @@
 // Performs a dyadic decimation: removes every odd/even member of a sequence
 // halving its overall length.
 // Arguments:
-//    in: array of |in_length|.
+//    in: array of `in_length`.
 //    odd_sequence: If false, the odd members will be removed (1, 3, 5, ...);
 //                  if true, the even members will be removed (0, 2, 4, ...).
-//    out: array of |out_length|. |out_length| must be large enough to
+//    out: array of `out_length`. `out_length` must be large enough to
 //         hold the decimated output. The necessary length can be provided by
 //         GetOutLengthToDyadicDecimate().
 //         Must be previously allocated.
diff --git a/modules/audio_processing/transient/dyadic_decimator_unittest.cc b/modules/audio_processing/transient/dyadic_decimator_unittest.cc
index 3e65a7b..e4776d6 100644
--- a/modules/audio_processing/transient/dyadic_decimator_unittest.cc
+++ b/modules/audio_processing/transient/dyadic_decimator_unittest.cc
@@ -42,7 +42,7 @@
                                static_cast<int16_t*>(NULL), kOutBufferLength);
   EXPECT_EQ(0u, out_samples);
 
-  // Less than required |out_length|.
+  // Less than required `out_length`.
   out_samples = DyadicDecimate(test_buffer_even_len, kEvenBufferLength,
                                false,  // Even sequence.
                                test_buffer_out, 2);
diff --git a/modules/audio_processing/transient/file_utils.h b/modules/audio_processing/transient/file_utils.h
index 6184017..b748337 100644
--- a/modules/audio_processing/transient/file_utils.h
+++ b/modules/audio_processing/transient/file_utils.h
@@ -50,63 +50,63 @@
 // Returns 0 if correct, -1 on error.
 int ConvertDoubleToByteArray(double value, uint8_t out_bytes[8]);
 
-// Reads |length| 16-bit integers from |file| to |buffer|.
-// |file| must be previously opened.
+// Reads `length` 16-bit integers from `file` to `buffer`.
+// `file` must be previously opened.
 // Returns the number of 16-bit integers read or -1 on error.
 size_t ReadInt16BufferFromFile(FileWrapper* file,
                                size_t length,
                                int16_t* buffer);
 
-// Reads |length| 16-bit integers from |file| and stores those values
-// (converting them) in |buffer|.
-// |file| must be previously opened.
+// Reads `length` 16-bit integers from `file` and stores those values
+// (converting them) in `buffer`.
+// `file` must be previously opened.
 // Returns the number of 16-bit integers read or -1 on error.
 size_t ReadInt16FromFileToFloatBuffer(FileWrapper* file,
                                       size_t length,
                                       float* buffer);
 
-// Reads |length| 16-bit integers from |file| and stores those values
-// (converting them) in |buffer|.
-// |file| must be previously opened.
+// Reads `length` 16-bit integers from `file` and stores those values
+// (converting them) in `buffer`.
+// `file` must be previously opened.
 // Returns the number of 16-bit integers read or -1 on error.
 size_t ReadInt16FromFileToDoubleBuffer(FileWrapper* file,
                                        size_t length,
                                        double* buffer);
 
-// Reads |length| floats in binary representation (4 bytes) from |file| to
-// |buffer|.
-// |file| must be previously opened.
+// Reads `length` floats in binary representation (4 bytes) from `file` to
+// `buffer`.
+// `file` must be previously opened.
 // Returns the number of floats read or -1 on error.
 size_t ReadFloatBufferFromFile(FileWrapper* file, size_t length, float* buffer);
 
-// Reads |length| doubles in binary representation (8 bytes) from |file| to
-// |buffer|.
-// |file| must be previously opened.
+// Reads `length` doubles in binary representation (8 bytes) from `file` to
+// `buffer`.
+// `file` must be previously opened.
 // Returns the number of doubles read or -1 on error.
 size_t ReadDoubleBufferFromFile(FileWrapper* file,
                                 size_t length,
                                 double* buffer);
 
-// Writes |length| 16-bit integers from |buffer| in binary representation (2
-// bytes) to |file|. It flushes |file|, so after this call there are no
+// Writes `length` 16-bit integers from `buffer` in binary representation (2
+// bytes) to `file`. It flushes `file`, so after this call there are no
 // writings pending.
-// |file| must be previously opened.
+// `file` must be previously opened.
 // Returns the number of doubles written or -1 on error.
 size_t WriteInt16BufferToFile(FileWrapper* file,
                               size_t length,
                               const int16_t* buffer);
 
-// Writes |length| floats from |buffer| in binary representation (4 bytes) to
-// |file|. It flushes |file|, so after this call there are no writtings pending.
-// |file| must be previously opened.
+// Writes `length` floats from `buffer` in binary representation (4 bytes) to
+// `file`. It flushes `file`, so after this call there are no writings pending.
+// `file` must be previously opened.
 // Returns the number of doubles written or -1 on error.
 size_t WriteFloatBufferToFile(FileWrapper* file,
                               size_t length,
                               const float* buffer);
 
-// Writes |length| doubles from |buffer| in binary representation (8 bytes) to
-// |file|. It flushes |file|, so after this call there are no writings pending.
-// |file| must be previously opened.
+// Writes `length` doubles from `buffer` in binary representation (8 bytes) to
+// `file`. It flushes `file`, so after this call there are no writings pending.
+// `file` must be previously opened.
 // Returns the number of doubles written or -1 on error.
 size_t WriteDoubleBufferToFile(FileWrapper* file,
                                size_t length,
diff --git a/modules/audio_processing/transient/moving_moments.h b/modules/audio_processing/transient/moving_moments.h
index 6dc0520..70451dc 100644
--- a/modules/audio_processing/transient/moving_moments.h
+++ b/modules/audio_processing/transient/moving_moments.h
@@ -26,13 +26,13 @@
 // the last values of the moments. When needed.
 class MovingMoments {
  public:
-  // Creates a Moving Moments object, that uses the last |length| values
+  // Creates a Moving Moments object that uses the last `length` values
   // (including the new value introduced in every new calculation).
   explicit MovingMoments(size_t length);
   ~MovingMoments();
 
-  // Calculates the new values using |in|. Results will be in the out buffers.
-  // |first| and |second| must be allocated with at least |in_length|.
+  // Calculates the new values using `in`. Results will be in the out buffers.
+  // `first` and `second` must be allocated with at least `in_length`.
   void CalculateMoments(const float* in,
                         size_t in_length,
                         float* first,
@@ -40,7 +40,7 @@
 
  private:
   size_t length_;
-  // A queue holding the |length_| latest input values.
+  // A queue holding the `length_` latest input values.
   std::queue<float> queue_;
   // Sum of the values of the queue.
   float sum_;
diff --git a/modules/audio_processing/transient/transient_detector.cc b/modules/audio_processing/transient/transient_detector.cc
index f03a2ea..5c35505 100644
--- a/modules/audio_processing/transient/transient_detector.cc
+++ b/modules/audio_processing/transient/transient_detector.cc
@@ -43,8 +43,8 @@
              sample_rate_hz == ts::kSampleRate48kHz);
   int samples_per_transient = sample_rate_hz * kTransientLengthMs / 1000;
   // Adjustment to avoid data loss while downsampling, making
-  // |samples_per_chunk_| and |samples_per_transient| always divisible by
-  // |kLeaves|.
+  // `samples_per_chunk_` and `samples_per_transient` always divisible by
+  // `kLeaves`.
   samples_per_chunk_ -= samples_per_chunk_ % kLeaves;
   samples_per_transient -= samples_per_transient % kLeaves;
 
@@ -137,7 +137,7 @@
 
   // In the current implementation we return the max of the current result and
   // the previous results, so the high results have a width equals to
-  // |transient_length|.
+  // `transient_length`.
   return *std::max_element(previous_results_.begin(), previous_results_.end());
 }
 
diff --git a/modules/audio_processing/transient/transient_detector.h b/modules/audio_processing/transient/transient_detector.h
index 5ede2e8..a3dbb7f 100644
--- a/modules/audio_processing/transient/transient_detector.h
+++ b/modules/audio_processing/transient/transient_detector.h
@@ -37,8 +37,8 @@
 
   ~TransientDetector();
 
-  // Calculates the log-likelihood of the existence of a transient in |data|.
-  // |data_length| has to be equal to |samples_per_chunk_|.
+  // Calculates the log-likelihood of the existence of a transient in `data`.
+  // `data_length` has to be equal to `samples_per_chunk_`.
   // Returns a value between 0 and 1, as a non linear representation of this
   // likelihood.
   // Returns a negative value on error.
@@ -71,7 +71,7 @@
   float last_second_moment_[kLeaves];
 
   // We keep track of the previous results from the previous chunks, so it can
-  // be used to effectively give results according to the |transient_length|.
+  // be used to effectively give results according to the `transient_length`.
   std::deque<float> previous_results_;
 
   // Number of chunks that are going to return only zeros at the beginning of
diff --git a/modules/audio_processing/transient/transient_suppressor.h b/modules/audio_processing/transient/transient_suppressor.h
index bb262b0..982ddbd 100644
--- a/modules/audio_processing/transient/transient_suppressor.h
+++ b/modules/audio_processing/transient/transient_suppressor.h
@@ -27,22 +27,22 @@
                          int detector_rate_hz,
                          int num_channels) = 0;
 
-  // Processes a |data| chunk, and returns it with keystrokes suppressed from
+  // Processes a `data` chunk, and returns it with keystrokes suppressed from
   // it. The float format is assumed to be int16 ranged. If there are more than
-  // one channel, the chunks are concatenated one after the other in |data|.
-  // |data_length| must be equal to |data_length_|.
-  // |num_channels| must be equal to |num_channels_|.
-  // A sub-band, ideally the higher, can be used as |detection_data|. If it is
-  // NULL, |data| is used for the detection too. The |detection_data| is always
+  // one channel, the chunks are concatenated one after the other in `data`.
+  // `data_length` must be equal to `data_length_`.
+  // `num_channels` must be equal to `num_channels_`.
+  // A sub-band, ideally the higher, can be used as `detection_data`. If it is
+  // NULL, `data` is used for the detection too. The `detection_data` is always
   // assumed mono.
   // If a reference signal (e.g. keyboard microphone) is available, it can be
-  // passed in as |reference_data|. It is assumed mono and must have the same
-  // length as |data|. NULL is accepted if unavailable.
+  // passed in as `reference_data`. It is assumed mono and must have the same
+  // length as `data`. NULL is accepted if unavailable.
   // This suppressor performs better if voice information is available.
-  // |voice_probability| is the probability of voice being present in this chunk
-  // of audio. If voice information is not available, |voice_probability| must
+  // `voice_probability` is the probability of voice being present in this chunk
+  // of audio. If voice information is not available, `voice_probability` must
   // always be set to 1.
-  // |key_pressed| determines if a key was pressed on this audio chunk.
+  // `key_pressed` determines if a key was pressed on this audio chunk.
   // Returns 0 on success and -1 otherwise.
   virtual int Suppress(float* data,
                        size_t data_length,
diff --git a/modules/audio_processing/transient/transient_suppressor_impl.cc b/modules/audio_processing/transient/transient_suppressor_impl.cc
index d515d30..8e43d78 100644
--- a/modules/audio_processing/transient/transient_suppressor_impl.cc
+++ b/modules/audio_processing/transient/transient_suppressor_impl.cc
@@ -194,7 +194,7 @@
 
     using_reference_ = detector_->using_reference();
 
-    // |detector_smoothed_| follows the |detector_result| when this last one is
+    // `detector_smoothed_` follows the `detector_result` when this last one is
     // increasing, but has an exponential decaying tail to be able to suppress
     // the ringing of keyclicks.
     float smooth_factor = using_reference_ ? 0.6 : 0.1;
@@ -223,7 +223,7 @@
 }
 
 // This should only be called when detection is enabled. UpdateBuffers() must
-// have been called. At return, |out_buffer_| will be filled with the
+// have been called. At return, `out_buffer_` will be filled with the
 // processed output.
 void TransientSuppressorImpl::Suppress(float* in_ptr,
                                        float* spectral_mean,
@@ -325,7 +325,7 @@
 }
 
 // Shift buffers to make way for new data. Must be called after
-// |detection_enabled_| is updated by UpdateKeypress().
+// `detection_enabled_` is updated by UpdateKeypress().
 void TransientSuppressorImpl::UpdateBuffers(float* data) {
   // TODO(aluebs): Change to ring buffer.
   memmove(in_buffer_.get(), &in_buffer_[data_length_],
@@ -350,9 +350,9 @@
 }
 
 // Restores the unvoiced signal if a click is present.
-// Attenuates by a certain factor every peak in the |fft_buffer_| that exceeds
-// the spectral mean. The attenuation depends on |detector_smoothed_|.
-// If a restoration takes place, the |magnitudes_| are updated to the new value.
+// Attenuates by a certain factor every peak in the `fft_buffer_` that exceeds
+// the spectral mean. The attenuation depends on `detector_smoothed_`.
+// If a restoration takes place, the `magnitudes_` are updated to the new value.
 void TransientSuppressorImpl::HardRestoration(float* spectral_mean) {
   const float detector_result =
       1.f - std::pow(1.f - detector_smoothed_, using_reference_ ? 200.f : 50.f);
@@ -376,10 +376,10 @@
 }
 
 // Restores the voiced signal if a click is present.
-// Attenuates by a certain factor every peak in the |fft_buffer_| that exceeds
+// Attenuates by a certain factor every peak in the `fft_buffer_` that exceeds
 // the spectral mean and that is lower than some function of the current block
-// frequency mean. The attenuation depends on |detector_smoothed_|.
-// If a restoration takes place, the |magnitudes_| are updated to the new value.
+// frequency mean. The attenuation depends on `detector_smoothed_`.
+// If a restoration takes place, the `magnitudes_` are updated to the new value.
 void TransientSuppressorImpl::SoftRestoration(float* spectral_mean) {
   // Get the spectral magnitude mean of the current block.
   float block_frequency_mean = 0;
diff --git a/modules/audio_processing/transient/transient_suppressor_impl.h b/modules/audio_processing/transient/transient_suppressor_impl.h
index 4737af5..fa8186e 100644
--- a/modules/audio_processing/transient/transient_suppressor_impl.h
+++ b/modules/audio_processing/transient/transient_suppressor_impl.h
@@ -34,22 +34,22 @@
                  int detector_rate_hz,
                  int num_channels) override;
 
-  // Processes a |data| chunk, and returns it with keystrokes suppressed from
+  // Processes a `data` chunk, and returns it with keystrokes suppressed from
   // it. The float format is assumed to be int16 ranged. If there are more than
-  // one channel, the chunks are concatenated one after the other in |data|.
-  // |data_length| must be equal to |data_length_|.
-  // |num_channels| must be equal to |num_channels_|.
-  // A sub-band, ideally the higher, can be used as |detection_data|. If it is
-  // NULL, |data| is used for the detection too. The |detection_data| is always
+  // one channel, the chunks are concatenated one after the other in `data`.
+  // `data_length` must be equal to `data_length_`.
+  // `num_channels` must be equal to `num_channels_`.
+  // A sub-band, ideally the higher, can be used as `detection_data`. If it is
+  // NULL, `data` is used for the detection too. The `detection_data` is always
   // assumed mono.
   // If a reference signal (e.g. keyboard microphone) is available, it can be
-  // passed in as |reference_data|. It is assumed mono and must have the same
-  // length as |data|. NULL is accepted if unavailable.
+  // passed in as `reference_data`. It is assumed mono and must have the same
+  // length as `data`. NULL is accepted if unavailable.
   // This suppressor performs better if voice information is available.
-  // |voice_probability| is the probability of voice being present in this chunk
-  // of audio. If voice information is not available, |voice_probability| must
+  // `voice_probability` is the probability of voice being present in this chunk
+  // of audio. If voice information is not available, `voice_probability` must
   // always be set to 1.
-  // |key_pressed| determines if a key was pressed on this audio chunk.
+  // `key_pressed` determines if a key was pressed on this audio chunk.
   // Returns 0 on success and -1 otherwise.
   int Suppress(float* data,
                size_t data_length,
diff --git a/modules/audio_processing/transient/wpd_node.h b/modules/audio_processing/transient/wpd_node.h
index 6a52fb7..41614fa 100644
--- a/modules/audio_processing/transient/wpd_node.h
+++ b/modules/audio_processing/transient/wpd_node.h
@@ -25,7 +25,7 @@
   WPDNode(size_t length, const float* coefficients, size_t coefficients_length);
   ~WPDNode();
 
-  // Updates the node data. |parent_data| / 2 must be equals to |length_|.
+  // Updates the node data. `parent_data_length` / 2 must be equal to `length_`.
   // Returns 0 if correct, and -1 otherwise.
   int Update(const float* parent_data, size_t parent_data_length);
 
diff --git a/modules/audio_processing/transient/wpd_tree.h b/modules/audio_processing/transient/wpd_tree.h
index c54220f..13cb8d9 100644
--- a/modules/audio_processing/transient/wpd_tree.h
+++ b/modules/audio_processing/transient/wpd_tree.h
@@ -65,7 +65,7 @@
   // If level or index are out of bounds the function will return NULL.
   WPDNode* NodeAt(int level, int index);
 
-  // Updates all the nodes of the tree with the new data. |data_length| must be
+  // Updates all the nodes of the tree with the new data. `data_length` must be
   // teh same that was used for the creation of the tree.
   // Returns 0 if correct, and -1 otherwise.
   int Update(const float* data, size_t data_length);
diff --git a/modules/audio_processing/typing_detection.h b/modules/audio_processing/typing_detection.h
index d8fb359..9d96583 100644
--- a/modules/audio_processing/typing_detection.h
+++ b/modules/audio_processing/typing_detection.h
@@ -22,7 +22,7 @@
 
   // Run the detection algortihm. Shall be called every 10 ms. Returns true if
   // typing is detected, or false if not, based on the update period as set with
-  // SetParameters(). See |report_detection_update_period_| description below.
+  // SetParameters(). See `report_detection_update_period_` description below.
   bool Process(bool key_pressed, bool vad_activity);
 
   // Gets the time in seconds since the last detection.
@@ -43,14 +43,14 @@
   int penalty_counter_;
 
   // Counter since last time the detection status reported by Process() was
-  // updated. See also |report_detection_update_period_|.
+  // updated. See also `report_detection_update_period_`.
   int counter_since_last_detection_update_;
 
   // The detection status to report. Updated every
-  // |report_detection_update_period_| call to Process().
+  // `report_detection_update_period_` call to Process().
   bool detection_to_report_;
 
-  // What |detection_to_report_| should be set to next time it is updated.
+  // What `detection_to_report_` should be set to next time it is updated.
   bool new_detection_to_report_;
 
   // Settable threshold values.
@@ -61,10 +61,10 @@
   // Penalty added for a typing + activity coincide.
   int cost_per_typing_;
 
-  // Threshold for |penalty_counter_|.
+  // Threshold for `penalty_counter_`.
   int reporting_threshold_;
 
-  // How much we reduce |penalty_counter_| every 10 ms.
+  // How much we reduce `penalty_counter_` every 10 ms.
   int penalty_decay_;
 
   // How old typing events we allow.
diff --git a/modules/audio_processing/utility/delay_estimator.cc b/modules/audio_processing/utility/delay_estimator.cc
index 73c70b0..6868392 100644
--- a/modules/audio_processing/utility/delay_estimator.cc
+++ b/modules/audio_processing/utility/delay_estimator.cc
@@ -55,7 +55,7 @@
   return ((int)tmp);
 }
 
-// Compares the |binary_vector| with all rows of the |binary_matrix| and counts
+// Compares the `binary_vector` with all rows of the `binary_matrix` and counts
 // per row the number of times they have the same value.
 //
 // Inputs:
@@ -74,7 +74,7 @@
                                int32_t* bit_counts) {
   int n = 0;
 
-  // Compare |binary_vector| with all rows of the |binary_matrix|
+  // Compare `binary_vector` with all rows of the `binary_matrix`
   for (; n < matrix_size; n++) {
     bit_counts[n] = (int32_t)BitCount(binary_vector ^ binary_matrix[n]);
   }
@@ -83,9 +83,9 @@
 // Collects necessary statistics for the HistogramBasedValidation().  This
 // function has to be called prior to calling HistogramBasedValidation().  The
 // statistics updated and used by the HistogramBasedValidation() are:
-//  1. the number of |candidate_hits|, which states for how long we have had the
-//     same |candidate_delay|
-//  2. the |histogram| of candidate delays over time.  This histogram is
+//  1. the number of `candidate_hits`, which states for how long we have had the
+//     same `candidate_delay`
+//  2. the `histogram` of candidate delays over time.  This histogram is
 //     weighted with respect to a reliability measure and time-varying to cope
 //     with possible delay shifts.
 // For further description see commented code.
@@ -93,7 +93,7 @@
 // Inputs:
 //  - candidate_delay   : The delay to validate.
 //  - valley_depth_q14  : The cost function has a valley/minimum at the
-//                        |candidate_delay| location.  |valley_depth_q14| is the
+//                        `candidate_delay` location.  `valley_depth_q14` is the
 //                        cost function difference between the minimum and
 //                        maximum locations.  The value is in the Q14 domain.
 //  - valley_level_q14  : Is the cost function value at the minimum, in Q14.
@@ -109,37 +109,37 @@
   int i = 0;
 
   RTC_DCHECK_EQ(self->history_size, self->farend->history_size);
-  // Reset |candidate_hits| if we have a new candidate.
+  // Reset `candidate_hits` if we have a new candidate.
   if (candidate_delay != self->last_candidate_delay) {
     self->candidate_hits = 0;
     self->last_candidate_delay = candidate_delay;
   }
   self->candidate_hits++;
 
-  // The |histogram| is updated differently across the bins.
-  // 1. The |candidate_delay| histogram bin is increased with the
-  //    |valley_depth|, which is a simple measure of how reliable the
-  //    |candidate_delay| is.  The histogram is not increased above
-  //    |kHistogramMax|.
+  // The `histogram` is updated differently across the bins.
+  // 1. The `candidate_delay` histogram bin is increased with the
+  //    `valley_depth`, which is a simple measure of how reliable the
+  //    `candidate_delay` is.  The histogram is not increased above
+  //    `kHistogramMax`.
   self->histogram[candidate_delay] += valley_depth;
   if (self->histogram[candidate_delay] > kHistogramMax) {
     self->histogram[candidate_delay] = kHistogramMax;
   }
-  // 2. The histogram bins in the neighborhood of |candidate_delay| are
+  // 2. The histogram bins in the neighborhood of `candidate_delay` are
   //    unaffected.  The neighborhood is defined as x + {-2, -1, 0, 1}.
-  // 3. The histogram bins in the neighborhood of |last_delay| are decreased
-  //    with |decrease_in_last_set|.  This value equals the difference between
-  //    the cost function values at the locations |candidate_delay| and
-  //    |last_delay| until we reach |max_hits_for_slow_change| consecutive hits
-  //    at the |candidate_delay|.  If we exceed this amount of hits the
-  //    |candidate_delay| is a "potential" candidate and we start decreasing
-  //    these histogram bins more rapidly with |valley_depth|.
+  // 3. The histogram bins in the neighborhood of `last_delay` are decreased
+  //    with `decrease_in_last_set`.  This value equals the difference between
+  //    the cost function values at the locations `candidate_delay` and
+  //    `last_delay` until we reach `max_hits_for_slow_change` consecutive hits
+  //    at the `candidate_delay`.  If we exceed this amount of hits the
+  //    `candidate_delay` is a "potential" candidate and we start decreasing
+  //    these histogram bins more rapidly with `valley_depth`.
   if (self->candidate_hits < max_hits_for_slow_change) {
     decrease_in_last_set =
         (self->mean_bit_counts[self->compare_delay] - valley_level_q14) *
         kQ14Scaling;
   }
-  // 4. All other bins are decreased with |valley_depth|.
+  // 4. All other bins are decreased with `valley_depth`.
   // TODO(bjornv): Investigate how to make this loop more efficient.  Split up
   // the loop?  Remove parts that doesn't add too much.
   for (i = 0; i < self->history_size; ++i) {
@@ -157,15 +157,15 @@
   }
 }
 
-// Validates the |candidate_delay|, estimated in WebRtc_ProcessBinarySpectrum(),
+// Validates the `candidate_delay`, estimated in WebRtc_ProcessBinarySpectrum(),
 // based on a mix of counting concurring hits with a modified histogram
 // of recent delay estimates.  In brief a candidate is valid (returns 1) if it
 // is the most likely according to the histogram.  There are a couple of
 // exceptions that are worth mentioning:
-//  1. If the |candidate_delay| < |last_delay| it can be that we are in a
+//  1. If the `candidate_delay` < `last_delay` it can be that we are in a
 //     non-causal state, breaking a possible echo control algorithm.  Hence, we
 //     open up for a quicker change by allowing the change even if the
-//     |candidate_delay| is not the most likely one according to the histogram.
+//     `candidate_delay` is not the most likely one according to the histogram.
 //  2. There's a minimum number of hits (kMinRequiredHits) and the histogram
 //     value has to reached a minimum (kMinHistogramThreshold) to be valid.
 //  3. The action is also depending on the filter length used for echo control.
@@ -177,7 +177,7 @@
 //  - candidate_delay     : The delay to validate.
 //
 // Return value:
-//  - is_histogram_valid  : 1 - The |candidate_delay| is valid.
+//  - is_histogram_valid  : 1 - The `candidate_delay` is valid.
 //                          0 - Otherwise.
 static int HistogramBasedValidation(const BinaryDelayEstimator* self,
                                     int candidate_delay) {
@@ -186,22 +186,22 @@
   const int delay_difference = candidate_delay - self->last_delay;
   int is_histogram_valid = 0;
 
-  // The histogram based validation of |candidate_delay| is done by comparing
-  // the |histogram| at bin |candidate_delay| with a |histogram_threshold|.
-  // This |histogram_threshold| equals a |fraction| of the |histogram| at bin
-  // |last_delay|.  The |fraction| is a piecewise linear function of the
-  // |delay_difference| between the |candidate_delay| and the |last_delay|
+  // The histogram based validation of `candidate_delay` is done by comparing
+  // the `histogram` at bin `candidate_delay` with a `histogram_threshold`.
+  // This `histogram_threshold` equals a `fraction` of the `histogram` at bin
+  // `last_delay`.  The `fraction` is a piecewise linear function of the
+  // `delay_difference` between the `candidate_delay` and the `last_delay`
   // allowing for a quicker move if
   //  i) a potential echo control filter can not handle these large differences.
-  // ii) keeping |last_delay| instead of updating to |candidate_delay| could
+  // ii) keeping `last_delay` instead of updating to `candidate_delay` could
   //     force an echo control into a non-causal state.
   // We further require the histogram to have reached a minimum value of
-  // |kMinHistogramThreshold|.  In addition, we also require the number of
-  // |candidate_hits| to be more than |kMinRequiredHits| to remove spurious
+  // `kMinHistogramThreshold`.  In addition, we also require the number of
+  // `candidate_hits` to be more than `kMinRequiredHits` to remove spurious
   // values.
 
-  // Calculate a comparison histogram value (|histogram_threshold|) that is
-  // depending on the distance between the |candidate_delay| and |last_delay|.
+  // Calculate a comparison histogram value (`histogram_threshold`) that is
+  // depending on the distance between the `candidate_delay` and `last_delay`.
   // TODO(bjornv): How much can we gain by turning the fraction calculation
   // into tables?
   if (delay_difference > self->allowed_offset) {
@@ -226,9 +226,9 @@
   return is_histogram_valid;
 }
 
-// Performs a robust validation of the |candidate_delay| estimated in
+// Performs a robust validation of the `candidate_delay` estimated in
 // WebRtc_ProcessBinarySpectrum().  The algorithm takes the
-// |is_instantaneous_valid| and the |is_histogram_valid| and combines them
+// `is_instantaneous_valid` and the `is_histogram_valid` and combines them
 // into a robust validation.  The HistogramBasedValidation() has to be called
 // prior to this call.
 // For further description on how the combination is done, see commented code.
@@ -250,18 +250,18 @@
   int is_robust = 0;
 
   // The final robust validation is based on the two algorithms; 1) the
-  // |is_instantaneous_valid| and 2) the histogram based with result stored in
-  // |is_histogram_valid|.
-  //   i) Before we actually have a valid estimate (|last_delay| == -2), we say
+  // `is_instantaneous_valid` and 2) the histogram based with result stored in
+  // `is_histogram_valid`.
+  //   i) Before we actually have a valid estimate (`last_delay` == -2), we say
   //      a candidate is valid if either algorithm states so
-  //      (|is_instantaneous_valid| OR |is_histogram_valid|).
+  //      (`is_instantaneous_valid` OR `is_histogram_valid`).
   is_robust =
       (self->last_delay < 0) && (is_instantaneous_valid || is_histogram_valid);
   //  ii) Otherwise, we need both algorithms to be certain
-  //      (|is_instantaneous_valid| AND |is_histogram_valid|)
+  //      (`is_instantaneous_valid` AND `is_histogram_valid`)
   is_robust |= is_instantaneous_valid && is_histogram_valid;
   // iii) With one exception, i.e., the histogram based algorithm can overrule
-  //      the instantaneous one if |is_histogram_valid| = 1 and the histogram
+  //      the instantaneous one if `is_histogram_valid` = 1 and the histogram
   //      is significantly strong.
   is_robust |= is_histogram_valid &&
                (self->histogram[candidate_delay] > self->last_delay_histogram);
@@ -373,13 +373,13 @@
 void WebRtc_AddBinaryFarSpectrum(BinaryDelayEstimatorFarend* handle,
                                  uint32_t binary_far_spectrum) {
   RTC_DCHECK(handle);
-  // Shift binary spectrum history and insert current |binary_far_spectrum|.
+  // Shift binary spectrum history and insert current `binary_far_spectrum`.
   memmove(&(handle->binary_far_history[1]), &(handle->binary_far_history[0]),
           (handle->history_size - 1) * sizeof(uint32_t));
   handle->binary_far_history[0] = binary_far_spectrum;
 
   // Shift history of far-end binary spectrum bit counts and insert bit count
-  // of current |binary_far_spectrum|.
+  // of current `binary_far_spectrum`.
   memmove(&(handle->far_bit_counts[1]), &(handle->far_bit_counts[0]),
           (handle->history_size - 1) * sizeof(int));
   handle->far_bit_counts[0] = BitCount(binary_far_spectrum);
@@ -402,7 +402,7 @@
   free(self->histogram);
   self->histogram = NULL;
 
-  // BinaryDelayEstimator does not have ownership of |farend|, hence we do not
+  // BinaryDelayEstimator does not have ownership of `farend`, hence we do not
   // free the memory here. That should be handled separately by the user.
   self->farend = NULL;
 
@@ -454,8 +454,8 @@
     // Only update far-end buffers if we need.
     history_size = WebRtc_AllocateFarendBufferMemory(far, history_size);
   }
-  // The extra array element in |mean_bit_counts| and |histogram| is a dummy
-  // element only used while |last_delay| == -2, i.e., before we have a valid
+  // The extra array element in `mean_bit_counts` and `histogram` is a dummy
+  // element only used while `last_delay` == -2, i.e., before we have a valid
   // estimate.
   self->mean_bit_counts = static_cast<int32_t*>(
       realloc(self->mean_bit_counts,
@@ -539,36 +539,36 @@
   }
   if (self->near_history_size > 1) {
     // If we apply lookahead, shift near-end binary spectrum history. Insert
-    // current |binary_near_spectrum| and pull out the delayed one.
+    // current `binary_near_spectrum` and pull out the delayed one.
     memmove(&(self->binary_near_history[1]), &(self->binary_near_history[0]),
             (self->near_history_size - 1) * sizeof(uint32_t));
     self->binary_near_history[0] = binary_near_spectrum;
     binary_near_spectrum = self->binary_near_history[self->lookahead];
   }
 
-  // Compare with delayed spectra and store the |bit_counts| for each delay.
+  // Compare with delayed spectra and store the `bit_counts` for each delay.
   BitCountComparison(binary_near_spectrum, self->farend->binary_far_history,
                      self->history_size, self->bit_counts);
 
-  // Update |mean_bit_counts|, which is the smoothed version of |bit_counts|.
+  // Update `mean_bit_counts`, which is the smoothed version of `bit_counts`.
   for (i = 0; i < self->history_size; i++) {
-    // |bit_counts| is constrained to [0, 32], meaning we can smooth with a
+    // `bit_counts` is constrained to [0, 32], meaning we can smooth with a
     // factor up to 2^26. We use Q9.
     int32_t bit_count = (self->bit_counts[i] << 9);  // Q9.
 
-    // Update |mean_bit_counts| only when far-end signal has something to
-    // contribute. If |far_bit_counts| is zero the far-end signal is weak and
+    // Update `mean_bit_counts` only when far-end signal has something to
+    // contribute. If `far_bit_counts` is zero the far-end signal is weak and
     // we likely have a poor echo condition, hence don't update.
     if (self->farend->far_bit_counts[i] > 0) {
-      // Make number of right shifts piecewise linear w.r.t. |far_bit_counts|.
+      // Make number of right shifts piecewise linear w.r.t. `far_bit_counts`.
       int shifts = kShiftsAtZero;
       shifts -= (kShiftsLinearSlope * self->farend->far_bit_counts[i]) >> 4;
       WebRtc_MeanEstimatorFix(bit_count, shifts, &(self->mean_bit_counts[i]));
     }
   }
 
-  // Find |candidate_delay|, |value_best_candidate| and |value_worst_candidate|
-  // of |mean_bit_counts|.
+  // Find `candidate_delay`, `value_best_candidate` and `value_worst_candidate`
+  // of `mean_bit_counts`.
   for (i = 0; i < self->history_size; i++) {
     if (self->mean_bit_counts[i] < value_best_candidate) {
       value_best_candidate = self->mean_bit_counts[i];
@@ -580,25 +580,25 @@
   }
   valley_depth = value_worst_candidate - value_best_candidate;
 
-  // The |value_best_candidate| is a good indicator on the probability of
-  // |candidate_delay| being an accurate delay (a small |value_best_candidate|
+  // The `value_best_candidate` is a good indicator on the probability of
+  // `candidate_delay` being an accurate delay (a small `value_best_candidate`
   // means a good binary match). In the following sections we make a decision
-  // whether to update |last_delay| or not.
+  // whether to update `last_delay` or not.
   // 1) If the difference bit counts between the best and the worst delay
   //    candidates is too small we consider the situation to be unreliable and
-  //    don't update |last_delay|.
-  // 2) If the situation is reliable we update |last_delay| if the value of the
+  //    don't update `last_delay`.
+  // 2) If the situation is reliable we update `last_delay` if the value of the
   //    best candidate delay has a value less than
-  //     i) an adaptive threshold |minimum_probability|, or
-  //    ii) this corresponding value |last_delay_probability|, but updated at
+  //     i) an adaptive threshold `minimum_probability`, or
+  //    ii) this corresponding value `last_delay_probability`, but updated at
   //        this time instant.
 
-  // Update |minimum_probability|.
+  // Update `minimum_probability`.
   if ((self->minimum_probability > kProbabilityLowerLimit) &&
       (valley_depth > kProbabilityMinSpread)) {
     // The "hard" threshold can't be lower than 17 (in Q9).
     // The valley in the curve also has to be distinct, i.e., the
-    // difference between |value_worst_candidate| and |value_best_candidate| has
+    // difference between `value_worst_candidate` and `value_best_candidate` has
     // to be large enough.
     int32_t threshold = value_best_candidate + kProbabilityOffset;
     if (threshold < kProbabilityLowerLimit) {
@@ -608,17 +608,17 @@
       self->minimum_probability = threshold;
     }
   }
-  // Update |last_delay_probability|.
+  // Update `last_delay_probability`.
   // We use a Markov type model, i.e., a slowly increasing level over time.
   self->last_delay_probability++;
-  // Validate |candidate_delay|.  We have a reliable instantaneous delay
+  // Validate `candidate_delay`.  We have a reliable instantaneous delay
   // estimate if
-  //  1) The valley is distinct enough (|valley_depth| > |kProbabilityOffset|)
+  //  1) The valley is distinct enough (`valley_depth` > `kProbabilityOffset`)
   // and
   //  2) The depth of the valley is deep enough
-  //      (|value_best_candidate| < |minimum_probability|)
+  //      (`value_best_candidate` < `minimum_probability`)
   //     and deeper than the best estimate so far
-  //      (|value_best_candidate| < |last_delay_probability|)
+  //      (`value_best_candidate` < `last_delay_probability`)
   valid_candidate = ((valley_depth > kProbabilityOffset) &&
                      ((value_best_candidate < self->minimum_probability) ||
                       (value_best_candidate < self->last_delay_probability)));
@@ -650,7 +650,7 @@
           (self->histogram[candidate_delay] > kLastHistogramMax
                ? kLastHistogramMax
                : self->histogram[candidate_delay]);
-      // Adjust the histogram if we made a change to |last_delay|, though it was
+      // Adjust the histogram if we made a change to `last_delay`, though it was
       // not the most likely one according to the histogram.
       if (self->histogram[candidate_delay] <
           self->histogram[self->compare_delay]) {
@@ -680,7 +680,7 @@
     // Simply a linear function of the histogram height at delay estimate.
     quality = self->histogram[self->compare_delay] / kHistogramMax;
   } else {
-    // Note that |last_delay_probability| states how deep the minimum of the
+    // Note that `last_delay_probability` states how deep the minimum of the
     // cost function is, so it is rather an error probability.
     quality = (float)(kMaxBitCountsQ9 - self->last_delay_probability) /
               kMaxBitCountsQ9;
diff --git a/modules/audio_processing/utility/delay_estimator.h b/modules/audio_processing/utility/delay_estimator.h
index df281bc..b6fc36a 100644
--- a/modules/audio_processing/utility/delay_estimator.h
+++ b/modules/audio_processing/utility/delay_estimator.h
@@ -81,7 +81,7 @@
 //
 // Return value:
 //      - BinaryDelayEstimatorFarend*
-//                        : Created |handle|. If the memory can't be allocated
+//                        : Created `handle`. If the memory can't be allocated
 //                          or if any of the input parameters are invalid NULL
 //                          is returned.
 //
@@ -159,7 +159,7 @@
     BinaryDelayEstimatorFarend* farend,
     int max_lookahead);
 
-// Re-allocates |history_size| dependent buffers. The far-end buffers will be
+// Re-allocates `history_size` dependent buffers. The far-end buffers will be
 // updated at the same time if needed.
 //
 // Input:
@@ -237,7 +237,7 @@
 //                                      delay value.
 float WebRtc_binary_last_delay_quality(BinaryDelayEstimator* self);
 
-// Updates the |mean_value| recursively with a step size of 2^-|factor|. This
+// Updates the `mean_value` recursively with a step size of 2^-`factor`. This
 // function is used internally in the Binary Delay Estimator as well as the
 // Fixed point wrapper.
 //
diff --git a/modules/audio_processing/utility/delay_estimator_internal.h b/modules/audio_processing/utility/delay_estimator_internal.h
index fce95d8..891e200 100644
--- a/modules/audio_processing/utility/delay_estimator_internal.h
+++ b/modules/audio_processing/utility/delay_estimator_internal.h
@@ -25,7 +25,7 @@
 typedef struct {
   // Pointers to mean values of spectrum.
   SpectrumType* mean_far_spectrum;
-  // |mean_far_spectrum| initialization indicator.
+  // `mean_far_spectrum` initialization indicator.
   int far_spectrum_initialized;
 
   int spectrum_size;
@@ -37,7 +37,7 @@
 typedef struct {
   // Pointers to mean values of spectrum.
   SpectrumType* mean_near_spectrum;
-  // |mean_near_spectrum| initialization indicator.
+  // `mean_near_spectrum` initialization indicator.
   int near_spectrum_initialized;
 
   int spectrum_size;
diff --git a/modules/audio_processing/utility/delay_estimator_unittest.cc b/modules/audio_processing/utility/delay_estimator_unittest.cc
index 65d8e14..651d836 100644
--- a/modules/audio_processing/utility/delay_estimator_unittest.cc
+++ b/modules/audio_processing/utility/delay_estimator_unittest.cc
@@ -80,7 +80,7 @@
   memset(far_u16_, 1, sizeof(far_u16_));
   memset(near_u16_, 2, sizeof(near_u16_));
   // Construct a sequence of binary spectra used to verify delay estimate. The
-  // |kSequenceLength| has to be long enough for the delay estimation to leave
+  // `kSequenceLength` has to be long enough for the delay estimation to leave
   // the initialized state.
   binary_spectrum_[0] = 1;
   for (int i = 1; i < (kSequenceLength + kHistorySize); i++) {
@@ -132,7 +132,7 @@
   // Initialize Binary Delay Estimator
   WebRtc_InitBinaryDelayEstimator(binary_);
   // Verify initialization. This does not guarantee a complete check, since
-  // |last_delay| may be equal to -2 before initialization if done on the fly.
+  // `last_delay` may be equal to -2 before initialization if done on the fly.
   EXPECT_EQ(-2, binary_->last_delay);
 }
 
@@ -144,7 +144,7 @@
 
   if (delay != -2) {
     // Verify correct delay estimate. In the non-causal case the true delay
-    // is equivalent with the |offset|.
+    // is equivalent with the `offset`.
     EXPECT_EQ(offset, delay);
   }
 }
@@ -160,7 +160,7 @@
   WebRtc_InitBinaryDelayEstimator(binary1);
   WebRtc_InitBinaryDelayEstimator(binary2);
   // Verify initialization. This does not guarantee a complete check, since
-  // |last_delay| may be equal to -2 before initialization if done on the fly.
+  // `last_delay` may be equal to -2 before initialization if done on the fly.
   EXPECT_EQ(-2, binary1->last_delay);
   EXPECT_EQ(-2, binary2->last_delay);
   for (int i = kLookahead; i < (kSequenceLength + kLookahead); i++) {
@@ -174,12 +174,12 @@
     VerifyDelay(binary2,
                 far_offset + kLookahead + lookahead_offset + near_offset,
                 delay_2);
-    // Expect the two delay estimates to be offset by |lookahead_offset| +
-    // |near_offset| when we have left the initial state.
+    // Expect the two delay estimates to be offset by `lookahead_offset` +
+    // `near_offset` when we have left the initial state.
     if ((delay_1 != -2) && (delay_2 != -2)) {
       EXPECT_EQ(delay_1, delay_2 - lookahead_offset - near_offset);
     }
-    // For the case of identical signals |delay_1| and |delay_2| should match
+    // For the case of identical signals `delay_1` and `delay_2` should match
     // all the time, unless one of them has robust validation turned on.  In
     // that case the robust validation leaves the initial state faster.
     if ((near_offset == 0) && (lookahead_offset == 0)) {
@@ -208,8 +208,8 @@
   BinaryDelayEstimator* binary2 = WebRtc_CreateBinaryDelayEstimator(
       binary_farend_, kLookahead + lookahead_offset);
   // Verify the delay for both causal and non-causal systems. For causal systems
-  // the delay is equivalent with a positive |offset| of the far-end sequence.
-  // For non-causal systems the delay is equivalent with a negative |offset| of
+  // the delay is equivalent with a positive `offset` of the far-end sequence.
+  // For non-causal systems the delay is equivalent with a negative `offset` of
   // the far-end sequence.
   binary_->robust_validation_enabled = ref_robust_validation;
   binary2->robust_validation_enabled = robust_validation;
@@ -242,23 +242,23 @@
   EXPECT_TRUE(handle == NULL);
 
   // WebRtc_InitDelayEstimatorFarend() and WebRtc_InitDelayEstimator() should
-  // return -1 if we have a NULL pointer as |handle|.
+  // return -1 if we have a NULL pointer as `handle`.
   EXPECT_EQ(-1, WebRtc_InitDelayEstimatorFarend(NULL));
   EXPECT_EQ(-1, WebRtc_InitDelayEstimator(NULL));
 
   // WebRtc_AddFarSpectrumFloat() should return -1 if we have:
-  // 1) NULL pointer as |handle|.
+  // 1) NULL pointer as `handle`.
   // 2) NULL pointer as far-end spectrum.
   // 3) Incorrect spectrum size.
   EXPECT_EQ(-1, WebRtc_AddFarSpectrumFloat(NULL, far_f_, spectrum_size_));
-  // Use |farend_handle_| which is properly created at SetUp().
+  // Use `farend_handle_` which is properly created at SetUp().
   EXPECT_EQ(-1,
             WebRtc_AddFarSpectrumFloat(farend_handle_, NULL, spectrum_size_));
   EXPECT_EQ(-1, WebRtc_AddFarSpectrumFloat(farend_handle_, far_f_,
                                            spectrum_size_ + 1));
 
   // WebRtc_AddFarSpectrumFix() should return -1 if we have:
-  // 1) NULL pointer as |handle|.
+  // 1) NULL pointer as `handle`.
   // 2) NULL pointer as far-end spectrum.
   // 3) Incorrect spectrum size.
   // 4) Too high precision in far-end spectrum (Q-domain > 15).
@@ -271,8 +271,8 @@
                                          spectrum_size_, 16));
 
   // WebRtc_set_history_size() should return -1 if:
-  // 1) |handle| is a NULL.
-  // 2) |history_size| <= 1.
+  // 1) `handle` is a NULL.
+  // 2) `history_size` <= 1.
   EXPECT_EQ(-1, WebRtc_set_history_size(NULL, 1));
   EXPECT_EQ(-1, WebRtc_set_history_size(handle_, 1));
   // WebRtc_history_size() should return -1 if:
@@ -293,43 +293,43 @@
   EXPECT_EQ(-1, WebRtc_set_lookahead(handle_, -1));
 
   // WebRtc_set_allowed_offset() should return -1 if we have:
-  // 1) NULL pointer as |handle|.
-  // 2) |allowed_offset| < 0.
+  // 1) NULL pointer as `handle`.
+  // 2) `allowed_offset` < 0.
   EXPECT_EQ(-1, WebRtc_set_allowed_offset(NULL, 0));
   EXPECT_EQ(-1, WebRtc_set_allowed_offset(handle_, -1));
 
   EXPECT_EQ(-1, WebRtc_get_allowed_offset(NULL));
 
   // WebRtc_enable_robust_validation() should return -1 if we have:
-  // 1) NULL pointer as |handle|.
-  // 2) Incorrect |enable| value (not 0 or 1).
+  // 1) NULL pointer as `handle`.
+  // 2) Incorrect `enable` value (not 0 or 1).
   EXPECT_EQ(-1, WebRtc_enable_robust_validation(NULL, kEnable[0]));
   EXPECT_EQ(-1, WebRtc_enable_robust_validation(handle_, -1));
   EXPECT_EQ(-1, WebRtc_enable_robust_validation(handle_, 2));
 
   // WebRtc_is_robust_validation_enabled() should return -1 if we have NULL
-  // pointer as |handle|.
+  // pointer as `handle`.
   EXPECT_EQ(-1, WebRtc_is_robust_validation_enabled(NULL));
 
   // WebRtc_DelayEstimatorProcessFloat() should return -1 if we have:
-  // 1) NULL pointer as |handle|.
+  // 1) NULL pointer as `handle`.
   // 2) NULL pointer as near-end spectrum.
   // 3) Incorrect spectrum size.
   // 4) Non matching history sizes if multiple delay estimators using the same
   //    far-end reference.
   EXPECT_EQ(-1,
             WebRtc_DelayEstimatorProcessFloat(NULL, near_f_, spectrum_size_));
-  // Use |handle_| which is properly created at SetUp().
+  // Use `handle_` which is properly created at SetUp().
   EXPECT_EQ(-1,
             WebRtc_DelayEstimatorProcessFloat(handle_, NULL, spectrum_size_));
   EXPECT_EQ(-1, WebRtc_DelayEstimatorProcessFloat(handle_, near_f_,
                                                   spectrum_size_ + 1));
-  // |tmp_handle| is already in a non-matching state.
+  // `tmp_handle` is already in a non-matching state.
   EXPECT_EQ(-1, WebRtc_DelayEstimatorProcessFloat(tmp_handle, near_f_,
                                                   spectrum_size_));
 
   // WebRtc_DelayEstimatorProcessFix() should return -1 if we have:
-  // 1) NULL pointer as |handle|.
+  // 1) NULL pointer as `handle`.
   // 2) NULL pointer as near-end spectrum.
   // 3) Incorrect spectrum size.
   // 4) Too high precision in near-end spectrum (Q-domain > 15).
@@ -343,12 +343,12 @@
                                                 spectrum_size_ + 1, 0));
   EXPECT_EQ(-1, WebRtc_DelayEstimatorProcessFix(handle_, near_u16_,
                                                 spectrum_size_, 16));
-  // |tmp_handle| is already in a non-matching state.
+  // `tmp_handle` is already in a non-matching state.
   EXPECT_EQ(-1, WebRtc_DelayEstimatorProcessFix(tmp_handle, near_u16_,
                                                 spectrum_size_, 0));
   WebRtc_FreeDelayEstimator(tmp_handle);
 
-  // WebRtc_last_delay() should return -1 if we have a NULL pointer as |handle|.
+  // WebRtc_last_delay() should return -1 if we have a NULL pointer as `handle`.
   EXPECT_EQ(-1, WebRtc_last_delay(NULL));
 
   // Free any local memory if needed.
@@ -422,7 +422,7 @@
 TEST_F(DelayEstimatorTest, CorrectLastDelay) {
   // In this test we verify that we get the correct last delay upon valid call.
   // We simply process the same data until we leave the initialized state
-  // (|last_delay| = -2). Then we compare the Process() output with the
+  // (`last_delay` = -2). Then we compare the Process() output with the
   // last_delay() call.
 
   // TODO(bjornv): Update quality values for robust validation.
@@ -488,8 +488,8 @@
 
   BinaryDelayEstimator* binary_handle = binary_;
   // WebRtc_CreateBinaryDelayEstimator() should return -1 if we have a NULL
-  // pointer as |binary_farend| or invalid input values. Upon failure, the
-  // |binary_handle| should be NULL.
+  // pointer as `binary_farend` or invalid input values. Upon failure, the
+  // `binary_handle` should be NULL.
   // Make sure we have a non-NULL value at start, so we can detect NULL after
   // create failure.
   binary_handle = WebRtc_CreateBinaryDelayEstimator(NULL, kLookahead);
@@ -506,12 +506,12 @@
   int32_t mean_value_before = mean_value;
   int32_t new_mean_value = mean_value * 2;
 
-  // Increasing |mean_value|.
+  // Increasing `mean_value`.
   WebRtc_MeanEstimatorFix(new_mean_value, 10, &mean_value);
   EXPECT_LT(mean_value_before, mean_value);
   EXPECT_GT(new_mean_value, mean_value);
 
-  // Decreasing |mean_value|.
+  // Decreasing `mean_value`.
   new_mean_value = mean_value / 2;
   mean_value_before = mean_value;
   WebRtc_MeanEstimatorFix(new_mean_value, 10, &mean_value);
@@ -569,7 +569,7 @@
 
 TEST_F(DelayEstimatorTest, AllowedOffsetNoImpactWhenRobustValidationDisabled) {
   // The same setup as in ExactDelayEstimateMultipleNearSameSpectrum with the
-  // difference that |allowed_offset| is set for the reference binary delay
+  // difference that `allowed_offset` is set for the reference binary delay
   // estimator.
 
   binary_->allowed_offset = 10;
diff --git a/modules/audio_processing/utility/delay_estimator_wrapper.cc b/modules/audio_processing/utility/delay_estimator_wrapper.cc
index 8eac2f6..521a8a0 100644
--- a/modules/audio_processing/utility/delay_estimator_wrapper.cc
+++ b/modules/audio_processing/utility/delay_estimator_wrapper.cc
@@ -19,8 +19,8 @@
 
 namespace webrtc {
 
-// Only bit |kBandFirst| through bit |kBandLast| are processed and
-// |kBandFirst| - |kBandLast| must be < 32.
+// Only bit `kBandFirst` through bit `kBandLast` are processed and
+// `kBandFirst` - `kBandLast` must be < 32.
 enum { kBandFirst = 12 };
 enum { kBandLast = 43 };
 
@@ -48,8 +48,8 @@
   *mean_value += (new_value - *mean_value) * scale;
 }
 
-// Computes the binary spectrum by comparing the input |spectrum| with a
-// |threshold_spectrum|. Float and fixed point versions.
+// Computes the binary spectrum by comparing the input `spectrum` with a
+// `threshold_spectrum`. Float and fixed point versions.
 //
 // Inputs:
 //      - spectrum            : Spectrum of which the binary spectrum should be
@@ -69,11 +69,11 @@
   RTC_DCHECK_LT(q_domain, 16);
 
   if (!(*threshold_initialized)) {
-    // Set the |threshold_spectrum| to half the input |spectrum| as starting
+    // Set the `threshold_spectrum` to half the input `spectrum` as starting
     // value. This speeds up the convergence.
     for (i = kBandFirst; i <= kBandLast; i++) {
       if (spectrum[i] > 0) {
-        // Convert input spectrum from Q(|q_domain|) to Q15.
+        // Convert input spectrum from Q(`q_domain`) to Q15.
         int32_t spectrum_q15 = ((int32_t)spectrum[i]) << (15 - q_domain);
         threshold_spectrum[i].int32_ = (spectrum_q15 >> 1);
         *threshold_initialized = 1;
@@ -81,11 +81,11 @@
     }
   }
   for (i = kBandFirst; i <= kBandLast; i++) {
-    // Convert input spectrum from Q(|q_domain|) to Q15.
+    // Convert input spectrum from Q(`q_domain`) to Q15.
     int32_t spectrum_q15 = ((int32_t)spectrum[i]) << (15 - q_domain);
-    // Update the |threshold_spectrum|.
+    // Update the `threshold_spectrum`.
     WebRtc_MeanEstimatorFix(spectrum_q15, 6, &(threshold_spectrum[i].int32_));
-    // Convert |spectrum| at current frequency bin to a binary value.
+    // Convert `spectrum` at current frequency bin to a binary value.
     if (spectrum_q15 > threshold_spectrum[i].int32_) {
       out = SetBit(out, i - kBandFirst);
     }
@@ -102,7 +102,7 @@
   const float kScale = 1 / 64.0;
 
   if (!(*threshold_initialized)) {
-    // Set the |threshold_spectrum| to half the input |spectrum| as starting
+    // Set the `threshold_spectrum` to half the input `spectrum` as starting
     // value. This speeds up the convergence.
     for (i = kBandFirst; i <= kBandLast; i++) {
       if (spectrum[i] > 0.0f) {
@@ -113,9 +113,9 @@
   }
 
   for (i = kBandFirst; i <= kBandLast; i++) {
-    // Update the |threshold_spectrum|.
+    // Update the `threshold_spectrum`.
     MeanEstimatorFloat(spectrum[i], kScale, &(threshold_spectrum[i].float_));
-    // Convert |spectrum| at current frequency bin to a binary value.
+    // Convert `spectrum` at current frequency bin to a binary value.
     if (spectrum[i] > threshold_spectrum[i].float_) {
       out = SetBit(out, i - kBandFirst);
     }
@@ -219,7 +219,7 @@
     return -1;
   }
   if (far_q > 15) {
-    // If |far_q| is larger than 15 we cannot guarantee no wrap around.
+    // If `far_q` is larger than 15 we cannot guarantee no wrap around.
     return -1;
   }
 
@@ -433,7 +433,7 @@
     return -1;
   }
   if (near_q > 15) {
-    // If |near_q| is larger than 15 we cannot guarantee no wrap around.
+    // If `near_q` is larger than 15 we cannot guarantee no wrap around.
     return -1;
   }
 
diff --git a/modules/audio_processing/utility/delay_estimator_wrapper.h b/modules/audio_processing/utility/delay_estimator_wrapper.h
index dbcafaf..a90cbe3 100644
--- a/modules/audio_processing/utility/delay_estimator_wrapper.h
+++ b/modules/audio_processing/utility/delay_estimator_wrapper.h
@@ -35,7 +35,7 @@
 //                        determined together with WebRtc_set_lookahead().
 //
 // Return value:
-//  - void*             : Created |handle|. If the memory can't be allocated or
+//  - void*             : Created `handle`. If the memory can't be allocated or
 //                        if any of the input parameters are invalid NULL is
 //                        returned.
 void* WebRtc_CreateDelayEstimatorFarend(int spectrum_size, int history_size);
@@ -85,13 +85,13 @@
 //                        WebRtc_CreateDelayEstimatorFarend().
 //
 //                        Note that WebRtc_CreateDelayEstimator does not take
-//                        ownership of |farend_handle|, which has to be torn
+//                        ownership of `farend_handle`, which has to be torn
 //                        down properly after this instance.
 //
 //      - max_lookahead : Maximum amount of non-causal lookahead allowed. The
 //                        actual amount of lookahead used can be controlled by
-//                        WebRtc_set_lookahead(...). The default |lookahead| is
-//                        set to |max_lookahead| at create time. Use
+//                        WebRtc_set_lookahead(...). The default `lookahead` is
+//                        set to `max_lookahead` at create time. Use
 //                        WebRtc_set_lookahead(...) before start if a different
 //                        value is desired.
 //
@@ -106,12 +106,12 @@
 //                        estimated.
 //
 //                        Note that the effective range of delay estimates is
-//                        [-|lookahead|,... ,|history_size|-|lookahead|)
-//                        where |history_size| is set through
+//                        [-`lookahead`,... ,`history_size`-`lookahead`)
+//                        where `history_size` is set through
 //                        WebRtc_set_history_size().
 //
 // Return value:
-//      - void*         : Created |handle|. If the memory can't be allocated or
+//      - void*         : Created `handle`. If the memory can't be allocated or
 //                        if any of the input parameters are invalid NULL is
 //                        returned.
 void* WebRtc_CreateDelayEstimator(void* farend_handle, int max_lookahead);
@@ -129,12 +129,12 @@
 //      - actual_shifts : The actual number of shifts performed.
 int WebRtc_SoftResetDelayEstimator(void* handle, int delay_shift);
 
-// Sets the effective |history_size| used. Valid values from 2. We simply need
-// at least two delays to compare to perform an estimate. If |history_size| is
+// Sets the effective `history_size` used. Valid values from 2. We simply need
+// at least two delays to compare to perform an estimate. If `history_size` is
 // changed, buffers are reallocated filling in with zeros if necessary.
-// Note that changing the |history_size| affects both buffers in far-end and
+// Note that changing the `history_size` affects both buffers in far-end and
 // near-end. Hence it is important to change all DelayEstimators that use the
-// same reference far-end, to the same |history_size| value.
+// same reference far-end, to the same `history_size` value.
 // Inputs:
 //  - handle            : Pointer to the delay estimation instance.
 //  - history_size      : Effective history size to be used.
@@ -148,8 +148,8 @@
 //      - handle        : Pointer to the delay estimation instance.
 int WebRtc_history_size(const void* handle);
 
-// Sets the amount of |lookahead| to use. Valid values are [0, max_lookahead]
-// where |max_lookahead| was set at create time through
+// Sets the amount of `lookahead` to use. Valid values are [0, max_lookahead]
+// where `max_lookahead` was set at create time through
 // WebRtc_CreateDelayEstimator(...).
 //
 // Input:
@@ -157,8 +157,8 @@
 //      - lookahead     : The amount of lookahead to be used.
 //
 // Return value:
-//      - new_lookahead : The actual amount of lookahead set, unless |handle| is
-//                        a NULL pointer or |lookahead| is invalid, for which an
+//      - new_lookahead : The actual amount of lookahead set, unless `handle` is
+//                        a NULL pointer or `lookahead` is invalid, for which an
 //                        error is returned.
 int WebRtc_set_lookahead(void* handle, int lookahead);
 
@@ -167,12 +167,12 @@
 //      - handle        : Pointer to the delay estimation instance.
 int WebRtc_lookahead(void* handle);
 
-// Sets the |allowed_offset| used in the robust validation scheme.  If the
+// Sets the `allowed_offset` used in the robust validation scheme.  If the
 // delay estimator is used in an echo control component, this parameter is
-// related to the filter length.  In principle |allowed_offset| should be set to
+// related to the filter length.  In principle `allowed_offset` should be set to
 // the echo control filter length minus the expected echo duration, i.e., the
 // delay offset the echo control can handle without quality regression.  The
-// default value, used if not set manually, is zero.  Note that |allowed_offset|
+// default value, used if not set manually, is zero.  Note that `allowed_offset`
 // has to be non-negative.
 // Inputs:
 //  - handle            : Pointer to the delay estimation instance.
@@ -180,7 +180,7 @@
 //                        the echo control filter can handle.
 int WebRtc_set_allowed_offset(void* handle, int allowed_offset);
 
-// Returns the |allowed_offset| in number of partitions.
+// Returns the `allowed_offset` in number of partitions.
 int WebRtc_get_allowed_offset(const void* handle);
 
 // Enables/Disables a robust validation functionality in the delay estimation.
diff --git a/modules/audio_processing/utility/pffft_wrapper.h b/modules/audio_processing/utility/pffft_wrapper.h
index 160f0da..983c2fd 100644
--- a/modules/audio_processing/utility/pffft_wrapper.h
+++ b/modules/audio_processing/utility/pffft_wrapper.h
@@ -51,7 +51,7 @@
   // TODO(https://crbug.com/webrtc/9577): Consider adding a factory and making
   // the ctor private.
   // static std::unique_ptr<Pffft> Create(size_t fft_size,
-  // FftType fft_type); Ctor. |fft_size| must be a supported size (see
+  // FftType fft_type); Ctor. `fft_size` must be a supported size (see
   // Pffft::IsValidFftSize()). If not supported, the code will crash.
   Pffft(size_t fft_size, FftType fft_type);
   Pffft(const Pffft&) = delete;
@@ -73,9 +73,9 @@
   // Computes the backward fast Fourier transform.
   void BackwardTransform(const FloatBuffer& in, FloatBuffer* out, bool ordered);
 
-  // Multiplies the frequency components of |fft_x| and |fft_y| and accumulates
-  // them into |out|. The arrays must have been obtained with
-  // ForwardTransform(..., /*ordered=*/false) - i.e., |fft_x| and |fft_y| must
+  // Multiplies the frequency components of `fft_x` and `fft_y` and accumulates
+  // them into `out`. The arrays must have been obtained with
+  // ForwardTransform(..., /*ordered=*/false) - i.e., `fft_x` and `fft_y` must
   // not be ordered.
   void FrequencyDomainConvolve(const FloatBuffer& fft_x,
                                const FloatBuffer& fft_y,
diff --git a/modules/audio_processing/vad/gmm.h b/modules/audio_processing/vad/gmm.h
index 93eb675..d9d68ec 100644
--- a/modules/audio_processing/vad/gmm.h
+++ b/modules/audio_processing/vad/gmm.h
@@ -20,13 +20,13 @@
 // Where a 'mixture' is a Gaussian density.
 
 struct GmmParameters {
-  // weight[n] = log(w[n]) - |dimension|/2 * log(2*pi) - 1/2 * log(det(cov[n]));
+  // weight[n] = log(w[n]) - `dimension`/2 * log(2*pi) - 1/2 * log(det(cov[n]));
   // where cov[n] is the covariance matrix of mixture n;
   const double* weight;
-  // pointer to the first element of a |num_mixtures|x|dimension| matrix
+  // pointer to the first element of a `num_mixtures`x`dimension` matrix
   // where kth row is the mean of the kth mixture.
   const double* mean;
-  // pointer to the first element of a |num_mixtures|x|dimension|x|dimension|
+  // pointer to the first element of a `num_mixtures`x`dimension`x`dimension`
   // 3D-matrix, where the kth 2D-matrix is the inverse of the covariance
   // matrix of the kth mixture.
   const double* covar_inverse;
@@ -36,8 +36,8 @@
   int num_mixtures;
 };
 
-// Evaluate the given GMM, according to |gmm_parameters|, at the given point
-// |x|. If the dimensionality of the given GMM is larger that the maximum
+// Evaluate the given GMM, according to `gmm_parameters`, at the given point
+// `x`. If the dimensionality of the given GMM is larger than the maximum
 // acceptable dimension by the following function -1 is returned.
 double EvaluateGmm(const double* x, const GmmParameters& gmm_parameters);
 
diff --git a/modules/audio_processing/vad/pitch_based_vad.h b/modules/audio_processing/vad/pitch_based_vad.h
index e005e23..fa3abc2 100644
--- a/modules/audio_processing/vad/pitch_based_vad.h
+++ b/modules/audio_processing/vad/pitch_based_vad.h
@@ -34,7 +34,7 @@
   //   p_combined: an array which contains the combined activity probabilities
   //               computed prior to the call of this function. The method,
   //               then, computes the voicing probabilities and combine them
-  //               with the given values. The result are returned in |p|.
+  //               with the given values. The results are returned in `p`.
   int VoicingProbability(const AudioFeatures& features, double* p_combined);
 
  private:
diff --git a/modules/audio_processing/vad/pitch_internal.h b/modules/audio_processing/vad/pitch_internal.h
index 938745d..e382c1f 100644
--- a/modules/audio_processing/vad/pitch_internal.h
+++ b/modules/audio_processing/vad/pitch_internal.h
@@ -14,7 +14,7 @@
 namespace webrtc {
 
 // TODO(turajs): Write a description of this function. Also be consistent with
-// usage of |sampling_rate_hz| vs |kSamplingFreqHz|.
+// usage of `sampling_rate_hz` vs `kSamplingFreqHz`.
 void GetSubframesPitchParameters(int sampling_rate_hz,
                                  double* gains,
                                  double* lags,
diff --git a/modules/audio_processing/vad/standalone_vad.h b/modules/audio_processing/vad/standalone_vad.h
index 3dff416..b084633 100644
--- a/modules/audio_processing/vad/standalone_vad.h
+++ b/modules/audio_processing/vad/standalone_vad.h
@@ -26,12 +26,12 @@
 
   // Outputs
   //   p: a buffer where probabilities are written to.
-  //   length_p: number of elements of |p|.
+  //   length_p: number of elements of `p`.
   //
   // return value:
   //    -1: if no audio is stored or VAD returns error.
   //     0: in success.
-  // In case of error the content of |activity| is unchanged.
+  // In case of error the content of `activity` is unchanged.
   //
   // Note that due to a high false-positive (VAD decision is active while the
   // processed audio is just background noise) rate, stand-alone VAD is used as
diff --git a/modules/audio_processing/vad/standalone_vad_unittest.cc b/modules/audio_processing/vad/standalone_vad_unittest.cc
index 22b1f49..0fa2ed7 100644
--- a/modules/audio_processing/vad/standalone_vad_unittest.cc
+++ b/modules/audio_processing/vad/standalone_vad_unittest.cc
@@ -31,7 +31,7 @@
   for (size_t n = 0; n < kMaxNumFrames; n++)
     EXPECT_EQ(0, vad->AddAudio(data, kLength10Ms));
 
-  // Pretend |p| is shorter that it should be.
+  // Pretend `p` is shorter than it should be.
   EXPECT_EQ(-1, vad->GetActivity(p, kMaxNumFrames - 1));
 
   EXPECT_EQ(0, vad->GetActivity(p, kMaxNumFrames));
diff --git a/modules/audio_processing/vad/vad_audio_proc.cc b/modules/audio_processing/vad/vad_audio_proc.cc
index 97cf651..aaf8214 100644
--- a/modules/audio_processing/vad/vad_audio_proc.cc
+++ b/modules/audio_processing/vad/vad_audio_proc.cc
@@ -132,7 +132,7 @@
                       kNumSubframeSamples + kNumPastSignalSamples, kLpcOrder);
 }
 
-// Compute |kNum10msSubframes| sets of LPC coefficients, one per 10 ms input.
+// Compute `kNum10msSubframes` sets of LPC coefficients, one per 10 ms input.
 // The analysis window is 15 ms long and it is centered on the first half of
 // each 10ms sub-frame. This is equivalent to computing LPC coefficients for the
 // first half of each 10 ms subframe.
@@ -169,7 +169,7 @@
   return fractional_index;
 }
 
-// 1 / A(z), where A(z) is defined by |lpc| is a model of the spectral envelope
+// 1 / A(z), where A(z) is defined by `lpc`, is a model of the spectral envelope
 // of the input signal. The local maximum of the spectral envelope corresponds
 // with the local minimum of A(z). It saves complexity, as we save one
 // inversion. Furthermore, we find the first local maximum of magnitude squared,
diff --git a/modules/audio_processing/vad/vad_circular_buffer.h b/modules/audio_processing/vad/vad_circular_buffer.h
index 46b03d4..c1806f9 100644
--- a/modules/audio_processing/vad/vad_circular_buffer.h
+++ b/modules/audio_processing/vad/vad_circular_buffer.h
@@ -38,8 +38,8 @@
   // The mean value of the elements in the buffer. The return value is zero if
   // buffer is empty, i.e. no value is inserted.
   double Mean();
-  // Remove transients. If the values exceed |val_threshold| for a period
-  // shorter then or equal to |width_threshold|, then that period is considered
+  // Remove transients. If the values exceed `val_threshold` for a period
+  // shorter than or equal to `width_threshold`, then that period is considered
   // transient and set to zero.
   int RemoveTransient(int width_threshold, double val_threshold);
 
@@ -49,7 +49,7 @@
   // insertion. |index = 1| is the one before the most recent insertion, and
   // so on.
   int Get(int index, double* value) const;
-  // Set a given position to |value|. |index| is interpreted as above.
+  // Set a given position to `value`. `index` is interpreted as above.
   int Set(int index, double value);
   // Return the number of valid elements in the buffer.
   int BufferLevel();
diff --git a/modules/audio_processing/vad/voice_activity_detector.cc b/modules/audio_processing/vad/voice_activity_detector.cc
index f0d34c6..ce4d46b 100644
--- a/modules/audio_processing/vad/voice_activity_detector.cc
+++ b/modules/audio_processing/vad/voice_activity_detector.cc
@@ -32,7 +32,7 @@
 VoiceActivityDetector::~VoiceActivityDetector() = default;
 
 // Because ISAC has a different chunk length, it updates
-// |chunkwise_voice_probabilities_| and |chunkwise_rms_| when there is new data.
+// `chunkwise_voice_probabilities_` and `chunkwise_rms_` when there is new data.
 // Otherwise it clears them.
 void VoiceActivityDetector::ProcessChunk(const int16_t* audio,
                                          size_t length,
@@ -49,7 +49,7 @@
   }
   RTC_DCHECK_EQ(length, kLength10Ms);
 
-  // Each chunk needs to be passed into |standalone_vad_|, because internally it
+  // Each chunk needs to be passed into `standalone_vad_`, because internally it
   // buffers the audio and processes it all at once when GetActivity() is
   // called.
   RTC_CHECK_EQ(standalone_vad_->AddAudio(resampled_ptr, length), 0);
diff --git a/modules/audio_processing/vad/voice_activity_detector_unittest.cc b/modules/audio_processing/vad/voice_activity_detector_unittest.cc
index 3214bd9..80f21c8 100644
--- a/modules/audio_processing/vad/voice_activity_detector_unittest.cc
+++ b/modules/audio_processing/vad/voice_activity_detector_unittest.cc
@@ -133,7 +133,7 @@
     vad.ProcessChunk(&data[0], data.size(), kSampleRateHz);
 
     // Before the |vad has enough data to process an ISAC block it will return
-    // the default value, 1.f, which would ruin the |max_probability| value.
+    // the default value, 1.f, which would ruin the `max_probability` value.
     if (i > kNumChunksPerIsacBlock) {
       max_probability = std::max(max_probability, vad.last_voice_probability());
     }
@@ -156,7 +156,7 @@
     vad.ProcessChunk(&data[0], data.size(), 2 * kSampleRateHz);
 
     // Before the |vad has enough data to process an ISAC block it will return
-    // the default value, 1.f, which would ruin the |max_probability| value.
+    // the default value, 1.f, which would ruin the `max_probability` value.
     if (i > kNumChunksPerIsacBlock) {
       max_probability = std::max(max_probability, vad.last_voice_probability());
     }