Use backticks not vertical bars to denote variables in comments for /modules/audio_coding

Bug: webrtc:12338
Change-Id: I02613d9fca45d00e2477f334b7a0416e7912e26b
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/227037
Reviewed-by: Harald Alvestrand <hta@webrtc.org>
Commit-Queue: Artem Titov <titovartem@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#34621}
diff --git a/modules/audio_coding/codecs/opus/audio_encoder_opus.cc b/modules/audio_coding/codecs/opus/audio_encoder_opus.cc
index 7c62e98..e4d3b9e 100644
--- a/modules/audio_coding/codecs/opus/audio_encoder_opus.cc
+++ b/modules/audio_coding/codecs/opus/audio_encoder_opus.cc
@@ -564,9 +564,9 @@
 void AudioEncoderOpusImpl::SetReceiverFrameLengthRange(
     int min_frame_length_ms,
     int max_frame_length_ms) {
-  // Ensure that |SetReceiverFrameLengthRange| is called before
-  // |EnableAudioNetworkAdaptor|, otherwise we need to recreate
-  // |audio_network_adaptor_|, which is not a needed use case.
+  // Ensure that `SetReceiverFrameLengthRange` is called before
+  // `EnableAudioNetworkAdaptor`, otherwise we need to recreate
+  // `audio_network_adaptor_`, which is not a needed use case.
   RTC_DCHECK(!audio_network_adaptor_);
   FindSupportedFrameLengths(min_frame_length_ms, max_frame_length_ms,
                             &config_.supported_frame_lengths_ms);
diff --git a/modules/audio_coding/codecs/opus/audio_encoder_opus_unittest.cc b/modules/audio_coding/codecs/opus/audio_encoder_opus_unittest.cc
index f1953ea..daca6aa 100644
--- a/modules/audio_coding/codecs/opus/audio_encoder_opus_unittest.cc
+++ b/modules/audio_coding/codecs/opus/audio_encoder_opus_unittest.cc
@@ -228,8 +228,8 @@
 
 TEST_P(AudioEncoderOpusTest, SetReceiverFrameLengthRange) {
   auto states = CreateCodec(sample_rate_hz_, 2);
-  // Before calling to |SetReceiverFrameLengthRange|,
-  // |supported_frame_lengths_ms| should contain only the frame length being
+  // Before calling to `SetReceiverFrameLengthRange`,
+  // `supported_frame_lengths_ms` should contain only the frame length being
   // used.
   using ::testing::ElementsAre;
   EXPECT_THAT(states->encoder->supported_frame_lengths_ms(),
@@ -348,7 +348,7 @@
   // will fail.
   constexpr float kPacketLossFraction_1 = 0.02f;
   constexpr float kPacketLossFraction_2 = 0.198f;
-  // |kSecondSampleTimeMs| is chosen to ease the calculation since
+  // `kSecondSampleTimeMs` is chosen to ease the calculation since
   // 0.9999 ^ 6931 = 0.5.
   constexpr int64_t kSecondSampleTimeMs = 6931;
 
@@ -380,7 +380,7 @@
   states->encoder->OnReceivedUplinkBandwidth(kDefaultOpusRate * 2,
                                              absl::nullopt);
 
-  // Since |OnReceivedOverhead| has not been called, the codec bitrate should
+  // Since `OnReceivedOverhead` has not been called, the codec bitrate should
   // not change.
   EXPECT_EQ(kDefaultOpusRate, states->encoder->GetTargetBitrate());
 }
diff --git a/modules/audio_coding/codecs/opus/opus_fec_test.cc b/modules/audio_coding/codecs/opus/opus_fec_test.cc
index 1923647..0636935 100644
--- a/modules/audio_coding/codecs/opus/opus_fec_test.cc
+++ b/modules/audio_coding/codecs/opus/opus_fec_test.cc
@@ -218,8 +218,8 @@
 
       time_now_ms += block_duration_ms_;
 
-      // |data_pointer_| is incremented and wrapped across
-      // |loop_length_samples_|.
+      // `data_pointer_` is incremented and wrapped across
+      // `loop_length_samples_`.
       data_pointer_ = (data_pointer_ + block_length_sample_ * channels_) %
                       loop_length_samples_;
     }
diff --git a/modules/audio_coding/codecs/opus/opus_interface.cc b/modules/audio_coding/codecs/opus/opus_interface.cc
index f684452..0337919 100644
--- a/modules/audio_coding/codecs/opus/opus_interface.cc
+++ b/modules/audio_coding/codecs/opus/opus_interface.cc
@@ -574,8 +574,8 @@
 
 /* For decoder to determine if it is to output speech or comfort noise. */
 static int16_t DetermineAudioType(OpusDecInst* inst, size_t encoded_bytes) {
-  // Audio type becomes comfort noise if |encoded_byte| is 1 and keeps
-  // to be so if the following |encoded_byte| are 0 or 1.
+  // Audio type becomes comfort noise if `encoded_byte` is 1 and keeps
+  // to be so if the following `encoded_byte` are 0 or 1.
   if (encoded_bytes == 0 && inst->in_dtx_mode) {
     return 2;  // Comfort noise.
   } else if (encoded_bytes == 1 || encoded_bytes == 2) {
@@ -595,7 +595,7 @@
   }
 }
 
-/* |frame_size| is set to maximum Opus frame size in the normal case, and
+/* `frame_size` is set to maximum Opus frame size in the normal case, and
  * is set to the number of samples needed for PLC in case of losses.
  * It is up to the caller to make sure the value is correct. */
 static int DecodeNative(OpusDecInst* inst,
@@ -632,9 +632,9 @@
       FrameSizePerChannel(kWebRtcOpusPlcFrameSizeMs, inst->sample_rate_hz);
 
   if (inst->plc_use_prev_decoded_samples) {
-    /* The number of samples we ask for is |number_of_lost_frames| times
-     * |prev_decoded_samples_|. Limit the number of samples to maximum
-     * |MaxFrameSizePerChannel()|. */
+    /* The number of samples we ask for is `number_of_lost_frames` times
+     * `prev_decoded_samples_`. Limit the number of samples to maximum
+     * `MaxFrameSizePerChannel()`. */
     plc_samples = inst->prev_decoded_samples;
     const int max_samples_per_channel =
         MaxFrameSizePerChannel(inst->sample_rate_hz);
@@ -729,9 +729,9 @@
 
 int WebRtcOpus_PlcDuration(OpusDecInst* inst) {
   if (inst->plc_use_prev_decoded_samples) {
-    /* The number of samples we ask for is |number_of_lost_frames| times
-     * |prev_decoded_samples_|. Limit the number of samples to maximum
-     * |MaxFrameSizePerChannel()|. */
+    /* The number of samples we ask for is `number_of_lost_frames` times
+     * `prev_decoded_samples_`. Limit the number of samples to maximum
+     * `MaxFrameSizePerChannel()`. */
     const int plc_samples = inst->prev_decoded_samples;
     const int max_samples_per_channel =
         MaxFrameSizePerChannel(inst->sample_rate_hz);
@@ -826,8 +826,8 @@
   // as binary values with uniform probability, they can be extracted directly
   // from the most significant bits of the first byte of compressed data.
   for (int n = 0; n < channels; n++) {
-    // The LBRR bit for channel 1 is on the (|silk_frames| + 1)-th bit, and
-    // that of channel 2 is on the |(|silk_frames| + 1) * 2 + 1|-th bit.
+    // The LBRR bit for channel 1 is on the (`silk_frames` + 1)-th bit, and
+    // that of channel 2 is on the `(silk_frames + 1) * 2 + 1`-th bit.
     if (frame_data[0][0] & (0x80 >> ((n + 1) * (silk_frames + 1) - 1)))
       return 1;
   }
diff --git a/modules/audio_coding/codecs/opus/opus_unittest.cc b/modules/audio_coding/codecs/opus/opus_unittest.cc
index 80cab50..b507a32 100644
--- a/modules/audio_coding/codecs/opus/opus_unittest.cc
+++ b/modules/audio_coding/codecs/opus/opus_unittest.cc
@@ -115,10 +115,10 @@
 
   void TestCbrEffect(bool dtx, int block_length_ms);
 
-  // Prepare |speech_data_| for encoding, read from a hard-coded file.
+  // Prepare `speech_data_` for encoding, read from a hard-coded file.
-  // After preparation, |speech_data_.GetNextBlock()| returns a pointer to a
+  // After preparation, `speech_data_.GetNextBlock()` returns a pointer to a
-  // block of |block_length_ms| milliseconds. The data is looped every
-  // |loop_length_ms| milliseconds.
+  // block of `block_length_ms` milliseconds. The data is looped every
+  // `loop_length_ms` milliseconds.
   void PrepareSpeechData(int block_length_ms, int loop_length_ms);
 
   int EncodeDecode(WebRtcOpusEncInst* encoder,
@@ -310,24 +310,24 @@
   // one with an arbitrary size and the other of 1-byte, then stops sending for
   // a certain number of frames.
 
-  // |max_dtx_frames| is the maximum number of frames Opus can stay in DTX.
+  // `max_dtx_frames` is the maximum number of frames Opus can stay in DTX.
   // TODO(kwiberg): Why does this number depend on the encoding sample rate?
   const int max_dtx_frames =
       (encoder_sample_rate_hz_ == 16000 ? 800 : 400) / block_length_ms + 1;
 
-  // We run |kRunTimeMs| milliseconds of pure silence.
+  // We run `kRunTimeMs` milliseconds of pure silence.
   const int kRunTimeMs = 4500;
 
-  // We check that, after a |kCheckTimeMs| milliseconds (given that the CNG in
+  // We check that, after a `kCheckTimeMs` milliseconds (given that the CNG in
   // Opus needs time to adapt), the absolute values of DTX decoded signal are
-  // bounded by |kOutputValueBound|.
+  // bounded by `kOutputValueBound`.
   const int kCheckTimeMs = 4000;
 
 #if defined(OPUS_FIXED_POINT)
   // Fixed-point Opus generates a random (comfort) noise, which has a less
   // predictable value bound than its floating-point Opus. This value depends on
   // input signal, and the time window for checking the output values (between
-  // |kCheckTimeMs| and |kRunTimeMs|).
+  // `kCheckTimeMs` and `kRunTimeMs`).
   const uint16_t kOutputValueBound = 30;
 
 #else
@@ -336,7 +336,7 @@
 
   int time = 0;
   while (time < kRunTimeMs) {
-    // DTX mode is maintained for maximum |max_dtx_frames| frames.
+    // DTX mode is maintained for maximum `max_dtx_frames` frames.
     int i = 0;
     for (; i < max_dtx_frames; ++i) {
       time += block_length_ms;
diff --git a/modules/audio_coding/codecs/opus/test/audio_ring_buffer.h b/modules/audio_coding/codecs/opus/test/audio_ring_buffer.h
index a89dfd8..a280ca2 100644
--- a/modules/audio_coding/codecs/opus/test/audio_ring_buffer.h
+++ b/modules/audio_coding/codecs/opus/test/audio_ring_buffer.h
@@ -29,11 +29,11 @@
   AudioRingBuffer(size_t channels, size_t max_frames);
   ~AudioRingBuffer();
 
-  // Copies |data| to the buffer and advances the write pointer. |channels| must
+  // Copies `data` to the buffer and advances the write pointer. `channels` must
   // be the same as at creation time.
   void Write(const float* const* data, size_t channels, size_t frames);
 
-  // Copies from the buffer to |data| and advances the read pointer. |channels|
+  // Copies from the buffer to `data` and advances the read pointer. `channels`
   // must be the same as at creation time.
   void Read(float* const* data, size_t channels, size_t frames);
 
diff --git a/modules/audio_coding/codecs/opus/test/blocker.cc b/modules/audio_coding/codecs/opus/test/blocker.cc
index 7f102b5..33406ce 100644
--- a/modules/audio_coding/codecs/opus/test/blocker.cc
+++ b/modules/audio_coding/codecs/opus/test/blocker.cc
@@ -16,7 +16,7 @@
 
 namespace {
 
-// Adds |a| and |b| frame by frame into |result| (basically matrix addition).
+// Adds `a` and `b` frame by frame into `result` (basically matrix addition).
 void AddFrames(const float* const* a,
                size_t a_start_index,
                const float* const* b,
@@ -33,7 +33,7 @@
   }
 }
 
-// Copies |src| into |dst| channel by channel.
+// Copies `src` into `dst` channel by channel.
 void CopyFrames(const float* const* src,
                 size_t src_start_index,
                 size_t num_frames,
@@ -46,7 +46,7 @@
   }
 }
 
-// Moves |src| into |dst| channel by channel.
+// Moves `src` into `dst` channel by channel.
 void MoveFrames(const float* const* src,
                 size_t src_start_index,
                 size_t num_frames,
@@ -69,8 +69,8 @@
   }
 }
 
-// Pointwise multiplies each channel of |frames| with |window|. Results are
-// stored in |frames|.
+// Pointwise multiplies each channel of `frames` with `window`. Results are
+// stored in `frames`.
 void ApplyWindow(const float* window,
                  size_t num_frames,
                  size_t num_channels,
@@ -134,7 +134,7 @@
 // On each call to ProcessChunk():
 // 1. New input gets read into sections _b_ and _c_ of the input buffer.
 // 2. We block starting from frame_offset.
-// 3. We block until we reach a block |bl| that doesn't contain any frames
+// 3. We block until we reach a block `bl` that doesn't contain any frames
 //    from sections _a_ or _b_ of the input buffer.
 // 4. We window the current block, fire the callback for processing, window
 //    again, and overlap/add to the output buffer.
@@ -142,7 +142,7 @@
 // 6. For both the input and the output buffers, we copy section _c_ into
 //    section _a_.
 // 7. We set the new frame_offset to be the difference between the first frame
-//    of |bl| and the border between sections _b_ and _c_.
+//    of `bl` and the border between sections _b_ and _c_.
 //
 // When block_size > chunk_size the input and output buffers look like this:
 //
@@ -153,13 +153,13 @@
 // On each call to ProcessChunk():
 // The procedure is the same as above, except for:
 // 1. New input gets read into section _c_ of the input buffer.
-// 3. We block until we reach a block |bl| that doesn't contain any frames
+// 3. We block until we reach a block `bl` that doesn't contain any frames
 //    from section _a_ of the input buffer.
 // 5. We copy section _a_ of the output buffer into output.
 // 6. For both the input and the output buffers, we copy sections _b_ and _c_
 //    into section _a_ and _b_.
 // 7. We set the new frame_offset to be the difference between the first frame
-//    of |bl| and the border between sections _a_ and _b_.
+//    of `bl` and the border between sections _a_ and _b_.
 //
-// * delay here refers to inintial_delay_
+// * delay here refers to initial_delay_
 //
diff --git a/modules/audio_coding/codecs/opus/test/blocker.h b/modules/audio_coding/codecs/opus/test/blocker.h
index 26177bc..59b7e29 100644
--- a/modules/audio_coding/codecs/opus/test/blocker.h
+++ b/modules/audio_coding/codecs/opus/test/blocker.h
@@ -39,7 +39,7 @@
 // of audio, which is not a power of 2. Blocker allows us to specify the
 // transform and all other necessary processing via the Process() callback
 // function without any constraints on the transform-size
-// (read: |block_size_|) or received-audio-size (read: |chunk_size_|).
+// (read: `block_size_`) or received-audio-size (read: `chunk_size_`).
 // We handle this for the multichannel audio case, allowing for different
 // numbers of input and output channels (for example, beamforming takes 2 or
 // more input channels and returns 1 output channel). Audio signals are
@@ -53,8 +53,8 @@
 //   sending back a processed chunk
 //
 // To use blocker:
-// 1. Impelment a BlockerCallback object |bc|.
-// 2. Instantiate a Blocker object |b|, passing in |bc|.
+// 1. Implement a BlockerCallback object `bc`.
+// 2. Instantiate a Blocker object `b`, passing in `bc`.
 // 3. As you receive audio, call b.ProcessChunk() to get processed audio.
 //
 // A small amount of delay is added to the first received chunk to deal with
@@ -101,7 +101,7 @@
   // input and output buffers are responsible for saving those frames between
   // calls to ProcessChunk().
   //
-  // Both contain |initial delay| + |chunk_size| frames. The input is a fairly
+  // Both contain `initial delay` + `chunk_size` frames. The input is a fairly
   // standard FIFO, but due to the overlap-add it's harder to use an
   // AudioRingBuffer for the output.
   AudioRingBuffer input_buffer_;
@@ -116,7 +116,7 @@
   std::unique_ptr<float[]> window_;
 
   // The amount of frames between the start of contiguous blocks. For example,
-  // |shift_amount_| = |block_size_| / 2 for a Hann window.
+  // `shift_amount_` = `block_size_` / 2 for a Hann window.
   size_t shift_amount_;
 
   BlockerCallback* callback_;
diff --git a/modules/audio_coding/codecs/opus/test/lapped_transform.h b/modules/audio_coding/codecs/opus/test/lapped_transform.h
index 3620df3..bb25c34 100644
--- a/modules/audio_coding/codecs/opus/test/lapped_transform.h
+++ b/modules/audio_coding/codecs/opus/test/lapped_transform.h
@@ -84,11 +84,11 @@
                                    std::complex<float>* const* out_block) = 0;
   };
 
-  // Construct a transform instance. |chunk_length| is the number of samples in
-  // each channel. |window| defines the window, owned by the caller (a copy is
-  // made internally); |window| should have length equal to |block_length|.
-  // |block_length| defines the length of a block, in samples.
-  // |shift_amount| is in samples. |callback| is the caller-owned audio
+  // Construct a transform instance. `chunk_length` is the number of samples in
+  // each channel. `window` defines the window, owned by the caller (a copy is
+  // made internally); `window` should have length equal to `block_length`.
+  // `block_length` defines the length of a block, in samples.
+  // `shift_amount` is in samples. `callback` is the caller-owned audio
   // processing function called for each block of the input chunk.
   LappedTransform(size_t num_in_channels,
                   size_t num_out_channels,
@@ -99,10 +99,10 @@
                   Callback* callback);
   ~LappedTransform();
 
-  // Main audio processing helper method. Internally slices |in_chunk| into
+  // Main audio processing helper method. Internally slices `in_chunk` into
   // blocks, transforms them to frequency domain, calls the callback for each
   // block and returns a de-blocked time domain chunk of audio through
-  // |out_chunk|. Both buffers are caller-owned.
+  // `out_chunk`. Both buffers are caller-owned.
   void ProcessChunk(const float* const* in_chunk, float* const* out_chunk);
 
   // Get the chunk length.
@@ -132,8 +132,8 @@
 
   // Returns the initial delay.
   //
-  // This is the delay introduced by the |blocker_| to be able to get and return
-  // chunks of |chunk_length|, but process blocks of |block_length|.
+  // This is the delay introduced by the `blocker_` to be able to get and return
+  // chunks of `chunk_length`, but process blocks of `block_length`.
   size_t initial_delay() const { return blocker_.initial_delay(); }
 
  private: