Use backticks, not vertical bars, to denote variables in comments for /api
Bug: webrtc:12338
Change-Id: Ib97b2c3d64dbd895f261ffa76a2e885bd934a87f
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/226940
Reviewed-by: Harald Alvestrand <hta@webrtc.org>
Commit-Queue: Artem Titov <titovartem@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#34554}
diff --git a/api/audio/audio_frame.cc b/api/audio/audio_frame.cc
index c6e5cf4..0c39d51 100644
--- a/api/audio/audio_frame.cc
+++ b/api/audio/audio_frame.cc
@@ -52,7 +52,7 @@
}

void AudioFrame::ResetWithoutMuting() {
- // TODO(wu): Zero is a valid value for |timestamp_|. We should initialize
+ // TODO(wu): Zero is a valid value for `timestamp_`. We should initialize
// to an invalid value, or add a new member to indicate invalidity.
timestamp_ = 0;
elapsed_time_ms_ = -1;
diff --git a/api/audio/audio_frame.h b/api/audio/audio_frame.h
index 78539f5..726b9a9 100644
--- a/api/audio/audio_frame.h
+++ b/api/audio/audio_frame.h
@@ -139,7 +139,7 @@
int64_t profile_timestamp_ms_ = 0;

// Information about packets used to assemble this audio frame. This is needed
- // by |SourceTracker| when the frame is delivered to the RTCRtpReceiver's
+ // by `SourceTracker` when the frame is delivered to the RTCRtpReceiver's
// MediaStreamTrack, in order to implement getContributingSources(). See:
// https://w3c.github.io/webrtc-pc/#dom-rtcrtpreceiver-getcontributingsources
//
@@ -149,7 +149,7 @@
// sync buffer is the small sample-holding buffer located after the audio
// decoder and before where samples are assembled into output frames.
//
- // |RtpPacketInfos| may also be empty if the audio samples did not come from
+ // `RtpPacketInfos` may also be empty if the audio samples did not come from
// RTP packets. E.g. if the audio were locally generated by packet loss
// concealment, comfort noise generation, etc.
RtpPacketInfos packet_infos_;
@@ -165,7 +165,7 @@
// Absolute capture timestamp when this audio frame was originally captured.
// This is only valid for audio frames captured on this machine. The absolute
- // capture timestamp of a received frame is found in |packet_infos_|.
+ // capture timestamp of a received frame is found in `packet_infos_`.
// This timestamp MUST be based on the same clock as rtc::TimeMillis().
absl::optional<int64_t> absolute_capture_timestamp_ms_;
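For illustration only, not part of this change: the comments above say received frames carry their capture info in `packet_infos_`, which `SourceTracker` aggregates for getContributingSources(). A minimal sketch of that aggregation follows; the RtpPacketInfo accessors (ssrc(), csrcs()) and the include path are assumptions taken from the wider API, not from this diff.

// Sketch only. Collects the SSRC plus any CSRCs of every packet that
// contributed to a frame, the kind of aggregation SourceTracker does for
// getContributingSources(). Assumed: RtpPacketInfo::ssrc() / csrcs().
#include <cstdint>
#include <set>

#include "api/rtp_packet_infos.h"

std::set<uint32_t> ContributingSources(const webrtc::RtpPacketInfos& packet_infos) {
  std::set<uint32_t> sources;
  // Empty when the audio did not come from RTP packets (packet loss
  // concealment, comfort noise, ...), in which case no sources are reported.
  for (const webrtc::RtpPacketInfo& info : packet_infos) {
    sources.insert(info.ssrc());
    for (uint32_t csrc : info.csrcs()) {
      sources.insert(csrc);
    }
  }
  return sources;
}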
diff --git a/api/audio/audio_frame_processor.h b/api/audio/audio_frame_processor.h
index bc21d14..cb65c48 100644
--- a/api/audio/audio_frame_processor.h
+++ b/api/audio/audio_frame_processor.h
@@ -28,12 +28,12 @@
// Processes the frame received from WebRTC, is called by WebRTC off the
// realtime audio capturing path. AudioFrameProcessor must reply with
- // processed frames by calling |sink_callback| if it was provided in SetSink()
- // call. |sink_callback| can be called in the context of Process().
+ // processed frames by calling `sink_callback` if it was provided in SetSink()
+ // call. `sink_callback` can be called in the context of Process().
virtual void Process(std::unique_ptr<AudioFrame> frame) = 0;

// Atomically replaces the current sink with the new one. Before the
- // first call to this function, or if the provided |sink_callback| is nullptr,
+ // first call to this function, or if the provided `sink_callback` is nullptr,
// processed frames are simply discarded.
virtual void SetSink(OnAudioFrameCallback sink_callback) = 0;
};
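For illustration only, not part of this change: a minimal AudioFrameProcessor that honors the contract documented above, forwarding frames to the sink when one is set and otherwise discarding them. The ForwardingProcessor name is made up, and it assumes OnAudioFrameCallback behaves like a std::function taking std::unique_ptr<AudioFrame>; the std::mutex is just one way to make SetSink() atomic with respect to Process().

// Sketch only. Forwards frames unchanged; drops them while no sink is set.
#include <memory>
#include <mutex>
#include <utility>

#include "api/audio/audio_frame.h"
#include "api/audio/audio_frame_processor.h"

class ForwardingProcessor : public webrtc::AudioFrameProcessor {
 public:
  void Process(std::unique_ptr<webrtc::AudioFrame> frame) override {
    OnAudioFrameCallback sink;
    {
      std::lock_guard<std::mutex> lock(mutex_);
      sink = sink_;
    }
    if (sink) {
      // The callback may be invoked directly in the context of Process().
      sink(std::move(frame));
    }
    // With no sink (or a nullptr sink), the frame is simply discarded.
  }

  void SetSink(OnAudioFrameCallback sink_callback) override {
    std::lock_guard<std::mutex> lock(mutex_);
    sink_ = std::move(sink_callback);
  }

 private:
  std::mutex mutex_;
  OnAudioFrameCallback sink_;
};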
diff --git a/api/audio/audio_mixer.h b/api/audio/audio_mixer.h
index b290cfa..3483df2 100644
--- a/api/audio/audio_mixer.h
+++ b/api/audio/audio_mixer.h
@@ -35,9 +35,9 @@
kError, // The audio_frame will not be used.
};

- // Overwrites |audio_frame|. The data_ field is overwritten with
+ // Overwrites `audio_frame`. The data_ field is overwritten with
// 10 ms of new audio (either 1 or 2 interleaved channels) at
- // |sample_rate_hz|. All fields in |audio_frame| must be updated.
+ // `sample_rate_hz`. All fields in `audio_frame` must be updated.
virtual AudioFrameInfo GetAudioFrameWithInfo(int sample_rate_hz,
AudioFrame* audio_frame) = 0;
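For illustration only, not part of this change: a Source that satisfies GetAudioFrameWithInfo() by handing the mixer 10 ms of muted mono audio at whatever rate it asks for. The AudioFrame members touched here (sample_rate_hz_, samples_per_channel_, num_channels_, Mute()) and Source's other pure-virtual methods (Ssrc(), PreferredSampleRate()) come from the wider API and are assumptions, not part of this diff.

// Sketch only. Always reports 10 ms of silence at the requested rate.
#include "api/audio/audio_frame.h"
#include "api/audio/audio_mixer.h"

class SilentSource : public webrtc::AudioMixer::Source {
 public:
  AudioFrameInfo GetAudioFrameWithInfo(int sample_rate_hz,
                                       webrtc::AudioFrame* audio_frame) override {
    // 10 ms of mono audio at the requested rate; the main fields are updated.
    audio_frame->sample_rate_hz_ = sample_rate_hz;
    audio_frame->samples_per_channel_ = sample_rate_hz / 100;
    audio_frame->num_channels_ = 1;
    audio_frame->Mute();  // Samples read back as zeros.
    return AudioFrameInfo::kMuted;
  }

  int Ssrc() const override { return 0; }
  int PreferredSampleRate() const override { return 48000; }
};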
@@ -66,7 +66,7 @@
// should mix at a rate that doesn't cause quality loss of the
// sources' audio. The mixing rate is one of the rates listed in
// AudioProcessing::NativeRate. All fields in
- // |audio_frame_for_mixing| must be updated.
+ // `audio_frame_for_mixing` must be updated.
virtual void Mix(size_t number_of_channels,
AudioFrame* audio_frame_for_mixing) = 0;
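For illustration only, not part of this change: one Mix() call pulling a 10 ms block of stereo output from whatever sources are registered. The AudioFrame member read afterwards (samples_per_channel_) is assumed from the wider API.

// Sketch only. The mixer picks the mixing rate itself and updates every field
// of `mixed`; the caller only chooses the channel count.
#include <cstddef>

#include "api/audio/audio_frame.h"
#include "api/audio/audio_mixer.h"

size_t MixOnce(webrtc::AudioMixer& mixer, webrtc::AudioFrame& mixed) {
  mixer.Mix(/*number_of_channels=*/2, &mixed);
  // 10 ms at the chosen rate, e.g. 480 samples per channel at 48 kHz.
  return mixed.samples_per_channel_;
}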