Add plumbing of RtpPacketInfos to each AudioFrame as input for SourceTracker.
This change adds the plumbing of RtpPacketInfo from ChannelReceive::OnRtpPacket() through to ChannelReceive::GetAudioFrameWithInfo() for audio. It is a step towards replacing the non-spec-compliant ContributingSources, which updates itself at packet-receive time, with the spec-compliant SourceTracker, which will update itself at frame-delivery-to-track time.
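
For illustration, below is a simplified, self-contained sketch of the intended
data flow. The PacketInfo/PacketInfos/Frame/Receiver types are invented
stand-ins for the real RtpPacketInfo, RtpPacketInfos, AudioFrame and
ChannelReceive, not the actual WebRTC API; this CL itself only forwards the
per-packet infos, as shown in the diff.

  // Hypothetical stand-ins, for illustration only.
  #include <cstdint>
  #include <utility>
  #include <vector>

  struct PacketInfo {               // cf. webrtc::RtpPacketInfo
    uint32_t ssrc = 0;
    std::vector<uint32_t> csrcs;
    int64_t receive_time_ms = 0;
  };
  using PacketInfos = std::vector<PacketInfo>;  // cf. webrtc::RtpPacketInfos

  struct Frame {                    // cf. webrtc::AudioFrame
    std::vector<int16_t> samples;
    PacketInfos packet_infos;       // cf. AudioFrame::packet_infos_
  };

  class Receiver {                  // cf. webrtc::ChannelReceive
   public:
    // Packet-receive time (cf. OnRtpPacket): only record per-packet info;
    // contributing sources are no longer updated here.
    void OnPacket(PacketInfo info, std::vector<int16_t> payload) {
      pending_infos_.push_back(std::move(info));
      pending_samples_.insert(pending_samples_.end(), payload.begin(),
                              payload.end());
    }

    // Frame-delivery time (cf. GetAudioFrameWithInfo): attach the infos of
    // the packets the frame was decoded from, so a SourceTracker-like
    // component can update when the frame reaches the MediaStreamTrack.
    Frame GetFrame() {
      Frame frame;
      frame.samples = std::move(pending_samples_);
      frame.packet_infos = std::move(pending_infos_);
      pending_samples_.clear();
      pending_infos_.clear();
      return frame;
    }

   private:
    std::vector<int16_t> pending_samples_;
    PacketInfos pending_infos_;
  };

In the real code the mapping from decoded samples back to their packets is only
approximate across the audio sync buffer; see the TODO(bugs.webrtc.org/10757)
note in the header comment below.
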
Bug: webrtc:10668
Change-Id: I03385d6865bbc7bfbef7634f88de820a934f787a
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/139890
Reviewed-by: Stefan Holmer <stefan@webrtc.org>
Reviewed-by: Minyue Li <minyue@webrtc.org>
Commit-Queue: Chen Xing <chxg@google.com>
Cr-Commit-Position: refs/heads/master@{#28434}
diff --git a/api/audio/BUILD.gn b/api/audio/BUILD.gn
index b5a6111..46396d6 100644
--- a/api/audio/BUILD.gn
+++ b/api/audio/BUILD.gn
@@ -16,6 +16,7 @@
]
deps = [
+ "..:rtp_packet_info",
"../../rtc_base:checks",
"../../rtc_base:rtc_base_approved",
]
diff --git a/api/audio/audio_frame.cc b/api/audio/audio_frame.cc
index 1e706b9..4c07aaf 100644
--- a/api/audio/audio_frame.cc
+++ b/api/audio/audio_frame.cc
@@ -39,6 +39,7 @@
speech_type_ = kUndefined;
vad_activity_ = kVadUnknown;
profile_timestamp_ms_ = 0;
+ packet_infos_ = RtpPacketInfos();
}
void AudioFrame::UpdateFrame(uint32_t timestamp,
@@ -72,6 +73,7 @@
timestamp_ = src.timestamp_;
elapsed_time_ms_ = src.elapsed_time_ms_;
ntp_time_ms_ = src.ntp_time_ms_;
+ packet_infos_ = src.packet_infos_;
muted_ = src.muted();
samples_per_channel_ = src.samples_per_channel_;
sample_rate_hz_ = src.sample_rate_hz_;
diff --git a/api/audio/audio_frame.h b/api/audio/audio_frame.h
index 8f1dc62..70eb701 100644
--- a/api/audio/audio_frame.h
+++ b/api/audio/audio_frame.h
@@ -14,6 +14,7 @@
#include <stddef.h>
#include <stdint.h>
+#include "api/rtp_packet_infos.h"
#include "rtc_base/constructor_magic.h"
namespace webrtc {
@@ -115,6 +116,22 @@
// class/struct needs an explicit out-of-line destructor" build error.
int64_t profile_timestamp_ms_ = 0;
+ // Information about packets used to assemble this audio frame. This is needed
+ // by |SourceTracker| when the frame is delivered to the RTCRtpReceiver's
+ // MediaStreamTrack, in order to implement getContributingSources(). See:
+ // https://w3c.github.io/webrtc-pc/#dom-rtcrtpreceiver-getcontributingsources
+ //
+ // TODO(bugs.webrtc.org/10757):
+ // Note that this information might not be fully accurate since we currently
+ // don't have a proper way to track it across the audio sync buffer. The
+ // sync buffer is the small sample-holding buffer located after the audio
+ // decoder and before where samples are assembled into output frames.
+ //
+ // |RtpPacketInfos| may also be empty if the audio samples did not come from
+ // RTP packets, e.g. if the audio was locally generated by packet loss
+ // concealment, comfort noise generation, etc.
+ RtpPacketInfos packet_infos_;
+
private:
// A permanently zeroed out buffer to represent muted frames. This is a
// header-only class, so the only way to avoid creating a separate empty