Reland "Wrap Alpha and YUV frame into one EncodedImage for transmission"

This reverts commit d756fd06fed1b6c65dcb263cbd8f00ca23d72f3b.

Original change's description:
> Revert "Wrap Alpha and YUV frame into one EncodedImage for transmission"
>
> This reverts commit 5670c86aeccc9bc1191725431de7998d21b73c07.
>
> Reason for revert: Breaks downstream build. Need to add "#include <cstring>" to stereo_encoder_adapter.cc to use std::memcpy.
>
> Original change's description:
> > Wrap Alpha and YUV frame into one EncodedImage for transmission
> >
> > With an alpha channel, we observe artifacts on the receiver side. The
> > reason is that when the YUV channel has a key frame, frame_buffer2 gets
> > a chance to drop some previous frames. It is then possible for some
> > alpha frames to get dropped, which breaks the alpha frame dependency chain.
> >
> > In this CL, we pack the encoded YUV frame and the encoded alpha frame
> > together as one entity to solve the issue.
> >
> > Bug: webrtc:8773
> > Change-Id: Ibe746a46cb41fd92b399a7069e1d89f02f292af7
> > Reviewed-on: https://webrtc-review.googlesource.com/38481
> > Commit-Queue: Qiang Chen <qiangchen@chromium.org>
> > Reviewed-by: Emircan Uysaler <emircan@webrtc.org>
> > Cr-Commit-Position: refs/heads/master@{#21737}
>
> TBR=qiangchen@chromium.org,emircan@webrtc.org
>
> Change-Id: I11eff814ce093bf6db327ebcd21b1b71a1929849
> No-Presubmit: true
> No-Tree-Checks: true
> No-Try: true
> Bug: webrtc:8773
> Reviewed-on: https://webrtc-review.googlesource.com/43260
> Reviewed-by: Taylor Brandstetter <deadbeef@webrtc.org>
> Commit-Queue: Taylor Brandstetter <deadbeef@webrtc.org>
> Cr-Commit-Position: refs/heads/master@{#21739}

TBR=deadbeef@webrtc.org,qiangchen@chromium.org,emircan@webrtc.org

Change-Id: I0d64b7e7a62e4f35aa012270d3826a23b3fb2337
Bug: webrtc:8773
Reviewed-on: https://webrtc-review.googlesource.com/43440
Commit-Queue: Qiang Chen <qiangchen@chromium.org>
Reviewed-by: Qiang Chen <qiangchen@chromium.org>
Cr-Commit-Position: refs/heads/master@{#21749}
diff --git a/modules/video_coding/BUILD.gn b/modules/video_coding/BUILD.gn
index 85edeeb..d493e4e 100644
--- a/modules/video_coding/BUILD.gn
+++ b/modules/video_coding/BUILD.gn
@@ -292,8 +292,10 @@
 
 rtc_static_library("webrtc_stereo") {
   sources = [
+    "codecs/stereo/include/multiplex_encoded_image_packer.h",
     "codecs/stereo/include/stereo_decoder_adapter.h",
     "codecs/stereo/include/stereo_encoder_adapter.h",
+    "codecs/stereo/multiplex_encoded_image_packer.cc",
     "codecs/stereo/stereo_decoder_adapter.cc",
     "codecs/stereo/stereo_encoder_adapter.cc",
   ]
@@ -314,6 +316,7 @@
     "../../common_video:common_video",
     "../../rtc_base:rtc_base",
     "../../system_wrappers",
+    "../rtp_rtcp:rtp_rtcp_format",
   ]
 }
 
diff --git a/modules/video_coding/codecs/stereo/include/multiplex_encoded_image_packer.h b/modules/video_coding/codecs/stereo/include/multiplex_encoded_image_packer.h
new file mode 100644
index 0000000..f7557ba
--- /dev/null
+++ b/modules/video_coding/codecs/stereo/include/multiplex_encoded_image_packer.h
@@ -0,0 +1,106 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_STEREO_INCLUDE_MULTIPLEX_ENCODED_IMAGE_PACKER_H_
+#define MODULES_VIDEO_CODING_CODECS_STEREO_INCLUDE_MULTIPLEX_ENCODED_IMAGE_PACKER_H_
+
+#include <vector>
+
+#include "common_types.h"  // NOLINT(build/include)
+#include "common_video/include/video_frame.h"
+
+namespace webrtc {
+
+// Struct describing the whole bundle of frames making up an image.
+// This struct is expected to sit at the beginning of a picture's
+// bitstream data.
+struct MultiplexImageHeader {
+  // The number of frame components making up the complete picture data.
+  // For example, |component_count| = 2 for a YUV frame with an Alpha frame.
+  uint8_t component_count;
+
+  // The monotonically increasing image ID assigned by the encoder. All
+  // components of a single picture share the same |image_index|.
+  uint16_t image_index;
+
+  // The location of the first MultiplexImageComponentHeader in the
+  // bitstream, as a byte offset from the beginning of the bitstream.
+  uint32_t first_component_header_offset;
+};
+const int kMultiplexImageHeaderSize =
+    sizeof(uint8_t) + sizeof(uint16_t) + sizeof(uint32_t);
+
+// Struct describing the individual image component's content.
+struct MultiplexImageComponentHeader {
+  // The location of the next MultiplexImageComponentHeader in the
+  // bitstream, as a byte offset from the beginning of the bitstream;
+  // 0 if this is the last component header.
+  uint32_t next_component_header_offset;
+
+  // Identifies which component this frame represents, e.g. YUV frame vs.
+  // Alpha frame.
+  uint8_t component_index;
+
+  // The location of the frame's encoded image data in the bitstream, as a
+  // byte offset from the beginning of the bitstream.
+  uint32_t bitstream_offset;
+
+  // The length of the encoded image data, in bytes.
+  uint32_t bitstream_length;
+
+  // Indicates the underlying VideoCodecType of the frame, e.g. VP8 or VP9.
+  VideoCodecType codec_type;
+
+  // Indicates whether the underlying frame is a key frame or a delta frame.
+  FrameType frame_type;
+};
+const int kMultiplexImageComponentHeaderSize =
+    sizeof(uint32_t) + sizeof(uint8_t) + sizeof(uint32_t) + sizeof(uint32_t) +
+    sizeof(uint8_t) + sizeof(uint8_t);
+
+// Struct holding the encoded image for one component.
+struct MultiplexImageComponent {
+  // Indicates the underlying VideoCodecType of the frame, e.g. VP8 or VP9.
+  VideoCodecType codec_type;
+
+  // Identifies which component this frame represents, e.g. YUV frame vs.
+  // Alpha frame.
+  int component_index;
+
+  // Stores the actual frame data of the encoded image.
+  EncodedImage encoded_image;
+};
+
+// Struct holding the whole bundle of components of an image.
+struct MultiplexImage {
+  int image_index;
+  int component_count;
+  std::vector<MultiplexImageComponent> image_components;
+
+  MultiplexImage(int picture_index, int frame_count);
+};
+
+// A utility class providing conversion between two representations of a
+// multiplex image frame:
+// 1. The packed version is a single encoded image, with all necessary
+//    metadata packed into the bitstream as headers.
+// 2. The unpacked version is essentially a list of encoded images, one per
+//    component.
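+//
+// Packed bitstream layout (all multi-byte fields are written big-endian):
+//   MultiplexImageHeader
+//   MultiplexImageComponentHeader 0 ... MultiplexImageComponentHeader N-1
+//   bitstream of component 0 ... bitstream of component N-1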
+class MultiplexEncodedImagePacker {
+ public:
+  // Note: It is the caller's responsibility to release the buffer of the
+  // result.
+  static EncodedImage PackAndRelease(const MultiplexImage& image);
+
+  // Note: The image components share their memory with |combined_image|.
+  static MultiplexImage Unpack(const EncodedImage& combined_image);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_VIDEO_CODING_CODECS_STEREO_INCLUDE_MULTIPLEX_ENCODED_IMAGE_PACKER_H_
diff --git a/modules/video_coding/codecs/stereo/include/stereo_encoder_adapter.h b/modules/video_coding/codecs/stereo/include/stereo_encoder_adapter.h
index 95283f1..544a218 100644
--- a/modules/video_coding/codecs/stereo/include/stereo_encoder_adapter.h
+++ b/modules/video_coding/codecs/stereo/include/stereo_encoder_adapter.h
@@ -18,6 +18,7 @@
 #include "api/video_codecs/sdp_video_format.h"
 #include "api/video_codecs/video_encoder.h"
 #include "api/video_codecs/video_encoder_factory.h"
+#include "modules/video_coding/codecs/stereo/include/multiplex_encoded_image_packer.h"
 #include "modules/video_coding/include/video_codec_interface.h"
 
 namespace webrtc {
@@ -65,12 +66,13 @@
   std::vector<std::unique_ptr<AdapterEncodedImageCallback>> adapter_callbacks_;
   EncodedImageCallback* encoded_complete_callback_;
 
-  // Holds the encoded image info.
-  struct ImageStereoInfo;
-  std::map<uint32_t /* timestamp */, ImageStereoInfo> image_stereo_info_;
+  std::map<uint32_t /* timestamp */, MultiplexImage> stashed_images_;
 
   uint16_t picture_index_ = 0;
   std::vector<uint8_t> stereo_dummy_planes_;
+
+  int key_frame_interval_;
+  EncodedImage combined_image_;
 };
 
 }  // namespace webrtc
diff --git a/modules/video_coding/codecs/stereo/multiplex_encoded_image_packer.cc b/modules/video_coding/codecs/stereo/multiplex_encoded_image_packer.cc
new file mode 100644
index 0000000..6a33bb1
--- /dev/null
+++ b/modules/video_coding/codecs/stereo/multiplex_encoded_image_packer.cc
@@ -0,0 +1,230 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/stereo/include/multiplex_encoded_image_packer.h"
+
+#include <cstring>
+
+#include "modules/rtp_rtcp/source/byte_io.h"
+
+namespace webrtc {
+int PackHeader(uint8_t* buffer, MultiplexImageHeader header) {
+  int offset = 0;
+  ByteWriter<uint8_t>::WriteBigEndian(buffer + offset, header.component_count);
+  offset += sizeof(uint8_t);
+
+  ByteWriter<uint16_t>::WriteBigEndian(buffer + offset, header.image_index);
+  offset += sizeof(uint16_t);
+
+  ByteWriter<uint32_t>::WriteBigEndian(buffer + offset,
+                                       header.first_component_header_offset);
+  offset += sizeof(uint32_t);
+
+  RTC_DCHECK_EQ(offset, kMultiplexImageHeaderSize);
+  return offset;
+}
+
+MultiplexImageHeader UnpackHeader(uint8_t* buffer) {
+  MultiplexImageHeader header;
+  int offset = 0;
+  header.component_count = ByteReader<uint8_t>::ReadBigEndian(buffer + offset);
+  offset += sizeof(uint8_t);
+
+  header.image_index = ByteReader<uint16_t>::ReadBigEndian(buffer + offset);
+  offset += sizeof(uint16_t);
+
+  header.first_component_header_offset =
+      ByteReader<uint32_t>::ReadBigEndian(buffer + offset);
+  offset += sizeof(uint32_t);
+
+  RTC_DCHECK_EQ(offset, kMultiplexImageHeaderSize);
+  return header;
+}
+
+int PackFrameHeader(uint8_t* buffer,
+                    MultiplexImageComponentHeader frame_header) {
+  int offset = 0;
+  ByteWriter<uint32_t>::WriteBigEndian(
+      buffer + offset, frame_header.next_component_header_offset);
+  offset += sizeof(uint32_t);
+
+  ByteWriter<uint8_t>::WriteBigEndian(buffer + offset,
+                                      frame_header.component_index);
+  offset += sizeof(uint8_t);
+
+  ByteWriter<uint32_t>::WriteBigEndian(buffer + offset,
+                                       frame_header.bitstream_offset);
+  offset += sizeof(uint32_t);
+
+  ByteWriter<uint32_t>::WriteBigEndian(buffer + offset,
+                                       frame_header.bitstream_length);
+  offset += sizeof(uint32_t);
+
+  ByteWriter<uint8_t>::WriteBigEndian(buffer + offset, frame_header.codec_type);
+  offset += sizeof(uint8_t);
+
+  ByteWriter<uint8_t>::WriteBigEndian(buffer + offset, frame_header.frame_type);
+  offset += sizeof(uint8_t);
+
+  RTC_DCHECK_EQ(offset, kMultiplexImageComponentHeaderSize);
+  return offset;
+}
+
+MultiplexImageComponentHeader UnpackFrameHeader(uint8_t* buffer) {
+  MultiplexImageComponentHeader frame_header;
+  int offset = 0;
+
+  frame_header.next_component_header_offset =
+      ByteReader<uint32_t>::ReadBigEndian(buffer + offset);
+  offset += sizeof(uint32_t);
+
+  frame_header.component_index =
+      ByteReader<uint8_t>::ReadBigEndian(buffer + offset);
+  offset += sizeof(uint8_t);
+
+  frame_header.bitstream_offset =
+      ByteReader<uint32_t>::ReadBigEndian(buffer + offset);
+  offset += sizeof(uint32_t);
+
+  frame_header.bitstream_length =
+      ByteReader<uint32_t>::ReadBigEndian(buffer + offset);
+  offset += sizeof(uint32_t);
+
+  frame_header.codec_type = static_cast<VideoCodecType>(
+      ByteReader<uint8_t>::ReadBigEndian(buffer + offset));
+  offset += sizeof(uint8_t);
+
+  frame_header.frame_type = static_cast<FrameType>(
+      ByteReader<uint8_t>::ReadBigEndian(buffer + offset));
+  offset += sizeof(uint8_t);
+
+  RTC_DCHECK_EQ(offset, kMultiplexImageComponentHeaderSize);
+  return frame_header;
+}
+
+void PackBitstream(uint8_t* buffer, MultiplexImageComponent image) {
+  std::memcpy(buffer, image.encoded_image._buffer,
+              image.encoded_image._length);
+}
+
+MultiplexImage::MultiplexImage(int picture_index, int frame_count)
+    : image_index(picture_index), component_count(frame_count) {}
+
+EncodedImage MultiplexEncodedImagePacker::PackAndRelease(
+    const MultiplexImage& multiplex_image) {
+  MultiplexImageHeader header;
+  std::vector<MultiplexImageComponentHeader> frame_headers;
+
+  header.component_count = multiplex_image.component_count;
+  header.image_index = multiplex_image.image_index;
+  int header_offset = kMultiplexImageHeaderSize;
+  header.first_component_header_offset = header_offset;
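+  // Payload bitstreams are laid out immediately after the image header and
+  // all of the component headers.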
+  int bitstream_offset = header_offset + kMultiplexImageComponentHeaderSize *
+                                             header.component_count;
+
+  const std::vector<MultiplexImageComponent>& images =
+      multiplex_image.image_components;
+  EncodedImage combined_image = images[0].encoded_image;
+  for (size_t i = 0; i < images.size(); i++) {
+    MultiplexImageComponentHeader frame_header;
+    header_offset += kMultiplexImageComponentHeaderSize;
+    frame_header.next_component_header_offset =
+        (i == images.size() - 1) ? 0 : header_offset;
+    frame_header.component_index = images[i].component_index;
+
+    frame_header.bitstream_offset = bitstream_offset;
+    frame_header.bitstream_length =
+        static_cast<uint32_t>(images[i].encoded_image._length);
+    bitstream_offset += frame_header.bitstream_length;
+
+    frame_header.codec_type = images[i].codec_type;
+    frame_header.frame_type = images[i].encoded_image._frameType;
+
+    // If any component is a delta frame, the combined frame must be marked
+    // as a delta frame: the whole image can be decoded without previous
+    // frame data only if every component is a key frame. Thus the combined
+    // frame is a key frame only when all components are key frames.
+    if (frame_header.frame_type == FrameType::kVideoFrameDelta) {
+      combined_image._frameType = FrameType::kVideoFrameDelta;
+    }
+
+    frame_headers.push_back(frame_header);
+  }
+
+  combined_image._length = combined_image._size = bitstream_offset;
+  combined_image._buffer = new uint8_t[combined_image._length];
+
+  // Image header
+  header_offset = PackHeader(combined_image._buffer, header);
+  RTC_DCHECK_EQ(header.first_component_header_offset,
+                kMultiplexImageHeaderSize);
+
+  // Frame headers
+  for (size_t i = 0; i < images.size(); i++) {
+    int relative_offset = PackFrameHeader(
+        combined_image._buffer + header_offset, frame_headers[i]);
+    RTC_DCHECK_EQ(relative_offset, kMultiplexImageComponentHeaderSize);
+
+    header_offset = frame_headers[i].next_component_header_offset;
+    RTC_DCHECK_EQ(header_offset,
+                  (i == images.size() - 1)
+                      ? 0
+                      : (kMultiplexImageHeaderSize +
+                         kMultiplexImageComponentHeaderSize * (i + 1)));
+  }
+
+  // Bitstreams
+  for (size_t i = 0; i < images.size(); i++) {
+    PackBitstream(combined_image._buffer + frame_headers[i].bitstream_offset,
+                  images[i]);
+    delete[] images[i].encoded_image._buffer;
+  }
+
+  return combined_image;
+}
+
+MultiplexImage MultiplexEncodedImagePacker::Unpack(
+    const EncodedImage& combined_image) {
+  const MultiplexImageHeader& header = UnpackHeader(combined_image._buffer);
+
+  MultiplexImage multiplex_image(header.image_index, header.component_count);
+
+  std::vector<MultiplexImageComponentHeader> frame_headers;
+
+  int header_offset = header.first_component_header_offset;
+
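+  // Walk the chain of component headers; a |next_component_header_offset|
+  // of 0 marks the last header.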
+  while (header_offset > 0) {
+    frame_headers.push_back(
+        UnpackFrameHeader(combined_image._buffer + header_offset));
+    header_offset = frame_headers.back().next_component_header_offset;
+  }
+
+  RTC_DCHECK_LE(frame_headers.size(), header.component_count);
+  for (size_t i = 0; i < frame_headers.size(); i++) {
+    MultiplexImageComponent image_component;
+    image_component.component_index = frame_headers[i].component_index;
+    image_component.codec_type = frame_headers[i].codec_type;
+
+    EncodedImage encoded_image = combined_image;
+    encoded_image._frameType = frame_headers[i].frame_type;
+    encoded_image._length = encoded_image._size =
+        static_cast<size_t>(frame_headers[i].bitstream_length);
+    encoded_image._buffer =
+        combined_image._buffer + frame_headers[i].bitstream_offset;
+
+    image_component.encoded_image = encoded_image;
+
+    multiplex_image.image_components.push_back(image_component);
+  }
+
+  return multiplex_image;
+}
+
+}  // namespace webrtc
diff --git a/modules/video_coding/codecs/stereo/stereo_decoder_adapter.cc b/modules/video_coding/codecs/stereo/stereo_decoder_adapter.cc
index 96d5552..59fcc50 100644
--- a/modules/video_coding/codecs/stereo/stereo_decoder_adapter.cc
+++ b/modules/video_coding/codecs/stereo/stereo_decoder_adapter.cc
@@ -114,21 +114,24 @@
     const RTPFragmentationHeader* /*fragmentation*/,
     const CodecSpecificInfo* codec_specific_info,
     int64_t render_time_ms) {
-  const CodecSpecificInfoStereo& stereo_info =
-      codec_specific_info->codecSpecific.stereo;
-  RTC_DCHECK_LT(static_cast<size_t>(stereo_info.indices.frame_index),
-                decoders_.size());
-  if (stereo_info.indices.frame_count == 1) {
-    RTC_DCHECK_EQ(static_cast<int>(stereo_info.indices.frame_index), 0);
+  const MultiplexImage& image =
+      MultiplexEncodedImagePacker::Unpack(input_image);
+
+  if (image.component_count == 1) {
     RTC_DCHECK(decoded_data_.find(input_image._timeStamp) ==
                decoded_data_.end());
     decoded_data_.emplace(std::piecewise_construct,
                           std::forward_as_tuple(input_image._timeStamp),
                           std::forward_as_tuple(kAXXStream));
   }
-
-  int32_t rv = decoders_[stereo_info.indices.frame_index]->Decode(
-      input_image, missing_frames, nullptr, nullptr, render_time_ms);
+  int32_t rv = 0;
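+  // Decode each component with its corresponding sub-decoder, stopping at
+  // the first error.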
+  for (size_t i = 0; i < image.image_components.size(); i++) {
+    rv = decoders_[image.image_components[i].component_index]->Decode(
+        image.image_components[i].encoded_image, missing_frames, nullptr,
+        nullptr, render_time_ms);
+    if (rv != WEBRTC_VIDEO_CODEC_OK)
+      return rv;
+  }
   return rv;
 }
 
@@ -174,8 +177,6 @@
   }
   RTC_DCHECK(decoded_data_.find(decoded_image->timestamp()) ==
              decoded_data_.end());
-  // decoded_data_[decoded_image->timestamp()] =
-  //     DecodedImageData(stream_idx, *decoded_image, decode_time_ms, qp);
   decoded_data_.emplace(
       std::piecewise_construct,
       std::forward_as_tuple(decoded_image->timestamp()),
diff --git a/modules/video_coding/codecs/stereo/stereo_encoder_adapter.cc b/modules/video_coding/codecs/stereo/stereo_encoder_adapter.cc
index 60e3ea8..b44165e 100644
--- a/modules/video_coding/codecs/stereo/stereo_encoder_adapter.cc
+++ b/modules/video_coding/codecs/stereo/stereo_encoder_adapter.cc
@@ -10,6 +10,8 @@
 
 #include "modules/video_coding/codecs/stereo/include/stereo_encoder_adapter.h"
 
+#include <cstring>
+
 #include "common_video/include/video_frame.h"
 #include "common_video/include/video_frame_buffer.h"
 #include "common_video/libyuv/include/webrtc_libyuv.h"
@@ -43,20 +45,6 @@
   const AlphaCodecStream stream_idx_;
 };
 
-// Holds the encoded image info.
-struct StereoEncoderAdapter::ImageStereoInfo {
-  ImageStereoInfo(uint16_t picture_index, uint8_t frame_count)
-      : picture_index(picture_index),
-        frame_count(frame_count),
-        encoded_count(0) {}
-  uint16_t picture_index;
-  uint8_t frame_count;
-  uint8_t encoded_count;
-
- private:
-  RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(ImageStereoInfo);
-};
-
 StereoEncoderAdapter::StereoEncoderAdapter(
     VideoEncoderFactory* factory,
     const SdpVideoFormat& associated_format)
@@ -80,6 +68,26 @@
   RTC_DCHECK_EQ(kVideoCodecStereo, inst->codecType);
   VideoCodec settings = *inst;
   settings.codecType = PayloadStringToCodecType(associated_format_.name);
+
+  // Take over the key frame interval at the adapter level, because we have
+  // to sync the key frames for both sub-encoders.
+  switch (settings.codecType) {
+    case kVideoCodecVP8:
+      key_frame_interval_ = settings.VP8()->keyFrameInterval;
+      settings.VP8()->keyFrameInterval = 0;
+      break;
+    case kVideoCodecVP9:
+      key_frame_interval_ = settings.VP9()->keyFrameInterval;
+      settings.VP9()->keyFrameInterval = 0;
+      break;
+    case kVideoCodecH264:
+      key_frame_interval_ = settings.H264()->keyFrameInterval;
+      settings.H264()->keyFrameInterval = 0;
+      break;
+    default:
+      break;
+  }
+
   for (size_t i = 0; i < kAlphaCodecStreams; ++i) {
     std::unique_ptr<VideoEncoder> encoder =
         factory_->CreateVideoEncoder(associated_format_);
@@ -104,16 +112,24 @@
     return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
   }
 
+  std::vector<FrameType> adjusted_frame_types;
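+  // The adapter decides the frame type itself, ignoring the caller-requested
+  // |frame_types|, so that key frames land on the same pictures for both
+  // sub-encoders.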
+  if (key_frame_interval_ > 0 && picture_index_ % key_frame_interval_ == 0) {
+    adjusted_frame_types.push_back(kVideoFrameKey);
+  } else {
+    adjusted_frame_types.push_back(kVideoFrameDelta);
+  }
   const bool has_alpha = input_image.video_frame_buffer()->type() ==
                          VideoFrameBuffer::Type::kI420A;
-  image_stereo_info_.emplace(
+  stashed_images_.emplace(
       std::piecewise_construct, std::forward_as_tuple(input_image.timestamp()),
-      std::forward_as_tuple(picture_index_++,
+      std::forward_as_tuple(picture_index_,
                             has_alpha ? kAlphaCodecStreams : 1));
 
+  ++picture_index_;
+
   // Encode YUV
   int rv = encoders_[kYUVStream]->Encode(input_image, codec_specific_info,
-                                         frame_types);
+                                         &adjusted_frame_types);
   // If we do not receive an alpha frame, we send a single frame for this
   // |picture_index_|. The receiver will receive |frame_count| as 1, which
   // specifies this case.
@@ -132,7 +148,7 @@
   VideoFrame alpha_image(alpha_buffer, input_image.timestamp(),
                          input_image.render_time_ms(), input_image.rotation());
   rv = encoders_[kAXXStream]->Encode(alpha_image, codec_specific_info,
-                                     frame_types);
+                                     &adjusted_frame_types);
   return rv;
 }
 
@@ -174,6 +190,16 @@
   }
   encoders_.clear();
   adapter_callbacks_.clear();
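+  // Free any stashed components and the last combined image; their buffers
+  // are owned by this adapter.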
+  for (auto& stashed_image : stashed_images_) {
+    for (auto& image_component : stashed_image.second.image_components) {
+      delete[] image_component.encoded_image._buffer;
+    }
+  }
+  stashed_images_.clear();
+  if (combined_image_._buffer) {
+    delete[] combined_image_._buffer;
+    combined_image_._buffer = nullptr;
+  }
   return WEBRTC_VIDEO_CODEC_OK;
 }
 
@@ -186,26 +212,47 @@
     const EncodedImage& encodedImage,
     const CodecSpecificInfo* codecSpecificInfo,
     const RTPFragmentationHeader* fragmentation) {
-  const VideoCodecType associated_codec_type = codecSpecificInfo->codecType;
-  const auto& image_stereo_info_itr =
-      image_stereo_info_.find(encodedImage._timeStamp);
-  RTC_DCHECK(image_stereo_info_itr != image_stereo_info_.end());
-  ImageStereoInfo& image_stereo_info = image_stereo_info_itr->second;
-  const uint8_t frame_count = image_stereo_info.frame_count;
-  const uint16_t picture_index = image_stereo_info.picture_index;
-  if (++image_stereo_info.encoded_count == frame_count)
-    image_stereo_info_.erase(image_stereo_info_itr);
-
   CodecSpecificInfo codec_info = *codecSpecificInfo;
   codec_info.codecType = kVideoCodecStereo;
-  codec_info.codec_name = "stereo";
-  codec_info.codecSpecific.stereo.associated_codec_type = associated_codec_type;
-  codec_info.codecSpecific.stereo.indices.frame_index = stream_idx;
-  codec_info.codecSpecific.stereo.indices.frame_count = frame_count;
-  codec_info.codecSpecific.stereo.indices.picture_index = picture_index;
+  const auto& stashed_image_itr = stashed_images_.find(encodedImage._timeStamp);
+  RTC_DCHECK(stashed_image_itr != stashed_images_.end());
+  const auto& stashed_image_next_itr = std::next(stashed_image_itr, 1);
+  MultiplexImage& stashed_image = stashed_image_itr->second;
+  const uint8_t frame_count = stashed_image.component_count;
 
-  encoded_complete_callback_->OnEncodedImage(encodedImage, &codec_info,
-                                             fragmentation);
+  // Save the image as one component of the stashed picture.
+  MultiplexImageComponent image_component;
+  image_component.component_index = stream_idx;
+  image_component.codec_type =
+      PayloadStringToCodecType(associated_format_.name);
+  image_component.encoded_image = encodedImage;
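+  // Deep-copy the payload so the stashed component owns its buffer instead
+  // of aliasing the sub-encoder's output.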
+  image_component.encoded_image._buffer = new uint8_t[encodedImage._length];
+  std::memcpy(image_component.encoded_image._buffer, encodedImage._buffer,
+              encodedImage._length);
+
+  stashed_image.image_components.push_back(image_component);
+
+  if (stashed_image.image_components.size() == frame_count) {
+    // Complete case: all components of this picture have arrived.
+    auto iter = stashed_images_.begin();
+    while (iter != stashed_images_.end() && iter != stashed_image_next_itr) {
+      // No image at all, skip. Advance the iterator first so an empty entry
+      // cannot make the loop spin forever.
+      if (iter->second.image_components.size() == 0) {
+        ++iter;
+        continue;
+      }
+
+      // We have to send out those stashed frames, otherwise the delta frame
+      // dependency chain is broken.
+      if (combined_image_._buffer)
+        delete[] combined_image_._buffer;
+      combined_image_ =
+          MultiplexEncodedImagePacker::PackAndRelease(iter->second);
+      encoded_complete_callback_->OnEncodedImage(combined_image_, &codec_info,
+                                                 fragmentation);
+      iter++;
+    }
+
+    stashed_images_.erase(stashed_images_.begin(), stashed_image_next_itr);
+  }
   return EncodedImageCallback::Result(EncodedImageCallback::Result::OK);
 }
 
diff --git a/modules/video_coding/codecs/stereo/test/stereo_adapter_unittest.cc b/modules/video_coding/codecs/stereo/test/stereo_adapter_unittest.cc
index 976a5bb..b1e1275 100644
--- a/modules/video_coding/codecs/stereo/test/stereo_adapter_unittest.cc
+++ b/modules/video_coding/codecs/stereo/test/stereo_adapter_unittest.cc
@@ -112,12 +112,6 @@
   ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
 
   EXPECT_EQ(kVideoCodecStereo, codec_specific_info.codecType);
-  EXPECT_EQ(kStereoAssociatedCodecType,
-            codec_specific_info.codecSpecific.stereo.associated_codec_type);
-  EXPECT_EQ(0, codec_specific_info.codecSpecific.stereo.indices.frame_index);
-  EXPECT_EQ(1, codec_specific_info.codecSpecific.stereo.indices.frame_count);
-  EXPECT_EQ(0ull,
-            codec_specific_info.codecSpecific.stereo.indices.picture_index);
 
   EXPECT_EQ(
       WEBRTC_VIDEO_CODEC_OK,
@@ -131,38 +125,16 @@
 
 TEST_F(TestStereoAdapter, EncodeDecodeI420AFrame) {
   std::unique_ptr<VideoFrame> yuva_frame = CreateI420AInputFrame();
-  const size_t expected_num_encoded_frames = 2;
-  SetWaitForEncodedFramesThreshold(expected_num_encoded_frames);
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
             encoder_->Encode(*yuva_frame, nullptr, nullptr));
-  std::vector<EncodedImage> encoded_frames;
-  std::vector<CodecSpecificInfo> codec_specific_infos;
-  ASSERT_TRUE(WaitForEncodedFrames(&encoded_frames, &codec_specific_infos));
-  EXPECT_EQ(expected_num_encoded_frames, encoded_frames.size());
-  EXPECT_EQ(expected_num_encoded_frames, codec_specific_infos.size());
+  EncodedImage encoded_frame;
+  CodecSpecificInfo codec_specific_info;
+  ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
 
-  const CodecSpecificInfo& yuv_info = codec_specific_infos[kYUVStream];
-  EXPECT_EQ(kVideoCodecStereo, yuv_info.codecType);
-  EXPECT_EQ(kStereoAssociatedCodecType,
-            yuv_info.codecSpecific.stereo.associated_codec_type);
-  EXPECT_EQ(kYUVStream, yuv_info.codecSpecific.stereo.indices.frame_index);
-  EXPECT_EQ(kAlphaCodecStreams,
-            yuv_info.codecSpecific.stereo.indices.frame_count);
-  EXPECT_EQ(0ull, yuv_info.codecSpecific.stereo.indices.picture_index);
+  EXPECT_EQ(kVideoCodecStereo, codec_specific_info.codecType);
 
-  const CodecSpecificInfo& axx_info = codec_specific_infos[kAXXStream];
-  EXPECT_EQ(kVideoCodecStereo, axx_info.codecType);
-  EXPECT_EQ(kStereoAssociatedCodecType,
-            axx_info.codecSpecific.stereo.associated_codec_type);
-  EXPECT_EQ(kAXXStream, axx_info.codecSpecific.stereo.indices.frame_index);
-  EXPECT_EQ(kAlphaCodecStreams,
-            axx_info.codecSpecific.stereo.indices.frame_count);
-  EXPECT_EQ(0ull, axx_info.codecSpecific.stereo.indices.picture_index);
-
-  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frames[kYUVStream],
-                                                    false, nullptr, &yuv_info));
-  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frames[kAXXStream],
-                                                    false, nullptr, &axx_info));
+  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+            decoder_->Decode(encoded_frame, false, nullptr, nullptr));
   std::unique_ptr<VideoFrame> decoded_frame;
   rtc::Optional<uint8_t> decoded_qp;
   ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));