FrameBuffer for the new jitter buffer.

BUG=webrtc:5514
R=danilchap@webrtc.org, mflodman@webrtc.org

Review URL: https://codereview.webrtc.org/1969403007 .

Cr-Commit-Position: refs/heads/master@{#12798}
diff --git a/webrtc/modules/modules.gyp b/webrtc/modules/modules.gyp
index 145f4c0..e11bbfe 100644
--- a/webrtc/modules/modules.gyp
+++ b/webrtc/modules/modules.gyp
@@ -362,6 +362,7 @@
             'video_coding/codecs/vp8/simulcast_encoder_adapter_unittest.cc',
             'video_coding/codecs/vp8/simulcast_unittest.cc',
             'video_coding/codecs/vp8/simulcast_unittest.h',
+            'video_coding/frame_buffer2_unittest.cc',
             'video_coding/include/mock/mock_vcm_callbacks.h',
             'video_coding/decoding_state_unittest.cc',
             'video_coding/histogram_unittest.cc',
diff --git a/webrtc/modules/video_coding/BUILD.gn b/webrtc/modules/video_coding/BUILD.gn
index 7c6c9ed..755e6ef 100644
--- a/webrtc/modules/video_coding/BUILD.gn
+++ b/webrtc/modules/video_coding/BUILD.gn
@@ -21,6 +21,8 @@
     "fec_tables_xor.h",
     "frame_buffer.cc",
     "frame_buffer.h",
+    "frame_buffer2.cc",
+    "frame_buffer2.h",
     "frame_object.cc",
     "frame_object.h",
     "generic_decoder.cc",
diff --git a/webrtc/modules/video_coding/frame_buffer2.cc b/webrtc/modules/video_coding/frame_buffer2.cc
new file mode 100644
index 0000000..c6a1a06
--- /dev/null
+++ b/webrtc/modules/video_coding/frame_buffer2.cc
@@ -0,0 +1,154 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/video_coding/frame_buffer2.h"
+
+#include <algorithm>
+
+#include "webrtc/base/checks.h"
+#include "webrtc/modules/video_coding/frame_object.h"
+#include "webrtc/modules/video_coding/jitter_estimator.h"
+#include "webrtc/modules/video_coding/sequence_number_util.h"
+#include "webrtc/modules/video_coding/timing.h"
+#include "webrtc/system_wrappers/include/clock.h"
+
+namespace webrtc {
+namespace video_coding {
+
+namespace {
+// The maximum age of decoded frames tracked by frame buffer, compared to
+// |newest_picture_id_|. Measured in picture ids (not time): frames more
+// than this many ids behind the newest one are pruned from the history.
+constexpr int kMaxFrameAge = 4096;
+
+// The maximum number of decoded frames being tracked by the frame buffer.
+constexpr int kMaxNumHistoryFrames = 256;
+}  // namespace
+
+// Ordering for FrameKeys: primarily by picture id (with uint16_t
+// wrap-around handled by AheadOf), secondarily by spatial layer.
+//
+// NOTE(review): AheadOf defines a circular order over the full uint16_t
+// range, so this is only a strict weak ordering as long as all live keys
+// span less than half the picture id space — presumably guaranteed by the
+// kMaxFrameAge pruning in InsertFrame(); confirm.
+bool FrameBuffer::FrameComp::operator()(const FrameKey& f1,
+                                        const FrameKey& f2) const {
+  // first = picture id
+  // second = spatial layer
+  if (f1.first == f2.first)
+    return f1.second < f2.second;
+  return AheadOf(f2.first, f1.first);
+}
+
+// All dependencies are externally owned and must outlive this FrameBuffer.
+// |newest_picture_id_| is initialized to -1 to mark "no frame inserted
+// yet" (valid picture ids are uint16_t, so -1 is unambiguous).
+FrameBuffer::FrameBuffer(Clock* clock,
+                         VCMJitterEstimator* jitter_estimator,
+                         const VCMTiming* timing)
+    : clock_(clock),
+      frame_inserted_event_(false, false),
+      jitter_estimator_(jitter_estimator),
+      timing_(timing),
+      newest_picture_id_(-1) {}
+
+// Blocks up to |max_wait_time_ms| waiting for a continuous (decodable)
+// frame. Returns the chosen frame, or an empty unique_ptr on timeout.
+// Each insertion wakes this function so it can reselect the best frame.
+std::unique_ptr<FrameObject> FrameBuffer::NextFrame(int64_t max_wait_time_ms) {
+  int64_t latest_return_time = clock_->TimeInMilliseconds() + max_wait_time_ms;
+  while (true) {
+    int64_t now = clock_->TimeInMilliseconds();
+    int64_t wait_ms = max_wait_time_ms;
+
+    crit_.Enter();
+    // Reset the event while holding the lock so a Set() from a concurrent
+    // InsertFrame() after this point is observed by the Wait() below.
+    frame_inserted_event_.Reset();
+    auto next_frame = frames_.end();
+    for (auto frame_it = frames_.begin(); frame_it != frames_.end();
+         ++frame_it) {
+      const FrameObject& frame = *frame_it->second;
+      if (IsContinuous(frame)) {
+        next_frame = frame_it;
+        int64_t render_time = timing_->RenderTimeMs(frame.timestamp, now);
+        wait_ms = timing_->MaxWaitingTime(render_time, now);
+
+        // This will cause the frame buffer to prefer high framerate rather
+        // than high resolution in the case of the decoder not decoding fast
+        // enough and the stream has multiple spatial and temporal layers.
+        if (wait_ms == 0)
+          continue;
+
+        break;
+      }
+    }
+    crit_.Leave();
+
+    // If the timeout occurs, return. Otherwise a new frame has been inserted
+    // and the best frame to decode next will be selected again.
+    // |next_frame| stays valid across the unlocked Wait(): std::map
+    // iterators are not invalidated by the insertions done in InsertFrame().
+    wait_ms = std::min<int64_t>(wait_ms, latest_return_time - now);
+    wait_ms = std::max<int64_t>(wait_ms, 0);
+    if (!frame_inserted_event_.Wait(wait_ms)) {
+      crit_.Enter();
+      if (next_frame != frames_.end()) {
+        // TODO(philipel): update jitter estimator with correct values.
+        jitter_estimator_->UpdateEstimate(100, 100);
+
+        decoded_frames_.insert(next_frame->first);
+        std::unique_ptr<FrameObject> frame = std::move(next_frame->second);
+        // Drop everything up to and including the returned frame; frames
+        // older than the one handed to the decoder are no longer usable.
+        frames_.erase(frames_.begin(), ++next_frame);
+        crit_.Leave();
+        return frame;
+      } else {
+        crit_.Leave();
+        return std::unique_ptr<FrameObject>();
+      }
+    }
+  }
+}
+
+// Inserts |frame| into the buffer and wakes any thread blocked in
+// NextFrame(). Also updates |newest_picture_id_| and prunes the decoded
+// frame history, both by count and by age.
+void FrameBuffer::InsertFrame(std::unique_ptr<FrameObject> frame) {
+  rtc::CritScope lock(&crit_);
+  if (newest_picture_id_ == -1)
+    newest_picture_id_ = frame->picture_id;
+
+  if (AheadOf<uint16_t>(frame->picture_id, newest_picture_id_))
+    newest_picture_id_ = frame->picture_id;
+
+  // Remove frames as long as we have too many, |kMaxNumHistoryFrames|.
+  while (decoded_frames_.size() > kMaxNumHistoryFrames)
+    decoded_frames_.erase(decoded_frames_.begin());
+
+  // Remove frames that are too old, i.e. more than |kMaxFrameAge| picture
+  // ids behind |newest_picture_id_| (modulo 2^16).
+  uint16_t old_picture_id = Subtract<1 << 16>(newest_picture_id_, kMaxFrameAge);
+  auto old_decoded_it =
+      decoded_frames_.lower_bound(FrameKey(old_picture_id, 0));
+  decoded_frames_.erase(decoded_frames_.begin(), old_decoded_it);
+
+  FrameKey key(frame->picture_id, frame->spatial_layer);
+  frames_[key] = std::move(frame);
+  frame_inserted_event_.Set();
+}
+
+// Returns true if every dependency of |frame| has already been decoded,
+// i.e. the frame could be handed to the decoder next. Caller must hold
+// |crit_| (enforced by EXCLUSIVE_LOCKS_REQUIRED in the header).
+bool FrameBuffer::IsContinuous(const FrameObject& frame) const {
+  // If a frame with an earlier picture id was inserted compared to the last
+  // decoded frame's picture id then that frame arrived too late.
+  if (!decoded_frames_.empty() &&
+      AheadOf(decoded_frames_.rbegin()->first, frame.picture_id)) {
+    return false;
+  }
+
+  // Have we decoded all frames that this frame depends on?
+  for (size_t r = 0; r < frame.num_references; ++r) {
+    FrameKey ref_key(frame.references[r], frame.spatial_layer);
+    if (decoded_frames_.find(ref_key) == decoded_frames_.end())
+      return false;
+  }
+
+  // If this is a layer frame, have we decoded the lower layer of this
+  // super frame?
+  if (frame.inter_layer_predicted) {
+    RTC_DCHECK_GT(frame.spatial_layer, 0);
+    FrameKey ref_key(frame.picture_id, frame.spatial_layer - 1);
+    if (decoded_frames_.find(ref_key) == decoded_frames_.end())
+      return false;
+  }
+
+  return true;
+}
+
+}  // namespace video_coding
+}  // namespace webrtc
diff --git a/webrtc/modules/video_coding/frame_buffer2.h b/webrtc/modules/video_coding/frame_buffer2.h
new file mode 100644
index 0000000..10cae42
--- /dev/null
+++ b/webrtc/modules/video_coding/frame_buffer2.h
@@ -0,0 +1,83 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_FRAME_BUFFER2_H_
+#define WEBRTC_MODULES_VIDEO_CODING_FRAME_BUFFER2_H_
+
+#include <array>
+#include <map>
+#include <memory>
+#include <set>
+#include <utility>
+
+#include "webrtc/base/constructormagic.h"
+#include "webrtc/base/criticalsection.h"
+#include "webrtc/base/event.h"
+#include "webrtc/base/thread_annotations.h"
+
+namespace webrtc {
+
+class Clock;
+class VCMJitterEstimator;
+class VCMTiming;
+
+namespace video_coding {
+
+class FrameObject;
+
+// Thread-safe buffer that orders incoming FrameObjects and hands them out
+// in decodable order: one thread inserts frames via InsertFrame() while
+// another blocks in NextFrame() waiting for the next continuous frame.
+class FrameBuffer {
+ public:
+  FrameBuffer(Clock* clock,
+              VCMJitterEstimator* jitter_estimator,
+              const VCMTiming* timing);
+
+  // Insert a frame into the frame buffer.
+  void InsertFrame(std::unique_ptr<FrameObject> frame);
+
+  // Get the next frame for decoding. Will return at latest after
+  // |max_wait_time_ms|, with either a managed FrameObject or an empty
+  // unique ptr if there is no available frame for decoding.
+  std::unique_ptr<FrameObject> NextFrame(int64_t max_wait_time_ms);
+
+ private:
+  // FrameKey is a pair of (picture id, spatial layer).
+  using FrameKey = std::pair<uint16_t, uint8_t>;
+
+  // Comparator used to sort frames, first on their picture id, and second
+  // on their spatial layer.
+  struct FrameComp {
+    bool operator()(const FrameKey& f1, const FrameKey& f2) const;
+  };
+
+  // Determines whether a frame is continuous, i.e. all of its references
+  // (and its lower spatial layer, if inter-layer predicted) are decoded.
+  bool IsContinuous(const FrameObject& frame) const
+      EXCLUSIVE_LOCKS_REQUIRED(crit_);
+
+  // Keep track of decoded frames.
+  std::set<FrameKey, FrameComp> decoded_frames_ GUARDED_BY(crit_);
+
+  // The actual buffer that holds the FrameObjects.
+  std::map<FrameKey, std::unique_ptr<FrameObject>, FrameComp> frames_
+      GUARDED_BY(crit_);
+
+  rtc::CriticalSection crit_;
+  Clock* const clock_;  // Not owned.
+  // Signaled by InsertFrame() to wake a thread blocked in NextFrame().
+  rtc::Event frame_inserted_event_;
+  VCMJitterEstimator* const jitter_estimator_;  // Not owned.
+  const VCMTiming* const timing_;               // Not owned.
+  // Latest picture id inserted so far; -1 until the first frame arrives.
+  int newest_picture_id_ GUARDED_BY(crit_);
+
+  RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(FrameBuffer);
+};
+
+}  // namespace video_coding
+}  // namespace webrtc
+
+#endif  // WEBRTC_MODULES_VIDEO_CODING_FRAME_BUFFER2_H_
diff --git a/webrtc/modules/video_coding/frame_buffer2_unittest.cc b/webrtc/modules/video_coding/frame_buffer2_unittest.cc
new file mode 100644
index 0000000..67706ce
--- /dev/null
+++ b/webrtc/modules/video_coding/frame_buffer2_unittest.cc
@@ -0,0 +1,329 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/video_coding/frame_buffer2.h"
+
+#include <algorithm>
+#include <cstring>
+#include <limits>
+#include <vector>
+
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/platform_thread.h"
+#include "webrtc/base/random.h"
+#include "webrtc/modules/video_coding/frame_object.h"
+#include "webrtc/modules/video_coding/jitter_estimator.h"
+#include "webrtc/modules/video_coding/sequence_number_util.h"
+#include "webrtc/modules/video_coding/timing.h"
+#include "webrtc/system_wrappers/include/clock.h"
+
+namespace webrtc {
+namespace video_coding {
+
+// Fake timing component: anchors the render schedule |kDelayMs| after the
+// first queried frame and then moves the render time forward/backward in
+// proportion to the 90 kHz timestamp delta between consecutive calls.
+class VCMTimingFake : public VCMTiming {
+ public:
+  explicit VCMTimingFake(Clock* clock) : VCMTiming(clock) {}
+
+  int64_t RenderTimeMs(uint32_t frame_timestamp,
+                       int64_t now_ms) const override {
+    // First call establishes the render schedule baseline.
+    if (last_ms_ == -1) {
+      last_ms_ = now_ms + kDelayMs;
+      last_timestamp_ = frame_timestamp;
+    }
+
+    // 90 kHz RTP clock: 90 ticks per millisecond.
+    uint32_t diff = MinDiff(frame_timestamp, last_timestamp_);
+    if (AheadOf(frame_timestamp, last_timestamp_))
+      last_ms_ += diff / 90;
+    else
+      last_ms_ -= diff / 90;
+
+    last_timestamp_ = frame_timestamp;
+    return last_ms_;
+  }
+
+  uint32_t MaxWaitingTime(int64_t render_time_ms,
+                          int64_t now_ms) const override {
+    // Time left before decoding must start, clamped at zero.
+    return std::max<int>(0, render_time_ms - now_ms - kDecodeTime);
+  }
+
+ private:
+  static constexpr int kDelayMs = 50;
+  static constexpr int kDecodeTime = kDelayMs / 2;
+  // mutable because RenderTimeMs() is const in the VCMTiming interface,
+  // while this fake has to track state between calls.
+  mutable uint32_t last_timestamp_ = 0;
+  mutable int64_t last_ms_ = -1;
+};
+
+// gmock jitter estimator, so tests can observe the UpdateRtt()/
+// UpdateEstimate() calls made by the FrameBuffer under test.
+class VCMJitterEstimatorMock : public VCMJitterEstimator {
+ public:
+  explicit VCMJitterEstimatorMock(Clock* clock) : VCMJitterEstimator(clock) {}
+
+  MOCK_METHOD1(UpdateRtt, void(int64_t rttMs));
+  // NOTE(review): mocked with three parameters while FrameBuffer calls
+  // UpdateEstimate(100, 100) — presumably the base declares a default for
+  // |incompleteFrame|; confirm against VCMJitterEstimator.
+  MOCK_METHOD3(UpdateEstimate,
+               void(int64_t frameDelayMs,
+                    uint32_t frameSizeBytes,
+                    bool incompleteFrame));
+};
+
+// Minimal concrete FrameObject for the tests; only the public data members
+// set by InsertFrame() matter here, GetBitstream is mocked with no
+// expectations attached.
+class FrameObjectMock : public FrameObject {
+ public:
+  MOCK_CONST_METHOD1(GetBitstream, bool(uint8_t* destination));
+};
+
+// Test fixture driving a FrameBuffer with a simulated clock. Frames are
+// extracted either inline (max_wait_time == 0) or on |extract_thread_|,
+// which allows exercising NextFrame() calls that block on a timeout while
+// the test body keeps inserting frames.
+class TestFrameBuffer2 : public ::testing::Test {
+ protected:
+  static constexpr int kMaxReferences = 5;
+  static constexpr int kFps1 = 1000;
+  static constexpr int kFps10 = kFps1 / 10;
+  static constexpr int kFps20 = kFps1 / 20;
+
+  TestFrameBuffer2()
+      : clock_(0),
+        timing_(&clock_),
+        jitter_estimator_(&clock_),
+        buffer_(&clock_, &jitter_estimator_, &timing_),
+        rand_(0x34678213),
+        tear_down_(false),
+        extract_thread_(&ExtractLoop, this, "Extract Thread"),
+        trigger_extract_event_(false, false),
+        crit_acquired_event_(false, false) {}
+
+  void SetUp() override { extract_thread_.Start(); }
+
+  void TearDown() override {
+    tear_down_ = true;
+    trigger_extract_event_.Set();
+    extract_thread_.Stop();
+  }
+
+  // Builds a FrameObjectMock from the given parameters and inserts it into
+  // the buffer under test. |refs| are the picture ids |picture_id| depends
+  // on; |ts_ms| is converted to a 90 kHz RTP timestamp.
+  template <typename... T>
+  void InsertFrame(uint16_t picture_id,
+                   uint8_t spatial_layer,
+                   int64_t ts_ms,
+                   bool inter_layer_predicted,
+                   T... refs) {
+    static_assert(sizeof...(refs) <= kMaxReferences,
+                  "Too many references specified for FrameObject.");
+    std::array<uint16_t, sizeof...(refs)> references = {{refs...}};
+
+    std::unique_ptr<FrameObjectMock> frame(new FrameObjectMock());
+    frame->picture_id = picture_id;
+    frame->spatial_layer = spatial_layer;
+    frame->timestamp = ts_ms * 90;  // 90 ticks per millisecond.
+    frame->num_references = references.size();
+    frame->inter_layer_predicted = inter_layer_predicted;
+    for (size_t r = 0; r < references.size(); ++r)
+      frame->references[r] = references[r];
+
+    buffer_.InsertFrame(std::move(frame));
+  }
+
+  // Extracts the next frame into |frames_|. A zero wait runs inline;
+  // otherwise the blocking NextFrame() call is delegated to
+  // |extract_thread_| so this thread can continue inserting frames.
+  void ExtractFrame(int64_t max_wait_time = 0) {
+    crit_.Enter();
+    if (max_wait_time == 0) {
+      frames_.emplace_back(buffer_.NextFrame(0));
+      crit_.Leave();
+    } else {
+      max_wait_time_ = max_wait_time;
+      trigger_extract_event_.Set();
+      crit_.Leave();
+      // Make sure |crit_| is acquired by |extract_thread_| before returning.
+      crit_acquired_event_.Wait(rtc::Event::kForever);
+    }
+  }
+
+  // Asserts that extraction |index| produced a frame with the expected
+  // picture id and spatial layer.
+  void CheckFrame(size_t index, int picture_id, int spatial_layer) {
+    rtc::CritScope lock(&crit_);
+    ASSERT_LT(index, frames_.size());
+    ASSERT_TRUE(frames_[index]);
+    ASSERT_EQ(picture_id, frames_[index]->picture_id);
+    ASSERT_EQ(spatial_layer, frames_[index]->spatial_layer);
+  }
+
+  // Asserts that extraction |index| timed out without producing a frame.
+  void CheckNoFrame(size_t index) {
+    rtc::CritScope lock(&crit_);
+    ASSERT_LT(index, frames_.size());
+    ASSERT_FALSE(frames_[index]);
+  }
+
+  // Thread entry point: waits for ExtractFrame() to request a blocking
+  // extraction, performs it under |crit_|, and loops until TearDown() sets
+  // |tear_down_|.
+  static bool ExtractLoop(void* obj) {
+    TestFrameBuffer2* tfb = static_cast<TestFrameBuffer2*>(obj);
+    while (true) {
+      tfb->trigger_extract_event_.Wait(rtc::Event::kForever);
+      {
+        rtc::CritScope lock(&tfb->crit_);
+        tfb->crit_acquired_event_.Set();
+        if (tfb->tear_down_)
+          return false;
+
+        tfb->frames_.emplace_back(tfb->buffer_.NextFrame(tfb->max_wait_time_));
+      }
+    }
+  }
+
+  uint32_t Rand() { return rand_.Rand<uint32_t>(); }
+
+  SimulatedClock clock_;
+  VCMTimingFake timing_;
+  VCMJitterEstimatorMock jitter_estimator_;
+  FrameBuffer buffer_;
+  // Extraction results, in extraction order; null entries mark timeouts.
+  std::vector<std::unique_ptr<FrameObject>> frames_;
+  Random rand_;
+
+  int64_t max_wait_time_;
+  bool tear_down_;
+  rtc::PlatformThread extract_thread_;
+  rtc::Event trigger_extract_event_;
+  rtc::Event crit_acquired_event_;
+  rtc::CriticalSection crit_;
+};
+
+// Extracting with zero wait from an empty buffer yields a null frame.
+TEST_F(TestFrameBuffer2, ExtractFromEmptyBuffer) {
+  ExtractFrame();
+  CheckNoFrame(0);
+}
+
+// A blocking extraction started before insertion picks up the frame once
+// it is inserted.
+TEST_F(TestFrameBuffer2, WaitForFrame) {
+  uint16_t pid = Rand();
+  uint32_t ts = Rand();
+
+  ExtractFrame(20);
+  InsertFrame(pid, 0, ts, false);
+  CheckFrame(0, pid, 0);
+}
+
+// Spatial layers of the same super frame come out base layer first, even
+// though the inter-layer-predicted enhancement layer was inserted first.
+TEST_F(TestFrameBuffer2, OneSuperFrame) {
+  uint16_t pid = Rand();
+  uint32_t ts = Rand();
+
+  ExtractFrame(20);
+  InsertFrame(pid, 1, ts, true);
+  InsertFrame(pid, 0, ts, false);
+  ExtractFrame();
+
+  CheckFrame(0, pid, 0);
+  CheckFrame(1, pid, 1);
+}
+
+// A single-layer stream where each frame references the previous one is
+// delivered strictly in order.
+TEST_F(TestFrameBuffer2, OneLayerStream) {
+  uint16_t pid = Rand();
+  uint32_t ts = Rand();
+
+  InsertFrame(pid, 0, ts, false);
+  ExtractFrame();
+  CheckFrame(0, pid, 0);
+  for (int i = 1; i < 10; ++i) {
+    InsertFrame(pid + i, 0, ts + i * kFps10, false, pid + i - 1);
+    ExtractFrame();
+    clock_.AdvanceTimeMilliseconds(kFps10);
+    CheckFrame(i, pid + i, 0);
+  }
+}
+
+// Frames inserted out of order (i+1 before i) are still delivered in
+// decoding order.
+TEST_F(TestFrameBuffer2, OneLayerStreamReordered) {
+  uint16_t pid = Rand();
+  uint32_t ts = Rand();
+
+  InsertFrame(pid, 0, ts, false);
+  ExtractFrame();
+  CheckFrame(0, pid, 0);
+  for (int i = 1; i < 10; i += 2) {
+    ExtractFrame(15);
+    InsertFrame(pid + i + 1, 0, ts + (i + 1) * kFps10, false, pid + i);
+    clock_.AdvanceTimeMilliseconds(kFps10);
+    InsertFrame(pid + i, 0, ts + i * kFps10, false, pid + i - 1);
+    clock_.AdvanceTimeMilliseconds(kFps10);
+    ExtractFrame();
+    CheckFrame(i, pid + i, 0);
+    CheckFrame(i + 1, pid + i + 1, 0);
+  }
+}
+
+// With extraction slower than real time (60 ms per extract) the buffer
+// skips the upper temporal layer: after the first two frames only the
+// even-pid (TL0) frames are delivered, the rest time out.
+TEST_F(TestFrameBuffer2, DropTemporalLayerSlowDecoder) {
+  uint16_t pid = Rand();
+  uint32_t ts = Rand();
+
+  InsertFrame(pid, 0, ts, false);
+  InsertFrame(pid + 1, 0, ts + kFps20, false);
+  for (int i = 2; i < 10; i += 2) {
+    uint32_t ts_tl0 = ts + i / 2 * kFps10;
+    InsertFrame(pid + i, 0, ts_tl0, false, pid + i - 2);
+    InsertFrame(pid + i + 1, 0, ts_tl0 + kFps20, false, pid + i, pid + i - 1);
+  }
+
+  for (int i = 0; i < 10; ++i) {
+    ExtractFrame();
+    clock_.AdvanceTimeMilliseconds(60);
+  }
+
+  CheckFrame(0, pid, 0);
+  CheckFrame(1, pid + 1, 0);
+  CheckFrame(2, pid + 2, 0);
+  CheckFrame(3, pid + 4, 0);
+  CheckFrame(4, pid + 6, 0);
+  CheckFrame(5, pid + 8, 0);
+  CheckNoFrame(6);
+  CheckNoFrame(7);
+  CheckNoFrame(8);
+  CheckNoFrame(9);
+}
+
+// With a slow decoder (55 ms per extract) the buffer eventually stops
+// delivering the upper spatial layer: layer 1 frames are dropped from
+// pid + 3 onward and the final extractions time out.
+TEST_F(TestFrameBuffer2, DropSpatialLayerSlowDecoder) {
+  uint16_t pid = Rand();
+  uint32_t ts = Rand();
+
+  InsertFrame(pid, 0, ts, false);
+  InsertFrame(pid, 1, ts, false);
+  for (int i = 1; i < 6; ++i) {
+    uint32_t ts_tl0 = ts + i * kFps10;
+    InsertFrame(pid + i, 0, ts_tl0, false, pid + i - 1);
+    InsertFrame(pid + i, 1, ts_tl0, false, pid + i - 1);
+  }
+
+  ExtractFrame();
+  ExtractFrame();
+  clock_.AdvanceTimeMilliseconds(55);
+  for (int i = 2; i < 12; ++i) {
+    ExtractFrame();
+    clock_.AdvanceTimeMilliseconds(55);
+  }
+
+  CheckFrame(0, pid, 0);
+  CheckFrame(1, pid, 1);
+  CheckFrame(2, pid + 1, 0);
+  CheckFrame(3, pid + 1, 1);
+  CheckFrame(4, pid + 2, 0);
+  CheckFrame(5, pid + 2, 1);
+  CheckFrame(6, pid + 3, 0);
+  CheckFrame(7, pid + 4, 0);
+  CheckFrame(8, pid + 5, 0);
+  CheckNoFrame(9);
+  CheckNoFrame(10);
+  CheckNoFrame(11);
+}
+
+// A frame older than the most recently decoded one is never delivered:
+// pid + 1 arrives after pid + 2 was already decoded, so the third
+// extraction yields nothing.
+TEST_F(TestFrameBuffer2, InsertLateFrame) {
+  uint16_t pid = Rand();
+  uint32_t ts = Rand();
+
+  InsertFrame(pid, 0, ts, false);
+  ExtractFrame();
+  InsertFrame(pid + 2, 0, ts, false);
+  ExtractFrame();
+  InsertFrame(pid + 1, 0, ts, false, pid);
+  ExtractFrame();
+
+  CheckFrame(0, pid, 0);
+  CheckFrame(1, pid + 2, 0);
+  CheckNoFrame(2);
+}
+
+}  // namespace video_coding
+}  // namespace webrtc
diff --git a/webrtc/modules/video_coding/frame_object.cc b/webrtc/modules/video_coding/frame_object.cc
index 1634f89..7b9ec0d 100644
--- a/webrtc/modules/video_coding/frame_object.cc
+++ b/webrtc/modules/video_coding/frame_object.cc
@@ -18,6 +18,7 @@
 FrameObject::FrameObject()
     : picture_id(0),
       spatial_layer(0),
+      timestamp(0),
       num_references(0),
       inter_layer_predicted(false) {}
 
diff --git a/webrtc/modules/video_coding/frame_object.h b/webrtc/modules/video_coding/frame_object.h
index 80ab0ec..e8bb481 100644
--- a/webrtc/modules/video_coding/frame_object.h
+++ b/webrtc/modules/video_coding/frame_object.h
@@ -31,6 +31,7 @@
   // have to be constructed from the header data relevant to that codec.
   uint16_t picture_id;
   uint8_t spatial_layer;
+  uint32_t timestamp;
 
   size_t num_references;
   uint16_t references[kMaxFrameReferences];
diff --git a/webrtc/modules/video_coding/timing.h b/webrtc/modules/video_coding/timing.h
index a45eee3..e593c9a 100644
--- a/webrtc/modules/video_coding/timing.h
+++ b/webrtc/modules/video_coding/timing.h
@@ -28,7 +28,7 @@
   // The primary timing component should be passed
   // if this is the dual timing component.
   explicit VCMTiming(Clock* clock, VCMTiming* master_timing = NULL);
-  ~VCMTiming();
+  virtual ~VCMTiming();
 
   // Resets the timing to the initial state.
   void Reset();
@@ -69,11 +69,11 @@
   // Returns the receiver system time when the frame with timestamp
   // frame_timestamp should be rendered, assuming that the system time currently
   // is now_ms.
-  int64_t RenderTimeMs(uint32_t frame_timestamp, int64_t now_ms) const;
+  virtual int64_t RenderTimeMs(uint32_t frame_timestamp, int64_t now_ms) const;
 
   // Returns the maximum time in ms that we can wait for a frame to become
   // complete before we must pass it to the decoder.
-  uint32_t MaxWaitingTime(int64_t render_time_ms, int64_t now_ms) const;
+  virtual uint32_t MaxWaitingTime(int64_t render_time_ms, int64_t now_ms) const;
 
   // Returns the current target delay which is required delay + decode time +
   // render delay.
diff --git a/webrtc/modules/video_coding/video_coding.gypi b/webrtc/modules/video_coding/video_coding.gypi
index 8d31a6a..27454a4 100644
--- a/webrtc/modules/video_coding/video_coding.gypi
+++ b/webrtc/modules/video_coding/video_coding.gypi
@@ -32,6 +32,7 @@
         'encoded_frame.h',
         'fec_tables_xor.h',
         'frame_buffer.h',
+        'frame_buffer2.h',
         'frame_object.h',
         'rtp_frame_reference_finder.h',
         'generic_decoder.h',
@@ -62,6 +63,7 @@
         'decoding_state.cc',
         'encoded_frame.cc',
         'frame_buffer.cc',
+        'frame_buffer2.cc',
         'frame_object.cc',
         'rtp_frame_reference_finder.cc',
         'generic_decoder.cc',