Make VideoFrameType an enum class, and move it to a separate file and target

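The old unscoped VideoFrameType constants are replaced by a scoped
enum, so every use has to be qualified (VideoFrameType::kVideoFrameKey
and friends), and converting the type to an integer now takes an
explicit cast, as seen in multiplex_encoded_image_packer.cc. Below is
a minimal sketch of what the new header (presumably
api/video/video_frame_type.h, going by the new api/video:video_frame_type
GN target) might declare; the enumerator names come from this change,
while the header layout and any explicit values are illustrative only:

  // api/video/video_frame_type.h (sketch, not the literal header).
  namespace webrtc {

  // Scoped replacement for the old unscoped frame-type constants.
  // The real header may pin explicit values so code that serializes
  // the type as a byte (e.g. the multiplex packer) stays compatible.
  enum class VideoFrameType {
    kEmptyFrame,
    kVideoFrameKey,
    kVideoFrameDelta,
  };

  }  // namespace webrtc

Call sites change from kVideoFrameKey to VideoFrameType::kVideoFrameKey,
and serialization spells out the conversion, e.g.
static_cast<uint8_t>(frame_header.frame_type).
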
Bug: webrtc:5876, webrtc:6883
Change-Id: I1435cfa9e8e54c4ba2978261048ff3fbb993ce0e
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/126225
Commit-Queue: Niels Moller <nisse@webrtc.org>
Reviewed-by: Karl Wiberg <kwiberg@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#27239}
diff --git a/modules/video_coding/BUILD.gn b/modules/video_coding/BUILD.gn
index 0e0866c..dfe183b 100644
--- a/modules/video_coding/BUILD.gn
+++ b/modules/video_coding/BUILD.gn
@@ -64,6 +64,7 @@
     "..:module_api",
     "../../:webrtc_common",
     "../../api:rtp_headers",
+    "../../api/video:video_frame_type",
     "../rtp_rtcp:rtp_rtcp_format",
     "../rtp_rtcp:rtp_video_header",
     "//third_party/abseil-cpp/absl/types:optional",
diff --git a/modules/video_coding/codecs/h264/h264_encoder_impl.cc b/modules/video_coding/codecs/h264/h264_encoder_impl.cc
index e641bae..9d96100 100644
--- a/modules/video_coding/codecs/h264/h264_encoder_impl.cc
+++ b/modules/video_coding/codecs/h264/h264_encoder_impl.cc
@@ -67,17 +67,17 @@
 VideoFrameType ConvertToVideoFrameType(EVideoFrameType type) {
   switch (type) {
     case videoFrameTypeIDR:
-      return kVideoFrameKey;
+      return VideoFrameType::kVideoFrameKey;
     case videoFrameTypeSkip:
     case videoFrameTypeI:
     case videoFrameTypeP:
     case videoFrameTypeIPMixed:
-      return kVideoFrameDelta;
+      return VideoFrameType::kVideoFrameDelta;
     case videoFrameTypeInvalid:
       break;
   }
   RTC_NOTREACHED() << "Unexpected/invalid frame type: " << type;
-  return kEmptyFrame;
+  return VideoFrameType::kEmptyFrame;
 }
 
 }  // namespace
@@ -409,7 +409,8 @@
   if (!send_key_frame && frame_types) {
     for (size_t i = 0; i < frame_types->size() && i < configurations_.size();
          ++i) {
-      if ((*frame_types)[i] == kVideoFrameKey && configurations_[i].sending) {
+      if ((*frame_types)[i] == VideoFrameType::kVideoFrameKey &&
+          configurations_[i].sending) {
         send_key_frame = true;
         break;
       }
@@ -462,7 +463,7 @@
     }
     if (frame_types != nullptr) {
       // Skip frame?
-      if ((*frame_types)[i] == kEmptyFrame) {
+      if ((*frame_types)[i] == VideoFrameType::kEmptyFrame) {
         continue;
       }
     }
diff --git a/modules/video_coding/codecs/h264/test/h264_impl_unittest.cc b/modules/video_coding/codecs/h264/test/h264_impl_unittest.cc
index 57c3a9f..8bd543a 100644
--- a/modules/video_coding/codecs/h264/test/h264_impl_unittest.cc
+++ b/modules/video_coding/codecs/h264/test/h264_impl_unittest.cc
@@ -70,7 +70,7 @@
   CodecSpecificInfo codec_specific_info;
   ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
   // First frame should be a key frame.
-  encoded_frame._frameType = kVideoFrameKey;
+  encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
             decoder_->Decode(encoded_frame, false, nullptr, 0));
   std::unique_ptr<VideoFrame> decoded_frame;
@@ -97,7 +97,7 @@
   CodecSpecificInfo codec_specific_info;
   ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
   // First frame should be a key frame.
-  encoded_frame._frameType = kVideoFrameKey;
+  encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
             decoder_->Decode(encoded_frame, false, nullptr, 0));
   std::unique_ptr<VideoFrame> decoded_frame;
diff --git a/modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.cc b/modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.cc
index 8c2d69b..5e4a97f 100644
--- a/modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.cc
+++ b/modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.cc
@@ -88,7 +88,8 @@
   ByteWriter<uint8_t>::WriteBigEndian(buffer + offset, frame_header.codec_type);
   offset += sizeof(uint8_t);
 
-  ByteWriter<uint8_t>::WriteBigEndian(buffer + offset, frame_header.frame_type);
+  ByteWriter<uint8_t>::WriteBigEndian(
+      buffer + offset, static_cast<uint8_t>(frame_header.frame_type));
   offset += sizeof(uint8_t);
 
   RTC_DCHECK_EQ(offset, kMultiplexImageComponentHeaderSize);
diff --git a/modules/video_coding/codecs/multiplex/multiplex_encoder_adapter.cc b/modules/video_coding/codecs/multiplex/multiplex_encoder_adapter.cc
index 6e3c5e2..16bcd51 100644
--- a/modules/video_coding/codecs/multiplex/multiplex_encoder_adapter.cc
+++ b/modules/video_coding/codecs/multiplex/multiplex_encoder_adapter.cc
@@ -145,9 +145,9 @@
 
   std::vector<VideoFrameType> adjusted_frame_types;
   if (key_frame_interval_ > 0 && picture_index_ % key_frame_interval_ == 0) {
-    adjusted_frame_types.push_back(kVideoFrameKey);
+    adjusted_frame_types.push_back(VideoFrameType::kVideoFrameKey);
   } else {
-    adjusted_frame_types.push_back(kVideoFrameDelta);
+    adjusted_frame_types.push_back(VideoFrameType::kVideoFrameDelta);
   }
   const bool has_alpha = input_image.video_frame_buffer()->type() ==
                          VideoFrameBuffer::Type::kI420A;
diff --git a/modules/video_coding/codecs/multiplex/test/multiplex_adapter_unittest.cc b/modules/video_coding/codecs/multiplex/test/multiplex_adapter_unittest.cc
index 3aa7c28..4c14680 100644
--- a/modules/video_coding/codecs/multiplex/test/multiplex_adapter_unittest.cc
+++ b/modules/video_coding/codecs/multiplex/test/multiplex_adapter_unittest.cc
@@ -276,7 +276,7 @@
   const MultiplexImageComponent& component = unpacked_frame.image_components[0];
   EXPECT_EQ(0, component.component_index);
   EXPECT_NE(nullptr, component.encoded_image.data());
-  EXPECT_EQ(kVideoFrameKey, component.encoded_image._frameType);
+  EXPECT_EQ(VideoFrameType::kVideoFrameKey, component.encoded_image._frameType);
 }
 
 TEST_P(TestMultiplexAdapter, CheckDoubleFramesEncodedBitstream) {
@@ -299,7 +299,8 @@
         unpacked_frame.image_components[i];
     EXPECT_EQ(i, component.component_index);
     EXPECT_NE(nullptr, component.encoded_image.data());
-    EXPECT_EQ(kVideoFrameKey, component.encoded_image._frameType);
+    EXPECT_EQ(VideoFrameType::kVideoFrameKey,
+              component.encoded_image._frameType);
   }
 }
 
@@ -314,7 +315,9 @@
     const MultiplexImage& unpacked_frame =
         MultiplexEncodedImagePacker::Unpack(encoded_frame);
     EXPECT_EQ(i, unpacked_frame.image_index);
-    EXPECT_EQ(i ? kVideoFrameDelta : kVideoFrameKey, encoded_frame._frameType);
+    EXPECT_EQ(
+        i ? VideoFrameType::kVideoFrameDelta : VideoFrameType::kVideoFrameKey,
+        encoded_frame._frameType);
   }
 }
 
diff --git a/modules/video_coding/codecs/test/videocodec_test_fixture_impl.cc b/modules/video_coding/codecs/test/videocodec_test_fixture_impl.cc
index 0eaee37..fe7fcb6 100644
--- a/modules/video_coding/codecs/test/videocodec_test_fixture_impl.cc
+++ b/modules/video_coding/codecs/test/videocodec_test_fixture_impl.cc
@@ -332,11 +332,11 @@
       contains_idr = true;
     }
   }
-  if (encoded_frame._frameType == kVideoFrameKey) {
+  if (encoded_frame._frameType == VideoFrameType::kVideoFrameKey) {
     EXPECT_TRUE(contains_sps) << "Keyframe should contain SPS.";
     EXPECT_TRUE(contains_pps) << "Keyframe should contain PPS.";
     EXPECT_TRUE(contains_idr) << "Keyframe should contain IDR.";
-  } else if (encoded_frame._frameType == kVideoFrameDelta) {
+  } else if (encoded_frame._frameType == VideoFrameType::kVideoFrameDelta) {
     EXPECT_FALSE(contains_sps) << "Delta frame should not contain SPS.";
     EXPECT_FALSE(contains_pps) << "Delta frame should not contain PPS.";
     EXPECT_FALSE(contains_idr) << "Delta frame should not contain IDR.";
diff --git a/modules/video_coding/codecs/test/videocodec_test_stats_impl.cc b/modules/video_coding/codecs/test/videocodec_test_stats_impl.cc
index 99c71da..4a7f978 100644
--- a/modules/video_coding/codecs/test/videocodec_test_stats_impl.cc
+++ b/modules/video_coding/codecs/test/videocodec_test_stats_impl.cc
@@ -222,7 +222,7 @@
     if (frame_stat.encoding_successful) {
       ++video_stat.num_encoded_frames;
 
-      if (frame_stat.frame_type == kVideoFrameKey) {
+      if (frame_stat.frame_type == VideoFrameType::kVideoFrameKey) {
         key_frame_size_bytes.AddSample(frame_stat.length_bytes);
         ++video_stat.num_key_frames;
       } else {
diff --git a/modules/video_coding/codecs/test/videoprocessor.cc b/modules/video_coding/codecs/test/videoprocessor.cc
index f1bc8ec..f73e06f 100644
--- a/modules/video_coding/codecs/test/videoprocessor.cc
+++ b/modules/video_coding/codecs/test/videoprocessor.cc
@@ -287,8 +287,9 @@
 
   // Encode.
   const std::vector<VideoFrameType> frame_types =
-      (frame_number == 0) ? std::vector<VideoFrameType>{kVideoFrameKey}
-                          : std::vector<VideoFrameType>{kVideoFrameDelta};
+      (frame_number == 0)
+          ? std::vector<VideoFrameType>{VideoFrameType::kVideoFrameKey}
+          : std::vector<VideoFrameType>{VideoFrameType::kVideoFrameDelta};
   const int encode_return_code = encoder_->Encode(input_frame, &frame_types);
   for (size_t i = 0; i < num_simulcast_or_spatial_layers_; ++i) {
     FrameStatistics* frame_stat = stats_->GetFrame(frame_number, i);
diff --git a/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc b/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc
index d7258c9..be94365 100644
--- a/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc
+++ b/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc
@@ -209,7 +209,7 @@
 
   // Always start with a complete key frame.
   if (key_frame_required_) {
-    if (input_image._frameType != kVideoFrameKey)
+    if (input_image._frameType != VideoFrameType::kVideoFrameKey)
       return WEBRTC_VIDEO_CODEC_ERROR;
     // We have a key frame - is it complete?
     if (input_image._completeFrame) {
@@ -220,7 +220,8 @@
   }
   // Restrict error propagation using key frame requests.
   // Reset on a key frame refresh.
-  if (input_image._frameType == kVideoFrameKey && input_image._completeFrame) {
+  if (input_image._frameType == VideoFrameType::kVideoFrameKey &&
+      input_image._completeFrame) {
     propagation_cnt_ = -1;
     // Start count on first loss.
   } else if ((!input_image._completeFrame || missing_frames) &&
diff --git a/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc b/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc
index 580f706..7282c55 100644
--- a/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc
+++ b/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc
@@ -756,7 +756,8 @@
   if (!send_key_frame && frame_types) {
     for (size_t i = 0; i < frame_types->size() && i < send_stream_.size();
          ++i) {
-      if ((*frame_types)[i] == kVideoFrameKey && send_stream_[i]) {
+      if ((*frame_types)[i] == VideoFrameType::kVideoFrameKey &&
+          send_stream_[i]) {
         send_key_frame = true;
         break;
       }
@@ -925,7 +926,7 @@
        ++encoder_idx, --stream_idx) {
     vpx_codec_iter_t iter = NULL;
     encoded_images_[encoder_idx].set_size(0);
-    encoded_images_[encoder_idx]._frameType = kVideoFrameDelta;
+    encoded_images_[encoder_idx]._frameType = VideoFrameType::kVideoFrameDelta;
     CodecSpecificInfo codec_specific;
     const vpx_codec_cx_pkt_t* pkt = NULL;
     while ((pkt = libvpx_->codec_get_cx_data(&encoders_[encoder_idx], &iter)) !=
@@ -947,7 +948,8 @@
       if ((pkt->data.frame.flags & VPX_FRAME_IS_FRAGMENT) == 0) {
         // check if encoded frame is a key frame
         if (pkt->data.frame.flags & VPX_FRAME_IS_KEY) {
-          encoded_images_[encoder_idx]._frameType = kVideoFrameKey;
+          encoded_images_[encoder_idx]._frameType =
+              VideoFrameType::kVideoFrameKey;
         }
         encoded_images_[encoder_idx].SetSpatialIndex(stream_idx);
         PopulateCodecSpecific(&codec_specific, *pkt, stream_idx, encoder_idx,
diff --git a/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc b/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc
index 05e2f05..705d60f 100644
--- a/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc
+++ b/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc
@@ -209,7 +209,7 @@
   EncodeAndWaitForFrame(*input_frame, &encoded_frame, &codec_specific_info);
 
   // First frame should be a key frame.
-  encoded_frame._frameType = kVideoFrameKey;
+  encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
             decoder_->Decode(encoded_frame, false, nullptr, -1));
   std::unique_ptr<VideoFrame> decoded_frame;
@@ -323,7 +323,7 @@
   EncodeAndWaitForFrame(*input_frame, &encoded_frame, &codec_specific_info);
 
   // First frame should be a key frame.
-  encoded_frame._frameType = kVideoFrameKey;
+  encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
   encoded_frame.ntp_time_ms_ = kTestNtpTimeMs;
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
             decoder_->Decode(encoded_frame, false, nullptr, -1));
@@ -354,12 +354,12 @@
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_ERROR,
             decoder_->Decode(encoded_frame, false, nullptr, -1));
   // Setting complete back to true. Forcing a delta frame.
-  encoded_frame._frameType = kVideoFrameDelta;
+  encoded_frame._frameType = VideoFrameType::kVideoFrameDelta;
   encoded_frame._completeFrame = true;
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_ERROR,
             decoder_->Decode(encoded_frame, false, nullptr, -1));
   // Now setting a key frame.
-  encoded_frame._frameType = kVideoFrameKey;
+  encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
             decoder_->Decode(encoded_frame, false, nullptr, -1));
   std::unique_ptr<VideoFrame> decoded_frame;
@@ -484,7 +484,8 @@
       .Times(2)
       .WillRepeatedly(Return(vpx_codec_err_t::VPX_CODEC_OK));
 
-  auto delta_frame = std::vector<VideoFrameType>{kVideoFrameDelta};
+  auto delta_frame =
+      std::vector<VideoFrameType>{VideoFrameType::kVideoFrameDelta};
   encoder.Encode(*NextInputFrame(), &delta_frame);
 }
 
diff --git a/modules/video_coding/codecs/vp9/test/vp9_impl_unittest.cc b/modules/video_coding/codecs/vp9/test/vp9_impl_unittest.cc
index 68124e4..913bc01 100644
--- a/modules/video_coding/codecs/vp9/test/vp9_impl_unittest.cc
+++ b/modules/video_coding/codecs/vp9/test/vp9_impl_unittest.cc
@@ -127,7 +127,7 @@
   CodecSpecificInfo codec_specific_info;
   ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
   // First frame should be a key frame.
-  encoded_frame._frameType = kVideoFrameKey;
+  encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
             decoder_->Decode(encoded_frame, false, nullptr, 0));
   std::unique_ptr<VideoFrame> decoded_frame;
@@ -227,7 +227,7 @@
   CodecSpecificInfo codec_specific_info;
   ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
   // First frame should be a key frame.
-  encoded_frame._frameType = kVideoFrameKey;
+  encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
             decoder_->Decode(encoded_frame, false, nullptr, 0));
   std::unique_ptr<VideoFrame> decoded_frame;
@@ -566,15 +566,19 @@
         const bool is_first_upper_layer_frame = (sl_idx > 0 && frame_num == 0);
         if (is_first_upper_layer_frame) {
           if (inter_layer_pred == InterLayerPredMode::kOn) {
-            EXPECT_EQ(encoded_frame[0]._frameType, kVideoFrameDelta);
+            EXPECT_EQ(encoded_frame[0]._frameType,
+                      VideoFrameType::kVideoFrameDelta);
           } else {
-            EXPECT_EQ(encoded_frame[0]._frameType, kVideoFrameKey);
+            EXPECT_EQ(encoded_frame[0]._frameType,
+                      VideoFrameType::kVideoFrameKey);
           }
         } else if (sl_idx == 0 && frame_num == 0) {
-          EXPECT_EQ(encoded_frame[0]._frameType, kVideoFrameKey);
+          EXPECT_EQ(encoded_frame[0]._frameType,
+                    VideoFrameType::kVideoFrameKey);
         } else {
           for (size_t i = 0; i <= sl_idx; ++i) {
-            EXPECT_EQ(encoded_frame[i]._frameType, kVideoFrameDelta);
+            EXPECT_EQ(encoded_frame[i]._frameType,
+                      VideoFrameType::kVideoFrameDelta);
           }
         }
       }
@@ -623,7 +627,7 @@
 
         for (size_t i = 0; i <= sl_idx; ++i) {
           const bool is_keyframe =
-              encoded_frame[0]._frameType == kVideoFrameKey;
+              encoded_frame[0]._frameType == VideoFrameType::kVideoFrameKey;
           const bool is_first_upper_layer_frame =
               (i == sl_idx && frame_num == 0);
           // Interframe references are there, unless it's a keyframe,
@@ -693,7 +697,7 @@
             encoder_->Encode(*NextInputFrame(), nullptr));
   ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
   ASSERT_EQ(codec_specific_info.size(), 1u);
-  EXPECT_EQ(encoded_frame[0]._frameType, kVideoFrameDelta);
+  EXPECT_EQ(encoded_frame[0]._frameType, VideoFrameType::kVideoFrameDelta);
   EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.temporal_idx, 1);
   EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.inter_pic_predicted, true);
 
@@ -712,7 +716,7 @@
             encoder_->Encode(*NextInputFrame(), nullptr));
   ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
   ASSERT_EQ(codec_specific_info.size(), 2u);
-  EXPECT_EQ(encoded_frame[0]._frameType, kVideoFrameDelta);
+  EXPECT_EQ(encoded_frame[0]._frameType, VideoFrameType::kVideoFrameDelta);
   EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.temporal_idx, 0);
   EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.inter_pic_predicted, true);
   EXPECT_EQ(codec_specific_info[1].codecSpecific.VP9.inter_pic_predicted, true);
@@ -772,7 +776,7 @@
               encoder_->Encode(*NextInputFrame(), nullptr));
     ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
     ASSERT_EQ(codec_specific_info.size(), 1u);
-    EXPECT_EQ(encoded_frame[0]._frameType, kVideoFrameDelta);
+    EXPECT_EQ(encoded_frame[0]._frameType, VideoFrameType::kVideoFrameDelta);
     EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.temporal_idx, 1 - i % 2);
     EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.inter_pic_predicted,
               true);
@@ -793,7 +797,7 @@
             encoder_->Encode(*NextInputFrame(), nullptr));
   ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
   ASSERT_EQ(codec_specific_info.size(), 2u);
-  EXPECT_EQ(encoded_frame[0]._frameType, kVideoFrameDelta);
+  EXPECT_EQ(encoded_frame[0]._frameType, VideoFrameType::kVideoFrameDelta);
   EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.temporal_idx, 0);
   EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.inter_pic_predicted, true);
   EXPECT_EQ(codec_specific_info[1].codecSpecific.VP9.inter_pic_predicted,
@@ -1442,7 +1446,7 @@
   CodecSpecificInfo codec_specific_info;
   ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
   // First frame should be a key frame.
-  encoded_frame._frameType = kVideoFrameKey;
+  encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
             decoder_->Decode(encoded_frame, false, nullptr, 0));
   std::unique_ptr<VideoFrame> decoded_frame;
diff --git a/modules/video_coding/codecs/vp9/vp9_impl.cc b/modules/video_coding/codecs/vp9/vp9_impl.cc
index 8982416..a6c7643 100644
--- a/modules/video_coding/codecs/vp9/vp9_impl.cc
+++ b/modules/video_coding/codecs/vp9/vp9_impl.cc
@@ -727,7 +727,7 @@
 
   // We only support one stream at the moment.
   if (frame_types && !frame_types->empty()) {
-    if ((*frame_types)[0] == kVideoFrameKey) {
+    if ((*frame_types)[0] == VideoFrameType::kVideoFrameKey) {
       force_key_frame_ = true;
     }
   }
@@ -1324,9 +1324,9 @@
   RTC_DCHECK(is_key_frame || !force_key_frame_);
 
   // Check if encoded frame is a key frame.
-  encoded_image_._frameType = kVideoFrameDelta;
+  encoded_image_._frameType = VideoFrameType::kVideoFrameDelta;
   if (is_key_frame) {
-    encoded_image_._frameType = kVideoFrameKey;
+    encoded_image_._frameType = VideoFrameType::kVideoFrameKey;
     force_key_frame_ = false;
   }
   RTC_DCHECK_LE(encoded_image_.size(), encoded_image_.capacity());
@@ -1539,7 +1539,7 @@
   }
   // Always start with a complete key frame.
   if (key_frame_required_) {
-    if (input_image._frameType != kVideoFrameKey)
+    if (input_image._frameType != VideoFrameType::kVideoFrameKey)
       return WEBRTC_VIDEO_CODEC_ERROR;
     // We have a key frame - is it complete?
     if (input_image._completeFrame) {
diff --git a/modules/video_coding/decoding_state.cc b/modules/video_coding/decoding_state.cc
index 1d54063..16266cb 100644
--- a/modules/video_coding/decoding_state.cc
+++ b/modules/video_coding/decoding_state.cc
@@ -100,7 +100,7 @@
     uint16_t frame_index = picture_id_ % kFrameDecodedLength;
     if (in_initial_state_) {
       frame_decoded_cleared_to_ = frame_index;
-    } else if (frame->FrameType() == kVideoFrameKey) {
+    } else if (frame->FrameType() == VideoFrameType::kVideoFrameKey) {
       memset(frame_decoded_, 0, sizeof(frame_decoded_));
       frame_decoded_cleared_to_ = frame_index;
     } else {
@@ -176,7 +176,8 @@
   if (frame->TemporalId() == kNoTemporalIdx ||
       frame->Tl0PicId() == kNoTl0PicIdx) {
     full_sync_ = true;
-  } else if (frame->FrameType() == kVideoFrameKey || frame->LayerSync()) {
+  } else if (frame->FrameType() == VideoFrameType::kVideoFrameKey ||
+             frame->LayerSync()) {
     full_sync_ = true;
   } else if (full_sync_) {
     // Verify that we are still in sync.
@@ -207,7 +208,7 @@
   // A key frame is always considered continuous as it doesn't refer to any
   // frames and therefore won't introduce any errors even if prior frames are
   // missing.
-  if (frame->FrameType() == kVideoFrameKey &&
+  if (frame->FrameType() == VideoFrameType::kVideoFrameKey &&
       HaveSpsAndPps(frame->GetNaluInfos())) {
     return true;
   }
diff --git a/modules/video_coding/decoding_state_unittest.cc b/modules/video_coding/decoding_state_unittest.cc
index 6aeeb64..a578900 100644
--- a/modules/video_coding/decoding_state_unittest.cc
+++ b/modules/video_coding/decoding_state_unittest.cc
@@ -38,7 +38,7 @@
   packet.video_header.is_first_packet_in_frame = true;
   packet.timestamp = 1;
   packet.seqNum = 0xffff;
-  packet.frameType = kVideoFrameDelta;
+  packet.frameType = VideoFrameType::kVideoFrameDelta;
   packet.video_header.codec = kVideoCodecVP8;
   auto& vp8_header =
       packet.video_header.video_type_header.emplace<RTPVideoHeaderVP8>();
@@ -50,12 +50,12 @@
   // Always start with a key frame.
   dec_state.Reset();
   EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
-  packet.frameType = kVideoFrameKey;
+  packet.frameType = VideoFrameType::kVideoFrameKey;
   EXPECT_LE(0, frame_key.InsertPacket(packet, 0, frame_data));
   EXPECT_TRUE(dec_state.ContinuousFrame(&frame_key));
   dec_state.SetState(&frame);
   frame.Reset();
-  packet.frameType = kVideoFrameDelta;
+  packet.frameType = VideoFrameType::kVideoFrameDelta;
   // Use pictureId
   packet.video_header.is_first_packet_in_frame = false;
   vp8_header.pictureId = 0x0002;
@@ -171,7 +171,7 @@
   VCMPacket packet;
   packet.timestamp = 1;
   packet.seqNum = 1;
-  packet.frameType = kVideoFrameDelta;
+  packet.frameType = VideoFrameType::kVideoFrameDelta;
   FrameData frame_data;
   frame_data.rtt_ms = 0;
   frame_data.rolling_average_packets_per_frame = -1;
@@ -186,14 +186,14 @@
   // Now insert empty packet belonging to the same frame.
   packet.timestamp = 1;
   packet.seqNum = 2;
-  packet.frameType = kEmptyFrame;
+  packet.frameType = VideoFrameType::kEmptyFrame;
   packet.sizeBytes = 0;
   dec_state.UpdateOldPacket(&packet);
   EXPECT_EQ(dec_state.sequence_num(), 2);
   // Now insert delta packet belonging to the same frame.
   packet.timestamp = 1;
   packet.seqNum = 3;
-  packet.frameType = kVideoFrameDelta;
+  packet.frameType = VideoFrameType::kVideoFrameDelta;
   packet.sizeBytes = 1400;
   dec_state.UpdateOldPacket(&packet);
   EXPECT_EQ(dec_state.sequence_num(), 3);
@@ -201,7 +201,7 @@
   // sequence number.
   packet.timestamp = 0;
   packet.seqNum = 4;
-  packet.frameType = kEmptyFrame;
+  packet.frameType = VideoFrameType::kEmptyFrame;
   packet.sizeBytes = 0;
   dec_state.UpdateOldPacket(&packet);
   EXPECT_EQ(dec_state.sequence_num(), 3);
@@ -215,7 +215,7 @@
   // tl0PicIdx 0, temporal id 0.
   VCMFrameBuffer frame;
   VCMPacket packet;
-  packet.frameType = kVideoFrameDelta;
+  packet.frameType = VideoFrameType::kVideoFrameDelta;
   packet.video_header.codec = kVideoCodecVP8;
   packet.timestamp = 0;
   packet.seqNum = 0;
@@ -266,7 +266,7 @@
   // Insert key frame - should update sync value.
   // A key frame is always a base layer.
   frame.Reset();
-  packet.frameType = kVideoFrameKey;
+  packet.frameType = VideoFrameType::kVideoFrameKey;
   packet.video_header.is_first_packet_in_frame = true;
   packet.timestamp = 5;
   packet.seqNum = 5;
@@ -280,7 +280,7 @@
   // After sync, a continuous PictureId is required
   // (continuous base layer is not enough )
   frame.Reset();
-  packet.frameType = kVideoFrameDelta;
+  packet.frameType = VideoFrameType::kVideoFrameDelta;
   packet.timestamp = 6;
   packet.seqNum = 6;
   vp8_header.tl0PicIdx = 3;
@@ -290,7 +290,7 @@
   EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
   EXPECT_TRUE(dec_state.full_sync());
   frame.Reset();
-  packet.frameType = kVideoFrameDelta;
+  packet.frameType = VideoFrameType::kVideoFrameDelta;
   packet.video_header.is_first_packet_in_frame = true;
   packet.timestamp = 8;
   packet.seqNum = 8;
@@ -305,7 +305,7 @@
 
   // Insert a non-ref frame - should update sync value.
   frame.Reset();
-  packet.frameType = kVideoFrameDelta;
+  packet.frameType = VideoFrameType::kVideoFrameDelta;
   packet.video_header.is_first_packet_in_frame = true;
   packet.timestamp = 9;
   packet.seqNum = 9;
@@ -325,7 +325,7 @@
   // Base layer.
   frame.Reset();
   dec_state.Reset();
-  packet.frameType = kVideoFrameDelta;
+  packet.frameType = VideoFrameType::kVideoFrameDelta;
   packet.video_header.is_first_packet_in_frame = true;
   packet.markerBit = 1;
   packet.timestamp = 0;
@@ -339,7 +339,7 @@
   EXPECT_TRUE(dec_state.full_sync());
   // Layer 2 - 2 packets (insert one, lose one).
   frame.Reset();
-  packet.frameType = kVideoFrameDelta;
+  packet.frameType = VideoFrameType::kVideoFrameDelta;
   packet.video_header.is_first_packet_in_frame = true;
   packet.markerBit = 0;
   packet.timestamp = 1;
@@ -352,7 +352,7 @@
   EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
   // Layer 1
   frame.Reset();
-  packet.frameType = kVideoFrameDelta;
+  packet.frameType = VideoFrameType::kVideoFrameDelta;
   packet.video_header.is_first_packet_in_frame = true;
   packet.markerBit = 1;
   packet.timestamp = 2;
@@ -371,7 +371,7 @@
   VCMFrameBuffer frame;
   VCMPacket packet;
   frame.Reset();
-  packet.frameType = kVideoFrameKey;
+  packet.frameType = VideoFrameType::kVideoFrameKey;
   packet.video_header.codec = kVideoCodecVP8;
   packet.timestamp = 0;
   packet.seqNum = 0;
@@ -390,7 +390,7 @@
   // Continuous sequence number but discontinuous picture id. This implies a
   // a loss and we have to fall back to only decoding the base layer.
   frame.Reset();
-  packet.frameType = kVideoFrameDelta;
+  packet.frameType = VideoFrameType::kVideoFrameDelta;
   packet.timestamp += 3000;
   ++packet.seqNum;
   vp8_header.temporalIdx = 1;
@@ -426,7 +426,7 @@
   VCMDecodingState dec_state;
   VCMFrameBuffer frame;
   VCMPacket packet;
-  packet.frameType = kVideoFrameDelta;
+  packet.frameType = VideoFrameType::kVideoFrameDelta;
   packet.video_header.codec = kVideoCodecVP8;
   packet.timestamp = 0;
   packet.seqNum = 0;
@@ -479,7 +479,7 @@
   frame_data.rolling_average_packets_per_frame = -1;
 
   // Key frame as first frame
-  packet.frameType = kVideoFrameKey;
+  packet.frameType = VideoFrameType::kVideoFrameKey;
   EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
   EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
   dec_state.SetState(&frame);
@@ -493,7 +493,7 @@
 
   // Ref to 11, continuous
   frame.Reset();
-  packet.frameType = kVideoFrameDelta;
+  packet.frameType = VideoFrameType::kVideoFrameDelta;
   vp9_hdr.picture_id = 12;
   vp9_hdr.num_ref_pics = 1;
   vp9_hdr.pid_diff[0] = 1;
@@ -523,14 +523,14 @@
   frame_data.rolling_average_packets_per_frame = -1;
 
   // Key frame as first frame
-  packet.frameType = kVideoFrameKey;
+  packet.frameType = VideoFrameType::kVideoFrameKey;
   EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
   EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
   dec_state.SetState(&frame);
 
   // Ref to 10, continuous
   frame.Reset();
-  packet.frameType = kVideoFrameDelta;
+  packet.frameType = VideoFrameType::kVideoFrameDelta;
   vp9_hdr.picture_id = 15;
   vp9_hdr.num_ref_pics = 1;
   vp9_hdr.pid_diff[0] = 5;
@@ -579,23 +579,23 @@
   frame_data.rolling_average_packets_per_frame = -1;
 
   // Key frame as first frame
-  packet.frameType = kVideoFrameKey;
+  packet.frameType = VideoFrameType::kVideoFrameKey;
   EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
   EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
 
   // Delta frame as first frame
   frame.Reset();
-  packet.frameType = kVideoFrameDelta;
+  packet.frameType = VideoFrameType::kVideoFrameDelta;
   EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
   EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
 
   // Key frame then delta frame
   frame.Reset();
-  packet.frameType = kVideoFrameKey;
+  packet.frameType = VideoFrameType::kVideoFrameKey;
   EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
   dec_state.SetState(&frame);
   frame.Reset();
-  packet.frameType = kVideoFrameDelta;
+  packet.frameType = VideoFrameType::kVideoFrameDelta;
   vp9_hdr.num_ref_pics = 1;
   vp9_hdr.picture_id = 15;
   vp9_hdr.pid_diff[0] = 5;
@@ -639,7 +639,7 @@
 
   // Key Frame, continuous
   frame.Reset();
-  packet.frameType = kVideoFrameKey;
+  packet.frameType = VideoFrameType::kVideoFrameKey;
   vp9_hdr.picture_id = VCMDecodingState::kFrameDecodedLength - 2;
   vp9_hdr.num_ref_pics = 0;
   EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
@@ -648,7 +648,7 @@
 
   // Frame at last index, ref to KF, continuous
   frame.Reset();
-  packet.frameType = kVideoFrameDelta;
+  packet.frameType = VideoFrameType::kVideoFrameDelta;
   vp9_hdr.picture_id = VCMDecodingState::kFrameDecodedLength - 1;
   vp9_hdr.num_ref_pics = 1;
   vp9_hdr.pid_diff[0] = 1;
@@ -684,7 +684,7 @@
 
   // Key frame, continuous
   frame.Reset();
-  packet.frameType = kVideoFrameKey;
+  packet.frameType = VideoFrameType::kVideoFrameKey;
   vp9_hdr.picture_id = 25;
   vp9_hdr.num_ref_pics = 0;
   EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
@@ -693,7 +693,7 @@
 
   // Ref to KF, continuous
   frame.Reset();
-  packet.frameType = kVideoFrameDelta;
+  packet.frameType = VideoFrameType::kVideoFrameDelta;
   vp9_hdr.picture_id = 26;
   vp9_hdr.num_ref_pics = 1;
   vp9_hdr.pid_diff[0] = 1;
diff --git a/modules/video_coding/encoded_frame.cc b/modules/video_coding/encoded_frame.cc
index 6e8e342..a53f88d 100644
--- a/modules/video_coding/encoded_frame.cc
+++ b/modules/video_coding/encoded_frame.cc
@@ -39,7 +39,7 @@
   SetSpatialIndex(absl::nullopt);
   _renderTimeMs = -1;
   _payloadType = 0;
-  _frameType = kVideoFrameDelta;
+  _frameType = VideoFrameType::kVideoFrameDelta;
   _encodedWidth = 0;
   _encodedHeight = 0;
   _completeFrame = false;
diff --git a/modules/video_coding/fec_controller_default.cc b/modules/video_coding/fec_controller_default.cc
index 4502f2c..38fd076 100644
--- a/modules/video_coding/fec_controller_default.cc
+++ b/modules/video_coding/fec_controller_default.cc
@@ -181,7 +181,8 @@
   const size_t encoded_length = encoded_image_length;
   CritScope lock(&crit_sect_);
   if (encoded_length > 0) {
-    const bool delta_frame = encoded_image_frametype != kVideoFrameKey;
+    const bool delta_frame =
+        encoded_image_frametype != VideoFrameType::kVideoFrameKey;
     if (max_payload_size_ > 0 && encoded_length > 0) {
       const float min_packets_per_frame =
           encoded_length / static_cast<float>(max_payload_size_);
diff --git a/modules/video_coding/frame_buffer.cc b/modules/video_coding/frame_buffer.cc
index 19b822b..2c9aa23 100644
--- a/modules/video_coding/frame_buffer.cc
+++ b/modules/video_coding/frame_buffer.cc
@@ -94,7 +94,7 @@
     // We only take the ntp timestamp of the first packet of a frame.
     ntp_time_ms_ = packet.ntp_time_ms_;
     _codec = packet.codec();
-    if (packet.frameType != kEmptyFrame) {
+    if (packet.frameType != VideoFrameType::kEmptyFrame) {
       // first media packet
       SetState(kStateIncomplete);
     }
diff --git a/modules/video_coding/generic_decoder.cc b/modules/video_coding/generic_decoder.cc
index 6ef4c36..dd23066 100644
--- a/modules/video_coding/generic_decoder.cc
+++ b/modules/video_coding/generic_decoder.cc
@@ -220,7 +220,7 @@
   // Set correctly only for key frames. Thus, use latest key frame
   // content type. If the corresponding key frame was lost, decode will fail
   // and content type will be ignored.
-  if (frame.FrameType() == kVideoFrameKey) {
+  if (frame.FrameType() == VideoFrameType::kVideoFrameKey) {
     _frameInfos[_nextFrameInfoIdx].content_type = frame.contentType();
     _last_keyframe_content_type = frame.contentType();
   } else {
diff --git a/modules/video_coding/jitter_buffer.cc b/modules/video_coding/jitter_buffer.cc
index 12c191d..cc27323 100644
--- a/modules/video_coding/jitter_buffer.cc
+++ b/modules/video_coding/jitter_buffer.cc
@@ -45,7 +45,7 @@
 typedef std::pair<uint32_t, VCMFrameBuffer*> FrameListPair;
 
 bool IsKeyFrame(FrameListPair pair) {
-  return pair.second->FrameType() == kVideoFrameKey;
+  return pair.second->FrameType() == VideoFrameType::kVideoFrameKey;
 }
 
 bool HasNonEmptyState(FrameListPair pair) {
@@ -83,7 +83,8 @@
     free_frames->push_back(it->second);
     erase(it++);
     ++drop_count;
-    if (it != end() && it->second->FrameType() == kVideoFrameKey) {
+    if (it != end() &&
+        it->second->FrameType() == VideoFrameType::kVideoFrameKey) {
       *key_frame_it = it;
       return drop_count;
     }
@@ -651,7 +652,7 @@
 
   // Empty packets may bias the jitter estimate (lacking size component),
   // therefore don't let empty packet trigger the following updates:
-  if (packet.frameType != kEmptyFrame) {
+  if (packet.frameType != VideoFrameType::kEmptyFrame) {
     if (waiting_for_completion_.timestamp == packet.timestamp) {
       // This can get bad if we have a lot of duplicate packets,
       // we will then count some packet multiple times.
@@ -690,7 +691,7 @@
         frame->IncrementNackCount();
       }
       if (!UpdateNackList(packet.seqNum) &&
-          packet.frameType != kVideoFrameKey) {
+          packet.frameType != VideoFrameType::kVideoFrameKey) {
         buffer_state = kFlushIndicator;
       }
 
@@ -926,9 +927,10 @@
   }
   if (last_decoded_state_.in_initial_state()) {
     VCMFrameBuffer* next_frame = NextFrame();
-    const bool first_frame_is_key = next_frame &&
-                                    next_frame->FrameType() == kVideoFrameKey &&
-                                    next_frame->HaveFirstPacket();
+    const bool first_frame_is_key =
+        next_frame &&
+        next_frame->FrameType() == VideoFrameType::kVideoFrameKey &&
+        next_frame->HaveFirstPacket();
     if (!first_frame_is_key) {
       bool have_non_empty_frame =
           decodable_frames_.end() != find_if(decodable_frames_.begin(),
@@ -1131,7 +1133,7 @@
 void VCMJitterBuffer::CountFrame(const VCMFrameBuffer& frame) {
   incoming_frame_count_++;
 
-  if (frame.FrameType() == kVideoFrameKey) {
+  if (frame.FrameType() == VideoFrameType::kVideoFrameKey) {
     TRACE_EVENT_ASYNC_STEP0("webrtc", "Video", frame.Timestamp(),
                             "KeyComplete");
   } else {
@@ -1142,7 +1144,7 @@
   // Update receive statistics. We count all layers, thus when you use layers
   // adding all key and delta frames might differ from frame count.
   if (frame.IsSessionComplete()) {
-    if (frame.FrameType() == kVideoFrameKey) {
+    if (frame.FrameType() == VideoFrameType::kVideoFrameKey) {
       ++receive_statistics_.key_frames;
       if (receive_statistics_.key_frames == 1) {
         RTC_LOG(LS_INFO) << "Received first complete key frame";
diff --git a/modules/video_coding/jitter_buffer_unittest.cc b/modules/video_coding/jitter_buffer_unittest.cc
index 68a4b84..402b8fb 100644
--- a/modules/video_coding/jitter_buffer_unittest.cc
+++ b/modules/video_coding/jitter_buffer_unittest.cc
@@ -47,7 +47,7 @@
     packet_.seqNum = 1234;
     packet_.timestamp = 1;
     packet_.markerBit = true;
-    packet_.frameType = kVideoFrameKey;
+    packet_.frameType = VideoFrameType::kVideoFrameKey;
     packet_.video_header.codec = kVideoCodecVP9;
     packet_.video_header.codec = kVideoCodecVP9;
     vp9_header.flexible_mode = false;
@@ -248,7 +248,8 @@
     video_header.codec = kVideoCodecGeneric;
     video_header.is_first_packet_in_frame = true;
     packet_.reset(new VCMPacket(data_, size_, rtp_header, video_header,
-                                kVideoFrameDelta, /*ntp_time_ms=*/0));
+                                VideoFrameType::kVideoFrameDelta,
+                                /*ntp_time_ms=*/0));
   }
 
   VCMEncodedFrame* DecodeCompleteFrame() {
@@ -364,8 +365,9 @@
 
   VCMFrameBufferEnum InsertFrame(VideoFrameType frame_type) {
     stream_generator_->GenerateFrame(
-        frame_type, (frame_type != kEmptyFrame) ? 1 : 0,
-        (frame_type == kEmptyFrame) ? 1 : 0, clock_->TimeInMilliseconds());
+        frame_type, (frame_type != VideoFrameType::kEmptyFrame) ? 1 : 0,
+        (frame_type == VideoFrameType::kEmptyFrame) ? 1 : 0,
+        clock_->TimeInMilliseconds());
     VCMFrameBufferEnum ret = InsertPacketAndPop(0);
     clock_->AdvanceTimeMilliseconds(kDefaultFramePeriodMs);
     return ret;
@@ -385,7 +387,8 @@
   }
 
   void DropFrame(int num_packets) {
-    stream_generator_->GenerateFrame(kVideoFrameDelta, num_packets, 0,
+    stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameDelta,
+                                     num_packets, 0,
                                      clock_->TimeInMilliseconds());
     for (int i = 0; i < num_packets; ++i)
       stream_generator_->DropLastPacket();
@@ -434,7 +437,7 @@
 
 TEST_F(TestBasicJitterBuffer, SinglePacketFrame) {
   // Always start with a complete key frame when not allowing errors.
-  packet_->frameType = kVideoFrameKey;
+  packet_->frameType = VideoFrameType::kVideoFrameKey;
   packet_->video_header.is_first_packet_in_frame = true;
   packet_->markerBit = true;
   packet_->timestamp += 123 * 90;
@@ -445,14 +448,14 @@
             jitter_buffer_->InsertPacket(*packet_, &retransmitted));
   VCMEncodedFrame* frame_out = DecodeCompleteFrame();
   CheckOutFrame(frame_out, size_, false);
-  EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+  EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
   jitter_buffer_->ReleaseFrame(frame_out);
 }
 
 TEST_F(TestBasicJitterBuffer, VerifyHistogramStats) {
   metrics::Reset();
   // Always start with a complete key frame when not allowing errors.
-  packet_->frameType = kVideoFrameKey;
+  packet_->frameType = VideoFrameType::kVideoFrameKey;
   packet_->video_header.is_first_packet_in_frame = true;
   packet_->markerBit = true;
   packet_->timestamp += 123 * 90;
@@ -463,7 +466,7 @@
             jitter_buffer_->InsertPacket(*packet_, &retransmitted));
   VCMEncodedFrame* frame_out = DecodeCompleteFrame();
   CheckOutFrame(frame_out, size_, false);
-  EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+  EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
   jitter_buffer_->ReleaseFrame(frame_out);
 
   // Verify that histograms are updated when the jitter buffer is stopped.
@@ -487,7 +490,7 @@
 }
 
 TEST_F(TestBasicJitterBuffer, DualPacketFrame) {
-  packet_->frameType = kVideoFrameKey;
+  packet_->frameType = VideoFrameType::kVideoFrameKey;
   packet_->video_header.is_first_packet_in_frame = true;
   packet_->markerBit = false;
 
@@ -509,12 +512,12 @@
   frame_out = DecodeCompleteFrame();
   CheckOutFrame(frame_out, 2 * size_, false);
 
-  EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+  EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
   jitter_buffer_->ReleaseFrame(frame_out);
 }
 
 TEST_F(TestBasicJitterBuffer, 100PacketKeyFrame) {
-  packet_->frameType = kVideoFrameKey;
+  packet_->frameType = VideoFrameType::kVideoFrameKey;
   packet_->video_header.is_first_packet_in_frame = true;
   packet_->markerBit = false;
 
@@ -552,13 +555,13 @@
   frame_out = DecodeCompleteFrame();
 
   CheckOutFrame(frame_out, 100 * size_, false);
-  EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+  EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
   jitter_buffer_->ReleaseFrame(frame_out);
 }
 
 TEST_F(TestBasicJitterBuffer, 100PacketDeltaFrame) {
   // Always start with a complete key frame.
-  packet_->frameType = kVideoFrameKey;
+  packet_->frameType = VideoFrameType::kVideoFrameKey;
   packet_->video_header.is_first_packet_in_frame = true;
   packet_->markerBit = true;
 
@@ -572,7 +575,7 @@
   ++seq_num_;
   packet_->seqNum = seq_num_;
   packet_->markerBit = false;
-  packet_->frameType = kVideoFrameDelta;
+  packet_->frameType = VideoFrameType::kVideoFrameDelta;
   packet_->timestamp += 33 * 90;
 
   EXPECT_EQ(kIncomplete,
@@ -608,14 +611,14 @@
   frame_out = DecodeCompleteFrame();
 
   CheckOutFrame(frame_out, 100 * size_, false);
-  EXPECT_EQ(kVideoFrameDelta, frame_out->FrameType());
+  EXPECT_EQ(VideoFrameType::kVideoFrameDelta, frame_out->FrameType());
   jitter_buffer_->ReleaseFrame(frame_out);
 }
 
 TEST_F(TestBasicJitterBuffer, PacketReorderingReverseOrder) {
   // Insert the "first" packet last.
   seq_num_ += 100;
-  packet_->frameType = kVideoFrameKey;
+  packet_->frameType = VideoFrameType::kVideoFrameKey;
   packet_->video_header.is_first_packet_in_frame = false;
   packet_->markerBit = true;
   packet_->seqNum = seq_num_;
@@ -655,12 +658,12 @@
 
   CheckOutFrame(frame_out, 100 * size_, false);
 
-  EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+  EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
   jitter_buffer_->ReleaseFrame(frame_out);
 }
 
 TEST_F(TestBasicJitterBuffer, FrameReordering2Frames2PacketsEach) {
-  packet_->frameType = kVideoFrameDelta;
+  packet_->frameType = VideoFrameType::kVideoFrameDelta;
   packet_->video_header.is_first_packet_in_frame = true;
   packet_->markerBit = false;
 
@@ -686,7 +689,7 @@
 
   seq_num_ -= 3;
   timestamp_ -= 33 * 90;
-  packet_->frameType = kVideoFrameKey;
+  packet_->frameType = VideoFrameType::kVideoFrameKey;
   packet_->video_header.is_first_packet_in_frame = true;
   packet_->markerBit = false;
   packet_->seqNum = seq_num_;
@@ -710,17 +713,17 @@
 
   frame_out = DecodeCompleteFrame();
   CheckOutFrame(frame_out, 2 * size_, false);
-  EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+  EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
   jitter_buffer_->ReleaseFrame(frame_out);
 
   frame_out = DecodeCompleteFrame();
   CheckOutFrame(frame_out, 2 * size_, false);
-  EXPECT_EQ(kVideoFrameDelta, frame_out->FrameType());
+  EXPECT_EQ(VideoFrameType::kVideoFrameDelta, frame_out->FrameType());
   jitter_buffer_->ReleaseFrame(frame_out);
 }
 
 TEST_F(TestBasicJitterBuffer, TestReorderingWithPadding) {
-  packet_->frameType = kVideoFrameKey;
+  packet_->frameType = VideoFrameType::kVideoFrameKey;
   packet_->video_header.is_first_packet_in_frame = true;
   packet_->markerBit = true;
 
@@ -734,7 +737,7 @@
 
   // Now send in a complete delta frame (Frame C), but with a sequence number
   // gap. No pic index either, so no temporal scalability cheating :)
-  packet_->frameType = kVideoFrameDelta;
+  packet_->frameType = VideoFrameType::kVideoFrameDelta;
   // Leave a gap of 2 sequence numbers and two frames.
   packet_->seqNum = seq_num_ + 3;
   packet_->timestamp = timestamp_ + (66 * 90);
@@ -784,7 +787,7 @@
 }
 
 TEST_F(TestBasicJitterBuffer, DuplicatePackets) {
-  packet_->frameType = kVideoFrameKey;
+  packet_->frameType = VideoFrameType::kVideoFrameKey;
   packet_->video_header.is_first_packet_in_frame = true;
   packet_->markerBit = false;
   packet_->seqNum = seq_num_;
@@ -820,14 +823,14 @@
   ASSERT_TRUE(frame_out != NULL);
   CheckOutFrame(frame_out, 2 * size_, false);
 
-  EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+  EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
   EXPECT_EQ(3, jitter_buffer_->num_packets());
   EXPECT_EQ(1, jitter_buffer_->num_duplicated_packets());
   jitter_buffer_->ReleaseFrame(frame_out);
 }
 
 TEST_F(TestBasicJitterBuffer, DuplicatePreviousDeltaFramePacket) {
-  packet_->frameType = kVideoFrameKey;
+  packet_->frameType = VideoFrameType::kVideoFrameKey;
   packet_->video_header.is_first_packet_in_frame = true;
   packet_->markerBit = true;
   packet_->seqNum = seq_num_;
@@ -843,14 +846,14 @@
   VCMEncodedFrame* frame_out = DecodeCompleteFrame();
   ASSERT_TRUE(frame_out != NULL);
   CheckOutFrame(frame_out, size_, false);
-  EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+  EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
   jitter_buffer_->ReleaseFrame(frame_out);
 
   // Insert 3 delta frames.
   for (uint16_t i = 1; i <= 3; ++i) {
     packet_->seqNum = seq_num_ + i;
     packet_->timestamp = timestamp_ + (i * 33) * 90;
-    packet_->frameType = kVideoFrameDelta;
+    packet_->frameType = VideoFrameType::kVideoFrameDelta;
     EXPECT_EQ(kCompleteSession,
               jitter_buffer_->InsertPacket(*packet_, &retransmitted));
     EXPECT_EQ(i + 1, jitter_buffer_->num_packets());
@@ -872,7 +875,7 @@
     frame_out = DecodeCompleteFrame();
     ASSERT_TRUE(frame_out != NULL);
     CheckOutFrame(frame_out, size_, false);
-    EXPECT_EQ(kVideoFrameDelta, frame_out->FrameType());
+    EXPECT_EQ(VideoFrameType::kVideoFrameDelta, frame_out->FrameType());
     jitter_buffer_->ReleaseFrame(frame_out);
   }
 }
@@ -902,7 +905,7 @@
 
   packet_->seqNum = 65485;
   packet_->timestamp = 1000;
-  packet_->frameType = kVideoFrameKey;
+  packet_->frameType = VideoFrameType::kVideoFrameKey;
   vp9_header.picture_id = 5;
   vp9_header.tl0_pic_idx = 200;
   vp9_header.temporal_idx = 0;
@@ -914,7 +917,7 @@
   // Insert next temporal layer 0.
   packet_->seqNum = 65489;
   packet_->timestamp = 13000;
-  packet_->frameType = kVideoFrameDelta;
+  packet_->frameType = VideoFrameType::kVideoFrameDelta;
   vp9_header.picture_id = 9;
   vp9_header.tl0_pic_idx = 201;
   vp9_header.temporal_idx = 0;
@@ -923,12 +926,12 @@
 
   VCMEncodedFrame* frame_out = DecodeCompleteFrame();
   EXPECT_EQ(1000U, frame_out->Timestamp());
-  EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+  EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
   jitter_buffer_->ReleaseFrame(frame_out);
 
   frame_out = DecodeCompleteFrame();
   EXPECT_EQ(13000U, frame_out->Timestamp());
-  EXPECT_EQ(kVideoFrameDelta, frame_out->FrameType());
+  EXPECT_EQ(VideoFrameType::kVideoFrameDelta, frame_out->FrameType());
   jitter_buffer_->ReleaseFrame(frame_out);
 }
 
@@ -957,7 +960,7 @@
 
   packet_->seqNum = 65486;
   packet_->timestamp = 6000;
-  packet_->frameType = kVideoFrameDelta;
+  packet_->frameType = VideoFrameType::kVideoFrameDelta;
   vp9_header.picture_id = 6;
   vp9_header.temporal_idx = 2;
   vp9_header.temporal_up_switch = true;
@@ -965,7 +968,7 @@
 
   packet_->seqNum = 65487;
   packet_->timestamp = 9000;
-  packet_->frameType = kVideoFrameDelta;
+  packet_->frameType = VideoFrameType::kVideoFrameDelta;
   vp9_header.picture_id = 7;
   vp9_header.temporal_idx = 1;
   vp9_header.temporal_up_switch = true;
@@ -974,7 +977,7 @@
   // Insert first frame with SS data.
   packet_->seqNum = 65485;
   packet_->timestamp = 3000;
-  packet_->frameType = kVideoFrameKey;
+  packet_->frameType = VideoFrameType::kVideoFrameKey;
   packet_->video_header.width = 352;
   packet_->video_header.height = 288;
   vp9_header.picture_id = 5;
@@ -987,7 +990,7 @@
 
   VCMEncodedFrame* frame_out = DecodeCompleteFrame();
   EXPECT_EQ(3000U, frame_out->Timestamp());
-  EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+  EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
   EXPECT_EQ(0, frame_out->CodecSpecific()->codecSpecific.VP9.temporal_idx);
   EXPECT_FALSE(
       frame_out->CodecSpecific()->codecSpecific.VP9.temporal_up_switch);
@@ -995,14 +998,14 @@
 
   frame_out = DecodeCompleteFrame();
   EXPECT_EQ(6000U, frame_out->Timestamp());
-  EXPECT_EQ(kVideoFrameDelta, frame_out->FrameType());
+  EXPECT_EQ(VideoFrameType::kVideoFrameDelta, frame_out->FrameType());
   EXPECT_EQ(2, frame_out->CodecSpecific()->codecSpecific.VP9.temporal_idx);
   EXPECT_TRUE(frame_out->CodecSpecific()->codecSpecific.VP9.temporal_up_switch);
   jitter_buffer_->ReleaseFrame(frame_out);
 
   frame_out = DecodeCompleteFrame();
   EXPECT_EQ(9000U, frame_out->Timestamp());
-  EXPECT_EQ(kVideoFrameDelta, frame_out->FrameType());
+  EXPECT_EQ(VideoFrameType::kVideoFrameDelta, frame_out->FrameType());
   EXPECT_EQ(1, frame_out->CodecSpecific()->codecSpecific.VP9.temporal_idx);
   EXPECT_TRUE(frame_out->CodecSpecific()->codecSpecific.VP9.temporal_up_switch);
   jitter_buffer_->ReleaseFrame(frame_out);
@@ -1034,7 +1037,7 @@
   packet_->markerBit = false;
   packet_->seqNum = 65486;
   packet_->timestamp = 6000;
-  packet_->frameType = kVideoFrameDelta;
+  packet_->frameType = VideoFrameType::kVideoFrameDelta;
   vp9_header.spatial_idx = 0;
   vp9_header.picture_id = 6;
   vp9_header.temporal_idx = 1;
@@ -1044,7 +1047,7 @@
   packet_->video_header.is_first_packet_in_frame = false;
   packet_->markerBit = true;
   packet_->seqNum = 65487;
-  packet_->frameType = kVideoFrameDelta;
+  packet_->frameType = VideoFrameType::kVideoFrameDelta;
   vp9_header.spatial_idx = 1;
   vp9_header.picture_id = 6;
   vp9_header.temporal_idx = 1;
@@ -1055,7 +1058,7 @@
   packet_->markerBit = true;
   packet_->seqNum = 65485;
   packet_->timestamp = 3000;
-  packet_->frameType = kVideoFrameKey;
+  packet_->frameType = VideoFrameType::kVideoFrameKey;
   vp9_header.spatial_idx = 1;
   vp9_header.picture_id = 5;
   vp9_header.temporal_idx = 0;
@@ -1066,7 +1069,7 @@
   packet_->video_header.is_first_packet_in_frame = true;
   packet_->markerBit = false;
   packet_->seqNum = 65484;
-  packet_->frameType = kVideoFrameKey;
+  packet_->frameType = VideoFrameType::kVideoFrameKey;
   packet_->video_header.width = 352;
   packet_->video_header.height = 288;
   vp9_header.spatial_idx = 0;
@@ -1080,7 +1083,7 @@
 
   VCMEncodedFrame* frame_out = DecodeCompleteFrame();
   EXPECT_EQ(3000U, frame_out->Timestamp());
-  EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+  EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
   EXPECT_EQ(0, frame_out->CodecSpecific()->codecSpecific.VP9.temporal_idx);
   EXPECT_FALSE(
       frame_out->CodecSpecific()->codecSpecific.VP9.temporal_up_switch);
@@ -1088,14 +1091,14 @@
 
   frame_out = DecodeCompleteFrame();
   EXPECT_EQ(6000U, frame_out->Timestamp());
-  EXPECT_EQ(kVideoFrameDelta, frame_out->FrameType());
+  EXPECT_EQ(VideoFrameType::kVideoFrameDelta, frame_out->FrameType());
   EXPECT_EQ(1, frame_out->CodecSpecific()->codecSpecific.VP9.temporal_idx);
   EXPECT_TRUE(frame_out->CodecSpecific()->codecSpecific.VP9.temporal_up_switch);
   jitter_buffer_->ReleaseFrame(frame_out);
 }
 
 TEST_F(TestBasicJitterBuffer, H264InsertStartCode) {
-  packet_->frameType = kVideoFrameKey;
+  packet_->frameType = VideoFrameType::kVideoFrameKey;
   packet_->video_header.is_first_packet_in_frame = true;
   packet_->markerBit = false;
   packet_->seqNum = seq_num_;
@@ -1121,7 +1124,7 @@
 
   frame_out = DecodeCompleteFrame();
   CheckOutFrame(frame_out, size_ * 2 + 4 * 2, true);
-  EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+  EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
   jitter_buffer_->ReleaseFrame(frame_out);
 }
 
@@ -1129,7 +1132,7 @@
   auto& h264_header =
       packet_->video_header.video_type_header.emplace<RTPVideoHeaderH264>();
   packet_->timestamp = timestamp_;
-  packet_->frameType = kVideoFrameKey;
+  packet_->frameType = VideoFrameType::kVideoFrameKey;
   packet_->video_header.is_first_packet_in_frame = true;
   packet_->markerBit = true;
   packet_->video_header.codec = kVideoCodecH264;
@@ -1148,7 +1151,7 @@
   packet_->timestamp = timestamp_;
   ++seq_num_;
   packet_->seqNum = seq_num_;
-  packet_->frameType = kVideoFrameKey;
+  packet_->frameType = VideoFrameType::kVideoFrameKey;
   packet_->video_header.is_first_packet_in_frame = true;
   packet_->markerBit = false;
   packet_->video_header.codec = kVideoCodecH264;
@@ -1166,7 +1169,7 @@
 
   ++seq_num_;
   packet_->seqNum = seq_num_;
-  packet_->frameType = kVideoFrameKey;
+  packet_->frameType = VideoFrameType::kVideoFrameKey;
   packet_->video_header.is_first_packet_in_frame = false;
   packet_->markerBit = true;
   packet_->video_header.codec = kVideoCodecH264;
@@ -1187,7 +1190,7 @@
   packet_->timestamp = timestamp_;
   ++seq_num_;
   packet_->seqNum = seq_num_;
-  packet_->frameType = kVideoFrameDelta;
+  packet_->frameType = VideoFrameType::kVideoFrameDelta;
   packet_->video_header.is_first_packet_in_frame = true;
   packet_->markerBit = true;
   packet_->video_header.codec = kVideoCodecH264;
@@ -1206,7 +1209,7 @@
 
 TEST_F(TestBasicJitterBuffer, DeltaFrame100PacketsWithSeqNumWrap) {
   seq_num_ = 0xfff0;
-  packet_->frameType = kVideoFrameKey;
+  packet_->frameType = VideoFrameType::kVideoFrameKey;
   packet_->video_header.is_first_packet_in_frame = true;
   packet_->markerBit = false;
   packet_->seqNum = seq_num_;
@@ -1249,14 +1252,14 @@
 
   CheckOutFrame(frame_out, 100 * size_, false);
 
-  EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+  EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
   jitter_buffer_->ReleaseFrame(frame_out);
 }
 
 TEST_F(TestBasicJitterBuffer, PacketReorderingReverseWithNegSeqNumWrap) {
   // Insert "first" packet last seqnum.
   seq_num_ = 10;
-  packet_->frameType = kVideoFrameKey;
+  packet_->frameType = VideoFrameType::kVideoFrameKey;
   packet_->video_header.is_first_packet_in_frame = false;
   packet_->markerBit = true;
   packet_->seqNum = seq_num_;
@@ -1298,7 +1301,7 @@
 
   frame_out = DecodeCompleteFrame();
   CheckOutFrame(frame_out, 100 * size_, false);
-  EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+  EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
   jitter_buffer_->ReleaseFrame(frame_out);
 }
 
@@ -1309,7 +1312,7 @@
   //  t = 3000     t = 2000
   seq_num_ = 2;
   timestamp_ = 3000;
-  packet_->frameType = kVideoFrameKey;
+  packet_->frameType = VideoFrameType::kVideoFrameKey;
   packet_->video_header.is_first_packet_in_frame = true;
   packet_->markerBit = true;
   packet_->timestamp = timestamp_;
@@ -1322,12 +1325,12 @@
   VCMEncodedFrame* frame_out = DecodeCompleteFrame();
   EXPECT_EQ(3000u, frame_out->Timestamp());
   CheckOutFrame(frame_out, size_, false);
-  EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+  EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
   jitter_buffer_->ReleaseFrame(frame_out);
 
   seq_num_--;
   timestamp_ = 2000;
-  packet_->frameType = kVideoFrameDelta;
+  packet_->frameType = VideoFrameType::kVideoFrameDelta;
   packet_->video_header.is_first_packet_in_frame = true;
   packet_->markerBit = true;
   packet_->seqNum = seq_num_;
@@ -1344,7 +1347,7 @@
 
   seq_num_ = 2;
   timestamp_ = 3000;
-  packet_->frameType = kVideoFrameKey;
+  packet_->frameType = VideoFrameType::kVideoFrameKey;
   packet_->video_header.is_first_packet_in_frame = true;
   packet_->markerBit = true;
   packet_->seqNum = seq_num_;
@@ -1359,13 +1362,13 @@
 
   CheckOutFrame(frame_out, size_, false);
 
-  EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+  EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
 
   jitter_buffer_->ReleaseFrame(frame_out);
 
   seq_num_--;
   timestamp_ = 0xffffff00;
-  packet_->frameType = kVideoFrameDelta;
+  packet_->frameType = VideoFrameType::kVideoFrameDelta;
   packet_->video_header.is_first_packet_in_frame = true;
   packet_->markerBit = true;
   packet_->seqNum = seq_num_;
@@ -1382,7 +1385,7 @@
   //  t = 0xffffff00        t = 33*90
 
   timestamp_ = 0xffffff00;
-  packet_->frameType = kVideoFrameKey;
+  packet_->frameType = VideoFrameType::kVideoFrameKey;
   packet_->video_header.is_first_packet_in_frame = true;
   packet_->markerBit = false;
   packet_->seqNum = seq_num_;
@@ -1409,7 +1412,7 @@
 
   seq_num_++;
   timestamp_ += 33 * 90;
-  packet_->frameType = kVideoFrameDelta;
+  packet_->frameType = VideoFrameType::kVideoFrameDelta;
   packet_->video_header.is_first_packet_in_frame = true;
   packet_->markerBit = false;
   packet_->seqNum = seq_num_;
@@ -1431,7 +1434,7 @@
 
   frame_out = DecodeCompleteFrame();
   CheckOutFrame(frame_out, 2 * size_, false);
-  EXPECT_EQ(kVideoFrameDelta, frame_out->FrameType());
+  EXPECT_EQ(VideoFrameType::kVideoFrameDelta, frame_out->FrameType());
   jitter_buffer_->ReleaseFrame(frame_out);
 }
 
@@ -1442,7 +1445,7 @@
   // t = 0xffffff00    t = 2700
 
   timestamp_ = 0xffffff00;
-  packet_->frameType = kVideoFrameKey;
+  packet_->frameType = VideoFrameType::kVideoFrameKey;
   packet_->video_header.is_first_packet_in_frame = true;
   packet_->markerBit = true;
   packet_->timestamp = timestamp_;
@@ -1455,7 +1458,7 @@
   // Insert next frame.
   seq_num_++;
   timestamp_ = 2700;
-  packet_->frameType = kVideoFrameDelta;
+  packet_->frameType = VideoFrameType::kVideoFrameDelta;
   packet_->video_header.is_first_packet_in_frame = true;
   packet_->markerBit = true;
   packet_->seqNum = seq_num_;
@@ -1467,13 +1470,13 @@
   VCMEncodedFrame* frame_out = DecodeCompleteFrame();
   EXPECT_EQ(0xffffff00, frame_out->Timestamp());
   CheckOutFrame(frame_out, size_, false);
-  EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+  EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
   jitter_buffer_->ReleaseFrame(frame_out);
 
   VCMEncodedFrame* frame_out2 = DecodeCompleteFrame();
   EXPECT_EQ(2700u, frame_out2->Timestamp());
   CheckOutFrame(frame_out2, size_, false);
-  EXPECT_EQ(kVideoFrameDelta, frame_out2->FrameType());
+  EXPECT_EQ(VideoFrameType::kVideoFrameDelta, frame_out2->FrameType());
   jitter_buffer_->ReleaseFrame(frame_out2);
 }
 
@@ -1485,7 +1488,7 @@
 
   seq_num_ = 2;
   timestamp_ = 2700;
-  packet_->frameType = kVideoFrameDelta;
+  packet_->frameType = VideoFrameType::kVideoFrameDelta;
   packet_->video_header.is_first_packet_in_frame = true;
   packet_->markerBit = true;
   packet_->seqNum = seq_num_;
@@ -1498,7 +1501,7 @@
   // Insert second frame
   seq_num_--;
   timestamp_ = 0xffffff00;
-  packet_->frameType = kVideoFrameKey;
+  packet_->frameType = VideoFrameType::kVideoFrameKey;
   packet_->video_header.is_first_packet_in_frame = true;
   packet_->markerBit = true;
   packet_->seqNum = seq_num_;
@@ -1510,13 +1513,13 @@
   VCMEncodedFrame* frame_out = DecodeCompleteFrame();
   EXPECT_EQ(0xffffff00, frame_out->Timestamp());
   CheckOutFrame(frame_out, size_, false);
-  EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+  EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
   jitter_buffer_->ReleaseFrame(frame_out);
 
   VCMEncodedFrame* frame_out2 = DecodeCompleteFrame();
   EXPECT_EQ(2700u, frame_out2->Timestamp());
   CheckOutFrame(frame_out2, size_, false);
-  EXPECT_EQ(kVideoFrameDelta, frame_out2->FrameType());
+  EXPECT_EQ(VideoFrameType::kVideoFrameDelta, frame_out2->FrameType());
   jitter_buffer_->ReleaseFrame(frame_out2);
 }
 
@@ -1584,7 +1587,7 @@
 
     if (loop == 50) {
       first_key_frame_timestamp = packet_->timestamp;
-      packet_->frameType = kVideoFrameKey;
+      packet_->frameType = VideoFrameType::kVideoFrameKey;
     }
 
     // Insert frame.
@@ -1611,7 +1614,7 @@
   VCMEncodedFrame* frame_out = DecodeCompleteFrame();
   EXPECT_EQ(first_key_frame_timestamp, frame_out->Timestamp());
   CheckOutFrame(frame_out, size_, false);
-  EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+  EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
   jitter_buffer_->ReleaseFrame(frame_out);
 }
 
@@ -1628,7 +1631,7 @@
     packet_->markerBit = false;
     packet_->seqNum = seq_num_;
     packet_->timestamp = timestamp_;
-    packet_->frameType = kEmptyFrame;
+    packet_->frameType = VideoFrameType::kEmptyFrame;
 
     EXPECT_EQ(kNoError, jitter_buffer_->InsertPacket(*packet_, &retransmitted));
   }
@@ -1639,7 +1642,7 @@
   // received the marker bit, unless we have received a packet from a later
   // timestamp.
   // Start with a complete key frame - insert and decode.
-  packet_->frameType = kVideoFrameKey;
+  packet_->frameType = VideoFrameType::kVideoFrameKey;
   packet_->video_header.is_first_packet_in_frame = true;
   packet_->markerBit = true;
   bool retransmitted = false;
@@ -1652,7 +1655,7 @@
 
   packet_->seqNum += 2;
   packet_->timestamp += 33 * 90;
-  packet_->frameType = kVideoFrameDelta;
+  packet_->frameType = VideoFrameType::kVideoFrameDelta;
   packet_->video_header.is_first_packet_in_frame = false;
   packet_->markerBit = false;
 
@@ -1673,23 +1676,24 @@
   jitter_buffer_->SetNackMode(kNack, -1, -1);
   jitter_buffer_->SetNackSettings(kMaxNumberOfFrames, kMaxNumberOfFrames, 0);
   // Insert a key frame and decode it.
-  EXPECT_GE(InsertFrame(kVideoFrameKey), kNoError);
+  EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey), kNoError);
   EXPECT_TRUE(DecodeCompleteFrame());
   DropFrame(1);
   // Fill the jitter buffer.
-  EXPECT_GE(InsertFrames(kMaxNumberOfFrames, kVideoFrameDelta), kNoError);
+  EXPECT_GE(InsertFrames(kMaxNumberOfFrames, VideoFrameType::kVideoFrameDelta),
+            kNoError);
   // Make sure we can't decode these frames.
   EXPECT_FALSE(DecodeCompleteFrame());
   // This frame will make the jitter buffer recycle frames until a key frame.
   // Since none is found it will have to wait until the next key frame before
   // decoding.
-  EXPECT_EQ(kFlushIndicator, InsertFrame(kVideoFrameDelta));
+  EXPECT_EQ(kFlushIndicator, InsertFrame(VideoFrameType::kVideoFrameDelta));
   EXPECT_FALSE(DecodeCompleteFrame());
 }
 
 TEST_F(TestRunningJitterBuffer, EmptyPackets) {
   // Make sure a frame can get complete even though empty packets are missing.
-  stream_generator_->GenerateFrame(kVideoFrameKey, 3, 3,
+  stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameKey, 3, 3,
                                    clock_->TimeInMilliseconds());
   bool request_key_frame = false;
   // Insert empty packet.
@@ -1719,11 +1723,11 @@
   EXPECT_EQ(0u, bitrate);
 
   // Insert a couple of key and delta frames.
-  InsertFrame(kVideoFrameKey);
-  InsertFrame(kVideoFrameDelta);
-  InsertFrame(kVideoFrameDelta);
-  InsertFrame(kVideoFrameKey);
-  InsertFrame(kVideoFrameDelta);
+  InsertFrame(VideoFrameType::kVideoFrameKey);
+  InsertFrame(VideoFrameType::kVideoFrameDelta);
+  InsertFrame(VideoFrameType::kVideoFrameDelta);
+  InsertFrame(VideoFrameType::kVideoFrameKey);
+  InsertFrame(VideoFrameType::kVideoFrameDelta);
   // Decode some of them to make sure the statistics doesn't depend on frames
   // being decoded.
   EXPECT_TRUE(DecodeCompleteFrame());
@@ -1735,7 +1739,7 @@
   // Insert 20 more frames to get estimates of bitrate and framerate over
   // 1 second.
   for (int i = 0; i < 20; ++i) {
-    InsertFrame(kVideoFrameDelta);
+    InsertFrame(VideoFrameType::kVideoFrameDelta);
   }
   jitter_buffer_->IncomingRateStatistics(&framerate, &bitrate);
   // TODO(holmer): The current implementation returns the average of the last
@@ -1746,7 +1750,7 @@
   // Insert 25 more frames to get estimates of bitrate and framerate over
   // 2 seconds.
   for (int i = 0; i < 25; ++i) {
-    InsertFrame(kVideoFrameDelta);
+    InsertFrame(VideoFrameType::kVideoFrameDelta);
   }
   jitter_buffer_->IncomingRateStatistics(&framerate, &bitrate);
   EXPECT_EQ(kDefaultFrameRate, framerate);
@@ -1755,45 +1759,48 @@
 
 TEST_F(TestRunningJitterBuffer, SkipToKeyFrame) {
   // Insert delta frames.
-  EXPECT_GE(InsertFrames(5, kVideoFrameDelta), kNoError);
+  EXPECT_GE(InsertFrames(5, VideoFrameType::kVideoFrameDelta), kNoError);
   // Can't decode without a key frame.
   EXPECT_FALSE(DecodeCompleteFrame());
-  InsertFrame(kVideoFrameKey);
+  InsertFrame(VideoFrameType::kVideoFrameKey);
   // Skip to the next key frame.
   EXPECT_TRUE(DecodeCompleteFrame());
 }
 
 TEST_F(TestRunningJitterBuffer, DontSkipToKeyFrameIfDecodable) {
-  InsertFrame(kVideoFrameKey);
+  InsertFrame(VideoFrameType::kVideoFrameKey);
   EXPECT_TRUE(DecodeCompleteFrame());
   const int kNumDeltaFrames = 5;
-  EXPECT_GE(InsertFrames(kNumDeltaFrames, kVideoFrameDelta), kNoError);
-  InsertFrame(kVideoFrameKey);
+  EXPECT_GE(InsertFrames(kNumDeltaFrames, VideoFrameType::kVideoFrameDelta),
+            kNoError);
+  InsertFrame(VideoFrameType::kVideoFrameKey);
   for (int i = 0; i < kNumDeltaFrames + 1; ++i) {
     EXPECT_TRUE(DecodeCompleteFrame());
   }
 }
 
 TEST_F(TestRunningJitterBuffer, KeyDeltaKeyDelta) {
-  InsertFrame(kVideoFrameKey);
+  InsertFrame(VideoFrameType::kVideoFrameKey);
   EXPECT_TRUE(DecodeCompleteFrame());
   const int kNumDeltaFrames = 5;
-  EXPECT_GE(InsertFrames(kNumDeltaFrames, kVideoFrameDelta), kNoError);
-  InsertFrame(kVideoFrameKey);
-  EXPECT_GE(InsertFrames(kNumDeltaFrames, kVideoFrameDelta), kNoError);
-  InsertFrame(kVideoFrameKey);
+  EXPECT_GE(InsertFrames(kNumDeltaFrames, VideoFrameType::kVideoFrameDelta),
+            kNoError);
+  InsertFrame(VideoFrameType::kVideoFrameKey);
+  EXPECT_GE(InsertFrames(kNumDeltaFrames, VideoFrameType::kVideoFrameDelta),
+            kNoError);
+  InsertFrame(VideoFrameType::kVideoFrameKey);
   for (int i = 0; i < 2 * (kNumDeltaFrames + 1); ++i) {
     EXPECT_TRUE(DecodeCompleteFrame());
   }
 }
 
 TEST_F(TestRunningJitterBuffer, TwoPacketsNonContinuous) {
-  InsertFrame(kVideoFrameKey);
+  InsertFrame(VideoFrameType::kVideoFrameKey);
   EXPECT_TRUE(DecodeCompleteFrame());
-  stream_generator_->GenerateFrame(kVideoFrameDelta, 1, 0,
+  stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameDelta, 1, 0,
                                    clock_->TimeInMilliseconds());
   clock_->AdvanceTimeMilliseconds(kDefaultFramePeriodMs);
-  stream_generator_->GenerateFrame(kVideoFrameDelta, 2, 0,
+  stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameDelta, 2, 0,
                                    clock_->TimeInMilliseconds());
   EXPECT_EQ(kIncomplete, InsertPacketAndPop(1));
   EXPECT_EQ(kCompleteSession, InsertPacketAndPop(1));
@@ -1806,22 +1813,23 @@
 TEST_F(TestJitterBufferNack, EmptyPackets) {
   // Make sure empty packets doesn't clog the jitter buffer.
   jitter_buffer_->SetNackMode(kNack, media_optimization::kLowRttNackMs, -1);
-  EXPECT_GE(InsertFrames(kMaxNumberOfFrames, kEmptyFrame), kNoError);
-  InsertFrame(kVideoFrameKey);
+  EXPECT_GE(InsertFrames(kMaxNumberOfFrames, VideoFrameType::kEmptyFrame),
+            kNoError);
+  InsertFrame(VideoFrameType::kVideoFrameKey);
   EXPECT_TRUE(DecodeCompleteFrame());
 }
 
 TEST_F(TestJitterBufferNack, NackTooOldPackets) {
   // Insert a key frame and decode it.
-  EXPECT_GE(InsertFrame(kVideoFrameKey), kNoError);
+  EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey), kNoError);
   EXPECT_TRUE(DecodeCompleteFrame());
 
   // Drop one frame and insert |kNackHistoryLength| to trigger NACKing a too
   // old packet.
   DropFrame(1);
   // Insert a frame which should trigger a recycle until the next key frame.
-  EXPECT_EQ(kFlushIndicator,
-            InsertFrames(oldest_packet_to_nack_ + 1, kVideoFrameDelta));
+  EXPECT_EQ(kFlushIndicator, InsertFrames(oldest_packet_to_nack_ + 1,
+                                          VideoFrameType::kVideoFrameDelta));
   EXPECT_FALSE(DecodeCompleteFrame());
 
   bool request_key_frame = false;
@@ -1831,25 +1839,27 @@
   EXPECT_FALSE(request_key_frame);
   EXPECT_EQ(0u, nack_list.size());
 
-  EXPECT_GE(InsertFrame(kVideoFrameDelta), kNoError);
+  EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameDelta), kNoError);
   // Waiting for a key frame.
   EXPECT_FALSE(DecodeCompleteFrame());
 
   // The next complete continuous frame isn't a key frame, but we're waiting
   // for one.
   EXPECT_FALSE(DecodeCompleteFrame());
-  EXPECT_GE(InsertFrame(kVideoFrameKey), kNoError);
+  EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey), kNoError);
   // Skipping ahead to the key frame.
   EXPECT_TRUE(DecodeCompleteFrame());
 }
 
 TEST_F(TestJitterBufferNack, NackLargeJitterBuffer) {
   // Insert a key frame and decode it.
-  EXPECT_GE(InsertFrame(kVideoFrameKey), kNoError);
+  EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey), kNoError);
   EXPECT_TRUE(DecodeCompleteFrame());
 
   // Insert a frame which should trigger a recycle until the next key frame.
-  EXPECT_GE(InsertFrames(oldest_packet_to_nack_, kVideoFrameDelta), kNoError);
+  EXPECT_GE(
+      InsertFrames(oldest_packet_to_nack_, VideoFrameType::kVideoFrameDelta),
+      kNoError);
 
   bool request_key_frame = false;
   std::vector<uint16_t> nack_list =
@@ -1864,13 +1874,13 @@
 
 TEST_F(TestJitterBufferNack, NackListFull) {
   // Insert a key frame and decode it.
-  EXPECT_GE(InsertFrame(kVideoFrameKey), kNoError);
+  EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey), kNoError);
   EXPECT_TRUE(DecodeCompleteFrame());
 
   // Generate and drop |kNackHistoryLength| packets to fill the NACK list.
   DropFrame(max_nack_list_size_ + 1);
   // Insert a frame which should trigger a recycle until the next key frame.
-  EXPECT_EQ(kFlushIndicator, InsertFrame(kVideoFrameDelta));
+  EXPECT_EQ(kFlushIndicator, InsertFrame(VideoFrameType::kVideoFrameDelta));
   EXPECT_FALSE(DecodeCompleteFrame());
 
   bool request_key_frame = false;
@@ -1879,7 +1889,7 @@
   // packet.
   EXPECT_FALSE(request_key_frame);
 
-  EXPECT_GE(InsertFrame(kVideoFrameDelta), kNoError);
+  EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameDelta), kNoError);
   // Now we have a packet in the jitter buffer, a key frame will be requested
   // since it's not a key frame.
   jitter_buffer_->GetNackList(&request_key_frame);
@@ -1889,7 +1899,7 @@
   // The next complete continuous frame isn't a key frame, but we're waiting
   // for one.
   EXPECT_FALSE(DecodeCompleteFrame());
-  EXPECT_GE(InsertFrame(kVideoFrameKey), kNoError);
+  EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey), kNoError);
   // Skipping ahead to the key frame.
   EXPECT_TRUE(DecodeCompleteFrame());
 }
@@ -1897,7 +1907,7 @@
 TEST_F(TestJitterBufferNack, NoNackListReturnedBeforeFirstDecode) {
   DropFrame(10);
   // Insert a frame and try to generate a NACK list. Shouldn't get one.
-  EXPECT_GE(InsertFrame(kVideoFrameDelta), kNoError);
+  EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameDelta), kNoError);
   bool request_key_frame = false;
   std::vector<uint16_t> nack_list =
       jitter_buffer_->GetNackList(&request_key_frame);
@@ -1908,8 +1918,8 @@
 
 TEST_F(TestJitterBufferNack, NackListBuiltBeforeFirstDecode) {
   stream_generator_->Init(0, clock_->TimeInMilliseconds());
-  InsertFrame(kVideoFrameKey);
-  stream_generator_->GenerateFrame(kVideoFrameDelta, 2, 0,
+  InsertFrame(VideoFrameType::kVideoFrameKey);
+  stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameDelta, 2, 0,
                                    clock_->TimeInMilliseconds());
   stream_generator_->NextPacket(NULL);  // Drop packet.
   EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
@@ -1921,7 +1931,7 @@
 
 TEST_F(TestJitterBufferNack, VerifyRetransmittedFlag) {
   stream_generator_->Init(0, clock_->TimeInMilliseconds());
-  stream_generator_->GenerateFrame(kVideoFrameKey, 3, 0,
+  stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameKey, 3, 0,
                                    clock_->TimeInMilliseconds());
   VCMPacket packet;
   stream_generator_->PopPacket(&packet, 0);
@@ -1948,7 +1958,7 @@
 
 TEST_F(TestJitterBufferNack, UseNackToRecoverFirstKeyFrame) {
   stream_generator_->Init(0, clock_->TimeInMilliseconds());
-  stream_generator_->GenerateFrame(kVideoFrameKey, 3, 0,
+  stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameKey, 3, 0,
                                    clock_->TimeInMilliseconds());
   EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
   // Drop second packet.
@@ -1968,14 +1978,14 @@
   VCMPacket packet;
   stream_generator_->Init(0, clock_->TimeInMilliseconds());
   // First frame is delta.
-  stream_generator_->GenerateFrame(kVideoFrameDelta, 3, 0,
+  stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameDelta, 3, 0,
                                    clock_->TimeInMilliseconds());
   EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
   // Drop second packet in frame.
   ASSERT_TRUE(stream_generator_->PopPacket(&packet, 0));
   EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
   // Second frame is key.
-  stream_generator_->GenerateFrame(kVideoFrameKey, 3, 0,
+  stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameKey, 3, 0,
                                    clock_->TimeInMilliseconds() + 10);
   EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
   // Drop second packet in frame.
@@ -1993,13 +2003,13 @@
 TEST_F(TestJitterBufferNack, NormalOperation) {
   EXPECT_EQ(kNack, jitter_buffer_->nack_mode());
 
-  EXPECT_GE(InsertFrame(kVideoFrameKey), kNoError);
+  EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey), kNoError);
   EXPECT_TRUE(DecodeCompleteFrame());
 
   //  ----------------------------------------------------------------
   // | 1 | 2 | .. | 8 | 9 | x | 11 | 12 | .. | 19 | x | 21 | .. | 100 |
   //  ----------------------------------------------------------------
-  stream_generator_->GenerateFrame(kVideoFrameKey, 100, 0,
+  stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameKey, 100, 0,
                                    clock_->TimeInMilliseconds());
   clock_->AdvanceTimeMilliseconds(kDefaultFramePeriodMs);
   EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
@@ -2032,10 +2042,10 @@
   // | 65532 | | 65533 | 65534 | 65535 | x | 1 | .. | 9 | x | 11 |.....| 96 |
   //  -------   ------------------------------------------------------------
   stream_generator_->Init(65532, clock_->TimeInMilliseconds());
-  InsertFrame(kVideoFrameKey);
+  InsertFrame(VideoFrameType::kVideoFrameKey);
   EXPECT_FALSE(request_key_frame);
   EXPECT_TRUE(DecodeCompleteFrame());
-  stream_generator_->GenerateFrame(kVideoFrameDelta, 100, 0,
+  stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameDelta, 100, 0,
                                    clock_->TimeInMilliseconds());
   EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
   while (stream_generator_->PacketsRemaining() > 1) {
@@ -2066,10 +2076,10 @@
   // | 65532 | 65533 | 65534 | x | 0 | 1 |
   //  -----------------------------------
   stream_generator_->Init(65532, clock_->TimeInMilliseconds());
-  InsertFrame(kVideoFrameKey);
+  InsertFrame(VideoFrameType::kVideoFrameKey);
   EXPECT_FALSE(request_key_frame);
   EXPECT_TRUE(DecodeCompleteFrame());
-  stream_generator_->GenerateFrame(kVideoFrameDelta, 1, 0,
+  stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameDelta, 1, 0,
                                    clock_->TimeInMilliseconds());
   clock_->AdvanceTimeMilliseconds(kDefaultFramePeriodMs);
   for (int i = 0; i < 5; ++i) {
@@ -2079,7 +2089,7 @@
     } else {
       stream_generator_->NextPacket(NULL);  // Drop packet
     }
-    stream_generator_->GenerateFrame(kVideoFrameDelta, 1, 0,
+    stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameDelta, 1, 0,
                                      clock_->TimeInMilliseconds());
     clock_->AdvanceTimeMilliseconds(kDefaultFramePeriodMs);
   }
@@ -2094,7 +2104,7 @@
 
 TEST_F(TestJitterBufferNack, ResetByFutureKeyFrameDoesntError) {
   stream_generator_->Init(0, clock_->TimeInMilliseconds());
-  InsertFrame(kVideoFrameKey);
+  InsertFrame(VideoFrameType::kVideoFrameKey);
   EXPECT_TRUE(DecodeCompleteFrame());
   bool extended = false;
   std::vector<uint16_t> nack_list = jitter_buffer_->GetNackList(&extended);
@@ -2105,14 +2115,14 @@
   // a keyframe, even if all of the nack list needs to be flushed.
   stream_generator_->Init(10000, clock_->TimeInMilliseconds());
   clock_->AdvanceTimeMilliseconds(kDefaultFramePeriodMs);
-  InsertFrame(kVideoFrameKey);
+  InsertFrame(VideoFrameType::kVideoFrameKey);
   EXPECT_TRUE(DecodeCompleteFrame());
   nack_list = jitter_buffer_->GetNackList(&extended);
   EXPECT_EQ(0u, nack_list.size());
 
   // Stream should be decodable from this point.
   clock_->AdvanceTimeMilliseconds(kDefaultFramePeriodMs);
-  InsertFrame(kVideoFrameDelta);
+  InsertFrame(VideoFrameType::kVideoFrameDelta);
   EXPECT_TRUE(DecodeCompleteFrame());
   nack_list = jitter_buffer_->GetNackList(&extended);
   EXPECT_EQ(0u, nack_list.size());
diff --git a/modules/video_coding/packet.cc b/modules/video_coding/packet.cc
index 5b826e8..5dd01e8 100644
--- a/modules/video_coding/packet.cc
+++ b/modules/video_coding/packet.cc
@@ -23,7 +23,7 @@
       sizeBytes(0),
       markerBit(false),
       timesNacked(-1),
-      frameType(kEmptyFrame),
+      frameType(VideoFrameType::kEmptyFrame),
       completeNALU(kNaluUnset),
       insertStartCode(false),
       video_header(),
diff --git a/modules/video_coding/packet.h b/modules/video_coding/packet.h
index cd4b330..c0d6a6c 100644
--- a/modules/video_coding/packet.h
+++ b/modules/video_coding/packet.h
@@ -16,7 +16,7 @@
 
 #include "absl/types/optional.h"
 #include "api/rtp_headers.h"
-#include "common_types.h"  // NOLINT(build/include)
+#include "api/video/video_frame_type.h"
 #include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor.h"
 #include "modules/rtp_rtcp/source/rtp_video_header.h"
 
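For context on the include swap above: the new api/video:video_frame_type target supplies the scoped enum that replaces the unscoped frame-type constants previously pulled in via common_types.h. Below is a minimal sketch of the shape the call sites in this diff rely on; the enumerator names are taken from the hunks themselves, while the exact declaration and any underlying values are an assumption here, not the real header.

    // Illustrative only: scoped enum assumed to live in api/video/video_frame_type.h.
    namespace webrtc {

    enum class VideoFrameType {
      kEmptyFrame,
      kVideoFrameKey,
      kVideoFrameDelta,
    };

    }  // namespace webrtc

Because it is a scoped enum, every enumerator has to be qualified as VideoFrameType::k..., which is what drives the mechanical churn in the remaining hunks.
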
diff --git a/modules/video_coding/packet_buffer.cc b/modules/video_coding/packet_buffer.cc
index 11cc06e..804ce4a 100644
--- a/modules/video_coding/packet_buffer.cc
+++ b/modules/video_coding/packet_buffer.cc
@@ -126,7 +126,7 @@
 
     int64_t now_ms = clock_->TimeInMilliseconds();
     last_received_packet_ms_ = now_ms;
-    if (packet->frameType == kVideoFrameKey)
+    if (packet->frameType == VideoFrameType::kVideoFrameKey)
       last_received_keyframe_packet_ms_ = now_ms;
 
     found_frames = FindFrames(seq_num);
@@ -378,9 +378,11 @@
         const size_t first_packet_index = start_seq_num % size_;
         RTC_CHECK_LT(first_packet_index, size_);
         if (is_h264_keyframe) {
-          data_buffer_[first_packet_index].frameType = kVideoFrameKey;
+          data_buffer_[first_packet_index].frameType =
+              VideoFrameType::kVideoFrameKey;
         } else {
-          data_buffer_[first_packet_index].frameType = kVideoFrameDelta;
+          data_buffer_[first_packet_index].frameType =
+              VideoFrameType::kVideoFrameDelta;
         }
 
         // If this is not a keyframe, make sure there are no gaps in the
diff --git a/modules/video_coding/receiver_unittest.cc b/modules/video_coding/receiver_unittest.cc
index 29bb209..083c13c 100644
--- a/modules/video_coding/receiver_unittest.cc
+++ b/modules/video_coding/receiver_unittest.cc
@@ -59,8 +59,10 @@
   int32_t InsertFrame(VideoFrameType frame_type, bool complete) {
     int num_of_packets = complete ? 1 : 2;
     stream_generator_->GenerateFrame(
-        frame_type, (frame_type != kEmptyFrame) ? num_of_packets : 0,
-        (frame_type == kEmptyFrame) ? 1 : 0, clock_->TimeInMilliseconds());
+        frame_type,
+        (frame_type != VideoFrameType::kEmptyFrame) ? num_of_packets : 0,
+        (frame_type == VideoFrameType::kEmptyFrame) ? 1 : 0,
+        clock_->TimeInMilliseconds());
     int32_t ret = InsertPacketAndPop(0);
     if (!complete) {
       // Drop the second packet.
@@ -94,7 +96,7 @@
   const int kMinDelayMs = 500;
   receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack,
                             kMaxNonDecodableDuration);
-  EXPECT_GE(InsertFrame(kVideoFrameKey, true), kNoError);
+  EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey, true), kNoError);
   // Advance time until it's time to decode the key frame.
   clock_->AdvanceTimeMilliseconds(kMinDelayMs);
   EXPECT_TRUE(DecodeNextFrame());
@@ -113,7 +115,7 @@
                             kMaxNonDecodableDuration);
   const int kNumFrames = kDefaultFrameRate * kMaxNonDecodableDuration / 1000;
   for (int i = 0; i < kNumFrames; ++i) {
-    EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError);
+    EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameDelta, true), kNoError);
   }
   bool request_key_frame = false;
   std::vector<uint16_t> nack_list = receiver_.NackList(&request_key_frame);
@@ -133,12 +135,12 @@
                             kMaxNonDecodableDuration);
   timing_.set_min_playout_delay(kMinDelayMs);
   int64_t key_frame_inserted = clock_->TimeInMilliseconds();
-  EXPECT_GE(InsertFrame(kVideoFrameKey, true), kNoError);
+  EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey, true), kNoError);
   // Insert an incomplete frame.
-  EXPECT_GE(InsertFrame(kVideoFrameDelta, false), kNoError);
+  EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameDelta, false), kNoError);
   // Insert enough frames to have too long non-decodable sequence.
   for (int i = 0; i < kMaxNonDecodableDurationFrames; ++i) {
-    EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError);
+    EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameDelta, true), kNoError);
   }
   // Advance time until it's time to decode the key frame.
   clock_->AdvanceTimeMilliseconds(kMinDelayMs - clock_->TimeInMilliseconds() -
@@ -163,13 +165,13 @@
                             kMaxNonDecodableDuration);
   timing_.set_min_playout_delay(kMinDelayMs);
   int64_t key_frame_inserted = clock_->TimeInMilliseconds();
-  EXPECT_GE(InsertFrame(kVideoFrameKey, true), kNoError);
+  EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey, true), kNoError);
   // Insert an incomplete frame.
-  EXPECT_GE(InsertFrame(kVideoFrameDelta, false), kNoError);
+  EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameDelta, false), kNoError);
   // Insert all but one frame to not trigger a key frame request due to
   // too long duration of non-decodable frames.
   for (int i = 0; i < kMaxNonDecodableDurationFrames - 1; ++i) {
-    EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError);
+    EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameDelta, true), kNoError);
   }
   // Advance time until it's time to decode the key frame.
   clock_->AdvanceTimeMilliseconds(kMinDelayMs - clock_->TimeInMilliseconds() -
@@ -195,14 +197,14 @@
                             kMaxNonDecodableDuration);
   timing_.set_min_playout_delay(kMinDelayMs);
   int64_t key_frame_inserted = clock_->TimeInMilliseconds();
-  EXPECT_GE(InsertFrame(kVideoFrameKey, true), kNoError);
+  EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey, true), kNoError);
   // Insert enough frames to have too long non-decodable sequence, except that
   // we don't have any losses.
   for (int i = 0; i < kMaxNonDecodableDurationFrames; ++i) {
-    EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError);
+    EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameDelta, true), kNoError);
   }
   // Insert an incomplete frame.
-  EXPECT_GE(InsertFrame(kVideoFrameDelta, false), kNoError);
+  EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameDelta, false), kNoError);
   // Advance time until it's time to decode the key frame.
   clock_->AdvanceTimeMilliseconds(kMinDelayMs - clock_->TimeInMilliseconds() -
                                   key_frame_inserted);
@@ -227,14 +229,14 @@
                             kMaxNonDecodableDuration);
   timing_.set_min_playout_delay(kMinDelayMs);
   int64_t key_frame_inserted = clock_->TimeInMilliseconds();
-  EXPECT_GE(InsertFrame(kVideoFrameKey, true), kNoError);
+  EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey, true), kNoError);
   // Insert an incomplete frame.
-  EXPECT_GE(InsertFrame(kVideoFrameDelta, false), kNoError);
+  EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameDelta, false), kNoError);
   // Insert enough frames to have too long non-decodable sequence.
   for (int i = 0; i < kMaxNonDecodableDurationFrames; ++i) {
-    EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError);
+    EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameDelta, true), kNoError);
   }
-  EXPECT_GE(InsertFrame(kVideoFrameKey, true), kNoError);
+  EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey, true), kNoError);
   // Advance time until it's time to decode the key frame.
   clock_->AdvanceTimeMilliseconds(kMinDelayMs - clock_->TimeInMilliseconds() -
                                   key_frame_inserted);
diff --git a/modules/video_coding/rtp_frame_reference_finder.cc b/modules/video_coding/rtp_frame_reference_finder.cc
index 6d4860d..8191017 100644
--- a/modules/video_coding/rtp_frame_reference_finder.cc
+++ b/modules/video_coding/rtp_frame_reference_finder.cc
@@ -195,12 +195,13 @@
   // otherwise we use sequence number.
   if (picture_id != kNoPictureId) {
     frame->id.picture_id = unwrapper_.Unwrap(picture_id);
-    frame->num_references = frame->frame_type() == kVideoFrameKey ? 0 : 1;
+    frame->num_references =
+        frame->frame_type() == VideoFrameType::kVideoFrameKey ? 0 : 1;
     frame->references[0] = frame->id.picture_id - 1;
     return kHandOff;
   }
 
-  if (frame->frame_type() == kVideoFrameKey) {
+  if (frame->frame_type() == VideoFrameType::kVideoFrameKey) {
     last_seq_num_gop_.insert(std::make_pair(
         frame->last_seq_num(),
         std::make_pair(frame->last_seq_num(), frame->last_seq_num())));
@@ -234,7 +235,7 @@
   // this frame.
   uint16_t last_picture_id_gop = seq_num_it->second.first;
   uint16_t last_picture_id_with_padding_gop = seq_num_it->second.second;
-  if (frame->frame_type() == kVideoFrameDelta) {
+  if (frame->frame_type() == VideoFrameType::kVideoFrameDelta) {
     uint16_t prev_seq_num = frame->first_seq_num() - 1;
 
     if (prev_seq_num != last_picture_id_with_padding_gop)
@@ -246,7 +247,8 @@
   // Since keyframes can cause reordering we can't simply assign the
   // picture id according to some incrementing counter.
   frame->id.picture_id = frame->last_seq_num();
-  frame->num_references = frame->frame_type() == kVideoFrameDelta;
+  frame->num_references =
+      frame->frame_type() == VideoFrameType::kVideoFrameDelta;
   frame->references[0] = rtp_seq_num_unwrapper_.Unwrap(last_picture_id_gop);
   if (AheadOf<uint16_t>(frame->id.picture_id, last_picture_id_gop)) {
     seq_num_it->second.first = frame->id.picture_id;
@@ -306,7 +308,7 @@
   not_yet_received_frames_.erase(not_yet_received_frames_.begin(),
                                  clean_frames_to);
 
-  if (frame->frame_type() == kVideoFrameKey) {
+  if (frame->frame_type() == VideoFrameType::kVideoFrameKey) {
     frame->num_references = 0;
     layer_info_[unwrapped_tl0].fill(-1);
     UpdateLayerInfoVp8(frame, unwrapped_tl0, codec_header.temporalIdx);
@@ -483,13 +485,13 @@
 
     info = &gof_info_it->second;
 
-    if (frame->frame_type() == kVideoFrameKey) {
+    if (frame->frame_type() == VideoFrameType::kVideoFrameKey) {
       frame->num_references = 0;
       FrameReceivedVp9(frame->id.picture_id, info);
       UnwrapPictureIds(frame);
       return kHandOff;
     }
-  } else if (frame->frame_type() == kVideoFrameKey) {
+  } else if (frame->frame_type() == VideoFrameType::kVideoFrameKey) {
     if (frame->id.spatial_layer == 0) {
       RTC_LOG(LS_WARNING) << "Received keyframe without scalability structure";
       return kDrop;
@@ -500,7 +502,7 @@
 
     info = &gof_info_it->second;
 
-    if (frame->frame_type() == kVideoFrameKey) {
+    if (frame->frame_type() == VideoFrameType::kVideoFrameKey) {
       frame->num_references = 0;
       FrameReceivedVp9(frame->id.picture_id, info);
       UnwrapPictureIds(frame);
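One subtlety in the reference-finder hunks above: the assignment frame->num_references = frame->frame_type() == VideoFrameType::kVideoFrameDelta; keeps compiling after the move to a scoped enum, because the comparison itself yields a plain bool (0 or 1); only the enumerators lose their implicit conversion to integers. A short sketch of that distinction, using a hypothetical helper for illustration:

    #include <cstddef>

    #include "api/video/video_frame_type.h"  // new header added by this change

    // The comparison below is an ordinary bool, so assigning it to an integer
    // still works; what enum class removes is the implicit enum-to-int cast.
    size_t NumReferencesFor(webrtc::VideoFrameType type) {
      // int as_int = type;                  // would no longer compile
      int as_int = static_cast<int>(type);   // explicit conversion still works
      (void)as_int;
      return type == webrtc::VideoFrameType::kVideoFrameDelta;  // 1 for delta, 0 otherwise
    }
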
diff --git a/modules/video_coding/rtp_frame_reference_finder_unittest.cc b/modules/video_coding/rtp_frame_reference_finder_unittest.cc
index 5d8f6f1..520cb88 100644
--- a/modules/video_coding/rtp_frame_reference_finder_unittest.cc
+++ b/modules/video_coding/rtp_frame_reference_finder_unittest.cc
@@ -82,7 +82,8 @@
     VCMPacket packet;
     packet.video_header.codec = kVideoCodecGeneric;
     packet.seqNum = seq_num_start;
-    packet.frameType = keyframe ? kVideoFrameKey : kVideoFrameDelta;
+    packet.frameType = keyframe ? VideoFrameType::kVideoFrameKey
+                                : VideoFrameType::kVideoFrameDelta;
     ref_packet_buffer_->InsertPacket(&packet);
 
     packet.seqNum = seq_num_end;
@@ -106,7 +107,8 @@
     packet.seqNum = seq_num_start;
     packet.video_header.is_last_packet_in_frame =
         (seq_num_start == seq_num_end);
-    packet.frameType = keyframe ? kVideoFrameKey : kVideoFrameDelta;
+    packet.frameType = keyframe ? VideoFrameType::kVideoFrameKey
+                                : VideoFrameType::kVideoFrameDelta;
     auto& vp8_header =
         packet.video_header.video_type_header.emplace<RTPVideoHeaderVP8>();
     vp8_header.pictureId = pid % (1 << 15);
@@ -144,7 +146,8 @@
     packet.seqNum = seq_num_start;
     packet.video_header.is_last_packet_in_frame =
         (seq_num_start == seq_num_end);
-    packet.frameType = keyframe ? kVideoFrameKey : kVideoFrameDelta;
+    packet.frameType = keyframe ? VideoFrameType::kVideoFrameKey
+                                : VideoFrameType::kVideoFrameDelta;
     vp9_header.flexible_mode = false;
     vp9_header.picture_id = pid % (1 << 15);
     vp9_header.temporal_idx = tid;
@@ -186,7 +189,8 @@
     packet.seqNum = seq_num_start;
     packet.video_header.is_last_packet_in_frame =
         (seq_num_start == seq_num_end);
-    packet.frameType = keyframe ? kVideoFrameKey : kVideoFrameDelta;
+    packet.frameType = keyframe ? VideoFrameType::kVideoFrameKey
+                                : VideoFrameType::kVideoFrameDelta;
     vp9_header.inter_layer_predicted = inter;
     vp9_header.flexible_mode = true;
     vp9_header.picture_id = pid % (1 << 15);
diff --git a/modules/video_coding/session_info.cc b/modules/video_coding/session_info.cc
index cba0409..c89f40f 100644
--- a/modules/video_coding/session_info.cc
+++ b/modules/video_coding/session_info.cc
@@ -36,7 +36,7 @@
 
 VCMSessionInfo::VCMSessionInfo()
     : complete_(false),
-      frame_type_(kVideoFrameDelta),
+      frame_type_(VideoFrameType::kVideoFrameDelta),
       packets_(),
       empty_seq_num_low_(-1),
       empty_seq_num_high_(-1),
@@ -172,7 +172,7 @@
 
 void VCMSessionInfo::Reset() {
   complete_ = false;
-  frame_type_ = kVideoFrameDelta;
+  frame_type_ = VideoFrameType::kVideoFrameDelta;
   packets_.clear();
   empty_seq_num_low_ = -1;
   empty_seq_num_high_ = -1;
@@ -426,7 +426,7 @@
 int VCMSessionInfo::InsertPacket(const VCMPacket& packet,
                                  uint8_t* frame_buffer,
                                  const FrameData& frame_data) {
-  if (packet.frameType == kEmptyFrame) {
+  if (packet.frameType == VideoFrameType::kEmptyFrame) {
     // Update sequence number of an empty packet.
     // Only media packets are inserted into the packet list.
     InformOfEmptyPacket(packet.seqNum);
@@ -479,7 +479,8 @@
           << "Received packet with a sequence number which is out "
              "of frame boundaries";
       return -3;
-    } else if (frame_type_ == kEmptyFrame && packet.frameType != kEmptyFrame) {
+    } else if (frame_type_ == VideoFrameType::kEmptyFrame &&
+               packet.frameType != VideoFrameType::kEmptyFrame) {
       // Update the frame type with the type of the first media packet.
       // TODO(mikhal): Can this trigger?
       frame_type_ = packet.frameType;
diff --git a/modules/video_coding/session_info_unittest.cc b/modules/video_coding/session_info_unittest.cc
index 54173ef..282834b 100644
--- a/modules/video_coding/session_info_unittest.cc
+++ b/modules/video_coding/session_info_unittest.cc
@@ -23,7 +23,7 @@
     memset(packet_buffer_, 0, sizeof(packet_buffer_));
     memset(frame_buffer_, 0, sizeof(frame_buffer_));
     session_.Reset();
-    packet_.frameType = kVideoFrameDelta;
+    packet_.frameType = VideoFrameType::kVideoFrameDelta;
     packet_.sizeBytes = packet_buffer_size();
     packet_.dataPtr = packet_buffer_;
     packet_.seqNum = 0;
@@ -116,12 +116,12 @@
   packet_.video_header.is_first_packet_in_frame = true;
   packet_.seqNum = 0xFFFE;
   packet_.sizeBytes = packet_buffer_size();
-  packet_.frameType = kVideoFrameKey;
+  packet_.frameType = VideoFrameType::kVideoFrameKey;
   FillPacket(0);
   EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
                                       packet_, frame_buffer_, frame_data)));
   EXPECT_FALSE(session_.HaveLastPacket());
-  EXPECT_EQ(kVideoFrameKey, session_.FrameType());
+  EXPECT_EQ(VideoFrameType::kVideoFrameKey, session_.FrameType());
 
   packet_.video_header.is_first_packet_in_frame = false;
   packet_.markerBit = true;
@@ -138,7 +138,7 @@
   packet_.markerBit = true;
   packet_.seqNum = 2;
   packet_.sizeBytes = 0;
-  packet_.frameType = kEmptyFrame;
+  packet_.frameType = VideoFrameType::kEmptyFrame;
   EXPECT_EQ(0, session_.InsertPacket(packet_, frame_buffer_, frame_data));
   EXPECT_EQ(packet_.seqNum, session_.HighSequenceNumber());
 }
@@ -309,7 +309,7 @@
 TEST_F(TestNalUnits, OnlyReceivedEmptyPacket) {
   packet_.video_header.is_first_packet_in_frame = false;
   packet_.completeNALU = kNaluComplete;
-  packet_.frameType = kEmptyFrame;
+  packet_.frameType = VideoFrameType::kEmptyFrame;
   packet_.sizeBytes = 0;
   packet_.seqNum = 0;
   packet_.markerBit = false;
diff --git a/modules/video_coding/test/stream_generator.cc b/modules/video_coding/test/stream_generator.cc
index 022edb6..8858812 100644
--- a/modules/video_coding/test/stream_generator.cc
+++ b/modules/video_coding/test/stream_generator.cc
@@ -44,7 +44,7 @@
   }
   for (int i = 0; i < num_empty_packets; ++i) {
     packets_.push_back(GeneratePacket(sequence_number_, timestamp, 0, false,
-                                      false, kEmptyFrame));
+                                      false, VideoFrameType::kEmptyFrame));
     ++sequence_number_;
   }
 }
diff --git a/modules/video_coding/utility/simulcast_test_fixture_impl.cc b/modules/video_coding/utility/simulcast_test_fixture_impl.cc
index edef45d..8eb2daa 100644
--- a/modules/video_coding/utility/simulcast_test_fixture_impl.cc
+++ b/modules/video_coding/utility/simulcast_test_fixture_impl.cc
@@ -76,11 +76,11 @@
     bool is_vp8 = (codec_specific_info->codecType == kVideoCodecVP8);
     // Only store the base layer.
     if (encoded_image.SpatialIndex().value_or(0) == 0) {
-      if (encoded_image._frameType == kVideoFrameKey) {
+      if (encoded_image._frameType == VideoFrameType::kVideoFrameKey) {
         // TODO(nisse): Why not size() ?
         encoded_key_frame_.Allocate(encoded_image.capacity());
         encoded_key_frame_.set_size(encoded_image.size());
-        encoded_key_frame_._frameType = kVideoFrameKey;
+        encoded_key_frame_._frameType = VideoFrameType::kVideoFrameKey;
         encoded_key_frame_._completeFrame = encoded_image._completeFrame;
         memcpy(encoded_key_frame_.data(), encoded_image.data(),
                encoded_image.size());
@@ -295,17 +295,17 @@
 void SimulcastTestFixtureImpl::RunActiveStreamsTest(
     const std::vector<bool> active_streams) {
   std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
-                                          kVideoFrameDelta);
+                                          VideoFrameType::kVideoFrameDelta);
   UpdateActiveStreams(active_streams);
   // Set sufficient bitrate for all streams so we can test active without
   // bitrate being an issue.
   SetRates(kMaxBitrates[0] + kMaxBitrates[1] + kMaxBitrates[2], 30);
 
-  ExpectStreams(kVideoFrameKey, active_streams);
+  ExpectStreams(VideoFrameType::kVideoFrameKey, active_streams);
   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 
-  ExpectStreams(kVideoFrameDelta, active_streams);
+  ExpectStreams(VideoFrameType::kVideoFrameDelta, active_streams);
   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 }
@@ -397,33 +397,36 @@
 void SimulcastTestFixtureImpl::TestKeyFrameRequestsOnAllStreams() {
   SetRates(kMaxBitrates[2], 30);  // To get all three streams.
   std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
-                                          kVideoFrameDelta);
-  ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams);
+                                          VideoFrameType::kVideoFrameDelta);
+  ExpectStreams(VideoFrameType::kVideoFrameKey, kNumberOfSimulcastStreams);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 
-  ExpectStreams(kVideoFrameDelta, kNumberOfSimulcastStreams);
+  ExpectStreams(VideoFrameType::kVideoFrameDelta, kNumberOfSimulcastStreams);
   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 
-  frame_types[0] = kVideoFrameKey;
-  ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams);
+  frame_types[0] = VideoFrameType::kVideoFrameKey;
+  ExpectStreams(VideoFrameType::kVideoFrameKey, kNumberOfSimulcastStreams);
   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 
-  std::fill(frame_types.begin(), frame_types.end(), kVideoFrameDelta);
-  frame_types[1] = kVideoFrameKey;
-  ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams);
+  std::fill(frame_types.begin(), frame_types.end(),
+            VideoFrameType::kVideoFrameDelta);
+  frame_types[1] = VideoFrameType::kVideoFrameKey;
+  ExpectStreams(VideoFrameType::kVideoFrameKey, kNumberOfSimulcastStreams);
   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 
-  std::fill(frame_types.begin(), frame_types.end(), kVideoFrameDelta);
-  frame_types[2] = kVideoFrameKey;
-  ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams);
+  std::fill(frame_types.begin(), frame_types.end(),
+            VideoFrameType::kVideoFrameDelta);
+  frame_types[2] = VideoFrameType::kVideoFrameKey;
+  ExpectStreams(VideoFrameType::kVideoFrameKey, kNumberOfSimulcastStreams);
   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 
-  std::fill(frame_types.begin(), frame_types.end(), kVideoFrameDelta);
-  ExpectStreams(kVideoFrameDelta, kNumberOfSimulcastStreams);
+  std::fill(frame_types.begin(), frame_types.end(),
+            VideoFrameType::kVideoFrameDelta);
+  ExpectStreams(VideoFrameType::kVideoFrameDelta, kNumberOfSimulcastStreams);
   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 }
@@ -432,11 +435,11 @@
   // We should always encode the base layer.
   SetRates(kMinBitrates[0] - 1, 30);
   std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
-                                          kVideoFrameDelta);
-  ExpectStreams(kVideoFrameKey, 1);
+                                          VideoFrameType::kVideoFrameDelta);
+  ExpectStreams(VideoFrameType::kVideoFrameKey, 1);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 
-  ExpectStreams(kVideoFrameDelta, 1);
+  ExpectStreams(VideoFrameType::kVideoFrameDelta, 1);
   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 }
@@ -445,11 +448,11 @@
   // We have just enough to get only the first stream and padding for two.
   SetRates(kMinBitrates[0], 30);
   std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
-                                          kVideoFrameDelta);
-  ExpectStreams(kVideoFrameKey, 1);
+                                          VideoFrameType::kVideoFrameDelta);
+  ExpectStreams(VideoFrameType::kVideoFrameKey, 1);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 
-  ExpectStreams(kVideoFrameDelta, 1);
+  ExpectStreams(VideoFrameType::kVideoFrameDelta, 1);
   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 }
@@ -459,11 +462,11 @@
   // the first stream maxed out (at |maxBitrate|), and padding for two.
   SetRates(kTargetBitrates[0] + kMinBitrates[1] - 1, 30);
   std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
-                                          kVideoFrameDelta);
-  ExpectStreams(kVideoFrameKey, 1);
+                                          VideoFrameType::kVideoFrameDelta);
+  ExpectStreams(VideoFrameType::kVideoFrameKey, 1);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 
-  ExpectStreams(kVideoFrameDelta, 1);
+  ExpectStreams(VideoFrameType::kVideoFrameDelta, 1);
   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 }
@@ -472,11 +475,11 @@
   // We have just enough to send two streams, so padding for one stream.
   SetRates(kTargetBitrates[0] + kMinBitrates[1], 30);
   std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
-                                          kVideoFrameDelta);
-  ExpectStreams(kVideoFrameKey, 2);
+                                          VideoFrameType::kVideoFrameDelta);
+  ExpectStreams(VideoFrameType::kVideoFrameKey, 2);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 
-  ExpectStreams(kVideoFrameDelta, 2);
+  ExpectStreams(VideoFrameType::kVideoFrameDelta, 2);
   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 }
@@ -486,11 +489,11 @@
   // first stream's rate maxed out at |targetBitrate|, second at |maxBitrate|.
   SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] - 1, 30);
   std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
-                                          kVideoFrameDelta);
-  ExpectStreams(kVideoFrameKey, 2);
+                                          VideoFrameType::kVideoFrameDelta);
+  ExpectStreams(VideoFrameType::kVideoFrameKey, 2);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 
-  ExpectStreams(kVideoFrameDelta, 2);
+  ExpectStreams(VideoFrameType::kVideoFrameDelta, 2);
   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 }
@@ -499,11 +502,11 @@
   // We have just enough to send all streams.
   SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2], 30);
   std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
-                                          kVideoFrameDelta);
-  ExpectStreams(kVideoFrameKey, 3);
+                                          VideoFrameType::kVideoFrameDelta);
+  ExpectStreams(VideoFrameType::kVideoFrameKey, 3);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 
-  ExpectStreams(kVideoFrameDelta, 3);
+  ExpectStreams(VideoFrameType::kVideoFrameDelta, 3);
   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 }
@@ -512,44 +515,44 @@
   // We should get three media streams.
   SetRates(kMaxBitrates[0] + kMaxBitrates[1] + kMaxBitrates[2], 30);
   std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
-                                          kVideoFrameDelta);
-  ExpectStreams(kVideoFrameKey, 3);
+                                          VideoFrameType::kVideoFrameDelta);
+  ExpectStreams(VideoFrameType::kVideoFrameKey, 3);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 
-  ExpectStreams(kVideoFrameDelta, 3);
+  ExpectStreams(VideoFrameType::kVideoFrameDelta, 3);
   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 
   // We should only get two streams and padding for one.
   SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] / 2, 30);
-  ExpectStreams(kVideoFrameDelta, 2);
+  ExpectStreams(VideoFrameType::kVideoFrameDelta, 2);
   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 
   // We should only get the first stream and padding for two.
   SetRates(kTargetBitrates[0] + kMinBitrates[1] / 2, 30);
-  ExpectStreams(kVideoFrameDelta, 1);
+  ExpectStreams(VideoFrameType::kVideoFrameDelta, 1);
   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 
   // We don't have enough bitrate for the thumbnail stream, but we should get
   // it anyway with current configuration.
   SetRates(kTargetBitrates[0] - 1, 30);
-  ExpectStreams(kVideoFrameDelta, 1);
+  ExpectStreams(VideoFrameType::kVideoFrameDelta, 1);
   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 
   // We should only get two streams and padding for one.
   SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] / 2, 30);
   // We get a key frame because a new stream is being enabled.
-  ExpectStreams(kVideoFrameKey, 2);
+  ExpectStreams(VideoFrameType::kVideoFrameKey, 2);
   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 
   // We should get all three streams.
   SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kTargetBitrates[2], 30);
   // We get a key frame because a new stream is being enabled.
-  ExpectStreams(kVideoFrameKey, 3);
+  ExpectStreams(VideoFrameType::kVideoFrameKey, 3);
   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 }
@@ -618,10 +621,11 @@
   // Encode one frame and verify.
   SetRates(kMaxBitrates[0] + kMaxBitrates[1], 30);
   std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
-                                          kVideoFrameDelta);
+                                          VideoFrameType::kVideoFrameDelta);
   EXPECT_CALL(
       encoder_callback_,
-      OnEncodedImage(AllOf(Field(&EncodedImage::_frameType, kVideoFrameKey),
+      OnEncodedImage(AllOf(Field(&EncodedImage::_frameType,
+                                 VideoFrameType::kVideoFrameKey),
                            Field(&EncodedImage::_encodedWidth, width),
                            Field(&EncodedImage::_encodedHeight, height)),
                      _, _))
@@ -637,7 +641,7 @@
   SetUpRateAllocator();
   EXPECT_EQ(0, encoder_->InitEncode(&settings_, 1, 1200));
   SetRates(settings_.startBitrate, 30);
-  ExpectStreams(kVideoFrameKey, 1);
+  ExpectStreams(VideoFrameType::kVideoFrameKey, 1);
   // Resize |input_frame_| to the new resolution.
   input_buffer_ = I420Buffer::Create(settings_.width, settings_.height);
   input_buffer_->InitializeData();
@@ -853,7 +857,7 @@
           testing::Invoke([&](const EncodedImage& encoded_image,
                               const CodecSpecificInfo* codec_specific_info,
                               const RTPFragmentationHeader* fragmentation) {
-            EXPECT_EQ(encoded_image._frameType, kVideoFrameKey);
+            EXPECT_EQ(encoded_image._frameType, VideoFrameType::kVideoFrameKey);
 
             size_t index = encoded_image.SpatialIndex().value_or(0);
             // TODO(nisse): Why not size()
diff --git a/modules/video_coding/video_packet_buffer_unittest.cc b/modules/video_coding/video_packet_buffer_unittest.cc
index 9d8a3b0..5f7b8bc 100644
--- a/modules/video_coding/video_packet_buffer_unittest.cc
+++ b/modules/video_coding/video_packet_buffer_unittest.cc
@@ -65,8 +65,8 @@
     packet.video_header.codec = kVideoCodecGeneric;
     packet.timestamp = timestamp;
     packet.seqNum = seq_num;
-    packet.frameType =
-        keyframe == kKeyFrame ? kVideoFrameKey : kVideoFrameDelta;
+    packet.frameType = keyframe == kKeyFrame ? VideoFrameType::kVideoFrameKey
+                                             : VideoFrameType::kVideoFrameDelta;
     packet.video_header.is_first_packet_in_frame = first == kFirst;
     packet.video_header.is_last_packet_in_frame = last == kLast;
     packet.sizeBytes = data_size;
@@ -163,7 +163,7 @@
   VCMPacket packet;
   packet.video_header.codec = kVideoCodecGeneric;
   packet.seqNum = seq_num;
-  packet.frameType = kVideoFrameKey;
+  packet.frameType = VideoFrameType::kVideoFrameKey;
   packet.video_header.is_first_packet_in_frame = true;
   packet.video_header.is_last_packet_in_frame = false;
   packet.timesNacked = 0;
@@ -788,7 +788,7 @@
   packet.video_header.video_type_header.emplace<RTPVideoHeaderVP8>();
   packet.timestamp = 1;
   packet.seqNum = 1;
-  packet.frameType = kVideoFrameKey;
+  packet.frameType = VideoFrameType::kVideoFrameKey;
   EXPECT_TRUE(packet_buffer_->InsertPacket(&packet));
 
   packet.video_header.codec = kVideoCodecH264;
@@ -803,7 +803,7 @@
   packet.video_header.video_type_header.emplace<RTPVideoHeaderVP8>();
   packet.timestamp = 2;
   packet.seqNum = 2;
-  packet.frameType = kVideoFrameDelta;
+  packet.frameType = VideoFrameType::kVideoFrameDelta;
 
   EXPECT_TRUE(packet_buffer_->InsertPacket(&packet));
 
@@ -815,7 +815,7 @@
   packet.video_header.codec = kVideoCodecH264;
   packet.timestamp = 1;
   packet.seqNum = 1;
-  packet.frameType = kVideoFrameKey;
+  packet.frameType = VideoFrameType::kVideoFrameKey;
   packet.video_header.is_first_packet_in_frame = true;
   packet.video_header.is_last_packet_in_frame = true;
   auto& h264_header =
@@ -922,7 +922,8 @@
   packet_buffer_->InsertPacket(&packet_);
 
   ASSERT_EQ(1u, frames_from_callback_.size());
-  EXPECT_EQ(kVideoFrameKey, frames_from_callback_[kSeqNum]->frame_type());
+  EXPECT_EQ(VideoFrameType::kVideoFrameKey,
+            frames_from_callback_[kSeqNum]->frame_type());
 }
 
 TEST_F(TestPacketBufferH264IdrIsKeyframe, SpsPpsIdrIsKeyframe) {
@@ -936,7 +937,8 @@
   packet_buffer_->InsertPacket(&packet_);
 
   ASSERT_EQ(1u, frames_from_callback_.size());
-  EXPECT_EQ(kVideoFrameKey, frames_from_callback_[kSeqNum]->frame_type());
+  EXPECT_EQ(VideoFrameType::kVideoFrameKey,
+            frames_from_callback_[kSeqNum]->frame_type());
 }
 
 class TestPacketBufferH264SpsPpsIdrIsKeyframe
@@ -955,7 +957,8 @@
   packet_buffer_->InsertPacket(&packet_);
 
   ASSERT_EQ(1u, frames_from_callback_.size());
-  EXPECT_EQ(kVideoFrameDelta, frames_from_callback_[5]->frame_type());
+  EXPECT_EQ(VideoFrameType::kVideoFrameDelta,
+            frames_from_callback_[5]->frame_type());
 }
 
 TEST_F(TestPacketBufferH264SpsPpsIdrIsKeyframe, SpsPpsIsNotKeyframe) {
@@ -968,7 +971,8 @@
   packet_buffer_->InsertPacket(&packet_);
 
   ASSERT_EQ(1u, frames_from_callback_.size());
-  EXPECT_EQ(kVideoFrameDelta, frames_from_callback_[kSeqNum]->frame_type());
+  EXPECT_EQ(VideoFrameType::kVideoFrameDelta,
+            frames_from_callback_[kSeqNum]->frame_type());
 }
 
 TEST_F(TestPacketBufferH264SpsPpsIdrIsKeyframe, SpsPpsIdrIsKeyframe) {
@@ -982,7 +986,8 @@
   packet_buffer_->InsertPacket(&packet_);
 
   ASSERT_EQ(1u, frames_from_callback_.size());
-  EXPECT_EQ(kVideoFrameKey, frames_from_callback_[kSeqNum]->frame_type());
+  EXPECT_EQ(VideoFrameType::kVideoFrameKey,
+            frames_from_callback_[kSeqNum]->frame_type());
 }
 
 }  // namespace video_coding
diff --git a/modules/video_coding/video_receiver.cc b/modules/video_coding/video_receiver.cc
index 9c58f7b..db9ceb8 100644
--- a/modules/video_coding/video_receiver.cc
+++ b/modules/video_coding/video_receiver.cc
@@ -288,7 +288,7 @@
     if (drop_frames_until_keyframe_) {
       // Still getting delta frames, schedule another keyframe request as if
       // decode failed.
-      if (frame->FrameType() != kVideoFrameKey) {
+      if (frame->FrameType() != VideoFrameType::kVideoFrameKey) {
         drop_frame = true;
         _scheduleKeyRequest = true;
         // TODO(tommi): Consider if we could instead post a task to the module
@@ -384,7 +384,7 @@
                                       size_t payloadLength,
                                       const WebRtcRTPHeader& rtpInfo) {
   RTC_DCHECK_RUN_ON(&module_thread_checker_);
-  if (rtpInfo.frameType == kVideoFrameKey) {
+  if (rtpInfo.frameType == VideoFrameType::kVideoFrameKey) {
     TRACE_EVENT1("webrtc", "VCM::PacketKeyFrame", "seqnum",
                  rtpInfo.header.sequenceNumber);
   }
diff --git a/modules/video_coding/video_receiver_unittest.cc b/modules/video_coding/video_receiver_unittest.cc
index 0d26fc5..780779b 100644
--- a/modules/video_coding/video_receiver_unittest.cc
+++ b/modules/video_coding/video_receiver_unittest.cc
@@ -58,7 +58,7 @@
 
   WebRtcRTPHeader GetDefaultVp8Header() const {
     WebRtcRTPHeader header = {};
-    header.frameType = kEmptyFrame;
+    header.frameType = VideoFrameType::kEmptyFrame;
     header.header.markerBit = false;
     header.header.payloadType = kUnusedPayloadType;
     header.header.ssrc = 1;
@@ -122,14 +122,14 @@
   header.video_header().video_type_header.emplace<RTPVideoHeaderVP8>();
 
   // Insert one video frame to get one frame decoded.
-  header.frameType = kVideoFrameKey;
+  header.frameType = VideoFrameType::kVideoFrameKey;
   header.video_header().is_first_packet_in_frame = true;
   header.header.markerBit = true;
   InsertAndVerifyDecodableFrame(kPayload, kFrameSize, &header);
 
   clock_.AdvanceTimeMilliseconds(33);
   header.header.timestamp += 3000;
-  header.frameType = kEmptyFrame;
+  header.frameType = VideoFrameType::kEmptyFrame;
   header.video_header().is_first_packet_in_frame = false;
   header.header.markerBit = false;
   // Insert padding frames.
@@ -172,9 +172,9 @@
     // Insert 2 video frames.
     for (int j = 0; j < 2; ++j) {
       if (i == 0 && j == 0)  // First frame should be a key frame.
-        header.frameType = kVideoFrameKey;
+        header.frameType = VideoFrameType::kVideoFrameKey;
       else
-        header.frameType = kVideoFrameDelta;
+        header.frameType = VideoFrameType::kVideoFrameDelta;
       header.video_header().is_first_packet_in_frame = true;
       header.header.markerBit = true;
       InsertAndVerifyDecodableFrame(kPayload, kFrameSize, &header);
@@ -183,7 +183,7 @@
     }
 
     // Insert 2 padding only frames.
-    header.frameType = kEmptyFrame;
+    header.frameType = VideoFrameType::kEmptyFrame;
     header.video_header().is_first_packet_in_frame = false;
     header.header.markerBit = false;
     for (int j = 0; j < 2; ++j) {