Stop using LOG macros in favor of RTC_ prefixed macros.
This CL has been generated with the following script:
for m in PLOG \
LOG_TAG \
LOG_GLEM \
LOG_GLE_EX \
LOG_GLE \
LAST_SYSTEM_ERROR \
LOG_ERRNO_EX \
LOG_ERRNO \
LOG_ERR_EX \
LOG_ERR \
LOG_V \
LOG_F \
LOG_T_F \
LOG_E \
LOG_T \
LOG_CHECK_LEVEL_V \
LOG_CHECK_LEVEL \
LOG
do
git grep -l $m | xargs sed -i "s,\b$m\b,RTC_$m,g"
done
git checkout rtc_base/logging.h
git cl format
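To illustrate the effect of the script on a typical call site (taken from the
RTCVideoDecoderH264.mm hunk below), a line such as

  LOG(LS_ERROR) << "Failed to decode frame. Status: " << status;

becomes

  RTC_LOG(LS_ERROR) << "Failed to decode frame. Status: " << status;

The \b word boundaries in the sed expression ensure only whole-word uses of each
macro are rewritten, so already-prefixed RTC_LOG calls and identifiers that merely
contain LOG are left alone. rtc_base/logging.h, which defines these macros, is
checked out afterwards so the macro definitions themselves are not renamed, and
git cl format re-wraps any lines whose length changed after the rename.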
Bug: webrtc:8452
Change-Id: I1a53ef3e0a5ef6e244e62b2e012b864914784600
Reviewed-on: https://webrtc-review.googlesource.com/21325
Reviewed-by: Niels Moller <nisse@webrtc.org>
Reviewed-by: Karl Wiberg <kwiberg@webrtc.org>
Commit-Queue: Mirko Bonadei <mbonadei@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#20617}
diff --git a/sdk/objc/Framework/Classes/VideoToolbox/RTCVideoDecoderH264.mm b/sdk/objc/Framework/Classes/VideoToolbox/RTCVideoDecoderH264.mm
index 4cf48bd..261b945 100644
--- a/sdk/objc/Framework/Classes/VideoToolbox/RTCVideoDecoderH264.mm
+++ b/sdk/objc/Framework/Classes/VideoToolbox/RTCVideoDecoderH264.mm
@@ -48,7 +48,7 @@
std::unique_ptr<RTCFrameDecodeParams> decodeParams(
reinterpret_cast<RTCFrameDecodeParams *>(params));
if (status != noErr) {
- LOG(LS_ERROR) << "Failed to decode frame. Status: " << status;
+ RTC_LOG(LS_ERROR) << "Failed to decode frame. Status: " << status;
return;
}
// TODO(tkchin): Handle CVO properly.
@@ -126,7 +126,7 @@
// This can happen after backgrounding. We need to wait for the next
// sps/pps before we can resume so we request a keyframe by returning an
// error.
- LOG(LS_WARNING) << "Missing video format. Frame with sps/pps required.";
+ RTC_LOG(LS_WARNING) << "Missing video format. Frame with sps/pps required.";
return WEBRTC_VIDEO_CODEC_ERROR;
}
CMSampleBufferRef sampleBuffer = nullptr;
@@ -153,7 +153,7 @@
#endif
CFRelease(sampleBuffer);
if (status != noErr) {
- LOG(LS_ERROR) << "Failed to decode frame with code: " << status;
+ RTC_LOG(LS_ERROR) << "Failed to decode frame with code: " << status;
return WEBRTC_VIDEO_CODEC_ERROR;
}
return WEBRTC_VIDEO_CODEC_OK;
diff --git a/sdk/objc/Framework/Classes/VideoToolbox/RTCVideoEncoderH264.mm b/sdk/objc/Framework/Classes/VideoToolbox/RTCVideoEncoderH264.mm
index 3eca55b..a72ce09 100644
--- a/sdk/objc/Framework/Classes/VideoToolbox/RTCVideoEncoderH264.mm
+++ b/sdk/objc/Framework/Classes/VideoToolbox/RTCVideoEncoderH264.mm
@@ -100,7 +100,7 @@
CVReturn cvRet = CVPixelBufferLockBaseAddress(pixelBuffer, 0);
if (cvRet != kCVReturnSuccess) {
- LOG(LS_ERROR) << "Failed to lock base address: " << cvRet;
+ RTC_LOG(LS_ERROR) << "Failed to lock base address: " << cvRet;
return false;
}
uint8_t *dstY = reinterpret_cast<uint8_t *>(CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 0));
@@ -122,7 +122,7 @@
frameBuffer.height);
CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);
if (ret) {
- LOG(LS_ERROR) << "Error converting I420 VideoFrame to NV12 :" << ret;
+ RTC_LOG(LS_ERROR) << "Error converting I420 VideoFrame to NV12 :" << ret;
return false;
}
return true;
@@ -130,13 +130,13 @@
CVPixelBufferRef CreatePixelBuffer(CVPixelBufferPoolRef pixel_buffer_pool) {
if (!pixel_buffer_pool) {
- LOG(LS_ERROR) << "Failed to get pixel buffer pool.";
+ RTC_LOG(LS_ERROR) << "Failed to get pixel buffer pool.";
return nullptr;
}
CVPixelBufferRef pixel_buffer;
CVReturn ret = CVPixelBufferPoolCreatePixelBuffer(nullptr, pixel_buffer_pool, &pixel_buffer);
if (ret != kCVReturnSuccess) {
- LOG(LS_ERROR) << "Failed to create pixel buffer: " << ret;
+ RTC_LOG(LS_ERROR) << "Failed to create pixel buffer: " << ret;
// We probably want to drop frames here, since failure probably means
// that the pool is empty.
return nullptr;
@@ -306,7 +306,7 @@
webrtc::Clock::GetRealTimeClock(), .5, .95));
_packetizationMode = RTCH264PacketizationModeNonInterleaved;
_profile = ExtractProfile([codecInfo nativeSdpVideoFormat]);
- LOG(LS_INFO) << "Using profile " << CFStringToString(_profile);
+ RTC_LOG(LS_INFO) << "Using profile " << CFStringToString(_profile);
RTC_CHECK([codecInfo.name isEqualToString:kRTCVideoCodecH264Name]);
#if defined(WEBRTC_IOS)
@@ -405,7 +405,7 @@
}
RTC_DCHECK(pixelBuffer);
if (!CopyVideoFrameToNV12PixelBuffer([frame.buffer toI420], pixelBuffer)) {
- LOG(LS_ERROR) << "Failed to copy frame data.";
+ RTC_LOG(LS_ERROR) << "Failed to copy frame data.";
CVBufferRelease(pixelBuffer);
return WEBRTC_VIDEO_CODEC_ERROR;
}
@@ -456,7 +456,7 @@
CVBufferRelease(pixelBuffer);
}
if (status != noErr) {
- LOG(LS_ERROR) << "Failed to encode frame with code: " << status;
+ RTC_LOG(LS_ERROR) << "Failed to encode frame with code: " << status;
return WEBRTC_VIDEO_CODEC_ERROR;
}
return WEBRTC_VIDEO_CODEC_OK;
@@ -496,7 +496,7 @@
// Resetting the session when this happens fixes the issue.
// In addition we request a keyframe so video can recover quickly.
resetCompressionSession = YES;
- LOG(LS_INFO) << "Resetting compression session due to invalid pool.";
+ RTC_LOG(LS_INFO) << "Resetting compression session due to invalid pool.";
}
#endif
@@ -523,7 +523,7 @@
if (![compressionSessionPixelFormats
containsObject:[NSNumber numberWithLong:framePixelFormat]]) {
resetCompressionSession = YES;
- LOG(LS_INFO) << "Resetting compression session due to non-matching pixel format.";
+ RTC_LOG(LS_INFO) << "Resetting compression session due to non-matching pixel format.";
}
}
@@ -591,7 +591,7 @@
encoder_specs = nullptr;
}
if (status != noErr) {
- LOG(LS_ERROR) << "Failed to create compression session: " << status;
+ RTC_LOG(LS_ERROR) << "Failed to create compression session: " << status;
return WEBRTC_VIDEO_CODEC_ERROR;
}
#if defined(WEBRTC_MAC) && !defined(WEBRTC_IOS)
@@ -601,9 +601,9 @@
nullptr,
&hwaccl_enabled);
if (status == noErr && (CFBooleanGetValue(hwaccl_enabled))) {
- LOG(LS_INFO) << "Compression session created with hw accl enabled";
+ RTC_LOG(LS_INFO) << "Compression session created with hw accl enabled";
} else {
- LOG(LS_INFO) << "Compression session created with hw accl disabled";
+ RTC_LOG(LS_INFO) << "Compression session created with hw accl disabled";
}
#endif
[self configureCompressionSession];
@@ -674,7 +674,7 @@
CFRelease(dataRateLimits);
}
if (status != noErr) {
- LOG(LS_ERROR) << "Failed to set data rate limit";
+ RTC_LOG(LS_ERROR) << "Failed to set data rate limit";
}
_encoderBitrateBps = bitrateBps;
@@ -691,11 +691,11 @@
timestamp:(uint32_t)timestamp
rotation:(RTCVideoRotation)rotation {
if (status != noErr) {
- LOG(LS_ERROR) << "H264 encode failed.";
+ RTC_LOG(LS_ERROR) << "H264 encode failed.";
return;
}
if (infoFlags & kVTEncodeInfo_FrameDropped) {
- LOG(LS_INFO) << "H264 encode dropped frame.";
+ RTC_LOG(LS_INFO) << "H264 encode dropped frame.";
return;
}
@@ -708,7 +708,7 @@
}
if (isKeyframe) {
- LOG(LS_INFO) << "Generated keyframe";
+ RTC_LOG(LS_INFO) << "Generated keyframe";
}
// Convert the sample buffer into a buffer suitable for RTP packetization.
@@ -745,7 +745,7 @@
BOOL res = _callback(frame, codecSpecificInfo, header);
if (!res) {
- LOG(LS_ERROR) << "Encode callback failed";
+ RTC_LOG(LS_ERROR) << "Encode callback failed";
return;
}
_bitrateAdjuster->Update(frame.buffer.length);
diff --git a/sdk/objc/Framework/Classes/VideoToolbox/helpers.cc b/sdk/objc/Framework/Classes/VideoToolbox/helpers.cc
index 1e4a53f..ac957f1 100644
--- a/sdk/objc/Framework/Classes/VideoToolbox/helpers.cc
+++ b/sdk/objc/Framework/Classes/VideoToolbox/helpers.cc
@@ -44,8 +44,8 @@
CFRelease(cfNum);
if (status != noErr) {
std::string key_string = CFStringToString(key);
- LOG(LS_ERROR) << "VTSessionSetProperty failed to set: " << key_string
- << " to " << value << ": " << status;
+ RTC_LOG(LS_ERROR) << "VTSessionSetProperty failed to set: " << key_string
+ << " to " << value << ": " << status;
}
}
@@ -60,8 +60,8 @@
CFRelease(cfNum);
if (status != noErr) {
std::string key_string = CFStringToString(key);
- LOG(LS_ERROR) << "VTSessionSetProperty failed to set: " << key_string
- << " to " << value << ": " << status;
+ RTC_LOG(LS_ERROR) << "VTSessionSetProperty failed to set: " << key_string
+ << " to " << value << ": " << status;
}
}
@@ -71,8 +71,8 @@
OSStatus status = VTSessionSetProperty(session, key, cf_bool);
if (status != noErr) {
std::string key_string = CFStringToString(key);
- LOG(LS_ERROR) << "VTSessionSetProperty failed to set: " << key_string
- << " to " << value << ": " << status;
+ RTC_LOG(LS_ERROR) << "VTSessionSetProperty failed to set: " << key_string
+ << " to " << value << ": " << status;
}
}
@@ -84,7 +84,7 @@
if (status != noErr) {
std::string key_string = CFStringToString(key);
std::string val_string = CFStringToString(value);
- LOG(LS_ERROR) << "VTSessionSetProperty failed to set: " << key_string
- << " to " << val_string << ": " << status;
+ RTC_LOG(LS_ERROR) << "VTSessionSetProperty failed to set: " << key_string
+ << " to " << val_string << ": " << status;
}
}
diff --git a/sdk/objc/Framework/Classes/VideoToolbox/nalu_rewriter.cc b/sdk/objc/Framework/Classes/VideoToolbox/nalu_rewriter.cc
index 1ba18e8..f6ee106 100644
--- a/sdk/objc/Framework/Classes/VideoToolbox/nalu_rewriter.cc
+++ b/sdk/objc/Framework/Classes/VideoToolbox/nalu_rewriter.cc
@@ -42,7 +42,7 @@
CMVideoFormatDescriptionRef description =
CMSampleBufferGetFormatDescription(avcc_sample_buffer);
if (description == nullptr) {
- LOG(LS_ERROR) << "Failed to get sample buffer's description.";
+ RTC_LOG(LS_ERROR) << "Failed to get sample buffer's description.";
return false;
}
@@ -52,7 +52,7 @@
OSStatus status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(
description, 0, nullptr, nullptr, &param_set_count, &nalu_header_size);
if (status != noErr) {
- LOG(LS_ERROR) << "Failed to get parameter set.";
+ RTC_LOG(LS_ERROR) << "Failed to get parameter set.";
return false;
}
RTC_CHECK_EQ(nalu_header_size, kAvccHeaderByteSize);
@@ -73,7 +73,7 @@
status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(
description, i, &param_set, &param_set_size, nullptr, nullptr);
if (status != noErr) {
- LOG(LS_ERROR) << "Failed to get parameter set.";
+ RTC_LOG(LS_ERROR) << "Failed to get parameter set.";
return false;
}
// Update buffer.
@@ -91,7 +91,7 @@
CMBlockBufferRef block_buffer =
CMSampleBufferGetDataBuffer(avcc_sample_buffer);
if (block_buffer == nullptr) {
- LOG(LS_ERROR) << "Failed to get sample buffer's block buffer.";
+ RTC_LOG(LS_ERROR) << "Failed to get sample buffer's block buffer.";
return false;
}
CMBlockBufferRef contiguous_buffer = nullptr;
@@ -100,8 +100,8 @@
status = CMBlockBufferCreateContiguous(
nullptr, block_buffer, nullptr, nullptr, 0, 0, 0, &contiguous_buffer);
if (status != noErr) {
- LOG(LS_ERROR) << "Failed to flatten non-contiguous block buffer: "
- << status;
+ RTC_LOG(LS_ERROR) << "Failed to flatten non-contiguous block buffer: "
+ << status;
return false;
}
} else {
@@ -117,7 +117,7 @@
status = CMBlockBufferGetDataPointer(contiguous_buffer, 0, nullptr, nullptr,
&data_ptr);
if (status != noErr) {
- LOG(LS_ERROR) << "Failed to get block buffer data.";
+ RTC_LOG(LS_ERROR) << "Failed to get block buffer data.";
CFRelease(contiguous_buffer);
return false;
}
@@ -173,11 +173,11 @@
const uint8_t* data = nullptr;
size_t data_len = 0;
if (!reader.ReadNalu(&data, &data_len)) {
- LOG(LS_ERROR) << "Failed to read SPS";
+ RTC_LOG(LS_ERROR) << "Failed to read SPS";
return false;
}
if (!reader.ReadNalu(&data, &data_len)) {
- LOG(LS_ERROR) << "Failed to read PPS";
+ RTC_LOG(LS_ERROR) << "Failed to read PPS";
return false;
}
}
@@ -190,7 +190,7 @@
reader.BytesRemaining(), kCMBlockBufferAssureMemoryNowFlag,
&block_buffer);
if (status != kCMBlockBufferNoErr) {
- LOG(LS_ERROR) << "Failed to create block buffer.";
+ RTC_LOG(LS_ERROR) << "Failed to create block buffer.";
return false;
}
@@ -200,8 +200,8 @@
status = CMBlockBufferCreateContiguous(
nullptr, block_buffer, nullptr, nullptr, 0, 0, 0, &contiguous_buffer);
if (status != noErr) {
- LOG(LS_ERROR) << "Failed to flatten non-contiguous block buffer: "
- << status;
+ RTC_LOG(LS_ERROR) << "Failed to flatten non-contiguous block buffer: "
+ << status;
CFRelease(block_buffer);
return false;
}
@@ -216,7 +216,7 @@
status = CMBlockBufferGetDataPointer(contiguous_buffer, 0, nullptr,
&block_buffer_size, &data_ptr);
if (status != kCMBlockBufferNoErr) {
- LOG(LS_ERROR) << "Failed to get block buffer data pointer.";
+ RTC_LOG(LS_ERROR) << "Failed to get block buffer data pointer.";
CFRelease(contiguous_buffer);
return false;
}
@@ -238,7 +238,7 @@
nullptr, video_format, 1, 0, nullptr, 0,
nullptr, out_sample_buffer);
if (status != noErr) {
- LOG(LS_ERROR) << "Failed to create sample buffer.";
+ RTC_LOG(LS_ERROR) << "Failed to create sample buffer.";
CFRelease(contiguous_buffer);
return false;
}
@@ -284,23 +284,23 @@
// Skip AUD.
if (ParseNaluType(annexb_buffer[4]) == kAud) {
if (!reader.ReadNalu(&param_set_ptrs[0], &param_set_sizes[0])) {
- LOG(LS_ERROR) << "Failed to read AUD";
+ RTC_LOG(LS_ERROR) << "Failed to read AUD";
return nullptr;
}
}
if (!reader.ReadNalu(&param_set_ptrs[0], &param_set_sizes[0])) {
- LOG(LS_ERROR) << "Failed to read SPS";
+ RTC_LOG(LS_ERROR) << "Failed to read SPS";
return nullptr;
}
if (!reader.ReadNalu(&param_set_ptrs[1], &param_set_sizes[1])) {
- LOG(LS_ERROR) << "Failed to read PPS";
+ RTC_LOG(LS_ERROR) << "Failed to read PPS";
return nullptr;
}
status = CMVideoFormatDescriptionCreateFromH264ParameterSets(
kCFAllocatorDefault, 2, param_set_ptrs, param_set_sizes, 4,
&description);
if (status != noErr) {
- LOG(LS_ERROR) << "Failed to create video format description.";
+ RTC_LOG(LS_ERROR) << "Failed to create video format description.";
return nullptr;
}
return description;