Revert "Add SetAudioPlayout and SetAudioRecording methods to the PeerConnection API"
This reverts commit 90bace095806a635411edd40fb8490a144e59e63.
Reason for revert: The original problem addressed by this CL has been fixed in https://webrtc-review.googlesource.com/17540, but it sounds like this CL also adds voice_engine as a dependency of pc:peerconnection. We should investigate, since we can probably avoid that dependency.
Original change's description:
> Add SetAudioPlayout and SetAudioRecording methods to the PeerConnection API
>
> (this CL is based on the work by Taylor and Steve in https://webrtc-review.googlesource.com/c/src/+/10201)
>
> The SetAudioPlayout method lets applications disable audio playout while
> still processing incoming audio data and generating statistics on the
> received audio.
>
> This may be useful if the application wants to set up media flows as
> soon as possible, but isn't ready to play audio yet. Currently, native
> applications don't have any API point to control this, unless they
> completely implement their own AudioDeviceModule.
>
> The SetAudioRecording method works in a similar fashion but for the recorded
> audio. One difference is that calling SetAudioRecording(false) does not
> keep any audio processing alive.
>
> TBR=solenberg
>
> Bug: webrtc:7313
> Change-Id: I0aa075f6bfef9818f1080f85a8ff7842fb0750aa
> Reviewed-on: https://webrtc-review.googlesource.com/16180
> Reviewed-by: Henrik Andreassson <henrika@webrtc.org>
> Reviewed-by: Karl Wiberg <kwiberg@webrtc.org>
> Commit-Queue: Henrik Andreassson <henrika@webrtc.org>
> Cr-Commit-Position: refs/heads/master@{#20499}
TBR=solenberg@webrtc.org,henrika@webrtc.org,kwiberg@webrtc.org
Change-Id: I8431227e21dbffcfed3dd0e6bd7ce26c4ce09394
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: webrtc:7313
Reviewed-on: https://webrtc-review.googlesource.com/17701
Reviewed-by: Mirko Bonadei <mbonadei@webrtc.org>
Commit-Queue: Mirko Bonadei <mbonadei@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#20512}
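For context, the reverted methods were plain virtual members of webrtc::PeerConnectionInterface (see the api/peerconnectioninterface.h hunk below). A minimal usage sketch follows, assuming an already-created PeerConnectionInterface; the function names MuteUntilReady and OnApplicationReadyForAudio are illustrative only and are not part of the CL:

#include "api/peerconnectioninterface.h"

// Keep media flows established but silence the device until the app is ready.
void MuteUntilReady(
    rtc::scoped_refptr<webrtc::PeerConnectionInterface> pc) {
  // Incoming audio is still decoded and processed (stats keep updating),
  // but nothing is played out on the audio device.
  pc->SetAudioPlayout(false);
  // Hold off on capturing local audio as well; unlike playout, this does
  // not keep any audio processing alive.
  pc->SetAudioRecording(false);
}

// Later, e.g. once a local ringtone has finished playing.
void OnApplicationReadyForAudio(
    rtc::scoped_refptr<webrtc::PeerConnectionInterface> pc) {
  pc->SetAudioPlayout(true);
  pc->SetAudioRecording(true);
}

The reverted PeerConnection implementation marshalled these calls onto the worker thread itself (see the pc/peerconnection.cc hunk below), so the caller did not need to be on a particular thread.
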
diff --git a/api/peerconnectioninterface.h b/api/peerconnectioninterface.h
index e82fa75..5b9df68 100644
--- a/api/peerconnectioninterface.h
+++ b/api/peerconnectioninterface.h
@@ -788,21 +788,6 @@
std::unique_ptr<rtc::BitrateAllocationStrategy>
bitrate_allocation_strategy) {}
- // Enable/disable playout of received audio streams. Enabled by default. Note
- // that even if playout is enabled, streams will only be played out if the
- // appropriate SDP is also applied. Setting |playout| to false will stop
- // playout of the underlying audio device but starts a task which will poll
- // for audio data every 10ms to ensure that audio processing happens and the
- // audio statistics are updated.
- // TODO(henrika): deprecate and remove this.
- virtual void SetAudioPlayout(bool playout) {}
-
- // Enable/disable recording of transmitted audio streams. Enabled by default.
- // Note that even if recording is enabled, streams will only be recorded if
- // the appropriate SDP is also applied.
- // TODO(henrika): deprecate and remove this.
- virtual void SetAudioRecording(bool recording) {}
-
// Returns the current SignalingState.
virtual SignalingState signaling_state() = 0;
diff --git a/api/peerconnectionproxy.h b/api/peerconnectionproxy.h
index 78fe402..a8ea3fa 100644
--- a/api/peerconnectionproxy.h
+++ b/api/peerconnectionproxy.h
@@ -100,8 +100,6 @@
PROXY_METHOD1(bool,
RemoveIceCandidates,
const std::vector<cricket::Candidate>&);
- PROXY_METHOD1(void, SetAudioPlayout, bool)
- PROXY_METHOD1(void, SetAudioRecording, bool)
PROXY_METHOD1(void, RegisterUMAObserver, UMAObserver*)
PROXY_METHOD1(RTCError, SetBitrate, const BitrateParameters&);
PROXY_METHOD1(void,
diff --git a/audio/BUILD.gn b/audio/BUILD.gn
index 80545ca..a9ca0d5 100644
--- a/audio/BUILD.gn
+++ b/audio/BUILD.gn
@@ -23,8 +23,6 @@
"audio_transport_proxy.cc",
"audio_transport_proxy.h",
"conversion.h",
- "null_audio_poller.cc",
- "null_audio_poller.h",
"scoped_voe_interface.h",
"time_interval.cc",
"time_interval.h",
@@ -54,7 +52,6 @@
"../modules/pacing:pacing",
"../modules/remote_bitrate_estimator:remote_bitrate_estimator",
"../modules/rtp_rtcp:rtp_rtcp",
- "../rtc_base:rtc_base",
"../rtc_base:rtc_base_approved",
"../rtc_base:rtc_task_queue",
"../system_wrappers",
diff --git a/audio/audio_state.cc b/audio/audio_state.cc
index 9b5f74f..2a84f5c 100644
--- a/audio/audio_state.cc
+++ b/audio/audio_state.cc
@@ -12,11 +12,8 @@
#include "modules/audio_device/include/audio_device.h"
#include "rtc_base/atomicops.h"
-#include "rtc_base/bind.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
-#include "rtc_base/ptr_util.h"
-#include "rtc_base/thread.h"
#include "voice_engine/transmit_mixer.h"
namespace webrtc {
@@ -62,40 +59,6 @@
return transmit_mixer->typing_noise_detected();
}
-void AudioState::SetPlayout(bool enabled) {
- LOG(INFO) << "SetPlayout(" << enabled << ")";
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
- const bool currently_enabled = (null_audio_poller_ == nullptr);
- if (enabled == currently_enabled) {
- return;
- }
- VoEBase* const voe = VoEBase::GetInterface(voice_engine());
- RTC_DCHECK(voe);
- if (enabled) {
- null_audio_poller_.reset();
- }
- // Will stop/start playout of the underlying device, if necessary, and
- // remember the setting for when it receives subsequent calls of
- // StartPlayout.
- voe->SetPlayout(enabled);
- if (!enabled) {
- null_audio_poller_ =
- rtc::MakeUnique<NullAudioPoller>(&audio_transport_proxy_);
- }
-}
-
-void AudioState::SetRecording(bool enabled) {
- LOG(INFO) << "SetRecording(" << enabled << ")";
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
- // TODO(henrika): keep track of state as in SetPlayout().
- VoEBase* const voe = VoEBase::GetInterface(voice_engine());
- RTC_DCHECK(voe);
- // Will stop/start recording of the underlying device, if necessary, and
- // remember the setting for when it receives subsequent calls of
- // StartSend.
- voe->SetRecording(enabled);
-}
-
// Reference count; implementation copied from rtc::RefCountedObject.
void AudioState::AddRef() const {
rtc::AtomicOps::Increment(&ref_count_);
diff --git a/audio/audio_state.h b/audio/audio_state.h
index 023c7b1..86d60b6 100644
--- a/audio/audio_state.h
+++ b/audio/audio_state.h
@@ -11,10 +11,7 @@
#ifndef AUDIO_AUDIO_STATE_H_
#define AUDIO_AUDIO_STATE_H_
-#include <memory>
-
#include "audio/audio_transport_proxy.h"
-#include "audio/null_audio_poller.h"
#include "audio/scoped_voe_interface.h"
#include "call/audio_state.h"
#include "rtc_base/constructormagic.h"
@@ -36,9 +33,6 @@
return config_.audio_processing.get();
}
- void SetPlayout(bool enabled) override;
- void SetRecording(bool enabled) override;
-
VoiceEngine* voice_engine();
rtc::scoped_refptr<AudioMixer> mixer();
bool typing_noise_detected() const;
@@ -63,11 +57,6 @@
// recorded audio to the VoE AudioTransport.
AudioTransportProxy audio_transport_proxy_;
- // Null audio poller is used to continue polling the audio streams if audio
- // playout is disabled so that audio processing still happens and the audio
- // stats are still updated.
- std::unique_ptr<NullAudioPoller> null_audio_poller_;
-
RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(AudioState);
};
} // namespace internal
diff --git a/audio/null_audio_poller.cc b/audio/null_audio_poller.cc
deleted file mode 100644
index c22b3d8..0000000
--- a/audio/null_audio_poller.cc
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "audio/null_audio_poller.h"
-#include "rtc_base/logging.h"
-#include "rtc_base/thread.h"
-
-namespace webrtc {
-namespace internal {
-
-namespace {
-
-constexpr int64_t kPollDelayMs = 10; // WebRTC uses 10ms by default
-
-constexpr size_t kNumChannels = 1;
-constexpr uint32_t kSamplesPerSecond = 48000; // 48kHz
-constexpr size_t kNumSamples = kSamplesPerSecond / 100; // 10ms of samples
-
-} // namespace
-
-NullAudioPoller::NullAudioPoller(AudioTransport* audio_transport)
- : audio_transport_(audio_transport),
- reschedule_at_(rtc::TimeMillis() + kPollDelayMs) {
- RTC_DCHECK(audio_transport);
- OnMessage(nullptr); // Start the poll loop.
-}
-
-NullAudioPoller::~NullAudioPoller() {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
- rtc::Thread::Current()->Clear(this);
-}
-
-void NullAudioPoller::OnMessage(rtc::Message* msg) {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
-
- // Buffer to hold the audio samples.
- int16_t buffer[kNumSamples * kNumChannels];
- // Output variables from |NeedMorePlayData|.
- size_t n_samples;
- int64_t elapsed_time_ms;
- int64_t ntp_time_ms;
- audio_transport_->NeedMorePlayData(kNumSamples, sizeof(int16_t), kNumChannels,
- kSamplesPerSecond, buffer, n_samples,
- &elapsed_time_ms, &ntp_time_ms);
-
- // Reschedule the next poll iteration. If, for some reason, the given
- // reschedule time has already passed, reschedule as soon as possible.
- int64_t now = rtc::TimeMillis();
- if (reschedule_at_ < now) {
- reschedule_at_ = now;
- }
- rtc::Thread::Current()->PostAt(RTC_FROM_HERE, reschedule_at_, this, 0);
-
- // Loop after next will be kPollDelayMs later.
- reschedule_at_ += kPollDelayMs;
-}
-
-} // namespace internal
-} // namespace webrtc
diff --git a/audio/null_audio_poller.h b/audio/null_audio_poller.h
deleted file mode 100644
index 27c7e99..0000000
--- a/audio/null_audio_poller.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef AUDIO_NULL_AUDIO_POLLER_H_
-#define AUDIO_NULL_AUDIO_POLLER_H_
-
-#include "modules/audio_device/include/audio_device_defines.h"
-#include "rtc_base/messagehandler.h"
-#include "rtc_base/thread_checker.h"
-
-namespace webrtc {
-namespace internal {
-
-class NullAudioPoller final : public rtc::MessageHandler {
- public:
- explicit NullAudioPoller(AudioTransport* audio_transport);
- ~NullAudioPoller();
-
- protected:
- void OnMessage(rtc::Message* msg) override;
-
- private:
- const rtc::ThreadChecker thread_checker_;
- AudioTransport* const audio_transport_;
- int64_t reschedule_at_;
-};
-
-} // namespace internal
-} // namespace webrtc
-
-#endif // AUDIO_NULL_AUDIO_POLLER_H_
diff --git a/call/audio_state.h b/call/audio_state.h
index ad411d1..7719388 100644
--- a/call/audio_state.h
+++ b/call/audio_state.h
@@ -44,17 +44,6 @@
virtual AudioProcessing* audio_processing() = 0;
- // Enable/disable playout of the audio channels. Enabled by default.
- // This will stop playout of the underlying audio device but start a task
- // which will poll for audio data every 10ms to ensure that audio processing
- // happens and the audio stats are updated.
- virtual void SetPlayout(bool enabled) = 0;
-
- // Enable/disable recording of the audio channels. Enabled by default.
- // This will stop recording of the underlying audio device and no audio
- // packets will be encoded or transmitted.
- virtual void SetRecording(bool enabled) = 0;
-
// TODO(solenberg): Replace scoped_refptr with shared_ptr once we can use it.
static rtc::scoped_refptr<AudioState> Create(
const AudioState::Config& config);
diff --git a/media/engine/fakewebrtcvoiceengine.h b/media/engine/fakewebrtcvoiceengine.h
index 55d3100..7e8e5c2 100644
--- a/media/engine/fakewebrtcvoiceengine.h
+++ b/media/engine/fakewebrtcvoiceengine.h
@@ -99,8 +99,6 @@
WEBRTC_STUB(StartSend, (int channel));
WEBRTC_STUB(StopPlayout, (int channel));
WEBRTC_STUB(StopSend, (int channel));
- WEBRTC_STUB(SetPlayout, (bool enable));
- WEBRTC_STUB(SetRecording, (bool enable));
size_t GetNetEqCapacity() const {
auto ch = channels_.find(last_channel_);
diff --git a/pc/BUILD.gn b/pc/BUILD.gn
index ac78dfe..116c1ac 100644
--- a/pc/BUILD.gn
+++ b/pc/BUILD.gn
@@ -183,7 +183,6 @@
"../rtc_base:rtc_base_approved",
"../stats",
"../system_wrappers:system_wrappers",
- "../voice_engine:voice_engine",
]
public_deps = [
diff --git a/pc/peerconnection.cc b/pc/peerconnection.cc
index 89452c0..03d34d0 100644
--- a/pc/peerconnection.cc
+++ b/pc/peerconnection.cc
@@ -1323,30 +1323,6 @@
call_->SetBitrateAllocationStrategy(std::move(bitrate_allocation_strategy));
}
-void PeerConnection::SetAudioPlayout(bool playout) {
- if (!worker_thread()->IsCurrent()) {
- worker_thread()->Invoke<void>(
- RTC_FROM_HERE,
- rtc::Bind(&PeerConnection::SetAudioPlayout, this, playout));
- return;
- }
- auto audio_state =
- factory_->channel_manager()->media_engine()->GetAudioState();
- audio_state->SetPlayout(playout);
-}
-
-void PeerConnection::SetAudioRecording(bool recording) {
- if (!worker_thread()->IsCurrent()) {
- worker_thread()->Invoke<void>(
- RTC_FROM_HERE,
- rtc::Bind(&PeerConnection::SetAudioRecording, this, recording));
- return;
- }
- auto audio_state =
- factory_->channel_manager()->media_engine()->GetAudioState();
- audio_state->SetRecording(recording);
-}
-
std::unique_ptr<rtc::SSLCertificate>
PeerConnection::GetRemoteAudioSSLCertificate() {
if (!session_) {
diff --git a/pc/peerconnection.h b/pc/peerconnection.h
index 97068b9..9163c36 100644
--- a/pc/peerconnection.h
+++ b/pc/peerconnection.h
@@ -143,9 +143,6 @@
std::unique_ptr<rtc::BitrateAllocationStrategy>
bitrate_allocation_strategy) override;
- void SetAudioPlayout(bool playout) override;
- void SetAudioRecording(bool recording) override;
-
RTC_DEPRECATED bool StartRtcEventLog(rtc::PlatformFile file,
int64_t max_size_bytes) override;
bool StartRtcEventLog(std::unique_ptr<RtcEventLogOutput> output) override;
diff --git a/pc/peerconnection_integrationtest.cc b/pc/peerconnection_integrationtest.cc
index 305ed94..3289ccb 100644
--- a/pc/peerconnection_integrationtest.cc
+++ b/pc/peerconnection_integrationtest.cc
@@ -3564,76 +3564,6 @@
kMaxWaitForFramesMs);
}
-// Test that SetAudioPlayout can be used to disable audio playout from the
-// start, then later enable it. This may be useful, for example, if the caller
-// needs to play a local ringtone until some event occurs, after which it
-// switches to playing the received audio.
-TEST_F(PeerConnectionIntegrationTest, DisableAndEnableAudioPlayout) {
- ASSERT_TRUE(CreatePeerConnectionWrappers());
- ConnectFakeSignaling();
-
- // Set up audio-only call where audio playout is disabled on caller's side.
- caller()->pc()->SetAudioPlayout(false);
- caller()->AddAudioOnlyMediaStream();
- callee()->AddAudioOnlyMediaStream();
- caller()->CreateAndSetAndSignalOffer();
- ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout);
-
- // Pump messages for a second.
- WAIT(false, 1000);
- // Since audio playout is disabled, the caller shouldn't have received
- // anything (at the playout level, at least).
- EXPECT_EQ(0, caller()->audio_frames_received());
- // As a sanity check, make sure the callee (for which playout isn't disabled)
- // did still see frames on its audio level.
- ASSERT_GT(callee()->audio_frames_received(), 0);
-
- // Enable playout again, and ensure audio starts flowing.
- caller()->pc()->SetAudioPlayout(true);
- ExpectNewFramesReceivedWithWait(kDefaultExpectedAudioFrameCount, 0,
- kDefaultExpectedAudioFrameCount, 0,
- kMaxWaitForFramesMs);
-}
-
-double GetAudioEnergyStat(PeerConnectionWrapper* pc) {
- auto report = pc->NewGetStats();
- auto track_stats_list =
- report->GetStatsOfType<webrtc::RTCMediaStreamTrackStats>();
- const webrtc::RTCMediaStreamTrackStats* remote_track_stats = nullptr;
- for (const auto* track_stats : track_stats_list) {
- if (track_stats->remote_source.is_defined() &&
- *track_stats->remote_source) {
- remote_track_stats = track_stats;
- break;
- }
- }
-
- if (!remote_track_stats->total_audio_energy.is_defined()) {
- return 0.0;
- }
- return *remote_track_stats->total_audio_energy;
-}
-
-// Test that if audio playout is disabled via the SetAudioPlayout() method, then
-// incoming audio is still processed and statistics are generated.
-TEST_F(PeerConnectionIntegrationTest,
- DisableAudioPlayoutStillGeneratesAudioStats) {
- ASSERT_TRUE(CreatePeerConnectionWrappers());
- ConnectFakeSignaling();
-
- // Set up audio-only call where playout is disabled but audio-processing is
- // still active.
- caller()->AddAudioOnlyMediaStream();
- callee()->AddAudioOnlyMediaStream();
- caller()->pc()->SetAudioPlayout(false);
-
- caller()->CreateAndSetAndSignalOffer();
- ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout);
-
- // Wait for the callee to receive audio stats.
- EXPECT_TRUE_WAIT(GetAudioEnergyStat(caller()) > 0, kMaxWaitForFramesMs);
-}
-
} // namespace
#endif // if !defined(THREAD_SANITIZER)
diff --git a/sdk/android/api/org/webrtc/PeerConnection.java b/sdk/android/api/org/webrtc/PeerConnection.java
index 5dd8832..66e8075 100644
--- a/sdk/android/api/org/webrtc/PeerConnection.java
+++ b/sdk/android/api/org/webrtc/PeerConnection.java
@@ -363,18 +363,6 @@
public native void setRemoteDescription(SdpObserver observer, SessionDescription sdp);
- // True if remote audio should be played out. Defaults to true.
- // Note that even if playout is enabled, streams will only be played out if
- // the appropriate SDP is also applied. The main purpose of this API is to
- // be able to control the exact time when audio playout starts.
- public native void setAudioPlayout(boolean playout);
-
- // True if local audio shall be recorded. Defaults to true.
- // Note that even if recording is enabled, streams will only be recorded if
- // the appropriate SDP is also applied. The main purpose of this API is to
- // be able to control the exact time when audio recording starts.
- public native void setAudioRecording(boolean recording);
-
public boolean setConfiguration(RTCConfiguration config) {
return nativeSetConfiguration(config, nativeObserver);
}
diff --git a/sdk/android/src/jni/pc/peerconnection_jni.cc b/sdk/android/src/jni/pc/peerconnection_jni.cc
index 3c6bf76..a542c28 100644
--- a/sdk/android/src/jni/pc/peerconnection_jni.cc
+++ b/sdk/android/src/jni/pc/peerconnection_jni.cc
@@ -166,22 +166,6 @@
observer, JavaToNativeSessionDescription(jni, j_sdp));
}
-JNI_FUNCTION_DECLARATION(void,
- PeerConnection_setAudioPlayout,
- JNIEnv* jni,
- jobject j_pc,
- jboolean playout) {
- ExtractNativePC(jni, j_pc)->SetAudioPlayout(playout);
-}
-
-JNI_FUNCTION_DECLARATION(void,
- PeerConnection_setAudioRecording,
- JNIEnv* jni,
- jobject j_pc,
- jboolean recording) {
- ExtractNativePC(jni, j_pc)->SetAudioRecording(recording);
-}
-
JNI_FUNCTION_DECLARATION(jboolean,
PeerConnection_nativeSetConfiguration,
JNIEnv* jni,
diff --git a/voice_engine/include/voe_base.h b/voice_engine/include/voe_base.h
index a62a2b4..94ac6ac 100644
--- a/voice_engine/include/voe_base.h
+++ b/voice_engine/include/voe_base.h
@@ -139,21 +139,6 @@
// Stops sending packets from a specified |channel|.
virtual int StopSend(int channel) = 0;
- // Enable or disable playout to the underlying device. Takes precedence over
- // StartPlayout. Though calls to StartPlayout are remembered; if
- // SetPlayout(true) is called after StartPlayout, playout will be started.
- //
- // By default, playout is enabled.
- virtual int SetPlayout(bool enabled) = 0;
-
- // Enable or disable recording (which drives sending of encoded audio packets)
- // from the underlying device. Takes precedence over StartSend. Though calls
- // to StartSend are remembered; if SetRecording(true) is called after
- // StartSend, recording will be started.
- //
- // By default, recording is enabled.
- virtual int SetRecording(bool enabled) = 0;
-
// TODO(xians): Make the interface pure virtual after libjingle
// implements the interface in its FakeWebRtcVoiceEngine.
virtual AudioTransport* audio_transport() { return NULL; }
diff --git a/voice_engine/voe_base_impl.cc b/voice_engine/voe_base_impl.cc
index 9e7a5f4..b14bf95 100644
--- a/voice_engine/voe_base_impl.cc
+++ b/voice_engine/voe_base_impl.cc
@@ -407,7 +407,7 @@
LOG_F(LS_ERROR) << "Failed to initialize playout";
return -1;
}
- if (playout_enabled_ && shared_->audio_device()->StartPlayout() != 0) {
+ if (shared_->audio_device()->StartPlayout() != 0) {
LOG_F(LS_ERROR) << "Failed to start playout";
return -1;
}
@@ -416,10 +416,7 @@
}
int32_t VoEBaseImpl::StopPlayout() {
- if (!playout_enabled_) {
- return 0;
- }
- // Stop audio-device playing if no channel is playing out.
+ // Stop audio-device playing if no channel is playing out
if (shared_->NumOfPlayingChannels() == 0) {
if (shared_->audio_device()->StopPlayout() != 0) {
LOG(LS_ERROR) << "StopPlayout() failed to stop playout";
@@ -430,12 +427,15 @@
}
int32_t VoEBaseImpl::StartSend() {
- if (!shared_->audio_device()->Recording()) {
+ if (!shared_->audio_device()->RecordingIsInitialized() &&
+ !shared_->audio_device()->Recording()) {
if (shared_->audio_device()->InitRecording() != 0) {
LOG_F(LS_ERROR) << "Failed to initialize recording";
return -1;
}
- if (recording_enabled_ && shared_->audio_device()->StartRecording() != 0) {
+ }
+ if (!shared_->audio_device()->Recording()) {
+ if (shared_->audio_device()->StartRecording() != 0) {
LOG_F(LS_ERROR) << "Failed to start recording";
return -1;
}
@@ -444,11 +444,8 @@
}
int32_t VoEBaseImpl::StopSend() {
- if (!recording_enabled_) {
- return 0;
- }
- // Stop audio-device recording if no channel is recording.
if (shared_->NumOfSendingChannels() == 0) {
+ // Stop audio-device recording if no channel is recording
if (shared_->audio_device()->StopRecording() != 0) {
LOG(LS_ERROR) << "StopSend() failed to stop recording";
return -1;
@@ -459,58 +456,6 @@
return 0;
}
-int32_t VoEBaseImpl::SetPlayout(bool enabled) {
- LOG(INFO) << "SetPlayout(" << enabled << ")";
- if (playout_enabled_ == enabled) {
- return 0;
- }
- playout_enabled_ = enabled;
- if (shared_->NumOfPlayingChannels() == 0) {
- // If there are no channels attempting to play out yet, there's nothing to
- // be done; we should be in a "not playing out" state either way.
- return 0;
- }
- int32_t ret;
- if (enabled) {
- ret = shared_->audio_device()->StartPlayout();
- if (ret != 0) {
- LOG(LS_ERROR) << "SetPlayout(true) failed to start playout";
- }
- } else {
- ret = shared_->audio_device()->StopPlayout();
- if (ret != 0) {
- LOG(LS_ERROR) << "SetPlayout(false) failed to stop playout";
- }
- }
- return ret;
-}
-
-int32_t VoEBaseImpl::SetRecording(bool enabled) {
- LOG(INFO) << "SetRecording(" << enabled << ")";
- if (recording_enabled_ == enabled) {
- return 0;
- }
- recording_enabled_ = enabled;
- if (shared_->NumOfSendingChannels() == 0) {
- // If there are no channels attempting to record yet, there's nothing to
- // be done; we should be in a "not recording" state either way.
- return 0;
- }
- int32_t ret;
- if (enabled) {
- ret = shared_->audio_device()->StartRecording();
- if (ret != 0) {
- LOG(LS_ERROR) << "SetRecording(true) failed to start recording";
- }
- } else {
- ret = shared_->audio_device()->StopRecording();
- if (ret != 0) {
- LOG(LS_ERROR) << "SetRecording(false) failed to stop recording";
- }
- }
- return ret;
-}
-
int32_t VoEBaseImpl::TerminateInternal() {
// Delete any remaining channel objects
shared_->channel_manager().DestroyAllChannels();
diff --git a/voice_engine/voe_base_impl.h b/voice_engine/voe_base_impl.h
index e647124..a3c4c1f 100644
--- a/voice_engine/voe_base_impl.h
+++ b/voice_engine/voe_base_impl.h
@@ -45,9 +45,6 @@
int StopPlayout(int channel) override;
int StopSend(int channel) override;
- int SetPlayout(bool enabled) override;
- int SetRecording(bool enabled) override;
-
AudioTransport* audio_transport() override { return this; }
// AudioTransport
@@ -106,8 +103,6 @@
AudioFrame audioFrame_;
voe::SharedData* shared_;
- bool playout_enabled_ = true;
- bool recording_enabled_ = true;
};
} // namespace webrtc