/*
 * libjingle
 * Copyright 2011 Google Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "talk/session/media/currentspeakermonitor.h"

#include "talk/base/logging.h"
#include "talk/session/media/call.h"

namespace cricket {

namespace {
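// kMaxAudioLevel is the level temporarily assigned to the current speaker
// while they pause between words, so a brief silence does not hand the floor
// to a quieter stream.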
const int kMaxAudioLevel = 9;
// To avoid overswitching, we disable switching for a period of time after a
// switch is done.
const int kDefaultMinTimeBetweenSwitches = 1000;
}  // namespace

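// The monitor does not take ownership of |audio_source_context| or |session|;
// it never deletes them and only listens to the context's signals between
// Start() and Stop().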
CurrentSpeakerMonitor::CurrentSpeakerMonitor(
    AudioSourceContext* audio_source_context, BaseSession* session)
    : started_(false),
      audio_source_context_(audio_source_context),
      session_(session),
      current_speaker_ssrc_(0),
      earliest_permitted_switch_time_(0),
      min_time_between_switches_(kDefaultMinTimeBetweenSwitches) {
}

CurrentSpeakerMonitor::~CurrentSpeakerMonitor() {
  Stop();
}

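// Connects this monitor to the audio source's signals.  Calling Start() again
// without an intervening Stop() is a no-op.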
void CurrentSpeakerMonitor::Start() {
  if (!started_) {
    audio_source_context_->SignalAudioMonitor.connect(
        this, &CurrentSpeakerMonitor::OnAudioMonitor);
    audio_source_context_->SignalMediaStreamsUpdate.connect(
        this, &CurrentSpeakerMonitor::OnMediaStreamsUpdate);
    audio_source_context_->SignalMediaStreamsReset.connect(
        this, &CurrentSpeakerMonitor::OnMediaStreamsReset);

    started_ = true;
  }
}

void CurrentSpeakerMonitor::Stop() {
  if (started_) {
    // Disconnect all of the signals connected in Start(), including
    // SignalMediaStreamsReset, so a later Start() does not connect it twice.
    audio_source_context_->SignalAudioMonitor.disconnect(this);
    audio_source_context_->SignalMediaStreamsUpdate.disconnect(this);
    audio_source_context_->SignalMediaStreamsReset.disconnect(this);

    started_ = false;
    ssrc_to_speaking_state_map_.clear();
    current_speaker_ssrc_ = 0;
    earliest_permitted_switch_time_ = 0;
  }
}

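// |min_time_between_switches| is expressed in the same units as
// talk_base::Time(), i.e. milliseconds.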
void CurrentSpeakerMonitor::set_min_time_between_switches(
    uint32 min_time_between_switches) {
  min_time_between_switches_ = min_time_between_switches;
}

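// Processes a new batch of audio levels: advances each known stream through
// the speaking-state machine below, picks the loudest stream by adjusted
// level, and fires SignalUpdate when the pick differs from the current
// speaker and the switch cooldown has expired.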
void CurrentSpeakerMonitor::OnAudioMonitor(
    AudioSourceContext* audio_source_context, const AudioInfo& info) {
  std::map<uint32, int> active_ssrc_to_level_map;
  cricket::AudioInfo::StreamList::const_iterator stream_list_it;
  for (stream_list_it = info.active_streams.begin();
       stream_list_it != info.active_streams.end(); ++stream_list_it) {
    uint32 ssrc = stream_list_it->first;
    active_ssrc_to_level_map[ssrc] = stream_list_it->second;

    // It's possible we haven't yet added this source to our map. If so,
    // add it now with a "not speaking" state.
    if (ssrc_to_speaking_state_map_.find(ssrc) ==
        ssrc_to_speaking_state_map_.end()) {
      ssrc_to_speaking_state_map_[ssrc] = SS_NOT_SPEAKING;
    }
  }

  int max_level = 0;
  uint32 loudest_speaker_ssrc = 0;

  // Update the speaking states of all participants based on the new audio
  // level information. Also retain loudest speaker.
  std::map<uint32, SpeakingState>::iterator state_it;
  for (state_it = ssrc_to_speaking_state_map_.begin();
       state_it != ssrc_to_speaking_state_map_.end(); ++state_it) {
    bool is_previous_speaker = current_speaker_ssrc_ == state_it->first;

    // This uses a state machine in order to gradually identify
    // members as having started or stopped speaking. Matches the
    // algorithm used by the hangouts js code.

    std::map<uint32, int>::const_iterator level_it =
        active_ssrc_to_level_map.find(state_it->first);
    // Note that the stream map only contains streams with non-zero audio
    // levels.
    int level = (level_it != active_ssrc_to_level_map.end()) ?
        level_it->second : 0;
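    // The switch below debounces speech onsets and offsets.  With sound, a
    // stream walks SS_NOT_SPEAKING -> SS_MIGHT_BE_SPEAKING -> SS_SPEAKING;
    // with silence it walks SS_SPEAKING -> SS_WAS_SPEAKING_RECENTLY1 ->
    // SS_WAS_SPEAKING_RECENTLY2 -> SS_NOT_SPEAKING, and any sound on the way
    // down puts it straight back to SS_SPEAKING.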
    switch (state_it->second) {
      case SS_NOT_SPEAKING:
        if (level > 0) {
          // Reset level because we don't think they're really speaking.
          level = 0;
          state_it->second = SS_MIGHT_BE_SPEAKING;
        } else {
          // State unchanged.
        }
        break;
      case SS_MIGHT_BE_SPEAKING:
        if (level > 0) {
          state_it->second = SS_SPEAKING;
        } else {
          state_it->second = SS_NOT_SPEAKING;
        }
        break;
      case SS_SPEAKING:
        if (level > 0) {
          // State unchanged.
        } else {
          state_it->second = SS_WAS_SPEAKING_RECENTLY1;
          if (is_previous_speaker) {
            // Assume this is an inter-word silence and assign him the highest
            // volume.
            level = kMaxAudioLevel;
          }
        }
        break;
      case SS_WAS_SPEAKING_RECENTLY1:
        if (level > 0) {
          state_it->second = SS_SPEAKING;
        } else {
          state_it->second = SS_WAS_SPEAKING_RECENTLY2;
          if (is_previous_speaker) {
            // Assume this is an inter-word silence and assign him the highest
            // volume.
            level = kMaxAudioLevel;
          }
        }
        break;
      case SS_WAS_SPEAKING_RECENTLY2:
        if (level > 0) {
          state_it->second = SS_SPEAKING;
        } else {
          state_it->second = SS_NOT_SPEAKING;
        }
        break;
    }

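    // Track the loudest stream seen so far, using the level as adjusted
    // above: zeroed for streams that might only be starting to speak and
    // boosted to kMaxAudioLevel for the current speaker during a brief pause.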
    if (level > max_level) {
      loudest_speaker_ssrc = state_it->first;
      max_level = level;
    } else if (level > 0 && level == max_level && is_previous_speaker) {
      // Favor continuity of loudest speakers if audio levels are equal.
      loudest_speaker_ssrc = state_it->first;
    }
  }

  // We avoid over-switching by disabling switching for a period of time after
  // a switch is done.
  uint32 now = talk_base::Time();
  if (earliest_permitted_switch_time_ <= now &&
      current_speaker_ssrc_ != loudest_speaker_ssrc) {
    current_speaker_ssrc_ = loudest_speaker_ssrc;
    LOG(LS_INFO) << "Current speaker changed to " << current_speaker_ssrc_;
    earliest_permitted_switch_time_ = now + min_time_between_switches_;
    SignalUpdate(this, current_speaker_ssrc_);
  }
}

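// Keeps the speaking-state map in sync with this session's audio streams:
// removed streams are forgotten and newly added streams start out as
// SS_NOT_SPEAKING.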
void CurrentSpeakerMonitor::OnMediaStreamsUpdate(
    AudioSourceContext* audio_source_context, BaseSession* session,
    const MediaStreams& added, const MediaStreams& removed) {
  if (audio_source_context == audio_source_context_ && session == session_) {
    // Update the speaking state map based on added and removed streams.
    for (std::vector<cricket::StreamParams>::const_iterator
         it = removed.audio().begin(); it != removed.audio().end(); ++it) {
      ssrc_to_speaking_state_map_.erase(it->first_ssrc());
    }

    for (std::vector<cricket::StreamParams>::const_iterator
         it = added.audio().begin(); it != added.audio().end(); ++it) {
      ssrc_to_speaking_state_map_[it->first_ssrc()] = SS_NOT_SPEAKING;
    }
  }
}

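// Drops all per-stream state when this session's media streams are reset.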
void CurrentSpeakerMonitor::OnMediaStreamsReset(
    AudioSourceContext* audio_source_context, BaseSession* session) {
  if (audio_source_context == audio_source_context_ && session == session_) {
    ssrc_to_speaking_state_map_.clear();
  }
}

}  // namespace cricket