/*
 * Copyright 2004 The WebRTC Project Authors. All rights reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */
#include <string>
#include <utility>

#include "absl/algorithm/container.h"
#include "rtc_base/atomic_ops.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "rtc_base/message_queue.h"
#include "rtc_base/thread.h"
#include "rtc_base/time_utils.h"
#include "rtc_base/trace_event.h"

namespace rtc {
namespace {

const int kMaxMsgLatency = 150;  // 150 ms
const int kSlowDispatchLoggingThreshold = 50;  // 50 ms

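// Scoped lock that also marks the MessageQueueManager as busy processing its
// list of queues. AddInternal() and RemoveInternal() DCHECK that this counter
// is zero, so the list cannot be modified while it is being iterated, while
// re-entrant ClearInternal() calls remain allowed.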
class RTC_SCOPED_LOCKABLE MarkProcessingCritScope {
 public:
  MarkProcessingCritScope(const CriticalSection* cs, size_t* processing)
      RTC_EXCLUSIVE_LOCK_FUNCTION(cs)
      : cs_(cs), processing_(processing) {
    cs_->Enter();
    *processing_ += 1;
  }

  ~MarkProcessingCritScope() RTC_UNLOCK_FUNCTION() {
    *processing_ -= 1;
    cs_->Leave();
  }

 private:
  const CriticalSection* const cs_;
  size_t* processing_;

  RTC_DISALLOW_COPY_AND_ASSIGN(MarkProcessingCritScope);
};
}  // namespace

//------------------------------------------------------------------
// MessageQueueManager

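// The manager is created on first use and never deleted, so the returned
// pointer remains valid for the lifetime of the process.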
MessageQueueManager* MessageQueueManager::Instance() {
  static MessageQueueManager* const instance = new MessageQueueManager;
  return instance;
}

MessageQueueManager::MessageQueueManager() : processing_(0) {}

MessageQueueManager::~MessageQueueManager() {}

void MessageQueueManager::Add(MessageQueue* message_queue) {
  return Instance()->AddInternal(message_queue);
}
void MessageQueueManager::AddInternal(MessageQueue* message_queue) {
  CritScope cs(&crit_);
  // Prevent changes while the list of message queues is processed.
  RTC_DCHECK_EQ(processing_, 0);
  message_queues_.push_back(message_queue);
}

void MessageQueueManager::Remove(MessageQueue* message_queue) {
  return Instance()->RemoveInternal(message_queue);
}
void MessageQueueManager::RemoveInternal(MessageQueue* message_queue) {
  {
    CritScope cs(&crit_);
    // Prevent changes while the list of message queues is processed.
    RTC_DCHECK_EQ(processing_, 0);
    std::vector<MessageQueue*>::iterator iter;
    iter = absl::c_find(message_queues_, message_queue);
    if (iter != message_queues_.end()) {
      message_queues_.erase(iter);
    }
  }
}

void MessageQueueManager::Clear(MessageHandler* handler) {
  return Instance()->ClearInternal(handler);
}
void MessageQueueManager::ClearInternal(MessageHandler* handler) {
  // Deleted objects may cause re-entrant calls to ClearInternal. This is
  // allowed as the list of message queues does not change while queues are
  // cleared.
  MarkProcessingCritScope cs(&crit_, &processing_);
  for (MessageQueue* queue : message_queues_) {
    queue->Clear(handler);
  }
}

void MessageQueueManager::ProcessAllMessageQueuesForTesting() {
  return Instance()->ProcessAllMessageQueuesInternal();
}

void MessageQueueManager::ProcessAllMessageQueuesInternal() {
  // This works by posting a delayed message at the current time and waiting
  // for it to be dispatched on all queues, which will ensure that all messages
  // that came before it were also dispatched.
  volatile int queues_not_done = 0;

  // This class is used so that whether the posted message is processed, or the
  // message queue is simply cleared, queues_not_done gets decremented.
  class ScopedIncrement : public MessageData {
   public:
    ScopedIncrement(volatile int* value) : value_(value) {
      AtomicOps::Increment(value_);
    }
    ~ScopedIncrement() override { AtomicOps::Decrement(value_); }

   private:
    volatile int* value_;
  };

  {
    MarkProcessingCritScope cs(&crit_, &processing_);
    for (MessageQueue* queue : message_queues_) {
      if (!queue->IsProcessingMessagesForTesting()) {
        // If the queue is not processing messages, it can be ignored. If we
        // tried to post a message to it, it would be dropped or ignored.
        continue;
      }
      queue->PostDelayed(RTC_FROM_HERE, 0, nullptr, MQID_DISPOSE,
                         new ScopedIncrement(&queues_not_done));
    }
  }

  rtc::Thread* current = rtc::Thread::Current();
  // Note: One of the message queues may have been on this thread, which is
  // why we can't synchronously wait for queues_not_done to go to 0; we need
  // to process messages as well.
  while (AtomicOps::AcquireLoad(&queues_not_done) > 0) {
    if (current) {
      current->ProcessMessages(0);
    }
  }
}
148
henrike@webrtc.orgf0488722014-05-13 18:00:26 +0000149//------------------------------------------------------------------
150// MessageQueue
jbauch25d1f282016-02-05 00:25:02 -0800151MessageQueue::MessageQueue(SocketServer* ss, bool init_queue)
André Susano Pinto02a57972016-07-22 13:30:05 +0200152 : fPeekKeep_(false),
153 dmsgq_next_num_(0),
154 fInitialized_(false),
155 fDestroyed_(false),
156 stop_(0),
157 ss_(ss) {
danilchapbebf54c2016-04-28 01:32:48 -0700158 RTC_DCHECK(ss);
159 // Currently, MessageQueue holds a socket server, and is the base class for
160 // Thread. It seems like it makes more sense for Thread to hold the socket
161 // server, and provide it to the MessageQueue, since the Thread controls
162 // the I/O model, and MQ is agnostic to those details. Anyway, this causes
163 // messagequeue_unittest to depend on network libraries... yuck.
henrike@webrtc.orgf0488722014-05-13 18:00:26 +0000164 ss_->SetMessageQueue(this);
jbauch25d1f282016-02-05 00:25:02 -0800165 if (init_queue) {
166 DoInit();
167 }
henrike@webrtc.orgf0488722014-05-13 18:00:26 +0000168}
169
danilchapbebf54c2016-04-28 01:32:48 -0700170MessageQueue::MessageQueue(std::unique_ptr<SocketServer> ss, bool init_queue)
171 : MessageQueue(ss.get(), init_queue) {
172 own_ss_ = std::move(ss);
173}
174
henrike@webrtc.orgf0488722014-05-13 18:00:26 +0000175MessageQueue::~MessageQueue() {
jbauch25d1f282016-02-05 00:25:02 -0800176 DoDestroy();
177}

void MessageQueue::DoInit() {
  if (fInitialized_) {
    return;
  }

  fInitialized_ = true;
  MessageQueueManager::Add(this);
}

void MessageQueue::DoDestroy() {
  if (fDestroyed_) {
    return;
  }

  fDestroyed_ = true;
  // The signal is done from here to ensure that it always gets called when
  // the queue is going away.
  SignalQueueDestroyed();
  MessageQueueManager::Remove(this);
  ClearInternal(nullptr, MQID_ANY, nullptr);

  if (ss_) {
    ss_->SetMessageQueue(nullptr);
  }
}

SocketServer* MessageQueue::socketserver() {
  return ss_;
}

void MessageQueue::WakeUpSocketServer() {
  ss_->WakeUp();
}

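// Quit() and Restart() toggle stop_ with atomic operations, so the flag can be
// flipped without holding crit_ and observed by Get() on the queue's own
// thread. Quitting does not remove messages that are already in the queue.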
void MessageQueue::Quit() {
  AtomicOps::ReleaseStore(&stop_, 1);
  WakeUpSocketServer();
}

bool MessageQueue::IsQuitting() {
  return AtomicOps::AcquireLoad(&stop_) != 0;
}

bool MessageQueue::IsProcessingMessagesForTesting() {
  return !IsQuitting();
}

void MessageQueue::Restart() {
  AtomicOps::ReleaseStore(&stop_, 0);
}

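// Returns the next message without consuming it; the kept message is handed
// back by the following Get() or Peek() call.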
bool MessageQueue::Peek(Message* pmsg, int cmsWait) {
  if (fPeekKeep_) {
    *pmsg = msgPeek_;
    return true;
  }
  if (!Get(pmsg, cmsWait))
    return false;
  msgPeek_ = *pmsg;
  fPeekKeep_ = true;
  return true;
}

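// Waits up to cmsWait milliseconds (kForever blocks indefinitely) for a
// message, moving due delayed messages onto the ordered queue and letting the
// socket server multiplex I/O in the meantime. Returns false on timeout or
// when the queue is quitting.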
bool MessageQueue::Get(Message* pmsg, int cmsWait, bool process_io) {
  // Return and clear peek if present
  // Always return the peek if it exists so there is Peek/Get symmetry

  if (fPeekKeep_) {
    *pmsg = msgPeek_;
    fPeekKeep_ = false;
    return true;
  }

  // Get w/wait + timer scan / dispatch + socket / event multiplexer dispatch

  int64_t cmsTotal = cmsWait;
  int64_t cmsElapsed = 0;
  int64_t msStart = TimeMillis();
  int64_t msCurrent = msStart;
  while (true) {
    // Check for sent messages
    ReceiveSends();

    // Check for posted events
    int64_t cmsDelayNext = kForever;
    bool first_pass = true;
    while (true) {
      // All queue operations need to be locked, but nothing else in this loop
      // (specifically handling disposed messages) can happen inside the crit.
      // Otherwise, disposed MessageHandlers will cause deadlocks.
      {
        CritScope cs(&crit_);
        // On the first pass, check for delayed messages that have been
        // triggered and calculate the next trigger time.
        if (first_pass) {
          first_pass = false;
          while (!dmsgq_.empty()) {
            if (msCurrent < dmsgq_.top().msTrigger_) {
              cmsDelayNext = TimeDiff(dmsgq_.top().msTrigger_, msCurrent);
              break;
            }
            msgq_.push_back(dmsgq_.top().msg_);
            dmsgq_.pop();
          }
        }
        // Pull a message off the message queue, if available.
        if (msgq_.empty()) {
          break;
        } else {
          *pmsg = msgq_.front();
          msgq_.pop_front();
        }
      }  // crit_ is released here.

      // Log a warning for time-sensitive messages that we're late to deliver.
      if (pmsg->ts_sensitive) {
        int64_t delay = TimeDiff(msCurrent, pmsg->ts_sensitive);
        if (delay > 0) {
          RTC_LOG_F(LS_WARNING)
              << "id: " << pmsg->message_id
              << " delay: " << (delay + kMaxMsgLatency) << "ms";
        }
      }
      // If this was a dispose message, delete it and skip it.
      if (MQID_DISPOSE == pmsg->message_id) {
        RTC_DCHECK(nullptr == pmsg->phandler);
        delete pmsg->pdata;
        *pmsg = Message();
        continue;
      }
      return true;
    }

    if (IsQuitting())
      break;

    // Which is shorter, the delay wait or the asked wait?

    int64_t cmsNext;
    if (cmsWait == kForever) {
      cmsNext = cmsDelayNext;
    } else {
      cmsNext = std::max<int64_t>(0, cmsTotal - cmsElapsed);
      if ((cmsDelayNext != kForever) && (cmsDelayNext < cmsNext))
        cmsNext = cmsDelayNext;
    }

    {
      // Wait and multiplex in the meantime
      if (!ss_->Wait(static_cast<int>(cmsNext), process_io))
        return false;
    }

    // If the specified timeout expired, return

    msCurrent = TimeMillis();
    cmsElapsed = TimeDiff(msCurrent, msStart);
    if (cmsWait != kForever) {
      if (cmsElapsed >= cmsWait)
        return false;
    }
  }
  return false;
}

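// Intentionally a no-op here; subclasses such as Thread override this to
// service messages delivered through their Send() mechanism.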
void MessageQueue::ReceiveSends() {}

void MessageQueue::Post(const Location& posted_from,
                        MessageHandler* phandler,
                        uint32_t id,
                        MessageData* pdata,
                        bool time_sensitive) {
  if (IsQuitting()) {
    delete pdata;
    return;
  }

  // Keep thread safe
  // Add the message to the end of the queue
  // Signal for the multiplexer to return

  {
    CritScope cs(&crit_);
    Message msg;
    msg.posted_from = posted_from;
    msg.phandler = phandler;
    msg.message_id = id;
    msg.pdata = pdata;
    if (time_sensitive) {
      msg.ts_sensitive = TimeMillis() + kMaxMsgLatency;
    }
    msgq_.push_back(msg);
  }
  WakeUpSocketServer();
}

void MessageQueue::PostDelayed(const Location& posted_from,
                               int cmsDelay,
                               MessageHandler* phandler,
                               uint32_t id,
                               MessageData* pdata) {
  return DoDelayPost(posted_from, cmsDelay, TimeAfter(cmsDelay), phandler, id,
                     pdata);
}

void MessageQueue::PostAt(const Location& posted_from,
                          uint32_t tstamp,
                          MessageHandler* phandler,
                          uint32_t id,
                          MessageData* pdata) {
  // This should work even if it is used (unexpectedly).
  int64_t delay = static_cast<uint32_t>(TimeMillis()) - tstamp;
  return DoDelayPost(posted_from, delay, tstamp, phandler, id, pdata);
}

void MessageQueue::PostAt(const Location& posted_from,
                          int64_t tstamp,
                          MessageHandler* phandler,
                          uint32_t id,
                          MessageData* pdata) {
  return DoDelayPost(posted_from, TimeUntil(tstamp), tstamp, phandler, id,
                     pdata);
}

void MessageQueue::DoDelayPost(const Location& posted_from,
                               int64_t cmsDelay,
                               int64_t tstamp,
                               MessageHandler* phandler,
                               uint32_t id,
                               MessageData* pdata) {
  if (IsQuitting()) {
    delete pdata;
    return;
  }

  // Keep thread safe
  // Add to the priority queue. Gets sorted soonest first.
  // Signal for the multiplexer to return.

  {
    CritScope cs(&crit_);
    Message msg;
    msg.posted_from = posted_from;
    msg.phandler = phandler;
    msg.message_id = id;
    msg.pdata = pdata;
    DelayedMessage dmsg(cmsDelay, tstamp, dmsgq_next_num_, msg);
    dmsgq_.push(dmsg);
    // If this message queue processes 1 message every millisecond for 50 days,
    // we will wrap this number. Even then, only messages with identical times
    // will be misordered, and then only briefly. This is probably ok.
    ++dmsgq_next_num_;
    RTC_DCHECK_NE(0, dmsgq_next_num_);
  }
  WakeUpSocketServer();
}

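// Returns 0 if a message is ready to dispatch, the time in milliseconds until
// the earliest delayed message is due, or kForever if nothing is pending.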
int MessageQueue::GetDelay() {
  CritScope cs(&crit_);

  if (!msgq_.empty())
    return 0;

  if (!dmsgq_.empty()) {
    int delay = TimeUntil(dmsgq_.top().msTrigger_);
    if (delay < 0)
      delay = 0;
    return delay;
  }

  return kForever;
}

void MessageQueue::Clear(MessageHandler* phandler,
                         uint32_t id,
                         MessageList* removed) {
  CritScope cs(&crit_);
  ClearInternal(phandler, id, removed);
}

void MessageQueue::ClearInternal(MessageHandler* phandler,
                                 uint32_t id,
                                 MessageList* removed) {
  // Remove messages with phandler

  if (fPeekKeep_ && msgPeek_.Match(phandler, id)) {
    if (removed) {
      removed->push_back(msgPeek_);
    } else {
      delete msgPeek_.pdata;
    }
    fPeekKeep_ = false;
  }

  // Remove from ordered message queue

  for (MessageList::iterator it = msgq_.begin(); it != msgq_.end();) {
    if (it->Match(phandler, id)) {
      if (removed) {
        removed->push_back(*it);
      } else {
        delete it->pdata;
      }
      it = msgq_.erase(it);
    } else {
      ++it;
    }
  }

  // Remove from priority queue. Not directly iterable, so use this approach.

  PriorityQueue::container_type::iterator new_end = dmsgq_.container().begin();
  for (PriorityQueue::container_type::iterator it = new_end;
       it != dmsgq_.container().end(); ++it) {
    if (it->msg_.Match(phandler, id)) {
      if (removed) {
        removed->push_back(it->msg_);
      } else {
        delete it->msg_.pdata;
      }
    } else {
      *new_end++ = *it;
    }
  }
  dmsgq_.container().erase(new_end, dmsgq_.container().end());
  dmsgq_.reheap();
}

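// Runs the handler for a single message and logs any dispatch that takes at
// least kSlowDispatchLoggingThreshold milliseconds.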
void MessageQueue::Dispatch(Message* pmsg) {
  TRACE_EVENT2("webrtc", "MessageQueue::Dispatch", "src_file_and_line",
               pmsg->posted_from.file_and_line(), "src_func",
               pmsg->posted_from.function_name());
  int64_t start_time = TimeMillis();
  pmsg->phandler->OnMessage(pmsg);
  int64_t end_time = TimeMillis();
  int64_t diff = TimeDiff(end_time, start_time);
  if (diff >= kSlowDispatchLoggingThreshold) {
    RTC_LOG(LS_INFO) << "Message took " << diff
                     << "ms to dispatch. Posted from: "
                     << pmsg->posted_from.ToString();
  }
}

}  // namespace rtc