blob: 503d5aff966bde3354352173891fe93e91350ce6 [file] [log] [blame]
henrike@webrtc.orgf0488722014-05-13 18:00:26 +00001/*
2 * Copyright 2004 The WebRTC Project Authors. All rights reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
andresp@webrtc.orgff689be2015-02-12 11:54:26 +000010#include <algorithm>
11
deadbeeff5f03e82016-06-06 11:16:06 -070012#include "webrtc/base/atomicops.h"
danilchapbebf54c2016-04-28 01:32:48 -070013#include "webrtc/base/checks.h"
henrike@webrtc.orgf0488722014-05-13 18:00:26 +000014#include "webrtc/base/common.h"
15#include "webrtc/base/logging.h"
16#include "webrtc/base/messagequeue.h"
Taylor Brandstetter5d97a9a2016-06-10 14:17:27 -070017#include "webrtc/base/stringencode.h"
deadbeeff5f03e82016-06-06 11:16:06 -070018#include "webrtc/base/thread.h"
pbos79e28422016-04-29 08:48:05 -070019#include "webrtc/base/trace_event.h"
henrike@webrtc.orgf0488722014-05-13 18:00:26 +000020
namespace rtc {
namespace {

// Messages posted as "time sensitive" log a warning in Get() when delivered
// more than this many milliseconds late (see ts_sensitive handling).
const int kMaxMsgLatency = 150;  // 150 ms
// Dispatch() logs any message whose handler takes at least this long.
const int kSlowDispatchLoggingThreshold = 50;  // 50 ms

// RAII guard for a CriticalSection that additionally asserts, via the shared
// |locked| flag, that the section is never acquired reentrantly. The flag is
// set for the lifetime of the scope and cleared before the lock is released.
class SCOPED_LOCKABLE DebugNonReentrantCritScope {
 public:
  DebugNonReentrantCritScope(const CriticalSection* cs, bool* locked)
      EXCLUSIVE_LOCK_FUNCTION(cs)
      : cs_(cs), locked_(locked) {
    cs_->Enter();
    ASSERT(!*locked_);  // Fires if the section was already held.
    *locked_ = true;
  }

  ~DebugNonReentrantCritScope() UNLOCK_FUNCTION() {
    *locked_ = false;
    cs_->Leave();
  }

 private:
  const CriticalSection* const cs_;  // Not owned.
  bool* locked_;                     // Not owned; shared reentrancy flag.

  RTC_DISALLOW_COPY_AND_ASSIGN(DebugNonReentrantCritScope);
};
}  // namespace
49
//------------------------------------------------------------------
// MessageQueueManager

// Lazily-created singleton (see Instance()); reset to NULL again when the
// last registered MessageQueue is removed (see RemoveInternal()).
MessageQueueManager* MessageQueueManager::instance_ = NULL;
54
55MessageQueueManager* MessageQueueManager::Instance() {
56 // Note: This is not thread safe, but it is first called before threads are
57 // spawned.
58 if (!instance_)
59 instance_ = new MessageQueueManager;
60 return instance_;
61}
62
63bool MessageQueueManager::IsInitialized() {
64 return instance_ != NULL;
65}
66
// locked_ is the debug flag consumed by DebugNonReentrantCritScope to detect
// reentrant acquisition of crit_.
MessageQueueManager::MessageQueueManager() : locked_(false) {}

MessageQueueManager::~MessageQueueManager() {
}

72void MessageQueueManager::Add(MessageQueue *message_queue) {
73 return Instance()->AddInternal(message_queue);
74}
75void MessageQueueManager::AddInternal(MessageQueue *message_queue) {
andrespcdf61722016-07-08 02:45:40 -070076 DebugNonReentrantCritScope cs(&crit_, &locked_);
henrike@webrtc.orgf0488722014-05-13 18:00:26 +000077 message_queues_.push_back(message_queue);
78}
79
80void MessageQueueManager::Remove(MessageQueue *message_queue) {
81 // If there isn't a message queue manager instance, then there isn't a queue
82 // to remove.
83 if (!instance_) return;
84 return Instance()->RemoveInternal(message_queue);
85}
86void MessageQueueManager::RemoveInternal(MessageQueue *message_queue) {
henrike@webrtc.orgf0488722014-05-13 18:00:26 +000087 // If this is the last MessageQueue, destroy the manager as well so that
88 // we don't leak this object at program shutdown. As mentioned above, this is
89 // not thread-safe, but this should only happen at program termination (when
90 // the ThreadManager is destroyed, and threads are no longer active).
91 bool destroy = false;
92 {
andrespcdf61722016-07-08 02:45:40 -070093 DebugNonReentrantCritScope cs(&crit_, &locked_);
henrike@webrtc.orgf0488722014-05-13 18:00:26 +000094 std::vector<MessageQueue *>::iterator iter;
95 iter = std::find(message_queues_.begin(), message_queues_.end(),
96 message_queue);
97 if (iter != message_queues_.end()) {
98 message_queues_.erase(iter);
99 }
100 destroy = message_queues_.empty();
101 }
102 if (destroy) {
103 instance_ = NULL;
104 delete this;
105 }
106}
107
108void MessageQueueManager::Clear(MessageHandler *handler) {
109 // If there isn't a message queue manager instance, then there aren't any
110 // queues to remove this handler from.
111 if (!instance_) return;
112 return Instance()->ClearInternal(handler);
113}
114void MessageQueueManager::ClearInternal(MessageHandler *handler) {
andrespcdf61722016-07-08 02:45:40 -0700115 DebugNonReentrantCritScope cs(&crit_, &locked_);
henrike@webrtc.orgf0488722014-05-13 18:00:26 +0000116 std::vector<MessageQueue *>::iterator iter;
117 for (iter = message_queues_.begin(); iter != message_queues_.end(); iter++)
118 (*iter)->Clear(handler);
119}
120
deadbeeff5f03e82016-06-06 11:16:06 -0700121void MessageQueueManager::ProcessAllMessageQueues() {
Taylor Brandstetterb3c68102016-05-27 14:15:43 -0700122 if (!instance_) {
123 return;
124 }
deadbeeff5f03e82016-06-06 11:16:06 -0700125 return Instance()->ProcessAllMessageQueuesInternal();
Taylor Brandstetterb3c68102016-05-27 14:15:43 -0700126}
127
void MessageQueueManager::ProcessAllMessageQueuesInternal() {
  // This works by posting a delayed message at the current time and waiting
  // for it to be dispatched on all queues, which will ensure that all messages
  // that came before it were also dispatched.
  volatile int queues_not_done = 0;

  // This class is used so that whether the posted message is processed, or the
  // message queue is simply cleared, queues_not_done gets decremented.
  class ScopedIncrement : public MessageData {
   public:
    ScopedIncrement(volatile int* value) : value_(value) {
      AtomicOps::Increment(value_);
    }
    // Runs when the MQID_DISPOSE message is deleted by the receiving queue
    // (either dispatched by Get() or dropped via Clear()).
    ~ScopedIncrement() override { AtomicOps::Decrement(value_); }

   private:
    volatile int* value_;  // Not owned; outlives this object (stack above).
  };

  {
    DebugNonReentrantCritScope cs(&crit_, &locked_);
    for (MessageQueue* queue : message_queues_) {
      if (queue->IsQuitting()) {
        // If the queue is quitting, it's done processing messages so it can
        // be ignored. If we tried to post a message to it, it would be dropped.
        continue;
      }
      // MQID_DISPOSE with a null handler makes Get() delete the payload in
      // place, which fires ~ScopedIncrement on that queue's thread.
      queue->PostDelayed(RTC_FROM_HERE, 0, nullptr, MQID_DISPOSE,
                         new ScopedIncrement(&queues_not_done));
    }
  }
  // Note: One of the message queues may have been on this thread, which is why
  // we can't synchronously wait for queues_not_done to go to 0; we need to
  // process messages as well.
  while (AtomicOps::AcquireLoad(&queues_not_done) > 0) {
    rtc::Thread::Current()->ProcessMessages(0);
  }
}
166
henrike@webrtc.orgf0488722014-05-13 18:00:26 +0000167//------------------------------------------------------------------
168// MessageQueue
// Constructs a queue bound to |ss| (not owned here; see the unique_ptr
// overload). When |init_queue| is false the subclass is expected to call
// DoInit() itself once construction is complete.
MessageQueue::MessageQueue(SocketServer* ss, bool init_queue)
    : fPeekKeep_(false),
      dmsgq_next_num_(0),
      fInitialized_(false),
      fDestroyed_(false),
      stop_(0),
      ss_(ss) {
  RTC_DCHECK(ss);
  // Currently, MessageQueue holds a socket server, and is the base class for
  // Thread. It seems like it makes more sense for Thread to hold the socket
  // server, and provide it to the MessageQueue, since the Thread controls
  // the I/O model, and MQ is agnostic to those details. Anyway, this causes
  // messagequeue_unittest to depend on network libraries... yuck.
  ss_->SetMessageQueue(this);
  if (init_queue) {
    DoInit();
  }
}
187
// Owning overload: delegates to the raw-pointer constructor, then takes
// ownership of the socket server so it lives as long as the queue.
MessageQueue::MessageQueue(std::unique_ptr<SocketServer> ss, bool init_queue)
    : MessageQueue(ss.get(), init_queue) {
  own_ss_ = std::move(ss);
}
192
MessageQueue::~MessageQueue() {
  // Safe even if teardown already happened: DoDestroy() is guarded by
  // fDestroyed_.
  DoDestroy();
}
196
197void MessageQueue::DoInit() {
198 if (fInitialized_) {
199 return;
200 }
201
202 fInitialized_ = true;
203 MessageQueueManager::Add(this);
204}
205
// Tears the queue down: signals observers, unregisters from the manager,
// drops queued messages, and detaches from the socket server. Idempotent.
void MessageQueue::DoDestroy() {
  if (fDestroyed_) {
    return;
  }

  fDestroyed_ = true;
  // The signal is done from here to ensure
  // that it always gets called when the queue
  // is going away.
  SignalQueueDestroyed();
  MessageQueueManager::Remove(this);
  // NOTE(review): Clear(NULL) appears to discard all remaining messages
  // regardless of handler — confirm against Message::Match.
  Clear(NULL);

  SharedScope ss(&ss_lock_);
  if (ss_) {
    ss_->SetMessageQueue(NULL);
  }
}
224
jbauch9ccedc32016-02-25 01:14:56 -0800225SocketServer* MessageQueue::socketserver() {
226 SharedScope ss(&ss_lock_);
227 return ss_;
228}
229
void MessageQueue::set_socketserver(SocketServer* ss) {
  // Need to lock exclusively here to prevent simultaneous modifications from
  // other threads. Can't be a shared lock to prevent races with other reading
  // threads.
  // Other places that only read "ss_" can use a shared lock as simultaneous
  // read access is allowed.
  ExclusiveScope es(&ss_lock_);
  // A null argument reverts to the owned socket server (if one was supplied
  // at construction).
  ss_ = ss ? ss : own_ss_.get();
  ss_->SetMessageQueue(this);
}
240
jbauch9ccedc32016-02-25 01:14:56 -0800241void MessageQueue::WakeUpSocketServer() {
242 SharedScope ss(&ss_lock_);
243 ss_->WakeUp();
244}
245
henrike@webrtc.orgf0488722014-05-13 18:00:26 +0000246void MessageQueue::Quit() {
André Susano Pinto02a57972016-07-22 13:30:05 +0200247 AtomicOps::ReleaseStore(&stop_, 1);
jbauch9ccedc32016-02-25 01:14:56 -0800248 WakeUpSocketServer();
henrike@webrtc.orgf0488722014-05-13 18:00:26 +0000249}
250
251bool MessageQueue::IsQuitting() {
André Susano Pinto02a57972016-07-22 13:30:05 +0200252 return AtomicOps::AcquireLoad(&stop_) != 0;
henrike@webrtc.orgf0488722014-05-13 18:00:26 +0000253}
254
255void MessageQueue::Restart() {
André Susano Pinto02a57972016-07-22 13:30:05 +0200256 AtomicOps::ReleaseStore(&stop_, 0);
henrike@webrtc.orgf0488722014-05-13 18:00:26 +0000257}
258
259bool MessageQueue::Peek(Message *pmsg, int cmsWait) {
260 if (fPeekKeep_) {
261 *pmsg = msgPeek_;
262 return true;
263 }
264 if (!Get(pmsg, cmsWait))
265 return false;
266 msgPeek_ = *pmsg;
267 fPeekKeep_ = true;
268 return true;
269}
270
// Waits up to |cmsWait| ms (kForever = wait indefinitely) for a message and
// returns it in |*pmsg|. Returns false on timeout, socket-server failure, or
// when the queue is quitting. |process_io| is forwarded to the socket
// server's Wait().
bool MessageQueue::Get(Message *pmsg, int cmsWait, bool process_io) {
  // Return and clear peek if present
  // Always return the peek if it exists so there is Peek/Get symmetry

  if (fPeekKeep_) {
    *pmsg = msgPeek_;
    fPeekKeep_ = false;
    return true;
  }

  // Get w/wait + timer scan / dispatch + socket / event multiplexer dispatch

  int64_t cmsTotal = cmsWait;
  int64_t cmsElapsed = 0;
  int64_t msStart = TimeMillis();
  int64_t msCurrent = msStart;
  while (true) {
    // Check for sent messages
    ReceiveSends();

    // Check for posted events
    int64_t cmsDelayNext = kForever;
    bool first_pass = true;
    while (true) {
      // All queue operations need to be locked, but nothing else in this loop
      // (specifically handling disposed message) can happen inside the crit.
      // Otherwise, disposed MessageHandlers will cause deadlocks.
      {
        CritScope cs(&crit_);
        // On the first pass, check for delayed messages that have been
        // triggered and calculate the next trigger time.
        if (first_pass) {
          first_pass = false;
          // Move every due delayed message onto the ordered queue; the first
          // not-yet-due message (heap top) determines the next wakeup time.
          while (!dmsgq_.empty()) {
            if (msCurrent < dmsgq_.top().msTrigger_) {
              cmsDelayNext = TimeDiff(dmsgq_.top().msTrigger_, msCurrent);
              break;
            }
            msgq_.push_back(dmsgq_.top().msg_);
            dmsgq_.pop();
          }
        }
        // Pull a message off the message queue, if available.
        if (msgq_.empty()) {
          break;
        } else {
          *pmsg = msgq_.front();
          msgq_.pop_front();
        }
      }  // crit_ is released here.

      // Log a warning for time-sensitive messages that we're late to deliver.
      if (pmsg->ts_sensitive) {
        int64_t delay = TimeDiff(msCurrent, pmsg->ts_sensitive);
        if (delay > 0) {
          LOG_F(LS_WARNING) << "id: " << pmsg->message_id << " delay: "
                            << (delay + kMaxMsgLatency) << "ms";
        }
      }
      // If this was a dispose message, delete it and skip it.
      if (MQID_DISPOSE == pmsg->message_id) {
        ASSERT(NULL == pmsg->phandler);
        delete pmsg->pdata;  // May run payload dtors (e.g. ScopedIncrement).
        *pmsg = Message();
        continue;
      }
      return true;
    }

    if (IsQuitting())
      break;

    // Which is shorter, the delay wait or the asked wait?

    int64_t cmsNext;
    if (cmsWait == kForever) {
      cmsNext = cmsDelayNext;
    } else {
      cmsNext = std::max<int64_t>(0, cmsTotal - cmsElapsed);
      if ((cmsDelayNext != kForever) && (cmsDelayNext < cmsNext))
        cmsNext = cmsDelayNext;
    }

    {
      // Wait and multiplex in the meantime
      SharedScope ss(&ss_lock_);
      if (!ss_->Wait(static_cast<int>(cmsNext), process_io))
        return false;
    }

    // If the specified timeout expired, return

    msCurrent = TimeMillis();
    cmsElapsed = TimeDiff(msCurrent, msStart);
    if (cmsWait != kForever) {
      if (cmsElapsed >= cmsWait)
        return false;
    }
  }
  return false;
}
372
// Intentionally a no-op in the base class; Get() calls it on every pass.
// NOTE(review): presumably overridden by Thread to service cross-thread
// Send() requests — confirm in thread.h.
void MessageQueue::ReceiveSends() {
}
375
Taylor Brandstetter5d97a9a2016-06-10 14:17:27 -0700376void MessageQueue::Post(const Location& posted_from,
377 MessageHandler* phandler,
Peter Boström0c4e06b2015-10-07 12:23:21 +0200378 uint32_t id,
379 MessageData* pdata,
380 bool time_sensitive) {
André Susano Pinto02a57972016-07-22 13:30:05 +0200381 if (IsQuitting())
henrike@webrtc.orgf0488722014-05-13 18:00:26 +0000382 return;
383
384 // Keep thread safe
385 // Add the message to the end of the queue
386 // Signal for the multiplexer to return
387
jbauch9ccedc32016-02-25 01:14:56 -0800388 {
389 CritScope cs(&crit_);
390 Message msg;
Taylor Brandstetter5d97a9a2016-06-10 14:17:27 -0700391 msg.posted_from = posted_from;
jbauch9ccedc32016-02-25 01:14:56 -0800392 msg.phandler = phandler;
393 msg.message_id = id;
394 msg.pdata = pdata;
395 if (time_sensitive) {
Honghai Zhang82d78622016-05-06 11:29:15 -0700396 msg.ts_sensitive = TimeMillis() + kMaxMsgLatency;
jbauch9ccedc32016-02-25 01:14:56 -0800397 }
398 msgq_.push_back(msg);
henrike@webrtc.orgf0488722014-05-13 18:00:26 +0000399 }
jbauch9ccedc32016-02-25 01:14:56 -0800400 WakeUpSocketServer();
henrike@webrtc.orgf0488722014-05-13 18:00:26 +0000401}
402
Taylor Brandstetter5d97a9a2016-06-10 14:17:27 -0700403void MessageQueue::PostDelayed(const Location& posted_from,
404 int cmsDelay,
kwiberg@webrtc.org67186fe2015-03-09 22:21:53 +0000405 MessageHandler* phandler,
Peter Boström0c4e06b2015-10-07 12:23:21 +0200406 uint32_t id,
kwiberg@webrtc.org67186fe2015-03-09 22:21:53 +0000407 MessageData* pdata) {
Taylor Brandstetter5d97a9a2016-06-10 14:17:27 -0700408 return DoDelayPost(posted_from, cmsDelay, TimeAfter(cmsDelay), phandler, id,
409 pdata);
kwiberg@webrtc.org67186fe2015-03-09 22:21:53 +0000410}
411
// Legacy 32-bit-timestamp overload of PostAt().
void MessageQueue::PostAt(const Location& posted_from,
                          uint32_t tstamp,
                          MessageHandler* phandler,
                          uint32_t id,
                          MessageData* pdata) {
  // This should work even if it is used (unexpectedly).
  // NOTE(review): |delay| is now - tstamp, i.e. negative for a future
  // timestamp. DoDelayPost schedules by |tstamp| itself, so the delay value
  // appears informational only — confirm before relying on it.
  int64_t delay = static_cast<uint32_t>(TimeMillis()) - tstamp;
  return DoDelayPost(posted_from, delay, tstamp, phandler, id, pdata);
}
421
Taylor Brandstetter5d97a9a2016-06-10 14:17:27 -0700422void MessageQueue::PostAt(const Location& posted_from,
423 int64_t tstamp,
Honghai Zhang82d78622016-05-06 11:29:15 -0700424 MessageHandler* phandler,
425 uint32_t id,
426 MessageData* pdata) {
Taylor Brandstetter5d97a9a2016-06-10 14:17:27 -0700427 return DoDelayPost(posted_from, TimeUntil(tstamp), tstamp, phandler, id,
428 pdata);
kwiberg@webrtc.org67186fe2015-03-09 22:21:53 +0000429}
430
// Shared implementation for PostDelayed()/PostAt(): pushes the message onto
// the delayed-message priority queue (ordered soonest-first, ties broken by
// insertion number) and wakes the multiplexer.
void MessageQueue::DoDelayPost(const Location& posted_from,
                               int64_t cmsDelay,
                               int64_t tstamp,
                               MessageHandler* phandler,
                               uint32_t id,
                               MessageData* pdata) {
  if (IsQuitting()) {
    return;
  }

  // Keep thread safe
  // Add to the priority queue. Gets sorted soonest first.
  // Signal for the multiplexer to return.

  {
    CritScope cs(&crit_);
    Message msg;
    msg.posted_from = posted_from;
    msg.phandler = phandler;
    msg.message_id = id;
    msg.pdata = pdata;
    DelayedMessage dmsg(cmsDelay, tstamp, dmsgq_next_num_, msg);
    dmsgq_.push(dmsg);
    // If this message queue processes 1 message every millisecond for 50 days,
    // we will wrap this number. Even then, only messages with identical times
    // will be misordered, and then only briefly. This is probably ok.
    VERIFY(0 != ++dmsgq_next_num_);
  }
  WakeUpSocketServer();
}
461
462int MessageQueue::GetDelay() {
463 CritScope cs(&crit_);
464
465 if (!msgq_.empty())
466 return 0;
467
468 if (!dmsgq_.empty()) {
469 int delay = TimeUntil(dmsgq_.top().msTrigger_);
470 if (delay < 0)
471 delay = 0;
472 return delay;
473 }
474
475 return kForever;
476}
477
// Removes all messages matching |phandler| (and |id|) from the peek slot, the
// ordered queue, and the delayed-message priority queue. Matching messages
// are appended to |removed| when provided; otherwise their payloads are
// deleted here.
void MessageQueue::Clear(MessageHandler* phandler,
                         uint32_t id,
                         MessageList* removed) {
  CritScope cs(&crit_);

  // Remove messages with phandler

  if (fPeekKeep_ && msgPeek_.Match(phandler, id)) {
    if (removed) {
      removed->push_back(msgPeek_);
    } else {
      delete msgPeek_.pdata;
    }
    fPeekKeep_ = false;
  }

  // Remove from ordered message queue

  for (MessageList::iterator it = msgq_.begin(); it != msgq_.end();) {
    if (it->Match(phandler, id)) {
      if (removed) {
        removed->push_back(*it);
      } else {
        delete it->pdata;
      }
      it = msgq_.erase(it);
    } else {
      ++it;
    }
  }

  // Remove from priority queue. Not directly iterable, so use this approach

  // Compact the underlying container in place: survivors are copied toward
  // the front, then the tail is erased and the heap invariant restored.
  PriorityQueue::container_type::iterator new_end = dmsgq_.container().begin();
  for (PriorityQueue::container_type::iterator it = new_end;
       it != dmsgq_.container().end(); ++it) {
    if (it->msg_.Match(phandler, id)) {
      if (removed) {
        removed->push_back(it->msg_);
      } else {
        delete it->msg_.pdata;
      }
    } else {
      *new_end++ = *it;
    }
  }
  dmsgq_.container().erase(new_end, dmsgq_.container().end());
  dmsgq_.reheap();
}
527
528void MessageQueue::Dispatch(Message *pmsg) {
Taylor Brandstetter5d97a9a2016-06-10 14:17:27 -0700529 TRACE_EVENT2("webrtc", "MessageQueue::Dispatch", "src_file_and_line",
530 pmsg->posted_from.file_and_line(), "src_func",
531 pmsg->posted_from.function_name());
532 int64_t start_time = TimeMillis();
henrike@webrtc.orgf0488722014-05-13 18:00:26 +0000533 pmsg->phandler->OnMessage(pmsg);
Taylor Brandstetter5d97a9a2016-06-10 14:17:27 -0700534 int64_t end_time = TimeMillis();
535 int64_t diff = TimeDiff(end_time, start_time);
536 if (diff >= kSlowDispatchLoggingThreshold) {
537 LOG(LS_INFO) << "Message took " << diff << "ms to dispatch. Posted from: "
538 << pmsg->posted_from.ToString();
539 }
henrike@webrtc.orgf0488722014-05-13 18:00:26 +0000540}
541
henrike@webrtc.orgf0488722014-05-13 18:00:26 +0000542} // namespace rtc