/*
 * Copyright 2004 The WebRTC Project Authors. All rights reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "rtc_base/thread.h"

#if defined(WEBRTC_WIN)
#include <comdef.h>
#elif defined(WEBRTC_POSIX)
#include <time.h>
#else
#error "Either WEBRTC_WIN or WEBRTC_POSIX needs to be defined."
#endif

#if defined(WEBRTC_WIN)
// Disable warning that we don't care about:
// warning C4722: destructor never returns, potential memory leak
#pragma warning(disable : 4722)
#endif

#include <stdio.h>

#include <utility>

#include "absl/algorithm/container.h"
#include "api/sequence_checker.h"
#include "rtc_base/atomic_ops.h"
#include "rtc_base/checks.h"
#include "rtc_base/deprecated/recursive_critical_section.h"
#include "rtc_base/event.h"
#include "rtc_base/internal/default_socket_server.h"
#include "rtc_base/logging.h"
#include "rtc_base/null_socket_server.h"
#include "rtc_base/task_utils/to_queued_task.h"
#include "rtc_base/time_utils.h"
#include "rtc_base/trace_event.h"

#if defined(WEBRTC_MAC)
#include "rtc_base/system/cocoa_threading.h"

/*
 * These are forward-declarations for methods that are part of the
 * ObjC runtime. They are declared in the private header objc-internal.h.
 * These calls are what clang inserts when using @autoreleasepool in ObjC,
 * but here they are used directly in order to keep this file C++.
 * https://clang.llvm.org/docs/AutomaticReferenceCounting.html#runtime-support
 */
extern "C" {
void* objc_autoreleasePoolPush(void);
void objc_autoreleasePoolPop(void* pool);
}

namespace {
class ScopedAutoReleasePool {
 public:
  ScopedAutoReleasePool() : pool_(objc_autoreleasePoolPush()) {}
  ~ScopedAutoReleasePool() { objc_autoreleasePoolPop(pool_); }

 private:
  void* const pool_;
};
}  // namespace
#endif

namespace rtc {
namespace {

class MessageHandlerWithTask final : public MessageHandler {
 public:
  MessageHandlerWithTask() {}

  void OnMessage(Message* msg) override {
    static_cast<rtc_thread_internal::MessageLikeTask*>(msg->pdata)->Run();
    delete msg->pdata;
  }

 private:
  ~MessageHandlerWithTask() override {}

  RTC_DISALLOW_COPY_AND_ASSIGN(MessageHandlerWithTask);
};

class RTC_SCOPED_LOCKABLE MarkProcessingCritScope {
 public:
  MarkProcessingCritScope(const RecursiveCriticalSection* cs,
                          size_t* processing) RTC_EXCLUSIVE_LOCK_FUNCTION(cs)
      : cs_(cs), processing_(processing) {
    cs_->Enter();
    *processing_ += 1;
  }

  ~MarkProcessingCritScope() RTC_UNLOCK_FUNCTION() {
    *processing_ -= 1;
    cs_->Leave();
  }

 private:
  const RecursiveCriticalSection* const cs_;
  size_t* processing_;

  RTC_DISALLOW_COPY_AND_ASSIGN(MarkProcessingCritScope);
};

}  // namespace

ThreadManager* ThreadManager::Instance() {
  static ThreadManager* const thread_manager = new ThreadManager();
  return thread_manager;
}

ThreadManager::~ThreadManager() {
  // The ThreadManager created in Instance() above is intentionally leaked,
  // so this destructor should never run.
  RTC_NOTREACHED() << "ThreadManager should never be destructed.";
}

// static
void ThreadManager::Add(Thread* message_queue) {
  return Instance()->AddInternal(message_queue);
}
void ThreadManager::AddInternal(Thread* message_queue) {
  CritScope cs(&crit_);
  // Prevent changes while the list of message queues is processed.
  RTC_DCHECK_EQ(processing_, 0);
  message_queues_.push_back(message_queue);
}

// static
void ThreadManager::Remove(Thread* message_queue) {
  return Instance()->RemoveInternal(message_queue);
}
void ThreadManager::RemoveInternal(Thread* message_queue) {
  {
    CritScope cs(&crit_);
    // Prevent changes while the list of message queues is processed.
    RTC_DCHECK_EQ(processing_, 0);
    std::vector<Thread*>::iterator iter;
    iter = absl::c_find(message_queues_, message_queue);
    if (iter != message_queues_.end()) {
      message_queues_.erase(iter);
    }
#if RTC_DCHECK_IS_ON
    RemoveFromSendGraph(message_queue);
#endif
  }
}

#if RTC_DCHECK_IS_ON
void ThreadManager::RemoveFromSendGraph(Thread* thread) {
  for (auto it = send_graph_.begin(); it != send_graph_.end();) {
    if (it->first == thread) {
      it = send_graph_.erase(it);
    } else {
      it->second.erase(thread);
      ++it;
    }
  }
}

void ThreadManager::RegisterSendAndCheckForCycles(Thread* source,
                                                  Thread* target) {
  RTC_DCHECK(source);
  RTC_DCHECK(target);

  CritScope cs(&crit_);
  std::deque<Thread*> all_targets({target});
  // We check the pre-existing who-sends-to-who graph for any path from target
  // to source. This loop is guaranteed to terminate because per the send graph
  // invariant, there are no cycles in the graph.
  for (size_t i = 0; i < all_targets.size(); i++) {
    const auto& targets = send_graph_[all_targets[i]];
    all_targets.insert(all_targets.end(), targets.begin(), targets.end());
  }
  RTC_CHECK_EQ(absl::c_count(all_targets, source), 0)
      << " send loop between " << source->name() << " and " << target->name();

  // We may now insert source -> target without creating a cycle, since there
  // was no path from target to source per the prior CHECK.
  send_graph_[source].insert(target);
}
#endif
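
// Illustrative sketch (comment only, not part of the original file) of the
// deadlock pattern the cycle check above guards against, with hypothetical
// rtc::Thread pointers `a`, `b` and `c`:
//
//   // Assume this code runs on thread `a`:
//   b->Invoke<void>(RTC_FROM_HERE, [&] {    // registers edge a -> b, runs on b
//     c->Invoke<void>(RTC_FROM_HERE, [&] {  // registers edge b -> c, runs on c
//       a->Invoke<void>(RTC_FROM_HERE, [] {});  // c -> a would close the
//     });                                       // cycle, so RTC_CHECK fires.
//   });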

// static
void ThreadManager::Clear(MessageHandler* handler) {
  return Instance()->ClearInternal(handler);
}
void ThreadManager::ClearInternal(MessageHandler* handler) {
  // Deleted objects may cause re-entrant calls to ClearInternal. This is
  // allowed as the list of message queues does not change while queues are
  // cleared.
  MarkProcessingCritScope cs(&crit_, &processing_);
  for (Thread* queue : message_queues_) {
    queue->Clear(handler);
  }
}

// static
void ThreadManager::ProcessAllMessageQueuesForTesting() {
  return Instance()->ProcessAllMessageQueuesInternal();
}

void ThreadManager::ProcessAllMessageQueuesInternal() {
  // This works by posting a delayed message at the current time and waiting
  // for it to be dispatched on all queues, which will ensure that all messages
  // that came before it were also dispatched.
  volatile int queues_not_done = 0;

  // This class is used so that whether the posted message is processed, or the
  // message queue is simply cleared, queues_not_done gets decremented.
  class ScopedIncrement : public MessageData {
   public:
    ScopedIncrement(volatile int* value) : value_(value) {
      AtomicOps::Increment(value_);
    }
    ~ScopedIncrement() override { AtomicOps::Decrement(value_); }

   private:
    volatile int* value_;
  };

  {
    MarkProcessingCritScope cs(&crit_, &processing_);
    for (Thread* queue : message_queues_) {
      if (!queue->IsProcessingMessagesForTesting()) {
        // If the queue is not processing messages, it can
        // be ignored. If we tried to post a message to it, it would be dropped
        // or ignored.
        continue;
      }
      queue->PostDelayed(RTC_FROM_HERE, 0, nullptr, MQID_DISPOSE,
                         new ScopedIncrement(&queues_not_done));
    }
  }

  rtc::Thread* current = rtc::Thread::Current();
  // Note: One of the message queues may have been on this thread, which is
  // why we can't synchronously wait for queues_not_done to go to 0; we need
  // to process messages as well.
  while (AtomicOps::AcquireLoad(&queues_not_done) > 0) {
    if (current) {
      current->ProcessMessages(0);
    }
  }
}
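
// Illustrative test usage (comment only, not part of the original file),
// assuming a test that owns a started `worker` thread and a local `counter`:
//
//   worker->PostTask(webrtc::ToQueuedTask([&counter] { ++counter; }));
//   rtc::ThreadManager::ProcessAllMessageQueuesForTesting();
//   // Every message posted before the call above has now been dispatched,
//   // so the increment is visible to the test thread here.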

// static
Thread* Thread::Current() {
  ThreadManager* manager = ThreadManager::Instance();
  Thread* thread = manager->CurrentThread();

#ifndef NO_MAIN_THREAD_WRAPPING
  // Only autowrap the thread which instantiated the ThreadManager.
  if (!thread && manager->IsMainThread()) {
    thread = new Thread(CreateDefaultSocketServer());
    thread->WrapCurrentWithThreadManager(manager, true);
  }
#endif

  return thread;
}

#if defined(WEBRTC_POSIX)
ThreadManager::ThreadManager() : main_thread_ref_(CurrentThreadRef()) {
#if defined(WEBRTC_MAC)
  InitCocoaMultiThreading();
#endif
  pthread_key_create(&key_, nullptr);
}

Thread* ThreadManager::CurrentThread() {
  return static_cast<Thread*>(pthread_getspecific(key_));
}

void ThreadManager::SetCurrentThreadInternal(Thread* thread) {
  pthread_setspecific(key_, thread);
}
#endif

#if defined(WEBRTC_WIN)
ThreadManager::ThreadManager()
    : key_(TlsAlloc()), main_thread_ref_(CurrentThreadRef()) {}

Thread* ThreadManager::CurrentThread() {
  return static_cast<Thread*>(TlsGetValue(key_));
}

void ThreadManager::SetCurrentThreadInternal(Thread* thread) {
  TlsSetValue(key_, thread);
}
#endif

void ThreadManager::SetCurrentThread(Thread* thread) {
#if RTC_DLOG_IS_ON
  if (CurrentThread() && thread) {
    RTC_DLOG(LS_ERROR) << "SetCurrentThread: Overwriting an existing value?";
  }
#endif  // RTC_DLOG_IS_ON

  if (thread) {
    thread->EnsureIsCurrentTaskQueue();
  } else {
    Thread* current = CurrentThread();
    if (current) {
      // The current thread is being cleared, e.g. as a result of
      // UnwrapCurrent() being called or when a thread is being stopped
      // (see PreRun()). This signals that the Thread instance is being detached
      // from the thread, which also means that TaskQueue::Current() must not
      // return a pointer to the Thread instance.
      current->ClearCurrentTaskQueue();
    }
  }

  SetCurrentThreadInternal(thread);
}

void rtc::ThreadManager::ChangeCurrentThreadForTest(rtc::Thread* thread) {
  SetCurrentThreadInternal(thread);
}

Thread* ThreadManager::WrapCurrentThread() {
  Thread* result = CurrentThread();
  if (nullptr == result) {
    result = new Thread(CreateDefaultSocketServer());
    result->WrapCurrentWithThreadManager(this, true);
  }
  return result;
}

void ThreadManager::UnwrapCurrentThread() {
  Thread* t = CurrentThread();
  if (t && !(t->IsOwned())) {
    t->UnwrapCurrent();
    delete t;
  }
}

bool ThreadManager::IsMainThread() {
  return IsThreadRefEqual(CurrentThreadRef(), main_thread_ref_);
}

Thread::ScopedDisallowBlockingCalls::ScopedDisallowBlockingCalls()
    : thread_(Thread::Current()),
      previous_state_(thread_->SetAllowBlockingCalls(false)) {}

Thread::ScopedDisallowBlockingCalls::~ScopedDisallowBlockingCalls() {
  RTC_DCHECK(thread_->IsCurrent());
  thread_->SetAllowBlockingCalls(previous_state_);
}

#if RTC_DCHECK_IS_ON
Thread::ScopedCountBlockingCalls::ScopedCountBlockingCalls(
    std::function<void(uint32_t, uint32_t)> callback)
    : thread_(Thread::Current()),
      base_blocking_call_count_(thread_->GetBlockingCallCount()),
      base_could_be_blocking_call_count_(
          thread_->GetCouldBeBlockingCallCount()),
      result_callback_(std::move(callback)) {}

Thread::ScopedCountBlockingCalls::~ScopedCountBlockingCalls() {
  result_callback_(GetBlockingCallCount(), GetCouldBeBlockingCallCount());
}

uint32_t Thread::ScopedCountBlockingCalls::GetBlockingCallCount() const {
  return thread_->GetBlockingCallCount() - base_blocking_call_count_;
}

uint32_t Thread::ScopedCountBlockingCalls::GetCouldBeBlockingCallCount() const {
  return thread_->GetCouldBeBlockingCallCount() -
         base_could_be_blocking_call_count_;
}

uint32_t Thread::ScopedCountBlockingCalls::GetTotalBlockedCallCount() const {
  return GetBlockingCallCount() + GetCouldBeBlockingCallCount();
}
#endif
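
// Illustrative usage (comment only, not part of the original file) of the
// RAII counter above; DoWorkThatMayInvoke() is a hypothetical helper:
//
//   {
//     rtc::Thread::ScopedCountBlockingCalls counter(
//         [](uint32_t actually_blocked, uint32_t could_block) {
//           RTC_LOG(LS_INFO) << actually_blocked << " blocking and "
//                            << could_block << " potentially blocking calls.";
//         });
//     DoWorkThatMayInvoke();
//   }  // The callback runs here with the counts accumulated in this scope.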

Thread::Thread(SocketServer* ss) : Thread(ss, /*do_init=*/true) {}

Thread::Thread(std::unique_ptr<SocketServer> ss)
    : Thread(std::move(ss), /*do_init=*/true) {}

Thread::Thread(SocketServer* ss, bool do_init)
    : fPeekKeep_(false),
      delayed_next_num_(0),
      fInitialized_(false),
      fDestroyed_(false),
      stop_(0),
      ss_(ss) {
  RTC_DCHECK(ss);
  ss_->SetMessageQueue(this);
  SetName("Thread", this);  // default name
  if (do_init) {
    DoInit();
  }
}

Thread::Thread(std::unique_ptr<SocketServer> ss, bool do_init)
    : Thread(ss.get(), do_init) {
  own_ss_ = std::move(ss);
}

Thread::~Thread() {
  Stop();
  DoDestroy();
}

void Thread::DoInit() {
  if (fInitialized_) {
    return;
  }

  fInitialized_ = true;
  ThreadManager::Add(this);
}

void Thread::DoDestroy() {
  if (fDestroyed_) {
    return;
  }

  fDestroyed_ = true;
  // The signal is done from here to ensure
  // that it always gets called when the queue
  // is going away.
  SignalQueueDestroyed();
  ThreadManager::Remove(this);
  ClearInternal(nullptr, MQID_ANY, nullptr);

  if (ss_) {
    ss_->SetMessageQueue(nullptr);
  }
}

SocketServer* Thread::socketserver() {
  return ss_;
}

void Thread::WakeUpSocketServer() {
  ss_->WakeUp();
}

void Thread::Quit() {
  AtomicOps::ReleaseStore(&stop_, 1);
  WakeUpSocketServer();
}

bool Thread::IsQuitting() {
  return AtomicOps::AcquireLoad(&stop_) != 0;
}

void Thread::Restart() {
  AtomicOps::ReleaseStore(&stop_, 0);
}

bool Thread::Peek(Message* pmsg, int cmsWait) {
  if (fPeekKeep_) {
    *pmsg = msgPeek_;
    return true;
  }
  if (!Get(pmsg, cmsWait))
    return false;
  msgPeek_ = *pmsg;
  fPeekKeep_ = true;
  return true;
}

bool Thread::Get(Message* pmsg, int cmsWait, bool process_io) {
  // Return and clear peek if present
  // Always return the peek if it exists so there is Peek/Get symmetry

  if (fPeekKeep_) {
    *pmsg = msgPeek_;
    fPeekKeep_ = false;
    return true;
  }

  // Get w/wait + timer scan / dispatch + socket / event multiplexer dispatch

  int64_t cmsTotal = cmsWait;
  int64_t cmsElapsed = 0;
  int64_t msStart = TimeMillis();
  int64_t msCurrent = msStart;
  while (true) {
    // Check for posted events
    int64_t cmsDelayNext = kForever;
    bool first_pass = true;
    while (true) {
      // All queue operations need to be locked, but nothing else in this loop
      // (specifically handling disposed message) can happen inside the crit.
      // Otherwise, disposed MessageHandlers will cause deadlocks.
      {
        CritScope cs(&crit_);
        // On the first pass, check for delayed messages that have been
        // triggered and calculate the next trigger time.
        if (first_pass) {
          first_pass = false;
          while (!delayed_messages_.empty()) {
            if (msCurrent < delayed_messages_.top().run_time_ms_) {
              cmsDelayNext =
                  TimeDiff(delayed_messages_.top().run_time_ms_, msCurrent);
              break;
            }
            messages_.push_back(delayed_messages_.top().msg_);
            delayed_messages_.pop();
          }
        }
        // Pull a message off the message queue, if available.
        if (messages_.empty()) {
          break;
        } else {
          *pmsg = messages_.front();
          messages_.pop_front();
        }
      }  // crit_ is released here.

      // If this was a dispose message, delete it and skip it.
      if (MQID_DISPOSE == pmsg->message_id) {
        RTC_DCHECK(nullptr == pmsg->phandler);
        delete pmsg->pdata;
        *pmsg = Message();
        continue;
      }
      return true;
    }

    if (IsQuitting())
      break;

    // Which is shorter, the delay wait or the asked wait?

    int64_t cmsNext;
    if (cmsWait == kForever) {
      cmsNext = cmsDelayNext;
    } else {
      cmsNext = std::max<int64_t>(0, cmsTotal - cmsElapsed);
      if ((cmsDelayNext != kForever) && (cmsDelayNext < cmsNext))
        cmsNext = cmsDelayNext;
    }

    {
      // Wait and multiplex in the meantime
      if (!ss_->Wait(static_cast<int>(cmsNext), process_io))
        return false;
    }

    // If the specified timeout expired, return

    msCurrent = TimeMillis();
    cmsElapsed = TimeDiff(msCurrent, msStart);
    if (cmsWait != kForever) {
      if (cmsElapsed >= cmsWait)
        return false;
    }
  }
  return false;
}
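
// A minimal sketch (comment only, not part of the original file) of the pump
// that drives Get() and Dispatch(); ProcessMessages(kForever) further below
// reduces to essentially this loop:
//
//   Message msg;
//   while (thread->Get(&msg, rtc::Thread::kForever)) {
//     thread->Dispatch(&msg);
//   }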

void Thread::Post(const Location& posted_from,
                  MessageHandler* phandler,
                  uint32_t id,
                  MessageData* pdata,
                  bool time_sensitive) {
  RTC_DCHECK(!time_sensitive);
  if (IsQuitting()) {
    delete pdata;
    return;
  }

  // Keep thread safe
  // Add the message to the end of the queue
  // Signal for the multiplexer to return

  {
    CritScope cs(&crit_);
    Message msg;
    msg.posted_from = posted_from;
    msg.phandler = phandler;
    msg.message_id = id;
    msg.pdata = pdata;
    messages_.push_back(msg);
  }
  WakeUpSocketServer();
}

void Thread::PostDelayed(const Location& posted_from,
                         int delay_ms,
                         MessageHandler* phandler,
                         uint32_t id,
                         MessageData* pdata) {
  return DoDelayPost(posted_from, delay_ms, TimeAfter(delay_ms), phandler, id,
                     pdata);
}

void Thread::PostAt(const Location& posted_from,
                    int64_t run_at_ms,
                    MessageHandler* phandler,
                    uint32_t id,
                    MessageData* pdata) {
  return DoDelayPost(posted_from, TimeUntil(run_at_ms), run_at_ms, phandler, id,
                     pdata);
}

void Thread::DoDelayPost(const Location& posted_from,
                         int64_t delay_ms,
                         int64_t run_at_ms,
                         MessageHandler* phandler,
                         uint32_t id,
                         MessageData* pdata) {
  if (IsQuitting()) {
    delete pdata;
    return;
  }

  // Keep thread safe
  // Add to the priority queue. Gets sorted soonest first.
  // Signal for the multiplexer to return.

  {
    CritScope cs(&crit_);
    Message msg;
    msg.posted_from = posted_from;
    msg.phandler = phandler;
    msg.message_id = id;
    msg.pdata = pdata;
    DelayedMessage delayed(delay_ms, run_at_ms, delayed_next_num_, msg);
    delayed_messages_.push(delayed);
    // If this message queue processes 1 message every millisecond for 50 days,
    // we will wrap this number. Even then, only messages with identical times
    // will be misordered, and then only briefly. This is probably ok.
    ++delayed_next_num_;
    RTC_DCHECK_NE(0, delayed_next_num_);
  }
  WakeUpSocketServer();
}
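
// Illustrative ordering note (comment only, not part of the original file):
// two messages posted with the same delay from the same thread keep their
// posting order, because ties on run time are broken by `delayed_next_num_`.
// `handler`, `kFirstId` and `kSecondId` are hypothetical:
//
//   thread->PostDelayed(RTC_FROM_HERE, 10, handler, kFirstId, nullptr);
//   thread->PostDelayed(RTC_FROM_HERE, 10, handler, kSecondId, nullptr);
//   // Both become runnable at the same time; kFirstId is dispatched first.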

int Thread::GetDelay() {
  CritScope cs(&crit_);

  if (!messages_.empty())
    return 0;

  if (!delayed_messages_.empty()) {
    int delay = TimeUntil(delayed_messages_.top().run_time_ms_);
    if (delay < 0)
      delay = 0;
    return delay;
  }

  return kForever;
}

void Thread::ClearInternal(MessageHandler* phandler,
                           uint32_t id,
                           MessageList* removed) {
  // Remove messages with phandler

  if (fPeekKeep_ && msgPeek_.Match(phandler, id)) {
    if (removed) {
      removed->push_back(msgPeek_);
    } else {
      delete msgPeek_.pdata;
    }
    fPeekKeep_ = false;
  }

  // Remove from ordered message queue

  for (auto it = messages_.begin(); it != messages_.end();) {
    if (it->Match(phandler, id)) {
      if (removed) {
        removed->push_back(*it);
      } else {
        delete it->pdata;
      }
      it = messages_.erase(it);
    } else {
      ++it;
    }
  }

  // Remove from priority queue. Not directly iterable, so use this approach

  auto new_end = delayed_messages_.container().begin();
  for (auto it = new_end; it != delayed_messages_.container().end(); ++it) {
    if (it->msg_.Match(phandler, id)) {
      if (removed) {
        removed->push_back(it->msg_);
      } else {
        delete it->msg_.pdata;
      }
    } else {
      *new_end++ = *it;
    }
  }
  delayed_messages_.container().erase(new_end,
                                      delayed_messages_.container().end());
  delayed_messages_.reheap();
}

void Thread::Dispatch(Message* pmsg) {
  TRACE_EVENT2("webrtc", "Thread::Dispatch", "src_file",
               pmsg->posted_from.file_name(), "src_func",
               pmsg->posted_from.function_name());
  RTC_DCHECK_RUN_ON(this);
  int64_t start_time = TimeMillis();
  pmsg->phandler->OnMessage(pmsg);
  int64_t end_time = TimeMillis();
  int64_t diff = TimeDiff(end_time, start_time);
  if (diff >= dispatch_warning_ms_) {
    RTC_LOG(LS_INFO) << "Message to " << name() << " took " << diff
                     << "ms to dispatch. Posted from: "
                     << pmsg->posted_from.ToString();
    // To avoid log spew, move the warning limit to only give warning
    // for delays that are larger than the one observed.
    dispatch_warning_ms_ = diff + 1;
  }
}
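
// Illustrative usage (comment only, not part of the original file): a caller
// that expects long-running messages can raise the warning threshold used
// above via SetDispatchWarningMs(), defined later in this file:
//
//   worker_thread->SetDispatchWarningMs(500);  // only warn above 500 ms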

bool Thread::IsCurrent() const {
  return ThreadManager::Instance()->CurrentThread() == this;
}

std::unique_ptr<Thread> Thread::CreateWithSocketServer() {
  return std::unique_ptr<Thread>(new Thread(CreateDefaultSocketServer()));
}

std::unique_ptr<Thread> Thread::Create() {
  return std::unique_ptr<Thread>(
      new Thread(std::unique_ptr<SocketServer>(new NullSocketServer())));
}

bool Thread::SleepMs(int milliseconds) {
  AssertBlockingIsAllowedOnCurrentThread();

#if defined(WEBRTC_WIN)
  ::Sleep(milliseconds);
  return true;
#else
  // POSIX has both a usleep() and a nanosleep(), but the former is deprecated,
  // so we use nanosleep() even though it has greater precision than necessary.
  struct timespec ts;
  ts.tv_sec = milliseconds / 1000;
  ts.tv_nsec = (milliseconds % 1000) * 1000000;
  int ret = nanosleep(&ts, nullptr);
  if (ret != 0) {
    RTC_LOG_ERR(LS_WARNING) << "nanosleep() returning early";
    return false;
  }
  return true;
#endif
}

bool Thread::SetName(const std::string& name, const void* obj) {
  RTC_DCHECK(!IsRunning());

  name_ = name;
  if (obj) {
    // The %p specifier typically produces at most 16 hex digits, possibly with
    // a 0x prefix. But the format is implementation defined, so add some margin.
    char buf[30];
    snprintf(buf, sizeof(buf), " 0x%p", obj);
    name_ += buf;
  }
  return true;
}

void Thread::SetDispatchWarningMs(int deadline) {
  if (!IsCurrent()) {
    PostTask(webrtc::ToQueuedTask(
        [this, deadline]() { SetDispatchWarningMs(deadline); }));
    return;
  }
  RTC_DCHECK_RUN_ON(this);
  dispatch_warning_ms_ = deadline;
}

bool Thread::Start() {
  RTC_DCHECK(!IsRunning());

  if (IsRunning())
    return false;

  Restart();  // reset IsQuitting() if the thread is being restarted

  // Make sure that ThreadManager is created on the main thread before
  // we start a new thread.
  ThreadManager::Instance();

  owned_ = true;

#if defined(WEBRTC_WIN)
  thread_ = CreateThread(nullptr, 0, PreRun, this, 0, &thread_id_);
  if (!thread_) {
    return false;
  }
#elif defined(WEBRTC_POSIX)
  pthread_attr_t attr;
  pthread_attr_init(&attr);

  int error_code = pthread_create(&thread_, &attr, PreRun, this);
  if (0 != error_code) {
    RTC_LOG(LS_ERROR) << "Unable to create pthread, error " << error_code;
    thread_ = 0;
    return false;
  }
  RTC_DCHECK(thread_);
#endif
  return true;
}
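
// Illustrative usage (comment only, not part of the original file) of
// creating and running an owned thread with the API above:
//
//   auto worker = rtc::Thread::Create();  // backed by a NullSocketServer
//   worker->SetName("worker", nullptr);   // must be set before Start()
//   worker->Start();
//   worker->PostTask(webrtc::ToQueuedTask([] { /* runs on the worker */ }));
//   worker->Stop();  // Quit() followed by Join()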

bool Thread::WrapCurrent() {
  return WrapCurrentWithThreadManager(ThreadManager::Instance(), true);
}

void Thread::UnwrapCurrent() {
  // Clears the platform-specific thread-specific storage.
  ThreadManager::Instance()->SetCurrentThread(nullptr);
#if defined(WEBRTC_WIN)
  if (thread_ != nullptr) {
    if (!CloseHandle(thread_)) {
      RTC_LOG_GLE(LS_ERROR)
          << "When unwrapping thread, failed to close handle.";
    }
    thread_ = nullptr;
    thread_id_ = 0;
  }
#elif defined(WEBRTC_POSIX)
  thread_ = 0;
#endif
}

void Thread::SafeWrapCurrent() {
  WrapCurrentWithThreadManager(ThreadManager::Instance(), false);
}

void Thread::Join() {
  if (!IsRunning())
    return;

  RTC_DCHECK(!IsCurrent());
  if (Current() && !Current()->blocking_calls_allowed_) {
    RTC_LOG(LS_WARNING) << "Waiting for the thread to join, "
                           "but blocking calls have been disallowed";
  }

#if defined(WEBRTC_WIN)
  RTC_DCHECK(thread_ != nullptr);
  WaitForSingleObject(thread_, INFINITE);
  CloseHandle(thread_);
  thread_ = nullptr;
  thread_id_ = 0;
#elif defined(WEBRTC_POSIX)
  pthread_join(thread_, nullptr);
  thread_ = 0;
#endif
}

bool Thread::SetAllowBlockingCalls(bool allow) {
  RTC_DCHECK(IsCurrent());
  bool previous = blocking_calls_allowed_;
  blocking_calls_allowed_ = allow;
  return previous;
}

// static
void Thread::AssertBlockingIsAllowedOnCurrentThread() {
#if !defined(NDEBUG)
  Thread* current = Thread::Current();
  RTC_DCHECK(!current || current->blocking_calls_allowed_);
#endif
}

// static
#if defined(WEBRTC_WIN)
DWORD WINAPI Thread::PreRun(LPVOID pv) {
#else
void* Thread::PreRun(void* pv) {
#endif
  Thread* thread = static_cast<Thread*>(pv);
  ThreadManager::Instance()->SetCurrentThread(thread);
  rtc::SetCurrentThreadName(thread->name_.c_str());
#if defined(WEBRTC_MAC)
  ScopedAutoReleasePool pool;
#endif
  thread->Run();

  ThreadManager::Instance()->SetCurrentThread(nullptr);
#ifdef WEBRTC_WIN
  return 0;
#else
  return nullptr;
#endif
}

void Thread::Run() {
  ProcessMessages(kForever);
}

bool Thread::IsOwned() {
  RTC_DCHECK(IsRunning());
  return owned_;
}

void Thread::Stop() {
  Thread::Quit();
  Join();
}

void Thread::Send(const Location& posted_from,
                  MessageHandler* phandler,
                  uint32_t id,
                  MessageData* pdata) {
  RTC_DCHECK(!IsQuitting());
  if (IsQuitting())
    return;

  // Sent messages are sent to the MessageHandler directly, in the context
  // of "thread", like Win32 SendMessage. If in the right context,
  // call the handler directly.
  Message msg;
  msg.posted_from = posted_from;
  msg.phandler = phandler;
  msg.message_id = id;
  msg.pdata = pdata;
  if (IsCurrent()) {
#if RTC_DCHECK_IS_ON
    RTC_DCHECK_RUN_ON(this);
    could_be_blocking_call_count_++;
#endif
    msg.phandler->OnMessage(&msg);
    return;
  }

  AssertBlockingIsAllowedOnCurrentThread();

  Thread* current_thread = Thread::Current();

#if RTC_DCHECK_IS_ON
  if (current_thread) {
    RTC_DCHECK_RUN_ON(current_thread);
    current_thread->blocking_call_count_++;
    RTC_DCHECK(current_thread->IsInvokeToThreadAllowed(this));
    ThreadManager::Instance()->RegisterSendAndCheckForCycles(current_thread,
                                                             this);
  }
#endif

  // Perhaps down the line we can get rid of this workaround and always require
  // current_thread to be valid when Send() is called.
  std::unique_ptr<rtc::Event> done_event;
  if (!current_thread)
    done_event.reset(new rtc::Event());

  bool ready = false;
  PostTask(webrtc::ToQueuedTask(
      [&msg]() mutable { msg.phandler->OnMessage(&msg); },
      [this, &ready, current_thread, done = done_event.get()] {
        if (current_thread) {
          CritScope cs(&crit_);
          ready = true;
          current_thread->socketserver()->WakeUp();
        } else {
          done->Set();
        }
      }));

  if (current_thread) {
    bool waited = false;
    crit_.Enter();
    while (!ready) {
      crit_.Leave();
      current_thread->socketserver()->Wait(kForever, false);
      waited = true;
      crit_.Enter();
    }
    crit_.Leave();

    // Our Wait loop above may have consumed some WakeUp events for this
    // Thread, that weren't relevant to this Send. Losing these WakeUps can
    // cause problems for some SocketServers.
    //
    // Concrete example:
    // Win32SocketServer on thread A calls Send on thread B. While processing
    // the message, thread B Posts a message to A. We consume the wakeup for
    // that Post while waiting for the Send to complete, which means that when
    // we exit this loop, we need to issue another WakeUp, or else the Posted
    // message won't be processed in a timely manner.

    if (waited) {
      current_thread->socketserver()->WakeUp();
    }
  } else {
    done_event->Wait(rtc::Event::kForever);
  }
}
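
// Illustrative usage (comment only, not part of the original file):
// Thread::Invoke<ReturnT>() funnels into Send(), so a synchronous
// cross-thread call looks like this; `network_thread` and
// ComputeOnNetworkThread() are hypothetical:
//
//   int result = network_thread->Invoke<int>(
//       RTC_FROM_HERE, [] { return ComputeOnNetworkThread(); });
//   // The calling thread blocks until the lambda has run on network_thread.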

void Thread::InvokeInternal(const Location& posted_from,
                            rtc::FunctionView<void()> functor) {
  TRACE_EVENT2("webrtc", "Thread::Invoke", "src_file", posted_from.file_name(),
               "src_func", posted_from.function_name());

  class FunctorMessageHandler : public MessageHandler {
   public:
    explicit FunctorMessageHandler(rtc::FunctionView<void()> functor)
        : functor_(functor) {}
    void OnMessage(Message* msg) override { functor_(); }

   private:
    rtc::FunctionView<void()> functor_;
  } handler(functor);

  Send(posted_from, &handler);
}

// Called by the ThreadManager when being set as the current thread.
void Thread::EnsureIsCurrentTaskQueue() {
  task_queue_registration_ =
      std::make_unique<TaskQueueBase::CurrentTaskQueueSetter>(this);
}

// Called by the ThreadManager when being unset as the current thread.
void Thread::ClearCurrentTaskQueue() {
  task_queue_registration_.reset();
}

void Thread::QueuedTaskHandler::OnMessage(Message* msg) {
  RTC_DCHECK(msg);
  auto* data = static_cast<ScopedMessageData<webrtc::QueuedTask>*>(msg->pdata);
  std::unique_ptr<webrtc::QueuedTask> task = std::move(data->data());
  // Thread expects handler to own Message::pdata when OnMessage is called.
  // Since MessageData is no longer needed, delete it.
  delete data;

  // QueuedTask interface uses Run return value to communicate who owns the
  // task. false means QueuedTask took the ownership.
  if (!task->Run())
    task.release();
}

void Thread::AllowInvokesToThread(Thread* thread) {
#if (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON))
  if (!IsCurrent()) {
    PostTask(webrtc::ToQueuedTask(
        [thread, this]() { AllowInvokesToThread(thread); }));
    return;
  }
  RTC_DCHECK_RUN_ON(this);
  allowed_threads_.push_back(thread);
  invoke_policy_enabled_ = true;
#endif
}

void Thread::DisallowAllInvokes() {
#if (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON))
  if (!IsCurrent()) {
    PostTask(webrtc::ToQueuedTask([this]() { DisallowAllInvokes(); }));
    return;
  }
  RTC_DCHECK_RUN_ON(this);
  allowed_threads_.clear();
  invoke_policy_enabled_ = true;
#endif
}

#if RTC_DCHECK_IS_ON
uint32_t Thread::GetBlockingCallCount() const {
  RTC_DCHECK_RUN_ON(this);
  return blocking_call_count_;
}
uint32_t Thread::GetCouldBeBlockingCallCount() const {
  RTC_DCHECK_RUN_ON(this);
  return could_be_blocking_call_count_;
}
#endif

// Returns true if no policies have been added, or if there is at least one
// policy that permits invocation to the |target| thread.
bool Thread::IsInvokeToThreadAllowed(rtc::Thread* target) {
#if (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON))
  RTC_DCHECK_RUN_ON(this);
  if (!invoke_policy_enabled_) {
    return true;
  }
  for (const auto* thread : allowed_threads_) {
    if (thread == target) {
      return true;
    }
  }
  return false;
#else
  return true;
#endif
}
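
// Illustrative usage (comment only, not part of the original file) of the
// invoke policy checked above; `worker` and `network` are hypothetical
// rtc::Thread pointers:
//
//   worker->AllowInvokesToThread(network);  // worker may now only invoke
//                                           // into network.
//   worker->DisallowAllInvokes();           // from here on, any Invoke from
//                                           // worker hits the RTC_DCHECK in
//                                           // Send().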

void Thread::PostTask(std::unique_ptr<webrtc::QueuedTask> task) {
  // Though Post takes MessageData by raw pointer (last parameter), it still
  // takes it with ownership.
  Post(RTC_FROM_HERE, &queued_task_handler_,
       /*id=*/0, new ScopedMessageData<webrtc::QueuedTask>(std::move(task)));
}

void Thread::PostDelayedTask(std::unique_ptr<webrtc::QueuedTask> task,
                             uint32_t milliseconds) {
  // Though PostDelayed takes MessageData by raw pointer (last parameter),
  // it still takes it with ownership.
  PostDelayed(RTC_FROM_HERE, milliseconds, &queued_task_handler_,
              /*id=*/0,
              new ScopedMessageData<webrtc::QueuedTask>(std::move(task)));
}
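
// Illustrative usage (comment only, not part of the original file) of the
// task-posting API above; DoWork() and DoWorkLater() are hypothetical:
//
//   thread->PostTask(webrtc::ToQueuedTask([] { DoWork(); }));
//   thread->PostDelayedTask(webrtc::ToQueuedTask([] { DoWorkLater(); }),
//                           /*milliseconds=*/200);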

void Thread::Delete() {
  Stop();
  delete this;
}

bool Thread::IsProcessingMessagesForTesting() {
  return (owned_ || IsCurrent()) && !IsQuitting();
}

void Thread::Clear(MessageHandler* phandler,
                   uint32_t id,
                   MessageList* removed) {
  CritScope cs(&crit_);
  ClearInternal(phandler, id, removed);
}

bool Thread::ProcessMessages(int cmsLoop) {
  // Using ProcessMessages with a custom clock for testing and a time greater
  // than 0 doesn't work, since it's not guaranteed to advance the custom
  // clock's time, and may get stuck in an infinite loop.
  RTC_DCHECK(GetClockForTesting() == nullptr || cmsLoop == 0 ||
             cmsLoop == kForever);
  int64_t msEnd = (kForever == cmsLoop) ? 0 : TimeAfter(cmsLoop);
  int cmsNext = cmsLoop;

  while (true) {
#if defined(WEBRTC_MAC)
    ScopedAutoReleasePool pool;
#endif
    Message msg;
    if (!Get(&msg, cmsNext))
      return !IsQuitting();
    Dispatch(&msg);

    if (cmsLoop != kForever) {
      cmsNext = static_cast<int>(TimeUntil(msEnd));
      if (cmsNext < 0)
        return true;
    }
  }
}

bool Thread::WrapCurrentWithThreadManager(ThreadManager* thread_manager,
                                          bool need_synchronize_access) {
  RTC_DCHECK(!IsRunning());

#if defined(WEBRTC_WIN)
  if (need_synchronize_access) {
    // We explicitly ask for no rights other than synchronization.
    // This gives us the best chance of succeeding.
    thread_ = OpenThread(SYNCHRONIZE, FALSE, GetCurrentThreadId());
    if (!thread_) {
      RTC_LOG_GLE(LS_ERROR) << "Unable to get handle to thread.";
      return false;
    }
    thread_id_ = GetCurrentThreadId();
  }
#elif defined(WEBRTC_POSIX)
  thread_ = pthread_self();
#endif
  owned_ = false;
  thread_manager->SetCurrentThread(this);
  return true;
}

bool Thread::IsRunning() {
#if defined(WEBRTC_WIN)
  return thread_ != nullptr;
#elif defined(WEBRTC_POSIX)
  return thread_ != 0;
#endif
}

// static
MessageHandler* Thread::GetPostTaskMessageHandler() {
  // Allocate at first call, never deallocate.
  static MessageHandler* handler = new MessageHandlerWithTask;
  return handler;
}

AutoThread::AutoThread()
    : Thread(CreateDefaultSocketServer(), /*do_init=*/false) {
  if (!ThreadManager::Instance()->CurrentThread()) {
    // DoInit registers with ThreadManager. Do that only if we intend to
    // be rtc::Thread::Current(), otherwise ProcessAllMessageQueuesInternal will
    // post a message to a queue that no running thread is serving.
    DoInit();
    ThreadManager::Instance()->SetCurrentThread(this);
  }
}

AutoThread::~AutoThread() {
  Stop();
  DoDestroy();
  if (ThreadManager::Instance()->CurrentThread() == this) {
    ThreadManager::Instance()->SetCurrentThread(nullptr);
  }
}

AutoSocketServerThread::AutoSocketServerThread(SocketServer* ss)
    : Thread(ss, /*do_init=*/false) {
  DoInit();
  old_thread_ = ThreadManager::Instance()->CurrentThread();
  // Temporarily set the current thread to nullptr so that we can keep checks
  // around that catch unintentional pointer overwrites.
  rtc::ThreadManager::Instance()->SetCurrentThread(nullptr);
  rtc::ThreadManager::Instance()->SetCurrentThread(this);
  if (old_thread_) {
    ThreadManager::Remove(old_thread_);
  }
}

AutoSocketServerThread::~AutoSocketServerThread() {
  RTC_DCHECK(ThreadManager::Instance()->CurrentThread() == this);
  // Some tests post destroy messages to this thread. To avoid memory
  // leaks, we have to process those messages. In particular
  // P2PTransportChannelPingTest, relying on the message posted in
  // cricket::Connection::Destroy.
  ProcessMessages(0);
  // Stop and destroy the thread before clearing it as the current thread.
  // Sometimes there are messages left in the Thread that will be
  // destroyed by DoDestroy, and sometimes the destructors of the message and/or
  // its contents rely on this thread still being set as the current thread.
  Stop();
  DoDestroy();
  rtc::ThreadManager::Instance()->SetCurrentThread(nullptr);
  rtc::ThreadManager::Instance()->SetCurrentThread(old_thread_);
  if (old_thread_) {
    ThreadManager::Add(old_thread_);
  }
}

}  // namespace rtc