blob: f9680650a96f9175d950283d25e7c2d85f2dd340 [file] [log] [blame]
henrike@webrtc.orgf0488722014-05-13 18:00:26 +00001/*
2 * Copyright 2004 The WebRTC Project Authors. All rights reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
Mirko Bonadei92ea95e2017-09-15 06:47:31 +020011#include "rtc_base/thread.h"
henrike@webrtc.orgf0488722014-05-13 18:00:26 +000012
Ali Tofigh7fa90572022-03-17 15:47:49 +010013#include "absl/strings/string_view.h"
14
henrike@webrtc.orgf0488722014-05-13 18:00:26 +000015#if defined(WEBRTC_WIN)
16#include <comdef.h>
17#elif defined(WEBRTC_POSIX)
18#include <time.h>
Tommi51492422017-12-04 15:18:23 +010019#else
20#error "Either WEBRTC_WIN or WEBRTC_POSIX needs to be defined."
henrike@webrtc.orgf0488722014-05-13 18:00:26 +000021#endif
22
Artem Titov80d02ad2018-05-21 12:20:39 +020023#if defined(WEBRTC_WIN)
24// Disable warning that we don't care about:
25// warning C4722: destructor never returns, potential memory leak
26#pragma warning(disable : 4722)
27#endif
28
Yves Gerey988cc082018-10-23 12:03:01 +020029#include <stdio.h>
Jonas Olssona4d87372019-07-05 19:08:33 +020030
Yves Gerey988cc082018-10-23 12:03:01 +020031#include <utility>
Yves Gerey2e00abc2018-10-05 15:39:24 +020032
Sebastian Jansson6ea2c6a2020-01-13 14:07:22 +010033#include "absl/algorithm/container.h"
Artem Titovd15a5752021-02-10 14:31:24 +010034#include "api/sequence_checker.h"
Sebastian Jansson6ea2c6a2020-01-13 14:07:22 +010035#include "rtc_base/atomic_ops.h"
Mirko Bonadei92ea95e2017-09-15 06:47:31 +020036#include "rtc_base/checks.h"
Markus Handell3cb525b2020-07-16 16:16:09 +020037#include "rtc_base/deprecated/recursive_critical_section.h"
Tomas Gunnarsson0fd4c4e2020-09-04 16:33:25 +020038#include "rtc_base/event.h"
Mirko Bonadeie5f4c6b2021-01-15 10:41:01 +010039#include "rtc_base/internal/default_socket_server.h"
Mirko Bonadei92ea95e2017-09-15 06:47:31 +020040#include "rtc_base/logging.h"
Steve Anton10542f22019-01-11 09:11:00 -080041#include "rtc_base/null_socket_server.h"
Sebastian Janssonda7267a2020-03-03 10:48:05 +010042#include "rtc_base/task_utils/to_queued_task.h"
Steve Anton10542f22019-01-11 09:11:00 -080043#include "rtc_base/time_utils.h"
Mirko Bonadei92ea95e2017-09-15 06:47:31 +020044#include "rtc_base/trace_event.h"
henrike@webrtc.orgf0488722014-05-13 18:00:26 +000045
Kári Tristan Helgason62b13452018-10-12 12:57:49 +020046#if defined(WEBRTC_MAC)
47#include "rtc_base/system/cocoa_threading.h"
Yves Gerey988cc082018-10-23 12:03:01 +020048
Kári Tristan Helgason62b13452018-10-12 12:57:49 +020049/*
50 * These are forward-declarations for methods that are part of the
51 * ObjC runtime. They are declared in the private header objc-internal.h.
52 * These calls are what clang inserts when using @autoreleasepool in ObjC,
53 * but here they are used directly in order to keep this file C++.
54 * https://clang.llvm.org/docs/AutomaticReferenceCounting.html#runtime-support
55 */
56extern "C" {
57void* objc_autoreleasePoolPush(void);
58void objc_autoreleasePoolPop(void* pool);
59}
60
61namespace {
62class ScopedAutoReleasePool {
63 public:
64 ScopedAutoReleasePool() : pool_(objc_autoreleasePoolPush()) {}
65 ~ScopedAutoReleasePool() { objc_autoreleasePoolPop(pool_); }
66
67 private:
68 void* const pool_;
69};
70} // namespace
71#endif
72
henrike@webrtc.orgf0488722014-05-13 18:00:26 +000073namespace rtc {
namespace {

// MessageHandler adapter that runs a posted MessageLikeTask once and then
// frees it. The destructor is private, so instances are only destroyed via
// the MessageHandler machinery, never directly by callers.
class MessageHandlerWithTask final : public MessageHandler {
 public:
  MessageHandlerWithTask() {}

  MessageHandlerWithTask(const MessageHandlerWithTask&) = delete;
  MessageHandlerWithTask& operator=(const MessageHandlerWithTask&) = delete;

  // Runs the task carried in msg->pdata, then releases it.
  void OnMessage(Message* msg) override {
    static_cast<rtc_thread_internal::MessageLikeTask*>(msg->pdata)->Run();
    delete msg->pdata;
  }

 private:
  ~MessageHandlerWithTask() override {}
};

// Scoped lock that additionally increments *processing while held. The
// counter lets ThreadManager detect (via RTC_DCHECK_EQ(processing_, 0) in
// AddInternal/RemoveInternal) that the message-queue list must not be
// mutated while it is being iterated.
class RTC_SCOPED_LOCKABLE MarkProcessingCritScope {
 public:
  MarkProcessingCritScope(const RecursiveCriticalSection* cs,
                          size_t* processing) RTC_EXCLUSIVE_LOCK_FUNCTION(cs)
      : cs_(cs), processing_(processing) {
    cs_->Enter();
    *processing_ += 1;
  }

  ~MarkProcessingCritScope() RTC_UNLOCK_FUNCTION() {
    *processing_ -= 1;
    cs_->Leave();
  }

  MarkProcessingCritScope(const MarkProcessingCritScope&) = delete;
  MarkProcessingCritScope& operator=(const MarkProcessingCritScope&) = delete;

 private:
  const RecursiveCriticalSection* const cs_;  // Lock guarding the queue list.
  size_t* processing_;                        // Re-entrancy/iteration counter.
};

}  // namespace
henrike@webrtc.orgf0488722014-05-13 18:00:26 +0000115
// Returns the process-wide ThreadManager singleton. The instance is
// intentionally leaked (heap-allocated, never deleted) so it outlives all
// static destructors.
ThreadManager* ThreadManager::Instance() {
  static ThreadManager* const thread_manager = new ThreadManager();
  return thread_manager;
}

ThreadManager::~ThreadManager() {
  // The singleton created by Instance() is deliberately leaked, so this
  // destructor must never run.
  RTC_DCHECK_NOTREACHED() << "ThreadManager should never be destructed.";
}

// static
void ThreadManager::Add(Thread* message_queue) {
  return Instance()->AddInternal(message_queue);
}
// Registers a Thread with the global list used by Clear()/ProcessAll*.
void ThreadManager::AddInternal(Thread* message_queue) {
  CritScope cs(&crit_);
  // Prevent changes while the list of message queues is processed.
  RTC_DCHECK_EQ(processing_, 0);
  message_queues_.push_back(message_queue);
}

// static
void ThreadManager::Remove(Thread* message_queue) {
  return Instance()->RemoveInternal(message_queue);
}
// Unregisters a Thread; also drops it from the debug send graph.
void ThreadManager::RemoveInternal(Thread* message_queue) {
  {
    CritScope cs(&crit_);
    // Prevent changes while the list of message queues is processed.
    RTC_DCHECK_EQ(processing_, 0);
    std::vector<Thread*>::iterator iter;
    iter = absl::c_find(message_queues_, message_queue);
    if (iter != message_queues_.end()) {
      message_queues_.erase(iter);
    }
#if RTC_DCHECK_IS_ON
    RemoveFromSendGraph(message_queue);
#endif
  }
}
156
#if RTC_DCHECK_IS_ON
// Removes `thread` from the send graph, both as a sender (map key) and as a
// target (set element) of every other sender.
void ThreadManager::RemoveFromSendGraph(Thread* thread) {
  for (auto it = send_graph_.begin(); it != send_graph_.end();) {
    if (it->first == thread) {
      it = send_graph_.erase(it);
    } else {
      it->second.erase(thread);
      ++it;
    }
  }
}

// Records that `source` performs a blocking send to `target`, after first
// CHECK-failing if doing so would create a cycle (i.e. a potential
// cross-thread deadlock).
void ThreadManager::RegisterSendAndCheckForCycles(Thread* source,
                                                  Thread* target) {
  RTC_DCHECK(source);
  RTC_DCHECK(target);

  CritScope cs(&crit_);
  // BFS worklist of every thread reachable from `target`.
  std::deque<Thread*> all_targets({target});
  // We check the pre-existing who-sends-to-who graph for any path from target
  // to source. This loop is guaranteed to terminate because per the send graph
  // invariant, there are no cycles in the graph.
  for (size_t i = 0; i < all_targets.size(); i++) {
    const auto& targets = send_graph_[all_targets[i]];
    all_targets.insert(all_targets.end(), targets.begin(), targets.end());
  }
  RTC_CHECK_EQ(absl::c_count(all_targets, source), 0)
      << " send loop between " << source->name() << " and " << target->name();

  // We may now insert source -> target without creating a cycle, since there
  // was no path from target to source per the prior CHECK.
  send_graph_[source].insert(target);
}
#endif
191
// static
void ThreadManager::Clear(MessageHandler* handler) {
  return Instance()->ClearInternal(handler);
}
// Clears `handler`'s messages from every registered queue.
void ThreadManager::ClearInternal(MessageHandler* handler) {
  // Deleted objects may cause re-entrant calls to ClearInternal. This is
  // allowed as the list of message queues does not change while queues are
  // cleared.
  MarkProcessingCritScope cs(&crit_, &processing_);
  for (Thread* queue : message_queues_) {
    queue->Clear(handler);
  }
}

// static
void ThreadManager::ProcessAllMessageQueuesForTesting() {
  return Instance()->ProcessAllMessageQueuesInternal();
}

// Test-only drain: ensures every message posted before this call has been
// dispatched on every registered queue before returning.
void ThreadManager::ProcessAllMessageQueuesInternal() {
  // This works by posting a delayed message at the current time and waiting
  // for it to be dispatched on all queues, which will ensure that all messages
  // that came before it were also dispatched.
  volatile int queues_not_done = 0;

  // This class is used so that whether the posted message is processed, or the
  // message queue is simply cleared, queues_not_done gets decremented.
  class ScopedIncrement : public MessageData {
   public:
    ScopedIncrement(volatile int* value) : value_(value) {
      AtomicOps::Increment(value_);
    }
    ~ScopedIncrement() override { AtomicOps::Decrement(value_); }

   private:
    volatile int* value_;
  };

  {
    MarkProcessingCritScope cs(&crit_, &processing_);
    for (Thread* queue : message_queues_) {
      if (!queue->IsProcessingMessagesForTesting()) {
        // If the queue is not processing messages, it can
        // be ignored. If we tried to post a message to it, it would be dropped
        // or ignored.
        continue;
      }
      // MQID_DISPOSE with a null handler: the queue deletes the payload when
      // the message is reached, firing ~ScopedIncrement either way.
      queue->PostDelayed(RTC_FROM_HERE, 0, nullptr, MQID_DISPOSE,
                         new ScopedIncrement(&queues_not_done));
    }
  }

  rtc::Thread* current = rtc::Thread::Current();
  // Note: One of the message queues may have been on this thread, which is
  // why we can't synchronously wait for queues_not_done to go to 0; we need
  // to process messages as well.
  // NOTE(review): when `current` is null this loop busy-spins until the other
  // queues drain — acceptable for a ForTesting helper, but worth confirming.
  while (AtomicOps::AcquireLoad(&queues_not_done) > 0) {
    if (current) {
      current->ProcessMessages(0);
    }
  }
}
254
255// static
henrike@webrtc.orgf0488722014-05-13 18:00:26 +0000256Thread* Thread::Current() {
nisse7866cfe2017-04-26 01:45:31 -0700257 ThreadManager* manager = ThreadManager::Instance();
258 Thread* thread = manager->CurrentThread();
259
nisse7866cfe2017-04-26 01:45:31 -0700260 return thread;
henrike@webrtc.orgf0488722014-05-13 18:00:26 +0000261}
262
#if defined(WEBRTC_POSIX)
ThreadManager::ThreadManager() {
#if defined(WEBRTC_MAC)
  // Puts Cocoa into multi-threaded mode before any non-main thread exists.
  InitCocoaMultiThreading();
#endif
  // TLS slot used to map OS thread -> rtc::Thread*. No destructor: entries
  // are cleared explicitly via SetCurrentThread(nullptr).
  pthread_key_create(&key_, nullptr);
}

Thread* ThreadManager::CurrentThread() {
  return static_cast<Thread*>(pthread_getspecific(key_));
}

void ThreadManager::SetCurrentThreadInternal(Thread* thread) {
  pthread_setspecific(key_, thread);
}
#endif

#if defined(WEBRTC_WIN)
// Win32 equivalent of the pthread TLS slot above.
ThreadManager::ThreadManager() : key_(TlsAlloc()) {}

Thread* ThreadManager::CurrentThread() {
  return static_cast<Thread*>(TlsGetValue(key_));
}

void ThreadManager::SetCurrentThreadInternal(Thread* thread) {
  TlsSetValue(key_, thread);
}
#endif
291
// Associates (or, with null, dissociates) `thread` with the calling OS
// thread, keeping the TaskQueue::Current() registration in sync.
void ThreadManager::SetCurrentThread(Thread* thread) {
#if RTC_DLOG_IS_ON
  if (CurrentThread() && thread) {
    RTC_DLOG(LS_ERROR) << "SetCurrentThread: Overwriting an existing value?";
  }
#endif  // RTC_DLOG_IS_ON

  if (thread) {
    thread->EnsureIsCurrentTaskQueue();
  } else {
    Thread* current = CurrentThread();
    if (current) {
      // The current thread is being cleared, e.g. as a result of
      // UnwrapCurrent() being called or when a thread is being stopped
      // (see PreRun()). This signals that the Thread instance is being detached
      // from the thread, which also means that TaskQueue::Current() must not
      // return a pointer to the Thread instance.
      current->ClearCurrentTaskQueue();
    }
  }

  SetCurrentThreadInternal(thread);
}

// Test-only: swaps the TLS entry without touching the TaskQueue registration.
void rtc::ThreadManager::ChangeCurrentThreadForTest(rtc::Thread* thread) {
  SetCurrentThreadInternal(thread);
}

// Returns the Thread for the calling OS thread, lazily wrapping the thread
// in a new owned-by-manager Thread instance if none exists yet.
Thread* ThreadManager::WrapCurrentThread() {
  Thread* result = CurrentThread();
  if (nullptr == result) {
    result = new Thread(CreateDefaultSocketServer());
    result->WrapCurrentWithThreadManager(this, true);
  }
  return result;
}

// Undoes WrapCurrentThread() for wrapped (non-owned) threads only; Threads
// that own their OS thread are left untouched.
void ThreadManager::UnwrapCurrentThread() {
  Thread* t = CurrentThread();
  if (t && !(t->IsOwned())) {
    t->UnwrapCurrent();
    delete t;
  }
}
336
// Disallows blocking calls on the current thread for the scope's lifetime,
// restoring the prior setting on destruction. Requires a current Thread.
Thread::ScopedDisallowBlockingCalls::ScopedDisallowBlockingCalls()
    : thread_(Thread::Current()),
      previous_state_(thread_->SetAllowBlockingCalls(false)) {}

Thread::ScopedDisallowBlockingCalls::~ScopedDisallowBlockingCalls() {
  RTC_DCHECK(thread_->IsCurrent());
  thread_->SetAllowBlockingCalls(previous_state_);
}

#if RTC_DCHECK_IS_ON
// Snapshots the current thread's blocking-call counters; the destructor
// reports the deltas to `callback` if enough blocking calls occurred.
Thread::ScopedCountBlockingCalls::ScopedCountBlockingCalls(
    std::function<void(uint32_t, uint32_t)> callback)
    : thread_(Thread::Current()),
      base_blocking_call_count_(thread_->GetBlockingCallCount()),
      base_could_be_blocking_call_count_(
          thread_->GetCouldBeBlockingCallCount()),
      result_callback_(std::move(callback)) {}

Thread::ScopedCountBlockingCalls::~ScopedCountBlockingCalls() {
  // Only invoke the callback once the configured threshold is reached.
  if (GetTotalBlockedCallCount() >= min_blocking_calls_for_callback_) {
    result_callback_(GetBlockingCallCount(), GetCouldBeBlockingCallCount());
  }
}

// Number of definite blocking calls since construction.
uint32_t Thread::ScopedCountBlockingCalls::GetBlockingCallCount() const {
  return thread_->GetBlockingCallCount() - base_blocking_call_count_;
}

// Number of potentially-blocking calls since construction.
uint32_t Thread::ScopedCountBlockingCalls::GetCouldBeBlockingCallCount() const {
  return thread_->GetCouldBeBlockingCallCount() -
         base_could_be_blocking_call_count_;
}

uint32_t Thread::ScopedCountBlockingCalls::GetTotalBlockedCallCount() const {
  return GetBlockingCallCount() + GetCouldBeBlockingCallCount();
}
#endif
374
// Borrowed-socket-server constructor; caller retains ownership of `ss`.
Thread::Thread(SocketServer* ss) : Thread(ss, /*do_init=*/true) {}

// Owning constructor; the Thread keeps the socket server alive.
Thread::Thread(std::unique_ptr<SocketServer> ss)
    : Thread(std::move(ss), /*do_init=*/true) {}

// Delegated constructor. `do_init=false` lets subclasses defer
// ThreadManager registration until their own construction completes.
Thread::Thread(SocketServer* ss, bool do_init)
    : fPeekKeep_(false),
      delayed_next_num_(0),
      fInitialized_(false),
      fDestroyed_(false),
      stop_(0),
      ss_(ss) {
  RTC_DCHECK(ss);
  ss_->SetMessageQueue(this);
  SetName("Thread", this);  // default name
  if (do_init) {
    DoInit();
  }
}

Thread::Thread(std::unique_ptr<SocketServer> ss, bool do_init)
    : Thread(ss.get(), do_init) {
  // Take ownership after delegating to the raw-pointer constructor.
  own_ss_ = std::move(ss);
}

Thread::~Thread() {
  Stop();
  DoDestroy();
}
404
// Idempotent: registers this queue with the global ThreadManager once.
void Thread::DoInit() {
  if (fInitialized_) {
    return;
  }

  fInitialized_ = true;
  ThreadManager::Add(this);
}

// Idempotent teardown: detaches the socket server, unregisters from the
// ThreadManager and drops all pending messages.
void Thread::DoDestroy() {
  if (fDestroyed_) {
    return;
  }

  fDestroyed_ = true;
  // The signal is done from here to ensure
  // that it always gets called when the queue
  // is going away.
  if (ss_) {
    ss_->SetMessageQueue(nullptr);
  }
  ThreadManager::Remove(this);
  ClearInternal(nullptr, MQID_ANY, nullptr);
}

SocketServer* Thread::socketserver() {
  return ss_;
}

// Interrupts a blocking ss_->Wait() so the message loop re-evaluates state.
void Thread::WakeUpSocketServer() {
  ss_->WakeUp();
}

// Requests loop exit; safe to call from any thread.
void Thread::Quit() {
  AtomicOps::ReleaseStore(&stop_, 1);
  WakeUpSocketServer();
}

bool Thread::IsQuitting() {
  return AtomicOps::AcquireLoad(&stop_) != 0;
}

// Clears a prior Quit() so the loop can run again.
void Thread::Restart() {
  AtomicOps::ReleaseStore(&stop_, 0);
}
450
451bool Thread::Peek(Message* pmsg, int cmsWait) {
452 if (fPeekKeep_) {
453 *pmsg = msgPeek_;
454 return true;
455 }
456 if (!Get(pmsg, cmsWait))
457 return false;
458 msgPeek_ = *pmsg;
459 fPeekKeep_ = true;
460 return true;
461}
462
// Core message-pump step: returns the next ready message, promoting due
// delayed messages first, and otherwise waits on the socket server for up to
// `cmsWait` ms (kForever to wait indefinitely). Returns false on timeout,
// Quit(), or socket-server failure.
bool Thread::Get(Message* pmsg, int cmsWait, bool process_io) {
  // Return and clear peek if present
  // Always return the peek if it exists so there is Peek/Get symmetry

  if (fPeekKeep_) {
    *pmsg = msgPeek_;
    fPeekKeep_ = false;
    return true;
  }

  // Get w/wait + timer scan / dispatch + socket / event multiplexer dispatch

  int64_t cmsTotal = cmsWait;
  int64_t cmsElapsed = 0;
  int64_t msStart = TimeMillis();
  int64_t msCurrent = msStart;
  while (true) {
    // Check for posted events
    int64_t cmsDelayNext = kForever;
    bool first_pass = true;
    while (true) {
      // All queue operations need to be locked, but nothing else in this loop
      // (specifically handling disposed message) can happen inside the crit.
      // Otherwise, disposed MessageHandlers will cause deadlocks.
      {
        CritScope cs(&crit_);
        // On the first pass, check for delayed messages that have been
        // triggered and calculate the next trigger time.
        if (first_pass) {
          first_pass = false;
          while (!delayed_messages_.empty()) {
            if (msCurrent < delayed_messages_.top().run_time_ms_) {
              cmsDelayNext =
                  TimeDiff(delayed_messages_.top().run_time_ms_, msCurrent);
              break;
            }
            // Due: move from the delayed heap to the ready queue.
            messages_.push_back(delayed_messages_.top().msg_);
            delayed_messages_.pop();
          }
        }
        // Pull a message off the message queue, if available.
        if (messages_.empty()) {
          break;
        } else {
          *pmsg = messages_.front();
          messages_.pop_front();
        }
      }  // crit_ is released here.

      // If this was a dispose message, delete it and skip it.
      if (MQID_DISPOSE == pmsg->message_id) {
        RTC_DCHECK(nullptr == pmsg->phandler);
        delete pmsg->pdata;
        *pmsg = Message();
        continue;
      }
      return true;
    }

    if (IsQuitting())
      break;

    // Which is shorter, the delay wait or the asked wait?

    int64_t cmsNext;
    if (cmsWait == kForever) {
      cmsNext = cmsDelayNext;
    } else {
      // Remaining caller budget, capped by the next delayed-message deadline.
      cmsNext = std::max<int64_t>(0, cmsTotal - cmsElapsed);
      if ((cmsDelayNext != kForever) && (cmsDelayNext < cmsNext))
        cmsNext = cmsDelayNext;
    }

    {
      // Wait and multiplex in the meantime
      if (!ss_->Wait(static_cast<int>(cmsNext), process_io))
        return false;
    }

    // If the specified timeout expired, return

    msCurrent = TimeMillis();
    cmsElapsed = TimeDiff(msCurrent, msStart);
    if (cmsWait != kForever) {
      if (cmsElapsed >= cmsWait)
        return false;
    }
  }
  return false;
}
553
// Enqueues a message for asynchronous dispatch on this thread. Takes
// ownership of `pdata` (deleted immediately if the thread is quitting).
void Thread::Post(const Location& posted_from,
                  MessageHandler* phandler,
                  uint32_t id,
                  MessageData* pdata,
                  bool time_sensitive) {
  RTC_DCHECK(!time_sensitive);
  if (IsQuitting()) {
    delete pdata;
    return;
  }

  // Keep thread safe
  // Add the message to the end of the queue
  // Signal for the multiplexer to return

  {
    CritScope cs(&crit_);
    Message msg;
    msg.posted_from = posted_from;
    msg.phandler = phandler;
    msg.message_id = id;
    msg.pdata = pdata;
    messages_.push_back(msg);
  }
  WakeUpSocketServer();
}

// Enqueues a message to run `delay_ms` from now.
void Thread::PostDelayed(const Location& posted_from,
                         int delay_ms,
                         MessageHandler* phandler,
                         uint32_t id,
                         MessageData* pdata) {
  return DoDelayPost(posted_from, delay_ms, TimeAfter(delay_ms), phandler, id,
                     pdata);
}

// Enqueues a message to run at the absolute time `run_at_ms`.
void Thread::PostAt(const Location& posted_from,
                    int64_t run_at_ms,
                    MessageHandler* phandler,
                    uint32_t id,
                    MessageData* pdata) {
  return DoDelayPost(posted_from, TimeUntil(run_at_ms), run_at_ms, phandler, id,
                     pdata);
}

// Shared implementation for PostDelayed/PostAt: pushes onto the delayed
// priority queue (ordered soonest-first, FIFO among equal times via a
// monotonically increasing sequence number).
void Thread::DoDelayPost(const Location& posted_from,
                         int64_t delay_ms,
                         int64_t run_at_ms,
                         MessageHandler* phandler,
                         uint32_t id,
                         MessageData* pdata) {
  if (IsQuitting()) {
    delete pdata;
    return;
  }

  // Keep thread safe
  // Add to the priority queue. Gets sorted soonest first.
  // Signal for the multiplexer to return.

  {
    CritScope cs(&crit_);
    Message msg;
    msg.posted_from = posted_from;
    msg.phandler = phandler;
    msg.message_id = id;
    msg.pdata = pdata;
    DelayedMessage delayed(delay_ms, run_at_ms, delayed_next_num_, msg);
    delayed_messages_.push(delayed);
    // If this message queue processes 1 message every millisecond for 50 days,
    // we will wrap this number. Even then, only messages with identical times
    // will be misordered, and then only briefly. This is probably ok.
    ++delayed_next_num_;
    RTC_DCHECK_NE(0, delayed_next_num_);
  }
  WakeUpSocketServer();
}
631
632int Thread::GetDelay() {
633 CritScope cs(&crit_);
634
Sebastian Jansson61380c02020-01-17 14:46:08 +0100635 if (!messages_.empty())
Sebastian Jansson6ea2c6a2020-01-13 14:07:22 +0100636 return 0;
637
Sebastian Jansson61380c02020-01-17 14:46:08 +0100638 if (!delayed_messages_.empty()) {
639 int delay = TimeUntil(delayed_messages_.top().run_time_ms_);
Sebastian Jansson6ea2c6a2020-01-13 14:07:22 +0100640 if (delay < 0)
641 delay = 0;
642 return delay;
643 }
644
645 return kForever;
646}
647
// Removes every message matching (phandler, id) from the peeked slot, the
// ready queue and the delayed priority queue. Matching messages are moved to
// `removed` if provided, otherwise their payloads are deleted.
// Caller must hold crit_ (or otherwise guarantee exclusive access).
void Thread::ClearInternal(MessageHandler* phandler,
                           uint32_t id,
                           MessageList* removed) {
  // Remove messages with phandler

  if (fPeekKeep_ && msgPeek_.Match(phandler, id)) {
    if (removed) {
      removed->push_back(msgPeek_);
    } else {
      delete msgPeek_.pdata;
    }
    fPeekKeep_ = false;
  }

  // Remove from ordered message queue

  for (auto it = messages_.begin(); it != messages_.end();) {
    if (it->Match(phandler, id)) {
      if (removed) {
        removed->push_back(*it);
      } else {
        delete it->pdata;
      }
      it = messages_.erase(it);
    } else {
      ++it;
    }
  }

  // Remove from priority queue. Not directly iterable, so use this approach
  // (compact survivors in place over the heap's backing container, then
  // restore the heap invariant with reheap()).

  auto new_end = delayed_messages_.container().begin();
  for (auto it = new_end; it != delayed_messages_.container().end(); ++it) {
    if (it->msg_.Match(phandler, id)) {
      if (removed) {
        removed->push_back(it->msg_);
      } else {
        delete it->msg_.pdata;
      }
    } else {
      *new_end++ = *it;
    }
  }
  delayed_messages_.container().erase(new_end,
                                      delayed_messages_.container().end());
  delayed_messages_.reheap();
}
695
// Invokes the message's handler on this thread, tracing the dispatch and
// logging (with an adaptive threshold) when a handler runs unusually long.
void Thread::Dispatch(Message* pmsg) {
  TRACE_EVENT2("webrtc", "Thread::Dispatch", "src_file",
               pmsg->posted_from.file_name(), "src_func",
               pmsg->posted_from.function_name());
  RTC_DCHECK_RUN_ON(this);
  int64_t start_time = TimeMillis();
  pmsg->phandler->OnMessage(pmsg);
  int64_t end_time = TimeMillis();
  int64_t diff = TimeDiff(end_time, start_time);
  if (diff >= dispatch_warning_ms_) {
    RTC_LOG(LS_INFO) << "Message to " << name() << " took " << diff
                     << "ms to dispatch. Posted from: "
                     << pmsg->posted_from.ToString();
    // To avoid log spew, move the warning limit to only give warning
    // for delays that are larger than the one observed.
    dispatch_warning_ms_ = diff + 1;
  }
}

bool Thread::IsCurrent() const {
  return ThreadManager::Instance()->CurrentThread() == this;
}

// Factory: Thread backed by the platform's default socket server.
std::unique_ptr<Thread> Thread::CreateWithSocketServer() {
  return std::unique_ptr<Thread>(new Thread(CreateDefaultSocketServer()));
}

// Factory: Thread backed by a NullSocketServer (no real I/O multiplexing).
std::unique_ptr<Thread> Thread::Create() {
  return std::unique_ptr<Thread>(
      new Thread(std::unique_ptr<SocketServer>(new NullSocketServer())));
}
727
// Blocks the calling thread for `milliseconds`. Returns false if the sleep
// was interrupted early (POSIX nanosleep failure); always true on Windows.
// DCHECKs that blocking is allowed on the current thread.
bool Thread::SleepMs(int milliseconds) {
  AssertBlockingIsAllowedOnCurrentThread();

#if defined(WEBRTC_WIN)
  ::Sleep(milliseconds);
  return true;
#else
  // POSIX has both a usleep() and a nanosleep(), but the former is deprecated,
  // so we use nanosleep() even though it has greater precision than necessary.
  struct timespec ts;
  ts.tv_sec = milliseconds / 1000;
  ts.tv_nsec = (milliseconds % 1000) * 1000000;
  int ret = nanosleep(&ts, nullptr);
  if (ret != 0) {
    RTC_LOG_ERR(LS_WARNING) << "nanosleep() returning early";
    return false;
  }
  return true;
#endif
}
748
// Sets the thread's debug name, optionally suffixed with `obj`'s address.
// Must be called before the thread is running.
bool Thread::SetName(absl::string_view name, const void* obj) {
  RTC_DCHECK(!IsRunning());

  name_ = std::string(name);
  if (obj) {
    // The %p specifier typically produce at most 16 hex digits, possibly with a
    // 0x prefix. But format is implementation defined, so add some margin.
    // NOTE(review): on platforms where %p already emits "0x…" this yields
    // " 0x0x…" — cosmetic only, but worth confirming intent.
    char buf[30];
    snprintf(buf, sizeof(buf), " 0x%p", obj);
    name_ += buf;
  }
  return true;
}

// Sets the slow-dispatch warning threshold (ms). Marshals itself onto this
// thread so dispatch_warning_ms_ is only touched from the owning thread.
void Thread::SetDispatchWarningMs(int deadline) {
  if (!IsCurrent()) {
    PostTask(webrtc::ToQueuedTask(
        [this, deadline]() { SetDispatchWarningMs(deadline); }));
    return;
  }
  RTC_DCHECK_RUN_ON(this);
  dispatch_warning_ms_ = deadline;
}
772
Niels Möllerd2e50132019-06-11 09:24:14 +0200773bool Thread::Start() {
Tommi51492422017-12-04 15:18:23 +0100774 RTC_DCHECK(!IsRunning());
775
776 if (IsRunning())
777 return false;
henrike@webrtc.orgf0488722014-05-13 18:00:26 +0000778
André Susano Pinto02a57972016-07-22 13:30:05 +0200779 Restart(); // reset IsQuitting() if the thread is being restarted
henrike@webrtc.orgf0488722014-05-13 18:00:26 +0000780
781 // Make sure that ThreadManager is created on the main thread before
782 // we start a new thread.
783 ThreadManager::Instance();
784
Tommi51492422017-12-04 15:18:23 +0100785 owned_ = true;
786
henrike@webrtc.orgf0488722014-05-13 18:00:26 +0000787#if defined(WEBRTC_WIN)
Niels Möllerd2e50132019-06-11 09:24:14 +0200788 thread_ = CreateThread(nullptr, 0, PreRun, this, 0, &thread_id_);
Tommi51492422017-12-04 15:18:23 +0100789 if (!thread_) {
henrike@webrtc.orgf0488722014-05-13 18:00:26 +0000790 return false;
791 }
792#elif defined(WEBRTC_POSIX)
793 pthread_attr_t attr;
794 pthread_attr_init(&attr);
795
Niels Möllerd2e50132019-06-11 09:24:14 +0200796 int error_code = pthread_create(&thread_, &attr, PreRun, this);
henrike@webrtc.orgf0488722014-05-13 18:00:26 +0000797 if (0 != error_code) {
Mirko Bonadei675513b2017-11-09 11:09:25 +0100798 RTC_LOG(LS_ERROR) << "Unable to create pthread, error " << error_code;
Tommi51492422017-12-04 15:18:23 +0100799 thread_ = 0;
henrike@webrtc.orgf0488722014-05-13 18:00:26 +0000800 return false;
801 }
Tommi51492422017-12-04 15:18:23 +0100802 RTC_DCHECK(thread_);
henrike@webrtc.orgf0488722014-05-13 18:00:26 +0000803#endif
804 return true;
805}
806
jiayl@webrtc.orgba737cb2014-09-18 16:45:21 +0000807bool Thread::WrapCurrent() {
808 return WrapCurrentWithThreadManager(ThreadManager::Instance(), true);
809}
810
// Reverses WrapCurrent()/SafeWrapCurrent(): detaches this object from the
// calling OS thread and releases any platform handle acquired when wrapping.
void Thread::UnwrapCurrent() {
  // Clears the platform-specific thread-specific storage.
  ThreadManager::Instance()->SetCurrentThread(nullptr);
#if defined(WEBRTC_WIN)
  if (thread_ != nullptr) {
    // The handle (if any) came from OpenThread() during wrapping; failure to
    // close it is logged but otherwise ignored.
    if (!CloseHandle(thread_)) {
      RTC_LOG_GLE(LS_ERROR)
          << "When unwrapping thread, failed to close handle.";
    }
    thread_ = nullptr;
    thread_id_ = 0;
  }
#elif defined(WEBRTC_POSIX)
  // pthread_t is a value, not a handle: just reset the running sentinel.
  thread_ = 0;
#endif
}
827
828void Thread::SafeWrapCurrent() {
829 WrapCurrentWithThreadManager(ThreadManager::Instance(), false);
830}
831
// Blocks until the underlying OS thread exits, then releases its handle.
// No-op if the thread is not running.
void Thread::Join() {
  if (!IsRunning())
    return;

  // Joining this thread from itself would deadlock.
  RTC_DCHECK(!IsCurrent());
  if (Current() && !Current()->blocking_calls_allowed_) {
    RTC_LOG(LS_WARNING) << "Waiting for the thread to join, "
                           "but blocking calls have been disallowed";
  }

#if defined(WEBRTC_WIN)
  RTC_DCHECK(thread_ != nullptr);
  WaitForSingleObject(thread_, INFINITE);
  CloseHandle(thread_);
  // Resetting these makes IsRunning() false again.
  thread_ = nullptr;
  thread_id_ = 0;
#elif defined(WEBRTC_POSIX)
  pthread_join(thread_, nullptr);
  thread_ = 0;
#endif
}
853
henrike@webrtc.org92a9bac2014-07-14 22:03:57 +0000854bool Thread::SetAllowBlockingCalls(bool allow) {
nisseede5da42017-01-12 05:15:36 -0800855 RTC_DCHECK(IsCurrent());
henrike@webrtc.org92a9bac2014-07-14 22:03:57 +0000856 bool previous = blocking_calls_allowed_;
857 blocking_calls_allowed_ = allow;
858 return previous;
859}
860
861// static
862void Thread::AssertBlockingIsAllowedOnCurrentThread() {
tfarinaa41ab932015-10-30 16:08:48 -0700863#if !defined(NDEBUG)
henrike@webrtc.org92a9bac2014-07-14 22:03:57 +0000864 Thread* current = Thread::Current();
nisseede5da42017-01-12 05:15:36 -0800865 RTC_DCHECK(!current || current->blocking_calls_allowed_);
henrike@webrtc.org92a9bac2014-07-14 22:03:57 +0000866#endif
867}
868
// static
// OS-level thread entry point: installs `pv` (the Thread*) as the current
// thread, runs its loop, then clears TLS before the OS thread exits.
#if defined(WEBRTC_WIN)
DWORD WINAPI Thread::PreRun(LPVOID pv) {
#else
void* Thread::PreRun(void* pv) {
#endif
  Thread* thread = static_cast<Thread*>(pv);
  ThreadManager::Instance()->SetCurrentThread(thread);
  rtc::SetCurrentThreadName(thread->name_.c_str());
#if defined(WEBRTC_MAC)
  // Keep an autorelease pool alive for the whole run (Objective-C interop).
  ScopedAutoReleasePool pool;
#endif
  thread->Run();

  // Detach before exiting so stale TLS never points at a dead thread.
  ThreadManager::Instance()->SetCurrentThread(nullptr);
#ifdef WEBRTC_WIN
  return 0;
#else
  return nullptr;
#endif
}
henrike@webrtc.orgf0488722014-05-13 18:00:26 +0000890
891void Thread::Run() {
892 ProcessMessages(kForever);
893}
894
895bool Thread::IsOwned() {
Tommi51492422017-12-04 15:18:23 +0100896 RTC_DCHECK(IsRunning());
henrike@webrtc.orgf0488722014-05-13 18:00:26 +0000897 return owned_;
898}
899
900void Thread::Stop() {
Sebastian Jansson6ea2c6a2020-01-13 14:07:22 +0100901 Thread::Quit();
henrike@webrtc.orgf0488722014-05-13 18:00:26 +0000902 Join();
903}
904
// Synchronously delivers a message to `phandler` in the context of this
// thread and blocks the caller until the handler has run — analogous to
// Win32 SendMessage. If already on this thread, the handler runs inline.
void Thread::Send(const Location& posted_from,
                  MessageHandler* phandler,
                  uint32_t id,
                  MessageData* pdata) {
  RTC_DCHECK(!IsQuitting());
  // In release builds, silently drop the send once quitting has begun.
  if (IsQuitting())
    return;

  // Sent messages are sent to the MessageHandler directly, in the context
  // of "thread", like Win32 SendMessage. If in the right context,
  // call the handler directly.
  Message msg;
  msg.posted_from = posted_from;
  msg.phandler = phandler;
  msg.message_id = id;
  msg.pdata = pdata;
  if (IsCurrent()) {
#if RTC_DCHECK_IS_ON
    RTC_DCHECK(this->IsInvokeToThreadAllowed(this));
    RTC_DCHECK_RUN_ON(this);
    // Same-thread sends don't block, but count them for diagnostics.
    could_be_blocking_call_count_++;
#endif
    msg.phandler->OnMessage(&msg);
    return;
  }

  // Cross-thread path: the caller is about to block.
  AssertBlockingIsAllowedOnCurrentThread();

  Thread* current_thread = Thread::Current();

#if RTC_DCHECK_IS_ON
  if (current_thread) {
    RTC_DCHECK_RUN_ON(current_thread);
    current_thread->blocking_call_count_++;
    RTC_DCHECK(current_thread->IsInvokeToThreadAllowed(this));
    // Detects Send() cycles (A sends to B while B is sending to A).
    ThreadManager::Instance()->RegisterSendAndCheckForCycles(current_thread,
                                                            this);
  }
#endif

  // Perhaps down the line we can get rid of this workaround and always require
  // current_thread to be valid when Send() is called.
  std::unique_ptr<rtc::Event> done_event;
  if (!current_thread)
    done_event.reset(new rtc::Event());

  // `ready` is written on the target thread and read under crit_ below.
  bool ready = false;
  // The first closure runs the handler on the target thread; the second is
  // the cleanup/completion hook that wakes the waiting caller.
  PostTask(webrtc::ToQueuedTask(
      [&msg]() mutable { msg.phandler->OnMessage(&msg); },
      [this, &ready, current_thread, done = done_event.get()] {
        if (current_thread) {
          CritScope cs(&crit_);
          ready = true;
          current_thread->socketserver()->WakeUp();
        } else {
          done->Set();
        }
      }));

  if (current_thread) {
    // Wait by pumping the caller's socket server so its own wakeups are
    // still serviced while blocked.
    bool waited = false;
    crit_.Enter();
    while (!ready) {
      crit_.Leave();
      current_thread->socketserver()->Wait(kForever, false);
      waited = true;
      crit_.Enter();
    }
    crit_.Leave();

    // Our Wait loop above may have consumed some WakeUp events for this
    // Thread, that weren't relevant to this Send. Losing these WakeUps can
    // cause problems for some SocketServers.
    //
    // Concrete example:
    // Win32SocketServer on thread A calls Send on thread B. While processing
    // the message, thread B Posts a message to A. We consume the wakeup for
    // that Post while waiting for the Send to complete, which means that when
    // we exit this loop, we need to issue another WakeUp, or else the Posted
    // message won't be processed in a timely manner.

    if (waited) {
      current_thread->socketserver()->WakeUp();
    }
  } else {
    // Caller is not an rtc::Thread: block on a plain event instead.
    done_event->Wait(rtc::Event::kForever);
  }
}
993
Taylor Brandstetter5d97a9a2016-06-10 14:17:27 -0700994void Thread::InvokeInternal(const Location& posted_from,
Danil Chapovalov89313452019-11-29 12:56:43 +0100995 rtc::FunctionView<void()> functor) {
Steve Antonc5d7c522019-12-03 10:14:05 -0800996 TRACE_EVENT2("webrtc", "Thread::Invoke", "src_file", posted_from.file_name(),
997 "src_func", posted_from.function_name());
Danil Chapovalov89313452019-11-29 12:56:43 +0100998
999 class FunctorMessageHandler : public MessageHandler {
1000 public:
1001 explicit FunctorMessageHandler(rtc::FunctionView<void()> functor)
Tomas Gunnarsson77baeee2020-09-24 22:39:21 +02001002 : functor_(functor) {}
Danil Chapovalov89313452019-11-29 12:56:43 +01001003 void OnMessage(Message* msg) override { functor_(); }
1004
1005 private:
1006 rtc::FunctionView<void()> functor_;
1007 } handler(functor);
1008
1009 Send(posted_from, &handler);
tommi@webrtc.org7c64ed22015-03-17 14:25:37 +00001010}
1011
Tommi6866dc72020-05-15 10:11:56 +02001012// Called by the ThreadManager when being set as the current thread.
1013void Thread::EnsureIsCurrentTaskQueue() {
1014 task_queue_registration_ =
1015 std::make_unique<TaskQueueBase::CurrentTaskQueueSetter>(this);
1016}
1017
1018// Called by the ThreadManager when being set as the current thread.
1019void Thread::ClearCurrentTaskQueue() {
1020 task_queue_registration_.reset();
1021}
1022
// Dispatch shim for tasks posted through PostTask(): unwraps the QueuedTask
// from the Message payload and runs it, honoring its ownership protocol.
void Thread::QueuedTaskHandler::OnMessage(Message* msg) {
  RTC_DCHECK(msg);
  auto* data = static_cast<ScopedMessageData<webrtc::QueuedTask>*>(msg->pdata);
  std::unique_ptr<webrtc::QueuedTask> task(data->Release());
  // Thread expects handler to own Message::pdata when OnMessage is called
  // Since MessageData is no longer needed, delete it.
  delete data;

  // QueuedTask interface uses Run return value to communicate who owns the
  // task. false means QueuedTask took the ownership.
  if (!task->Run())
    task.release();
}
1036
Artem Titovdfc5f0d2020-07-03 12:09:26 +02001037void Thread::AllowInvokesToThread(Thread* thread) {
Mirko Bonadei481e3452021-07-30 13:57:25 +02001038#if (!defined(NDEBUG) || RTC_DCHECK_IS_ON)
Artem Titovdfc5f0d2020-07-03 12:09:26 +02001039 if (!IsCurrent()) {
1040 PostTask(webrtc::ToQueuedTask(
1041 [thread, this]() { AllowInvokesToThread(thread); }));
1042 return;
1043 }
1044 RTC_DCHECK_RUN_ON(this);
1045 allowed_threads_.push_back(thread);
1046 invoke_policy_enabled_ = true;
1047#endif
1048}
1049
1050void Thread::DisallowAllInvokes() {
Mirko Bonadei481e3452021-07-30 13:57:25 +02001051#if (!defined(NDEBUG) || RTC_DCHECK_IS_ON)
Artem Titovdfc5f0d2020-07-03 12:09:26 +02001052 if (!IsCurrent()) {
1053 PostTask(webrtc::ToQueuedTask([this]() { DisallowAllInvokes(); }));
1054 return;
1055 }
1056 RTC_DCHECK_RUN_ON(this);
1057 allowed_threads_.clear();
1058 invoke_policy_enabled_ = true;
1059#endif
1060}
1061
#if RTC_DCHECK_IS_ON
// Diagnostics: number of cross-thread Send() calls that blocked this thread.
uint32_t Thread::GetBlockingCallCount() const {
  RTC_DCHECK_RUN_ON(this);
  return blocking_call_count_;
}
// Diagnostics: number of Send() calls that ran inline because they were
// already on this thread (and so did not actually block).
uint32_t Thread::GetCouldBeBlockingCallCount() const {
  RTC_DCHECK_RUN_ON(this);
  return could_be_blocking_call_count_;
}
#endif
1072
Artem Titovdfc5f0d2020-07-03 12:09:26 +02001073// Returns true if no policies added or if there is at least one policy
Artem Titov96e3b992021-07-26 16:03:14 +02001074// that permits invocation to `target` thread.
Artem Titovdfc5f0d2020-07-03 12:09:26 +02001075bool Thread::IsInvokeToThreadAllowed(rtc::Thread* target) {
Mirko Bonadei481e3452021-07-30 13:57:25 +02001076#if (!defined(NDEBUG) || RTC_DCHECK_IS_ON)
Artem Titovdfc5f0d2020-07-03 12:09:26 +02001077 RTC_DCHECK_RUN_ON(this);
1078 if (!invoke_policy_enabled_) {
1079 return true;
1080 }
1081 for (const auto* thread : allowed_threads_) {
1082 if (thread == target) {
1083 return true;
1084 }
1085 }
1086 return false;
1087#else
1088 return true;
1089#endif
1090}
1091
Danil Chapovalov912b3b82019-11-22 15:52:40 +01001092void Thread::PostTask(std::unique_ptr<webrtc::QueuedTask> task) {
1093 // Though Post takes MessageData by raw pointer (last parameter), it still
1094 // takes it with ownership.
1095 Post(RTC_FROM_HERE, &queued_task_handler_,
1096 /*id=*/0, new ScopedMessageData<webrtc::QueuedTask>(std::move(task)));
1097}
1098
1099void Thread::PostDelayedTask(std::unique_ptr<webrtc::QueuedTask> task,
1100 uint32_t milliseconds) {
Henrik Boströmcf9899c2022-01-20 09:46:16 +01001101 // This implementation does not support low precision yet.
1102 PostDelayedHighPrecisionTask(std::move(task), milliseconds);
1103}
1104
1105void Thread::PostDelayedHighPrecisionTask(
1106 std::unique_ptr<webrtc::QueuedTask> task,
1107 uint32_t milliseconds) {
Danil Chapovalov912b3b82019-11-22 15:52:40 +01001108 // Though PostDelayed takes MessageData by raw pointer (last parameter),
1109 // it still takes it with ownership.
Henrik Boströmcf9899c2022-01-20 09:46:16 +01001110 PostDelayed(RTC_FROM_HERE, milliseconds, &queued_task_handler_, /*id=*/0,
Danil Chapovalov912b3b82019-11-22 15:52:40 +01001111 new ScopedMessageData<webrtc::QueuedTask>(std::move(task)));
1112}
1113
1114void Thread::Delete() {
1115 Stop();
1116 delete this;
1117}
1118
Niels Möller8909a632018-09-06 08:42:44 +02001119bool Thread::IsProcessingMessagesForTesting() {
Sebastian Jansson6ea2c6a2020-01-13 14:07:22 +01001120 return (owned_ || IsCurrent()) && !IsQuitting();
Niels Möller8909a632018-09-06 08:42:44 +02001121}
1122
Peter Boström0c4e06b2015-10-07 12:23:21 +02001123void Thread::Clear(MessageHandler* phandler,
1124 uint32_t id,
henrike@webrtc.orgf0488722014-05-13 18:00:26 +00001125 MessageList* removed) {
1126 CritScope cs(&crit_);
Niels Möller5e007b72018-09-07 12:35:44 +02001127 ClearInternal(phandler, id, removed);
henrike@webrtc.orgf0488722014-05-13 18:00:26 +00001128}
1129
// Pumps the message queue for `cmsLoop` milliseconds (or indefinitely when
// kForever). Returns true when the time budget expired, false when the loop
// ended because the thread is quitting.
bool Thread::ProcessMessages(int cmsLoop) {
  // Using ProcessMessages with a custom clock for testing and a time greater
  // than 0 doesn't work, since it's not guaranteed to advance the custom
  // clock's time, and may get stuck in an infinite loop.
  RTC_DCHECK(GetClockForTesting() == nullptr || cmsLoop == 0 ||
             cmsLoop == kForever);
  int64_t msEnd = (kForever == cmsLoop) ? 0 : TimeAfter(cmsLoop);
  int cmsNext = cmsLoop;

  while (true) {
#if defined(WEBRTC_MAC)
    // Fresh autorelease pool each iteration bounds Obj-C temporaries.
    ScopedAutoReleasePool pool;
#endif
    Message msg;
    if (!Get(&msg, cmsNext))
      return !IsQuitting();
    Dispatch(&msg);

    if (cmsLoop != kForever) {
      // Shrink the wait for the next Get() to what's left of the budget.
      cmsNext = static_cast<int>(TimeUntil(msEnd));
      if (cmsNext < 0)
        return true;
    }
  }
}
1155
// Adopts the calling OS thread as this rtc::Thread without starting a new
// one. On Windows, `need_synchronize_access` requests a SYNCHRONIZE-only
// handle so the thread can later be waited on. Returns false on failure.
bool Thread::WrapCurrentWithThreadManager(ThreadManager* thread_manager,
                                          bool need_synchronize_access) {
  RTC_DCHECK(!IsRunning());

#if defined(WEBRTC_WIN)
  if (need_synchronize_access) {
    // We explicitly ask for no rights other than synchronization.
    // This gives us the best chance of succeeding.
    thread_ = OpenThread(SYNCHRONIZE, FALSE, GetCurrentThreadId());
    if (!thread_) {
      RTC_LOG_GLE(LS_ERROR) << "Unable to get handle to thread.";
      return false;
    }
    thread_id_ = GetCurrentThreadId();
  }
#elif defined(WEBRTC_POSIX)
  thread_ = pthread_self();
#endif
  // Wrapped threads are not owned: Join()/Stop() must not tear them down.
  owned_ = false;
  thread_manager->SetCurrentThread(this);
  return true;
}
1178
// True while a platform thread is attached, i.e. between a successful
// Start()/Wrap*() and the matching Join()/UnwrapCurrent(). Each platform
// uses its handle's "empty" value as the sentinel.
bool Thread::IsRunning() {
#if defined(WEBRTC_WIN)
  return thread_ != nullptr;
#elif defined(WEBRTC_POSIX)
  return thread_ != 0;
#endif
}
1186
Steve Antonbcc1a762019-12-11 11:21:53 -08001187// static
1188MessageHandler* Thread::GetPostTaskMessageHandler() {
1189 // Allocate at first call, never deallocate.
1190 static MessageHandler* handler = new MessageHandlerWithTask;
1191 return handler;
1192}
1193
// Wraps the calling thread as an rtc::Thread, but only claims the
// "current thread" slot if nothing else already occupies it.
AutoThread::AutoThread()
    : Thread(CreateDefaultSocketServer(), /*do_init=*/false) {
  if (!ThreadManager::Instance()->CurrentThread()) {
    // DoInit registers with ThreadManager. Do that only if we intend to
    // be rtc::Thread::Current(), otherwise ProcessAllMessageQueuesInternal will
    // post a message to a queue that no running thread is serving.
    DoInit();
    ThreadManager::Instance()->SetCurrentThread(this);
  }
}
1204
// Shuts down the queue, then releases the "current thread" slot only if
// this instance actually claimed it in the constructor.
AutoThread::~AutoThread() {
  Stop();
  DoDestroy();
  if (ThreadManager::Instance()->CurrentThread() == this) {
    ThreadManager::Instance()->SetCurrentThread(nullptr);
  }
}
1212
// Wraps the calling thread with the given SocketServer, displacing whatever
// rtc::Thread was current; the destructor restores the previous thread.
AutoSocketServerThread::AutoSocketServerThread(SocketServer* ss)
    : Thread(ss, /*do_init=*/false) {
  DoInit();
  old_thread_ = ThreadManager::Instance()->CurrentThread();
  // Temporarily set the current thread to nullptr so that we can keep checks
  // around that catch unintentional pointer overwrites.
  rtc::ThreadManager::Instance()->SetCurrentThread(nullptr);
  rtc::ThreadManager::Instance()->SetCurrentThread(this);
  if (old_thread_) {
    // Deregister the displaced thread from the manager while we stand in.
    ThreadManager::Remove(old_thread_);
  }
}
1225
// Tears down this thread and reinstates the rtc::Thread that was current
// before construction.
AutoSocketServerThread::~AutoSocketServerThread() {
  RTC_DCHECK(ThreadManager::Instance()->CurrentThread() == this);
  // Stop and destroy the thread before clearing it as the current thread.
  // Sometimes there are messages left in the Thread that will be
  // destroyed by DoDestroy, and sometimes the destructors of the message and/or
  // its contents rely on this thread still being set as the current thread.
  Stop();
  DoDestroy();
  rtc::ThreadManager::Instance()->SetCurrentThread(nullptr);
  rtc::ThreadManager::Instance()->SetCurrentThread(old_thread_);
  if (old_thread_) {
    // Re-register the thread we displaced in the constructor.
    ThreadManager::Add(old_thread_);
  }
}
1240
henrike@webrtc.orgf0488722014-05-13 18:00:26 +00001241} // namespace rtc