logd: single std::mutex for locking log buffers and tracking readers
There are only three places where the log buffer lock is not already
held when the reader lock is taken:
1) In LogReader, when a new reader connects
2) In LogReader, when a misbehaving reader disconnects
3) LogReaderThread::ThreadFunction()
1) and 2) happen sufficiently rarely that there's no impact if they
additionally held a global lock.
3) is refactored in this CL. Previously, it would do the below in a loop
1) Lock the reader lock then wait on a condition variable
2) Unlock the reader lock
3) Lock the log buffer lock in LogBuffer::FlushTo()
4) In each iteration in the LogBuffer::FlushTo() loop
1) Lock then unlock the reader lock in FilterSecondPass()
2) Unlock the log buffer lock to send the message, then re-lock it
5) Unlock the log buffer lock when leaving LogBuffer::FlushTo()
If these locks are collapsed into a single lock, then this simplifies to:
1) Lock the single lock then wait on a condition variable
2) In each iteration in the LogBuffer::FlushTo() loop
1) Unlock the single lock to send the message, then re-lock it
Collapsing both of these locks into a single lock simplifies the code and
removes the overhead of acquiring the second lock in the majority of
use cases, where the first lock is already held.
Secondly, this lock will be a plain std::mutex instead of a RwLock.
RwLocks are appropriate when there is a substantial imbalance between
readers and writers and high contention; neither is true for logd.
Bug: 169736426
Test: logging unit tests
Change-Id: Ia511506f2d0935a5321c1b2f65569066f91ecb06
diff --git a/logd/SerializedLogBuffer.cpp b/logd/SerializedLogBuffer.cpp
index 6d1576f..aa80864 100644
--- a/logd/SerializedLogBuffer.cpp
+++ b/logd/SerializedLogBuffer.cpp
@@ -41,9 +41,9 @@
}
// Release any sleeping reader threads to dump their current content.
- auto reader_threads_lock = std::lock_guard{reader_list_->reader_threads_lock()};
+ auto lock = std::lock_guard{logd_lock};
for (const auto& reader_thread : reader_list_->reader_threads()) {
- reader_thread->triggerReader_Locked();
+ reader_thread->TriggerReader();
}
}
@@ -86,7 +86,7 @@
auto sequence = sequence_.fetch_add(1, std::memory_order_relaxed);
- auto lock = std::lock_guard{lock_};
+ auto lock = std::lock_guard{logd_lock};
if (logs_[log_id].empty()) {
logs_[log_id].push_back(SerializedLogChunk(max_size_[log_id] / 4));
@@ -140,8 +140,6 @@
}
void SerializedLogBuffer::Prune(log_id_t log_id, size_t bytes_to_free, uid_t uid) {
- auto reader_threads_lock = std::lock_guard{reader_list_->reader_threads_lock()};
-
auto& log_buffer = logs_[log_id];
auto it = log_buffer.begin();
while (it != log_buffer.end()) {
@@ -158,7 +156,7 @@
// fast enough to not back-up logd. Instead, we can achieve an nearly-as-efficient
// but not error-prune batching effect by waking the reader whenever any chunk is
// about to be pruned.
- reader_thread->triggerReader_Locked();
+ reader_thread->TriggerReader();
}
// Some readers may be still reading from this log chunk, log a warning that they are
@@ -198,22 +196,14 @@
std::unique_ptr<FlushToState> SerializedLogBuffer::CreateFlushToState(uint64_t start,
LogMask log_mask) {
- return std::make_unique<SerializedFlushToState>(start, log_mask);
-}
-
-void SerializedLogBuffer::DeleteFlushToState(std::unique_ptr<FlushToState> state) {
- auto lock = std::unique_lock{lock_};
- state.reset();
+ return std::make_unique<SerializedFlushToState>(start, log_mask, logs_);
}
bool SerializedLogBuffer::FlushTo(
LogWriter* writer, FlushToState& abstract_state,
const std::function<FilterResult(log_id_t log_id, pid_t pid, uint64_t sequence,
log_time realtime)>& filter) {
- auto lock = std::unique_lock{lock_};
-
auto& state = reinterpret_cast<SerializedFlushToState&>(abstract_state);
- state.InitializeLogs(logs_);
while (state.HasUnreadLogs()) {
LogWithId top = state.PopNextUnreadLog();
@@ -245,13 +235,14 @@
unsigned char entry_copy[kMaxEntrySize] __attribute__((uninitialized));
CHECK_LT(entry->msg_len(), LOGGER_ENTRY_MAX_PAYLOAD + 1);
memcpy(entry_copy, entry, sizeof(*entry) + entry->msg_len());
- lock.unlock();
+ logd_lock.unlock();
if (!reinterpret_cast<SerializedLogEntry*>(entry_copy)->Flush(writer, log_id)) {
+ logd_lock.lock();
return false;
}
- lock.lock();
+ logd_lock.lock();
}
state.set_start(state.start() + 1);
@@ -259,7 +250,7 @@
}
bool SerializedLogBuffer::Clear(log_id_t id, uid_t uid) {
- auto lock = std::lock_guard{lock_};
+ auto lock = std::lock_guard{logd_lock};
Prune(id, ULONG_MAX, uid);
// Clearing SerializedLogBuffer never waits for readers and therefore is always successful.
@@ -275,7 +266,7 @@
}
size_t SerializedLogBuffer::GetSize(log_id_t id) {
- auto lock = std::lock_guard{lock_};
+ auto lock = std::lock_guard{logd_lock};
return max_size_[id];
}
@@ -288,7 +279,7 @@
return false;
}
- auto lock = std::lock_guard{lock_};
+ auto lock = std::lock_guard{logd_lock};
max_size_[id] = size;
MaybePrune(id);