
Commit 0b909cc

Fix platformview deadlock.

1 parent: 842281e

10 files changed: +222 -62 lines changed
fml/message_loop_task_queues.cc

Lines changed: 95 additions & 46 deletions
@@ -11,11 +11,14 @@
 #include <optional>
 
 #include "flutter/fml/make_copyable.h"
+#include "flutter/fml/message_loop.h"
 #include "flutter/fml/task_source.h"
 #include "flutter/fml/thread_local.h"
 
 namespace fml {
 
+std::mutex gThreadMergingLock;
+
 std::mutex MessageLoopTaskQueues::creation_mutex_;
 
 const size_t TaskQueueId::kUnmerged = ULONG_MAX;
@@ -127,29 +130,7 @@ bool MessageLoopTaskQueues::HasPendingTasks(TaskQueueId queue_id) const {
 fml::closure MessageLoopTaskQueues::GetNextTaskToRun(TaskQueueId queue_id,
                                                      fml::TimePoint from_time) {
   std::lock_guard guard(queue_mutex_);
-  if (!HasPendingTasksUnlocked(queue_id)) {
-    return nullptr;
-  }
-  TaskSource::TopTask top = PeekNextTaskUnlocked(queue_id);
-
-  if (!HasPendingTasksUnlocked(queue_id)) {
-    WakeUpUnlocked(queue_id, fml::TimePoint::Max());
-  } else {
-    WakeUpUnlocked(queue_id, GetNextWakeTimeUnlocked(queue_id));
-  }
-
-  if (top.task.GetTargetTime() > from_time) {
-    return nullptr;
-  }
-  fml::closure invocation = top.task.GetTask();
-  queue_entries_.at(top.task_queue_id)
-      ->task_source->PopTask(top.task.GetTaskSourceGrade());
-  {
-    std::scoped_lock creation(creation_mutex_);
-    const auto task_source_grade = top.task.GetTaskSourceGrade();
-    tls_task_source_grade.reset(new TaskSourceGradeHolder{task_source_grade});
-  }
-  return invocation;
+  return GetNextTaskToRunUnlocked(queue_id, from_time);
 }
 
 void MessageLoopTaskQueues::WakeUpUnlocked(TaskQueueId queue_id,
@@ -194,24 +175,7 @@ void MessageLoopTaskQueues::RemoveTaskObserver(TaskQueueId queue_id,
 std::vector<fml::closure> MessageLoopTaskQueues::GetObserversToNotify(
     TaskQueueId queue_id) const {
   std::lock_guard guard(queue_mutex_);
-  std::vector<fml::closure> observers;
-
-  if (queue_entries_.at(queue_id)->subsumed_by != _kUnmerged) {
-    return observers;
-  }
-
-  for (const auto& observer : queue_entries_.at(queue_id)->task_observers) {
-    observers.push_back(observer.second);
-  }
-
-  auto& subsumed_set = queue_entries_.at(queue_id)->owner_of;
-  for (auto& subsumed : subsumed_set) {
-    for (const auto& observer : queue_entries_.at(subsumed)->task_observers) {
-      observers.push_back(observer.second);
-    }
-  }
-
-  return observers;
+  return GetObserversToNotifyUnlocked(queue_id);
 }
 
 void MessageLoopTaskQueues::SetWakeable(TaskQueueId queue_id,
@@ -262,7 +226,9 @@ bool MessageLoopTaskQueues::Merge(TaskQueueId owner, TaskQueueId subsumed) {
                              << subsumed_entry->subsumed_by;
     return false;
   }
+
   // All checking is OK, set merged state.
+  std::scoped_lock lock(gThreadMergingLock);
   owner_entry->owner_of.insert(subsumed);
   subsumed_entry->subsumed_by = owner;
 
@@ -319,11 +285,7 @@ bool MessageLoopTaskQueues::Unmerge(TaskQueueId owner, TaskQueueId subsumed) {
 bool MessageLoopTaskQueues::Owns(TaskQueueId owner,
                                  TaskQueueId subsumed) const {
   std::lock_guard guard(queue_mutex_);
-  if (owner == _kUnmerged || subsumed == _kUnmerged) {
-    return false;
-  }
-  auto& subsumed_set = queue_entries_.at(owner)->owner_of;
-  return subsumed_set.find(subsumed) != subsumed_set.end();
+  return OwnsUnlocked(owner, subsumed);
 }
 
 std::set<TaskQueueId> MessageLoopTaskQueues::GetSubsumedTaskQueueId(
@@ -346,6 +308,33 @@ void MessageLoopTaskQueues::ResumeSecondarySource(TaskQueueId queue_id) {
   }
 }
 
+void MessageLoopTaskQueues::FlushExpiredTasksNow(TaskQueueId queue_id) {
+  std::lock_guard guard(queue_mutex_);
+  if (!MessageLoop::IsInitializedForCurrentThread()) {
+    return;
+  }
+
+  TaskQueueId current_queue_id = MessageLoop::GetCurrentTaskQueueId();
+  if (current_queue_id == queue_id ||
+      OwnsUnlocked(current_queue_id, queue_id) ||
+      OwnsUnlocked(queue_id, current_queue_id)) {
+    const auto now = fml::TimePoint::Now();
+    fml::closure invocation;
+    do {
+      invocation = GetNextTaskToRunUnlocked(queue_id, now);
+      if (!invocation) {
+        return;
+      }
+      invocation();
+      std::vector<fml::closure> observers =
+          GetObserversToNotifyUnlocked(queue_id);
+      for (const auto& observer : observers) {
+        observer();
+      }
+    } while (invocation);
+  }
+}
+
 // Subsumed queues will never have pending tasks.
 // Owning queues will consider both their and their subsumed tasks.
 bool MessageLoopTaskQueues::HasPendingTasksUnlocked(
@@ -407,4 +396,64 @@ TaskSource::TopTask MessageLoopTaskQueues::PeekNextTaskUnlocked(
   return top_task.value();
 }
 
+bool MessageLoopTaskQueues::OwnsUnlocked(TaskQueueId owner,
+                                         TaskQueueId subsumed) const {
+  if (owner == _kUnmerged || subsumed == _kUnmerged) {
+    return false;
+  }
+  auto& subsumed_set = queue_entries_.at(owner)->owner_of;
+  return subsumed_set.find(subsumed) != subsumed_set.end();
+}
+
+fml::closure MessageLoopTaskQueues::GetNextTaskToRunUnlocked(
+    TaskQueueId queue_id,
+    fml::TimePoint from_time) {
+  if (!HasPendingTasksUnlocked(queue_id)) {
+    return nullptr;
+  }
+
+  TaskSource::TopTask top = PeekNextTaskUnlocked(queue_id);
+
+  if (!HasPendingTasksUnlocked(queue_id)) {
+    WakeUpUnlocked(queue_id, fml::TimePoint::Max());
+  } else {
+    WakeUpUnlocked(queue_id, GetNextWakeTimeUnlocked(queue_id));
+  }
+
+  if (top.task.GetTargetTime() > from_time) {
+    return nullptr;
+  }
+
+  fml::closure invocation = top.task.GetTask();
+  queue_entries_.at(top.task_queue_id)
+      ->task_source->PopTask(top.task.GetTaskSourceGrade());
+  {
+    std::scoped_lock creation(creation_mutex_);
+    const auto task_source_grade = top.task.GetTaskSourceGrade();
+    tls_task_source_grade.reset(new TaskSourceGradeHolder{task_source_grade});
+  }
+  return invocation;
+}
+
+std::vector<fml::closure> MessageLoopTaskQueues::GetObserversToNotifyUnlocked(
+    TaskQueueId queue_id) const {
+  std::vector<fml::closure> observers;
+
+  if (queue_entries_.at(queue_id)->subsumed_by != _kUnmerged) {
+    return observers;
+  }
+
+  for (const auto& observer : queue_entries_.at(queue_id)->task_observers) {
+    observers.push_back(observer.second);
+  }
+
+  auto& subsumed_set = queue_entries_.at(queue_id)->owner_of;
+  for (auto& subsumed : subsumed_set) {
+    for (const auto& observer : queue_entries_.at(subsumed)->task_observers) {
+      observers.push_back(observer.second);
+    }
+  }
+
+  return observers;
+}
 } // namespace fml
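
The .cc changes above split each public method into a thin locked wrapper (GetNextTaskToRun, GetObserversToNotify, Owns) and an *Unlocked helper, so the new FlushExpiredTasksNow can reuse the same logic while it already holds queue_mutex_ without self-deadlocking on the non-recursive std::mutex. A generic, self-contained sketch of that locked-wrapper / unlocked-helper pattern follows; the names Registry, Pop, Drain, and PopUnlocked are illustrative only and not part of this commit.

#include <iostream>
#include <mutex>
#include <queue>

class Registry {
 public:
  // Public entry point: takes the (non-recursive) mutex exactly once.
  int Pop() {
    std::lock_guard<std::mutex> guard(mutex_);
    return PopUnlocked();
  }

  // Another public method that needs the same logic while the lock is held.
  // Calling Pop() here would deadlock on the non-recursive std::mutex, so it
  // reuses the unlocked helper instead.
  void Drain() {
    std::lock_guard<std::mutex> guard(mutex_);
    while (!items_.empty()) {
      std::cout << PopUnlocked() << std::endl;
    }
  }

  void Push(int value) {
    std::lock_guard<std::mutex> guard(mutex_);
    items_.push(value);
  }

 private:
  // Assumes mutex_ is already held by the caller.
  int PopUnlocked() {
    int value = items_.front();
    items_.pop();
    return value;
  }

  std::mutex mutex_;
  std::queue<int> items_;
};

int main() {
  Registry registry;
  registry.Push(1);
  registry.Push(2);
  registry.Drain();  // Prints 1 then 2 without re-locking per item.
  return 0;
}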

fml/message_loop_task_queues.h

Lines changed: 14 additions & 0 deletions
@@ -23,6 +23,10 @@ namespace fml {
 
 static const TaskQueueId _kUnmerged = TaskQueueId(TaskQueueId::kUnmerged);
 
+/// To avoid data races between the platform thread posting sync tasks to the
+/// raster thread and the raster thread merging.
+extern std::mutex gThreadMergingLock;
+
 /// A collection of tasks and observers associated with one TaskQueue.
 ///
 /// Often a TaskQueue has a one-to-one relationship with a fml::MessageLoop,
@@ -137,6 +141,8 @@ class MessageLoopTaskQueues
 
   void ResumeSecondarySource(TaskQueueId queue_id);
 
+  void FlushExpiredTasksNow(TaskQueueId queue_id);
+
  private:
   class MergedQueuesRunner;
 
@@ -152,6 +158,14 @@ class MessageLoopTaskQueues
 
   fml::TimePoint GetNextWakeTimeUnlocked(TaskQueueId queue_id) const;
 
+  bool OwnsUnlocked(TaskQueueId owner, TaskQueueId subsumed) const;
+
+  fml::closure GetNextTaskToRunUnlocked(TaskQueueId queue_id,
+                                        fml::TimePoint from_time);
+
+  std::vector<fml::closure> GetObserversToNotifyUnlocked(
+      TaskQueueId queue_id) const;
+
   static std::mutex creation_mutex_;
   static fml::RefPtr<MessageLoopTaskQueues> instance_;

fml/raster_thread_merger.cc

Lines changed: 7 additions & 3 deletions
@@ -50,15 +50,19 @@ RasterThreadMerger::CreateOrShareThreadMerger(
 }
 
 void RasterThreadMerger::MergeWithLease(size_t lease_term) {
-  std::scoped_lock lock(mutex_);
   if (TaskQueuesAreSame()) {
     return;
  }
-  if (!IsEnabledUnSafe()) {
+  if (!IsEnabled()) {
     return;
   }
-  FML_DCHECK(lease_term > 0) << "lease_term should be positive.";
 
+  // Here we flush the raster task runner before thread merging to make
+  // sure the sync tasks the platform thread posted have been executed.
+  shared_merger_->FlushSubsumedTaskQueueNow();
+
+  std::scoped_lock lock(mutex_);
+  FML_DCHECK(lease_term > 0) << "lease_term should be positive.";
   if (IsMergedUnSafe()) {
     merged_condition_.notify_one();
     return;
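
The comment above captures the deadlock this commit closes: the platform thread can post a task to the raster thread and block until it runs, and if the raster thread merges first, the raster queue is subsumed and that pending task is stranded. MergeWithLease therefore flushes the subsumed raster queue before taking mutex_ and merging. Below is a minimal sketch of the blocking-post pattern on the platform-thread side; the helper name PostSyncToRaster and the raster_task_runner argument are illustrative, not part of this commit.

#include "flutter/fml/closure.h"
#include "flutter/fml/synchronization/waitable_event.h"
#include "flutter/fml/task_runner.h"

// Hypothetical helper: run |task| on the raster thread and wait for it.
void PostSyncToRaster(const fml::RefPtr<fml::TaskRunner>& raster_task_runner,
                      const fml::closure& task) {
  fml::AutoResetWaitableEvent latch;
  // RunNowOrPostSyncTask (added in this commit) holds gThreadMergingLock while
  // it posts, so the post cannot interleave with MessageLoopTaskQueues::Merge;
  // MergeWithLease additionally flushes the raster queue first, so a task
  // posted this way runs before the queues merge instead of being stranded on
  // the subsumed queue.
  fml::TaskRunner::RunNowOrPostSyncTask(raster_task_runner,
                                        [&task, &latch]() {
                                          task();
                                          latch.Signal();
                                        });
  latch.Wait();
}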

fml/raster_thread_merger_unittests.cc

Lines changed: 81 additions & 8 deletions
@@ -413,14 +413,15 @@ TEST(RasterThreadMerger, RunExpiredTasksWhileFirstTaskMergesThreads) {
     raster_thread_merger->MergeWithLease(1);
     post_merge.CountDown();
   });
-
-  loop_raster->GetTaskRunner()->PostTask([&]() {
-    ASSERT_TRUE(raster_thread_merger->IsOnRasterizingThread());
-    ASSERT_TRUE(raster_thread_merger->IsOnPlatformThread());
-    ASSERT_EQ(fml::MessageLoop::GetCurrentTaskQueueId(), qid_platform);
-    raster_thread_merger->DecrementLease();
-    post_merge.CountDown();
-  });
+  loop_raster->GetTaskRunner()->PostDelayedTask(
+      [&]() {
+        ASSERT_TRUE(raster_thread_merger->IsOnRasterizingThread());
+        ASSERT_TRUE(raster_thread_merger->IsOnPlatformThread());
+        ASSERT_EQ(fml::MessageLoop::GetCurrentTaskQueueId(), qid_platform);
+        raster_thread_merger->DecrementLease();
+        post_merge.CountDown();
+      },
+      fml::TimeDelta::FromMilliseconds(100));
 
   loop_raster->RunExpiredTasksNow();
   post_merge.Wait();
@@ -657,5 +658,77 @@ TEST(RasterThreadMerger,
   ASSERT_FALSE(merger_from_3_to_1->IsMerged());
 }
 
+TEST(RasterThreadMerger, FlushAllExpiredTasksBeforeThreadMerging) {
+  fml::MessageLoop* loop_platform = nullptr;
+  fml::AutoResetWaitableEvent latch1;
+  std::thread thread_platform([&loop_platform, &latch1]() {
+    fml::MessageLoop::EnsureInitializedForCurrentThread();
+    loop_platform = &fml::MessageLoop::GetCurrent();
+    loop_platform->GetTaskRunner()->PostTask([&]() { latch1.Signal(); });
+    loop_platform->Run();
+  });
+
+  fml::MessageLoop* loop_raster = nullptr;
+  fml::AutoResetWaitableEvent latch2;
+  std::thread thread_raster([&loop_raster, &loop_platform, &latch1, &latch2]() {
+    latch1.Wait();
+
+    fml::MessageLoop::EnsureInitializedForCurrentThread();
+    loop_raster = &fml::MessageLoop::GetCurrent();
+    fml::TaskQueueId qid_platform =
+        loop_platform->GetTaskRunner()->GetTaskQueueId();
+    fml::TaskQueueId qid_raster =
+        loop_raster->GetTaskRunner()->GetTaskQueueId();
+    fml::CountDownLatch post_merge(4);
+    const auto raster_thread_merger =
+        fml::MakeRefCounted<fml::RasterThreadMerger>(qid_platform, qid_raster);
+
+    int order = 0;
+    loop_raster->GetTaskRunner()->PostTask([&]() {
+      ASSERT_TRUE(raster_thread_merger->IsOnRasterizingThread());
+      ASSERT_FALSE(raster_thread_merger->IsOnPlatformThread());
+      ASSERT_EQ(fml::MessageLoop::GetCurrentTaskQueueId(), qid_raster);
+      raster_thread_merger->MergeWithLease(1);
+      ASSERT_EQ(3, ++order);
+      post_merge.CountDown();
+    });
+
+    loop_raster->GetTaskRunner()->PostTask([&]() {
+      ASSERT_TRUE(raster_thread_merger->IsOnRasterizingThread());
+      ASSERT_FALSE(raster_thread_merger->IsOnPlatformThread());
+      ASSERT_EQ(1, ++order);
+      post_merge.CountDown();
+    });
+
+    loop_raster->GetTaskRunner()->PostTask([&]() {
+      ASSERT_TRUE(raster_thread_merger->IsOnRasterizingThread());
+      ASSERT_FALSE(raster_thread_merger->IsOnPlatformThread());
+      ASSERT_EQ(2, ++order);
+      post_merge.CountDown();
+    });
+
+    loop_raster->GetTaskRunner()->PostDelayedTask(
+        [&]() {
+          ASSERT_TRUE(raster_thread_merger->IsOnRasterizingThread());
+          ASSERT_TRUE(raster_thread_merger->IsOnPlatformThread());
+          ASSERT_EQ(fml::MessageLoop::GetCurrentTaskQueueId(), qid_platform);
+          ASSERT_EQ(4, ++order);
+          raster_thread_merger->DecrementLease();
+          post_merge.CountDown();
+        },
+        fml::TimeDelta::FromMilliseconds(100));
+
+    loop_raster->RunExpiredTasksNow();
+    post_merge.Wait();
+    latch2.Signal();
+  });
+
+  latch2.Wait();
+  loop_platform->GetTaskRunner()->PostTask(
+      [&]() { loop_platform->Terminate(); });
+
+  thread_platform.join();
+  thread_raster.join();
+}
 } // namespace testing
 } // namespace fml

fml/shared_thread_merger.cc

Lines changed: 8 additions & 0 deletions
@@ -31,6 +31,14 @@ bool SharedThreadMerger::MergeWithLease(RasterThreadMergerId caller,
   return success;
 }
 
+void SharedThreadMerger::FlushSubsumedTaskQueueNow() {
+  std::scoped_lock lock(mutex_);
+  if (IsMergedUnSafe()) {
+    return;
+  }
+  task_queues_->FlushExpiredTasksNow(subsumed_);
+}
+
 bool SharedThreadMerger::UnMergeNowUnSafe() {
   FML_CHECK(IsAllLeaseTermsZeroUnSafe())
       << "all lease term records must be zero before calling "

fml/shared_thread_merger.h

Lines changed: 2 additions & 0 deletions
@@ -54,6 +54,8 @@ class SharedThreadMerger
   // See the doc of |RasterThreadMerger::DecrementLease|.
   bool DecrementLease(RasterThreadMergerId caller);
 
+  void FlushSubsumedTaskQueueNow();
+
  private:
   fml::TaskQueueId owner_;
   fml::TaskQueueId subsumed_;

fml/task_runner.cc

Lines changed: 5 additions & 0 deletions
@@ -62,4 +62,9 @@ void TaskRunner::RunNowOrPostTask(fml::RefPtr<fml::TaskRunner> runner,
   }
 }
 
+void TaskRunner::RunNowOrPostSyncTask(fml::RefPtr<fml::TaskRunner> runner,
+                                      const fml::closure& task) {
+  std::scoped_lock lock(gThreadMergingLock);
+  RunNowOrPostTask(runner, task);
+}
 } // namespace fml

fml/task_runner.h

Lines changed: 5 additions & 0 deletions
@@ -62,6 +62,11 @@ class TaskRunner : public fml::RefCountedThreadSafe<TaskRunner>,
   static void RunNowOrPostTask(fml::RefPtr<fml::TaskRunner> runner,
                                const fml::closure& task);
 
+  /// Similar to |RunNowOrPostTask|, but it needs to synchronize with the
+  /// thread merging operation.
+  static void RunNowOrPostSyncTask(fml::RefPtr<fml::TaskRunner> runner,
+                                   const fml::closure& task);
+
  protected:
   explicit TaskRunner(fml::RefPtr<MessageLoopImpl> loop);
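
RunNowOrPostSyncTask pairs with the scoped_lock on gThreadMergingLock added to MessageLoopTaskQueues::Merge: the decision of where a sync task runs and the change of merged state are serialized by the same mutex. The sketch below illustrates that check-then-act race and the shared-mutex fix in generic terms; every name here (g_merge_lock, g_merged, PostSync, Merge) is illustrative and stands in for the engine's real state, not its API.

#include <mutex>
#include <thread>
#include <vector>

std::mutex g_merge_lock;            // Stands in for fml::gThreadMergingLock.
bool g_merged = false;              // Stands in for the queues' merged state.
std::vector<int> g_owner_queue;     // Work drained by the owner thread.
std::vector<int> g_subsumed_queue;  // Work drained by the subsumed thread.

// Like RunNowOrPostSyncTask: pick a destination based on the merged state.
void PostSync(int task) {
  std::scoped_lock lock(g_merge_lock);
  // Without the lock, g_merged could flip between this check and the push,
  // leaving |task| on a queue that nobody drains anymore.
  (g_merged ? g_owner_queue : g_subsumed_queue).push_back(task);
}

// Like MessageLoopTaskQueues::Merge: flip the merged state.
void Merge() {
  std::scoped_lock lock(g_merge_lock);
  g_merged = true;
}

int main() {
  std::thread poster([] { PostSync(42); });
  std::thread merger([] { Merge(); });
  poster.join();
  merger.join();
  return 0;
}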
