
Commit 8667223

mkustermann authored and commit-bot@chromium.org committed
[vm/concurrency] Have custom weak table per isolate used for snapshot writing
Currently the snapshotter will just reset the weak tables once it's done. Once we have a shared heap this would not work anymore, since the resetting of the weak tables can happen while another isolate is in the middle of snapshotting. We therefore make each isolate have its own weak table used for snapshotting messages. The memory for the weak tables is managed by a std::unique_ptr in the isolate.

Issue #36097
Change-Id: Ic0f4c4a96b6e66606be9e004259d2fee995f7099
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/114858
Commit-Queue: Martin Kustermann <kustermann@google.com>
Reviewed-by: Ryan Macnak <rmacnak@google.com>
1 parent 402c310 commit 8667223
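
The pattern described above, reduced to a standalone sketch: each isolate owns the object-id tables its snapshot writer uses, so finishing (and clearing) one writer's tables cannot disturb another isolate that is still in the middle of snapshotting. The types below (ObjectIdTable, FakeIsolate, SnapshotWriterScope) are illustrative stand-ins, not the VM's classes.

#include <cstdint>
#include <map>
#include <memory>

// Illustrative stand-in; the real VM keys a WeakTable on RawObject*.
using ObjectId = intptr_t;
struct ObjectIdTable {
  std::map<const void*, ObjectId> ids;  // object -> id assigned by the writer
};

struct FakeIsolate {
  // Owned per isolate, so another isolate's snapshot writer can never reset
  // these tables mid-write (the race described in the commit message).
  std::unique_ptr<ObjectIdTable> forward_table_new;
  std::unique_ptr<ObjectIdTable> forward_table_old;
};

struct SnapshotWriterScope {
  explicit SnapshotWriterScope(FakeIsolate* isolate) : isolate_(isolate) {
    isolate_->forward_table_new = std::make_unique<ObjectIdTable>();
    isolate_->forward_table_old = std::make_unique<ObjectIdTable>();
  }
  ~SnapshotWriterScope() {
    // Only this isolate's tables are dropped; other isolates are unaffected.
    isolate_->forward_table_new.reset();
    isolate_->forward_table_old.reset();
  }
  FakeIsolate* isolate_;
};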

6 files changed, +96 −19 lines changed


runtime/vm/heap/heap.cc

+20-7
@@ -877,26 +877,39 @@ void Heap::SetWeakEntry(RawObject* raw_obj, WeakSelector sel, intptr_t val) {
 
 void Heap::ForwardWeakEntries(RawObject* before_object,
                               RawObject* after_object) {
+  const auto before_space =
+      before_object->IsNewObject() ? Heap::kNew : Heap::kOld;
+  const auto after_space =
+      after_object->IsNewObject() ? Heap::kNew : Heap::kOld;
+
   for (int sel = 0; sel < Heap::kNumWeakSelectors; sel++) {
-    WeakTable* before_table =
-        GetWeakTable(before_object->IsNewObject() ? Heap::kNew : Heap::kOld,
-                     static_cast<Heap::WeakSelector>(sel));
+    const auto selector = static_cast<Heap::WeakSelector>(sel);
+    auto before_table = GetWeakTable(before_space, selector);
     intptr_t entry = before_table->RemoveValue(before_object);
     if (entry != 0) {
-      WeakTable* after_table =
-          GetWeakTable(after_object->IsNewObject() ? Heap::kNew : Heap::kOld,
-                       static_cast<Heap::WeakSelector>(sel));
+      auto after_table = GetWeakTable(after_space, selector);
       after_table->SetValue(after_object, entry);
     }
   }
+
+  // We only come here during hot reload, in which case we assume that none of
+  // the isolates is in the middle of sending messages.
+  RELEASE_ASSERT(isolate()->forward_table_new() == nullptr);
+  RELEASE_ASSERT(isolate()->forward_table_old() == nullptr);
 }
 
 void Heap::ForwardWeakTables(ObjectPointerVisitor* visitor) {
+  // NOTE: This method is only used by the compactor, so there is no need to
+  // process the `Heap::kNew` tables.
   for (int sel = 0; sel < Heap::kNumWeakSelectors; sel++) {
     WeakSelector selector = static_cast<Heap::WeakSelector>(sel);
-    GetWeakTable(Heap::kNew, selector)->Forward(visitor);
     GetWeakTable(Heap::kOld, selector)->Forward(visitor);
   }
+
+  // Isolates might have forwarding tables (used during snapshotting for
+  // isolate communication).
+  auto table_old = isolate()->forward_table_old();
+  if (table_old != nullptr) table_old->Forward(visitor);
 }
 
 #ifndef PRODUCT
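
For orientation, the loops above index weak tables by (space, selector). A plausible sketch of that lookup is shown below; the field and enum names are illustrative and are not copied from runtime/vm/heap/heap.h.

// A sketch only: one weak table per (space, selector) pair, so the loops
// above can pick the right table with a single (space, selector) lookup.
class WeakTable;

class SketchHeap {
 public:
  enum Space { kNew, kOld };
  enum WeakSelector { kPeers, kObjectIds, kNumWeakSelectors };

  WeakTable* GetWeakTable(Space space, WeakSelector sel) const {
    return (space == kNew) ? new_weak_tables_[sel] : old_weak_tables_[sel];
  }

  void SetWeakTable(Space space, WeakSelector sel, WeakTable* table) {
    if (space == kNew) {
      new_weak_tables_[sel] = table;
    } else {
      old_weak_tables_[sel] = table;
    }
  }

 private:
  WeakTable* new_weak_tables_[kNumWeakSelectors] = {};
  WeakTable* old_weak_tables_[kNumWeakSelectors] = {};
};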

runtime/vm/heap/scavenger.cc

+28-8
@@ -847,12 +847,8 @@ uword Scavenger::ProcessWeakProperty(RawWeakProperty* raw_weak,
 }
 
 void Scavenger::ProcessWeakReferences() {
-  // Rehash the weak tables now that we know which objects survive this cycle.
-  for (int sel = 0; sel < Heap::kNumWeakSelectors; sel++) {
-    WeakTable* table =
-        heap_->GetWeakTable(Heap::kNew, static_cast<Heap::WeakSelector>(sel));
-    heap_->SetWeakTable(Heap::kNew, static_cast<Heap::WeakSelector>(sel),
-                        WeakTable::NewFrom(table));
+  auto rehash_weak_table = [](WeakTable* table, WeakTable* replacement_new,
+                              WeakTable* replacement_old) {
     intptr_t size = table->size();
     for (intptr_t i = 0; i < size; i++) {
       if (table->IsValidEntryAt(i)) {
@@ -864,16 +860,40 @@ void Scavenger::ProcessWeakReferences() {
         // The object has survived. Preserve its record.
         uword new_addr = ForwardedAddr(header);
         raw_obj = RawObject::FromAddr(new_addr);
-        heap_->SetWeakEntry(raw_obj, static_cast<Heap::WeakSelector>(sel),
-                            table->ValueAt(i));
+        auto replacement =
+            raw_obj->IsNewObject() ? replacement_new : replacement_old;
+        replacement->SetValue(raw_obj, table->ValueAt(i));
       }
     }
   }
+  };
+
+  // Rehash the weak tables now that we know which objects survive this cycle.
+  for (int sel = 0; sel < Heap::kNumWeakSelectors; sel++) {
+    const auto selector = static_cast<Heap::WeakSelector>(sel);
+    auto table = heap_->GetWeakTable(Heap::kNew, selector);
+    auto table_old = heap_->GetWeakTable(Heap::kOld, selector);
+
+    // Create a new weak table for the new-space.
+    auto table_new = WeakTable::NewFrom(table);
+    rehash_weak_table(table, table_new, table_old);
+    heap_->SetWeakTable(Heap::kNew, selector, table_new);
+
     // Remove the old table as it has been replaced with the newly allocated
     // table above.
     delete table;
   }
 
+  // Each isolate might have a weak table used for fast snapshot writing (i.e.
+  // isolate communication). Rehash those tables if need be.
+  auto isolate = heap_->isolate();
+  auto table = isolate->forward_table_new();
+  if (table != NULL) {
+    auto replacement = WeakTable::NewFrom(table);
+    rehash_weak_table(table, replacement, isolate->forward_table_old());
+    isolate->set_forward_table_new(replacement);
+  }
+
   // The queued weak properties at this point do not refer to reachable keys,
   // so we clear their key and value fields.
   {
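
The rehash_weak_table lambda above re-keys surviving entries under their post-scavenge addresses and splits them between a new-space and an old-space replacement, since a scavenge can promote an object to the old generation. A rough standalone illustration of that split, using a plain hash map and an explicit forwarding map instead of the VM's WeakTable and forwarding headers:

#include <cstdint>
#include <unordered_map>

// Keys are object addresses, values are opaque per-object data (e.g. ids).
using AddressKeyedTable = std::unordered_map<uintptr_t, intptr_t>;

struct ForwardingInfo {
  uintptr_t new_address = 0;      // where the surviving object lives now
  bool now_in_old_space = false;  // promoted objects end up in old-space
};

// `forwarding` holds an entry for every object that survived the scavenge.
void RehashWeakTable(
    const AddressKeyedTable& table,
    const std::unordered_map<uintptr_t, ForwardingInfo>& forwarding,
    AddressKeyedTable* replacement_new, AddressKeyedTable* replacement_old) {
  for (const auto& [old_address, value] : table) {
    auto it = forwarding.find(old_address);
    if (it == forwarding.end()) continue;  // dead objects drop their entries
    // Surviving entries are re-keyed under the new address and placed in the
    // table matching the space the object now occupies.
    const ForwardingInfo& info = it->second;
    auto* dst = info.now_in_old_space ? replacement_old : replacement_new;
    (*dst)[info.new_address] = value;
  }
}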

runtime/vm/isolate.cc

+9
@@ -2009,6 +2009,15 @@ void Isolate::MaybeIncreaseReloadEveryNStackOverflowChecks() {
 }
 #endif  // !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
 
+void Isolate::set_forward_table_new(WeakTable* table) {
+  std::unique_ptr<WeakTable> value(table);
+  forward_table_new_ = std::move(value);
+}
+void Isolate::set_forward_table_old(WeakTable* table) {
+  std::unique_ptr<WeakTable> value(table);
+  forward_table_old_ = std::move(value);
+}
+
 void Isolate::Shutdown() {
   ASSERT(this == Isolate::Current());
   BackgroundCompiler::Stop(this);
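
The setters above adopt a raw pointer into the owning unique_ptr; handing them nullptr (as the ForwardList destructor does) frees the previous table. A minimal standalone sketch of the same ownership semantics follows, using stand-in types rather than the VM's; note that unique_ptr::reset(table) expresses the wrap-and-move in one call.

#include <memory>

struct WeakTableStandIn {};  // stand-in for the VM's WeakTable

struct IsolateStandIn {
  // Equivalent to the committed setter: the member unique_ptr deletes any
  // previously owned table and adopts the new raw pointer.
  void set_forward_table_new(WeakTableStandIn* table) {
    forward_table_new_.reset(table);
  }
  std::unique_ptr<WeakTableStandIn> forward_table_new_;
};

int main() {
  IsolateStandIn isolate;
  isolate.set_forward_table_new(new WeakTableStandIn());  // before writing a snapshot
  isolate.set_forward_table_new(nullptr);                 // after writing: frees the table
  return 0;
}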

runtime/vm/isolate.h

+13
@@ -90,6 +90,7 @@ class StoreBuffer;
 class StubCode;
 class ThreadRegistry;
 class UserTag;
+class WeakTable;
 
 class PendingLazyDeopt {
  public:
@@ -945,6 +946,14 @@ class Isolate : public BaseIsolate, public IntrusiveDListEntry<Isolate> {
 
   void MaybeIncreaseReloadEveryNStackOverflowChecks();
 
+  // The weak table used in the snapshot writer for the purpose of fast message
+  // sending.
+  WeakTable* forward_table_new() { return forward_table_new_.get(); }
+  void set_forward_table_new(WeakTable* table);
+
+  WeakTable* forward_table_old() { return forward_table_old_.get(); }
+  void set_forward_table_old(WeakTable* table);
+
   static void NotifyLowMemory();
 
  private:
@@ -1176,6 +1185,10 @@ class Isolate : public BaseIsolate, public IntrusiveDListEntry<Isolate> {
 
   ReversePcLookupCache* reverse_pc_lookup_cache_ = nullptr;
 
+  // Used during message sending between isolates.
+  std::unique_ptr<WeakTable> forward_table_new_;
+  std::unique_ptr<WeakTable> forward_table_old_;
+
   static Dart_IsolateGroupCreateCallback create_group_callback_;
   static Dart_InitializeIsolateCallback initialize_callback_;
   static Dart_IsolateShutdownCallback shutdown_callback_;
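
A side note on the header change: WeakTable is only forward-declared here, which works for a std::unique_ptr<WeakTable> member as long as WeakTable is a complete type wherever the owning class's destructor is instantiated (for the VM that happens in isolate.cc). A minimal sketch of the pattern with made-up names:

#include <memory>

class Table;  // incomplete at the point the member is declared (as in isolate.h)

class Owner {
 public:
  Owner();
  ~Owner();  // declared only; defined below, where Table is complete
 private:
  std::unique_ptr<Table> table_;
};

// Corresponds to the .cc file: the definition of Table is visible here, so
// unique_ptr's deleter can be instantiated.
class Table {};

Owner::Owner() : table_(new Table()) {}
Owner::~Owner() = default;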

runtime/vm/snapshot.cc

+22-3
@@ -959,10 +959,13 @@ ForwardList::ForwardList(Thread* thread, intptr_t first_object_id)
       nodes_(),
       first_unprocessed_object_id_(first_object_id) {
   ASSERT(first_object_id > 0);
+  isolate()->set_forward_table_new(new WeakTable());
+  isolate()->set_forward_table_old(new WeakTable());
 }
 
 ForwardList::~ForwardList() {
-  heap()->ResetObjectIdTable();
+  isolate()->set_forward_table_new(nullptr);
+  isolate()->set_forward_table_old(nullptr);
 }
 
 intptr_t ForwardList::AddObject(Zone* zone,
@@ -976,17 +979,33 @@ intptr_t ForwardList::AddObject(Zone* zone,
   ASSERT(node != NULL);
   nodes_.Add(node);
   ASSERT(object_id != 0);
-  heap()->SetObjectId(raw, object_id);
+  SetObjectId(raw, object_id);
   return object_id;
 }
 
 intptr_t ForwardList::FindObject(RawObject* raw) {
   NoSafepointScope no_safepoint;
-  intptr_t id = heap()->GetObjectId(raw);
+  intptr_t id = GetObjectId(raw);
   ASSERT(id == 0 || NodeForObjectId(id)->obj()->raw() == raw);
   return (id == 0) ? static_cast<intptr_t>(kInvalidIndex) : id;
 }
 
+void ForwardList::SetObjectId(RawObject* object, intptr_t id) {
+  if (object->IsNewObject()) {
+    isolate()->forward_table_new()->SetValue(object, id);
+  } else {
+    isolate()->forward_table_old()->SetValue(object, id);
+  }
+}
+
+intptr_t ForwardList::GetObjectId(RawObject* object) {
+  if (object->IsNewObject()) {
+    return isolate()->forward_table_new()->GetValue(object);
+  } else {
+    return isolate()->forward_table_old()->GetValue(object);
+  }
+}
+
 bool SnapshotWriter::CheckAndWritePredefinedObject(RawObject* rawobj) {
   // Check if object can be written in one of the following ways:
   // - Smi: the Smi value is written as is (last bit is not tagged).
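
SetObjectId and GetObjectId now route each object to the table matching its space, and FindObject treats id 0 as "not yet assigned". The sketch below replays that round trip with simplified stand-in types (plain addresses and hash maps instead of RawObject* and WeakTable):

#include <cstdint>
#include <unordered_map>

struct ObjRef {
  uintptr_t address;
  bool is_new;  // true if the object lives in new-space
};

struct ForwardTables {
  std::unordered_map<uintptr_t, intptr_t> new_space;
  std::unordered_map<uintptr_t, intptr_t> old_space;

  void SetObjectId(const ObjRef& obj, intptr_t id) {
    (obj.is_new ? new_space : old_space)[obj.address] = id;
  }
  intptr_t GetObjectId(const ObjRef& obj) const {
    const auto& table = obj.is_new ? new_space : old_space;
    auto it = table.find(obj.address);
    return it == table.end() ? 0 : it->second;  // 0 means "not yet written"
  }
};

int main() {
  ForwardTables tables;
  ObjRef obj{0x1000, /*is_new=*/true};
  // First encounter: no id yet, so the writer assigns one and records it.
  if (tables.GetObjectId(obj) == 0) tables.SetObjectId(obj, /*id=*/42);
  // Later back-references look up the id instead of re-serializing the object.
  return tables.GetObjectId(obj) == 42 ? 0 : 1;
}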

runtime/vm/snapshot.h

+4-1
@@ -605,13 +605,16 @@ class ForwardList {
  private:
   intptr_t first_object_id() const { return first_object_id_; }
   intptr_t next_object_id() const { return nodes_.length() + first_object_id_; }
-  Heap* heap() const { return thread_->isolate()->heap(); }
+  Isolate* isolate() const { return thread_->isolate(); }
 
   Thread* thread_;
   const intptr_t first_object_id_;
   GrowableArray<Node*> nodes_;
   intptr_t first_unprocessed_object_id_;
 
+  void SetObjectId(RawObject* object, intptr_t id);
+  intptr_t GetObjectId(RawObject* object);
+
   DISALLOW_COPY_AND_ASSIGN(ForwardList);
 };
