From c580c67ac21c5d8580b7d03284758cfce136fd98 Mon Sep 17 00:00:00 2001
From: "Aleksei.Glushko"
Date: Mon, 29 Jul 2024 12:48:49 +0000
Subject: [PATCH] [K/N] Remember StableRefs released during RC collection (KT-70159)

Merge-request: KT-MR-17260
Merged-by: Alexey Glushko
---
 .../runtime/src/gc/cms/cpp/Barriers.cpp            | 16 ++++--
 .../runtime/src/gc/cms/cpp/Barriers.hpp            |  4 +-
 .../gc/cms/cpp/ConcurrentMarkAndSweepTest.cpp      | 39 ++++++++++++-
 .../runtime/src/gc/cms/cpp/GCImpl.cpp              |  6 +-
 .../runtime/src/gc/common/cpp/GC.hpp               |  4 +-
 .../src/gc/common/cpp/MarkAndSweepUtils.hpp        |  6 +-
 .../runtime/src/gc/noop/cpp/GCImpl.cpp             |  4 +-
 .../runtime/src/gc/pmcs/cpp/Barriers.cpp           |  4 +-
 .../runtime/src/gc/pmcs/cpp/Barriers.hpp           |  2 +-
 .../runtime/src/gc/pmcs/cpp/GCImpl.cpp             |  4 +-
 .../runtime/src/gc/stms/cpp/GCImpl.cpp             |  4 +-
 .../runtime/src/main/cpp/ReferenceOps.hpp          |  2 +-
 .../runtime/src/mm/cpp/ReferenceOps.cpp            |  4 +-
 .../runtime/src/mm/cpp/SpecialRefRegistry.cpp      |  2 +-
 .../runtime/src/mm/cpp/SpecialRefRegistry.hpp      | 55 ++++++++++++-------
 .../src/mm/cpp/SpecialRefRegistryTest.cpp          |  2 +-
 16 files changed, 107 insertions(+), 51 deletions(-)

diff --git a/kotlin-native/runtime/src/gc/cms/cpp/Barriers.cpp b/kotlin-native/runtime/src/gc/cms/cpp/Barriers.cpp
index 656b980771044..1a89280c35911 100644
--- a/kotlin-native/runtime/src/gc/cms/cpp/Barriers.cpp
+++ b/kotlin-native/runtime/src/gc/cms/cpp/Barriers.cpp
@@ -126,8 +126,14 @@ void gc::barriers::disableBarriers() noexcept {
 namespace {
 
 // TODO decide whether it's really beneficial to NO_INLINE the slow path
-NO_INLINE void beforeHeapRefUpdateSlowPath(mm::DirectRefAccessor ref, ObjHeader* value) noexcept {
-    auto prev = ref.load();
+NO_INLINE void beforeHeapRefUpdateSlowPath(mm::DirectRefAccessor ref, ObjHeader* value, bool loadAtomic) noexcept {
+    ObjHeader* prev;
+    if (loadAtomic) {
+        prev = ref.loadAtomic(std::memory_order_relaxed);
+    } else {
+        prev = ref.load();
+    }
+
     if (prev != nullptr && prev->heap()) {
         // TODO Redundant if the destination object is black.
         // Yet at the moment there is no efficient way to distinguish black and gray objects.
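
For context: the hunk above is the core write-barrier change. The CMS collector builds a snapshot-at-the-beginning mark closure, so before a reference is overwritten during marking, the old referent has to be remembered, and it must be read atomically whenever the GC can touch the same slot concurrently. A minimal sketch of that idea, assuming C++20 std::atomic_ref as a stand-in for the runtime's std_support::atomic_ref; beforeRefOverwrite, markingInProgress and pushToMarkQueue are illustrative placeholders, not Kotlin/Native APIs:

    #include <atomic>

    struct ObjHeader;
    bool markingInProgress();           // placeholder: "is the concurrent mark phase running?"
    void pushToMarkQueue(ObjHeader*);   // placeholder: hand an object over to the marker

    // Snapshot-at-the-beginning (deletion) barrier, conceptually: before a
    // reference slot is overwritten while marking runs, remember the old
    // referent so everything reachable at mark start still gets marked.
    void beforeRefOverwrite(ObjHeader*& slot, bool mayBeReadConcurrently) {
        ObjHeader* prev = mayBeReadConcurrently
                ? std::atomic_ref<ObjHeader*>(slot).load(std::memory_order_relaxed)
                : slot; // slot owned by this mutator, a plain load is enough
        if (prev != nullptr && markingInProgress()) {
            pushToMarkQueue(prev);
        }
    }
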
@@ -143,11 +149,11 @@ NO_INLINE void beforeHeapRefUpdateSlowPath(mm::DirectRefAccessor ref, ObjHeader*
 
 } // namespace
 
-ALWAYS_INLINE void gc::barriers::beforeHeapRefUpdate(mm::DirectRefAccessor ref, ObjHeader* value) noexcept {
+ALWAYS_INLINE void gc::barriers::beforeHeapRefUpdate(mm::DirectRefAccessor ref, ObjHeader* value, bool loadAtomic) noexcept {
     auto phase = currentPhase();
     BarriersLogDebug(phase, "Write *%p <- %p (%p overwritten)", ref.location(), value, ref.load());
     if (__builtin_expect(phase == BarriersPhase::kMarkClosure, false)) {
-        beforeHeapRefUpdateSlowPath(ref, value);
+        beforeHeapRefUpdateSlowPath(ref, value, loadAtomic);
     }
 }
 
@@ -175,7 +181,7 @@ NO_INLINE ObjHeader* weakRefReadInWeakSweepSlowPath(ObjHeader* weakReferee) noex
 
 } // namespace
 
-ALWAYS_INLINE ObjHeader* gc::barriers::weakRefReadBarrier(std::atomic<ObjHeader*>& weakReferee) noexcept {
+ALWAYS_INLINE ObjHeader* gc::barriers::weakRefReadBarrier(std_support::atomic_ref<ObjHeader*> weakReferee) noexcept {
     if (__builtin_expect(currentPhase() != BarriersPhase::kDisabled, false)) {
         // Mark dispatcher requires weak reads be protected by the following:
         auto weakReadProtector = markDispatcher().weakReadProtector();
diff --git a/kotlin-native/runtime/src/gc/cms/cpp/Barriers.hpp b/kotlin-native/runtime/src/gc/cms/cpp/Barriers.hpp
index 00e2e0eb54da2..957fbf74055ef 100644
--- a/kotlin-native/runtime/src/gc/cms/cpp/Barriers.hpp
+++ b/kotlin-native/runtime/src/gc/cms/cpp/Barriers.hpp
@@ -34,8 +34,8 @@ void enableBarriers(int64_t epoch) noexcept;
 void switchToWeakProcessingBarriers() noexcept;
 void disableBarriers() noexcept;
 
-void beforeHeapRefUpdate(mm::DirectRefAccessor ref, ObjHeader* value) noexcept;
+void beforeHeapRefUpdate(mm::DirectRefAccessor ref, ObjHeader* value, bool loadAtomic) noexcept;
 
-ObjHeader* weakRefReadBarrier(std::atomic<ObjHeader*>& weakReferee) noexcept;
+ObjHeader* weakRefReadBarrier(std_support::atomic_ref<ObjHeader*> weakReferee) noexcept;
 
 } // namespace kotlin::gc::barriers
diff --git a/kotlin-native/runtime/src/gc/cms/cpp/ConcurrentMarkAndSweepTest.cpp b/kotlin-native/runtime/src/gc/cms/cpp/ConcurrentMarkAndSweepTest.cpp
index 07dea9a52a5fe..f353ec54c6577 100644
--- a/kotlin-native/runtime/src/gc/cms/cpp/ConcurrentMarkAndSweepTest.cpp
+++ b/kotlin-native/runtime/src/gc/cms/cpp/ConcurrentMarkAndSweepTest.cpp
@@ -6,6 +6,7 @@
 
 #include "ConcurrentMarkAndSweep.hpp"
 
 #include
+#include
 
 #include "gtest/gtest.h"
@@ -110,5 +111,41 @@ TYPED_TEST_P(TracingGCTest, WeakResurrectionAtMarkTermination) {
     }
 }
 
-REGISTER_TYPED_TEST_SUITE_WITH_LISTS(TracingGCTest, TRACING_GC_TEST_LIST, WeakResurrectionAtMarkTermination);
+TYPED_TEST_P(TracingGCTest, ReleaseStableRefDuringRSCollection) {
+    std::vector<Mutator> mutators(kDefaultThreadCount);
+
+    std::atomic<int> readyThreads = 0;
+    std::atomic<bool> gcDone = false;
+
+    std::vector<std::future<void>> mutatorFutures;
+    for (int i = 0; i < kDefaultThreadCount; ++i) {
+        mutatorFutures.push_back(mutators[i].Execute([&](mm::ThreadData& threadData, Mutator& mutator) {
+            auto& obj = AllocateObject(threadData);
+            auto stableRef = mm::StableRef::create(obj.header());
+
+            ++readyThreads;
+            while (!mm::IsThreadSuspensionRequested()) {}
+
+            mm::safePoint();
+
+            mutator.AddStackRoot(obj.header());
+            std::move(stableRef).dispose();
+
+            while (!gcDone) { mm::safePoint(); }
+
+            EXPECT_THAT(mutator.Alive(), testing::Contains(obj.header()));
+        }));
+    }
+
+    while (readyThreads < kDefaultThreadCount) {}
+
+    mm::GlobalData::Instance().gcScheduler().scheduleAndWaitFinalized();
+    gcDone = true;
+
+    for (auto& future : mutatorFutures) {
+        future.wait();
+    }
+}
+
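
The new test pins down the race this patch fixes: a mutator drops its StableRef while the GC is concurrently collecting the root set, yet the object must survive the epoch. The schematic below covers the mutator side only; allocate() and mutatorBody() are placeholders, while mm::StableRef::create and dispose are the calls used by the test itself:

    // Interleaving targeted by ReleaseStableRefDuringRSCollection (schematic, not test code):
    void mutatorBody(mm::ThreadData& threadData) {
        ObjHeader* obj = allocate(threadData);       // placeholder allocation helper
        auto stableRef = mm::StableRef::create(obj); // obj becomes a special-ref root

        // ... the GC starts an epoch and begins scanning the root set concurrently ...

        std::move(stableRef).dispose();              // the root disappears mid-scan
        // obj is still referenced from this thread's stack and must survive the
        // epoch: before this fix the released root could be missed by the marker.
    }
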
+REGISTER_TYPED_TEST_SUITE_WITH_LISTS(TracingGCTest, TRACING_GC_TEST_LIST, WeakResurrectionAtMarkTermination, ReleaseStableRefDuringRSCollection);
 
 INSTANTIATE_TYPED_TEST_SUITE_P(CMS, TracingGCTest, ConcurrentMarkAndSweepTest);
diff --git a/kotlin-native/runtime/src/gc/cms/cpp/GCImpl.cpp b/kotlin-native/runtime/src/gc/cms/cpp/GCImpl.cpp
index 42dbd634f8b78..08eca683bde94 100644
--- a/kotlin-native/runtime/src/gc/cms/cpp/GCImpl.cpp
+++ b/kotlin-native/runtime/src/gc/cms/cpp/GCImpl.cpp
@@ -87,11 +87,11 @@ bool gc::GC::mainThreadFinalizerProcessorAvailable() noexcept {
     return impl_->gc().mainThreadFinalizerProcessor().available();
 }
 
-ALWAYS_INLINE void gc::beforeHeapRefUpdate(mm::DirectRefAccessor ref, ObjHeader* value) noexcept {
-    barriers::beforeHeapRefUpdate(ref, value);
+ALWAYS_INLINE void gc::beforeHeapRefUpdate(mm::DirectRefAccessor ref, ObjHeader* value, bool loadAtomic) noexcept {
+    barriers::beforeHeapRefUpdate(ref, value, loadAtomic);
 }
 
-ALWAYS_INLINE OBJ_GETTER(gc::weakRefReadBarrier, std::atomic<ObjHeader*>& weakReferee) noexcept {
+ALWAYS_INLINE OBJ_GETTER(gc::weakRefReadBarrier, std_support::atomic_ref<ObjHeader*> weakReferee) noexcept {
     RETURN_OBJ(gc::barriers::weakRefReadBarrier(weakReferee));
 }
 
diff --git a/kotlin-native/runtime/src/gc/common/cpp/GC.hpp b/kotlin-native/runtime/src/gc/common/cpp/GC.hpp
index bce4ac22e2740..9bb51c2d339e4 100644
--- a/kotlin-native/runtime/src/gc/common/cpp/GC.hpp
+++ b/kotlin-native/runtime/src/gc/common/cpp/GC.hpp
@@ -87,8 +87,8 @@ class GC : private Pinned {
     std::unique_ptr<Impl> impl_;
 };
 
-void beforeHeapRefUpdate(mm::DirectRefAccessor ref, ObjHeader* value) noexcept;
-OBJ_GETTER(weakRefReadBarrier, std::atomic<ObjHeader*>& weakReferee) noexcept;
+void beforeHeapRefUpdate(mm::DirectRefAccessor ref, ObjHeader* value, bool loadAtomic) noexcept;
+OBJ_GETTER(weakRefReadBarrier, std_support::atomic_ref<ObjHeader*> weakReferee) noexcept;
 
 bool isMarked(ObjHeader* object) noexcept;
 
diff --git a/kotlin-native/runtime/src/gc/common/cpp/MarkAndSweepUtils.hpp b/kotlin-native/runtime/src/gc/common/cpp/MarkAndSweepUtils.hpp
index d1b06577d637f..ac447cd884fdb 100644
--- a/kotlin-native/runtime/src/gc/common/cpp/MarkAndSweepUtils.hpp
+++ b/kotlin-native/runtime/src/gc/common/cpp/MarkAndSweepUtils.hpp
@@ -162,8 +162,8 @@ void collectRootSet(GCHandle handle, typename Traits::MarkQueue& markQueue, F&&
 template <typename Traits>
 void processWeaks(GCHandle gcHandle, mm::SpecialRefRegistry& registry) noexcept {
     auto handle = gcHandle.processWeaks();
-    for (auto& object : registry.lockForIter()) {
-        auto* obj = object.load(std::memory_order_relaxed);
+    for (auto objRef : registry.lockForIter()) {
+        auto* obj = objRef.load(std::memory_order_relaxed);
         if (!obj) {
             // We already processed it at some point.
             handle.addUndisposed();
@@ -176,7 +176,7 @@ void processWeaks(GCHandle gcHandle, mm::SpecialRefRegistry& registry) noexcept
             continue;
         }
         // Object is not alive. Clear it out.
-        object.store(nullptr, std::memory_order_relaxed);
+        objRef.store(nullptr, std::memory_order_relaxed);
         handle.addNulled();
     }
 }
diff --git a/kotlin-native/runtime/src/gc/noop/cpp/GCImpl.cpp b/kotlin-native/runtime/src/gc/noop/cpp/GCImpl.cpp
index 929e34a2ce372..1f711195b20d2 100644
--- a/kotlin-native/runtime/src/gc/noop/cpp/GCImpl.cpp
+++ b/kotlin-native/runtime/src/gc/noop/cpp/GCImpl.cpp
@@ -62,9 +62,9 @@ bool gc::GC::mainThreadFinalizerProcessorAvailable() noexcept {
     return false;
 }
 
-ALWAYS_INLINE void gc::beforeHeapRefUpdate(mm::DirectRefAccessor ref, ObjHeader* value) noexcept {}
+ALWAYS_INLINE void gc::beforeHeapRefUpdate(mm::DirectRefAccessor ref, ObjHeader* value, bool loadAtomic) noexcept {}
 
-ALWAYS_INLINE OBJ_GETTER(gc::weakRefReadBarrier, std::atomic<ObjHeader*>& weakReferee) noexcept {
+ALWAYS_INLINE OBJ_GETTER(gc::weakRefReadBarrier, std_support::atomic_ref<ObjHeader*> weakReferee) noexcept {
     RETURN_OBJ(weakReferee.load(std::memory_order_relaxed));
 }
 
diff --git a/kotlin-native/runtime/src/gc/pmcs/cpp/Barriers.cpp b/kotlin-native/runtime/src/gc/pmcs/cpp/Barriers.cpp
index 104efb3aebdf3..60de50ee2c7c6 100644
--- a/kotlin-native/runtime/src/gc/pmcs/cpp/Barriers.cpp
+++ b/kotlin-native/runtime/src/gc/pmcs/cpp/Barriers.cpp
@@ -30,7 +30,7 @@ ObjHeader* weakRefBarrierImpl(ObjHeader* weakReferee) noexcept {
     return weakReferee;
 }
 
-NO_INLINE ObjHeader* weakRefReadSlowPath(std::atomic<ObjHeader*>& weakReferee) noexcept {
+NO_INLINE ObjHeader* weakRefReadSlowPath(std_support::atomic_ref<ObjHeader*> weakReferee) noexcept {
     // reread an action to avoid register pollution outside the function
     auto barrier = weakRefBarrier.load(std::memory_order_seq_cst);
 
@@ -107,7 +107,7 @@ void gc::DisableWeakRefBarriers() noexcept {
     }
 }
 
-OBJ_GETTER(gc::WeakRefRead, std::atomic<ObjHeader*>& weakReferee) noexcept {
+OBJ_GETTER(gc::WeakRefRead, std_support::atomic_ref<ObjHeader*> weakReferee) noexcept {
     if (!compiler::concurrentWeakSweep()) {
         RETURN_OBJ(weakReferee.load(std::memory_order_relaxed));
     }
diff --git a/kotlin-native/runtime/src/gc/pmcs/cpp/Barriers.hpp b/kotlin-native/runtime/src/gc/pmcs/cpp/Barriers.hpp
index 95673756bf0df..51840869c75be 100644
--- a/kotlin-native/runtime/src/gc/pmcs/cpp/Barriers.hpp
+++ b/kotlin-native/runtime/src/gc/pmcs/cpp/Barriers.hpp
@@ -32,6 +32,6 @@ class BarriersThreadData : private Pinned {
 void EnableWeakRefBarriers(int64_t epoch) noexcept;
 void DisableWeakRefBarriers() noexcept;
 
-OBJ_GETTER(WeakRefRead, std::atomic<ObjHeader*>& weakReferee) noexcept;
+OBJ_GETTER(WeakRefRead, std_support::atomic_ref<ObjHeader*> weakReferee) noexcept;
 
 } // namespace kotlin::gc
diff --git a/kotlin-native/runtime/src/gc/pmcs/cpp/GCImpl.cpp b/kotlin-native/runtime/src/gc/pmcs/cpp/GCImpl.cpp
index 2b83d0fdd61f1..a8e468d394e53 100644
--- a/kotlin-native/runtime/src/gc/pmcs/cpp/GCImpl.cpp
+++ b/kotlin-native/runtime/src/gc/pmcs/cpp/GCImpl.cpp
@@ -87,9 +87,9 @@ bool gc::GC::mainThreadFinalizerProcessorAvailable() noexcept {
     return impl_->gc().mainThreadFinalizerProcessor().available();
 }
 
-ALWAYS_INLINE void gc::beforeHeapRefUpdate(mm::DirectRefAccessor ref, ObjHeader* value) noexcept {}
+ALWAYS_INLINE void gc::beforeHeapRefUpdate(mm::DirectRefAccessor ref, ObjHeader* value, bool loadAtomic) noexcept {}
 
-ALWAYS_INLINE OBJ_GETTER(gc::weakRefReadBarrier, std::atomic<ObjHeader*>& weakReferee) noexcept {
+ALWAYS_INLINE OBJ_GETTER(gc::weakRefReadBarrier, std_support::atomic_ref<ObjHeader*> weakReferee) noexcept {
     RETURN_RESULT_OF(gc::WeakRefRead, weakReferee);
 }
 
diff --git a/kotlin-native/runtime/src/gc/stms/cpp/GCImpl.cpp b/kotlin-native/runtime/src/gc/stms/cpp/GCImpl.cpp
index 11ca585359805..7964d654b9eeb 100644
--- a/kotlin-native/runtime/src/gc/stms/cpp/GCImpl.cpp
+++ b/kotlin-native/runtime/src/gc/stms/cpp/GCImpl.cpp
@@ -80,9 +80,9 @@ bool gc::GC::mainThreadFinalizerProcessorAvailable() noexcept {
     return impl_->gc().mainThreadFinalizerProcessor().available();
 }
 
-ALWAYS_INLINE void gc::beforeHeapRefUpdate(mm::DirectRefAccessor ref, ObjHeader* value) noexcept {}
+ALWAYS_INLINE void gc::beforeHeapRefUpdate(mm::DirectRefAccessor ref, ObjHeader* value, bool loadAtomic) noexcept {}
 
-ALWAYS_INLINE OBJ_GETTER(gc::weakRefReadBarrier, std::atomic<ObjHeader*>& weakReferee) noexcept {
+ALWAYS_INLINE OBJ_GETTER(gc::weakRefReadBarrier, std_support::atomic_ref<ObjHeader*> weakReferee) noexcept {
     RETURN_OBJ(weakReferee.load(std::memory_order_relaxed));
 }
 
diff --git a/kotlin-native/runtime/src/main/cpp/ReferenceOps.hpp b/kotlin-native/runtime/src/main/cpp/ReferenceOps.hpp
index d908e25ab7614..d1452784c43c6 100644
--- a/kotlin-native/runtime/src/main/cpp/ReferenceOps.hpp
+++ b/kotlin-native/runtime/src/main/cpp/ReferenceOps.hpp
@@ -210,6 +210,6 @@ class RefField : private Pinned {
     ObjHeader* value_ = nullptr;
 };
 
-OBJ_GETTER(weakRefReadBarrier, std::atomic<ObjHeader*>& weakReferee) noexcept;
+OBJ_GETTER(weakRefReadBarrier, std_support::atomic_ref<ObjHeader*> weakReferee) noexcept;
 
 }
diff --git a/kotlin-native/runtime/src/mm/cpp/ReferenceOps.cpp b/kotlin-native/runtime/src/mm/cpp/ReferenceOps.cpp
index 43300a12dece9..988fe9d3e3a04 100644
--- a/kotlin-native/runtime/src/mm/cpp/ReferenceOps.cpp
+++ b/kotlin-native/runtime/src/mm/cpp/ReferenceOps.cpp
@@ -17,12 +17,12 @@
 template<> ALWAYS_INLINE void mm::RefAccessor<true>::afterLoad() noexcept {}
 
 // on heap
 template<> ALWAYS_INLINE void mm::RefAccessor<false>::beforeStore(ObjHeader* value) noexcept {
-    gc::beforeHeapRefUpdate(direct(), value);
+    gc::beforeHeapRefUpdate(direct(), value, false);
 }
 template<> ALWAYS_INLINE void mm::RefAccessor<false>::afterStore(ObjHeader*) noexcept {}
 template<> ALWAYS_INLINE void mm::RefAccessor<false>::beforeLoad() noexcept {}
 template<> ALWAYS_INLINE void mm::RefAccessor<false>::afterLoad() noexcept {}
 
-ALWAYS_INLINE OBJ_GETTER(mm::weakRefReadBarrier, std::atomic<ObjHeader*>& weakReferee) noexcept {
+ALWAYS_INLINE OBJ_GETTER(mm::weakRefReadBarrier, std_support::atomic_ref<ObjHeader*> weakReferee) noexcept {
     RETURN_RESULT_OF(gc::weakRefReadBarrier, weakReferee);
 }
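
The recurring signature change in these files, std::atomic<ObjHeader*>& becoming std_support::atomic_ref<ObjHeader*>, lets the registry keep a plain ObjHeader* field and build an atomic view of it only where atomic access is actually needed. A small self-contained sketch of the same pattern with the standard std::atomic_ref, assuming the std_support wrapper behaves analogously; Node, obj and printSlot are illustrative names:

    #include <atomic>
    #include <cstdio>

    struct Node {
        int* obj = nullptr; // plain field: exclusive (mutator-only) accesses stay non-atomic

        // Atomic view over the same storage, constructed on demand.
        std::atomic_ref<int*> objAtomic() noexcept { return std::atomic_ref<int*>(obj); }
    };

    // A reader that may run concurrently with writers takes the atomic view by value.
    void printSlot(std::atomic_ref<int*> slot) {
        std::printf("%p\n", static_cast<void*>(slot.load(std::memory_order_relaxed)));
    }

    int main() {
        Node node;
        int value = 42;
        node.obj = &value;           // exclusive access: plain store
        printSlot(node.objAtomic()); // shared access: atomic view
    }
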
diff --git a/kotlin-native/runtime/src/mm/cpp/SpecialRefRegistry.cpp b/kotlin-native/runtime/src/mm/cpp/SpecialRefRegistry.cpp
index 3296b902e0529..e29a2269d2f67 100644
--- a/kotlin-native/runtime/src/mm/cpp/SpecialRefRegistry.cpp
+++ b/kotlin-native/runtime/src/mm/cpp/SpecialRefRegistry.cpp
@@ -56,7 +56,7 @@ mm::SpecialRefRegistry::Node* mm::SpecialRefRegistry::nextRoot(Node* current) no
         auto [candidatePrev, candidateNext] = eraseFromRoots(current, candidate);
         // We removed candidate. But should we have?
         if (candidate->rc_.load(std::memory_order_relaxed) > 0) {
-            RuntimeAssert(candidate->obj_.load(std::memory_order_relaxed) != nullptr, "candidate cannot have a null obj_");
+            RuntimeAssert(candidate->objAtomic().load(std::memory_order_relaxed) != nullptr, "candidate cannot have a null obj_");
             // Ooops. Let's put it back. Okay to put into the head.
             insertIntoRootsHead(*candidate);
         }
diff --git a/kotlin-native/runtime/src/mm/cpp/SpecialRefRegistry.hpp b/kotlin-native/runtime/src/mm/cpp/SpecialRefRegistry.hpp
index 05320d6b3b337..983efee1208ab 100644
--- a/kotlin-native/runtime/src/mm/cpp/SpecialRefRegistry.hpp
+++ b/kotlin-native/runtime/src/mm/cpp/SpecialRefRegistry.hpp
@@ -88,7 +88,7 @@ class SpecialRefRegistry : private Pinned {
             auto rc = rc_.exchange(disposedMarker, std::memory_order_release);
             if (compiler::runtimeAssertsEnabled()) {
                 if (rc > 0) {
-                    auto* obj = obj_.load(std::memory_order_relaxed);
+                    auto* obj = objAtomic().load(std::memory_order_relaxed);
                     // In objc export if ObjCClass extends from KtClass
                     // doing retain+autorelease inside [ObjCClass dealloc] will cause
                     // this->dispose() be called after this->retain() but before
@@ -107,19 +107,19 @@ class SpecialRefRegistry : private Pinned {
                 auto rc = rc_.load(std::memory_order_relaxed);
                 RuntimeAssert(rc >= 0, "Dereferencing StableRef@%p with rc %d", this, rc);
             }
-            return obj_.load(std::memory_order_relaxed);
+            return objAtomic().load(std::memory_order_relaxed);
         }
 
         OBJ_GETTER0(tryRef) noexcept {
             AssertThreadState(ThreadState::kRunnable);
-            RETURN_RESULT_OF(mm::weakRefReadBarrier, obj_);
+            RETURN_RESULT_OF(mm::weakRefReadBarrier, objAtomic());
         }
 
         void retainRef() noexcept {
            auto rc = rc_.fetch_add(1, std::memory_order_relaxed);
            RuntimeAssert(rc >= 0, "Retaining StableRef@%p with rc %d", this, rc);
            if (rc == 0) {
-                if (!obj_.load(std::memory_order_relaxed)) {
+                if (!objAtomic().load(std::memory_order_relaxed)) {
                     // In objc export if ObjCClass extends from KtClass
                     // calling retain inside [ObjCClass dealloc] will cause
                     // node.retainRef() be called after node.obj_ was cleared but
@@ -129,18 +129,22 @@ class SpecialRefRegistry : private Pinned {
                     return;
                 }
 
-                // TODO: With CMS root-set-write barrier for marking `obj_` should be here.
-                // Until we have the barrier, the object must already be in the roots.
-                // If 0->1 happened from `[ObjCClass _tryRetain]`, it would first hold the object
-                // on the stack via `tryRef`.
-                // If 0->1 happened during construction:
-                // * First of all, currently it's impossible because the `Node` is created with rc=1 and not inserted
-                //   into the roots list until publishing.
-                // * Even if the above changes, for the construction, the object must have been passed in from somewhere,
-                //   so it must be reachable anyway.
-                // If 0->1 happened because an object is passing through the interop border for the second time (or more)
-                // (e.g. accessing a non-permanent global a couple of times). Follows the construction case above:
-                // "the object must have been passed in from somewhere, so it must be reachable anyway".
+                // With the current CMS implementation no barrier is required here.
+                // The CMS builds a Snapshot-at-the-beginning mark closure,
+                // which means it has to remember only the concurrent deletion of references, not creation.
+                // TODO: A write-into-root-set barrier might be required here for other concurrent mark strategies.
+
+                // In case of non-concurrent root set scanning, it is only required that the object already be in the roots.
+                // If 0->1 happened from `[ObjCClass _tryRetain]`, it would first hold the object
+                // on the stack via `tryRef`.
+                // If 0->1 happened during construction:
+                // * First of all, currently it's impossible because the `Node` is created with rc=1 and not inserted
+                //   into the roots list until publishing.
+                // * Even if the above changes, for the construction, the object must have been passed in from somewhere,
+                //   so it must be reachable anyway.
+                // If 0->1 happened because an object is passing through the interop border for the second time (or more)
+                // (e.g. accessing a non-permanent global a couple of times). Follows the construction case above:
+                // "the object must have been passed in from somewhere, so it must be reachable anyway".
 
                 // 0->1 changes require putting this node into the root set.
                 SpecialRefRegistry::instance().insertIntoRootsHead(*this);
@@ -148,8 +152,14 @@ class SpecialRefRegistry : private Pinned {
         }
 
         void releaseRef() noexcept {
-            auto rc = rc_.fetch_sub(1, std::memory_order_relaxed);
-            RuntimeAssert(rc > 0, "Releasing StableRef@%p with rc %d", this, rc);
+            auto rcBefore = rc_.fetch_sub(1, std::memory_order_relaxed);
+            RuntimeAssert(rcBefore > 0, "Releasing StableRef@%p with rc %d", this, rcBefore);
+            if (rcBefore == 1) {
+                // It's potentially a removal from the global root set.
+                // The CMS GC scans the global root set concurrently.
+                // Notify the GC about the removal.
+                gc::beforeHeapRefUpdate(mm::DirectRefAccessor(obj_), nullptr, true);
+            }
         }
 
         RawSpecialRef* asRaw() noexcept { return reinterpret_cast<RawSpecialRef*>(this); }
@@ -169,7 +179,10 @@ class SpecialRefRegistry : private Pinned {
         // Synchronization between GC and mutators happens via enabling/disabling
         // the barriers.
         // TODO: Try to handle it atomically only when the GC is in progress.
-        std::atomic<ObjHeader*> obj_;
+        std_support::atomic_ref<ObjHeader*> objAtomic() noexcept { return std_support::atomic_ref{obj_}; }
+        std_support::atomic_ref<ObjHeader* const> objAtomic() const noexcept { return std_support::atomic_ref{obj_}; }
+        ObjHeader* obj_;
+
         // Only ever updated using relaxed memory ordering. Any synchronization
         // with nextRoot_ is achieved via acquire-release of nextRoot_.
         std::atomic rc_; // After dispose() will be disposedMarker.
@@ -221,7 +234,7 @@ class SpecialRefRegistry : private Pinned {
         ObjHeader* operator*() const noexcept {
            // Ignoring rc here. If someone nulls out rc during root
            // scanning, it's okay to be conservative and still make it a root.
-            return node_->obj_.load(std::memory_order_relaxed);
+            return node_->objAtomic().load(std::memory_order_relaxed);
         }
 
         RootsIterator& operator++() noexcept {
@@ -258,7 +271,7 @@ class SpecialRefRegistry : private Pinned {
 
     class Iterator {
     public:
-        std::atomic<ObjHeader*>& operator*() noexcept { return iterator_->obj_; }
+        std_support::atomic_ref<ObjHeader*> operator*() noexcept { return iterator_->objAtomic(); }
 
         Iterator& operator++() noexcept {
             iterator_ = owner_->findAliveNode(std::next(iterator_));
diff --git a/kotlin-native/runtime/src/mm/cpp/SpecialRefRegistryTest.cpp b/kotlin-native/runtime/src/mm/cpp/SpecialRefRegistryTest.cpp
index 8f733ef8c6af1..a0fb100a293e1 100644
--- a/kotlin-native/runtime/src/mm/cpp/SpecialRefRegistryTest.cpp
+++ b/kotlin-native/runtime/src/mm/cpp/SpecialRefRegistryTest.cpp
@@ -58,7 +58,7 @@ class SpecialRefRegistryTest : public testing::Test {
     std::vector<ObjHeader*> all(Invalidated&&... invalidated) noexcept {
         std::set<ObjHeader*> invalidatedSet({std::forward<Invalidated>(invalidated)...});
         std::vector<ObjHeader*> result;
-        for (auto& obj : mm::SpecialRefRegistry::instance().lockForIter()) {
+        for (auto obj : mm::SpecialRefRegistry::instance().lockForIter()) {
            if (invalidatedSet.find(obj) != invalidatedSet.end()) {
                obj = nullptr;
            }
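
The releaseRef() change above is the heart of the fix: when the counter drops from 1 to 0, the node effectively leaves the global root set while the CMS may still be scanning it, so the GC is notified exactly as for a heap reference overwrite, with an atomic load because the scanner can read the slot in parallel. A condensed model of that release path; Node, rc and notifyGcAboutRemoval below are illustrative stand-ins for the patched SpecialRefRegistry internals, not the runtime's actual declarations:

    #include <atomic>

    struct ObjHeader;

    // Stand-in for gc::beforeHeapRefUpdate(mm::DirectRefAccessor(obj_), nullptr, /*loadAtomic=*/true):
    // remember the object still referenced from the slot before the root goes away.
    void notifyGcAboutRemoval(ObjHeader*& slot);

    struct Node {
        ObjHeader* obj = nullptr;  // scanned concurrently by the GC as a root while rc > 0
        std::atomic<int> rc{1};

        void releaseRef() noexcept {
            auto rcBefore = rc.fetch_sub(1, std::memory_order_relaxed);
            if (rcBefore == 1) {
                // 1 -> 0: this node no longer keeps obj in the root set, but a
                // concurrent mark that already started must still treat obj as live.
                notifyGcAboutRemoval(obj);
            }
        }
    };
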