[K/N] Remember StableRefs released during RC collection (KT-70159)
Merge-request: KT-MR-17260
Merged-by: Alexey Glushko <[email protected]>
Aleksei.Glushko authored and qodana-bot committed Aug 8, 2024
1 parent 565a35c commit c580c67
Showing 16 changed files with 107 additions and 51 deletions.
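Editor's note: the commit message is terse, so here is a summary reconstructed from the hunks below. The CMS GC scans the global root set concurrently with mutators and builds a snapshot-at-the-beginning (SATB) mark closure. If a mutator drops the last reference count of a StableRef while that scan is running, the node silently leaves the root set and the object it referenced can be missed by the marker even though it was live when the snapshot was taken. The fix routes the 1->0 release through the existing deletion barrier, gc::beforeHeapRefUpdate, which gains a loadAtomic flag because the GC may read the slot concurrently. The toy sketch below only illustrates the SATB rule; markClosureActive, markQueue, beforeRefUpdate and releaseLastRef are hypothetical stand-ins, not runtime APIs, and the real runtime uses per-thread mark queues under the barrier.

#include <atomic>
#include <vector>

struct Obj {};

std::atomic<bool> markClosureActive{false}; // set while the concurrent mark closure is being built
std::vector<Obj*> markQueue;                // single-threaded illustration of a mark queue

// SATB deletion barrier: before a reference slot is overwritten (or dropped from the
// root set), remember the referent it held, so the marker still visits it.
void beforeRefUpdate(Obj*& slot, Obj* newValue) {
    if (markClosureActive.load(std::memory_order_relaxed)) {
        if (Obj* prev = slot) markQueue.push_back(prev);
    }
    slot = newValue;
}

// Releasing the last external reference acts like overwriting the slot with null:
// the node leaves the root set, but what it pointed to must still get marked.
void releaseLastRef(Obj*& slot) {
    beforeRefUpdate(slot, nullptr);
}

The new ReleaseStableRefDuringRSCollection test further down reproduces exactly this race and checks that the released object is still reported alive.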
16 changes: 11 additions & 5 deletions kotlin-native/runtime/src/gc/cms/cpp/Barriers.cpp
@@ -126,8 +126,14 @@ void gc::barriers::disableBarriers() noexcept {
namespace {

// TODO decide whether it's really beneficial to NO_INLINE the slow path
NO_INLINE void beforeHeapRefUpdateSlowPath(mm::DirectRefAccessor ref, ObjHeader* value) noexcept {
auto prev = ref.load();
NO_INLINE void beforeHeapRefUpdateSlowPath(mm::DirectRefAccessor ref, ObjHeader* value, bool loadAtomic) noexcept {
ObjHeader* prev;
if (loadAtomic) {
prev = ref.loadAtomic(std::memory_order_relaxed);
} else {
prev = ref.load();
}

if (prev != nullptr && prev->heap()) {
// TODO Redundant if the destination object is black.
// Yet at the moment there is no efficient way to distinguish black and gray objects.
@@ -143,11 +149,11 @@ NO_INLINE void beforeHeapRefUpdateSlowPath(mm::DirectRefAccessor ref, ObjHeader*

} // namespace

ALWAYS_INLINE void gc::barriers::beforeHeapRefUpdate(mm::DirectRefAccessor ref, ObjHeader* value) noexcept {
ALWAYS_INLINE void gc::barriers::beforeHeapRefUpdate(mm::DirectRefAccessor ref, ObjHeader* value, bool loadAtomic) noexcept {
auto phase = currentPhase();
BarriersLogDebug(phase, "Write *%p <- %p (%p overwritten)", ref.location(), value, ref.load());
if (__builtin_expect(phase == BarriersPhase::kMarkClosure, false)) {
beforeHeapRefUpdateSlowPath(ref, value);
beforeHeapRefUpdateSlowPath(ref, value, loadAtomic);
}
}

@@ -175,7 +181,7 @@ NO_INLINE ObjHeader* weakRefReadInWeakSweepSlowPath(ObjHeader* weakReferee) noex

} // namespace

ALWAYS_INLINE ObjHeader* gc::barriers::weakRefReadBarrier(std::atomic<ObjHeader*>& weakReferee) noexcept {
ALWAYS_INLINE ObjHeader* gc::barriers::weakRefReadBarrier(std_support::atomic_ref<ObjHeader*> weakReferee) noexcept {
if (__builtin_expect(currentPhase() != BarriersPhase::kDisabled, false)) {
// Mark dispatcher requires weak reads be protected by the following:
auto weakReadProtector = markDispatcher().weakReadProtector();
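The new third parameter of gc::barriers::beforeHeapRefUpdate, loadAtomic, only changes how the slow path reads the previous referent. Both call modes introduced by this commit are condensed here from the ReferenceOps.cpp and SpecialRefRegistry.hpp hunks further down; this is a restatement with added comments, not additional API:

// Ordinary heap reference store: the mutator owns the slot, so a plain load of the old value suffices.
gc::beforeHeapRefUpdate(direct(), value, /* loadAtomic = */ false);

// Dropping the last reference to a StableRef: the GC may scan Node::obj_ concurrently,
// so the slow path reads the previous value with a relaxed atomic load.
gc::beforeHeapRefUpdate(mm::DirectRefAccessor(obj_), nullptr, /* loadAtomic = */ true);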
4 changes: 2 additions & 2 deletions kotlin-native/runtime/src/gc/cms/cpp/Barriers.hpp
@@ -34,8 +34,8 @@ void enableBarriers(int64_t epoch) noexcept;
void switchToWeakProcessingBarriers() noexcept;
void disableBarriers() noexcept;

void beforeHeapRefUpdate(mm::DirectRefAccessor ref, ObjHeader* value) noexcept;
void beforeHeapRefUpdate(mm::DirectRefAccessor ref, ObjHeader* value, bool loadAtomic) noexcept;

ObjHeader* weakRefReadBarrier(std::atomic<ObjHeader*>& weakReferee) noexcept;
ObjHeader* weakRefReadBarrier(std_support::atomic_ref<ObjHeader*> weakReferee) noexcept;

} // namespace kotlin::gc::barriers
@@ -6,6 +6,7 @@
#include "ConcurrentMarkAndSweep.hpp"

#include <mutex>
#include <atomic>

#include "gtest/gtest.h"

@@ -110,5 +111,41 @@ TYPED_TEST_P(TracingGCTest, WeakResurrectionAtMarkTermination) {
}
}

REGISTER_TYPED_TEST_SUITE_WITH_LISTS(TracingGCTest, TRACING_GC_TEST_LIST, WeakResurrectionAtMarkTermination);
TYPED_TEST_P(TracingGCTest, ReleaseStableRefDuringRSCollection) {
std::vector<Mutator> mutators(kDefaultThreadCount);

std::atomic<size_t> readyThreads = 0;
std::atomic<bool> gcDone = false;

std::vector<std::future<void>> mutatorFutures;
for (int i = 0; i < kDefaultThreadCount; ++i) {
mutatorFutures.push_back(mutators[i].Execute([&](mm::ThreadData& threadData, Mutator& mutator) {
auto& obj = AllocateObject(threadData);
auto stableRef = mm::StableRef::create(obj.header());

++readyThreads;
while (!mm::IsThreadSuspensionRequested()) {}

mm::safePoint();

mutator.AddStackRoot(obj.header());
std::move(stableRef).dispose();

while (!gcDone) { mm::safePoint(); }

EXPECT_THAT(mutator.Alive(), testing::Contains(obj.header()));
}));
}

while (readyThreads < kDefaultThreadCount) {}

mm::GlobalData::Instance().gcScheduler().scheduleAndWaitFinalized();
gcDone = true;

for (auto& future : mutatorFutures) {
future.wait();
}
}

REGISTER_TYPED_TEST_SUITE_WITH_LISTS(TracingGCTest, TRACING_GC_TEST_LIST, WeakResurrectionAtMarkTermination, ReleaseStableRefDuringRSCollection);
INSTANTIATE_TYPED_TEST_SUITE_P(CMS, TracingGCTest, ConcurrentMarkAndSweepTest);
6 changes: 3 additions & 3 deletions kotlin-native/runtime/src/gc/cms/cpp/GCImpl.cpp
@@ -87,11 +87,11 @@ bool gc::GC::mainThreadFinalizerProcessorAvailable() noexcept {
return impl_->gc().mainThreadFinalizerProcessor().available();
}

ALWAYS_INLINE void gc::beforeHeapRefUpdate(mm::DirectRefAccessor ref, ObjHeader* value) noexcept {
barriers::beforeHeapRefUpdate(ref, value);
ALWAYS_INLINE void gc::beforeHeapRefUpdate(mm::DirectRefAccessor ref, ObjHeader* value, bool loadAtomic) noexcept {
barriers::beforeHeapRefUpdate(ref, value, loadAtomic);
}

ALWAYS_INLINE OBJ_GETTER(gc::weakRefReadBarrier, std::atomic<ObjHeader*>& weakReferee) noexcept {
ALWAYS_INLINE OBJ_GETTER(gc::weakRefReadBarrier, std_support::atomic_ref<ObjHeader*> weakReferee) noexcept {
RETURN_OBJ(gc::barriers::weakRefReadBarrier(weakReferee));
}

4 changes: 2 additions & 2 deletions kotlin-native/runtime/src/gc/common/cpp/GC.hpp
@@ -87,8 +87,8 @@ class GC : private Pinned {
std::unique_ptr<Impl> impl_;
};

void beforeHeapRefUpdate(mm::DirectRefAccessor ref, ObjHeader* value) noexcept;
OBJ_GETTER(weakRefReadBarrier, std::atomic<ObjHeader*>& weakReferee) noexcept;
void beforeHeapRefUpdate(mm::DirectRefAccessor ref, ObjHeader* value, bool loadAtomic) noexcept;
OBJ_GETTER(weakRefReadBarrier, std_support::atomic_ref<ObjHeader*> weakReferee) noexcept;

bool isMarked(ObjHeader* object) noexcept;

6 changes: 3 additions & 3 deletions kotlin-native/runtime/src/gc/common/cpp/MarkAndSweepUtils.hpp
@@ -162,8 +162,8 @@ void collectRootSet(GCHandle handle, typename Traits::MarkQueue& markQueue, F&&
template <typename Traits>
void processWeaks(GCHandle gcHandle, mm::SpecialRefRegistry& registry) noexcept {
auto handle = gcHandle.processWeaks();
for (auto& object : registry.lockForIter()) {
auto* obj = object.load(std::memory_order_relaxed);
for (auto objRef : registry.lockForIter()) {
auto* obj = objRef.load(std::memory_order_relaxed);
if (!obj) {
// We already processed it at some point.
handle.addUndisposed();
@@ -176,7 +176,7 @@ void processWeaks(GCHandle gcHandle, mm::SpecialRefRegistry& registry) noexcept
continue;
}
// Object is not alive. Clear it out.
object.store(nullptr, std::memory_order_relaxed);
objRef.store(nullptr, std::memory_order_relaxed);
handle.addNulled();
}
}
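The processWeaks loop above now takes its element by value because SpecialRefRegistry's iterator (see the SpecialRefRegistry.hpp hunk further down) yields a std_support::atomic_ref<ObjHeader*> instead of a std::atomic<ObjHeader*>&. Assuming std_support::atomic_ref mirrors C++20 std::atomic_ref, it is a small non-owning handle that is cheap to copy, and loads and stores through it still hit the node's obj_ field. A condensed restatement of the loop, with the traits-based aliveness check elided:

for (auto objRef : registry.lockForIter()) {                  // objRef is a copyable handle to Node::obj_
    ObjHeader* obj = objRef.load(std::memory_order_relaxed);  // read through the handle
    if (obj == nullptr) continue;                             // already cleared earlier
    // ... aliveness check elided ...
    objRef.store(nullptr, std::memory_order_relaxed);         // clearing writes back to Node::obj_
}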
4 changes: 2 additions & 2 deletions kotlin-native/runtime/src/gc/noop/cpp/GCImpl.cpp
@@ -62,9 +62,9 @@ bool gc::GC::mainThreadFinalizerProcessorAvailable() noexcept {
return false;
}

ALWAYS_INLINE void gc::beforeHeapRefUpdate(mm::DirectRefAccessor ref, ObjHeader* value) noexcept {}
ALWAYS_INLINE void gc::beforeHeapRefUpdate(mm::DirectRefAccessor ref, ObjHeader* value, bool loadAtomic) noexcept {}

ALWAYS_INLINE OBJ_GETTER(gc::weakRefReadBarrier, std::atomic<ObjHeader*>& weakReferee) noexcept {
ALWAYS_INLINE OBJ_GETTER(gc::weakRefReadBarrier, std_support::atomic_ref<ObjHeader*> weakReferee) noexcept {
RETURN_OBJ(weakReferee.load(std::memory_order_relaxed));
}

4 changes: 2 additions & 2 deletions kotlin-native/runtime/src/gc/pmcs/cpp/Barriers.cpp
@@ -30,7 +30,7 @@ ObjHeader* weakRefBarrierImpl(ObjHeader* weakReferee) noexcept {
return weakReferee;
}

NO_INLINE ObjHeader* weakRefReadSlowPath(std::atomic<ObjHeader*>& weakReferee) noexcept {
NO_INLINE ObjHeader* weakRefReadSlowPath(std_support::atomic_ref<ObjHeader*> weakReferee) noexcept {
// re-read the action here to avoid register pollution outside the function
auto barrier = weakRefBarrier.load(std::memory_order_seq_cst);

@@ -107,7 +107,7 @@ void gc::DisableWeakRefBarriers() noexcept {
}
}

OBJ_GETTER(gc::WeakRefRead, std::atomic<ObjHeader*>& weakReferee) noexcept {
OBJ_GETTER(gc::WeakRefRead, std_support::atomic_ref<ObjHeader*> weakReferee) noexcept {
if (!compiler::concurrentWeakSweep()) {
RETURN_OBJ(weakReferee.load(std::memory_order_relaxed));
}
2 changes: 1 addition & 1 deletion kotlin-native/runtime/src/gc/pmcs/cpp/Barriers.hpp
@@ -32,6 +32,6 @@ class BarriersThreadData : private Pinned {
void EnableWeakRefBarriers(int64_t epoch) noexcept;
void DisableWeakRefBarriers() noexcept;

OBJ_GETTER(WeakRefRead, std::atomic<ObjHeader*>& weakReferee) noexcept;
OBJ_GETTER(WeakRefRead, std_support::atomic_ref<ObjHeader*> weakReferee) noexcept;

} // namespace kotlin::gc
4 changes: 2 additions & 2 deletions kotlin-native/runtime/src/gc/pmcs/cpp/GCImpl.cpp
@@ -87,9 +87,9 @@ bool gc::GC::mainThreadFinalizerProcessorAvailable() noexcept {
return impl_->gc().mainThreadFinalizerProcessor().available();
}

ALWAYS_INLINE void gc::beforeHeapRefUpdate(mm::DirectRefAccessor ref, ObjHeader* value) noexcept {}
ALWAYS_INLINE void gc::beforeHeapRefUpdate(mm::DirectRefAccessor ref, ObjHeader* value, bool loadAtomic) noexcept {}

ALWAYS_INLINE OBJ_GETTER(gc::weakRefReadBarrier, std::atomic<ObjHeader*>& weakReferee) noexcept {
ALWAYS_INLINE OBJ_GETTER(gc::weakRefReadBarrier, std_support::atomic_ref<ObjHeader*> weakReferee) noexcept {
RETURN_RESULT_OF(gc::WeakRefRead, weakReferee);
}

4 changes: 2 additions & 2 deletions kotlin-native/runtime/src/gc/stms/cpp/GCImpl.cpp
@@ -80,9 +80,9 @@ bool gc::GC::mainThreadFinalizerProcessorAvailable() noexcept {
return impl_->gc().mainThreadFinalizerProcessor().available();
}

ALWAYS_INLINE void gc::beforeHeapRefUpdate(mm::DirectRefAccessor ref, ObjHeader* value) noexcept {}
ALWAYS_INLINE void gc::beforeHeapRefUpdate(mm::DirectRefAccessor ref, ObjHeader* value, bool loadAtomic) noexcept {}

ALWAYS_INLINE OBJ_GETTER(gc::weakRefReadBarrier, std::atomic<ObjHeader*>& weakReferee) noexcept {
ALWAYS_INLINE OBJ_GETTER(gc::weakRefReadBarrier, std_support::atomic_ref<ObjHeader*> weakReferee) noexcept {
RETURN_OBJ(weakReferee.load(std::memory_order_relaxed));
}

2 changes: 1 addition & 1 deletion kotlin-native/runtime/src/main/cpp/ReferenceOps.hpp
@@ -210,6 +210,6 @@ class RefField : private Pinned {
ObjHeader* value_ = nullptr;
};

OBJ_GETTER(weakRefReadBarrier, std::atomic<ObjHeader*>& weakReferee) noexcept;
OBJ_GETTER(weakRefReadBarrier, std_support::atomic_ref<ObjHeader*> weakReferee) noexcept;

}
4 changes: 2 additions & 2 deletions kotlin-native/runtime/src/mm/cpp/ReferenceOps.cpp
@@ -17,12 +17,12 @@ template<> ALWAYS_INLINE void mm::RefAccessor<true>::afterLoad() noexcept {}

// on heap
template<> ALWAYS_INLINE void mm::RefAccessor<false>::beforeStore(ObjHeader* value) noexcept {
gc::beforeHeapRefUpdate(direct(), value);
gc::beforeHeapRefUpdate(direct(), value, false);
}
template<> ALWAYS_INLINE void mm::RefAccessor<false>::afterStore(ObjHeader*) noexcept {}
template<> ALWAYS_INLINE void mm::RefAccessor<false>::beforeLoad() noexcept {}
template<> ALWAYS_INLINE void mm::RefAccessor<false>::afterLoad() noexcept {}

ALWAYS_INLINE OBJ_GETTER(mm::weakRefReadBarrier, std::atomic<ObjHeader*>& weakReferee) noexcept {
ALWAYS_INLINE OBJ_GETTER(mm::weakRefReadBarrier, std_support::atomic_ref<ObjHeader*> weakReferee) noexcept {
RETURN_RESULT_OF(gc::weakRefReadBarrier, weakReferee);
}
2 changes: 1 addition & 1 deletion kotlin-native/runtime/src/mm/cpp/SpecialRefRegistry.cpp
@@ -56,7 +56,7 @@ mm::SpecialRefRegistry::Node* mm::SpecialRefRegistry::nextRoot(Node* current) no
auto [candidatePrev, candidateNext] = eraseFromRoots(current, candidate);
// We removed candidate. But should we have?
if (candidate->rc_.load(std::memory_order_relaxed) > 0) {
RuntimeAssert(candidate->obj_.load(std::memory_order_relaxed) != nullptr, "candidate cannot have a null obj_");
RuntimeAssert(candidate->objAtomic().load(std::memory_order_relaxed) != nullptr, "candidate cannot have a null obj_");
// Ooops. Let's put it back. Okay to put into the head.
insertIntoRootsHead(*candidate);
}
55 changes: 34 additions & 21 deletions kotlin-native/runtime/src/mm/cpp/SpecialRefRegistry.hpp
@@ -88,7 +88,7 @@ class SpecialRefRegistry : private Pinned {
auto rc = rc_.exchange(disposedMarker, std::memory_order_release);
if (compiler::runtimeAssertsEnabled()) {
if (rc > 0) {
auto* obj = obj_.load(std::memory_order_relaxed);
auto* obj = objAtomic().load(std::memory_order_relaxed);
// In objc export if ObjCClass extends from KtClass
// doing retain+autorelease inside [ObjCClass dealloc] will cause
// this->dispose() be called after this->retain() but before
@@ -107,19 +107,19 @@ class SpecialRefRegistry : private Pinned {
auto rc = rc_.load(std::memory_order_relaxed);
RuntimeAssert(rc >= 0, "Dereferencing StableRef@%p with rc %d", this, rc);
}
return obj_.load(std::memory_order_relaxed);
return objAtomic().load(std::memory_order_relaxed);
}

OBJ_GETTER0(tryRef) noexcept {
AssertThreadState(ThreadState::kRunnable);
RETURN_RESULT_OF(mm::weakRefReadBarrier, obj_);
RETURN_RESULT_OF(mm::weakRefReadBarrier, objAtomic());
}

void retainRef() noexcept {
auto rc = rc_.fetch_add(1, std::memory_order_relaxed);
RuntimeAssert(rc >= 0, "Retaining StableRef@%p with rc %d", this, rc);
if (rc == 0) {
if (!obj_.load(std::memory_order_relaxed)) {
if (!objAtomic().load(std::memory_order_relaxed)) {
// In objc export if ObjCClass extends from KtClass
// calling retain inside [ObjCClass dealloc] will cause
// node.retainRef() be called after node.obj_ was cleared but
@@ -129,27 +129,37 @@ class SpecialRefRegistry : private Pinned {
return;
}

// TODO: With CMS root-set-write barrier for marking `obj_` should be here.
// Until we have the barrier, the object must already be in the roots.
// If 0->1 happened from `[ObjCClass _tryRetain]`, it would first hold the object
// on the stack via `tryRef`.
// If 0->1 happened during construction:
// * First of all, currently it's impossible because the `Node` is created with rc=1 and not inserted
// into the roots list until publishing.
// * Even if the above changes, for the construction, the object must have been passed in from somewhere,
// so it must be reachable anyway.
// If 0->1 happened because an object is passing through the interop border for the second time (or more)
// (e.g. accessing a non-permanent global a couple of times). Follows the construction case above:
// "the object must have been passed in from somewhere, so it must be reachable anyway".
// With the current CMS implementation no barrier is required here.
// The CMS GC builds a snapshot-at-the-beginning (SATB) mark closure,
// which means it only has to remember the concurrent deletion of references, not their creation.
// TODO: A write-into-root-set barrier might be required here for other concurrent mark strategies.

// In case of non-concurrent root set scanning, it is only required for the object to already be in roots.
// If 0->1 happened from `[ObjCClass _tryRetain]`, it would first hold the object
// on the stack via `tryRef`.
// If 0->1 happened during construction:
// * First of all, currently it's impossible because the `Node` is created with rc=1 and not inserted
// into the roots list until publishing.
// * Even if the above changes, for the construction, the object must have been passed in from somewhere,
// so it must be reachable anyway.
// If 0->1 happened because an object is passing through the interop border for the second time (or more)
// (e.g. accessing a non-permanent global a couple of times). Follows the construction case above:
// "the object must have been passed in from somewhere, so it must be reachable anyway".

// 0->1 changes require putting this node into the root set.
SpecialRefRegistry::instance().insertIntoRootsHead(*this);
}
}

void releaseRef() noexcept {
auto rc = rc_.fetch_sub(1, std::memory_order_relaxed);
RuntimeAssert(rc > 0, "Releasing StableRef@%p with rc %d", this, rc);
auto rcBefore = rc_.fetch_sub(1, std::memory_order_relaxed);
RuntimeAssert(rcBefore > 0, "Releasing StableRef@%p with rc %d", this, rcBefore);
if (rcBefore == 1) {
// It's potentially a removal from global root set.
// The CMS GC scans global root set concurrently.
// Notify GC about the removal.
gc::beforeHeapRefUpdate(mm::DirectRefAccessor(obj_), nullptr, true);
}
}

RawSpecialRef* asRaw() noexcept { return reinterpret_cast<RawSpecialRef*>(this); }
@@ -169,7 +179,10 @@ class SpecialRefRegistry : private Pinned {
// Synchronization between GC and mutators happens via enabling/disabling
// the barriers.
// TODO: Try to handle it atomically only when the GC is in progress.
std::atomic<ObjHeader*> obj_;
std_support::atomic_ref<ObjHeader*> objAtomic() noexcept { return std_support::atomic_ref{obj_}; }
std_support::atomic_ref<ObjHeader* const> objAtomic() const noexcept { return std_support::atomic_ref{obj_}; }
ObjHeader* obj_;

// Only ever updated using relaxed memory ordering. Any synchronization
// with nextRoot_ is achieved via acquire-release of nextRoot_.
std::atomic<Rc> rc_; // After dispose() will be disposedMarker.
@@ -221,7 +234,7 @@ class SpecialRefRegistry : private Pinned {
ObjHeader* operator*() const noexcept {
// Ignoring rc here. If someone nulls out rc during root
// scanning, it's okay to be conservative and still make it a root.
return node_->obj_.load(std::memory_order_relaxed);
return node_->objAtomic().load(std::memory_order_relaxed);
}

RootsIterator& operator++() noexcept {
@@ -258,7 +271,7 @@ class SpecialRefRegistry : private Pinned {

class Iterator {
public:
std::atomic<ObjHeader*>& operator*() noexcept { return iterator_->obj_; }
std_support::atomic_ref<ObjHeader*> operator*() noexcept { return iterator_->objAtomic(); }

Iterator& operator++() noexcept {
iterator_ = owner_->findAliveNode(std::next(iterator_));
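The storage change above is what enables the barrier call in releaseRef(): obj_ becomes a plain ObjHeader* that can be handed to mm::DirectRefAccessor, while objAtomic() wraps the same field whenever atomic access is needed (tryRef, root scanning, weak processing). Assuming std_support::atomic_ref mirrors C++20 std::atomic_ref, the pattern reduces to this minimal, self-contained sketch (C++20, ObjHeader stubbed out; not runtime code):

#include <atomic>

struct ObjHeader {};

struct Node {
    ObjHeader* obj_ = nullptr; // plain field: non-atomic users can take it by reference

    // Wrap the same field for atomic access on demand.
    std::atomic_ref<ObjHeader*> objAtomic() noexcept { return std::atomic_ref<ObjHeader*>(obj_); }
};

int main() {
    Node n;
    ObjHeader obj;
    n.objAtomic().store(&obj, std::memory_order_relaxed);                // atomic store into the plain field
    ObjHeader* snapshot = n.objAtomic().load(std::memory_order_relaxed); // atomic load of the same field
    return snapshot == &obj ? 0 : 1;
}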
@@ -58,7 +58,7 @@ class SpecialRefRegistryTest : public testing::Test {
std::vector<ObjHeader*> all(Invalidated&&... invalidated) noexcept {
std::set<ObjHeader*> invalidatedSet({std::forward<Invalidated>(invalidated)...});
std::vector<ObjHeader*> result;
for (auto& obj : mm::SpecialRefRegistry::instance().lockForIter()) {
for (auto obj : mm::SpecialRefRegistry::instance().lockForIter()) {
if (invalidatedSet.find(obj) != invalidatedSet.end()) {
obj = nullptr;
}
