
Commit 5c406d9
Merge pull request #29 from udesou/fix/refactor-block-for-gc
Refactoring the code to reuse most of jl_gc_collect in block_for_gc
udesou authored Sep 12, 2023
2 parents 83796a7 + 66a49cc commit 5c406d9
Showing 2 changed files with 25 additions and 25 deletions.
src/gc-common.c (25 additions, 0 deletions)
@@ -601,6 +601,31 @@ JL_DLLEXPORT int jl_gc_enable(int on)
     return prev;
 }
 
+void jl_gc_wait_for_the_world(jl_ptls_t* gc_all_tls_states, int gc_n_threads)
+{
+    JL_TIMING(GC, GC_Stop);
+#ifdef USE_TRACY
+    TracyCZoneCtx ctx = JL_TIMING_DEFAULT_BLOCK->tracy_ctx;
+    TracyCZoneColor(ctx, 0x696969);
+#endif
+    assert(gc_n_threads);
+    if (gc_n_threads > 1)
+        jl_wake_libuv();
+    for (int i = 0; i < gc_n_threads; i++) {
+        jl_ptls_t ptls2 = gc_all_tls_states[i];
+        if (ptls2 != NULL) {
+            // This acquire load pairs with the release stores
+            // in the signal handler of safepoint so we are sure that
+            // all the stores on those threads are visible.
+            // We're currently also using atomic store release in mutator threads
+            // (in jl_gc_state_set), but we may want to use signals to flush the
+            // memory operations on those threads lazily instead.
+            while (!jl_atomic_load_relaxed(&ptls2->gc_state) || !jl_atomic_load_acquire(&ptls2->gc_state))
+                jl_cpu_pause(); // yield?
+        }
+    }
+}
+
 JL_DLLEXPORT int jl_gc_is_enabled(void)
 {
     jl_ptls_t ptls = jl_current_task->ptls;
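
With jl_gc_wait_for_the_world in gc-common.c, the stop-the-world wait can be shared by more than one collection entry point. Below is a condensed sketch of how a caller typically drives it, modeled on the sequence in jl_gc_collect (the collection work itself is omitted, and exact details may differ between versions and branches):

// Sketch only, condensed from the usual jl_gc_collect sequence. Assumes it
// sits inside the runtime sources, where jl_safepoint_start_gc(),
// jl_safepoint_end_gc(), jl_n_threads and jl_all_tls_states are visible.
static void example_stop_the_world(jl_ptls_t ptls)
{
    int8_t old_state = jl_atomic_load_relaxed(&ptls->gc_state);
    jl_atomic_store_release(&ptls->gc_state, JL_GC_STATE_WAITING);
    if (!jl_safepoint_start_gc()) {
        // Another thread won the race to collect; just behave like a mutator.
        jl_gc_state_set(ptls, old_state, JL_GC_STATE_WAITING);
        return;
    }
    // Snapshot the thread list and spin until every mutator has reached a
    // safepoint (gc_state != 0 on all threads).
    int gc_n_threads = jl_atomic_load_acquire(&jl_n_threads);
    jl_ptls_t *gc_all_tls_states = jl_atomic_load_relaxed(&jl_all_tls_states);
    jl_gc_wait_for_the_world(gc_all_tls_states, gc_n_threads);

    // ... collection work happens here while the world is stopped ...

    jl_safepoint_end_gc(); // let the other threads resume
    jl_gc_state_set(ptls, old_state, JL_GC_STATE_WAITING);
}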
src/gc.c (0 additions, 25 deletions)
@@ -344,31 +344,6 @@ NOINLINE uintptr_t gc_get_stack_ptr(void)
 
 #define should_timeout() 0
 
-void jl_gc_wait_for_the_world(jl_ptls_t* gc_all_tls_states, int gc_n_threads)
-{
-    JL_TIMING(GC, GC_Stop);
-#ifdef USE_TRACY
-    TracyCZoneCtx ctx = JL_TIMING_DEFAULT_BLOCK->tracy_ctx;
-    TracyCZoneColor(ctx, 0x696969);
-#endif
-    assert(gc_n_threads);
-    if (gc_n_threads > 1)
-        jl_wake_libuv();
-    for (int i = 0; i < gc_n_threads; i++) {
-        jl_ptls_t ptls2 = gc_all_tls_states[i];
-        if (ptls2 != NULL) {
-            // This acquire load pairs with the release stores
-            // in the signal handler of safepoint so we are sure that
-            // all the stores on those threads are visible.
-            // We're currently also using atomic store release in mutator threads
-            // (in jl_gc_state_set), but we may want to use signals to flush the
-            // memory operations on those threads lazily instead.
-            while (!jl_atomic_load_relaxed(&ptls2->gc_state) || !jl_atomic_load_acquire(&ptls2->gc_state))
-                jl_cpu_pause(); // yield?
-        }
-    }
-}
-
 // malloc wrappers, aligned allocation
 
 #if defined(_OS_WINDOWS_)
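
The commit message refers to block_for_gc reusing most of jl_gc_collect. block_for_gc itself is not part of this diff, so the following is purely hypothetical, but the intended direction is that a second collector entry point can now delegate to the stock path instead of duplicating the stop-the-world plumbing:

// Hypothetical illustration; the real block_for_gc lives outside this commit.
void block_for_gc_example(void)
{
    // The standard entry point performs jl_safepoint_start_gc() and the
    // shared jl_gc_wait_for_the_world() internally before collecting.
    jl_gc_collect(JL_GC_FULL);
}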
