SelfProfiler API refactoring and part one of event review #64840

Merged
24 changes: 3 additions & 21 deletions src/librustc/session/mod.rs
@@ -32,7 +32,7 @@ use syntax::source_map;
use syntax::parse::{self, ParseSess};
use syntax::symbol::Symbol;
use syntax_pos::{MultiSpan, Span};
use crate::util::profiling::SelfProfiler;
use crate::util::profiling::{SelfProfiler, SelfProfilerRef};

use rustc_target::spec::{PanicStrategy, RelroLevel, Target, TargetTriple};
use rustc_data_structures::flock;
@@ -129,7 +129,7 @@ pub struct Session {
pub profile_channel: Lock<Option<mpsc::Sender<ProfileQueriesMsg>>>,

/// Used by `-Z self-profile`.
pub self_profiling: Option<Arc<SelfProfiler>>,
pub prof: SelfProfilerRef,

/// Some measurements that are being gathered during compilation.
pub perf_stats: PerfStats,
@@ -835,24 +835,6 @@ impl Session {
}
}

#[inline(never)]
#[cold]
fn profiler_active<F: FnOnce(&SelfProfiler) -> ()>(&self, f: F) {
match &self.self_profiling {
None => bug!("profiler_active() called but there was no profiler active"),
Some(profiler) => {
f(&profiler);
}
}
}

#[inline(always)]
pub fn profiler<F: FnOnce(&SelfProfiler) -> ()>(&self, f: F) {
if unlikely!(self.self_profiling.is_some()) {
self.profiler_active(f)
}
}

pub fn print_perf_stats(&self) {
println!(
"Total time spent computing symbol hashes: {}",
@@ -1257,7 +1239,7 @@ fn build_session_(
imported_macro_spans: OneThread::new(RefCell::new(FxHashMap::default())),
incr_comp_session: OneThread::new(RefCell::new(IncrCompSession::NotInitialized)),
cgu_reuse_tracker,
self_profiling: self_profiler,
prof: SelfProfilerRef::new(self_profiler),
profile_channel: Lock::new(None),
perf_stats: PerfStats {
symbol_hash_time: Lock::new(Duration::from_secs(0)),
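The removed `profiler_active`/`profiler` helpers made every caller go through a closure and repeated the `Option<Arc<SelfProfiler>>` handling inside `Session`. The new `prof: SelfProfilerRef` field moves that behind a dedicated handle whose methods are called directly (`query_cache_hit`, `query_provider`, and so on, as seen in the `plumbing.rs` changes below). The handle's definition lives in `crate::util::profiling` and is not part of this diff; the following is a minimal, self-contained sketch of the pattern it appears to follow, assuming it wraps the optional `Arc` and keeps the hot `is_some` check inline with the cold recording path out of line, as the deleted `Session` helpers did. Everything except the `SelfProfilerRef::new` constructor shape is illustrative, not the real rustc implementation.

```rust
use std::sync::Arc;

// Stand-in for rustc's `SelfProfiler`; the real type records events into a profile.
struct SelfProfiler;

impl SelfProfiler {
    fn record_event(&self, label: &str) {
        println!("profiling event: {}", label);
    }
}

/// Sketch of the new handle: cheaply clonable, a no-op when profiling is disabled.
#[derive(Clone)]
struct SelfProfilerRef {
    profiler: Option<Arc<SelfProfiler>>,
}

impl SelfProfilerRef {
    fn new(profiler: Option<Arc<SelfProfiler>>) -> SelfProfilerRef {
        SelfProfilerRef { profiler }
    }

    /// Hot path: a single `is_some` check, mirroring the removed
    /// `#[inline(always)]` wrapper `Session::profiler`.
    #[inline(always)]
    fn query_cache_hit(&self, query_name: &str) {
        if self.profiler.is_some() {
            self.query_cache_hit_cold(query_name);
        }
    }

    /// Cold path, kept out of line like the removed `Session::profiler_active`.
    #[inline(never)]
    #[cold]
    fn query_cache_hit_cold(&self, query_name: &str) {
        if let Some(profiler) = &self.profiler {
            profiler.record_event(&format!("query cache hit: {}", query_name));
        }
    }
}

fn main() {
    let prof = SelfProfilerRef::new(Some(Arc::new(SelfProfiler)));
    prof.query_cache_hit("type_of"); // recorded

    let disabled = SelfProfilerRef::new(None);
    disabled.query_cache_hit("type_of"); // near-free no-op
}
```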
4 changes: 4 additions & 0 deletions src/librustc/ty/context.rs
@@ -45,6 +45,7 @@ use crate::ty::CanonicalPolyFnSig;
use crate::util::common::ErrorReported;
use crate::util::nodemap::{DefIdMap, DefIdSet, ItemLocalMap, ItemLocalSet};
use crate::util::nodemap::{FxHashMap, FxHashSet};
use crate::util::profiling::SelfProfilerRef;

use errors::DiagnosticBuilder;
use arena::SyncDroplessArena;
@@ -995,6 +996,8 @@ pub struct GlobalCtxt<'tcx> {

pub dep_graph: DepGraph,

pub prof: SelfProfilerRef,

/// Common objects.
pub common: Common<'tcx>,

@@ -1225,6 +1228,7 @@ impl<'tcx> TyCtxt<'tcx> {
arena: WorkerLocal::new(|_| Arena::default()),
interners,
dep_graph,
prof: s.prof.clone(),
common,
types: common_types,
lifetimes: common_lifetimes,
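`GlobalCtxt` now carries its own `prof` handle, initialized from `s.prof.clone()`, so query code can write `tcx.prof.…` instead of reaching through `tcx.sess`. For that to be reasonable the clone has to be cheap. Assuming the handle wraps an `Option<Arc<SelfProfiler>>` as sketched above, cloning only bumps a reference count and both handles observe the same profiler; the snippet below (with stand-in types, not the real rustc ones) just demonstrates that property.

```rust
use std::sync::Arc;

struct SelfProfiler; // stand-in for the real profiler

#[derive(Clone)]
struct SelfProfilerRef {
    profiler: Option<Arc<SelfProfiler>>,
}

fn main() {
    let sess_prof = SelfProfilerRef { profiler: Some(Arc::new(SelfProfiler)) };

    // What `prof: s.prof.clone()` does when `GlobalCtxt` is built: the clone only
    // bumps the `Arc` refcount, so `tcx.prof` and `sess.prof` share one profiler.
    let tcx_prof = sess_prof.clone();

    assert!(Arc::ptr_eq(
        sess_prof.profiler.as_ref().unwrap(),
        tcx_prof.profiler.as_ref().unwrap(),
    ));
    println!("tcx.prof and sess.prof point at the same SelfProfiler");
}
```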
26 changes: 11 additions & 15 deletions src/librustc/ty/query/plumbing.rs
@@ -112,7 +112,7 @@ impl<'a, 'tcx, Q: QueryDescription<'tcx>> JobOwner<'a, 'tcx, Q> {
let mut lock = cache.get_shard_by_value(key).lock();
if let Some(value) = lock.results.get(key) {
profq_msg!(tcx, ProfileQueriesMsg::CacheHit);
tcx.sess.profiler(|p| p.record_query_hit(Q::NAME));
tcx.prof.query_cache_hit(Q::NAME);
let result = (value.value.clone(), value.index);
#[cfg(debug_assertions)]
{
@@ -128,7 +128,7 @@ impl<'a, 'tcx, Q: QueryDescription<'tcx>> JobOwner<'a, 'tcx, Q> {
// in another thread has completed. Record how long we wait in the
// self-profiler.
#[cfg(parallel_compiler)]
tcx.sess.profiler(|p| p.query_blocked_start(Q::NAME));
tcx.prof.query_blocked_start(Q::NAME);

job.clone()
},
@@ -170,7 +170,7 @@ impl<'a, 'tcx, Q: QueryDescription<'tcx>> JobOwner<'a, 'tcx, Q> {
#[cfg(parallel_compiler)]
{
let result = job.r#await(tcx, span);
tcx.sess.profiler(|p| p.query_blocked_end(Q::NAME));
tcx.prof.query_blocked_end(Q::NAME);

if let Err(cycle) = result {
return TryGetJob::Cycle(Q::handle_cycle_error(tcx, cycle));
@@ -382,8 +382,9 @@ impl<'tcx> TyCtxt<'tcx> {
}

if Q::ANON {

profq_msg!(self, ProfileQueriesMsg::ProviderBegin);
self.sess.profiler(|p| p.start_query(Q::NAME));
let prof_timer = self.prof.query_provider(Q::NAME);

let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
self.start_query(job.job.clone(), diagnostics, |tcx| {
@@ -393,7 +394,7 @@ impl<'tcx> TyCtxt<'tcx> {
})
});

self.sess.profiler(|p| p.end_query(Q::NAME));
drop(prof_timer);
profq_msg!(self, ProfileQueriesMsg::ProviderEnd);

self.dep_graph.read_index(dep_node_index);
@@ -451,9 +452,8 @@ impl<'tcx> TyCtxt<'tcx> {
// First we try to load the result from the on-disk cache.
let result = if Q::cache_on_disk(self, key.clone(), None) &&
self.sess.opts.debugging_opts.incremental_queries {
self.sess.profiler(|p| p.incremental_load_result_start(Q::NAME));
let _prof_timer = self.prof.incr_cache_loading(Q::NAME);
let result = Q::try_load_from_disk(self, prev_dep_node_index);
self.sess.profiler(|p| p.incremental_load_result_end(Q::NAME));

// We always expect to find a cached result for things that
// can be forced from `DepNode`.
@@ -469,21 +469,17 @@ impl<'tcx> TyCtxt<'tcx> {

let result = if let Some(result) = result {
profq_msg!(self, ProfileQueriesMsg::CacheHit);
self.sess.profiler(|p| p.record_query_hit(Q::NAME));
Review comment (Member):
We don't need this event?

Reply (Member, PR author):
I was pondering this question too. I opted to remove it because we already record the incr_cache_loading event above and, strictly speaking, this is not an in-memory cache hit. However, I also want to replace the query_provider event right below with something that indicates that this is not a regular query provider invocation but was triggered in place of a cache load.

Review comment (Member):
Ok, sounds good.

result
} else {
// We could not load a result from the on-disk cache, so
// recompute.

self.sess.profiler(|p| p.start_query(Q::NAME));
let _prof_timer = self.prof.query_provider(Q::NAME);

// The dep-graph for this computation is already in-place.
let result = self.dep_graph.with_ignore(|| {
Q::compute(self, key)
});

self.sess.profiler(|p| p.end_query(Q::NAME));
result
};

@@ -551,7 +547,7 @@ impl<'tcx> TyCtxt<'tcx> {
key, dep_node);

profq_msg!(self, ProfileQueriesMsg::ProviderBegin);
self.sess.profiler(|p| p.start_query(Q::NAME));
let prof_timer = self.prof.query_provider(Q::NAME);

let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
self.start_query(job.job.clone(), diagnostics, |tcx| {
@@ -571,7 +567,7 @@ impl<'tcx> TyCtxt<'tcx> {
})
});

self.sess.profiler(|p| p.end_query(Q::NAME));
drop(prof_timer);
profq_msg!(self, ProfileQueriesMsg::ProviderEnd);

if unlikely!(self.sess.opts.debugging_opts.query_dep_graph) {
@@ -619,7 +615,7 @@ impl<'tcx> TyCtxt<'tcx> {
let _ = self.get_query::<Q>(DUMMY_SP, key);
} else {
profq_msg!(self, ProfileQueriesMsg::CacheHit);
self.sess.profiler(|p| p.record_query_hit(Q::NAME));
self.prof.query_cache_hit(Q::NAME);
}
}

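In `plumbing.rs`, the paired `start_query`/`end_query` (and `incremental_load_result_start`/`_end`) calls are replaced by a single call whose return value is either bound to `_prof_timer` and left to fall out of scope, or ended explicitly with `drop(prof_timer)`. The diff does not show the return type, but the usage implies an RAII guard that records the end of the interval in its `Drop` impl, so early exits can no longer skip the end event. Below is a minimal sketch of such a guard, using a hypothetical `TimingGuard` name and printing in place of real event recording; it illustrates the pattern, not the actual rustc type.

```rust
use std::time::Instant;

/// Sketch of an RAII timing guard of the kind `query_provider` /
/// `incr_cache_loading` appear to return: the interval ends when the
/// guard is dropped, so every exit path records the end event.
struct TimingGuard {
    label: &'static str,
    start: Instant,
}

impl TimingGuard {
    fn start(label: &'static str) -> TimingGuard {
        TimingGuard { label, start: Instant::now() }
    }
}

impl Drop for TimingGuard {
    fn drop(&mut self) {
        println!("{} took {:?}", self.label, self.start.elapsed());
    }
}

fn provide_query_result() -> u32 {
    // Equivalent of `let prof_timer = self.prof.query_provider(Q::NAME);`
    let _prof_timer = TimingGuard::start("query_provider: type_of");

    // ... run the query provider ...
    42
    // `_prof_timer` is dropped here; `drop(prof_timer)` in the diff simply ends
    // the interval explicitly before the remaining bookkeeping runs.
}

fn main() {
    let result = provide_query_result();
    println!("result = {}", result);
}
```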