Merge pull request sched-ext#648 from hodgesds/layered-llc-stats
scx_layered: Add stats for XNUMA/XLLC migrations
hodgesds committed Sep 12, 2024
2 parents ec7f756 + ae57f8d commit 47034a8
Showing 4 changed files with 169 additions and 9 deletions.
4 changes: 4 additions & 0 deletions scheds/rust/scx_layered/src/bpf/intf.h
@@ -71,6 +71,8 @@ enum layer_stat_idx {
LSTAT_YIELD,
LSTAT_YIELD_IGNORE,
LSTAT_MIGRATION,
LSTAT_XNUMA_MIGRATION,
LSTAT_XLLC_MIGRATION,
NR_LSTATS,
};

@@ -86,6 +88,8 @@ struct cpu_ctx {
u64 lstats[MAX_LAYERS][NR_LSTATS];
u64 ran_current_for;
u32 layer_idx;
u32 node_idx;
u32 cache_idx;
};

struct cache_ctx {
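
In combination, the new lstat entries and the node_idx/cache_idx fields let the running path classify a detected migration by testing the task's previous CPU against the current CPU's node and LLC cpumasks. A minimal sketch of that check, condensed from the layered_running() hunk in main.bpf.c below (error handling dropped, names as in the diff):

    struct node_ctx *nodec = lookup_node_ctx(cctx->node_idx);
    struct cache_ctx *cachec = lookup_cache_ctx(cctx->cache_idx);

    /* counted only after LSTAT_MIGRATION fired, i.e. last_cpu != task_cpu */
    if (nodec && nodec->cpumask &&
        !bpf_cpumask_test_cpu(tctx->last_cpu, nodec->cpumask))
            lstat_inc(LSTAT_XNUMA_MIGRATION, layer, cctx);
    if (cachec && cachec->cpumask &&
        !bpf_cpumask_test_cpu(tctx->last_cpu, cachec->cpumask))
            lstat_inc(LSTAT_XLLC_MIGRATION, layer, cctx);
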
155 changes: 147 additions & 8 deletions scheds/rust/scx_layered/src/bpf/main.bpf.c
@@ -99,6 +99,50 @@ static __noinline u64 layer_dsq_id(u32 layer_id, u32 llc_id)
return (layer_id * nr_llcs) + llc_id;
}

static inline u64 llc_hi_fallback_dsq_id(u32 llc_id)
{
return HI_FALLBACK_DSQ + llc_id;
}

static u64 llc_hi_fallback_dsq_iter_offset(int llc_offset, int idx)
{
int offset = llc_offset + idx;

if (offset >= nr_llcs)
return llc_hi_fallback_dsq_id(offset - nr_llcs);

return llc_hi_fallback_dsq_id(idx + llc_offset);
}

static u32 cpu_to_llc_id(s32 cpu_id)
{
const volatile u32 *llc_ptr;

llc_ptr = MEMBER_VPTR(cpu_llc_id_map, [cpu_id]);
if (!llc_ptr) {
scx_bpf_error("Couldn't look up llc ID for cpu %d", cpu_id);
return 0;
}
return *llc_ptr;
}

static int llc_iter_cpu_offset(int idx, s32 cpu)
{
int offset;

if (cpu <= 0)
return idx;

offset = (cpu % nr_llcs) + idx;

return offset >= nr_llcs ? offset - nr_llcs : offset;
}

static inline u64 cpu_hi_fallback_dsq_id(s32 cpu)
{
return llc_hi_fallback_dsq_id(cpu_to_llc_id(cpu));
}

struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__type(key, u32);
@@ -136,9 +180,6 @@ static u32 cpu_to_llc_id(s32 cpu_id)
return *llc_ptr;
}

/*
* Numa node context
*/
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__type(key, u32);
@@ -147,6 +188,30 @@ struct {
__uint(map_flags, 0);
} node_data SEC(".maps");

static struct node_ctx *lookup_node_ctx(u32 node)
{
struct node_ctx *nodec;

nodec = bpf_map_lookup_elem(&node_data, &node);
return nodec;
}

struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__type(key, u32);
__type(value, struct cache_ctx);
__uint(max_entries, MAX_DOMS);
__uint(map_flags, 0);
} cache_data SEC(".maps");

static struct cache_ctx *lookup_cache_ctx(u32 cache_idx)
{
struct cache_ctx *cachec;

cachec = bpf_map_lookup_elem(&cache_data, &cache_idx);
return cachec;
}

static void gstat_inc(enum global_stat_idx idx, struct cpu_ctx *cctx)
{
if (idx < 0 || idx >= NR_GSTATS) {
@@ -870,6 +935,7 @@ void BPF_STRUCT_OPS(layered_dispatch, s32 cpu, struct task_struct *prev)
struct cpu_ctx *cctx, *sib_cctx;
u32 idx, llc_id, layer_idx;
u64 dsq_id;
u32 node_id = cpu_node_id(cpu);

if (!(cctx = lookup_cpu_ctx(-1)))
return;
@@ -1166,6 +1232,7 @@ static s32 create_node(u32 node_id)
u32 cpu;
struct bpf_cpumask *cpumask;
struct node_ctx *nodec;
struct cpu_ctx *cctx;
s32 ret;

nodec = bpf_map_lookup_elem(&node_data, &node_id);
@@ -1198,8 +1265,58 @@
break;
}

if (*nmask & (1LLU << (cpu % 64)))
if (*nmask & (1LLU << (cpu % 64))) {
bpf_cpumask_set_cpu(cpu, cpumask);
if (!(cctx = lookup_cpu_ctx(-1))) {
scx_bpf_error("cpu ctx error");
ret = -ENOENT;
break;
}
cctx->node_idx = node_id;
}
}

bpf_rcu_read_unlock();
return ret;
}

static s32 create_cache(u32 cache_id)
{
u32 cpu, llc_id;
struct bpf_cpumask *cpumask;
struct cache_ctx *cachec;
struct cpu_ctx *cctx;
s32 ret;

cachec = bpf_map_lookup_elem(&cache_data, &cache_id);
if (!cachec) {
scx_bpf_error("No cache%u", cache_id);
return -ENOENT;
}
cachec->id = cache_id;

ret = create_save_cpumask(&cachec->cpumask);
if (ret)
return ret;

bpf_rcu_read_lock();
cpumask = cachec->cpumask;
if (!cpumask) {
bpf_rcu_read_unlock();
scx_bpf_error("Failed to lookup node cpumask");
return -ENOENT;
}

bpf_for(cpu, 0, MAX_CPUS) {
llc_id = cpu_to_llc_id(cpu);
if (llc_id != cache_id)
continue;

bpf_cpumask_set_cpu(cpu, cpumask);
if (!(cctx = lookup_cpu_ctx(-1))) {
scx_bpf_error("cpu ctx error");
ret = -ENOENT;
break;
}
cctx->cache_idx = cache_id;
}

bpf_rcu_read_unlock();
Expand All @@ -1225,14 +1342,27 @@ void BPF_STRUCT_OPS(layered_running, struct task_struct *p)
struct cpu_ctx *cctx;
struct task_ctx *tctx;
struct layer *layer;
struct node_ctx *nodec;
struct cache_ctx *cachec;
s32 task_cpu = scx_bpf_task_cpu(p);

if (!(cctx = lookup_cpu_ctx(-1)) || !(tctx = lookup_task_ctx(p)) ||
!(layer = lookup_layer(tctx->layer)))
return;

if (tctx->last_cpu >= 0 && tctx->last_cpu != task_cpu)
if (tctx->last_cpu >= 0 && tctx->last_cpu != task_cpu) {
lstat_inc(LSTAT_MIGRATION, layer, cctx);
if (!(nodec = lookup_node_ctx(cctx->node_idx)))
return;
if (nodec->cpumask &&
!bpf_cpumask_test_cpu(tctx->last_cpu, nodec->cpumask))
lstat_inc(LSTAT_XNUMA_MIGRATION, layer, cctx);
if (!(cachec = lookup_cache_ctx(cctx->cache_idx)))
return;
if (cachec->cpumask &&
!bpf_cpumask_test_cpu(tctx->last_cpu, cachec->cpumask))
lstat_inc(LSTAT_XLLC_MIGRATION, layer, cctx);
}
tctx->last_cpu = task_cpu;

if (vtime_before(layer->vtime_now, p->scx.dsq_vtime))
@@ -1485,6 +1615,7 @@ void BPF_STRUCT_OPS(layered_dump, struct scx_dump_ctx *dctx)
u64 now = bpf_ktime_get_ns();
int i, j, idx;
struct layer *layer;
u64 dsq_id;

bpf_for(i, 0, nr_layers) {
layer = lookup_layer(i);
@@ -1512,9 +1643,12 @@ void BPF_STRUCT_OPS(layered_dump, struct scx_dump_ctx *dctx)
scx_bpf_dump("\n");
}

scx_bpf_dump("HI_FALLBACK nr_queued=%d -%llums\n",
scx_bpf_dsq_nr_queued(HI_FALLBACK_DSQ),
dsq_first_runnable_for_ms(HI_FALLBACK_DSQ, now));
bpf_for(i, 0, nr_llcs) {
dsq_id = llc_hi_fallback_dsq_id(i);
scx_bpf_dump("HI_FALLBACK[%d] nr_queued=%d -%llums\n",
dsq_id, scx_bpf_dsq_nr_queued(dsq_id),
dsq_first_runnable_for_ms(dsq_id, now));
}
scx_bpf_dump("LO_FALLBACK nr_queued=%d -%llums\n",
scx_bpf_dsq_nr_queued(LO_FALLBACK_DSQ),
dsq_first_runnable_for_ms(LO_FALLBACK_DSQ, now));
@@ -1560,6 +1694,11 @@ s32 BPF_STRUCT_OPS_SLEEPABLE(layered_init)
if (ret)
return ret;
}
bpf_for(i, 0, nr_llcs) {
ret = create_cache(i);
if (ret)
return ret;
}

dbg("CFG: Dumping configuration, nr_online_cpus=%d smt_enabled=%d",
nr_online_cpus, smt_enabled);
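
The per-LLC high-priority fallback DSQs added above are simply HI_FALLBACK_DSQ offset by the LLC id, and llc_hi_fallback_dsq_iter_offset() walks them starting from a given local offset and wrapping past nr_llcs. For illustration only (values assumed, not taken from the diff), with nr_llcs == 4 and llc_offset == 2 the walk visits:

    llc_hi_fallback_dsq_iter_offset(2, 0)  /* -> HI_FALLBACK_DSQ + 2, the local LLC */
    llc_hi_fallback_dsq_iter_offset(2, 1)  /* -> HI_FALLBACK_DSQ + 3 */
    llc_hi_fallback_dsq_iter_offset(2, 2)  /* -> HI_FALLBACK_DSQ + 0, wraps past nr_llcs */
    llc_hi_fallback_dsq_iter_offset(2, 3)  /* -> HI_FALLBACK_DSQ + 1 */

i.e. the local LLC first, then the remaining LLCs round-robin.
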
9 changes: 9 additions & 0 deletions scheds/rust/scx_layered/src/main.rs
@@ -1685,6 +1685,15 @@ impl<'a, 'b> Scheduler<'a, 'b> {
node.llcs().len()
);
skel.maps.rodata_data.nr_llcs += node.llcs().len() as u32;
let raw_numa_slice = node.span().as_raw_slice();
let node_cpumask_slice = &mut skel.maps.rodata_data.numa_cpumasks[node.id()];
let (left, _) = node_cpumask_slice.split_at_mut(raw_numa_slice.len());
left.clone_from_slice(raw_numa_slice);
debug!(
"node {} mask: {:?}",
node.id(),
skel.maps.rodata_data.numa_cpumasks[node.id()]
);

for (_, llc) in node.llcs() {
debug!("configuring llc {:?} for node {:?}", llc.id(), node.id());
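
The hunk above copies each NUMA node's CPU span into the front of its fixed-size numa_cpumasks rodata entry and leaves the tail untouched. As a purely hypothetical example (topology not from the diff): on a 2-node, 16-CPU machine where node 1 owns CPUs 8-15, raw_numa_slice would be a single mask word of 0xff00, so only the first word of numa_cpumasks[1] is overwritten and the remaining words presumably stay zeroed.
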
10 changes: 9 additions & 1 deletion scheds/rust/scx_layered/src/stats.rs
@@ -103,6 +103,10 @@ pub struct LayerStats {
pub yield_ignore: u64,
#[stat(desc = "% migrated across CPUs")]
pub migration: f64,
#[stat(desc = "% migrated across NUMA nodes")]
pub xnuma_migration: f64,
#[stat(desc = "% migrated across LLCs")]
pub xllc_migration: f64,
#[stat(desc = "mask of allocated CPUs", _om_skip)]
pub cpus: Vec<u32>,
#[stat(desc = "# of CPUs assigned")]
@@ -188,6 +192,8 @@ impl LayerStats {
yielded: lstat_pct(bpf_intf::layer_stat_idx_LSTAT_YIELD),
yield_ignore: lstat(bpf_intf::layer_stat_idx_LSTAT_YIELD_IGNORE) as u64,
migration: lstat_pct(bpf_intf::layer_stat_idx_LSTAT_MIGRATION),
xnuma_migration: lstat_pct(bpf_intf::layer_stat_idx_LSTAT_XNUMA_MIGRATION),
xllc_migration: lstat_pct(bpf_intf::layer_stat_idx_LSTAT_XLLC_MIGRATION),
cpus: Self::bitvec_to_u32s(&layer.cpus),
cur_nr_cpus: layer.cpus.count_ones() as u32,
min_nr_cpus: nr_cpus_range.0 as u32,
@@ -235,10 +241,12 @@ impl LayerStats {

writeln!(
w,
" {:<width$} open_idle={} mig={} affn_viol={}",
" {:<width$} open_idle={} mig={} xnuma_mig={} xllc_mig={} affn_viol={}",
"",
fmt_pct(self.open_idle),
fmt_pct(self.migration),
fmt_pct(self.xnuma_migration),
fmt_pct(self.xllc_migration),
fmt_pct(self.affn_viol),
width = header_width,
)?;
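
With the widened format string above, each layer's detail line reports the cross-NUMA and cross-LLC migration percentages next to the existing migration stat. Illustratively (numbers made up; exact spacing depends on fmt_pct and the computed header width), the line might render as:

    open_idle=1.2 mig=9.8 xnuma_mig=0.4 xllc_mig=3.1 affn_viol=0.0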
