Revert "scx_flatcg: Keep cgroup rb nodes stashed" #375

Merged
merged 1 commit into from
Jun 21, 2024
Merged
scheds/c/scx_flatcg.bpf.c: 26 changes (18 additions, 8 deletions)
@@ -99,7 +99,6 @@ struct cgv_node {
 	struct bpf_rb_node	rb_node;
 	__u64			cvtime;
 	__u64			cgid;
-	struct bpf_refcount	refcount;
 };
 
 private(CGV_TREE) struct bpf_spin_lock cgv_tree_lock;
@@ -289,17 +288,14 @@ static void cgrp_enqueued(struct cgroup *cgrp, struct fcg_cgrp_ctx *cgc)
 	}
 
 	stash = bpf_map_lookup_elem(&cgv_node_stash, &cgid);
-	if (!stash || !stash->node) {
+	if (!stash) {
 		scx_bpf_error("cgv_node lookup failed for cgid %llu", cgid);
 		return;
 	}
 
-	cgv_node = bpf_refcount_acquire(stash->node);
+	/* NULL if the node is already on the rbtree */
+	cgv_node = bpf_kptr_xchg(&stash->node, NULL);
 	if (!cgv_node) {
-		/*
-		 * Node never leaves cgv_node_stash, this should only happen if
-		 * fcg_cgroup_exit deletes the stashed node
-		 */
 		stat_inc(FCG_STAT_ENQ_RACE);
 		return;
 	}
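
For readers unfamiliar with the pattern this hunk restores: the stash map holds each cgroup's rbtree node as an owned kptr, and bpf_kptr_xchg() atomically moves ownership out of the map value. Below is a minimal, self-contained sketch of that enqueue-side pattern. It is illustrative only, not code from this PR: the include path and enqueue_cgroup_sketch() are assumptions, while the type, map, tree, and lock names mirror the scheduler's.

#include <scx/common.bpf.h>	/* assumed scx BPF header: bpf_kptr_xchg(), rbtree kfuncs, etc. */

struct cgv_node {
	struct bpf_rb_node	rb_node;
	__u64			cvtime;
	__u64			cgid;
};

struct cgv_node_stash {
	struct cgv_node __kptr	*node;	/* owned kptr; NULL while the node is on the rbtree */
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 16384);
	__type(key, __u64);
	__type(value, struct cgv_node_stash);
} cgv_node_stash SEC(".maps");

private(CGV_TREE) struct bpf_spin_lock cgv_tree_lock;
private(CGV_TREE) struct bpf_rb_root cgv_tree __contains(cgv_node, rb_node);

static bool cgv_node_less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
{
	struct cgv_node *cgc_a, *cgc_b;

	cgc_a = container_of(a, struct cgv_node, rb_node);
	cgc_b = container_of(b, struct cgv_node, rb_node);

	return cgc_a->cvtime < cgc_b->cvtime;
}

/* hypothetical helper, not from the PR */
static void enqueue_cgroup_sketch(__u64 cgid)
{
	struct cgv_node_stash *stash;
	struct cgv_node *node;

	stash = bpf_map_lookup_elem(&cgv_node_stash, &cgid);
	if (!stash)
		return;		/* cgroup already exited */

	/*
	 * Atomically take the node out of its stash slot. NULL means some
	 * other path already moved it onto the rbtree (the ENQ_RACE case
	 * in the hunk above).
	 */
	node = bpf_kptr_xchg(&stash->node, NULL);
	if (!node)
		return;

	/* we own the node now; rbtree_add transfers that ownership to the tree */
	bpf_spin_lock(&cgv_tree_lock);
	bpf_rbtree_add(&cgv_tree, &node->rb_node, cgv_node_less);
	bpf_spin_unlock(&cgv_tree_lock);
}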
@@ -612,6 +608,7 @@ void BPF_STRUCT_OPS(fcg_cgroup_set_weight, struct cgroup *cgrp, u32 weight)
 static bool try_pick_next_cgroup(u64 *cgidp)
 {
 	struct bpf_rb_node *rb_node;
+	struct cgv_node_stash *stash;
 	struct cgv_node *cgv_node;
 	struct fcg_cgrp_ctx *cgc;
 	struct cgroup *cgrp;
@@ -695,6 +692,12 @@ static bool try_pick_next_cgroup(u64 *cgidp)
 	return true;
 
 out_stash:
+	stash = bpf_map_lookup_elem(&cgv_node_stash, &cgid);
+	if (!stash) {
+		stat_inc(FCG_STAT_PNC_GONE);
+		goto out_free;
+	}
+
 	/*
 	 * Paired with cmpxchg in cgrp_enqueued(). If they see the following
 	 * transition, they'll enqueue the cgroup. If they are earlier, we'll
@@ -707,9 +710,16 @@
 		bpf_rbtree_add(&cgv_tree, &cgv_node->rb_node, cgv_node_less);
 		bpf_spin_unlock(&cgv_tree_lock);
 		stat_inc(FCG_STAT_PNC_RACE);
-		return false;
+	} else {
+		cgv_node = bpf_kptr_xchg(&stash->node, cgv_node);
+		if (cgv_node) {
+			scx_bpf_error("unexpected !NULL cgv_node stash");
+			goto out_free;
+		}
 	}
 
+	return false;
+
 out_free:
 	bpf_obj_drop(cgv_node);
 	return false;
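
The dispatch-side hunks above restore the inverse hand-off: when the picked cgroup turns out to have no queued tasks, the node removed from the rbtree goes back into its stash slot, and a missing stash entry (the FCG_STAT_PNC_GONE case) means the cgroup exited, so the node must be freed. Here is a hypothetical companion helper to the sketch above; return_cgv_node_sketch() is not from the PR, and real code passing an owned reference into a subprog is subject to the verifier's usual ownership rules.

/* hypothetical helper; reuses the cgv_node definitions sketched earlier */
static void return_cgv_node_sketch(__u64 cgid, struct cgv_node *node)
{
	struct cgv_node_stash *stash;

	stash = bpf_map_lookup_elem(&cgv_node_stash, &cgid);
	if (!stash) {
		/* cgroup exited while its node was off the tree; we must free it */
		bpf_obj_drop(node);
		return;
	}

	/* hand ownership back to the stash; the slot should currently be NULL */
	node = bpf_kptr_xchg(&stash->node, node);
	if (node)
		bpf_obj_drop(node);	/* non-NULL mirrors the scx_bpf_error() path above */
}

The invariant the revert restores: a cgv_node is owned by exactly one place at a time, either the stash slot or the rbtree, with bpf_kptr_xchg() as the atomic hand-off, rather than being pinned in the stash and shared via bpf_refcount_acquire().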