1 change: 1 addition & 0 deletions scheds/include/scx/common.bpf.h
@@ -103,6 +103,7 @@ s32 scx_bpf_pick_any_cpu(const cpumask_t *cpus_allowed, u64 flags) __ksym;
 bool scx_bpf_task_running(const struct task_struct *p) __ksym;
 s32 scx_bpf_task_cpu(const struct task_struct *p) __ksym;
 struct rq *scx_bpf_cpu_rq(s32 cpu) __ksym;
+struct task_struct *scx_bpf_task_acquire_remote_curr(s32 cpu) __ksym;
 struct cgroup *scx_bpf_task_cgroup(struct task_struct *p) __ksym __weak;
 u64 scx_bpf_now(void) __ksym __weak;
 void scx_bpf_events(struct scx_event_stats *events, size_t events__sz) __ksym __weak;
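The new kfunc returns an acquired reference to the remote CPU's current task, or NULL. A minimal sketch of the calling convention, inferred only from the call sites below (NULL check on failure, mandatory bpf_task_release() pairing); the helper name is illustrative and not part of this change:

/* Illustrative only: return the PID running on @cpu, or -1. */
static inline s32 remote_curr_pid(s32 cpu)
{
    struct task_struct *p = scx_bpf_task_acquire_remote_curr(cpu);
    s32 pid;

    if (!p)
        return -1;
    pid = p->pid;
    /* p is an acquired reference and must always be released. */
    bpf_task_release(p);
    return pid;
}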
9 changes: 8 additions & 1 deletion scheds/rust/scx_cosmos/src/bpf/main.bpf.c
@@ -308,7 +308,14 @@ static inline const struct cpumask *get_idle_smtmask(s32 cpu)
  */
 static inline bool is_cpu_idle(s32 cpu)
 {
-    return scx_bpf_cpu_rq(cpu)->curr->flags & PF_IDLE;
+    struct task_struct *p = scx_bpf_task_acquire_remote_curr(cpu);
+    bool is_idle;
+
+    if (!p)
+        return false;
+    is_idle = p->flags & PF_IDLE;
+    bpf_task_release(p);
+    return is_idle;
 }
 
 /*
11 changes: 6 additions & 5 deletions scheds/rust/scx_flash/src/bpf/main.bpf.c
@@ -1985,15 +1985,15 @@ static int tickless_timerfn(void *map, int *key, struct bpf_timer *timer)
     */
    bpf_rcu_read_lock();
    bpf_for(cpu, 0, nr_cpu_ids) {
-        struct task_struct *p;
-        struct rq *rq = scx_bpf_cpu_rq(cpu);
+        struct task_struct *p = scx_bpf_task_acquire_remote_curr(cpu);
 
-        if (!rq)
+        if (!p)
            continue;
        /*
         * Ignore CPU if idle task is running.
         */
-        p = rq->curr;
-        if (p->flags & PF_IDLE)
+        if (p->flags & PF_IDLE) {
+            bpf_task_release(p);
            continue;
+        }
 
@@ -2009,6 +2009,7 @@ static int tickless_timerfn(void *map, int *key, struct bpf_timer *timer)
         */
        if (p->scx.slice == SCX_SLICE_INF)
            p->scx.slice = slice_min;
+        bpf_task_release(p);
    }
    bpf_rcu_read_unlock();

10 changes: 6 additions & 4 deletions scheds/rust/scx_lavd/src/bpf/preempt.bpf.c
@@ -189,11 +189,10 @@ static void ask_cpu_yield_after(struct cpu_ctx *victim_cpuc, u64 new_slice)
     * set the victim task's time slice to zero so the victim task yields
     * the CPU in the next scheduling point.
     */
-    struct rq *victim_rq;
    struct task_struct *victim_p;
 
-    victim_rq = scx_bpf_cpu_rq(victim_cpuc->cpu_id);
-    if (victim_rq && (victim_p = victim_rq->curr)) {
+    victim_p = scx_bpf_task_acquire_remote_curr(victim_cpuc->cpu_id);
+    if (victim_p) {
        /*
         * Finding a victim is racy, but we do not coordinate. Thus,
         * two different CPUs can choose the same victim CPU. We do not
@@ -213,8 +212,10 @@ static void ask_cpu_yield_after(struct cpu_ctx *victim_cpuc, u64 new_slice)
         * (SCX_SLICE_DFL, 20 msec).
         */
        u64 old = victim_cpuc->est_stopping_clk;
-        if (!old)
+        if (!old) {
+            bpf_task_release(victim_p);
            return;
+        }
 
        /*
         * If the new slice is one, this is the last time to be kicked,
@@ -232,6 +233,7 @@ static void ask_cpu_yield_after(struct cpu_ctx *victim_cpuc, u64 new_slice)
            if (victim_p->scx.slice > new_slice)
                WRITE_ONCE(victim_p->scx.slice, new_slice);
        }
+        bpf_task_release(victim_p);
    }
 }

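The extra release in the early-return path above is not optional: assuming scx_bpf_task_acquire_remote_curr() is an acquiring kfunc (which the paired bpf_task_release() calls indicate), the BPF verifier rejects any program path that can exit while still holding the reference. A minimal sketch of the pattern, with an illustrative helper name:

/* Illustrative only: slice of the remote task, 0 if idle or absent. */
static inline u64 remote_curr_slice(s32 cpu)
{
    struct task_struct *p = scx_bpf_task_acquire_remote_curr(cpu);
    u64 slice;

    if (!p)
        return 0;
    if (p->flags & PF_IDLE) {
        bpf_task_release(p);    /* early exits must release too */
        return 0;
    }
    slice = p->scx.slice;
    bpf_task_release(p);
    return slice;
}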
8 changes: 4 additions & 4 deletions scheds/rust/scx_layered/src/bpf/main.bpf.c
@@ -1249,7 +1249,6 @@ static bool try_preempt_cpu(s32 cand, struct task_struct *p, struct task_ctx *ta
               struct layer *layer, u64 flags)
 {
    struct cpu_ctx *cpuc, *cand_cpuc, *sib_cpuc = NULL;
-    struct rq *rq;
    struct task_struct *curr;
    const struct cpumask *idle_cpumask;
    bool cand_idle;
@@ -1276,19 +1275,20 @@ static bool try_preempt_cpu(s32 cand, struct task_struct *p, struct task_ctx *ta
    if (scx_bpf_dsq_nr_queued(SCX_DSQ_LOCAL_ON | cand))
        return false;
 
-    rq = scx_bpf_cpu_rq(cand);
-    if (!rq)
+    curr = scx_bpf_task_acquire_remote_curr(cand);
+    if (!curr)
        return false;
-    curr = rq->curr;
 
    if (ext_sched_class_addr && idle_sched_class_addr &&
        ((u64)curr->sched_class != ext_sched_class_addr) &&
        ((u64)curr->sched_class != idle_sched_class_addr)) {
+        bpf_task_release(curr);
        if (!(cpuc = lookup_cpu_ctx(-1)))
            return false;
        gstat_inc(GSTAT_SKIP_PREEMPT, cpuc);
        return false;
    }
+    bpf_task_release(curr);
 
    /*
     * Don't preempt if protection against is in effect. However, open
9 changes: 7 additions & 2 deletions scheds/rust/scx_tickless/src/bpf/main.bpf.c
@@ -414,12 +414,17 @@ static int sched_timerfn(void *map, int *key, struct bpf_timer *timer)
     */
    bpf_for(cpu, 0, nr_cpu_ids) {
        struct task_struct *p;
+        bool idle;
 
        /*
         * Ignore CPU if idle task is running.
         */
-        p = scx_bpf_cpu_rq(cpu)->curr;
-        if (p->flags & PF_IDLE)
+        p = scx_bpf_task_acquire_remote_curr(cpu);
+        if (!p)
+            continue;
+        idle = p->flags & PF_IDLE;
+        bpf_task_release(p);
+        if (idle)
            continue;
 
        /*