Merge pull request #6 from sched-ext/merge_latest
Let's merge some of the fixes to the 6.8 release branch
htejun authored Jan 31, 2024
2 parents b962e86 + 9ba1cda commit 97c4853
Showing 13 changed files with 393 additions and 51 deletions.
5 changes: 5 additions & 0 deletions include/linux/sched/ext.h
@@ -131,6 +131,11 @@ struct scx_init_task_args {
 	/* the cgroup the task is joining */
 	struct cgroup *cgroup;
 #endif
+	/*
+	 * Set if ops.init_task() is being invoked on the fork path, as opposed
+	 * to the scheduler transition path.
+	 */
+	bool fork;
 };

 /* argument container for ops.exit_task() */
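For context, here is a minimal sketch of how a BPF scheduler's ops.init_task() callback might consume the new flag. The example_init_task name and comments are illustrative only; the one thing taken from this commit is the scx_init_task_args.fork field.

#include <scx/common.bpf.h>

/* Hypothetical ops.init_task() callback; not part of this commit. */
s32 BPF_STRUCT_OPS_SLEEPABLE(example_init_task, struct task_struct *p,
			     struct scx_init_task_args *args)
{
	if (args->fork) {
		/* @p was just created via fork(). */
	} else {
		/* @p already existed and is transitioning onto the scheduler. */
	}
	return 0;
}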
12 changes: 7 additions & 5 deletions kernel/sched/ext.c
@@ -2375,7 +2375,7 @@ static void scx_set_task_state(struct task_struct *p, enum scx_task_state state)
 	p->scx.flags |= state << SCX_TASK_STATE_SHIFT;
 }

-static int scx_ops_init_task(struct task_struct *p, struct task_group *tg)
+static int scx_ops_init_task(struct task_struct *p, struct task_group *tg, bool fork)
 {
 	int ret;

@@ -2384,6 +2384,7 @@ static int scx_ops_init_task(struct task_struct *p, struct task_group *tg)
 	if (SCX_HAS_OP(init_task)) {
 		struct scx_init_task_args args = {
 			SCX_INIT_TASK_ARGS_CGROUP(tg)
+			.fork = fork,
 		};

 		ret = SCX_CALL_OP_RET(SCX_KF_SLEEPABLE, init_task, p, &args);
@@ -2498,7 +2499,7 @@ int scx_fork(struct task_struct *p)
 	percpu_rwsem_assert_held(&scx_fork_rwsem);

 	if (scx_enabled())
-		return scx_ops_init_task(p, task_group(p));
+		return scx_ops_init_task(p, task_group(p), true);
 	else
 		return 0;
 }
@@ -3622,13 +3623,13 @@ static int scx_ops_enable(struct sched_ext_ops *ops)
 			   scx_create_rt_helper("sched_ext_ops_helper"));
 		if (!scx_ops_helper) {
 			ret = -ENOMEM;
-			goto err;
+			goto err_unlock;
 		}
 	}

 	if (scx_ops_enable_state() != SCX_OPS_DISABLED) {
 		ret = -EBUSY;
-		goto err;
+		goto err_unlock;
 	}

 	scx_exit_info = alloc_exit_info();
@@ -3782,7 +3783,7 @@ static int scx_ops_enable(struct sched_ext_ops *ops)
 		get_task_struct(p);
 		spin_unlock_irq(&scx_tasks_lock);

-		ret = scx_ops_init_task(p, task_group(p));
+		ret = scx_ops_init_task(p, task_group(p), false);
 		if (ret) {
 			put_task_struct(p);
 			spin_lock_irq(&scx_tasks_lock);
@@ -3876,6 +3877,7 @@ static int scx_ops_enable(struct sched_ext_ops *ops)
 		free_exit_info(scx_exit_info);
 		scx_exit_info = NULL;
 	}
+err_unlock:
 	mutex_unlock(&scx_ops_enable_mutex);
 	return ret;

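The goto changes above fix error unwinding in scx_ops_enable(): failures that occur before scx_exit_info is allocated must not fall into the cleanup path that frees it, so they now jump to a new err_unlock label that only releases the mutex. Below is a standalone sketch of this common kernel pattern with simplified, hypothetical names; it is not the kernel function itself.

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t enable_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Early failures jump to err_unlock; only failures after the
 * allocation jump to err, which frees before unlocking. */
static int enable_sketch(int busy, int setup_fails)
{
	char *exit_info = NULL;
	int ret;

	pthread_mutex_lock(&enable_mutex);

	if (busy) {
		ret = -EBUSY;
		goto err_unlock;	/* nothing allocated yet */
	}

	exit_info = malloc(64);
	if (!exit_info) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	if (setup_fails) {
		ret = -EINVAL;
		goto err;		/* must free exit_info first */
	}

	pthread_mutex_unlock(&enable_mutex);
	return 0;

err:
	free(exit_info);
err_unlock:
	pthread_mutex_unlock(&enable_mutex);
	return ret;
}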
20 changes: 6 additions & 14 deletions tools/testing/selftests/scx/.gitignore
@@ -1,14 +1,6 @@
-ddsp_bogus_dsq_fail
-ddsp_vtimelocal_fail
-enq_last_no_enq_fails
-enq_select_cpu_fails
-init_enable_count
-minimal
-runner
-select_cpu_dfl
-select_cpu_dfl_nodispatch
-select_cpu_dispatch
-select_cpu_dispatch_dbl_dsp
-select_cpu_dispatch_bad_dsq
-select_cpu_vtime
-build/
+*
+!*.c
+!*.h
+!Makefile
+!.gitignore
+!config
15 changes: 9 additions & 6 deletions tools/testing/selftests/scx/Makefile
@@ -155,21 +155,28 @@ override define CLEAN
 	rm -f runner
 endef

+# Every testcase takes all of the BPF progs as dependencies by default. This
+# allows testcases to load any BPF scheduler, which is useful for testcases
+# that don't need their own prog to run their test.
+all_test_bpfprogs := $(foreach prog,$(wildcard *.bpf.c),$(INCLUDE_DIR)/$(patsubst %.c,%.skel.h,$(prog)))
+
 auto-test-targets := \
 	enq_last_no_enq_fails \
 	enq_select_cpu_fails \
 	ddsp_bogus_dsq_fail \
 	ddsp_vtimelocal_fail \
 	init_enable_count \
+	maximal \
 	maybe_null \
 	minimal \
+	reload_loop \
 	select_cpu_dfl \
 	select_cpu_dfl_nodispatch \
 	select_cpu_dispatch \
 	select_cpu_dispatch_bad_dsq \
 	select_cpu_dispatch_dbl_dsp \
 	select_cpu_vtime \
-	test_example
+	test_example \

 testcase-targets := $(addsuffix .o,$(addprefix $(SCXOBJ_DIR)/,$(auto-test-targets)))

@@ -182,11 +189,7 @@ $(SCXOBJ_DIR)/runner.o: runner.c | $(SCXOBJ_DIR)
 # Note that we must do double expansion here in order to support conditionally
 # compiling BPF object files only if one is present, as the wildcard Make
 # function doesn't support using implicit rules otherwise.
-.SECONDEXPANSION:
-$(testcase-targets): $(SCXOBJ_DIR)/%.o: %.c $(SCXOBJ_DIR)/runner.o \
-	$$(if $$(wildcard $$*.bpf.c), $(INCLUDE_DIR)/%.bpf.skel.h) \
-	$$(if $$(wildcard $$*_fail.bpf.c), $(INCLUDE_DIR)/%_fail.bpf.skel.h) \
-	| $(SCXOBJ_DIR)
+$(testcase-targets): $(SCXOBJ_DIR)/%.o: %.c $(SCXOBJ_DIR)/runner.o $(all_test_bpfprogs) | $(SCXOBJ_DIR)
 	$(eval test=$(patsubst %.o,%.c,$(notdir $@)))
 	$(CC) $(CFLAGS) -c $< -o $@ $(SCXOBJ_DIR)/runner.o

6 changes: 6 additions & 0 deletions tools/testing/selftests/scx/init_enable_count.bpf.c
@@ -13,13 +13,19 @@
 char _license[] SEC("license") = "GPL";

 u64 init_task_cnt, exit_task_cnt, enable_cnt, disable_cnt;
+u64 init_fork_cnt, init_transition_cnt;
 volatile const bool switch_all;

 s32 BPF_STRUCT_OPS_SLEEPABLE(cnt_init_task, struct task_struct *p,
 			     struct scx_init_task_args *args)
 {
 	__sync_fetch_and_add(&init_task_cnt, 1);

+	if (args->fork)
+		__sync_fetch_and_add(&init_fork_cnt, 1);
+	else
+		__sync_fetch_and_add(&init_transition_cnt, 1);
+
 	return 0;
 }

36 changes: 34 additions & 2 deletions tools/testing/selftests/scx/init_enable_count.c
@@ -35,15 +35,39 @@ static enum scx_test_status run_test(bool global)
 {
 	struct init_enable_count *skel;
 	struct bpf_link *link;
-	const u32 num_children = 5;
+	const u32 num_children = 5, num_pre_forks = 1024;
 	int ret, i, status;
 	struct sched_param param = {};
-	pid_t pids[num_children];
+	pid_t pids[num_pre_forks];

 	skel = open_load_prog(global);

+	/*
+	 * Fork a bunch of children before we attach the scheduler so that we
+	 * ensure (at least in practical terms) that there are more tasks that
+	 * transition from SCHED_OTHER -> SCHED_EXT than there are tasks that
+	 * take the fork() path either below or in other processes.
+	 */
+	for (i = 0; i < num_pre_forks; i++) {
+		pids[i] = fork();
+		SCX_FAIL_IF(pids[i] < 0, "Failed to fork child");
+		if (pids[i] == 0) {
+			sleep(1);
+			exit(0);
+		}
+	}
+
 	link = bpf_map__attach_struct_ops(skel->maps.init_enable_count_ops);
 	SCX_FAIL_IF(!link, "Failed to attach struct_ops");

+	for (i = 0; i < num_pre_forks; i++) {
+		SCX_FAIL_IF(waitpid(pids[i], &status, 0) != pids[i],
+			    "Failed to wait for pre-forked child\n");
+
+		SCX_FAIL_IF(status != 0, "Pre-forked child %d exited with status %d\n", i,
+			    status);
+	}
+
+	/* SCHED_EXT children */
 	for (i = 0; i < num_children; i++) {
 		pids[i] = fork();
@@ -101,6 +125,14 @@ static enum scx_test_status run_test(bool global)
 		SCX_EQ(skel->bss->enable_cnt, num_children);
 		SCX_EQ(skel->bss->disable_cnt, num_children);
 	}
+	/*
+	 * We forked a ton of tasks before we attached the scheduler above, so
+	 * this should be fine. Technically it could be flaky if a ton of forks
+	 * are happening at the same time in other processes, but that should
+	 * be exceedingly unlikely.
+	 */
+	SCX_GT(skel->bss->init_transition_cnt, skel->bss->init_fork_cnt);
+	SCX_GE(skel->bss->init_fork_cnt, 2 * num_children);

 	bpf_link__destroy(link);
 	init_enable_count__destroy(skel);
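The hunk above jumps from the pre-fork setup to the final assertions; in the elided middle, the test presumably moves the children onto sched_ext so that enable_cnt and disable_cnt can be compared against num_children. Here is a hedged sketch of how a task switches itself to the sched_ext policy; the SCHED_EXT constant would come from the sched_ext UAPI headers, and this exact call is an assumption, since that part of the file is not shown in this view.

#include <sched.h>

/* Hypothetical helper: switch the calling thread to SCHED_EXT.
 * SCHED_EXT is assumed to be provided by the sched_ext headers. */
static int become_scx_task(void)
{
	struct sched_param param = {};

	/* pid 0 means the calling thread */
	return sched_setscheduler(0, SCHED_EXT, &param);
}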
164 changes: 164 additions & 0 deletions tools/testing/selftests/scx/maximal.bpf.c
@@ -0,0 +1,164 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * A scheduler with every callback defined.
 *
 * This scheduler defines every callback.
 *
 * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2024 David Vernet <[email protected]>
 */

#include <scx/common.bpf.h>

char _license[] SEC("license") = "GPL";

s32 BPF_STRUCT_OPS(maximal_select_cpu, struct task_struct *p, s32 prev_cpu,
		   u64 wake_flags)
{
	return prev_cpu;
}

void BPF_STRUCT_OPS(maximal_enqueue, struct task_struct *p, u64 enq_flags)
{
	scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
}

void BPF_STRUCT_OPS(maximal_dequeue, struct task_struct *p, u64 deq_flags)
{}

void BPF_STRUCT_OPS(maximal_dispatch, s32 cpu, struct task_struct *prev)
{
	scx_bpf_consume(SCX_DSQ_GLOBAL);
}

void BPF_STRUCT_OPS(maximal_runnable, struct task_struct *p, u64 enq_flags)
{}

void BPF_STRUCT_OPS(maximal_running, struct task_struct *p)
{}

void BPF_STRUCT_OPS(maximal_stopping, struct task_struct *p, bool runnable)
{}

void BPF_STRUCT_OPS(maximal_quiescent, struct task_struct *p, u64 deq_flags)
{}

bool BPF_STRUCT_OPS(maximal_yield, struct task_struct *from,
		    struct task_struct *to)
{
	return false;
}

bool BPF_STRUCT_OPS(maximal_core_sched_before, struct task_struct *a,
		    struct task_struct *b)
{
	return false;
}

void BPF_STRUCT_OPS(maximal_set_weight, struct task_struct *p, u32 weight)
{}

void BPF_STRUCT_OPS(maximal_set_cpumask, struct task_struct *p,
		    const struct cpumask *cpumask)
{}

void BPF_STRUCT_OPS(maximal_update_idle, s32 cpu, bool idle)
{}

void BPF_STRUCT_OPS(maximal_cpu_acquire, s32 cpu,
		    struct scx_cpu_acquire_args *args)
{}

void BPF_STRUCT_OPS(maximal_cpu_release, s32 cpu,
		    struct scx_cpu_release_args *args)
{}

void BPF_STRUCT_OPS(maximal_cpu_online, s32 cpu)
{}

void BPF_STRUCT_OPS(maximal_cpu_offline, s32 cpu)
{}

s32 BPF_STRUCT_OPS(maximal_init_task, struct task_struct *p,
		   struct scx_init_task_args *args)
{
	return 0;
}

void BPF_STRUCT_OPS(maximal_enable, struct task_struct *p)
{}

void BPF_STRUCT_OPS(maximal_exit_task, struct task_struct *p,
		    struct scx_exit_task_args *args)
{}

void BPF_STRUCT_OPS(maximal_disable, struct task_struct *p)
{}

s32 BPF_STRUCT_OPS(maximal_cgroup_init, struct cgroup *cgrp,
		   struct scx_cgroup_init_args *args)
{
	return 0;
}

void BPF_STRUCT_OPS(maximal_cgroup_exit, struct cgroup *cgrp)
{}

s32 BPF_STRUCT_OPS(maximal_cgroup_prep_move, struct task_struct *p,
		   struct cgroup *from, struct cgroup *to)
{
	return 0;
}

void BPF_STRUCT_OPS(maximal_cgroup_move, struct task_struct *p,
		    struct cgroup *from, struct cgroup *to)
{}

void BPF_STRUCT_OPS(maximal_cgroup_cancel_move, struct task_struct *p,
		    struct cgroup *from, struct cgroup *to)
{}

void BPF_STRUCT_OPS(maximal_cgroup_set_weight, struct cgroup *cgrp, u32 weight)
{}

s32 BPF_STRUCT_OPS_SLEEPABLE(maximal_init)
{
	return 0;
}

void BPF_STRUCT_OPS(maximal_exit, struct scx_exit_info *info)
{}

SEC(".struct_ops.link")
struct sched_ext_ops maximal_ops = {
	.select_cpu = maximal_select_cpu,
	.enqueue = maximal_enqueue,
	.dequeue = maximal_dequeue,
	.dispatch = maximal_dispatch,
	.runnable = maximal_runnable,
	.running = maximal_running,
	.stopping = maximal_stopping,
	.quiescent = maximal_quiescent,
	.yield = maximal_yield,
	.core_sched_before = maximal_core_sched_before,
	.set_weight = maximal_set_weight,
	.set_cpumask = maximal_set_cpumask,
	.update_idle = maximal_update_idle,
	.cpu_acquire = maximal_cpu_acquire,
	.cpu_release = maximal_cpu_release,
	.cpu_online = maximal_cpu_online,
	.cpu_offline = maximal_cpu_offline,
	.init_task = maximal_init_task,
	.enable = maximal_enable,
	.exit_task = maximal_exit_task,
	.disable = maximal_disable,
	.cgroup_init = maximal_cgroup_init,
	.cgroup_exit = maximal_cgroup_exit,
	.cgroup_prep_move = maximal_cgroup_prep_move,
	.cgroup_move = maximal_cgroup_move,
	.cgroup_cancel_move = maximal_cgroup_cancel_move,
	.cgroup_set_weight = maximal_cgroup_set_weight,
	.init = maximal_init,
	.exit = maximal_exit,
	.name = "maximal",
};
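The userspace half of this testcase is not included in this view. Judging from the other testcases (e.g. init_enable_count.c above), it presumably loads the generated skeleton and attaches the struct_ops map. A sketch under that assumption; the maximal__* functions would come from the generated maximal.bpf.skel.h, which is not shown here.

#include <bpf/libbpf.h>
#include "maximal.bpf.skel.h"

/* Hypothetical loader: attach the every-callback scheduler, then detach. */
static int run_maximal(void)
{
	struct maximal *skel;
	struct bpf_link *link;

	skel = maximal__open_and_load();
	if (!skel)
		return -1;

	link = bpf_map__attach_struct_ops(skel->maps.maximal_ops);
	if (!link) {
		maximal__destroy(skel);
		return -1;
	}

	bpf_link__destroy(link);
	maximal__destroy(skel);
	return 0;
}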